{-# OPTIONS --cubical #-}
open import Agda.Builtin.Cubical.Path
data I : Set where
zero one : I
zero≡one : zero ≡ one
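-- Note (illustrative): with the --cubical option, a data declaration may
-- contain path constructors; zero≡one makes this I a higher inductive type
-- in which the two point constructors are identified.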
|
function M = perform_local_dct_transform(M,dir,w)
% perform_local_dct_transform - perform a JPEG-like transform
%
% M = perform_local_dct_transform(M,dir,w);
%
% dir=+1 : forward transform
% dir=-1 : backward transform
%
% w is the size of the blocks (default: 8).
%
% Copyright (c) 2004 Gabriel Peyre
if nargin<3
w = 8;
end
n = size(M,1);
for i=1:floor(n/w)
for j=1:floor(n/w)
selx = (i-1)*w+1:min(i*w,n);
sely = (j-1)*w+1:min(j*w,n);
if dir==1
M(selx,sely) = perform_dct_transform(M(selx,sely),+1);
else
M(selx,sely) = perform_dct_transform(M(selx,sely),-1);
end
end
end
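% Minimal usage sketch (illustrative; assumes the per-block helper
% perform_dct_transform called above is on the path):
%   MJ = perform_local_dct_transform(M, +1, 8);  % blockwise forward DCT
%   M2 = perform_local_dct_transform(MJ, -1, 8); % inverse; M2 should match M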
|
Mermaid Sushi operates a takeout service within the Davis Food Coop with a Burmese sushi chef available to prepare custom platters! Several lunch-size combination platters are already prepared and ready to go in a refrigerated case, or customers can order by phone or on the spot from the short takeout menu. Custom platters are also available, but it's suggested you phone such requests in advance.
Prices are fairly reasonable for ready-made sushi, and it is not overly dry, as is sometimes the case with refrigerated sushi. The six-piece Mermaid's Friends nigiri sushi combination included ebi (shrimp), hamachi (yellowtail), two pieces of tuna, salmon, and barbecued eel for $7.50; chopsticks, wasabi, pickled ginger, and a soy sauce packet are included. The fish itself was tender and the sushi well prepared.
Mermaid Sushi is headquartered in Santa Rosa and operates a team of experienced sushi chefs who provide fresh, made-daily, ready-to-go sushi to grocery stores and supermarkets throughout the United States.
To find out what other sushi can be found in Davis, check out our Restaurants page.
2006-01-28 09:25:21   Is this any good? My experience with store-bought sushi is: No. Users/ApolloStumpy
2006-01-28 09:39:11   I've been happy with all of the selections that I've tried. It is made fresh right there, so it may not be like other store sushi that you have tried. Users/JasonAller
2006-01-28 09:51:06   In my experience, the only downside to this stuff is the fact that I've had each of their standard offerings so often that I'm a little tired of them. Definitely high quality for the price. Users/GrahamFreeman
2006-09-06 19:13:07   I love this place, best takeout sushi from a grocery store I've had! Users/MichelleAccurso
2006-09-07 19:29:34   They've started doing brown rice sushi, too... Users/EmmaCoats
2007-07-16 14:00:21   I just had some of their nigiri sushi (salmon + eel) and it was pretty good. Slightly pricey, but I think you get what you pay for ($7.99 gave me 3 LARGE pieces each of salmon and eel). The fish tasted very good and I'd definitely get it again. Users/RohiniJasavala
2008-03-20 17:24:33   Does anyone know where I can buy sushi-grade fish for making my own sushi? Users/dorkboy42
From East to West: wiki:Sacramento:Seafood City, wiki:Sacramento:Otos Marketplace, Fins Market & Grill, wiki:Albany:Tokyo Fish Market. Seafood City and Fins Market & Grill don't seem to be particularly Japanese places though, so they may not be as good as the others for this. Users/NickSchmalenberger
2012-01-13 21:28:22   Got some sushi takeout today. Heard about Mermaid Sushi from a couple of friends who recommended it, so I went and ordered a Rainbow Roll and a Cali Roll (for the wife). The owner was very polite and knowledgeable with regard to his fish. I was impressed by his desire to offer fish from sustainable sources, even taking down some items from the menu because they were on the endangered list. Real crab in the Cali rolls, and the salmon and tuna on the Rainbow Roll were delicious! Users/SteveWong
2012-05-09 13:24:42   Amazing sushi, amazing owner who cares about this planet! Users/mm
|
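-- Draws "Bactrian the Double-Humped OCaml" through Idris bindings to OCaml's
-- Graphics library. Illustrative FFI usage: ocamlCall pairs an OCaml function
-- name with its Idris-side type; an Idrisobj module is assumed to exist on the
-- OCaml side, exposing f, front_legs and back_legs.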
import OCaml.IO
front_legss : List (Int, Int)
front_legss = [
(256, 246), (249, 287), (251, 343), (264, 346), (266, 306), (276, 276),
(282, 306), (278, 343), (292, 343), (298, 306), (298, 277), (299, 254),
(255, 246)
]
back_legss : List (Int, Int)
back_legss = [
(432, 243), (441, 289), (430, 334), (445, 334), (462, 275), (469, 328),
(476, 328), (478, 259), (454, 214), (461, 164), (407, 206)
]
set_color : Int -> OCaml_IO ()
set_color = ocamlCall "Graphics.set_color" (Int -> OCaml_IO ())
fe : Int -> Int -> Int -> Int -> Int -> OCaml_IO ()
fe y_max x y w h = let y = y_max - y in
ocamlCall "Graphics.fill_ellipse" (Int -> Int -> Int -> Int -> OCaml_IO ())
(x + (w `div` 2)) (y - (h `div` 2)) (w `div` 2) (h `div` 2)
fp : Int -> Ptr -> OCaml_IO ()
fp y_max ar = do
f <- ocamlCall "Idrisobj.f" (Int -> OCaml_IO Ptr) y_max
arr <- ocamlCall "Array.map"
(Ptr -> Ptr -> OCaml_IO Ptr) f ar
ocamlCall "Graphics.fill_poly" (Ptr -> OCaml_IO ()) arr
loop : () -> OCaml_IO ()
loop _ = do
ocamlCall "Graphics.wait_next_event" (List Int -> OCaml_IO Ptr)
[0,1,2,3,4]
loop ()
main : OCaml_IO ()
main = do
ocamlCall "Graphics.open_graph" (String -> OCaml_IO ()) ""
ocamlCall "Graphics.auto_synchronize" (Bool -> OCaml_IO ()) False
ocamlCall "Graphics.clear_graph" (() -> OCaml_IO ()) ()
ocamlCall "Graphics.synchronize" (() -> OCaml_IO ()) ()
y_max <- ocamlCall "Graphics.size_y" (() -> OCaml_IO Int) ()
orange <- ocamlCall "Graphics.rgb" (Int -> Int -> Int -> OCaml_IO Int)
198 141 62
white <- ocamlCall "Graphics.white" (OCaml_IO Int)
black <- ocamlCall "Graphics.black" (OCaml_IO Int)
back_legs <- ocamlCall "Idrisobj.back_legs" (OCaml_IO Ptr)
front_legs <- ocamlCall "Idrisobj.front_legs" (OCaml_IO Ptr)
set_color orange
fe y_max 185 90 250 147
fe y_max 269 54 68 98
fe y_max 143 138 127 94
set_color white
fe y_max 89 (-79) 195 227
set_color orange
fe y_max 134 93 62 122
fe y_max 97 101 86 47
fe y_max 354 63 68 118
fe y_max 367 101 98 109
fe y_max 247 176 68 94
fp y_max back_legs
fp y_max front_legs
ocamlCall "Graphics.moveto" (Int -> Int -> OCaml_IO ()) 200 40
set_color black
ocamlCall "Graphics.draw_string" (String -> OCaml_IO ())
"Bactrian the Double-Humped OCaml"
ocamlCall "Graphics.synchronize" (() -> OCaml_IO ()) ()
loop ()
|
For proposals/collaborations/projects, you can write to me at: [email protected].
|
function rgb = brain
% returns a predefined color as [red green blue] values
%
% skin_surface = [255 213 119]/255;
% outer_skull_surface = [140 85 85]/255;
% inner_skull_surface = [202 100 100]/255;
% cortex = [255 213 119]/255;
% black = [0 0 0 ]/255;
% white = [255 255 255]/255;
% red = [255 0 0 ]/255;
% green = [0 192 0 ]/255;
% blue = [0 0 255]/255;
% yellow = [255 255 0 ]/255;
% cortex_light = [199 194 169]/255;
% cortex_dark = [100 97 85]/255;
rgb = [202 100 100]/255;
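% Illustrative use (hypothetical patch handle hp):
%   set(hp, 'FaceColor', brain)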
|
[STATEMENT]
lemma class_cons: "\<lbrakk> C \<noteq> fst x \<rbrakk> \<Longrightarrow> class (x # P) C = class P C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. C \<noteq> fst x \<Longrightarrow> class (x # P) C = class P C
[PROOF STEP]
by (simp add: class_def) |
function[h]=letterlabels(arg1,arg2,arg3)
%LETTERLABELS For automatically putting letter labels on subplots.
%
% LETTERLABELS puts letter labels '(a)','(b)','(c)' etc., on the
% subplots of the current figure, in the upper left-hand corner of
% each subplot window.
%
% LETTERLABELS(I) specifies the labelling position, clockwise from
% top left:
%
% I=1 Top left
% I=2 Top right
% I=3 Bottom right
% I=4 Bottom left
%
% LETTERLABELS(H,I), where H is a vector of handles to subplots,
% puts labels on the subplots indicated by H.
%
% LETTERLABELS(H,I,'d') begins the labelling with the letter 'd'.
%
% LETTERLABELS will not put the letters on the correct subplots if you
% click on them beforehand (because this changes the order of the
% subplot handles). Also, if you want to label the subplots in a
% nonstandard order, do so by reordering H.
% _________________________________________________________________
% This is part of JLAB --- type 'help jlab' for more information
% (C) 2000--2021 J.M. Lilly --- type 'help jlab_license' for details
if strcmpi(arg1,'--t')
return
end
py=0.08;
ar=get(gca,'plotboxaspectratio');
xstretch=ar(1)./ar(2);
px=py/xstretch*1.8;
firstlet=real('a');
i=1;
axhand=flipud(get(gcf,'children'));
bool=false(size(axhand));
%use a separate loop variable so the label position index i is not clobbered
for k=1:length(bool)
bool(k)=strcmp(axhand(k).Type,'axes');
end
axhand=axhand(bool);
for j=1:nargin
xx=eval(['arg' int2str(j)]);
isaxhandle=false;
if ishandle(xx)
if strcmp(get(xx,'Type'),'axes')
isaxhandle=true;
end
end
if ischar(xx)&~isaxhandle
firstlet=real(xx);
elseif (length(xx)==1)&&~isaxhandle
i=xx;
else
axhand=xx;
end
end
nax=length(axhand);
fact=1/100;
for j=1:length(axhand)
axes(axhand(j))
ax=axis;
isrevx=strcmpi(get(gca,'xdir'),'reverse');
isrevy=strcmpi(get(gca,'ydir'),'reverse');
if ~isrevx
x1=ax(1);
x2=ax(2);
else
x1=ax(2);
x2=ax(1);
end
if ~isrevy
y1=ax(3);
y2=ax(4);
else
y1=ax(4);
y2=ax(3);
end
if i==1
t=y2-(y2-y1)*fact;
b=y2-(y2-y1)*py;
l=x1+(x2-x1)*fact;
r=x1+(x2-x1)*px;
elseif i==2
t=y2-(y2-y1)*fact;
b=y2-(y2-y1)*py;
l=x2-(x2-x1)*px;
r=x2-(x2-x1)*fact;
elseif i==3
t=y1+(y2-y1)*py;
b=y1+(y2-y1)*fact;
l=x2-(x2-x1)*px;
r=x2-(x2-x1)*fact;
elseif i==4
t=y1+(y2-y1)*py;
b=y1+(y2-y1)*fact;
l=x1+(x2-x1)*fact;
r=x1+(x2-x1)*px;
end
%Tried setting log to lin before adding label but didn't help
%if ~strcmpi(get(gca,'yscale'),'log')
% h(j)=patch([l l r r],[b t t b],'w');
% set(h(j),'edgecolor',[1 1 1])
%end
cx=l/2+r/2;
cy=t/2+b/2;
text(l+(r-l)*0.2,cy,['(' char(firstlet-1+j),')']);
axis(ax)
end
if nargout==0
clear h
end
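% Minimal usage sketch (illustrative):
%   subplot(2,1,1), plot(1:10)
%   subplot(2,1,2), plot(10:-1:1)
%   letterlabels(1)   % puts '(a)','(b)' in the top left of each subplot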
|
This year, the 65th anniversary of the Cannes Film Festival is being celebrated with a mixture of well-known international auteurs and rising star Hollywood talents. This balance between auteur films and glamour, which has become a traditional formula for the competition line-up, has been couched as "a voyage through cinematographies around the world", as festival general delegate Thierry Fremaux pointed out in his introduction to the program.
To mark this special milestone in its glorious history, the "Festival de Cannes" has devoted nearly three-quarters of its main competition program to festival loyalists: Alain Resnais, Jacques Audiard, Michael Haneke, Abbas Kiarostami, Ken Loach, Leos Carax, David Cronenberg, Matteo Garrone, Walter Salles, Thomas Vinterberg, Hong Sang-soo, Im Sang-soo, Carlos Reygadas, Ulrich Seidl, Cristian Mungiu and Sergei Loznitsa, all of whom have been in competition in previous years. Among these respected auteurs, Loach, Kiarostami, Mungiu and Haneke have previously won the Golden Palm. One director outside this elite Cannes club, Egypt's Yousry Nasrallah, will have a place in the competition program for the first time with his post-Arab Spring film.
This year, a very important ingredient of the competition program is a strong selection of no fewer than five US films. The last time this happened was in 2007, with the Coen Brothers, Quentin Tarantino, Gus Van Sant, James Gray and David Fincher in the line-up. This time, these films have been made by newcomers to the competition: Wes Anderson, Andrew Dominik, John Hillcoat, Jeff Nichols and Lee Daniels. Their participation will, as expected, bring not only the star wattage of Hollywood glamour to the "Palais des Festivals" red carpet on the Croisette, but should also represent the new young voices of American cinema today.
Offering a certain corrective to the competition selection, the Directors' Fortnight and Critics' Week sections offer work from a very fine selection of auteurs from all corners of the world. Among them are some well-known names such as Pablo Trapero, William Vega, Nelson Pereira Dos Santos, Pablo Larrain, Lou Ye, Koji Wakamatsu, Joachim Lafosse and Xavier Dolan, and some directors with first features such as Brandon Cronenberg, Benh Zeitlin, Ashim Ahluwalia and Adam Leon.
Michael Haneke and Alain Resnais: Ballad With Love and Death. In the competition of the 65th Cannes Film Festival, two directors look at death, each in his own way: Michael Haneke with Love (Amour) and Alain Resnais with You Ain't Seen Nothin' Yet (Vous n'avez encore rien vu).
A Sense of Wonder. Benh Zeitlin's debut has created a dazzling imaginary universe with a scope that feels real and cuts to the heart according to Ronald Rovers.
Father figures as recurring characters in Cannes' lineup. "Fatherhood, as both a challenge and a chance to become a better man, has been one of the recurring themes of the 65th edition of the Cannes Film Festival" — By Paola Casella.
Un Certain Regard: Diversity But Not Always Quality. "Going to see films in Un Certain Regard always inspires hope and enthusiasm, but we have to admit that this year, many hopes were dashed," says Laura Laufer.
The Tragic Nun's Connection. "Beyond The Hills by Cristian Mungiu is a deeply emotional and heartbreaking film. Like its two main female protagonists, it is bipolar, both depressive and ecstatic." Borislav Andjelic's review.
A Song of Joy and Despair. Four years after Tony Manero impressed Cannes, and one and a half years after the Venice première of Post Mortem, No closes a trilogy that portrays Chile in the times of the coup and under the dictatorship. Pamela Biénzobas reviews.
The Return of American Independent Cinema? Is this really American Independent Cinema? This initial thought grew into a nagging doubt which frequently reared its head over the course of the festival. Review by Beatrice Behn.
In the Fog: A Film of Lasting Power and Relevance. Rita Di Santo reviews FIPRESCI winner Sergei Loznitsa's In the Fog.
Quinzaine 2012: An Overview of the New Authors. For Rui Tandinha this year's Quinzaine offered some good cinematic hopes, starting with Rengaine, by Rachid Djaidani. |
data TakeN: List a -> Type where
Fewer: TakeN xs
Exact: (nXs: List a) -> TakeN (nXs ++ rest)
total takeN: (n: Nat) -> (xs: List a) -> TakeN xs
takeN Z xs = Exact []
takeN (S k) [] = Fewer
takeN (S k) (x :: xs) = case takeN k xs of
Fewer => Fewer
Exact nXs => Exact (x :: nXs)
groupByN: (n: Nat) -> (xs: List a) -> List (List a)
groupByN n xs with (takeN n xs)
groupByN n xs | Fewer = [xs]
groupByN n (nXs ++ rest) | Exact nXs = nXs :: groupByN n rest
halves: List a -> (List a, List a)
halves xs =
let
halfLen = div (length xs) 2
in
splitInHalf halfLen xs
where
splitInHalf: Nat -> List a -> (List a, List a)
splitInHalf n xs with (takeN n xs)
splitInHalf n xs | Fewer = ([], xs)
splitInHalf n (nXs ++ rest) | Exact nXs = (nXs, rest)
example: List Int
example = [1,2,3,4,5,6,7,8,9]
emptyList: List Int
emptyList = []
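-- Quick REPL checks (illustrative), using the definitions above:
--   > groupByN 3 example
--   [[1, 2, 3], [4, 5, 6], [7, 8, 9]] : List (List Int)
--   > halves example
--   ([1, 2, 3, 4], [5, 6, 7, 8, 9]) : (List Int, List Int)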
|
using Test
using Liblet
# Constructors
test_production_wrong_lhs() = Production(1, ["a"])
test_production_nonempty_lhs() = Production("",[""])
test_production_wrong_rhs() = Production("a",[1])
test_production_nonemptystr_rhs() = Production("a",["a","","c"])
test_production_nonempty_rhs() = Production("a",[])
test_production_inset() = begin
P = Production("a", ["b", "c"])
Q = Production("a", ("b", "c"))
P == Q
end
test_production_aε() = Production("A", ["a", "ε"])
# Operators
test_production_unpack() = begin
lhs, rhs = Production("a", ["b", "c"])
("a", ["b", "c"]) == (lhs, rhs)
end
test_production_totalorder() = Production("a", ["b"]) > Production("a", ["a"])
test_production_from_string_cf() = parseproduction("A B -> c", true)
# astype0
test_production_astype0() = Production(["a"], ["b"]) == astype0(Production("a", ["b"]))
# suchthat
test_production_such_that_lhs() = suchthat(left = "X")(Production("X", ["x"]))
test_production_such_that_rhs() = suchthat(right = "x")(Production("X", ["x"]))
test_production_such_that_rhs_len() = suchthat(rightlen = 2)(Production("X", ["x", "y"]))
test_production_such_that_rhs_is_suffix_of() = suchthat(right_is_suffix_of = ["a", "x"])(Production("X", ["x"]))
# Tests
function runproductiontests()
@testset "production_tests" begin
@test_throws ArgumentError test_production_wrong_lhs()
@test_throws ArgumentError test_production_nonempty_lhs()
@test_throws ArgumentError test_production_wrong_rhs()
@test_throws ArgumentError test_production_nonemptystr_rhs()
@test_throws ArgumentError test_production_nonempty_rhs()
@test test_production_inset()
@test_throws ArgumentError test_production_aε()
@test test_production_unpack()
@test test_production_totalorder()
@test test_production_astype0()
@test test_production_such_that_lhs()
@test test_production_such_that_rhs()
@test test_production_such_that_rhs_len()
@test test_production_such_that_rhs_is_suffix_of()
end
end |
import pickle
import unittest
import numpy as np
from metaworlds.envs import PointEnv
from metaworlds.envs.sliding_mem_env import SlidingMemEnv
from tests.helpers import step_env
class TestSlidingMemEnv(unittest.TestCase):
def test_pickleable(self):
inner_env = PointEnv(goal=(1., 2.))
env = SlidingMemEnv(inner_env, n_steps=10)
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
assert round_trip.n_steps == env.n_steps
assert np.array_equal(round_trip.env._goal, env.env._goal)
step_env(round_trip)
round_trip.close()
env.close()
def test_does_not_modify_action(self):
inner_env = PointEnv(goal=(1., 2.))
env = SlidingMemEnv(inner_env, n_steps=10)
a = env.action_space.high + 1.
a_copy = a.copy()
env.reset()
env.step(a)
assert np.array_equal(a, a_copy)
env.close()
|
context("dimnames")
dn = list(c('a','b'),c('x','y'))
a = setNames(1:2, dn[[1]])
A = matrix(1:4, nrow=2, ncol=2, dimnames=dn)
DF = structure(list(y=3:4, z=c(6,5), x=1:2, A=c("b", "a")),
.Names=c("y","z","x","A"), row.names=1:2, class="data.frame")
ll = list(a=a, A=A, DF=DF)
test_that("vector and dropping rules", {
expect_equal(dimnames(a), dimnames(a, drop=FALSE), dn[1])
expect_equal(dimnames(a, drop=TRUE), dn[[1]])
expect_equal(dimnames(a, along=1), dn[[1]])
expect_equal(dimnames(a, along=1, drop=FALSE), dn[1])
})
test_that("ignore along for vector, not array", {
expect_equal(dimnames(a, along=2), dn[[1]])
expect_error(dimnames(as.array(a), along=2))
})
test_that("matrix", {
expect_equal(dimnames(A), dn)
expect_equal(dimnames(A, along=2), dn[[2]])
expect_error(dimnames(A, along=3))
})
test_that("data.frame", {
expect_equal(dimnames(DF, along=1), as.character(1:2))
})
test_that("list", {
dnl = dimnames(ll)
expect_equal(dnl$a, dn[1])
expect_equal(dnl$A, dn)
dnl1 = dimnames(ll, along=1)
expect_equal(dnl1$a, dnl1$A, dn[[1]])
})
test_that("zero-length", {
expect_equal(dimnames(c()), list(NULL)) #TODO: this should give integer()
expect_equal(dimnames(c(), null_as_integer=TRUE), list(integer()))
expect_null(dimnames(c(), drop=TRUE))
expect_equal(dimnames(c(), null_as_integer=TRUE, drop=TRUE), integer())
})
|
For people who are not eligible for a stem cell transplant, immunotherapy with a combination of histamine dihydrochloride (Ceplene) and interleukin 2 (Proleukin) after the completion of consolidation has been shown to reduce the absolute relapse risk by 14%, translating to a 50% increase in the likelihood of maintained remission.
|
[STATEMENT]
lemma instrs_append_Write\<^sub>s\<^sub>b:
"instrs (sb@[Write\<^sub>s\<^sub>b volatile a sop v A L R W]) = instrs sb @ [Write volatile a sop A L R W]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. instrs (sb @ [Write\<^sub>s\<^sub>b volatile a sop v A L R W]) = instrs sb @ [Write volatile a sop A L R W]
[PROOF STEP]
by (induct sb) (auto split: memref.splits) |
subroutine loadcn(id,u,r,npc,p,cr,nod,idirn,ncur,fac,tt)
c implicit double precision (a-h,o-z) dp
c
c apply concentrated loads and follower forces
c
common/bk17/dn1,dn2,nwebuf,ntime,numnp,neq,ibar,mthsol
common/bk27/nlcur,nptst,nthpy,nthpz,nthps,xmy,xmz,xms,nload,nptm
common/bks17/fmult(5)
dimension id(2,*),u(*),r(*),cr(4,*),nod(*),idirn(*),ncur(*),
1 fac(*),npc(*),p(*)
c
do 10 n=1,neq
10 r(n)=0.0
c
if (nload.eq.0) return
c
do 40 n=1,nload
ierr=0
xmag=fac(n)
iflwer=0
lcc=ncur(n)
if (lcc.gt.0) go to 20
iflwer=1
lcc=-lcc
20 loc=npc(lcc)
npoint=(npc(lcc+1)-loc)/2
f=0.0
c.... island (loadset command)
if(lcc.le.5)xmag=xmag*fmult(lcc)
call interp (p(loc),tt,npoint,f,xmag,ierr)
nod1=nod(n)
nod2=idirn(n)
if (iflwer.eq.1) go to 30
idof=id(nod2,nod1)
if (idof.eq.0) go to 40
r(idof)=r(idof)+f
go to 40
c
30 id1y=id(1,nod1)
id1z=id(2,nod1)
id2y=id(1,nod2)
id2z=id(2,nod2)
y1=cr(1,n)
z1=cr(2,n)
y2=cr(3,n)
z2=cr(4,n)
if (id1y.ne.0) y1=y1+u(id1y)
if (id1z.ne.0) z1=z1+u(id1z)
if (id2y.ne.0) y2=y2+u(id2y)
if (id2z.ne.0) z2=z2+u(id2z)
y12=y1-y2
z12=z1-z2
xl=sqrt(y12**2+z12**2)
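c.... the follower force acts normal to the (updated) segment from
c     node 2 to node 1, with components f*z12/xl and -f*y12/xl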
if (id1y.ne.0) r(id1y)=r(id1y)+f*z12/xl
if (id1z.ne.0) r(id1z)=r(id1z)-f*y12/xl
40 continue
c
return
end
|
flat_torus_complex := proc(n::posint,m::posint)
local f,g,T,V,E,F,R,r;
f := (i) -> modp(i+1,n);
g := (j) -> modp(j+1,m);
V := [seq(seq([i,j],j=0..m-1),i=0..n-1)];
E := [seq(seq([[i,j],[f(i),j]],j=0..m-1),i=0..n-1),
seq(seq([[i,j],[i,g(j)]],j=0..m-1),i=0..n-1),
seq(seq([[i,j],[f(i),g(j)]],j=0..m-1),i=0..n-1)];
F := [seq(seq([[i,j],[f(i),j],[f(i),g(j)]],j=0..m-1),i=0..n-1),
seq(seq([[i,j],[i,g(j)],[f(i),g(j)]],j=0..m-1),i=0..n-1)];
T := table([]);
T["vertices"] := V;
T["edges"] := E;
T["faces"] := F;
T["max_simplices"] := F;
T["simplices"] := [map(v -> [v],V),op(E),op(F)];
R := 3;
r := 1;
T["embedding_dim"] := 3;
T["embedding"] :=
table([seq(seq([i,j] = evalf(torus_embedding(R,r)(i/n,j/m)),j=0..m-1),i=0..n-1)]);
T["plot"] := `plot/simplicial_complex`(T["vertices"])(T["faces"],3,T["embedding"]);
return eval(T);
end: |
[STATEMENT]
lemma foundation25': "\<tau> \<Turnstile> Q \<Longrightarrow> \<tau> \<Turnstile> (P or Q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<tau> \<Turnstile> Q \<Longrightarrow> \<tau> \<Turnstile> P or Q
[PROOF STEP]
by(subst OclOr_commute, simp add: foundation25) |
theory ex_subminimal_logics
imports topo_negation_conditions topo_strict_implication
begin
nitpick_params[assms=true, user_axioms=true, show_all, expect=genuine, format=3] (*default Nitpick settings*)
section \<open>Example application: Subintuitionistic and subminimal logics\<close>
text\<open>\noindent{In this section we examine some special paracomplete logics. The idea is to illustrate an approach by
means of which we can obtain logics which are boldly paracomplete and (non-boldly) paraconsistent at the
same time, Johansson's 'minimal logic' @{cite JML} being the paradigmatic case we aim at modelling.}\<close>
text\<open>\noindent{Drawing upon the literature on Johansson's minimal logic, we introduce an unconstrained propositional
constant Q, which we then employ to define a 'rigid' frontier operation @{text "\<F>'"}.}\<close>
consts Q::"\<sigma>"
abbreviation "\<F>' \<equiv> \<lambda>X. Q"
abbreviation "\<I>' \<equiv> \<I>\<^sub>F \<F>'"
abbreviation "\<C>' \<equiv> \<C>\<^sub>F \<F>'"
abbreviation "\<B>' \<equiv> \<B>\<^sub>F \<F>'"
text\<open>\noindent{As defined, @{text "\<F>'"} (and its corresponding closure operation) satisfies several semantic conditions.}\<close>
lemma "Fr_1 \<F>' \<and> Fr_2 \<F>' \<and> Fr_4 \<F>'" by (simp add: Fr_1_def Fr_2_def Fr_4_def conn)
lemma "Cl_1 \<C>' \<and> Cl_2 \<C>' \<and> Cl_4 \<C>'" using ADDI_def CF2 IDEMb_def Cl_fr_def PC4 unfolding conn by auto
text\<open>\noindent{However Fr-3 is not valid. In fact, adding it by hand would collapse into classical logic (making all sets clopen).}\<close>
lemma "Fr_3 \<F>'" nitpick oops (*counterexample found*)
lemma "Cl_3 \<C>'" nitpick oops (*counterexample found*)
lemma "Fr_3 \<F>' \<Longrightarrow> \<forall>A. \<F>'(A) \<^bold>\<approx> \<^bold>\<bottom>" by (simp add: NOR_def)
text\<open>\noindent{In order to obtain a paracomplete logic not validating ECQ, we define negation as follows,}\<close>
abbreviation neg_IC::"\<sigma>\<Rightarrow>\<sigma>" ("\<^bold>\<not>") where "\<^bold>\<not>A \<equiv> \<C>'(\<I>(\<^bold>\<midarrow>A))"
text\<open>\noindent{and observe that some plausible semantic properties obtain:}\<close>
lemma Q_def1: "\<forall>A. Q \<^bold>\<approx> \<^bold>\<not>A \<^bold>\<and> \<^bold>\<not>(\<^bold>\<not>A)" using Cl_fr_def IF2 dEXP_def conn by auto
lemma Q_def2: "Fr_1b \<F> \<Longrightarrow> \<forall>A. Q \<^bold>\<approx> \<^bold>\<not>(A \<^bold>\<or> \<^bold>\<not>A)" by (smt Cl_fr_def IF2 dEXP_def MONO_def monI conn)
lemma neg_Idef: "\<forall>A. \<^bold>\<not>A \<^bold>\<approx> \<I>(\<^bold>\<midarrow>A) \<^bold>\<or> Q" by (simp add: Cl_fr_def)
lemma neg_Cdef: "Fr_2 \<F> \<Longrightarrow> \<forall>A. \<^bold>\<not>A \<^bold>\<approx> \<C>(A) \<^bold>\<rightarrow> Q" using Cl_fr_def Fr_2_def Int_fr_def conn by auto
text\<open>\noindent{The negation so defined validates some properties corresponding to a (rather weak) paracomplete logic:}\<close>
lemma "\<FF> \<F> \<Longrightarrow> TND \<^bold>\<not>" nitpick oops (*counterexample found: negation is paracomplete*)
lemma "\<FF> \<F> \<Longrightarrow> TNDw \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> TNDm \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> ECQ \<^bold>\<not>" nitpick oops (*counterexample found: negation is paraconsistent...*)
lemma ECQw: "ECQw \<^bold>\<not>" using Cl_fr_def Disj_I ECQw_def unfolding conn by auto (*...but not 'boldly' paraconsistent*)
lemma ECQm: "ECQm \<^bold>\<not>" using Cl_fr_def Disj_I ECQm_def unfolding conn by auto
lemma "\<FF> \<F> \<Longrightarrow> LNC \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> DNI \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> DNE \<^bold>\<not>" nitpick oops
lemma CoPw: "Fr_1b \<F> \<Longrightarrow> CoPw \<^bold>\<not>" using Cl_fr_def MONO_def monI unfolding Defs conn by smt
lemma "\<FF> \<F> \<Longrightarrow> CoP1 \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> CoP2 \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> CoP3 \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> XCoP \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> DM3 \<^bold>\<not>" nitpick oops
text\<open>\noindent{Moreover, we cannot have both DNI and DNE without validating ECQ (thus losing paraconsistency).}\<close>
lemma "DNI \<^bold>\<not> \<and> DNE \<^bold>\<not> \<longrightarrow> ECQ \<^bold>\<not>" using DNE_def ECQ_def Int_fr_def neg_Idef unfolding conn by (metis (no_types, lifting))
text\<open>\noindent{However, we can have all of De Morgan laws while keeping (non-bold) paraconsistency.}\<close>
lemma "\<sim>ECQ \<^bold>\<not> \<and> DM1 \<^bold>\<not> \<and> DM2 \<^bold>\<not> \<and> DM3 \<^bold>\<not> \<and> DM4 \<^bold>\<not> \<and> \<FF> \<F>" nitpick[satisfy,card w=3] oops (*(weakly paraconsistent) model found*)
text\<open>\noindent{Below we combine negation with strict implication and verify some interesting properties.
For instance, the following are not valid (and cannot become valid by adding semantic restrictions). }\<close>
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (\<^bold>\<not>a \<^bold>\<Rightarrow> (a \<^bold>\<Rightarrow> b)) \<^bold>\<approx> \<^bold>\<top>" nitpick oops (*counterexample found*)
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (\<^bold>\<not>a \<^bold>\<rightarrow> (a \<^bold>\<rightarrow> b)) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<and> \<^bold>\<not>a \<^bold>\<Rightarrow> b) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<and> \<^bold>\<not>a \<^bold>\<rightarrow> b) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<Rightarrow> (b \<^bold>\<or> \<^bold>\<not>b)) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<rightarrow> (b \<^bold>\<or> \<^bold>\<not>b)) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. (a \<^bold>\<Rightarrow> \<^bold>\<not>a) \<^bold>\<Rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. (a \<^bold>\<rightarrow> \<^bold>\<not>a) \<^bold>\<rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<and> \<^bold>\<not>a) \<^bold>\<Rightarrow> b \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<and> \<^bold>\<not>a) \<^bold>\<rightarrow> b \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. a \<^bold>\<Rightarrow> (b \<^bold>\<or> \<^bold>\<not>b) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. a \<^bold>\<rightarrow> (b \<^bold>\<or> \<^bold>\<not>b) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<leftrightarrow> b) \<^bold>\<Rightarrow> (\<^bold>\<not>a \<^bold>\<leftrightarrow> \<^bold>\<not>b) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<leftrightarrow> b) \<^bold>\<rightarrow> (\<^bold>\<not>a \<^bold>\<leftrightarrow> \<^bold>\<not>b) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<Rightarrow> b) \<^bold>\<and> (a \<^bold>\<Rightarrow> \<^bold>\<not>b) \<^bold>\<Rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<rightarrow> b) \<^bold>\<and> (a \<^bold>\<rightarrow> \<^bold>\<not>b) \<^bold>\<Rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. (\<^bold>\<not>a \<^bold>\<Rightarrow> \<^bold>\<bottom>) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. (\<^bold>\<not>a \<^bold>\<rightarrow> \<^bold>\<bottom>) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. (\<^bold>\<not>a \<^bold>\<Rightarrow> \<^bold>\<not>(\<^bold>\<not>\<^bold>\<top>)) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. (\<^bold>\<not>a \<^bold>\<rightarrow> \<^bold>\<not>(\<^bold>\<not>\<^bold>\<top>)) \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. \<^bold>\<not>(\<^bold>\<not>(\<^bold>\<not>a)) \<^bold>\<Rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> \<forall>a. \<^bold>\<not>(\<^bold>\<not>(\<^bold>\<not>a)) \<^bold>\<rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" nitpick oops
text\<open>\noindent{The (weak) local contraposition axiom is indeed valid under appropriate conditions.}\<close>
lemma lCoPw: "Fr_1 \<F> \<Longrightarrow> Fr_2 \<F> \<Longrightarrow> Fr_3 \<F> \<Longrightarrow> Fr_4 \<F> \<Longrightarrow> lCoPw(\<^bold>\<Rightarrow>) \<^bold>\<not>" proof -
assume fr1: "Fr_1 \<F>" and fr2: "Fr_2 \<F>" and fr3: "Fr_3 \<F>" and fr4: "Fr_4 \<F>"
{ fix a b
from fr2 have "\<^bold>\<not>b \<^bold>\<rightarrow> \<^bold>\<not>a \<^bold>\<approx> (\<C> a \<^bold>\<rightarrow> \<C> b) \<^bold>\<or> Q" using Cl_fr_def Fr_2_def Int_fr_def conn by auto
moreover from fr1 fr2 fr3 have "\<I>(a \<^bold>\<rightarrow> b) \<^bold>\<preceq> \<C> a \<^bold>\<rightarrow> \<C> b" using IC_imp by simp
ultimately have "\<I>(a \<^bold>\<rightarrow> b) \<^bold>\<preceq> \<^bold>\<not>b \<^bold>\<rightarrow> \<^bold>\<not>a" unfolding conn by simp
moreover from fr1 fr2 fr4 have "let A=(a \<^bold>\<rightarrow> b); B=(\<^bold>\<not>b \<^bold>\<rightarrow> \<^bold>\<not>a) in \<I> A \<^bold>\<preceq> B \<longrightarrow> \<I> A \<^bold>\<preceq> \<I> B"
using PF1 MONO_MULTa IF1a IF4 PI9 Int_9_def by smt
ultimately have "\<I>(a \<^bold>\<rightarrow> b) \<^bold>\<preceq> \<I>(\<^bold>\<not>b \<^bold>\<rightarrow> \<^bold>\<not>a)" by simp
} hence "lCoPw(\<^bold>\<Rightarrow>) \<^bold>\<not>" unfolding Defs conn by blast
thus ?thesis by simp
qed
lemma lCoPw_strict: "\<FF> \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<Rightarrow> b) \<^bold>\<Rightarrow> (\<^bold>\<not>b \<^bold>\<Rightarrow> \<^bold>\<not>a) \<^bold>\<approx> \<^bold>\<top>" by (metis (no_types, lifting) DTw2 lCoPw lCoPw_def)
text\<open>\noindent{However, other (local) contraposition axioms are not valid.}\<close>
lemma "\<FF> \<F> \<Longrightarrow> lCoP1(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops (*counterexample found*)
lemma "\<FF> \<F> \<Longrightarrow> lCoP2(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> lCoP3(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
text\<open>\noindent{And this time no variant of disjunctive syllogism is valid.}\<close>
lemma "\<FF> \<F> \<Longrightarrow> DS1(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> DS2(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> DS2(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "\<FF> \<F> \<Longrightarrow> DS4(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
text\<open>\noindent{Interestingly, one of the local contraposition axioms (lCoP1) follows from DNI.}\<close>
lemma DNI_lCoP1: "Fr_1 \<F> \<Longrightarrow> Fr_2 \<F> \<Longrightarrow> Fr_3 \<F> \<Longrightarrow> Fr_4 \<F> \<Longrightarrow> DNI \<^bold>\<not> \<longrightarrow> lCoP1(\<^bold>\<Rightarrow>) \<^bold>\<not>" proof -
assume fr1: "Fr_1 \<F>" and fr2: "Fr_2 \<F>" and fr3: "Fr_3 \<F>" and fr4: "Fr_4 \<F>"
{ assume dni: "DNI \<^bold>\<not>"
{ fix a b
from fr1 fr2 fr3 fr4 have "lCoPw(\<^bold>\<Rightarrow>) \<^bold>\<not>" using lCoPw by simp
hence 1: "a \<^bold>\<Rightarrow> \<^bold>\<not>b \<^bold>\<preceq> \<^bold>\<not>(\<^bold>\<not>b) \<^bold>\<Rightarrow> \<^bold>\<not>a" unfolding lCoPw_def by simp
from fr1 have 2: "let A=b; B=\<^bold>\<not>(\<^bold>\<not>b); C=\<^bold>\<not>a in A \<^bold>\<preceq> B \<longrightarrow> \<I>(B \<^bold>\<rightarrow> C) \<^bold>\<preceq> \<I>(A \<^bold>\<rightarrow> C)" by (simp add: MONO_ant PF1 monI)
from dni have dnib: "b \<^bold>\<preceq> \<^bold>\<not>(\<^bold>\<not>b)" unfolding DNI_def by simp
from 1 2 dnib have "a \<^bold>\<Rightarrow> \<^bold>\<not>b \<^bold>\<preceq> b \<^bold>\<Rightarrow> \<^bold>\<not>a" unfolding conn by meson
} hence "lCoP1(\<^bold>\<Rightarrow>) \<^bold>\<not>" unfolding Defs by blast
} thus ?thesis by simp
qed
text\<open>\noindent{This entails some other interesting results.}\<close>
lemma DNI_CoP1: "Fr_1b \<F> \<Longrightarrow> DNI \<^bold>\<not> \<Longrightarrow> CoP1 \<^bold>\<not>" using CoP1_def2 CoPw by blast
lemma CoP1_LNC: "CoP1 \<^bold>\<not> \<Longrightarrow> LNC \<^bold>\<not>" using CoP1_def ECQm_def LNC_def Cl_fr_def Disj_I ECQm_def unfolding conn by smt
lemma DNI_LNC: "Fr_1b \<F> \<Longrightarrow> DNI \<^bold>\<not> \<Longrightarrow> LNC \<^bold>\<not>" by (simp add: CoP1_LNC DNI_CoP1)
text\<open>\noindent{The following variants of modus tollens also obtain.}\<close>
lemma MT: "Fr_1 \<F> \<Longrightarrow> Fr_2 \<F> \<Longrightarrow> Fr_3 \<F> \<Longrightarrow> \<forall>a b. (a \<^bold>\<Rightarrow> b) \<^bold>\<and> \<^bold>\<not>b \<^bold>\<preceq> \<^bold>\<not>a" using Cl_fr_def Fr_2_def IC_imp Int_fr_def unfolding conn by metis
lemma MT': "Fr_1 \<F> \<Longrightarrow> Fr_2 \<F> \<Longrightarrow> Fr_3 \<F> \<Longrightarrow> \<forall>a b. ((a \<^bold>\<Rightarrow> b) \<^bold>\<and> \<^bold>\<not>b) \<^bold>\<Rightarrow> \<^bold>\<not>a \<^bold>\<approx> \<^bold>\<top>" by (simp add: DTw2 MT)
text\<open>\noindent{We now semantically characterize (an approximation of) Johansson's Minimal Logic along with some
exemplary 'subminimal' logics (observing that many more are possible). We check some relevant properties.}\<close>
abbreviation "JML \<equiv> \<FF> \<F> \<and> DNI \<^bold>\<not>"
abbreviation "SML1 \<equiv> \<FF> \<F>" (*Fr_1 \<F> \<and> Fr_2 \<F> \<and> Fr_3 \<F> \<and> Fr_4 \<F>*)
abbreviation "SML2 \<equiv> Fr_1 \<F> \<and> Fr_2 \<F> \<and> Fr_3 \<F>"
abbreviation "SML3 \<equiv> Fr_1 \<F>"
abbreviation "SML4 \<equiv> Fr_1b \<F>"
text\<open>\noindent{TND:}\<close>
lemma "JML \<Longrightarrow> TND \<^bold>\<not>" nitpick oops (*counterexample found*)
lemma "JML \<Longrightarrow> TNDw \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> TNDm \<^bold>\<not>" nitpick oops
text\<open>\noindent{ECQ:}\<close>
lemma "JML \<Longrightarrow> ECQ \<^bold>\<not>" nitpick oops
lemma "ECQw \<^bold>\<not>" using Cl_fr_def Disj_I ECQw_def unfolding conn by auto
lemma "ECQm \<^bold>\<not>" using Cl_fr_def Disj_I ECQm_def unfolding conn by auto
text\<open>\noindent{LNC:}\<close>
lemma "JML \<Longrightarrow> LNC \<^bold>\<not>" using DNI_LNC PF1 by blast
lemma "SML1 \<Longrightarrow> LNC \<^bold>\<not>" nitpick oops
text\<open>\noindent{(r)DNI/DNE:}\<close>
lemma "JML \<Longrightarrow> DNI \<^bold>\<not>" using CoP1_def2 by blast
lemma "SML1 \<Longrightarrow> rDNI \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> rDNE \<^bold>\<not>" nitpick oops
text\<open>\noindent{CoP/MT:}\<close>
lemma "SML4 \<Longrightarrow> CoPw \<^bold>\<not>" unfolding Defs by (smt Cl_fr_def MONO_def monI conn)
lemma "JML \<Longrightarrow> CoP1 \<^bold>\<not>" using DNI_CoP1 PF1 by blast
lemma "SML1 \<Longrightarrow> MT1 \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> MT2 \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> MT3 \<^bold>\<not>" nitpick oops
text\<open>\noindent{XCoP:}\<close>
lemma "JML \<Longrightarrow> XCoP \<^bold>\<not>" nitpick oops
text\<open>\noindent{DM3/4:}\<close>
lemma "JML \<Longrightarrow> DM3 \<^bold>\<not>" nitpick oops
lemma "SML3 \<Longrightarrow> DM4 \<^bold>\<not>" by (simp add: DM4 PF1)
lemma "SML4 \<Longrightarrow> DM4 \<^bold>\<not>" nitpick oops
text\<open>\noindent{nNor/nDNor:}\<close>
lemma "SML2 \<Longrightarrow> nNor \<^bold>\<not>" using Cl_fr_def nNor_I nNor_def unfolding conn by auto
lemma "SML3 \<Longrightarrow> nNor \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> nDNor \<^bold>\<not>" nitpick oops
text\<open>\noindent{lCoP classical:}\<close>
lemma "JML \<Longrightarrow> lCoPw(\<^bold>\<rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lCoP1(\<^bold>\<rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lCoP2(\<^bold>\<rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lCoP3(\<^bold>\<rightarrow>) \<^bold>\<not>" nitpick oops
text\<open>\noindent{DS classical:}\<close>
lemma "JML \<Longrightarrow> DS1(\<^bold>\<rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> DS2(\<^bold>\<rightarrow>) \<^bold>\<not>" nitpick oops
text\<open>\noindent{lCoP strict:}\<close>
lemma "SML1 \<Longrightarrow> lCoPw(\<^bold>\<Rightarrow>) \<^bold>\<not>" using lCoPw by blast
lemma "SML2 \<Longrightarrow> lCoPw(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lCoP1(\<^bold>\<Rightarrow>) \<^bold>\<not>" using CoP1_def2 DNI_lCoP1 by blast
lemma "SML1 \<Longrightarrow> lCoP1(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lCoP2(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lCoP3(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
text\<open>\noindent{lMT strict:}\<close>
lemma "SML2 \<Longrightarrow> lMT0(\<^bold>\<Rightarrow>) \<^bold>\<not>" unfolding Defs using MT by auto
lemma "SML3 \<Longrightarrow> lMT0(\<^bold>\<Rightarrow>) \<^bold>\<not>" (*nitpick*) oops (*no countermodel found*)
lemma "SML4 \<Longrightarrow> lMT0(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lMT1(\<^bold>\<Rightarrow>) \<^bold>\<not>" by (smt DNI_lCoP1 DT1 lCoP1_def lMT1_def)
lemma "SML1 \<Longrightarrow> lMT1(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lMT2(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> lMT3(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
text\<open>\noindent{DS strict:}\<close>
lemma "JML \<Longrightarrow> DS1(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> DS2(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> DS3(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
lemma "JML \<Longrightarrow> DS4(\<^bold>\<Rightarrow>) \<^bold>\<not>" nitpick oops
end
|
/-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import topology.algebra.monoid
import group_theory.group_action.prod
import group_theory.group_action.basic
import topology.homeomorph
import topology.algebra.mul_action2
/-!
# Continuous monoid action
In this file we define class `has_continuous_smul`. We say `has_continuous_smul M α` if `M` acts on
`α` and the map `(c, x) ↦ c • x` is continuous on `M × α`. We reuse this class for topological
(semi)modules, vector spaces and algebras.
## Main definitions
* `has_continuous_smul M α` : typeclass saying that the map `(c, x) ↦ c • x` is continuous
on `M × α`;
* `homeomorph.smul_of_ne_zero`: if a group with zero `G₀` (e.g., a field) acts on `α` and `c : G₀`
is a nonzero element of `G₀`, then scalar multiplication by `c` is a homeomorphism of `α`;
* `homeomorph.smul`: scalar multiplication by an element of a group `G` acting on `α`
is a homeomorphism of `α`.
* `units.has_continuous_smul`: scalar multiplication by `Mˣ` is continuous when scalar
multiplication by `M` is continuous. This allows `homeomorph.smul` to be used on monoids
with `G = Mˣ`.
## Main results
Besides homeomorphisms mentioned above, in this file we provide lemmas like `continuous.smul`
or `filter.tendsto.smul` that provide dot-syntax access to `continuous_smul`.
-/
open_locale topological_space pointwise
open filter
/-- Class `has_continuous_smul M α` says that the scalar multiplication `(•) : M → α → α`
is continuous in both arguments. We use the same class for all kinds of multiplicative actions,
including (semi)modules and algebras. -/
class has_continuous_smul (M α : Type*) [has_scalar M α]
[topological_space M] [topological_space α] : Prop :=
(continuous_smul : continuous (λp : M × α, p.1 • p.2))
export has_continuous_smul (continuous_smul)
/-- Class `has_continuous_vadd M α` says that the additive action `(+ᵥ) : M → α → α`
is continuous in both arguments. We use the same class for all kinds of additive actions,
including (semi)modules and algebras. -/
class has_continuous_vadd (M α : Type*) [has_vadd M α]
[topological_space M] [topological_space α] : Prop :=
(continuous_vadd : continuous (λp : M × α, p.1 +ᵥ p.2))
export has_continuous_vadd (continuous_vadd)
attribute [to_additive] has_continuous_smul
variables {M α β : Type*} [topological_space M] [topological_space α]
section has_scalar
variables [has_scalar M α] [has_continuous_smul M α]
@[priority 100, to_additive] instance has_continuous_smul.has_continuous_smul₂ :
has_continuous_smul₂ M α :=
{ continuous_smul₂ := λ _, continuous_smul.comp (continuous_const.prod_mk continuous_id) }
@[to_additive]
lemma filter.tendsto.smul {f : β → M} {g : β → α} {l : filter β} {c : M} {a : α}
(hf : tendsto f l (𝓝 c)) (hg : tendsto g l (𝓝 a)) :
tendsto (λ x, f x • g x) l (𝓝 $ c • a) :=
(continuous_smul.tendsto _).comp (hf.prod_mk_nhds hg)
@[to_additive]
lemma filter.tendsto.const_smul {f : β → α} {l : filter β} {a : α} (hf : tendsto f l (𝓝 a))
(c : M) :
tendsto (λ x, c • f x) l (𝓝 (c • a)) :=
tendsto_const_nhds.smul hf
@[to_additive]
lemma filter.tendsto.smul_const {f : β → M} {l : filter β} {c : M}
(hf : tendsto f l (𝓝 c)) (a : α) :
tendsto (λ x, (f x) • a) l (𝓝 (c • a)) :=
hf.smul tendsto_const_nhds
variables [topological_space β] {f : β → M} {g : β → α} {b : β} {s : set β}
@[to_additive]
lemma continuous_within_at.smul (hf : continuous_within_at f s b)
(hg : continuous_within_at g s b) :
continuous_within_at (λ x, f x • g x) s b :=
hf.smul hg
@[to_additive]
lemma continuous_within_at.const_smul (hg : continuous_within_at g s b) (c : M) :
continuous_within_at (λ x, c • g x) s b :=
hg.const_smul c
@[to_additive]
lemma continuous_at.smul (hf : continuous_at f b) (hg : continuous_at g b) :
continuous_at (λ x, f x • g x) b :=
hf.smul hg
@[to_additive]
lemma continuous_at.const_smul (hg : continuous_at g b) (c : M) :
continuous_at (λ x, c • g x) b :=
hg.const_smul c
@[to_additive]
lemma continuous_on.smul (hf : continuous_on f s) (hg : continuous_on g s) :
continuous_on (λ x, f x • g x) s :=
λ x hx, (hf x hx).smul (hg x hx)
@[to_additive]
lemma continuous_on.const_smul (hg : continuous_on g s) (c : M) :
continuous_on (λ x, c • g x) s :=
λ x hx, (hg x hx).const_smul c
@[continuity, to_additive]
lemma continuous.smul (hf : continuous f) (hg : continuous g) :
continuous (λ x, f x • g x) :=
continuous_smul.comp (hf.prod_mk hg)
@[to_additive]
lemma continuous.const_smul (hg : continuous g) (c : M) :
continuous (λ x, c • g x) :=
continuous_smul.comp (continuous_const.prod_mk hg)
/-- If a scalar is central, then its right action is continuous when its left action is. -/
instance has_continuous_smul.op [has_scalar Mᵐᵒᵖ α] [is_central_scalar M α] :
has_continuous_smul Mᵐᵒᵖ α :=
⟨ suffices continuous (λ p : M × α, mul_opposite.op p.fst • p.snd),
from this.comp (continuous_unop.prod_map continuous_id),
by simpa only [op_smul_eq_smul] using (continuous_smul : continuous (λ p : M × α, _)) ⟩
end has_scalar
section monoid
variables [monoid M] [mul_action M α] [has_continuous_smul M α]
instance units.has_continuous_smul : has_continuous_smul Mˣ α :=
{ continuous_smul :=
show continuous ((λ p : M × α, p.fst • p.snd) ∘ (λ p : Mˣ × α, (p.1, p.2))),
from continuous_smul.comp ((units.continuous_coe.comp continuous_fst).prod_mk continuous_snd) }
@[to_additive]
lemma smul_closure_subset (c : M) (s : set α) : c • closure s ⊆ closure (c • s) :=
((set.maps_to_image _ _).closure $ continuous_id.const_smul c).image_subset
@[to_additive]
lemma smul_closure_orbit_subset (c : M) (x : α) :
c • closure (mul_action.orbit M x) ⊆ closure (mul_action.orbit M x) :=
(smul_closure_subset c _).trans $ closure_mono $ mul_action.smul_orbit_subset _ _
end monoid
section group
variables {G : Type*} [topological_space G] [group G] [mul_action G α]
[has_continuous_smul G α]
@[to_additive]
lemma tendsto_const_smul_iff {f : β → α} {l : filter β} {a : α} (c : G) :
tendsto (λ x, c • f x) l (𝓝 $ c • a) ↔ tendsto f l (𝓝 a) :=
⟨λ h, by simpa only [inv_smul_smul] using h.const_smul c⁻¹,
λ h, h.const_smul _⟩
variables [topological_space β] {f : β → α} {b : β} {s : set β}
@[to_additive]
lemma continuous_within_at_const_smul_iff (c : G) :
continuous_within_at (λ x, c • f x) s b ↔ continuous_within_at f s b :=
tendsto_const_smul_iff c
@[to_additive]
lemma continuous_on_const_smul_iff (c : G) : continuous_on (λ x, c • f x) s ↔ continuous_on f s :=
forall₂_congr $ λ b hb, continuous_within_at_const_smul_iff c
@[to_additive]
lemma continuous_at_const_smul_iff (c : G) :
continuous_at (λ x, c • f x) b ↔ continuous_at f b :=
tendsto_const_smul_iff c
@[to_additive]
lemma continuous_const_smul_iff (c : G) :
continuous (λ x, c • f x) ↔ continuous f :=
by simp only [continuous_iff_continuous_at, continuous_at_const_smul_iff]
@[to_additive]
lemma is_open_map_smul (c : G) : is_open_map (λ x : α, c • x) :=
(homeomorph.smul c).is_open_map
@[to_additive] lemma is_open.smul {s : set α} (hs : is_open s) (c : G) : is_open (c • s) :=
is_open_map_smul c s hs
@[to_additive]
lemma is_closed_map_smul (c : G) : is_closed_map (λ x : α, c • x) :=
(homeomorph.smul c).is_closed_map
@[to_additive] lemma is_closed.smul {s : set α} (hs : is_closed s) (c : G) : is_closed (c • s) :=
is_closed_map_smul c s hs
end group
section group_with_zero
variables {G₀ : Type*} [topological_space G₀] [group_with_zero G₀] [mul_action G₀ α]
[has_continuous_smul G₀ α]
lemma tendsto_const_smul_iff₀ {f : β → α} {l : filter β} {a : α} {c : G₀} (hc : c ≠ 0) :
tendsto (λ x, c • f x) l (𝓝 $ c • a) ↔ tendsto f l (𝓝 a) :=
tendsto_const_smul_iff (units.mk0 c hc)
variables [topological_space β] {f : β → α} {b : β} {c : G₀} {s : set β}
lemma continuous_within_at_const_smul_iff₀ (hc : c ≠ 0) :
continuous_within_at (λ x, c • f x) s b ↔ continuous_within_at f s b :=
tendsto_const_smul_iff (units.mk0 c hc)
lemma continuous_on_const_smul_iff₀ (hc : c ≠ 0) :
continuous_on (λ x, c • f x) s ↔ continuous_on f s :=
continuous_on_const_smul_iff (units.mk0 c hc)
lemma continuous_at_const_smul_iff₀ (hc : c ≠ 0) :
continuous_at (λ x, c • f x) b ↔ continuous_at f b :=
continuous_at_const_smul_iff (units.mk0 c hc)
lemma continuous_const_smul_iff₀ (hc : c ≠ 0) :
continuous (λ x, c • f x) ↔ continuous f :=
continuous_const_smul_iff (units.mk0 c hc)
/-- Scalar multiplication by a non-zero element of a group with zero acting on `α` is a
homeomorphism from `α` onto itself. -/
protected def homeomorph.smul_of_ne_zero (c : G₀) (hc : c ≠ 0) : α ≃ₜ α :=
homeomorph.smul (units.mk0 c hc)
lemma is_open_map_smul₀ {c : G₀} (hc : c ≠ 0) : is_open_map (λ x : α, c • x) :=
(homeomorph.smul_of_ne_zero c hc).is_open_map
lemma is_open.smul₀ {c : G₀} {s : set α} (hs : is_open s) (hc : c ≠ 0) : is_open (c • s) :=
is_open_map_smul₀ hc s hs
lemma interior_smul₀ {c : G₀} (hc : c ≠ 0) (s : set α) : interior (c • s) = c • interior s :=
((homeomorph.smul_of_ne_zero c hc).image_interior s).symm
/-- `smul` is a closed map in the second argument.
The lemma that `smul` is a closed map in the first argument (for a normed space over a complete
normed field) is `is_closed_map_smul_left` in `analysis.normed_space.finite_dimension`. -/
lemma is_closed_map_smul_of_ne_zero {c : G₀} (hc : c ≠ 0) : is_closed_map (λ x : α, c • x) :=
(homeomorph.smul_of_ne_zero c hc).is_closed_map
/-- `smul` is a closed map in the second argument.
The lemma that `smul` is a closed map in the first argument (for a normed space over a complete
normed field) is `is_closed_map_smul_left` in `analysis.normed_space.finite_dimension`. -/
lemma is_closed_map_smul₀ {𝕜 M : Type*} [division_ring 𝕜] [add_comm_monoid M] [topological_space M]
[t1_space M] [module 𝕜 M] [topological_space 𝕜] [has_continuous_smul 𝕜 M] (c : 𝕜) :
is_closed_map (λ x : M, c • x) :=
begin
rcases eq_or_ne c 0 with (rfl|hne),
{ simp only [zero_smul], exact is_closed_map_const },
{ exact (homeomorph.smul_of_ne_zero c hne).is_closed_map },
end
end group_with_zero
namespace is_unit
variables [monoid M] [mul_action M α] [has_continuous_smul M α]
lemma tendsto_const_smul_iff {f : β → α} {l : filter β} {a : α} {c : M} (hc : is_unit c) :
tendsto (λ x, c • f x) l (𝓝 $ c • a) ↔ tendsto f l (𝓝 a) :=
let ⟨u, hu⟩ := hc in hu ▸ tendsto_const_smul_iff u
variables [topological_space β] {f : β → α} {b : β} {c : M} {s : set β}
lemma continuous_within_at_const_smul_iff (hc : is_unit c) :
continuous_within_at (λ x, c • f x) s b ↔ continuous_within_at f s b :=
let ⟨u, hu⟩ := hc in hu ▸ continuous_within_at_const_smul_iff u
lemma continuous_on_const_smul_iff (hc : is_unit c) :
continuous_on (λ x, c • f x) s ↔ continuous_on f s :=
let ⟨u, hu⟩ := hc in hu ▸ continuous_on_const_smul_iff u
lemma continuous_at_const_smul_iff (hc : is_unit c) :
continuous_at (λ x, c • f x) b ↔ continuous_at f b :=
let ⟨u, hu⟩ := hc in hu ▸ continuous_at_const_smul_iff u
lemma continuous_const_smul_iff (hc : is_unit c) :
continuous (λ x, c • f x) ↔ continuous f :=
let ⟨u, hu⟩ := hc in hu ▸ continuous_const_smul_iff u
lemma is_open_map_smul (hc : is_unit c) : is_open_map (λ x : α, c • x) :=
let ⟨u, hu⟩ := hc in hu ▸ is_open_map_smul u
lemma is_closed_map_smul (hc : is_unit c) : is_closed_map (λ x : α, c • x) :=
let ⟨u, hu⟩ := hc in hu ▸ is_closed_map_smul u
end is_unit
@[to_additive]
instance has_continuous_mul.has_continuous_smul {M : Type*} [monoid M]
[topological_space M] [has_continuous_mul M] :
has_continuous_smul M M :=
⟨continuous_mul⟩
@[to_additive]
instance [topological_space β] [has_scalar M α] [has_scalar M β] [has_continuous_smul M α]
[has_continuous_smul M β] :
has_continuous_smul M (α × β) :=
⟨(continuous_fst.smul (continuous_fst.comp continuous_snd)).prod_mk
(continuous_fst.smul (continuous_snd.comp continuous_snd))⟩
@[to_additive]
instance {ι : Type*} {γ : ι → Type*}
[∀ i, topological_space (γ i)] [Π i, has_scalar M (γ i)] [∀ i, has_continuous_smul M (γ i)] :
has_continuous_smul M (Π i, γ i) :=
⟨continuous_pi $ λ i,
(continuous_fst.smul continuous_snd).comp $
continuous_fst.prod_mk ((continuous_apply i).comp continuous_snd)⟩
section lattice_ops
variables {ι : Type*} [has_scalar M β]
{ts : set (topological_space β)} (h : Π t ∈ ts, @has_continuous_smul M β _ _ t)
{ts' : ι → topological_space β} (h' : Π i, @has_continuous_smul M β _ _ (ts' i))
{t₁ t₂ : topological_space β} [h₁ : @has_continuous_smul M β _ _ t₁]
[h₂ : @has_continuous_smul M β _ _ t₂]
include h
@[to_additive] lemma has_continuous_smul_Inf :
@has_continuous_smul M β _ _ (Inf ts) :=
{ continuous_smul :=
begin
rw ← @Inf_singleton _ _ ‹topological_space M›,
exact continuous_Inf_rng (λ t ht, continuous_Inf_dom₂ (eq.refl _) ht
(@has_continuous_smul.continuous_smul _ _ _ _ t (h t ht)))
end }
omit h
include h'
@[to_additive] lemma has_continuous_smul_infi :
@has_continuous_smul M β _ _ (⨅ i, ts' i) :=
by {rw ← Inf_range, exact has_continuous_smul_Inf (set.forall_range_iff.mpr h')}
omit h'
include h₁ h₂
@[to_additive] lemma has_continuous_smul_inf :
@has_continuous_smul M β _ _ (t₁ ⊓ t₂) :=
by {rw inf_eq_infi, refine has_continuous_smul_infi (λ b, _), cases b; assumption}
omit h₁ h₂
end lattice_ops
|
(* begin hide *)
Require Import
Coq.Structures.OrderedTypeEx
Coq.FSets.FSetAVL
Coq.Arith.Compare_dec.
Require Import Aniceto.Set.
Require Import Aniceto.Map.
Require Import
Brenner.Semantics TaskMap PhaserMap Vars Syntax.
Require Import Aniceto.Graphs.Graph.
Require Aniceto.Graphs.Bipartite.Bipartite.
Require Aniceto.Graphs.Bipartite.Cycle.
Set Implicit Arguments.
Module C := Graphs.Bipartite.Cycle.
(* end hide *)
(** * Resource dependency state *)
(** We define an event as a pair of a phaser id and a natural number (the phase). *)
Module EVT := PairOrderedType PHID Nat_as_OT.
(* begin hide *)
Module Set_EVT := FSetAVL.Make EVT.
Module Set_EVT_Extra := SetUtil Set_EVT.
Module Map_EVT := FMapAVL.Make EVT.
Module Map_EVT_Extra := MapUtil Map_EVT.
Definition set_event := Set_EVT.t.
(* end hide *)
Definition event := EVT.t.
(** Function [get_phaser] obtains the phaser id in the event. *)
Definition get_phaser (e:event) : phid := fst e.
(** Function [get_phase] obtains the phase number of the event. *)
Definition get_phase (e:event) : nat := snd e.
(** Phases from the same phaser are in a precedes relation. *)
Definition prec (e1:event) (e2:event) :=
get_phaser e1 = get_phaser e2 /\ get_phase e1 < get_phase e2.
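(* For example, for any phaser id [p], event [(p, 1)] precedes [(p, 2)],
since both share the phaser [p] and 1 < 2. *)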
(* begin hide *)
Section StateProps.
Variable s:state.
(* end hide *)
(**
Let [s] be a [state].
We say that a task [t] is waiting for an event [e]
when task [t] is executing an instruction [Await] and
the target phaser is defined in the phaser map. *)
Definition WaitOn (t:tid) (e:event) :=
exists prg,
Map_TID.MapsTo t (pcons (await (get_phaser e) (get_phase e)) prg) (get_tasks s) /\
Map_PHID.In (get_phaser e) (get_phasers s).
(** A task [t] is registered in an event [e] if [t] is registered
in phaser [get_phaser e] with phase [get_phase e]; task [t] must be
defined in the task map. *)
Definition Registered (t:tid) (e:event) :=
exists ph,
Map_PHID.MapsTo (get_phaser e) ph (get_phasers s) /\
Map_TID.MapsTo t (get_phase e) ph /\ Map_TID.In t (get_tasks s).
Lemma registered_def:
forall ph e t,
Map_PHID.MapsTo (get_phaser e) ph (get_phasers s) ->
Map_TID.MapsTo t (get_phase e) ph ->
Map_TID.In t (get_tasks s) ->
Registered t e.
Proof.
intros.
unfold Registered.
exists ph.
intuition.
Qed.
Lemma registered_in_tasks:
forall t e,
Registered t e ->
Map_TID.In t (get_tasks s).
Proof.
unfold Registered.
intros.
destruct H as (?,(?,(?,?))).
assumption.
Qed.
(** An event [e] impedes a task [t] when this task is registered in an
event [e'] that precedes [e]; the impeding event must be the target of
a blocked task. *)
Definition ImpededBy(e:event) (t:tid) :=
(exists t', WaitOn t' e) /\
(exists e', Registered t e' /\ prec e' e).
Lemma impeded_by_def:
forall e t t' e',
WaitOn t' e ->
Registered t e' ->
prec e' e ->
ImpededBy e t.
Proof.
unfold ImpededBy.
intros.
split.
- exists t'.
assumption.
- exists e'.
intuition.
Qed.
Lemma impeded_by_in_tasks:
forall t e,
ImpededBy e t ->
Map_TID.In t (get_tasks s).
Proof.
intros.
destruct H as (_,(?,(H,_))).
eauto using registered_in_tasks.
Qed.
End StateProps.
(** We now characterize a deadlocked state.
Let [AllTasksWaitOn] be a predicate stating that all tasks in the state
are waiting on an event. *)
Definition AllTasksWaitOn s :=
forall t, (Map_TID.In t (get_tasks s) -> exists e, WaitOn s t e).
(** Let [AllImpededBy] state that for any task waiting on
an event, that event also impedes some task in the state. *)
Definition AllImpededBy s :=
forall t e, WaitOn s t e -> exists t', ImpededBy s e t'.
(** A totally deadlocked state is one in which all tasks are waiting on
events that impede some task in the task map. *)
Definition TotallyDeadlocked (s:state) :=
AllTasksWaitOn s /\ AllImpededBy s /\
exists t, Map_TID.In t (get_tasks s). (* nonempty *)
(* TODO: Now would be a nice time to show that a totally deadlocked state
does not reduce. For this we need to have a decidable reduction. *)
(** A [Deadlocked] state is such that we can partition the task
map into [tm] and [tm'] such that the state [(get_phasers s, tm)] is
totally deadlocked. *)
Definition Deadlocked (s:state) :=
exists tm tm',
Map_TID_Props.Partition (get_tasks s) tm tm' /\
TotallyDeadlocked ((get_phasers s), tm).
(** A GRG (General Resource Graph) is a bipartite graph defined
from the relations [WaitOn] and [ImpededBy]. *)
Notation TEdge s := (Bipartite.AA (WaitOn s) (ImpededBy s)).
Notation REdge s := (Bipartite.BB (WaitOn s) (ImpededBy s)).
Notation TWalk s := (Walk (TEdge s)).
Notation RWalk s := (Walk (REdge s)).
Notation TCycle s := (Cycle (TEdge s)).
Notation RCycle s := (Cycle (REdge s)).
Notation t_edge := (tid * tid) % type.
Notation t_walk := (list t_edge).
(** In a WFG, an arc from task [t1] to [t2] is read as [t1] waits for [t2].
In Brenner this means that there exists an event [e] on which task
[t1] is blocked and at which task [t2] has not yet arrived. *)
Lemma tedge_spec:
forall s (t1 t2:tid),
TEdge s (t1, t2) <->
exists e,
WaitOn s t1 e /\ ImpededBy s e t2.
Proof.
split.
+ intros.
simpl in H.
inversion H.
subst.
exists b.
intuition.
+ intros.
destruct H as (e, (H1, H2)).
simpl.
eauto using Bipartite.aa.
Qed.
(** In an SG, an arc from [e1] to [e2] can be read
as event [e1] happens before event [e2].
In Brenner, this means that there is a task [t]
blocked on [e2] and impeded by [e1]. Recall the definition
of [ImpededBy], which states that [t] is registered in
an event [e'] that precedes [e]; event
[e] itself is obtained because there exists some task blocked
on [e] (again by definition). *)
Lemma redge_spec:
forall s (e1 e2:event),
REdge s (e1, e2) <->
exists t,
ImpededBy s e1 t /\ WaitOn s t e2.
Proof.
split.
- intros.
inversion H.
subst.
exists a.
simpl in *.
intuition.
- intros [t (Hi, Hw)].
simpl.
eauto using Bipartite.bb.
Qed.
Section Graphs.
Variable s:state.
(** Since the graph is bipartite, a cycle in the WFG implies
a cycle in the SG. *)
Theorem wfg_to_sg:
forall s c,
TCycle s c ->
exists c', RCycle s c'.
Proof.
intros.
eauto using Cycle.cycle_a_to_b.
Qed.
(** Vice-versa also holds. *)
Theorem sg_to_wfg:
forall c,
RCycle s c ->
exists c', TCycle s c'.
Proof.
intros.
eauto using Cycle.cycle_b_to_a.
Qed.
End Graphs.
Section Basic.
Variable s:state.
(** In our language tasks can only await a single phaser, so
it is easy to see that the [WaitOn] predicate is actually a function
from task ids to events. *)
Lemma wait_on_fun:
forall t e e',
WaitOn s t e ->
WaitOn s t e' ->
e = e'.
Proof.
intros.
unfold WaitOn in *.
destruct H as (p1, (H1, H2)).
destruct H0 as (p2, (H3, H4)).
(* MapsTo is functional, so p1 = p2 *)
assert (Heq:= @Map_TID_Facts.MapsTo_fun _ _ _ _ _
H1 H3).
inversion Heq.
destruct e as (p,n).
destruct e' as (p', n').
simpl in *.
auto.
Qed.
(** We show that any task id in the domain of [WaitOn] is in the task map of [s]. *)
Lemma wait_on_in_tasks:
forall t e,
WaitOn s t e ->
Map_TID.In t (get_tasks s).
Proof.
intros.
unfold WaitOn in H.
destruct H as (p, (H1, H2)).
apply Map_TID_Extra.mapsto_to_in in H1.
assumption.
Qed.
Lemma impeded_by_in_phasermap:
forall e t,
ImpededBy s e t ->
Map_PHID.In (elt:=Phaser.phaser) (get_phaser e) (get_phasers s).
Proof.
intros.
destruct H as ((t',H),_).
unfold WaitOn in H.
destruct H as (_, (_, H)).
assumption.
Qed.
End Basic.
|
module rrsw_kg23
use shr_kind_mod, only: r8 => shr_kind_r8
! use parkind ,only : jpim, jprb
use parrrsw, only : ng23
implicit none
save
!-----------------------------------------------------------------
! rrtmg_sw ORIGINAL abs. coefficients for interval 23
! band 23: 8050-12850 cm-1 (low - h2o; high - nothing)
!
! Initial version: JJMorcrette, ECMWF, oct1999
! Revised: MJIacono, AER, jul2006
!-----------------------------------------------------------------
!
! name type purpose
! ---- : ---- : ---------------------------------------------
! kao : real
! kbo : real
! selfrefo: real
! forrefo : real
! sfluxrefo: real
!-----------------------------------------------------------------
integer, parameter :: no23 = 16
real(kind=r8) :: kao(5,13,no23)
real(kind=r8) :: selfrefo(10,no23), forrefo(3,no23)
real(kind=r8) :: sfluxrefo(no23)
real(kind=r8) :: raylo(no23)
integer :: layreffr
real(kind=r8) :: givfac
!-----------------------------------------------------------------
! rrtmg_sw COMBINED abs. coefficients for interval 23
! band 23: 8050-12850 cm-1 (low - h2o; high - nothing)
!
! Initial version: JJMorcrette, ECMWF, oct1999
! Revised: MJIacono, AER, jul2006
!-----------------------------------------------------------------
!
! name type purpose
! ---- : ---- : ---------------------------------------------
! ka : real
! kb : real
! absa : real
! absb : real
! selfref : real
! forref : real
! sfluxref: real
!-----------------------------------------------------------------
real(kind=r8) :: ka(5,13,ng23), absa(65,ng23)
real(kind=r8) :: selfref(10,ng23), forref(3,ng23)
real(kind=r8) :: sfluxref(ng23), rayl(ng23)
equivalence (ka(1,1,1),absa(1,1))
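! Note (added comment): the EQUIVALENCE above aliases the 5 x 13 x ng23
! lookup table ka with the flattened 65 x ng23 array absa (5*13 = 65),
! so the band-23 gas-optics code can index the same coefficients either
! by (temperature, pressure, g-point) or by a single collapsed row index.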
end module rrsw_kg23
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- An inductive definition of the heterogeneous prefix relation
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.List.Relation.Binary.Prefix.Heterogeneous where
open import Level
open import Data.List.Base as List using (List; []; _∷_)
open import Data.List.Relation.Binary.Pointwise
using (Pointwise; []; _∷_)
open import Data.Product using (∃; _×_; _,_; uncurry)
open import Relation.Binary using (REL; _⇒_)
module _ {a b r} {A : Set a} {B : Set b} (R : REL A B r) where
data Prefix : REL (List A) (List B) (a ⊔ b ⊔ r) where
[] : ∀ {bs} → Prefix [] bs
_∷_ : ∀ {a b as bs} → R a b → Prefix as bs → Prefix (a ∷ as) (b ∷ bs)
data PrefixView (as : List A) : List B → Set (a ⊔ b ⊔ r) where
_++_ : ∀ {cs} → Pointwise R as cs → ∀ ds → PrefixView as (cs List.++ ds)
module _ {a b r} {A : Set a} {B : Set b} {R : REL A B r} {a b as bs} where
head : Prefix R (a ∷ as) (b ∷ bs) → R a b
head (r ∷ rs) = r
tail : Prefix R (a ∷ as) (b ∷ bs) → Prefix R as bs
tail (r ∷ rs) = rs
uncons : Prefix R (a ∷ as) (b ∷ bs) → R a b × Prefix R as bs
uncons (r ∷ rs) = r , rs
module _ {a b r s} {A : Set a} {B : Set b} {R : REL A B r} {S : REL A B s} where
map : R ⇒ S → Prefix R ⇒ Prefix S
map R⇒S [] = []
map R⇒S (r ∷ rs) = R⇒S r ∷ map R⇒S rs
module _ {a b r} {A : Set a} {B : Set b} {R : REL A B r} where
toView : ∀ {as bs} → Prefix R as bs → PrefixView R as bs
toView [] = [] ++ _
toView (r ∷ rs) with toView rs
... | rs′ ++ ds = (r ∷ rs′) ++ ds
fromView : ∀ {as bs} → PrefixView R as bs → Prefix R as bs
fromView ([] ++ ds) = []
fromView ((r ∷ rs) ++ ds) = r ∷ fromView (rs ++ ds)
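
------------------------------------------------------------------------
-- Illustrative sanity check (not part of the original module); the
-- imports below are needed only for this example.

open import Data.Nat.Base using (ℕ)
open import Relation.Binary.PropositionalEquality using (_≡_; refl)

private
  _ : Prefix _≡_ (1 ∷ []) (1 ∷ 2 ∷ [])
  _ = refl ∷ []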
|
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Jens Wagemaker
! This file was ported from Lean 3 source module algebra.gcd_monoid.basic
! leanprover-community/mathlib commit 550b58538991c8977703fdeb7c9d51a5aa27df11
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.Associated
import Mathlib.Algebra.GroupPower.Lemmas
import Mathlib.Algebra.Ring.Regular
import Mathlib.Tactic.Set
/-!
# Monoids with normalization functions, `gcd`, and `lcm`
This file defines extra structures on `CancelCommMonoidWithZero`s, including `IsDomain`s.
## Main Definitions
* `NormalizationMonoid`
* `GCDMonoid`
* `NormalizedGCDMonoid`
* `gcdMonoid_of_gcd`, `gcdMonoid_of_exists_gcd`, `normalizedGCDMonoid_of_gcd`,
`normalizedGCDMonoid_of_exists_gcd`
* `gcdMonoid_of_lcm`, `gcdMonoid_of_exists_lcm`, `normalizedGCDMonoid_of_lcm`,
`normalizedGCDMonoid_of_exists_lcm`
For the `NormalizedGCDMonoid` instances on `ℕ` and `ℤ`, see `RingTheory.Int.Basic`.
## Implementation Notes
* `NormalizationMonoid` is defined by assigning to each element a `normUnit` such that multiplying
by that unit normalizes the monoid, and `normalize` is an idempotent monoid homomorphism. This
definition as currently implemented does casework on `0`.
* `GCDMonoid` contains the definitions of `gcd` and `lcm` with the usual properties. They are
both determined up to a unit.
* `NormalizedGCDMonoid` extends `NormalizationMonoid`, so the `gcd` and `lcm` are always
normalized. This makes `gcd`s of polynomials easier to work with, but excludes Euclidean domains,
and monoids without zero.
* `gcdMonoid_of_gcd` and `normalizedGCDMonoid_of_gcd` noncomputably construct a `GCDMonoid`
(resp. `NormalizedGCDMonoid`) structure just from the `gcd` and its properties.
* `gcdMonoid_of_exists_gcd` and `normalizedGCDMonoid_of_exists_gcd` noncomputably construct a
`GCDMonoid` (resp. `NormalizedGCDMonoid`) structure just from a proof that any two elements
have a (not necessarily normalized) `gcd`.
* `gcdMonoid_of_lcm` and `normalizedGCDMonoid_of_lcm` noncomputably construct a `GCDMonoid`
(resp. `NormalizedGCDMonoid`) structure just from the `lcm` and its properties.
* `gcdMonoid_of_exists_lcm` and `normalizedGCDMonoid_of_exists_lcm` noncomputably construct a
`GCDMonoid` (resp. `NormalizedGCDMonoid`) structure just from a proof that any two elements
have a (not necessarily normalized) `lcm`.
## TODO
* Port GCD facts about nats, definition of coprime
* Generalize normalization monoids to commutative (cancellative) monoids with or without zero
## Tags
divisibility, gcd, lcm, normalize
-/
variable {α : Type _}
-- Porting note: mathlib3 had a `@[protect_proj]` here, but adding `protected` to all the fields
-- adds unnecessary clutter to later code
/-- Normalization monoid: multiplying with `normUnit` gives a normal form for associated
elements. -/
class NormalizationMonoid (α : Type _) [CancelCommMonoidWithZero α] where
/-- `normUnit` assigns to each element of the monoid a unit of the monoid. -/
normUnit : α → αˣ
/-- The proposition that `normUnit` maps `0` to the identity. -/
normUnit_zero : normUnit 0 = 1
/-- The proposition that `normUnit` respects multiplication of non-zero elements. -/
normUnit_mul : ∀ {a b}, a ≠ 0 → b ≠ 0 → normUnit (a * b) = normUnit a * normUnit b
/-- The proposition that `normUnit` maps units to their inverses. -/
normUnit_coe_units : ∀ u : αˣ, normUnit u = u⁻¹
#align normalization_monoid NormalizationMonoid
export NormalizationMonoid (normUnit normUnit_zero normUnit_mul normUnit_coe_units)
attribute [simp] normUnit_coe_units normUnit_zero normUnit_mul
section NormalizationMonoid
variable [CancelCommMonoidWithZero α] [NormalizationMonoid α]
@[simp]
theorem normUnit_one : normUnit (1 : α) = 1 :=
normUnit_coe_units 1
#align norm_unit_one normUnit_one
-- Porting note: quite slow. Improve performance?
/-- Chooses an element of each associate class, by multiplying by `normUnit` -/
def normalize : α →*₀ α where
toFun x := x * normUnit x
map_zero' := by
simp only [normUnit_zero]
exact mul_one (0:α)
map_one' := by dsimp only; rw [normUnit_one, one_mul]; rfl
map_mul' x y :=
(by_cases fun hx : x = 0 => by dsimp only; rw [hx, zero_mul, zero_mul, zero_mul]) fun hx =>
(by_cases fun hy : y = 0 => by dsimp only; rw [hy, mul_zero, zero_mul, mul_zero]) fun hy => by
simp only [normUnit_mul hx hy, Units.val_mul]; simp only [mul_assoc, mul_left_comm y]
#align normalize normalize
theorem associated_normalize (x : α) : Associated x (normalize x) :=
⟨_, rfl⟩
#align associated_normalize associated_normalize
theorem normalize_associated (x : α) : Associated (normalize x) x :=
(associated_normalize _).symm
#align normalize_associated normalize_associated
theorem associated_normalize_iff {x y : α} : Associated x (normalize y) ↔ Associated x y :=
⟨fun h => h.trans (normalize_associated y), fun h => h.trans (associated_normalize y)⟩
#align associated_normalize_iff associated_normalize_iff
theorem normalize_associated_iff {x y : α} : Associated (normalize x) y ↔ Associated x y :=
⟨fun h => (associated_normalize _).trans h, fun h => (normalize_associated _).trans h⟩
#align normalize_associated_iff normalize_associated_iff
theorem Associates.mk_normalize (x : α) : Associates.mk (normalize x) = Associates.mk x :=
Associates.mk_eq_mk_iff_associated.2 (normalize_associated _)
#align associates.mk_normalize Associates.mk_normalize
@[simp]
theorem normalize_apply (x : α) : normalize x = x * normUnit x :=
rfl
#align normalize_apply normalize_apply
-- Porting note: `simp` can prove this
-- @[simp]
theorem normalize_zero : normalize (0 : α) = 0 :=
normalize.map_zero
#align normalize_zero normalize_zero
-- Porting note: `simp` can prove this
-- @[simp]
theorem normalize_one : normalize (1 : α) = 1 :=
normalize.map_one
#align normalize_one normalize_one
theorem normalize_coe_units (u : αˣ) : normalize (u : α) = 1 := by simp
#align normalize_coe_units normalize_coe_units
theorem normalize_eq_zero {x : α} : normalize x = 0 ↔ x = 0 :=
⟨fun hx => (associated_zero_iff_eq_zero x).1 <| hx ▸ associated_normalize _, by
rintro rfl; exact normalize_zero⟩
#align normalize_eq_zero normalize_eq_zero
theorem normalize_eq_one {x : α} : normalize x = 1 ↔ IsUnit x :=
⟨fun hx => isUnit_iff_exists_inv.2 ⟨_, hx⟩, fun ⟨u, hu⟩ => hu ▸ normalize_coe_units u⟩
#align normalize_eq_one normalize_eq_one
-- Porting note: quite slow. Improve performance?
@[simp]
theorem normUnit_mul_normUnit (a : α) : normUnit (a * normUnit a) = 1 := by
nontriviality α using Subsingleton.elim a 0
obtain rfl | h := eq_or_ne a 0
· rw [normUnit_zero, zero_mul, normUnit_zero]
· rw [normUnit_mul h (Units.ne_zero _), normUnit_coe_units, mul_inv_eq_one]
#align norm_unit_mul_norm_unit normUnit_mul_normUnit
theorem normalize_idem (x : α) : normalize (normalize x) = normalize x := by simp
#align normalize_idem normalize_idem
theorem normalize_eq_normalize {a b : α} (hab : a ∣ b) (hba : b ∣ a) :
normalize a = normalize b := by
nontriviality α
rcases associated_of_dvd_dvd hab hba with ⟨u, rfl⟩
refine' by_cases (by rintro rfl; simp only [zero_mul]) fun ha : a ≠ 0 => _
suffices a * ↑(normUnit a) = a * ↑u * ↑(normUnit a) * ↑u⁻¹ by
simpa only [normalize_apply, mul_assoc, normUnit_mul ha u.ne_zero, normUnit_coe_units]
calc
a * ↑(normUnit a) = a * ↑(normUnit a) * ↑u * ↑u⁻¹ := (Units.mul_inv_cancel_right _ _).symm
_ = a * ↑u * ↑(normUnit a) * ↑u⁻¹ := by rw [mul_right_comm a]
#align normalize_eq_normalize normalize_eq_normalize
theorem normalize_eq_normalize_iff {x y : α} : normalize x = normalize y ↔ x ∣ y ∧ y ∣ x :=
⟨fun h => ⟨Units.dvd_mul_right.1 ⟨_, h.symm⟩, Units.dvd_mul_right.1 ⟨_, h⟩⟩, fun ⟨hxy, hyx⟩ =>
normalize_eq_normalize hxy hyx⟩
#align normalize_eq_normalize_iff normalize_eq_normalize_iff
theorem dvd_antisymm_of_normalize_eq {a b : α} (ha : normalize a = a) (hb : normalize b = b)
(hab : a ∣ b) (hba : b ∣ a) : a = b :=
ha ▸ hb ▸ normalize_eq_normalize hab hba
#align dvd_antisymm_of_normalize_eq dvd_antisymm_of_normalize_eq
--can be proven by simp
theorem dvd_normalize_iff {a b : α} : a ∣ normalize b ↔ a ∣ b :=
Units.dvd_mul_right
#align dvd_normalize_iff dvd_normalize_iff
--can be proven by simp
theorem normalize_dvd_iff {a b : α} : normalize a ∣ b ↔ a ∣ b :=
Units.mul_right_dvd
#align normalize_dvd_iff normalize_dvd_iff
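
-- Illustrative consequence (not part of the original file): `normalize`
-- is transparent to divisibility on both sides.
example {a b : α} : normalize a ∣ normalize b ↔ a ∣ b := by
  rw [normalize_dvd_iff, dvd_normalize_iff]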
end NormalizationMonoid
namespace Associates
variable [CancelCommMonoidWithZero α] [NormalizationMonoid α]
attribute [local instance] Associated.setoid
/-- Maps an element of `Associates` back to the normalized element of its associate class -/
protected def out : Associates α → α :=
(Quotient.lift (normalize : α → α)) fun a _ ⟨_, hu⟩ =>
hu ▸ normalize_eq_normalize ⟨_, rfl⟩ (Units.mul_right_dvd.2 <| dvd_refl a)
#align associates.out Associates.out
@[simp]
theorem out_mk (a : α) : (Associates.mk a).out = normalize a :=
rfl
#align associates.out_mk Associates.out_mk
@[simp]
theorem out_one : (1 : Associates α).out = 1 :=
normalize_one
#align associates.out_one Associates.out_one
theorem out_mul (a b : Associates α) : (a * b).out = a.out * b.out :=
Quotient.inductionOn₂ a b fun _ _ => by
simp only [Associates.quotient_mk_eq_mk, out_mk, mk_mul_mk, normalize.map_mul]
#align associates.out_mul Associates.out_mul
theorem dvd_out_iff (a : α) (b : Associates α) : a ∣ b.out ↔ Associates.mk a ≤ b :=
Quotient.inductionOn b <| by
simp [Associates.out_mk, Associates.quotient_mk_eq_mk, mk_le_mk_iff_dvd_iff]
#align associates.dvd_out_iff Associates.dvd_out_iff
theorem out_dvd_iff (a : α) (b : Associates α) : b.out ∣ a ↔ b ≤ Associates.mk a :=
Quotient.inductionOn b <| by
simp [Associates.out_mk, Associates.quotient_mk_eq_mk, mk_le_mk_iff_dvd_iff]
#align associates.out_dvd_iff Associates.out_dvd_iff
@[simp]
theorem out_top : (⊤ : Associates α).out = 0 :=
normalize_zero
#align associates.out_top Associates.out_top
-- Porting note: lower priority to avoid linter complaints about simp-normal form
@[simp 1100]
theorem normalize_out (a : Associates α) :
normalize a.out = a.out :=
Quotient.inductionOn a normalize_idem
#align associates.normalize_out Associates.normalize_out
@[simp]
theorem mk_out (a : Associates α) : Associates.mk a.out = a :=
Quotient.inductionOn a mk_normalize
#align associates.mk_out Associates.mk_out
theorem out_injective : Function.Injective (Associates.out : _ → α) :=
Function.LeftInverse.injective mk_out
#align associates.out_injective Associates.out_injective
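
-- Illustrative corollary (not part of the original file): elements with
-- equal normal forms are associated.
example {a b : α} (h : normalize a = normalize b) : Associated a b := by
  rw [← normalize_associated_iff, h]
  exact normalize_associated b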
end Associates
-- Porting note: mathlib3 had a `@[protect_proj]` here, but adding `protected` to all the fields
-- adds unnecessary clutter to later code
/-- GCD monoid: a `CancelCommMonoidWithZero` with `gcd` (greatest common divisor) and
`lcm` (least common multiple) operations, determined up to a unit. The type class focuses on `gcd`
and we derive the corresponding `lcm` facts from `gcd`.
-/
class GCDMonoid (α : Type _) [CancelCommMonoidWithZero α] where
/-- The greatest common divisor between two elements. -/
gcd : α → α → α
/-- The least common multiple between two elements. -/
lcm : α → α → α
/-- The GCD is a divisor of the first element. -/
gcd_dvd_left : ∀ a b, gcd a b ∣ a
/-- The GCD is a divisor of the second element. -/
gcd_dvd_right : ∀ a b, gcd a b ∣ b
/-- Any common divisor of both elements is a divisor of the GCD. -/
dvd_gcd : ∀ {a b c}, a ∣ c → a ∣ b → a ∣ gcd c b
/-- The product of two elements is `Associated` with the product of their GCD and LCM. -/
gcd_mul_lcm : ∀ a b, Associated (gcd a b * lcm a b) (a * b)
/-- `0` is left-absorbing. -/
lcm_zero_left : ∀ a, lcm 0 a = 0
/-- `0` is right-absorbing. -/
lcm_zero_right : ∀ a, lcm a 0 = 0
#align gcd_monoid GCDMonoid
/-- Normalized GCD monoid: a `CancelCommMonoidWithZero` with normalization and `gcd`
(greatest common divisor) and `lcm` (least common multiple) operations. In this setting `gcd` and
`lcm` form a bounded lattice on the associated elements where `gcd` is the infimum, `lcm` is the
supremum, `1` is bottom, and `0` is top. The type class focuses on `gcd` and we derive the
corresponding `lcm` facts from `gcd`.
-/
class NormalizedGCDMonoid (α : Type _) [CancelCommMonoidWithZero α] extends NormalizationMonoid α,
GCDMonoid α where
/-- The GCD is normalized to itself. -/
normalize_gcd : ∀ a b, normalize (gcd a b) = gcd a b
/-- The LCM is normalized to itself. -/
normalize_lcm : ∀ a b, normalize (lcm a b) = lcm a b
#align normalized_gcd_monoid NormalizedGCDMonoid
export GCDMonoid (gcd lcm gcd_dvd_left gcd_dvd_right dvd_gcd lcm_zero_left lcm_zero_right)
attribute [simp] lcm_zero_left lcm_zero_right
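
-- A quick sanity check of the axioms (illustrative, not part of the
-- original file): the gcd of two elements always divides their product.
example [CancelCommMonoidWithZero α] [GCDMonoid α] (a b : α) :
    gcd a b ∣ a * b :=
  (gcd_dvd_left a b).trans (dvd_mul_right a b)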
section GCDMonoid
variable [CancelCommMonoidWithZero α]
-- Porting note: lower priority to avoid linter complaints about simp-normal form
@[simp 1100]
theorem normalize_gcd [NormalizedGCDMonoid α] :
∀ a b : α, normalize (gcd a b) = gcd a b :=
NormalizedGCDMonoid.normalize_gcd
#align normalize_gcd normalize_gcd
theorem gcd_mul_lcm [GCDMonoid α] : ∀ a b : α, Associated (gcd a b * lcm a b) (a * b) :=
GCDMonoid.gcd_mul_lcm
#align gcd_mul_lcm gcd_mul_lcm
section GCD
theorem dvd_gcd_iff [GCDMonoid α] (a b c : α) : a ∣ gcd b c ↔ a ∣ b ∧ a ∣ c :=
Iff.intro (fun h => ⟨h.trans (gcd_dvd_left _ _), h.trans (gcd_dvd_right _ _)⟩) fun ⟨hab, hac⟩ =>
dvd_gcd hab hac
#align dvd_gcd_iff dvd_gcd_iff
theorem gcd_comm [NormalizedGCDMonoid α] (a b : α) : gcd a b = gcd b a :=
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) (normalize_gcd _ _)
(dvd_gcd (gcd_dvd_right _ _) (gcd_dvd_left _ _))
(dvd_gcd (gcd_dvd_right _ _) (gcd_dvd_left _ _))
#align gcd_comm gcd_comm
theorem gcd_comm' [GCDMonoid α] (a b : α) : Associated (gcd a b) (gcd b a) :=
associated_of_dvd_dvd (dvd_gcd (gcd_dvd_right _ _) (gcd_dvd_left _ _))
(dvd_gcd (gcd_dvd_right _ _) (gcd_dvd_left _ _))
#align gcd_comm' gcd_comm'
theorem gcd_assoc [NormalizedGCDMonoid α] (m n k : α) : gcd (gcd m n) k = gcd m (gcd n k) :=
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) (normalize_gcd _ _)
(dvd_gcd ((gcd_dvd_left (gcd m n) k).trans (gcd_dvd_left m n))
(dvd_gcd ((gcd_dvd_left (gcd m n) k).trans (gcd_dvd_right m n)) (gcd_dvd_right (gcd m n) k)))
(dvd_gcd
(dvd_gcd (gcd_dvd_left m (gcd n k)) ((gcd_dvd_right m (gcd n k)).trans (gcd_dvd_left n k)))
((gcd_dvd_right m (gcd n k)).trans (gcd_dvd_right n k)))
#align gcd_assoc gcd_assoc
theorem gcd_assoc' [GCDMonoid α] (m n k : α) : Associated (gcd (gcd m n) k) (gcd m (gcd n k)) :=
associated_of_dvd_dvd
(dvd_gcd ((gcd_dvd_left (gcd m n) k).trans (gcd_dvd_left m n))
(dvd_gcd ((gcd_dvd_left (gcd m n) k).trans (gcd_dvd_right m n)) (gcd_dvd_right (gcd m n) k)))
(dvd_gcd
(dvd_gcd (gcd_dvd_left m (gcd n k)) ((gcd_dvd_right m (gcd n k)).trans (gcd_dvd_left n k)))
((gcd_dvd_right m (gcd n k)).trans (gcd_dvd_right n k)))
#align gcd_assoc' gcd_assoc'
instance [NormalizedGCDMonoid α] : IsCommutative α gcd :=
⟨gcd_comm⟩
instance [NormalizedGCDMonoid α] : IsAssociative α gcd :=
⟨gcd_assoc⟩
theorem gcd_eq_normalize [NormalizedGCDMonoid α] {a b c : α} (habc : gcd a b ∣ c)
(hcab : c ∣ gcd a b) : gcd a b = normalize c :=
normalize_gcd a b ▸ normalize_eq_normalize habc hcab
#align gcd_eq_normalize gcd_eq_normalize
@[simp]
theorem gcd_zero_left [NormalizedGCDMonoid α] (a : α) : gcd 0 a = normalize a :=
gcd_eq_normalize (gcd_dvd_right 0 a) (dvd_gcd (dvd_zero _) (dvd_refl a))
#align gcd_zero_left gcd_zero_left
theorem gcd_zero_left' [GCDMonoid α] (a : α) : Associated (gcd 0 a) a :=
associated_of_dvd_dvd (gcd_dvd_right 0 a) (dvd_gcd (dvd_zero _) (dvd_refl a))
#align gcd_zero_left' gcd_zero_left'
@[simp]
theorem gcd_zero_right [NormalizedGCDMonoid α] (a : α) : gcd a 0 = normalize a :=
gcd_eq_normalize (gcd_dvd_left a 0) (dvd_gcd (dvd_refl a) (dvd_zero _))
#align gcd_zero_right gcd_zero_right
theorem gcd_zero_right' [GCDMonoid α] (a : α) : Associated (gcd a 0) a :=
associated_of_dvd_dvd (gcd_dvd_left a 0) (dvd_gcd (dvd_refl a) (dvd_zero _))
#align gcd_zero_right' gcd_zero_right'
@[simp]
theorem gcd_eq_zero_iff [GCDMonoid α] (a b : α) : gcd a b = 0 ↔ a = 0 ∧ b = 0 :=
Iff.intro
(fun h => by
let ⟨ca, ha⟩ := gcd_dvd_left a b
let ⟨cb, hb⟩ := gcd_dvd_right a b
rw [h, zero_mul] at ha hb
exact ⟨ha, hb⟩)
fun ⟨ha, hb⟩ => by
rw [ha, hb, ← zero_dvd_iff]
apply dvd_gcd <;> rfl
#align gcd_eq_zero_iff gcd_eq_zero_iff
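
-- Illustrative corollary (not part of the original file): a gcd with a
-- nonzero element is nonzero.
example [GCDMonoid α] {a b : α} (ha : a ≠ 0) : gcd a b ≠ 0 := fun h =>
  ha ((gcd_eq_zero_iff a b).1 h).1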
@[simp]
theorem gcd_one_left [NormalizedGCDMonoid α] (a : α) : gcd 1 a = 1 :=
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) normalize_one (gcd_dvd_left _ _) (one_dvd _)
#align gcd_one_left gcd_one_left
@[simp]
theorem gcd_one_left' [GCDMonoid α] (a : α) : Associated (gcd 1 a) 1 :=
associated_of_dvd_dvd (gcd_dvd_left _ _) (one_dvd _)
#align gcd_one_left' gcd_one_left'
@[simp]
theorem gcd_one_right [NormalizedGCDMonoid α] (a : α) : gcd a 1 = 1 :=
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) normalize_one (gcd_dvd_right _ _) (one_dvd _)
#align gcd_one_right gcd_one_right
@[simp]
theorem gcd_one_right' [GCDMonoid α] (a : α) : Associated (gcd a 1) 1 :=
associated_of_dvd_dvd (gcd_dvd_right _ _) (one_dvd _)
#align gcd_one_right' gcd_one_right'
theorem gcd_dvd_gcd [GCDMonoid α] {a b c d : α} (hab : a ∣ b) (hcd : c ∣ d) : gcd a c ∣ gcd b d :=
dvd_gcd ((gcd_dvd_left _ _).trans hab) ((gcd_dvd_right _ _).trans hcd)
#align gcd_dvd_gcd gcd_dvd_gcd
@[simp]
theorem gcd_same [NormalizedGCDMonoid α] (a : α) : gcd a a = normalize a :=
gcd_eq_normalize (gcd_dvd_left _ _) (dvd_gcd (dvd_refl a) (dvd_refl a))
#align gcd_same gcd_same
@[simp]
theorem gcd_mul_left [NormalizedGCDMonoid α] (a b c : α) :
gcd (a * b) (a * c) = normalize a * gcd b c :=
(by_cases (by rintro rfl; simp only [zero_mul, gcd_zero_left, normalize_zero]))
fun ha : a ≠ 0 =>
suffices gcd (a * b) (a * c) = normalize (a * gcd b c) by simpa
let ⟨d, eq⟩ := dvd_gcd (dvd_mul_right a b) (dvd_mul_right a c)
gcd_eq_normalize
(eq.symm ▸ mul_dvd_mul_left a
(show d ∣ gcd b c from
dvd_gcd ((mul_dvd_mul_iff_left ha).1 <| eq ▸ gcd_dvd_left _ _)
((mul_dvd_mul_iff_left ha).1 <| eq ▸ gcd_dvd_right _ _)))
(dvd_gcd (mul_dvd_mul_left a <| gcd_dvd_left _ _) (mul_dvd_mul_left a <| gcd_dvd_right _ _))
#align gcd_mul_left gcd_mul_left
theorem gcd_mul_left' [GCDMonoid α] (a b c : α) :
Associated (gcd (a * b) (a * c)) (a * gcd b c) := by
obtain rfl | ha := eq_or_ne a 0
· simp only [zero_mul, gcd_zero_left']
obtain ⟨d, eq⟩ := dvd_gcd (dvd_mul_right a b) (dvd_mul_right a c)
apply associated_of_dvd_dvd
· rw [eq]
apply mul_dvd_mul_left
exact
dvd_gcd ((mul_dvd_mul_iff_left ha).1 <| eq ▸ gcd_dvd_left _ _)
((mul_dvd_mul_iff_left ha).1 <| eq ▸ gcd_dvd_right _ _)
· exact dvd_gcd (mul_dvd_mul_left a <| gcd_dvd_left _ _) (mul_dvd_mul_left a <| gcd_dvd_right _ _)
#align gcd_mul_left' gcd_mul_left'
@[simp]
theorem gcd_mul_right [NormalizedGCDMonoid α] (a b c : α) :
gcd (b * a) (c * a) = gcd b c * normalize a := by simp only [mul_comm, gcd_mul_left]
#align gcd_mul_right gcd_mul_right
@[simp]
theorem gcd_mul_right' [GCDMonoid α] (a b c : α) : Associated (gcd (b * a) (c * a)) (gcd b c * a) :=
by simp only [mul_comm, gcd_mul_left']
#align gcd_mul_right' gcd_mul_right'
theorem gcd_eq_left_iff [NormalizedGCDMonoid α] (a b : α) (h : normalize a = a) :
gcd a b = a ↔ a ∣ b :=
(Iff.intro fun eq => eq ▸ gcd_dvd_right _ _) fun hab =>
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) h (gcd_dvd_left _ _) (dvd_gcd (dvd_refl a) hab)
#align gcd_eq_left_iff gcd_eq_left_iff
theorem gcd_eq_right_iff [NormalizedGCDMonoid α] (a b : α) (h : normalize b = b) :
gcd a b = b ↔ b ∣ a := by simpa only [gcd_comm a b] using gcd_eq_left_iff b a h
#align gcd_eq_right_iff gcd_eq_right_iff
theorem gcd_dvd_gcd_mul_left [GCDMonoid α] (m n k : α) : gcd m n ∣ gcd (k * m) n :=
gcd_dvd_gcd (dvd_mul_left _ _) dvd_rfl
#align gcd_dvd_gcd_mul_left gcd_dvd_gcd_mul_left
theorem gcd_dvd_gcd_mul_right [GCDMonoid α] (m n k : α) : gcd m n ∣ gcd (m * k) n :=
gcd_dvd_gcd (dvd_mul_right _ _) dvd_rfl
#align gcd_dvd_gcd_mul_right gcd_dvd_gcd_mul_right
theorem gcd_dvd_gcd_mul_left_right [GCDMonoid α] (m n k : α) : gcd m n ∣ gcd m (k * n) :=
gcd_dvd_gcd dvd_rfl (dvd_mul_left _ _)
#align gcd_dvd_gcd_mul_left_right gcd_dvd_gcd_mul_left_right
theorem gcd_dvd_gcd_mul_right_right [GCDMonoid α] (m n k : α) : gcd m n ∣ gcd m (n * k) :=
gcd_dvd_gcd dvd_rfl (dvd_mul_right _ _)
#align gcd_dvd_gcd_mul_right_right gcd_dvd_gcd_mul_right_right
theorem Associated.gcd_eq_left [NormalizedGCDMonoid α] {m n : α} (h : Associated m n) (k : α) :
gcd m k = gcd n k :=
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) (normalize_gcd _ _) (gcd_dvd_gcd h.dvd dvd_rfl)
(gcd_dvd_gcd h.symm.dvd dvd_rfl)
#align associated.gcd_eq_left Associated.gcd_eq_left
theorem Associated.gcd_eq_right [NormalizedGCDMonoid α] {m n : α} (h : Associated m n) (k : α) :
gcd k m = gcd k n :=
dvd_antisymm_of_normalize_eq (normalize_gcd _ _) (normalize_gcd _ _) (gcd_dvd_gcd dvd_rfl h.dvd)
(gcd_dvd_gcd dvd_rfl h.symm.dvd)
#align associated.gcd_eq_right Associated.gcd_eq_right
theorem dvd_gcd_mul_of_dvd_mul [GCDMonoid α] {m n k : α} (H : k ∣ m * n) : k ∣ gcd k m * n :=
(dvd_gcd (dvd_mul_right _ n) H).trans (gcd_mul_right' n k m).dvd
#align dvd_gcd_mul_of_dvd_mul dvd_gcd_mul_of_dvd_mul
theorem dvd_mul_gcd_of_dvd_mul [GCDMonoid α] {m n k : α} (H : k ∣ m * n) : k ∣ m * gcd k n := by
rw [mul_comm] at H⊢
exact dvd_gcd_mul_of_dvd_mul H
#align dvd_mul_gcd_of_dvd_mul dvd_mul_gcd_of_dvd_mul
/-- Represent a divisor of `m * n` as a product of a divisor of `m` and a divisor of `n`.
In other words, the nonzero elements of a `GCDMonoid` form a decomposition monoid
(more widely known as a pre-Schreier domain in the context of rings).
Note: In general, this representation is highly non-unique.
See `Nat.prodDvdAndDvdOfDvdProd` for a constructive version on `ℕ`. -/
theorem exists_dvd_and_dvd_of_dvd_mul [GCDMonoid α] {m n k : α} (H : k ∣ m * n) :
∃ d₁ d₂, d₁ ∣ m ∧ d₂ ∣ n ∧ k = d₁ * d₂ := by
by_cases h0 : gcd k m = 0
· rw [gcd_eq_zero_iff] at h0
rcases h0 with ⟨rfl, rfl⟩
refine' ⟨0, n, dvd_refl 0, dvd_refl n, _⟩
simp
· obtain ⟨a, ha⟩ := gcd_dvd_left k m
refine' ⟨gcd k m, a, gcd_dvd_right _ _, _, ha⟩
suffices h : gcd k m * a ∣ gcd k m * n
· cases' h with b hb
use b
rw [mul_assoc] at hb
apply mul_left_cancel₀ h0 hb
rw [← ha]
exact dvd_gcd_mul_of_dvd_mul H
#align exists_dvd_and_dvd_of_dvd_mul exists_dvd_and_dvd_of_dvd_mul
theorem dvd_mul [GCDMonoid α] {k m n : α} : k ∣ m * n ↔ ∃ d₁ d₂, d₁ ∣ m ∧ d₂ ∣ n ∧ k = d₁ * d₂ := by
refine' ⟨exists_dvd_and_dvd_of_dvd_mul, _⟩
rintro ⟨d₁, d₂, hy, hz, rfl⟩
exact mul_dvd_mul hy hz
#align dvd_mul dvd_mul
theorem gcd_mul_dvd_mul_gcd [GCDMonoid α] (k m n : α) : gcd k (m * n) ∣ gcd k m * gcd k n := by
obtain ⟨m', n', hm', hn', h⟩ := exists_dvd_and_dvd_of_dvd_mul (gcd_dvd_right k (m * n))
replace h : gcd k (m * n) = m' * n' := h
rw [h]
have hm'n' : m' * n' ∣ k := h ▸ gcd_dvd_left _ _
apply mul_dvd_mul
· have hm'k : m' ∣ k := (dvd_mul_right m' n').trans hm'n'
exact dvd_gcd hm'k hm'
· have hn'k : n' ∣ k := (dvd_mul_left n' m').trans hm'n'
exact dvd_gcd hn'k hn'
#align gcd_mul_dvd_mul_gcd gcd_mul_dvd_mul_gcd
theorem gcd_pow_right_dvd_pow_gcd [GCDMonoid α] {a b : α} {k : ℕ} :
gcd a (b ^ k) ∣ gcd a b ^ k := by
by_cases hg : gcd a b = 0
· rw [gcd_eq_zero_iff] at hg
rcases hg with ⟨rfl, rfl⟩
exact
(gcd_zero_left' (0 ^ k : α)).dvd.trans
(pow_dvd_pow_of_dvd (gcd_zero_left' (0 : α)).symm.dvd _)
· induction' k with k hk
· rw [pow_zero, pow_zero]
exact (gcd_one_right' a).dvd
rw [pow_succ, pow_succ]
trans gcd a b * gcd a (b ^ k)
· exact gcd_mul_dvd_mul_gcd a b (b ^ k)
· exact (mul_dvd_mul_iff_left hg).mpr hk
#align gcd_pow_right_dvd_pow_gcd gcd_pow_right_dvd_pow_gcd
theorem gcd_pow_left_dvd_pow_gcd [GCDMonoid α] {a b : α} {k : ℕ} : gcd (a ^ k) b ∣ gcd a b ^ k :=
calc
gcd (a ^ k) b ∣ gcd b (a ^ k) := (gcd_comm' _ _).dvd
_ ∣ gcd b a ^ k := gcd_pow_right_dvd_pow_gcd
_ ∣ gcd a b ^ k := pow_dvd_pow_of_dvd (gcd_comm' _ _).dvd _
#align gcd_pow_left_dvd_pow_gcd gcd_pow_left_dvd_pow_gcd
theorem pow_dvd_of_mul_eq_pow [GCDMonoid α] {a b c d₁ d₂ : α} (ha : a ≠ 0) (hab : IsUnit (gcd a b))
{k : ℕ} (h : a * b = c ^ k) (hc : c = d₁ * d₂) (hd₁ : d₁ ∣ a) : d₁ ^ k ≠ 0 ∧ d₁ ^ k ∣ a := by
have h1 : IsUnit (gcd (d₁ ^ k) b) := by
apply isUnit_of_dvd_one
trans gcd d₁ b ^ k
· exact gcd_pow_left_dvd_pow_gcd
· apply IsUnit.dvd
apply IsUnit.pow
apply isUnit_of_dvd_one
apply dvd_trans _ hab.dvd
apply gcd_dvd_gcd hd₁ (dvd_refl b)
have h2 : d₁ ^ k ∣ a * b := by
use d₂ ^ k
rw [h, hc]
exact mul_pow d₁ d₂ k
rw [mul_comm] at h2
have h3 : d₁ ^ k ∣ a := by
apply (dvd_gcd_mul_of_dvd_mul h2).trans
rw [IsUnit.mul_left_dvd _ _ _ h1]
have h4 : d₁ ^ k ≠ 0 := by
intro hdk
rw [hdk] at h3
apply absurd (zero_dvd_iff.mp h3) ha
exact ⟨h4, h3⟩
#align pow_dvd_of_mul_eq_pow pow_dvd_of_mul_eq_pow
theorem exists_associated_pow_of_mul_eq_pow [GCDMonoid α] {a b c : α} (hab : IsUnit (gcd a b))
{k : ℕ} (h : a * b = c ^ k) : ∃ d : α, Associated (d ^ k) a := by
cases subsingleton_or_nontrivial α
· use 0
rw [Subsingleton.elim a (0 ^ k)]
by_cases ha : a = 0
· use 0
rw [ha]
obtain rfl | hk := k.eq_zero_or_pos
· exfalso
revert h
rw [ha, zero_mul, pow_zero]
apply zero_ne_one
· rw [zero_pow hk]
by_cases hb : b = 0
· use 1
rw [one_pow]
apply (associated_one_iff_isUnit.mpr hab).symm.trans
rw [hb]
exact gcd_zero_right' a
obtain rfl | hk := k.eq_zero_or_pos
· use 1
rw [pow_zero] at h⊢
use Units.mkOfMulEqOne _ _ h
rw [Units.val_mkOfMulEqOne, one_mul]
have hc : c ∣ a * b := by
rw [h]
exact dvd_pow_self _ hk.ne'
obtain ⟨d₁, d₂, hd₁, hd₂, hc⟩ := exists_dvd_and_dvd_of_dvd_mul hc
use d₁
obtain ⟨h0₁, ⟨a', ha'⟩⟩ := pow_dvd_of_mul_eq_pow ha hab h hc hd₁
rw [mul_comm] at h hc
rw [(gcd_comm' a b).isUnit_iff] at hab
obtain ⟨h0₂, ⟨b', hb'⟩⟩ := pow_dvd_of_mul_eq_pow hb hab h hc hd₂
rw [ha', hb', hc, mul_pow] at h
have h' : a' * b' = 1 := by
apply (mul_right_inj' h0₁).mp
rw [mul_one]
apply (mul_right_inj' h0₂).mp
rw [← h]
rw [mul_assoc, mul_comm a', ← mul_assoc _ b', ← mul_assoc b', mul_comm b']
use Units.mkOfMulEqOne _ _ h'
rw [Units.val_mkOfMulEqOne, ha']
#align exists_associated_pow_of_mul_eq_pow exists_associated_pow_of_mul_eq_pow
theorem exists_eq_pow_of_mul_eq_pow [GCDMonoid α] [Unique αˣ] {a b c : α} (hab : IsUnit (gcd a b))
{k : ℕ} (h : a * b = c ^ k) : ∃ d : α, a = d ^ k :=
let ⟨d, hd⟩ := exists_associated_pow_of_mul_eq_pow hab h
⟨d, (associated_iff_eq.mp hd).symm⟩
#align exists_eq_pow_of_mul_eq_pow exists_eq_pow_of_mul_eq_pow
theorem gcd_greatest {α : Type _} [CancelCommMonoidWithZero α] [NormalizedGCDMonoid α] {a b d : α}
(hda : d ∣ a) (hdb : d ∣ b) (hd : ∀ e : α, e ∣ a → e ∣ b → e ∣ d) :
GCDMonoid.gcd a b = normalize d :=
haveI h := hd _ (GCDMonoid.gcd_dvd_left a b) (GCDMonoid.gcd_dvd_right a b)
gcd_eq_normalize h (GCDMonoid.dvd_gcd hda hdb)
#align gcd_greatest gcd_greatest
theorem gcd_greatest_associated {α : Type _} [CancelCommMonoidWithZero α] [GCDMonoid α] {a b d : α}
(hda : d ∣ a) (hdb : d ∣ b) (hd : ∀ e : α, e ∣ a → e ∣ b → e ∣ d) :
Associated d (GCDMonoid.gcd a b) :=
haveI h := hd _ (GCDMonoid.gcd_dvd_left a b) (GCDMonoid.gcd_dvd_right a b)
associated_of_dvd_dvd (GCDMonoid.dvd_gcd hda hdb) h
#align gcd_greatest_associated gcd_greatest_associated
theorem isUnit_gcd_of_eq_mul_gcd {α : Type _} [CancelCommMonoidWithZero α] [GCDMonoid α]
{x y x' y' : α} (ex : x = gcd x y * x') (ey : y = gcd x y * y') (h : gcd x y ≠ 0) :
IsUnit (gcd x' y') := by
rw [← associated_one_iff_isUnit]
refine' Associated.of_mul_left _ (Associated.refl <| gcd x y) h
convert (gcd_mul_left' (gcd x y) x' y').symm using 1
rw [← ex, ← ey, mul_one]
#align is_unit_gcd_of_eq_mul_gcd isUnit_gcd_of_eq_mul_gcd
theorem extract_gcd {α : Type _} [CancelCommMonoidWithZero α] [GCDMonoid α] (x y : α) :
∃ x' y', x = gcd x y * x' ∧ y = gcd x y * y' ∧ IsUnit (gcd x' y') := by
by_cases h : gcd x y = 0
· obtain ⟨rfl, rfl⟩ := (gcd_eq_zero_iff x y).1 h
simp_rw [← associated_one_iff_isUnit]
exact ⟨1, 1, by rw [h, zero_mul], by rw [h, zero_mul], gcd_one_left' 1⟩
obtain ⟨x', ex⟩ := gcd_dvd_left x y
obtain ⟨y', ey⟩ := gcd_dvd_right x y
exact ⟨x', y', ex, ey, isUnit_gcd_of_eq_mul_gcd ex ey h⟩
#align extract_gcd extract_gcd
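
-- Usage sketch for `extract_gcd` (illustrative, not part of the
-- original file): peel the gcd off the first element.
example [GCDMonoid α] (x y : α) : ∃ x', x = gcd x y * x' := by
  obtain ⟨x', y', hx, -, -⟩ := extract_gcd x y
  exact ⟨x', hx⟩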
end GCD
section LCM
theorem lcm_dvd_iff [GCDMonoid α] {a b c : α} : lcm a b ∣ c ↔ a ∣ c ∧ b ∣ c := by
by_cases h : a = 0 ∨ b = 0
· rcases h with (rfl | rfl) <;>
simp (config := { contextual := true }) only [iff_def, lcm_zero_left, lcm_zero_right,
zero_dvd_iff, dvd_zero, eq_self_iff_true, and_true_iff, imp_true_iff]
· obtain ⟨h1, h2⟩ := not_or.1 h
have h : gcd a b ≠ 0 := fun H => h1 ((gcd_eq_zero_iff _ _).1 H).1
rw [← mul_dvd_mul_iff_left h, (gcd_mul_lcm a b).dvd_iff_dvd_left, ←
(gcd_mul_right' c a b).dvd_iff_dvd_right, dvd_gcd_iff, mul_comm b c, mul_dvd_mul_iff_left h1,
mul_dvd_mul_iff_right h2, and_comm]
#align lcm_dvd_iff lcm_dvd_iff
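
-- Illustrative special case (not part of the original file): the lcm
-- always divides the product.
example [GCDMonoid α] (a b : α) : lcm a b ∣ a * b :=
  lcm_dvd_iff.2 ⟨dvd_mul_right a b, dvd_mul_left b a⟩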
theorem dvd_lcm_left [GCDMonoid α] (a b : α) : a ∣ lcm a b :=
(lcm_dvd_iff.1 (dvd_refl (lcm a b))).1
#align dvd_lcm_left dvd_lcm_left
theorem dvd_lcm_right [GCDMonoid α] (a b : α) : b ∣ lcm a b :=
(lcm_dvd_iff.1 (dvd_refl (lcm a b))).2
#align dvd_lcm_right dvd_lcm_right
theorem lcm_dvd [GCDMonoid α] {a b c : α} (hab : a ∣ b) (hcb : c ∣ b) : lcm a c ∣ b :=
lcm_dvd_iff.2 ⟨hab, hcb⟩
#align lcm_dvd lcm_dvd
@[simp]
theorem lcm_eq_zero_iff [GCDMonoid α] (a b : α) : lcm a b = 0 ↔ a = 0 ∨ b = 0 :=
Iff.intro
(fun h : lcm a b = 0 => by
have : Associated (a * b) 0 := (gcd_mul_lcm a b).symm.trans <| by rw [h, mul_zero]
rwa [← mul_eq_zero, ← associated_zero_iff_eq_zero])
(by rintro (rfl | rfl) <;> [apply lcm_zero_left, apply lcm_zero_right])
#align lcm_eq_zero_iff lcm_eq_zero_iff
-- Porting note: lower priority to avoid linter complaints about simp-normal form
@[simp 1100]
theorem normalize_lcm [NormalizedGCDMonoid α] (a b : α) :
normalize (lcm a b) = lcm a b :=
NormalizedGCDMonoid.normalize_lcm a b
#align normalize_lcm normalize_lcm
theorem lcm_comm [NormalizedGCDMonoid α] (a b : α) : lcm a b = lcm b a :=
dvd_antisymm_of_normalize_eq (normalize_lcm _ _) (normalize_lcm _ _)
(lcm_dvd (dvd_lcm_right _ _) (dvd_lcm_left _ _))
(lcm_dvd (dvd_lcm_right _ _) (dvd_lcm_left _ _))
#align lcm_comm lcm_comm
theorem lcm_comm' [GCDMonoid α] (a b : α) : Associated (lcm a b) (lcm b a) :=
associated_of_dvd_dvd (lcm_dvd (dvd_lcm_right _ _) (dvd_lcm_left _ _))
(lcm_dvd (dvd_lcm_right _ _) (dvd_lcm_left _ _))
#align lcm_comm' lcm_comm'
theorem lcm_assoc [NormalizedGCDMonoid α] (m n k : α) : lcm (lcm m n) k = lcm m (lcm n k) :=
dvd_antisymm_of_normalize_eq (normalize_lcm _ _) (normalize_lcm _ _)
(lcm_dvd (lcm_dvd (dvd_lcm_left _ _) ((dvd_lcm_left _ _).trans (dvd_lcm_right _ _)))
((dvd_lcm_right _ _).trans (dvd_lcm_right _ _)))
(lcm_dvd ((dvd_lcm_left _ _).trans (dvd_lcm_left _ _))
(lcm_dvd ((dvd_lcm_right _ _).trans (dvd_lcm_left _ _)) (dvd_lcm_right _ _)))
#align lcm_assoc lcm_assoc
theorem lcm_assoc' [GCDMonoid α] (m n k : α) : Associated (lcm (lcm m n) k) (lcm m (lcm n k)) :=
associated_of_dvd_dvd
(lcm_dvd (lcm_dvd (dvd_lcm_left _ _) ((dvd_lcm_left _ _).trans (dvd_lcm_right _ _)))
((dvd_lcm_right _ _).trans (dvd_lcm_right _ _)))
(lcm_dvd ((dvd_lcm_left _ _).trans (dvd_lcm_left _ _))
(lcm_dvd ((dvd_lcm_right _ _).trans (dvd_lcm_left _ _)) (dvd_lcm_right _ _)))
#align lcm_assoc' lcm_assoc'
instance [NormalizedGCDMonoid α] : IsCommutative α lcm :=
⟨lcm_comm⟩
instance [NormalizedGCDMonoid α] : IsAssociative α lcm :=
⟨lcm_assoc⟩
theorem lcm_eq_normalize [NormalizedGCDMonoid α] {a b c : α} (habc : lcm a b ∣ c)
(hcab : c ∣ lcm a b) : lcm a b = normalize c :=
normalize_lcm a b ▸ normalize_eq_normalize habc hcab
#align lcm_eq_normalize lcm_eq_normalize
theorem lcm_dvd_lcm [GCDMonoid α] {a b c d : α} (hab : a ∣ b) (hcd : c ∣ d) : lcm a c ∣ lcm b d :=
lcm_dvd (hab.trans (dvd_lcm_left _ _)) (hcd.trans (dvd_lcm_right _ _))
#align lcm_dvd_lcm lcm_dvd_lcm
@[simp]
theorem lcm_units_coe_left [NormalizedGCDMonoid α] (u : αˣ) (a : α) : lcm (↑u) a = normalize a :=
lcm_eq_normalize (lcm_dvd Units.coe_dvd dvd_rfl) (dvd_lcm_right _ _)
#align lcm_units_coe_left lcm_units_coe_left
@[simp]
theorem lcm_units_coe_right [NormalizedGCDMonoid α] (a : α) (u : αˣ) : lcm a ↑u = normalize a :=
(lcm_comm a u).trans <| lcm_units_coe_left _ _
#align lcm_units_coe_right lcm_units_coe_right
@[simp]
theorem lcm_one_left [NormalizedGCDMonoid α] (a : α) : lcm 1 a = normalize a :=
lcm_units_coe_left 1 a
#align lcm_one_left lcm_one_left
@[simp]
theorem lcm_one_right [NormalizedGCDMonoid α] (a : α) : lcm a 1 = normalize a :=
lcm_units_coe_right a 1
#align lcm_one_right lcm_one_right
@[simp]
theorem lcm_same [NormalizedGCDMonoid α] (a : α) : lcm a a = normalize a :=
lcm_eq_normalize (lcm_dvd dvd_rfl dvd_rfl) (dvd_lcm_left _ _)
#align lcm_same lcm_same
@[simp]
theorem lcm_eq_one_iff [NormalizedGCDMonoid α] (a b : α) : lcm a b = 1 ↔ a ∣ 1 ∧ b ∣ 1 :=
Iff.intro (fun eq => eq ▸ ⟨dvd_lcm_left _ _, dvd_lcm_right _ _⟩) fun ⟨⟨c, hc⟩, ⟨d, hd⟩⟩ =>
show lcm (Units.mkOfMulEqOne a c hc.symm : α) (Units.mkOfMulEqOne b d hd.symm) = 1 by
rw [lcm_units_coe_left, normalize_coe_units]
#align lcm_eq_one_iff lcm_eq_one_iff
@[simp]
theorem lcm_mul_left [NormalizedGCDMonoid α] (a b c : α) :
lcm (a * b) (a * c) = normalize a * lcm b c :=
(by_cases (by rintro rfl; simp only [zero_mul, lcm_zero_left, normalize_zero]))
fun ha : a ≠ 0 =>
suffices lcm (a * b) (a * c) = normalize (a * lcm b c) by simpa
have : a ∣ lcm (a * b) (a * c) := (dvd_mul_right _ _).trans (dvd_lcm_left _ _)
let ⟨d, eq⟩ := this
lcm_eq_normalize
(lcm_dvd (mul_dvd_mul_left a (dvd_lcm_left _ _)) (mul_dvd_mul_left a (dvd_lcm_right _ _)))
(eq.symm ▸
(mul_dvd_mul_left a <|
lcm_dvd ((mul_dvd_mul_iff_left ha).1 <| eq ▸ dvd_lcm_left _ _)
((mul_dvd_mul_iff_left ha).1 <| eq ▸ dvd_lcm_right _ _)))
#align lcm_mul_left lcm_mul_left
@[simp]
theorem lcm_mul_right [NormalizedGCDMonoid α] (a b c : α) :
lcm (b * a) (c * a) = lcm b c * normalize a := by simp only [mul_comm, lcm_mul_left]
#align lcm_mul_right lcm_mul_right
theorem lcm_eq_left_iff [NormalizedGCDMonoid α] (a b : α) (h : normalize a = a) :
lcm a b = a ↔ b ∣ a :=
(Iff.intro fun eq => eq ▸ dvd_lcm_right _ _) fun hab =>
dvd_antisymm_of_normalize_eq (normalize_lcm _ _) h (lcm_dvd (dvd_refl a) hab) (dvd_lcm_left _ _)
#align lcm_eq_left_iff lcm_eq_left_iff
theorem lcm_eq_right_iff [NormalizedGCDMonoid α] (a b : α) (h : normalize b = b) :
lcm a b = b ↔ a ∣ b := by simpa only [lcm_comm b a] using lcm_eq_left_iff b a h
#align lcm_eq_right_iff lcm_eq_right_iff
theorem lcm_dvd_lcm_mul_left [GCDMonoid α] (m n k : α) : lcm m n ∣ lcm (k * m) n :=
lcm_dvd_lcm (dvd_mul_left _ _) dvd_rfl
#align lcm_dvd_lcm_mul_left lcm_dvd_lcm_mul_left
theorem lcm_dvd_lcm_mul_right [GCDMonoid α] (m n k : α) : lcm m n ∣ lcm (m * k) n :=
lcm_dvd_lcm (dvd_mul_right _ _) dvd_rfl
#align lcm_dvd_lcm_mul_right lcm_dvd_lcm_mul_right
theorem lcm_dvd_lcm_mul_left_right [GCDMonoid α] (m n k : α) : lcm m n ∣ lcm m (k * n) :=
lcm_dvd_lcm dvd_rfl (dvd_mul_left _ _)
#align lcm_dvd_lcm_mul_left_right lcm_dvd_lcm_mul_left_right
theorem lcm_dvd_lcm_mul_right_right [GCDMonoid α] (m n k : α) : lcm m n ∣ lcm m (n * k) :=
lcm_dvd_lcm dvd_rfl (dvd_mul_right _ _)
#align lcm_dvd_lcm_mul_right_right lcm_dvd_lcm_mul_right_right
theorem lcm_eq_of_associated_left [NormalizedGCDMonoid α] {m n : α} (h : Associated m n) (k : α) :
lcm m k = lcm n k :=
dvd_antisymm_of_normalize_eq (normalize_lcm _ _) (normalize_lcm _ _) (lcm_dvd_lcm h.dvd dvd_rfl)
(lcm_dvd_lcm h.symm.dvd dvd_rfl)
#align lcm_eq_of_associated_left lcm_eq_of_associated_left
theorem lcm_eq_of_associated_right [NormalizedGCDMonoid α] {m n : α} (h : Associated m n) (k : α) :
lcm k m = lcm k n :=
dvd_antisymm_of_normalize_eq (normalize_lcm _ _) (normalize_lcm _ _) (lcm_dvd_lcm dvd_rfl h.dvd)
(lcm_dvd_lcm dvd_rfl h.symm.dvd)
#align lcm_eq_of_associated_right lcm_eq_of_associated_right
end LCM
namespace GCDMonoid
theorem prime_of_irreducible [GCDMonoid α] {x : α} (hi : Irreducible x) : Prime x :=
⟨hi.ne_zero,
⟨hi.1, fun a b h => by
cases' gcd_dvd_left x a with y hy
cases' hi.isUnit_or_isUnit hy with hu hu
· right
trans gcd (x * b) (a * b)
apply dvd_gcd (dvd_mul_right x b) h
rw [(gcd_mul_right' b x a).dvd_iff_dvd_left]
exact (associated_unit_mul_left _ _ hu).dvd
· left
rw [hy]
exact dvd_trans (associated_mul_unit_left _ _ hu).dvd (gcd_dvd_right x a)⟩⟩
#align gcd_monoid.prime_of_irreducible GCDMonoid.prime_of_irreducible
theorem irreducible_iff_prime [GCDMonoid α] {p : α} : Irreducible p ↔ Prime p :=
⟨prime_of_irreducible, Prime.irreducible⟩
#align gcd_monoid.irreducible_iff_prime GCDMonoid.irreducible_iff_prime
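
-- Illustrative use (not part of the original file): in a `GCDMonoid`,
-- irreducible elements satisfy Euclid's lemma.
example [GCDMonoid α] {p a b : α} (hp : Irreducible p) (h : p ∣ a * b) :
    p ∣ a ∨ p ∣ b :=
  (prime_of_irreducible hp).2.2 a b h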
end GCDMonoid
end GCDMonoid
section UniqueUnit
variable [CancelCommMonoidWithZero α] [Unique αˣ]
-- see Note [lower instance priority]
instance (priority := 100) normalizationMonoidOfUniqueUnits : NormalizationMonoid α where
normUnit _ := 1
normUnit_zero := rfl
normUnit_mul _ _ := (mul_one 1).symm
normUnit_coe_units _ := Subsingleton.elim _ _
#align normalization_monoid_of_unique_units normalizationMonoidOfUniqueUnits
instance uniqueNormalizationMonoidOfUniqueUnits : Unique (NormalizationMonoid α) where
default := normalizationMonoidOfUniqueUnits
uniq := fun ⟨u, _, _, _⟩ => by congr; simp
#align unique_normalization_monoid_of_unique_units uniqueNormalizationMonoidOfUniqueUnits
instance subsingleton_gcdMonoid_of_unique_units : Subsingleton (GCDMonoid α) :=
⟨fun g₁ g₂ => by
have hgcd : g₁.gcd = g₂.gcd := by
ext (a b)
refine' associated_iff_eq.mp (associated_of_dvd_dvd _ _)
-- Porting note: Lean4 seems to need help specifying `g₁` and `g₂`
· exact dvd_gcd (@gcd_dvd_left _ _ g₁ _ _) (@gcd_dvd_right _ _ g₁ _ _)
· exact @dvd_gcd _ _ g₁ _ _ _ (@gcd_dvd_left _ _ g₂ _ _) (@gcd_dvd_right _ _ g₂ _ _)
have hlcm : g₁.lcm = g₂.lcm := by
ext (a b)
-- Porting note: Lean4 seems to need help specifying `g₁` and `g₂`
refine' associated_iff_eq.mp (associated_of_dvd_dvd _ _)
· exact (@lcm_dvd_iff _ _ g₁ ..).mpr ⟨@dvd_lcm_left _ _ g₂ _ _, @dvd_lcm_right _ _ g₂ _ _⟩
· exact lcm_dvd_iff.mpr ⟨@dvd_lcm_left _ _ g₁ _ _, @dvd_lcm_right _ _ g₁ _ _⟩
cases g₁
cases g₂
dsimp only at hgcd hlcm
simp only [hgcd, hlcm]⟩
#align subsingleton_gcd_monoid_of_unique_units subsingleton_gcdMonoid_of_unique_units
instance subsingleton_normalizedGCDMonoid_of_unique_units : Subsingleton (NormalizedGCDMonoid α) :=
⟨by
intro a b
cases' a with a_norm a_gcd
cases' b with b_norm b_gcd
have := Subsingleton.elim a_gcd b_gcd
subst this
have := Subsingleton.elim a_norm b_norm
subst this
rfl⟩
#align subsingleton_normalized_gcd_monoid_of_unique_units subsingleton_normalizedGCDMonoid_of_unique_units
@[simp]
theorem normUnit_eq_one (x : α) : normUnit x = 1 :=
rfl
#align norm_unit_eq_one normUnit_eq_one
-- Porting note: `simp` can prove this
-- @[simp]
theorem normalize_eq (x : α) : normalize x = x :=
mul_one x
#align normalize_eq normalize_eq
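
-- With a unique unit, `Associated` collapses to equality, so for example
-- `gcd` is literally commutative (illustrative, not part of the file):
example [GCDMonoid α] (a b : α) : gcd a b = gcd b a :=
  associated_iff_eq.mp (gcd_comm' a b)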
/-- If a monoid's only unit is `1`, then it is isomorphic to its associates. -/
@[simps]
def associatesEquivOfUniqueUnits : Associates α ≃* α where
toFun := Associates.out
invFun := Associates.mk
left_inv := Associates.mk_out
right_inv _ := (Associates.out_mk _).trans <| normalize_eq _
map_mul' := Associates.out_mul
#align associates_equiv_of_unique_units associatesEquivOfUniqueUnits
#align associates_equiv_of_unique_units_symm_apply associatesEquivOfUniqueUnits_symm_apply
#align associates_equiv_of_unique_units_apply associatesEquivOfUniqueUnits_apply
end UniqueUnit
section IsDomain
variable [CommRing α] [IsDomain α] [NormalizedGCDMonoid α]
theorem gcd_eq_of_dvd_sub_right {a b c : α} (h : a ∣ b - c) : gcd a b = gcd a c := by
apply dvd_antisymm_of_normalize_eq (normalize_gcd _ _) (normalize_gcd _ _) <;>
rw [dvd_gcd_iff] <;>
refine' ⟨gcd_dvd_left _ _, _⟩
· rcases h with ⟨d, hd⟩
rcases gcd_dvd_right a b with ⟨e, he⟩
rcases gcd_dvd_left a b with ⟨f, hf⟩
use e - f * d
rw [mul_sub, ← he, ← mul_assoc, ← hf, ← hd, sub_sub_cancel]
· rcases h with ⟨d, hd⟩
rcases gcd_dvd_right a c with ⟨e, he⟩
rcases gcd_dvd_left a c with ⟨f, hf⟩
use e + f * d
rw [mul_add, ← he, ← mul_assoc, ← hf, ← hd, ← add_sub_assoc, add_comm c b, add_sub_cancel]
#align gcd_eq_of_dvd_sub_right gcd_eq_of_dvd_sub_right
theorem gcd_eq_of_dvd_sub_left {a b c : α} (h : a ∣ b - c) : gcd b a = gcd c a := by
rw [gcd_comm _ a, gcd_comm _ a, gcd_eq_of_dvd_sub_right h]
#align gcd_eq_of_dvd_sub_left gcd_eq_of_dvd_sub_left
end IsDomain
noncomputable section Constructors
open Associates
variable [CancelCommMonoidWithZero α]
private theorem map_mk_unit_aux [DecidableEq α] {f : Associates α →* α}
(hinv : Function.RightInverse f Associates.mk) (a : α) :
a * ↑(Classical.choose (associated_map_mk hinv a)) = f (Associates.mk a) :=
Classical.choose_spec (associated_map_mk hinv a)
/-- Define `NormalizationMonoid` on a structure from a `MonoidHom` inverse to `Associates.mk`. -/
def normalizationMonoidOfMonoidHomRightInverse [DecidableEq α] (f : Associates α →* α)
(hinv : Function.RightInverse f Associates.mk) :
NormalizationMonoid α where
normUnit a :=
if a = 0 then 1
else Classical.choose (Associates.mk_eq_mk_iff_associated.1 (hinv (Associates.mk a)).symm)
normUnit_zero := if_pos rfl
normUnit_mul {a b} ha hb := by
simp_rw [if_neg (mul_ne_zero ha hb), if_neg ha, if_neg hb, Units.ext_iff, Units.val_mul]
suffices
a * b * ↑(Classical.choose (associated_map_mk hinv (a * b))) =
a * ↑(Classical.choose (associated_map_mk hinv a)) *
(b * ↑(Classical.choose (associated_map_mk hinv b)))
by
apply mul_left_cancel₀ (mul_ne_zero ha hb) _
-- Porting note: original `simpa` fails with `unexpected bound variable #1`
-- simpa only [mul_assoc, mul_comm, mul_left_comm] using this
rw [this, mul_assoc, ← mul_assoc _ b, mul_comm _ b, ← mul_assoc, ← mul_assoc,
mul_assoc (a * b)]
rw [map_mk_unit_aux hinv a, map_mk_unit_aux hinv (a * b), map_mk_unit_aux hinv b, ←
MonoidHom.map_mul, Associates.mk_mul_mk]
normUnit_coe_units u := by
nontriviality α
simp_rw [if_neg (Units.ne_zero u), Units.ext_iff]
apply mul_left_cancel₀ (Units.ne_zero u)
rw [Units.mul_inv, map_mk_unit_aux hinv u,
Associates.mk_eq_mk_iff_associated.2 (associated_one_iff_isUnit.2 ⟨u, rfl⟩),
Associates.mk_one, MonoidHom.map_one]
#align normalization_monoid_of_monoid_hom_right_inverse normalizationMonoidOfMonoidHomRightInverse
/-- Define `GCDMonoid` on a structure just from the `gcd` and its properties. -/
noncomputable def gcdMonoidOfGCD [DecidableEq α] (gcd : α → α → α)
(gcd_dvd_left : ∀ a b, gcd a b ∣ a) (gcd_dvd_right : ∀ a b, gcd a b ∣ b)
(dvd_gcd : ∀ {a b c}, a ∣ c → a ∣ b → a ∣ gcd c b) : GCDMonoid α :=
{ gcd
gcd_dvd_left
gcd_dvd_right
dvd_gcd := fun {a b c} => dvd_gcd
lcm := fun a b =>
if a = 0 then 0 else Classical.choose ((gcd_dvd_left a b).trans (Dvd.intro b rfl))
gcd_mul_lcm := fun a b => by
-- Porting note: need `dsimp only` before `split_ifs`
dsimp only
split_ifs with a0
· rw [mul_zero, a0, zero_mul]
· rw [← Classical.choose_spec ((gcd_dvd_left a b).trans (Dvd.intro b rfl))]
lcm_zero_left := fun a => if_pos rfl
lcm_zero_right := fun a => by
-- Porting note: need `dsimp only` before `split_ifs`
dsimp only
split_ifs with a0
· rfl
have h := (Classical.choose_spec ((gcd_dvd_left a 0).trans (Dvd.intro 0 rfl))).symm
have a0' : gcd a 0 ≠ 0 := by
contrapose! a0
rw [← associated_zero_iff_eq_zero, ← a0]
exact associated_of_dvd_dvd (dvd_gcd (dvd_refl a) (dvd_zero a)) (gcd_dvd_left _ _)
apply Or.resolve_left (mul_eq_zero.1 _) a0'
rw [h, mul_zero] }
#align gcd_monoid_of_gcd gcdMonoidOfGCD
/-- Define `NormalizedGCDMonoid` on a structure just from the `gcd` and its properties. -/
noncomputable def normalizedGCDMonoidOfGCD [NormalizationMonoid α] [DecidableEq α] (gcd : α → α → α)
(gcd_dvd_left : ∀ a b, gcd a b ∣ a) (gcd_dvd_right : ∀ a b, gcd a b ∣ b)
(dvd_gcd : ∀ {a b c}, a ∣ c → a ∣ b → a ∣ gcd c b)
(normalize_gcd : ∀ a b, normalize (gcd a b) = gcd a b) : NormalizedGCDMonoid α :=
{ (inferInstance : NormalizationMonoid α) with
gcd
gcd_dvd_left
gcd_dvd_right
dvd_gcd := fun {a b c} => dvd_gcd
normalize_gcd
lcm := fun a b =>
if a = 0 then 0
else Classical.choose (dvd_normalize_iff.2 ((gcd_dvd_left a b).trans (Dvd.intro b rfl)))
normalize_lcm := fun a b => by
dsimp [normalize]
split_ifs with a0
· exact @normalize_zero α _ _
· have := (Classical.choose_spec
(dvd_normalize_iff.2 ((gcd_dvd_left a b).trans (Dvd.intro b rfl)))).symm
set l := Classical.choose (dvd_normalize_iff.2 ((gcd_dvd_left a b).trans (Dvd.intro b rfl)))
obtain rfl | hb := eq_or_ne b 0
-- Porting note: using `simp only` causes the propositions inside `Classical.choose` to
-- differ, so `set` is unable to produce `l = 0` inside `this`. See
-- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/
-- Classical.2Echoose/near/317491179
· rw [mul_zero a, normalize_zero, mul_eq_zero] at this
obtain ha | hl := this
· apply (a0 _).elim
rw [← zero_dvd_iff, ← ha]
exact gcd_dvd_left _ _
· rw [hl, zero_mul]
have h1 : gcd a b ≠ 0 := by
have hab : a * b ≠ 0 := mul_ne_zero a0 hb
contrapose! hab
push_neg at hab
rw [← normalize_eq_zero, ← this, hab, zero_mul]
have h2 : normalize (gcd a b * l) = gcd a b * l := by rw [this, normalize_idem]
rw [← normalize_gcd] at this
rwa [normalize.map_mul, normalize_gcd, mul_right_inj' h1] at h2
gcd_mul_lcm := fun a b => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with a0
· rw [mul_zero, a0, zero_mul]
· rw [←
Classical.choose_spec (dvd_normalize_iff.2 ((gcd_dvd_left a b).trans (Dvd.intro b rfl)))]
exact normalize_associated (a * b)
lcm_zero_left := fun a => if_pos rfl
lcm_zero_right := fun a => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with a0
· rfl
rw [← normalize_eq_zero] at a0
have h :=
(Classical.choose_spec
(dvd_normalize_iff.2 ((gcd_dvd_left a 0).trans (Dvd.intro 0 rfl)))).symm
have gcd0 : gcd a 0 = normalize a := by
rw [← normalize_gcd]
exact normalize_eq_normalize (gcd_dvd_left _ _) (dvd_gcd (dvd_refl a) (dvd_zero a))
rw [← gcd0] at a0
apply Or.resolve_left (mul_eq_zero.1 _) a0
rw [h, mul_zero, normalize_zero] }
#align normalized_gcd_monoid_of_gcd normalizedGCDMonoidOfGCD
/-- Define `GCDMonoid` on a structure just from the `lcm` and its properties. -/
noncomputable def gcdMonoidOfLCM [DecidableEq α] (lcm : α → α → α)
(dvd_lcm_left : ∀ a b, a ∣ lcm a b) (dvd_lcm_right : ∀ a b, b ∣ lcm a b)
(lcm_dvd : ∀ {a b c}, c ∣ a → b ∣ a → lcm c b ∣ a) : GCDMonoid α :=
let exists_gcd a b := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl)
{ lcm
gcd := fun a b => if a = 0 then b else if b = 0 then a else Classical.choose (exists_gcd a b)
gcd_mul_lcm := fun a b => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h h_1
· rw [h, eq_zero_of_zero_dvd (dvd_lcm_left _ _), mul_zero, zero_mul]
· rw [h_1, eq_zero_of_zero_dvd (dvd_lcm_right _ _), mul_zero]
rw [mul_comm, ← Classical.choose_spec (exists_gcd a b)]
lcm_zero_left := fun a => eq_zero_of_zero_dvd (dvd_lcm_left _ _)
lcm_zero_right := fun a => eq_zero_of_zero_dvd (dvd_lcm_right _ _)
gcd_dvd_left := fun a b => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h h_1
· rw [h]
apply dvd_zero
· exact dvd_rfl
have h0 : lcm a b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹a = 0› h
· exact absurd ‹b = 0› h_1
rw [← mul_dvd_mul_iff_left h0, ← Classical.choose_spec (exists_gcd a b), mul_comm,
mul_dvd_mul_iff_right h]
apply dvd_lcm_right
gcd_dvd_right := fun a b => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h h_1
· exact dvd_rfl
· rw [h_1]
apply dvd_zero
have h0 : lcm a b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹a = 0› h
· exact absurd ‹b = 0› h_1
rw [← mul_dvd_mul_iff_left h0, ← Classical.choose_spec (exists_gcd a b),
mul_dvd_mul_iff_right h_1]
apply dvd_lcm_left
dvd_gcd := fun {a b c} ac ab => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h h_1
· exact ab
· exact ac
have h0 : lcm c b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left c rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹c = 0› h
· exact absurd ‹b = 0› h_1
rw [← mul_dvd_mul_iff_left h0, ← Classical.choose_spec (exists_gcd c b)]
rcases ab with ⟨d, rfl⟩
rw [mul_eq_zero] at ‹a * d ≠ 0›
push_neg at h_1
rw [mul_comm a, ← mul_assoc, mul_dvd_mul_iff_right h_1.1]
apply lcm_dvd (Dvd.intro d rfl)
rw [mul_comm, mul_dvd_mul_iff_right h_1.2]
apply ac }
#align gcd_monoid_of_lcm gcdMonoidOfLCM
-- Porting note: very slow; improve performance?
/-- Define `NormalizedGCDMonoid` on a structure just from the `lcm` and its properties. -/
noncomputable def normalizedGCDMonoidOfLCM [NormalizationMonoid α] [DecidableEq α] (lcm : α → α → α)
(dvd_lcm_left : ∀ a b, a ∣ lcm a b) (dvd_lcm_right : ∀ a b, b ∣ lcm a b)
(lcm_dvd : ∀ {a b c}, c ∣ a → b ∣ a → lcm c b ∣ a)
(normalize_lcm : ∀ a b, normalize (lcm a b) = lcm a b) : NormalizedGCDMonoid α :=
let exists_gcd a b := dvd_normalize_iff.2 (lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl))
{ (inferInstance : NormalizationMonoid α) with
lcm
gcd := fun a b =>
if a = 0 then normalize b
else if b = 0 then normalize a else Classical.choose (exists_gcd a b)
gcd_mul_lcm := fun a b => by
dsimp only
split_ifs with h h_1
· rw [h, eq_zero_of_zero_dvd (dvd_lcm_left _ _), mul_zero, zero_mul]
· rw [h_1, eq_zero_of_zero_dvd (dvd_lcm_right _ _), mul_zero, mul_zero]
rw [mul_comm, ← Classical.choose_spec (exists_gcd a b)]
exact normalize_associated (a * b)
normalize_lcm
normalize_gcd := fun a b => by
dsimp [normalize]
split_ifs with h h_1
· apply normalize_idem
· apply normalize_idem
have h0 : lcm a b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹a = 0› h
· exact absurd ‹b = 0› h_1
apply mul_left_cancel₀ h0
refine' _root_.trans _ (Classical.choose_spec (exists_gcd a b))
conv_lhs =>
congr
rw [← normalize_lcm a b]
erw [← normalize.map_mul, ← Classical.choose_spec (exists_gcd a b), normalize_idem]
lcm_zero_left := fun a => eq_zero_of_zero_dvd (dvd_lcm_left _ _)
lcm_zero_right := fun a => eq_zero_of_zero_dvd (dvd_lcm_right _ _)
gcd_dvd_left := fun a b => by
dsimp only
split_ifs with h h_1
· rw [h]
apply dvd_zero
· exact (normalize_associated _).dvd
have h0 : lcm a b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹a = 0› h
· exact absurd ‹b = 0› h_1
rw [← mul_dvd_mul_iff_left h0, ← Classical.choose_spec (exists_gcd a b), normalize_dvd_iff,
mul_comm, mul_dvd_mul_iff_right h]
apply dvd_lcm_right
gcd_dvd_right := fun a b => by
dsimp only
split_ifs with h h_1
· exact (normalize_associated _).dvd
· rw [h_1]
apply dvd_zero
have h0 : lcm a b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left a rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹a = 0› h
· exact absurd ‹b = 0› h_1
rw [← mul_dvd_mul_iff_left h0, ← Classical.choose_spec (exists_gcd a b), normalize_dvd_iff,
mul_dvd_mul_iff_right h_1]
apply dvd_lcm_left
dvd_gcd := fun {a b c} ac ab => by
dsimp only
split_ifs with h h_1
· apply dvd_normalize_iff.2 ab
· apply dvd_normalize_iff.2 ac
have h0 : lcm c b ≠ 0 := by
intro con
have h := lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left c rfl)
rw [con, zero_dvd_iff, mul_eq_zero] at h
cases h
· exact absurd ‹c = 0› h
· exact absurd ‹b = 0› h_1
rw [← mul_dvd_mul_iff_left h0, ←
Classical.choose_spec
(dvd_normalize_iff.2 (lcm_dvd (Dvd.intro b rfl) (Dvd.intro_left c rfl))),
dvd_normalize_iff]
rcases ab with ⟨d, rfl⟩
rw [mul_eq_zero] at h_1
push_neg at h_1
rw [mul_comm a, ← mul_assoc, mul_dvd_mul_iff_right h_1.1]
apply lcm_dvd (Dvd.intro d rfl)
rw [mul_comm, mul_dvd_mul_iff_right h_1.2]
apply ac }
#align normalized_gcd_monoid_of_lcm normalizedGCDMonoidOfLCM
/-- Define a `GCDMonoid` structure on a monoid just from the existence of a `gcd`. -/
noncomputable def gcdMonoidOfExistsGCD [DecidableEq α]
(h : ∀ a b : α, ∃ c : α, ∀ d : α, d ∣ a ∧ d ∣ b ↔ d ∣ c) : GCDMonoid α :=
gcdMonoidOfGCD (fun a b => Classical.choose (h a b))
(fun a b => ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).1)
(fun a b => ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).2)
fun {a b c} ac ab => (Classical.choose_spec (h c b) a).1 ⟨ac, ab⟩
#align gcd_monoid_of_exists_gcd gcdMonoidOfExistsGCD
/-- Define a `NormalizedGCDMonoid` structure on a monoid just from the existence of a `gcd`. -/
noncomputable def normalizedGCDMonoidOfExistsGCD [NormalizationMonoid α] [DecidableEq α]
(h : ∀ a b : α, ∃ c : α, ∀ d : α, d ∣ a ∧ d ∣ b ↔ d ∣ c) : NormalizedGCDMonoid α :=
normalizedGCDMonoidOfGCD (fun a b => normalize (Classical.choose (h a b)))
(fun a b =>
normalize_dvd_iff.2 ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).1)
(fun a b =>
normalize_dvd_iff.2 ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).2)
(fun {a b c} ac ab => dvd_normalize_iff.2 ((Classical.choose_spec (h c b) a).1 ⟨ac, ab⟩))
fun _ _ => normalize_idem _
#align normalized_gcd_monoid_of_exists_gcd normalizedGCDMonoidOfExistsGCD
/-- Define a `GCDMonoid` structure on a monoid just from the existence of an `lcm`. -/
noncomputable def gcdMonoidOfExistsLCM [DecidableEq α]
(h : ∀ a b : α, ∃ c : α, ∀ d : α, a ∣ d ∧ b ∣ d ↔ c ∣ d) : GCDMonoid α :=
gcdMonoidOfLCM (fun a b => Classical.choose (h a b))
(fun a b => ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).1)
(fun a b => ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).2)
fun {a b c} ac ab => (Classical.choose_spec (h c b) a).1 ⟨ac, ab⟩
#align gcd_monoid_of_exists_lcm gcdMonoidOfExistsLCM
/-- Define a `NormalizedGCDMonoid` structure on a monoid just from the existence of an `lcm`. -/
noncomputable def normalizedGCDMonoidOfExistsLCM [NormalizationMonoid α] [DecidableEq α]
(h : ∀ a b : α, ∃ c : α, ∀ d : α, a ∣ d ∧ b ∣ d ↔ c ∣ d) : NormalizedGCDMonoid α :=
normalizedGCDMonoidOfLCM (fun a b => normalize (Classical.choose (h a b)))
(fun a b =>
dvd_normalize_iff.2 ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).1)
(fun a b =>
dvd_normalize_iff.2 ((Classical.choose_spec (h a b) (Classical.choose (h a b))).2 dvd_rfl).2)
(fun {a b c} ac ab => normalize_dvd_iff.2 ((Classical.choose_spec (h c b) a).1 ⟨ac, ab⟩))
fun _ _ => normalize_idem _
#align normalized_gcd_monoid_of_exists_lcm normalizedGCDMonoidOfExistsLCM
end Constructors
namespace CommGroupWithZero
variable (G₀ : Type _) [CommGroupWithZero G₀] [DecidableEq G₀]
-- Porting note: very slow; improve performance?
-- see Note [lower instance priority]
instance (priority := 100) : NormalizedGCDMonoid G₀ where
normUnit x := if h : x = 0 then 1 else (Units.mk0 x h)⁻¹
normUnit_zero := dif_pos rfl
normUnit_mul := fun {x y} x0 y0 => Units.eq_iff.1 (by
-- Porting note: need `dsimp only`, also `simp` reaches maximum heartbeat
-- by Units.eq_iff.mp (by simp only [x0, y0, mul_comm])
dsimp only
split_ifs with h
· rw [mul_eq_zero] at h
cases h
· exact absurd ‹x = 0› x0
· exact absurd ‹y = 0› y0
· rw [Units.mk0_mul, mul_inv_rev, mul_comm] )
normUnit_coe_units u := by
-- Porting note: need `dsimp only`
dsimp only
rw [dif_neg (Units.ne_zero _), Units.mk0_val]
gcd a b := if a = 0 ∧ b = 0 then 0 else 1
lcm a b := if a = 0 ∨ b = 0 then 0 else 1
gcd_dvd_left a b := by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h
· rw [h.1]
· exact one_dvd _
gcd_dvd_right a b := by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h
· rw [h.2]
· exact one_dvd _
dvd_gcd := fun {a b c} hac hab => by
-- Porting note: need `dsimp only`
dsimp only
split_ifs with h
· apply dvd_zero
· rw [not_and_or] at h
cases h
· refine' isUnit_iff_dvd_one.mp (isUnit_of_dvd_unit _ (IsUnit.mk0 _ ‹c ≠ 0›))
exact hac
· refine' isUnit_iff_dvd_one.mp (isUnit_of_dvd_unit _ (IsUnit.mk0 _ ‹b ≠ 0›))
exact hab
gcd_mul_lcm a b := by
by_cases ha : a = 0
· simp only [ha, true_and, true_or, ite_true, mul_zero, zero_mul]
exact Associated.refl _
· by_cases hb : b = 0
· simp only [hb, and_true, or_true, ite_true, mul_zero]
exact Associated.refl _
-- Porting note: need `dsimp only`
· dsimp only
rw [if_neg (not_and_of_not_left _ ha), one_mul, if_neg (not_or_of_not ha hb)]
exact (associated_one_iff_isUnit.mpr ((IsUnit.mk0 _ ha).mul (IsUnit.mk0 _ hb))).symm
lcm_zero_left b := if_pos (Or.inl rfl)
lcm_zero_right a := if_pos (Or.inr rfl)
-- `split_ifs` wants to split `normalize`, so handle the cases manually
normalize_gcd a b := if h : a = 0 ∧ b = 0 then by simp [if_pos h] else by simp [if_neg h]
normalize_lcm a b := if h : a = 0 ∨ b = 0 then by simp [if_pos h] else by simp [if_neg h]
@[simp]
theorem coe_normUnit {a : G₀} (h0 : a ≠ 0) : (↑(normUnit a) : G₀) = a⁻¹ := by simp [normUnit, h0]
#align comm_group_with_zero.coe_norm_unit CommGroupWithZero.coe_normUnit
theorem normalize_eq_one {a : G₀} (h0 : a ≠ 0) : normalize a = 1 := by simp [normalize_apply, h0]
#align comm_group_with_zero.normalize_eq_one CommGroupWithZero.normalize_eq_one
end CommGroupWithZero
[Open in Colab](https://colab.research.google.com/github/leehanchung/cs224w/blob/main/notebooks/XCS224W_Colab3.ipynb)
# **CS224W - Colab 3**
In Colab 2 we constructed GNN models by using PyTorch Geometric's built-in GCN layer, `GCNConv`. In this Colab we will go a step deeper and implement the **GraphSAGE** ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) and **GAT** ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)) layers directly. Then we will run and test our models on the CORA dataset, a standard citation network benchmark dataset.
Next, we will use [DeepSNAP](https://snap.stanford.edu/deepsnap/), a Python library assisting efficient deep learning on graphs, to split the graphs in different settings and apply dataset transformations.
Lastly, using DeepSNAP's transductive link prediction dataset splitting functionality, we will construct a simple GNN model for the task of edge property prediction (link prediction).
**Note**: Make sure to **sequentially run all the cells in each section** so that the intermediate variables / packages will carry over to the next cell
Have fun and good luck on Colab 3 :)
# Device
We recommend using a GPU for this Colab.
Please click `Runtime` and then `Change runtime type`. Then set the `hardware accelerator` to **GPU**.
## Installation
```python
# Install torch geometric
import os
if 'IS_GRADESCOPE_ENV' not in os.environ:
!pip uninstall torch-scatter --y
!pip uninstall torch-sparse --y
!pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html
!pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html
!pip install torch-geometric
!pip install -q git+https://github.com/snap-stanford/deepsnap.git
```
Found existing installation: torch-scatter 2.0.8
Uninstalling torch-scatter-2.0.8:
Successfully uninstalled torch-scatter-2.0.8
Found existing installation: torch-sparse 0.6.12
Uninstalling torch-sparse-0.6.12:
Successfully uninstalled torch-sparse-0.6.12
Looking in links: https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html
Collecting torch-scatter
Using cached https://data.pyg.org/whl/torch-1.9.0%2Bcu111/torch_scatter-2.0.8-cp37-cp37m-linux_x86_64.whl (10.4 MB)
Installing collected packages: torch-scatter
Successfully installed torch-scatter-2.0.8
Looking in links: https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html
Collecting torch-sparse
Using cached https://data.pyg.org/whl/torch-1.9.0%2Bcu111/torch_sparse-0.6.12-cp37-cp37m-linux_x86_64.whl (3.7 MB)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from torch-sparse) (1.4.1)
Requirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.7/dist-packages (from scipy->torch-sparse) (1.19.5)
Installing collected packages: torch-sparse
Successfully installed torch-sparse-0.6.12
Requirement already satisfied: torch-geometric in /usr/local/lib/python3.7/dist-packages (2.0.1)
Requirement already satisfied: yacs in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.1.8)
Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.23.0)
Requirement already satisfied: googledrivedownloader in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.4)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (4.62.3)
Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.1.5)
Requirement already satisfied: pyparsing in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.4.7)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.19.5)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.22.2.post1)
Requirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (3.13)
Requirement already satisfied: rdflib in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (6.0.2)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.4.1)
Requirement already satisfied: networkx in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.6.3)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.11.3)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->torch-geometric) (2.0.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->torch-geometric) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->torch-geometric) (2.8.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->torch-geometric) (1.15.0)
Requirement already satisfied: isodate in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (0.6.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (57.4.0)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (2021.5.30)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (3.0.4)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->torch-geometric) (1.0.1)
```python
import torch_geometric
torch_geometric.__version__
```
'2.0.1'
# 1) GNN Layers
## Implementing Layer Modules
In Colab 2, we implemented a GCN model for node and graph classification tasks using PyG's built-in GCN module. For Colab 3, we provide a general Graph Neural Network stack into which we will be able to plug in our own module implementations: GraphSAGE and GAT.
We will then use our layer implementations to complete node classification on the CORA dataset, a standard citation network benchmark. In this dataset, nodes correspond to documents and edges correspond to undirected citations. Each node or document in the graph is assigned a class label and features based on the document's binarized bag-of-words representation. Specifically, the Cora graph has 2708 nodes, 5429 edges, 7 prediction classes, and 1433 features per node.
## GNN Stack Module
Below is the implementation of a general GNN stack, where we can plug in any GNN layer, such as **GraphSage**, **GAT**, etc. This module is provided for you. Your implementations of the **GraphSage** and **GAT** layers will function as components in the GNNStack module.
```python
import torch
import torch_scatter
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as pyg_nn
import torch_geometric.utils as pyg_utils
from torch import Tensor
from typing import Union, Tuple, Optional
from torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType,
OptTensor)
from torch.nn import Parameter, Linear
from torch_sparse import SparseTensor, set_diag
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax
class GNNStack(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, args, emb=False):
super(GNNStack, self).__init__()
conv_model = self.build_conv_model(args.model_type)
self.convs = nn.ModuleList()
self.convs.append(conv_model(input_dim, hidden_dim))
assert (args.num_layers >= 1), 'Number of layers is not >=1'
for l in range(args.num_layers-1):
self.convs.append(conv_model(args.heads * hidden_dim, hidden_dim))
# post-message-passing
self.post_mp = nn.Sequential(
nn.Linear(args.heads * hidden_dim, hidden_dim), nn.Dropout(args.dropout),
nn.Linear(hidden_dim, output_dim))
self.dropout = args.dropout
self.num_layers = args.num_layers
self.emb = emb
def build_conv_model(self, model_type):
if model_type == 'GraphSage':
return GraphSage
elif model_type == 'GAT':
# When applying GAT with num heads > 1, you need to modify the
# input and output dimension of the conv layers (self.convs),
# to ensure that the input dim of the next layer is num heads
# multiplied by the output dim of the previous layer.
# HINT: In case you want to play with multiheads, you need to change the for-loop that builds up self.convs to be
# self.convs.append(conv_model(hidden_dim * num_heads, hidden_dim)),
# and also the first nn.Linear(hidden_dim * num_heads, hidden_dim) in post-message-passing.
return GAT
def forward(self, data):
x, edge_index, batch = data.x, data.edge_index, data.batch
for i in range(self.num_layers):
x = self.convs[i](x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout,training=self.training)
x = self.post_mp(x)
if self.emb == True:
return x
return F.log_softmax(x, dim=1)
def loss(self, pred, label):
return F.nll_loss(pred, label)
```
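As a quick orientation, here is a hedged sketch of how this stack gets instantiated; `SimpleNamespace` stands in for the `objectview` args built later in the training section, the dimensions are Cora's, and the cell only runs once the `GraphSage` layer below has been defined:

```python
# Illustrative only: instantiating the stack for Cora-sized inputs.
# Runnable once the GraphSage class below has been defined.
from types import SimpleNamespace

demo_args = SimpleNamespace(model_type='GraphSage', num_layers=2, heads=1,
                            hidden_dim=32, dropout=0.5)
model = GNNStack(input_dim=1433, hidden_dim=32, output_dim=7, args=demo_args)
```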
## Creating Our Own Message Passing Layer
Now let's start implementing our own message passing layers! Working through this part will help us become acutely familiar with the behind-the-scenes work of implementing PyTorch message passing layers, allowing us to build our own GNN models. To do so, we will work with and implement 3 critical functions needed to define a PyG message passing layer: `forward`, `message`, and `aggregate`.
Before diving head first into the coding details, let us quickly review the key components of the message passing process. To do so, we will focus on a single round of message passing with respect to a single central node $x$. Before message passing, $x$ is associated with a feature vector $x^{l-1}$, and the goal of message passing is to update this feature vector to $x^l$. To do so, we implement the following steps: 1) each neighboring node $v$ passes its current embedding $v^{l-1}$ as a message across the edge $(x, v)$; 2) for the node $x$, we aggregate all of the messages of the neighboring nodes (for example through a sum or mean); and 3) we transform the aggregated information, for example by applying linear and non-linear transformations. Altogether, the message passing process is applied such that every node $u$ in our graph updates its embedding by acting as the central node $x$ in steps 1-3 described above.
Extending this process to a full message passing layer: the job of a message passing layer is to update the current feature representation or embedding of each node in a graph by propagating and transforming information within the graph. Overall, the general paradigm of a message passing layer is: 1) pre-processing -> 2) **message passing** / propagation -> 3) post-processing.
The `forward` function that we will implement for our message passing layer captures this execution logic. Namely, the `forward` function handles the pre- and post-processing of node features / embeddings, as well as initiates message passing by calling the `propagate` function.
The `propagate` function encapsulates the message passing process! It does so by calling three important functions: 1) `message`, 2) `aggregate`, and 3) `update`. Our implementation will vary slightly from this, as we will not explicitly implement `update`; instead we place the logic for updating node embeddings after message passing within the `forward` function. To be more specific, after information is propagated (message passing), we can further transform the node embeddings output by `propagate`. Therefore, the output of `forward` is exactly the node embeddings after one GNN layer.
Lastly, before starting to implement our own layer, let us dig a bit deeper into each of the functions described above:
1.
```
def propagate(edge_index, x=(x_i, x_j), extra=(extra_i, extra_j), size=size):
```
Calling `propagate` initiates the message passing process. Looking at the function parameters, we highlight a couple of key parameters.
- `edge_index` is passed to the forward function and captures the edge structure of the graph.
- `x=(x_i, x_j)` represents the node features that will be used in message passing. In order to explain why we pass the tuple `(x_i, x_j)`, we first look at how our edges are represented. For every edge $(i, j) \in \mathcal{E}$, we can differentiate $i$ as the source or central node ($x_{central}$) and $j$ as the neighboring node ($x_{neighbor}$).
Taking the example of message passing above, for a central node $u$ we will aggregate and transform all of the messages associated with the nodes $v$ s.t. $(u, v) \in \mathcal{E}$ (i.e. $v \in \mathcal{N}_{u}$). Thus we see that the subscripts `_i` and `_j` allow us to specifically differentiate features associated with central nodes (i.e. nodes receiving message information) and neighboring nodes (i.e. nodes passing messages).
This is admittedly a somewhat confusing concept; however, one key thing to remember is that, depending on the perspective, a node $x$ acts as either a central node or a neighboring node. In fact, in undirected graphs we store both edge directions (i.e. $(i, j)$ and $(j, i)$). From the central node perspective, `x_i`, $x$ is collecting neighboring information to update its embedding. From a neighboring node perspective, `x_j`, $x$ is passing its message information along the edge connecting it to a different central node.
- `extra=(extra_i, extra_j)` represents additional information that we can associate with each node beyond its current feature embedding. In fact, we can include as many additional parameters of the form `param=(param_i, param_j)` as we would like. Again, we highlight that indexing with `_i` and `_j` allows us to differentiate central and neighboring nodes.
The output of the `propagate` function is a matrix of node embeddings after the message passing process and has shape $[N, d]$.
2.
```
def message(x_j, ...):
```
The `message` function is called by propagate and constructs the messages from
neighboring nodes $j$ to central nodes $i$ for each edge $(i, j)$ in *edge_index*. This function can take any argument that was initially passed to `propagate`. Furthermore, we can again differentiate central nodes and neighboring nodes by appending `_i` or `_j` to the variable name, e.g. `x_i` and `x_j`. Looking more specifically at the variables, we have:
- `x_j` represents a matrix of feature embeddings for all neighboring nodes passing their messages along their respective edge (i.e. all nodes $j$ for edges $(i, j) \in \mathcal{E}$). Thus, its shape is $[|\mathcal{E}|, d]$!
- In implementing GAT we will see how to access additional variables passed to propagate
Critically, we see that the output of the `message` function is a matrix of neighboring node embeddings ready to be aggregated, having shape $[|\mathcal{E}|, d]$.
3.
```
def aggregate(self, inputs, index, dim_size = None):
```
Lastly, the `aggregate` function is used to aggregate the messages from neighboring nodes. Looking at the parameters we highlight:
- `inputs` represents a matrix of the messages passed from neighboring nodes (i.e. the output of the `message` function).
- `index` has one entry per row of `inputs` and tells us the central node corresponding to each of the rows / messages in the `inputs` matrix. Thus, `index` tells us which rows / messages to aggregate for each central node.
The output of `aggregate` is of shape $[N, d]$.
For additional resources refer to the PyG documentation for implementing custom message passing layers: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
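To make the `forward` -> `propagate` -> `message` -> `aggregate` flow concrete before the graded implementations, here is a minimal, hypothetical layer (not part of the assignment) that simply mean-aggregates raw neighbor features:

```python
# A minimal sketch, not the assignment solution: a parameter-free layer
# that mean-aggregates each node's neighbor features.
class MeanNeighbors(MessagePassing):
    def forward(self, x, edge_index):
        # Kick off message passing; the same features play both node roles.
        return self.propagate(edge_index, x=(x, x))

    def message(self, x_j):
        # Each neighbor passes its feature vector unchanged: shape [|E|, d].
        return x_j

    def aggregate(self, inputs, index, dim_size=None):
        # Mean-pool the messages per central node: output shape [N, d].
        return torch_scatter.scatter(inputs, index, dim=self.node_dim,
                                     reduce='mean')
```

Calling `MeanNeighbors()(x, edge_index)` then returns, for every node, the mean of its neighbors' features.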
## GraphSage Implementation
For our first GNN layer, we will implement the well known GraphSage ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) layer!
For a given *central* node $v$ with current embedding $h_v^{(l-1)}$, the message passing update rule to transform $h_v^{(l-1)} \rightarrow h_v^{(l)}$ is as follows:
\begin{equation}
h_v^{(l)} = W_l\cdot h_v^{(l-1)} + W_r \cdot AGG(\{h_u^{(l-1)}, \forall u \in N(v) \})
\end{equation}
where $W_l$ and $W_r$ are learnable weight matrices and the nodes $u$ are *neighboring* nodes. Additionally, we use mean aggregation for simplicity:
\begin{equation}
AGG(\{h_u^{(l-1)}, \forall u \in N(v) \}) = \frac{1}{|N(v)|} \sum_{u\in N(v)} h_u^{(l-1)}
\end{equation}
One thing to note is that we're adding a **skip connection** to our GraphSage implementation through the term $W_l\cdot h_v^{(l-1)}$.
Before implementing this update rule, we encourage you to think about how the different parts of the formulas above correspond with the functions outlined earlier: 1) `forward`, 2) `message`, and 3) `aggregate`. As a hint, we are given what the aggregation function is (i.e. mean aggregation)! Now the question remains: what are the messages passed by each neighboring node, and when do we call the `propagate` function?
Note: in this case the message function or messages are actually quite simple. Additionally, remember that the `propagate` function encapsulates the operations of / the outputs of the combined `message` and `aggregate` functions.
Lastly, $\ell$-2 normalization of the node embeddings is applied after each iteration.
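Before the implementation, here is a tiny, purely illustrative check of what mean aggregation computes with `torch_scatter` (made-up numbers):

```python
# Illustrative: messages for 4 edges with feature dim 2; index[k] is the
# central node of edge k.
msgs = torch.tensor([[1., 1.], [3., 3.], [2., 2.], [4., 4.]])
index = torch.tensor([0, 0, 1, 1])
torch_scatter.scatter(msgs, index, dim=0, reduce='mean')
# tensor([[2., 2.],
#         [3., 3.]])
```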
<font color='red'>For the following questions, DON'T refer to any existing implementations online.</font>
```python
class GraphSage(MessagePassing):
def __init__(self, in_channels, out_channels, normalize = True,
bias = False, **kwargs):
super(GraphSage, self).__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.lin_l = None
self.lin_r = None
############################################################################
# TODO: Your code here!
# Define the layers needed for the message and update functions below.
# self.lin_l is the linear transformation that you apply to embedding
# for central node.
# self.lin_r is the linear transformation that you apply to aggregated
# message from neighbors.
# Our implementation is ~2 lines, but don't worry if you deviate from this.
self.lin_l = nn.Linear(self.in_channels, self.out_channels)
self.lin_r = nn.Linear(self.in_channels, self.out_channels)
############################################################################
self.reset_parameters()
def reset_parameters(self):
self.lin_l.reset_parameters()
self.lin_r.reset_parameters()
def forward(self, x, edge_index, size = None):
""""""
out = None
############################################################################
# TODO: Your code here!
# Implement message passing, as well as any post-processing (our update rule).
# 1. Call propagate function to conduct the message passing.
# 1.1 See the description of propagate above or the following link for more information:
# https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
# 1.2 We will only use the representation for neighbor nodes (x_j), so by default
# we pass the same representation for central and neighbor nodes as x=(x, x).
# 2. Update our node embedding with skip connection.
# 3. If normalize is set, do L-2 normalization (defined in
# torch.nn.functional)
#
# Our implementation is ~5 lines, but don't worry if you deviate from this.
x_propagate = self.propagate(edge_index, x=(x, x), size=size)
x = self.lin_l(x) + x_propagate
if self.normalize:
x = F.normalize(x)
out = x
############################################################################
return out
def message(self, x_j):
out = None
############################################################################
# TODO: Your code here!
# Implement your message function here.
# Hint: Look at the formulation of the mean aggregation function, focusing on
# what message each neighboring node passes.
#
# Our implementation is ~1 lines, but don't worry if you deviate from this.
out = self.lin_r(x_j)
############################################################################
return out
def aggregate(self, inputs, index, dim_size = None):
out = None
# The axis along which to index number of nodes.
node_dim = self.node_dim
############################################################################
# TODO: Your code here!
# Implement your aggregate function here.
# See here as how to use torch_scatter.scatter:
# https://pytorch-scatter.readthedocs.io/en/latest/functions/scatter.html#torch_scatter.scatter
#
# Our implementation is ~1 lines, but don't worry if you deviate from this.
out = torch_scatter.scatter(inputs, index, dim=node_dim, reduce='mean')
############################################################################
return out
```
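As a quick, illustrative shape check of the layer above on a toy graph (made-up inputs):

```python
# Illustrative: one GraphSage layer on a 4-node toy graph.
layer = GraphSage(in_channels=5, out_channels=8)
x = torch.randn(4, 5)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])
print(layer(x, edge_index).shape)  # expected: torch.Size([4, 8])
```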
## GAT Implementation
Attention mechanisms have become the state-of-the-art in many sequence-based tasks such as machine translation and learning sentence representations. One of the major benefits of attention-based mechanisms is their ability to focus on the most relevant parts of the input to make decisions. In this problem, we will see how attention mechanisms can be used to perform node classification over graph-structured data through the usage of Graph Attention Networks (GATs) ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)).
The building block of the Graph Attention Network is the graph attention layer, which is a variant of the aggregation function. Let $N$ be the number of nodes and $F$ be the dimension of the feature vector for each node. The input to each graph attentional layer is a set of node features: $\mathbf{h} = \{\overrightarrow{h_1}, \overrightarrow{h_2}, \dots, \overrightarrow{h_N}\}$, $\overrightarrow{h_i} \in \mathbb{R}^F$. The output of each graph attentional layer is a new set of node features, which may have a new dimension $F'$: $\mathbf{h'} = \{\overrightarrow{h_1'}, \overrightarrow{h_2'}, \dots, \overrightarrow{h_N'}\}$, with $\overrightarrow{h_i'} \in \mathbb{R}^{F'}$.
We will now describe how this transformation is performed for each graph attention layer. First, a shared linear transformation parametrized by the weight matrix $\mathbf{W} \in \mathbb{R}^{F' \times F}$ is applied to every node.
Next, we perform self-attention on the nodes. We use a shared attention function $a$:
\begin{equation}
a : \mathbb{R}^{F'} \times \mathbb{R}^{F'} \rightarrow \mathbb{R}.
\end{equation}
that computes the attention coefficients capturing the importance of node $j$'s features to node $i$:
\begin{equation}
e_{ij} = a(\mathbf{W_l}\overrightarrow{h_i}, \mathbf{W_r} \overrightarrow{h_j})
\end{equation}
The most general formulation of self-attention allows every node to attend to all other nodes which drops all structural information. However, to utilize graph structure in the attention mechanisms, we use **masked attention**. In masked attention, we only compute attention coefficients $e_{ij}$ for nodes $j \in \mathcal{N}_i$ where $\mathcal{N}_i$ is some neighborhood of node $i$ in the graph.
To easily compare coefficients across different nodes, we normalize the coefficients across $j$ using a softmax function:
\begin{equation}
\alpha_{ij} = \text{softmax}_j(e_{ij}) = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})}
\end{equation}
For this problem, our attention mechanism $a$ will be a single-layer feedforward neural network parametrized by weight vectors $\overrightarrow{a_l} \in \mathbb{R}^{F'}$ and $\overrightarrow{a_r} \in \mathbb{R}^{F'}$, followed by a LeakyReLU nonlinearity (with negative input slope 0.2). Let $\cdot^T$ represent transposition and $||$ represent concatenation. The coefficients computed by our attention mechanism may be expressed as:
\begin{equation}
\alpha_{ij} = \frac{\exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_j}\Big)\Big)}{\sum_{k\in \mathcal{N}_i} \exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_k}\Big)\Big)}
\end{equation}
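A small illustrative example of this neighborhood-wise softmax, using `torch_geometric.utils.softmax` (imported above; numbers are made up):

```python
# Illustrative: unnormalized scores for 4 edges; index[k] is the central
# node i of edge k, so normalization happens within each neighborhood.
e = torch.tensor([1.0, 2.0, 3.0, 4.0])
index = torch.tensor([0, 0, 1, 1])
softmax(e, index)
# tensor([0.2689, 0.7311, 0.2689, 0.7311])  # each neighborhood sums to 1
```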
For the following questions, we denote `alpha_l` = $\alpha_l = [...,\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i},...] \in \mathbb{R}^n$ and `alpha_r` = $\alpha_r = [..., \overrightarrow{a_r}^T \mathbf{W_r} \overrightarrow{h_j}, ...] \in \mathbb{R}^n$.
At every layer of GAT, after the attention coefficients are computed for that layer, the aggregation function can be computed by a weighted sum of neighborhood messages, where weights are specified by $\alpha_{ij}$.
Now, we use the normalized attention coefficients to compute a linear combination of the features corresponding to them. These aggregated features will serve as the final output features for every node.
\begin{equation}
h_i' = \sum_{j \in \mathcal{N}_i} \alpha_{ij} \mathbf{W_r} \overrightarrow{h_j}.
\end{equation}
At this point, we have covered a lot of information! Before reading further about multi-head attention, we encourage you to go again through the exercise of thinking about which components of the attention mechanism correspond with the different functions: 1) `forward`, 2) `message`, and 3) `aggregate`.
- Hint 1: Our aggregation is very similar to that of GraphSage except now we are using sum aggregation
- Hint 2: The terms we aggregate over again represent the individual message that each neighbor node j sends. Thus, we see that $\alpha_{ij}$ is part of the message each node sends and is thus computed during the message step. This makes sense since an attention weight is associated with each edge in the graph.
- Hint 3: Look at the terms in the definition of $\alpha_{ij}$. What values do we want to pre-process and pass as parameters to the `propagate` function. The parameters of `message(..., x_j, alpha_j, alpha_i, ...)` should give a good hint.
### Multi-Head Attention
To stabilize the learning process of self-attention, we use multi-head attention. To do this we use $K$ independent attention mechanisms, or "heads", to compute output features as in the above equations. Then, we concatenate these output feature representations:
\begin{equation}
\overrightarrow{h_i}' = ||_{k=1}^K \Big(\sum_{j \in \mathcal{N}_i} \alpha_{ij}^{(k)} \mathbf{W_r}^{(k)} \overrightarrow{h_j}\Big)
\end{equation}
where $||$ is concatenation, $\alpha_{ij}^{(k)}$ are the normalized attention coefficients computed by the $k$-th attention mechanism $(a^k)$, and $\mathbf{W_r}^{(k)}$ is the corresponding input linear transformation's weight matrix. Note that for this setting, $\mathbf{h'} \in \mathbb{R}^{KF'}$.
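Concretely, concatenating the heads is just a reshape; a small illustrative sketch (the implementation below does the same with `.view`):

```python
# Illustrative: per-head outputs [N, K, F'] concatenated to [N, K * F'].
N, K, F_prime = 4, 2, 3
per_head = torch.randn(N, K, F_prime)
per_head.view(N, K * F_prime).shape  # torch.Size([4, 6])
```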
```python
class GAT(MessagePassing):
def __init__(self, in_channels, out_channels, heads = 2,
negative_slope = 0.2, dropout = 0., **kwargs):
super(GAT, self).__init__(node_dim=0, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.negative_slope = negative_slope
self.dropout = dropout
self.lin_l = None
self.lin_r = None
self.att_l = None
self.att_r = None
############################################################################
# TODO: Your code here!
# Define the layers needed for the message functions below.
# self.lin_l is the linear transformation that you apply to embeddings
# BEFORE message passing.
#
# Pay attention to dimensions of the linear layers, since we're using
# multi-head attention.
# Our implementation is ~1 lines, but don't worry if you deviate from this.
self.lin_l = nn.Linear(self.in_channels, self.heads * self.out_channels)
############################################################################
self.lin_r = self.lin_l
############################################################################
# TODO: Your code here!
# Define the attention parameters \overrightarrow{a_l/r}^T in the above intro.
# You have to deal with multi-head scenarios.
# Use nn.Parameter instead of nn.Linear
# Our implementation is ~2 lines, but don't worry if you deviate from this.
self.att_l = nn.Parameter(torch.randn(heads, self.out_channels))
self.att_r = nn.Parameter(torch.randn(heads, self.out_channels))
############################################################################
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.lin_l.weight)
nn.init.xavier_uniform_(self.lin_r.weight)
nn.init.xavier_uniform_(self.att_l)
nn.init.xavier_uniform_(self.att_r)
def forward(self, x, edge_index, size = None):
H, C = self.heads, self.out_channels
############################################################################
# TODO: Your code here!
# Implement message passing, as well as any pre- and post-processing (our update rule).
# 1. First apply linear transformation to node embeddings, and split that
# into multiple heads. We use the same representations for source and
# target nodes, but apply different linear weights (W_l and W_r)
# 2. Calculate alpha vectors for central nodes (alpha_l) and neighbor nodes (alpha_r).
# 3. Call propagate function to conduct the message passing.
# 3.1 Remember to pass alpha = (alpha_l, alpha_r) as a parameter.
# 3.2 See there for more information: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
# 4. Transform the output back to the shape of N * d.
# Our implementation is ~5 lines, but don't worry if you deviate from this.
# x_l dims: N x H x C
x_l = self.lin_l(x).view(-1, H, C)
# x_r dims: N x H x C
x_r = self.lin_r(x).view(-1, H, C)
        # alpha_l dims: N x H -- the scalar a_l^T (W_l h_i) per node and head,
        # computed as an elementwise product summed over the feature dim C.
        alpha_l = (x_l * self.att_l).sum(dim=-1)
        # alpha_r dims: N x H -- the scalar a_r^T (W_r h_j) per node and head.
        alpha_r = (x_r * self.att_r).sum(dim=-1)
out = self.propagate(edge_index, x = (x_l, x_r), alpha=(alpha_l, alpha_r))
out = out.view(-1, H*C)
############################################################################
return out
def message(self, x_j, alpha_j, alpha_i, index, ptr, size_i):
############################################################################
# TODO: Your code here!
# Implement your message function. Putting the attention in message
# instead of in update is a little tricky.
# 1. Calculate the final attention weights using alpha_i and alpha_j,
# and apply leaky Relu.
# 2. Calculate softmax over the neighbor nodes for all the nodes. Use
# torch_geometric.utils.softmax instead of the one in Pytorch.
# 3. Apply dropout to attention weights (alpha).
# 4. Multiply embeddings and attention weights. As a sanity check, the output
# should be of shape E * H * d.
# 5. ptr (LongTensor, optional): If given, computes the softmax based on
# sorted inputs in CSR representation. You can simply pass it to softmax.
# Our implementation is ~5 lines, but don't worry if you deviate from this.
        alpha_ij = F.leaky_relu(alpha_i + alpha_j, negative_slope=self.negative_slope)
        if ptr is None:
            alpha_ij = softmax(alpha_ij, index)
        else:
            alpha_ij = softmax(alpha_ij, ptr)
        # Only drop attention weights while training.
        alpha_ij = F.dropout(alpha_ij, p=self.dropout, training=self.training)
        # Broadcast the per-head scalar weights over the feature dimension:
        # [E, H, 1] * [E, H, C] -> [E, H, C].
        out = x_j * alpha_ij.unsqueeze(-1)
############################################################################
return out
def aggregate(self, inputs, index, dim_size = None):
############################################################################
# TODO: Your code here!
# Implement your aggregate function here.
# See here as how to use torch_scatter.scatter: https://pytorch-scatter.readthedocs.io/en/latest/_modules/torch_scatter/scatter.html
        # Note that the "reduce" parameter is different from that in GraphSage.
# Our implementation is ~1 lines, but don't worry if you deviate from this.
out = torch_scatter.scatter(inputs, index, dim=self.node_dim, reduce='sum')
############################################################################
return out
```
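And an illustrative shape check for the GAT layer, mirroring the GraphSage check above (made-up inputs):

```python
# Illustrative: one 2-head GAT layer on a 4-node toy graph.
layer = GAT(in_channels=5, out_channels=8, heads=2)
x = torch.randn(4, 5)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])
print(layer(x, edge_index).shape)  # expected: torch.Size([4, 16]) == N x (heads * C)
```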
## Building Optimizers
This function has been implemented for you. **For grading purposes please use the default Adam optimizer**, but feel free to play with other types of optimizers on your own.
```python
import torch.optim as optim
def build_optimizer(args, params):
weight_decay = args.weight_decay
filter_fn = filter(lambda p : p.requires_grad, params)
if args.opt == 'adam':
optimizer = optim.Adam(filter_fn, lr=args.lr, weight_decay=weight_decay)
elif args.opt == 'sgd':
optimizer = optim.SGD(filter_fn, lr=args.lr, momentum=0.95, weight_decay=weight_decay)
elif args.opt == 'rmsprop':
optimizer = optim.RMSprop(filter_fn, lr=args.lr, weight_decay=weight_decay)
elif args.opt == 'adagrad':
optimizer = optim.Adagrad(filter_fn, lr=args.lr, weight_decay=weight_decay)
if args.opt_scheduler == 'none':
return None, optimizer
elif args.opt_scheduler == 'step':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.opt_decay_step, gamma=args.opt_decay_rate)
elif args.opt_scheduler == 'cos':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.opt_restart)
return scheduler, optimizer
```
## Training and Testing
Here we provide you with the functions to train and test. **Please do not modify this part for grading purposes.**
```python
import time
import networkx as nx
import numpy as np
import torch
import torch.optim as optim
from tqdm import trange
import pandas as pd
import copy
from torch_geometric.datasets import TUDataset
from torch_geometric.datasets import Planetoid
from torch_geometric.data import DataLoader
import torch_geometric.nn as pyg_nn
import matplotlib.pyplot as plt
def train(dataset, args):
print("Node task. test set size:", np.sum(dataset[0]['test_mask'].numpy()))
print()
test_loader = loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
# build model
model = GNNStack(dataset.num_node_features, args.hidden_dim, dataset.num_classes,
args)
scheduler, opt = build_optimizer(args, model.parameters())
# train
losses = []
test_accs = []
best_acc = 0
best_model = None
for epoch in trange(args.epochs, desc="Training", unit="Epochs"):
total_loss = 0
model.train()
for batch in loader:
opt.zero_grad()
pred = model(batch)
label = batch.y
pred = pred[batch.train_mask]
label = label[batch.train_mask]
loss = model.loss(pred, label)
loss.backward()
opt.step()
total_loss += loss.item() * batch.num_graphs
total_loss /= len(loader.dataset)
losses.append(total_loss)
if epoch % 10 == 0:
test_acc = test(test_loader, model)
test_accs.append(test_acc)
if test_acc > best_acc:
best_acc = test_acc
best_model = copy.deepcopy(model)
else:
test_accs.append(test_accs[-1])
return test_accs, losses, best_model, best_acc, test_loader
def test(loader, test_model, is_validation=False, save_model_preds=False, model_type=None):
test_model.eval()
correct = 0
# Note that Cora is only one graph!
for data in loader:
with torch.no_grad():
# max(dim=1) returns values, indices tuple; only need indices
pred = test_model(data).max(dim=1)[1]
label = data.y
mask = data.val_mask if is_validation else data.test_mask
# node classification: only evaluate on nodes in test set
pred = pred[mask]
label = label[mask]
if save_model_preds:
print ("Saving Model Predictions for Model Type", model_type)
data = {}
data['pred'] = pred.view(-1).cpu().detach().numpy()
data['label'] = label.view(-1).cpu().detach().numpy()
df = pd.DataFrame(data=data)
# Save locally as csv
df.to_csv('CORA-Node-' + model_type + '.csv', sep=',', index=False)
correct += pred.eq(label).sum().item()
total = 0
for data in loader.dataset:
total += torch.sum(data.val_mask if is_validation else data.test_mask).item()
return correct / total
class objectview(object):
def __init__(self, d):
self.__dict__ = d
```
## Let's Start the Training!
We will be working on the CORA dataset on node-level classification.
This part is implemented for you. **For grading purposes, please do not modify the default parameters.** However, feel free to play with different configurations just for fun!
**Submit your best accuracy and loss on Gradescope.**
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
for args in [
{'model_type': 'GraphSage', 'dataset': 'cora', 'num_layers': 2, 'heads': 1, 'batch_size': 32, 'hidden_dim': 32, 'dropout': 0.5, 'epochs': 500, 'opt': 'adam', 'opt_scheduler': 'none', 'opt_restart': 0, 'weight_decay': 5e-3, 'lr': 0.01},
]:
args = objectview(args)
for model in ['GraphSage', 'GAT']:
args.model_type = model
# Match the dimension.
if model == 'GAT':
args.heads = 2
else:
args.heads = 1
if args.dataset == 'cora':
dataset = Planetoid(root='/tmp/cora', name='Cora')
else:
raise NotImplementedError("Unknown dataset")
test_accs, losses, best_model, best_acc, test_loader = train(dataset, args)
print("Maximum test set accuracy: {0}".format(max(test_accs)))
print("Minimum loss: {0}".format(min(losses)))
# Run test for our best model to save the predictions!
test(test_loader, best_model, is_validation=False, save_model_preds=True, model_type=model)
print()
plt.title(dataset.name)
plt.plot(losses, label="training loss" + " - " + args.model_type)
plt.plot(test_accs, label="test accuracy" + " - " + args.model_type)
plt.legend()
plt.show()
```
## Question 1.1: What is the maximum accuracy obtained on the test set for GraphSage? (10 points)
Running the cell above will show the results of your best model and save your best model's predictions to a file named *CORA-Node-GraphSage.csv*.
As we have seen before, you can view this file by clicking on the *Folder* icon on the left side panel. When you submit your assignment, you will have to download this file and attach it to your submission.
## Question 1.2: What is the maximum accuracy obtained on test set for GAT? (10 points)
Running the training cell above will also save your best GAT model predictions as *CORA-Node-GAT.csv*.
When you submit your assignment, you will have to download this file and attach it to your submission.
# 2) DeepSNAP Basics
In previous Colabs, we have seen graph class (NetworkX) and tensor (PyG) representations of graphs. The graph class `nx.Graph` provides rich analysis and manipulation functionalities, such as computing the clustering coefficient and PageRank vector for a graph. When working with PyG we were then introduced to tensor based representation of graphs (i.e. edge tensor `edge_index` and node attributes tensors `x` and `y`).
In this section, we present DeepSNAP, a package that combines the benefits of both graph representations and offers a full pipeline for GNN training / validation / testing. Namely, DeepSNAP includes a graph class representation to allow for more efficient graph manipulation and analysis, in addition to a tensor based representation for efficient message passing computation.
In general, [DeepSNAP](https://github.com/snap-stanford/deepsnap) is a Python library to assist efficient deep learning on graphs. DeepSNAP enables flexible graph manipulation, standard graph learning pipelines, heterogeneous graphs, and overall offers a simple graph learning API. In more detail:
1. DeepSNAP allows for sophisticated graph manipulations, such as feature computation, pretraining, subgraph extraction etc. during/before training.
2. DeepSNAP standardizes the pipelines for node, edge, and graph-level prediction tasks under inductive or transductive settings. Specifically, DeepSNAP removes non-trivial and repetitive design choices previously left to the user, such as how to split datasets. DeepSNAP thus greatly reduces repetitive coding effort and enables fair model comparison.
3. Many real-world graphs are heterogeneous in nature (i.e. include different node types or edge types). However, most packages lack complete support for heterogeneous graphs, including data storage and flexible message passing. DeepSNAP provides an efficient and flexible heterogeneous graph that supports both node and edge heterogeneity.
In this next section, we will focus on working with DeepSNAP for graph manipulation and dataset splitting.
[DeepSNAP](https://github.com/snap-stanford/deepsnap) is a newly released project and it is still under development. If you find any bugs or have any improvement ideas, feel free to raise issues or create pull requests on the GitHub directly :)
## Setup
```python
import torch
import networkx as nx
import matplotlib.pyplot as plt
from deepsnap.graph import Graph
from deepsnap.batch import Batch
from deepsnap.dataset import GraphDataset
from torch_geometric.datasets import Planetoid, TUDataset
from torch.utils.data import DataLoader
def visualize(G, color_map=None, seed=123):
if color_map is None:
color_map = '#c92506'
plt.figure(figsize=(8, 8))
nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G, seed=seed), \
label=None, node_color=color_map, node_shape='o', node_size=150)
edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G, seed=seed), alpha=0.5)
if color_map is not None:
plt.scatter([],[], c='#c92506', label='Nodes with label 0', edgecolors="black", s=140)
plt.scatter([],[], c='#fcec00', label='Nodes with label 1', edgecolors="black", s=140)
plt.legend(prop={'size': 13}, handletextpad=0)
nodes.set_edgecolor('black')
plt.show()
```
## DeepSNAP Graph
The `deepsnap.graph.Graph` class is the core class of DeepSNAP. It not only represents a graph in tensor format but also includes a graph object from a graph manipulation package.
Currently DeepSNAP supports [NetworkX](https://networkx.org/) and [Snap.py](https://snap.stanford.edu/snappy/doc/index.html) as back end graph manipulation packages.
In this Colab, we will focus on using NetworkX as the back end graph manipulation package.
### NetworkX to DeepSNAP
To begin, let us first work through converting a simple random NetworkX graph to a DeepSNAP graph.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
num_nodes = 100
p = 0.05
seed = 100
# Generate a networkx random graph
G = nx.gnp_random_graph(num_nodes, p, seed=seed)
# Generate some random node features and labels
node_feature = {node : torch.rand([5, ]) for node in G.nodes()}
node_label = {node : torch.randint(0, 2, ()) for node in G.nodes()}
# Set the random features and labels to G
nx.set_node_attributes(G, node_feature, name='node_feature')
nx.set_node_attributes(G, node_label, name='node_label')
# Print one node example
for node in G.nodes(data=True):
print(node)
break
color_map = ['#c92506' if node[1]['node_label'].item() == 0 else '#fcec00' for node in G.nodes(data=True)]
# Visualize the graph
visualize(G, color_map=color_map)
# Transform the networkx graph into the deepsnap graph
graph = Graph(G)
# Print out the general deepsnap graph information
print(graph)
# DeepSNAP will convert node attributes to tensors
# Notice the type of tensors
print("Node feature (node_feature) has shape {} and type {}".format(graph.node_feature.shape, graph.node_feature.dtype))
print("Node label (node_label) has shape {} and type {}".format(graph.node_label.shape, graph.node_label.dtype))
# DeepSNAP will also generate the edge_index tensor
print("Edge index (edge_index) has shape {} and type {}".format(graph.edge_index.shape, graph.edge_index.dtype))
    # Different from only storing tensors, the deepsnap graph also keeps a reference to the networkx graph
    # We will discuss why this reference is helpful later
    print("The DeepSNAP graph has {} as the internal manipulation graph".format(type(graph.G)))
```
### Tensor graph attributes
Similar to the native PyG tensor based representation, DeepSNAP includes a graph tensor based representation with three levels of graph attributes. In this example, we primarily have **node level** attributes including `node_feature` and `node_label`. The other two levels of attributes are **edge** and **graph** attributes. Similar to node level attributes, these attributes are prefixed by their respective type. For example, the features become `edge_feature` or `graph_feature` and labels becomes `edge_label` or `graph_label` etc.
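For instance, here is a hedged sketch (variable names are hypothetical) of attaching edge-level attributes with the same prefix convention, which DeepSNAP then tensorizes as `edge_feature`:

```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
    # Illustrative: edge attributes follow the same `*_feature` convention.
    G_demo = nx.gnp_random_graph(10, 0.3, seed=1)
    nx.set_node_attributes(G_demo, {v: torch.rand(5) for v in G_demo.nodes()}, 'node_feature')
    nx.set_edge_attributes(G_demo, {e: torch.rand(3) for e in G_demo.edges()}, 'edge_feature')
    print(Graph(G_demo))
```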
### Graph Object
DeepSNAP additionally allows us to easily access graph information through the backend graph object and graph manipulation package.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
# Number of nodes
print("The random graph has {} nodes".format(graph.num_nodes))
# Number of edges
print("The random graph has {} edges".format(graph.num_edges))
```
The random graph has 100 nodes
The random graph has 262 edges
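Because `graph.G` is a plain NetworkX graph, the full NetworkX API also applies directly; a small illustrative example:

```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
    # Illustrative: run any NetworkX analysis on the backend graph.
    print("Average clustering coefficient: {:.4f}".format(nx.average_clustering(graph.G)))
```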
### PyG to DeepSNAP
Lastly, DeepSNAP provides functionality to automatically transform a PyG dataset into a list of DeepSNAP graphs.
Here we transform the CORA dataset into a list with one DeepSNAP graph (i.e. the singular CORA graph).
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './tmp/cora'
name = 'Cora'
# The Cora dataset
pyg_dataset= Planetoid(root, name)
# PyG dataset to a list of deepsnap graphs
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
# Get the first deepsnap graph (CORA only has one graph)
graph = graphs[0]
print(graph)
```
Graph(G=[], edge_index=[2, 10556], edge_label_index=[2, 10556], node_feature=[2708, 1433], node_label=[2708], node_label_index=[2708])
## Question 2.1: How many classes are in the CORA graph? How many features does each node have? (5 points)
```python
def get_num_node_classes(graph):
# TODO: Implement a function that takes a deepsnap graph object
# and return the number of node classes of that graph.
num_node_classes = 0
############# Your code here #############
## (~1 line of code)
## Note
## 1. Colab autocomplete functionality might be useful
## 2. DeepSNAP documentation might be useful https://snap.stanford.edu/deepsnap/modules/graph.html
num_node_classes = graph.num_node_labels
##########################################
return num_node_classes
def get_num_node_features(graph):
# TODO: Implement a function that takes a deepsnap graph object
# and return the number of node features of that graph.
num_node_features = 0
############# Your code here #############
## (~1 line of code)
## Note
## 1. Colab autocomplete functionality might be useful
## 2. DeepSNAP documentation might be useful https://snap.stanford.edu/deepsnap/modules/graph.html
num_node_features = graph.num_node_features
##########################################
return num_node_features
if 'IS_GRADESCOPE_ENV' not in os.environ:
num_node_classes = get_num_node_classes(graph)
num_node_features = get_num_node_features(graph)
print("{} has {} classes".format(name, num_node_classes))
print("{} has {} features".format(name, num_node_features))
```
Cora has 7 classes
Cora has 1433 features
## DeepSNAP Dataset
Now, we will learn how to create DeepSNAP datasets. A `deepsnap.dataset.GraphDataset` contains a list of `deepsnap.graph.Graph` objects. In addition to the list of graphs, we specify what task the dataset will be used on, such as node level task (`task=node`), edge level task (`task=link_pred`) and graph level task (`task=graph`).
The GraphDataset class contains many other useful parameters that can be specified during initialization. If you are interested, you can take a look at the [documentation](https://snap.stanford.edu/deepsnap/modules/dataset.html#deepsnap-graphdataset).
As an example, we will first look at the COX2 dataset, which contains 467 graphs. In initializing our dataset, we convert the PyG dataset into its corresponding DeepSNAP dataset and specify the task as `graph`.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './tmp/cox2'
name = 'COX2'
# Load the dataset through PyG
pyg_dataset = TUDataset(root, name)
# Convert to a list of deepsnap graphs
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
# Convert list of deepsnap graphs to deepsnap dataset with specified task=graph
dataset = GraphDataset(graphs, task='graph')
print(dataset)
```
GraphDataset(467)
## Question 2.2: What is the label of the graph with index 100? (5 points)
```python
def get_graph_class(dataset, idx):
# TODO: Implement a function that takes a deepsnap dataset object,
# the index of a graph in the dataset, and returns the class/label
# of the graph (in integer).
label = -1
############# Your code here ############
## (~1 line of code)
## Notice
## 1. The graph label refers to a graph-level attribute
label = dataset[idx].graph_label
#########################################
return label
if 'IS_GRADESCOPE_ENV' not in os.environ:
graph_0 = dataset[0]
print(graph_0)
idx = 100
label = get_graph_class(dataset, idx)
print('Graph with index {} has label {}'.format(idx, label))
```
Graph(G=[], edge_index=[2, 82], edge_label_index=[2, 82], graph_label=[1], node_feature=[39, 35], node_label_index=[39], task=[])
Graph with index 100 has label tensor([0])
## Question 2.3: How many edges are in the graph with index 200? (5 points)
```python
def get_graph_num_edges(dataset, idx):
# TODO: Implement a function that takes a deepsnap dataset object,
# the index of a graph in dataset, and returns the number of
# edges in the graph (in integer).
num_edges = 0
############# Your code here ############
## (~1 lines of code)
## Note
## 1. You can use the class property directly
num_edges = dataset[idx].num_edges
#########################################
return num_edges
if 'IS_GRADESCOPE_ENV' not in os.environ:
idx = 200
num_edges = get_graph_num_edges(dataset, idx)
print('Graph with index {} has {} edges'.format(idx, num_edges))
```
Graph with index 200 has 49 edges
# 3) DeepSNAP Advanced
Now that we have learned the basics of DeepSNAP lets move on to some more advanced functionalities.
In this section we will use DeepSNAP for graph feature computation and transductive/inductive dataset splitting.
## Setup
```python
import torch
import networkx as nx
import matplotlib.pyplot as plt
from deepsnap.graph import Graph
from deepsnap.batch import Batch
from deepsnap.dataset import GraphDataset
from torch_geometric.datasets import Planetoid, TUDataset
from torch.utils.data import DataLoader
```
## Data Split in Graphs
As discussed in (LECTURE REFERENCE), data splitting for graphs can be much harder than for CV or NLP.
In general, data splitting is divided into two settings, **inductive** and **transductive**.
## Inductive Split
In an inductive setting, we split a list of multiple graphs into disjoint training / validation / test sets.
Here is an example of using DeepSNAP to inductively split a list of graphs for a graph level task (graph classification etc.):
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './tmp/cox2'
name = 'COX2'
pyg_dataset = TUDataset(root, name)
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
# Here we specify the task as graph-level task such as graph classification
task = 'graph'
dataset = GraphDataset(graphs, task=task)
# Specify transductive=False (inductive)
dataset_train, dataset_val, dataset_test = dataset.split(transductive=False, split_ratio=[0.8, 0.1, 0.1])
print("COX2 train dataset: {}".format(dataset_train))
print("COX2 validation dataset: {}".format(dataset_val))
print("COX2 test dataset: {}".format(dataset_test))
```
COX2 train dataset: GraphDataset(373)
COX2 validation dataset: GraphDataset(46)
COX2 test dataset: GraphDataset(48)
## Transductive Split
In the transductive setting, the training / validation / test sets are all over the same graph. As discussed in (LECTURE REF), we use a transductive setting when we do not need to generalize to new unseen graphs.
As an example, here we transductively split the CORA graph for a node level task, such as node classification.
(Note that DeepSNAP's default split setting is random, i.e. DeepSNAP randomly assigns e.g. nodes to the train / val / test sets; you can instead use a fixed split by specifying `fixed_split=True` when loading the dataset from PyG, or by changing `node_label_index` directly; see the sketch after this example.)
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './tmp/cora'
name = 'Cora'
pyg_dataset = Planetoid(root, name)
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
# Here we specify the task as node-level task such as node classification
task = 'node'
dataset = GraphDataset(graphs, task=task)
# Specify we want the transductive splitting
dataset_train, dataset_val, dataset_test = dataset.split(transductive=True, split_ratio=[0.8, 0.1, 0.1])
print("Cora train dataset: {}".format(dataset_train))
print("Cora validation dataset: {}".format(dataset_val))
print("Cora test dataset: {}".format(dataset_test))
print("Original Cora has {} nodes".format(dataset.num_nodes[0]))
    # The nodes in each set can be found in node_label_index
print("After the split, Cora has {} training nodes".format(dataset_train[0].node_label_index.shape[0]))
print("After the split, Cora has {} validation nodes".format(dataset_val[0].node_label_index.shape[0]))
print("After the split, Cora has {} test nodes".format(dataset_test[0].node_label_index.shape[0]))
```
Cora train dataset: GraphDataset(1)
Cora validation dataset: GraphDataset(1)
Cora test dataset: GraphDataset(1)
Original Cora has 2708 nodes
After the split, Cora has 2166 training nodes
After the split, Cora has 270 validation nodes
After the split, Cora has 272 test nodes
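As an aside, here is a minimal sketch of loading Cora with its canonical fixed split rather than a random one. This assumes the `fixed_split` flag on `pyg_to_graphs` mentioned above returns one list of graphs per split; check the DeepSNAP documentation for the exact return format.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
    pyg_dataset = Planetoid('./tmp/cora', 'Cora')
    # Assumption: with fixed_split=True the fixed Planetoid train / val / test
    # node assignments are kept, instead of being re-split randomly later.
    graphs_train, graphs_val, graphs_test = \
        GraphDataset.pyg_to_graphs(pyg_dataset, fixed_split=True)
    dataset_train = GraphDataset(graphs_train, task='node')
```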
## Edge Level Split
Compared to node and graph level splitting, edge level splitting is a little trickier ;)
For edge level splitting we need to consider several different tasks:
1. Splitting positive edges into train / val / test datasets.
2. Sampling / re-sampling negative edges (i.e. edges not present in the graph).
3. Splitting edges into message passing and supervision edges.
With regard to point 3, for edge level data splitting we classify edges into two types. The first is `message passing` edges: edges used for message passing by our GNN. The second is `supervision` edges: edges used in the loss function for backpropagation. DeepSNAP supports two different modes, in which the `message passing` and `supervision` edges are either shared or disjoint.
### All Edge Splitting Mode
First, we explore the `edge_train_mode="all"` mode for edge level splitting, where the `message passing` and `supervision` edges are shared during training.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './tmp/cora'
name = 'Cora'
pyg_dataset = Planetoid(root, name)
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
# Specify task as link_pred for edge-level task
task = 'link_pred'
# Specify the train mode, "all" mode is default for deepsnap dataset
edge_train_mode = "all"
dataset = GraphDataset(graphs, task=task, edge_train_mode=edge_train_mode)
# Transductive link prediction split
dataset_train, dataset_val, dataset_test = dataset.split(transductive=True, split_ratio=[0.8, 0.1, 0.1])
print("Cora train dataset: {}".format(dataset_train))
print("Cora validation dataset: {}".format(dataset_val))
print("Cora test dataset: {}".format(dataset_test))
```
Cora train dataset: GraphDataset(1)
Cora validation dataset: GraphDataset(1)
Cora test dataset: GraphDataset(1)
In DeepSNAP, the indices of the supervision edges are stored in the `edge_label_index` tensor and the corresponding edge labels are stored in the `edge_label` tensor.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
print("Original Cora graph has {} edges".format(dataset[0].num_edges))
print()
print("Train set has {} message passing edge".format(dataset_train[0].edge_index.shape[1] // 2))
print("Train set has {} supervision (positive) edges".format(dataset_train[0].edge_label_index.shape[1] // 4))
print()
print("Validation set has {} message passing edge".format(dataset_val[0].edge_index.shape[1] // 2))
print("Validation set has {} supervision (positive) edges".format(dataset_val[0].edge_label_index.shape[1] // 4))
print()
print("Test set has {} message passing edge".format(dataset_test[0].edge_index.shape[1] // 2))
print("Test set has {} supervision (positive) edges".format(dataset_test[0].edge_label_index.shape[1] // 4))
```
Original Cora graph has 5278 edges
Train set has 4222 message passing edges
Train set has 4222 supervision (positive) edges
Validation set has 4222 message passing edges
Validation set has 527 supervision (positive) edges
Test set has 4749 message passing edges
Test set has 529 supervision (positive) edges
**Specific things to note in `all` mode**:
* At training time: the supervision edges are the same as the training message passing edges.
* At validation time: the message passing edges are the union of the training message passing edges and the training supervision edges (which, in `all` mode, are the same set). We now also include a set of unseen validation supervision edges, disjoint from the training supervision edges.
* At test time: the message passing edges are the union of training message passing edges, training supervision edges, and validation supervision edges. The test supervision edges are then disjoint from both the training supervision edges and the validation supervision edges.
* We exclude negative edges in this illustration. However, the attributes `edge_label` and `edge_label_index` naturally also include the negative supervision edges (by default the number of negative edges is the same as the number of positive edges, hence the divide by 4 above).
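To make the divide-by-4 concrete, here is a quick arithmetic check (a sketch using the splits computed above): `edge_label_index` holds each undirected supervision edge in both directions, for the positives and for an equal number of sampled negatives.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
    num_cols = dataset_train[0].edge_label_index.shape[1]
    # both directions (x2) and an equal number of negatives (x2) => factor 4
    print("{} = 4 x {}".format(num_cols, num_cols // 4))
```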
Now that we have seen the basics of the `all` mode for edge splitting, we will implement a function that checks whether two edge index tensors are disjoint, and then use it to explore more edge splitting properties.
## Question 3: Implement a function that checks whether two edge_index tensors are disjoint (i.e. do not share any common edges). Then answer the True/False questions below. (5 points)
```python
def edge_indices_disjoint(edge_index_1, edge_index_2):
# TODO: Implement this function that takes two edge index tensors,
# and returns whether these two edge index tensors are disjoint.
disjoint = None
############# Your code here ############
## (~5 lines of code)
## Note
    ## 1. Here disjoint means that no edge belongs to both edge index tensors
    ## 2. You do not need to consider the undirected case. For example, if edge_index_1 contains
    ##    edge (a, b) and edge_index_2 contains edge (b, a), we treat them as disjoint in this
    ##    function.
    edge_set_1 = set(map(tuple, edge_index_1.T.tolist()))
    edge_set_2 = set(map(tuple, edge_index_2.T.tolist()))
    disjoint = len(edge_set_1 & edge_set_2) == 0
#########################################
return disjoint
```
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
num_train_edges = dataset_train[0].edge_label_index.shape[1] // 2
train_pos_edge_index = dataset_train[0].edge_label_index[:, :num_train_edges]
train_neg_edge_index = dataset_train[0].edge_label_index[:, num_train_edges:]
print("3.1 Training (supervision) positve and negative edges are disjoint = {}"\
.format(edge_indices_disjoint(train_pos_edge_index, train_neg_edge_index)))
num_val_edges = dataset_val[0].edge_label_index.shape[1] // 2
val_pos_edge_index = dataset_val[0].edge_label_index[:, :num_val_edges]
val_neg_edge_index = dataset_val[0].edge_label_index[:, num_val_edges:]
print("3.2 Validation (supervision) positve and negative edges are disjoint = {}"\
.format(edge_indices_disjoint(val_pos_edge_index, val_neg_edge_index)))
num_test_edges = dataset_test[0].edge_label_index.shape[1] // 2
test_pos_edge_index = dataset_test[0].edge_label_index[:, :num_test_edges]
test_neg_edge_index = dataset_test[0].edge_label_index[:, num_test_edges:]
print("3.3 Test (supervision) positve and negative edges are disjoint = {}"\
.format(edge_indices_disjoint(test_pos_edge_index, test_neg_edge_index)))
print("3.4 Test (supervision) positve and validation (supervision) positve edges are disjoint = {}"\
.format(edge_indices_disjoint(test_pos_edge_index, val_pos_edge_index)))
print("3.5 Validation (supervision) positve and training (supervision) positve edges are disjoint = {}"\
.format(edge_indices_disjoint(val_pos_edge_index, train_pos_edge_index)))
```
3.1 Training (supervision) positive and negative edges are disjoint = True
3.2 Validation (supervision) positive and negative edges are disjoint = True
3.3 Test (supervision) positive and negative edges are disjoint = True
3.4 Test (supervision) positive and validation (supervision) positive edges are disjoint = True
3.5 Validation (supervision) positive and training (supervision) positive edges are disjoint = True
### Disjoint Edge Splitting Mode
Now we will look at a relatively more complex transductive edge split setting: the `edge_train_mode="disjoint"` mode in DeepSNAP. In this setting, the `message passing` and `supervision` edges are completely disjoint.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
edge_train_mode = "disjoint"
dataset = GraphDataset(graphs, task='link_pred', edge_train_mode=edge_train_mode)
orig_edge_index = dataset[0].edge_index
dataset_train, dataset_val, dataset_test = dataset.split(
transductive=True, split_ratio=[0.8, 0.1, 0.1])
train_message_edge_index = dataset_train[0].edge_index
train_sup_edge_index = dataset_train[0].edge_label_index
val_message_edge_index = dataset_val[0].edge_index
val_sup_edge_index = dataset_val[0].edge_label_index
test_message_edge_index = dataset_test[0].edge_index
test_sup_edge_index = dataset_test[0].edge_label_index
print("Original Cora graph has {} edges".format(dataset[0].num_edges))
print()
print("Train set has {} message passing edge".format(train_message_edge_index.shape[1] // 2))
print("Train set has {} supervision (positive) edges".format(train_sup_edge_index.shape[1] // 4))
print()
print("Validation set has {} message passing edge".format(val_message_edge_index.shape[1] // 2))
print("Validation set has {} supervision (positive) edges".format(val_sup_edge_index.shape[1] // 4))
print()
print("Test set has {} message passing edge".format(test_message_edge_index.shape[1] // 2))
print("Test set has {} supervision (positive) edges".format(test_sup_edge_index.shape[1] // 4))
```
Original Cora graph has 5278 edges
Train set has 3377 message passing edges
Train set has 845 supervision (positive) edges
Validation set has 4222 message passing edges
Validation set has 527 supervision (positive) edges
Test set has 4749 message passing edges
Test set has 529 supervision (positive) edges
**Specific things to note in `disjoint` mode**:
* At training time: the training supervision edges are disjoint from the training message passing edges.
* At validation time: the message passing edges are the union of training message passing edges and training supervision edges. The validation supervision edges are disjoint from both the training message passing and supervision edges.
* At test time: the message passing edges are the training message passing edges, training supervision edges, and validation supervision edges. The test supervision edges are disjoint from all the training and validation edges.
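As a quick sanity check of the first property, we can reuse the `edge_indices_disjoint` function from Question 3 (a sketch; recall from Question 3 that the positive supervision edges occupy the first half of `edge_label_index`):
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
    # First half of edge_label_index = positive supervision edges,
    # second half = sampled negative edges.
    num_pos = train_sup_edge_index.shape[1] // 2
    train_pos_sup_edge_index = train_sup_edge_index[:, :num_pos]
    print("Train supervision and message passing edges are disjoint = {}".format(
        edge_indices_disjoint(train_pos_sup_edge_index, train_message_edge_index)))
```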
## Negative Edges
For edge level tasks, sampling negative edges is critical. Moreover, we want to re-sample the negative edges during each training iteration.
Below we print the negative edges of the training and validation sets over two iterations.
This demonstrates that the negative edges are re-sampled only during training; the validation negatives stay fixed.
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
dataset = GraphDataset(graphs, task='link_pred', edge_train_mode="disjoint")
datasets = {}
follow_batch = []
datasets['train'], datasets['val'], datasets['test'] = dataset.split(
transductive=True, split_ratio=[0.8, 0.1, 0.1])
dataloaders = {
split: DataLoader(
ds, collate_fn=Batch.collate(follow_batch),
batch_size=1, shuffle=(split=='train')
)
for split, ds in datasets.items()
}
neg_edges_1 = None
for batch in dataloaders['train']:
num_edges = batch.edge_label_index.shape[1] // 2
neg_edges_1 = batch.edge_label_index[:, num_edges:]
print("First iteration training negative edges:")
print(neg_edges_1)
break
neg_edges_2 = None
for batch in dataloaders['train']:
num_edges = batch.edge_label_index.shape[1] // 2
neg_edges_2 = batch.edge_label_index[:, num_edges:]
print("Second iteration training negative edges:")
print(neg_edges_2)
break
neg_edges_1 = None
for batch in dataloaders['val']:
num_edges = batch.edge_label_index.shape[1] // 2
neg_edges_1 = batch.edge_label_index[:, num_edges:]
print("First iteration validation negative edges:")
print(neg_edges_1)
break
neg_edges_2 = None
for batch in dataloaders['val']:
num_edges = batch.edge_label_index.shape[1] // 2
neg_edges_2 = batch.edge_label_index[:, num_edges:]
print("Second iteration validation negative edges:")
print(neg_edges_2)
break
```
First iteration training negative edges:
tensor([[ 929, 69, 1042, ..., 572, 1133, 358],
[1410, 2548, 2525, ..., 645, 2494, 2686]])
Second iteration training negative edges:
tensor([[1825, 2407, 2433, ..., 599, 940, 868],
[ 250, 1064, 514, ..., 1799, 2427, 52]])
First iteration validation negative edges:
tensor([[ 2, 1232, 972, ..., 1000, 2505, 1749],
[1156, 2353, 645, ..., 2365, 1618, 409]])
Second iteration validation negative edges:
tensor([[ 2, 1232, 972, ..., 1000, 2505, 1749],
[1156, 2353, 645, ..., 2365, 1618, 409]])
If you are interested in more graph splitting settings, please refer to the DeepSNAP dataset [documentation](https://snap.stanford.edu/deepsnap/modules/dataset.html).
## Graph Transformation and Feature Computation
The other core functionality of DeepSNAP is graph transformation / feature computation.
In DeepSNAP, we divide graph transformation / feature computation into two different types. The first includes transformations before training (e.g. transform the whole dataset before training directly), and the second includes transformations during training (transform batches of graphs).
Below is an example that uses the NetworkX backend to calculate the PageRank value for each node and then transforms the node features by concatenating each node's PageRank score (a transformation applied to the dataset before training).
```python
def pagerank_transform_fn(graph):
# Get the referenced networkx graph
G = graph.G
# Calculate the pagerank by using networkx
pr = nx.pagerank(G)
# Transform the pagerank values to tensor
pr_feature = torch.tensor([pr[node] for node in range(graph.num_nodes)], dtype=torch.float32)
pr_feature = pr_feature.view(graph.num_nodes, 1)
# Concat the pagerank values to the node feature
graph.node_feature = torch.cat([graph.node_feature, pr_feature], dim=-1)
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './tmp/cox2'
name = 'COX2'
pyg_dataset = TUDataset(root, name)
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
dataset = GraphDataset(graphs, task='graph')
print("Number of features before transformation: {}".format(dataset.num_node_features))
dataset.apply_transform(pagerank_transform_fn, update_tensor=False)
print("Number of features after transformation: {}".format(dataset.num_node_features))
```
Number of features before transformation: 35
Number of features after transformation: 36
## Question 4: Implement a transformation that adds the clustering coefficient of each node to its feature vector and then report the clustering coefficient of the node with index 3 in the graph with index 406 (5 points).
```python
def cluster_transform_fn(graph):
    # TODO: Implement a function that takes a deepsnap graph object and
    # transforms the graph by adding each node's clustering coefficient to
    # its graph.node_feature representation
############# Your code here ############
## (~5 lines of code)
## Note
## 1. Compute the clustering coefficient value for each node and
## concat this value to the last dimension of graph.node_feature
    # Get the referenced networkx graph
    G = graph.G
    # Calculate the clustering coefficient of each node using networkx
    cc = nx.algorithms.cluster.clustering(G)
    # Transform the clustering coefficient values to a tensor
    # (use float32 to match the dtype of graph.node_feature)
    cc_feature = torch.tensor([cc[node] for node in range(graph.num_nodes)], dtype=torch.float32)
    cc_feature = cc_feature.view(graph.num_nodes, 1)
    # Concat the clustering coefficient values to the node features
    graph.node_feature = torch.cat([graph.node_feature, cc_feature], dim=-1)
#########################################
if 'IS_GRADESCOPE_ENV' not in os.environ:
root = './cox2'
name = 'COX2'
pyg_dataset = TUDataset(root, name)
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
dataset = GraphDataset(graphs, task='graph')
# Transform the dataset
dataset.apply_transform(cluster_transform_fn, update_tensor=False)
node_idx = 3
graph_idx = 406
node_feature = dataset[graph_idx].node_feature
print("The node has clustering coefficient: {}".format(round(node_feature[node_idx][-1].item(), 2)))
```
The node has clustering coefficient: 0.17
### Final Thoughts
Apart from transforming the whole dataset before training, DeepSNAP can also transform the graph (usually sampled batches of graphs, `deepsnap.batch.Batch`) during each training iteration.
Also, DeepSNAP supports synchronization between the referenced graph objects and the tensor representations during a transformation. For example, you can update only the NetworkX graph object in the transform function, and by specifying `update_tensor=True` the internal tensor representations are automatically updated!
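For instance, here is a minimal sketch of that synchronization (our own illustrative example, not part of the original assignment): the transform mutates only the referenced NetworkX graph, and `update_tensor=True` asks DeepSNAP to rebuild the internal tensors from the graph objects.
```python
def add_self_loops_transform_fn(graph):
    # Only touch the referenced NetworkX graph; leave the tensors alone.
    for node in list(graph.G.nodes):
        graph.G.add_edge(node, node)

if 'IS_GRADESCOPE_ENV' not in os.environ:
    print("Edges before: {}".format(dataset[0].num_edges))
    # update_tensor=True re-synchronizes e.g. edge_index from graph.G
    dataset.apply_transform(add_self_loops_transform_fn, update_tensor=True)
    print("Edges after: {}".format(dataset[0].num_edges))
```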
For more information, please refer to the DeepSNAP [documentation](https://snap.stanford.edu/deepsnap/).
# 4) Edge Level Prediction
From the last section, we learned how DeepSNAP transductively splits edges for edge level tasks. For the last part of this notebook, we will use DeepSNAP and PyG together to implement a simple edge level prediction (link prediction) model!
Specifically, we will use a two-layer GraphSAGE model to generate node embeddings, and then compute link predictions through a dot product link prediction head. Namely, given an edge (u, v) with GNN feature embeddings $f_u$ and $f_v$, our link prediction head generates its link prediction as $f_u \cdot f_v$.
To give a brief intuition for this dot product link prediction model: we are learning a GNN that embeds nodes such that nodes sharing an edge in the graph are closer within the embedding space than nodes that do not. The dot product provides a proxy for closeness in the embedding space: a large positive dot product indicates that two vectors are closely aligned (the angle between them is small), whereas a negative dot product indicates that the vectors are unaligned (the angle between them is greater than 90°).
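To make this concrete, here is a tiny standalone sketch of the scoring step with made-up embeddings: an aligned pair of embeddings yields a large positive score and hence a high predicted edge probability after the sigmoid, while an anti-aligned pair yields a negative score and a low probability.
```python
import torch

f_u = torch.tensor([1.0, 2.0, 0.5])
f_v = torch.tensor([0.8, 1.5, 0.2])    # roughly aligned with f_u
f_w = torch.tensor([-0.9, -1.4, 0.1])  # roughly opposite to f_u

score_uv = torch.dot(f_u, f_v)  # 3.9   -> sigmoid ~ 0.98, likely edge
score_uw = torch.dot(f_u, f_w)  # -3.65 -> sigmoid ~ 0.03, unlikely edge
print(torch.sigmoid(score_uv).item(), torch.sigmoid(score_uw).item())
```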
```python
import copy
import torch
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from deepsnap.graph import Graph
from deepsnap.batch import Batch
from deepsnap.dataset import GraphDataset
from torch_geometric.datasets import Planetoid, TUDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
class LinkPredModel(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes, dropout=0.2):
super(LinkPredModel, self).__init__()
self.conv1 = SAGEConv(input_dim, hidden_dim)
self.conv2 = SAGEConv(hidden_dim, num_classes)
self.loss_fn = None
############# Your code here #############
## (~1 line of code)
## Note
## 1. Initialize the loss function to BCEWithLogitsLoss
        self.loss_fn = torch.nn.BCEWithLogitsLoss()
##########################################
self.dropout = dropout
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
def forward(self, batch):
node_feature, edge_index, edge_label_index = batch.node_feature, batch.edge_index, batch.edge_label_index
############# Your code here #############
        ## (~6 lines of code)
## Note
## 1. Feed the node feature into the first conv layer
## 2. Add a ReLU after the first conv layer
## 3. Add dropout after the ReLU (with probability self.dropout)
## 4. Feed the output to the second conv layer
## 5. Select the embeddings of the source nodes and destination nodes
## by using the edge_label_index and compute the similarity of each pair
## by dot product
x = self.conv1(node_feature, edge_index)
x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, edge_index)
x_src = x[edge_label_index[0]]
x_dst = x[edge_label_index[1]]
x_similarity = x_src * x_dst
pred = torch.sum(x_similarity, dim=-1)
##########################################
return pred
def loss(self, pred, link_label):
return self.loss_fn(pred, link_label)
```
```python
import pandas as pd
from sklearn.metrics import roc_auc_score
def train(model, dataloaders, optimizer, args):
val_max = 0
best_model = model
for epoch in range(1, args["epochs"]):
for i, batch in enumerate(dataloaders['train']):
batch.to(args["device"])
############# Your code here #############
## (~6 lines of code)
## Note
## 1. Zero grad the optimizer
## 2. Compute loss and backpropagate
## 3. Update the model parameters
optimizer.zero_grad()
pred = model(batch)
loss = model.loss(pred, batch.edge_label.type_as(pred))
loss.backward()
optimizer.step()
##########################################
log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}, Loss: {}'
score_train = test(model, dataloaders['train'], args)
score_val = test(model, dataloaders['val'], args)
score_test = test(model, dataloaders['test'], args)
print(log.format(epoch, score_train, score_val, score_test, loss.item()))
if val_max < score_val:
val_max = score_val
best_model = copy.deepcopy(model)
return best_model
def test(model, dataloader, args, save_model_preds=False):
model.eval()
score = 0
preds = None
labels = None
############# Your code here #############
## (~7 lines of code)
## Note
## 1. Loop through batches in the dataloader (Note for us there is only one batch!)
## 2. Feed the batch to the model
## 3. Feed the model output to sigmoid
## 4. Compute the ROC-AUC score by using sklearn roc_auc_score function
## Note: Look into flattening and converting torch tensors into numpy arrays
## 5. Edge labels are stored in batch.edge_label
## 6. Make sure to save your **numpy** model predictions as 'preds'
## and the **numpy** edge labels as 'labels'
    for batch in dataloader:
        batch.to(args['device'])
        preds = model(batch)
        preds = torch.sigmoid(preds).cpu().detach().numpy()
        labels = batch.edge_label.cpu().detach().numpy()
        score += roc_auc_score(labels, preds)
    score /= len(dataloader)
##########################################
if save_model_preds:
print ("Saving Link Classification Model Predictions")
print()
data = {}
data['pred'] = preds
data['label'] = labels
df = pd.DataFrame(data=data)
# Save locally as csv
df.to_csv('CORA-Link-Prediction.csv', sep=',', index=False)
return score
```
```python
# Please don't change any parameters
args = {
"device" : 'cuda' if torch.cuda.is_available() else 'cpu',
"hidden_dim" : 128,
"epochs" : 200,
}
```
```python
if 'IS_GRADESCOPE_ENV' not in os.environ:
pyg_dataset = Planetoid('./tmp/cora', 'Cora')
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
dataset = GraphDataset(
graphs,
task='link_pred',
edge_train_mode="disjoint"
)
datasets = {}
datasets['train'], datasets['val'], datasets['test']= dataset.split(
transductive=True, split_ratio=[0.85, 0.05, 0.1])
input_dim = datasets['train'].num_node_features
num_classes = datasets['train'].num_edge_labels
model = LinkPredModel(input_dim, args["hidden_dim"], num_classes).to(args["device"])
model.reset_parameters()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
dataloaders = {split: DataLoader(
ds, collate_fn=Batch.collate([]),
batch_size=1, shuffle=(split=='train'))
for split, ds in datasets.items()}
best_model = train(model, dataloaders, optimizer, args)
log = "Best Model Accuraies Train: {:.4f}, Val: {:.4f}, Test: {:.4f}"
best_train_roc = test(best_model, dataloaders['train'], args)
best_val_roc = test(best_model, dataloaders['val'], args)
best_test_roc = test(best_model, dataloaders['test'], args, save_model_preds=True)
print(log.format(best_train_roc, best_val_roc, best_test_roc))
```
Epoch: 001, Train: 0.5019, Val: 0.4953, Test: 0.5062, Loss: 0.6932055354118347
Epoch: 002, Train: 0.5089, Val: 0.4964, Test: 0.4886, Loss: 0.6931920647621155
Epoch: 003, Train: 0.5206, Val: 0.4928, Test: 0.5118, Loss: 0.6932067275047302
Epoch: 004, Train: 0.5019, Val: 0.4990, Test: 0.5043, Loss: 0.6931806802749634
Epoch: 005, Train: 0.5082, Val: 0.5174, Test: 0.4950, Loss: 0.6932178139686584
Epoch: 006, Train: 0.5022, Val: 0.5271, Test: 0.5069, Loss: 0.6932305097579956
Epoch: 007, Train: 0.5222, Val: 0.4992, Test: 0.5077, Loss: 0.6932173371315002
Epoch: 008, Train: 0.4989, Val: 0.5117, Test: 0.5131, Loss: 0.6931739449501038
Epoch: 009, Train: 0.5138, Val: 0.5229, Test: 0.5192, Loss: 0.693060040473938
Epoch: 010, Train: 0.5168, Val: 0.5268, Test: 0.5048, Loss: 0.6931229829788208
Epoch: 011, Train: 0.5327, Val: 0.5091, Test: 0.5290, Loss: 0.6930479407310486
Epoch: 012, Train: 0.5337, Val: 0.5132, Test: 0.5371, Loss: 0.6930755972862244
Epoch: 013, Train: 0.5220, Val: 0.5220, Test: 0.5197, Loss: 0.693119466304779
Epoch: 014, Train: 0.5292, Val: 0.5178, Test: 0.5210, Loss: 0.6930415630340576
Epoch: 015, Train: 0.5166, Val: 0.5225, Test: 0.5263, Loss: 0.6930370330810547
Epoch: 016, Train: 0.5366, Val: 0.5217, Test: 0.5326, Loss: 0.693081796169281
Epoch: 017, Train: 0.5396, Val: 0.5155, Test: 0.5344, Loss: 0.6929906010627747
Epoch: 018, Train: 0.5304, Val: 0.5175, Test: 0.5372, Loss: 0.6929558515548706
Epoch: 019, Train: 0.5353, Val: 0.5420, Test: 0.5374, Loss: 0.6928719282150269
Epoch: 020, Train: 0.5443, Val: 0.5569, Test: 0.5219, Loss: 0.6928818225860596
Epoch: 021, Train: 0.5439, Val: 0.5291, Test: 0.5486, Loss: 0.6928641200065613
Epoch: 022, Train: 0.5366, Val: 0.5287, Test: 0.5459, Loss: 0.6927971243858337
Epoch: 023, Train: 0.5313, Val: 0.5400, Test: 0.5477, Loss: 0.6928063035011292
Epoch: 024, Train: 0.5432, Val: 0.5306, Test: 0.5574, Loss: 0.6927868127822876
Epoch: 025, Train: 0.5360, Val: 0.5359, Test: 0.5440, Loss: 0.6928010582923889
Epoch: 026, Train: 0.5425, Val: 0.5373, Test: 0.5201, Loss: 0.692813515663147
Epoch: 027, Train: 0.5476, Val: 0.5447, Test: 0.5412, Loss: 0.6927600502967834
Epoch: 028, Train: 0.5377, Val: 0.5493, Test: 0.5632, Loss: 0.6926769614219666
Epoch: 029, Train: 0.5447, Val: 0.5579, Test: 0.5498, Loss: 0.6925768852233887
Epoch: 030, Train: 0.5531, Val: 0.5458, Test: 0.5538, Loss: 0.6925328969955444
Epoch: 031, Train: 0.5502, Val: 0.5608, Test: 0.5495, Loss: 0.6924236416816711
Epoch: 032, Train: 0.5685, Val: 0.5580, Test: 0.5468, Loss: 0.6924571990966797
Epoch: 033, Train: 0.5701, Val: 0.5536, Test: 0.5451, Loss: 0.6924594640731812
Epoch: 034, Train: 0.5480, Val: 0.5416, Test: 0.5586, Loss: 0.6924310922622681
Epoch: 035, Train: 0.5610, Val: 0.5585, Test: 0.5397, Loss: 0.6923143267631531
Epoch: 036, Train: 0.5472, Val: 0.5463, Test: 0.5490, Loss: 0.6921097040176392
Epoch: 037, Train: 0.5622, Val: 0.5556, Test: 0.5273, Loss: 0.6921440958976746
Epoch: 038, Train: 0.5449, Val: 0.5430, Test: 0.5551, Loss: 0.6920964121818542
Epoch: 039, Train: 0.5575, Val: 0.5524, Test: 0.5576, Loss: 0.6919888257980347
Epoch: 040, Train: 0.5427, Val: 0.5541, Test: 0.5403, Loss: 0.6920359134674072
Epoch: 041, Train: 0.5478, Val: 0.5588, Test: 0.5605, Loss: 0.691764235496521
Epoch: 042, Train: 0.5579, Val: 0.5612, Test: 0.5501, Loss: 0.6918411254882812
Epoch: 043, Train: 0.5679, Val: 0.5459, Test: 0.5517, Loss: 0.6918407082557678
Epoch: 044, Train: 0.5434, Val: 0.5483, Test: 0.5473, Loss: 0.6916317939758301
Epoch: 045, Train: 0.5485, Val: 0.5527, Test: 0.5550, Loss: 0.6913734674453735
Epoch: 046, Train: 0.5520, Val: 0.5572, Test: 0.5527, Loss: 0.691251814365387
Epoch: 047, Train: 0.5697, Val: 0.5536, Test: 0.5556, Loss: 0.6910400390625
Epoch: 048, Train: 0.5674, Val: 0.5626, Test: 0.5664, Loss: 0.691244900226593
Epoch: 049, Train: 0.5519, Val: 0.5604, Test: 0.5554, Loss: 0.6907708644866943
Epoch: 050, Train: 0.5556, Val: 0.5720, Test: 0.5574, Loss: 0.6903576850891113
Epoch: 051, Train: 0.5573, Val: 0.5688, Test: 0.5653, Loss: 0.6909399628639221
Epoch: 052, Train: 0.5678, Val: 0.5613, Test: 0.5536, Loss: 0.6901348233222961
Epoch: 053, Train: 0.5552, Val: 0.5795, Test: 0.5688, Loss: 0.6899186372756958
Epoch: 054, Train: 0.5662, Val: 0.5630, Test: 0.5643, Loss: 0.6895727515220642
Epoch: 055, Train: 0.5702, Val: 0.5625, Test: 0.5529, Loss: 0.6899831295013428
Epoch: 056, Train: 0.5701, Val: 0.5714, Test: 0.5617, Loss: 0.688920259475708
Epoch: 057, Train: 0.5670, Val: 0.5650, Test: 0.5717, Loss: 0.688893735408783
Epoch: 058, Train: 0.5651, Val: 0.5668, Test: 0.5672, Loss: 0.6887857913970947
Epoch: 059, Train: 0.5730, Val: 0.5687, Test: 0.5598, Loss: 0.6882851719856262
Epoch: 060, Train: 0.5730, Val: 0.5745, Test: 0.5768, Loss: 0.688106119632721
Epoch: 061, Train: 0.5701, Val: 0.5684, Test: 0.5683, Loss: 0.6876559853553772
Epoch: 062, Train: 0.5765, Val: 0.5755, Test: 0.5710, Loss: 0.6875297427177429
Epoch: 063, Train: 0.5779, Val: 0.5758, Test: 0.5827, Loss: 0.6867380738258362
Epoch: 064, Train: 0.5782, Val: 0.5670, Test: 0.5797, Loss: 0.68626868724823
Epoch: 065, Train: 0.5750, Val: 0.5858, Test: 0.5769, Loss: 0.6862733364105225
Epoch: 066, Train: 0.5778, Val: 0.5756, Test: 0.5804, Loss: 0.6857287287712097
Epoch: 067, Train: 0.5749, Val: 0.5798, Test: 0.5715, Loss: 0.685171365737915
Epoch: 068, Train: 0.5849, Val: 0.5790, Test: 0.5741, Loss: 0.6841467618942261
Epoch: 069, Train: 0.5831, Val: 0.5791, Test: 0.5880, Loss: 0.6842892169952393
Epoch: 070, Train: 0.5765, Val: 0.5782, Test: 0.5797, Loss: 0.6838349103927612
Epoch: 071, Train: 0.5860, Val: 0.5885, Test: 0.5847, Loss: 0.6830412149429321
Epoch: 072, Train: 0.5827, Val: 0.5856, Test: 0.5859, Loss: 0.6808038949966431
Epoch: 073, Train: 0.5878, Val: 0.5872, Test: 0.5836, Loss: 0.6804813146591187
Epoch: 074, Train: 0.5849, Val: 0.5877, Test: 0.5882, Loss: 0.6788002252578735
Epoch: 075, Train: 0.5950, Val: 0.5847, Test: 0.5861, Loss: 0.6783311367034912
Epoch: 076, Train: 0.5907, Val: 0.5925, Test: 0.5909, Loss: 0.6774953603744507
Epoch: 077, Train: 0.5903, Val: 0.5897, Test: 0.5954, Loss: 0.6758286356925964
Epoch: 078, Train: 0.5999, Val: 0.5948, Test: 0.5963, Loss: 0.675263524055481
Epoch: 079, Train: 0.6012, Val: 0.6020, Test: 0.5992, Loss: 0.6710941195487976
Epoch: 080, Train: 0.6011, Val: 0.5963, Test: 0.6017, Loss: 0.6713895201683044
Epoch: 081, Train: 0.6026, Val: 0.5997, Test: 0.6035, Loss: 0.6704581379890442
Epoch: 082, Train: 0.6122, Val: 0.6075, Test: 0.6132, Loss: 0.6695705056190491
Epoch: 083, Train: 0.6164, Val: 0.6143, Test: 0.6156, Loss: 0.6671017408370972
Epoch: 084, Train: 0.6198, Val: 0.6248, Test: 0.6218, Loss: 0.6689134836196899
Epoch: 085, Train: 0.6330, Val: 0.6274, Test: 0.6404, Loss: 0.6630488038063049
Epoch: 086, Train: 0.6432, Val: 0.6436, Test: 0.6440, Loss: 0.6611646413803101
Epoch: 087, Train: 0.6535, Val: 0.6534, Test: 0.6497, Loss: 0.6594260931015015
Epoch: 088, Train: 0.6574, Val: 0.6575, Test: 0.6556, Loss: 0.6585035920143127
Epoch: 089, Train: 0.6550, Val: 0.6719, Test: 0.6709, Loss: 0.6572673916816711
Epoch: 090, Train: 0.6725, Val: 0.6675, Test: 0.6728, Loss: 0.651466429233551
Epoch: 091, Train: 0.6772, Val: 0.6765, Test: 0.6786, Loss: 0.6508798599243164
Epoch: 092, Train: 0.6733, Val: 0.6770, Test: 0.6773, Loss: 0.6480381488800049
Epoch: 093, Train: 0.6733, Val: 0.6829, Test: 0.6780, Loss: 0.6487643718719482
Epoch: 094, Train: 0.6846, Val: 0.6789, Test: 0.6798, Loss: 0.6418784856796265
Epoch: 095, Train: 0.6845, Val: 0.6838, Test: 0.6803, Loss: 0.6384830474853516
Epoch: 096, Train: 0.6805, Val: 0.6882, Test: 0.6869, Loss: 0.6390049457550049
Epoch: 097, Train: 0.6846, Val: 0.6803, Test: 0.6791, Loss: 0.641418993473053
Epoch: 098, Train: 0.6776, Val: 0.6836, Test: 0.6817, Loss: 0.6370711922645569
Epoch: 099, Train: 0.6855, Val: 0.6783, Test: 0.6785, Loss: 0.6326021552085876
Epoch: 100, Train: 0.6744, Val: 0.6765, Test: 0.6750, Loss: 0.6325771808624268
Epoch: 101, Train: 0.6755, Val: 0.6802, Test: 0.6804, Loss: 0.6332057118415833
Epoch: 102, Train: 0.6774, Val: 0.6818, Test: 0.6858, Loss: 0.6326180696487427
Epoch: 103, Train: 0.6832, Val: 0.6878, Test: 0.6849, Loss: 0.6269603371620178
Epoch: 104, Train: 0.6846, Val: 0.6784, Test: 0.6855, Loss: 0.6200358867645264
Epoch: 105, Train: 0.6769, Val: 0.6822, Test: 0.6828, Loss: 0.6257349252700806
Epoch: 106, Train: 0.6778, Val: 0.6847, Test: 0.6811, Loss: 0.6317898631095886
Epoch: 107, Train: 0.6798, Val: 0.6795, Test: 0.6824, Loss: 0.6347403526306152
Epoch: 108, Train: 0.6757, Val: 0.6804, Test: 0.6797, Loss: 0.6143648028373718
Epoch: 109, Train: 0.6861, Val: 0.6798, Test: 0.6769, Loss: 0.6243528127670288
Epoch: 110, Train: 0.6838, Val: 0.6796, Test: 0.6907, Loss: 0.622592031955719
Epoch: 111, Train: 0.6899, Val: 0.6895, Test: 0.6906, Loss: 0.6148200631141663
Epoch: 112, Train: 0.6933, Val: 0.6927, Test: 0.6928, Loss: 0.6142141222953796
Epoch: 113, Train: 0.6904, Val: 0.6858, Test: 0.6943, Loss: 0.61095130443573
Epoch: 114, Train: 0.6854, Val: 0.6899, Test: 0.6904, Loss: 0.6084436774253845
Epoch: 115, Train: 0.6851, Val: 0.6866, Test: 0.6905, Loss: 0.6014653444290161
Epoch: 116, Train: 0.6855, Val: 0.6905, Test: 0.6917, Loss: 0.6062939763069153
Epoch: 117, Train: 0.6884, Val: 0.6863, Test: 0.6922, Loss: 0.6057735085487366
Epoch: 118, Train: 0.6912, Val: 0.6885, Test: 0.6918, Loss: 0.6048821210861206
Epoch: 119, Train: 0.6924, Val: 0.6887, Test: 0.6927, Loss: 0.5992241501808167
Epoch: 120, Train: 0.6963, Val: 0.6995, Test: 0.6926, Loss: 0.6004308462142944
Epoch: 121, Train: 0.6916, Val: 0.6929, Test: 0.6951, Loss: 0.6008142828941345
Epoch: 122, Train: 0.6885, Val: 0.6908, Test: 0.6898, Loss: 0.597372829914093
Epoch: 123, Train: 0.6924, Val: 0.6954, Test: 0.6960, Loss: 0.6026260852813721
Epoch: 124, Train: 0.6953, Val: 0.6937, Test: 0.6927, Loss: 0.5994287133216858
Epoch: 125, Train: 0.6991, Val: 0.6923, Test: 0.6964, Loss: 0.5926941633224487
Epoch: 126, Train: 0.6901, Val: 0.6915, Test: 0.6953, Loss: 0.5947172045707703
Epoch: 127, Train: 0.6929, Val: 0.6982, Test: 0.6940, Loss: 0.5826076865196228
Epoch: 128, Train: 0.6934, Val: 0.7004, Test: 0.6951, Loss: 0.5893241763114929
Epoch: 129, Train: 0.6926, Val: 0.6915, Test: 0.6912, Loss: 0.5897101163864136
Epoch: 130, Train: 0.6890, Val: 0.6911, Test: 0.6837, Loss: 0.5968517661094666
Epoch: 131, Train: 0.6877, Val: 0.6908, Test: 0.6850, Loss: 0.589328408241272
Epoch: 132, Train: 0.6943, Val: 0.6917, Test: 0.6922, Loss: 0.5831955671310425
Epoch: 133, Train: 0.6971, Val: 0.6953, Test: 0.7014, Loss: 0.5696879625320435
Epoch: 134, Train: 0.6930, Val: 0.6998, Test: 0.6988, Loss: 0.5866789817810059
Epoch: 135, Train: 0.6912, Val: 0.6847, Test: 0.6834, Loss: 0.5844400525093079
Epoch: 136, Train: 0.6843, Val: 0.6922, Test: 0.6905, Loss: 0.5795565843582153
Epoch: 137, Train: 0.6946, Val: 0.6935, Test: 0.6870, Loss: 0.5705973505973816
Epoch: 138, Train: 0.6994, Val: 0.6960, Test: 0.6919, Loss: 0.573519229888916
Epoch: 139, Train: 0.6969, Val: 0.6982, Test: 0.6976, Loss: 0.5754662156105042
Epoch: 140, Train: 0.7000, Val: 0.6936, Test: 0.6962, Loss: 0.5710124373435974
Epoch: 141, Train: 0.6977, Val: 0.6974, Test: 0.6912, Loss: 0.5696238279342651
Epoch: 142, Train: 0.6941, Val: 0.6926, Test: 0.6963, Loss: 0.5677017569541931
Epoch: 143, Train: 0.6944, Val: 0.6818, Test: 0.6934, Loss: 0.5729352235794067
Epoch: 144, Train: 0.6935, Val: 0.6907, Test: 0.6890, Loss: 0.5693244338035583
Epoch: 145, Train: 0.6975, Val: 0.6938, Test: 0.7049, Loss: 0.5650897026062012
Epoch: 146, Train: 0.6992, Val: 0.7005, Test: 0.6973, Loss: 0.5605905652046204
Epoch: 147, Train: 0.6993, Val: 0.7074, Test: 0.6988, Loss: 0.5520856380462646
Epoch: 148, Train: 0.7046, Val: 0.6977, Test: 0.6991, Loss: 0.5596573948860168
Epoch: 149, Train: 0.7030, Val: 0.6995, Test: 0.7039, Loss: 0.5518948435783386
Epoch: 150, Train: 0.7009, Val: 0.7009, Test: 0.7007, Loss: 0.5539484620094299
Epoch: 151, Train: 0.7032, Val: 0.7044, Test: 0.7017, Loss: 0.5543316006660461
Epoch: 152, Train: 0.7123, Val: 0.7061, Test: 0.7035, Loss: 0.554809033870697
Epoch: 153, Train: 0.7123, Val: 0.7023, Test: 0.7083, Loss: 0.5473509430885315
Epoch: 154, Train: 0.7083, Val: 0.7049, Test: 0.7143, Loss: 0.5487227439880371
Epoch: 155, Train: 0.7078, Val: 0.7112, Test: 0.7117, Loss: 0.5427062511444092
Epoch: 156, Train: 0.7134, Val: 0.7131, Test: 0.7106, Loss: 0.5389374494552612
Epoch: 157, Train: 0.7166, Val: 0.7156, Test: 0.7157, Loss: 0.5452955365180969
Epoch: 158, Train: 0.7201, Val: 0.7202, Test: 0.7172, Loss: 0.5453580021858215
Epoch: 159, Train: 0.7228, Val: 0.7193, Test: 0.7295, Loss: 0.5366584658622742
Epoch: 160, Train: 0.7202, Val: 0.7210, Test: 0.7190, Loss: 0.5381338596343994
Epoch: 161, Train: 0.7270, Val: 0.7233, Test: 0.7250, Loss: 0.5340146422386169
Epoch: 162, Train: 0.7286, Val: 0.7189, Test: 0.7235, Loss: 0.5423163771629333
Epoch: 163, Train: 0.7274, Val: 0.7305, Test: 0.7249, Loss: 0.5440378785133362
Epoch: 164, Train: 0.7344, Val: 0.7310, Test: 0.7325, Loss: 0.5293395519256592
Epoch: 165, Train: 0.7375, Val: 0.7351, Test: 0.7351, Loss: 0.5210434198379517
Epoch: 166, Train: 0.7452, Val: 0.7383, Test: 0.7383, Loss: 0.533065140247345
Epoch: 167, Train: 0.7420, Val: 0.7399, Test: 0.7403, Loss: 0.5240846276283264
Epoch: 168, Train: 0.7326, Val: 0.7400, Test: 0.7425, Loss: 0.5234970450401306
Epoch: 169, Train: 0.7409, Val: 0.7451, Test: 0.7399, Loss: 0.5360404253005981
Epoch: 170, Train: 0.7472, Val: 0.7486, Test: 0.7388, Loss: 0.5209401845932007
Epoch: 171, Train: 0.7507, Val: 0.7517, Test: 0.7532, Loss: 0.5266425013542175
Epoch: 172, Train: 0.7503, Val: 0.7514, Test: 0.7527, Loss: 0.5186090469360352
Epoch: 173, Train: 0.7505, Val: 0.7531, Test: 0.7534, Loss: 0.5280333757400513
Epoch: 174, Train: 0.7552, Val: 0.7526, Test: 0.7503, Loss: 0.5169417262077332
Epoch: 175, Train: 0.7522, Val: 0.7548, Test: 0.7490, Loss: 0.5271726250648499
Epoch: 176, Train: 0.7538, Val: 0.7506, Test: 0.7491, Loss: 0.525481104850769
Epoch: 177, Train: 0.7562, Val: 0.7601, Test: 0.7537, Loss: 0.5083685517311096
Epoch: 178, Train: 0.7586, Val: 0.7582, Test: 0.7592, Loss: 0.5087206363677979
Epoch: 179, Train: 0.7539, Val: 0.7575, Test: 0.7601, Loss: 0.524002194404602
Epoch: 180, Train: 0.7592, Val: 0.7525, Test: 0.7563, Loss: 0.5085655450820923
Epoch: 181, Train: 0.7619, Val: 0.7643, Test: 0.7559, Loss: 0.5135030746459961
Epoch: 182, Train: 0.7499, Val: 0.7533, Test: 0.7544, Loss: 0.5254723429679871
Epoch: 183, Train: 0.7564, Val: 0.7625, Test: 0.7638, Loss: 0.5017015337944031
Epoch: 184, Train: 0.7559, Val: 0.7550, Test: 0.7587, Loss: 0.5086198449134827
Epoch: 185, Train: 0.7640, Val: 0.7551, Test: 0.7635, Loss: 0.5079658627510071
Epoch: 186, Train: 0.7547, Val: 0.7546, Test: 0.7589, Loss: 0.5196709632873535
Epoch: 187, Train: 0.7573, Val: 0.7547, Test: 0.7511, Loss: 0.5095032453536987
Epoch: 188, Train: 0.7583, Val: 0.7568, Test: 0.7608, Loss: 0.5064508318901062
Epoch: 189, Train: 0.7639, Val: 0.7599, Test: 0.7582, Loss: 0.506013810634613
Epoch: 190, Train: 0.7581, Val: 0.7564, Test: 0.7571, Loss: 0.5119727849960327
Epoch: 191, Train: 0.7545, Val: 0.7540, Test: 0.7552, Loss: 0.4914553463459015
Epoch: 192, Train: 0.7569, Val: 0.7505, Test: 0.7552, Loss: 0.5008942484855652
Epoch: 193, Train: 0.7584, Val: 0.7565, Test: 0.7597, Loss: 0.4939987063407898
Epoch: 194, Train: 0.7549, Val: 0.7504, Test: 0.7539, Loss: 0.49491646885871887
Epoch: 195, Train: 0.7568, Val: 0.7509, Test: 0.7587, Loss: 0.4982417821884155
Epoch: 196, Train: 0.7530, Val: 0.7523, Test: 0.7503, Loss: 0.506991446018219
Epoch: 197, Train: 0.7558, Val: 0.7564, Test: 0.7518, Loss: 0.5017726421356201
Epoch: 198, Train: 0.7604, Val: 0.7544, Test: 0.7535, Loss: 0.498566210269928
Epoch: 199, Train: 0.7577, Val: 0.7596, Test: 0.7664, Loss: 0.4957054555416107
Saving Link Classification Model Predictions
Best Model Accuracies Train: 0.7554, Val: 0.7567, Test: 0.7609
## Question 5: What is the maximum ROC-AUC score you get for your best_model on the test set? (13 points)
After training your model, download and submit your best model's prediction file: *CORA-Link-Prediction.csv*.
As before, you can view this file by clicking on the *Folder* icon on the left side panel.
# Submission
You will need to submit four files on Gradescope to complete this notebook.
1. Your completed *XCS224W_Colab3.ipynb*. From the "File" menu select "Download .ipynb" to save a local copy of your completed Colab.
2. *CORA-Node-GraphSage.csv*
3. *CORA-Node-GAT.csv*
4. *CORA-Link-Prediction.csv*
Download the csv files by selecting the *Folder* icon on the left panel.
To submit your work, zip the files downloaded in steps 1-4 above and submit to Gradescope. **NOTE:** DO NOT rename any of the downloaded files.
|
NEUE is part of the Student Recruitment and Retention Center.
Natives Empowering Through Unity and Education (NEUE), formerly known as American Indians for Recruitment and Retention (AIRR), was created by the Native American Student Union (NASU) to address the needs of the UC Davis American Indian population. NEUE is dedicated to reversing low graduation, recruitment and retention rates by utilizing a holistic approach to student development.
Efforts include cultural identity workshops, support groups, community dialogue circles, retreats, and leadership development. NEUE also assists with several on-campus Native events.
NEUE is located in the basement of South Hall.
|
module Proofs.OrderTheory
import Common.Abbrev
import Common.Util
import Specifications.Order
%default total
%access export
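-- If rel is reflexive, equal elements are related: from a = b we get rel a b.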
rewriteReflexive : isReflexive rel -> (a,b : s) -> a = b -> rel a b
rewriteReflexive {rel} spec a b eq = rewriteRelation rel Refl eq (spec a)
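-- In a total order, Not (leq a b) forces leq b a and rules out a = b.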
orderContra : TotalOrderSpec leq -> (a,b : s) ->
Not (leq a b) -> (leq b a, Not (a = b))
orderContra spec a b given = (o2, contraposition o3 given) where
o1 : Either (leq a b) (leq b a)
o1 = totalOrder spec a b
o2 : leq b a
o2 = o1 `butNotLeft` given
o3 : a = b -> leq a b
o3 = rewriteReflexive (reflexive (partialOrder spec)) a b
|
SUBROUTINE seva2d(bkx,lx,bky,ly,cs,nx,ny,xl,yl,fs,ier,icalc)
c------------------------------------------------------------------------------
c-- S.Thompson 92/05/18 --
c-- Bicubic spline routines. --
c-- Put together with routines from E.Solano. --
c-- SMWolfe 93/12/17 --
c-- Modified to avoid over-writing the original array. --
c-- 94/04/28 --
c-- Updated. --
c------------------------------------------------------------------------------
c Inputs:
c
c cs - array of spline coefficients of dimension (kubicx,
c lubicx,kubicy,lubicy) from sets2d.
c
c bkx, bky - interval coefficients of length lubicx+1 and lubicy+1 from
c sets2d.
c
c lx, ly - number of terms in bkx and bky from sets2d.
c
c xl, yl - the point at which interpolations are desired.
c
c nx, ny - grid dimensions
c
c Outputs:
c
c fs - vector containing results depending on icalc:
c icalc fs
c 1 f
c 2 fx
c 3 fy
c 4 fxy
c 5 fxx
c 6 fyy
c
c ier - error parameter.
c
c-------------------------------------------------------------------------------
USE stel_kinds, ONLY: rprec
USE spline_parm
IMPLICIT NONE
c
INTEGER :: ier, lx, ly, nx, ny, icalc
INTEGER :: lef, ibk, jj, mflag, ndummy
c real*8 cs(kubicx,lubicx,kubicy,lubicy),xl,yl,fs(6),bkx(1),bky(1)
REAL(rprec) :: xl,yl,fs(6),bkx(lx),bky(ly)
REAL(rprec) :: cs(kubicx,nx-kubicx+1,kubicy,ny-kubicy+1)
REAL(rprec) :: ppvalw
c
c Local Variable Specifications:
c
REAL(rprec) :: work0(4),work1(4),work2(4), h
INTEGER, PARAMETER :: n00 = 0, n11 = 1, n22 = 2
c
c Evaluate function and its partial derivatives at (XL, YL):
c
c
c First do all the lookup and interpolation stuff.
c This is the most time consuming part of the evaluation, so
c don't do more than needed.
c
CALL interv(bky,ly,yl,lef,mflag)
CALL interv(bkx,lx,xl,ibk,ndummy)
h = xl - bkx(ibk)
DO 41 jj=1,4
work0(jj) = ppvalw(cs(1,ibk,jj,lef),h,n00)
      IF (icalc.eq.1) GOTO 41
      work1(jj) = ppvalw(cs(1,ibk,jj,lef),h,n11)
      IF (icalc.le.4) GOTO 41
work2(jj) = ppvalw(cs(1,ibk,jj,lef),h,n22)
41 CONTINUE
h = yl - bky(lef)
fs(1) = ppvalw(work0,h,n00)
IF (icalc.eq.1) RETURN
fs(2) = ppvalw(work1,h,n00)
IF (icalc.eq.2) RETURN
fs(3) = ppvalw(work0,h,n11)
IF (icalc.eq.3) RETURN
fs(4) = ppvalw(work1,h,n11)
IF (icalc.eq.4) RETURN
fs(5) = ppvalw(work2,h,n00)
IF (icalc.eq.5) RETURN
fs(6) = ppvalw(work0,h,n22)
C
RETURN
END
SUBROUTINE sets2d(s,cs,x,nx,bkx,lx,y,ny,bky,ly,wk,ier)
c------------------------------------------------------------------------------
c-- S.Thompson 92/05/18 --
c-- Bicubic spline routines. --
c-- Put together with routines from E.Solano. --
c-- SMWolfe 93/12/17 --
c-- Modified to avoid over-writing the original array. --
c-- 94/04/28 --
c-- Updated. --
c------------------------------------------------------------------------------
c Inputs:
c
c s - nx by ny array containing the function values at (x,y).
c This is a 1-d array, k=(i-1)*ny+j.
c Modified ordering k = (j-1)*nx+i
c
c x, y - (x,y) location, arrays of length nx and ny.
c
c Outputs:
c
c cs - array of spline coefficients of dimension (kubicx,
c lubicx,kubicy,lubicy).
c
c bkx, bky - interval coefficients of length lubicx+1 and lubicy+1.
c
c lx, ly - number of terms in bkx and bky.
c
c ier - error parameter.
c
c Work arrays:
c
c wk - of dimension at least nx by ny.
c------------------------------------------------------------------------------
USE stel_kinds, ONLY: rprec
USE spline_parm
c
c DIMENSION s(1), x(nx), y(ny), wk(nx,ny),
c . xknot(kubicx + nw), yknot(kubicy + nh),
c . cs(kubicx, lubicx, kubicy, lubicy),
c . bkx(lubicx + 1), bky(lubicy + 1)
INTEGER :: i, j, k, lx, nx, ny, ly, ier
REAL(rprec) :: s(nx*ny), x(nx), y(ny), wk(nx,ny),
. xknot(kubicx + nx), yknot(kubicy + ny),
. cs(kubicx, nx-kubicx+1, kubicy, ny-kubicy+1),
. bkx(nx-kubicx + 2), bky(ny-kubicy + 2)
c
c Set up knots:
c
c WRITE (6,*) x(38),y(42),s(41*nx+38)*x(38)
CALL eknot (nx, x, kubicx, xknot)
CALL eknot (ny, y, kubicy, yknot)
c
c Save the original, use the work array
c
DO 10 I=1,NX
DO 10 j=1,ny
c k=(i-1)*ny+j
k=(j-1)*nx+i
10 wk(i,j) = s(k)
c
c Calculate spline coefficients:
c
CALL spl2bc (x, y, nx, ny,xknot, yknot, wk)
c
c Coefficients stored in bkx, bky, and c:
c
CALL spl2pp (nx, ny, xknot, yknot, wk, bkx, lx, bky, ly, cs)
c
RETURN
END
SUBROUTINE spl2bc(rgrid,zgrid,mw,mh,rknot,zknot,copy)
calculates the b-spline coefficients
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
PARAMETER (nw=257,nh=257,krord=4,kzord=4)
REAL(rprec) :: rgrid(mw),zgrid(mh)
c DIMENSION rknot(nw+krord),zknot(nh+kzord),copy(mw,mh)
REAL(rprec) :: rknot(mw+krord),zknot(mh+kzord),copy(mw,mh)
c------------------------------------------------------------------
c-- change DIMENSION of work2 and work3 from nw to nh --
c-- to ensure the cases when nh > nw ll, 93/04/01 --
c------------------------------------------------------------------
DIMENSION work1(mw,mh),work2(mh),work3(mh,2*krord-1)
CALL spli2d(rgrid,copy,rknot,mw,krord,mh,work2,work3,work1,iflag)
IF (iflag.ne.1) PRINT *,' error in first spli2d, iflag=',iflag
CALL spli2d(zgrid,work1,zknot,mh,kzord,mw,work2,work3,copy,iflag)
IF (iflag.ne.1) PRINT *,' error in second spli2d, iflag=',iflag
RETURN
END
SUBROUTINE spl2pp(mw,mh,rknot,zknot,copy,breakr,lr,breakz,lz,coef)
c translates to pp representation
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
PARAMETER (nw=257,nh=257,krord=4,kzord=4)
c PARAMETER (lr0=nw-krord+1,lz0=nh-kzord+1)
c DIMENSION rknot(nw+krord),zknot(nh+kzord)
c DIMENSION copy(mw,mh),coef(krord,lr0,kzord,lz0)
c DIMENSION breakr(lr0+1),breakz(lz0+1)
c DIMENSION work4(krord,nw,nh), work5(nh,krord,lr0)
c * ,work6(kzord,kzord,nw,krord)
DIMENSION rknot(mw+krord),zknot(mh+kzord)
DIMENSION copy(mw,mh),coef(krord,mw-krord+1,kzord,mh-kzord+1)
DIMENSION breakr(mw-krord+2),breakz(mh-kzord+2)
DIMENSION work4(krord,nw,nh), work5(mh,krord,mw-krord+1)
* ,work6(kzord,kzord,nw,krord)
EQUIVALENCE (work4,work6)
CALL bspp2d(rknot,copy,mw,krord,mh,work4,breakr,work5,lr)
ndum=lr*krord
CALL bspp2d(zknot,work5,mh,kzord,ndum ,work6,breakz,coef,lz)
RETURN
END
SUBROUTINE eknot(n,x,k,xk)
c given the ordered data points x(1)<...<x(n), this SUBROUTINE generates
c a knot sequence with not-a-knot end conditions (like BSNAK from IMSL)
c Some of this is discussed in de Boor(1978), page 211.
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
DIMENSION x(n),xk(n+k)
INTEGER kh
c
DO i=1,k
xk(i)=x(1)
ii=i+n
xk(ii)= x(n)+1.e-5
END DO
kh=k/2
k2=kh+kh
IF (k2.eq.k) THEN
c even k, place knots at data points
DO i=k+1,n
xk(i)=x(i-kh)
END DO
ELSE
c odd k, place knots in between data points
DO i=k+1,n
xk(i)=.5*(x(i-kh)+x(i-1-kh))
END DO
END IF
RETURN
END
SUBROUTINE spli2d ( tau, gtau, t, n, k, m, work, q, bcoef, iflag )
calls bsplvb, banfac/slv
c this is an extended version of splint , for the use in tensor prod-
c uct interpolation.
c
c spli2d produces the b-spline coeff.s bcoef(j,.) of the spline of
c order k with knots t (i), i=1,..., n + k , which takes on the
c value gtau (i,j) at tau (i), i=1,..., n ; j=1,..., m .
c
c****** i n p u t ******
c tau array of length n , containing data point abscissae.
c a s s u m p t i o n . . . tau is strictly increasing
c gtau(.,j) corresponding array of length n , containing data point
c ordinates, j=1,...,m
c t knot sequence, of length n+k
c n number of data points and dimension of spline space s(k,t)
c k order of spline
c m number of data sets
c
c****** w o r k a r e a ******
c work a vector of length n
c
c****** o u t p u t ******
c q array of order (n,2*k-1), containing the triangular factoriz-
c ation of the coefficient matrix of the linear system for the b-
c coefficients of the spline interpolant.
c the b-coeffs for the interpolant of an additional data set
c (tau(i),htau(i)), i=1,...,n with the same data abscissae can
c be obtained without going through all the calculations in this
c routine, simply by loading htau into bcoef and then execut-
c ing the call banslv ( q, n, n, 2*k-1, k, bcoef )
c bcoef the b-coefficients of the interpolant, of length n
c iflag an integer indicating success (= 1) or failure (= 2)
c the linear system to be solved is (theoretically) invertible if
c and only if
c t(i) .lt. tau(i) .lt. tau(i+k), all i.
c violation of this condition is certain to lead to iflag = 2 .
c
c****** m e t h o d ******
c the i-th equation of the linear system a*bcoef = b for the b-co-
c effs of the interpolant enforces interpolation at tau(i), i=1,...,n.
c hence, b(i) = gtau(i), all i, and a is a band matrix with 2k-1
c bands (if it is invertible).
c the matrix a is generated row by row and stored, diagonal by di-
c agonal, in the c o l u m n s of the array q , with the main diag-
c onal going into column k . see comments in the program below.
c the banded system is then solved by a call to banfac (which con-
c structs the triangular factorization for a and stores it again in
c q ), followed by a call to banslv (which then obtains the solution
c bcoef by substitution).
c banfac does no pivoting, since the total positivity of the matrix
c a makes this unnecessary.
c
c integer iflag,k,m,n,i,ilp1mx,j,jj,kpkm1,left,np1
c real*8 bcoef(m,n),gtau(n,m),q(n,7),t(n+k),tau(n),work(n),taui
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
DIMENSION bcoef(m,n),gtau(n,m),q(n,2*k-1),t(n+k),tau(n),work(n)
c
nnn=1
np1 = n + 1
kpkm1 = 2*k - 1
left = k
c
c *** loop over i to construct the n interpolation equations
DO 30 i=1,n
iindex=i
taui = tau(iindex)
ilp1mx = MIN(iindex+k,np1)
c *** zero out all entries in row i of a (in the 2k-1 bands)
DO 13 j=1,kpkm1
13 q(iindex,j) = 0.
c *** find left in the closed interval (i,i+k-1) such that
c t(left) .le. tau(i) .lt. t(left+1)
c matrix is singular if this is not possible
left = MAX(left,i)
IF (taui .lt. t(left)) GOTO 998
15 IF (taui .lt. t(left+1)) GOTO 16
left = left + 1
IF (left .lt. ilp1mx) GOTO 15
left = left - 1
IF (taui .gt. t(left+1)) GOTO 998
c *** the i-th equation enforces interpolation at taui, hence
c a(i,j) = b(j,k,t)(taui), all j. only the k entries with j =
c left-k+1,...,left actually might be nonzero. these k numbers
c are returned, in work (used for temp.storage here), by the
c following
16 CALL bsplvb ( t, k, nnn, taui, left, work )
c we therefore want work(j) = b(left-k+j)(taui) to go into
c a(i,left-k+j), i.e., into q(i,left-i+j), since the i-th row of
c a is so stored in the i-th row of q that the (i,i)-entry of
c a goes into the k-th entry of q.
jj = left - iindex
DO 29 j=1,k
jj = jj+1
q(iindex,jj) = work(j)
29 CONTINUE
30 CONTINUE
c
c ***obtain factorization of a , stored again in q.
CALL banfac ( q, n, n, kpkm1, k, iflag )
GOTO (40,999), iflag
c *** solve a*bcoef = gtau by backsubstitution
40 DO 50 j=1,m
DO 41 i=1,n
41 work(i) = gtau(i,j)
CALL banslv ( q, n, n, kpkm1, k, work )
DO 50 i=1,n
50 bcoef(j,i) = work(i)
RETURN
998 iflag = 2
999 PRINT 699
699 FORMAT(41h linear system in splint not invertible)
RETURN
END
SUBROUTINE bspp2d ( t, bcoef, n, k, m, scrtch, break, coef, l )
calls bsplvb
c this is an extended version of bsplpp for use with tensor products
c
converts the b-representation t, bcoef(.,j), n, k of some spline into
c its pp-representation break, coef(j,.,.), l, k ; j=1, ..., m .
c
c****** i n p u t ******
c t knot sequence, of length n+k
c bcoef(.,j) b-spline coefficient sequence, of length n ;j=1,...,m
c n length of bcoef and dimension of spline space s(k,t)
c k order of the spline
c m number of data sets
c
c****** w o r k a r e a ******
c scrtch of size (k,k,m), needed to contain bcoeffs of a piece of
c the spline and its k-1 derivatives for each of the m sets
c
c****** o u t p u t ******
c break breakpoint sequence, of length l+1, contains (in increasing
c order) the distinct points in the sequence t(k), ..., t(n+1)
c coef(mm,.,.) array of size (k,n), with coef(mm,i,j) = (i-1)st der-
c ivative of mm-th spline at break(j) from the right, mm=1,.,m
c l number of polynomial pieces which make up the spline in the
c interval (t(k), t(n+1))
c
c****** m e t h o d ******
c for each breakpoint interval, the k relevant b-coeffs of the
c spline are found and then differenced repeatedly to get the b-coeffs
c of all the derivatives of the spline on that interval. the spline and
c its first k-1 derivatives are then evaluated at the left end
c point of that interval, using bsplvb repeatedly to obtain the val-
c ues of all b-splines of the appropriate order at that point.
c
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
PARAMETER (kmax=4)
INTEGER k,l,m,n, i,j,jp1,kmj,left
REAL(rprec) :: bcoef(n,m), break(*), coef(m,k,*),scrtch(k,k,m),
* t(n+k), biatx(kmax)
REAL(rprec) :: diff,fkmj,sum
c
n11=1
n22=2
l = 0
break(1) = t(k)
DO 50 left=k,n
c find the next nontrivial knot interval.
IF (t(left+1) .eq. t(left)) GOTO 50
l = l + 1
break(l+1) = t(left+1)
IF (k .gt. 1) GOTO 9
DO 5 mm=1,m
5 coef(mm,1,l) = bcoef(left,mm)
GOTO 50
c store the k b-spline coeff.s relevant to current knot interval
c in scrtch(.,1) .
9 DO 10 i=1,k
DO 10 mm=1,m
10 scrtch(i,1,mm) = bcoef(left-k+i,mm)
c for j=1,...,k-1, compute the k-j b-spline coeff.s relevant to
c current knot interval for the j-th derivative by differencing
c those for the (j-1)st derivative, and store in scrtch(.,j+1) .
DO 20 jp1=2,k
j = jp1 - 1
kmj = k - j
fkmj = REAL(kmj, rprec)
DO 20 i=1,kmj
diff = (t(left+i) - t(left+i - kmj))/fkmj
IF (diff .le. 0.) GOTO 20
DO 15 mm=1,m
15 scrtch(i,jp1,mm) =
* (scrtch(i+1,j,mm) - scrtch(i,j,mm))/diff
20 CONTINUE
c starting with the one b-spline of order 1 not zero at t(left),
c find the values at t(left) of the j+1 b-splines of order j+1
c not identically zero there from those of order j, then combine
c with the b-spline coeff.s found earlier to compute the (k-j)-
c th derivative at t(left) of the given spline.
CALL bsplvb ( t, n11, n11, t(left), left, biatx )
DO 25 mm=1,m
25 coef(mm,k,l) = scrtch(1 ,k,mm)
DO 30 jp1=2,k
CALL bsplvb ( t, jp1, n22, t(left), left, biatx )
kmj = k+1 - jp1
DO 30 mm=1,m
sum = 0.
DO 28 i=1,jp1
28 sum = biatx(i)*scrtch(i,kmj,mm) + sum
30 coef(mm,kmj,l) = sum
50 CONTINUE
RETURN
END
SUBROUTINE bsplvb ( t, jhigh, index, x, left, biatx )
calculates the value of all possibly nonzero b-splines at x of order
c
c jout = max( jhigh , (j+1)*(index-1) )
c
c with knot sequence t .
c
c****** i n p u t ******
c t.....knot sequence, of length left + jout , assumed to be nonde-
c creasing. a s s u m p t i o n . . . .
c t(left) .lt. t(left + 1) .
c d i v i s i o n b y z e r o will result if t(left) = t(left+1)
c jhigh,
c index.....integers which determine the order jout = max(jhigh,
c (j+1)*(index-1)) of the b-splines whose values at x are to
c be returned. index is used to avoid recalculations when seve-
c ral columns of the triangular array of b-spline values are nee-
c ded (e.g., in bvalue or in bsplvd ). precisely,
c if index = 1 ,
c the calculation starts from scratch and the entire triangular
c array of b-spline values of orders 1,2,...,jhigh is generated
c order by order , i.e., column by column .
c if index = 2 ,
c only the b-spline values of order j+1, j+2, ..., jout are ge-
c nerated, the assumption being that biatx , j , deltal , deltar
c are, on entry, as they were on exit at the previous call.
c in particular, if jhigh = 0, then jout = j+1, i.e., just
c the next column of b-spline values is generated.
c
c  w a r n i n g . . .   the restriction   jout .le. jmax  (= 4 in
c        this version; see the parameter statement below)  is imposed
c        arbitrarily by the dimension statement for  deltal  and
c        deltar  below, but is  n o w h e r e  c h e c k e d  for .
c
c x.....the point at which the b-splines are to be evaluated.
c left.....an integer chosen (usually) so that
c t(left) .le. x .le. t(left+1) .
c
c****** o u t p u t ******
c biatx.....array of length jout , with biatx(i) containing the val-
c ue at x of the polynomial of order jout which agrees with
c the b-spline b(left-jout+i,jout,t) on the interval (t(left),
c t(left+1)) .
c
c****** m e t h o d ******
c the recurrence relation
c
c x - t(i) t(i+j+1) - x
c b(i,j+1)(x) = -----------b(i,j)(x) + ---------------b(i+1,j)(x)
c t(i+j)-t(i) t(i+j+1)-t(i+1)
c
c is used (repeatedly) to generate the (j+1)-vector b(left-j,j+1)(x),
c ...,b(left,j+1)(x) from the j-vector b(left-j+1,j)(x),...,
c b(left,j)(x), storing the new values in biatx over the old. the
c facts that
c b(i,1) = 1 if t(i) .le. x .lt. t(i+1)
c and that
c b(i,j)(x) = 0 unless t(i) .le. x .lt. t(i+j)
c are used. the particular organization of the calculations follows al-
c gorithm (8) in chapter x of the text.
c
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
PARAMETER(jmax = 4)
INTEGER :: index, jhigh, left, i, j=1, jp1
REAL(rprec) :: x,saved,term
c real*8 biatx(jhigh),t(1),x,
DIMENSION deltal(jmax),deltar(jmax)
DIMENSION biatx(jhigh), t(left+jhigh)
current fortran standard makes it impossible to specify the length of
c t and of biatx precisely without the introduction of otherwise
c superfluous additional arguments.
SAVE deltal,deltar ! (valid in fortran 77)
c
GOTO (10,20), index
10 j = 1
biatx(1) = 1.
IF (j .ge. jhigh) GOTO 99
c
20 jp1 = j + 1
deltar(j) = t(left+j) - x
deltal(j) = x - t(left+1-j)
saved = 0.
DO 26 i=1,j
term = biatx(i)/(deltar(i) + deltal(jp1-i))
biatx(i) = saved + deltar(i)*term
26 saved = deltal(jp1-i)*term
biatx(jp1) = saved
j = jp1
IF (j .lt. jhigh) GOTO 20
c
99 RETURN
END
FUNCTION ppvalw (coef, x, jd )
C-----------------------------------------------------------------------
C Modified for optimization by S.J. Thompson, 30-Aug-1993
c Revised to eliminate call to interv by S.M.Wolfe, 17-Dec-1993
c and to use ASF's for evaluation
c This routine performs only the innermost guts of the spline evaluation
c Assumes k=4 (cubic spline only). No other cases considered.
c does not call interv
calculates value at x of jd-th derivative of pp fct from pp-repr
c
c****** i n p u t ****** to PPVALU, on which this is based.
c break, coef, l, k.....forms the pp-representation of the function f
c to be evaluated. specifically, the j-th derivative of f is
c given by
c
c (d**j)f(x) = coef(j+1,i) + h*(coef(j+2,i) + h*( ... (coef(k-1,i) +
c + h*coef(k,i)/(k-j-1))/(k-j-2) ... )/2)/1
c
c with h = x - break(i), and
c
c i = max( 1 , max( j ; break(j) .le. x , 1 .le. j .le. l ) ).
c
c x.....the point at which to evaluate.
c as used here, x is the distance from the break, not the absolute
c position.
c jd.....integer*4 giving the order of the derivative to be evaluat-
c ed. a s s u m e d to be zero or positive.
c
c****** o u t p u t ******
c ppvalw.....the value of the (jd)-th derivative of f at x.
c
c****** m e t h o d ******
c     in the original ppvalu, the interval index  i , appropriate for
c     x , is found through a call to  interv .  this modified version
c     does  n o t  call interv: the caller locates the interval and
c     passes  x  as the offset from the break.  the formula above for
c     the jd-th derivative of  f  is then evaluated (by nested
c     multiplication).
c
C-----------------------------------------------------------------------
C Variable declarations.
C-----------------------------------------------------------------------
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
INTEGER, INTENT(in) :: jd
REAL(rprec) :: ppvalw,x
REAL(rprec) :: coef(4)
c----------------------------------------------------------------------
c ASF's may be slightly more efficient than the alternative
c----------------------------------------------------------------------
d2(xx) = coef(4)*xx + coef(3)
d1(xx) = (coef(4)*xx/2 + coef(3))*xx + coef(2)
d0(xx) = ((coef(4)*xx/3 + coef(3))*xx/2 +
. coef(2))*xx + coef(1)
C-----------------------------------------------------------------------
C Derivatives of order k or higher are identically zero.
C-----------------------------------------------------------------------
C Evaluate jd-th derivative of i-th polynomial piece at x .
C-----------------------------------------------------------------------
GOTO(1,2,3) jd+1
ppvalw = 0.
PRINT *, 'Error (ppvalw): JD must be 0, 1, or 2.'
PRINT *, 'Execution terminated.'
RETURN
1 ppvalw = d0(x) ! k = 4 , jd = 0
RETURN
2 ppvalw = d1(x) ! k = 4 , jd = 1
RETURN
3 ppvalw = d2(x) ! k = 4 , jd = 2
RETURN
END
C
SUBROUTINE banslv ( a, nrow, n, ndiag, middle, b )
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
DIMENSION a(nrow,ndiag),b(n)
IF (n .eq. 1) GOTO 21
ilo = middle - 1
IF (ilo .lt. 1) GOTO 21
DO 19 i=2,n
jmax = MIN(i-1,ilo)
DO 19 j=1,jmax
19 b(i) = b(i) - b(i-j)*a(i,middle-j)
c
21 ihi = ndiag-middle
DO 30 i=n,1,-1
jmax = MIN(n-i,ihi)
IF (jmax .lt. 1) GOTO 30
DO 25 j=1,jmax
25 b(i) = b(i) - b(i+j)*a(i,middle+j)
30 b(i) = b(i)/a(i,middle)
RETURN
END
SUBROUTINE banfac ( a, nrow, n, ndiag, middle, iflag )
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
DIMENSION a(nrow,ndiag)
iflag = 1
ilo = middle - 1
IF (ilo) 999,10,19
10 DO 11 i=1,n
IF(a(i,1) .eq. 0.) GOTO 999
11 CONTINUE
RETURN
19 ihi = ndiag - middle
IF (ihi) 999,20,29
20 DO 25 i=1,n
IF (a(i,middle) .eq. 0.) GOTO 999
jmax = MIN(ilo,n-i)
IF (jmax .lt. 1) GOTO 25
DO 23 j=1,jmax
23 a(i+j,middle-j) = a(i+j,middle-j)/a(i,middle)
25 CONTINUE
RETURN
29 DO 50 i=1,n
diag = a(i,middle)
IF (diag .eq. 0.) GOTO 999
jmax = MIN(ilo,n-i)
IF(jmax .lt. 1) GOTO 50
kmax = MIN(ihi,n-i)
DO 33 j=1,jmax
mmj = middle-j
a(i+j,mmj) = a(i+j,mmj)/diag
DO 33 k=1,kmax
33 a(i+j,mmj+k) = a(i+j,mmj+k) - a(i+j,mmj)*a(i,middle+k)
50 CONTINUE
RETURN
999 iflag = 2
RETURN
END
SUBROUTINE interv ( xt, lxt, x, left, mflag )
computes left = max( i ; 1 .le. i .le. lxt .and. xt(i) .le. x ) .
c
c****** i n p u t ******
c xt.....a real*8 sequence, of length lxt , assumed to be nondecreasing
c lxt.....number of terms in the sequence xt .
c x.....the point whose location with respect to the sequence xt is
c to be determined.
c
c****** o u t p u t ******
c left, mflag.....both integers, whose value is
c
c 1 -1 if x .lt. xt(1)
c i 0 if xt(i) .le. x .lt. xt(i+1)
c lxt 1 if xt(lxt) .le. x
c
c in particular, mflag = 0 is the 'usual' case. mflag .ne. 0
c indicates that x lies outside the halfopen interval
c xt(1) .le. y .lt. xt(lxt) . the asymmetric treatment of the
c interval is due to the decision to make all pp functions cont-
c inuous from the right.
c
c****** m e t h o d ******
c the program is designed to be efficient in the common situation that
c it is called repeatedly, with x taken from an increasing or decrea-
c sing sequence. this will happen, e.g., when a pp function is to be
c graphed. the first guess for left is therefore taken to be the val-
c ue returned at the previous call and stored in the l o c a l varia-
c ble ilo . a first check ascertains that ilo .lt. lxt (this is nec-
c essary since the present call may have nothing to do with the previ-
c ous call). then, if xt(ilo) .le. x .lt. xt(ilo+1), we set left =
c ilo and are done after just three comparisons.
c otherwise, we repeatedly double the difference istep = ihi - ilo
c while also moving ilo and ihi in the direction of x , until
c xt(ilo) .le. x .lt. xt(ihi) ,
c after which we use bisection to get, in addition, ilo+1 = ihi .
c left = ilo is then returned.
c
USE stel_kinds, ONLY: rprec
IMPLICIT INTEGER*4 (i-n), REAL(rprec) (a-h, o-z)
INTEGER left,lxt,mflag, ihi,ilo,istep,middle
REAL(rprec) :: x
DIMENSION xt(lxt)
DATA ilo /1/
c save ilo (a valid fortran statement in the new 1977 standard)
ihi = ilo + 1
IF (ihi .lt. lxt) GOTO 20
IF (x .ge. xt(lxt)) GOTO 110
IF (lxt .le. 1) GOTO 90
ilo = lxt - 1
ihi = lxt
c
20 IF (x .ge. xt(ihi)) GOTO 40
IF (x .ge. xt(ilo)) GOTO 100
c
c **** now x .lt. xt(ilo) . decrease ilo to capture x .
30 istep = 1
31 ihi = ilo
ilo = ihi - istep
IF (ilo .le. 1) GOTO 35
IF (x .ge. xt(ilo)) GOTO 50
istep = istep*2
GOTO 31
35 ilo = 1
IF (x .lt. xt(1)) GOTO 90
GOTO 50
c **** now x .ge. xt(ihi) . increase ihi to capture x .
40 istep = 1
41 ilo = ihi
ihi = ilo + istep
IF (ihi .ge. lxt) GOTO 45
IF (x .lt. xt(ihi)) GOTO 50
istep = istep*2
GOTO 41
45 IF (x .ge. xt(lxt)) GOTO 110
ihi = lxt
c
c **** now xt(ilo) .le. x .lt. xt(ihi) . narrow the interval.
50 middle = (ilo + ihi)/2
IF (middle .eq. ilo) GOTO 100
c note. it is assumed that middle = ilo in case ihi = ilo+1 .
IF (x .lt. xt(middle)) GOTO 53
ilo = middle
GOTO 50
53 ihi = middle
GOTO 50
c**** set output and return.
90 mflag = -1
left = 1
RETURN
100 mflag = 0
left = ilo
RETURN
110 mflag = 1
left = lxt
RETURN
END
c
c This routine is required IF the CVS revision numbers are to
c survive an optimization.
c
c
c $Date: 2005/08/10 15:39:26 $ $Author: hirshman $
c
SUBROUTINE spline_rev(i)
character*10 s
IF( i .eq. 0) s =
.'@(#)$RCSfile: spline.f,v $ $Revision: 1.18 $\000'
RETURN
END
|
Rebecca sells and buys beautiful costume jewelry, from Victorian times through the '80s: the kind of jewelry your mother and grandmother wore! Check back often; new jewelry is added daily. Her site also has lots of good information, from how to clean jewelry to how to spot fakes.
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_relaxedprefix_is_prefix_1
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype It = A | B | C
fun x :: "'a list => 'a list => 'a list" where
"x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"
fun isPrefix :: "It list => It list => bool" where
"isPrefix (nil2) z = True"
| "isPrefix (cons2 z2 x2) (nil2) = False"
| "isPrefix (cons2 z2 x2) (cons2 x3 x4) =
((z2 = x3) & (isPrefix x2 x4))"
fun isRelaxedPrefix :: "It list => It list => bool" where
"isRelaxedPrefix (nil2) z = True"
| "isRelaxedPrefix (cons2 z2 (nil2)) z = True"
| "isRelaxedPrefix (cons2 z2 (cons2 x3 x4)) (nil2) = False"
| "isRelaxedPrefix (cons2 z2 (cons2 x3 x4)) (cons2 x5 x6) =
(if (z2 = x5) then isRelaxedPrefix (cons2 x3 x4) x6 else
isPrefix (cons2 x3 x4) (cons2 x5 x6))"
theorem property0 :
"isRelaxedPrefix xs (x xs ys)"
oops
end
|
% Downsampling procedure.
%
% Arguments:
%   I      : grayscale image
%   filter : downsampling filter; should be a 1D separable filter.
%
% The border mode is hard-coded to 'symmetric' below; 'circular' or
% 'replicate' would also work. See 'imfilter'.
%
% If the image width W is odd, the resulting image has width (W-1)/2+1;
% same for the height.
%
% [email protected], August 2007
%
function R = downsample_(I, filter)
border_mode = 'symmetric';
% low pass, convolve with separable filter
R = imfilter(I,filter,border_mode); %horizontal
R = imfilter(R,filter',border_mode); %vertical
% decimate
r = size(I,1);
c = size(I,2);
R = R(1:2:r, 1:2:c, :); |
INTEGER*4 FUNCTION ISEARCH(X,A,N)
REAL A(N)
C
C-----INDEX SEARCH: returns the index K of the interval containing X,
C     i.e. A(K) .LT. X .LE. A(K+1) for interior X (binary search on
C     the nondecreasing array A); clamped to 1 or N-1 when X falls
C     outside [A(1), A(N)].
IF (X.LE.A(1)) THEN
ISEARCH=1
ELSE IF (X.GE.A(N)) THEN
ISEARCH=N-1
ELSE
K=1
J=N
10 IF (K.EQ.J-1) GO TO 20
I=(K+J)/2
IF (X.LE.A(I)) THEN
J=I
ELSE
K=I
END IF
GO TO 10
20 ISEARCH=K
END IF
C
RETURN
END
|
# -*- coding: utf-8 -*-
"""
femagtools.dxfsl.corner
~~~~~~~~~~~~~~~~~~~~~~~
Authors: Ronald Tanner, Beat Holm
"""
from __future__ import print_function
import numpy as np
import logging
from .functions import distance
logger = logging.getLogger('femagtools.corner')
class Corner(object):
def __init__(self, center, p):
self.__p = p
self.__dist = distance(center, p)
self.__keep = False
self.is_new_point = False
def point(self):
return self.__p
def is_equal(self, p, rtol=1e-04, atol=1e-04):
return (np.isclose(self.__p[0], p[0], rtol=rtol, atol=atol) and
np.isclose(self.__p[1], p[1], rtol=rtol, atol=atol))
def is_same_corner(self, c):
return self.is_equal(c.__p)
def set_keep_node(self):
self.__keep = True
def keep_node(self):
return self.__keep
def __eq__(self, c):
return (np.isclose(self.__p[0], c.__p[0], rtol=1e-04, atol=1e-04) and
np.isclose(self.__p[1], c.__p[1], rtol=1e-04, atol=1e-04))
    def __lt__(self, c):
        # order corners by their distance from the center
        return self.__dist < c.__dist
def __str__(self):
return "Corner: p={}, keep={}".format(self.__p, self.__keep)
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall A B D T Bprime MB Bprimeprime : Universe, ((wd_ A B /\ (wd_ A D /\ (wd_ A T /\ (wd_ B D /\ (wd_ B T /\ (wd_ D T /\ (wd_ D Bprime /\ (wd_ B Bprime /\ (wd_ MB B /\ (wd_ MB T /\ (wd_ MB Bprime /\ (wd_ Bprime Bprimeprime /\ (wd_ MB Bprimeprime /\ (wd_ T Bprimeprime /\ (wd_ B Bprimeprime /\ (wd_ A Bprime /\ (col_ A B Bprime /\ (col_ Bprime MB Bprimeprime /\ (col_ B MB T /\ (col_ Bprime B D /\ col_ T D A)))))))))))))))))))) -> col_ A B T)).
Proof.
time tac.
Qed.
End FOFProblem.
|
[STATEMENT]
lemma exec_plan_Append:
fixes as_a as_b s
shows "exec_plan s (as_a @ as_b) = exec_plan (exec_plan s as_a) as_b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exec_plan s (as_a @ as_b) = exec_plan (exec_plan s as_a) as_b
[PROOF STEP]
by (induction as_a arbitrary: s as_b) auto |
# Estimates phylogenetic species variability (PSV) across a global grid of 2x2 (the resolution used in the original estimation of the presence-absence matrix).
# The code also estimates spatial null models for PSV by shuffling the PAM in two ways:
# - NULL 1 randomly shuffles occurrences within sites (i.e., per-row shuffling), keeping the observed species richness.
# - NULL 2 randomly shuffles occurrences within families (i.e., per-column shuffling), keeping the observed family prevalence.
# PSV is estimated on a dated phylogeny that is pruned to include a single tip per family.
# Continues with R objects generated in previous steps.
tre.pruned <- drop.tip(tre,tip=list[which(duplicated(list_fams))])
tre.pruned<- ladderize(tre.pruned, FALSE)
list <- tre.pruned$tip.label
list_outs <- foreach(i=1:length(list),.combine=c) %do% {strsplit(list,"_")[[i]][2]}
outs<-c("Welwitschiaceae","Pinaceae","Ginkgoaceae","Cupressaceae","Gnetaceae")
tre.pruned <- drop.tip(tre.pruned,tip=tre.pruned$tip.label[which(list_outs%in%outs)])
list<-tre.pruned$tip.label
list <- foreach(i=1:length(list),.combine=c) %do% {strsplit(list,"_")[[i]][2]}
bb <- resB_fams[match(list,rownames(resB_fams)),]
match(list,rownames(bb))
print(bb[1:5,]); print(tre.pruned$tip.label[1:5])
list <- tre.pruned$tip.label
list_fams <- foreach(i=1:length(list),.combine=c) %do% {strsplit(list,"_")[[i]][2]}
tre.pruned$tip.label<-list_fams
is.rooted(tre.pruned); is.ultrametric(tre.pruned)
tre.pruned
iden <- which(rowSums(prPAM_g2[,-c(1:2)]) > 5)
psv <- psv(samp = prPAM_g2[iden,-c(1:2)],tree = tre.pruned)
PSV_random_freq <- matrix(nrow = dim(prPAM_g2[iden,])[1], ncol = 1002)
for (i in 3:ncol(PSV_random_freq)){
cat(i, "\n")
psv_random <- psv(samp = randomizeMatrix(prPAM_g2[iden,-c(1:2)],"freq",10000000),tree = tre.pruned)
PSV_random_freq[,i] <- psv_random$PSVs
}
save(PSV_random_freq,file=paste(ruta_write,"PSV_random_freq.Rdata",sep=""))
PSV_random_rich <- matrix(nrow = dim(prPAM_g2[iden,])[1], ncol = 1002)
for (i in 3:ncol(PSV_random_rich)){  # start at 3 so columns 1:2 stay reserved, matching the freq loop above
cat(i, "\n")
psv_random <- psv(samp = randomizeMatrix(prPAM_g2[iden,-c(1:2)],"rich",10000000),tree = tre.pruned)
PSV_random_rich[,i] <- psv_random$PSVs
}
save(PSV_random_rich,file=paste(ruta_write,"PSV_random_rich.Rdata",sep=""))
pdf(paste(ruta_write,"10.PSV_randonm.pdf",sep=""))
ssp <- coordinates(prPAM_g2[iden,1:2])
Data<-data.frame(Order=1:dim(prPAM_g2[iden,])[1],z=psv$PSVs[iden])
Data<-Data[order(Data$z),]
f <- clean_coordinates(as.data.frame(ssp), lon = "LON",lat = "LAT",
tests="seas",species=NULL,value="flagged",seas_scale=110)
k=20
breaks <- getJenksBreaks(Data$z,k)
cols_breaks <- rev(viridis(k,begin=0.0,end=1))
Data$col <- cols_breaks[k]
for (i in k:1){
Data$col[which(Data$z <= breaks[i])] <- cols_breaks[i]}
orderedcolors<-Data[order(Data$Order),"col"]
maps::map("world",interior=F)
points(ssp[f,],pch=22,lwd=0.1,bg=orderedcolors[f],cex=0.6,col=NULL)
rect(xleft =-180,xright = 190,ybottom = -86,ytop = 85,border = "black")
color.legend(xl=-35,xr=35,yb=-83,yt=-67,align="lt",gradient="x",
legend=round(range(psv$PSVs,na.rm = T),2),rect.col=cols_breaks)
abline(h=c(23.5,-23.5),lwd=2,lty=3)
title("Phylogenetic Species Variability")
SES_psv <- (psv$PSVs[iden] - apply(PSV_random_rich[,-c(1:2)],MARGIN=1,FUN=mean,na.rm=T))/(apply(PSV_random_rich[,-c(1:2)],MARGIN=1,FUN=sd,na.rm=T))
Data<-data.frame(Order=1:dim(prPAM_g2[iden,])[1],z=SES_psv)
Data<-Data[order(Data$z),]
k=20
breaks <- getJenksBreaks(Data$z,k)
cols_breaks <- rev(viridis(k))
Data$col <- cols_breaks[k]
for (i in k:1){
Data$col[which(Data$z <= breaks[i])] <- cols_breaks[i]}
orderedcolors<-Data[order(Data$Order),"col"]
maps::map("world",interior=F)
points(ssp[f,],pch=22,lwd=0.1,bg=orderedcolors[f],cex=0.5,col=NULL)
rect(xleft =-180,xright = 190,ybottom = -86,ytop = 85,border = "black")
color.legend(xl=-35,xr=35,yb=-83,yt=-67,align="lt",gradient="x",
legend=round(range(SES_psv,na.rm = T),2),rect.col=cols_breaks)
title("SES-Phylogenetic Species Variability (NULL1)")
SES_psv <- (psv$PSVs[iden] - apply(PSV_random_freq[,-c(1:2)],MARGIN=1,FUN=mean,na.rm=T))/(apply(PSV_random_freq[,-c(1:2)],MARGIN=1,FUN=sd,na.rm=T))
Data<-data.frame(Order=1:dim(prPAM_g2[iden,])[1],z=SES_psv)
Data<-Data[order(Data$z),]
k=20
breaks <- getJenksBreaks(Data$z,k)
cols_breaks <- rev(viridis(k))
Data$col <- cols_breaks[k]
for (i in k:1){
Data$col[which(Data$z <= breaks[i])] <- cols_breaks[i]}
orderedcolors<-Data[order(Data$Order),"col"]
maps::map("world",interior=F)
points(ssp[f,],pch=22,lwd=0.1,bg=orderedcolors[f],cex=0.5,col=NULL)
rect(xleft =-180,xright = 190,ybottom = -86,ytop = 85,border = "black")
color.legend(xl=-35,xr=35,yb=-83,yt=-67,align="lt",gradient="x",
legend=round(range(SES_psv,na.rm = T),2),rect.col=cols_breaks)
title("SES-Phylogenetic Species Variability (NULL2)")
dev.off()
|
lemma open_translation_subtract: fixes S :: "'a::real_normed_vector set" assumes "open S" shows "open ((\<lambda>x. x - a) ` S)" |
//
// Created by Jonathan Thorpe on 4/21/12.
// Copyright (c) 2012 JBAT. All rights reserved.
//
#include "VSCIMActionImplementations.h"
#include "VSCMIDIOutputManager.h"
#include "VSCMIDITasks.h"
#include "VSCMIDI.h"
#include <boost/assert.hpp>
#include <boost/foreach.hpp>
using namespace VSC;
using namespace VSC::IM;
//MARK: Extractor utils (DRY...)
MIDI::OutputOwner::SPtr VSC::IM::ExtractMIDIOutputOwnerForAction(Action::SPtr action)
{
BOOST_ASSERT(action);
if (!action) return MIDI::OutputOwner::SPtr();
Action::Implementation::SPtr implementation = action->getImplementation();
BOOST_ASSERT(implementation);
if (!implementation) return MIDI::OutputOwner::SPtr();
return boost::dynamic_pointer_cast<MIDI::OutputOwner>(implementation);
}
MIDI::ChannelOwner::SPtr VSC::IM::ExtractMIDIChannelOwnerForAction(Action::SPtr action)
{
BOOST_ASSERT(action);
if (!action) return MIDI::ChannelOwner::SPtr();
Action::Implementation::SPtr implementation = action->getImplementation();
BOOST_ASSERT(implementation);
if (!implementation) return MIDI::ChannelOwner::SPtr();
return boost::dynamic_pointer_cast<MIDI::ChannelOwner>(implementation);
}
MIDI::ControlNumberOwner::SPtr VSC::IM::ExtractMIDIControlNumberOwnerForAction(Action::SPtr action)
{
BOOST_ASSERT(action);
if (!action) return MIDI::ControlNumberOwner::SPtr();
Action::Implementation::SPtr implementation = action->getImplementation();
BOOST_ASSERT(implementation);
if (!implementation) return MIDI::ControlNumberOwner::SPtr();
return boost::dynamic_pointer_cast<MIDI::ControlNumberOwner>(implementation);
}
//MARK: Void
const Tasks Action::ImplementationVoid::generateTasksWithTargetValueMap(Event::TargetValueMap& valueMap)
{
Tasks tasks;
return tasks;
}
void Action::ImplementationVoid::setupMappings(Action::SPtr action)
{
// nothing ...
}
//MARK: MIDINoteOn
const Tasks Action::ImplementationMIDINoteOn::generateTasksWithTargetValueMap(Event::TargetValueMap& valueMap)
{
Tasks tasks;
MIDI::MIDISendMessageTask::Payload::SPtr payload(new MIDI::MIDISendMessageTask::Payload);
BOOST_ASSERT(payload->messageDescription);
payload->messageDescription->type = MIDI::MessageTypeNoteOn;
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyChannel] = (unsigned char) this->getMIDIChannel();
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyPitch] = (unsigned char) valueMap[TargetPitch];
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyVelocity] = (unsigned char) valueMap[TargetVelocityOn];
payload->midiOutput = this->getMIDIOutput();
MIDI::MIDISendMessageTask::SPtr task(new MIDI::MIDISendMessageTask(boost::dynamic_pointer_cast<Task::Payload>(payload)));
tasks.push_back(task);
return tasks;
}
void Action::ImplementationMIDINoteOn::setupMappings(Action::SPtr action)
{
BOOST_ASSERT(action);
if (action)
{
const Targets& targets = action->getRequiredMappingTargets();
BOOST_FOREACH(const Trigger& trigger, AllowedTriggers())
{
BOOST_FOREACH(const Target& target, targets)
{
Mapping::SPtr mapping = action->getMapping(trigger, target);
BOOST_ASSERT(mapping);
if (mapping)
{
mapping->setMappingType(MappingTypeConstant);
mapping->setOffset(64.0);
}
}
}
}
}
//MARK: MIDINoteOff
const Tasks Action::ImplementationMIDINoteOff::generateTasksWithTargetValueMap(Event::TargetValueMap& valueMap)
{
Tasks tasks;
MIDI::MIDISendMessageTask::Payload::SPtr payload(new MIDI::MIDISendMessageTask::Payload);
BOOST_ASSERT(payload->messageDescription);
payload->messageDescription->type = MIDI::MessageTypeNoteOff;
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyChannel] = (unsigned char)this->getMIDIChannel();
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyPitch] = (unsigned char) valueMap[TargetPitch];
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyVelocity] = (unsigned char) valueMap[TargetVelocityOff];
payload->midiOutput = this->getMIDIOutput();
MIDI::MIDISendMessageTask::SPtr task(new MIDI::MIDISendMessageTask(boost::dynamic_pointer_cast<Task::Payload>(payload)));
tasks.push_back(task);
return tasks;
}
void Action::ImplementationMIDINoteOff::setupMappings(Action::SPtr action)
{
BOOST_ASSERT(action);
if (action)
{
const Targets& targets = action->getRequiredMappingTargets();
BOOST_FOREACH(const Trigger& trigger, AllowedTriggers())
{
BOOST_FOREACH(const Target& target, targets)
{
Mapping::SPtr mapping = action->getMapping(trigger, target);
BOOST_ASSERT(mapping);
if (mapping)
{
mapping->setMappingType(MappingTypeConstant);
mapping->setOffset(64.0);
}
}
}
}
}
//MARK: MIDINoteOnAndOff
const Tasks Action::ImplementationMIDINoteOnAndOff::generateTasksWithTargetValueMap(Event::TargetValueMap& valueMap)
{
Tasks tasks;
MIDI::MIDISendMessageTask::Payload::SPtr onPayload(new MIDI::MIDISendMessageTask::Payload);
BOOST_ASSERT(onPayload->messageDescription);
onPayload->messageDescription->type = MIDI::MessageTypeNoteOn;
onPayload->messageDescription->parameterMap[MIDI::MessageParameterKeyChannel] = (unsigned char)this->getMIDIChannel();
onPayload->messageDescription->parameterMap[MIDI::MessageParameterKeyPitch] = (unsigned char) valueMap[TargetPitch];
onPayload->messageDescription->parameterMap[MIDI::MessageParameterKeyVelocity] = (unsigned char) valueMap[TargetVelocityOn];
onPayload->midiOutput = this->getMIDIOutput();
MIDI::MIDISendMessageTask::SPtr onTask(new MIDI::MIDISendMessageTask(boost::dynamic_pointer_cast<Task::Payload>(onPayload)));
tasks.push_back(onTask);
Float duration = valueMap[TargetDuration];
long milliseconds = (long) std::floor(duration * 1000.0);
TimeDuration timeDuration = boost::posix_time::millisec(milliseconds);
MIDI::MIDISendMessageTask::Payload::SPtr offPayload(new MIDI::MIDISendMessageTask::Payload);
BOOST_ASSERT(offPayload->messageDescription);
offPayload->timeOffset = timeDuration;
offPayload->messageDescription->type = MIDI::MessageTypeNoteOff;
offPayload->messageDescription->parameterMap[MIDI::MessageParameterKeyChannel] = (unsigned char)this->getMIDIChannel();
offPayload->messageDescription->parameterMap[MIDI::MessageParameterKeyPitch] = (unsigned char) valueMap[TargetPitch];
offPayload->messageDescription->parameterMap[MIDI::MessageParameterKeyVelocity] = (unsigned char) valueMap[TargetVelocityOff];
offPayload->midiOutput = this->getMIDIOutput();
MIDI::MIDISendMessageTask::SPtr offTask(new MIDI::MIDISendMessageTask(boost::dynamic_pointer_cast<Task::Payload>(offPayload)));
tasks.push_back(offTask);
return tasks;
}
void Action::ImplementationMIDINoteOnAndOff::setupMappings(Action::SPtr action)
{
BOOST_ASSERT(action);
if (action)
{
const Targets& targets = action->getRequiredMappingTargets();
BOOST_FOREACH(const Trigger& trigger, AllowedTriggers())
{
BOOST_FOREACH(const Target& target, targets)
{
Mapping::SPtr mapping = action->getMapping(trigger, target);
BOOST_ASSERT(mapping);
if (mapping)
{
mapping->setMappingType(MappingTypeConstant);
mapping->setOffset(64.0);
}
}
}
}
}
//MARK: MIDIControlChange
const Tasks Action::ImplementationMIDIControlChange::generateTasksWithTargetValueMap(Event::TargetValueMap& valueMap)
{
Tasks tasks;
MIDI::MIDISendMessageTask::Payload::SPtr payload(new MIDI::MIDISendMessageTask::Payload);
BOOST_ASSERT(payload->messageDescription);
    // Presumably a copy-paste bug in the original, which sent MessageTypeNoteOff
    // from the control-change implementation. MessageTypeControlChange is assumed
    // to exist by analogy with the other MessageType values; the control number
    // (see the ControlNumberOwner extractor above) should likely be added to the
    // parameter map here as well.
    payload->messageDescription->type = MIDI::MessageTypeControlChange;
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyChannel] = (unsigned char)this->getMIDIChannel();
payload->messageDescription->parameterMap[MIDI::MessageParameterKeyValue] = (unsigned char) valueMap[TargetValue];
payload->midiOutput = this->getMIDIOutput();
MIDI::MIDISendMessageTask::SPtr task(new MIDI::MIDISendMessageTask(boost::dynamic_pointer_cast<Task::Payload>(payload)));
tasks.push_back(task);
return tasks;
}
void Action::ImplementationMIDIControlChange::setupMappings(Action::SPtr action)
{
BOOST_ASSERT(action);
if (action)
{
const Targets& targets = action->getRequiredMappingTargets();
BOOST_FOREACH(const Trigger& trigger, AllowedTriggers())
{
BOOST_FOREACH(const Target& target, targets)
{
Mapping::SPtr mapping = action->getMapping(trigger, target);
BOOST_ASSERT(mapping);
if (mapping)
{
mapping->setMappingType(MappingTypeConstant);
mapping->setOffset(64.0);
}
}
}
}
}
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj16eqsynthconj1 : forall (lv0 : natural), (@eq natural (Succ lv0) (Succ (plus Zero lv0))).
Admitted.
QuickChick conj16eqsynthconj1.
|
State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ f ∈ degreeLT R n ↔ degree f < ↑n State After: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), f ∈ ⨅ (_ : i ≥ n), LinearMap.ker (lcoeff R i)) ↔ degree f < ↑n Tactic: rw [degreeLT, Submodule.mem_iInf] State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), f ∈ ⨅ (_ : i ≥ n), LinearMap.ker (lcoeff R i)) ↔ degree f < ↑n State After: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ degree f < ↑n Tactic: conv_lhs => intro i; rw [Submodule.mem_iInf] State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ degree f < ↑n State After: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ sup (support f) WithBot.some < ↑n Tactic: rw [degree, Finset.max_eq_sup_coe] State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ sup (support f) WithBot.some < ↑n State After: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ ∀ (b : ℕ), b ∈ support f → ↑b < ↑n
R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ ⊥ < ↑n Tactic: rw [Finset.sup_lt_iff ?_] State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ ∀ (b : ℕ), b ∈ support f → ↑b < ↑n
R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ ⊥ < ↑n State After: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ ⊥ < ↑n
R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ ∀ (b : ℕ), b ∈ support f → ↑b < ↑n Tactic: rotate_left State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ ⊥ < ↑n
R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ ∀ (b : ℕ), b ∈ support f → ↑b < ↑n State After: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ ∀ (b : ℕ), b ∈ support f → ↑b < ↑n Tactic: apply WithBot.bot_lt_coe State Before: R : Type u
S : Type ?u.11734
inst✝ : Semiring R
n : ℕ
f : R[X]
⊢ (∀ (i : ℕ), i ≥ n → f ∈ LinearMap.ker (lcoeff R i)) ↔ ∀ (b : ℕ), b ∈ support f → ↑b < ↑n State After: no goals Tactic: conv_rhs =>
simp only [mem_support_iff]
intro b
rw [Nat.cast_withBot, WithBot.coe_lt_coe, lt_iff_not_le, Ne, not_imp_not] |
[GOAL]
α β : BddDistLatCat
e : ↑α.toDistLatCat ≃o ↑β.toDistLatCat
⊢ ((let src :=
{ toSupHom := { toFun := ↑e, map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑e,
map_sup' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e, map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑e ⊤ = ⊤), map_bot' := (_ : ↑e ⊥ = ⊥) }) ≫
let src :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ : ∀ (a b : ↑β.toDistLatCat), ↑(OrderIso.symm e) (a ⊓ b) = ↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) = ↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑(OrderIso.symm e) ⊤ = ⊤), map_bot' := (_ : ↑(OrderIso.symm e) ⊥ = ⊥) }) =
𝟙 α
[PROOFSTEP]
ext
[GOAL]
case w
α β : BddDistLatCat
e : ↑α.toDistLatCat ≃o ↑β.toDistLatCat
x✝ : (forget BddDistLatCat).obj α
⊢ ↑((let src :=
{ toSupHom := { toFun := ↑e, map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑e,
map_sup' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑e ⊤ = ⊤), map_bot' := (_ : ↑e ⊥ = ⊥) }) ≫
let src :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) = ↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) =
↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) =
↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑(OrderIso.symm e) ⊤ = ⊤), map_bot' := (_ : ↑(OrderIso.symm e) ⊥ = ⊥) })
x✝ =
↑(𝟙 α) x✝
[PROOFSTEP]
exact e.symm_apply_apply _
[GOAL]
α β : BddDistLatCat
e : ↑α.toDistLatCat ≃o ↑β.toDistLatCat
⊢ ((let src :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ : ∀ (a b : ↑β.toDistLatCat), ↑(OrderIso.symm e) (a ⊓ b) = ↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) = ↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑(OrderIso.symm e) ⊤ = ⊤), map_bot' := (_ : ↑(OrderIso.symm e) ⊥ = ⊥) }) ≫
let src :=
{ toSupHom := { toFun := ↑e, map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑e,
map_sup' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e, map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑e ⊤ = ⊤), map_bot' := (_ : ↑e ⊥ = ⊥) }) =
𝟙 β
[PROOFSTEP]
ext
[GOAL]
case w
α β : BddDistLatCat
e : ↑α.toDistLatCat ≃o ↑β.toDistLatCat
x✝ : (forget BddDistLatCat).obj β
⊢ ↑((let src :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) = ↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) = ↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) =
↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑(OrderIso.symm e),
map_sup' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊔ b) =
↑(OrderIso.symm e) a ⊔ ↑(OrderIso.symm e) b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
↑(OrderIso.symm e) (a ⊓ b) =
↑(OrderIso.symm e) a ⊓ ↑(OrderIso.symm e) b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑β.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑(OrderIso.symm e) ⊤ = ⊤), map_bot' := (_ : ↑(OrderIso.symm e) ⊥ = ⊥) }) ≫
let src :=
{ toSupHom := { toFun := ↑e, map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) };
{
toLatticeHom :=
{
toSupHom :=
{ toFun := ↑e,
map_sup' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
(a ⊔ b) =
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
a ⊔
SupHom.toFun
{
toSupHom :=
{ toFun := ↑e,
map_sup' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊔ b) = ↑e a ⊔ ↑e b) },
map_inf' := (_ : ∀ (a b : ↑α.toDistLatCat), ↑e (a ⊓ b) = ↑e a ⊓ ↑e b) }.toSupHom
b) },
map_inf' :=
(_ :
∀ (a b : ↑α.toDistLatCat),
SupHom.toFun src.toSupHom (a ⊓ b) = SupHom.toFun src.toSupHom a ⊓ SupHom.toFun src.toSupHom b) },
map_top' := (_ : ↑e ⊤ = ⊤), map_bot' := (_ : ↑e ⊥ = ⊥) })
x✝ =
↑(𝟙 β) x✝
[PROOFSTEP]
exact e.apply_symm_apply _
|
[STATEMENT]
lemma set_times_rearrange2: "a *o (b *o C) = (a * b) *o C"
for a b :: "'a::semigroup_mult"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a *o (b *o C) = (a * b) *o C
[PROOF STEP]
by (auto simp add: elt_set_times_def mult.assoc) |
% $Id$
%\subsubsection{Restrictions and Future Work}
\begin{enumerate}
\item {\bf Support is limited to 1D and 2D regridding.} Regridding
support is limited to two dimensions.
\item {\bf Masks are not implemented.} Regridding methods take
masks in their argument lists, but they currently are not used or
applied.
\item {\bf Regridding only fills computational domains.} Currently,
regridded values are not automatically applied to halo or ghost
domains. Users must manually call Halo after regridding in order
to do so. This also means that Grid periodicity will not affect
regridding results without manual Halo calls.
\item {\bf Special pole treatment is not implemented.}
Conservative regridding methods do not yet have the special pole
treatment completed. Please see the conservative regrid algorithm
description (Section~\ref{sec:ConserveRegrid}) for further details.
\item {\bf Interpolation weights are not available to users.}
Currently, there is no method to allow users access to the interpolation
weights for any desired manipulations.
\item {\bf Spherical coordinates are not shifted.}
Regrid methods do not yet shift longitude coordinates
(or their equivalent) from the source Grid to place them on the same
360 degree (or 2*pi) range as the destination Grid. For Grids that
cover the entire sphere and assume a periodic longitude boundary,
this may produce errors along the Grid edges even though they share
the same 360 degree range.
\item {\bf Future regrid methods.} The following methods will
be added:
\begin{description}
\item [ESMF\_REGRID\_METHOD\_ADJOINT]
Create adjoint of existing regrid
\item [ESMF\_REGRID\_METHOD\_FILE]
Read a regrid from a file
\item [ESMF\_REGRID\_METHOD\_FOURIER]
Fourier transform
\item [ESMF\_REGRID\_METHOD\_INDEX]
Index-space regrid (shift, stencil)
\item [ESMF\_REGRID\_METHOD\_LEGENDRE]
Legendre transform
\item [ESMF\_REGRID\_METHOD\_NEARNBR]
Nearest-neighbor dist-weighted avg
\item [ESMF\_REGRID\_METHOD\_RASTER]
Regrid by rasterizing domain
\item [ESMF\_REGRID\_METHOD\_REGRIDCOPY]
Copy existing regrid
\item [ESMF\_REGRID\_METHOD\_SHIFT]
Shift addresses of existing regrid
\item [ESMF\_REGRID\_METHOD\_SPLINE]
Cubic spline for 1-d regridding
\item [ESMF\_REGRID\_METHOD\_USER]
User-supplied method
\end{description}
\end{enumerate}
|
module DepB
import Submodule.DepC
empty : ()
empty = ()
|
State Before: ι : Type u_1
V : Type u
inst✝¹ : Category V
inst✝ : HasZeroMorphisms V
c : ComplexShape ι
C : HomologicalComplex V c
i i' j : ι
rij : ComplexShape.Rel c i j
rij' : ComplexShape.Rel c i' j
⊢ eqToHom (_ : X C i = X C i') ≫ d C i' j = d C i j State After: ι : Type u_1
V : Type u
inst✝¹ : Category V
inst✝ : HasZeroMorphisms V
c : ComplexShape ι
C : HomologicalComplex V c
i j : ι
rij rij' : ComplexShape.Rel c i j
⊢ eqToHom (_ : X C i = X C i) ≫ d C i j = d C i j Tactic: obtain rfl := c.prev_eq rij rij' State Before: ι : Type u_1
V : Type u
inst✝¹ : Category V
inst✝ : HasZeroMorphisms V
c : ComplexShape ι
C : HomologicalComplex V c
i j : ι
rij rij' : ComplexShape.Rel c i j
⊢ eqToHom (_ : X C i = X C i) ≫ d C i j = d C i j State After: no goals Tactic: simp only [eqToHom_refl, id_comp] |
\begin{document}
\chapter{Background}
\section{Bitcoin}
The Bitcoin blockchain makes its first appearance in 2008, in a white paper written by someone under the pseudonym of Satoshi Nakamoto \cite{Nakamoto2008} and published on the Cypherpunks mailing list, where it is described as ``a purely peer-to-peer version of electronic cash that would allow online payments to be sent directly from one party to another without going through a financial institution''. In his paper, Nakamoto addresses the problem of trust-based transaction processors, asserting that the need for a third party in electronic payments brings inherent weaknesses:
\begin{itemize}
\item There is no possibility of completely non-reversible transactions (financial institutions cannot avoid mediating disputes).
\item The cost of mediation increases the transaction costs, thus making it impossible for small casual transactions to be processed.
\item A system that can revert the state of a transaction needs to be trusted. For this reason, merchants tend to ask for customer information, something that doesn't happen with fiat currency.
\end{itemize}
Nakamoto's suggestion is a peer-to-peer electronic cash system based on the concept of \textit{proof} instead of \textit{trust}: his idea is to make transactions that are computationally impractical to reverse, along with a peer-to-peer distributed timestamp service that generates computational proof of the chronological order of the transactions. The other claim Nakamoto makes in his paper is that such a system is resilient to attacks as long as the honest nodes of the peer-to-peer network control more computational power than any cooperating group of attacker nodes, implicitly saying that the system is tolerant to Byzantine failures \cite{Lamport1982}.
\subsection{Bitcoin Transactions}
Nakamoto defines the peer-to-peer electronic cash system as a chain of digital transactions where each owner transfers the coin to the next by signing (digitally, in a cryptographic sense) the hash of the previous transaction and the public key of the receiver. The receiver can then verify the signatures to ensure that the chain of ownership is legitimate; a minimal sketch of this mechanism is shown after the figure below.
\begin{figure}
\caption{The transaction scheme shows the change of ownership, from \cite{Nakamoto2008}.}
\includegraphics[width=10cm]{transactions.png}
\centering
\end{figure}
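To make the mechanism concrete, the following Python sketch chains such transfers. It is illustrative only: it assumes the third-party \texttt{ecdsa} package (not part of the standard library), and the \texttt{Transaction} layout is a toy simplification, not Bitcoin's actual transaction format.
\begin{verbatim}
# Minimal sketch of Bitcoin-style chained ownership (illustrative only).
# Assumes the third-party 'ecdsa' package (pip install ecdsa).
import hashlib
from ecdsa import SigningKey, SECP256k1

def message(prev_hash, payee_pubkey):
    # Each owner signs the hash of the previous transaction together
    # with the public key of the next owner.
    return hashlib.sha256(prev_hash + payee_pubkey).digest()

class Transaction:
    def __init__(self, prev_hash, payee_pubkey, signature):
        self.prev_hash = prev_hash        # link to the previous transaction
        self.payee_pubkey = payee_pubkey  # next owner
        self.signature = signature        # current owner's signature

    def txid(self):
        return hashlib.sha256(self.prev_hash + self.payee_pubkey
                              + self.signature).digest()

def transfer(owner_sk, prev_hash, payee_vk):
    pub = payee_vk.to_string()
    return Transaction(prev_hash, pub, owner_sk.sign(message(prev_hash, pub)))

alice, bob, carol = (SigningKey.generate(curve=SECP256k1) for _ in range(3))
genesis = hashlib.sha256(b"coinbase").digest()
t1 = transfer(alice, genesis, bob.verifying_key)    # Alice pays Bob
t2 = transfer(bob, t1.txid(), carol.verifying_key)  # Bob pays Carol
# Carol verifies that Bob, the previous owner, really signed t2:
bob.verifying_key.verify(t2.signature, message(t2.prev_hash, t2.payee_pubkey))
\end{verbatim}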
The only problem here is that the payee cannot verify that the amount the previous owner passed to him was not double-spent before the change of ownership. That is, without full knowledge of the chronological order of the transactions, a malicious attacker can effectively reuse his money before the payee is able to use it.
What is needed, then, is a timestamp service that can guarantee the chronological order of the transactions. Conventional transaction processors in centralized systems check that there exists one and only one transaction from the sender to the receiver. A decentralized system is thus required to have full knowledge of its history; that is, every player in the network must \textit{agree} on a single full history of the transactions. Requiring every transaction that takes place on the Bitcoin network to be broadcast among all the users is not a sufficient condition to ensure agreement over a set of ordered transactions, because propagation delays and network errors cause inconsistencies among the transaction sets held by the nodes.
\subsection{Bitcoin Blockchain}
The solution that Bitcoin (and the majority of the other cryptocurrencies) adopted to provide the full history of the Bitcoin transactions is to store every user's transaction information in a public ledger replicated among all the nodes of the network. This information is collected in blocks of transactions, where the chronological order of the blocks is given by including in each block header the hash of the previous block. The final result is an immutable chain of blocks where each block references its predecessor; thus every transaction included in a block is ordered according to the order of the block itself.
\begin{figure}
\caption{Sketch showing how each block refers to its predecessor.}
\includegraphics[width=10cm]{blockchain_schema.png}
\centering
\end{figure}
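The hash links make the chain tamper-evident, as this short Python sketch illustrates (block contents are plain strings for simplicity; real block headers carry a Merkle root, timestamp, and nonce):
\begin{verbatim}
import hashlib

def block_hash(prev_hash: bytes, payload: str) -> bytes:
    # Each header commits to the previous block's hash.
    return hashlib.sha256(prev_hash + payload.encode()).digest()

genesis = block_hash(b"", "genesis")
h1 = block_hash(genesis, "tx: alice -> bob")
h2 = block_hash(h1, "tx: bob -> carol")

# Tampering with the first block changes every later hash:
forged = block_hash(genesis, "tx: alice -> mallory")
assert block_hash(forged, "tx: bob -> carol") != h2
\end{verbatim}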
What is required, then, is that all users agree on the very same public ledger in order to be sure that a transaction has not been double-spent.
\subsection{Consensus Mechanism}
The core innovation behind the success of the blockchain technology resides in the consensus protocol, which will be referred to as \textit{Nakamoto Consensus}. The main goal of Nakamoto Consensus is to reach agreement over the same public ledger held by every node of the network, i.e., old blocks and new blocks must be the same for everyone.
The trick here is to use a challenging computational puzzle (which Nakamoto in his paper calls \textit{Proof-of-work}, though it is a misnomer\footnote{Bitcoin's PoW is not a real Proof of Work because it is a probabilistic puzzle, i.e., with some luck one can find a solution with very little work. The very first Proof of Work was invented in 1992 by C. Dwork and M. Naor as an anti-spam system \cite{Dwork1992}.}) to determine the next block in the chain. Every user can work on the puzzle and try his luck at finding a new block.
A block is appended to the head of the blockchain if and only if it is the first to be announced and if it contains the correct solution to the computational puzzle. C. Decker and R. Wattenhofer \cite{Decker2013} analyzed the behavior of information propagation in the Bitcoin network and found that the median time for a broadcast block to reach all the peers is 6.5 seconds (whereas the mean is 12.6 seconds). Upon hearing of a new block, the participants of the network verify that the block is indeed correct, append it to the head of their locally stored blockchain, and immediately start working on finding the next one.
The computational puzzle is only needed for all nodes to agree on a common value. Because of its random nature, it is likely that only one node will eventually find the solution to such a puzzle; that node is de facto elected to be leader for one round. Once a block is found, it gets propagated through the network. If the solution to the hashing puzzle is valid and the transactions in the block are valid, then the block is accepted into the blockchain. A transaction is valid if:
\begin{enumerate}
\item each transaction input matches a previous transaction output.
\item the inputs are redeemed by their legitimate owners
\item the sum of values of all transaction outputs is less than or equal to the sum of the values of all inputs.
\end{enumerate}
This verification is performed by Bitcoin nodes. It would be useless for a malicious user to forge invalid blocks, because any invalid block (a malformed block, a block that contains invalid transactions, or a block whose proof-of-work doesn't solve the puzzle) would be discarded by the honest peers of the network.
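The three validity rules can be illustrated with a hedged Python sketch over a toy UTXO model (this is not Bitcoin's real data structure, and the plain \texttt{owner} comparison stands in for full script and signature evaluation):
\begin{verbatim}
# Toy UTXO model: utxos maps (txid, index) -> {'value': ..., 'owner': ...}.
def is_valid(tx, utxos):
    in_value = 0
    for inp in tx["inputs"]:
        ref = (inp["txid"], inp["index"])
        if ref not in utxos:                     # rule 1: matches a previous output
            return False
        if utxos[ref]["owner"] != inp["owner"]:  # rule 2: legitimate owner
            return False
        in_value += utxos[ref]["value"]
    out_value = sum(o["value"] for o in tx["outputs"])
    return out_value <= in_value                 # rule 3: no value created

utxos = {("t0", 0): {"value": 50, "owner": "alice"}}
tx = {"inputs":  [{"txid": "t0", "index": 0, "owner": "alice"}],
      "outputs": [{"value": 49}]}   # the 1 unit left over is the miner's fee
assert is_valid(tx, utxos)
\end{verbatim}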
Nakamoto claims that Bitcoin is safe from Byzantine failures as long as 50\% of the total computational power required to solve the cryptographic puzzle belongs to honest miners. If attackers manage to get more than 50\% of the computational power, they are more likely to include the blocks they produce in the blockchain, thus making them able to perform devastating strategies that would disrupt the coin (blacklisting particular addresses, coin hostage scenarios, and so on).
It is possible, though, that two proofs-of-work are found within a short time window. This situation is known as a \textit{temporary fork}, a situation in which the blockchain is split into two chains of equal length, and it was found to happen at a rate of 1.69 every 100 blocks (\(r = 1.69\%\)) \cite{Decker2013}. Miners will start to produce a block on top of either one of the two temporary heads of the chain; the random nature of the computational puzzle will eventually extend one of the two forks, and sooner or later every node in the network will reach agreement over the longest chain. Resolving a fork is a crucial matter, as forks, which are an inconsistent state of the blockchain, enable a disruptive attack on the network known as \textit{selfish mining}, which reduces the computational power required by attackers from 50\% of the total to 33\% \cite{Eyal2013}. Because of this possibility, it has been discovered that Nakamoto's suggestion for Byzantine Agreement doesn't really solve Consensus but solves a weaker version of Consensus in which only agreement is satisfied, while validity cannot be guaranteed with overwhelming probability, both in synchronous \cite{Garay2015} and in asynchronous settings \cite{Pass2016}.
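The longest-chain rule can be modeled with a small Python sketch (chain length stands in here for the accumulated proof-of-work that real clients compare):
\begin{verbatim}
# Each node keeps every block header it has heard of; blocks maps a
# block id to the id of its predecessor (None for the genesis block).
def height(tip, blocks):
    h = 0
    while blocks[tip] is not None:
        tip = blocks[tip]
        h += 1
    return h

def best_tip(tips, blocks):
    # During a temporary fork two tips coexist; the branch that gets
    # extended first wins and the other one is abandoned.
    return max(tips, key=lambda t: height(t, blocks))

blocks = {"g": None, "a1": "g", "b1": "g", "a2": "a1"}  # fork at height 1
assert best_tip(["a2", "b1"], blocks) == "a2"
\end{verbatim}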
As blocks are added to the blockchain, transactions that are already included in the chain become more and more persistent, as an adversary who wants to tamper with an older block must produce a number of proofs-of-work equal to the number of blocks that separate the tampered one from the blockchain head, because he will need to include the hash references of previous blocks in each subsequent block. A transaction that has been included in the blockchain is said to have $n$ confirmations if $n$ is the number of blocks that have been added on top of it. It is always advised to wait at least 6 confirmation blocks (roughly one hour) before considering a transaction persistent.
\subsection{Producing a block}
A node that decides to actively participate by creating new blocks is called a \textit{miner}. A miner gathers the maximum number of broadcast transactions that can fit in a block (whose size is fixed) and starts trying several \textit{nonces} in combination with the block data to produce a hash whose value has some specific characteristics. More precisely, in order for a block to be valid, the SHA-256 hash of the block's header must be lower than the current \textit{target}, which is a 256-bit number that all the Bitcoin clients share. To increase the chances for a transaction to be included in a block, users pay a fee to the miners in proportion to the size of the transaction. As shown in \cite{Gurcan2017}, fairness with respect to the probability for a transaction to be processed is not a guaranteed property of the Bitcoin protocol, and the probability for a transaction to be included in a block is proportional to the size of the fee.
Finding a block is a cryptographic puzzle, essentially a brute-force attack on the SHA-256 function, whose goal is to find, by picking nonces at random, a hash with \(d\) consecutive leading zero bits, where \(d\) is called the \textit{difficulty} and is derived from the target value in the following way:
\[ d = 8 + \frac{\log(D)}{16}\]
where
\[D = \frac{\text{max target}}{\text{target}}\]
The difficulty is recalibrated every 2016 blocks (roughly two weeks) to make sure that a block is discovered approximately every 10 minutes. Being a brute-force attack on a cryptographic hashing function, the mining operation requires a lot of computational power, and that computational power requires a lot of electrical energy, which in 2014 D. Malone and K.J. O'Dwyer estimated to be comparable to Ireland's electricity consumption \cite{Malone2014}. That is, producing a block has a high associated production cost, and indeed the whole system is designed around this trade-off in order to avoid zero-cost attacks. To incentivize the production of new blocks, there is a reward for each miner that can be obtained if and only if a block is successfully added to the blockchain: miners can include a \textit{coinbase} transaction, which generates new coins for the creator of the block; furthermore, this is the only way in which new Bitcoins are introduced into the network. Since the coinbase reward is programmed to be halved every 210,000 mined blocks, Nakamoto included transaction fees as another incentive for miners to keep mining. The incentive also protects the system from attackers: suppose an attacker is able to assemble the majority of the computational power; he would then have to choose between using this power to enable a double-spend attack or to generate new coins. It would be much more profitable for him to behave honestly and play by the rules than to destroy the currency, unless someone is interested in shutting down Bitcoin permanently (the so-called "Goldfinger attack" \cite{Bonneau2015}).
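As an illustration, the following Python sketch evaluates the difficulty formula above. It is only a toy computation: the maximum target constant is the customary difficulty-1 target of the Bitcoin main network, and since the text does not specify the base of the logarithm, the natural logarithm is assumed here:
\begin{verbatim}
import math

MAX_TARGET = 0xFFFF * 2 ** 208     # Bitcoin's difficulty-1 target

def difficulty(target):
    D = MAX_TARGET / target        # D = max target / target
    return 8 + math.log(D) / 16    # d = 8 + log(D) / 16

print(difficulty(MAX_TARGET))             # 8.0: easiest possible puzzle
print(difficulty(MAX_TARGET // 2 ** 32))  # smaller target, larger d
\end{verbatim}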
\newpage
\subsection{Ownership}
Bitcoin relies on cryptography to allow users to perform operations in a completely secure and private way, making use of digital signatures to prove one's identity and of hash functions to verify the integrity of the data in the system.
There is no concept of "account" in Bitcoin. \textit{Ownership} of the currency means knowledge of the private key that is capable of redeeming a certain output. Bitcoin transactions express the transfer of value from one Bitcoin address to another. A Bitcoin address is the partial hash of a user's public key and is represented as a string of 26-35 alphanumeric characters.
Each transaction output has a signature validation routine (a script) that verifies the ownership of those who claim it. This routine is called \textit{scriptPubKey}, also known as "pay-to-pub-key-hash" or P2PKH. It is a script written in the Bitcoin scripting language, a Forth-like stack-based language that includes several built-in operations called \textit{opcodes}.
ECDSA is the protocol used by the Bitcoin network to provide a pair of private and public keys to every user. Every Bitcoin address is the result of hashing the public portion of an ECDSA private/public key pair. The amount of Bitcoins that resides at an address can be spent only if a user can prove with his signature that he is the owner of the Bitcoin address. To redeem the coins of an address, the owner proves his identity by supplying the signature and the public key of the \textit{scriptSig} to the \textit{scriptPubKey} of a previous transaction.
Given the following \textit{scriptSig} and \textit{scriptPubKey}:
\begin{verbatim}
scriptPubKey: OP_DUP OP_HASH160 <pubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
scriptSig: <sig> <pubKey>
\end{verbatim}
the script will execute in the following way\footnote{from the Bitcoin wiki. https://en.bitcoin.it/wiki/Transaction\#Pay-to-PubkeyHash}:
\begin{center}
\begin{tabulary}{\textwidth}{|L|C|L|}
\hline
\textbf{Stack} & \textbf{Script} & \textbf{Description} \\ \hline
Empty. & <sig><pubKey> OP\_DUP OP\_HASH160 <pubKeyHash> OP\_EQUALVERIFY OP\_CHECKSIG & scriptSig and scriptPubKey are combined. \\ \hline
<sig> <pubKey> & OP\_DUP OP\_HASH160 <pubKeyHash> OP\_EQUALVERIFY OP\_CHECKSIG & Constants are added to the stack. \\ \hline
<sig> <pubKey> <pubKey> & OP\_HASH160 <pubKeyHash> OP\_EQUALVERIFY OP\_CHECKSIG & Top stack item is duplicated. \\ \hline
<sig> <pubKey> <pubHashA> & <pubKeyHash> OP\_EQUALVERIFY OP\_CHECKSIG & Top stack item is hashed. \\ \hline
<sig> <pubKey> <pubHashA> <pubKeyHash> & OP\_EQUALVERIFY OP\_CHECKSIG & Constant added. \\ \hline
<sig> <pubKey> & OP\_CHECKSIG & Equality is checked between the top two stack items. \\ \hline
true & Empty. & Signature is checked for top two stack items. \\
\hline
\end{tabulary}
\end{center}
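To make the evaluation in the table concrete, the following Python sketch simulates the stack machine for this script. It is a toy model only: real Script evaluation operates on byte strings and supports many more opcodes, \texttt{check\_sig} is a stand-in for actual ECDSA verification, and the \texttt{ripemd160} algorithm may be unavailable in some Python builds:
\begin{verbatim}
import hashlib

def hash160(data):
    # HASH160 = RIPEMD-160(SHA-256(data)), as computed by OP_HASH160
    return hashlib.new('ripemd160',
                       hashlib.sha256(data).digest()).digest()

def run_p2pkh(sig, pub_key, pub_key_hash, check_sig):
    stack = [sig, pub_key]              # scriptSig pushes <sig> <pubKey>
    stack.append(stack[-1])             # OP_DUP
    stack.append(hash160(stack.pop()))  # OP_HASH160
    stack.append(pub_key_hash)          # push <pubKeyHash>
    if stack.pop() != stack.pop():      # OP_EQUALVERIFY
        return False
    pub, s = stack.pop(), stack.pop()   # OP_CHECKSIG pops key and sig
    return check_sig(s, pub)

pub = b'toy public key'
print(run_p2pkh(b'toy signature', pub, hash160(pub),
                lambda s, p: True))     # True with the stand-in verifier
\end{verbatim}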
\subsection{Scaling issue}
Bitcoin has two main limitations that are strongly correlated: the throughput of processed transactions and the disk space occupied by the blockchain. The correlation is trivial: the more transactions a block holds, the higher the throughput, and the larger the block. Balancing this trade-off is crucial to the cryptocurrency's success or failure: either Bitcoin stays small in size but is unable to process more than a few transactions, or it becomes capable of processing a great number of transactions at the price of a huge increase in blockchain size.
The scaling debate has its roots in the very first days of Bitcoin's life: the maximum block size, initially set to 1MB, was complemented by a soft-cap of 250kB for newly created blocks, later raised to 750kB. Proposals for a further increase of the maximum block size up to 8MB were made and discussed \cite{BlocksizeIncrease2015}, but were found too risky. Some of these debates resulted in hard-forks, permanent splits that occur when the underlying protocol changes; contrary to soft-forks, these changes are not backward compatible, giving miners who don't adhere to the protocol changes the possibility to form alternative payment systems that are generally called \textit{altcoins}.
By the end of the first half of 2015 the blocks were 40\% full. The risk of a saturated network was real, and a number of suggestions came from the Bitcoin developers (among them a proposal for a 4MB block size and a 12-second latency for each block \cite{Croman}, which are the settings that theoretically maximize the \textit{effective throughput}). However, adoption projections show a dramatic situation in a future where 7 billion people perform 2 blockchain transactions per day: by that time each block would be 24GB in size, and the protocol would produce 3.5TB of data per day, about 1.27PB per year \cite{SFBitcoinDevs2015}. Bigger blocks also mean that fewer nodes would be capable of storing this information, hence the system would probably tend to become more and more centralized.
\section{Micropayments network}
Micropayments are financial transactions of small amounts of money that usually take place on the internet. These payment systems address the problem of transaction fees and processing time, making it easier and cheaper for one user to transfer small amounts of funds to another user without incurring high fees or being slowed down by a financial institution's verifications. There is as yet no formal definition of "small amount of money": transaction processors like Visa enable fund transfers from 10\$\footnote{https://usa.visa.com/dam/VCOM/download/merchants/minimum-transactions-credit-card.pdf}, while PayPal and others allow for a minimum of 0.01\$.
Micropayment systems were initially designed in 1960 \cite{Nelson1960}, even before the advent of the internet, as a way to pay copyright holders their fair share. In the late 90s there were many attempts at building micropayment networks, especially from IBM and Compaq, which had their own micropayment divisions. None of their solutions (along with dozens of other payment systems) ever became a micropayment standard. Nowadays micropayment functionality is implemented in most online wallets (PayPal, Swish, Vipps, MobilePay, Postepay) and is widely used in videogames, where players exchange real money for in-game currency in order to purchase virtual goods through in-game micro-transactions.
The difficulty for trusted third parties is to keep processing costs low while keeping the mediator service available in case of disputes and, at the same time, earning from the small transactions through fees. If the burden of mediating disputes can be moved from external agents to the payment network's users, the result would be near-zero transaction fees (in digital payments, fees are required to prevent dust/flood attacks).
\section{Lightning Network}
In early 2016, J. Poon and T. Dryja released a white paper proposing a decentralized layer-2 payment system for Bitcoin called the Lightning Network. The goal of the Lightning Network is to address the scaling problem not in the Bitcoin protocol itself, but by building on top of the Bitcoin system a network of peer-to-peer micro-payment channels where each transaction takes place off-chain.
A micropayment channel is a trustless relationship between two peers of the Bitcoin network, where the two parties continuously exchange coins and update their balances, deferring the broadcast of transactions until both reach an agreement over a final state. This means that an almost unlimited number of transactions can occur between two peers without hitting the blockchain (and therefore without incurring the scaling issues mentioned before): the channel balance is updated every time the two peers wish to perform a transaction, and only the final state agreed upon by the two users is published onto the blockchain.
The idea of micropayment channels is not new in the Bitcoin community and has been around since 2013 \cite{micropayments:BitcoinOrg} \cite{Micropayments:Bitcoinj}. The intuition behind the Lightning Network was to connect channels to each other, allowing a user who wants to send a payment to any other peer in the network to ask other users to cooperate, in a trustless way, to route the payment to the desired receiver.
\subsection{Bi-Directional Payment Channels}
The building blocks of the Lightning Network are bi-directional payment channels, channels in which a payer sends money to a payee and vice versa. Building such channels requires leveraging the timestamp property of the blockchain and, furthermore, a special redeem script along with some particular op-codes. The core ingredients of a bi-directional channel are:
\begin{itemize}
\item \textit{2-of-2 Multisignature addresses}: Bitcoin addresses that require the verification of two signatures instead of just one (as seen in the scriptPubKey example). A real-world example of a 2-of-2 multisignature account is a husband-and-wife savings account where both signatures are required to spend the funds, preventing one spouse from spending without the other's consent. More generally, Bitcoin supports $m$-of-$n$ multisignature addresses, meaning that spending from such an address requires $m$ signatures out of $n$.
\item \textit{Timelocks}: special smart-contract primitives that prevent outputs from being spent until a specified block height. Timelocks can be either \textit{absolute} or \textit{relative}:
\begin{itemize}
\item absolute timelocks are identified by a special field, nLockTime, that specifies the earliest time (i.e. the block height) at which a transaction may be added to a valid block. The op-code OP\_CHECKLOCKTIMEVERIFY \cite{CHECKLOCKTIMEVERIFY} (abbrv. CLTV) allows transaction outputs to be encumbered by the timelock.
\item relative timelocks are specified by the nSequence field and allow an input to specify the earliest time it can be added to a block, based on how long ago the output being spent by that input was included in the blockchain. OP\_CHECKSEQUENCEVERIFY \cite{CHECKSEQUENCEVERIFY} provides for relative timelocks the same feature that OP\_CHECKLOCKTIMEVERIFY provides for absolute timelocks.
\end{itemize}
\item \textit{SIGHASH\_NOINPUT}: a special flag of the Bitcoin scripting language that allows dynamic binding of unspent transactions. It modifies the digest algorithm used in signature creation and verification by removing the dependency on the previous output commitment. With SIGHASH\_NOINPUT\footnote{https://github.com/bitcoin/bips/blob/master/bip-0118.mediawiki\#backward-compatibility} it is possible to have chains of \textit{floating transactions}, i.e. transactions that are not yet published on the blockchain but are related to each other.
\end{itemize}
\subsection{Opening a micro-payment channel}
To open a new bi-directional payment channel, the two parties must initially fund the channel with the maximum amount of Bitcoin that they want to be able to spend inside it, by allocating funds to a 2-of-2 multisignature address, i.e. an address that needs the cooperation (that is, the signatures) of both parties to be used in further transactions. This is the \textit{Funding Transaction}.
Before signing and broadcasting this transaction, the two parties sign and exchange a new transaction, called the \textit{Commitment Transaction}, which spends the output of the funding transaction and whose goal is to express the current channel balance. The very first commitment transaction simply returns the money to its legitimate owners: this is done in order to avoid hostage scenarios in which one of the parties is uncooperative and willing to block any future operation on the funding transaction. For the sake of clarity, say Alice and Bob want to open a new channel:
\begin{figure}[]
\includegraphics[width=9.5cm, keepaspectratio]{funding}
\centering
\caption{The funding transaction is the only one that gets published to the Bitcoin network. The disclosure of CT1 would refund Alice and Bob their initial inputs.}
\end{figure}
\begin{enumerate}
\item Either Alice or Bob makes a new funding transaction F allocating funds to a 2-of-2 multisignature address. The broadcast of the transaction is deferred (it is not yet broadcast to the Bitcoin network).
\item Alice prepares a commitment transaction CT\textsubscript{1} that spends from F (which does not yet exist inside the blockchain) and returns the money to herself and to Bob. Bob does the same.
\item Alice signs her commitment transaction, CT\textsubscript{A1}, and gives it to Bob. Bob signs his transaction and gives it to Alice (CT\textsubscript{B1}).
\item Now Alice signs the transaction Bob gave her, obtaining CT\textsubscript{AB1}. Bob likewise signs the transaction Alice gave him. Now they both have a refund that ensures that they will be able to get their money back from the 2-of-2 address of the funding transaction if the other party is uncooperative.
\item Now it is safe to broadcast the funding transaction onto the blockchain, because both Alice and Bob have CT\textsubscript{AB1}, which permits either of the two to quit the channel anytime they want and get their money back. By the time F gets mined into a block, a channel is open between Alice and Bob.
\end{enumerate}
The process is completely asymmetric: each user involved creates their own transaction, signs it and then exchanges it with the other user. In the end, Alice and Bob hold two different transactions, with different transaction IDs, performing the very same operation. Recently a new protocol for the Lightning Network channel update mechanism named \textit{eltoo}\cite{Decker} has been proposed, addressing the asymmetric nature of the process and bringing other improvements to the security of the channels.
Once the opening process is done, the channel is ready for use. The commitment transaction represents the current channel balance, that is, the output addresses in CT1 are Alice's and Bob's payouts. In the naive version of this protocol, if Alice and Bob wish to update their balance, say Alice wants to give Bob 0.1 BTC, they can simply create a new commitment transaction CT2 whose outputs allocate 0.6 BTC to Bob and 0.4 BTC to Alice, and rebind the funding transaction outputs to the new commitment transaction input. However, this approach has a serious flaw: there is no mechanism that prevents a malicious Alice from restoring her previous balance by publishing CT1 instead of CT2 onto the blockchain, allowing a double-spend attack in an off-chain scenario.
\begin{figure}
\includegraphics[width=\linewidth]{broken_ct}
\centering
\caption{To update the balance, Alice creates a commitment transaction CT2 whose outputs reflect the payment she wishes to perform. In this broken implementation there is no way Bob can react to Alice publishing CT1 after getting paid by her.}
\end{figure}
\subsection{Fidelity Bonds}
It is necessary then to enforce a condition that prevents older commitment transactions from being broadcast. There are some tools in Bitcoin that allow replacing a transaction that has yet to be mined with a newer, higher-fee transaction\footnote{https://github.com/bitcoin/bips/blob/master/bip-0125.mediawiki}. Yet, under the hypothesis that Bitcoin will be used by billions of people, this on-chain replacement mechanism doesn't scale well because of the scaling issues mentioned before; therefore the revocation of a commitment transaction must be enforced off-chain.
\textit{Fidelity Bonds} are a form of insurance protection against violations of the terms of an agreement, preventing malicious behavior by any party who adheres to them. The contract terms for a Lightning Network channel are that both parties involved in the channel agree on the latest commitment transaction. Any attempt at broadcasting an older commitment transaction should result in the loss of all funds in favor of the other party.
\begin{figure}
\centering
\begin{subfigure}{0.4\textwidth}
\centering
\includegraphics[width=.7\linewidth]{rsmc_a}
\caption{Alice's perspective}
\end{subfigure}
\begin{subfigure}{0.4\textwidth}
\centering
\includegraphics[width=.7\linewidth]{rsmc_b}
\caption{Bob's perspective}
\end{subfigure}
\caption{Each user holds a different RSMC. Alice can broadcast the red transactions, Bob the blue ones. If Alice wishes to broadcast CT\textsubscript{1A}, then she must wait \(n\) confirmation blocks before she can broadcast her refund.}
\label{rsmc}
\end{figure}
The enforcement of these fidelity bonds is done with the help of two special outputs: the \textit{Revocable Sequence Maturity Contract} (abbrv. RSMC) and the \textit{Breach Remedy} (abbrv. BR). RSMCs are transactions encumbered by the nSequence parameter, an integer describing how many blocks, starting from the parent transaction's confirmation, must be waited before the transaction gets mined. Each commitment transaction now has two outputs: an encumbered refund toward the owner of the CT and an immediate refund output toward the other user in the channel.
Figure \ref{rsmc} highlights each user's point of view with respect to their floating transaction chain, according to the asymmetric nature of the protocol. If either party of the channel wishes to broadcast their commitment transaction and redeem the coins, then the RSMC forces them to wait a number of confirmation blocks before being able to get their money back.
Encumbering a transaction is required to allow a user to take measures against misbehavior of the counterpart after the two users have agreed on the latest channel balance. When a new pair of commitment transactions is agreed upon, the previous commitment transactions have to be invalidated. The invalidation routine consists of two new transactions, called \textit{Breach Remedies}, each spending from a party's old commitment transaction and transferring all the funds from one user to the other whenever an older state is published onto the blockchain. The idea is that, fearing such a strong punishment, a user would prefer to simply delete old commitment transactions rather than risk losing their funds.
\begin{figure}
\centering
\includegraphics[width=.7\linewidth]{breach_remedy_a}
\caption{Alice's perspective. The encumbered RSMC gives Bob time to publish the Breach Remedy and the refund to himself. Alice is encumbered by \textit{nSequence} blocks before she is able to do anything.}
\end{figure}
To close a channel, either a commitment transaction is published onto the blockchain, resulting in one of the two parties waiting \(n\) blocks before being able to get their money back, or the two agree on the final state of the balance, namely a \textit{Settlement Transaction}, whose outputs are not encumbered. It is important to notice that if the two parties remain cooperative, the channel can remain open indefinitely.
Recently, a new paper by Burchert C., Decker C. and Wattenhofer R. proposed an alternative to the one-to-one channel paradigm of the Lightning Network, called the "Channel Factory" \cite{Burchert2017}, which should decrease even further the number of on-chain transactions required for a fast-payment system to operate: instead of having a single partner cooperating in the building of a channel, a user is assigned to a collaborating group where funds can be allocated dynamically among the group participants. The two implementations, however, are not mutually exclusive, and while the channel factory reduces the number of on-chain transactions, the risk posed by uncooperative parties rises with the number of parties involved in a factory.
\subsection{Hashed Timelock Contracts}
The missing piece that allows building a network of channels is the \textit{Hashed Time-Lock Contract}, a special contract that allows maintaining a global state across multiple channels based on time commitments (by using the nLockTime parameter) and pre-image disclosures. An HTLC is a revocable contract between two parties, enforceable via the blockchain, whose terms are the following:
\begin{enumerate}
\item if Bob can present Alice with an input R that hashes to the known hash H within 3 days, then the contract has to be settled and Alice pays Bob the agreed sum.
\item after three days without any disclosure of R, the contract is considered void and the process must be invalidated.
\item the parties involved must pay out according to the terms of the contract and close the contract early as soon as both agree.
\item fidelity bonds are triggered when the terms of the contract are violated and have to be paid to the non-violating counterparty.
\end{enumerate}
Essentially an HTLC is an output in the commitment transaction with two possible spending paths: one path is taken if Bob can produce R, the other when a fixed time has passed (we will consider 3 days in further examples). The redeem script is the following:
\begin{center}
\begin{tabulary}{\textwidth}{|L|C|}
\hline
OP\_IF & \\
 & OP\_HASH160 <Hash160(R)> OP\_EQUALVERIFY \\
 & 2 <Alice2> <Bob2> OP\_CHECKMULTISIG \\ \hline
OP\_ELSE & \\
 & 2 <Alice1> <Bob1> OP\_CHECKMULTISIG \\ \hline
OP\_ENDIF & \\
\hline
\end{tabulary}
\end{center}
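The following toy Python sketch mirrors the two spending paths of the script above. The names are hypothetical, plain SHA-256 stands in for \texttt{OP\_HASH160}, and the 2-of-2 multisignature checks are omitted:
\begin{verbatim}
import hashlib

def can_spend_htlc(path, now, created_at, locktime_days,
                   preimage=None, expected_hash=None):
    if path == "delivery":   # OP_IF branch: Bob reveals R with H(R) == H
        return hashlib.sha256(preimage).digest() == expected_hash
    if path == "timeout":    # OP_ELSE branch: refund after the locktime
        return now >= created_at + locktime_days * 86400
    return False

R = b"random secret R"
H = hashlib.sha256(R).digest()
print(can_spend_htlc("delivery", 0, 0, 3,
                     preimage=R, expected_hash=H))   # True
print(can_spend_htlc("timeout", now=86400, created_at=0,
                     locktime_days=3))               # False: too early
\end{verbatim}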
\begin{figure}
\centering
\includegraphics[width=7cm]{htlc_a}
\caption{Alice's perspective. The HTLC represents a pending state in which there is the intent to deliver 0.1 BTC to Bob.}
\label{htlc_a}
\end{figure}
If Alice wishes to send 0.1 BTC to Bob using an HTLC, the two agree on building a new commitment transaction with 3 outputs, where the first two are refunds for Alice and Bob (0.4 BTC and 0.5 BTC) while the last one is the 0.1 BTC HTLC pending payment from Alice to Bob, which will either go to Bob if Bob knows R, or refund Alice her 0.1 BTC once 3 days have elapsed. The invalidation process for an HTLC works the same as for commitment transactions, via HTLC Revocable Sequence Maturity Contracts and HTLC Breach Remedies. Once Bob discloses R, the two can cooperate to update the channel balance in a new commitment transaction in order to keep the channel open, or they can simply push the entire transaction chain onto the blockchain (thus closing the channel).
\subsection{A Network of Channels}
The use of HTLCs is not intended for a single channel only. Instead, it is now possible to create a global state across multiple channels, allowing the construction of the multi-hop payment network that is the Lightning Network. HTLCs allow for time-constrained transactions that force the payee to prove ownership and oblige the payers to transfer funds before the locktime runs out, in what is known as \textit{chain delegation}: the burden of transferring funds between two users who don't belong to the same channel is delegated to the participants of the path that connects the two.
Let's say Alice wants to pay David and they don't belong to the same channel. Between Alice and David there are Bob and Carol. So Alice can \textit{delegate} Bob to pay David, Bob can do the same with Carol, and lastly Carol pays David. With HTLCs these tasks can be set up so that each payment step can occur before the next is processed, in a cascading fashion, thanks to the nLockTime parameter. Knowing the number of hops required to reach David, Alice uses that information as the HTLC expiry for her first hop with Bob (say 3 days). Bob will decrement his HTLC expiry by one day with Carol, and Carol will have a 1-day HTLC expiry with David.
\begin{figure}[h]
\centering
\includegraphics[width=0.7\linewidth]{decrementing_timelocks}
\end{figure}
In this way the participants perform their task knowing that the step before has already been performed. Say Alice wants to pay David: David picks a random number R, produces the hash H and gives H to Alice. Alice tells Bob that if he can show her R then he will be able to pull funds from her. Bob does the same with Carol, and Carol with David. Now on day 1, David knows R, so he can pull the funds from Carol. On day 2, Carol shows R to Bob so she can pull funds from Bob, and finally on day 3 Bob shows R to Alice and is able to pull funds from her as well. Every pair of nodes settles the new balance with their neighbors in a new commitment transaction, and thus the payment is finally complete.
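The cascade can be simulated in a few lines of Python (hypothetical names; real nodes exchange signed transactions rather than function calls). Settlement runs backwards along the path, with the pre-image propagating from David toward Alice:
\begin{verbatim}
import hashlib

# (payer, payee, HTLC expiry in days) along the path Alice -> David
hops = [("Alice", "Bob", 3), ("Bob", "Carol", 2), ("Carol", "David", 1)]

R = b"secret picked by David"
H = hashlib.sha256(R).digest()   # David hands H to Alice in advance

for payer, payee, expiry in reversed(hops):
    assert hashlib.sha256(R).digest() == H  # payee proves knowledge of R
    print("day %d: %s pulls funds from %s" % (expiry, payee, payer))
\end{verbatim}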
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\linewidth]{HTLC_broken_channel}
\caption{In case Carol sees that Bob is unresponsive, she can broadcast the current Commitment Transaction alongside the HTLC and the value R. Bob can later see the value R from the blockchain and fulfill his contract with Alice.}
\end{figure}
Suppose now there are problems between Bob and Carol during the payment, with Bob becoming unresponsive (i.e. a broken link). Since Carol cannot settle a new commitment transaction with her updated balance, she is forced to publish her chain of floating transactions, along with the HTLC of the current operation, to the blockchain, eventually disclosing the value of R. Knowing the value of R, Alice can consider the payment done. If Bob manages to return online before the HTLC with Alice expires, he will be able to observe R from the transaction that Carol pushed onto the blockchain, and thus will pull funds from Alice as well (as long as the contract locktime has not elapsed).
Because of their time-dependent nature, nodes and channels are not intended to be considered static features of the network: a channel can be suddenly closed whenever its locktime runs out, and it is possible that some nodes are inactive during some periods of the day (e.g. nodes shutting down during nighttime), so there is a need to capture this property in a model that can describe such dynamic behavior.
\end{document} |
Formal statement is: lemma dist_pos_lt: "x \<noteq> y \<Longrightarrow> 0 < dist x y" Informal statement is: If $x \neq y$, then $0 < \mathrm{dist}(x, y)$. |
module PacketStruct
import IdrisNet.PacketLang
import Data.So
%access public
simpleStruct : PacketLang
simpleStruct = do
cstring
lstring 5
p_either (bits 32) (lstring 4)
listn 3 cstring
b1 <- bool
b2 <- bool
prop (prop_or (prop_bool b2)
(prop_bool b1))
myBoundedInt : Bounded 32
myBoundedInt = BInt 9001 Oh
simpleStructInstance : (mkTy simpleStruct)
simpleStructInstance = ("hello" ##
"world" ##
(Left myBoundedInt) ##
["hello", "you", "dears"] ##
True ##
False ##
(Right Oh))
simpleResponse : PacketLang
simpleResponse = do
cstring
cstring
simpleResponseInstance : (mkTy simpleResponse)
simpleResponseInstance = "Got" ## "It!"
|
Require Import Psatz.
Require Import String.
Require Import Program.
Require Export Complex.
Require Export Matrix.
Require Import List.
(*
Require Export CoRN.fta.FTA.
Require Export CoRN.coq_reals.Rreals_iso.
*)
(* polynomial represented by a list of coefficients and a degree *)
Definition Polynomial (n : nat) := list (Complex.C).
Definition WF_Poly {n : nat} (p : Polynomial n) :=
length p = (S n).
Definition eval_P (n : nat) (p : Polynomial n) (x : Complex.C):=
Csum (fun i => (nth i p C0)* x^i) (S n).
(*****************************************************)
(* First, we show that our C is the same as CoRN's C *)
(*****************************************************)
(*
Definition CtoCC (c : Complex.C) : CC_set := Build_CC_set (RasIR (fst c)) (RasIR (snd c)).
Definition CCtoC (c : CC_set) : Complex.C := (IRasR (Re c), IRasR (Im c)).
Lemma CasCCasC_id : forall (x : Complex.C), (CCtoC (CtoCC x) = x).
Proof. intros.
unfold CtoCC, CCtoC.
simpl.
do 2 rewrite RasIRasR_id.
rewrite surjective_pairing.
easy.
Qed.
(*
Lemma CCasCasCC_id : forall (x : CC_set), (CtoCC (CCtoC x) = x).
Proof. intros.
unfold CtoCC, CCtoC.
simpl.
do 2 rewrite RasIRasR_id.
rewrite surjective_pairing.
easy.
Qed. *)
*)
Theorem Fundamental_Theorem_Algebra : forall {n : nat} (p : Polynomial n),
(n > 0)%nat -> (exists c : (R * R), eval_P n p c = C0).
Proof. Admitted.
|
lemma (in semiring_of_sets) sets_Collect_conj: assumes "{x\<in>\<Omega>. P x} \<in> M" "{x\<in>\<Omega>. Q x} \<in> M" shows "{x\<in>\<Omega>. Q x \<and> P x} \<in> M" |
\section{Input Device}\label{sec:Device Types / Input Device}
The virtio input device can be used to create virtual human interface
devices such as keyboards, mice and tablets. An instance of the virtio
device represents one such input device. Device behavior mirrors that
of the evdev layer in Linux, making pass-through implementations on top
of evdev easy.
This specification defines how evdev events are transported
over virtio and how the set of supported events is discovered by a driver.
It does not, however, define the semantics of input events as this is
dependent on the particular evdev implementation. For the list of events
used by Linux input devices, see
\href{https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/input-event-codes.h}{include/uapi/linux/input-event-codes.h}
in the Linux source tree.
\subsection{Device ID}\label{sec:Device Types / Input Device / Device ID}
18
\subsection{Virtqueues}\label{sec:Device Types / Input Device / Virtqueues}
\begin{description}
\item[0] eventq
\item[1] statusq
\end{description}
\subsection{Feature bits}\label{sec:Device Types / Input Device / Feature bits}
None.
\subsection{Device configuration layout}\label{sec:Device Types / Input Device / Device configuration layout}
Device configuration holds all information the guest needs to handle
the device, most importantly the events which are supported.
\begin{lstlisting}
enum virtio_input_config_select {
VIRTIO_INPUT_CFG_UNSET = 0x00,
VIRTIO_INPUT_CFG_ID_NAME = 0x01,
VIRTIO_INPUT_CFG_ID_SERIAL = 0x02,
VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03,
VIRTIO_INPUT_CFG_PROP_BITS = 0x10,
VIRTIO_INPUT_CFG_EV_BITS = 0x11,
VIRTIO_INPUT_CFG_ABS_INFO = 0x12,
};
struct virtio_input_absinfo {
le32 min;
le32 max;
le32 fuzz;
le32 flat;
le32 res;
};
struct virtio_input_devids {
le16 bustype;
le16 vendor;
le16 product;
le16 version;
};
struct virtio_input_config {
u8 select;
u8 subsel;
u8 size;
u8 reserved[5];
union {
char string[128];
u8 bitmap[128];
struct virtio_input_absinfo abs;
struct virtio_input_devids ids;
} u;
};
\end{lstlisting}
To query a specific piece of information the driver sets
\field{select} and \field{subsel} accordingly, then checks \field{size}
to see how much information is available. \field{size} can be
zero if no information is available. Strings do not include a
NUL terminator. Related evdev ioctl names are provided for reference;
a sketch of this query sequence follows the list below.
\begin{description}
\item[VIRTIO_INPUT_CFG_ID_NAME]
\field{subsel} is zero.
Returns the name of the device, in \field{u.string}.
Similar to EVIOCGNAME ioctl for Linux evdev devices.
\item[VIRTIO_INPUT_CFG_ID_SERIAL]
\field{subsel} is zero.
Returns the serial number of the device, in \field{u.string}.
\item[VIRTIO_INPUT_CFG_ID_DEVIDS]
\field{subsel} is zero.
Returns ID information of the device, in \field{u.ids}.
Similar to EVIOCGID ioctl for Linux evdev devices.
\item[VIRTIO_INPUT_CFG_PROP_BITS]
\field{subsel} is zero.
Returns input properties of the device, in \field{u.bitmap}.
Individual bits in the bitmap correspond to INPUT_PROP_*
constants used by the underlying evdev implementation.
Similar to EVIOCGPROP ioctl for Linux evdev devices.
\item[VIRTIO_INPUT_CFG_EV_BITS]
\field{subsel} specifies the event type using EV_*
constants in the underlying evdev implementation. If
\field{size} is non-zero the event type is supported and
a bitmap of supported event codes is returned in \field{u.bitmap}.
Individual bits in the bitmap correspond to
implementation-defined input event codes, for example keys
or pointing device axes.
Similar to EVIOCGBIT ioctl for Linux evdev devices.
\item[VIRTIO_INPUT_CFG_ABS_INFO]
\field{subsel} specifies the absolute axis using ABS_*
constants in the underlying evdev implementation.
Information about the axis will be returned in \field{u.abs}.
Similar to EVIOCGABS ioctl for Linux evdev devices.
\end{description}
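The following is a toy driver-side model of the select/subsel query sequence described above. It is only an illustrative sketch in Python pseudocode; the class and helper names are hypothetical and not part of any real driver API:
\begin{lstlisting}
class MockConfigSpace:
    """Toy stand-in for the device configuration space."""
    def __init__(self, entries):
        self.entries = entries   # {(select, subsel): payload bytes}
        self.select = 0
        self.subsel = 0
    def payload(self):
        return self.entries.get((self.select, self.subsel), b'')

VIRTIO_INPUT_CFG_ID_NAME = 0x01

def query(cfg, select, subsel):
    # The driver sets both select and subsel, then checks size.
    cfg.select, cfg.subsel = select, subsel
    data = cfg.payload()
    if len(data) == 0:           # size == 0: combination not supported
        return None
    return data                  # strings carry no NUL terminator

cfg = MockConfigSpace({(VIRTIO_INPUT_CFG_ID_NAME, 0): b'virtio-keyboard'})
print(query(cfg, VIRTIO_INPUT_CFG_ID_NAME, 0))   # b'virtio-keyboard'
\end{lstlisting}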
\subsection{Device Initialization}\label{sec:Device Types / Input Device / Device Initialization}
\begin{enumerate}
\item The device is queried for supported event types and codes.
\item The eventq is populated with receive buffers.
\end{enumerate}
\drivernormative{\subsubsection}{Device Initialization}{Device Types / Input Device / Device Initialization}
A driver MUST set both \field{select} and \field{subsel} when querying
device configuration, in any order.
A driver MUST NOT write to configuration fields other than \field{select}
and \field{subsel}.
A driver SHOULD check the \field{size} field before accessing the
configuration information.
\devicenormative{\subsubsection}{Device Initialization}{Device Types / Input Device / Device Initialization}
A device MUST set the \field{size} field to zero if it doesn't support a
given \field{select} and \field{subsel} combination.
\subsection{Device Operation}\label{sec:Device Types / Input Device / Device Operation}
\begin{enumerate}
\item Input events such as press and release events for keys and
buttons, and motion events for pointing devices are sent from
the device to the driver using the eventq.
\item Status feedback such as keyboard LED updates are sent from the
driver to the device using the statusq.
\item Both queues use the same virtio_input_event struct.
\field{type}, \field{code} and \field{value} are filled according to
the Linux input layer (evdev) interface, except that the fields are
in little endian byte order whereas the evdev ioctl interface uses
native endian-ness.
\end{enumerate}
\begin{lstlisting}
struct virtio_input_event {
le16 type;
le16 code;
le32 value;
};
\end{lstlisting}
\drivernormative{\subsubsection}{Device Operation}{Device Types / Input Device / Device Operation}
A driver SHOULD keep the eventq populated with buffers. These buffers
MUST be device-writable and MUST be at least the size of
struct virtio_input_event.
Buffers placed into the statusq by a driver MUST be at least the size
of struct virtio_input_event.
A driver SHOULD ignore eventq input events it does not recognize. Note
that evdev devices generally maintain backward compatibility by sending
redundant events and relying on the consuming side using only the events
it understands and ignoring the rest.
\devicenormative{\subsubsection}{Device Operation}{Device Types / Input Device / Device Operation}
A device MAY drop input events if the eventq does not have enough
available buffers. It SHOULD NOT drop individual input events if
they are part of a sequence forming one input device update. For
example, a pointing device update typically consists of several
input events, one for each axis, and a terminating EV_SYN event.
A device SHOULD either buffer or drop the entire sequence.
|
lemma topological_tendstoD: "(f \<longlongrightarrow> l) F \<Longrightarrow> open S \<Longrightarrow> l \<in> S \<Longrightarrow> eventually (\<lambda>x. f x \<in> S) F" |
(* Author: Gertrud Bauer
*)
section\<open>Plane Graph Enumeration\<close>
theory Plane
imports Enumerator FaceDivision RTranCl
begin
definition maxGon :: "nat \<Rightarrow> nat" where
"maxGon p \<equiv> p+3"
declare maxGon_def [simp]
definition duplicateEdge :: "graph \<Rightarrow> face \<Rightarrow> vertex \<Rightarrow> vertex \<Rightarrow> bool" where
"duplicateEdge g f a b \<equiv>
2 \<le> directedLength f a b \<and> 2 \<le> directedLength f b a \<and> b \<in> set (neighbors g a)"
primrec containsUnacceptableEdgeSnd ::
"(nat \<Rightarrow> nat \<Rightarrow> bool) \<Rightarrow> nat \<Rightarrow> nat list \<Rightarrow> bool" where
"containsUnacceptableEdgeSnd N v [] = False" |
"containsUnacceptableEdgeSnd N v (w#ws) =
(case ws of [] \<Rightarrow> False
| (w'#ws') \<Rightarrow> if v < w \<and> w < w' \<and> N w w' then True
else containsUnacceptableEdgeSnd N w ws)"
primrec containsUnacceptableEdge :: "(nat \<Rightarrow> nat \<Rightarrow> bool) \<Rightarrow> nat list \<Rightarrow> bool" where
"containsUnacceptableEdge N [] = False" |
"containsUnacceptableEdge N (v#vs) =
(case vs of [] \<Rightarrow> False
| (w#ws) \<Rightarrow> if v < w \<and> N v w then True
else containsUnacceptableEdgeSnd N v vs)"
definition containsDuplicateEdge :: "graph \<Rightarrow> face \<Rightarrow> vertex \<Rightarrow> nat list \<Rightarrow> bool" where
"containsDuplicateEdge g f v is \<equiv>
containsUnacceptableEdge (\<lambda>i j. duplicateEdge g f (f\<^bsup>i\<^esup>\<bullet>v) (f\<^bsup>j\<^esup>\<bullet>v)) is"
definition containsDuplicateEdge' :: "graph \<Rightarrow> face \<Rightarrow> vertex \<Rightarrow> nat list \<Rightarrow> bool" where
"containsDuplicateEdge' g f v is \<equiv>
2 \<le> |is| \<and>
((\<exists>k < |is| - 2. let i0 = is!k; i1 = is!(k+1); i2 = is!(k+2) in
(duplicateEdge g f (f\<^bsup>i1 \<^esup>\<bullet>v) (f\<^bsup>i2 \<^esup>\<bullet>v)) \<and> (i0 < i1) \<and> (i1 < i2))
\<or> (let i0 = is!0; i1 = is!1 in
(duplicateEdge g f (f\<^bsup>i0 \<^esup>\<bullet>v) (f\<^bsup>i1 \<^esup>\<bullet>v)) \<and> (i0 < i1)))"
definition generatePolygon :: "nat \<Rightarrow> vertex \<Rightarrow> face \<Rightarrow> graph \<Rightarrow> graph list" where
"generatePolygon n v f g \<equiv>
let enumeration = enumerator n |vertices f|;
enumeration = [is \<leftarrow> enumeration. \<not> containsDuplicateEdge g f v is];
vertexLists = [indexToVertexList f v is. is \<leftarrow> enumeration] in
[subdivFace g f vs. vs \<leftarrow> vertexLists]"
definition next_plane0 :: "nat \<Rightarrow> graph \<Rightarrow> graph list" ("next'_plane0\<^bsub>_\<^esub>") where
"next_plane0\<^bsub>p\<^esub> g \<equiv>
if final g then []
else \<Squnion>\<^bsub>f\<in>nonFinals g\<^esub> \<Squnion>\<^bsub>v\<in>vertices f\<^esub> \<Squnion>\<^bsub>i\<in>[3..<Suc(maxGon p)]\<^esub> generatePolygon i v f g"
definition Seed :: "nat \<Rightarrow> graph" ("Seed\<^bsub>_\<^esub>") where
"Seed\<^bsub>p\<^esub> \<equiv> graph(maxGon p)"
lemma Seed_not_final[iff]: "\<not> final (Seed p)"
by(simp add:Seed_def graph_def finalGraph_def nonFinals_def)
definition PlaneGraphs0 :: "graph set" where
"PlaneGraphs0 \<equiv> \<Union>p. {g. Seed\<^bsub>p\<^esub> [next_plane0\<^bsub>p\<^esub>]\<rightarrow>* g \<and> final g}"
end
|
Set Warnings "-notation-overridden,-parsing".
From Coq Require Import Arith.Arith.
From Coq Require Import Arith.EqNat.
From Coq Require Import Init.Nat.
From Coq Require Import omega.Omega.
From Coq Require Import Lists.List.
Import ListNotations.
From PLF Require Import Maps.
From PLF Require Import Smallstep.
From PLF Require Import Types.
From PLF Require Import Imp.
Hint Constructors multi.
Module S.
Import Smallstep.
Definition stack := list nat.
Definition prog := list sinstr.
Inductive stack_step : state -> prog * stack -> prog * stack -> Prop :=
| SS_Push : forall st stk n p',
stack_step st (SPush n :: p', stk) (p', n :: stk)
| SS_Load : forall st stk i p',
stack_step st (SLoad i :: p', stk) (p', st i :: stk)
| SS_Plus : forall st stk n m p',
stack_step st (SPlus :: p', n::m::stk) (p', (m+n)::stk)
| SS_Minus : forall st stk n m p',
stack_step st (SMinus :: p', n::m::stk) (p', (m-n)::stk)
| SS_Mult : forall st stk n m p',
stack_step st (SMult :: p', n::m::stk) (p', (m*n)::stk).
Fixpoint s_compile (e : aexp) : list sinstr :=
match e with
| ANum v => [SPush v]
| AId i => [SLoad i]
| APlus a1 a2 => (s_compile a1) ++ (s_compile a2) ++ [SPlus]
| AMinus a1 a2 => (s_compile a1) ++ (s_compile a2) ++ [SMinus]
| AMult a1 a2 => (s_compile a1) ++ (s_compile a2) ++ [SMult]
end.
Theorem stack_step_deterministic : forall st,
deterministic (stack_step st).
Proof.
unfold deterministic.
intros st x y1 y2 H1 H2.
induction H1; inversion H2; reflexivity.
Qed.
Definition stack_multistep st := multi (stack_step st).
Definition compiler_is_correct_statement : Prop :=
forall (st : state) (e : aexp),
stack_multistep st (s_compile e, []) ([], [ aeval st e ]).
Theorem s_compile_aux : forall (e : aexp) (t: prog) (st : state) (stk1 : stack),
stack_multistep st (s_compile e ++ t, stk1) (t, aeval st e :: stk1).
Proof.
induction e.
- intros t st stk1. simpl. unfold stack_multistep. apply multi_R. constructor.
- intros t st stk1. simpl. unfold stack_multistep. apply multi_R. constructor.
- intros t st stk1. simpl. unfold stack_multistep.
unfold stack_multistep in IHe1. unfold stack_multistep in IHe2.
eapply multi_trans.
+ rewrite <- app_assoc. rewrite <- app_assoc. eapply IHe1.
+ eapply multi_trans.
* eapply IHe2.
* apply multi_R. constructor.
- (* same as previous case *)
intros t st stk1. simpl. unfold stack_multistep.
unfold stack_multistep in IHe1. unfold stack_multistep in IHe2.
eapply multi_trans.
+ rewrite <- app_assoc. rewrite <- app_assoc. eapply IHe1.
+ eapply multi_trans.
* eapply IHe2.
* apply multi_R. constructor.
- (* same as previous case *)
intros t st stk1. simpl. unfold stack_multistep.
unfold stack_multistep in IHe1. unfold stack_multistep in IHe2.
eapply multi_trans.
+ rewrite <- app_assoc. rewrite <- app_assoc. eapply IHe1.
+ eapply multi_trans.
* eapply IHe2.
* apply multi_R. constructor.
Qed.
Theorem compiler_is_correct : compiler_is_correct_statement.
Proof.
unfold compiler_is_correct_statement.
intros st e.
rewrite <- (app_nil_r (s_compile e)).
apply s_compile_aux.
Qed.
End S.
Module T.
Import Types.
Theorem progress : forall t T,
|- t \in T ->
value t \/ exists t', t --> t'.
Proof with auto.
intros t T HT.
induction HT...
(* The cases that were obviously values, like T_Tru and
T_Fls, were eliminated immediately by auto *)
- (* T_Test *)
right. inversion IHHT1; clear IHHT1.
+ (* t1 is a value *)
apply (bool_canonical t1 HT1) in H.
inversion H; subst; clear H.
exists t2...
exists t3...
+ (* t1 can take a step *)
inversion H as [t1' H1].
exists (test t1' t2 t3)...
- (* T_Succ *)
inversion IHHT.
+ left. apply (nat_canonical t1 HT) in H.
apply nv_scc in H. unfold value. right. assumption.
+ right. inversion H as [t1' H1]. exists (scc t1')...
- (* T_Prd *)
right. inversion IHHT; clear IHHT.
+ apply (nat_canonical t1 HT) in H.
inversion H; subst; clear H.
* exists zro...
* exists t...
+ inversion H. exists (prd x). apply ST_Prd. assumption.
- (* T_Iszro *)
right. inversion IHHT; clear IHHT.
+ apply (nat_canonical t1 HT) in H.
inversion H; subst; clear H.
* exists tru. apply ST_IszroZro.
* exists fls. apply ST_IszroScc. assumption.
+ inversion H. exists (iszro x). apply ST_Iszro. assumption.
Qed.
Theorem preservation : forall t t' T,
|- t \in T ->
t --> t' ->
|- t' \in T.
Proof with auto.
intros t t' T HT HE.
generalize dependent t'.
induction HT;
(* every case needs to introduce a couple of things *)
intros t' HE;
(* and we can deal with several impossible
cases all at once *)
try solve_by_invert.
- (* T_Test *) inversion HE; subst; clear HE.
+ (* ST_TESTTru *) assumption.
+ (* ST_TestFls *) assumption.
+ (* ST_Test *) apply T_Test; try assumption.
apply IHHT1; assumption.
- (* T_Scc *) inversion HE; subst; clear HE.
apply T_Scc. apply IHHT. assumption.
- (* T_Prd *) inversion HE; subst; clear HE.
+ apply HT.
+ inversion HT. assumption.
+ apply T_Prd. apply IHHT. assumption.
- (* T_Iszro *) inversion HE; subst; clear HE.
+ apply T_Tru.
+ apply T_Fls.
+ apply T_Iszro. apply IHHT. assumption.
Qed.
End T. |
SUBROUTINE WEIGED(BUF,LBUF)
!
! PROCESS WEIGHT (TRACE WEIGHTING, KILLING, REVERSING)
! ------- ------
!
! DOCUMENT DATE: 7 March 1991
!
! PROCESS WEIGHT WEIGHTS INDIVIDUAL TRACES BY MULTIPLYING THE ENTIRE TRACE
! BY A CONSTANT (SCALAR MULTIPLY).
! A WEIGHT OF 1. RESULTS IN NO AMPLITUDE CHANGE ON THE TRACE.
! A WEIGHT OF 0. RESULTS IN THE TRACE BEING KILLED.
! A WEIGHT OF -1. RESULTS IN THE TRACE BEING REVERSED IN POLARITY.
! ALL TRACES HAVE A DEFAULT WEIGHT OF 1. THIS MEANS THAT ONLY THOSE TRACES
! SPECIFIED BY THE USER WILL BE WEIGHTED. WEIGHTS ARE NOT SPACIALLY VARIED,
! THUS ONLY THOSE SHOTS ACTUALLY SPECIFIED WILL BE WEIGHTED.
! EACH PARAMETER LIST MUST BE TERMINATED WITH THE WORD END. THE ENTIRE SET
! OF WEIGHT PARAMETERS MUST BE TERMINATED BY THE WORD END.
!
! THE PARAMETER DICTIONARY
! --- --------- ----------
! FNO - THE FIRST SHOT (OR RP) TO APPLY THE WEIGHTS TO. SHOT (RP) NUMBERS
! MUST INCREASE MONOTONICALLY.
! PRESET=1
! LNO - THE LAST SHOT (RP) NUMBER TO APPLY THE WEIGHTS TO. LNO MUST BE
! LARGER THAN FNO IN EACH LIST AND MUST INCREASE LIST TO LIST.
! PRESET = 999999 E.G. LNO 1
! XWP - RANGE-WEIGHT-PAIRS. A LIST OF RANGE AND WEIGHT PAIRS.
! XWP MUST BE GIVEN WITH INCREASING RANGES. THE PROGRAM
! COMPUTES THE ABSOLUTE VALUE OF BOTH USER RANGES AND DATA RANGES.
! E.G. XWP 1000 3.0 2000 0. - TRACES WITH RANGES EXACTLY EQUAL TO
! 1000. WILL BE MULTIPLIED BY 3., AND TRACES WITH A RANGE OF EXACTLY
! 2000. WILL BE MULTIPLIED 0. (OR KILLED). ALL OTHER TRACES WILL NOT
! BE MULTIPLIED.
! DEFAULT=ALL 1.
! TWP - TRACE NUMBER-WEIGHT-PAIRS. A LIST OF TRACE NUMBERS (OF A SHOT OR RP)
! AND WEIGHTS (LISTED IN PAIRS). ONLY THOSE TRACES SPECIFIED WILL BE
! MULTIPLIED. TRACE NUMBERS MUST INCREASE WITHIN EACH LIST.
! E.G. TWP 4 -1. 20 0. INDICATES THAT TRACE 4 WILL BE INVERTED IN
! POLARITY AND TRACE 20 WILL BE KILLED.
! DEFAULT=ALL 1.
! WEIGHT - The multiplier for all traces of the specified shot/rp (FNO).
! The DEFAULT value is 1., which means that the weight is
! applied to shot FNO to LNO, then reset to 1. (the default value).
! DEFAULT 1. e.g. fno 1234 weight 0 end
! W - An abbreviation for WEIGHT. Equivalent to WEIGHT.
! END - TERMINATES EACH PARAMETER LIST.
!
! NOTE *****
! 1) EITHER XWP OR TWP MUST BE GIVEN.
! 2) IN ORDER TO APPLY THE SAME SET OF WEIGHTS TO ALL SHOTS (RPS), LNO
! MUST BE SET TO A VERY LARGE NUMBER. E.G. LNO 32767
! 3) A maximum of 100 TWP or XWP pairs may be given.
!
! EXAMPLE:
! PROCESS WEIGHT
! LNO 32767 XWP 1228 -1 1408 0 END
! END
! WILL REVERSE THE POLARITY OF ALL TRACES HAVING A RANGE OF 1228, AND WILL
! KILL ALL TRACES WITH A RANGE OF 1408, ON SHOTS (RPS) 1 THROUGH 32767.
!
!
! COPYRIGHTED (C) BY:
! PAUL HENKART, SCRIPPS INSTITUTION OF OCEANOGRAPHY, NOVEMBER 1980
! ALL RIGHTS RESERVED.
!
! THE PARAMETER LIST PASSED TO WEIGEX ON THE DISC LOOKS LIKE:
! WORD 1) FNO (32 BIT INTEGER)
! 2) LNO (32 BIT INTEGER)
! 3) ADDWB (32 BIT INTEGER)
! 4) NS (32 BIT INTEGER) - THE NUMBER OF WORDS IN XWP OR TWP
! 5) LTYPE (32 BIT INTEGER) - 'XWP ' OR 'TWP '
! 6) LPRINT (32 BIT INTEGER)
! 7) record weight (REAL)
! 9) - MAXXWP+NPARS) - XWP OR TWP ARRAY
!
! ARGUMENTS:
! BUF - A SCRATCH ARRAY AT LEAST 60 32 BIT WORDS LONG.
! LBUF - THE SAME ARRAY BUT THE 32 BIT INTEGER EQUIVALENT. NEEDED
! BECAUSE PRIME FORTRAN DOESN'T ALLOW EQUIVALENCING OF ARGUMENTS.
!
! mod 21 Feb 96 - The parameter "W" didn't work.
! mod 5 Nov 98 - Clarify a couple of TWP error messages
! mod 23 Sep 99 - Add IHDR, LHDR, HDR
! mod 29 Sep 02 - set nweigs = 0 when a parameter name is read because
! twp 1 twp 2 twp 3 twp 4 passed edit. ag geez.
! mod 11 Feb 05 - Allow ihdr/hdr/lhdr to be given with no other weights.
! mod 29 Jun 06 - Add type and type sdev
!
PARAMETER (NPARS=13) ! THE NUMBER OF USER PARAMETERS
PARAMETER (MULTIV=12) ! POINT TO THE FIRST MULTI-VALUED PARAMETER
PARAMETER (MAXXWP=200) ! THE MAXIMUM NUMBER OF TWPS OR XWPS THAT WEIGEX CAN HANDLE
PARAMETER (NWRDS=MAXXWP+NPARS) ! THE NUMBER OF WORDS IN EVERY PARAMETER LIST ON DISC
EQUIVALENCE (VALS(1),LVALS(1))
CHARACTER*7 NAMES(NPARS)
CHARACTER*1 TYPES(NPARS)
DIMENSION LENGTH(NPARS)
CHARACTER*80 TOKEN
DIMENSION VALS(NPARS),LVALS(NPARS)
CHARACTER*4 CVALS(NPARS)
CHARACTER*3 ADDWB
COMMON /EDITS/ IERROR,IWARN,IRUN,NOW,ICOMPT
COMMON /WEIGR/ MUNIT,NLISTS,npar,nwrd
DIMENSION BUF(111),LBUF(111)
INTEGER FNO, hdr, type
EQUIVALENCE (w,weight)
!
!
EQUIVALENCE (FNO,LVALS(1)),
2 (LNO,LVALS(2)),
3 (ADDWB,CVALS(3)),
4 (LPRINT,LVALS(4)),
5 (weight,vals(5)),
6 (w,vals(5)),
7 (ihdr,lvals(7)),
8 (lhdr, lvals(8)),
9 (hdr, lvals(9)),
& (inverse, lvals(10)),
1 (type, lvals(11)),
6 (XWP,VALS(11)),
7 (TWP,VALS(12))
DATA NAMES/'FNO ', 'LNO ', 'ADDWB ', 'LPRINT', 'WEIGHT',
* 'W ', 'IHDR ', 'LHDR ', 'HDR ', 'INVERSE',
& 'TYPE ',
& 'XWP ', 'TWP '/
DATA LENGTH/3,3,5,6,6,1,4,4,3,7,4,3,3/
DATA TYPES/'L','L','A','L',2*'F',3*'L',2*'A',2*'F'/
!****
!**** SET THE PRESETS
!****
npar = npars
nwrd = nwrds
FNO=1
LNO=999999
IADDWB=0
LLNO = -1
LTYPE=0
NLISTS=0
NS=0
nweigs = 0
LPRINT=0
weight = 1.
ihdr = 0
lhdr = 0
hdr = 0
inverse = 0
type = 0
!****
!0**** GET A PARAMETER FILE
!****
CALL GETFIL(1,MUNIT,token,ISTAT)
!****
!**** THE CURRENT COMMAND LINE IN THE SYSTEM BUFFER MAY HAVE THE PARAMETERS.
!**** GET A PARAMETER LIST FROM THE USER.
!****
NTOKES=1
100 CONTINUE
CALL GETOKE(TOKEN,NCHARS) ! GET A TOKEN FROM THE USER PARAMETER LINE
CALL UPCASE(TOKEN,NCHARS) ! CONVERT THE TOKEN TO UPPERCASE
IF(NCHARS.GT.0) GO TO 150
IF(NOW.EQ.1) PRINT 140
140 FORMAT(' < ENTER PARAMETERS >')
CALL RDLINE ! GET ANOTHER USER PARAMETER LINE
NTOKES=0
GO TO 100
150 CONTINUE
NTOKES=NTOKES+1
DO 190 I=1,NPARS ! SEE IF IT IS A PARAMETER NAME
LEN=LENGTH(I) ! GET THE LEGAL PARAMETER NAME LENGTH
IPARAM=I ! SAVE THE INDEX
IF(TOKEN(1:NCHARS).EQ.NAMES(I)(1:LEN).AND.NCHARS.EQ.LEN) GO TO 200
190 CONTINUE ! STILL LOOKING FOR THE NAME
IF(TOKEN(1:NCHARS).EQ.'END'.AND.NCHARS.EQ.3) GO TO 1000 ! END OF PARAM LIST?
IF(NS.NE.0) GO TO 230
PRINT 191, TOKEN(1:NCHARS)
191 FORMAT(' *** ERROR *** WEIGHT DOES NOT HAVE A PARAMETER ',
* 'NAMED ',A10)
IERROR=IERROR+1
GO TO 100
!****
!**** FOUND THE PARAMETER NAME, NOW FIND THE VALUE
!****
200 CONTINUE
ns = 0
NPARAM=IPARAM
210 CONTINUE ! NOW FIND THE VALUE
CALL GETOKE(TOKEN,NCHARS)
CALL UPCASE(TOKEN,NCHARS)
NTOKES=NTOKES+1
IF(NCHARS.GT.0) GO TO 230 ! END OF LINE?
IF(NOW.EQ.1) PRINT 140 ! THIS ALLOWS A PARAMETER TO BE ON A DIFFERENT LINE FROM THE NAME
CALL RDLINE ! GET ANOTHER LINE
NTOKES=0
GO TO 210
230 CONTINUE
IF( TYPES(NPARAM) .EQ. 'A' ) THEN
IF(NAMES(NPARAM).EQ.'ADDWB'.AND.TOKEN(1:NCHARS).EQ.'YES')
* IADDWB=1
IF(NAMES(NPARAM).EQ.'INVERSE'.AND.TOKEN(1:NCHARS).EQ.'YES')
* INVERSE=1
IF( names(nparam) .EQ. 'TYPE' .AND.token(1:nchars).EQ.'SDEV')
& type = 1
IF(names(nparam).EQ.'TYPE'.AND.token(1:nchars).NE.'SDEV')THEN
PRINT *,' *** ERROR *** ILLEGAL TYPE.'
ierror = ierror + 1
ENDIF
GO TO 100
ENDIF
CALL DCODE(TOKEN,NCHARS,AREAL,ISTAT) ! TRY AND DECODE IT
IF(ISTAT.EQ.2) GO TO 420 ! =2 MEANS IT IS A NUMERIC
IERROR=IERROR+1 ! DCODE PRINTED AN ERROR
GO TO 100
420 IF(TYPES(NPARAM).EQ.'L') GO TO 500
IF( names(nparam) .EQ. 'W' ) nparam = nparam - 1
IF(NPARAM.LT.MULTIV) GO TO 490 ! IS IT A MULTIVALUED PARAMETER
NS=NS+1 ! THE TOKEN WAS A MULTI-VALUED PARAMETER
nweigs = ns
BUF(NS+NPARS)=AREAL
IF( names(NPARAM) .EQ. 'XWP') LTYPE=1
IF( names(NPARAM) .EQ. 'TWP') LTYPE=2
GO TO 100
490 VALS(NPARAM)=AREAL ! FLOATING POINT VALUES
GO TO 100
500 CONTINUE ! 32 BIT INTEGER VALUES
LVALS(NPARAM)=AREAL
GO TO 100
!****
!**** FINISHED A LIST, NOW DO THE ERROR AND VALIDITY CHECKS
!****
1000 CONTINUE ! MAKE SURE ALL SHOT & RP NUMBERS INCREASE
IF(FNO.GT.LLNO) GO TO 1020 ! IS FNO LARGER THAN THE LAST LNO
PRINT 1010
1010 FORMAT(' *** ERROR *** SHOT AND RP NUMBERS MUST INCREASE.')
IERROR=IERROR+1
1020 IF(LNO.GE.FNO) GO TO 1030 ! DO THEY INCREASE IN THIS LIST
PRINT 1010
IERROR=IERROR+1
1030 LLNO=LNO
IF( LTYPE .EQ. 0 .AND. weight .EQ. 1. .AND.
& ihdr+lhdr+hdr .EQ. 0 .AND. type .EQ. 0 ) THEN
PRINT *,' *** WARNING *** No weights given.'
iwarn = iwarn + 1
ENDIF
IF(LTYPE.EQ.2) GO TO 1200
DO I=1,nweigs,2
1130 BUF(NPARS+I)=ABS(BUF(NPARS+I)) ! USE THE ABS VALUE OF THE RANGES
ENDDO
IF(nweigs.LE.2) GO TO 1300
DO 1150 I=3,nweigs,2
IF(BUF(NPARS+I).GT.BUF(NPARS+I-2)) GO TO 1150
PRINT 1140
1140 FORMAT(' *** ERROR *** THE RANGES OF XWP MUST INCREASE.')
IERROR=IERROR+1
1150 CONTINUE
GO TO 1300
1200 CONTINUE ! CHECK THE TWP PARAMETER
DO i=1,nweigs,2
IF( buf(npars+i) .LT. 0 ) THEN
PRINT *,' *** ERROR *** Illegal TWP trace number ',
& buf(npars+i)
ierror = ierror + 1
ENDIF
ENDDO
IF(nweigs.LE.2) GO TO 1300
DO I=3,nweigs,2
IF( buf(npars+i) .LE. buf(npars+i-2) ) THEN
PRINT *,' *** ERROR *** TWP trace numbers must increase',
& buf(npars+i-2), buf(npars+i)
ierror = ierror + 1
ENDIF
ENDDO
1230 CONTINUE
1300 CONTINUE ! MAKE CHECKS COMMON TO BOTH XWP AND TWP
IF(MOD(nweigs,2).EQ.0) GO TO 1320
IF( ltype .EQ. 1 ) THEN
token = 'XWP'
ELSE
token = 'TWP'
ENDIF
PRINT 1310,token
1310 FORMAT(' *** ERROR *** ',A4,' MUST BE IN PAIRS.')
IERROR=IERROR+1
1320 CONTINUE ! MAKE SURE THE TIMES ARE OK
!****
!**** WRITE THE PARAMETER LIST TO DISC
!****
IF(nweigs.LE.MAXXWP) GO TO 1360
ITEMP=MAXXWP / 2
PRINT 1350,ITEMP
1350 FORMAT(' *** ERROR *** WEIGEX CAN HANDLE ONLY ',I3,' WEIGHTS')
IERROR=IERROR+1
1360 CONTINUE
LBUF(1)=FNO
LBUF(2)=LNO
LBUF(3)=IADDWB
LBUF(4)=nweigs
LBUF(5)=LTYPE
LBUF(6)=LPRINT
buf(7) = weight
lbuf(8) = ihdr
lbuf(9) = lhdr
lbuf(10) = hdr
lbuf(11) = inverse
lbuf(12) = type
ITEMP=NPARS+1
ITEMP1=NPARS+nweigs
IF( IAND(LPRINT,1) .EQ. 1 ) THEN
PRINT *, (LBUF(I),I=1,6),buf(7)
PRINT *, (lbuf(i),i=8,11)
PRINT *, (BUF(J),J=ITEMP,ITEMP1)
ENDIF
CALL WRDISC(MUNIT,BUF,NWRDS)
NLISTS=NLISTS+1
NS=0
nweigs = 0
LLNO=LNO
LNO=32768 ! DEFAULT THE DEFAULTS
weight = 1.
2020 CALL GETOKE(TOKEN,NCHARS) ! GET THE NEXT TOKEN
CALL UPCASE(TOKEN,NCHARS)
NTOKES=NTOKES+1
IF(NCHARS.GT.0) GO TO 2030 ! WAS IT THE END OF A LINE?
IF(NOW.EQ.1) PRINT 140
CALL RDLINE ! GET ANOTHER LINE
NTOKES=0
GO TO 2020
2030 IF(TOKEN(1:NCHARS).NE.'END'.OR.NCHARS.NE.3) GO TO 150
RETURN ! FINISHED ALL OF THE PARAMETERS!!!
END
|
(*
Author: René Thiemann
Akihisa Yamada
License: BSD
*)
section \<open>Complexity Carrier\<close>
text \<open>We define which properties a carrier of matrices must exhibit, so that it
can be used for checking complexity proofs.\<close>
theory Complexity_Carrier
imports
"Abstract-Rewriting.SN_Order_Carrier"
Ring_Hom_Matrix
Derivation_Bound
HOL.Real
begin
class large_real_ordered_semiring_1 = large_ordered_semiring_1 + real_embedding
instance real :: large_real_ordered_semiring_1 ..
instance int :: large_real_ordered_semiring_1 ..
instance rat :: large_real_ordered_semiring_1 ..
text \<open>For complexity analysis, we need a bounding function which tells us how often
one can strictly decrease a value. To this end, $\delta$-orderings are usually applied
when working with the reals or rational numbers.\<close>
locale complexity_one_mono_ordered_semiring_1 = one_mono_ordered_semiring_1 default gt
for gt :: "'a :: large_ordered_semiring_1 \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<succ>" 50) and default :: 'a +
fixes bound :: "'a \<Rightarrow> nat"
assumes bound_mono: "\<And> a b. a \<ge> b \<Longrightarrow> bound a \<ge> bound b"
and bound_plus: "\<And> a b. bound (a + b) \<le> bound a + bound b"
and bound_plus_of_nat: "\<And> a n. a \<ge> 0 \<Longrightarrow> bound (a + of_nat n) = bound a + bound (of_nat n)"
and bound_zero[simp]: "bound 0 = 0"
and bound_one: "bound 1 \<ge> 1"
and bound: "\<And> a. deriv_bound {(a,b). b \<ge> 0 \<and> a \<succ> b} a (bound a)"
begin
lemma bound_linear: "\<exists> c. \<forall> n. bound (of_nat n) \<le> c * n"
proof (rule exI[of _ "bound 1"], intro allI)
fix n
show "bound (of_nat n) \<le> bound 1 * n"
proof (induct n)
case (Suc n)
have "bound (of_nat (Suc n)) = bound (1 + of_nat n)" by simp
also have "... \<le> bound 1 + bound (of_nat n)"
by (rule bound_plus)
also have "... \<le> bound 1 + bound 1 * n"
using Suc by auto
finally show ?case by auto
qed simp
qed
lemma bound_of_nat_times: "bound (of_nat n * v) \<le> n * bound v"
proof (induct n)
case (Suc n)
have "bound (of_nat (Suc n) * v) = bound (v + of_nat n * v)" by (simp add: field_simps)
also have "\<dots> \<le> bound v + bound (of_nat n * v)" by (rule bound_plus)
also have "\<dots> \<le> bound v + n * bound v" using Suc by auto
finally show ?case by simp
qed simp
lemma bound_mult_of_nat: "bound (a * of_nat n) \<le> bound a * bound (of_nat n)"
proof (induct n)
case (Suc n)
have "bound (a * of_nat (Suc n)) = bound (a + a * of_nat n)" by (simp add: field_simps)
also have "... \<le> bound a + bound (a * of_nat n)"
by (rule bound_plus)
also have "... \<le> bound a + bound a * bound (of_nat n)" using Suc by auto
also have "... = bound a * (1 + bound (of_nat n))" by (simp add: field_simps)
also have "... \<le> bound a * (bound (1 + of_nat n))"
proof (rule mult_le_mono2)
show "1 + bound(of_nat n) \<le> bound (1 + of_nat n)" using bound_one
using bound_plus
unfolding bound_plus_of_nat[OF one_ge_zero] by simp
qed
finally show ?case by simp
qed simp
lemma bound_pow_of_nat: "bound (a * of_nat n ^ deg) \<le> bound a * of_nat n ^ deg"
proof (induct deg)
case (Suc deg)
have "bound (a * of_nat n ^ Suc deg) = bound (of_nat n * (a * of_nat n ^ deg))"
by (simp add: field_simps)
also have "\<dots> \<le> n * bound (a * of_nat n ^ deg)"
by (rule bound_of_nat_times)
also have "\<dots> \<le> n * (bound a * of_nat n ^ deg)"
using Suc by auto
finally show ?case by (simp add: field_simps)
qed simp
end
end
|
# Matplotlib
[Matplotlib](http://matplotlib.org) is a Python package for 2D plotting and the `matplotlib.pyplot` sub-module contains many plotting functions to create various kinds of plots. Let's get started by importing `matplotlib.pyplot` and using `%matplotlib` [Jupyter magic](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-matplotlib) to display plots in the notebook.
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Basic Plotting
### Procedure
The general procedure to create a 2D line plot is:
1. Create a sequence of $x$ values.
2. Create a sequence of $y$ values.
3. Enter `plt.plot(x,y,[fmt],**kwargs)` where `[fmt]` is an (optional) format string and `**kwargs` are (optional) keyword arguments specifying line properties of the plot.
4. Use `pyplot` functions to add features to the figure such as a title, legend, grid lines, etc.
5. Enter `plt.show()` to display the resulting figure.
Let's begin with a basic example plotting a few arbitrary points:
```python
x = [-5,-2,0,1,3]
y = [2,-1,1,-4,3]
plt.plot(x,y)
plt.show()
```
The main things to notice are:
1. The sequences `x` and `y` define the coordinates of the points in the plot.
2. The line in the plot is constructed by connecting the points by straight lines.
The second observation implies that if we want to plot a smooth curve then we need to plot many points; otherwise the plot will look jagged. For example, we could try plotting the parabola $y = x^2$ for $x \in [-2,2]$ using only 5 points:
```python
x = [-2,-1,0,1,2]
y = [4,1,0,1,4]
plt.plot(x,y)
plt.show()
```
Five points are too few to represent a smooth curve such as $y = x^2$, so we need more! Let's try again using the NumPy function `np.linspace` to create 100 points!
```python
x = np.linspace(-2,2,100)
y = x**2
plt.plot(x,y)
plt.show()
```
That's a better representation of the parabola $y = x^2$. Note that the number of points we use in a line plot (100 in this case) is completely arbitrary; the goal is to show a smooth graph for a smooth curve, so we just need to pick a number big enough for the function at hand. But be careful not to generate too many points, since plotting a *very* large number of points will take a *long* time!
Now that we have the general idea, let's look at adding style and features to our plots!
### Line Properties
A line appearing in a plot has several properties including color, transparency, style, width and markers. We can set these properties when we call `plt.plot` using the following keyword arguments:
| Property | Description |
| :---: | :--- |
| `alpha` | transparency (0.0 transparent through 1.0 opaque) |
| `color` (or `c`) | any matplotlib color |
| `label` | text appearing in legend |
| `linestyle` (or `ls`) | `solid`, `dashed`, `dashdot`, `dotted` |
| `linewidth` (or `lw`) | set width of the line |
| `marker` | set marker style |
| `markeredgecolor` (or `mec`) | any matplotlib color |
| `markerfacecolor` (or `mfc`) | any matplotlib color |
| `markersize` (or `ms`) | size of the marker |
Note that we can specify a [matplotlib color](https://matplotlib.org/api/colors_api.html) in several different ways, including by name such as `blue` or `red`, or by an [RGB](https://www.w3schools.com/colors/colors_rgb.asp) tuple such as `(1,0,1)` for purple. For example, let's plot the function
$$
y = e^{-x^2}\cos(2 \pi x) \ \ , \ \ x \in [-2,2]
$$
```python
x = np.linspace(-2,2,41)
y = np.exp(-x**2) * np.cos(2*np.pi*x)
plt.plot(x,y,alpha=0.4,label='Decaying Cosine',
color='red',linestyle='dashed',linewidth=2,
marker='o',markersize=5,markerfacecolor='blue',
markeredgecolor='blue')
plt.ylim([-2,2])
plt.legend()
plt.show()
```
Notice that we used the pyplot function `plt.legend` to display the figure with a legend (showing the line label) and `plt.ylim` to set the limits on the vertical axis to `[-2,2]`.
### Format Strings
A [format string](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib-pyplot-plot) gives us a shortcut to add color, markers and line style to a line plot. For example, if we want to plot the function
$$
y = \frac{1}{1 + x^2} \ , \ x \in [-5,5]
$$
with a dashed black line and square markers, we could use keyword arguments:
```python
x = np.linspace(-5,5,41)
y = 1/(1 + x**2)
plt.plot(x,y,color='black',linestyle='dashed',marker='s')
plt.show()
```
Or we could use the corresponding format string `'ks--'` where `k` denotes a black line, `s` a square marker and `--` a dashed line:
```python
x = np.linspace(-5,5,41)
y = 1/(1 + x**2)
plt.plot(x,y,'ks--')
plt.show()
```
Much easier! See below for a list of colors, markers and linestyles.
#### Colors
| Character | Color |
| :---: | :---: |
| `b` | blue |
| `g` | green |
| `r` | red |
| `c` | cyan |
| `m` | magenta |
| `y` | yellow |
| `k` | black |
| `w` | white |
#### Markers
| Character | Marker |
| :---: | :---: |
| `.` | point |
| `o` | circle |
| `v` | triangle down |
| `^` | triangle up |
| `s` | square |
| `p` | pentagon |
| `*` | star |
| `+` | plus |
| `x` | x |
| `D` | diamond |
#### Line Styles
| Character | Line Style |
| :---: | :---: |
| `-` | solid line style |
| `--` | dashed line style |
| `-.` | dash-dot line style |
| `:` | dotted line style |
See the [matplotlib.pyplot.plot documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html) for more options.
### Pyplot Functions
There are many `pyplot` functions available for us to customize our figures. For example:
| Function | Description |
| ---: | :--- |
| `plt.xlim` | set $x$ limits |
| `plt.ylim` | set $y$ limits |
| `plt.grid` | add grid lines |
| `plt.title` | add a title |
| `plt.xlabel` | add label to the horizontal axis |
| `plt.ylabel` | add label to the vertical axis |
| `plt.axis` | set axis properties (`equal`, `off`, `scaled`, etc.) |
| `plt.xticks` | set tick locations on the horizontal axis |
| `plt.yticks` | set tick locations on the vertical axis |
| `plt.legend` | display legend for several lines in the same figure |
| `plt.savefig` | save figure (as .png, .pdf, etc.) to working directory |
| `plt.figure` | create a new figure and set its properties |
See the [pyplot documentation](https://matplotlib.org/api/pyplot_summary.html) for a full list of functions.
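For example, here is a short figure that combines several of these functions (the file name `sine-example.png` is just an illustrative choice):
```python
x = np.linspace(0,2*np.pi,100)
# Create a wide figure, plot, and decorate it
plt.figure(figsize=(8,4))
plt.plot(x,np.sin(x),'b')
plt.title('y = sin(x)')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([0,np.pi/2,np.pi,3*np.pi/2,2*np.pi])
plt.grid(True)
# Save the figure to the working directory, then display it
plt.savefig('sine-example.png')
plt.show()
```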
## Examples
### Taylor Polynomials
Plot the function $y = \cos(x)$ along with its [Taylor polynomials](https://en.wikipedia.org/wiki/Taylor_series) of degrees 2 and 4.
```python
x = np.linspace(-6,6,50)
# Plot y = cos(x)
y = np.cos(x)
plt.plot(x,y,'b',label='cos(x)')
# Plot degree 2 Taylor polynomial
y2 = 1 - x**2/2
plt.plot(x,y2,'r-.',label='Degree 2')
# Plot degree 4 Taylor polynomial
y4 = 1 - x**2/2 + x**4/24
plt.plot(x,y4,'g:',label='Degree 4')
# Add features to our figure
plt.legend()
plt.grid(True,linestyle=':')
plt.xlim([-6,6])
plt.ylim([-4,4])
plt.title('Taylor Polynomials of cos(x) at x=0')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
### Heart Curve
Plot the heart curve:
\begin{align}
x &= 16 \sin^3(t) \\\
y &= 13 \cos(t) - 5 \cos(2t) - 2 \cos(3t) - \cos(4t)
\end{align}
for $t \in [0,2\pi]$.
```python
t = np.linspace(0,2*np.pi,100)
x = 16*np.sin(t)**3
y = 13*np.cos(t) - 5*np.cos(2*t) - 2*np.cos(3*t) - np.cos(4*t)
# Plot line with RGB tuple (red=1, green=0.2, blue=0.5)
# and 20pt line width
plt.plot(x,y,c=(1,0.2,0.5),lw=20)
# Add features to our figure
plt.title('Heart!')
plt.axis('equal')
plt.axis('off')
plt.show()
```
## Subplots
The `plt.subplot` function takes at least 3 inputs $n$, $m$ and $i$: it creates a figure with an $n$ by $m$ grid of subplots and then sets the $i$th subplot (counting across the rows) as the current plot (i.e. the current axes object).
For example, consider the [sawtooth wave](https://en.wikipedia.org/wiki/Sawtooth_wave)
$$
f(t) = \frac{1}{2} - \frac{1}{\pi} \sum_{k=1}^{\infty} (-1)^k \frac{\sin(2 \pi k t)}{k}
$$
and let $f_N(t)$ denote the $N$th partial sum of the sawtooth wave:
$$
f_N(t) = \frac{1}{2} - \frac{1}{\pi} \sum_{k=1}^{N} (-1)^k \frac{\sin(2 \pi k t)}{k}
$$
Create a 2 by 2 grid of subplots to plot the first 4 partial sums:
\begin{align}
f_1(t) &= \frac{1}{2} + \frac{\sin(2 \pi t)}{\pi} \\\
f_2(t) &= \frac{1}{2} + \frac{\sin(2 \pi t)}{\pi} - \frac{\sin(4 \pi t)}{2\pi} \\\
f_3(t) &= \frac{1}{2} + \frac{\sin(2 \pi t)}{\pi} - \frac{\sin(4 \pi t)}{2\pi} + \frac{\sin(6 \pi t)}{3\pi} \\\
f_4(t) &= \frac{1}{2} + \frac{\sin(2 \pi t)}{\pi} - \frac{\sin(4 \pi t)}{2\pi} + \frac{\sin(6 \pi t)}{3\pi} - \frac{\sin(8 \pi t)}{4\pi}
\end{align}
```python
t = np.linspace(0,4,200)
fN = 1/2
for N in [1,2,3,4]:
fN = fN - (-1)**N * np.sin(2*N*np.pi*t)/(N*np.pi)
plt.subplot(2,2,N)
plt.plot(t,fN)
plt.title('N = {}'.format(N))
plt.tight_layout()
plt.show()
```
See the [documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplot.html) for more about subplots.
## Beyond Line Plots
### Scatter plots
A [scatter plot](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.scatter.html) can display 4 dimensions of data: $x$ coordinate, $y$ coordinate, size and color. Let's make a random scatter plot:
```python
# Set the number of dots in the plot
N = 2000
# Create random x and y coordinates sampled uniformly from [0,1]
x = np.random.rand(N)
y = np.random.rand(N)
# Create random array sampled uniformly from [20,120]
# `size` array is used below to set the size of each dot
size = 100*np.random.rand(N) + 20
# Create random 4-tuples sampled uniformly from [0,1]
# `colors` array is used below to set the color
# (red,green,blue,alpha) of each dot
colors = np.random.rand(N,4)
# Create a figure of size 12 by 5 and create scatter plot
plt.figure(figsize=(12,5))
plt.scatter(x,y,c=colors,s=size)
plt.axis('off')
plt.show()
```
### Histograms
Generate an array of 10000 random numbers sampled from the standard normal distribution and create a [histogram](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html). Let's also superimpose the normal density:
$$
y = \frac{1}{\sqrt{2\pi}} e^{-x^2/2}
$$
```python
samples = np.random.randn(10000)
plt.hist(samples,bins=20,density=True,alpha=0.5,color=(0.3,0.8,0.1))
plt.title('Random Samples - Normal Distribution')
plt.ylabel('Density')
x = np.linspace(-4,4,100)
y = 1/(2*np.pi)**0.5 * np.exp(-x**2/2)
plt.plot(x,y,'b',alpha=0.8)
plt.show()
```
### Bar plots
Plot the [total precipitation in Vancouver](https://vancouver.weatherstats.ca/charts/precipitation-monthly.html) by month as a [bar plot](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.bar.html):
```python
month = range(1,13)
precipitation = [98.8,128.8,206.0,138.5,102.2,46.4,1.8,5.0,29.4,114.8,197.0,170.6]
plt.bar(month,precipitation)
plt.xticks(month)
plt.yticks(range(0,300,50))
plt.grid(True,alpha=0.5,linestyle='--')
plt.title('Precipitation in Vancouver, 2017')
plt.ylabel('Total Precipitation (mm)')
plt.xlabel('Month')
plt.show()
```
|
(* Definition and basic facts about distributive lattices. *)
Structure Lattice :={
lt_carrier :> Type;
lt_and : lt_carrier -> lt_carrier -> lt_carrier;
lt_or : lt_carrier -> lt_carrier -> lt_carrier;
lt_zero : lt_carrier;
lt_one : lt_carrier;
lt_and_commute : forall x y, lt_and x y = lt_and y x;
lt_or_commute : forall x y, lt_or x y = lt_or y x;
lt_and_associate : forall x y z, lt_and x (lt_and y z) = lt_and (lt_and x y) z;
lt_or_associate : forall x y z, lt_or x (lt_or y z) = lt_or (lt_or x y) z;
lt_or_absorb : forall x y, lt_or x (lt_and x y) = x;
lt_and_absorb : forall x y, lt_and x (lt_or x y) = x;
lt_zero_identity_r : forall x, lt_or x lt_zero = x;
lt_one_identity_r : forall x, lt_and x lt_one = x;
lt_distribute_or : forall x y z, lt_or x (lt_and y z) = lt_and (lt_or x y) (lt_or x z);
lt_distribute_and : forall x y z, lt_and x (lt_or y z) = lt_or (lt_and x y) (lt_and x z)
}.
Hint Resolve lt_and_commute : lt_hints.
Hint Resolve lt_or_commute : lt_hints.
Hint Resolve lt_and_associate : lt_hints.
Hint Resolve lt_or_associate : lt_hints.
Hint Resolve lt_or_absorb : lt_hints.
Hint Resolve lt_and_absorb : lt_hints.
Hint Resolve lt_zero_identity_r : lt_hints.
Hint Resolve lt_one_identity_r : lt_hints.
Hint Resolve lt_distribute_or : lt_hints.
Hint Resolve lt_distribute_and : lt_hints.
Notation "p && q" := (lt_and _ p q) (at level 40, left associativity).
Notation "p || q" := (lt_or _ p q) (at level 50, left associativity).
Notation "1" := (lt_one _).
Notation "0" := (lt_zero _).
Structure LatticeHom (A B : Lattice) :=
{
lt_hom :> A -> B ;
lt_hom_and : forall x y , lt_hom (x && y) = lt_hom x && lt_hom y;
lt_hom_or : forall x y , lt_hom (x || y) = lt_hom x || lt_hom y;
lt_hom_zero : lt_hom 0 = 0;
lt_hom_one : lt_hom 1 = 1
}.
Definition lt_id {A : Lattice}: LatticeHom A A.
Proof.
refine {| lt_hom := fun x => x |} ; reflexivity.
Defined.
Definition lt_comp {A B C:Lattice}: LatticeHom B C -> LatticeHom A B -> LatticeHom A C.
Proof.
intros g f.
refine {| lt_hom := fun x => g (f x) |}.
- intros. rewrite lt_hom_and. rewrite lt_hom_and. reflexivity.
- intros; repeat (rewrite lt_hom_or). reflexivity.
- intros. repeat (rewrite lt_hom_zero). reflexivity.
- intros. repeat (rewrite lt_hom_one). reflexivity.
Defined.
Notation "g 'o' f" := (lt_comp g f) (at level 65, left associativity).
Lemma lt_id_left (A B : Lattice) (f : LatticeHom A B) (x: A): (lt_id o f) x = f x.
Proof.
reflexivity.
Qed.
Lemma comp_assoc
(A B C D : Lattice)
(f : LatticeHom A B)
(g : LatticeHom B C) (h : LatticeHom C D) (x : A) :
(h o (g o f)) x = ((h o g) o f) x.
Proof.
reflexivity.
Qed.
|
module Network.HTTP.Protocol
import Data.String
import Generics.Derive
%language ElabReflection
public export
data Protocol : Type where
HTTP : Protocol
HTTPS : Protocol
%runElab derive "Protocol" [Generic, Meta, Eq, DecEq, Show]
public export
protocol_port_number : Protocol -> Bits16
protocol_port_number HTTP = 80
protocol_port_number HTTPS = 443
public export
protocol_from_str : String -> Maybe Protocol
protocol_from_str protocol =
case toUpper protocol of
"HTTP" => Just HTTP
"HTTPS" => Just HTTPS
_ => Nothing
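-- Illustrative behaviour (a note, not part of the original module):
--   protocol_from_str "https"  -- Just HTTPS
--   protocol_port_number HTTPS -- 443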
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Abhimanyu Pallavi Sudhir, Jean Lo, Calle Sönne
-/
import analysis.special_functions.exp
/-!
# Real logarithm
In this file we define `real.log` to be the logarithm of a real number. As usual, we extend it from
its domain `(0, +∞)` to a globally defined function. We choose to do it so that `log 0 = 0` and
`log (-x) = log x`.
We prove some basic properties of this function and show that it is continuous.
## Tags
logarithm, continuity
-/
open set filter function
open_locale topological_space
noncomputable theory
namespace real
variables {x y : ℝ}
/-- The real logarithm function, equal to the inverse of the exponential for `x > 0`,
to `log |x|` for `x < 0`, and to `0` for `0`. We use this unconventional extension to
`(-∞, 0]` as it gives the formula `log (x * y) = log x + log y` for all nonzero `x` and `y`, and
the derivative of `log` is `1/x` away from `0`. -/
@[pp_nodot] noncomputable def log (x : ℝ) : ℝ :=
if hx : x = 0 then 0 else exp_order_iso.symm ⟨|x|, abs_pos.2 hx⟩
lemma log_of_ne_zero (hx : x ≠ 0) : log x = exp_order_iso.symm ⟨|x|, abs_pos.2 hx⟩ := dif_neg hx
lemma log_of_pos (hx : 0 < x) : log x = exp_order_iso.symm ⟨x, hx⟩ :=
by { rw [log_of_ne_zero hx.ne'], congr, exact abs_of_pos hx }
lemma exp_log_eq_abs (hx : x ≠ 0) : exp (log x) = |x| :=
by rw [log_of_ne_zero hx, ← coe_exp_order_iso_apply, order_iso.apply_symm_apply, subtype.coe_mk]
lemma exp_log (hx : 0 < x) : exp (log x) = x :=
by { rw exp_log_eq_abs hx.ne', exact abs_of_pos hx }
lemma exp_log_of_neg (hx : x < 0) : exp (log x) = -x :=
by { rw exp_log_eq_abs (ne_of_lt hx), exact abs_of_neg hx }
@[simp] lemma log_exp (x : ℝ) : log (exp x) = x :=
exp_injective $ exp_log (exp_pos x)
lemma surj_on_log : surj_on log (Ioi 0) univ :=
λ x _, ⟨exp x, exp_pos x, log_exp x⟩
lemma log_surjective : surjective log :=
λ x, ⟨exp x, log_exp x⟩
@[simp] lemma range_log : range log = univ :=
log_surjective.range_eq
@[simp] lemma log_zero : log 0 = 0 := dif_pos rfl
@[simp] lemma log_one : log 1 = 0 :=
exp_injective $ by rw [exp_log zero_lt_one, exp_zero]
@[simp] lemma log_abs (x : ℝ) : log (|x|) = log x :=
begin
by_cases h : x = 0,
{ simp [h] },
{ rw [← exp_eq_exp, exp_log_eq_abs h, exp_log_eq_abs (abs_pos.2 h).ne', abs_abs] }
end
@[simp] lemma log_neg_eq_log (x : ℝ) : log (-x) = log x :=
by rw [← log_abs x, ← log_abs (-x), abs_neg]
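-- Quick sanity checks of the extension convention (our additions, not in mathlib):
example : log (0 : ℝ) = 0 := log_zero
example (x : ℝ) : log (-x) = log x := log_neg_eq_log x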
lemma surj_on_log' : surj_on log (Iio 0) univ :=
λ x _, ⟨-exp x, neg_lt_zero.2 $ exp_pos x, by rw [log_neg_eq_log, log_exp]⟩
lemma log_mul (hx : x ≠ 0) (hy : y ≠ 0) : log (x * y) = log x + log y :=
exp_injective $
by rw [exp_log_eq_abs (mul_ne_zero hx hy), exp_add, exp_log_eq_abs hx, exp_log_eq_abs hy, abs_mul]
lemma log_div (hx : x ≠ 0) (hy : y ≠ 0) : log (x / y) = log x - log y :=
exp_injective $
by rw [exp_log_eq_abs (div_ne_zero hx hy), exp_sub, exp_log_eq_abs hx, exp_log_eq_abs hy, abs_div]
@[simp] lemma log_inv (x : ℝ) : log (x⁻¹) = -log x :=
begin
by_cases hx : x = 0, { simp [hx] },
rw [← exp_eq_exp, exp_log_eq_abs (inv_ne_zero hx), exp_neg, exp_log_eq_abs hx, abs_inv]
end
lemma log_le_log (h : 0 < x) (h₁ : 0 < y) : real.log x ≤ real.log y ↔ x ≤ y :=
by rw [← exp_le_exp, exp_log h, exp_log h₁]
lemma log_lt_log (hx : 0 < x) : x < y → log x < log y :=
by { intro h, rwa [← exp_lt_exp, exp_log hx, exp_log (lt_trans hx h)] }
lemma log_lt_log_iff (hx : 0 < x) (hy : 0 < y) : log x < log y ↔ x < y :=
by { rw [← exp_lt_exp, exp_log hx, exp_log hy] }
lemma log_le_iff_le_exp (hx : 0 < x) : log x ≤ y ↔ x ≤ exp y := by rw [←exp_le_exp, exp_log hx]
lemma log_lt_iff_lt_exp (hx : 0 < x) : log x < y ↔ x < exp y := by rw [←exp_lt_exp, exp_log hx]
lemma le_log_iff_exp_le (hy : 0 < y) : x ≤ log y ↔ exp x ≤ y := by rw [←exp_le_exp, exp_log hy]
lemma lt_log_iff_exp_lt (hy : 0 < y) : x < log y ↔ exp x < y := by rw [←exp_lt_exp, exp_log hy]
lemma log_pos_iff (hx : 0 < x) : 0 < log x ↔ 1 < x :=
by { rw ← log_one, exact log_lt_log_iff zero_lt_one hx }
lemma log_pos (hx : 1 < x) : 0 < log x :=
(log_pos_iff (lt_trans zero_lt_one hx)).2 hx
lemma log_neg_iff (h : 0 < x) : log x < 0 ↔ x < 1 :=
by { rw ← log_one, exact log_lt_log_iff h zero_lt_one }
lemma log_neg (h0 : 0 < x) (h1 : x < 1) : log x < 0 := (log_neg_iff h0).2 h1
lemma log_nonneg_iff (hx : 0 < x) : 0 ≤ log x ↔ 1 ≤ x :=
by rw [← not_lt, log_neg_iff hx, not_lt]
lemma log_nonneg (hx : 1 ≤ x) : 0 ≤ log x :=
(log_nonneg_iff (zero_lt_one.trans_le hx)).2 hx
lemma log_nonpos_iff (hx : 0 < x) : log x ≤ 0 ↔ x ≤ 1 :=
by rw [← not_lt, log_pos_iff hx, not_lt]
lemma log_nonpos_iff' (hx : 0 ≤ x) : log x ≤ 0 ↔ x ≤ 1 :=
begin
rcases hx.eq_or_lt with (rfl|hx),
{ simp [le_refl, zero_le_one] },
exact log_nonpos_iff hx
end
lemma log_nonpos (hx : 0 ≤ x) (h'x : x ≤ 1) : log x ≤ 0 :=
(log_nonpos_iff' hx).2 h'x
lemma strict_mono_on_log : strict_mono_on log (set.Ioi 0) :=
λ x hx y hy hxy, log_lt_log hx hxy
lemma strict_anti_on_log : strict_anti_on log (set.Iio 0) :=
begin
rintros x (hx : x < 0) y (hy : y < 0) hxy,
rw [← log_abs y, ← log_abs x],
refine log_lt_log (abs_pos.2 hy.ne) _,
rwa [abs_of_neg hy, abs_of_neg hx, neg_lt_neg_iff]
end
lemma log_inj_on_pos : set.inj_on log (set.Ioi 0) :=
strict_mono_on_log.inj_on
lemma eq_one_of_pos_of_log_eq_zero {x : ℝ} (h₁ : 0 < x) (h₂ : log x = 0) : x = 1 :=
log_inj_on_pos (set.mem_Ioi.2 h₁) (set.mem_Ioi.2 zero_lt_one) (h₂.trans real.log_one.symm)
lemma log_ne_zero_of_pos_of_ne_one {x : ℝ} (hx_pos : 0 < x) (hx : x ≠ 1) : log x ≠ 0 :=
mt (eq_one_of_pos_of_log_eq_zero hx_pos) hx
@[simp] lemma log_eq_zero {x : ℝ} : log x = 0 ↔ x = 0 ∨ x = 1 ∨ x = -1 :=
begin
split,
{ intros h,
rcases lt_trichotomy x 0 with x_lt_zero | rfl | x_gt_zero,
{ refine or.inr (or.inr (eq_neg_iff_eq_neg.mp _)),
rw [←log_neg_eq_log x] at h,
exact (eq_one_of_pos_of_log_eq_zero (neg_pos.mpr x_lt_zero) h).symm, },
{ exact or.inl rfl },
{ exact or.inr (or.inl (eq_one_of_pos_of_log_eq_zero x_gt_zero h)), }, },
{ rintro (rfl|rfl|rfl); simp only [log_one, log_zero, log_neg_eq_log], }
end
/-- The real logarithm function tends to `+∞` at `+∞`. -/
lemma tendsto_log_at_top : tendsto log at_top at_top :=
tendsto_comp_exp_at_top.1 $ by simpa only [log_exp] using tendsto_id
lemma tendsto_log_nhds_within_zero : tendsto log (𝓝[{0}ᶜ] 0) at_bot :=
begin
rw [← (show _ = log, from funext log_abs)],
refine tendsto.comp _ tendsto_abs_nhds_within_zero,
simpa [← tendsto_comp_exp_at_bot] using tendsto_id
end
lemma continuous_on_log : continuous_on log {0}ᶜ :=
begin
rw [continuous_on_iff_continuous_restrict, restrict],
conv in (log _) { rw [log_of_ne_zero (show (x : ℝ) ≠ 0, from x.2)] },
exact exp_order_iso.symm.continuous.comp (continuous_subtype_mk _ continuous_subtype_coe.norm)
end
@[continuity] lemma continuous_log : continuous (λ x : {x : ℝ // x ≠ 0}, log x) :=
continuous_on_iff_continuous_restrict.1 $ continuous_on_log.mono $ λ x hx, hx
@[continuity] lemma continuous_log' : continuous (λ x : {x : ℝ // 0 < x}, log x) :=
continuous_on_iff_continuous_restrict.1 $ continuous_on_log.mono $ λ x hx, ne_of_gt hx
lemma continuous_at_log (hx : x ≠ 0) : continuous_at log x :=
(continuous_on_log x hx).continuous_at $ is_open.mem_nhds is_open_compl_singleton hx
@[simp] lemma continuous_at_log_iff : continuous_at log x ↔ x ≠ 0 :=
begin
refine ⟨_, continuous_at_log⟩,
rintros h rfl,
exact not_tendsto_nhds_of_tendsto_at_bot tendsto_log_nhds_within_zero _
(h.tendsto.mono_left inf_le_left)
end
end real
section continuity
open real
variables {α : Type*}
lemma filter.tendsto.log {f : α → ℝ} {l : filter α} {x : ℝ} (h : tendsto f l (𝓝 x)) (hx : x ≠ 0) :
tendsto (λ x, log (f x)) l (𝓝 (log x)) :=
(continuous_at_log hx).tendsto.comp h
variables [topological_space α] {f : α → ℝ} {s : set α} {a : α}
lemma continuous.log (hf : continuous f) (h₀ : ∀ x, f x ≠ 0) : continuous (λ x, log (f x)) :=
continuous_on_log.comp_continuous hf h₀
lemma continuous_at.log (hf : continuous_at f a) (h₀ : f a ≠ 0) :
continuous_at (λ x, log (f x)) a :=
hf.log h₀
lemma continuous_within_at.log (hf : continuous_within_at f s a) (h₀ : f a ≠ 0) :
continuous_within_at (λ x, log (f x)) s a :=
hf.log h₀
lemma continuous_on.log (hf : continuous_on f s) (h₀ : ∀ x ∈ s, f x ≠ 0) :
continuous_on (λ x, log (f x)) s :=
λ x hx, (hf x hx).log (h₀ x hx)
end continuity
|
theory RedSafeCase
imports WTLemma RedSafeUnpack
begin
(* ###### case lemmas *)
lemma safe_app_op_case: "\<lbrakk> app_red_exp OpApp (s1, AppExp (OpExp xop) v) ax (s2, e2); FunTy tx tau r a = op_type xop \<rbrakk> \<Longrightarrow> well_typed env r_s1 e2 tau r_s1 empty_use_env"
apply (case_tac v)
apply (auto)
apply (case_tac ax)
apply (auto)
(* given xop: op, x1: const, assuming tau matches the return type of xop, we want to show that e2 is well-typed.
now e2 can either be an op or a const.
*)
apply (case_tac "\<not> (\<exists> rop. e2 = OpExp rop)")
apply (case_tac "\<not> (\<exists> c. e2 = ConstExp c)")
apply (auto)
apply (case_tac xop)
apply (auto)
(* the hard part is proving that e2 will have the right type. *)
apply (case_tac xop)
apply (auto)
apply (simp add: pure_fun_def)
apply (simp add: pure_fun_def)
apply (simp add: pure_fun_def)
apply (rule_tac id_leq_use_env)
apply (rule_tac leq_empty_use_env)
(* op case, same thing *)
apply (case_tac xop)
apply (auto)
apply (simp add: pure_fun_def)
apply (simp add: pure_fun_def)
apply (simp add: pure_fun_def)
apply (rule_tac id_leq_use_env)
apply (rule_tac leq_empty_use_env)
done
lemma well_typed_state_add_vars: "\<lbrakk> well_typed_state s1 env rs_map; fresh_var s1 x; well_typed_mem_value env r_s tau v; proper_mem_value rs_map v;
sub_use_env s1 r_s; sep_nres_map r_s (rem_env rs_map x) \<rbrakk> \<Longrightarrow>
well_typed_state (add_env s1 x v) (add_env env (Loc x) tau) (add_env rs_map x r_s)"
(* starts by proving the validity of the env + res map *)
apply (simp add: well_typed_state_def)
apply (auto)
apply (rule_tac dist_add_sub_env)
apply (simp)
apply (simp add: valid_nres_map_def)
apply (auto)
(* the state is still fully covered by the map *)
apply (simp add: full_nres_map_def)
apply (auto)
apply (simp add: add_env_def)
apply (auto)
apply (simp add: add_env_def)
apply (auto)
(* prove disjointness remains *)
apply (rule_tac disj_add_nres_map)
apply (auto)
(* prove containment remains *)
apply (rule_tac add_sub_nres_map2)
apply (rule_tac add_sub_nres_map1)
apply (simp_all)
apply (simp add: fresh_var_def)
(* now it remains to prove that all the values are still well-typed. we do the x = xa case first *)
apply (erule_tac x="xa" in allE)
apply (case_tac "x = xa")
apply (case_tac "add_env s1 x v x")
apply (simp add: add_env_def)
apply (auto)
apply (case_tac "add_env env (Loc x) tau (Loc x)")
apply (simp add: add_env_def)
apply (auto)
apply (cut_tac rs_map="rs_map" and x="x" and r_s="r_s" in nres_add_same)
apply (simp)
apply (rule_tac well_typed_mv_add_vars)
apply (simp add: add_env_def)
apply (simp add: fresh_var_def)
apply (simp add: sub_env_def)
apply (erule_tac x="Loc x" in allE)
apply (auto)
(* proper mem value *)
apply (rule_tac proper_add_mv)
apply (simp add: add_env_def)
apply (simp add: nres_lookup_def)
apply (simp add: valid_nres_map_def)
apply (simp add: full_nres_map_def)
apply (simp add: fresh_var_def)
apply (rule_tac leq_empty_use_env)
(* now we do that x \<noteq> xa case *)
apply (simp add: add_env_def)
apply (case_tac "s1 xa")
apply (auto)
apply (simp add: add_env_def)
apply (case_tac "env (Loc xa)")
apply (auto)
apply (cut_tac rs_map="rs_map" and x="x" and r_s="r_s" and y="xa" in nres_add_diff)
apply (simp_all)
apply (rule_tac well_typed_mv_add_vars)
apply (auto)
apply (simp add: fresh_var_def)
apply (simp add: sub_env_def)
apply (erule_tac x="Loc x" in allE)
apply (auto)
(* properness *)
apply (rule_tac proper_add_mv)
apply (simp)
apply (simp add: fresh_var_def)
apply (simp add: valid_nres_map_def)
apply (simp add: full_nres_map_def)
apply (simp add: nres_lookup_def)
apply (rule_tac leq_empty_use_env)
done
lemma sacc_make_act_case: "
\<lbrakk>well_typed_state s1 env rs_map; app_red_exp ConstApp (s1, AppExp (ConstExp c) v) (MakeAct x) (s2, e2); valid_exp_use_env s1 rs_map r_f;
well_typed env r_s1 v t1 r_s2 rx2; leq_use_env r_s3 (diff_use_env r_s2 (comp_use_env rx1 (lift_use_env rx2 r))); leq_use_env rx r_s3;
FunTy t1 t2 r a \<in> const_type c; c \<noteq> FixConst; ax = MakeAct x; leq_use_env r_s1 r_f\<rbrakk>
\<Longrightarrow> well_typed (add_env env (Loc x) t2) (add_use_env r_s1 (Loc x) OwnPerm) e2 t2 r_s3 rx"
(* e2 will always be the generated variable *)
apply (case_tac "e2 \<noteq> VarExp (LocType x x)")
apply (case_tac c)
apply (auto)
(* simple var reqs *)
apply (simp add: add_env_def)
apply (case_tac c)
apply (auto)
apply (simp add: add_env_def)
apply (auto)
apply (case_tac c)
apply (auto)
apply (simp add: pure_fun_def)
apply (rule_tac ereq_leq_use_envx)
apply (simp add: add_use_env_def)
(* prelim: x2 is fresh *)
apply (case_tac "\<not> fresh_var s1 x")
apply (case_tac c)
apply (auto)
(* prelim: cs is empty, remove elim *)(*
apply (case_tac "\<not> cs = {}")
apply (case_tac c)
apply (auto)*)
(* prelim: r_s3 \<le> r_s1 *)
apply (cut_tac r_sc="r_s3" and r_sb="diff_use_env r_s2 (comp_use_env rx1 (lift_use_env rx2 r))" and r_sa="r_s1" in trans_leq_use_env)
apply (rule_tac diff_leq_use_env)
apply (simp_all)
(* we can achieve the desired permissions by removing x from the end perms + reqs (which is okay since they dont appear anywhere else yet). *)
apply (rule_tac x="one_use_env (Loc x) OwnPerm" in exI)
apply (auto)
(* - the end perm bound is possible since we assume r_s3 didnt already contain x2 *)
apply (rule_tac t=" leq_use_env r_s3 (diff_use_env (add_use_env r_s1 (Loc x) OwnPerm) (comp_use_env (ereq_use_env (Loc x) t2) (one_use_env (Loc x) OwnPerm)))" and
s=" leq_use_env (rem_use_env r_s3 (Loc x)) (diff_use_env (add_use_env r_s1 (Loc x) OwnPerm) (comp_use_env (ereq_use_env (Loc x) t2) (one_use_env (Loc x) OwnPerm)))" in subst)
apply (cut_tac r_s="r_s3" and x="Loc x" in ignore_rem_use_env)
apply (rule_tac r_s="r_s1" in leq_use_none)
apply (auto)
apply (simp add: valid_exp_use_env_def)
apply (simp add: sub_use_env_def)
apply (auto)
apply (simp add: fresh_var_def)
apply (cut_tac r_x="r_s1" and r_s="r_f" and x="Loc x" in leq_use_none)
apply (auto)
(* - given this, we can finish proving the bound *)
apply (rule_tac rhs_unroll_dcl_use_env)
apply (rule_tac rhs_unroll_rem_use_env)
apply (rule_tac dist_rem_leq_use_env)
apply (rule_tac rhs_weak_leq_use_env)
apply (rule_tac weak_ereq_use_env)
apply (case_tac c)
apply (auto)
apply (simp add: pure_fun_def)
apply (simp add: unlim_def)
apply (rule_tac rhs_add_leq_use_env)
apply (auto)
(* - unrolling definitions to prove the subtracter bound *)
apply (simp add: leq_use_env_def)
apply (simp add: one_use_env_def)
apply (simp add: add_use_env_def)
(* - proving the requirement bound *)
apply (rule_tac r_sb="diff_use_env (ereq_use_env (Loc x) t2) (one_use_env (Loc x) OwnPerm)" in trans_leq_use_env)
apply (simp add: ereq_use_env_def)
apply (rule_tac diff_one_leq_use_env)
apply (rule_tac lhs_unroll_dcl_use_env)
apply (rule_tac dist_diff_leq_use_env)
apply (rule_tac self_diff_leq_use_env)
done
lemma add_env_force_ex: "\<lbrakk> \<forall> z. add_env s x v z = add_env s x w z \<rbrakk> \<Longrightarrow> v = w"
apply (erule_tac x="x" in allE)
apply (simp add: add_env_def)
done
lemma add_env_force: "\<lbrakk> add_env s x v = add_env s x w \<rbrakk> \<Longrightarrow> v = w"
apply (rule_tac s="s" and x="x" and v="v" and w="w" in add_env_force_ex)
apply (auto)
done
lemma disj_add_use_env: "\<lbrakk> disj_use_env r_s r_x; r_s x = NoPerm \<rbrakk> \<Longrightarrow> disj_use_env r_s (add_use_env r_x x r)"
apply (simp add: disj_use_env_def)
apply (simp add: add_use_env_def)
apply (simp add: mini_disj_use_env_def)
done
lemma strong_disj_add_use_env: "\<lbrakk> strong_disj_use_env r_s r_x; r_s x = NoPerm \<rbrakk> \<Longrightarrow> strong_disj_use_env r_s (add_use_env r_x x r)"
apply (simp add: strong_disj_use_env_def)
apply (simp add: add_use_env_def)
done
lemma add_valid_exp_use_env: "\<lbrakk> valid_nres_map s rs_map; valid_exp_use_env s rs_map r_s; fresh_var s x \<rbrakk> \<Longrightarrow>
valid_exp_use_env (add_env s x v) (add_env rs_map x empty_use_env) (add_use_env r_s (Loc x) OwnPerm)"
apply (simp add: valid_exp_use_env_def)
apply (auto)
(* domain preservation *)
apply (rule_tac rhs_add_sub_use_env)
apply (rule_tac add_sub_use_env)
apply (simp)
apply (simp add: add_env_def)
(* separation *)
apply (simp add: sep_nres_map_def)
apply (auto)
apply (case_tac "x = xa")
apply (auto)
apply (simp add: nres_add_same)
apply (rule_tac empty_strong_disj_use_env2)
apply (simp add: nres_add_diff)
apply (erule_tac x="xa" in allE)
apply (rule_tac comm_strong_disj_use_env)
apply (rule_tac strong_disj_add_use_env)
apply (rule_tac comm_strong_disj_use_env)
apply (simp)
apply (simp add: valid_nres_map_def)
apply (simp add: sub_nres_map_def)
apply (auto)
apply (erule_tac x="xa" in allE)
apply (simp add: sub_use_env_def)
apply (simp add: fresh_var_def)
apply (auto)
done
(*
the hard part that we're dealing with right now is that at the end using pairs does NOT work for the WRITE constants,
since we want "use" permission for the array, but "own" permission for the value being written.
what this suggests is that we have to allow for constant + application to be a value.
even if we do this, we need to give them all their own unique cases, just like for unpacking.
- this is more complicated than it is with constants, since the contents of the first arg will generally be somewhat complex.
*)
lemma saccmk2_var_type: "\<lbrakk> env (Loc x) = Some (ChanTy t c_end) \<rbrakk> \<Longrightarrow>
well_typed env (one_use_env (Loc x) OwnPerm) (VarExp (LocType x x)) (ChanTy t c_end) (one_use_env (Loc x) OwnPerm) (one_use_env (Loc x) OwnPerm)"
apply (auto)
apply (rule_tac ereq_leq_use_envx)
apply (simp add: one_use_env_def)
apply (rule_tac x="empty_use_env" in exI)
apply (auto)
apply (rule_tac rhs_weak_leq_use_env)
apply (rule_tac dist_weak_comp_use_env)
apply (rule_tac weak_ereq_use_env)
apply (simp add: unlim_def)
apply (simp add: weak_use_env_def)
apply (simp add: empty_use_env_def)
apply (rule_tac id_leq_use_env)
apply (rule_tac id_leq_use_env)
apply (rule_tac leq_empty_use_env)
apply (rule_tac diff_leq_use_env)
apply (rule_tac ereq_leq_use_envx)
apply (simp add: one_use_env_def)
done
lemma saccmk2_pair_type: "\<lbrakk> well_typed env (one_use_env x1 OwnPerm) v1 t1 (one_use_env x1 OwnPerm) (one_use_env x1 OwnPerm);
well_typed env (one_use_env x2 OwnPerm) v2 t2 (one_use_env x2 OwnPerm) (one_use_env x2 OwnPerm);
r_s1 x1 = OwnPerm; r_s1 x2 = OwnPerm; r_s2 x1 = NoPerm; r_s2 x2 = NoPerm;
leq_use_env r_s2 r_s1; leq_use_env rx r_s2; is_own r; x1 \<noteq> x2 \<rbrakk> \<Longrightarrow>
well_typed env r_s1 (PairExp v1 v2) (PairTy t1 t2 r) r_s2 rx"
apply (auto)
apply (rule_tac x="r_s1" in exI)
apply (rule_tac x="r_s1" in exI)
apply (rule_tac x="one_use_env x1 OwnPerm" in exI)
apply (auto)
apply (rule_tac r_s="one_use_env x1 OwnPerm" in well_typed_incr_simul_perm)
apply (simp add: leq_use_env_def)
apply (simp add: one_use_env_def)
apply (simp)
apply (rule_tac x="one_use_env x2 OwnPerm" in exI)
apply (auto)
apply (rule_tac r_s="one_use_env x2 OwnPerm" in well_typed_incr_simul_perm)
apply (simp add: leq_use_env_def)
apply (simp add: one_use_env_def)
apply (simp)
apply (simp add: is_own_def)
apply (simp add: leq_use_env_def)
apply (simp add: one_use_env_def)
apply (simp add: is_own_def)
apply (simp add: leq_use_env_def)
apply (simp add: one_use_env_def)
apply (simp add: is_own_def)
apply (case_tac "max_aff (req_type t1) (req_type t2)")
apply (auto)
apply (simp add: is_own_def)
apply (simp add: one_use_env_def)
apply (simp add: disj_use_env_def)
apply (simp add: mini_disj_use_env_def)
apply (rule_tac x="add_use_env (one_use_env x1 OwnPerm) x2 OwnPerm" in exI)
apply (auto)
apply (rule_tac mini_disj_diff_leq_use_env2)
apply (simp)
apply (simp add: mini_disj_use_env_def)
apply (simp add: one_use_env_def)
apply (simp add: add_use_env_def)
apply (auto)
apply (rule_tac add_leq_use_env)
apply (simp add: leq_use_env_def)
apply (simp add: one_use_env_def)
apply (auto)
apply (simp add: pair_req_def)
apply (auto)
apply (rule_tac leq_empty_use_env)
apply (simp add: leq_use_env_def)
apply (simp add: add_use_env_def)
apply (simp add: diff_use_env_def)
apply (simp add: comp_use_env_def)
apply (simp add: one_use_env_def)
apply (simp add: is_own_def)
done
lemma sacc_mk2_act_case: "
\<lbrakk>well_typed_state s1 env rs_map; valid_exp_use_env s1 rs_map r_f; well_typed env r_s1 v t1 r_s2 rx2; proper_exp rs_map (AppExp (ConstExp c) v);
leq_use_env r_s3 (diff_use_env r_s2 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex));
leq_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_s2; leq_use_env r_ex r_s1; leq_use_env rx r_s3;
leq_use_env (app_req rx1 rx2 r t2 r_ex) rx; leq_use_env r_s1 r_f; FunTy t1 t2 r a \<in> const_type c; c \<noteq> FixConst; ax = Mk2Act x31 x32;
is_value v; app_con s1 c v (Mk2Act x31 x32) (s2, e2)\<rbrakk>
\<Longrightarrow> \<exists>g_ax. well_typed (red_env env g_ax) (exp_red_use_env r_s1 g_ax) e2 t2 (end_red_use_env r_s3 g_ax) (end_red_use_env rx g_ax) \<and>
proper_exp (red_nres_map rs_map g_ax) e2 \<and>
well_typed_state s2 (red_env env g_ax) (red_nres_map rs_map g_ax) \<and>
valid_exp_use_env s2 (red_nres_map rs_map g_ax) (exp_red_use_env r_f g_ax) \<and>
safe_act s1 (infl_use_env r_f r_s3) g_ax \<and> corr_act (Mk2Act x31 x32) g_ax"
apply (case_tac c)
apply (auto)
apply (cut_tac eq_own)
apply (auto)
apply (rule_tac x="Add2ResAct x31 x32 t" in exI)
apply (auto)
apply (cut_tac env="add_env (add_env env (Loc x31) (ChanTy t SEnd)) (Loc x32) (ChanTy t REnd)" and
?r_s1.0="add_use_env (add_use_env r_s1 (Loc x31) OwnPerm) (Loc x32) OwnPerm" and
?r_s2.0="r_s3" and ?v1.0="VarExp (LocType x31 x31)" and ?v2.0="VarExp (LocType x32 x32)" and ?t1.0="ChanTy t SEnd" and ?t2.0="ChanTy t REnd"
and ?x1.0="Loc x31" and ?x2.0="Loc x32" in saccmk2_pair_type)
apply (rule_tac saccmk2_var_type)
apply (simp add: add_env_def)
apply (rule_tac saccmk2_var_type)
apply (simp add: add_env_def)
apply (auto)
apply (simp add: add_use_env_def)
apply (simp add: add_use_env_def)
apply (rule_tac r_s="r_f" in leq_use_none)
apply (rule_tac r_sb="r_s1" in trans_leq_use_env)
apply (simp)
apply (rule_tac r_sb="diff_use_env r_s2 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex)" in trans_leq_use_env)
apply (rule_tac diff_leq_use_env)
apply (simp_all)
apply (simp add: fresh_var_def)
apply (simp add: valid_exp_use_env_def)
apply (simp add: sub_use_env_def)
apply (auto)
apply (rule_tac r_s="r_f" in leq_use_none)
apply (rule_tac r_sb="r_s1" in trans_leq_use_env)
apply (simp)
apply (rule_tac r_sb="diff_use_env r_s2 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex)" in trans_leq_use_env)
apply (rule_tac diff_leq_use_env)
apply (simp_all)
apply (simp add: fresh_var_def)
apply (simp add: valid_exp_use_env_def)
apply (simp add: sub_use_env_def)
apply (auto)
apply (rule_tac rhs_add_leq_use_env)
apply (rule_tac rhs_add_leq_use_env)
apply (rule_tac r_sb="diff_use_env r_s2 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex)" in trans_leq_use_env)
apply (rule_tac diff_leq_use_env)
apply (simp_all)
apply (rule_tac x="ChanTy t REnd" in exI)
apply (rule_tac x="ra" in exI)
apply (auto)
apply (simp add: pure_fun_def)
apply (simp add: is_own_def)
apply (rule_tac x="r_s2a" in exI)
apply (rule_tac x="r_s3a" in exI)
apply (rule_tac x="rx1a" in exI)
apply (auto)
apply (rule_tac x="rx2a" in exI)
apply (auto)
apply (rule_tac x="r_exb" in exI)
apply (auto)
apply (simp add: pure_fun_def)
apply (simp add: is_own_def)
(* proving the state remains proper *)
apply (simp add: proper_exp_def)
apply (auto)
apply (rule_tac x="[]" in exI)
apply (simp)
apply (rule_tac x="[]" in exI)
apply (simp)
(* proving the state remains well-typed: environment containment *)
apply (simp add: well_typed_state_def)
apply (auto)
apply (rule_tac dist_add_sub_env)
apply (rule_tac dist_add_sub_env)
apply (simp)
(* res_map validity: completeness *)
apply (simp add: valid_nres_map_def)
apply (auto)
apply (rule_tac add_full_nres_map)
apply (rule_tac add_full_nres_map)
apply (simp)
(* - disjointness *)
apply (rule_tac disj_add_nres_map)
apply (rule_tac disj_add_nres_map)
apply (simp)
apply (simp add: sep_nres_map_def)
apply (simp add: empty_strong_disj_use_env1)
apply (simp add: sep_nres_map_def)
apply (simp add: empty_strong_disj_use_env1)
(* - element containment *)
apply (rule_tac dist_add_sub_nres_map)
apply (rule_tac dist_add_sub_nres_map)
apply (simp)
apply (rule_tac empty_sub_use_env)
apply (rule_tac empty_sub_use_env)
(* proving that the memory is still well-typed. starting with x = x31 / x = x32 *)
apply (case_tac "x = x31")
apply (simp add: add_env_def)
apply (case_tac "x = x32")
apply (simp add: add_env_def)
(* - otherwise compare with the originals *)
apply (simp add: add_env_def)
apply (erule_tac x="x" in allE)
apply (case_tac "s1 x")
apply (auto)
apply (simp add: add_env_def)
apply (case_tac "env (Loc x)")
apply (auto)
apply (rule_tac well_typed_mv_add_vars)
apply (rule_tac well_typed_mv_add_vars)
apply (simp add: nres_lookup_def)
apply (simp add: add_env_def)
apply (simp add: fresh_var_def)
apply (simp add: sub_env_def)
apply (auto)
apply (simp add: fresh_var_def)
apply (simp add: sub_env_def)
apply (simp add: add_env_def)
apply (auto)
(* - properness *)
apply (rule_tac proper_add_mv)
apply (rule_tac proper_add_mv)
apply (simp)
apply (simp add: fresh_var_def)
apply (simp add: valid_nres_map_def)
apply (simp add: full_nres_map_def)
apply (simp add: nres_lookup_def)
apply (rule_tac leq_empty_use_env)
apply (simp add: fresh_var_def)
apply (simp add: valid_nres_map_def)
apply (simp add: full_nres_map_def)
apply (simp add: nres_lookup_def)
apply (simp add: add_env_def)
apply (rule_tac leq_empty_use_env)
(* proving the new res_map is still valid: expression map containment *)
apply (simp add: valid_exp_use_env_def)
apply (auto)
apply (rule_tac rhs_add_sub_use_env)
apply (rule_tac rhs_add_sub_use_env)
apply (rule_tac add_sub_use_env)
apply (rule_tac add_sub_use_env)
apply (simp)
apply (simp add: add_env_def)
apply (simp add: add_env_def)
(* map separation *)
apply (rule_tac add_sep_nres_map)
apply (rule_tac add_sep_nres_map)
apply (simp add: sep_nres_map_def)
apply (auto)
apply (simp add: well_typed_state_def)
apply (simp add: valid_nres_map_def)
apply (simp add: sub_nres_map_def)
apply (rule_tac add_strong_disj_use_env)
apply (rule_tac add_strong_disj_use_env)
apply (auto)
apply (case_tac "\<forall>xa. nres_lookup rs_map x xa \<noteq> NoPerm \<longrightarrow> (\<exists>y. xa = Loc y \<and> (\<exists>ya. s1 y = Some ya))")
apply (erule_tac x="Loc x31" in allE)
apply (simp add: fresh_var_def)
apply (simp add: sub_use_env_def)
apply (case_tac "\<forall>xa. nres_lookup rs_map x xa \<noteq> NoPerm \<longrightarrow> (\<exists>y. xa = Loc y \<and> (\<exists>ya. s1 y = Some ya))")
apply (erule_tac x="Loc x32" in allE)
apply (simp add: fresh_var_def)
apply (simp add: sub_use_env_def)
apply (rule_tac empty_strong_disj_use_env2)
apply (rule_tac empty_strong_disj_use_env2)
apply (simp add: fresh_var_def)
apply (simp add: fresh_var_def)
done
(* the idea is that if we added a permission, we required it to be a variable not already in the env, i.e. a variable free in e2,
so we can remove x by default. *)
(* in general, this statement says that given a constant-application, the result can be typed with a certain env + perm set,
so that the state remains well-typed relative to the env + global perm_map, and the env + perm set remains valid *)
lemma safe_app_con_case: "\<lbrakk> well_typed_state s1 env rs_map;
app_red_exp ConstApp (s1, AppExp (ConstExp c) v) ax (s2, e2); valid_exp_use_env s1 rs_map r_f;
well_typed env r_s1 v t1 r_s2 rx2; proper_exp rs_map (AppExp (ConstExp c) v);
leq_use_env r_s3 (diff_use_env r_s2 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex));
leq_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_s2; leq_use_env r_ex r_s1; disj_use_env rx1 (lift_use_env rx2 r);
leq_use_env rx r_s3; leq_use_env (app_req rx1 rx2 r t2 r_ex) rx; leq_use_env r_s1 r_f;
FunTy t1 t2 r a \<in> const_type c; c \<noteq> FixConst\<rbrakk>
\<Longrightarrow> \<exists>g_ax . well_typed (red_env env g_ax) (exp_red_use_env r_s1 g_ax) e2 t2 (end_red_use_env r_s3 g_ax) (end_red_use_env rx g_ax) \<and>
proper_exp (red_nres_map rs_map g_ax) e2 \<and> well_typed_state s2 (red_env env g_ax) (red_nres_map rs_map g_ax) \<and>
valid_exp_use_env s2 (red_nres_map rs_map g_ax) (exp_red_use_env r_f g_ax) \<and> safe_act s1 (infl_use_env r_f r_s3) g_ax \<and> corr_act ax g_ax"
apply (case_tac ax)
apply (auto)
(* no action cases *)
apply (cut_tac ?r_s2.0="r_s2" and ?r_s1.0="r_s1" and env="env" in well_typed_perm_leq)
apply (auto)
apply (case_tac c)
apply (auto)
apply (rule_tac sares_unpack_case)
apply (auto)
apply (simp add: upc_init_abbrev_def)
apply (rule_tac x="r_s1" in exI)
apply (auto)
apply (rule_tac id_leq_use_env)
apply (rule_tac x="rx1" in exI)
apply (auto)
apply (rule_tac r_sb="r_s2" in trans_leq_use_env)
apply (simp)
apply (rule_tac r_sb="comp_use_env rx1 (lift_use_env rx2 r)" in trans_leq_use_env)
apply (simp)
apply (rule_tac self_comp_leq_use_env1)
apply (rule_tac x="rx2" in exI)
apply (rule_tac x="r_s2" in exI)
apply (auto)
apply (rule_tac x="r_s2a" in exI)
apply (rule_tac x="r_s3a" in exI)
apply (rule_tac x="rx1a" in exI)
apply (auto)
(* new resource cases. in all cases t2 should be the correct type to add *)
apply (rule_tac x="AddResAct x2 t2 empty_use_env" in exI)
apply (auto)
(* - lemma for main well-typedness statement *)
apply (rule_tac ?s2.0="s2" and ?rx1.0="rx1" in sacc_make_act_case)
apply (auto)
apply (rule_tac r_sb="diff_use_env r_s2 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex)" in trans_leq_use_env)
apply (rule_tac lhs_unroll_dcl_use_env)
apply (rule_tac self_diff_leq_use_env)
apply (simp)
(* - proper expression *)
apply (case_tac c)
apply (auto)
apply (simp add: proper_exp_def)
apply (rule_tac x="[]" in exI)
apply (auto)
(* - well typed state. *)
apply (case_tac "\<not> (\<exists> v. s2 = add_env s1 x2 v)")
apply (case_tac c)
apply (auto)
apply (rule_tac well_typed_state_add_vars)
apply (auto)
apply (case_tac c)
apply (auto)
(* - wt mem val: array case *)
apply (case_tac c)
apply (auto)
apply (cut_tac s="s1" and x="x2" and v="va" and w="ArrValue []" in add_env_force)
apply (auto)
apply (simp add: pure_fun_def)
(* - valid res list *)
apply (rule_tac x="\<lambda> x. None" in exI)
apply (simp add: valid_res_list_def)
(* - properness *)
apply (case_tac c)
apply (auto)
apply (cut_tac s="s1" and x="x2" and v="va" and w="ArrValue []" in add_env_force)
apply (simp)
apply (auto)
(* - x22 is contained in rs_map (true because x22 is the empty set) *)
apply (simp add: sub_use_env_def)
apply (simp add: empty_use_env_def)
(* - map separation + strength *)
apply (simp add: sep_nres_map_def)
apply (auto)
apply (rule_tac empty_strong_disj_use_env1)
(* - valid use env. *)
apply (case_tac "\<not> (\<exists> v. s2 = add_env s1 x2 v)")
apply (case_tac c)
apply (auto)
apply (rule_tac add_valid_exp_use_env)
apply (simp add: well_typed_state_def)
apply (simp)
apply (case_tac c)
apply (auto)
apply (case_tac c)
apply (auto)
apply (simp add: fresh_var_def)
apply (rule_tac leq_empty_use_env)
(* dual new resource case. (basically only channel creation) *)
apply (rule_tac sacc_mk2_act_case)
apply (auto)
(* resource usage case. (currently empty) *)
apply (case_tac c)
apply (auto)
(*
apply (case_tac c)
apply (auto)
apply (case_tac c)
apply (auto)*)
done
lemma well_typed_empty_state: "well_typed_state empty_env empty_env empty_env"
apply (simp add: well_typed_state_def)
apply (auto)
apply (simp add: sub_env_def)
apply (simp add: empty_env_def)
apply (simp add: valid_nres_map_def)(*
apply (simp add: disj_res_map_def)
apply (auto)
apply (simp add: lookup_res_def)
apply (rule_tac empty_strong_disj_use_env1)*)
apply (auto)
apply (simp add: full_nres_map_def)
apply (simp add: empty_env_def)
apply (simp add: disj_nres_map_def)
apply (auto)
apply (simp add: nres_lookup_def)
apply (simp add: empty_env_def)
apply (rule_tac empty_strong_disj_use_env1)
apply (simp add: sub_nres_map_def)
apply (simp add: sub_use_env_def)
apply (simp add: nres_lookup_def)
apply (simp add: empty_env_def)
apply (simp add: empty_use_env_def)
apply (simp add: empty_env_def)
done
fun state_vars where
"state_vars s = { x | x. s x \<noteq> None }"
definition fv_restr_env where
"fv_restr_env e s = (\<lambda> x. if x \<in> free_vars e then s x else None)"
lemma fv_restr_env_use: "\<lbrakk> x \<in> free_vars e \<rbrakk> \<Longrightarrow> fv_restr_env e s x = s x"
apply (simp add: fv_restr_env_def)
done
lemma dist_rem_contain_env: "\<lbrakk> contain_env s s' \<rbrakk> \<Longrightarrow> contain_env (rem_env s x) (rem_env s' x)"
apply (simp add: contain_env_def)
apply (simp add: rem_env_def)
apply (auto)
apply (erule_tac x="xa" in allE)
apply (case_tac "s' xa")
apply (auto)
apply (simp add: rem_env_def)
done
lemma fv_contain_env: "\<lbrakk> free_vars e' \<subseteq> free_vars e \<rbrakk> \<Longrightarrow> contain_env (fv_restr_env e s) (fv_restr_env e' s)"
apply (simp add: contain_env_def)
apply (simp add: fv_restr_env_def)
apply (auto)
apply (case_tac "s x")
apply (auto)
apply (simp add: fv_restr_env_def)
apply (auto)
done
lemma rem_fv_contain_env: "\<lbrakk> free_vars e' - {x} \<subseteq> free_vars e \<rbrakk> \<Longrightarrow> contain_env (fv_restr_env e (rem_env s x)) (fv_restr_env e' (rem_env s x))"
apply (simp add: contain_env_def)
apply (simp add: fv_restr_env_def)
apply (auto)
apply (simp add: rem_env_def)
apply (auto)
apply (case_tac "s xa")
apply (auto)
apply (simp add: fv_restr_env_def)
apply (simp add: rem_env_def)
apply (auto)
done
lemma rem_fv_restr_env: "rem_env (fv_restr_env e s) x = fv_restr_env e (rem_env s x)"
apply (case_tac "\<not> (\<forall> y. rem_env (fv_restr_env e s) x y = fv_restr_env e (rem_env s x) y)")
apply (auto)
apply (simp add: rem_env_def)
apply (simp add: fv_restr_env_def)
apply (case_tac "x = y")
apply (auto)
apply (case_tac "x \<in> free_vars e")
apply (auto)
apply (simp add: fv_restr_env_def)
apply (case_tac "y \<in> free_vars e")
apply (auto)
done
(* the question is, how do we allow for replicable pairs while enforcing full resource disjointness?
I guess the "natural" way to do it based on what we already have is to simply allow pairs that contain
values that are not unique to also be values.
- if x is in the end perms, x is in the reqs. if r is own, (lift rx r) subtracts it out.
- if r is not own, it means that e is replicable, in which case it is again not a var,
- x is not in the end perms, which is trivial
an even cleaner solution is to make a "replicable" pair value and type accordingly.
in this case e is still a var, however at the level of reduction semantics, we can keep
the var out of the name set.
*)
end |
% X: data matrix, each row is one observation, each column is one feature
% d: reduced dimension
% Y: dimensionality-reduced data
% Copyright by Quan Wang, 2011/05/10
% Please cite: Quan Wang. Kernel Principal Component Analysis and its
% Applications in Face Recognition and Active Shape Models.
% arXiv:1207.3538 [cs.CV], 2012.
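% Example usage (illustrative, with synthetic data):
%   X = randn(100,5); % 100 observations, 5 features
%   Y = PCA(X,2);     % project onto the top 2 principal components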
function Y=PCA(X,d)
%% eigenvalue analysis
Sx=cov(X);
[V,D]=eig(Sx);
eigValue=diag(D);
[eigValue,IX]=sort(eigValue,'descend');
eigVector=V(:,IX);
%% normalization
norm_eigVector=sqrt(sum(eigVector.^2));
eigVector=eigVector./repmat(norm_eigVector,size(eigVector,1),1);
%% dimensionality reduction
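% note: X is projected directly, without subtracting the column means;
% cov() centers internally when computing Sx, but Y is not centered here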
Y=X*eigVector(:,1:d);
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.ZCohomology.Properties where
open import Cubical.ZCohomology.Base
open import Cubical.HITs.S1
open import Cubical.HITs.Sn
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Function
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Pointed
open import Cubical.Foundations.Transport
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.GroupoidLaws
open import Cubical.Foundations.Univalence
open import Cubical.Data.Empty
open import Cubical.Data.Sigma hiding (_×_)
open import Cubical.HITs.Susp
open import Cubical.HITs.Wedge
open import Cubical.HITs.SetTruncation renaming (rec to sRec ; rec2 to sRec2 ; elim to sElim ; elim2 to sElim2 ; setTruncIsSet to §)
open import Cubical.Data.Int renaming (_+_ to _ℤ+_)
open import Cubical.Data.Nat
open import Cubical.HITs.Truncation renaming (elim to trElim ; map to trMap ; rec to trRec ; elim3 to trElim3)
open import Cubical.Homotopy.Loopspace
open import Cubical.Homotopy.Connected
open import Cubical.Homotopy.Freudenthal
open import Cubical.Algebra.Group
open import Cubical.Algebra.Semigroup
open import Cubical.Algebra.Monoid
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Data.NatMinusOne
open import Cubical.HITs.Pushout
open import Cubical.Data.Sum.Base
open import Cubical.Data.HomotopyGroup
open import Cubical.ZCohomology.KcompPrelims
open Iso renaming (inv to inv')
private
variable
ℓ ℓ' : Level
A : Type ℓ
B : Type ℓ'
A' : Pointed ℓ
infixr 34 _+ₖ_
infixr 34 _+ₕ_
is2ConnectedKn : (n : ℕ) → isConnected 2 (coHomK (suc n))
is2ConnectedKn zero = ∣ ∣ base ∣ ∣
, trElim (λ _ → isOfHLevelPath 2 (isOfHLevelTrunc 2) _ _)
(trElim (λ _ → isOfHLevelPath 3 (isOfHLevelSuc 2 (isOfHLevelTrunc 2)) _ _)
(toPropElim (λ _ → isOfHLevelTrunc 2 _ _) refl))
is2ConnectedKn (suc n) = ∣ ∣ north ∣ ∣
, trElim (λ _ → isOfHLevelPath 2 (isOfHLevelTrunc 2) _ _)
(trElim (λ _ → isProp→isOfHLevelSuc (3 + n) (isOfHLevelTrunc 2 _ _))
(suspToPropElim (ptSn (suc n)) (λ _ → isOfHLevelTrunc 2 _ _) refl))
isConnectedKn : (n : ℕ) → isConnected (2 + n) (coHomK (suc n))
isConnectedKn n = isOfHLevelRetractFromIso 0 (invIso (truncOfTruncIso (2 + n) 1)) (sphereConnected (suc n))
-- Induction principles for cohomology groups
-- If we want to show a proposition about some x : Hⁿ(A), it suffices to show it under the
-- assumption that x = ∣f∣₂ and that f is pointed
coHomPointedElim : {A : Type ℓ} (n : ℕ) (a : A) {B : coHom (suc n) A → Type ℓ'}
→ ((x : coHom (suc n) A) → isProp (B x))
→ ((f : A → coHomK (suc n)) → f a ≡ coHom-pt (suc n) → B ∣ f ∣₂)
→ (x : coHom (suc n) A) → B x
coHomPointedElim {ℓ' = ℓ'} {A = A} n a isprop indp =
sElim (λ _ → isOfHLevelSuc 1 (isprop _))
λ f → helper n isprop indp f (f a) refl
where
helper : (n : ℕ) {B : coHom (suc n) A → Type ℓ'}
→ ((x : coHom (suc n) A) → isProp (B x))
→ ((f : A → coHomK (suc n)) → f a ≡ coHom-pt (suc n) → B ∣ f ∣₂)
→ (f : A → coHomK (suc n))
→ (x : coHomK (suc n))
→ f a ≡ x → B ∣ f ∣₂
-- pattern matching a bit extra to avoid isOfHLevelPlus'
helper zero isprop ind f =
trElim (λ _ → isOfHLevelPlus {n = 1} 2 (isPropΠ λ _ → isprop _))
(toPropElim (λ _ → isPropΠ λ _ → isprop _) (ind f))
helper (suc zero) isprop ind f =
trElim (λ _ → isOfHLevelPlus {n = 1} 3 (isPropΠ λ _ → isprop _))
(suspToPropElim base (λ _ → isPropΠ λ _ → isprop _) (ind f))
helper (suc (suc zero)) isprop ind f =
trElim (λ _ → isOfHLevelPlus {n = 1} 4 (isPropΠ λ _ → isprop _))
(suspToPropElim north (λ _ → isPropΠ λ _ → isprop _) (ind f))
helper (suc (suc (suc n))) isprop ind f =
trElim (λ _ → isOfHLevelPlus' {n = 5 + n} 1 (isPropΠ λ _ → isprop _))
(suspToPropElim north (λ _ → isPropΠ λ _ → isprop _) (ind f))
coHomPointedElim2 : {A : Type ℓ} (n : ℕ) (a : A) {B : coHom (suc n) A → coHom (suc n) A → Type ℓ'}
→ ((x y : coHom (suc n) A) → isProp (B x y))
→ ((f g : A → coHomK (suc n)) → f a ≡ coHom-pt (suc n) → g a ≡ coHom-pt (suc n) → B ∣ f ∣₂ ∣ g ∣₂)
→ (x y : coHom (suc n) A) → B x y
coHomPointedElim2 {ℓ' = ℓ'} {A = A} n a isprop indp = sElim2 (λ _ _ → isOfHLevelSuc 1 (isprop _ _))
λ f g → helper n a isprop indp f g (f a) (g a) refl refl
where
helper : (n : ℕ) (a : A) {B : coHom (suc n) A → coHom (suc n) A → Type ℓ'}
→ ((x y : coHom (suc n) A) → isProp (B x y))
→ ((f g : A → coHomK (suc n)) → f a ≡ coHom-pt (suc n) → g a ≡ coHom-pt (suc n) → B ∣ f ∣₂ ∣ g ∣₂)
→ (f g : A → coHomK (suc n))
→ (x y : coHomK (suc n))
→ f a ≡ x → g a ≡ y
→ B ∣ f ∣₂ ∣ g ∣₂
helper zero a isprop indp f g =
elim2 (λ _ _ → isOfHLevelPlus {n = 1} 2 (isPropΠ2 λ _ _ → isprop _ _))
(toPropElim2 (λ _ _ → isPropΠ2 λ _ _ → isprop _ _) (indp f g))
helper (suc zero) a isprop indp f g =
elim2 (λ _ _ → isOfHLevelPlus {n = 1} 3 (isPropΠ2 λ _ _ → isprop _ _))
(suspToPropElim2 base (λ _ _ → isPropΠ2 λ _ _ → isprop _ _) (indp f g))
helper (suc (suc zero)) a isprop indp f g =
elim2 (λ _ _ → isOfHLevelPlus {n = 1} 4 (isPropΠ2 λ _ _ → isprop _ _))
(suspToPropElim2 north (λ _ _ → isPropΠ2 λ _ _ → isprop _ _) (indp f g))
helper (suc (suc (suc n))) a isprop indp f g =
elim2 (λ _ _ → isOfHLevelPlus' {n = 5 + n} 1 (isPropΠ2 λ _ _ → isprop _ _))
(suspToPropElim2 north (λ _ _ → isPropΠ2 λ _ _ → isprop _ _) (indp f g))
{- Equivalence between cohomology of A and reduced cohomology of (A + 1) -}
coHomRed+1Equiv : (n : ℕ) →
(A : Type ℓ) →
(coHom n A) ≡ (coHomRed n ((A ⊎ Unit , inr (tt))))
coHomRed+1Equiv zero A i = ∥ helpLemma {C = (Int , pos 0)} i ∥₂
module coHomRed+1 where
helpLemma : {C : Pointed ℓ} → ( (A → (typ C)) ≡ ((((A ⊎ Unit) , inr (tt)) →∙ C)))
helpLemma {C = C} = isoToPath (iso map1
map2
(λ b → linvPf b)
(λ _ → refl))
where
map1 : (A → typ C) → ((((A ⊎ Unit) , inr (tt)) →∙ C))
map1 f = map1' , refl
module helpmap where
map1' : A ⊎ Unit → fst C
map1' (inl x) = f x
map1' (inr x) = pt C
map2 : ((((A ⊎ Unit) , inr (tt)) →∙ C)) → (A → typ C)
map2 (g , pf) x = g (inl x)
linvPf : (b :((((A ⊎ Unit) , inr (tt)) →∙ C))) → map1 (map2 b) ≡ b
linvPf (f , snd) i = (λ x → helper x i) , λ j → snd ((~ i) ∨ j)
where
helper : (x : A ⊎ Unit) → ((helpmap.map1') (map2 (f , snd)) x) ≡ f x
helper (inl x) = refl
helper (inr tt) = sym snd
coHomRed+1Equiv (suc zero) A i = ∥ coHomRed+1.helpLemma A i {C = (coHomK 1 , ∣ base ∣)} i ∥₂
coHomRed+1Equiv (suc (suc n)) A i = ∥ coHomRed+1.helpLemma A i {C = (coHomK (2 + n) , ∣ north ∣)} i ∥₂
-----------
Kn→ΩKn+1 : (n : ℕ) → coHomK n → typ (Ω (coHomK-ptd (suc n)))
Kn→ΩKn+1 n = Iso.fun (Iso-Kn-ΩKn+1 n)
ΩKn+1→Kn : (n : ℕ) → typ (Ω (coHomK-ptd (suc n))) → coHomK n
ΩKn+1→Kn n = Iso.inv (Iso-Kn-ΩKn+1 n)
Kn≃ΩKn+1 : {n : ℕ} → coHomK n ≃ typ (Ω (coHomK-ptd (suc n)))
Kn≃ΩKn+1 {n = n} = isoToEquiv (Iso-Kn-ΩKn+1 n)
---------- Algebra/Group stuff --------
0ₖ : (n : ℕ) → coHomK n
0ₖ = coHom-pt
_+ₖ_ : {n : ℕ} → coHomK n → coHomK n → coHomK n
_+ₖ_ {n = n} x y = ΩKn+1→Kn n (Kn→ΩKn+1 n x ∙ Kn→ΩKn+1 n y)
-ₖ_ : {n : ℕ} → coHomK n → coHomK n
-ₖ_ {n = n} x = ΩKn+1→Kn n (sym (Kn→ΩKn+1 n x))
-- subtraction as a binary operator
_-ₖ_ : {n : ℕ} → coHomK n → coHomK n → coHomK n
_-ₖ_ {n = n} x y = ΩKn+1→Kn n (Kn→ΩKn+1 n x ∙ sym (Kn→ΩKn+1 n y))
+ₖ-syntax : (n : ℕ) → coHomK n → coHomK n → coHomK n
+ₖ-syntax n = _+ₖ_ {n = n}
-ₖ-syntax : (n : ℕ) → coHomK n → coHomK n
-ₖ-syntax n = -ₖ_ {n = n}
-'ₖ-syntax : (n : ℕ) → coHomK n → coHomK n → coHomK n
-'ₖ-syntax n = _-ₖ_ {n = n}
syntax +ₖ-syntax n x y = x +[ n ]ₖ y
syntax -ₖ-syntax n x = -[ n ]ₖ x
syntax -'ₖ-syntax n x y = x -[ n ]ₖ y
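-- e.g. x +[ n ]ₖ y is notation for +ₖ-syntax n x y, i.e. _+ₖ_ {n = n} x y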
Kn→ΩKn+10ₖ : (n : ℕ) → Kn→ΩKn+1 n (0ₖ n) ≡ refl
Kn→ΩKn+10ₖ zero = sym (rUnit refl)
Kn→ΩKn+10ₖ (suc zero) i j = ∣ (rCancel (merid base) i j) ∣
Kn→ΩKn+10ₖ (suc (suc n)) i j = ∣ (rCancel (merid north) i j) ∣
ΩKn+1→Kn-refl : (n : ℕ) → ΩKn+1→Kn n refl ≡ 0ₖ n
ΩKn+1→Kn-refl zero = refl
ΩKn+1→Kn-refl (suc zero) = refl
ΩKn+1→Kn-refl (suc (suc zero)) = refl
ΩKn+1→Kn-refl (suc (suc (suc zero))) = refl
ΩKn+1→Kn-refl (suc (suc (suc (suc zero)))) = refl
ΩKn+1→Kn-refl (suc (suc (suc (suc (suc n))))) = refl
-0ₖ : {n : ℕ} → -[ n ]ₖ (0ₖ n) ≡ (0ₖ n)
-0ₖ {n = n} = (λ i → ΩKn+1→Kn n (sym (Kn→ΩKn+10ₖ n i)))
∙∙ (λ i → ΩKn+1→Kn n (Kn→ΩKn+10ₖ n (~ i)))
∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 n) (0ₖ n)
+ₖ→∙ : (n : ℕ) (a b : coHomK n) → Kn→ΩKn+1 n (a +[ n ]ₖ b) ≡ Kn→ΩKn+1 n a ∙ Kn→ΩKn+1 n b
+ₖ→∙ n a b = Iso.rightInv (Iso-Kn-ΩKn+1 n) (Kn→ΩKn+1 n a ∙ Kn→ΩKn+1 n b)
lUnitₖ : (n : ℕ) (x : coHomK n) → (0ₖ n) +[ n ]ₖ x ≡ x
lUnitₖ 0 x = Iso.leftInv (Iso-Kn-ΩKn+1 zero) x
lUnitₖ (suc zero) = trElim (λ _ → isOfHLevelPath 3 (isOfHLevelTrunc 3) _ _) λ x → Iso.leftInv (Iso-Kn-ΩKn+1 1) ∣ x ∣
lUnitₖ (suc (suc n)) x =
(λ i → ΩKn+1→Kn (2 + n) (Kn→ΩKn+10ₖ (2 + n) i ∙ Kn→ΩKn+1 (2 + n) x)) ∙∙
(cong (ΩKn+1→Kn (2 + n)) (sym (lUnit (Kn→ΩKn+1 (2 + n) x)))) ∙∙
Iso.leftInv (Iso-Kn-ΩKn+1 (2 + n)) x
rUnitₖ : (n : ℕ) (x : coHomK n) → x +[ n ]ₖ (0ₖ n) ≡ x
rUnitₖ 0 x = Iso.leftInv (Iso-Kn-ΩKn+1 zero) x
rUnitₖ (suc zero) = trElim (λ _ → isOfHLevelPath 3 (isOfHLevelTrunc 3) _ _) λ x → Iso.leftInv (Iso-Kn-ΩKn+1 1) ∣ x ∣
rUnitₖ (suc (suc n)) x =
(λ i → ΩKn+1→Kn (2 + n) (Kn→ΩKn+1 (2 + n) x ∙ Kn→ΩKn+10ₖ (2 + n) i))
∙∙ (cong (ΩKn+1→Kn (2 + n)) (sym (rUnit (Kn→ΩKn+1 (2 + n) x))))
∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 (2 + n)) x
rCancelₖ : (n : ℕ) (x : coHomK n) → x +[ n ]ₖ (-[ n ]ₖ x) ≡ (0ₖ n)
rCancelₖ zero x = (λ i → ΩKn+1→Kn 0 (Kn→ΩKn+1 zero x ∙ Iso.rightInv (Iso-Kn-ΩKn+1 zero) (sym (Kn→ΩKn+1 zero x)) i)) ∙
cong (ΩKn+1→Kn 0) (rCancel (Kn→ΩKn+1 zero x))
rCancelₖ (suc n) x = (λ i → ΩKn+1→Kn (suc n) (Kn→ΩKn+1 (1 + n) x ∙ Iso.rightInv (Iso-Kn-ΩKn+1 (1 + n)) (sym (Kn→ΩKn+1 (1 + n) x)) i)) ∙
cong (ΩKn+1→Kn (suc n)) (rCancel (Kn→ΩKn+1 (1 + n) x)) ∙
(λ i → ΩKn+1→Kn (suc n) (Kn→ΩKn+10ₖ (suc n) (~ i))) ∙
Iso.leftInv (Iso-Kn-ΩKn+1 (suc n)) (0ₖ (suc n))
lCancelₖ : (n : ℕ) (x : coHomK n) → (-[ n ]ₖ x) +[ n ]ₖ x ≡ (0ₖ n)
lCancelₖ 0 x = (λ i → ΩKn+1→Kn 0 (Iso.rightInv (Iso-Kn-ΩKn+1 zero) (sym (Kn→ΩKn+1 zero x)) i ∙ Kn→ΩKn+1 zero x)) ∙
cong (ΩKn+1→Kn 0) (lCancel (Kn→ΩKn+1 zero x))
lCancelₖ (suc n) x = (λ i → ΩKn+1→Kn (suc n) (Iso.rightInv (Iso-Kn-ΩKn+1 (1 + n)) (sym (Kn→ΩKn+1 (1 + n) x)) i ∙ Kn→ΩKn+1 (1 + n) x)) ∙
cong (ΩKn+1→Kn (suc n)) (lCancel (Kn→ΩKn+1 (1 + n) x)) ∙
(λ i → (ΩKn+1→Kn (suc n)) (Kn→ΩKn+10ₖ (suc n) (~ i))) ∙
Iso.leftInv (Iso-Kn-ΩKn+1 (suc n)) (0ₖ (suc n))
assocₖ : (n : ℕ) (x y z : coHomK n) → ((x +[ n ]ₖ y) +[ n ]ₖ z) ≡ (x +[ n ]ₖ (y +[ n ]ₖ z))
assocₖ n x y z = ((λ i → ΩKn+1→Kn n (Kn→ΩKn+1 n (ΩKn+1→Kn n (Kn→ΩKn+1 n x ∙ Kn→ΩKn+1 n y)) ∙ Kn→ΩKn+1 n z)) ∙∙
(λ i → ΩKn+1→Kn n (Iso.rightInv (Iso-Kn-ΩKn+1 n) (Kn→ΩKn+1 n x ∙ Kn→ΩKn+1 n y) i ∙ Kn→ΩKn+1 n z)) ∙∙
(λ i → ΩKn+1→Kn n (assoc (Kn→ΩKn+1 n x) (Kn→ΩKn+1 n y) (Kn→ΩKn+1 n z) (~ i)))) ∙
(λ i → ΩKn+1→Kn n ((Kn→ΩKn+1 n x) ∙ Iso.rightInv (Iso-Kn-ΩKn+1 n) ((Kn→ΩKn+1 n y ∙ Kn→ΩKn+1 n z)) (~ i)))
cancelₖ : (n : ℕ) (x : coHomK n) → x -[ n ]ₖ x ≡ (0ₖ n)
cancelₖ zero x = cong (ΩKn+1→Kn 0) (rCancel (Kn→ΩKn+1 zero x))
cancelₖ (suc zero) x = cong (ΩKn+1→Kn 1) (rCancel (Kn→ΩKn+1 1 x))
cancelₖ (suc (suc zero)) x = cong (ΩKn+1→Kn 2) (rCancel (Kn→ΩKn+1 2 x))
cancelₖ (suc (suc (suc zero))) x = cong (ΩKn+1→Kn 3) (rCancel (Kn→ΩKn+1 3 x))
cancelₖ (suc (suc (suc (suc zero)))) x = cong (ΩKn+1→Kn 4) (rCancel (Kn→ΩKn+1 4 x))
cancelₖ (suc (suc (suc (suc (suc n))))) x = cong (ΩKn+1→Kn (5 + n)) (rCancel (Kn→ΩKn+1 (5 + n) x))
-rUnitₖ : (n : ℕ) (x : coHomK n) → x -[ n ]ₖ 0ₖ n ≡ x
-rUnitₖ zero x = rUnitₖ zero x
-rUnitₖ (suc n) x = cong (λ y → ΩKn+1→Kn (suc n) (Kn→ΩKn+1 (suc n) x ∙ sym y)) (Kn→ΩKn+10ₖ (suc n))
∙∙ cong (ΩKn+1→Kn (suc n)) (sym (rUnit (Kn→ΩKn+1 (suc n) x)))
∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 (suc n)) x
isComm∙ : ∀ {ℓ} (A : Pointed ℓ) → Type ℓ
isComm∙ A = (p q : typ (Ω A)) → p ∙ q ≡ q ∙ p
abstract
isCommA→isCommTrunc : ∀ {ℓ} {A : Pointed ℓ} (n : ℕ) → isComm∙ A → isOfHLevel (suc n) (typ A) → isComm∙ (∥ typ A ∥ (suc n) , ∣ pt A ∣)
isCommA→isCommTrunc {A = (A , a)} n comm hlev p q =
((λ i j → (Iso.leftInv (truncIdempotentIso (suc n) hlev) ((p ∙ q) j) (~ i)))
∙∙ (λ i → cong {B = λ _ → ∥ A ∥ (suc n) } (λ x → ∣ x ∣) (cong (trRec hlev (λ x → x)) (p ∙ q)))
∙∙ (λ i → cong {B = λ _ → ∥ A ∥ (suc n) } (λ x → ∣ x ∣) (congFunct {A = ∥ A ∥ (suc n)} {B = A} (trRec hlev (λ x → x)) p q i)))
∙ ((λ i → cong {B = λ _ → ∥ A ∥ (suc n) } (λ x → ∣ x ∣) (comm (cong (trRec hlev (λ x → x)) p) (cong (trRec hlev (λ x → x)) q) i))
∙∙ (λ i → cong {B = λ _ → ∥ A ∥ (suc n) } (λ x → ∣ x ∣) (congFunct {A = ∥ A ∥ (suc n)} {B = A} (trRec hlev (λ x → x)) q p (~ i)))
∙∙ (λ i j → (Iso.leftInv (truncIdempotentIso (suc n) hlev) ((q ∙ p) j) i)))
isCommΩK1 : (n : ℕ) → isComm∙ ((Ω^ n) (coHomK-ptd 1))
isCommΩK1 zero = isCommA→isCommTrunc 2 comm-ΩS¹ isGroupoidS¹
isCommΩK1 (suc n) = Eckmann-Hilton n
open Iso renaming (inv to inv')
ptdIso→comm : ∀ {ℓ ℓ'} {A : Pointed ℓ} {B : Type ℓ'} (e : Iso (typ A) B) → isComm∙ A → isComm∙ (B , Iso.fun e (pt A))
ptdIso→comm {A = (A , a)} {B = B} e comm p q =
sym (rightInv (congIso e) (p ∙ q))
∙∙ (cong (fun (congIso e)) ((invCongFunct e p q)
∙∙ (comm (inv' (congIso e) p) (inv' (congIso e) q))
∙∙ (sym (invCongFunct e q p))))
∙∙ rightInv (congIso e) (q ∙ p)
isCommΩK : (n : ℕ) → isComm∙ (coHomK-ptd n)
isCommΩK zero p q = isSetInt _ _ (p ∙ q) (q ∙ p)
isCommΩK (suc zero) = isCommA→isCommTrunc 2 comm-ΩS¹ isGroupoidS¹
isCommΩK (suc (suc n)) = subst isComm∙ (λ i → coHomK (2 + n) , ΩKn+1→Kn-refl (2 + n) i) (ptdIso→comm {A = (_ , _)} (invIso (Iso-Kn-ΩKn+1 (2 + n))) (Eckmann-Hilton 0))
commₖ : (n : ℕ) (x y : coHomK n) → (x +[ n ]ₖ y) ≡ (y +[ n ]ₖ x)
commₖ 0 x y i = ΩKn+1→Kn 0 (isCommΩK1 0 (Kn→ΩKn+1 0 x) (Kn→ΩKn+1 0 y) i)
commₖ 1 x y i = ΩKn+1→Kn 1 (ptdIso→comm {A = ((∣ north ∣ ≡ ∣ north ∣) , snd ((Ω^ 1) (coHomK 3 , ∣ north ∣)))}
{B = coHomK 2}
(invIso (Iso-Kn-ΩKn+1 2)) (Eckmann-Hilton 0) (Kn→ΩKn+1 1 x) (Kn→ΩKn+1 1 y) i)
commₖ 2 x y i = ΩKn+1→Kn 2 (ptdIso→comm {A = (∣ north ∣ ≡ ∣ north ∣) , snd ((Ω^ 1) (coHomK 4 , ∣ north ∣))}
{B = coHomK 3}
(invIso (Iso-Kn-ΩKn+1 3)) (Eckmann-Hilton 0) (Kn→ΩKn+1 2 x) (Kn→ΩKn+1 2 y) i)
commₖ 3 x y i = ΩKn+1→Kn 3 (ptdIso→comm {A = (∣ north ∣ ≡ ∣ north ∣) , snd ((Ω^ 1) (coHomK 5 , ∣ north ∣))}
{B = coHomK 4}
(invIso (Iso-Kn-ΩKn+1 4)) (Eckmann-Hilton 0) (Kn→ΩKn+1 3 x) (Kn→ΩKn+1 3 y) i)
commₖ (suc (suc (suc (suc n)))) x y i =
ΩKn+1→Kn (4 + n) (ptdIso→comm {A = (∣ north ∣ ≡ ∣ north ∣) , snd ((Ω^ 1) (coHomK (6 + n) , ∣ north ∣))}
{B = coHomK (5 + n)}
(invIso (Iso-Kn-ΩKn+1 (5 + n))) (Eckmann-Hilton 0) (Kn→ΩKn+1 (4 + n) x) (Kn→ΩKn+1 (4 + n) y) i)
rUnitₖ' : (n : ℕ) (x : coHomK n) → x +[ n ]ₖ (0ₖ n) ≡ x
rUnitₖ' n x = commₖ n x (0ₖ n) ∙ lUnitₖ n x
-distrₖ : (n : ℕ) (x y : coHomK n) → -[ n ]ₖ (x +[ n ]ₖ y) ≡ (-[ n ]ₖ x) +[ n ]ₖ (-[ n ]ₖ y)
-distrₖ n x y = ((λ i → ΩKn+1→Kn n (sym (Kn→ΩKn+1 n (ΩKn+1→Kn n (Kn→ΩKn+1 n x ∙ Kn→ΩKn+1 n y))))) ∙∙
(λ i → ΩKn+1→Kn n (sym (Iso.rightInv (Iso-Kn-ΩKn+1 n) (Kn→ΩKn+1 n x ∙ Kn→ΩKn+1 n y) i))) ∙∙
(λ i → ΩKn+1→Kn n (symDistr (Kn→ΩKn+1 n x) (Kn→ΩKn+1 n y) i))) ∙∙
(λ i → ΩKn+1→Kn n (Iso.rightInv (Iso-Kn-ΩKn+1 n) (sym (Kn→ΩKn+1 n y)) (~ i) ∙ (Iso.rightInv (Iso-Kn-ΩKn+1 n) (sym (Kn→ΩKn+1 n x)) (~ i)))) ∙∙
commₖ n (-[ n ]ₖ y) (-[ n ]ₖ x)
private
rCancelLem : (n : ℕ) (x : coHomK n) → ΩKn+1→Kn n ((Kn→ΩKn+1 n x) ∙ refl) ≡ ΩKn+1→Kn n (Kn→ΩKn+1 n x)
rCancelLem zero x = refl
rCancelLem (suc n) x = cong (ΩKn+1→Kn (suc n)) (sym (rUnit (Kn→ΩKn+1 (suc n) x)))
lCancelLem : (n : ℕ) (x : coHomK n) → ΩKn+1→Kn n (refl ∙ (Kn→ΩKn+1 n x)) ≡ ΩKn+1→Kn n (Kn→ΩKn+1 n x)
lCancelLem zero x = refl
lCancelLem (suc n) x = cong (ΩKn+1→Kn (suc n)) (sym (lUnit (Kn→ΩKn+1 (suc n) x)))
-cancelRₖ : (n : ℕ) (x y : coHomK n) → (y +[ n ]ₖ x) -[ n ]ₖ x ≡ y
-cancelRₖ n x y = (cong (ΩKn+1→Kn n) ((cong (_∙ sym (Kn→ΩKn+1 n x)) (+ₖ→∙ n y x))
∙∙ sym (assoc _ _ _)
∙∙ cong (Kn→ΩKn+1 n y ∙_) (rCancel _)))
∙∙ rCancelLem n y
∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 n) y
-cancelLₖ : (n : ℕ) (x y : coHomK n) → (x +[ n ]ₖ y) -[ n ]ₖ x ≡ y
-cancelLₖ n x y = cong (λ z → z -[ n ]ₖ x) (commₖ n x y) ∙ -cancelRₖ n x y
-+cancelₖ : (n : ℕ) (x y : coHomK n) → (x -[ n ]ₖ y) +[ n ]ₖ y ≡ x
-+cancelₖ n x y = (cong (ΩKn+1→Kn n) ((cong (_∙ (Kn→ΩKn+1 n y)) (Iso.rightInv (Iso-Kn-ΩKn+1 n) (Kn→ΩKn+1 n x ∙ sym (Kn→ΩKn+1 n y))))
∙∙ sym (assoc _ _ _)
∙∙ cong (Kn→ΩKn+1 n x ∙_) (lCancel _)))
∙∙ rCancelLem n x
∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 n) x
---- Group structure of cohomology groups ---
_+ₕ_ : {n : ℕ} → coHom n A → coHom n A → coHom n A
_+ₕ_ {n = n} = sRec2 § λ a b → ∣ (λ x → a x +[ n ]ₖ b x) ∣₂
-ₕ_ : {n : ℕ} → coHom n A → coHom n A
-ₕ_ {n = n} = sRec § λ a → ∣ (λ x → -[ n ]ₖ a x) ∣₂
_-ₕ_ : {n : ℕ} → coHom n A → coHom n A → coHom n A
_-ₕ_ {n = n} = sRec2 § λ a b → ∣ (λ x → a x -[ n ]ₖ b x) ∣₂
+ₕ-syntax : (n : ℕ) → coHom n A → coHom n A → coHom n A
+ₕ-syntax n = _+ₕ_ {n = n}
-ₕ-syntax : (n : ℕ) → coHom n A → coHom n A
-ₕ-syntax n = -ₕ_ {n = n}
-ₕ'-syntax : (n : ℕ) → coHom n A → coHom n A → coHom n A
-ₕ'-syntax n = _-ₕ_ {n = n}
syntax +ₕ-syntax n x y = x +[ n ]ₕ y
syntax -ₕ-syntax n x = -[ n ]ₕ x
syntax -ₕ'-syntax n x y = x -[ n ]ₕ y
0ₕ : (n : ℕ) → coHom n A
0ₕ n = ∣ (λ _ → (0ₖ n)) ∣₂
rUnitₕ : (n : ℕ) (x : coHom n A) → x +[ n ]ₕ (0ₕ n) ≡ x
rUnitₕ n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → rUnitₖ n (a x)) i ∣₂
lUnitₕ : (n : ℕ) (x : coHom n A) → (0ₕ n) +[ n ]ₕ x ≡ x
lUnitₕ n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → lUnitₖ n (a x)) i ∣₂
rCancelₕ : (n : ℕ) (x : coHom n A) → x +[ n ]ₕ (-[ n ]ₕ x) ≡ 0ₕ n
rCancelₕ n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → rCancelₖ n (a x)) i ∣₂
lCancelₕ : (n : ℕ) (x : coHom n A) → (-[ n ]ₕ x) +[ n ]ₕ x ≡ 0ₕ n
lCancelₕ n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → lCancelₖ n (a x)) i ∣₂
assocₕ : (n : ℕ) (x y z : coHom n A) → ((x +[ n ]ₕ y) +[ n ]ₕ z) ≡ (x +[ n ]ₕ (y +[ n ]ₕ z))
assocₕ n = elim3 (λ _ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b c i → ∣ funExt (λ x → assocₖ n (a x) (b x) (c x)) i ∣₂
commₕ : (n : ℕ) (x y : coHom n A) → (x +[ n ]ₕ y) ≡ (y +[ n ]ₕ x)
commₕ n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ funExt (λ x → commₖ n (a x) (b x)) i ∣₂
cancelₕ : (n : ℕ) (x : coHom n A) → x -[ n ]ₕ x ≡ 0ₕ n
cancelₕ n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → cancelₖ n (a x)) i ∣₂
-ₖ-ₖ : (n : ℕ) (x : coHomK n) → (-[ n ]ₖ (-[ n ]ₖ x)) ≡ x
-ₖ-ₖ n x = cong ((ΩKn+1→Kn n) ∘ sym) (Iso.rightInv (Iso-Kn-ΩKn+1 n) (sym (Kn→ΩKn+1 n x))) ∙ Iso.leftInv (Iso-Kn-ΩKn+1 n) x
-- Proof that rUnitₖ and lUnitₖ agree on 0ₖ. Needed for Mayer-Vietoris.
private
rUnitlUnitGen : ∀ {ℓ ℓ'} {A : Type ℓ} {B : Type ℓ'} {b : B} (e : Iso A (b ≡ b))
(0A : A)
(0fun : fun e 0A ≡ refl)
→ Path (inv' e (fun e 0A ∙ fun e 0A) ≡ 0A)
(cong (inv' e) (cong (_∙ fun e 0A) 0fun) ∙∙ cong (inv' e) (sym (lUnit (fun e 0A))) ∙∙ Iso.leftInv e 0A)
(cong (inv' e) (cong (fun e 0A ∙_) 0fun) ∙∙ cong (inv' e) (sym (rUnit (fun e 0A))) ∙∙ Iso.leftInv e 0A)
rUnitlUnitGen e 0A 0fun =
(λ i → cong (inv' e) (cong (_∙ fun e 0A) 0fun) ∙∙ rUnit (cong (inv' e) (sym (lUnit (fun e 0A)))) i ∙∙ Iso.leftInv e 0A)
∙ ((λ i → (λ j → inv' e (0fun (~ i ∧ j) ∙ 0fun (j ∧ i)))
∙∙ ((λ j → inv' e (0fun (~ i ∨ j) ∙ 0fun i))
∙∙ cong (inv' e) (sym (lUnit (0fun i)))
∙∙ λ j → inv' e (0fun (i ∧ (~ j))))
∙∙ Iso.leftInv e 0A)
∙∙ (λ i → (λ j → inv' e (fun e 0A ∙ 0fun j))
∙∙ (λ j → inv' e (0fun (j ∧ ~ i) ∙ refl))
∙∙ cong (inv' e) (sym (rUnit (0fun (~ i))))
∙∙ (λ j → inv' e (0fun (~ i ∧ ~ j)))
∙∙ Iso.leftInv e 0A)
∙∙ λ i → cong (inv' e) (cong (fun e 0A ∙_) 0fun)
∙∙ rUnit (cong (inv' e) (sym (rUnit (fun e 0A)))) (~ i)
∙∙ Iso.leftInv e 0A)
rUnitlUnit0 : (n : ℕ) → rUnitₖ n (0ₖ n) ≡ lUnitₖ n (0ₖ n)
rUnitlUnit0 0 = refl
rUnitlUnit0 (suc zero) = refl
rUnitlUnit0 (suc (suc n)) = sym (rUnitlUnitGen (Iso-Kn-ΩKn+1 (2 + n)) (0ₖ (2 + n)) (Kn→ΩKn+10ₖ (2 + n)))
-cancelLₕ : (n : ℕ) (x y : coHom n A) → (x +[ n ]ₕ y) -[ n ]ₕ x ≡ y
-cancelLₕ n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ (λ x → -cancelLₖ n (a x) (b x) i) ∣₂
-cancelRₕ : (n : ℕ) (x y : coHom n A) → (y +[ n ]ₕ x) -[ n ]ₕ x ≡ y
-cancelRₕ n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ (λ x → -cancelRₖ n (a x) (b x) i) ∣₂
-+cancelₕ : (n : ℕ) (x y : coHom n A) → (x -[ n ]ₕ y) +[ n ]ₕ y ≡ x
-+cancelₕ n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ (λ x → -+cancelₖ n (a x) (b x) i) ∣₂
-- Group structure of reduced cohomology groups (in progress - might need K to compute properly first) ---
+ₕ∙ : {A : Pointed ℓ} (n : ℕ) → coHomRed n A → coHomRed n A → coHomRed n A
+ₕ∙ zero = sRec2 § λ { (a , pa) (b , pb) → ∣ (λ x → a x +[ zero ]ₖ b x) , (λ i → (pa i +[ zero ]ₖ pb i)) ∣₂ }
+ₕ∙ (suc zero) = sRec2 § λ { (a , pa) (b , pb) → ∣ (λ x → a x +[ 1 ]ₖ b x) , (λ i → pa i +[ 1 ]ₖ pb i) ∙ lUnitₖ 1 (0ₖ 1) ∣₂ }
+ₕ∙ (suc (suc n)) = sRec2 § λ { (a , pa) (b , pb) → ∣ (λ x → a x +[ (2 + n) ]ₖ b x) , (λ i → pa i +[ (2 + n) ]ₖ pb i) ∙ lUnitₖ (2 + n) (0ₖ (2 + n)) ∣₂ }
open IsSemigroup
open IsMonoid
open GroupStr
open GroupHom
coHomGr : ∀ {ℓ} (n : ℕ) (A : Type ℓ) → Group {ℓ}
coHomGr n A = coHom n A , coHomGrnA
where
coHomGrnA : GroupStr (coHom n A)
0g coHomGrnA = 0ₕ n
GroupStr._+_ coHomGrnA = λ x y → x +[ n ]ₕ y
- coHomGrnA = λ x → -[ n ]ₕ x
isGroup coHomGrnA = helper
where
abstract
helper : IsGroup (0ₕ n) (λ x y → x +[ n ]ₕ y) (λ x → -[ n ]ₕ x)
helper = makeIsGroup § (λ x y z → sym (assocₕ n x y z)) (rUnitₕ n) (lUnitₕ n) (rCancelₕ n) (lCancelₕ n)
×coHomGr : (n : ℕ) (A : Type ℓ) (B : Type ℓ') → Group
×coHomGr n A B = dirProd (coHomGr n A) (coHomGr n B)
coHomFun : ∀ {ℓ ℓ'} {A : Type ℓ} {B : Type ℓ'} (n : ℕ) (f : A → B) → coHom n B → coHom n A
coHomFun n f = sRec § λ β → ∣ β ∘ f ∣₂
-distrLemma : ∀ {ℓ ℓ'} {A : Type ℓ} {B : Type ℓ'} (n m : ℕ) (f : GroupHom (coHomGr n A) (coHomGr m B))
(x y : coHom n A)
→ fun f (x -[ n ]ₕ y) ≡ fun f x -[ m ]ₕ fun f y
-distrLemma n m f' x y = sym (-cancelRₕ m (f y) (f (x -[ n ]ₕ y)))
∙∙ cong (λ x → x -[ m ]ₕ f y) (sym (isHom f' (x -[ n ]ₕ y) y))
∙∙ cong (λ x → x -[ m ]ₕ f y) ( cong f (-+cancelₕ n _ _))
where
f = fun f'
--- the loopspace of Kₙ is commutative regardless of base
addIso : (n : ℕ) (x : coHomK n) → Iso (coHomK n) (coHomK n)
fun (addIso n x) y = y +[ n ]ₖ x
inv' (addIso n x) y = y -[ n ]ₖ x
rightInv (addIso n x) y = -+cancelₖ n y x
leftInv (addIso n x) y = -cancelRₖ n x y
isCommΩK-based : (n : ℕ) (x : coHomK n) → isComm∙ (coHomK n , x)
isCommΩK-based zero x p q = isSetInt _ _ (p ∙ q) (q ∙ p)
isCommΩK-based (suc zero) x =
subst isComm∙ (λ i → coHomK 1 , lUnitₖ 1 x i)
(ptdIso→comm {A = (_ , 0ₖ 1)} (addIso 1 x)
(isCommΩK 1))
isCommΩK-based (suc (suc n)) x =
subst isComm∙ (λ i → coHomK (suc (suc n)) , lUnitₖ (suc (suc n)) x i)
(ptdIso→comm {A = (_ , 0ₖ (suc (suc n)))} (addIso (suc (suc n)) x)
(isCommΩK (suc (suc n))))
addLemma : (a b : Int) → a +[ 0 ]ₖ b ≡ (a ℤ+ b)
addLemma a b = (cong (ΩKn+1→Kn 0) (sym (congFunct ∣_∣ (intLoop a) (intLoop b))))
∙∙ (λ i → ΩKn+1→Kn 0 (cong ∣_∣ (intLoop-hom a b i)))
∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 0) (a ℤ+ b)
---
-- hidden versions of cohom stuff using the "lock" hack. The locked versions can be used when proving things.
-- Swapping "key" for "tt*" will then give computing functions.
Unit' : Type₀
Unit' = lockUnit {ℓ-zero}
lock : ∀ {ℓ} {A : Type ℓ} → Unit' → A → A
lock unlock = λ x → x
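-- lock is the identity function, but matching on the Unit' argument blocks
-- reduction until it is instantiated with the constructor unlock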
module lockedCohom (key : Unit') where
+K : (n : ℕ) → coHomK n → coHomK n → coHomK n
+K n = lock key (_+ₖ_ {n = n})
-K : (n : ℕ) → coHomK n → coHomK n
-K n = lock key (-ₖ_ {n = n})
-Kbin : (n : ℕ) → coHomK n → coHomK n → coHomK n
-Kbin n = lock key (_-ₖ_ {n = n})
rUnitK : (n : ℕ) (x : coHomK n) → +K n x (0ₖ n) ≡ x
rUnitK n x = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) x (0ₖ n) ≡ x
pm unlock = rUnitₖ n x
lUnitK : (n : ℕ) (x : coHomK n) → +K n (0ₖ n) x ≡ x
lUnitK n x = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) (0ₖ n) x ≡ x
pm unlock = lUnitₖ n x
rCancelK : (n : ℕ) (x : coHomK n) → +K n x (-K n x) ≡ 0ₖ n
rCancelK n x = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) x (lock t (-ₖ_ {n = n}) x) ≡ 0ₖ n
pm unlock = rCancelₖ n x
lCancelK : (n : ℕ) (x : coHomK n) → +K n (-K n x) x ≡ 0ₖ n
lCancelK n x = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) (lock t (-ₖ_ {n = n}) x) x ≡ 0ₖ n
pm unlock = lCancelₖ n x
-cancelRK : (n : ℕ) (x y : coHomK n) → -Kbin n (+K n y x) x ≡ y
-cancelRK n x y = pm key
where
pm : (t : Unit') → lock t (_-ₖ_ {n = n}) (lock t (_+ₖ_ {n = n}) y x) x ≡ y
pm unlock = -cancelRₖ n x y
-cancelLK : (n : ℕ) (x y : coHomK n) → -Kbin n (+K n x y) x ≡ y
-cancelLK n x y = pm key
where
pm : (t : Unit') → lock t (_-ₖ_ {n = n}) (lock t (_+ₖ_ {n = n}) x y) x ≡ y
pm unlock = -cancelLₖ n x y
-+cancelK : (n : ℕ) (x y : coHomK n) → +K n (-Kbin n x y) y ≡ x
-+cancelK n x y = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) (lock t (_-ₖ_ {n = n}) x y) y ≡ x
pm unlock = -+cancelₖ n x y
cancelK : (n : ℕ) (x : coHomK n) → -Kbin n x x ≡ 0ₖ n
cancelK n x = pm key
where
pm : (t : Unit') → (lock t (_-ₖ_ {n = n}) x x) ≡ 0ₖ n
pm unlock = cancelₖ n x
assocK : (n : ℕ) (x y z : coHomK n) → +K n (+K n x y) z ≡ +K n x (+K n y z)
assocK n x y z = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) (lock t (_+ₖ_ {n = n}) x y) z
≡ lock t (_+ₖ_ {n = n}) x (lock t (_+ₖ_ {n = n}) y z)
pm unlock = assocₖ n x y z
commK : (n : ℕ) (x y : coHomK n) → +K n x y ≡ +K n y x
commK n x y = pm key
where
pm : (t : Unit') → lock t (_+ₖ_ {n = n}) x y ≡ lock t (_+ₖ_ {n = n}) y x
pm unlock = commₖ n x y
-- cohom
+H : (n : ℕ) (x y : coHom n A) → coHom n A
+H n = sRec2 § λ a b → ∣ (λ x → +K n (a x) (b x)) ∣₂
-H : (n : ℕ) (x : coHom n A) → coHom n A
-H n = sRec § λ a → ∣ (λ x → -K n (a x)) ∣₂
-Hbin : (n : ℕ) → coHom n A → coHom n A → coHom n A
-Hbin n = sRec2 § λ a b → ∣ (λ x → -Kbin n (a x) (b x)) ∣₂
rUnitH : (n : ℕ) (x : coHom n A) → +H n x (0ₕ n) ≡ x
rUnitH n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → rUnitK n (a x)) i ∣₂
lUnitH : (n : ℕ) (x : coHom n A) → +H n (0ₕ n) x ≡ x
lUnitH n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → lUnitK n (a x)) i ∣₂
rCancelH : (n : ℕ) (x : coHom n A) → +H n x (-H n x) ≡ 0ₕ n
rCancelH n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → rCancelK n (a x)) i ∣₂
lCancelH : (n : ℕ) (x : coHom n A) → +H n (-H n x) x ≡ 0ₕ n
lCancelH n = sElim (λ _ → isOfHLevelPath 1 (§ _ _))
λ a i → ∣ funExt (λ x → lCancelK n (a x)) i ∣₂
assocH : (n : ℕ) (x y z : coHom n A) → (+H n (+H n x y) z) ≡ (+H n x (+H n y z))
assocH n = elim3 (λ _ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b c i → ∣ funExt (λ x → assocK n (a x) (b x) (c x)) i ∣₂
commH : (n : ℕ) (x y : coHom n A) → (+H n x y) ≡ (+H n y x)
commH n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ funExt (λ x → commK n (a x) (b x)) i ∣₂
-cancelRH : (n : ℕ) (x y : coHom n A) → -Hbin n (+H n y x) x ≡ y
-cancelRH n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ (λ x → -cancelRK n (a x) (b x) i) ∣₂
-cancelLH : (n : ℕ) (x y : coHom n A) → -Hbin n (+H n x y) x ≡ y
-cancelLH n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ (λ x → -cancelLK n (a x) (b x) i) ∣₂
-+cancelH : (n : ℕ) (x y : coHom n A) → +H n (-Hbin n x y) y ≡ x
-+cancelH n = sElim2 (λ _ _ → isOfHLevelPath 1 (§ _ _))
λ a b i → ∣ (λ x → -+cancelK n (a x) (b x) i) ∣₂
+K→∙ : (key : Unit') (n : ℕ) (a b : coHomK n) → Kn→ΩKn+1 n (lockedCohom.+K key n a b) ≡ Kn→ΩKn+1 n a ∙ Kn→ΩKn+1 n b
+K→∙ unlock = +ₖ→∙
+H≡+ₕ : (key : Unit') (n : ℕ) → lockedCohom.+H key {A = A} n ≡ _+ₕ_ {n = n}
+H≡+ₕ unlock _ = refl
rUnitlUnit0K : (key : Unit') (n : ℕ) → lockedCohom.rUnitK key n (0ₖ n) ≡ lockedCohom.lUnitK key n (0ₖ n)
rUnitlUnit0K unlock = rUnitlUnit0
|
\section*{Terms and Abbreviations}
\addcontentsline{toc}{section}{Terms and Abbreviations}
\textit{No new terms are used in this document.}
|
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import itertools
from collections import defaultdict
import copy
from image_matrix_helper import compute_master_list, imshow_list, rgb_map
import random
import time
nb_start = time.time()
```
## Hints at Non-Equilibrium Behavior
In this notebook, we again simulate the systems presented in `search_and_combinatorics.ipynb`, but now we track how the number of correctly placed particles evolves towards its equilibrium value over the course of the simulation.
We reuse much of the code from that notebook, so we copy it here without additional explanation.
#### Parameter function definitions
```python
# helper function definitions
gamma_func = lambda E0, Ev, T: 4*np.sqrt(2)*np.exp(E0/T)*(Ev/T)**(3/2)
delta_func = lambda Del, T: np.exp(Del/T)
phi_func = lambda x, z, gamma, delta: x*(1+ 1/(z*gamma))/(1-delta)
```
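As a quick sanity check of these helpers, we can evaluate them directly (the numbers below are purely illustrative; the actual parameter values are set later in the notebook):

```python
# purely illustrative values; the simulation parameters are defined below
E0, Ev, T, Del = 10.0, 0.001, 0.5, 5.0
print(gamma_func(E0, Ev, T))  # 4*sqrt(2)*exp(E0/T)*(Ev/T)**(3/2)
print(delta_func(Del, T))     # exp(Del/T)
```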
#### Microstate transitions
```python
## dissociation operator
def trans_dissoc(free_objs, bound_objs):
# indices of non-empty
indxs = [i for i, x in enumerate(bound_objs) if x != "-"]
# random choice for bound object
random_indx = random.choice(indxs)
## new state vector
free_objs_new = copy.deepcopy(free_objs)
bound_objs_new = copy.deepcopy(bound_objs)
# putting empty slot
bound_objs_new[random_indx] = '-'
# appending previously bound object to free objects
free_objs_new.append(bound_objs[random_indx])
return free_objs_new, bound_objs_new
## association operator
def trans_assoc(free_objs, bound_objs):
# random element to associate
elem = random.choice(free_objs)
# indices of empty spaces
indxs = [i for i, x in enumerate(bound_objs) if x == "-"]
# random choice for empty space
random_indx = random.choice(indxs)
## new state vector
free_objs_new = copy.deepcopy(free_objs)
bound_objs_new = copy.deepcopy(bound_objs)
## state
free_objs_new.remove(elem)
bound_objs_new[random_indx] = elem
return free_objs_new, bound_objs_new
## permutation operator
def trans_perm(free_objs, bound_objs):
Ncomp = len(bound_objs)
i1 = int(random.choice(range(Ncomp)))
i2 = int(random.choice(range(Ncomp)))
## new omega vector
bound_objs_new = copy.deepcopy(bound_objs)
bound_objs_new[i2] = bound_objs[i1]
bound_objs_new[i1] = bound_objs[i2]
return free_objs, bound_objs_new
```
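As a quick round-trip check on a made-up state, association followed by dissociation should conserve the collection of particles, whichever random choices are made:

```python
# made-up toy state: two free particles and one occupied site
random.seed(1)
free0, bound0 = ['A', 'B'], ['-', '-', 'C']
f1, b1 = trans_assoc(free0, bound0)   # bind one free particle
f2, b2 = trans_dissoc(f1, b1)         # release one bound particle
# both moves conserve the multiset of particles
assert sorted(f2 + [x for x in b2 if x != '-']) == ['A', 'B', 'C']
```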
#### Logarithm of the Boltzmann factor
The logarithm of the Boltzmann factor for a microstate (i.e., the temperature-normalized negative energy of the microstate) is defined as
\begin{equation}
\beta E(\boldsymbol{k}, \boldsymbol{m}) = \sum_{i=1}^R(m_i \ln \delta_i + k_i \ln \gamma_i).
\label{eq:sim_en}
\end{equation}
```python
def log_boltz(free_objs, bound_objs, mstr_vec, deltas, gammas, name_key):
elem_set = list(set(mstr_vec))
count_dict = dict()
for elem in elem_set:
count_dict[elem] = bound_objs.count(elem)
bind_log_factor = 0
for elem in elem_set:
key = name_key[elem]
bind_log_factor += count_dict[elem]*np.log(gammas[key])
corr_log_factor = 0
for j in range(len(bound_objs)):
if bound_objs[j] == mstr_vec[j]:
elem = bound_objs[j]
key = name_key[elem]
corr_log_factor+=np.log(deltas[key])
return bind_log_factor+corr_log_factor
```
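To make the definition concrete, here is a toy check with made-up species, target, and parameter values (note that `log_boltz` never actually uses its `free_objs` argument):

```python
# hypothetical two-species check against the formula above
mstr  = ['A', 'B', 'A']   # target configuration
bound = ['A', '-', 'B']   # one correct 'A'; the 'B' is bound but misplaced
gammas_toy = np.array([2.0, 3.0])
deltas_toy = np.array([5.0, 7.0])
val = log_boltz([], bound, mstr, deltas_toy, gammas_toy, {'A': 0, 'B': 1})
# one bound copy each of 'A' and 'B', plus one correctly placed 'A':
assert np.isclose(val, np.log(2.0) + np.log(3.0) + np.log(5.0))
```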
#### Function to count the number of correctly bound particles
```python
def m_calc(bound_objs, mstr_vec):
num = 0
for k in range(len(mstr_vec)):
if mstr_vec[k] == bound_objs[k]:
num += 1
return num
```
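For example, on a made-up three-site target:

```python
# two of the three sites agree with the target
assert m_calc(['A', 'C', 'B'], ['A', 'B', 'B']) == 2
```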
#### Metropolis-Hastings algorithm
```python
### Metropolis Monte Carlo Algorithm
## loads uniform random sampling
runif = np.random.rand
def met_assembly_grid(Niter, free_objs, bound_objs, mstr_vec, deltas, gammas, name_key, only_physical_trans = False):
    '''
    #################################################################
    # Metropolis sampler for the assembly system
    #
    # Niter:      number of Monte Carlo steps
    # free_objs:  initial list of free (unbound) particles
    # bound_objs: initial list of binding sites ('-' marks an empty site)
    # mstr_vec:   target ("master") configuration of the bound sites
    # deltas:     per-species correct-placement factors (delta_i)
    # gammas:     per-species binding factors (gamma_i)
    # name_key:   map from particle name to index into deltas/gammas
    # only_physical_trans: if True, omit the (unphysical) permutation move
    ##################################################################
    '''
# Initialize state values
free_objs_vals = [0]*(Niter+1)
bound_objs_vals = [0]*(Niter+1)
# Set initial values
free_objs_vals[0] = free_objs[:]
bound_objs_vals[0] = bound_objs[:]
# Initialize acceptance counts
# We can use this to tune our number of steps
accepted = 0
# debugging code
debug_assoc, debug_dissoc, debug_perm = 0, 0, 0
for i in range(Niter):
# get current monomer and dimer states
current_free_objs = copy.deepcopy(free_objs_vals[i])
current_bound_objs = copy.deepcopy(bound_objs_vals[i])
N_free = len(current_free_objs)
N_bound = len(current_bound_objs)-len(current_free_objs)
u_trans = runif()
# only allow for physical transitions
if only_physical_trans:
u1, u2 = 1/2, 1
# includes permutation transition
else:
u1, u2 = 1/3, 2/3
if u_trans < u1: #first type of transition; monomer association
if N_free < 1:
log_alpha = np.log(1e-15)
else:
# proposed new monomer and dimer states
new_free_objs, new_bound_objs = trans_assoc(current_free_objs, current_bound_objs)
# transition elements
log_init = log_boltz(current_free_objs, current_bound_objs, mstr_vec, deltas, gammas, name_key)
log_final = log_boltz(new_free_objs, new_bound_objs, mstr_vec, deltas, gammas, name_key)
# weight
num = N_free*N_free
den = N_bound+1
# Log-acceptance rate
log_alpha = log_final-log_init+np.log(num/den)
elif u1 <= u_trans < u2: #second type of transition; bound monomer dissociation
if N_bound <1:
log_alpha = np.log(1e-15)
else:
# proposed new monomer and dimer states
new_free_objs, new_bound_objs = trans_dissoc(current_free_objs, current_bound_objs)
# transition elements
log_init = log_boltz(current_free_objs, current_bound_objs, mstr_vec, deltas, gammas, name_key)
log_final = log_boltz(new_free_objs, new_bound_objs, mstr_vec, deltas, gammas, name_key)
# weight
num = N_bound
den = (N_free+1)*(N_free+1)
# Log-acceptance rate
log_alpha = log_final-log_init+np.log(num/den)
elif u2 <= u_trans: #third type of transition; switching bounded elements
if N_bound <2:
log_alpha = np.log(1e-15)
else:
# proposed new monomer and dimer states
new_free_objs, new_bound_objs = trans_perm(current_free_objs, current_bound_objs)
# transition elements
log_init = log_boltz(current_free_objs, current_bound_objs, mstr_vec, deltas, gammas, name_key)
log_final = log_boltz(new_free_objs, new_bound_objs, mstr_vec, deltas, gammas, name_key)
# Log-acceptance rate
log_alpha = log_final-log_init
# Sample a uniform random variate
u = runif()
# Test proposed value
if np.log(u) < log_alpha:
# Accept
free_objs_vals[i+1] = new_free_objs
bound_objs_vals[i+1] = new_bound_objs
#log_current_prob = log_proposed_prob
accepted += 1
else:
# Stay put
free_objs_vals[i+1] = free_objs_vals[i]
bound_objs_vals[i+1] = bound_objs_vals[i]
# return our samples and the number of accepted steps
return free_objs_vals, bound_objs_vals, accepted
```
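As a minimal usage sketch, the sampler can be driven on a tiny made-up system (two copies of a single species 'A' on a two-site target, with arbitrary parameter values):

```python
# tiny made-up system, not the grid studied below
random.seed(0); np.random.seed(0)
mstr_toy = ['A', 'A']
free_toy, bound_toy = ['A', 'A'], ['-', '-']
gammas_toy = np.array([np.exp(1.0)])
deltas_toy = np.array([np.exp(2.0)])
_, bound_traj, acc = met_assembly_grid(200, free_toy, bound_toy, mstr_toy,
                                       deltas_toy, gammas_toy, {'A': 0},
                                       only_physical_trans=True)
print('acceptance fraction:', acc / 200)
print('final bound state:', bound_traj[-1])
```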
#### Image grid for completely correct configuration
```python
# defining master_list
master_list = compute_master_list()
# testing plot
imshow_list(master_list, title = 'Completely Correct Configuration');
# defining Nelems
Nelems = np.zeros(8)
key_list = list(rgb_map.keys())[:-1]
name_key_ = dict()
for j in range(len(key_list)):
name_key_[key_list[j]] = j
Nelems[j] = master_list.count(key_list[j])
```
```python
# displaying copy-number counts of the various elements
Nelems
```
array([ 9., 9., 10., 5., 7., 6., 3., 51.])
#### Simulating the system
```python
# whether to only include physical transitions
only_physical_trans = True
# starting time
t0_start = time.time()
# setting parameter dictionary
param_dict = {'Search Limited': {'Del_bar':7.7501 ,
'sigma_D':2.0,
'E0_bar':3.0,
'sigma_E':1.0},
'Combinatorics Limited':{'Del_bar': 4.75,
'sigma_D': 2.0,
'E0_bar': 16.0,
'sigma_E':3.0},
'Indeterminate': {'Del_bar': 6.75,
'sigma_D': 2.0,
'E0_bar': 10.75,
'sigma_E': 3.0}, }
# dictionary that contains both physical and unphysical transitions
m_full_data_dict = defaultdict(dict)
num_trajs = 10 # number of trajectories to plot for each data set
for bool_ in [True, False]:
# defining whether to include only physical transitions
only_physical_trans = bool_
# empty list of list of trajectories
bound_list_dict = {'Search Limited': [[]]*num_trajs,
'Combinatorics Limited':[[]]*num_trajs,
'Indeterminate':[[]]*num_trajs }
    # number of steps for MC algorithm
Nmc = 10000
    # initial monomer and dimer states;
    # system starts fully dissociated (all particles free, no sites occupied)
random.seed(0)
free_objs_0 = random.sample(master_list, len(master_list))
bound_objs_0 = ['-']*len(master_list)
mstr_vec = copy.deepcopy(master_list)
# make copy of initial monomer and dimer states
free_objs_copy = copy.deepcopy(free_objs_0)
bound_objs_copy = copy.deepcopy(bound_objs_0)
# temperature set
T0 = 0.5
for type_ in list(bound_list_dict.keys()):
# start time for particular type
t0 = time.time()
# getting parameter values
dict_vals = param_dict[type_]
# drawing energy values
np.random.seed(24)
R=8
Del_bar, sigma_D = dict_vals['Del_bar'], dict_vals['sigma_D']
Dels = np.random.randn(R)*sigma_D+Del_bar
E0_bar, sigma_E = dict_vals['E0_bar'], dict_vals['sigma_E']
E0s = np.random.randn(R)*sigma_E+E0_bar
Evs = np.ones(R)*0.001
# defining helper functions
gammas_ = gamma_func(E0s, Evs, T0)
deltas_ = delta_func(Dels, T0)
for k in range(num_trajs):
            # metropolis generator
_, bound_list_dict[type_][k], _ = met_assembly_grid(Nmc,
free_objs_copy,
bound_objs_copy,
mstr_vec,
deltas_,
gammas_,
name_key_,
only_physical_trans=only_physical_trans)
t_prelim = time.time()
print("Temperature Run:",str(k+1),"; Current Time:", round(t_prelim-t0,2),"secs")
t1 = time.time()
print(f"\nTotal Simulation Run Time for {type_}: {round(t1-t0,2)} secs")
print("----------\n")
# copying data
m_full_data_dict[only_physical_trans] = bound_list_dict
t2 = time.time()
print("------------------------------\n------------------------------")
print(f"Total Simulation Run Time for Only Physical Trans = {only_physical_trans}: {round(t2-t0_start,2)} secs")
t3 = time.time()
print("------------------------------\n------------------------------")
print(f"Total Simulation Run Time for all: {round(t2-t0_start,2)} secs")
```
Temperature Run: 1 ; Current Time: 3.77 secs
Temperature Run: 2 ; Current Time: 7.26 secs
Temperature Run: 3 ; Current Time: 10.92 secs
Temperature Run: 4 ; Current Time: 15.27 secs
Temperature Run: 5 ; Current Time: 20.33 secs
Temperature Run: 6 ; Current Time: 24.63 secs
Temperature Run: 7 ; Current Time: 29.11 secs
Temperature Run: 8 ; Current Time: 32.96 secs
Temperature Run: 9 ; Current Time: 36.75 secs
Temperature Run: 10 ; Current Time: 41.17 secs
Total Simulation Run Time for Search Limited: 41.17 secs
----------
Temperature Run: 1 ; Current Time: 2.07 secs
Temperature Run: 2 ; Current Time: 4.06 secs
Temperature Run: 3 ; Current Time: 6.02 secs
Temperature Run: 4 ; Current Time: 8.54 secs
Temperature Run: 5 ; Current Time: 10.64 secs
Temperature Run: 6 ; Current Time: 12.97 secs
Temperature Run: 7 ; Current Time: 14.92 secs
Temperature Run: 8 ; Current Time: 16.97 secs
Temperature Run: 9 ; Current Time: 19.03 secs
Temperature Run: 10 ; Current Time: 21.1 secs
Total Simulation Run Time for Combinatorics Limited: 21.1 secs
----------
Temperature Run: 1 ; Current Time: 2.9 secs
Temperature Run: 2 ; Current Time: 5.13 secs
Temperature Run: 3 ; Current Time: 7.3 secs
Temperature Run: 4 ; Current Time: 9.52 secs
Temperature Run: 5 ; Current Time: 11.68 secs
Temperature Run: 6 ; Current Time: 13.88 secs
Temperature Run: 7 ; Current Time: 16.02 secs
Temperature Run: 8 ; Current Time: 18.37 secs
Temperature Run: 9 ; Current Time: 20.5 secs
Temperature Run: 10 ; Current Time: 22.7 secs
Total Simulation Run Time for Indeterminate: 22.7 secs
----------
------------------------------
------------------------------
Total Simulation Run Time for Only Physical Trans = True: 84.98 secs
Temperature Run: 1 ; Current Time: 4.43 secs
Temperature Run: 2 ; Current Time: 8.98 secs
Temperature Run: 3 ; Current Time: 13.4 secs
Temperature Run: 4 ; Current Time: 18.18 secs
Temperature Run: 5 ; Current Time: 22.89 secs
Temperature Run: 6 ; Current Time: 27.97 secs
Temperature Run: 7 ; Current Time: 33.13 secs
Temperature Run: 8 ; Current Time: 37.85 secs
Temperature Run: 9 ; Current Time: 42.39 secs
Temperature Run: 10 ; Current Time: 46.76 secs
Total Simulation Run Time for Search Limited: 46.76 secs
----------
Temperature Run: 1 ; Current Time: 3.67 secs
Temperature Run: 2 ; Current Time: 7.15 secs
Temperature Run: 3 ; Current Time: 10.9 secs
Temperature Run: 4 ; Current Time: 14.87 secs
Temperature Run: 5 ; Current Time: 18.39 secs
Temperature Run: 6 ; Current Time: 21.97 secs
Temperature Run: 7 ; Current Time: 25.6 secs
Temperature Run: 8 ; Current Time: 29.24 secs
Temperature Run: 9 ; Current Time: 32.66 secs
Temperature Run: 10 ; Current Time: 36.52 secs
Total Simulation Run Time for Combinatorics Limited: 36.52 secs
----------
Temperature Run: 1 ; Current Time: 3.38 secs
Temperature Run: 2 ; Current Time: 6.6 secs
Temperature Run: 3 ; Current Time: 9.91 secs
Temperature Run: 4 ; Current Time: 13.37 secs
Temperature Run: 5 ; Current Time: 16.7 secs
Temperature Run: 6 ; Current Time: 19.94 secs
Temperature Run: 7 ; Current Time: 23.57 secs
Temperature Run: 8 ; Current Time: 26.86 secs
Temperature Run: 9 ; Current Time: 30.28 secs
Temperature Run: 10 ; Current Time: 34.22 secs
Total Simulation Run Time for Indeterminate: 34.22 secs
----------
------------------------------
------------------------------
Total Simulation Run Time for Only Physical Trans = False: 202.48 secs
------------------------------
------------------------------
Total Simulation Run Time for all: 202.48 secs
#### Plotting simulated values of $m/N$ for each system type
```python
# dictionary that contains both physical and unphysical transitions
m_t_full_data_dict = defaultdict(dict)
# dictionary for trajectories of m/N
m_t_dict = {'Search Limited': [[]]*num_trajs,
'Combinatorics Limited':[[]]*num_trajs,
'Indeterminate':[[]]*num_trajs }
```
```python
for bool_ in [True, False]:
# defining whether to include only physical transitions
only_physical_trans = bool_
# getting list of bound states
bound_list_dict = m_full_data_dict[only_physical_trans]
    # dictionary for trajectories of m/N
m_t_dict = {'Search Limited': [[]]*num_trajs,
'Combinatorics Limited':[[]]*num_trajs,
'Indeterminate':[[]]*num_trajs }
# filling in dictionary values for m/N
for type_ in list(m_t_dict.keys()):
m_t_dict[type_] = [[m_calc(elem, master_list)/(len(master_list)) for elem in bound_list_dict[type_][k]] for k in range(num_trajs)]
    # store the m/N trajectories for this transition setting
m_t_full_data_dict[bool_] = m_t_dict
```
```python
for bool_ in [True, False]:
# getting dictionary value for particular transition type
m_t_dict = m_t_full_data_dict[bool_]
for type_ in list(m_t_dict.keys()):
plt.figure(figsize = (7,5))
ax = plt.subplot(111)
for k in range(num_trajs):
plt.plot(m_t_dict[type_][k])
# plot formatting
ax.set_xlabel(r'simulation time', fontsize = 18,labelpad=10)
plt.xlim([0, 10000])
plt.ylim([0,1.1])
plt.ylabel(r'$m/N$', fontsize = 18)
# plt.yaxis.set_label_coords(-0.1,.5)
ax.axhline(y = 1.0, color = 'k', linestyle = 'dashed', linewidth = 2)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# increase label size
ax.tick_params(axis='both', which='major', labelsize=12)
ax.tick_params(axis='both', which='minor', labelsize=12)
# plt.legend(loc = 'best', fontsize = 12)
plt.grid(alpha = 0.45)
type_name = type_.replace(' ', '_').lower()
print('Only Physical Transitions:', bool_)
print('System Type:', type_)
print()
# if only_physical_trans:
# suffix = 'phys'
# else:
# suffix = 'nonphys'
# plt.savefig(f'hints_neqbm_{suffix}_{type_name}.png', bbox_inches='tight', format = 'png')
plt.show()
```
```python
print('Total Notebook Runtime: %.3f mins' % ((time.time()-nb_start)/60))
```
Total Notebook Runtime: 3.502 mins
```python
```
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: work_gga_x *)
lambda := y -> 1/2*(1 + (1 - y^2)*log((1 + y)/abs(1 - y))/(2*y)):
f := x -> 1 + lambda(X2S*x/6)*x^2/(8*K_FACTOR_C): |
State Before: V : Type u_1
inst✝¹ : NormedAddCommGroup V
inst✝ : InnerProductSpace ℝ V
x✝ y✝ x y : V
r : ℝ
hr : 0 < r
⊢ angle (r • x) y = angle x y State After: no goals Tactic: rw [angle_comm, angle_smul_right_of_pos y x hr, angle_comm] |
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
h : Reflexive r
x y : α
hr : x ≠ y → r x y
⊢ r x y
[PROOFSTEP]
by_cases hxy : x = y
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
h : Reflexive r
x y : α
hr : x ≠ y → r x y
hxy : x = y
⊢ r x y
[PROOFSTEP]
exact hxy ▸ h x
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
h : Reflexive r
x y : α
hr : x ≠ y → r x y
hxy : ¬x = y
⊢ r x y
[PROOFSTEP]
exact hr hxy
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : Prop → α → Prop
⊢ (fun x x_1 => x ↔ x_1) ∘r r = r
[PROOFSTEP]
have : (· ↔ ·) = (· = ·) := by funext a b; exact iff_eq_eq
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : Prop → α → Prop
⊢ (fun x x_1 => x ↔ x_1) = fun x x_1 => x = x_1
[PROOFSTEP]
funext a b
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : Prop → α → Prop
a b : Prop
⊢ (a ↔ b) = (a = b)
[PROOFSTEP]
exact iff_eq_eq
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : Prop → α → Prop
this : (fun x x_1 => x ↔ x_1) = fun x x_1 => x = x_1
⊢ (fun x x_1 => x ↔ x_1) ∘r r = r
[PROOFSTEP]
rw [this, eq_comp]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : α → Prop → Prop
⊢ (r ∘r fun x x_1 => x ↔ x_1) = r
[PROOFSTEP]
have : (· ↔ ·) = (· = ·) := by funext a b; exact iff_eq_eq
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : α → Prop → Prop
⊢ (fun x x_1 => x ↔ x_1) = fun x x_1 => x = x_1
[PROOFSTEP]
funext a b
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : α → Prop → Prop
a b : Prop
⊢ (a ↔ b) = (a = b)
[PROOFSTEP]
exact iff_eq_eq
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
r : α → Prop → Prop
this : (fun x x_1 => x ↔ x_1) = fun x x_1 => x = x_1
⊢ (r ∘r fun x x_1 => x ↔ x_1) = r
[PROOFSTEP]
rw [this, comp_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
⊢ (r ∘r p) ∘r q = r ∘r p ∘r q
[PROOFSTEP]
funext a d
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
a : α
d : δ
⊢ ((r ∘r p) ∘r q) a d = (r ∘r p ∘r q) a d
[PROOFSTEP]
apply propext
[GOAL]
case h.h.a
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
a : α
d : δ
⊢ ((r ∘r p) ∘r q) a d ↔ (r ∘r p ∘r q) a d
[PROOFSTEP]
constructor
[GOAL]
case h.h.a.mp
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
a : α
d : δ
⊢ ((r ∘r p) ∘r q) a d → (r ∘r p ∘r q) a d
case h.h.a.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
a : α
d : δ
⊢ (r ∘r p ∘r q) a d → ((r ∘r p) ∘r q) a d
[PROOFSTEP]
exact fun ⟨c, ⟨b, hab, hbc⟩, hcd⟩ ↦ ⟨b, hab, c, hbc, hcd⟩
[GOAL]
case h.h.a.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
a : α
d : δ
⊢ (r ∘r p ∘r q) a d → ((r ∘r p) ∘r q) a d
[PROOFSTEP]
exact fun ⟨b, hab, c, hbc, hcd⟩ ↦ ⟨c, ⟨b, hab, hbc⟩, hcd⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
⊢ flip (r ∘r p) = flip p ∘r flip r
[PROOFSTEP]
funext c a
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
c : γ
a : α
⊢ flip (r ∘r p) c a = (flip p ∘r flip r) c a
[PROOFSTEP]
apply propext
[GOAL]
case h.h.a
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
c : γ
a : α
⊢ flip (r ∘r p) c a ↔ (flip p ∘r flip r) c a
[PROOFSTEP]
constructor
[GOAL]
case h.h.a.mp
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
c : γ
a : α
⊢ flip (r ∘r p) c a → (flip p ∘r flip r) c a
case h.h.a.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
c : γ
a : α
⊢ (flip p ∘r flip r) c a → flip (r ∘r p) c a
[PROOFSTEP]
exact fun ⟨b, hab, hbc⟩ ↦ ⟨b, hbc, hab⟩
[GOAL]
case h.h.a.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → β → Prop
p : β → γ → Prop
q : γ → δ → Prop
c : γ
a : α
⊢ (flip p ∘r flip r) c a → flip (r ∘r p) c a
[PROOFSTEP]
exact fun ⟨b, hbc, hab⟩ ↦ ⟨b, hab, hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
rα : α → α → Prop
rβ : β → β → Prop
f : α → β
fib : Fibration rα rβ f
a : α
ha : Acc rα a
⊢ Acc rβ (f a)
[PROOFSTEP]
induction' ha with a _ ih
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
rα : α → α → Prop
rβ : β → β → Prop
f : α → β
fib : Fibration rα rβ f
a✝ a : α
h✝ : ∀ (y : α), rα y a → Acc rα y
ih : ∀ (y : α), rα y a → Acc rβ (f y)
⊢ Acc rβ (f a)
[PROOFSTEP]
refine' Acc.intro (f a) fun b hr ↦ _
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
rα : α → α → Prop
rβ : β → β → Prop
f : α → β
fib : Fibration rα rβ f
a✝ a : α
h✝ : ∀ (y : α), rα y a → Acc rα y
ih : ∀ (y : α), rα y a → Acc rβ (f y)
b : β
hr : rβ b (f a)
⊢ Acc rβ b
[PROOFSTEP]
obtain ⟨a', hr', rfl⟩ := fib hr
[GOAL]
case intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
rα : α → α → Prop
rβ : β → β → Prop
f : α → β
fib : Fibration rα rβ f
a✝ a : α
h✝ : ∀ (y : α), rα y a → Acc rα y
ih : ∀ (y : α), rα y a → Acc rβ (f y)
a' : α
hr' : rα a' a
hr : rβ (f a') (f a)
⊢ Acc rβ (f a')
[PROOFSTEP]
exact ih a' hr'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d a : α
⊢ ReflTransGen r a a
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d : α
p : α → α → Prop
hp : ∀ (a b : α), r a b → p a b
a : α
⊢ ReflGen p a a
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
hbc : ReflTransGen r b c
⊢ ReflTransGen r a c
[PROOFSTEP]
induction hbc
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
⊢ ReflTransGen r a b
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
b✝ c✝ : α
a✝¹ : ReflTransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : ReflTransGen r a b✝
⊢ ReflTransGen r a c✝
[PROOFSTEP]
case refl => assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
case refl => assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
assumption
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
b✝ c✝ : α
a✝¹ : ReflTransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : ReflTransGen r a b✝
⊢ ReflTransGen r a c✝
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : ReflTransGen r a b
c d : α
a✝ : ReflTransGen r b c
hcd : r c d
hac : ReflTransGen r a c
⊢ ReflTransGen r a d
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : ReflTransGen r a b
c d : α
a✝ : ReflTransGen r b c
hcd : r c d
hac : ReflTransGen r a c
⊢ ReflTransGen r a d
[PROOFSTEP]
exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : r a b
hbc : ReflTransGen r b c
⊢ ReflTransGen r a c
[PROOFSTEP]
induction hbc
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : r a b
⊢ ReflTransGen r a b
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : r a b
b✝ c✝ : α
a✝¹ : ReflTransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : ReflTransGen r a b✝
⊢ ReflTransGen r a c✝
[PROOFSTEP]
case refl => exact refl.tail hab
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
case refl => exact refl.tail hab
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
exact refl.tail hab
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : r a b
b✝ c✝ : α
a✝¹ : ReflTransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : ReflTransGen r a b✝
⊢ ReflTransGen r a c✝
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : r a b
c d : α
a✝ : ReflTransGen r b c
hcd : r c d
hac : ReflTransGen r a c
⊢ ReflTransGen r a d
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : r a b
c d : α
a✝ : ReflTransGen r b c
hcd : r c d
hac : ReflTransGen r a c
⊢ ReflTransGen r a d
[PROOFSTEP]
exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : Symmetric r
⊢ Symmetric (ReflTransGen r)
[PROOFSTEP]
intro x y h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h✝ : Symmetric r
x y : α
h : ReflTransGen r x y
⊢ ReflTransGen r y x
[PROOFSTEP]
induction' h with z w _ b c
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : Symmetric r
x y : α
⊢ ReflTransGen r x x
[PROOFSTEP]
rfl
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d : α
h : Symmetric r
x y z w : α
a✝ : ReflTransGen r x z
b : r z w
c : ReflTransGen r z x
⊢ ReflTransGen r w x
[PROOFSTEP]
apply Relation.ReflTransGen.head (h b) c
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d : α
P : (a : α) → ReflTransGen r a b → Prop
a : α
h : ReflTransGen r a b
refl : P b (_ : ReflTransGen r b b)
head : ∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)
⊢ P a h
[PROOFSTEP]
induction h
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d a : α
P : (a_1 : α) → ReflTransGen r a_1 a → Prop
refl : P a (_ : ReflTransGen r a a)
head : ∀ {a_1 c : α} (h' : r a_1 c) (h : ReflTransGen r c a), P c h → P a_1 (_ : ReflTransGen r a_1 a)
⊢ P a (_ : ReflTransGen r a a)
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b c d a b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ :
∀ {P : (a : α) → ReflTransGen r a b✝ → Prop},
P b✝ (_ : ReflTransGen r b✝ b✝) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b✝), P c h → P a (_ : ReflTransGen r a b✝)) → P a a✝¹
P : (a : α) → ReflTransGen r a c✝ → Prop
refl : P c✝ (_ : ReflTransGen r c✝ c✝)
head : ∀ {a c : α} (h' : r a c) (h : ReflTransGen r c c✝), P c h → P a (_ : ReflTransGen r a c✝)
⊢ P a (_ : ReflTransGen r a c✝)
[PROOFSTEP]
case refl => exact refl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d a : α
P : (a_1 : α) → ReflTransGen r a_1 a → Prop
refl : P a (_ : ReflTransGen r a a)
head : ∀ {a_1 c : α} (h' : r a_1 c) (h : ReflTransGen r c a), P c h → P a_1 (_ : ReflTransGen r a_1 a)
⊢ P a (_ : ReflTransGen r a a)
[PROOFSTEP]
case refl => exact refl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d a : α
P : (a_1 : α) → ReflTransGen r a_1 a → Prop
refl : P a (_ : ReflTransGen r a a)
head : ∀ {a_1 c : α} (h' : r a_1 c) (h : ReflTransGen r c a), P c h → P a_1 (_ : ReflTransGen r a_1 a)
⊢ P a (_ : ReflTransGen r a a)
[PROOFSTEP]
exact refl
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b c d a b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ :
∀ {P : (a : α) → ReflTransGen r a b✝ → Prop},
P b✝ (_ : ReflTransGen r b✝ b✝) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b✝), P c h → P a (_ : ReflTransGen r a b✝)) → P a a✝¹
P : (a : α) → ReflTransGen r a c✝ → Prop
refl : P c✝ (_ : ReflTransGen r c✝ c✝)
head : ∀ {a c : α} (h' : r a c) (h : ReflTransGen r c c✝), P c h → P a (_ : ReflTransGen r a c✝)
⊢ P a (_ : ReflTransGen r a c✝)
[PROOFSTEP]
case tail b c _ hbc ih =>
-- Porting note: Lean 3 figured out the motive and `apply ih` worked
refine @ih (λ {a : α} (hab : ReflTransGen r a b) => P a (ReflTransGen.tail hab hbc)) ?_ ?_
{exact head hbc _ refl
}
{exact fun h1 h2 ↦ head h1 (h2.tail hbc)
}
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ P a (_ : ReflTransGen r a c)
[PROOFSTEP]
case tail b c _ hbc ih =>
-- Porting note: Lean 3 figured out the motive and `apply ih` worked
refine @ih (λ {a : α} (hab : ReflTransGen r a b) => P a (ReflTransGen.tail hab hbc)) ?_ ?_
{exact head hbc _ refl
}
{exact fun h1 h2 ↦ head h1 (h2.tail hbc)
}
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ P a (_ : ReflTransGen r a c)
[PROOFSTEP]
refine @ih (λ {a : α} (hab : ReflTransGen r a b) => P a (ReflTransGen.tail hab hbc)) ?_ ?_
[GOAL]
case refine_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ (fun {a} hab => P a (_ : ReflTransGen r a c)) (_ : ReflTransGen r b b)
case refine_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 b),
(fun {a} hab => P a (_ : ReflTransGen r a c)) h →
(fun {a} hab => P a (_ : ReflTransGen r a c)) (_ : ReflTransGen r a b)
[PROOFSTEP]
{exact head hbc _ refl
}
[GOAL]
case refine_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ (fun {a} hab => P a (_ : ReflTransGen r a c)) (_ : ReflTransGen r b b)
[PROOFSTEP]
exact head hbc _ refl
[GOAL]
case refine_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 b),
(fun {a} hab => P a (_ : ReflTransGen r a c)) h →
(fun {a} hab => P a (_ : ReflTransGen r a c)) (_ : ReflTransGen r a b)
[PROOFSTEP]
{exact fun h1 h2 ↦ head h1 (h2.tail hbc)
}
[GOAL]
case refine_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : ReflTransGen r a b
hbc : r b c
ih :
∀ {P : (a : α) → ReflTransGen r a b → Prop},
P b (_ : ReflTransGen r b b) →
(∀ {a c : α} (h' : r a c) (h : ReflTransGen r c b), P c h → P a (_ : ReflTransGen r a b)) → P a a✝
P : (a : α) → ReflTransGen r a c → Prop
refl : P c (_ : ReflTransGen r c c)
head : ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 c), P c_1 h → P a (_ : ReflTransGen r a c)
⊢ ∀ {a c_1 : α} (h' : r a c_1) (h : ReflTransGen r c_1 b),
(fun {a} hab => P a (_ : ReflTransGen r a c)) h →
(fun {a} hab => P a (_ : ReflTransGen r a c)) (_ : ReflTransGen r a b)
[PROOFSTEP]
exact fun h1 h2 ↦ head h1 (h2.tail hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b : α
h : ReflTransGen r a b
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
⊢ P h
[PROOFSTEP]
induction h
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
⊢ P (_ : ReflTransGen r a a)
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : P a✝¹
⊢ P (_ : ReflTransGen r a c✝)
[PROOFSTEP]
case refl => exact ih₁ a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
⊢ P (_ : ReflTransGen r a a)
[PROOFSTEP]
case refl => exact ih₁ a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
⊢ P (_ : ReflTransGen r a a)
[PROOFSTEP]
exact ih₁ a
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : P a✝¹
⊢ P (_ : ReflTransGen r a c✝)
[PROOFSTEP]
case tail b c hab hbc ih => exact ih₃ hab (single hbc) ih (ih₂ hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c✝ d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b✝ : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
b c : α
hab : ReflTransGen r a b
hbc : r b c
ih : P hab
⊢ P (_ : ReflTransGen r a c)
[PROOFSTEP]
case tail b c hab hbc ih => exact ih₃ hab (single hbc) ih (ih₂ hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c✝ d : α
P : {a b : α} → ReflTransGen r a b → Prop
a b✝ : α
ih₁ : ∀ (a : α), P (_ : ReflTransGen r a a)
ih₂ : ∀ {a b : α} (h : r a b), P (_ : ReflTransGen r a b)
ih₃ : ∀ {a b c : α} (h₁ : ReflTransGen r a b) (h₂ : ReflTransGen r b c), P h₁ → P h₂ → P (_ : ReflTransGen r a c)
b c : α
hab : ReflTransGen r a b
hbc : r b c
ih : P hab
⊢ P (_ : ReflTransGen r a c)
[PROOFSTEP]
exact ih₃ hab (single hbc) ih (ih₂ hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ReflTransGen r a b
⊢ a = b ∨ ∃ c, r a c ∧ ReflTransGen r c b
[PROOFSTEP]
induction h using Relation.ReflTransGen.head_induction_on
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ b = b ∨ ∃ c, r b c ∧ ReflTransGen r c b
[PROOFSTEP]
left
[GOAL]
case refl.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ b = b
[PROOFSTEP]
rfl
[GOAL]
case head
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d a✝¹ c✝ : α
h'✝ : r a✝¹ c✝
h✝ : ReflTransGen r c✝ b
a✝ : c✝ = b ∨ ∃ c, r c✝ c ∧ ReflTransGen r c b
⊢ a✝¹ = b ∨ ∃ c, r a✝¹ c ∧ ReflTransGen r c b
[PROOFSTEP]
right
[GOAL]
case head.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d a✝¹ c✝ : α
h'✝ : r a✝¹ c✝
h✝ : ReflTransGen r c✝ b
a✝ : c✝ = b ∨ ∃ c, r c✝ c ∧ ReflTransGen r c b
⊢ ∃ c, r a✝¹ c ∧ ReflTransGen r c b
[PROOFSTEP]
exact ⟨_, by assumption, by assumption⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d a✝¹ c✝ : α
h'✝ : r a✝¹ c✝
h✝ : ReflTransGen r c✝ b
a✝ : c✝ = b ∨ ∃ c, r c✝ c ∧ ReflTransGen r c b
⊢ r a✝¹ ?m.17056
[PROOFSTEP]
assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d a✝¹ c✝ : α
h'✝ : r a✝¹ c✝
h✝ : ReflTransGen r c✝ b
a✝ : c✝ = b ∨ ∃ c, r c✝ c ∧ ReflTransGen r c b
⊢ ReflTransGen r c✝ b
[PROOFSTEP]
assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ ReflTransGen r a b ↔ a = b ∨ ∃ c, r a c ∧ ReflTransGen r c b
[PROOFSTEP]
use cases_head
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ (a = b ∨ ∃ c, r a c ∧ ReflTransGen r c b) → ReflTransGen r a b
[PROOFSTEP]
rintro (rfl | ⟨c, hac, hcb⟩)
[GOAL]
case mpr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a c d : α
⊢ ReflTransGen r a a
[PROOFSTEP]
rfl
[GOAL]
case mpr.inr.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d c : α
hac : r a c
hcb : ReflTransGen r c b
⊢ ReflTransGen r a b
[PROOFSTEP]
exact head hac hcb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
U : Relator.RightUnique r
ab : ReflTransGen r a b
ac : ReflTransGen r a c
⊢ ReflTransGen r b c ∨ ReflTransGen r c b
[PROOFSTEP]
induction' ab with b d _ bd IH
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
U : Relator.RightUnique r
ac : ReflTransGen r a c
⊢ ReflTransGen r a c ∨ ReflTransGen r c a
[PROOFSTEP]
exact Or.inl ac
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
U : Relator.RightUnique r
ac : ReflTransGen r a c
b d : α
a✝ : ReflTransGen r a b
bd : r b d
IH : ReflTransGen r b c ∨ ReflTransGen r c b
⊢ ReflTransGen r d c ∨ ReflTransGen r c d
[PROOFSTEP]
rcases IH with (IH | IH)
[GOAL]
case tail.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
U : Relator.RightUnique r
ac : ReflTransGen r a c
b d : α
a✝ : ReflTransGen r a b
bd : r b d
IH : ReflTransGen r b c
⊢ ReflTransGen r d c ∨ ReflTransGen r c d
[PROOFSTEP]
rcases cases_head IH with (rfl | ⟨e, be, ec⟩)
[GOAL]
case tail.inl.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ d✝ : α
U : Relator.RightUnique r
b d : α
a✝ : ReflTransGen r a b
bd : r b d
ac : ReflTransGen r a b
IH : ReflTransGen r b b
⊢ ReflTransGen r d b ∨ ReflTransGen r b d
[PROOFSTEP]
exact Or.inr (single bd)
[GOAL]
case tail.inl.inr.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
U : Relator.RightUnique r
ac : ReflTransGen r a c
b d : α
a✝ : ReflTransGen r a b
bd : r b d
IH : ReflTransGen r b c
e : α
be : r b e
ec : ReflTransGen r e c
⊢ ReflTransGen r d c ∨ ReflTransGen r c d
[PROOFSTEP]
cases U bd be
[GOAL]
case tail.inl.inr.intro.intro.refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
U : Relator.RightUnique r
ac : ReflTransGen r a c
b d : α
a✝ : ReflTransGen r a b
bd : r b d
IH : ReflTransGen r b c
be : r b d
ec : ReflTransGen r d c
⊢ ReflTransGen r d c ∨ ReflTransGen r c d
[PROOFSTEP]
exact Or.inl ec
[GOAL]
case tail.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
U : Relator.RightUnique r
ac : ReflTransGen r a c
b d : α
a✝ : ReflTransGen r a b
bd : r b d
IH : ReflTransGen r c b
⊢ ReflTransGen r d c ∨ ReflTransGen r c d
[PROOFSTEP]
exact Or.inr (IH.tail bd)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d a b : α
h : TransGen r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
induction' h with b h b c _ bc ab
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d a b✝ b : α
h : r a b
⊢ ReflTransGen r a b
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝¹ c✝ d a b✝ b c : α
a✝ : TransGen r a b
bc : r b c
ab : ReflTransGen r a b
⊢ ReflTransGen r a c
[PROOFSTEP]
exact ReflTransGen.single h
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝¹ c✝ d a b✝ b c : α
a✝ : TransGen r a b
bc : r b c
ab : ReflTransGen r a b
⊢ ReflTransGen r a c
[PROOFSTEP]
exact ReflTransGen.tail ab bc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : TransGen r a b
hbc : ReflTransGen r b c
⊢ TransGen r a c
[PROOFSTEP]
induction hbc
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : TransGen r a b
⊢ TransGen r a b
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : TransGen r a b
b✝ c✝ : α
a✝¹ : ReflTransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : TransGen r a b✝
⊢ TransGen r a c✝
[PROOFSTEP]
case refl => assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : TransGen r a b
⊢ TransGen r a b
[PROOFSTEP]
case refl => assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : TransGen r a b
⊢ TransGen r a b
[PROOFSTEP]
assumption
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : TransGen r a b
b✝ c✝ : α
a✝¹ : ReflTransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : TransGen r a b✝
⊢ TransGen r a c✝
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : TransGen r a b
c d : α
a✝ : ReflTransGen r b c
hcd : r c d
hac : TransGen r a c
⊢ TransGen r a d
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : TransGen r a b
c d : α
a✝ : ReflTransGen r b c
hcd : r c d
hac : TransGen r a c
⊢ TransGen r a d
[PROOFSTEP]
exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
hbc : r b c
⊢ TransGen r a c
[PROOFSTEP]
induction hab generalizing c
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d c : α
hbc : r a c
⊢ TransGen r a c
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : ∀ {c : α}, r b✝ c → TransGen r a c
c : α
hbc : r c✝ c
⊢ TransGen r a c
[PROOFSTEP]
case refl => exact single hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d c : α
hbc : r a c
⊢ TransGen r a c
[PROOFSTEP]
case refl => exact single hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d c : α
hbc : r a c
⊢ TransGen r a c
[PROOFSTEP]
exact single hbc
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : ∀ {c : α}, r b✝ c → TransGen r a c
c : α
hbc : r c✝ c
⊢ TransGen r a c
[PROOFSTEP]
case tail _ _ _ hdb IH => exact tail (IH hdb) hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d b✝ c✝ : α
a✝ : ReflTransGen r a b✝
hdb : r b✝ c✝
IH : ∀ {c : α}, r b✝ c → TransGen r a c
c : α
hbc : r c✝ c
⊢ TransGen r a c
[PROOFSTEP]
case tail _ _ _ hdb IH => exact tail (IH hdb) hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b d b✝ c✝ : α
a✝ : ReflTransGen r a b✝
hdb : r b✝ c✝
IH : ∀ {c : α}, r b✝ c → TransGen r a c
c : α
hbc : r c✝ c
⊢ TransGen r a c
[PROOFSTEP]
exact tail (IH hdb) hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b c d : α
P : (a : α) → TransGen r a b → Prop
a : α
h : TransGen r a b
base : ∀ {a : α} (h : r a b), P a (_ : TransGen r a b)
ih : ∀ {a c : α} (h' : r a c) (h : TransGen r c b), P c h → P a (_ : TransGen r a b)
⊢ P a h
[PROOFSTEP]
induction h
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b c d a b✝ : α
a✝ : r a b✝
P : (a : α) → TransGen r a b✝ → Prop
base : ∀ {a : α} (h : r a b✝), P a (_ : TransGen r a b✝)
ih : ∀ {a c : α} (h' : r a c) (h : TransGen r c b✝), P c h → P a (_ : TransGen r a b✝)
⊢ P a (_ : TransGen r a b✝)
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b c d a b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ :
∀ {P : (a : α) → TransGen r a b✝ → Prop},
(∀ {a : α} (h : r a b✝), P a (_ : TransGen r a b✝)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b✝), P c h → P a (_ : TransGen r a b✝)) → P a a✝¹
P : (a : α) → TransGen r a c✝ → Prop
base : ∀ {a : α} (h : r a c✝), P a (_ : TransGen r a c✝)
ih : ∀ {a c : α} (h' : r a c) (h : TransGen r c c✝), P c h → P a (_ : TransGen r a c✝)
⊢ P a (_ : TransGen r a c✝)
[PROOFSTEP]
case single a h => exact base h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b c d a✝ a : α
h : r a✝ a
P : (a_1 : α) → TransGen r a_1 a → Prop
base : ∀ {a_1 : α} (h : r a_1 a), P a_1 (_ : TransGen r a_1 a)
ih : ∀ {a_1 c : α} (h' : r a_1 c) (h : TransGen r c a), P c h → P a_1 (_ : TransGen r a_1 a)
⊢ P a✝ (_ : TransGen r a✝ a)
[PROOFSTEP]
case single a h => exact base h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b c d a✝ a : α
h : r a✝ a
P : (a_1 : α) → TransGen r a_1 a → Prop
base : ∀ {a_1 : α} (h : r a_1 a), P a_1 (_ : TransGen r a_1 a)
ih : ∀ {a_1 c : α} (h' : r a_1 c) (h : TransGen r c a), P c h → P a_1 (_ : TransGen r a_1 a)
⊢ P a✝ (_ : TransGen r a✝ a)
[PROOFSTEP]
exact base h
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b c d a b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ :
∀ {P : (a : α) → TransGen r a b✝ → Prop},
(∀ {a : α} (h : r a b✝), P a (_ : TransGen r a b✝)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b✝), P c h → P a (_ : TransGen r a b✝)) → P a a✝¹
P : (a : α) → TransGen r a c✝ → Prop
base : ∀ {a : α} (h : r a c✝), P a (_ : TransGen r a c✝)
ih : ∀ {a c : α} (h' : r a c) (h : TransGen r c c✝), P c h → P a (_ : TransGen r a c✝)
⊢ P a (_ : TransGen r a c✝)
[PROOFSTEP]
case tail b c _ hbc h_ih =>
-- Lean 3 could figure out the motive and `apply h_ih` worked
refine @h_ih (λ {a : α} (hab : @TransGen α r a b) => P a (TransGen.tail hab hbc)) ?_ ?_;
exact fun h ↦ ih h (single hbc) (base hbc)
exact fun hab hbc ↦ ih hab _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : TransGen r a b
hbc : r b c
h_ih :
∀ {P : (a : α) → TransGen r a b → Prop},
(∀ {a : α} (h : r a b), P a (_ : TransGen r a b)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b), P c h → P a (_ : TransGen r a b)) → P a a✝
P : (a : α) → TransGen r a c → Prop
base : ∀ {a : α} (h : r a c), P a (_ : TransGen r a c)
ih : ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 c), P c_1 h → P a (_ : TransGen r a c)
⊢ P a (_ : TransGen r a c)
[PROOFSTEP]
case tail b c _ hbc h_ih =>
-- Lean 3 could figure out the motive and `apply h_ih` worked
refine @h_ih (λ {a : α} (hab : @TransGen α r a b) => P a (TransGen.tail hab hbc)) ?_ ?_;
exact fun h ↦ ih h (single hbc) (base hbc)
exact fun hab hbc ↦ ih hab _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : TransGen r a b
hbc : r b c
h_ih :
∀ {P : (a : α) → TransGen r a b → Prop},
(∀ {a : α} (h : r a b), P a (_ : TransGen r a b)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b), P c h → P a (_ : TransGen r a b)) → P a a✝
P : (a : α) → TransGen r a c → Prop
base : ∀ {a : α} (h : r a c), P a (_ : TransGen r a c)
ih : ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 c), P c_1 h → P a (_ : TransGen r a c)
⊢ P a (_ : TransGen r a c)
[PROOFSTEP]
refine @h_ih (λ {a : α} (hab : @TransGen α r a b) => P a (TransGen.tail hab hbc)) ?_ ?_
[GOAL]
case refine_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : TransGen r a b
hbc : r b c
h_ih :
∀ {P : (a : α) → TransGen r a b → Prop},
(∀ {a : α} (h : r a b), P a (_ : TransGen r a b)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b), P c h → P a (_ : TransGen r a b)) → P a a✝
P : (a : α) → TransGen r a c → Prop
base : ∀ {a : α} (h : r a c), P a (_ : TransGen r a c)
ih : ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 c), P c_1 h → P a (_ : TransGen r a c)
⊢ ∀ {a : α} (h : r a b), (fun {a} hab => P a (_ : TransGen r a c)) (_ : TransGen r a b)
case refine_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : TransGen r a b
hbc : r b c
h_ih :
∀ {P : (a : α) → TransGen r a b → Prop},
(∀ {a : α} (h : r a b), P a (_ : TransGen r a b)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b), P c h → P a (_ : TransGen r a b)) → P a a✝
P : (a : α) → TransGen r a c → Prop
base : ∀ {a : α} (h : r a c), P a (_ : TransGen r a c)
ih : ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 c), P c_1 h → P a (_ : TransGen r a c)
⊢ ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 b),
(fun {a} hab => P a (_ : TransGen r a c)) h → (fun {a} hab => P a (_ : TransGen r a c)) (_ : TransGen r a b)
[PROOFSTEP]
exact fun h ↦ ih h (single hbc) (base hbc)
[GOAL]
case refine_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d a b c : α
a✝ : TransGen r a b
hbc : r b c
h_ih :
∀ {P : (a : α) → TransGen r a b → Prop},
(∀ {a : α} (h : r a b), P a (_ : TransGen r a b)) →
(∀ {a c : α} (h' : r a c) (h : TransGen r c b), P c h → P a (_ : TransGen r a b)) → P a a✝
P : (a : α) → TransGen r a c → Prop
base : ∀ {a : α} (h : r a c), P a (_ : TransGen r a c)
ih : ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 c), P c_1 h → P a (_ : TransGen r a c)
⊢ ∀ {a c_1 : α} (h' : r a c_1) (h : TransGen r c_1 b),
(fun {a} hab => P a (_ : TransGen r a c)) h → (fun {a} hab => P a (_ : TransGen r a c)) (_ : TransGen r a b)
[PROOFSTEP]
exact fun hab hbc ↦ ih hab _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
P : {a b : α} → TransGen r a b → Prop
a b : α
h : TransGen r a b
base : ∀ {a b : α} (h : r a b), P (_ : TransGen r a b)
ih : ∀ {a b c : α} (h₁ : TransGen r a b) (h₂ : TransGen r b c), P h₁ → P h₂ → P (_ : TransGen r a c)
⊢ P h
[PROOFSTEP]
induction h with
| single h => exact base h
| tail hab hbc h_ih => exact ih hab (single hbc) h_ih (base hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
P : {a b : α} → TransGen r a b → Prop
a b : α
h : TransGen r a b
base : ∀ {a b : α} (h : r a b), P (_ : TransGen r a b)
ih : ∀ {a b c : α} (h₁ : TransGen r a b) (h₂ : TransGen r b c), P h₁ → P h₂ → P (_ : TransGen r a c)
⊢ P h
[PROOFSTEP]
induction h with
| single h => exact base h
| tail hab hbc h_ih => exact ih hab (single hbc) h_ih (base hbc)
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d : α
P : {a b : α} → TransGen r a b → Prop
a b : α
base : ∀ {a b : α} (h : r a b), P (_ : TransGen r a b)
ih : ∀ {a b c : α} (h₁ : TransGen r a b) (h₂ : TransGen r b c), P h₁ → P h₂ → P (_ : TransGen r a c)
b✝ : α
h : r a b✝
⊢ P (_ : TransGen r a b✝)
[PROOFSTEP]
| single h => exact base h
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d : α
P : {a b : α} → TransGen r a b → Prop
a b : α
base : ∀ {a b : α} (h : r a b), P (_ : TransGen r a b)
ih : ∀ {a b c : α} (h₁ : TransGen r a b) (h₂ : TransGen r b c), P h₁ → P h₂ → P (_ : TransGen r a c)
b✝ : α
h : r a b✝
⊢ P (_ : TransGen r a b✝)
[PROOFSTEP]
exact base h
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d : α
P : {a b : α} → TransGen r a b → Prop
a b : α
base : ∀ {a b : α} (h : r a b), P (_ : TransGen r a b)
ih : ∀ {a b c : α} (h₁ : TransGen r a b) (h₂ : TransGen r b c), P h₁ → P h₂ → P (_ : TransGen r a c)
b✝ c✝ : α
hab : TransGen r a b✝
hbc : r b✝ c✝
h_ih : P hab
⊢ P (_ : TransGen r a c✝)
[PROOFSTEP]
| tail hab hbc h_ih => exact ih hab (single hbc) h_ih (base hbc)
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d : α
P : {a b : α} → TransGen r a b → Prop
a b : α
base : ∀ {a b : α} (h : r a b), P (_ : TransGen r a b)
ih : ∀ {a b c : α} (h₁ : TransGen r a b) (h₂ : TransGen r b c), P h₁ → P h₂ → P (_ : TransGen r a c)
b✝ c✝ : α
hab : TransGen r a b✝
hbc : r b✝ c✝
h_ih : P hab
⊢ P (_ : TransGen r a c✝)
[PROOFSTEP]
exact ih hab (single hbc) h_ih (base hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
hbc : TransGen r b c
⊢ TransGen r a c
[PROOFSTEP]
induction hbc
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
b✝ : α
a✝ : r b b✝
⊢ TransGen r a b✝
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
b✝ c✝ : α
a✝¹ : TransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : TransGen r a b✝
⊢ TransGen r a c✝
[PROOFSTEP]
case single c hbc => exact tail' hab hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d : α
hab : ReflTransGen r a b
c : α
hbc : r b c
⊢ TransGen r a c
[PROOFSTEP]
case single c hbc => exact tail' hab hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d : α
hab : ReflTransGen r a b
c : α
hbc : r b c
⊢ TransGen r a c
[PROOFSTEP]
exact tail' hab hbc
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hab : ReflTransGen r a b
b✝ c✝ : α
a✝¹ : TransGen r b b✝
a✝ : r b✝ c✝
a_ih✝ : TransGen r a b✝
⊢ TransGen r a c✝
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : ReflTransGen r a b
c d : α
a✝ : TransGen r b c
hcd : r c d
hac : TransGen r a c
⊢ TransGen r a d
[PROOFSTEP]
case tail c d _ hcd hac => exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d✝ : α
hab : ReflTransGen r a b
c d : α
a✝ : TransGen r b c
hcd : r c d
hac : TransGen r a c
⊢ TransGen r a d
[PROOFSTEP]
exact hac.tail hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ TransGen r a c ↔ ∃ b, ReflTransGen r a b ∧ r b c
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun ⟨b, hab, hbc⟩ ↦ tail' hab hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : TransGen r a c
⊢ ∃ b, ReflTransGen r a b ∧ r b c
[PROOFSTEP]
cases' h with _ hac b _ hab hbc
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hac : r a c
⊢ ∃ b, ReflTransGen r a b ∧ r b c
[PROOFSTEP]
exact ⟨_, by rfl, hac⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
hac : r a c
⊢ ReflTransGen r a a
[PROOFSTEP]
rfl
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d b : α
hab : TransGen r a b
hbc : r b c
⊢ ∃ b, ReflTransGen r a b ∧ r b c
[PROOFSTEP]
exact ⟨_, hab.to_reflTransGen, hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ TransGen r a c ↔ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun ⟨b, hab, hbc⟩ ↦ head' hab hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : TransGen r a c
⊢ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
induction h
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d b✝ : α
a✝ : r a b✝
⊢ ∃ b, r a b ∧ ReflTransGen r b b✝
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : ∃ b, r a b ∧ ReflTransGen r b b✝
⊢ ∃ b, r a b ∧ ReflTransGen r b c✝
[PROOFSTEP]
case single c hac => exact ⟨_, hac, by rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d c : α
hac : r a c
⊢ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
case single c hac => exact ⟨_, hac, by rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d c : α
hac : r a c
⊢ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
exact ⟨_, hac, by rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d c : α
hac : r a c
⊢ ReflTransGen r c c
[PROOFSTEP]
rfl
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : ∃ b, r a b ∧ ReflTransGen r b b✝
⊢ ∃ b, r a b ∧ ReflTransGen r b c✝
[PROOFSTEP]
case tail b c _ hbc IH =>
rcases IH with ⟨d, had, hdb⟩
exact ⟨_, had, hdb.tail hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d b c : α
a✝ : TransGen r a b
hbc : r b c
IH : ∃ b_1, r a b_1 ∧ ReflTransGen r b_1 b
⊢ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
case tail b c _ hbc IH =>
rcases IH with ⟨d, had, hdb⟩
exact ⟨_, had, hdb.tail hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d b c : α
a✝ : TransGen r a b
hbc : r b c
IH : ∃ b_1, r a b_1 ∧ ReflTransGen r b_1 b
⊢ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
rcases IH with ⟨d, had, hdb⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d✝ b c : α
a✝ : TransGen r a b
hbc : r b c
d : α
had : r a d
hdb : ReflTransGen r d b
⊢ ∃ b, r a b ∧ ReflTransGen r b c
[PROOFSTEP]
exact ⟨_, had, hdb.tail hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : Acc r a
⊢ Acc (Relation.TransGen r) a
[PROOFSTEP]
induction' h with x _ H
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d x : α
h✝ : ∀ (y : α), r y x → Acc r y
H : ∀ (y : α), r y x → Acc (Relation.TransGen r) y
⊢ Acc (Relation.TransGen r) x
[PROOFSTEP]
refine' Acc.intro x fun y hy ↦ _
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d x : α
h✝ : ∀ (y : α), r y x → Acc r y
H : ∀ (y : α), r y x → Acc (Relation.TransGen r) y
y : α
hy : Relation.TransGen r y x
⊢ Acc (Relation.TransGen r) y
[PROOFSTEP]
cases' hy with _ hyx z _ hyz hzx
[GOAL]
case intro.single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d x : α
h✝ : ∀ (y : α), r y x → Acc r y
H : ∀ (y : α), r y x → Acc (Relation.TransGen r) y
y : α
hyx : r y x
⊢ Acc (Relation.TransGen r) y
case intro.tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d x : α
h✝ : ∀ (y : α), r y x → Acc r y
H : ∀ (y : α), r y x → Acc (Relation.TransGen r) y
y z : α
hyz : Relation.TransGen r y z
hzx : r z x
⊢ Acc (Relation.TransGen r) y
[PROOFSTEP]
exacts [H y hyx, (H z hzx).inv hyz]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
trans : Transitive r
a b : α
h : TransGen r a b
⊢ r a b
[PROOFSTEP]
induction h
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝¹ c d : α
trans : Transitive r
a b b✝ : α
a✝ : r a b✝
⊢ r a b✝
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d : α
trans : Transitive r
a b b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : r a b✝
⊢ r a c✝
[PROOFSTEP]
case single _ hc => exact hc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d : α
trans : Transitive r
a b b✝ : α
hc : r a b✝
⊢ r a b✝
[PROOFSTEP]
case single _ hc => exact hc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝¹ c d : α
trans : Transitive r
a b b✝ : α
hc : r a b✝
⊢ r a b✝
[PROOFSTEP]
exact hc
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d : α
trans : Transitive r
a b b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : r a b✝
⊢ r a c✝
[PROOFSTEP]
case tail c d _ hcd hac => exact trans hac hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d✝ : α
trans : Transitive r
a b c d : α
a✝ : TransGen r a c
hcd : r c d
hac : r a c
⊢ r a d
[PROOFSTEP]
case tail c d _ hcd hac => exact trans hac hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d✝ : α
trans : Transitive r
a b c d : α
a✝ : TransGen r a c
hcd : r c d
hac : r a c
⊢ r a d
[PROOFSTEP]
exact trans hac hcd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
hab : TransGen r a b
⊢ TransGen p (f a) (f b)
[PROOFSTEP]
induction hab
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝¹ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
b✝ : α
a✝ : r a b✝
⊢ TransGen p (f a) (f b✝)
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : TransGen p (f a) (f b✝)
⊢ TransGen p (f a) (f c✝)
[PROOFSTEP]
case single c hac => exact TransGen.single (h a c hac)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c✝ d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
c : α
hac : r a c
⊢ TransGen p (f a) (f c)
[PROOFSTEP]
case single c hac => exact TransGen.single (h a c hac)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c✝ d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
c : α
hac : r a c
⊢ TransGen p (f a) (f c)
[PROOFSTEP]
exact TransGen.single (h a c hac)
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
b✝ c✝ : α
a✝¹ : TransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : TransGen p (f a) (f b✝)
⊢ TransGen p (f a) (f c✝)
[PROOFSTEP]
case tail c d _ hcd hac => exact TransGen.tail hac (h c d hcd)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d✝ : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
c d : α
a✝ : TransGen r a c
hcd : r c d
hac : TransGen p (f a) (f c)
⊢ TransGen p (f a) (f d)
[PROOFSTEP]
case tail c d _ hcd hac => exact TransGen.tail hac (h c d hcd)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c✝ d✝ : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → p (f a) (f b)
c d : α
a✝ : TransGen r a c
hcd : r c d
hac : TransGen p (f a) (f c)
⊢ TransGen p (f a) (f d)
[PROOFSTEP]
exact TransGen.tail hac (h c d hcd)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → TransGen p (f a) (f b)
hab : TransGen r a b
⊢ TransGen p (f a) (f b)
[PROOFSTEP]
simpa [transGen_idem] using hab.lift f h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : TransGen r b a
⊢ TransGen (Function.swap r) a b
[PROOFSTEP]
induction' h with b h b c _ hbc ih
[GOAL]
case single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d b : α
h : r b✝ b
⊢ TransGen (Function.swap r) b b✝
[PROOFSTEP]
exact TransGen.single h
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d b c : α
a✝ : TransGen r b✝ b
hbc : r b c
ih : TransGen (Function.swap r) b b✝
⊢ TransGen (Function.swap r) c b✝
[PROOFSTEP]
exact ih.head hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (b : α), ¬r a b
⊢ ReflTransGen r a b ↔ b = a
[PROOFSTEP]
rw [cases_head_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (b : α), ¬r a b
⊢ (a = b ∨ ∃ c, r a c ∧ ReflTransGen r c b) ↔ b = a
[PROOFSTEP]
simp [h, eq_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ ReflTransGen r a b ↔ b = a ∨ TransGen r a b
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun h ↦ _⟩
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ReflTransGen r a b
⊢ b = a ∨ TransGen r a b
[PROOFSTEP]
cases' h with c _ hac hcb
[GOAL]
case refine'_1.refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a c d : α
⊢ a = a ∨ TransGen r a a
[PROOFSTEP]
exact Or.inl rfl
[GOAL]
case refine'_1.tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c✝ d c : α
hac : ReflTransGen r a c
hcb : r c b
⊢ b = a ∨ TransGen r a b
[PROOFSTEP]
exact Or.inr (TransGen.tail' hac hcb)
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : b = a ∨ TransGen r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
rcases h with (rfl | h)
[GOAL]
case refine'_2.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
b c d : α
⊢ ReflTransGen r b b
[PROOFSTEP]
rfl
[GOAL]
case refine'_2.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : TransGen r a b
⊢ ReflTransGen r a b
[PROOFSTEP]
exact h.to_reflTransGen
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
refl : Reflexive r
trans : Transitive r
a b : α
h : ReflTransGen r a b
⊢ r a b
[PROOFSTEP]
induction' h with b c _ h₂ IH
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
refl : Reflexive r
trans : Transitive r
a b : α
⊢ r a a
[PROOFSTEP]
apply refl
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝¹ c✝ d : α
refl : Reflexive r
trans : Transitive r
a b✝ b c : α
a✝ : ReflTransGen r a b
h₂ : r b c
IH : r a b
⊢ r a c
[PROOFSTEP]
exact trans IH h₂
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝ b✝ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → ReflTransGen p (f a) (f b)
hab : ReflTransGen r a b
⊢ ReflTransGen p (f a) (f b)
[PROOFSTEP]
simpa [reflTransGen_idem] using hab.lift f h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ReflTransGen r b a
⊢ ReflTransGen (Function.swap r) a b
[PROOFSTEP]
induction' h with b c _ hbc ih
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
⊢ ReflTransGen (Function.swap r) b b
[PROOFSTEP]
rfl
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d b c : α
a✝ : ReflTransGen r b✝ b
hbc : r b c
ih : ReflTransGen (Function.swap r) b b✝
⊢ ReflTransGen (Function.swap r) c b✝
[PROOFSTEP]
exact ih.head hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hab : ReflTransGen r a b
hac : ReflTransGen r a c
⊢ Join (ReflTransGen r) b c
[PROOFSTEP]
induction hab
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
⊢ Join (ReflTransGen r) a c
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : Join (ReflTransGen r) b✝ c
⊢ Join (ReflTransGen r) c✝ c
[PROOFSTEP]
case refl => exact ⟨c, hac, refl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
⊢ Join (ReflTransGen r) a c
[PROOFSTEP]
case refl => exact ⟨c, hac, refl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
⊢ Join (ReflTransGen r) a c
[PROOFSTEP]
exact ⟨c, hac, refl⟩
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
b✝ c✝ : α
a✝¹ : ReflTransGen r a b✝
a✝ : r b✝ c✝
a_ih✝ : Join (ReflTransGen r) b✝ c
⊢ Join (ReflTransGen r) c✝ c
[PROOFSTEP]
case tail d e _ hde ih =>
rcases ih with ⟨b, hdb, hcb⟩
have : ∃ a, ReflTransGen r e a ∧ ReflGen r b a := by
clear hcb
induction hdb
case refl => exact ⟨e, refl, ReflGen.single hde⟩
case tail f b _ hfb ih =>
rcases ih with ⟨a, hea, hfa⟩
cases' hfa with _ hfa
· exact ⟨b, hea.tail hfb, ReflGen.refl⟩
· rcases h _ _ _ hfb hfa with ⟨c, hbc, hac⟩
exact ⟨c, hea.trans hac, hbc⟩
rcases this with ⟨a, hea, hba⟩
cases' hba with _ hba
· exact ⟨b, hea, hcb⟩
· exact ⟨a, hea, hcb.tail hba⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
ih : Join (ReflTransGen r) d c
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
case tail d e _ hde ih =>
rcases ih with ⟨b, hdb, hcb⟩
have : ∃ a, ReflTransGen r e a ∧ ReflGen r b a := by
clear hcb
induction hdb
case refl => exact ⟨e, refl, ReflGen.single hde⟩
case tail f b _ hfb ih =>
rcases ih with ⟨a, hea, hfa⟩
cases' hfa with _ hfa
· exact ⟨b, hea.tail hfb, ReflGen.refl⟩
· rcases h _ _ _ hfb hfa with ⟨c, hbc, hac⟩
exact ⟨c, hea.trans hac, hbc⟩
rcases this with ⟨a, hea, hba⟩
cases' hba with _ hba
· exact ⟨b, hea, hcb⟩
· exact ⟨a, hea, hcb.tail hba⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
ih : Join (ReflTransGen r) d c
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
rcases ih with ⟨b, hdb, hcb⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
hdb : ReflTransGen r d b
hcb : ReflTransGen r c b
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
have : ∃ a, ReflTransGen r e a ∧ ReflGen r b a := by
clear hcb
induction hdb
case refl => exact ⟨e, refl, ReflGen.single hde⟩
case tail f b _ hfb ih =>
rcases ih with ⟨a, hea, hfa⟩
cases' hfa with _ hfa
· exact ⟨b, hea.tail hfb, ReflGen.refl⟩
· rcases h _ _ _ hfb hfa with ⟨c, hbc, hac⟩
exact ⟨c, hea.trans hac, hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
hdb : ReflTransGen r d b
hcb : ReflTransGen r c b
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
clear hcb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
hdb : ReflTransGen r d b
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
induction hdb
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r d a
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝² : ReflTransGen r a d
hde : r d e
b b✝ c✝ : α
a✝¹ : ReflTransGen r d b✝
a✝ : r b✝ c✝
a_ih✝ : ∃ a, ReflTransGen r e a ∧ ReflGen r b✝ a
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r c✝ a
[PROOFSTEP]
case refl => exact ⟨e, refl, ReflGen.single hde⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r d a
[PROOFSTEP]
case refl => exact ⟨e, refl, ReflGen.single hde⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r d a
[PROOFSTEP]
exact ⟨e, refl, ReflGen.single hde⟩
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝² : ReflTransGen r a d
hde : r d e
b b✝ c✝ : α
a✝¹ : ReflTransGen r d b✝
a✝ : r b✝ c✝
a_ih✝ : ∃ a, ReflTransGen r e a ∧ ReflGen r b✝ a
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r c✝ a
[PROOFSTEP]
case tail f b _ hfb ih =>
rcases ih with ⟨a, hea, hfa⟩
cases' hfa with _ hfa
· exact ⟨b, hea.tail hfb, ReflGen.refl⟩
· rcases h _ _ _ hfb hfa with ⟨c, hbc, hac⟩
exact ⟨c, hea.trans hac, hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝¹ : ReflTransGen r a d
hde : r d e
b✝ f b : α
a✝ : ReflTransGen r d f
hfb : r f b
ih : ∃ a, ReflTransGen r e a ∧ ReflGen r f a
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
case tail f b _ hfb ih =>
rcases ih with ⟨a, hea, hfa⟩
cases' hfa with _ hfa
· exact ⟨b, hea.tail hfb, ReflGen.refl⟩
· rcases h _ _ _ hfb hfa with ⟨c, hbc, hac⟩
exact ⟨c, hea.trans hac, hbc⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝¹ : ReflTransGen r a d
hde : r d e
b✝ f b : α
a✝ : ReflTransGen r d f
hfb : r f b
ih : ∃ a, ReflTransGen r e a ∧ ReflGen r f a
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
rcases ih with ⟨a, hea, hfa⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a✝² c
d e : α
a✝¹ : ReflTransGen r a✝² d
hde : r d e
b✝ f b : α
a✝ : ReflTransGen r d f
hfb : r f b
a : α
hea : ReflTransGen r e a
hfa : ReflGen r f a
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
cases' hfa with _ hfa
[GOAL]
case intro.intro.refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝¹ : ReflTransGen r a d
hde : r d e
b✝ f b : α
a✝ : ReflTransGen r d f
hfb : r f b
hea : ReflTransGen r e f
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
exact ⟨b, hea.tail hfb, ReflGen.refl⟩
[GOAL]
case intro.intro.single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a✝² c
d e : α
a✝¹ : ReflTransGen r a✝² d
hde : r d e
b✝ f b : α
a✝ : ReflTransGen r d f
hfb : r f b
a : α
hea : ReflTransGen r e a
hfa : r f a
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
rcases h _ _ _ hfb hfa with ⟨c, hbc, hac⟩
[GOAL]
case intro.intro.single.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝² b✝¹ c✝ d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac✝ : ReflTransGen r a✝² c✝
d e : α
a✝¹ : ReflTransGen r a✝² d
hde : r d e
b✝ f b : α
a✝ : ReflTransGen r d f
hfb : r f b
a : α
hea : ReflTransGen r e a
hfa : r f a
c : α
hbc : ReflGen r b c
hac : ReflTransGen r a c
⊢ ∃ a, ReflTransGen r e a ∧ ReflGen r b a
[PROOFSTEP]
exact ⟨c, hea.trans hac, hbc⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
hdb : ReflTransGen r d b
hcb : ReflTransGen r c b
this : ∃ a, ReflTransGen r e a ∧ ReflGen r b a
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
rcases this with ⟨a, hea, hba⟩
[GOAL]
case intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a✝¹ c
d e : α
a✝ : ReflTransGen r a✝¹ d
hde : r d e
b : α
hdb : ReflTransGen r d b
hcb : ReflTransGen r c b
a : α
hea : ReflTransGen r e a
hba : ReflGen r b a
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
cases' hba with _ hba
[GOAL]
case intro.intro.intro.intro.refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a c
d e : α
a✝ : ReflTransGen r a d
hde : r d e
b : α
hdb : ReflTransGen r d b
hcb : ReflTransGen r c b
hea : ReflTransGen r e b
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
exact ⟨b, hea, hcb⟩
[GOAL]
case intro.intro.intro.intro.single
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a✝¹ b✝ c d✝ : α
h : ∀ (a b c : α), r a b → r a c → ∃ d, ReflGen r b d ∧ ReflTransGen r c d
hac : ReflTransGen r a✝¹ c
d e : α
a✝ : ReflTransGen r a✝¹ d
hde : r d e
b : α
hdb : ReflTransGen r d b
hcb : ReflTransGen r c b
a : α
hea : ReflTransGen r e a
hba : r b a
⊢ Join (ReflTransGen r) e c
[PROOFSTEP]
exact ⟨a, hea, hcb.tail hba⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
r' : α → α → Prop
hr : Reflexive r
ht : Transitive r
h : ∀ (a b : α), r' a b → r a b
h' : ReflTransGen r' a b
⊢ r a b
[PROOFSTEP]
induction' h' with b c _ hbc ih
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b c d : α
r' : α → α → Prop
hr : Reflexive r
ht : Transitive r
h : ∀ (a b : α), r' a b → r a b
⊢ r a a
[PROOFSTEP]
exact hr _
[GOAL]
case tail
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b✝ c✝ d : α
r' : α → α → Prop
hr : Reflexive r
ht : Transitive r
h : ∀ (a b : α), r' a b → r a b
b c : α
a✝ : ReflTransGen r' a b
hbc : r' b c
ih : r a b
⊢ r a c
[PROOFSTEP]
exact ht ih (h _ _ hbc)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
⊢ EqvGen r a b → r a b
[PROOFSTEP]
intro h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h✝ : Equivalence r
h : EqvGen r a b
⊢ r a b
[PROOFSTEP]
induction h with
| rel => assumption
| refl => exact h.1 _
| symm => apply h.symm; assumption
| trans _ _ _ _ _ hab hbc => exact h.trans hab hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h✝ : Equivalence r
h : EqvGen r a b
⊢ r a b
[PROOFSTEP]
induction h with
| rel => assumption
| refl => exact h.1 _
| symm => apply h.symm; assumption
| trans _ _ _ _ _ hab hbc => exact h.trans hab hbc
[GOAL]
case rel
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ : α
a✝ : r x✝ y✝
⊢ r x✝ y✝
[PROOFSTEP]
| rel => assumption
[GOAL]
case rel
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ : α
a✝ : r x✝ y✝
⊢ r x✝ y✝
[PROOFSTEP]
assumption
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ : α
⊢ r x✝ x✝
[PROOFSTEP]
| refl => exact h.1 _
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ : α
⊢ r x✝ x✝
[PROOFSTEP]
exact h.1 _
[GOAL]
case symm
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ : α
a✝ : EqvGen r x✝ y✝
a_ih✝ : r x✝ y✝
⊢ r y✝ x✝
[PROOFSTEP]
| symm => apply h.symm; assumption
[GOAL]
case symm
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ : α
a✝ : EqvGen r x✝ y✝
a_ih✝ : r x✝ y✝
⊢ r y✝ x✝
[PROOFSTEP]
apply h.symm
[GOAL]
case symm
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ : α
a✝ : EqvGen r x✝ y✝
a_ih✝ : r x✝ y✝
⊢ r x✝ y✝
[PROOFSTEP]
assumption
[GOAL]
case trans
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ z✝ : α
a✝¹ : EqvGen r x✝ y✝
a✝ : EqvGen r y✝ z✝
hab : r x✝ y✝
hbc : r y✝ z✝
⊢ r x✝ z✝
[PROOFSTEP]
| trans _ _ _ _ _ hab hbc => exact h.trans hab hbc
[GOAL]
case trans
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
a b : α
h : Equivalence r
x✝ y✝ z✝ : α
a✝¹ : EqvGen r x✝ y✝
a✝ : EqvGen r y✝ z✝
hab : r x✝ y✝
hbc : r y✝ z✝
⊢ r x✝ z✝
[PROOFSTEP]
exact h.trans hab hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a b : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
h : EqvGen r a b
⊢ EqvGen p a b
[PROOFSTEP]
induction h with
| rel a b h => exact EqvGen.rel _ _ (hrp _ _ h)
| refl => exact EqvGen.refl _
| symm a b _ ih => exact EqvGen.symm _ _ ih
| trans a b c _ _ hab hbc => exact EqvGen.trans _ _ _ hab hbc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a b : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
h : EqvGen r a b
⊢ EqvGen p a b
[PROOFSTEP]
induction h with
| rel a b h => exact EqvGen.rel _ _ (hrp _ _ h)
| refl => exact EqvGen.refl _
| symm a b _ ih => exact EqvGen.symm _ _ ih
| trans a b c _ _ hab hbc => exact EqvGen.trans _ _ _ hab hbc
[GOAL]
case rel
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a✝ b✝ : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
a b : α
h : r a b
⊢ EqvGen p a b
[PROOFSTEP]
| rel a b h => exact EqvGen.rel _ _ (hrp _ _ h)
[GOAL]
case rel
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a✝ b✝ : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
a b : α
h : r a b
⊢ EqvGen p a b
[PROOFSTEP]
exact EqvGen.rel _ _ (hrp _ _ h)
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a b : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
x✝ : α
⊢ EqvGen p x✝ x✝
[PROOFSTEP]
| refl => exact EqvGen.refl _
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a b : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
x✝ : α
⊢ EqvGen p x✝ x✝
[PROOFSTEP]
exact EqvGen.refl _
[GOAL]
case symm
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a✝¹ b✝ : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
a b : α
a✝ : EqvGen r a b
ih : EqvGen p a b
⊢ EqvGen p b a
[PROOFSTEP]
| symm a b _ ih => exact EqvGen.symm _ _ ih
[GOAL]
case symm
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a✝¹ b✝ : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
a b : α
a✝ : EqvGen r a b
ih : EqvGen p a b
⊢ EqvGen p b a
[PROOFSTEP]
exact EqvGen.symm _ _ ih
[GOAL]
case trans
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a✝² b✝ : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
a b c : α
a✝¹ : EqvGen r a b
a✝ : EqvGen r b c
hab : EqvGen p a b
hbc : EqvGen p b c
⊢ EqvGen p a c
[PROOFSTEP]
| trans a b c _ _ hab hbc => exact EqvGen.trans _ _ _ hab hbc
[GOAL]
case trans
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
a✝² b✝ : α
r p : α → α → Prop
hrp : ∀ (a b : α), r a b → p a b
a b c : α
a✝¹ : EqvGen r a b
a✝ : EqvGen r b c
hab : EqvGen p a b
hbc : EqvGen p b c
⊢ EqvGen p a c
[PROOFSTEP]
exact EqvGen.trans _ _ _ hab hbc
|
State Before: α : Type ?u.39149
β : Type ?u.39152
K : Type u_1
inst✝ : DivisionRing K
a✝ b✝ c✝ d a b c : K
⊢ a / c - b / c = (a - b) / c State After: no goals Tactic: rw [sub_eq_add_neg, ← neg_div, div_add_div_same, sub_eq_add_neg] |
[STATEMENT]
lemma ntcf_Hom_NTMap_app[cat_cs_simps]:
assumes "[a, b]\<^sub>\<circ> \<in>\<^sub>\<circ> (op_cat \<AA> \<times>\<^sub>C \<BB>)\<lparr>Obj\<rparr>"
shows "Hom\<^sub>A\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>(\<phi>-,\<psi>-)\<lparr>NTMap\<rparr>\<lparr>a, b\<rparr>\<^sub>\<bullet> = ntcf_Hom_component \<phi> \<psi> a b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Hom\<^sub>A\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>(\<phi>-,\<psi>-)\<lparr>NTMap\<rparr> \<lparr>a, b\<rparr>\<^sub>\<bullet> = ntcf_Hom_component \<phi> \<psi> a b
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
[a, b]\<^sub>\<circ> \<in>\<^sub>\<circ> (op_cat \<AA> \<times>\<^sub>C \<BB>)\<lparr>Obj\<rparr>
goal (1 subgoal):
1. Hom\<^sub>A\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>(\<phi>-,\<psi>-)\<lparr>NTMap\<rparr> \<lparr>a, b\<rparr>\<^sub>\<bullet> = ntcf_Hom_component \<phi> \<psi> a b
[PROOF STEP]
unfolding ntcf_Hom_components
[PROOF STATE]
proof (prove)
using this:
[a, b]\<^sub>\<circ> \<in>\<^sub>\<circ> (op_cat \<AA> \<times>\<^sub>C \<BB>)\<lparr>Obj\<rparr>
goal (1 subgoal):
1. (\<lambda>ab\<in>\<^sub>\<circ>(op_cat (\<phi>\<lparr>NTDGDom\<rparr>) \<times>\<^sub>C \<psi>\<lparr>NTDGDom\<rparr>)\<lparr>Obj\<rparr>. ntcf_Hom_component \<phi> \<psi> (ab\<lparr>[]\<^sub>\<circ>\<rparr>) (ab\<lparr>1\<^sub>\<nat>\<rparr>)) \<lparr>a, b\<rparr>\<^sub>\<bullet> = ntcf_Hom_component \<phi> \<psi> a b
[PROOF STEP]
by (simp add: nat_omega_simps cat_cs_simps) |
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory Lookups_D
imports
"DSpec.Syscall_D"
"Lib.OptionMonadND"
begin
type_synonym 'a lookup = "cdl_state \<Rightarrow> 'a option"
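\<comment> \<open>Fetch the object at the given address and project out its CNode,
    failing if no object is there or the object is not a CNode.\<close>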
definition
opt_cnode :: "cdl_object_id \<Rightarrow> cdl_cnode lookup"
where
"opt_cnode p \<equiv> DO
t \<leftarrow> \<lambda>s. cdl_objects s p;
case t of
CNode cnode \<Rightarrow> oreturn cnode
| _ \<Rightarrow> ofail
OD"
function resolve_cap ::
"cdl_cap \<Rightarrow> cdl_cptr \<Rightarrow> nat \<Rightarrow> (cdl_fault_error + cdl_cap_ref \<times> nat) lookup"
where
"resolve_cap cnode_cap cap_ptr remaining_size =
(if is_cnode_cap cnode_cap
then DO
\<comment> \<open>Fetch the next level CNode.\<close>
cnode \<leftarrow> opt_cnode $ cap_object cnode_cap;
radix_size \<leftarrow> oreturn $ cdl_cnode_size_bits cnode;
guard_size \<leftarrow> oreturn $ cap_guard_size cnode_cap;
cap_guard \<leftarrow> oreturn $ cap_guard cnode_cap;
level_size \<leftarrow> oreturn (radix_size + guard_size);
oassert (level_size \<noteq> 0);
\<comment> \<open>Ensure the guard matches up.\<close>
guard \<leftarrow> oreturn $ (cap_ptr >> (remaining_size-guard_size)) && (mask guard_size);
if \<not>(guard_size \<le> remaining_size \<and> guard = cap_guard) \<or>
level_size > remaining_size
then othrow FaultError
else DO
\<comment> \<open>Find the next slot.\<close>
offset \<leftarrow> oreturn $ (cap_ptr >> (remaining_size-level_size)) && (mask radix_size);
slot \<leftarrow> oreturn (cap_object cnode_cap, unat offset);
size_left \<leftarrow> oreturn (remaining_size - level_size);
if size_left = 0 then
oreturnOk (slot, 0)
else DO
next_cap \<leftarrow> opt_cap slot;
if is_cnode_cap next_cap then
resolve_cap next_cap cap_ptr size_left
else
oreturnOk (slot, size_left)
OD
OD
OD
else othrow FaultError)"
by auto
termination
by (relation "measure (\<lambda>(a,b,c). c)") auto
declare resolve_cap.simps [simp del]
declare resolve_address_bits.simps [simp del]
lemma throwError_FaultError [simp]:
"throwError FaultError = throw"
apply (cases "undefined::cdl_fault_error")
apply simp
done
lemma gets_the_get_cnode:
"gets_the (opt_cnode r) = get_cnode r"
apply (simp add: get_cnode_def opt_cnode_def)
apply (rule bind_cong, rule refl)
apply (clarsimp split: cdl_object.splits)
done
lemma gets_the_resolve_cap:
"gets_the (resolve_cap cnode_cap cap_ptr remaining_size) =
resolve_address_bits cnode_cap cap_ptr remaining_size"
apply (induct cnode_cap cap_ptr remaining_size rule: resolve_cap.induct [simplified])
apply (subst resolve_cap.simps)
apply (subst resolve_address_bits.simps)
apply (clarsimp simp: unlessE_def liftE_bindE assertE_liftE gets_the_get_cnode)
apply (rule bind_cong, rule refl)
apply (rule bind_apply_cong, rule refl)
apply (clarsimp simp: liftE_bindE)
apply (rule bind_apply_cong, rule refl)
apply (clarsimp simp: in_monad gets_the_get_cnode [symmetric])
done
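\<comment> \<open>As resolve_cap, but with fault results collapsed into plain lookup failure.\<close>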
definition resolve_address_bits' ::
"cdl_cap \<Rightarrow> cdl_cptr \<Rightarrow> nat \<Rightarrow> (cdl_cap_ref \<times> nat) lookup"
where
"resolve_address_bits' cap cptr n \<equiv> odrop $ resolve_cap cap cptr n"
definition
lookup_slot' :: "cdl_object_id \<Rightarrow> cdl_cptr \<Rightarrow> cdl_cap_ref lookup"
where
"lookup_slot' thread cptr \<equiv>
DO
cspace_root \<leftarrow> opt_cap (thread, tcb_cspace_slot);
(slot, _) \<leftarrow> resolve_address_bits' cspace_root cptr word_bits;
oreturn slot
OD"
definition
lookup_cap' :: "cdl_object_id \<Rightarrow> cdl_cptr \<Rightarrow> cdl_cap lookup"
where
"lookup_cap' thread cptr \<equiv>
DO
slot \<leftarrow> lookup_slot' thread cptr;
opt_cap slot
OD"
definition
lookup_cap_and_slot' :: "cdl_object_id \<Rightarrow> cdl_cptr \<Rightarrow> (cdl_cap \<times> cdl_cap_ref) lookup"
where
"lookup_cap_and_slot' thread cptr \<equiv>
DO
slot \<leftarrow> lookup_slot' thread cptr;
cap \<leftarrow> opt_cap slot;
oreturn (cap, slot)
OD"
definition
lookup_object :: "cdl_object_id \<Rightarrow> cdl_cptr \<Rightarrow> cdl_object_id lookup"
where
"lookup_object thread cptr \<equiv>
DO
cap \<leftarrow> lookup_cap' thread cptr;
oreturn $ cap_object cap
OD"
definition
lookup_extra_caps' :: "cdl_object_id \<Rightarrow> cdl_cptr list \<Rightarrow> (cdl_cap \<times> cdl_cap_ref) list lookup"
where
"lookup_extra_caps' thread cptrs \<equiv>
omap (\<lambda>cptr. lookup_cap_and_slot' thread cptr) cptrs"
end
|
(* Title: HOL/NSA/Filter.thy
Author: Jacques D. Fleuriot, University of Cambridge
Author: Lawrence C Paulson
Author: Brian Huffman
*)
section {* Filters and Ultrafilters *}
theory Filter
imports "~~/src/HOL/Library/Infinite_Set"
begin
subsection {* Definitions and basic properties *}
subsubsection {* Filters *}
locale filter =
fixes F :: "'a set set"
assumes UNIV [iff]: "UNIV \<in> F"
assumes empty [iff]: "{} \<notin> F"
assumes Int: "\<lbrakk>u \<in> F; v \<in> F\<rbrakk> \<Longrightarrow> u \<inter> v \<in> F"
assumes subset: "\<lbrakk>u \<in> F; u \<subseteq> v\<rbrakk> \<Longrightarrow> v \<in> F"
begin
lemma memD: "A \<in> F \<Longrightarrow> - A \<notin> F"
proof
assume "A \<in> F" and "- A \<in> F"
hence "A \<inter> (- A) \<in> F" by (rule Int)
thus "False" by simp
qed
lemma not_memI: "- A \<in> F \<Longrightarrow> A \<notin> F"
by (drule memD, simp)
lemma Int_iff: "(x \<inter> y \<in> F) = (x \<in> F \<and> y \<in> F)"
by (auto elim: subset intro: Int)
end
subsubsection {* Ultrafilters *}
locale ultrafilter = filter +
assumes ultra: "A \<in> F \<or> - A \<in> F"
begin
lemma memI: "- A \<notin> F \<Longrightarrow> A \<in> F"
using ultra [of A] by simp
lemma not_memD: "A \<notin> F \<Longrightarrow> - A \<in> F"
by (rule memI, simp)
lemma not_mem_iff: "(A \<notin> F) = (- A \<in> F)"
by (rule iffI [OF not_memD not_memI])
lemma Compl_iff: "(- A \<in> F) = (A \<notin> F)"
by (rule iffI [OF not_memI not_memD])
lemma Un_iff: "(x \<union> y \<in> F) = (x \<in> F \<or> y \<in> F)"
apply (rule iffI)
apply (erule contrapos_pp)
apply (simp add: Int_iff not_mem_iff)
apply (auto elim: subset)
done
end
subsubsection {* Free Ultrafilters *}
locale freeultrafilter = ultrafilter +
assumes infinite: "A \<in> F \<Longrightarrow> infinite A"
begin
lemma finite: "finite A \<Longrightarrow> A \<notin> F"
by (erule contrapos_pn, erule infinite)
lemma singleton: "{x} \<notin> F"
by (rule finite, simp)
lemma insert_iff [simp]: "(insert x A \<in> F) = (A \<in> F)"
apply (subst insert_is_Un)
apply (subst Un_iff)
apply (simp add: singleton)
done
lemma filter: "filter F" ..
lemma ultrafilter: "ultrafilter F" ..
end
subsection {* Collect properties *}
lemma (in filter) Collect_ex:
"({n. \<exists>x. P n x} \<in> F) = (\<exists>X. {n. P n (X n)} \<in> F)"
proof
assume "{n. \<exists>x. P n x} \<in> F"
hence "{n. P n (SOME x. P n x)} \<in> F"
by (auto elim: someI subset)
thus "\<exists>X. {n. P n (X n)} \<in> F" by fast
next
show "\<exists>X. {n. P n (X n)} \<in> F \<Longrightarrow> {n. \<exists>x. P n x} \<in> F"
by (auto elim: subset)
qed
lemma (in filter) Collect_conj:
"({n. P n \<and> Q n} \<in> F) = ({n. P n} \<in> F \<and> {n. Q n} \<in> F)"
by (subst Collect_conj_eq, rule Int_iff)
lemma (in ultrafilter) Collect_not:
"({n. \<not> P n} \<in> F) = ({n. P n} \<notin> F)"
by (subst Collect_neg_eq, rule Compl_iff)
lemma (in ultrafilter) Collect_disj:
"({n. P n \<or> Q n} \<in> F) = ({n. P n} \<in> F \<or> {n. Q n} \<in> F)"
by (subst Collect_disj_eq, rule Un_iff)
lemma (in ultrafilter) Collect_all:
"({n. \<forall>x. P n x} \<in> F) = (\<forall>X. {n. P n (X n)} \<in> F)"
apply (rule Not_eq_iff [THEN iffD1])
apply (simp add: Collect_not [symmetric])
apply (rule Collect_ex)
done
subsection {* Maximal filter = Ultrafilter *}
text {*
A filter F is an ultrafilter iff it is a maximal filter,
i.e. whenever G is a filter and @{term "F \<subseteq> G"}, then @{term "F = G"}.
*}
text {*
Lemmas that show the existence of a proper extension of what was
assumed to be a maximal filter. They are used to derive a contradiction
in the proof of the ultrafilter property.
*}
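text {*
  Concretely, for a filter @{term F} and a set @{term A} with
  @{term "- A \<notin> F"}, the extension considered below consists of all
  supersets of sets of the form @{term "A \<inter> f"} with @{term "f \<in> F"};
  it contains @{term A} as well as all of @{term F}.
*}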
lemma extend_lemma1: "UNIV \<in> F \<Longrightarrow> A \<in> {X. \<exists>f\<in>F. A \<inter> f \<subseteq> X}"
by blast
lemma extend_lemma2: "F \<subseteq> {X. \<exists>f\<in>F. A \<inter> f \<subseteq> X}"
by blast
lemma (in filter) extend_filter:
assumes A: "- A \<notin> F"
shows "filter {X. \<exists>f\<in>F. A \<inter> f \<subseteq> X}" (is "filter ?X")
proof (rule filter.intro)
show "UNIV \<in> ?X" by blast
next
show "{} \<notin> ?X"
proof (clarify)
fix f assume f: "f \<in> F" and Af: "A \<inter> f \<subseteq> {}"
from Af have fA: "f \<subseteq> - A" by blast
from f fA have "- A \<in> F" by (rule subset)
with A show "False" by simp
qed
next
fix u and v
assume u: "u \<in> ?X" and v: "v \<in> ?X"
from u obtain f where f: "f \<in> F" and Af: "A \<inter> f \<subseteq> u" by blast
from v obtain g where g: "g \<in> F" and Ag: "A \<inter> g \<subseteq> v" by blast
from f g have fg: "f \<inter> g \<in> F" by (rule Int)
from Af Ag have Afg: "A \<inter> (f \<inter> g) \<subseteq> u \<inter> v" by blast
from fg Afg show "u \<inter> v \<in> ?X" by blast
next
fix u and v
assume uv: "u \<subseteq> v" and u: "u \<in> ?X"
from u obtain f where f: "f \<in> F" and Afu: "A \<inter> f \<subseteq> u" by blast
from Afu uv have Afv: "A \<inter> f \<subseteq> v" by blast
from f Afv have "\<exists>f\<in>F. A \<inter> f \<subseteq> v" by blast
thus "v \<in> ?X" by simp
qed
lemma (in filter) max_filter_ultrafilter:
assumes max: "\<And>G. \<lbrakk>filter G; F \<subseteq> G\<rbrakk> \<Longrightarrow> F = G"
shows "ultrafilter_axioms F"
proof (rule ultrafilter_axioms.intro)
fix A show "A \<in> F \<or> - A \<in> F"
proof (rule disjCI)
let ?X = "{X. \<exists>f\<in>F. A \<inter> f \<subseteq> X}"
assume AF: "- A \<notin> F"
from AF have X: "filter ?X" by (rule extend_filter)
from UNIV have AX: "A \<in> ?X" by (rule extend_lemma1)
have FX: "F \<subseteq> ?X" by (rule extend_lemma2)
from X FX have "F = ?X" by (rule max)
with AX show "A \<in> F" by simp
qed
qed
lemma (in ultrafilter) max_filter:
assumes G: "filter G" and sub: "F \<subseteq> G" shows "F = G"
proof
show "F \<subseteq> G" using sub .
show "G \<subseteq> F"
proof
fix A assume A: "A \<in> G"
from G A have "- A \<notin> G" by (rule filter.memD)
with sub have B: "- A \<notin> F" by blast
thus "A \<in> F" by (rule memI)
qed
qed
subsection {* Ultrafilter Theorem *}
text "A local context makes proof of ultrafilter Theorem more modular"
context
fixes frechet :: "'a set set"
and superfrechet :: "'a set set set"
assumes infinite_UNIV: "infinite (UNIV :: 'a set)"
defines frechet_def: "frechet \<equiv> {A. finite (- A)}"
and superfrechet_def: "superfrechet \<equiv> {G. filter G \<and> frechet \<subseteq> G}"
begin
lemma superfrechetI:
"\<lbrakk>filter G; frechet \<subseteq> G\<rbrakk> \<Longrightarrow> G \<in> superfrechet"
by (simp add: superfrechet_def)
lemma superfrechetD1:
"G \<in> superfrechet \<Longrightarrow> filter G"
by (simp add: superfrechet_def)
lemma superfrechetD2:
"G \<in> superfrechet \<Longrightarrow> frechet \<subseteq> G"
by (simp add: superfrechet_def)
text {* A few properties of free filters *}
lemma filter_cofinite:
assumes inf: "infinite (UNIV :: 'a set)"
shows "filter {A:: 'a set. finite (- A)}" (is "filter ?F")
proof (rule filter.intro)
show "UNIV \<in> ?F" by simp
next
show "{} \<notin> ?F" using inf by simp
next
fix u v assume "u \<in> ?F" and "v \<in> ?F"
thus "u \<inter> v \<in> ?F" by simp
next
fix u v assume uv: "u \<subseteq> v" and u: "u \<in> ?F"
from uv have vu: "- v \<subseteq> - u" by simp
from u show "v \<in> ?F"
by (simp add: finite_subset [OF vu])
qed
text {*
We prove: 1. the existence of a maximal filter, i.e. an ultrafilter;
2. the freeness property, i.e. that this ultrafilter is free.
We use a local context to prove various lemmas and then
export the main result: the Ultrafilter Theorem.
*}
lemma filter_frechet: "filter frechet"
by (unfold frechet_def, rule filter_cofinite [OF infinite_UNIV])
lemma frechet_in_superfrechet: "frechet \<in> superfrechet"
by (rule superfrechetI [OF filter_frechet subset_refl])
lemma lemma_mem_chain_filter:
"\<lbrakk>c \<in> chains superfrechet; x \<in> c\<rbrakk> \<Longrightarrow> filter x"
by (unfold chains_def superfrechet_def, blast)
subsubsection {* Unions of chains of superfrechets *}
text "In this section we prove that superfrechet is closed
with respect to unions of non-empty chains. We must show
1) Union of a chain is a filter,
2) Union of a chain contains frechet.
Number 2 is trivial, but 1 requires us to prove all the filter rules."
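text "The only delicate rule is intersection: u and v may come from
different members x and y of the chain, but the chain property gives
x \<subseteq> y or y \<subseteq> x, so the union x \<union> y is again a member of the
chain, and it is a filter containing both u and v."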
lemma Union_chain_UNIV:
"\<lbrakk>c \<in> chains superfrechet; c \<noteq> {}\<rbrakk> \<Longrightarrow> UNIV \<in> \<Union>c"
proof -
assume 1: "c \<in> chains superfrechet" and 2: "c \<noteq> {}"
from 2 obtain x where 3: "x \<in> c" by blast
from 1 3 have "filter x" by (rule lemma_mem_chain_filter)
hence "UNIV \<in> x" by (rule filter.UNIV)
with 3 show "UNIV \<in> \<Union>c" by blast
qed
lemma Union_chain_empty:
"c \<in> chains superfrechet \<Longrightarrow> {} \<notin> \<Union>c"
proof
assume 1: "c \<in> chains superfrechet" and 2: "{} \<in> \<Union>c"
from 2 obtain x where 3: "x \<in> c" and 4: "{} \<in> x" ..
from 1 3 have "filter x" by (rule lemma_mem_chain_filter)
hence "{} \<notin> x" by (rule filter.empty)
with 4 show "False" by simp
qed
lemma Union_chain_Int:
"\<lbrakk>c \<in> chains superfrechet; u \<in> \<Union>c; v \<in> \<Union>c\<rbrakk> \<Longrightarrow> u \<inter> v \<in> \<Union>c"
proof -
assume c: "c \<in> chains superfrechet"
assume "u \<in> \<Union>c"
then obtain x where ux: "u \<in> x" and xc: "x \<in> c" ..
assume "v \<in> \<Union>c"
then obtain y where vy: "v \<in> y" and yc: "y \<in> c" ..
from c xc yc have "x \<subseteq> y \<or> y \<subseteq> x" using c unfolding chains_def chain_subset_def by auto
with xc yc have xyc: "x \<union> y \<in> c"
by (auto simp add: Un_absorb1 Un_absorb2)
with c have fxy: "filter (x \<union> y)" by (rule lemma_mem_chain_filter)
from ux have uxy: "u \<in> x \<union> y" by simp
from vy have vxy: "v \<in> x \<union> y" by simp
from fxy uxy vxy have "u \<inter> v \<in> x \<union> y" by (rule filter.Int)
with xyc show "u \<inter> v \<in> \<Union>c" ..
qed
lemma Union_chain_subset:
"\<lbrakk>c \<in> chains superfrechet; u \<in> \<Union>c; u \<subseteq> v\<rbrakk> \<Longrightarrow> v \<in> \<Union>c"
proof -
assume c: "c \<in> chains superfrechet"
and u: "u \<in> \<Union>c" and uv: "u \<subseteq> v"
from u obtain x where ux: "u \<in> x" and xc: "x \<in> c" ..
from c xc have fx: "filter x" by (rule lemma_mem_chain_filter)
from fx ux uv have vx: "v \<in> x" by (rule filter.subset)
with xc show "v \<in> \<Union>c" ..
qed
lemma Union_chain_filter:
assumes chain: "c \<in> chains superfrechet" and nonempty: "c \<noteq> {}"
shows "filter (\<Union>c)"
proof (rule filter.intro)
show "UNIV \<in> \<Union>c" using chain nonempty by (rule Union_chain_UNIV)
next
show "{} \<notin> \<Union>c" using chain by (rule Union_chain_empty)
next
fix u v assume "u \<in> \<Union>c" and "v \<in> \<Union>c"
with chain show "u \<inter> v \<in> \<Union>c" by (rule Union_chain_Int)
next
fix u v assume "u \<in> \<Union>c" and "u \<subseteq> v"
with chain show "v \<in> \<Union>c" by (rule Union_chain_subset)
qed
lemma lemma_mem_chain_frechet_subset:
"\<lbrakk>c \<in> chains superfrechet; x \<in> c\<rbrakk> \<Longrightarrow> frechet \<subseteq> x"
by (unfold superfrechet_def chains_def, blast)
lemma Union_chain_superfrechet:
"\<lbrakk>c \<noteq> {}; c \<in> chains superfrechet\<rbrakk> \<Longrightarrow> \<Union>c \<in> superfrechet"
proof (rule superfrechetI)
assume 1: "c \<in> chains superfrechet" and 2: "c \<noteq> {}"
thus "filter (\<Union>c)" by (rule Union_chain_filter)
from 2 obtain x where 3: "x \<in> c" by blast
from 1 3 have "frechet \<subseteq> x" by (rule lemma_mem_chain_frechet_subset)
also from 3 have "x \<subseteq> \<Union>c" by blast
finally show "frechet \<subseteq> \<Union>c" .
qed
subsubsection {* Existence of free ultrafilter *}
lemma max_cofinite_filter_Ex:
"\<exists>U\<in>superfrechet. \<forall>G\<in>superfrechet. U \<subseteq> G \<longrightarrow> G = U"
proof (rule Zorn_Lemma2, safe)
fix c assume c: "c \<in> chains superfrechet"
show "\<exists>U\<in>superfrechet. \<forall>G\<in>c. G \<subseteq> U" (is "?U")
proof (cases)
assume "c = {}"
with frechet_in_superfrechet show "?U" by blast
next
assume A: "c \<noteq> {}"
from A c have "\<Union>c \<in> superfrechet"
by (rule Union_chain_superfrechet)
thus "?U" by blast
qed
qed
lemma mem_superfrechet_all_infinite:
"\<lbrakk>U \<in> superfrechet; A \<in> U\<rbrakk> \<Longrightarrow> infinite A"
proof
assume U: "U \<in> superfrechet" and A: "A \<in> U" and fin: "finite A"
from U have fil: "filter U" and fre: "frechet \<subseteq> U"
by (simp_all add: superfrechet_def)
from fin have "- A \<in> frechet" by (simp add: frechet_def)
with fre have cA: "- A \<in> U" by (rule subsetD)
from fil A cA have "A \<inter> - A \<in> U" by (rule filter.Int)
with fil show "False" by (simp add: filter.empty)
qed
text {* There exists a free ultrafilter on any infinite set *}
lemma freeultrafilter_Ex:
"\<exists>U::'a set set. freeultrafilter U"
proof -
from max_cofinite_filter_Ex obtain U
where U: "U \<in> superfrechet"
and max [rule_format]: "\<forall>G\<in>superfrechet. U \<subseteq> G \<longrightarrow> G = U" ..
from U have fil: "filter U" by (rule superfrechetD1)
from U have fre: "frechet \<subseteq> U" by (rule superfrechetD2)
have ultra: "ultrafilter_axioms U"
proof (rule filter.max_filter_ultrafilter [OF fil])
fix G assume G: "filter G" and UG: "U \<subseteq> G"
from fre UG have "frechet \<subseteq> G" by simp
with G have "G \<in> superfrechet" by (rule superfrechetI)
from this UG show "U = G" by (rule max[symmetric])
qed
have free: "freeultrafilter_axioms U"
proof (rule freeultrafilter_axioms.intro)
fix A assume "A \<in> U"
with U show "infinite A" by (rule mem_superfrechet_all_infinite)
qed
from fil ultra free have "freeultrafilter U"
by (rule freeultrafilter.intro [OF ultrafilter.intro])
(* FIXME: unfold_locales should use chained facts *)
then show ?thesis ..
qed
end
hide_const (open) filter
end
|
"""
sequencingRRB(setting::RRBSetting, node::RestrictedSequencingNode, packet)
Call the correct RRB method based on the setting
# Arguments
- `setting::RRBSetting`: Which function to call
- `node::RestrictedSequencingNode`: The current node
- `packet`: Data with the necessary info
"""
function sequencingRRB(setting::RRBSetting, node::RestrictedSequencingNode, packet)
if setting == sop
return sopRRB(node.visited, getState(node), node.value, packet[1],packet[2])
end
end
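# Hypothetical usage sketch (node and packet are assumed to be constructed
# by the surrounding search code, which is not shown in this snippet):
#   bound = sequencingRRB(sop, node, (distance_matrix, state_data))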
|
State Before: α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
⊢ SublistForall₂ R l₁ l₂ ↔ ∃ l, Forall₂ R l₁ l ∧ l <+ l₂ State After: case mp
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
h : SublistForall₂ R l₁ l₂
⊢ ∃ l, Forall₂ R l₁ l ∧ l <+ l₂
case mpr
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
h : ∃ l, Forall₂ R l₁ l ∧ l <+ l₂
⊢ SublistForall₂ R l₁ l₂ Tactic: constructor <;> intro h State Before: case mp
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
h : SublistForall₂ R l₁ l₂
⊢ ∃ l, Forall₂ R l₁ l ∧ l <+ l₂ State After: case mp.nil
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ l✝ : List β
⊢ ∃ l, Forall₂ R [] l ∧ l <+ l✝
case mp.cons
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
a : α
b : β
l1 : List α
l2 : List β
rab : R a b
a✝ : SublistForall₂ R l1 l2
ih : ∃ l, Forall₂ R l1 l ∧ l <+ l2
⊢ ∃ l, Forall₂ R (a :: l1) l ∧ l <+ b :: l2
case mp.cons_right
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
b : β
l1 : List α
l2 : List β
a✝ : SublistForall₂ R l1 l2
ih : ∃ l, Forall₂ R l1 l ∧ l <+ l2
⊢ ∃ l, Forall₂ R l1 l ∧ l <+ b :: l2 Tactic: induction' h with _ a b l1 l2 rab _ ih b l1 l2 _ ih State Before: case mp.nil
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ l✝ : List β
⊢ ∃ l, Forall₂ R [] l ∧ l <+ l✝ State After: no goals Tactic: exact ⟨nil, Forall₂.nil, nil_sublist _⟩ State Before: case mp.cons
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
a : α
b : β
l1 : List α
l2 : List β
rab : R a b
a✝ : SublistForall₂ R l1 l2
ih : ∃ l, Forall₂ R l1 l ∧ l <+ l2
⊢ ∃ l, Forall₂ R (a :: l1) l ∧ l <+ b :: l2 State After: case mp.cons.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
a : α
b : β
l1 : List α
l2 : List β
rab : R a b
a✝ : SublistForall₂ R l1 l2
l : List β
hl1 : Forall₂ R l1 l
hl2 : l <+ l2
⊢ ∃ l, Forall₂ R (a :: l1) l ∧ l <+ b :: l2 Tactic: obtain ⟨l, hl1, hl2⟩ := ih State Before: case mp.cons.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
a : α
b : β
l1 : List α
l2 : List β
rab : R a b
a✝ : SublistForall₂ R l1 l2
l : List β
hl1 : Forall₂ R l1 l
hl2 : l <+ l2
⊢ ∃ l, Forall₂ R (a :: l1) l ∧ l <+ b :: l2 State After: no goals Tactic: refine' ⟨b :: l, Forall₂.cons rab hl1, hl2.cons_cons b⟩ State Before: case mp.cons_right
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
b : β
l1 : List α
l2 : List β
a✝ : SublistForall₂ R l1 l2
ih : ∃ l, Forall₂ R l1 l ∧ l <+ l2
⊢ ∃ l, Forall₂ R l1 l ∧ l <+ b :: l2 State After: case mp.cons_right.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
b : β
l1 : List α
l2 : List β
a✝ : SublistForall₂ R l1 l2
l : List β
hl1 : Forall₂ R l1 l
hl2 : l <+ l2
⊢ ∃ l, Forall₂ R l1 l ∧ l <+ b :: l2 Tactic: obtain ⟨l, hl1, hl2⟩ := ih State Before: case mp.cons_right.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
b : β
l1 : List α
l2 : List β
a✝ : SublistForall₂ R l1 l2
l : List β
hl1 : Forall₂ R l1 l
hl2 : l <+ l2
⊢ ∃ l, Forall₂ R l1 l ∧ l <+ b :: l2 State After: no goals Tactic: exact ⟨l, hl1, hl2.trans (Sublist.cons _ (Sublist.refl _))⟩ State Before: case mpr
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ : List β
h : ∃ l, Forall₂ R l₁ l ∧ l <+ l₂
⊢ SublistForall₂ R l₁ l₂ State After: case mpr.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ l : List β
hl1 : Forall₂ R l₁ l
hl2 : l <+ l₂
⊢ SublistForall₂ R l₁ l₂ Tactic: obtain ⟨l, hl1, hl2⟩ := h State Before: case mpr.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₁ : List α
l₂ l : List β
hl1 : Forall₂ R l₁ l
hl2 : l <+ l₂
⊢ SublistForall₂ R l₁ l₂ State After: case mpr.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l : List β
hl2 : l <+ l₂
⊢ ∀ {l₁ : List α}, Forall₂ R l₁ l → SublistForall₂ R l₁ l₂ Tactic: revert l₁ State Before: case mpr.intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l : List β
hl2 : l <+ l₂
⊢ ∀ {l₁ : List α}, Forall₂ R l₁ l → SublistForall₂ R l₁ l₂ State After: case mpr.intro.intro.slnil
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l : List β
l₁ : List α
hl1 : Forall₂ R l₁ []
⊢ SublistForall₂ R l₁ []
case mpr.intro.intro.cons
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l l₁✝ l₂✝ : List β
a✝¹ : β
a✝ : l₁✝ <+ l₂✝
ih : ∀ {l₁ : List α}, Forall₂ R l₁ l₁✝ → SublistForall₂ R l₁ l₂✝
l₁ : List α
hl1 : Forall₂ R l₁ l₁✝
⊢ SublistForall₂ R l₁ (a✝¹ :: l₂✝)
case mpr.intro.intro.cons₂
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l l₁✝ l₂✝ : List β
a✝¹ : β
a✝ : l₁✝ <+ l₂✝
ih : ∀ {l₁ : List α}, Forall₂ R l₁ l₁✝ → SublistForall₂ R l₁ l₂✝
l₁ : List α
hl1 : Forall₂ R l₁ (a✝¹ :: l₁✝)
⊢ SublistForall₂ R l₁ (a✝¹ :: l₂✝) Tactic: induction' hl2 with _ _ _ _ ih _ _ _ _ ih <;> intro l₁ hl1 State Before: case mpr.intro.intro.slnil
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l : List β
l₁ : List α
hl1 : Forall₂ R l₁ []
⊢ SublistForall₂ R l₁ [] State After: case mpr.intro.intro.slnil
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l : List β
l₁ : List α
hl1 : Forall₂ R l₁ []
⊢ SublistForall₂ R [] [] Tactic: rw [forall₂_nil_right_iff.1 hl1] State Before: case mpr.intro.intro.slnil
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l : List β
l₁ : List α
hl1 : Forall₂ R l₁ []
⊢ SublistForall₂ R [] [] State After: no goals Tactic: exact SublistForall₂.nil State Before: case mpr.intro.intro.cons
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l l₁✝ l₂✝ : List β
a✝¹ : β
a✝ : l₁✝ <+ l₂✝
ih : ∀ {l₁ : List α}, Forall₂ R l₁ l₁✝ → SublistForall₂ R l₁ l₂✝
l₁ : List α
hl1 : Forall₂ R l₁ l₁✝
⊢ SublistForall₂ R l₁ (a✝¹ :: l₂✝) State After: no goals Tactic: exact SublistForall₂.cons_right (ih hl1) State Before: case mpr.intro.intro.cons₂
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l l₁✝ l₂✝ : List β
a✝¹ : β
a✝ : l₁✝ <+ l₂✝
ih : ∀ {l₁ : List α}, Forall₂ R l₁ l₁✝ → SublistForall₂ R l₁ l₂✝
l₁ : List α
hl1 : Forall₂ R l₁ (a✝¹ :: l₁✝)
⊢ SublistForall₂ R l₁ (a✝¹ :: l₂✝) State After: case mpr.intro.intro.cons₂.cons
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l l₁✝¹ l₂✝ : List β
a✝² : β
a✝¹ : l₁✝¹ <+ l₂✝
ih : ∀ {l₁ : List α}, Forall₂ R l₁ l₁✝¹ → SublistForall₂ R l₁ l₂✝
a✝ : α
l₁✝ : List α
hr : R a✝ a✝²
hl : Forall₂ R l₁✝ l₁✝¹
⊢ SublistForall₂ R (a✝ :: l₁✝) (a✝² :: l₂✝) Tactic: cases' hl1 with _ _ _ _ hr hl _ State Before: case mpr.intro.intro.cons₂.cons
α : Type u_1
β : Type u_2
γ : Type ?u.158809
δ : Type ?u.158812
R S : α → β → Prop
P : γ → δ → Prop
Rₐ : α → α → Prop
l₂ l l₁✝¹ l₂✝ : List β
a✝² : β
a✝¹ : l₁✝¹ <+ l₂✝
ih : ∀ {l₁ : List α}, Forall₂ R l₁ l₁✝¹ → SublistForall₂ R l₁ l₂✝
a✝ : α
l₁✝ : List α
hr : R a✝ a✝²
hl : Forall₂ R l₁✝ l₁✝¹
⊢ SublistForall₂ R (a✝ :: l₁✝) (a✝² :: l₂✝) State After: no goals Tactic: exact SublistForall₂.cons hr (ih hl) |
I gave it a five star rating; HOWEVER, I'm reducing that to 2 stars because they make you write something in the review. IF YOU WANT THE CUSTOMER'S RATING, then be satisfied with what the customer gives you, PERIOD!!!
Visconti Jaws BD12 Black Leather Tall Checkbook Wallet 4" x 6.5"
Store your credit cards and ID in a secure, handy way with this RFID credit card wallet. Crafted from Italian top-grain leather for a sophisticated look, this wallet is stylish and professional.
Dimensions 4.25 in. x 6.63 in.
I looked long and hard to find a replacement for my old, worn out leather wallet. I wanted one with many pockets for credit cards, IDs, etc., with pockets for bills and notes under the card area. I almost gave up the search, as it appeared no one makes them in this style, when I stumbled across this model at Overstock. It fits my need beautifully, with more than enough card slots. I keep my wallet in my front pocket, so its size is just right for me. It is Italian leather, and very nicely constructed. I expect it to last quite a long time.
This wallet had excellent quality; the leather was top notch. Plenty of room for ALL of your credit cards (or coupons, store cards, etc.). Overstock.com shipped super fast. Very pleased with this purchase.
I have always loved my old wallet given as a Christmas gift 30 years ago. It was the best, held a good number of credit cards, a place for my license,and for cash. I thought I would never find another like it. But I saw the Columbo Long Credit Card Wallet on Overstock and thought why not give it a try. It is fantastic. High quality leather, holds 21 cards and has a place for cash. I had read that the License holder was hard to extract a License from so I put mine in a Credit Card slot and the problem is solved. I am now happier with this new wallet than with my old reliable. Great Product.
I just received this fine wallet. It's very simple, and plain, and yet it looks very expensive and elegant (real leather), at least the cognac color I ordered. I have a very expensive wallet, but I cannot carry all my cards at once. With this one absolutely no problem at all. I have more than twenty, plus two large pockets for bill/receipts. The price is very low in comparison with other ones. I like the size, you can fit it nicely in any normal pants or shirts pockets. Go for it!!
I've been looking for a credit card wallet that would accommodate 18+ credit cards. After unsuccessfully searching for the wallet at countless stores, I decided to check out overstock.com. The Colombo Long Credit Card wallet is just perfect! I would highly recommend this product! It is strong, clean, and neat.
I looked long and hard to find a replacement for my old, worn out leather wallet. I wanted one with many pockets for credit cards, IDs, etc., with pockets for bills and notes under the card area. I almost gave up the search, as it appeared no one makes them in this style, when I stumbled across this model at Amazon. It fits my need beautifully, with more than enough card slots. I keep my wallet in my front pocket, so its size is just right for me. It is Italian leather, and very nicely constructed. I expect it to last quite a long time.
Purchased this Christmas for my husband, who had always carried a basic leather wallet. His ATM card literally broke off on the top portion, "twice" in the last year, due to the fact that his cards were all bunched in his traditional wallet. I've just asked him what he liked most about this wallet, he says "the fact that it was so thin now", compared to the "thickness" of the regular types he'd carried in the past. I'm sure now it has to be more comfortable to carry, not to mention to sit on, now that it is no longer 3" thick! It's just a very nice wallet. And a special "Thank You" to Overstock for the price!
Recently purchased this item since I seem to carry a lot of cards, but I also carry lots of bills like ones, fives, tens, twenties, and so on. So I also needed a capable bill holder, and I saw this one had two bill holders. I was pleasantly surprised by how it absorbed all my cards and how neat and low key it closes. As for the bill compartments, they were tight, which I attributed to the new leather, and I am still waiting to see if it will become easier to carry currency like on an oversized bi-fold, which is what I was using before. The bi-fold seemed to handle the bills with more composure, but the Colombo Long Credit Card Wallet, as its name implies, is the clear winner for carrying cards without a hassle. If the leather gives and my bills gain ground, I will be most satisfied, for I killed two birds with one stone, so to speak. I like it enough to rate it 4 stars, and if it loosens up, well, the sky is the limit. Either way, I did not think once about returning this item, and it is easy to carry around because of its low profile.
Until the day comes when one RFID card coupled with biometrics can consolidate all my plastic into one card, something like this is a necessity. This does the job I need it to do in handsome fashion.
My wife bought this wallet for me as a birthday gift and I absolutely love it. At first I only used it for business travel but now use it every day. Great craftsmanship and design. I can now keep my work related cards& ID's separated from my personal cards. It took some getting used to pulling my wallet out of my inside suit jacket pocket and don't see myself ever going back to carrying a wallet in my rear pants pocket. ( A money clip does just fine on the weekends!) Plenty of space for cash, receipts, id's, credit and rewards cards. Best of all, this wallet is not bulky and fits nicely in to the inside pocket of a suit coat or blazer.
I had been searching for a wallet like this for over six months. Every store I went into had nothing. I went online to many websites and found nothing. Then I went into my email and, like always, I had an email from Overstock.com, so I started looking on the web page and there it was. I ordered this wallet, and when I got it I was very pleased with the price, the quality, and the 20 slots that now hold all my charge cards. Finally I had what I wanted, and it only took five minutes on Overstock after over six months of looking elsewhere. This wallet is top of the line and you will love it. Great shipping as always. Thanks to Overstock.com for bringing us nice items at a low price.
For people who have a lot of credit cards and want easy access to them at a glance, this is a excellent solution. This is not a "back pocket" wallet - too large and flat for that. The quality of the leather is better than I expected for the price. The slots are a snug fit for the cards, but will ease with use. All in all, I'm very satisfied.
Well made, but quite large for fitting in a pants pocket.
I love this wallet. I have had it for a year and it still looks brand new. It holds several cards and has RFID protection. I surely recommend this wallet. Well worth the money!
It was nothing like what was shown in the picture; not worth the money.
Exactly what I wanted. It arrived sooner than I expected, and is well made. It should last a long time. Would recommend it to anyone who needs large card case.
Great! Comfortable and silky feeling! It fits my king great!
It’s not as big as I thought, which is a good thing. The quality is great; I just have to use it more, meaning taking cards in and out to break it in. Being new, it’s a bit tight.
Exactly what I was looking for. Has held up well.
"I need assurance that the black wallet is like the cognac. The photo for the black color does not look anything like the wallet I'm looking for."
They are the same, just different colors.
"The name of the Wallet is the Colombo RFID credit card wallet yet in the descriptions nowhere does it entail about the RFID features. Does it really have that security feature?"
Inside the wallet it is stamped RFID secure. It is a very nice wallet. As of today I haven't had a problem yet security wise.
"I am wondering about the color. When the cognac wallet is pictured by itself it has a darker color. When it is pictured beside the black wallet it is much lighter and looks more orangy. I am wondering which color is more accurate."
I have several purchased over the years, both black and cognac. My take is that the cognac is true to its name, but some might associate the color with light brown? Very nice, though, I would wait for a nice sale to purchase.
"Yes, it did come in a box. It's a Beautiful Walett, and would in courage you to make the purchase."
"Is it come with box? "
Yes, it came packaged in a box that was worthy of gift giving.
Yes, it comes in a box. Gave away as a gift in the box/ package that it came in.
"Is there room for a checkbook and register?"
Hello kellyjs, this wallet is designed mostly for credit cards only. It will likely be too small for a checkbook and register. Please let us know if you have additional questions. Thanks for shopping with us.
"When will the black be in stock again?"
Hello Richard.Kissick, items are restocked as they become available; however, we cannot guarantee an item will be restocked. Thank you for shopping with us. |
{-# OPTIONS --without-K --safe #-}
-- a categorical (i.e. non-skeletal) version of Lawvere Theory,
-- as per https://ncatlab.org/nlab/show/Lawvere+theory
module Categories.Theory.Lawvere where
open import Data.Nat using (ℕ)
open import Data.Product using (Σ; _,_)
open import Level
open import Categories.Category.Cartesian.Structure
open import Categories.Category using (Category; _[_,_])
open import Categories.Category.Instance.Setoids
open import Categories.Category.Monoidal.Instance.Setoids using (Setoids-CartesianCategory)
open import Categories.Category.Product
open import Categories.Functor using (Functor; _∘F_) renaming (id to idF)
open import Categories.Functor.Cartesian
open import Categories.Functor.Cartesian.Properties
import Categories.Morphism as Mor
open import Categories.NaturalTransformation using (NaturalTransformation)
private
variable
o ℓ e o′ ℓ′ e′ o″ ℓ″ e″ : Level
record FiniteProduct (o ℓ e : Level) : Set (suc (o ⊔ ℓ ⊔ e)) where
field
T : CartesianCategory o ℓ e
module T = CartesianCategory T
open Mor T.U
field
generic : T.Obj
field
obj-iso-to-generic-power : ∀ x → Σ ℕ (λ n → x ≅ T.power generic n)
record LT-Hom (T₁ : FiniteProduct o ℓ e) (T₂ : FiniteProduct o′ ℓ′ e′) : Set (o ⊔ ℓ ⊔ e ⊔ o′ ⊔ ℓ′ ⊔ e′) where
private
module T₁ = FiniteProduct T₁
module T₂ = FiniteProduct T₂
field
cartF : CartesianF T₁.T T₂.T
module cartF = CartesianF cartF
LT-id : {A : FiniteProduct o ℓ e} → LT-Hom A A
LT-id = record { cartF = idF-CartesianF _ }
LT-∘ : {A : FiniteProduct o ℓ e} {B : FiniteProduct o′ ℓ′ e′} {C : FiniteProduct o″ ℓ″ e″} →
LT-Hom B C → LT-Hom A B → LT-Hom A C
LT-∘ G H = record { cartF = ∘-CartesianF (cartF G) (cartF H) }
where open LT-Hom
record T-Algebra (FP : FiniteProduct o ℓ e) : Set (o ⊔ ℓ ⊔ e ⊔ suc (ℓ′ ⊔ e′)) where
private
module FP = FiniteProduct FP
field
cartF : CartesianF FP.T (Setoids-CartesianCategory ℓ′ e′)
module cartF = CartesianF cartF
mod : Functor FP.T.U (Setoids ℓ′ e′)
mod = cartF.F
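  -- mod is the underlying functor of the algebra: the model of the theory
  -- in setoids, obtained by forgetting that cartF preserves finite products.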
|
/-
Copyright (c) 2019 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
-/
import tactic.monotonicity
import tactic.norm_num
import algebra.order.ring
import measure_theory.measure.lebesgue
import measure_theory.function.locally_integrable
import data.list.defs
open list tactic tactic.interactive set
example
(h : 3 + 6 ≤ 4 + 5)
: 1 + 3 + 2 + 6 ≤ 4 + 2 + 1 + 5 :=
begin
ac_mono,
end
example
(h : 3 ≤ (4 : ℤ))
(h' : 5 ≤ (6 : ℤ))
: (1 + 3 + 2) - 6 ≤ (4 + 2 + 1 : ℤ) - 5 :=
begin
ac_mono,
mono,
end
example
(h : 3 ≤ (4 : ℤ))
(h' : 5 ≤ (6 : ℤ))
: (1 + 3 + 2) - 6 ≤ (4 + 2 + 1 : ℤ) - 5 :=
begin
transitivity (1 + 3 + 2 - 5 : ℤ),
{ ac_mono },
{ ac_mono },
end
example (x y z k : ℤ)
(h : 3 ≤ (4 : ℤ))
(h' : z ≤ y)
: (k + 3 + x) - y ≤ (k + 4 + x) - z :=
begin
mono, norm_num
end
example (x y z a b : ℤ)
(h : a ≤ (b : ℤ))
(h' : z ≤ y)
: (1 + a + x) - y ≤ (1 + b + x) - z :=
begin
transitivity (1 + a + x - z),
{ mono, },
{ mono, mono, mono },
end
example (x y z : ℤ)
(h' : z ≤ y)
: (1 + 3 + x) - y ≤ (1 + 4 + x) - z :=
begin
transitivity (1 + 3 + x - z),
{ mono },
{ mono, mono, norm_num },
end
example (x y z : ℤ)
(h : 3 ≤ (4 : ℤ))
(h' : z ≤ y)
: (1 + 3 + x) - y ≤ (1 + 4 + x) - z :=
begin
ac_mono, mono*
end
@[simp]
def list.le' {α : Type*} [has_le α] : list α → list α → Prop
| (x::xs) (y::ys) := x ≤ y ∧ list.le' xs ys
| [] [] := true
| _ _ := false
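-- For example, `list.le' [1, 2] [1, 3]` unfolds to `1 ≤ 1 ∧ (2 ≤ 3 ∧ true)`,
-- and lists of different lengths are never related.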
@[simp]
instance list_has_le {α : Type*} [has_le α] : has_le (list α) :=
⟨ list.le' ⟩
lemma list.le_refl {α : Type*} [preorder α] {xs : list α}
: xs ≤ xs :=
begin
induction xs with x xs,
{ trivial },
{ simp [has_le.le,list.le],
split, exact le_rfl, apply xs_ih }
end
-- @[trans]
lemma list.le_trans {α : Type*} [preorder α]
{xs zs : list α} (ys : list α)
(h : xs ≤ ys)
(h' : ys ≤ zs)
: xs ≤ zs :=
begin
revert ys zs,
induction xs with x xs
; intros ys zs h h'
; cases ys with y ys
; cases zs with z zs
; try { cases h ; cases h' ; done },
{ apply list.le_refl },
{ simp [has_le.le,list.le],
split,
apply le_trans h.left h'.left,
apply xs_ih _ h.right h'.right, }
end
@[mono]
lemma list_le_mono_left {α : Type*} [preorder α] {xs ys zs : list α}
(h : xs ≤ ys)
: xs ++ zs ≤ ys ++ zs :=
begin
revert ys,
induction xs with x xs ; intros ys h,
{ cases ys, apply list.le_refl, cases h },
{ cases ys with y ys, cases h, simp [has_le.le,list.le] at *,
revert h, apply and.imp_right,
apply xs_ih }
end
@[mono]
lemma list_le_mono_right {α : Type*} [preorder α] {xs ys zs : list α}
(h : xs ≤ ys)
: zs ++ xs ≤ zs ++ ys :=
begin
revert ys zs,
induction xs with x xs ; intros ys zs h,
{ cases ys, { simp, apply list.le_refl }, cases h },
{ cases ys with y ys, cases h, simp [has_le.le,list.le] at *,
suffices : list.le' ((zs ++ [x]) ++ xs) ((zs ++ [y]) ++ ys),
{ refine cast _ this, simp, },
apply list.le_trans (zs ++ [y] ++ xs),
{ apply list_le_mono_left,
induction zs with z zs,
{ simp [has_le.le,list.le], apply h.left },
{ simp [has_le.le,list.le], split, exact le_rfl,
apply zs_ih, } },
{ apply xs_ih h.right, } }
end
lemma bar_bar'
(h : [] ++ [3] ++ [2] ≤ [1] ++ [5] ++ [4])
: [] ++ [3] ++ [2] ++ [2] ≤ [1] ++ [5] ++ ([4] ++ [2]) :=
begin
ac_mono,
end
lemma bar_bar''
(h : [3] ++ [2] ++ [2] ≤ [5] ++ [4] ++ [])
: [1] ++ ([3] ++ [2]) ++ [2] ≤ [1] ++ [5] ++ ([4] ++ []) :=
begin
ac_mono,
end
lemma bar_bar
(h : [3] ++ [2] ≤ [5] ++ [4])
: [1] ++ [3] ++ [2] ++ [2] ≤ [1] ++ [5] ++ ([4] ++ [2]) :=
begin
ac_mono,
end
def P (x : ℕ) := 7 ≤ x
def Q (x : ℕ) := x ≤ 7
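-- P is upward closed and Q is downward closed in x, so P is covariant and
-- Q contravariant with respect to ≤; the @[mono] lemmas below record
-- exactly these facts for the `mono` tactic.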
@[mono]
lemma P_mono {x y : ℕ}
(h : x ≤ y)
: P x → P y :=
by { intro h', apply le_trans h' h }
@[mono]
lemma Q_mono {x y : ℕ}
(h : y ≤ x)
: Q x → Q y :=
by apply le_trans h
example (x y z : ℕ)
(h : x ≤ y)
: P (x + z) → P (z + y) :=
begin
ac_mono,
ac_mono,
end
example (x y z : ℕ)
(h : y ≤ x)
: Q (x + z) → Q (z + y) :=
begin
ac_mono,
ac_mono,
end
example (x y z k m n : ℤ)
(h₀ : z ≤ 0)
(h₁ : y ≤ x)
: (m + x + n) * z + k ≤ z * (y + n + m) + k :=
begin
ac_mono,
ac_mono,
ac_mono,
end
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : x ≤ y)
: (m + x + n) * z + k ≤ z * (y + n + m) + k :=
begin
ac_mono,
ac_mono,
ac_mono,
end
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : x ≤ y)
: (m + x + n) * z + k ≤ z * (y + n + m) + k :=
begin
ac_mono,
-- ⊢ (m + x + n) * z ≤ z * (y + n + m)
ac_mono,
-- ⊢ m + x + n ≤ y + n + m
ac_mono,
end
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : x ≤ y)
: (m + x + n) * z + k ≤ z * (y + n + m) + k :=
by { ac_mono* := h₁ }
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : m + x + n ≤ y + n + m)
: (m + x + n) * z + k ≤ z * (y + n + m) + k :=
by { ac_mono* := h₁ }
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : n + x + m ≤ y + n + m)
: (m + x + n) * z + k ≤ z * (y + n + m) + k :=
begin
ac_mono* : m + x + n ≤ y + n + m,
transitivity ; [ skip , apply h₁ ],
apply le_of_eq,
ac_refl,
end
example (x y z k m n : ℤ)
(h₁ : x ≤ y)
: true :=
begin
have : (m + x + n) * z + k ≤ z * (y + n + m) + k,
{ ac_mono,
success_if_fail { ac_mono },
admit },
trivial
end
example (x y z k m n : ℕ)
(h₁ : x ≤ y)
: true :=
begin
have : (m + x + n) * z + k ≤ z * (y + n + m) + k,
{ ac_mono*,
change 0 ≤ z, apply nat.zero_le, },
trivial
end
example (x y z k m n : ℕ)
(h₁ : x ≤ y)
: true :=
begin
have : (m + x + n) * z + k ≤ z * (y + n + m) + k,
{ ac_mono,
change (m + x + n) * z ≤ z * (y + n + m),
admit },
trivial,
end
example (x y z k m n i j : ℕ)
(h₁ : x + i = y + j)
: (m + x + n + i) * z + k = z * (j + n + m + y) + k :=
begin
ac_mono^3,
cc
end
example (x y z k m n i j : ℕ)
(h₁ : x + i = y + j)
: z * (x + i + n + m) + k = z * (y + j + n + m) + k :=
begin
congr,
simp [h₁],
end
example (x y z k m n i j : ℕ)
(h₁ : x + i = y + j)
: (m + x + n + i) * z + k = z * (j + n + m + y) + k :=
begin
ac_mono*,
cc,
end
example (x y : ℕ)
(h : x ≤ y)
: true :=
begin
(do v ← mk_mvar,
p ← to_expr ```(%%v + x ≤ y + %%v),
assert `h' p),
ac_mono := h,
trivial,
exact 1,
end
example {x y z : ℕ} : true :=
begin
have : y + x ≤ y + z,
{ mono,
guard_target' x ≤ z,
admit },
trivial
end
example {x y z : ℕ} : true :=
begin
suffices : x + y ≤ z + y, trivial,
mono,
guard_target' x ≤ z,
admit,
end
example {x y z w : ℕ} : true :=
begin
have : x + y ≤ z + w,
{ mono,
guard_target' x ≤ z, admit,
guard_target' y ≤ w, admit },
trivial
end
example {x y z w : ℕ} : true :=
begin
have : x * y ≤ z * w,
{ mono with [0 ≤ z,0 ≤ y],
{ guard_target 0 ≤ z, admit },
{ guard_target 0 ≤ y, admit },
guard_target' x ≤ z, admit,
guard_target' y ≤ w, admit },
trivial
end
example {x y z w : Prop} : true :=
begin
have : x ∧ y → z ∧ w,
{ mono,
guard_target' x → z, admit,
guard_target' y → w, admit },
trivial
end
example {x y z w : Prop} : true :=
begin
have : x ∨ y → z ∨ w,
{ mono,
guard_target' x → z, admit,
guard_target' y → w, admit },
trivial
end
example {x y z w : ℤ} : true :=
begin
suffices : x + y < w + z, trivial,
have : x < w, admit,
have : y ≤ z, admit,
mono right,
end
example {x y z w : ℤ} : true :=
begin
suffices : x * y < w * z, trivial,
have : x < w, admit,
have : y ≤ z, admit,
mono right,
{ guard_target' 0 < y, admit },
{ guard_target' 0 ≤ w, admit },
end
open tactic
example (x y : ℕ)
(h : x ≤ y)
: true :=
begin
(do v ← mk_mvar,
p ← to_expr ```(%%v + x ≤ y + %%v),
assert `h' p),
ac_mono := h,
trivial,
exact 3
end
example {α} [linear_order α]
(a b c d e : α) :
max a b ≤ e → b ≤ e :=
by { mono, apply le_max_right }
example (a b c d e : Prop)
(h : d → a) (h' : c → e) :
(a ∧ b → c) ∨ d → (d ∧ b → e) ∨ a :=
begin
mono,
mono,
mono,
end
example : ∫ x in Icc 0 1, real.exp x ≤ ∫ x in Icc 0 1, real.exp (x+1) :=
begin
mono,
{ exact real.continuous_exp.locally_integrable is_compact_Icc },
{ exact (real.continuous_exp.comp $ continuous_add_right 1).locally_integrable
is_compact_Icc },
intro x,
dsimp only,
mono,
linarith
end
|
[STATEMENT]
lemma smcf_comp_is_semifunctor[smc_cs_intros]:
assumes "\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>" and "\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>"
shows "\<GG> \<circ>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
interpret L: is_semifunctor \<alpha> \<BB> \<CC> \<GG>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
by (rule assms(1))
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
interpret R: is_semifunctor \<alpha> \<AA> \<BB> \<FF>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
[PROOF STEP]
by (rule assms(2))
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
proof(rule is_semifunctorI, unfold dghm_comp_components(3,4))
[PROOF STATE]
proof (state)
goal (9 subgoals):
1. \<Z> \<alpha>
2. vfsequence (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
3. semicategory \<alpha> \<AA>
4. semicategory \<alpha> \<CC>
5. vcard (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = 4\<^sub>\<nat>
6. smcf_dghm (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : smc_dg \<AA> \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>
7. \<FF>\<lparr>HomDom\<rparr> = \<AA>
8. \<GG>\<lparr>HomCod\<rparr> = \<CC>
9. \<And>b c g a f. \<lbrakk>g : b \<mapsto>\<^bsub>\<AA>\<^esub> c; f : a \<mapsto>\<^bsub>\<AA>\<^esub> b\<rbrakk> \<Longrightarrow> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
[PROOF STEP]
show "vfsequence (\<GG> \<circ>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<FF>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vfsequence (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
by (simp add: dghm_comp_def)
[PROOF STATE]
proof (state)
this:
vfsequence (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
goal (8 subgoals):
1. \<Z> \<alpha>
2. semicategory \<alpha> \<AA>
3. semicategory \<alpha> \<CC>
4. vcard (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = 4\<^sub>\<nat>
5. smcf_dghm (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : smc_dg \<AA> \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>
6. \<FF>\<lparr>HomDom\<rparr> = \<AA>
7. \<GG>\<lparr>HomCod\<rparr> = \<CC>
8. \<And>b c g a f. \<lbrakk>g : b \<mapsto>\<^bsub>\<AA>\<^esub> c; f : a \<mapsto>\<^bsub>\<AA>\<^esub> b\<rbrakk> \<Longrightarrow> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
[PROOF STEP]
show "vcard (\<GG> \<circ>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<FF>) = 4\<^sub>\<nat>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vcard (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = 4\<^sub>\<nat>
[PROOF STEP]
unfolding dghm_comp_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vcard [\<GG>\<lparr>ObjMap\<rparr> \<circ>\<^sub>\<circ> \<FF>\<lparr>ObjMap\<rparr>, \<GG>\<lparr>ArrMap\<rparr> \<circ>\<^sub>\<circ> \<FF>\<lparr>ArrMap\<rparr>, \<FF>\<lparr>HomDom\<rparr>, \<GG>\<lparr>HomCod\<rparr>]\<^sub>\<circ> = 4\<^sub>\<nat>
[PROOF STEP]
by (simp add: nat_omega_simps)
[PROOF STATE]
proof (state)
this:
vcard (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = 4\<^sub>\<nat>
goal (7 subgoals):
1. \<Z> \<alpha>
2. semicategory \<alpha> \<AA>
3. semicategory \<alpha> \<CC>
4. smcf_dghm (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : smc_dg \<AA> \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>
5. \<FF>\<lparr>HomDom\<rparr> = \<AA>
6. \<GG>\<lparr>HomCod\<rparr> = \<CC>
7. \<And>b c g a f. \<lbrakk>g : b \<mapsto>\<^bsub>\<AA>\<^esub> c; f : a \<mapsto>\<^bsub>\<AA>\<^esub> b\<rbrakk> \<Longrightarrow> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
[PROOF STEP]
fix g b c f a
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<Z> \<alpha>
2. semicategory \<alpha> \<AA>
3. semicategory \<alpha> \<CC>
4. smcf_dghm (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : smc_dg \<AA> \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>
5. \<FF>\<lparr>HomDom\<rparr> = \<AA>
6. \<GG>\<lparr>HomCod\<rparr> = \<CC>
7. \<And>b c g a f. \<lbrakk>g : b \<mapsto>\<^bsub>\<AA>\<^esub> c; f : a \<mapsto>\<^bsub>\<AA>\<^esub> b\<rbrakk> \<Longrightarrow> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
[PROOF STEP]
assume "g : b \<mapsto>\<^bsub>\<AA>\<^esub> c" "f : a \<mapsto>\<^bsub>\<AA>\<^esub> b"
[PROOF STATE]
proof (state)
this:
g : b \<mapsto>\<^bsub>\<AA>\<^esub> c
f : a \<mapsto>\<^bsub>\<AA>\<^esub> b
goal (7 subgoals):
1. \<Z> \<alpha>
2. semicategory \<alpha> \<AA>
3. semicategory \<alpha> \<CC>
4. smcf_dghm (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : smc_dg \<AA> \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>
5. \<FF>\<lparr>HomDom\<rparr> = \<AA>
6. \<GG>\<lparr>HomCod\<rparr> = \<CC>
7. \<And>b c g a f. \<lbrakk>g : b \<mapsto>\<^bsub>\<AA>\<^esub> c; f : a \<mapsto>\<^bsub>\<AA>\<^esub> b\<rbrakk> \<Longrightarrow> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
g : b \<mapsto>\<^bsub>\<AA>\<^esub> c
f : a \<mapsto>\<^bsub>\<AA>\<^esub> b
[PROOF STEP]
show "(\<GG> \<circ>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> =
(\<GG> \<circ>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>"
[PROOF STATE]
proof (prove)
using this:
\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
g : b \<mapsto>\<^bsub>\<AA>\<^esub> c
f : a \<mapsto>\<^bsub>\<AA>\<^esub> b
goal (1 subgoal):
1. (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
[PROOF STEP]
by (cs_concl cs_shallow cs_simp: smc_cs_simps cs_intro: smc_cs_intros)
[PROOF STATE]
proof (state)
this:
(\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> f\<rparr> = (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr>
goal (6 subgoals):
1. \<Z> \<alpha>
2. semicategory \<alpha> \<AA>
3. semicategory \<alpha> \<CC>
4. smcf_dghm (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : smc_dg \<AA> \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>
5. \<FF>\<lparr>HomDom\<rparr> = \<AA>
6. \<GG>\<lparr>HomCod\<rparr> = \<CC>
[PROOF STEP]
qed
(
auto
simp: slicing_commute[symmetric] smc_cs_simps smc_cs_intros
intro: dg_cs_intros slicing_intros
)
[PROOF STATE]
proof (state)
this:
\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
goal:
No subgoals!
[PROOF STEP]
qed |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Strings
------------------------------------------------------------------------
module Data.String where
open import Data.List as List using (_∷_; []; List)
open import Data.Vec as Vec using (Vec)
open import Data.Colist as Colist using (Colist)
open import Data.Char as Char using (Char)
open import Data.Bool using (Bool; true; false)
open import Function
open import Relation.Nullary
open import Relation.Nullary.Decidable
open import Relation.Binary
open import Relation.Binary.List.StrictLex as StrictLex
import Relation.Binary.On as On
open import Relation.Binary.PropositionalEquality as PropEq using (_≡_)
open import Relation.Binary.PropositionalEquality.TrustMe
import Data.String.Core as Core
open Core public using (String)
open Core
-- Possibly infinite strings.
Costring : Set
Costring = Colist Char
------------------------------------------------------------------------
-- Operations
infixr 5 _++_
_++_ : String → String → String
_++_ = primStringAppend
toList : String → List Char
toList = primStringToList
fromList : List Char → String
fromList = primStringFromList
toList∘fromList : ∀ s → toList (fromList s) ≡ s
toList∘fromList s = trustMe
fromList∘toList : ∀ s → fromList (toList s) ≡ s
fromList∘toList s = trustMe
toVec : (s : String) → Vec Char (List.length (toList s))
toVec s = Vec.fromList (toList s)
toCostring : String → Costring
toCostring = Colist.fromList ∘ toList
unlines : List String → String
unlines [] = ""
unlines (x ∷ xs) = x ++ "\n" ++ unlines xs
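-- For example, unlines ("a" ∷ "b" ∷ []) reduces to "a\nb\n".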
show : String → String
show = primShowString
-- Informative equality test.
_≟_ : Decidable {A = String} _≡_
s₁ ≟ s₂ with primStringEquality s₁ s₂
... | true = yes trustMe
... | false = no whatever
where postulate whatever : _
-- Boolean equality test.
--
-- Why is the definition _==_ = primStringEquality not used? One
-- reason is that the present definition can sometimes improve type
-- inference, at least with the version of Agda that is current at the
-- time of writing: see unit-test below.
infix 4 _==_
_==_ : String → String → Bool
s₁ == s₂ = ⌊ s₁ ≟ s₂ ⌋
private
-- The following unit test does not type-check (at the time of
-- writing) if _==_ is replaced by primStringEquality.
data P : (String → Bool) → Set where
p : (c : String) → P (_==_ c)
unit-test : P (_==_ "")
unit-test = p _
setoid : Setoid _ _
setoid = PropEq.setoid String
decSetoid : DecSetoid _ _
decSetoid = PropEq.decSetoid _≟_
-- Lexicographic ordering of strings.
strictTotalOrder : StrictTotalOrder _ _ _
strictTotalOrder =
On.strictTotalOrder
(StrictLex.<-strictTotalOrder Char.strictTotalOrder)
toList
|
Formal statement is: lemma support_on_if: "a \<noteq> 0 \<Longrightarrow> support_on A (\<lambda>x. if P x then a else 0) = {x\<in>A. P x}" Informal statement is: If $a \neq 0$, then the support on $A$ of the function sending $x$ to $a$ when $P(x)$ holds and to $0$ otherwise is exactly $\{x \in A \mid P(x)\}$.
-- Andreas, 2012-01-13
module Issue555b where
data Empty : Set where
record Unit : Set where
constructor tt
-- Do we want to allow this?
data Exp (A : Set) : Set1
data Exp where -- ? needs to report that too few parameters are given
var : Exp Empty
app : {A B : Set} → Exp (A → B) → Exp A → Exp B
-- Basically, A is first declared as a parameter, but later,
-- in the definition, it is turned into an index.
bla : {A : Set} → Exp A → Unit
bla var = tt
bla (app f a) = bla f
|
```python
# imports
import xml.etree.ElementTree as ET
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display,clear_output
import glob
import scipy
import scipy.signal as signal
from scipy.interpolate import interp1d
from scipy.optimize import leastsq
from sympy import pprint
import sympy
from scipy.optimize import curve_fit
import peakutils
import mpld3
mpld3.enable_notebook()
import warnings
warnings.filterwarnings('ignore')
```
```python
# XML reader
class XML2DataFrame:
    def __init__(self, xml_data):
        self.root = ET.XML(xml_data)

    def parse_root(self, root):
        # One parsed dict per top-level child of the root element.
        return [self.parse_element(child) for child in iter(root)]

    def parse_element(self, element, parsed=None):
        # Recursively flatten an element into a single dict:
        # attributes become "<tag>_<attr>" keys, text becomes a "<tag>" key.
        if parsed is None:
            parsed = dict()
        for key in element.keys():
            parsed[element.tag + "_" + key] = element.attrib.get(key)
        if element.text:
            parsed[element.tag] = element.text
        for child in list(element):
            self.parse_element(child, parsed)
        return parsed

    def process_data(self):
        # Build a DataFrame with one row per top-level element.
        structure_data = self.parse_root(self.root)
        return pd.DataFrame(structure_data)
```
```python
#
recordings = [file for file in sorted(glob.glob('./Data/*.xml'))][::-1]
print('number of files: %s (%s subjects)'%(len(recordings),len(recordings)/8))
files_dropdown=widgets.Dropdown(
options=recordings,
description='recording:',
disabled=False, layout=widgets.Layout(width='50%'))
display(files_dropdown)
```
number of files: 2 (0.25 subjects)
# Load individual recording
```python
# Loading vr data
def LOAD(dataFile):
xml2df = XML2DataFrame(open( dataFile).read())
df = xml2df.process_data()
df=df.drop(['frame'], axis=1)
df=df.apply(pd.to_numeric, errors='ignore',downcast='integer')
return df
df=LOAD(files_dropdown.value)
# print(df.info())
# display(df.columns.values)
display(df.head())
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Heading_Value_X</th>
<th>Heading_Value_Y</th>
<th>Participant_Age</th>
<th>Participant_Gender</th>
<th>Participant_Name</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>P</td>
</tr>
<tr>
<th>1</th>
<td>1209.094</td>
<td>361.3125</td>
<td>NaN</td>
<td>NaN</td>
<td>P</td>
</tr>
<tr>
<th>2</th>
<td>1209.094</td>
<td>361.3125</td>
<td>NaN</td>
<td>NaN</td>
<td>P</td>
</tr>
<tr>
<th>3</th>
<td>1209.094</td>
<td>361.3125</td>
<td>NaN</td>
<td>NaN</td>
<td>P</td>
</tr>
<tr>
<th>4</th>
<td>1209.094</td>
<td>361.3125</td>
<td>NaN</td>
<td>NaN</td>
<td>P</td>
</tr>
</tbody>
</table>
</div>
```python
df.plot(y='Heading_Value_X')
```
|
GospelBreed.com is an online platform that delivers Christian Relationship Stories, Gospel Songs and Educative Articles among others.
Our ultimate goal is to breed kingdom champions by propelling you to act and become the person that God intends you to be.
Stay glued to us and you’ll get that inspirational thought that will spur you to greatness… |
(* Property from Productive Use of Failure in Inductive Proof,
Andrew Ireland and Alan Bundy, JAR 1996.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_prop_11
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
fun x :: "'a list => 'a list => 'a list" where
"x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"
fun rev :: "'a list => 'a list" where
"rev (nil2) = nil2"
| "rev (cons2 z xs) = x (rev xs) (cons2 z (nil2))"
lemma app_nil: "x y nil2 = y"
by (induct y, auto)
lemma app_assoc: "x (x w y) z = x w (x y z)"
by(induction w, auto)
lemma rev_app: "rev (x y z) = x (rev z) (rev y)"
apply(induction y, simp add: app_nil)
apply(simp add: app_assoc)
done
lemma revrev: "rev (rev y) = y"
apply(induct y rule: rev.induct, auto)
apply(simp add: rev_app)
done
theorem property0 :
"((rev (x (rev y) (rev z))) = (x z y))"
apply(induct y, auto)
apply(simp add: app_nil revrev)
apply(simp add: revrev rev_app)
done
end
|
\subsection{Small DB 2, the dynamic array version in C} % (fold)
\label{sub:small_db_2_the_dynamic_array_version}
\sref{sec:using_dynamic_memory_allocation}, \nameref{sec:using_dynamic_memory_allocation}, introduced a version of the Small DB program with a dynamic array structure, as opposed to the fixed array structure used to manage the rows in \cref{cha:more_data_types}. The C code for the altered functions and procedures is shown in \lref{clst:dynamic-array-db}; the original version can be found in \lref{lst:c-small-db}.
\straightcode{\ccode{clst:dynamic-array-db}{C code for the dynamic array version of Small DB, see \lref{lst:c-small-db} for the original version of this program}{code/c/dynamic-memory/array-db-for-chap.c}}
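
A minimal sketch of the \texttt{realloc}-based growth step at the heart of this design is shown below; the \texttt{data\_store} and \texttt{row} identifiers are illustrative, not taken from the listing itself:

\begin{verbatim}
// Grow or shrink the data store's dynamic rows array (assumes <stdbool.h>).
// realloc preserves the existing row data when resizing the block.
bool resize_rows(data_store *db, int new_size)
{
    row *new_rows = (row *) realloc(db->rows, sizeof(row) * new_size);
    if (new_rows == NULL && new_size > 0) return false; // old block unchanged
    db->rows = new_rows;
    db->row_count = new_size;
    return true;
}
\end{verbatim}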
\mynote{
\begin{itemize}
\item This version of the Small DB program includes the ability to add, delete, and print rows from the data store.
\item The data store includes a dynamic array that is managed using \nameref{ssub:realloc}.
\item See \sref{sec:using_dynamic_memory_allocation} for a discussion of how this works.
\end{itemize}
} |
"""
signif_grps.py
This module contains functions to run analyses of ROI groups showing
significant differences between unexpected and expected sequences in the data
generated by the Allen Institute OpenScope experiments for the Credit
Assignment Project.
Authors: Colleen Gillon
Date: October, 2018
Note: this code uses python 3.7.
"""
import logging
import numpy as np
from util import gen_util, logger_util, math_util, rand_util
logger = logging.getLogger(__name__)
TAB = " "
#############################################
def sep_grps(sign_rois, nrois, grps="all", tails="2", add_exp=False):
"""
sep_grps(sign_rois, nrois)
Separate ROIs into groups based on whether their first/last quantile was
significant in a specific tail.
Required args:
- sign_rois (nested list): list of significant ROIs, structured as:
quantile (x tail)
- nrois (int) : total number of ROIs in data (signif or not)
Optional args:
- grps (str or list): set of groups or list of sets of groups to
return, e.g., "all", "change", "no_change",
"reduc", "incr"
default: "all"
        - tails (str)       : tail(s) used in analysis: "hi", "lo" or "2"
                              default: "2"
- add_exp (bool) : if True, group of ROIs showing no significance in
either is included in the groups returned
default: False
Returns:
        - roi_grps (list) : ROI numbers included in each group. If the grps
                            parameter contains a single set, structured as
                            ROIs per roi grp; otherwise as sets x roi grps.
        - grp_names (list): names of the roi grps (order preserved). If the
                            grps parameter contains a single set, a flat list
                            of names; otherwise one sublist of names per set.
"""
grps = gen_util.list_if_not(grps)
# get ROI numbers for each group
if tails in ["hi", "lo"]:
# sign_rois[first/last]
all_rois = list(range(nrois))
unexp_unexp = list(set(sign_rois[0]) & set(sign_rois[1]))
unexp_exp = list(set(sign_rois[0]) - set(sign_rois[1]))
exp_unexp = list(set(sign_rois[1]) - set(sign_rois[0]))
exp_exp = list(
set(all_rois) - set(unexp_unexp) - set(unexp_exp) - set(exp_unexp))
# to store stats
roi_grps = [unexp_unexp, unexp_exp, exp_unexp, exp_exp]
grp_names = ["unexp_unexp", "unexp_exp", "exp_unexp", "exp_exp"]
exp_ind = 3
grp_inds = []
for i, g in enumerate(grps):
if g == "all":
grp_ind = list(range(len(roi_grps)))
elif g == "change":
grp_ind = [1, 2]
elif g == "no_change":
grp_ind = [0, 3]
elif g == "reduc":
grp_ind = [1]
elif g == "incr":
grp_ind = [2]
else:
gen_util.accepted_values_error(
"grps", g, ["all", "change", "no_change", "reduc", "incr"])
if add_exp and exp_ind not in grp_ind:
grp_ind.extend([exp_ind])
grp_inds.append(sorted(grp_ind))
elif str(tails) == "2":
# sign_rois[first/last][lo/up]
all_rois = list(range(nrois))
unexp_up_unexp_up = list(set(sign_rois[0][1]) & set(sign_rois[1][1]))
unexp_up_unexp_lo = list(set(sign_rois[0][1]) & set(sign_rois[1][0]))
unexp_lo_unexp_up = list(set(sign_rois[0][0]) & set(sign_rois[1][1]))
unexp_lo_unexp_lo = list(set(sign_rois[0][0]) & set(sign_rois[1][0]))
unexp_up_exp = list(
(set(sign_rois[0][1]) - set(sign_rois[1][1]) - \
set(sign_rois[1][0])))
unexp_lo_exp = list(
(set(sign_rois[0][0]) - set(sign_rois[1][1]) - \
set(sign_rois[1][0])))
exp_unexp_up = list(
(set(sign_rois[1][1]) - set(sign_rois[0][1]) - \
set(sign_rois[0][0])))
exp_unexp_lo = list(
(set(sign_rois[1][0]) - set(sign_rois[0][1]) - \
set(sign_rois[0][0])))
exp_exp = list(
(set(all_rois) - set(sign_rois[0][1]) - set(sign_rois[1][1]) - \
set(sign_rois[0][0]) - set(sign_rois[1][0])))
# to store stats
roi_grps = [unexp_up_unexp_up, unexp_up_unexp_lo, unexp_lo_unexp_up,
unexp_lo_unexp_lo, unexp_up_exp, unexp_lo_exp, exp_unexp_up,
exp_unexp_lo, exp_exp]
exp_ind = 8 # index of exp_exp
# group names
grp_names = ["unexp-up_unexp-up", "unexp-up_unexp-lo", "unexp-lo_unexp-up",
"unexp-lo_unexp-lo", "unexp-up_exp", "unexp-lo_exp", "exp_unexp-up",
"exp_unexp-lo", "exp_exp"]
grp_inds = []
for i, g in enumerate(grps):
if g == "all":
grp_ind = list(range(len(roi_grps)))
elif g == "change":
grp_ind = [1, 2, 4, 5, 6, 7]
elif g == "no_change":
grp_ind = [0, 3, 8]
elif g == "reduc":
grp_ind = [1, 4, 7]
elif g == "incr":
grp_ind = [2, 5, 6]
else:
gen_util.accepted_values_error(
"grps", grps,
["all", "change", "no_change", "reduc", "incr"])
if add_exp and exp_ind not in grp_ind:
grp_ind.extend([exp_ind])
grp_inds.append(sorted(grp_ind))
all_roi_grps = [[roi_grps[i] for i in grp_ind] for grp_ind in grp_inds]
all_grp_names = [[grp_names[i] for i in grp_ind] for grp_ind in grp_inds]
if len(grps) == 1:
all_roi_grps = all_roi_grps[0]
all_grp_names = all_grp_names[0]
return all_roi_grps, all_grp_names
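
# Example (hypothetical data): for a 2-tailed analysis of 10 ROIs, where
# sign_rois[quantile][lo/up] lists the significant ROI indices:
#   sign_rois = [[[0], [1, 2]], [[0], [2]]]
#   roi_grps, grp_names = sep_grps(sign_rois, nrois=10, grps="all", tails="2")
# roi_grps then contains one ROI list per group name in grp_names.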
#############################################
def grp_stats(integ_stats, grps, plot_vals="both", op="diff", stats="mean",
error="std", scale=False):
"""
grp_stats(integ_stats, grps)
Calculate statistics (e.g. mean + sem) across quantiles for each group
and session.
Required args:
- integ_stats (list): list of 3D arrays of mean/medians of integrated
sequences, for each session structured as:
unexp if by_exp x
quantiles x
ROIs if byroi
- grps (list) : list of sublists per session, each containing
sublists per roi grp with ROI numbers included in
the group: session x roi_grp
Optional args:
- plot_vals (str): which values to return ("unexp", "exp" or "both")
default: "both"
- op (str) : operation to use to compare groups, if plot_vals
is "both"
i.e. "diff": grp1-grp2, or "ratio": grp1/grp2
default: "diff"
- stats (str) : statistic parameter, i.e. "mean" or "median"
default: "mean"
- error (str) : error statistic parameter, i.e. "std" or "sem"
default: "std"
- scale (bool) : if True, data is scaled using first quantile
Returns:
- all_grp_st (4D array): array of group stats (mean/median, error)
structured as:
session x quantile x grp x stat
- all_ns (2D array) : array of group ns, structured as:
session x grp
"""
n_sesses = len(integ_stats)
n_quants = integ_stats[0].shape[1]
n_stats = 2 + (stats == "median" and error == "std")
n_grps = len(grps[0])
all_grp_st = np.empty([n_sesses, n_quants, n_grps, n_stats])
all_ns = np.empty([n_sesses, n_grps], dtype=int)
for i, [sess_data, sess_grps] in enumerate(zip(integ_stats, grps)):
# calculate diff/ratio or retrieve exp/unexp
if plot_vals in ["exp", "unexp"]:
op = ["exp", "unexp"].index(plot_vals)
sess_data = math_util.calc_op(sess_data, op, dim=0)
for g, grp in enumerate(sess_grps):
all_ns[i, g] = len(grp)
all_grp_st[i, :, g, :] = np.nan
if len(grp) != 0:
grp_data = sess_data[:, grp]
if scale:
grp_data, _ = math_util.scale_data(
grp_data, axis=0, pos=0, sc_type="unit")
all_grp_st[i, :, g] = math_util.get_stats(
grp_data, stats, error, axes=1).T
return all_grp_st, all_ns
#############################################
def grp_traces_by_qu_unexp_sess(trace_data, analyspar, roigrppar, all_roi_grps):
"""
grp_traces_by_qu_unexp_sess(trace_data, analyspar, roigrppar, all_roi_grps)
    Returns trace statistics for each ROI group, per session and quantile.

    Required args:
- trace_data (list) : list of 4D array of mean/medians traces
for each session, structured as:
unexp x quantiles x ROIs x frames
- analyspar (AnalysPar): named tuple containing analysis parameters
- roigrppar (RoiGrpPar): named tuple containing roi grouping parameters
- all_roi_grps (list) : list of sublists per session, each containing
sublists per roi grp with ROI numbers included
in the group: session x roi_grp
Returns:
- grp_stats (list): nested list of statistics for ROI groups
structured as:
sess x qu x ROI grp x stats x frame
"""
# calculate diff/ratio or retrieve exp/unexp
op = roigrppar.op
if roigrppar.plot_vals in ["exp", "unexp"]:
op = ["exp", "unexp"].index(roigrppar.plot_vals)
data_me = [math_util.calc_op(sess_me, op, dim=0) for sess_me in trace_data]
n_sesses = len(data_me)
n_quants = data_me[0].shape[0]
n_stats = 2 + (analyspar.stats == "median" and analyspar.error == "std")
n_frames = [me.shape[2] for me in data_me]
# sess x quantile (first/last) x ROI grp
empties = [np.empty([n_stats, n_fr]) * np.nan for n_fr in n_frames]
grp_stats = [[[] for _ in range(n_quants)] for _ in range(n_sesses)]
for i, sess in enumerate(data_me):
for q, quant in enumerate(sess):
for g, grp_rois in enumerate(all_roi_grps[i]):
# leave NaNs if no ROIs in group
if len(grp_rois) != 0:
grp_st = math_util.get_stats(
quant[grp_rois], analyspar.stats, analyspar.error,
axes=0)
else:
grp_st = empties[i]
grp_stats[i][q].append(grp_st.tolist())
return grp_stats
#############################################
def get_signif_rois(integ_data, permpar, stats="mean", op="diff", nanpol=None,
log_rois=True):
"""
get_signif_rois(integ_data, permpar)
    Identifies ROIs showing significant differences between unexpected and
    expected sequences, based on permutation tests.
Required args:
- integ_data (list): list of 2D array of ROI activity integrated
across frames.
unexp (0, 1) x array[ROI x sequences]
- permpar (PermPar): named tuple containing permutation parameters
(multcomp does not apply to identifiying
significant ROIs)
Optional args:
- stats (str) : statistic parameter, i.e. "mean" or "median"
default: "mean"
- op (str) : operation to identify significant ROIs
default: "diff"
- nanpol (str) : policy for NaNs, "omit" or None when taking
statistics
default: None
- log_rois (bool): if True, the indices of significant ROIs and
their actual difference values are logged
Returns:
- sign_rois (list): list of ROIs showing significant differences, or
list of lists if 2-tailed analysis [lo, up].
"""
n_exp = integ_data[1].shape[1]
# calculate real values (average across seqs)
data = [math_util.mean_med(integ_data[0], stats, axis=1, nanpol=nanpol),
math_util.mean_med(integ_data[1], stats, axis=1, nanpol=nanpol)]
# ROI x seq
qu_data_res = math_util.calc_op(np.asarray(data), op, dim=0)
# concatenate unexp and exp from quantile
qu_data_all = np.concatenate(integ_data, axis=1)
# run permutation to identify significant ROIs
all_rand_res = rand_util.permute_diff_ratio(
qu_data_all, n_exp, permpar.n_perms, stats, nanpol, op)
sign_rois = rand_util.id_elem(
all_rand_res, qu_data_res, permpar.tails, permpar.p_val,
log_elems=log_rois)
return sign_rois
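
# Example (hypothetical shapes): with unexpected and expected integrated
# activity arrays, each of shape (n_rois, n_seqs), and a PermPar named tuple:
#   sign_rois = get_signif_rois([unexp_arr, exp_arr], permpar)
# For a 2-tailed permpar.tails, sign_rois is a list [lo_rois, up_rois].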
#############################################
def signif_rois_by_grp_sess(sessids, integ_data, permpar, roigrppar,
qu_labs=["first quant", "last quant"],
stats="mean", nanpol=None):
"""
signif_rois_by_grp_sess(sessids, integ_data, permpar, roigrppar)
    Identifies ROIs showing significant unexpected responses in specified
    quantiles, and groups them accordingly.
Required args:
- sessids (list) : list of Session IDs
- integ_data (list) : list of 2D array of ROI activity integrated
across frames. Should only include quantiles
retained for analysis:
sess x unexp (0, 1) x quantiles x
array[ROI x sequences]
- permpar (PermPar) : named tuple containing permutation parameters
- roigrppar (RoiGrpPar): named tuple containing roi grouping parameters
Optional args:
- qu_labs (list): quantiles being compared
default: ["first Q", "last Q"]
- stats (str) : statistic parameter, i.e. "mean" or "median"
default: "mean"
- nanpol (str) : policy for NaNs, "omit" or None when taking statistics
default: None
Returns:
- all_roi_grps (list) : list of sublists per session, containing ROI
numbers included in each group, structured as
follows:
if sets of groups are passed:
session x set x roi_grp
if one group is passed:
session x roi_grp
- grp_names (list) : list of names of the ROI groups in roi grp
lists (order preserved)
"""
if len(qu_labs) != 2:
raise ValueError("Identifying significant ROIs is only implemented "
"for 2 quantiles.")
logger.info("Identifying ROIs showing significant unexpected responses in "
f"{qu_labs[0].capitalize()} and/or {qu_labs[1].capitalize()}.",
extra={"spacing": "\n"})
all_roi_grps = []
for sessid, sess_data in zip(sessids, integ_data):
logger.info(f"Session {sessid}", extra={"spacing": "\n"})
sess_rois = []
nrois = sess_data[0][0].shape[0]
for q, q_lab in enumerate(qu_labs):
logger.info(f"{q_lab.capitalize()}", extra={"spacing": TAB})
sign_rois = get_signif_rois(
[sess_data[0][q], sess_data[1][q]], permpar, stats,
roigrppar.op, nanpol)
sess_rois.append(sign_rois)
grps = gen_util.list_if_not(roigrppar.grps)
if len(grps) == 1:
roi_grps, grp_names = sep_grps(
sess_rois, nrois=nrois, grps=roigrppar.grps,
tails=permpar.tails, add_exp=roigrppar.add_exp)
else:
roi_grps = []
for grp_set in roigrppar.grps:
roi_grps_set, _ = sep_grps(
sess_rois, nrois=nrois, grps=grp_set, tails=permpar.tails,
add_exp=False)
# flat, without duplicates
flat_grp = sorted(
list(set([roi for grp in roi_grps_set for roi in grp])))
roi_grps.append(flat_grp)
grp_names = roigrppar.grps
all_roi_grps.append(roi_grps)
return all_roi_grps, grp_names
|
# generate gene list from genes highly correlated with CD38-high B cells
# fwrite() comes from data.table and %>% from dplyr; attach them explicitly
# here in case load_sig.r does not already do so
library(dplyr)
library(data.table)
source("R/functions/load_sig.r")
dir.create(file.path(PROJECT_DIR, "generated_data", "signatures"), showWarnings = F)
fn.cd38.cor = file.path(PROJECT_DIR, "generated_data", "CHI", "robust_corr_genes.txt")
gene.sig = load_sig(fn.cd38.cor, "cor.mean.sd.ratio", ntop=10)
fn.cd38.sig = file.path(PROJECT_DIR, "generated_data", "signatures", "CD38_ge_sig.txt")
gene.sig %>% as.data.frame() %>%
fwrite(fn.cd38.sig, col.names = F)
# generate gene lists from BTMs related to plasma cells
library(tmod)
data(tmod)
mod.id.li = tmod$MODULES %>% dplyr::filter(grepl("plasma",Title, ignore.case = F), Category=="immune") %>%
dplyr::select(ID) %>%
unlist(use.names=F)
mod.id.dc = c("DC.M4.11","DC.M7.7","DC.M7.32")
mod.id = c(mod.id.dc, mod.id.li)
mod.gene = tmod$MODULES2GENES[mod.id]
for(k in mod.id) {
fn.pb.sig = file.path(PROJECT_DIR, "generated_data", "signatures",
sprintf("PB_%s_ge_sig.txt", k))
mod.gene[[k]] %>% as.data.frame() %>%
fwrite(fn.pb.sig, col.names = F)
}
|
SUBROUTINE zreadx6 (IFLTAB, CPATH, IIHEAD, KIHEAD, NIHEAD,
* ICHEAD, KCHEAD, NCHEAD, IUHEAD, KUHEAD, NUHEAD, IDATA,
* KDATA, NDATA, IPLAN, LFOUND)
C
C
C Main routine for retrieving data (zrdbuf6 may also be used)
C
C Written by Bill Charley at HEC, 1989
C
INTEGER IFLTAB(*), IIHEAD(*), ICHEAD(*), IUHEAD(*), IDATA(*)
CHARACTER CPATH*(*)
LOGICAL LFOUND, LEND
integer KIHEAD,KCHEAD,KUHEAD,KDATA,IPLAN,N,J,JSIZE
integer NIHEAD,NCHEAD,NUHEAD,NDATA
C
INCLUDE 'zdsskz.h'
C
INCLUDE 'zdssiz.h'
C
INCLUDE 'zdssmz.h'
C
C
IF (MLEVEL.GE.11) WRITE ( MUNIT, 20) IFLTAB(KUNIT), CPATH,
* KIHEAD, KCHEAD, KUHEAD, KDATA, IPLAN
20 FORMAT (T6,'-----DSS---Debug: Enter zreadx6',/,T10,
* 'UNIT =',I5,' PATH: ',A,/,T10,'KIHEAD:',I8,', KCHEAD:',I8,
* ', KUHEAD:',I8,', KDATA:',I8,', IPLAN:',I4)
C
C
C
C Get the info block, and read the first portion of the header
IFLTAB(KRBNPA) = -1
CALL zrdbuf6 (IFLTAB, CPATH, IIHEAD, 0, N, IDATA, 0, J,
* LEND, IPLAN, LFOUND)
IF (IFLTAB(KSTAT).NE.0) RETURN
C
IF (LFOUND) THEN
C
C Get any internal header area
JSIZE = INFO(NPPWRD+KINIHE)
NIHEAD = JSIZE
JSIZE = MIN0 (JSIZE, KIHEAD)
IF (JSIZE.GT.0)
* CALL zgtrec6(IFLTAB, IIHEAD, JSIZE, INFO(NPPWRD+KIAIHE), .FALSE.)
C
C Get the compression header area
JSIZE = INFO(NPPWRD+KINCHE)
NCHEAD = JSIZE
JSIZE = MIN0 (JSIZE, KCHEAD)
IF (JSIZE.GT.0)
* CALL zgtrec6(IFLTAB, ICHEAD, JSIZE, INFO(NPPWRD+KIACHE), .FALSE.)
C
C Get any user header area
JSIZE = INFO(NPPWRD+KINUHE)
NUHEAD = JSIZE
JSIZE = MIN0 (JSIZE, KUHEAD)
IF (JSIZE.GT.0)
* CALL zgtrec6(IFLTAB, IUHEAD, JSIZE, INFO(NPPWRD+KIAUHE), .FALSE.)
IF (IFLTAB(KSTAT).NE.0) RETURN
C
C Get the data
JSIZE = INFO(NPPWRD+KINDAT)
NDATA = JSIZE
JSIZE = MIN0 (JSIZE, KDATA)
IF (JSIZE.GT.0)
* CALL zgtrec6 (IFLTAB, IDATA, JSIZE, INFO(NPPWRD+KIADAT), .FALSE.)
C
ELSE
C
NIHEAD = 0
NCHEAD = 0
NUHEAD = 0
NDATA = 0
C
ENDIF
C
C
IF (MLEVEL.GE.11) WRITE ( MUNIT,820) NIHEAD, NCHEAD, NUHEAD,
* NDATA
820 FORMAT (T6,'-----DSS---Debug: Exit zreadx6',/,T10,
* 'NIHEAD:',I5,', NCHEAD:',I5,', NUHEAD:',I5,', NDATA:',I5)
C
RETURN
C
END
|
Formal statement is: corollary\<^marker>\<open>tag unimportant\<close> Cauchy_theorem_disc_simple: "\<lbrakk>f holomorphic_on (ball a e); valid_path g; path_image g \<subseteq> ball a e; pathfinish g = pathstart g\<rbrakk> \<Longrightarrow> (f has_contour_integral 0) g" Informal statement is: If $f$ is holomorphic on a disc, and $g$ is a closed path in the disc, then $\int_g f(z) dz = 0$. |
[STATEMENT]
lemma diff_frac_eq_2:
assumes "b \<noteq> (1::real)"
shows "1 - (a - b) / (1 - b) = (1 - a) / (1 - b)"
(is "?L = ?R")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
have b: "1 - b \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 - b \<noteq> 0
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
b \<noteq> 1
goal (1 subgoal):
1. 1 - b \<noteq> 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
1 - b \<noteq> 0
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
hence "?L = (1 - b - (a - b)) / (1 - b)" (is "?L = ?A / ?B")
[PROOF STATE]
proof (prove)
using this:
1 - b \<noteq> 0
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - b - (a - b)) / (1 - b)
[PROOF STEP]
using diff_frac_eq_1
[PROOF STATE]
proof (prove)
using this:
1 - b \<noteq> 0
?b \<noteq> 0 \<Longrightarrow> 1 - ?a / ?b = (?b - ?a) / ?b
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - b - (a - b)) / (1 - b)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
1 - (a - b) / (1 - b) = (1 - b - (a - b)) / (1 - b)
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
1 - (a - b) / (1 - b) = (1 - b - (a - b)) / (1 - b)
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
have "?A = 1 - a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 - b - (a - b) = 1 - a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
1 - b - (a - b) = 1 - a
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
goal (1 subgoal):
1. 1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
1 - (a - b) / (1 - b) = (1 - a) / (1 - b)
goal:
No subgoals!
[PROOF STEP]
qed |
lemma lipschitz_on_cmult_real [lipschitz_intros]: fixes f::"'a::metric_space \<Rightarrow> real" assumes "C-lipschitz_on U f" shows "(abs(a) * C)-lipschitz_on U (\<lambda>x. a * f x)" |
function v = so3_hatinv(xi)
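% SO3_HATINV  Inverse of the so(3) hat map: recovers the vector v from the
% 3x3 skew-symmetric matrix xi, so that hat(v) == xi.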
v = [xi(3,2); xi(1,3); xi(2,1)]; |
module Group
import Monoid
%access public export
||| The proof that b is inverse of a
total
IsInverse : (typ : Type) -> ((*) : typ -> typ -> typ) -> (IdentityExists typ (*)) -> (a : typ) -> (b : typ) -> Type
IsInverse typ (*) pfid a b = ((a*b = fst(pfid)),(b*a = fst(pfid)))
||| Given a type and a binary operation the type of proofs that each element has its inverse
total
InverseExists : (typ : Type) -> ((*) : typ -> typ -> typ) -> Type
InverseExists typ (*) = (pfid : (IdentityExists typ (*)) ** ((a : typ) -> (a_inv ** (IsInverse typ (*) pfid a a_inv))))
--(pfid : (IdentityExists typ (*)) ** ((a : typ) -> (a_inv : typ ** ((a*a_inv = fst(pfid)),(a_inv*a = fst(pfid))))))
||| Given a type and a binary operation the type of proofs that the type along with the
||| operation is a group
total
IsGroup : (grp : Type) -> ((*) : grp -> grp -> grp) -> Type
IsGroup grp (*) = (Associative grp (*), (IdentityExists grp (*), InverseExists grp (*)))
||| Given a group gives it's identity with proof
total
Group_id : (grp : Type) -> ((*) : grp -> grp -> grp) -> (IsGroup grp (*)) -> (IdentityExists grp (*))
Group_id grp (*) pfgrp = (fst (snd pfgrp))
||| Generates inverses with proofs
total
Inv_with_pf : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : IsGroup grp (*)) -> (x : grp)
-> (y : grp ** (IsInverse grp (*) (fst (snd (snd pfgrp))) x y))
Inv_with_pf grp (*) pfgrp x = (snd (snd (snd pfgrp))) x
||| Generates inverses
total
Inv: (grp : Type) -> ((*) : grp -> grp -> grp) -> IsGroup grp (*) -> (x: grp) -> grp
Inv grp (*) pf x = fst (Inv_with_pf grp (*) pf x)
-- fst(snd(snd(snd(pf))) x)
||| Given a group, the type of proofs that it is abelian
total
IsAbelianGrp: (grp : Type) -> ((*) : grp -> grp -> grp) -> Type
IsAbelianGrp grp (*) = (IsGroup grp (*), Commutative grp (*))
--- (a:grp) -> (b:grp) -> (a*b = b*a)
||| The type of proofs that a given function f between x and y is injective
total
Inj: (x: Type) -> (y: Type) -> (f: x-> y) -> Type
Inj x y f = (a : x) -> (b : x) -> (f a = f b) -> (a = b)
||| The type of proofs that a function between groups is a group homomorphism
total
Hom: (grp : Type) -> ((*) : grp -> grp -> grp) -> (IsGroup grp (*)) ->
(g : Type) -> ((+) : g -> g -> g) -> (IsGroup g (+)) ->
(f : grp -> g) -> Type
Hom grp (*) pf1 g (+) pf2 f = ((IsIdentity g (+) e) , (
(a : grp) -> (b : grp) -> ((f (a*b)) = ((f a) + (f b))))) where
e = f(fst (Group_id grp (*) pf1))
||| The type of proofs that a given group is a subgroup of another, via injective homorphisms
total
Subgroup: (h: Type) -> ((+) : h -> h -> h) -> (IsGroup h (+)) ->
(g: Type) -> ((*) : g -> g -> g) -> (IsGroup g (*)) -> Type
Subgroup h (+) pfh g (*) pfg = ( f : (h -> g) **
(Hom h (+) pfh g (*) pfg f , Inj h g f))
--- DPair (h->g) (\f => ((Hom h (+) pfh g (*) pfg f), (Inj h g f)))
||| The type of proofs that a given subgroup is normal/self-conjugate
total
NSub: (h: Type) -> ((+) : h -> h -> h) -> (pfh: IsGroup h (+)) ->
(g: Type) -> ((*) : g -> g -> g) -> (pfg: IsGroup g (*)) ->
(Subgroup h (+) pfh g (*) pfg) -> Type
NSub h (+) pfh g (*) pfg (f ** pff) = (a : h) -> (b : g) -> (x : h ** (b*(f a)*(inv b) = (f x))) where
inv = Inv g (*) pfg
--The type of proofs that a type trav traverses a given coset only once, if at all
CosetInj: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> (f: trav -> g) -> Type
CosetInj h (+) pf1 g (*) pf2 sbgrp trav f = ((x: trav) -> (y: trav) -> (p: h ** (f x) = (incl p)*(f y)) -> (x = y)) where
incl = (fst sbgrp)
--The type of proofs that a type trav traverses every coset of a given group g wrt a subgroup h
CosetAll: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> (f: trav -> g) -> Type
CosetAll h (+) pf1 g (*) pf2 sbgrp trav f = ((a: g) -> (p: h ** (t: trav ** ((f t)*(incl p) = a)))) where
incl = (fst sbgrp)
-- Traversal type - the type of proofs that a given type traverses each coset exactly once
IsTraversal: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> Type
IsTraversal h (+) pf1 g (*) pf2 sbgrp trav = DPair (trav -> g) (\f => (CosetInj h (+) pf1 g (*) pf2 sbgrp trav f, CosetAll h (+) pf1 g (*) pf2 sbgrp trav f))
--Defining the multiplication operation between elements of a traversal
MulTrav: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> (IsTraversal h (+) pf1 g (*) pf2 sbgrp trav) -> trav -> trav -> trav
MulTrav h (+) pf1 g (*) pf2 sbgrp trav pftrav y z = (fst (snd ((snd (snd pftrav)) ((f y)*(f z))))) where
f: trav -> g
f = (fst pftrav)
--Proof of uniqueness of the coset representative in trav, in the following sense
--Proof that the operation that generates coset representative in trav for an element of g (from CosetAll) inverts the function generating a coset representative by going from trav to g (from IsTraversal)
CorepFInv: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> (pftrav: IsTraversal h (+) pf1 g (*) pf2 sbgrp trav) -> (y: trav) -> (((fst (snd ((snd (snd pftrav)) ((fst pftrav) y) ) )) ) = y)
CorepFInv h (+) pf1 g (*) pf2 sbgrp trav pftrav y = (sym ((fst (snd pftrav)) y t (p ** (sym (snd (snd ((snd (snd pftrav)) (f y)) )))) )) where
t: trav
t = (fst (snd ((snd (snd pftrav)) ((fst pftrav) y) ) ))
f: trav -> g
f = (fst pftrav)
p: h
p = (fst ((snd (snd pftrav)) (f y)))
--Proof that the definitional equality for traversal multiplication holds
MulTravDefPf: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> (pftrav: IsTraversal h (+) pf1 g (*) pf2 sbgrp trav) -> (y: trav) -> (z: trav) -> (MulTrav h (+) pf1 g (*) pf2 sbgrp trav pftrav y z) = ((fst (snd ((snd (snd pftrav)) (((fst pftrav) y)*((fst pftrav) z))) )))
MulTravDefPf h (+) pf1 g (*) pf2 sbgrp trav pftrav y z = Refl
--Proof that the function from g to trav recovered from CosetAll is in some sense a magma homomorphism
MulTravHomPf: (h: Type) -> ((+) : h -> h -> h) -> (pf1: IsGroup h (+)) -> (g: Type) -> ((*) : g -> g -> g) -> (pf2: IsGroup g (*)) -> (sbgrp: Subgroup h (+) pf1 g (*) pf2) -> (trav: Type) -> (pftrav: IsTraversal h (+) pf1 g (*) pf2 sbgrp trav) -> (y: trav) -> (z: trav) -> ((fst (snd ((snd (snd pftrav)) (((fst pftrav) y)*((fst pftrav) z))) ))) = (MulTrav h (+) pf1 g (*) pf2 sbgrp trav pftrav ((fst (snd ((snd (snd pftrav)) ((fst pftrav) y)) ))) ((fst (snd ((snd (snd pftrav)) ((fst pftrav) z)) ))))
MulTravHomPf h (+) pf1 g (*) pf2 sbgrp trav pftrav y z = trans (sym (MulTravDefProof)) (trans (cong {f = f1} (sym CorepFInverseY)) (cong {f = f2} (sym CorepFInverseZ))) where
  MulTravDefProof: (MulTrav h (+) pf1 g (*) pf2 sbgrp trav pftrav y z) = ((fst (snd ((snd (snd pftrav)) (((fst pftrav) y)*((fst pftrav) z))) )))
  MulTravDefProof = (MulTravDefPf h (+) pf1 g (*) pf2 sbgrp trav pftrav y z )
  f1: trav -> trav
  f1 x = MulTrav h (+) pf1 g (*) pf2 sbgrp trav pftrav x z
  f2: trav -> trav
  f2 x = MulTrav h (+) pf1 g (*) pf2 sbgrp trav pftrav ((fst (snd ((snd (snd pftrav)) ((fst pftrav) y) ) )) ) x
  CorepFInverseY: (((fst (snd ((snd (snd pftrav)) ((fst pftrav) y) ) )) ) = y)
  CorepFInverseY = CorepFInv h (+) pf1 g (*) pf2 sbgrp trav pftrav y
  CorepFInverseZ: (((fst (snd ((snd (snd pftrav)) ((fst pftrav) z) ) )) ) = z)
  CorepFInverseZ = CorepFInv h (+) pf1 g (*) pf2 sbgrp trav pftrav z
|
Plan B has announced a nationwide UK tour this autumn.
The rapper-turned-singer, who scored a UK Number One album with ‘The Defamation Of Strickland Banks’ yesterday (April 18), will hit the road starting on October 6 in Nottingham.
Tickets go on sale this Friday (April 23). To check the availability of Plan B tickets and get all the latest listings, go to NME.COM/TICKETS now, or call 0871 230 1094. |
-- In a module instantiation 'module A = e', 'e' should have the form 'm e1 ..
-- en' where 'm' is a module name.
module NotAModuleExpr where
module Bad = \x -> x
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module order.circular
! leanprover-community/mathlib commit a2d2e18906e2b62627646b5d5be856e6a642062f
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Set.Basic
import Mathlib.Tactic.Set
/-!
# Circular order hierarchy
This file defines circular preorders, circular partial orders and circular orders.
## Hierarchy
* A ternary "betweenness" relation `btw : α → α → α → Prop` forms a `CircularOrder` if it is
- reflexive: `btw a a a`
- cyclic: `btw a b c → btw b c a`
- antisymmetric: `btw a b c → btw c b a → a = b ∨ b = c ∨ c = a`
- total: `btw a b c ∨ btw c b a`
along with a strict betweenness relation `sbtw : α → α → α → Prop` which respects
`sbtw a b c ↔ btw a b c ∧ ¬ btw c b a`, analogously to how `<` and `≤` are related, and is
- transitive: `sbtw a b c → sbtw b d c → sbtw a d c`.
* A `CircularPartialOrder` drops totality.
* A `CircularPreorder` further drops antisymmetry.
The intuition is that a circular order is a circle and `btw a b c` means that going around
clockwise from `a` you reach `b` before `c` (`b` is between `a` and `c` is meaningless on an
unoriented circle). A circular partial order is several, potentially intersecting, circles. A
circular preorder is like a circular partial order, but several points can coexist.
Note that the relations between `CircularPreorder`, `CircularPartialOrder` and `CircularOrder`
are subtler than between `Preorder`, `PartialOrder`, `LinearOrder`. In particular, one cannot
simply extend the `btw` of a `CircularPartialOrder` to make it a `CircularOrder`.
One can translate from usual orders to circular ones by "closing the necklace at infinity". See
`LE.toBtw` and `LT.toSBtw`. Going the other way involves "cutting the necklace" or
"rolling the necklace open".
## Examples
Some concrete circular orders one encounters in the wild are `ZMod n` for `0 < n`, `Circle`,
`Real.Angle`...
## Main definitions
* `Set.cIcc`: Closed-closed circular interval.
* `Set.cIoo`: Open-open circular interval.
## Notes
There's an unsolved diamond on `OrderDual α` here. The instances `LE α → Btw αᵒᵈ` and
`LT α → SBtw αᵒᵈ` can each be inferred in two ways:
* `LE α` → `Btw α` → `Btw αᵒᵈ` vs
`LE α` → `LE αᵒᵈ` → `Btw αᵒᵈ`
* `LT α` → `SBtw α` → `SBtw αᵒᵈ` vs
`LT α` → `LT αᵒᵈ` → `SBtw αᵒᵈ`
The fields are propeq, but not defeq. It is temporarily fixed by turning the circularizing instances
into definitions.
## TODO
Antisymmetry is quite weak in the sense that there's no way to discriminate which two points are
equal. This prevents defining closed-open intervals `cIco` and `cIoc` in the neat `=`-less way. We
currently haven't defined them at all.
What is the correct generality of "rolling the necklace" open? At least, this works for `α × β` and
`β × α` where `α` is a circular order and `β` is a linear order.
What's next is to define circular groups and provide instances for `ZMod n`, the usual circle group
`Circle`, `Real.Angle`, and `RootsOfUnity M`. What conditions do we need on `M` for this last one
to work?
We should have circular order homomorphisms. The typical example is
`days_to_month : days_of_the_year →c months_of_the_year` which relates the circular order of days
and the circular order of months. Is `α →c β` a good notation?
## References
* https://en.wikipedia.org/wiki/Cyclic_order
* https://en.wikipedia.org/wiki/Partial_cyclic_order
## Tags
circular order, cyclic order, circularly ordered set, cyclically ordered set
-/
/-- Syntax typeclass for a betweenness relation. -/
class Btw (α : Type _) where
/-- Betweenness for circular orders. `btw a b c` states that `b` is between `a` and `c` (in that
order). -/
btw : α → α → α → Prop
#align has_btw Btw
export Btw (btw)
/-- Syntax typeclass for a strict betweenness relation. -/
class SBtw (α : Type _) where
/-- Strict betweenness for circular orders. `sbtw a b c` states that `b` is strictly between `a`
and `c` (in that order). -/
sbtw : α → α → α → Prop
#align has_sbtw SBtw
export SBtw (sbtw)
/-- A circular preorder is the analogue of a preorder where you can loop around. `≤` and `<` are
replaced by ternary relations `btw` and `sbtw`. `btw` is reflexive and cyclic. `sbtw` is transitive.
-/
class CircularPreorder (α : Type _) extends Btw α, SBtw α where
/-- `a` is between `a` and `a`. -/
btw_refl (a : α) : btw a a a
/-- If `b` is between `a` and `c`, then `c` is between `b` and `a`.
This is motivated by imagining three points on a circle. -/
btw_cyclic_left {a b c : α} : btw a b c → btw b c a
sbtw := fun a b c => btw a b c ∧ ¬btw c b a
/-- Strict betweenness is given by betweenness in one direction and non-betweenness in the other.
I.e., if `b` is between `a` and `c` but not between `c` and `a`, then we say `b` is strictly
between `a` and `c`. -/
sbtw_iff_btw_not_btw {a b c : α} : sbtw a b c ↔ btw a b c ∧ ¬btw c b a := by intros; rfl
/-- For any fixed `c`, `fun a b ↦ sbtw a b c` is a transitive relation.
I.e., given `a` `b` `d` `c` in that "order", if we have `b` strictly between `a` and `c`, and `d`
strictly between `b` and `c`, then `d` is strictly between `a` and `c`. -/
sbtw_trans_left {a b c d : α} : sbtw a b c → sbtw b d c → sbtw a d c
#align circular_preorder CircularPreorder
export CircularPreorder (btw_refl btw_cyclic_left sbtw_trans_left)
/-- A circular partial order is the analogue of a partial order where you can loop around. `≤` and
`<` are replaced by ternary relations `btw` and `sbtw`. `btw` is reflexive, cyclic and
antisymmetric. `sbtw` is transitive. -/
class CircularPartialOrder (α : Type _) extends CircularPreorder α where
/-- If `b` is between `a` and `c` and also between `c` and `a`, then at least one pair of points
among `a`, `b`, `c` are identical. -/
btw_antisymm {a b c : α} : btw a b c → btw c b a → a = b ∨ b = c ∨ c = a
#align circular_partial_order CircularPartialOrder
export CircularPartialOrder (btw_antisymm)
/-- A circular order is the analogue of a linear order where you can loop around. `≤` and `<` are
replaced by ternary relations `btw` and `sbtw`. `btw` is reflexive, cyclic, antisymmetric and total.
`sbtw` is transitive. -/
class CircularOrder (α : Type _) extends CircularPartialOrder α where
/-- For any triple of points, the second is between the other two one way or another. -/
btw_total : ∀ a b c : α, btw a b c ∨ btw c b a
#align circular_order CircularOrder
export CircularOrder (btw_total)
/-! ### Circular preorders -/
section CircularPreorder
variable {α : Type _} [CircularPreorder α]
theorem btw_rfl {a : α} : btw a a a :=
btw_refl _
#align btw_rfl btw_rfl
-- TODO: `alias` creates a def instead of a lemma.
-- alias btw_cyclic_left ← has_btw.btw.cyclic_left
theorem Btw.btw.cyclic_left {a b c : α} (h : btw a b c) : btw b c a :=
btw_cyclic_left h
#align has_btw.btw.cyclic_left Btw.btw.cyclic_left
theorem btw_cyclic_right {a b c : α} (h : btw a b c) : btw c a b :=
h.cyclic_left.cyclic_left
#align btw_cyclic_right btw_cyclic_right
alias btw_cyclic_right ← Btw.btw.cyclic_right
#align has_btw.btw.cyclic_right Btw.btw.cyclic_right
/-- The order of the `↔` has been chosen so that `rw btw_cyclic` cycles to the right while
`rw ←btw_cyclic` cycles to the left (thus following the prepended arrow). -/
theorem btw_cyclic {a b c : α} : btw a b c ↔ btw c a b :=
⟨btw_cyclic_right, btw_cyclic_left⟩
#align btw_cyclic btw_cyclic
theorem sbtw_iff_btw_not_btw {a b c : α} : sbtw a b c ↔ btw a b c ∧ ¬btw c b a :=
CircularPreorder.sbtw_iff_btw_not_btw
#align sbtw_iff_btw_not_btw sbtw_iff_btw_not_btw
theorem btw_of_sbtw {a b c : α} (h : sbtw a b c) : btw a b c :=
(sbtw_iff_btw_not_btw.1 h).1
#align btw_of_sbtw btw_of_sbtw
alias btw_of_sbtw ← SBtw.sbtw.btw
#align has_sbtw.sbtw.btw SBtw.sbtw.btw
theorem not_btw_of_sbtw {a b c : α} (h : sbtw a b c) : ¬btw c b a :=
(sbtw_iff_btw_not_btw.1 h).2
#align not_btw_of_sbtw not_btw_of_sbtw
alias not_btw_of_sbtw ← SBtw.sbtw.not_btw
#align has_sbtw.sbtw.not_btw SBtw.sbtw.not_btw
theorem not_sbtw_of_btw {a b c : α} (h : btw a b c) : ¬sbtw c b a := fun h' => h'.not_btw h
#align not_sbtw_of_btw not_sbtw_of_btw
alias not_sbtw_of_btw ← Btw.btw.not_sbtw
#align has_btw.btw.not_sbtw Btw.btw.not_sbtw
theorem sbtw_of_btw_not_btw {a b c : α} (habc : btw a b c) (hcba : ¬btw c b a) : sbtw a b c :=
sbtw_iff_btw_not_btw.2 ⟨habc, hcba⟩
#align sbtw_of_btw_not_btw sbtw_of_btw_not_btw
alias sbtw_of_btw_not_btw ← Btw.btw.sbtw_of_not_btw
#align has_btw.btw.sbtw_of_not_btw Btw.btw.sbtw_of_not_btw
theorem sbtw_cyclic_left {a b c : α} (h : sbtw a b c) : sbtw b c a :=
h.btw.cyclic_left.sbtw_of_not_btw fun h' => h.not_btw h'.cyclic_left
#align sbtw_cyclic_left sbtw_cyclic_left
alias sbtw_cyclic_left ← SBtw.sbtw.cyclic_left
#align has_sbtw.sbtw.cyclic_left SBtw.sbtw.cyclic_left
theorem sbtw_cyclic_right {a b c : α} (h : sbtw a b c) : sbtw c a b :=
h.cyclic_left.cyclic_left
#align sbtw_cyclic_right sbtw_cyclic_right
alias sbtw_cyclic_right ← SBtw.sbtw.cyclic_right
#align has_sbtw.sbtw.cyclic_right SBtw.sbtw.cyclic_right
/-- The order of the `↔` has been chosen so that `rw sbtw_cyclic` cycles to the right while
`rw ←sbtw_cyclic` cycles to the left (thus following the prepended arrow). -/
theorem sbtw_cyclic {a b c : α} : sbtw a b c ↔ sbtw c a b :=
⟨sbtw_cyclic_right, sbtw_cyclic_left⟩
#align sbtw_cyclic sbtw_cyclic
-- TODO: `alias` creates a def instead of a lemma.
-- alias btw_trans_left ← has_btw.btw.trans_left
theorem SBtw.sbtw.trans_left {a b c d : α} (h : sbtw a b c) : sbtw b d c → sbtw a d c :=
sbtw_trans_left h
#align has_sbtw.sbtw.trans_left SBtw.sbtw.trans_left
theorem sbtw_trans_right {a b c d : α} (hbc : sbtw a b c) (hcd : sbtw a c d) : sbtw a b d :=
(hbc.cyclic_left.trans_left hcd.cyclic_left).cyclic_right
#align sbtw_trans_right sbtw_trans_right
alias sbtw_trans_right ← SBtw.sbtw.trans_right
#align has_sbtw.sbtw.trans_right SBtw.sbtw.trans_right
theorem sbtw_asymm {a b c : α} (h : sbtw a b c) : ¬sbtw c b a :=
h.btw.not_sbtw
#align sbtw_asymm sbtw_asymm
alias sbtw_asymm ← SBtw.sbtw.not_sbtw
#align has_sbtw.sbtw.not_sbtw SBtw.sbtw.not_sbtw
theorem sbtw_irrefl_left_right {a b : α} : ¬sbtw a b a := fun h => h.not_btw h.btw
#align sbtw_irrefl_left_right sbtw_irrefl_left_right
theorem sbtw_irrefl_left {a b : α} : ¬sbtw a a b := fun h => sbtw_irrefl_left_right h.cyclic_left
#align sbtw_irrefl_left sbtw_irrefl_left
theorem sbtw_irrefl_right {a b : α} : ¬sbtw a b b := fun h => sbtw_irrefl_left_right h.cyclic_right
#align sbtw_irrefl_right sbtw_irrefl_right
theorem sbtw_irrefl (a : α) : ¬sbtw a a a :=
sbtw_irrefl_left_right
#align sbtw_irrefl sbtw_irrefl
end CircularPreorder
/-! ### Circular partial orders -/
section CircularPartialOrder
variable {α : Type _} [CircularPartialOrder α]
-- TODO: `alias` creates a def instead of a lemma.
-- alias btw_antisymm ← has_btw.btw.antisymm
theorem Btw.btw.antisymm {a b c : α} (h : btw a b c) : btw c b a → a = b ∨ b = c ∨ c = a :=
btw_antisymm h
#align has_btw.btw.antisymm Btw.btw.antisymm
end CircularPartialOrder
/-! ### Circular orders -/
section CircularOrder
variable {α : Type _} [CircularOrder α]
theorem btw_refl_left_right (a b : α) : btw a b a :=
(or_self_iff _).1 (btw_total a b a)
#align btw_refl_left_right btw_refl_left_right
theorem btw_rfl_left_right {a b : α} : btw a b a :=
btw_refl_left_right _ _
#align btw_rfl_left_right btw_rfl_left_right
theorem btw_refl_left (a b : α) : btw a a b :=
btw_rfl_left_right.cyclic_right
#align btw_refl_left btw_refl_left
theorem btw_rfl_left {a b : α} : btw a a b :=
btw_refl_left _ _
#align btw_rfl_left btw_rfl_left
theorem btw_refl_right (a b : α) : btw a b b :=
btw_rfl_left_right.cyclic_left
#align btw_refl_right btw_refl_right
theorem btw_rfl_right {a b : α} : btw a b b :=
btw_refl_right _ _
#align btw_rfl_right btw_rfl_right
theorem sbtw_iff_not_btw {a b c : α} : sbtw a b c ↔ ¬btw c b a := by
rw [sbtw_iff_btw_not_btw]
exact and_iff_right_of_imp (btw_total _ _ _).resolve_left
#align sbtw_iff_not_btw sbtw_iff_not_btw
theorem btw_iff_not_sbtw {a b c : α} : btw a b c ↔ ¬sbtw c b a :=
iff_not_comm.1 sbtw_iff_not_btw
#align btw_iff_not_sbtw btw_iff_not_sbtw
end CircularOrder
/-! ### Circular intervals -/
namespace Set
section CircularPreorder
variable {α : Type _} [CircularPreorder α]
/-- Closed-closed circular interval -/
def cIcc (a b : α) : Set α :=
{ x | btw a x b }
#align set.cIcc Set.cIcc
/-- Open-open circular interval -/
def cIoo (a b : α) : Set α :=
{ x | sbtw a x b }
#align set.cIoo Set.cIoo
@[simp]
theorem mem_cIcc {a b x : α} : x ∈ cIcc a b ↔ btw a x b :=
Iff.rfl
#align set.mem_cIcc Set.mem_cIcc
@[simp]
theorem mem_cIoo {a b x : α} : x ∈ cIoo a b ↔ sbtw a x b :=
Iff.rfl
#align set.mem_cIoo Set.mem_cIoo
end CircularPreorder
section CircularOrder
variable {α : Type _} [CircularOrder α]
theorem left_mem_cIcc (a b : α) : a ∈ cIcc a b :=
btw_rfl_left
#align set.left_mem_cIcc Set.left_mem_cIcc
theorem right_mem_cIcc (a b : α) : b ∈ cIcc a b :=
btw_rfl_right
#align set.right_mem_cIcc Set.right_mem_cIcc
theorem compl_cIcc {a b : α} : (cIcc a b)ᶜ = cIoo b a := by
ext
rw [Set.mem_cIoo, sbtw_iff_not_btw]
rfl
#align set.compl_cIcc Set.compl_cIcc
theorem compl_cIoo {a b : α} : (cIoo a b)ᶜ = cIcc b a := by
ext
rw [Set.mem_cIcc, btw_iff_not_sbtw]
rfl
#align set.compl_cIoo Set.compl_cIoo
end CircularOrder
end Set
/-! ### Circularizing instances -/
/-- The betweenness relation obtained from "looping around" `≤`.
See note [reducible non-instances]. -/
@[reducible]
def LE.toBtw (α : Type _) [LE α] : Btw α where
btw a b c := a ≤ b ∧ b ≤ c ∨ b ≤ c ∧ c ≤ a ∨ c ≤ a ∧ a ≤ b
#align has_le.to_has_btw LE.toBtw
/-- The strict betweenness relation obtained from "looping around" `<`.
See note [reducible non-instances]. -/
@[reducible]
def LT.toSBtw (α : Type _) [LT α] : SBtw α where
sbtw a b c := a < b ∧ b < c ∨ b < c ∧ c < a ∨ c < a ∧ a < b
#align has_lt.to_has_sbtw LT.toSBtw
/-- The circular preorder obtained from "looping around" a preorder.
See note [reducible non-instances]. -/
@[reducible]
def Preorder.toCircularPreorder (α : Type _) [Preorder α] : CircularPreorder α where
btw a b c := a ≤ b ∧ b ≤ c ∨ b ≤ c ∧ c ≤ a ∨ c ≤ a ∧ a ≤ b
sbtw a b c := a < b ∧ b < c ∨ b < c ∧ c < a ∨ c < a ∧ a < b
btw_refl a := Or.inl ⟨le_rfl, le_rfl⟩
btw_cyclic_left {a b c} h := by
dsimp
rwa [← or_assoc, or_comm]
sbtw_trans_left {a b c d} := by
rintro (⟨hab, hbc⟩ | ⟨hbc, hca⟩ | ⟨hca, hab⟩) (⟨hbd, hdc⟩ | ⟨hdc, hcb⟩ | ⟨hcb, hbd⟩)
· exact Or.inl ⟨hab.trans hbd, hdc⟩
· exact (hbc.not_lt hcb).elim
· exact (hbc.not_lt hcb).elim
· exact Or.inr (Or.inl ⟨hdc, hca⟩)
· exact Or.inr (Or.inl ⟨hdc, hca⟩)
· exact (hbc.not_lt hcb).elim
· exact Or.inr (Or.inl ⟨hdc, hca⟩)
· exact Or.inr (Or.inl ⟨hdc, hca⟩)
· exact Or.inr (Or.inr ⟨hca, hab.trans hbd⟩)
sbtw_iff_btw_not_btw {a b c} := by
simp_rw [lt_iff_le_not_le]
have := le_trans a b c
have := le_trans b c a
have := le_trans c a b
tauto
#align preorder.to_circular_preorder Preorder.toCircularPreorder
/-- The circular partial order obtained from "looping around" a partial order.
See note [reducible non-instances]. -/
@[reducible]
def PartialOrder.toCircularPartialOrder (α : Type _) [PartialOrder α] : CircularPartialOrder α :=
{ Preorder.toCircularPreorder α with
btw_antisymm := fun {a b c} => by
rintro (⟨hab, hbc⟩ | ⟨hbc, hca⟩ | ⟨hca, hab⟩) (⟨hcb, hba⟩ | ⟨hba, hac⟩ | ⟨hac, hcb⟩)
· exact Or.inl (hab.antisymm hba)
· exact Or.inl (hab.antisymm hba)
· exact Or.inr (Or.inl <| hbc.antisymm hcb)
· exact Or.inr (Or.inl <| hbc.antisymm hcb)
· exact Or.inr (Or.inr <| hca.antisymm hac)
· exact Or.inr (Or.inl <| hbc.antisymm hcb)
· exact Or.inl (hab.antisymm hba)
· exact Or.inl (hab.antisymm hba)
· exact Or.inr (Or.inr <| hca.antisymm hac) }
#align partial_order.to_circular_partial_order PartialOrder.toCircularPartialOrder
/-- The circular order obtained from "looping around" a linear order.
See note [reducible non-instances]. -/
@[reducible]
def LinearOrder.toCircularOrder (α : Type _) [LinearOrder α] : CircularOrder α :=
{ PartialOrder.toCircularPartialOrder α with
btw_total := fun a b c => by
cases' le_total a b with hab hba <;> cases' le_total b c with hbc hcb <;>
cases' le_total c a with hca hac
· exact Or.inl (Or.inl ⟨hab, hbc⟩)
· exact Or.inl (Or.inl ⟨hab, hbc⟩)
· exact Or.inl (Or.inr <| Or.inr ⟨hca, hab⟩)
· exact Or.inr (Or.inr <| Or.inr ⟨hac, hcb⟩)
· exact Or.inl (Or.inr <| Or.inl ⟨hbc, hca⟩)
· exact Or.inr (Or.inr <| Or.inl ⟨hba, hac⟩)
· exact Or.inr (Or.inl ⟨hcb, hba⟩)
· exact Or.inr (Or.inr <| Or.inl ⟨hba, hac⟩) }
#align linear_order.to_circular_order LinearOrder.toCircularOrder
/-! ### Dual constructions -/
namespace OrderDual
instance btw (α : Type _) [Btw α] : Btw αᵒᵈ :=
⟨fun a b c : α => Btw.btw c b a⟩
instance sbtw (α : Type _) [SBtw α] : SBtw αᵒᵈ :=
⟨fun a b c : α => SBtw.sbtw c b a⟩
instance circularPreorder (α : Type _) [CircularPreorder α] : CircularPreorder αᵒᵈ :=
{ OrderDual.btw α,
OrderDual.sbtw α with
btw_refl := fun _ => @btw_refl α _ _
btw_cyclic_left := fun {_ _ _} => @btw_cyclic_right α _ _ _ _
sbtw_trans_left := fun {_ _ _ _} habc hbdc => hbdc.trans_right habc
sbtw_iff_btw_not_btw := fun {a b c} => @sbtw_iff_btw_not_btw α _ c b a }
instance circularPartialOrder (α : Type _) [CircularPartialOrder α] : CircularPartialOrder αᵒᵈ :=
{ OrderDual.circularPreorder α with
btw_antisymm := fun {_ _ _} habc hcba => @btw_antisymm α _ _ _ _ hcba habc }
instance (α : Type _) [CircularOrder α] : CircularOrder αᵒᵈ :=
{ OrderDual.circularPartialOrder α with
btw_total := fun {a b c} => @btw_total α _ c b a }
end OrderDual
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
(* License: BSD, terms see file ./LICENSE *)
(* Definitions supporting the extremely long CTypes.thy *)
theory CTypesDefs
imports
"~~/src/HOL/Library/Prefix_Order"
"../../lib/SignedWords"
CTypesBase
begin
section "C types setup"
type_synonym field_name = string
type_synonym qualified_field_name = "field_name list"
type_synonym typ_name = string
text {* A typ_desc wraps a typ_struct with a typ name.
A typ_struct is either a Scalar, with size, alignment and either a
field accessor/updator pair (for typ_info) or a 'normalisor'
(for typ_uinfo), or an Aggregate, with a list of typ_desc,
field name pairs.*}
datatype (plugins del: size)
'a typ_desc = TypDesc "'a typ_struct" typ_name
and 'a typ_struct = TypScalar nat nat "'a" |
TypAggregate "('a typ_desc,field_name) dt_pair list"
(* FIXME: eliminate eventually *)
datatype_compat dt_pair
datatype_compat typ_desc typ_struct
(* FIXME: these recreate the precise order of subgoals of the old datatype package *)
lemma typ_desc_induct:
"\<lbrakk>\<And>typ_struct list. P2 typ_struct \<Longrightarrow> P1 (TypDesc typ_struct list); \<And>nat1 nat2 a. P2 (TypScalar nat1 nat2 a);
\<And>list. P3 list \<Longrightarrow> P2 (TypAggregate list); P3 []; \<And>dt_pair list. \<lbrakk>P4 dt_pair; P3 list\<rbrakk> \<Longrightarrow> P3 (dt_pair # list);
\<And>typ_desc list. P1 typ_desc \<Longrightarrow> P4 (DTPair typ_desc list)\<rbrakk>
\<Longrightarrow> P1 typ_desc"
by (rule compat_typ_desc.induct)
lemma typ_struct_induct:
"\<lbrakk>\<And>typ_struct list. P2 typ_struct \<Longrightarrow> P1 (TypDesc typ_struct list); \<And>nat1 nat2 a. P2 (TypScalar nat1 nat2 a);
\<And>list. P3 list \<Longrightarrow> P2 (TypAggregate list); P3 []; \<And>dt_pair list. \<lbrakk>P4 dt_pair; P3 list\<rbrakk> \<Longrightarrow> P3 (dt_pair # list);
\<And>typ_desc list. P1 typ_desc \<Longrightarrow> P4 (DTPair typ_desc list)\<rbrakk>
\<Longrightarrow> P2 typ_struct"
by (rule compat_typ_struct.induct)
lemma typ_list_induct:
"\<lbrakk>\<And>typ_struct list. P2 typ_struct \<Longrightarrow> P1 (TypDesc typ_struct list); \<And>nat1 nat2 a. P2 (TypScalar nat1 nat2 a);
\<And>list. P3 list \<Longrightarrow> P2 (TypAggregate list); P3 []; \<And>dt_pair list. \<lbrakk>P4 dt_pair; P3 list\<rbrakk> \<Longrightarrow> P3 (dt_pair # list);
\<And>typ_desc list. P1 typ_desc \<Longrightarrow> P4 (DTPair typ_desc list)\<rbrakk>
\<Longrightarrow> P3 list"
by (rule compat_typ_desc_char_list_dt_pair_list.induct)
lemma typ_dt_pair_induct:
"\<lbrakk>\<And>typ_struct list. P2 typ_struct \<Longrightarrow> P1 (TypDesc typ_struct list); \<And>nat1 nat2 a. P2 (TypScalar nat1 nat2 a);
\<And>list. P3 list \<Longrightarrow> P2 (TypAggregate list); P3 []; \<And>dt_pair list. \<lbrakk>P4 dt_pair; P3 list\<rbrakk> \<Longrightarrow> P3 (dt_pair # list);
\<And>typ_desc list. P1 typ_desc \<Longrightarrow> P4 (DTPair typ_desc list)\<rbrakk>
\<Longrightarrow> P4 dt_pair"
by (rule compat_typ_desc_char_list_dt_pair.induct)
-- "Declare as default induct rule with old case names"
lemmas typ_desc_typ_struct_inducts [case_names
TypDesc TypScalar TypAggregate Nil_typ_desc Cons_typ_desc DTPair_typ_desc, induct type] =
typ_desc_induct typ_struct_induct typ_list_induct typ_dt_pair_induct
-- "Make sure list induct rule is tried first"
declare list.induct [induct type]
type_synonym 'a typ_pair = "('a typ_desc,field_name) dt_pair"
type_synonym typ_uinfo = "normalisor typ_desc"
type_synonym typ_uinfo_struct = "normalisor typ_struct"
type_synonym typ_uinfo_pair = "normalisor typ_pair"
record 'a field_desc =
field_access :: "'a \<Rightarrow> byte list \<Rightarrow> byte list"
field_update :: "byte list \<Rightarrow> 'a \<Rightarrow> 'a"
type_synonym 'a typ_info = "'a field_desc typ_desc"
type_synonym 'a typ_info_struct = "'a field_desc typ_struct"
type_synonym 'a typ_info_pair = "'a field_desc typ_pair"
definition fu_commutes :: "('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> ('c \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where
"fu_commutes f g \<equiv> \<forall>v bs bs'. f bs (g bs' v) = g bs' (f bs v)"
text {* size_td returns the sum of the sizes of all Scalar fields
comprising a typ_desc i.e. the overall size of the type *}
(* Could express this and many other typ_desc primrecs as tree fold/map
combos, but the intuition this way is clearer for anything non-trivial *)
primrec
size_td :: "'a typ_desc \<Rightarrow> nat" and
size_td_struct :: "'a typ_struct \<Rightarrow> nat" and
size_td_list :: "'a typ_pair list \<Rightarrow> nat" and
size_td_pair :: "'a typ_pair \<Rightarrow> nat"
where
tz0: "size_td (TypDesc st nm) = size_td_struct st"
| tz1: "size_td_struct (TypScalar n algn d) = n"
| tz2: "size_td_struct (TypAggregate xs) = size_td_list xs"
| tz3: "size_td_list [] = 0"
| tz4: "size_td_list (x#xs) = size_td_pair x + size_td_list xs"
| tz5: "size_td_pair (DTPair t n) = size_td t"
text {* access_ti overlays the byte-wise representation of an object
on a given byte list, given the typ_info (i.e. the layout) *}
primrec
access_ti :: "'a typ_info \<Rightarrow> ('a \<Rightarrow> byte list \<Rightarrow> byte list)" and
access_ti_struct :: "'a typ_info_struct \<Rightarrow>
('a \<Rightarrow> byte list \<Rightarrow> byte list)" and
access_ti_list :: "'a typ_info_pair list \<Rightarrow>
('a \<Rightarrow> byte list \<Rightarrow> byte list)" and
access_ti_pair :: "'a typ_info_pair \<Rightarrow> ('a \<Rightarrow> byte list \<Rightarrow> byte list)"
where
fa0: "access_ti (TypDesc st nm) = access_ti_struct st"
| fa1: "access_ti_struct (TypScalar n algn d) = field_access d"
| fa2: "access_ti_struct (TypAggregate xs) = access_ti_list xs"
| fa3: "access_ti_list [] = (\<lambda>v bs. [])"
| fa4: "access_ti_list (x#xs) =
(\<lambda>v bs. access_ti_pair x v (take (size_td_pair x) bs) @
access_ti_list xs v (drop (size_td_pair x) bs))"
| fa5: "access_ti_pair (DTPair t nm) = access_ti t"
text {* access_ti\<^sub>0 overlays the representation of an object on a
list of zero bytes *}
definition access_ti\<^sub>0 :: "'a typ_info \<Rightarrow> ('a \<Rightarrow> byte list)" where
"access_ti\<^sub>0 t \<equiv> \<lambda>v. access_ti t v (replicate (size_td t) 0)"
text {* update_ti updates an object, given a list of bytes (the
representation of the new value), and the typ_info *}
primrec
update_ti :: "'a typ_info \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" and
update_ti_struct :: "'a typ_info_struct \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" and
update_ti_list :: "'a typ_info_pair list \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" and
update_ti_pair :: "'a typ_info_pair \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)"
where
fu0: "update_ti (TypDesc st nm) = update_ti_struct st"
| fu1: "update_ti_struct (TypScalar n algn d) = field_update d"
| fu2: "update_ti_struct (TypAggregate xs) = update_ti_list xs"
| fu3: "update_ti_list [] = (\<lambda>bs. id)"
| fu4: "update_ti_list (x#xs) = (\<lambda>bs v.
update_ti_pair x (take (size_td_pair x) bs)
(update_ti_list xs (drop (size_td_pair x) bs) v))"
| fu5: "update_ti_pair (DTPair t nm) = update_ti t"
text {* update_ti_t updates an object only if the length of the
supplied representation equals the object size *}
definition update_ti_t :: "'a typ_info \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" where
"update_ti_t t \<equiv> \<lambda>bs. if length bs = size_td t then
update_ti t bs else id"
definition update_ti_struct_t :: "'a typ_info_struct \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" where
"update_ti_struct_t t \<equiv> \<lambda>bs. if length bs = size_td_struct t then
update_ti_struct t bs else id"
definition update_ti_list_t :: "'a typ_info_pair list \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" where
"update_ti_list_t t \<equiv> \<lambda>bs. if length bs = size_td_list t then
update_ti_list t bs else id"
definition update_ti_pair_t :: "'a typ_info_pair \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" where
"update_ti_pair_t t \<equiv> \<lambda>bs. if length bs = size_td_pair t then
update_ti_pair t bs else id"
text {* field_desc generates the access/update pair for a field,
given the field's typ_info *}
definition field_desc :: "'a typ_info \<Rightarrow> 'a field_desc" where
"field_desc t \<equiv> \<lparr> field_access = access_ti t,
field_update = update_ti_t t \<rparr>"
declare field_desc_def [simp add]
definition field_desc_struct :: "'a typ_info_struct \<Rightarrow> 'a field_desc" where
"field_desc_struct t \<equiv> \<lparr> field_access = access_ti_struct t,
field_update = update_ti_struct_t t \<rparr>"
declare field_desc_struct_def [simp add]
definition field_desc_list :: "'a typ_info_pair list \<Rightarrow> 'a field_desc"
where
"field_desc_list t \<equiv> \<lparr> field_access = access_ti_list t,
field_update = update_ti_list_t t \<rparr>"
declare field_desc_list_def [simp add]
definition field_desc_pair :: "'a typ_info_pair \<Rightarrow> 'a field_desc"
where
"field_desc_pair t \<equiv> \<lparr> field_access = access_ti_pair t,
field_update = update_ti_pair_t t \<rparr>"
declare field_desc_pair_def [simp add]
primrec
map_td :: "(nat \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow> 'a typ_desc \<Rightarrow> 'b typ_desc" and
map_td_struct :: "(nat \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow> 'a typ_struct \<Rightarrow> 'b typ_struct" and
map_td_list :: "(nat \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow> 'a typ_pair list \<Rightarrow>
'b typ_pair list" and
map_td_pair :: "(nat \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow> 'a typ_pair \<Rightarrow> 'b typ_pair"
where
mat0: "map_td f (TypDesc st nm) = TypDesc (map_td_struct f st) nm"
| mat1: "map_td_struct f (TypScalar n algn d) = TypScalar n algn (f n algn d)"
| mat2: "map_td_struct f (TypAggregate xs) = TypAggregate (map_td_list f xs)"
| mat3: "map_td_list f [] = []"
| mat4: "map_td_list f (x#xs) = map_td_pair f x # map_td_list f xs"
| mat5: "map_td_pair f (DTPair t n) = DTPair (map_td f t) n"
definition field_norm :: "nat \<Rightarrow> nat \<Rightarrow> 'a field_desc \<Rightarrow> (byte list \<Rightarrow> byte list)"
where
"field_norm \<equiv> \<lambda>n algn d bs.
if length bs = n then
field_access d (field_update d bs undefined) (replicate n 0) else
[]"
definition export_uinfo :: "'a typ_info \<Rightarrow> typ_uinfo" where
"export_uinfo t \<equiv> map_td field_norm t"
primrec
wf_desc :: "'a typ_desc \<Rightarrow> bool" and
wf_desc_struct :: "'a typ_struct \<Rightarrow> bool" and
wf_desc_list :: "'a typ_pair list \<Rightarrow> bool" and
wf_desc_pair :: "'a typ_pair \<Rightarrow> bool"
where
wfd0: "wf_desc (TypDesc ts n) = wf_desc_struct ts"
| wfd1: "wf_desc_struct (TypScalar n algn d) = True"
| wfd2: "wf_desc_struct (TypAggregate ts) = wf_desc_list ts"
| wfd3: "wf_desc_list [] = True"
| wfd4: "wf_desc_list (x#xs) = (wf_desc_pair x \<and> \<not> dt_snd x \<in> dt_snd ` set xs \<and>
wf_desc_list xs)"
| wfd5: "wf_desc_pair (DTPair x n) = wf_desc x"
primrec
wf_size_desc :: "'a typ_desc \<Rightarrow> bool" and
wf_size_desc_struct :: "'a typ_struct \<Rightarrow> bool" and
wf_size_desc_list :: "'a typ_pair list \<Rightarrow> bool" and
wf_size_desc_pair :: "'a typ_pair \<Rightarrow> bool"
where
wfsd0: "wf_size_desc (TypDesc ts n) = wf_size_desc_struct ts"
| wfsd1: "wf_size_desc_struct (TypScalar n algn d) = (0 < n)"
| wfsd2: "wf_size_desc_struct (TypAggregate ts) =
(ts \<noteq> [] \<and> wf_size_desc_list ts)"
| wfsd3: "wf_size_desc_list [] = True"
| wfsd4: "wf_size_desc_list (x#xs) =
(wf_size_desc_pair x \<and> wf_size_desc_list xs)"
| wfsd5: "wf_size_desc_pair (DTPair x n) = wf_size_desc x"
definition
typ_struct :: "'a typ_desc \<Rightarrow> 'a typ_struct"
where
"typ_struct t = (case t of TypDesc st sz \<Rightarrow> st)"
lemma typ_struct [simp]:
"typ_struct (TypDesc st sz) = st"
by (simp add: typ_struct_def)
primrec
typ_name :: "'a typ_desc \<Rightarrow> typ_name"
where
"typ_name (TypDesc st nm) = nm"
primrec
norm_tu :: "typ_uinfo \<Rightarrow> normalisor" and
norm_tu_struct :: "typ_uinfo_struct \<Rightarrow> normalisor" and
norm_tu_list :: "typ_uinfo_pair list \<Rightarrow> normalisor" and
norm_tu_pair :: "typ_uinfo_pair \<Rightarrow> normalisor"
where
tn0: "norm_tu (TypDesc st nm) = norm_tu_struct st"
| tn1: "norm_tu_struct (TypScalar n aln f) = f"
| tn2: "norm_tu_struct (TypAggregate xs) = norm_tu_list xs"
| tn3: "norm_tu_list [] = (\<lambda>bs. [])"
| tn4: "norm_tu_list (x#xs) = (\<lambda>bs.
norm_tu_pair x (take (size_td_pair x) bs) @
norm_tu_list xs (drop (size_td_pair x) bs))"
| tn5: "norm_tu_pair (DTPair t n) = norm_tu t"
class c_type
instance c_type \<subseteq> type ..
consts
typ_info_t :: "'a::c_type itself \<Rightarrow> 'a typ_info"
typ_name_itself :: "'a::c_type itself \<Rightarrow> typ_name"
definition typ_uinfo_t :: "'a::c_type itself \<Rightarrow> typ_uinfo" where
"typ_uinfo_t t \<equiv> export_uinfo (typ_info_t TYPE('a))"
definition to_bytes :: "'a::c_type \<Rightarrow> byte list \<Rightarrow> byte list" where
"to_bytes v \<equiv> access_ti (typ_info_t TYPE('a)) v"
(* from_bytes is now total - any partial C type 'a needs to be instantiated
as a c_type using 'a option, and the parser needs to do some work
extracting the value and generating guards for non-None when these are
used. Luckily for us, in our work we never use them. *)
definition from_bytes :: "byte list \<Rightarrow> 'a::c_type" where
"from_bytes bs \<equiv>
field_update (field_desc (typ_info_t TYPE('a))) bs undefined"
type_synonym 'a flr = "('a typ_desc \<times> nat) option"
primrec
field_lookup :: "'a typ_desc \<Rightarrow> qualified_field_name \<Rightarrow> nat \<Rightarrow> 'a flr" and
field_lookup_struct :: "'a typ_struct \<Rightarrow> qualified_field_name \<Rightarrow> nat \<Rightarrow>
'a flr" and
field_lookup_list :: "'a typ_pair list \<Rightarrow> qualified_field_name \<Rightarrow> nat \<Rightarrow>
'a flr" and
field_lookup_pair :: "'a typ_pair \<Rightarrow> qualified_field_name \<Rightarrow> nat \<Rightarrow> 'a flr"
where
fl0: "field_lookup (TypDesc st nm) f m =
(if f=[] then Some (TypDesc st nm,m) else field_lookup_struct st f m)"
| fl1: "field_lookup_struct (TypScalar n algn d) f m = None"
| fl2: "field_lookup_struct (TypAggregate xs) f m = field_lookup_list xs f m"
| fl3: "field_lookup_list [] f m = None"
| fl4: "field_lookup_list (x#xs) f m = (
case field_lookup_pair x f m of
None \<Rightarrow> field_lookup_list xs f (m + size_td (dt_fst x)) |
Some y \<Rightarrow> Some y)"
| fl5: "field_lookup_pair (DTPair t nm) f m =
(if nm=hd f \<and> f \<noteq> [] then field_lookup t (tl f) m else None)"
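text {* For example, looking up the second field of a two-field structure
(with arbitrary descriptors d and e, and using the dt_fst projection
defined earlier) yields its typ_desc together with its offset, namely the
size of the first field: *}
lemma "field_lookup (TypDesc (TypAggregate
[DTPair (TypDesc (TypScalar 4 2 d) ''a'') ''x'',
DTPair (TypDesc (TypScalar 2 1 e) ''b'') ''y'']) ''s'') [''y''] 0 =
Some (TypDesc (TypScalar 2 1 e) ''b'', 4)"
by simp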
definition map_td_flr :: "(nat \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow>
('a typ_desc \<times> nat) option \<Rightarrow> 'b flr"
where
"map_td_flr f \<equiv> case_option None (\<lambda>(s,n). Some (map_td f s,n))"
definition
import_flr :: "(nat \<Rightarrow> nat \<Rightarrow> 'b \<Rightarrow> 'a) \<Rightarrow> 'a flr \<Rightarrow> ('b typ_desc \<times> nat) option \<Rightarrow> bool"
where
"import_flr f s k \<equiv> case_option (k=None)
(\<lambda>(s,m). case_option False (\<lambda>(t,n). n=m \<and> map_td f t=s) k )
s"
definition
field_offset_untyped :: "'a typ_desc \<Rightarrow> qualified_field_name \<Rightarrow> nat"
where
"field_offset_untyped t n \<equiv> snd (the (field_lookup t n 0))"
definition
field_offset :: "'a::c_type itself \<Rightarrow> qualified_field_name \<Rightarrow> nat"
where
"field_offset t n \<equiv> field_offset_untyped (typ_uinfo_t TYPE('a)) n"
definition
field_ti :: "'a::c_type itself \<Rightarrow> qualified_field_name \<rightharpoonup> 'a typ_info"
where
"field_ti t n \<equiv> case_option None (Some \<circ> fst)
(field_lookup (typ_info_t TYPE('a)) n 0)"
definition
field_size :: "'a::c_type itself \<Rightarrow> qualified_field_name \<Rightarrow> nat"
where
"field_size t n \<equiv> size_td (the (field_ti t n))"
definition
field_lvalue :: "'a::c_type ptr \<Rightarrow> qualified_field_name \<Rightarrow> addr" ("&'(_\<rightarrow>_')")
where
"&(p\<rightarrow>f) \<equiv> ptr_val (p::'a ptr) + of_nat (field_offset TYPE('a) f)"
definition
size_of :: "'a::c_type itself \<Rightarrow> nat" where
"size_of t \<equiv> size_td (typ_info_t TYPE('a))"
definition
norm_bytes :: "'a::c_type itself \<Rightarrow> normalisor" where
"norm_bytes t \<equiv> norm_tu (export_uinfo (typ_info_t t))"
definition to_bytes_p :: "'a::c_type \<Rightarrow> byte list" where
"to_bytes_p v \<equiv> to_bytes v (replicate (size_of TYPE('a)) 0)"
primrec
align_td :: "'a typ_desc \<Rightarrow> nat" and
align_td_struct :: "'a typ_struct \<Rightarrow> nat" and
align_td_list :: "'a typ_pair list \<Rightarrow> nat" and
align_td_pair :: "'a typ_pair \<Rightarrow> nat"
where
al0: "align_td (TypDesc st nm) = align_td_struct st"
| al1: "align_td_struct (TypScalar n algn d) = algn"
| al2: "align_td_struct (TypAggregate xs) = align_td_list xs"
| al3: "align_td_list [] = 0"
| al4: "align_td_list (x#xs) = max (align_td_pair x) (align_td_list xs)"
| al5: "align_td_pair (DTPair t n) = align_td t"
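text {* The alignment exponent of an aggregate is the maximum over its
fields, e.g. with arbitrary descriptors d and e: *}
lemma "align_td (TypDesc (TypAggregate
[DTPair (TypDesc (TypScalar 4 2 d) ''a'') ''x'',
DTPair (TypDesc (TypScalar 8 3 e) ''b'') ''y'']) ''s'') = 3"
by (simp add: max_def)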
definition align_of :: "'a::c_type itself \<Rightarrow> nat" where
"align_of t \<equiv> 2^(align_td (typ_info_t TYPE('a)))"
definition
ptr_add :: "('a::c_type) ptr \<Rightarrow> int \<Rightarrow> 'a ptr" (infixl "+\<^sub>p" 65)
where
"ptr_add (a :: ('a::c_type) ptr) w \<equiv>
Ptr (ptr_val a + of_int w * of_nat (size_of (TYPE('a))))"
lemma ptr_add_def':
"ptr_add (Ptr p :: ('a::c_type) ptr) n
= (Ptr (p + of_int n * of_nat (size_of TYPE('a))))"
by (cases p, auto simp: ptr_add_def scast_id)
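text {* Pointer addition scales by the size of the pointed-to type, as in
C; in particular, advancing by zero elements is the identity: *}
lemma "p +\<^sub>p 0 = p"
by (cases p) (simp add: ptr_add_def)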
definition
ptr_sub :: "('a::c_type) ptr \<Rightarrow> ('a::c_type) ptr \<Rightarrow> 32 signed word" (infixl "-\<^sub>p" 65)
where
"ptr_sub (a :: ('a::c_type) ptr) p \<equiv>
ucast (ptr_val a - ptr_val p) div of_nat (size_of (TYPE('a)))"
definition ptr_aligned :: "'a::c_type ptr \<Rightarrow> bool" where
"ptr_aligned p \<equiv> align_of TYPE('a) dvd unat (ptr_val (p::'a ptr))"
primrec
td_set :: "'a typ_desc \<Rightarrow> nat \<Rightarrow> ('a typ_desc \<times> nat) set" and
td_set_struct :: "'a typ_struct \<Rightarrow> nat \<Rightarrow> ('a typ_desc \<times> nat) set" and
td_set_list :: "'a typ_pair list \<Rightarrow> nat \<Rightarrow> ('a typ_desc \<times> nat) set" and
td_set_pair :: "'a typ_pair \<Rightarrow> nat \<Rightarrow> ('a typ_desc \<times> nat) set"
where
ts0: "td_set (TypDesc st nm) m = {(TypDesc st nm,m)} \<union> td_set_struct st m"
| ts1: "td_set_struct (TypScalar n algn d) m = {}"
| ts2: "td_set_struct (TypAggregate xs) m = td_set_list xs m"
| ts3: "td_set_list [] m = {}"
| ts4: "td_set_list (x#xs) m = td_set_pair x m \<union> td_set_list xs (m + size_td (dt_fst x))"
| ts5: "td_set_pair (DTPair t nm) m = td_set t m"
instantiation typ_desc :: (type) ord
begin
definition
typ_tag_le_def: "s \<le> (t::'a typ_desc) \<equiv> (\<exists>n. (s,n) \<in> td_set t 0)"
definition
typ_tag_lt_def: "s < (t::'a typ_desc) \<equiv> s \<le> t \<and> s \<noteq> t"
instance ..
end
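text {* Under this order, s \<le> t holds exactly when s occurs as a
sub-descriptor of t at some offset, as collected by td_set. *}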
definition
fd_cons_double_update :: "'a field_desc \<Rightarrow> bool"
where
"fd_cons_double_update d \<equiv>
(\<forall>v bs bs'. length bs = length bs' \<longrightarrow> field_update d bs (field_update d bs' v) = field_update d bs v)"
definition
fd_cons_update_access :: "'a field_desc \<Rightarrow> nat \<Rightarrow> bool"
where
"fd_cons_update_access d n \<equiv>
(\<forall>v bs. length bs = n \<longrightarrow> field_update d (field_access d v bs) v = v)"
definition
norm_desc :: "'a field_desc \<Rightarrow> nat \<Rightarrow> (byte list \<Rightarrow> byte list)"
where
"norm_desc d n \<equiv> \<lambda>bs. field_access d (field_update d bs undefined) (replicate n 0)"
definition
fd_cons_length :: "'a field_desc \<Rightarrow> nat \<Rightarrow> bool"
where
"fd_cons_length d n \<equiv> \<forall>v bs. length bs = n \<longrightarrow> length (field_access d v bs) = n"
definition
fd_cons_access_update :: "'a field_desc \<Rightarrow> nat \<Rightarrow> bool"
where
"fd_cons_access_update d n \<equiv> \<forall>bs bs' v v'. length bs = n \<longrightarrow>
length bs' = n \<longrightarrow>
field_access d (field_update d bs v) bs' = field_access d (field_update d bs v') bs'"
definition
fd_cons_update_normalise :: "'a field_desc \<Rightarrow> nat \<Rightarrow> bool"
where
"fd_cons_update_normalise d n \<equiv>
(\<forall>v bs. length bs=n \<longrightarrow> field_update d (norm_desc d n bs) v = field_update d bs v)"
definition
fd_cons_desc :: "'a field_desc \<Rightarrow> nat \<Rightarrow> bool"
where
"fd_cons_desc d n \<equiv> fd_cons_double_update d \<and>
fd_cons_update_access d n \<and>
fd_cons_access_update d n \<and>
fd_cons_length d n"
definition
fd_cons :: "'a typ_info \<Rightarrow> bool"
where
"fd_cons t \<equiv> fd_cons_desc (field_desc t) (size_td t)"
definition
fd_cons_struct :: "'a typ_info_struct \<Rightarrow> bool"
where
"fd_cons_struct t \<equiv> fd_cons_desc (field_desc_struct t) (size_td_struct t)"
definition
fd_cons_list :: "'a typ_info_pair list \<Rightarrow> bool"
where
"fd_cons_list t \<equiv> fd_cons_desc (field_desc_list t) (size_td_list t)"
definition
fd_cons_pair :: "'a typ_info_pair \<Rightarrow> bool"
where
"fd_cons_pair t \<equiv> fd_cons_desc (field_desc_pair t) (size_td_pair t)"
definition
fa_fu_ind :: "'a field_desc \<Rightarrow> 'a field_desc \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow>bool"
where
"fa_fu_ind d d' n n' \<equiv> \<forall>v bs bs'. length bs = n \<longrightarrow> length bs' = n' \<longrightarrow>
field_access d (field_update d' bs v) bs' = field_access d v bs'"
definition
wf_fdp :: "('a typ_info \<times> qualified_field_name) set \<Rightarrow> bool"
where
"wf_fdp t \<equiv> \<forall>x m. (x,m) \<in> t \<longrightarrow> (fd_cons x \<and> (\<forall>y n. (y,n) \<in> t \<and> \<not> m \<le> n \<and> \<not> n \<le> m
\<longrightarrow> fu_commutes (field_update (field_desc x)) (field_update (field_desc y)) \<and>
fa_fu_ind (field_desc x) (field_desc y) (size_td y) (size_td x)))"
lemma wf_fdp_list:
"wf_fdp (xs \<union> ys) \<Longrightarrow> wf_fdp xs \<and> wf_fdp ys"
by (auto simp: wf_fdp_def)
primrec
wf_fd :: "'a typ_info \<Rightarrow> bool" and
wf_fd_struct :: "'a typ_info_struct \<Rightarrow> bool" and
wf_fd_list :: "'a typ_info_pair list \<Rightarrow> bool" and
wf_fd_pair :: "'a typ_info_pair \<Rightarrow> bool"
where
wffd0: "wf_fd (TypDesc ts n) = (wf_fd_struct ts)"
| wffd1: "wf_fd_struct (TypScalar n algn d) = fd_cons_struct (TypScalar n algn d)"
| wffd2: "wf_fd_struct (TypAggregate ts) = wf_fd_list ts"
| wffd3: "wf_fd_list [] = True"
| wffd4: "wf_fd_list (x#xs) = (wf_fd_pair x \<and> wf_fd_list xs \<and>
fu_commutes (update_ti_pair_t x) (update_ti_list_t xs) \<and>
fa_fu_ind (field_desc_pair x) (field_desc_list xs) (size_td_list xs) (size_td_pair x) \<and>
fa_fu_ind (field_desc_list xs) (field_desc_pair x) (size_td_pair x) (size_td_list xs))"
| wffd5: "wf_fd_pair (DTPair x n) = wf_fd x"
definition
tf_set :: "'a typ_info \<Rightarrow> ('a typ_info \<times> qualified_field_name) set"
where
"tf_set td \<equiv> {(s,f) | s f. \<exists>n. field_lookup td f 0 = Some (s,n)}"
definition
tf_set_struct :: "'a typ_info_struct \<Rightarrow> ('a typ_info \<times> qualified_field_name) set"
where
"tf_set_struct td \<equiv> {(s,f) | s f. \<exists>n. field_lookup_struct td f 0 = Some (s,n)}"
definition
tf_set_list :: "'a typ_info_pair list \<Rightarrow> ('a typ_info \<times> qualified_field_name) set"
where
"tf_set_list td \<equiv> {(s,f) | s f. \<exists>n. field_lookup_list td f 0 = Some (s,n)}"
definition
tf_set_pair :: "'a typ_info_pair \<Rightarrow> ('a typ_info \<times> qualified_field_name) set"
where
"tf_set_pair td \<equiv> {(s,f) | s f. \<exists>n. field_lookup_pair td f 0 = Some (s,n)}"
record 'a leaf_desc =
lf_fd :: "'a field_desc"
lf_sz :: nat
lf_fn :: qualified_field_name
primrec
lf_set :: "'a typ_info \<Rightarrow> qualified_field_name \<Rightarrow> 'a leaf_desc set" and
lf_set_struct :: "'a typ_info_struct \<Rightarrow> qualified_field_name \<Rightarrow> 'a leaf_desc set" and
lf_set_list :: "'a typ_info_pair list \<Rightarrow> qualified_field_name \<Rightarrow> 'a leaf_desc set" and
lf_set_pair :: "'a typ_info_pair \<Rightarrow> qualified_field_name \<Rightarrow> 'a leaf_desc set"
where
fds0: "lf_set (TypDesc st nm) fn = lf_set_struct st fn"
| fds1: "lf_set_struct (TypScalar n algn d) fn = {(\<lparr> lf_fd = d, lf_sz = n, lf_fn = fn \<rparr>)}"
| fds2: "lf_set_struct (TypAggregate xs) fn = lf_set_list xs fn"
| fds3: "lf_set_list [] fn = {}"
| fds4: "lf_set_list (x#xs) fn = lf_set_pair x fn \<union> lf_set_list xs fn"
| fds5: "lf_set_pair (DTPair t n) fn = lf_set t (fn@[n])"
definition
wf_lf :: "'a leaf_desc set \<Rightarrow> bool"
where
"wf_lf D \<equiv> \<forall>x. x \<in> D \<longrightarrow> (fd_cons_desc (lf_fd x) (lf_sz x) \<and> (\<forall>y. y \<in> D \<longrightarrow> lf_fn y \<noteq> lf_fn x
\<longrightarrow> fu_commutes (field_update (lf_fd x)) (field_update (lf_fd y)) \<and>
fa_fu_ind (lf_fd x) (lf_fd y) (lf_sz y) (lf_sz x)))"
definition
ti_ind :: "'a leaf_desc set \<Rightarrow> 'a leaf_desc set \<Rightarrow> bool"
where
"ti_ind X Y \<equiv> \<forall>x y. x \<in> X \<and> y \<in> Y \<longrightarrow> (
fu_commutes (field_update (lf_fd x)) (field_update (lf_fd y)) \<and>
fa_fu_ind (lf_fd x) (lf_fd y) (lf_sz y) (lf_sz x) \<and>
fa_fu_ind (lf_fd y) (lf_fd x) (lf_sz x) (lf_sz y))"
definition
t2d :: "('a typ_info \<times> qualified_field_name) \<Rightarrow> 'a leaf_desc"
where
"t2d x \<equiv> \<lparr> lf_fd = field_desc (fst x), lf_sz = size_td (fst x), lf_fn = snd x\<rparr>"
definition
fd_consistent :: "'a typ_info \<Rightarrow> bool"
where
"fd_consistent t \<equiv> \<forall>f s n. field_lookup t f 0 = Some (s,n)
\<longrightarrow> fd_cons s"
class wf_type = c_type +
assumes wf_desc [simp]: "wf_desc (typ_info_t TYPE('a::c_type))"
assumes wf_size_desc [simp]: "wf_size_desc (typ_info_t TYPE('a::c_type))"
assumes wf_lf [simp]: "wf_lf (lf_set (typ_info_t TYPE('a::c_type)) [])"
definition
super_update_bs :: "byte list \<Rightarrow> byte list \<Rightarrow> nat \<Rightarrow> byte list"
where
"super_update_bs v bs n \<equiv> take n bs @ v @
drop (n + length v) bs"
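text {* super_update_bs splices v into bs at offset n; at offset 0 it
simply overwrites a prefix of bs: *}
lemma "super_update_bs v bs 0 = v @ drop (length v) bs"
by (simp add: super_update_bs_def)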
definition
disj_fn :: "qualified_field_name \<Rightarrow> qualified_field_name \<Rightarrow> bool"
where
"disj_fn s t \<equiv> \<not> s \<le> t \<and> \<not> t \<le> s"
definition
fs_path :: "qualified_field_name list \<Rightarrow> qualified_field_name set"
where
"fs_path xs \<equiv> {x. \<exists>k. k \<in> set xs \<and> x \<le> k} \<union> {x. \<exists>k. k \<in> set xs \<and> k \<le> x}"
definition
field_names :: "'a typ_desc \<Rightarrow> qualified_field_name set"
where
"field_names t \<equiv> {f. field_lookup t f 0 \<noteq> None}"
definition
align_field :: "'a typ_desc \<Rightarrow> bool"
where
"align_field ti \<equiv> \<forall>f s n. field_lookup ti f 0 = Some (s,n) \<longrightarrow>
2^(align_td s) dvd n"
class mem_type_sans_size = wf_type +
assumes upd:
"length bs = size_of TYPE('a) \<longrightarrow>
update_ti_t (typ_info_t TYPE('a::c_type)) bs v
= update_ti_t (typ_info_t TYPE('a)) bs w"
assumes align_size_of: "align_of (TYPE('a::c_type)) dvd size_of TYPE('a)"
assumes align_field: "align_field (typ_info_t TYPE('a::c_type))"
class mem_type = mem_type_sans_size +
assumes max_size: "size_of (TYPE('a::c_type)) < addr_card"
primrec
aggregate :: "'a typ_desc \<Rightarrow> bool" and
aggregate_struct :: "'a typ_struct \<Rightarrow> bool"
where
"aggregate (TypDesc st tn) = aggregate_struct st"
| "aggregate_struct (TypScalar n algn d) = False"
| "aggregate_struct (TypAggregate ts) = True"
class simple_mem_type = mem_type +
assumes simple_tag: "\<not> aggregate (typ_info_t TYPE('a::c_type))"
definition
field_of :: "addr \<Rightarrow> 'a typ_desc \<Rightarrow> 'a typ_desc \<Rightarrow> bool"
where
"field_of q s t \<equiv> (s,unat q) \<in> td_set t 0"
definition
field_of_t :: "'a::c_type ptr \<Rightarrow> 'b::c_type ptr \<Rightarrow> bool"
where
"field_of_t p q \<equiv> field_of (ptr_val p - ptr_val q) (typ_uinfo_t TYPE('a))
(typ_uinfo_t TYPE('b))"
definition
h_val :: "heap_mem \<Rightarrow> 'a::c_type ptr \<Rightarrow> 'a"
where
"h_val h \<equiv> \<lambda>p. from_bytes (heap_list h (size_of TYPE ('a))
(ptr_val (p::'a ptr)))"
primrec
heap_update_list :: "addr \<Rightarrow> byte list \<Rightarrow> heap_mem \<Rightarrow> heap_mem"
where
heap_update_list_base: "heap_update_list p [] h = h"
| heap_update_list_rec:
"heap_update_list p (x#xs) h = heap_update_list (p + 1) xs (h(p:= x))"
type_synonym 'a typ_heap_g = "'a ptr \<Rightarrow> 'a"
(* FIXME: now redundant with h_val *)
definition
lift :: "heap_mem \<Rightarrow> 'a::c_type typ_heap_g"
where
"lift h \<equiv> h_val h"
definition
heap_update :: "'a::c_type ptr \<Rightarrow> 'a \<Rightarrow> heap_mem \<Rightarrow> heap_mem"
where
"heap_update p v h \<equiv> heap_update_list (ptr_val p) (to_bytes v (heap_list h (size_of TYPE('a)) (ptr_val p))) h"
fun
fold_td' :: "(typ_name \<Rightarrow> ('a \<times> field_name) list \<Rightarrow> 'a) \<times> 'a typ_desc \<Rightarrow> 'a"
where
fot0: "fold_td' (f,TypDesc st nm) = (case st of
TypScalar n algn d \<Rightarrow> d |
TypAggregate ts \<Rightarrow> f nm (map (\<lambda>x. case x of DTPair t n \<Rightarrow> (fold_td' (f,t),n)) ts))"
definition
fold_td :: "(typ_name \<Rightarrow> ('a \<times> field_name) list \<Rightarrow> 'a) \<Rightarrow> 'a typ_desc \<Rightarrow> 'a"
where
"fold_td \<equiv> \<lambda>f t. fold_td' (f,t)"
declare fold_td_def [simp]
definition
fold_td_struct :: "typ_name \<Rightarrow> (typ_name \<Rightarrow> ('a \<times> field_name) list \<Rightarrow> 'a) \<Rightarrow> 'a typ_struct \<Rightarrow> 'a"
where
"fold_td_struct tn f st \<equiv> (case st of
TypScalar n algn d \<Rightarrow> d |
TypAggregate ts \<Rightarrow> f tn (map (\<lambda>x. case x of DTPair t n \<Rightarrow> (fold_td' (f,t),n)) ts))"
declare fold_td_struct_def [simp]
definition
fold_td_list :: "typ_name \<Rightarrow> (typ_name \<Rightarrow> ('a \<times> field_name) list \<Rightarrow> 'a) \<Rightarrow> 'a typ_pair list \<Rightarrow> 'a"
where
"fold_td_list tn f ts \<equiv> f tn (map (\<lambda>x. case x of DTPair t n \<Rightarrow> (fold_td' (f,t),n)) ts)"
declare fold_td_list_def [simp]
definition
fold_td_pair :: "(typ_name \<Rightarrow> ('a \<times> field_name) list \<Rightarrow> 'a) \<Rightarrow> 'a typ_pair \<Rightarrow> 'a"
where
"fold_td_pair f x \<equiv> (case x of DTPair t n \<Rightarrow> fold_td' (f,t))"
declare fold_td_pair_def [simp]
fun
map_td' :: "(nat \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'b) \<times> 'a typ_desc \<Rightarrow> 'b typ_desc"
where
"map_td' (f,TypDesc st nm) = (TypDesc (case st of
TypScalar n algn d \<Rightarrow> TypScalar n algn (f n algn d) |
TypAggregate ts \<Rightarrow> TypAggregate (map (\<lambda>x. case x of DTPair t n \<Rightarrow> DTPair (map_td' (f,t)) n) ts)) nm)"
definition
tnSum :: "typ_name \<Rightarrow> (nat \<times> field_name) list \<Rightarrow> nat"
where
"tnSum \<equiv> \<lambda>tn ts. foldr (op + o fst) ts 0"
definition
tnMax :: "typ_name \<Rightarrow> (nat \<times> field_name) list \<Rightarrow> nat"
where
"tnMax \<equiv> \<lambda>tn ts. foldr (\<lambda>x y. max (fst x) y) ts 0"
definition
wfd :: "typ_name \<Rightarrow> (bool \<times> field_name) list \<Rightarrow> bool"
where
"wfd \<equiv> \<lambda>tn ts. distinct (map snd ts) \<and> foldr (op \<and>) (map fst ts) True"
definition
wfsd :: "typ_name \<Rightarrow> (bool \<times> field_name) list \<Rightarrow> bool"
where
"wfsd \<equiv> \<lambda>tn ts. ts \<noteq> [] \<and> foldr (op \<and>) (map fst ts) True"
end
|
function [EC,ec,degij] = edge_nei_overlap_bd(CIJ)
%EDGE_NEI_OVERLAP_BD overlap amongst neighbors of two adjacent nodes
%
% [EC,ec,degij] = edge_nei_overlap_bd(CIJ);
%
% This function determines the neighbors of two nodes that are linked by
% an edge, and then computes their overlap. The connection matrix must be
% directed; weighted entries are treated as binary. Entries of 'EC' that
% are 'inf' indicate that no edge is present. Entries of 'EC' that are 0
% denote "local bridges", i.e. edges that link completely non-overlapping
% neighborhoods. Low values of EC indicate edges that are "weak ties".
%
% If CIJ is weighted, the weights are ignored. Neighbors of a node can be
% linked by incoming, outgoing, or reciprocal connections.
%
% Inputs: CIJ, directed (binary/weighted) connection matrix
%
% Outputs: EC, edge neighborhood overlap matrix
% ec, edge neighborhood overlap per edge, in vector format
% degij, degrees of node pairs connected by each edge
%
% Reference:
%
% Easley and Kleinberg (2010) Networks, Crowds, and Markets.
% Cambridge University Press, Chapter 3
%
% Olaf Sporns, Indiana University, 2012
[ik,jk,ck] = find(CIJ);
lel = length(ck);
N = size(CIJ,1);
[~,~,deg] = degrees_dir(CIJ);
ec = zeros(1,lel);
degij = zeros(2,lel);
for e=1:lel
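% neighborhoods of the two endpoints ik(e) and jk(e): union of in- and
% out-neighbors, excluding the endpoints of the edge themselves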
neiik = setdiff(union(find(CIJ(ik(e),:)),find(CIJ(:,ik(e))')),[ik(e) jk(e)]);
neijk = setdiff(union(find(CIJ(jk(e),:)),find(CIJ(:,jk(e))')),[ik(e) jk(e)]);
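% overlap = Jaccard index of the two neighborhoods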
ec(e) = length(intersect(neiik,neijk))/length(union(neiik,neijk));
degij(:,e) = [deg(ik(e)) deg(jk(e))];
end;
ff = find(CIJ);
EC = 1./zeros(N);
EC(ff) = ec; %#ok<FNDSB>
|
theory Rose_Tree
imports Main "HOL-Library.Sublist"
begin
text \<open>For theory \<open>Incredible_Trees\<close> we need rose trees; this theory contains
the generally useful part of that development.\<close>
subsubsection \<open>The rose tree data type\<close>
datatype 'a rose_tree = RNode (root: 'a) (children: "'a rose_tree list")
subsubsection \<open>The set of paths in a rose tree\<close>
text \<open>Too bad that @{command inductive_set} does not allow for varying parameters...\<close>
inductive it_pathsP :: "'a rose_tree \<Rightarrow> nat list \<Rightarrow> bool" where
it_paths_Nil: "it_pathsP t []"
| it_paths_Cons: "i < length (children t) \<Longrightarrow> children t ! i = t' \<Longrightarrow> it_pathsP t' is \<Longrightarrow> it_pathsP t (i#is)"
inductive_cases it_pathP_ConsE: "it_pathsP t (i#is)"
inductive_cases it_pathP_RNodeE: "it_pathsP (RNode r ants) is"
definition it_paths:: "'a rose_tree \<Rightarrow> nat list set" where
"it_paths t = Collect (it_pathsP t)"
lemma it_paths_eq [pred_set_conv]: "it_pathsP t = (\<lambda>x. x \<in> it_paths t)"
by(simp add: it_paths_def)
lemmas it_paths_intros [intro?] = it_pathsP.intros[to_set]
lemmas it_paths_induct [consumes 1, induct set: it_paths] = it_pathsP.induct[to_set]
lemmas it_paths_cases [consumes 1, cases set: it_paths] = it_pathsP.cases[to_set]
lemmas it_paths_ConsE = it_pathP_ConsE[to_set]
lemmas it_paths_RNodeE = it_pathP_RNodeE[to_set]
lemmas it_paths_simps = it_pathsP.simps[to_set]
lemmas it_paths_intros(1)[simp]
lemma it_paths_Union: "it_paths t \<subseteq> insert [] (Union (((\<lambda> (i,t). ((#) i) ` it_paths t) ` set (List.enumerate (0::nat) (children t)))))"
apply (rule)
apply (erule it_paths_cases)
apply (auto intro!: bexI simp add: in_set_enumerate_eq)
done
lemma finite_it_paths[simp]: "finite (it_paths t)"
by (induction t) (auto intro!: finite_subset[OF it_paths_Union] simp add: in_set_enumerate_eq)
subsubsection \<open>Indexing into a rose tree\<close>
fun tree_at :: "'a rose_tree \<Rightarrow> nat list \<Rightarrow> 'a rose_tree" where
"tree_at t [] = t"
| "tree_at t (i#is) = tree_at (children t ! i) is"
lemma it_paths_SnocE[elim_format]:
assumes "is @ [i] \<in> it_paths t"
shows "is \<in> it_paths t \<and> i < length (children (tree_at t is))"
using assms
by (induction "is" arbitrary: t)(auto intro!: it_paths_intros elim!: it_paths_ConsE)
lemma it_paths_strict_prefix:
assumes "is \<in> it_paths t"
assumes "strict_prefix is' is"
shows "is' \<in> it_paths t"
proof -
from assms(2)
obtain is'' where "is = is' @ is''" using strict_prefixE' by blast
from assms(1)[unfolded this]
show ?thesis
by(induction is' arbitrary: t) (auto elim!: it_paths_ConsE intro!: it_paths_intros)
qed
lemma it_paths_prefix:
assumes "is \<in> it_paths t"
assumes "prefix is' is"
shows "is' \<in> it_paths t"
using assms it_paths_strict_prefix strict_prefixI by fastforce
lemma it_paths_butlast:
assumes "is \<in> it_paths t"
shows "butlast is \<in> it_paths t"
using assms prefixeq_butlast by (rule it_paths_prefix)
lemma it_path_SnocI:
assumes "is \<in> it_paths t"
assumes "i < length (children (tree_at t is))"
shows "is @ [i] \<in> it_paths t"
using assms
by (induction t arbitrary: "is" i)
(auto 4 4 elim!: it_paths_RNodeE intro: it_paths_intros)
end
|