lemma smallD_nonneg_real: assumes "f \<in> lr F (g)" "eventually (\<lambda>x. f x \<ge> 0) F" "c > 0" shows "eventually (\<lambda>x. R (f x) (c * \<bar>g x\<bar>)) F" |
module STCR2Z1T0Image where
import Control.Monad as M
import Control.Monad.Parallel as MP
import Data.Array.Repa as R
import Data.Binary (decodeFile)
import Data.Complex
import Data.List as L
import Data.Vector.Generic as VG
import Data.Vector.Storable as VS
import DFT.Plan
import Filter.Gaussian
import Filter.Pinwheel
import FokkerPlanck.DomainChange
import FokkerPlanck.MonteCarlo
import FokkerPlanck.Pinwheel
import Image.IO
import Image.Transform
import STC
import STC.OrientationScaleAnalysis
import System.Directory
import System.Environment
import System.FilePath
import System.Random
import Types
import Utils.Array
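-- Command-line arguments expected, in order (this mirrors the pattern
-- match below): numPoint numOrientation sigma tao len initialScale
-- numTrail maxTrail theta0Freqs thetaFreqs histFilePath alpha
-- pinwheelFlag imagePath numIteration writeSourceFlag numThread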
main = do
args@(numPointStr:numOrientationStr:sigmaStr:taoStr:lenStr:initialScaleStr:numTrailStr:maxTrailStr:theta0FreqsStr:thetaFreqsStr:histFilePath:alphaStr:pinwheelFlagStr:imagePath:numIterationStr:writeSourceFlagStr:numThreadStr:_) <-
getArgs
print args
let numPoint = read numPointStr :: Int
numOrientation = read numOrientationStr :: Int
sigma = read sigmaStr :: Double
tao = read taoStr :: Double
len = read lenStr :: Int
initialScale = read initialScaleStr :: Double
numTrail = read numTrailStr :: Int
maxTrail = read maxTrailStr :: Int
theta0Freq = read theta0FreqsStr :: Double
theta0Freqs = [-theta0Freq .. theta0Freq]
thetaFreq = read thetaFreqsStr :: Double
thetaFreqs = [-thetaFreq .. thetaFreq]
alpha = read alphaStr :: Double
pinwheelFlag = read pinwheelFlagStr :: Bool
numIteration = read numIterationStr :: Int
writeSourceFlag = read writeSourceFlagStr :: Bool
numThread = read numThreadStr :: Int
folderPath = "output/test/STCR2Z1T0Image"
createDirectoryIfMissing True folderPath
flag <- doesFileExist histFilePath
(ImageRepa _ img'') <- readImageRepa imagePath False
let img'
-- resize25D (numPoint, numPoint) (0, 255) .
-- R.backpermute (Z :. nf :. numPoint :. numPoint) id .
-- upsample [4, 4, 1] . downsample [4, 4, 1] $
= img''
(Z :. nf :. _ :. _) = extent img''
(Z :. _ :. cols :. rows) = extent img'
pinwheelParams = PinwheelParams rows cols alpha (exp 1) theta0Freqs [0]
radialArr <-
if flag
then R.map magnitude . getNormalizedHistogramArr <$>
decodeFile histFilePath
else do
putStrLn "Couldn't find a Green's function data. Start simulation..."
solveMonteCarloR2Z1T0Radial
numThread
numTrail
maxTrail
numPoint
numPoint
sigma
tao
1
theta0Freqs
thetaFreqs
histFilePath
(emptyHistogram
[ (round . sqrt . fromIntegral $ 2 * (div numPoint 2) ^ 2)
, L.length theta0Freqs
, L.length thetaFreqs
]
0)
arrR2Z1T0 <-
computeUnboxedP $
computeR2Z1T0ArrayRadial
radialArr
numPoint
numPoint
1.5
thetaFreqs
theta0Freqs
plan <- makeR2Z1T0Plan emptyPlan arrR2Z1T0
-- (plan1, imgF) <- makeImagePlan plan0 . computeS $ img'
-- (plan2, filterF, filterPIF) <- pinwheelFilter plan1 pinwheelParams
-- (plan, gaussianFilterF) <-
-- gaussian2DFilter plan2 (Gaussian2DParams 1 rows cols)
-- -- img <- convolveGaussian2D plan gaussianFilterF . R.map (:+ 0) $ img'
-- -- imgVec <- dropPixel (2 / 3) . toUnboxed . computeS . filterImage $ img'
let img = R.map (:+ 0) img' -- = . fromUnboxed (extent img') $ imgVec
plotImageRepa
(folderPath </> "input.png") -- (ImageRepa 8 img)
(ImageRepa 8 . computeS . R.map magnitude $ img)
-- convolvedImg' <-
-- convolvePinwheel
-- plan
-- filterPIF
-- (R.slice imgF (Z :. (0 :: Int) :. All :. All))
-- let convolvedImg =
-- computeS . R.slice convolvedImg' $
-- (Z :. All :. (0 :: Int) :. All :. All)
-- let (initialDistSource, initialDistSink) =
-- analyzeOrientation numOrientation theta0Freqs convolvedImg
powerMethod1
plan
folderPath
cols
rows
numOrientation
thetaFreqs
theta0Freqs
arrR2Z1T0
numIteration
writeSourceFlag
""
0.1
(R.traverse img (const (Z :. (L.length theta0Freqs) :. cols :. rows)) $ \f (Z :. _ :. i :. j) ->
if magnitude (f (Z :. (0 :: Int) :. i :. j)) > 0
then 1
else 0)
(R.traverse
img
(const
(Z :. (L.length thetaFreqs) :. (L.length theta0Freqs) :. cols :. rows)) $ \f (Z :. k :. _ :. i :. j) ->
if k == div (L.length thetaFreqs) 2
then f (Z :. (0 :: Int) :. i :. j) /
(fromIntegral $ L.length theta0Freqs)
else 0)
|
using SignalBase
using SignalBase.Units
using Unitful
using Test
@testset "SignalBase.jl" begin
@test inframes(1s,44.1kHz) == 44100
@test inframes(Int,0.5s,44.1kHz) == 22050
@test inframes(Int,5frames) == 5
@test inframes(Int,5) == 5
@test inframes(5) == 5
@test inframes(1.0s,44.1kHz) isa Float64
@test ismissing(inframes(missing))
@test ismissing(inframes(Int,missing))
@test ismissing(inframes(Int,missing,5))
@test ismissing(inframes(missing,5))
@test ismissing(inframes(10s))
@test inHz(10) === 10
@test inHz(10Hz) === 10
@test inHz(Float64,10Hz) === 10.0
@test inHz(Int,10.5Hz) === 10
@test ismissing(inHz(missing))
@test inseconds(50ms) == 1//20
@test inseconds(50ms,10Hz) == 1//20
@test inseconds(10frames,10Hz) == 1
@test inseconds(1s,44.1kHz) == 1
@test inseconds(1,44.1kHz) == 1
@test inseconds(1) == 1
@test ismissing(inseconds(missing))
@test inradians(15) == 15
@test_throws Unitful.DimensionError inradians(15frames)
@test inradians(180°) ≈ π
@test ismissing(inseconds(2frames))
end
|
function x = emailFeatures(word_indices)
%EMAILFEATURES takes in a word_indices vector and produces a feature vector
%from the word indices
% x = EMAILFEATURES(word_indices) takes in a word_indices vector and
% produces a feature vector from the word indices.
% Total number of words in the dictionary
n = 1899;
% You need to return the following variables correctly.
x = zeros(n, 1);
% ====================== YOUR CODE HERE ======================
% Instructions: Fill in this function to return a feature vector for the
% given email (word_indices). To help make it easier to
% process the emails, we have already pre-processed each
% email and converted each word in the email into an index in
% a fixed dictionary (of 1899 words). The variable
% word_indices contains the list of indices of the words
% which occur in one email.
%
% Concretely, if an email has the text:
%
% The quick brown fox jumped over the lazy dog.
%
% Then, the word_indices vector for this text might look
% like:
%
% 60 100 33 44 10 53 60 58 5
%
% where, we have mapped each word onto a number, for example:
%
% the -- 60
% quick -- 100
% ...
%
% (note: the above numbers are just an example and are not the
% actual mappings).
%
% Your task is to take one such word_indices vector and construct
% a binary feature vector that indicates whether a particular
% word occurs in the email. That is, x(i) = 1 when word i
% is present in the email. Concretely, if the word 'the' (say,
% index 60) appears in the email, then x(60) = 1. The feature
% vector should look like:
%
% x = [ 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 ... 0 0 0 1 0 ..];
%
%
for i = 1:length(word_indices)
x(word_indices(i)) = 1;
end
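% Equivalent vectorized form (a sketch; assumes every entry of
% word_indices is a valid index in 1..n):
%
%   x(word_indices) = 1;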
% =========================================================================
end
|
[STATEMENT]
lemma pi_approx_32: "\<bar>pi - 13493037705/4294967296 :: real\<bar> \<le> inverse(2 ^ 32)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<bar>pi - 13493037705 / 4294967296\<bar> \<le> inverse (2 ^ 32)
[PROOF STEP]
by (rule approx_coarsen[OF pi_approx_75]) simp |
import primitive_element
lemma hmmm (F : Type*) [field F] (E : Type*) [field E] [algebra F E] (hs : is_separable F E) (hfd : finite_dimensional F E) : true :=
begin
cases primitive_element F E hs hfd,
sorry,
end |
Formal statement is: lemma nullhomotopic_into_contractible_space: assumes f: "continuous_map X Y f" and Y: "contractible_space Y" obtains c where "homotopic_with (\<lambda>h. True) X Y f (\<lambda>x. c)" Informal statement is: If $f$ is a continuous map from $X$ to a contractible space $Y$, then $f$ is nullhomotopic. |
[STATEMENT]
lemma correctCompositionKS_exprChannel_k_Q:
assumes "subcomponents PQ = {P,Q}"
and "correctCompositionKS PQ"
and "kKS key \<notin> LocalSecrets PQ"
and "ch \<in> ins Q"
and h1:"exprChannel ch (kE key)"
and "kKS key \<notin> specKeysSecrets PQ"
and "correctCompositionIn PQ"
shows "ch \<in> ins PQ \<and> exprChannel ch (kE key)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ch \<in> ins PQ \<and> exprChannel ch (kE key)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ch \<in> ins PQ \<and> exprChannel ch (kE key)
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
subcomponents PQ = {P, Q}
correctCompositionKS PQ
kKS key \<notin> LocalSecrets PQ
ch \<in> ins Q
exprChannel ch (kE key)
kKS key \<notin> specKeysSecrets PQ
correctCompositionIn PQ
[PROOF STEP]
have "ch \<notin> loc PQ"
[PROOF STATE]
proof (prove)
using this:
subcomponents PQ = {P, Q}
correctCompositionKS PQ
kKS key \<notin> LocalSecrets PQ
ch \<in> ins Q
exprChannel ch (kE key)
kKS key \<notin> specKeysSecrets PQ
correctCompositionIn PQ
goal (1 subgoal):
1. ch \<notin> loc PQ
[PROOF STEP]
by (simp add: LocalSecretsComposition_neg_loc_k)
[PROOF STATE]
proof (state)
this:
ch \<notin> loc PQ
goal (1 subgoal):
1. ch \<in> ins PQ \<and> exprChannel ch (kE key)
[PROOF STEP]
from this and assms
[PROOF STATE]
proof (chain)
picking this:
ch \<notin> loc PQ
subcomponents PQ = {P, Q}
correctCompositionKS PQ
kKS key \<notin> LocalSecrets PQ
ch \<in> ins Q
exprChannel ch (kE key)
kKS key \<notin> specKeysSecrets PQ
correctCompositionIn PQ
[PROOF STEP]
have "ch \<in> ins PQ"
[PROOF STATE]
proof (prove)
using this:
ch \<notin> loc PQ
subcomponents PQ = {P, Q}
correctCompositionKS PQ
kKS key \<notin> LocalSecrets PQ
ch \<in> ins Q
exprChannel ch (kE key)
kKS key \<notin> specKeysSecrets PQ
correctCompositionIn PQ
goal (1 subgoal):
1. ch \<in> ins PQ
[PROOF STEP]
by (simp add: correctCompositionIn_def)
[PROOF STATE]
proof (state)
this:
ch \<in> ins PQ
goal (1 subgoal):
1. ch \<in> ins PQ \<and> exprChannel ch (kE key)
[PROOF STEP]
from this and h1
[PROOF STATE]
proof (chain)
picking this:
ch \<in> ins PQ
exprChannel ch (kE key)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
ch \<in> ins PQ
exprChannel ch (kE key)
goal (1 subgoal):
1. ch \<in> ins PQ \<and> exprChannel ch (kE key)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ch \<in> ins PQ \<and> exprChannel ch (kE key)
goal:
No subgoals!
[PROOF STEP]
qed |
function cicros(opt)
% This subroutine "circle" selects the Ni closest earthquakes
% around an interactively selected point. Resets ZG.newcat and ZG.newt2
% Operates on "primeCatalog".
%
% axis: h1
% plots to: plos1 as xk
% inCatalog: a
% outCatalog: newt2, newcat, newa, newa2
% mouse controlled
% closest events OR radius
% calls: bdiff
%
% Input Ni:
%
persistent ic
report_this_filefun();
ZG=ZmapGlobal.Data;
if opt==0 && ~isempty(ic)
opt=ic;
end
axes(h1)
titStr ='Selecting EQ in Circles ';
messtext= ...
[' '
' Please use the LEFT mouse button '
' to select the center point. '
' The "ni" events nearest to this point '
' will be selected and displayed in the map. '];
msg.dbdisp(messtext, titStr);
% Input center of circle with mouse
%
[xa0,ya0] = ginput(1);
stri1 = [ 'Circle: lon = ' num2str(xa0) '; lat= ' num2str(ya0)];
stri = stri1;
pause(0.1)
% calculate distance for each earthquake from center point
% and sort by distance
%
l = sqrt(((xsecx' - xa0)).^2 + ((xsecy + ya0)).^2) ;
[s,is] = sort(l);
ZG.newt2 = newa(is(:,1),:) ;
switch(ic)
case 1 % select N closest events
l = sort(l);
messtext = ['Radius of selected Circle:' num2str(l(ni)) ' km' ];
disp(messtext)
msg.dbdisp(messtext, 'Message')
%
% take first ni and sort by time
%
ZG.newt2 = ZG.newt2(1:ni,:);
ZG.newt2.sort('Date');
%
% plot Ni closest events on map as 'x':
set(gca,'NextPlot','add')
[na,ma] = size(ZG.newt2);
plot(ZG.newt2(:,ma),-ZG.newt2.Depth,'xk','Tag','plos1');
set(gcf,'Pointer','arrow')
%
% plot circle containing events as circle
x = -pi-0.1:0.1:pi;
plot(xa0+sin(x)*l(ni), ya0+cos(x)*l(ni),'w')
l(ni)
%
ZG.newcat = ZG.newt2; % resets ZG.newcat and ZG.newt2
clear l s is
bdiff(ZG.newt2)
case 2 % select events within ra
l = sort(l);
ll = l <=ra;
messtext = ['Number of events in Circle :' num2str(sum(ll)) ];
disp(messtext)
msg.dbdisp(messtext, 'Message')
%
% take first ni and sort by time
%
ZG.newt2 = ZG.newt2.subset(ll);
ZG.newt2.sort('Date');
%
% plot Ni closest events on map as 'x':
set(gca,'NextPlot','add')
[na,ma] = size(ZG.newt2);
plot(ZG.newt2(:,ma),-ZG.newt2.Depth,'xk','Tag','plos1');
set(gcf,'Pointer','arrow')
%
% plot circle containing events as circle
x = -pi-0.1:0.1:pi;
plot(xa0+sin(x)*ra, ya0+cos(x)*ra,'w')
l(ni)
%
ZG.newcat = ZG.newt2; % resets ZG.newcat and ZG.newt2
clear l s is
bdiff(ZG.newt2)
case 3 % select events within polygon
ax=findobj(gcf,'Tag','mainmap_ax');
[x,y, mouse_points_overlay] = select_polygon(ax);
plot(x,y,'b-');
YI = -newa(:,7); % this substitution just to make equation below simple
XI = newa(:,end);
ll = polygon_filter(x,y, XI, YI, 'inside');
%plot the selected eqs and mag freq curve
newa2 = newa.subset(ll);
ZG.newt2 = newa2;
ZG.newcat = newa.subset(ll);
pl = plot(newa2(:,end),-newa2(:,7),'xk');
set(pl,'MarkerSize',5,'LineWidth',1)
bdiff(newa2)
otherwise
error('no option specified. 1: N closest events, 2: events within radius, 3: polygon')
end
ic=opt;
end
|
(* Forward chaining of applications, to facilitate "saturating" the known facts
without specializing. See Clément's thesis
http://pit-claudel.fr/clement/MSc/#org036d20e for a nicer explanation.
*)
Inductive Learnt {P:Prop} :=
| AlreadyLearnt (H:P).
Local Ltac learn_fact H :=
let P := type of H in
lazymatch goal with
(* matching the type of H with the Learnt hypotheses means the
learning fails even when the proposition is known by a different
but unifiable type term *)
| [ Hlearnt: @Learnt P |- _ ] =>
fail 0 "already knew" P "through" Hlearnt
| _ => pose proof H; pose proof (AlreadyLearnt H)
end.
Tactic Notation "learn" constr(H) := learn_fact H.
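(* A minimal usage sketch (hypothetical goal, not from the original
development): the first [learn] records the fact; repeating it fails
instead of adding a duplicate. *)
Goal forall n : nat, n = n -> True.
Proof.
  intros n H.
  learn H.       (* adds a copy of H plus an [AlreadyLearnt] marker *)
  Fail learn H.  (* fails with "already knew (n = n)" *)
  exact I.
Qed.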
|
Inductive list (X : Type) : Type :=
| nil
| cons (x : X) (l : list X).
Definition natlist := cons nat.
Definition boollist := cons bool.
Check natlist 1 (nil nat).
Check boollist true (nil bool).
Fixpoint repeat (X : Type) (x : X) (count : nat) : list X :=
match count with
| O => nil X
| S count' => cons X x (repeat X x count')
end.
Example test_repeat :
repeat nat 4 2 = cons nat 4 (cons nat 4 (nil nat)).
Proof.
reflexivity.
Qed.
Example test_repeat2 :
repeat bool false 1 = cons bool false (nil bool).
Proof.
reflexivity.
Qed.
Module MumbleGrumble.
Inductive mumble : Type :=
| a
| b (x : mumble) (y : nat)
| c.
Inductive grumble (X : Type) : Type :=
| d (m : mumble)
| e (x : X).
Fixpoint repeat' X x count : list X :=
match count with
| 0 => nil X
| S count' => cons X x (repeat' X x count')
end.
Check repeat'.
Fixpoint repeat'' X x count : list X :=
match count with
| 0 => nil _
| S count' => cons _ x (repeat'' _ x count')
end.
Arguments nil {X}.
Arguments cons {X}.
Arguments repeat {X}.
Definition list123'' := cons 1 (cons 2 (cons 3 nil)).
Check list123''.
Fixpoint repeat''' {X : Type} (x : X) (count : nat) : list X :=
match count with
| 0 => nil
| S count' => cons x (repeat''' x count')
end.
Inductive list' {X : Type} : Type :=
| nil'
| cons' (x : X) (l : list').
Fixpoint app {X : Type} (l1 l2 : list X) : list X :=
match l1 with
| nil => l2
| cons h t => cons h (app t l2)
end.
Fixpoint rev {X : Type} (l : list X) : list X :=
match l with
| nil => nil
| cons h t => app (rev t) (cons h nil)
end.
Eval compute in rev (cons 2 (cons 1 nil)).
Fixpoint length {X: Type} (l : list X) : nat :=
match l with
| nil => 0
| cons h t => S (length t)
end.
Eval compute in length (cons 1 (cons 2 nil)).
Check @nil.
Definition mynil' := @nil nat.
Notation "x :: y" := (cons x y)
(at level 60, right associativity).
Notation "[ ]" := nil.
Notation "[ x ; .. ; y ]" := (cons x .. (cons y []) ..).
Notation "x ++ y" := (app x y)
(at level 60, right associativity).
Theorem app_nil_r : forall (X : Type), forall l : list X,
l ++ [] = l.
Proof.
intros. induction l.
- reflexivity.
- simpl. rewrite IHl. reflexivity.
Qed.
Theorem app_assoc : forall A (l m n : list A),
l ++ m ++ n = (l ++ m) ++ n.
Proof.
intros. induction l as [| x l1' IHl1'].
- reflexivity.
- simpl. rewrite IHl1'. reflexivity.
Qed.
Lemma app_length : forall (X : Type) (l1 l2 : list X),
length (l1 ++ l2) = length l1 + length l2.
Proof.
intros. induction l1 as [| n l1' IHl1'].
- reflexivity.
- simpl. rewrite IHl1'. reflexivity.
Qed.
Theorem rev_app_distr : forall X (l1 l2 : list X),
rev (l1 ++ l2) = rev l2 ++ rev l1.
Proof.
intros. induction l1 as [| n l1' IHl1'].
- rewrite app_nil_r. simpl. reflexivity.
- simpl. rewrite IHl1'. rewrite <- app_assoc. reflexivity.
Qed.
Theorem rev_involutive : forall X : Type, forall l : list X,
rev (rev l) = l.
Proof.
intros. induction l as [| n l' IHl'].
- reflexivity.
- simpl. rewrite rev_app_distr. rewrite IHl'. reflexivity.
Qed.
Inductive prod (X Y : Type) : Type :=
| pair (x : X) (y : Y).
Arguments pair {X} {Y}.
(* Notations needed for the (x, y) patterns and the X * Y types below
to parse: *)
Notation "( x , y )" := (pair x y).
Notation "X * Y" := (prod X Y) : type_scope.
Definition fst {X Y : Type} (p : X * Y) : X :=
match p with
| (x, y) => x
end.
Definition snd {X Y : Type} (p: X * Y) : Y :=
match p with
| (x, y) => y
end.
Fixpoint combine {X Y : Type} (lx : list X) (ly : list Y) : list (X*Y) :=
match lx, ly with
| [], _ => []
| _, [] => []
| x :: tx, y :: ty => (x,y) :: (combine tx ty)
end.
Compute combine [1;2;3] [true;false;false;true].
Check @combine.
|
We are actively seeking a Part-Time Licensed Psychologist (Ph.D or Psy.D) to join our Team working 10 hours per week at Pulaski State Prison located in Hawkinsville, GA. Hawkinsville is approximately 30 miles from Warner Robins and 40 miles from Dublin. Pulaski State Prison houses approximately 1,200 female inmates and is a medium security facility servicing level II mental health needs.
FULL TIME (4 days per week) COMING SOON!!
This is a part-time position offering a flexible weekday schedule. No weekends or on-call required!
Provide psychological leadership, consultation and direct services. Collaborate with multidisciplinary team in providing assessment and treatment of mental and emotional disorders of patients.
Ph.D. or Psy.D. from an accredited institution required; Ph.D. or Psy.D. in Clinical Psychology preferred.
Must have an active Georgia Psychologist License, or pending.
Active CPR Certification and current PPD/chest x-ray.
Two years' experience providing comprehensive psychological services preferred.
Corrections experience preferred but not required. |
REBOL [
Title: "Compilation directives processing"
Author: "Nenad Rakocevic"
File: %preprocessor.r
Tabs: 4
Rights: "Copyright (C) 2016-2018 Red Foundation. All rights reserved."
License: "BSD-3 - https://github.com/red/red/blob/master/BSD-3-License.txt"
]
Red [] ;-- make it usable by Red too.
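;-- Usage sketch (illustrative; follows the documented Red preprocessor
;-- conventions rather than anything defined in this file): a named macro
;-- is registered with #macro and expanded at compile time:
;--
;--     #macro make-double: func [n][n * 2]
;--     print make-double 3        ;-- expands to: print 6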
preprocessor: context [
exec: do [context [config: none]] ;-- object that captures directive words
protos: make block! 10
macros: [<none>]
stack: make block! 10
syms: make block! 20
depth: 0 ;-- track depth of recursive macro calls
active?: yes
trace?: no
s: none
do-quit: does [
case [
all [rebol system/options/args][quit/return 1]
all [not rebol system/console][throw/name 'halt-request 'console]
'else [halt]
]
]
throw-error: func [error [error!] cmd [issue!] code [block!] /local w][
prin ["*** Preprocessor Error in" mold cmd lf]
#either none? config [ ;-- config is none when preprocessor is applied to itself
error: disarm error
error/where: new-line/all reduce [cmd] no
foreach w [arg1 arg2 arg3][
set w either unset? get/any in error w [none][
get/any in error w
]
]
print [
"***" system/error/(error/type)/type #":"
reduce system/error/(error/type)/(error/id) newline
"*** Where:" mold/flat error/where newline
"*** Near: " mold/flat error/near newline
]
do-quit
][
error/where: new-line/all reduce [cmd] no
print form :error
either system/console [throw/name 'halt-request 'console][halt]
]
]
syntax-error: func [s [block! paren!] e [block! paren!]][
print [
"*** Preprocessor Error: Syntax error^/"
"*** Where:" trim/head mold/only copy/part s next e
]
do-quit
]
do-safe: func [code [block! paren!] /manual /with cmd [issue!] /local res t? src][
if t?: all [trace? not with][
print [
"preproc: matched" mold/flat copy/part get code/2 get code/3 lf
"preproc: eval macro" copy/part mold/flat body-of first code 80
]
]
#process off
if error? set/any 'res try code [throw-error :res any [cmd #macro] code]
#process on
if all [
manual
any [
(type? src: get code/2) <> type? get/any 'res
not same? head src head get/any 'res
]
][
print [
"*** Macro Error: [manual] macro not returning a position^/"
"*** Where:" mold code
]
do-quit
]
if t? [print ["preproc: ==" mold get/any 'res]]
either unset? get/any 'res [[]][:res]
]
do-code: func [code [block! paren!] cmd [issue!] /local p][
clear syms
parse code [any [
p: set-word! (unless in exec p/1 [append syms p/1])
| skip
]]
unless empty? syms [exec: make exec append syms none]
do-safe/with bind to block! code exec cmd
]
count-args: func [spec [block!] /block /local total pos][
total: either block [copy []][0]
parse spec [
any [
pos: [word! | lit-word! | get-word!] (
either block [append total type? pos/1] [total: total + 1]
)
| refinement! (return total)
| skip
]
]
total
]
arg-mode?: func [spec [block!] idx [integer!]][
pick count-args/block spec idx
]
func-arity?: func [spec [block!] /with path [path!] /block /local arity pos][
arity: either block [count-args/block spec] [count-args spec]
if path [
foreach word next path [
unless pos: find/tail spec to refinement! word [
print [
"*** Macro Error: unknown refinement^/"
"*** Where:" mold path
]
do-quit
]
either block
[append arity count-args/block pos]
[arity: arity + count-args pos]
]
]
arity
]
value-path?: func [path [path!] /local value i item selectable] [
selectable: make typeset! [
block! paren! path! lit-path! set-path! get-path!
object! port! error! map!
]
repeat i length? path [
set/any 'value either i = 1 [get/any first path][
set/any 'item pick path i
case [
get-word? :item [set/any 'item get/any to word! item]
paren? :item [set/any 'item do item]
]
either integer? :item [pick value item][select value :item]
]
unless find selectable type? get/any 'value [
path: copy/part path i
break
]
]
reduce [path get/any 'value]
]
fetch-next: func [code [block! paren!] /local i left item item2 value fn-spec path f-arity at-op? op-mode][
left: reduce [yes]
while [all [not tail? left not tail? code]] [
either not left/1 [ ;-- skip quoted argument
remove left
][
item: first code
f-arity: any [
all [ ;-- a ...
word? :item
any-function? set/any 'value get/any :item
func-arity?/block fn-spec: spec-of get/any :item
]
all [ ;-- a/b ...
path? :item
set/any [path value] value-path? :item
any-function? get/any 'value
func-arity?/block/with
fn-spec: spec-of :value
at :item length? :path
]
]
if at-op?: all [ ;-- a * b
1 < length? code
word? item2: second code
op? get/any :item2
] [
op-mode: arg-mode? spec-of get/any :item2 1
if all [f-arity op-mode = word!] [ ;-- check if function's lit/get-arg takes priority
at-op?: word! = arg-mode? fn-spec 1
]
]
case [
at-op? [ ;-- a * b
code: next code ;-- skip `a *` part
left/1: word! = arg-mode? spec-of get/any :item2 2
]
f-arity [ ;-- a ... / a/b ...
if op? get/any 'value [return skip code 2] ;-- starting with op is an error
remove left
repeat i length? f-arity [insert at left i word! = f-arity/:i]
]
not find [set-word! set-path!] type?/word item [ ;-- not a: or a/b:
remove left
]
]
];;either not left/1 [][
code: next code
]
code
]
eval: func [code [block! paren!] cmd [issue!] /local after expr][
after: fetch-next code
expr: copy/part code after
if trace? [print ["preproc:" mold cmd mold expr]]
expr: do-code expr cmd
if trace? [print ["preproc: ==" mold expr]]
reduce [expr after]
]
do-macro: func [name pos [block! paren!] arity [integer!] /local cmd saved p v res][
depth: depth + 1
saved: s
parse next pos [arity [s: macros | skip]] ;-- resolve nested macros first
cmd: make block! 1
append cmd name
insert/part tail cmd next pos arity
if trace? [print ["preproc: eval macro" mold cmd]]
p: next cmd
forall p [
switch type?/word v: p/1 [
word! [change p to lit-word! v]
path! [change/only p to lit-path! v]
]
]
if unset? set/any 'res do bind cmd exec [
print ["*** Macro Error: no value returned by" name "macro^/"]
do-quit
]
if trace? [print ["preproc: ==" mold :res]]
s: saved ;-- restored here as `do cmd` could call expand
s/1: :res
if positive? depth: depth - 1 [
saved: s
parse s [s: macros] ;-- apply macros to result
s: saved
]
s/1
]
register-macro: func [spec [block!] /local cnt rule p name macro pos valid? named?][
named?: set-word? spec/1
cnt: 0
rule: make block! 10
valid?: parse spec/3 [
any [
opt string!
opt block!
[word! (cnt: cnt + 1) | /local any word!]
opt [
p: block! :p into [some word!]
;(append/only rule make block! 1)
;some [p: word! (append last rule p/1)]
;(append rule '|)
;]
]
]
]
if any [not valid? all [not named? cnt <> 2]][
print [
"*** Macro Error: invalid specification^/"
"*** Where:" mold copy/part spec 3
]
do-quit
]
either named? [ ;-- named macro
repend rule [
name: to lit-word! spec/1
to-paren compose [change/part s do-macro (:name) s (cnt) (cnt + 1)]
to get-word! 's
]
append protos copy/part spec 4
][ ;-- pattern-matching macro
macro: do bind copy/part next spec 3 exec
append/only protos spec/4
repend rule [
to set-word! 's
bind spec/1 exec ;-- allow rule to reference exec's words
to set-word! 'e
to-paren compose/deep either all [
block? spec/3/1 find spec/3/1 'manual
][
[s: do-safe/manual [(:macro) s e]]
][
[s: change/part s do-safe [(:macro) s e] e]
]
to get-word! 's
]
]
pos: tail macros
either tag? macros/1 [remove macros][insert macros '|]
insert macros rule
new-line pos yes
exec: make exec protos
]
reset: func [job [object! none!]][
exec: do [context [config: job]]
clear protos
insert clear macros <none> ;-- required to avoid empty rule (causes infinite loop)
]
expand: func [
code [block! paren!] job [object! none!]
/clean
/local rule e pos cond value then else cases body keep? expr src saved file
][
either clean [reset job][exec/config: job]
#process off
rule: [
any [
s: macros
| 'routine 2 skip ;-- avoid overlapping with R/S preprocessor
| #system skip
| #system-global skip
| s: #include (
if active? [
either all [not Rebol system/state/interpreted?][
saved: s
attempt [expand load s/2 job] ;-- just preprocess it
s: saved
s/1: 'do
][
attempt [
src: red/load-source/hidden clean-path join red/main-path s/2
expand src job ;-- just preprocess it, real inclusion occurs later
]
]
]
)
| s: #include-binary [file! | string!] (
if active? [
either all [not Rebol system/state/interpreted?][
s/1: 'read/binary
if string? s/2 [s/2: to-red-file s/2]
][
file: either string? s/2 [to-rebol-file s/2][s/2]
file: clean-path join red/main-path file
change/part s read/binary file 2
]
]
)
| s: #if (set [cond e] eval next s s/1) :e [set then block! | (syntax-error s e)] e: (
if active? [either cond [change/part s then e][remove/part s e]]
) :s
| s: #either (set [cond e] eval next s s/1) :e
[set then block! set else block! | (syntax-error s e)] e: (
if active? [either cond [change/part s then e][change/part s else e]]
) :s
| s: #switch (set [cond e] eval next s s/1) :e [set cases block! | (syntax-error s e)] e: (
if active? [
body: any [select cases cond select cases #default]
either body [change/part s body e][remove/part s e]
]
) :s
| s: #case [set cases block! | e: (syntax-error s e)] e: (
if active? [
until [
set [cond cases] eval cases s/1
any [cond tail? cases: next cases]
]
either cond [change/part s cases/1 e][remove/part s e]
]
) :s
| s: #do (keep?: no) opt ['keep (keep?: yes)] [block! | (syntax-error s next s)] e: (
if active? [
pos: pick [3 2] keep?
if trace? [print ["preproc: eval" mold s/:pos]]
expr: do-code s/:pos s/1
if all [keep? trace?][print ["preproc: ==" mold expr]]
either keep? [s: change/part s :expr e][remove/part s e]
]
) :s
| s: #local [block! | (syntax-error s next s)] e: (
repend stack [negate length? macros tail protos]
change/part s expand s/2 job e
clear take/last stack
remove/part macros skip tail macros take/last stack
if tail? next macros [macros/1: <none>] ;-- re-inject a value to match (avoids infinite loops)
)
| s: #reset (reset job remove s) :s
| s: #trace [[
['on (trace?: on) | 'off (trace?: off)] (remove/part s 2) :s
] | (syntax-error s next s)]
| s: #process [[
'on (active?: yes remove/part s 2) :s
| 'off (active?: no remove/part s 2) :s [to #process | to end]
] | (syntax-error s next s)]
| s: #macro [
[set-word! | word! | lit-word! | block!]['func | 'function] block! block!
| (syntax-error s skip s 4)
] e: (
register-macro next s
remove/part s e
) :s
| pos: [block! | paren!] :pos into rule
| skip
]
]
#process on
unless Rebol [rule/1: 'while] ;-- avoid no-forward premature exit in Red (#3771)
parse code rule
code
]
set 'expand-directives func [ ;-- to be called from Red only
"Invokes the preprocessor on argument list, modifying and returning it"
code [block! paren!] "List of Red values to preprocess"
/clean "Clear all previously created macros and words"
/local job
][
job: system/build/config
either clean [expand/clean code job][expand code job]
]
] |
From mathcomp Require Import ssreflect ssrbool eqtype ssrnat seq.
From Coq Require Import ssrfun.
Require Import AutosubstSsr ARS Context.
Require Import Program.Equality.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Require Import ast.
Require Import smallstep.
Require Import confluence.
(* typing, in the style of type assignment *)
Notation "Gamma `_ i" := (dget Gamma i) (at level 2).
Reserved Notation "[ Gamma |- ]".
Reserved Notation "[ Gamma |- s :- A ]".
Inductive has_type : list term -> term -> term -> Prop :=
| ty_var Gamma x :
x < size Gamma ->
[ Gamma |- Var x :- Gamma`_x ]
| ty_tt Gamma :
[ Gamma |- TT :- TT ]
| ty_pi Gamma A B :
[ Gamma |- A :- TT ] ->
[ A :: Gamma |- B :- TT ] ->
[ Gamma |- Pi A B :- TT ]
| ty_fun Gamma A B s :
[ Gamma |- Pi A B :- TT ] ->
[ (Pi A B).[ren (+1)] :: A :: Gamma |- s :- B.[ren (+1)] ] ->
(* why +1? What is the orientation of Gamma and of the bound
* variables inside of it? It seems more reasonable to do A+1. *)
[ Gamma |- Fun s :- Pi A B ]
| ty_app Gamma A B s t :
[ Gamma |- s :- Pi A B ] ->
[ Gamma |- t :- A ] ->
[ Gamma |- App s t :- B.[t/] ]
| ty_cast Gamma A s :
[ Gamma |- s :- A ] ->
[ Gamma |- Cast s A :- A ]
(* with the reduction behaviour, some weirdness is
* allowed at the type level by this rule *)
| ty_conv Gamma A B s :
A === B ->
[ Gamma |- s :- A ] ->
[ Gamma |- s :- B ]
where "[ Gamma |- s :- A ]" := (has_type Gamma s A).
(* for progress *)
Lemma canonical_form_pi s P A B :
[ [::] |- s :- P ] -> value s -> P === Pi A B ->
exists s', s = Fun s'.
Proof.
elim=> [].
- move=> Gamma x si v eq. inv v.
- move=> Gamma v eq.
apply conv_sym in eq.
apply conv_pi_tt in eq => //.
- move=> Gamma A0 B0 _ ih1 _ ih2 v eq.
apply conv_sym in eq.
apply conv_pi_tt in eq => //.
- move=> Gamma A0 B0 s0 _ ih1 _ ih2 v eq.
exists s0 => //.
- move=> Gamma A0 B0 s0 t _ ih1 _ ih2 v eq. inv v.
- move=> Gamma A0 s0 _ ih v eq. inv v.
- move=> Gamma A0 B0 s0 eq1 _ ih v eq2.
apply: ih => //. eapply conv_trans.
exact: eq1. exact: eq2.
Qed.
Lemma ty_app' Gamma A B s t u :
[ Gamma |- s :- Pi A B ] ->
[ Gamma |- t :- A ] ->
u = B.[t/] ->
[ Gamma |- App s t :- u ].
Proof.
intros.
rewrite H1.
apply: ty_app.
eauto.
auto.
Qed.
Inductive context_ok : list term -> Prop :=
| ctx_nil :
[ [::] |- ]
| ctx_ncons Gamma A :
[ Gamma |- A :- TT ] ->
[ Gamma |- ] ->
[ A :: Gamma |- ]
where "[ Gamma |- ]" := (context_ok Gamma).
Lemma ty_evar Gamma (A : term) (x : var) :
A = Gamma`_x -> x < size Gamma -> [ Gamma |- Var x :- A ].
Proof. move->. exact: ty_var. Qed.
Lemma ty_eapp Gamma (A B C s t : term) :
C = B.[t/] ->
[ Gamma |- s :- Pi A B ] -> [ Gamma |- t :- A ] ->
[ Gamma |- App s t :- C ].
Proof. move=>->. exact: ty_app. Qed.
Notation "[ Gamma |- s ]" := [ Gamma |- s :- TT ].
Lemma ty_tt_wf Gamma : [ Gamma |- TT ].
Proof. exact: ty_tt. Qed.
Hint Resolve ty_tt_wf ty_tt.
Lemma ty_pi_wf Gamma A B :
[ Gamma |- A ] -> [ A :: Gamma |- B ] -> [ Gamma |- Pi A B ].
Proof. exact: ty_pi. Qed.
Notation "[ Delta |- sigma -| Gamma ]" :=
(forall x, x < size Gamma -> [ Delta |- sigma x :- (Gamma`_x).[(sigma : _ -> _)] ]).
Lemma ty_renaming xi Gamma Delta s A :
[ Gamma |- s :- A ] ->
(forall x, x < size Gamma -> xi x < size Delta) ->
(forall x, x < size Gamma -> (Gamma`_x).[ren xi] = Delta`_(xi x)) ->
[ Delta |- s.[ren xi] :- A.[ren xi] ].
Proof with eauto using has_type.
move=> tp. elim: tp xi Delta => {Gamma s A} /=.
- move=> Gamma x si xi Delta subctx ->...
- move=> Gamma xi Delta subctx eqn...
- move=> Gamma A B _ ih1 _ ih2 xi Delta subctx eqn.
apply: ty_pi. exact: ih1. asimpl. apply: ih2.
+ by move=> [//|x /subctx].
+ by move=> [_|x /eqn/=<-]; autosubst.
- move=> Gamma A B s _ ih1 _ ih2 xi Delta subctx eqn.
apply ty_fun. by apply ih1. asimpl.
replace (B.[ren (1 .: xi >>> (+2))]) with
(B.[ren (+1)].[ren (0 .: 1 .: xi >>> (+2))]) by asimpl => //.
apply ih2.
+ move=> [//|]. by move => [//|x /subctx].
+ move=> [|]. autosubst.
move=> [|n /eqn/=<-]; autosubst.
- move=> Gamma A B s t _ ih1 _ ih2 xi Delta subctx eqn //=.
apply: (@ty_eapp _ A.[ren xi] B.[up (ren xi)]). autosubst.
exact: ih1. exact: ih2.
- move=> Gamma A s _ ih xi Delta subctx eqn.
apply: ty_cast...
- move=> Gamma A B s eq _ ih xi Delta subctx eqn.
apply: (@ty_conv _ A.[ren xi] B.[ren xi]).
exact: conv_subst. exact: ih.
Qed.
Lemma weakening Gamma s A B :
[ Gamma |- s :- A ] -> [ B :: Gamma |- s.[ren (+1)] :- A.[ren (+1)] ].
Proof. move=> /ty_renaming. exact. Qed.
Lemma eweakening Gamma s s' A A' B :
s' = s.[ren (+1)] -> A' = A.[ren (+1)] ->
[ Gamma |- s :- A ] -> [ B :: Gamma |- s' :- A' ].
Proof. move=>->->. exact: weakening. Qed.
Lemma ty_ok Gamma :
[ Gamma |- ] -> forall x, x < size Gamma -> [ Gamma |- Gamma`_x ].
Proof.
elim=> // {Gamma} Gamma A tp _ ih [_|x /ih {tp} tp];
exact: weakening tp.
Qed.
Lemma sty_up sigma Gamma Delta A :
[ Delta |- sigma -| Gamma ] ->
[ A.[sigma] :: Delta |- up sigma -| A :: Gamma ].
Proof.
move=> stp [_//|x /stp tp] /=. apply: ty_evar => //=. autosubst.
apply: eweakening tp; autosubst.
Qed.
Lemma ty_subst sigma Gamma Delta s A :
[ Delta |- sigma -| Gamma ] -> [ Gamma |- s :- A ] ->
[ Delta |- s.[sigma] :- A.[sigma] ].
Proof.
move=> stp tp. elim: tp sigma Delta stp => {Gamma s A} /=.
- move=> Gamma x si sigma Delta stp.
exact: stp.
- move=> Gamma sigma Delta stp => //.
- move=> Gamma A B _ ih1 _ ih2 sigma Delta stp.
apply: ty_pi. exact: ih1. apply ih2. exact: sty_up.
- move=> Gamma A B s _ ih1 _ ih2 sigma Delta stp.
apply: ty_fun. by eapply ih1.
replace (B.[up sigma].[ren (+1)]) with
(B.[ren (+1)].[upn 2 sigma]) by asimpl => //.
apply: ih2 => x si.
rewrite <- fold_up_up.
replace ((Pi A.[sigma] B.[up sigma]).[ren (+1)]) with
((Pi A.[ren (+1)] B.[up (ren (+1))]).[up sigma]) by asimpl => //.
apply: sty_up => // => {x si} x si.
exact: sty_up.
- move=> Gamma A B s t _ ih1 _ ih2 sigma Delta stp.
apply: (@ty_eapp _ A.[sigma] B.[up sigma]). autosubst.
exact: ih1. exact: ih2.
- move=> Gamma A s _ ih sigma Delta stp.
apply: ty_cast. exact: ih.
- move=> Gamma A B s eq _ ih sigma Delta stp.
apply: (@ty_conv _ A.[sigma] B.[sigma]).
exact: conv_subst. exact: ih.
Qed.
Lemma ty_cut Gamma s t A B :
[ A :: Gamma |- s :- B ] -> [ Gamma |- t :- A ] ->
[ Gamma |- s.[t/] :- B.[t/] ].
Proof.
move=> /ty_subst h tp. apply: h => -[_|x leq]; asimpl => //.
exact: ty_var.
Qed.
Lemma ty_ctx_conv1 Gamma s A B C :
[ A :: Gamma |- s :- C ] -> B === A -> [ Gamma |- A :- TT ] ->
[ B :: Gamma |- s :- C ].
Proof.
move=> tp1 conv tp2. cut ([ B :: Gamma |- s.[ids] :- C.[ids] ]). autosubst.
apply: ty_subst tp1. move=> [_|x //=]. asimpl. eapply ty_conv.
apply: conv_subst conv.
eapply ty_var => //.
move=> le. asimpl. exact: ty_var.
Qed.
Lemma ty_ctx_conv2 Gamma s A1 A2 B1 B2 C :
[ A1 :: B1 :: Gamma |- s :- C ] -> A2 === A1 -> B2 === B1 ->
[ Gamma |- B1 :- TT ] -> [ B1 :: Gamma |- A1 :- TT ] ->
[ A2 :: B2 :: Gamma |- s :- C ].
Proof.
move=> tp1 conv1 conv2 tp2 tp3.
cut ([ A2 :: B2 :: Gamma |- s.[ids] :- C.[ids] ]).
autosubst.
apply: ty_subst tp1. move=> [_|[_|x]].
- asimpl. eapply ty_conv. apply: conv_subst conv1. exact: ty_var.
- asimpl. eapply ty_conv. apply: conv_subst conv2.
replace (B2.[ren (+2)]) with
([:: A2, B2 & Gamma]`_1) by autosubst.
exact: ty_var.
- move=> le. asimpl.
replace (ids x.+2) with
((Var x).[ren (+1)].[ren (+1)]) by autosubst.
replace (Gamma`_ x.[ren (+2)]) with
((Gamma`_ x).[ren (+1)].[ren (+1)]) by autosubst.
apply weakening. apply weakening.
exact: ty_var.
Qed.
Lemma ty_pi_inv Gamma A B :
[ Gamma |- Pi A B :- TT ] ->
[ Gamma |- A :- TT ] /\ [ A :: Gamma |- B :- TT ].
Proof.
move e:(Pi A B) => s tp. elim: tp A B e => //{Gamma s}.
- move=> Gamma A B tp1 _ tp2 _ A' B' [->->] => //.
- move=> Gamma A B s eq tp ih A' B' e.
subst. elim: (ih A' B') => // => tp1 tp2.
eauto using has_type.
Qed.
Lemma ty_fun_invX Gamma s A B C :
[ Gamma |- Fun s :- C ] -> (C === Pi A B) ->
[ (Pi A B).[ren (+1)] :: A :: Gamma |- s :- B.[ren (+1)] ].
Proof with eauto using conv_subst, conv_sym, conv_pi.
move e:(Fun s) => t tp eq. elim: tp A B s e eq => // {Gamma C t}.
- move => Gamma A B s tp1 ih1 tp2 ih2 A' B' s' [->].
move=> /inj_pi[e1 e2].
apply: ty_conv. apply: conv_subst e2.
apply: (ty_ctx_conv2 tp2)...
elim: (ty_pi_inv tp1) => //.
replace (TT) with (TT.[ren (+1)]) by autosubst.
by apply: weakening.
- move=> Gamma A B s eq1 tp1 ih A' B' s' e eq2. subst.
+ apply: ih => //.
exact: conv_trans eq1 eq2.
Qed.
Lemma ty_fun_inv Gamma s A B :
[ Gamma |- Fun s :- Pi A B ] ->
[ (Pi A B).[ren (+1)] :: A :: Gamma |- s :- B.[ren (+1)] ].
Proof. move=> tp. apply: (ty_fun_invX tp). apply: convR. Qed.
(* non bidirectional *)
Lemma nonbi :
[ nil |- App (Fun (Var 1)) TT :- TT ].
Proof.
intros.
apply: ty_app'.
Focus 2.
apply: ty_tt.
apply: ty_fun.
apply: ty_pi.
apply: ty_tt.
apply: ty_tt.
simpl.
apply: ty_var.
auto.
auto.
Qed.
Print nonbi.
|
section propositional
variables P Q R : Prop
------------------------------------------------
-- Double negation propositions:
------------------------------------------------
theorem doubleneg_intro :
P → ¬¬P :=
begin
intro hp,
intro hpn,
apply hpn,
assumption,
end
theorem doubleneg_elim :
¬¬P → P :=
begin
intro hnp,
by_contra hboom, -- how to use RAA (reductio ad absurdum)
apply hnp,
assumption,
end
theorem doubleneg_law :
¬¬P ↔ P :=
begin
split,
-- splitting the biconditional into its two implications
intro hnp,
by_contra hboom,
apply hnp,
assumption,
-- handling each of the two proofs separately
intro hp,
intro hpn,
apply hpn,
assumption,
end
------------------------------------------------
-- Commutativity of ∨,∧:
------------------------------------------------
theorem disj_comm :
(P ∨ Q) → (Q ∨ P) :=
begin
intro p_q,
cases p_q,
-- Case P
right,
assumption,
-- Case Q
left,
assumption,
end
theorem conj_comm :
(P ∧ Q) → (Q ∧ P) :=
begin
intro peq,
cases peq,
split,
-- Prove Q
assumption,
-- Prove P
assumption,
end
------------------------------------------------
-- Interdefinability propositions for →,∨:
------------------------------------------------
theorem impl_as_disj_converse :
(¬P ∨ Q) → (P → Q) :=
begin
intro Hp_q,
intro Hp,
cases Hp_q,
-- Case ¬P
have Hboom : false := Hp_q Hp,
contradiction,
-- Case Q
assumption,
end
theorem disj_as_impl :
(P ∨ Q) → (¬P → Q) :=
begin
intro Hp_q,
intro Np,
cases Hp_q,
-- Case P
have Hp : false := Np Hp_q,
contradiction,
-- Case Q
assumption,
end
------------------------------------------------
-- Contraposition propositions:
------------------------------------------------
theorem impl_as_contrapositive :
(P → Q) → (¬Q → ¬P) :=
begin
intro Hpq,
intro Nq,
intro Hp,
have Hq : Q := Hpq Hp,
contradiction,
end
theorem impl_as_contrapositive_converse :
(¬Q → ¬P) → (P → Q) :=
begin
intro HNqp,
intro Hp,
by_contra Hboom,
have HNp : ¬P := HNqp Hboom,
contradiction,
end
theorem contrapositive_law :
(P → Q) ↔ (¬Q → ¬P) :=
begin
split,
-- Prove the forward direction
intro Hpq,
intro Nq,
intro Hp,
have Hq : Q := Hpq Hp,
contradiction,
-- Prove the backward direction
intro HNqp,
intro Hp,
by_contra Hboom,
have HNp : ¬P := HNqp Hboom,
contradiction,
end
------------------------------------------------
-- The irrefutability of LEM:
------------------------------------------------
theorem lem_irrefutable :
¬¬(P∨¬P) :=
begin
intro NT,
have p_or_p : P ∨ ¬P,
right,
intro Hp,
apply NT,
left,
assumption,
apply NT,
assumption,
end
------------------------------------------------
-- Peirce's law
------------------------------------------------
theorem peirce_law_weak :
((P → Q) → P) → ¬¬P :=
begin
intro Hpqp,
intro HNp,
apply HNp,
have Hpq: P → Q,
intro Hp,
contradiction,
apply Hpqp,
assumption,
end
------------------------------------------------
-- Interdefinability propositions for ∨,∧:
------------------------------------------------
theorem disj_as_negconj :
P∨Q → ¬(¬P∧¬Q) :=
begin
intro Hp_or_q,
intro Hnot_p_and_q,
cases Hnot_p_and_q,
cases Hp_or_q,
-- Case P
have Hp : false := Hnot_p_and_q_left Hp_or_q,
assumption,
-- Case Q
contradiction,
end
theorem conj_as_negdisj :
P∧Q → ¬(¬P∨¬Q) :=
begin
intro Hp_and_q,
intro HNp_or_q,
cases Hp_and_q,
cases HNp_or_q,
-- Case ¬P
contradiction,
-- Case ¬Q
have Hp : false := HNp_or_q Hp_and_q_right,
assumption,
end
------------------------------------------------
-- De Morgan's laws for ∨,∧:
------------------------------------------------
theorem demorgan_disj :
¬(P∨Q) → (¬P ∧ ¬Q) :=
begin
intro HNp_or_q,
split,
-- Prove ¬P
intro Hp,
have p_or_q : P ∨ Q,
left,
assumption,
have Np_or_q : false := HNp_or_q p_or_q,
assumption,
-- Prove ¬Q
intro Hq,
have p_or_q : P ∨ Q,
right,
assumption,
apply HNp_or_q,
assumption,
-- Two different ways of closing this proof; contradiction could also have been used
end
theorem demorgan_disj_converse :
(¬P ∧ ¬Q) → ¬(P∨Q) :=
begin
intro Hnp_and_nq,
intro Hp_or_q,
cases Hnp_and_nq,
cases Hp_or_q,
-- Case P
apply Hnp_and_nq_left,
assumption,
-- Case Q
have Hboom : false := Hnp_and_nq_right Hp_or_q,
assumption,
-- Again, two ways to finish besides contradiction
end
theorem demorgan_conj :
¬(P∧Q) → (¬Q ∨ ¬P) :=
begin
intro HNp_and_q,
by_cases Hp : P,
-- Case P
left, -- Prove ¬Q
intro Hq,
apply HNp_and_q,
split,
-- Prove P
assumption,
-- Prove Q
assumption,
-- Case ¬P
right,
assumption,
end
theorem demorgan_conj_converse :
(¬Q ∨ ¬P) → ¬(P∧Q) :=
begin
intro Hnq_or_np,
intro Hp_and_q,
cases Hp_and_q,
cases Hnq_or_np,
-- Case ¬Q
apply Hnq_or_np,
assumption,
-- Case ¬P
have hboom : false := Hnq_or_np Hp_and_q_left,
assumption,
end
theorem demorgan_conj_law :
¬(P∧Q) ↔ (¬Q ∨ ¬P) :=
begin
split,
intro HNp_and_q,
by_cases Hp : P,
-- Case P
left, -- Prove ¬Q
intro Hq,
apply HNp_and_q,
split,
-- Prove P
assumption,
-- Prove Q
assumption,
-- Case ¬P
right,
assumption,
intro Hnq_or_np,
intro Hp_and_q,
cases Hp_and_q,
cases Hnq_or_np,
-- Case ¬Q
apply Hnq_or_np,
assumption,
-- Case ¬P
have hboom : false := Hnq_or_np Hp_and_q_left,
assumption,
end
theorem demorgan_disj_law :
¬(P∨Q) ↔ (¬P ∧ ¬Q) :=
begin
split,
intro HNp_or_q,
split,
-- Prove ¬P
intro Hp,
have p_or_q : P ∨ Q,
left,
assumption,
have Np_or_q : false := HNp_or_q p_or_q,
assumption,
-- Prove ¬Q
intro Hq,
have p_or_q : P ∨ Q,
right,
assumption,
apply HNp_or_q,
assumption,
intro Hnp_and_nq,
intro Hp_or_q,
cases Hnp_and_nq,
cases Hp_or_q,
-- Case P
apply Hnp_and_nq_left,
assumption,
-- Case Q
have Hboom : false := Hnp_and_nq_right Hp_or_q,
assumption,
end
------------------------------------------------
-- Distributivity propositions for ∨,∧:
------------------------------------------------
theorem distr_conj_disj :
P∧(Q∨R) → (P∧Q)∨(P∧R) :=
begin
intro Hp_and__q_or_r,
cases Hp_and__q_or_r,
cases Hp_and__q_or_r_right,
-- Case Q
left,
split,
assumption,
assumption,
-- Case R
right,
split,
assumption,
assumption,
end
theorem distr_conj_disj_converse :
(P∧Q)∨(P∧R) → P∧(Q∨R) :=
begin
intro Hand_or_and,
split,
-- Prove P
cases Hand_or_and,
-- Case P ∧ Q
cases Hand_or_and,
assumption,
-- Case P ∧ R
cases Hand_or_and,
assumption,
-- Prove Q ∨ R
cases Hand_or_and,
-- Case P ∧ Q
cases Hand_or_and,
left,
assumption,
-- Case P ∧ R
cases Hand_or_and,
right,
assumption,
end
theorem distr_disj_conj :
P∨(Q∧R) → (P∨Q)∧(P∨R) :=
begin
intro HP_or_and,
split,
-- Prove P ∨ Q
cases HP_or_and,
-- Case P
left,
assumption,
-- Case Q ∧ R
cases HP_or_and,
right,
assumption,
-- Prove P ∨ R
cases HP_or_and,
-- Case P
left,
assumption,
-- Case Q ∧ R
cases HP_or_and,
right,
assumption,
end
theorem distr_disj_conj_converse :
(P∨Q)∧(P∨R) → P∨(Q∧R) :=
begin
intro Hor_and_or,
cases Hor_and_or,
cases Hor_and_or_left,
-- Case P
left,
assumption,
-- Case Q
cases Hor_and_or_right,
-- Case P
left,
assumption,
right,
split,
-- Prove Q
assumption,
-- Prove R
assumption,
end
------------------------------------------------
-- Currying
------------------------------------------------
theorem curry_prop :
((P∧Q)→R) → (P→(Q→R)) :=
begin
intro Hp_and_q_imp_r,
intro Hp,
intro Hq,
apply Hp_and_q_imp_r,
split,
-- Prove P
assumption,
-- Prove Q
assumption,
end
theorem uncurry_prop :
(P→(Q→R)) → ((P∧Q)→R) :=
begin
intro Hp_imp_q_imp_r,
intro Hp_and_q,
cases Hp_and_q,
apply Hp_imp_q_imp_r,
assumption,
assumption,
end
------------------------------------------------
-- Reflexivity of →:
------------------------------------------------
theorem impl_refl :
P → P :=
begin
intro Hp,
assumption,
end
------------------------------------------------
-- Weakening and contraction:
------------------------------------------------
theorem weaken_disj_right :
P → (P∨Q) :=
begin
intro Hp,
left,
assumption,
end
theorem weaken_disj_left :
Q → (P∨Q) :=
begin
intro Hq,
right,
assumption,
end
theorem weaken_conj_right :
(P∧Q) → P :=
begin
intro Hp_and_q,
cases Hp_and_q,
assumption,
end
theorem weaken_conj_left :
(P∧Q) → Q :=
begin
intro Hp_and_q,
cases Hp_and_q,
assumption,
end
theorem conj_idempot :
(P∧P) ↔ P :=
begin
split,
intro p_and_p,
cases p_and_p,
assumption,
intro Hp,
split,
assumption,
assumption,
end
theorem disj_idempot :
(P∨P) ↔ P :=
begin
split,
intro Hp_or_p,
cases Hp_or_p,
assumption,
assumption,
intro Hp,
right,
assumption,
end
end propositional
----------------------------------------------------------------
section predicate
variable U : Type
variables P Q : U -> Prop
------------------------------------------------
-- De Morgan's laws for ∃,∀:
------------------------------------------------
theorem demorgan_exists :
¬(∃x, P x) → (∀x, ¬P x) :=
begin
intro Hn_exist_P,
intro x,
intro Hnp,
apply Hn_exist_P,
existsi x,
assumption,
end
theorem demorgan_exists_converse :
(∀x, ¬P x) → ¬(∃x, P x) :=
begin
intro H_all_np,
intro H_exist_P,
cases H_exist_P with u hu,
apply H_all_np u,
assumption,
end
theorem demorgan_forall :
¬(∀x, P x) → (∃x, ¬P x) :=
begin
intro Hn_all_p,
by_contradiction H_exist_nP,
exfalso,
apply Hn_all_p,
intro u,
by_contradiction HnP,
apply H_exist_nP,
existsi u,
assumption,
end
theorem demorgan_forall_converse :
(∃x, ¬P x) → ¬(∀x, P x) :=
begin
intro H_exist_nP,
cases H_exist_nP with u HnP,
intro H_all_P,
have Hu := H_all_P u,
apply HnP,
assumption,
end
theorem demorgan_forall_law :
¬(∀x, P x) ↔ (∃x, ¬P x) :=
begin
split,
intro Hn_all_p,
by_contradiction H_exist_nP,
exfalso,
apply Hn_all_p,
intro u,
by_contradiction HnP,
apply H_exist_nP,
existsi u,
assumption,
intro H_exist_nP,
cases H_exist_nP with u HnP,
intro H_all_P,
have Hu := H_all_P u,
apply HnP,
assumption,
end
theorem demorgan_exists_law :
¬(∃x, P x) ↔ (∀x, ¬P x) :=
begin
split,
intro Hn_exist_P,
intro x,
intro Hnp,
apply Hn_exist_P,
existsi x,
assumption,
intro H_all_np,
intro H_exist_P,
cases H_exist_P with u hu,
apply H_all_np u,
assumption,
end
------------------------------------------------
-- Interdefinability propositions for ∃,∀:
------------------------------------------------
theorem exists_as_neg_forall :
(∃x, P x) → ¬(∀x, ¬P x) :=
begin
intro H_exist_P,
cases H_exist_P with u HP,
intro H_all_nP,
have Hu := H_all_nP u,
apply Hu,
assumption,
end
theorem forall_as_neg_exists :
(∀x, P x) → ¬(∃x, ¬P x) :=
begin
intro H_all_P,
intro H_exist_nP,
cases H_exist_nP with u HP,
have Hu := H_all_P u,
contradiction,
end
theorem forall_as_neg_exists_converse :
¬(∃x, ¬P x) → (∀x, P x) :=
begin
intro Hn_exist_nP,
intro u,
by_contradiction Hboom,
apply Hn_exist_nP,
existsi u,
assumption,
end
theorem exists_as_neg_forall_converse :
¬(∀x, ¬P x) → (∃x, P x) :=
begin
intro Hn_forall_nP,
by_contradiction Hboom,
apply Hn_forall_nP,
intro u,
intro Hu,
apply Hboom,
existsi u,
assumption,
end
theorem forall_as_neg_exists_law :
(∀x, P x) ↔ ¬(∃x, ¬P x) :=
begin
split,
intro H_all_P,
intro H_exist_nP,
cases H_exist_nP with u HP,
have Hu := H_all_P u,
contradiction,
intro Hn_exist_nP,
intro u,
by_contradiction Hboom,
apply Hn_exist_nP,
existsi u,
assumption,
end
theorem exists_as_neg_forall_law :
(∃x, P x) ↔ ¬(∀x, ¬P x) :=
begin
split,
intro H_exist_P,
cases H_exist_P with u HP,
intro H_all_nP,
have Hu := H_all_nP u,
apply Hu,
assumption,
intro Hn_forall_nP,
by_contradiction Hboom,
apply Hn_forall_nP,
intro u,
intro Hu,
apply Hboom,
existsi u,
assumption,
end
------------------------------------------------
-- Distributivity propositions for quantifiers:
------------------------------------------------
theorem exists_conj_as_conj_exists :
(∃x, P x ∧ Q x) → (∃x, P x) ∧ (∃x, Q x) :=
begin
intro H_exist_p_and_q,
cases H_exist_p_and_q with u Hu,
cases Hu with HP HQ,
split,
-- Prove ∃x, P x
existsi u,
assumption,
-- Prove ∃x, Q x
existsi u,
assumption,
end
theorem exists_disj_as_disj_exists :
(∃x, P x ∨ Q x) → (∃x, P x) ∨ (∃x, Q x) :=
begin
intro H_exist_p_or_q,
cases H_exist_p_or_q with u Hu,
cases Hu,
-- Case P u
left,
existsi u,
assumption,
-- Case Q u
right,
existsi u,
assumption,
end
theorem exists_disj_as_disj_exists_converse :
(∃x, P x) ∨ (∃x, Q x) → (∃x, P x ∨ Q x) :=
begin
intro H_or_of_exists,
cases H_or_of_exists,
-- Case ∃x, P x
cases H_or_of_exists with u HP,
existsi u,
left,
assumption,
-- Case ∃x, Q x
cases H_or_of_exists with u HQ,
existsi u,
right,
assumption,
end
theorem forall_conj_as_conj_forall :
(∀x, P x ∧ Q x) → (∀x, P x) ∧ (∀x, Q x) :=
begin
intro H_all_p_and_q,
split,
-- Prove ∀x, P x
intro u,
have Hu := H_all_p_and_q u,
cases Hu with HP HQ,
assumption,
-- Prove ∀x, Q x
intro u,
have Hu := H_all_p_and_q u,
cases Hu with HP HQ,
assumption,
end
theorem forall_conj_as_conj_forall_converse :
(∀x, P x) ∧ (∀x, Q x) → (∀x, P x ∧ Q x) :=
begin
intro H_and_of_foralls,
cases H_and_of_foralls with H_all_P H_all_Q,
intro u,
split,
-- Prove P u
exact H_all_P u,
-- Prove Q u
exact H_all_Q u,
end
theorem forall_disj_as_disj_forall_converse :
(∀x, P x) ∨ (∀x, Q x) → (∀x, P x ∨ Q x) :=
begin
intro H_or_of_foralls,
intro u,
cases H_or_of_foralls,
-- Case ∀x, P x
left,
exact H_or_of_foralls u,
-- Case ∀x, Q x
right,
exact H_or_of_foralls u,
end
/- NOT THEOREMS --------------------------------
theorem forall_disj_as_disj_forall :
(∀x, P x ∨ Q x) → (∀x, P x) ∨ (∀x, Q x) :=
begin
end
theorem exists_conj_as_conj_exists_converse :
(∃x, P x) ∧ (∃x, Q x) → (∃x, P x ∧ Q x) :=
begin
end
---------------------------------------------- -/
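-- Why the two statements above fail (a sketch): over ℕ, take P = even and
-- Q = odd. Every number is even or odd, yet neither "all numbers are even"
-- nor "all numbers are odd" holds, refuting forall_disj_as_disj_forall.
-- Likewise, some number is even and some number is odd, but no number is
-- both, refuting exists_conj_as_conj_exists_converse.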
end predicate
|
import tactic.ring
import tactic.norm_num
/- #1 Formalizing propositions in English
Translate the following English statements
into formal propositions. We give an example
first. Then prove the proposition formally.
Finally, write an English language proof.
-/
/-
For any natural number, n, if n ≠ 0, then there
is another natural number, call it n', such that
n is the successor of n' (n = nat.succ n'). You
should see that n' will have to be one less than n.
-/
def p1a : Prop :=
∀ (n : ℕ), -- For any natural number n
n ≠ 0 → -- If n ≠ 0 then
∃ (n' : ℕ), -- there is some number n'
n = nat.succ n' -- such that n is one more than n'
/-
The proof is by case analysis on n. What you
need to know, again, is that there are only
two possible cases for a natural number, n. It
is either 0 or the successor of another number,
let's call it n'.
The first case contradicts our assumption that
n ≠ 0, and so can be dismissed. In the second
case, there's only one value for n' that will
satisfy the predicate, so give it as a witness
to the ∃ then prove it satisfies the predicate.
Note that Lean will use the notation, n.succ,
as a shorthand for nat.succ n.
-/
example : p1a := -- We are to prove p1a
begin
-- By the definition of p1a
unfold p1a,
-- We are to
show ∀ (n : ℕ), n ≠ 0 → (∃ (n' : ℕ), n = n' + 1),
-- To start let n be an arbitrary nat and assume n ≠ 0
assume n h,
-- It remains for us to ...
show ∃ (n' : ℕ), n = n' + 1,
-- solve for n' such that n = n' + 1
/-
The remaining proof is by case analysis
on n. The cases (by the definition of nat,
which we are about to see) are n = 0 and
n is the successor of a one-smaller number,
n'.
-/
cases n with x,
/- In the first case, we replace n with 0,
reducing h to 0 ≠ 0, which is a contradiction.
-/
contradiction,
-- In the second case, we need to ...
show ∃ (n' : ℕ), x.succ = n' + 1,
/-
Work hard to understand this goal. It requires
that we show that there exists a number n' such
that n'+1 = n = succ x. In other words, we need
n' such that n' + 1 = n = x + 1. It should now be
clear: x satisfies this equation, so we can use
it as the witness to prove the existential
proposition that remains to be proved.
-/
apply exists.intro x _,
/-
All that now remains to prove is the equality
(yielding the second argument to exists.intro).
-/
apply rfl,
end
/-
-/
/- #1A.
State and prove the proposition that there's some
natural number whose square is 144.
-/
-- Pick n=12 then show 144 = 12*12, by arithmetic
example : ∃ n, n*n = 144 :=
begin
apply exists.intro 12,
exact rfl,
end
/- #1B.
State and prove the proposition that there is
some string, s, such that s ++ "!" is the string,
"I love logic!." Note: In Lean, ++ is notation
for string.append, the function for gluing two
strings together into one.
-/
-- Proof strategy is same as previous problem
example : ∃ s, s ++ "!" = "I love logic!" :=
exists.intro "I love logic" rfl
-- Extra examples
example : ∃ s : string, s.length = 5 :=
exists.intro "Lean!" rfl
example : ∃ n, n*n + 5 = 30 := exists.intro 5 rfl
example : ∃ n : int, n*n = 1 := exists.intro 1 rfl
/-
There's an important lesson here: When you have
to prove an ∃ proposition, you have to *search*
for and come up with a witness that satisfies the
given predicate. In general, this can be hard.
It often involves doing a bunch of math and/or
other reasoning "on the side." Here for example,
you have to "search for" a string that satisfies
the predicate. It's easy in this case but often
isn't so trivial. The important point is that to
prove an ∃ proposition often involves solving a
problem, such as a set of equations, to find a
witness that will "work."
-/
example : ∃ n, n*n - 1 = 0 := exists.intro 1 rfl
/- #1C.
Formally state and prove the proposition that
there are three natural numbers, x, y, and z,
such that x*x + y*y = z*z. Hint: exists.intro
takes just one witness as a time, so you will
have to apply it more than once.
-/
-- example : ∃ (x y z : nat), x*x + y*y = z*z :=
example : ∃ x : ℕ, ∃ y, ∃ z, x*x + y*y = z*z :=
begin
/-
The key, simple, insight that unlocks a proof
of this theorem is to know that ∃ (x y z), ...
means ∃ (x), ∃ (y), ∃ (z), ... You thus need
to give a witness for x, then prove the rest,
which means giving a witness for y then proving
the rest, which means giving a witness for z
then proving the rest, which is finally done
by arithmetical simplification and reflexivity
of equality.
-/
apply exists.intro 3 _,
apply exists.intro 4 _,
apply exists.intro 5 _,
exact rfl,
end
/- #1D
Define a predicate called pythag_triple taking
three natural number arguments, x, y, and z,
yielding the proposition that x*x + y*y = z*z.
-/
def pythag_triple (x y z : ℕ) : Prop := x*x + y*y = z*z
#check pythag_triple 1 2 3
#reduce pythag_triple 3 4 5
/- #1E
State the proposition that there exist x, y, z,
natural numbers, that satisfy the pythag_triple,
predicate, then prove it. (Use "example : ...")
-/
-- Know how to use predicates to form propositions!
example : ∃ (x y z), pythag_triple x y z :=
begin
/-
The rest is a simple application of #1C.
-/
apply exists.intro 3,
apply exists.intro 4,
apply exists.intro 5,
-- unfold pythag_triple, -- not necessary
-- show 25 = 25,
exact rfl,
end
example : ∃ (x y z), z ≠ 5 ∧ pythag_triple x y z :=
begin
apply exists.intro 5,
apply exists.intro 12,
apply exists.intro 13,
unfold pythag_triple,
apply and.intro _ _,
-- prove 13 ≠ 5 by negation
{
show ¬ 13 = 5,
show 13 = 5 → false,
assume h,
cases h,
},
-- prove the remaining equality proposition
{
exact rfl,
}
end
-- REVIEW 12/7 stopped here
/- #2A
Define a predicate, (multiple_of n m), true
if and only if n is a multiple of m. Hint:
What has to be true for this to be the case?
-/
def multiple_of (n m : ℕ) := ∃ (k), n = m * k
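-- A quick sanity check (an added sketch): 12 is a multiple of 6,
-- with witness k = 2.
example : multiple_of 12 6 := exists.intro 2 rfl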
/- #2B
Using the predicate, multiple_of, state and
prove the proposition that every natural number
that is a multiple of 6 is also a multiple of
3. Hint: you can use "unfold multiple_of at h,"
to expand the definition of multiple_of in the
hypothesis, h (assuming you call it that).
Hint: Put the argument you will give to exists
intro in parentheses (needed for correct syntax).
Hint: You might end up with n = 3 * (2 * w)
as a goal. The "ring" tactic in Lean will
simplify this expression to n = 6 * w.
Before you do the work, let's talk a little
more about the "ring" tactic. First, where does
the name come from? Second, what does it do?
A "ring" in college-level algebra (and beyond)
is any set of values (such as natural numbers)
with + and * operations that satisfy the usual
rules of arithmetic (such as the distributive
laws, the associativity of + and *, etc). Not
only the natural numbers form a ring, but so
do polynomials, and many other kinds of math
objects as well.
The ring tactic is used to put any expression
involing any ring" into a "normal" form. What
"normal" means in this context is that if you
put two mathematically equivalent but different
expressions in normal form, then you get the
same "normalized" expression in both cases,
making it easy to compare them for equality.
So, in particular, if you want to know whether
a+(b+c)=(a+b)+c, with these "ring" expressions
on both sides of the = sign, put both expressions
in normal form and see if they are equal (which
again they are if + is addition in any "ring").
A good English translation of the use of the
ring tactic is "by basic algebra."
Here's an example. Is ℕ addition associative (and commutative)?
-/
example (n m k : ℕ) : n + (m + k) = (n + k) + m :=
begin
ring
end
/-
Whoa! It's that easy to prove facts like this about
addition? Yep, "by simple algebra." Thankfully
someone else has written this beautiful tactic so
you don't have to do all the algebra yourself.
-/
/-
As a small aside on Lean syntax, if a tactic script is
just one tactic long, you can use "by <tactic>" instead
of wrapping the tactic in a begin-end block.
-/
example (n m k : ℕ) : n + (m + k) = (n + k) + m := by ring
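-- Another quick check (our addition): ring also handles
-- distributivity in a single step.
example (a b c : ℕ) : a * (b + c) = a * b + a * c := by ring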
/-
Ok, with that background in place, let's return to the
problem we were discussing. Is it true that if any natural
number is a multiple of 6 then it's also a multiple of 3?
Before you even consider writing a proof, whether in Lean
or in English, figure out yourself whether the proposition
appears true or not. Try to prove it "mentally" first.
The key question here is, what does it even mean for a
number, n, to be a multiple of 6? Well, n is a multiple
of 6 if there's some number, say k, such that n = 6 * k,
right? Now you should be able to formally write, and then
prove, the proposition on the table. Is it true that for
any n, if n is a multiple of 6 then it's a multiple of 3?
Why, exactly? Well, there would have to be another number
that makes n when you multiply it by 3. The trick here is
to figure out what that number has to be so you can prove
that there does exist such a number.
-/
/-
We want to prove the proposition that if any n is a
multiple of 6 then it's also a multiple of 3. You are
now expected to understand how to write propositions
that use predicates that you yourselves have defined,
and of course to translate "every" and "if ... then"
into predicate logic.
-/
example : ∀ (n), multiple_of n 6 → multiple_of n 3 :=
begin
-- Assume n is arbitrary
intros n,
-- Assume in addition that it's a multiple of 6
assume h,
-- Unfold the definition of multiple_of in h for clarity
unfold multiple_of at h,
-- From h deduce that there's some w such that n = 6 * w
cases h with w pf,
-- By the definition of multiple_of we are to ...
unfold multiple_of,
-- Show
show ∃ (k : ℕ), n = 3 * k,
/-
Now the lesson about having to search
or solve for an appropriate witness is
the key! Given what you have to work with,
what number will satisfy the predicate of
the remaining existentially quantified
goal? Well if n = 6 * w and we want to
show that n = 3 * w' (where w' will be
our witness for the remaining proof),
what does w' have to be? You have to
solve a simple set of equations here
to answer the question.
n = 6 * w
= (3 * 2) * w
= 3 * (2 * w)
= 3 * w' where w' = 2 * w
That's it!
-/
apply exists.intro (2*w) _, -- figure out what witness to give!
/-
We have a proof that n = 6 * w and we need
to show that n = 3 * (2 * w). Formally speaking
3 * (2 * w) = (3 * 2) * w by the associativity
of multiplication, which then reduces to 6 * w,
and we already have a proof that n = 6 * w. We
can avoid having to explicitly apply low-level
arithmetical axioms and theorems by using the
"ring" tactic in Lean, as explained above. In
English, again, you can just say, "then by basic
arithmetic, n = 6 * w."
-/
ring,
/-
Which is proved by the fact that it's already
an assumption that n is a multiple of 6.
-/
assumption, -- and done. QED.
end
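/-
An optional aside (our addition, not part of the original exercises):
the same proof can be written as a single term. Destructuring the
hypothesis gives the witness w with n = 6 * w, and 2 * w then serves
as the witness for the conclusion. The syntax is a sketch; it assumes
Lean 3's anonymous-constructor patterns in λ.
-/
example : ∀ (n), multiple_of n 6 → multiple_of n 3 :=
λ n ⟨w, pf⟩, ⟨2 * w, by { rw pf, ring }⟩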
/- #2C.
Is it true that if n is a multiple of h, and h
is a multiple of k, that n is a multiple of k?
Formally state and then prove the proposition.
In writing this proof, you might need to use one
of the two fundamental axioms of equality, via
the "rewrite" tactic (abbreviated rw). Here again
is how it works:
If you've already proved/know, and so have in
your context, a proof of an equality, such as
h : m = k, and if m appears in your goal, then
you can replace the m with k by using "rw h".
The rewriting of m to k in your goal is justified
by the fact, pf, that m = k.
The rw tactic is applying the second axiom of
equality, which states that if m = k and you
have a proof, h : P m, for some predicate P,
then you can conclude that P k is true, too.
In other words, "you can substitute equal terms
for each other, and that is valid."
Similarly, if you have h : m = k and a goal with
*k* in it, you can rewrite the k to m using
rw <- h. The left pointing arrow indicates you
want to use the equality right to left rather
than left to right.
With all that in mind, here we go.
-/
-- Write the proposition involving your own predicates
example (n h k) : multiple_of n h → multiple_of h k → multiple_of n k :=
begin
-- We've already assumed that n h and k are arbitrary natural numbers
-- Now assume n is a multiple of h and h is a multiple of k.
assume monh mohk,
-- What remains is to ...
show multiple_of n k,
/-
From the fact n is a multiple of h we
can deduce that there's some natural
number, a witness to this fact, let's
call it w_nh, along with a proof that
it is a witness.
-/
cases monh with w_nh pf_nh,
/-
Similarly, we deduce a witness, w_hk,
to the fact that h is a multiple of k,
with a corresponding proof.
-/
cases mohk with w_hk pf_hk,
/-
Now you're at the point where you have
to do some algebra to figure out what
witness will work to prove the conclusion
of the conjecture (the proposition to be
proved).
From n = h * w_nh and h = k * w_hk we
deduce n = (k * w_hk) * w_nh. Then we can
see that by associativity of multiplication
this equals k * (w_hk * w_nh), so n is a
multiple of k, with witness w_hk * w_nh.
-/
apply exists.intro (w_nh * w_hk),
-- We can now expand the definition of n
rw pf_nh,
-- Then expand the definition of h
rw pf_hk,
/-
And finally "normalize" both sides, of
the equation (which in this case involves
applying associativity of multiplication)
to show that both sides are equal. I.e.,
"Finally, by simple arithmetic QED."
-/
ring,
end
/-
Next is practice with exists elimination (exists.elim)
-/
/- #3A
Formally state and prove that if everyone
who knows logic is cool and someone knows
logic, then someone is cool.
Once again the trick in proving an exists
(the conclusion of the implication) is to
"figure out" what/who to use as a witness.
In this case, before you even think about
using Lean, just think about what the logic
says.
Someone knows logic. The principle of exists
elimination says you can give that person a
name, e.g., w, and have a proof that w knows
logic. Furthermore, you can assume everyone
who knows logic is cool. *So you know at least
one person who must be cool* Who is it? Once
you answer this question, the proof, whether
in English or in Lean, is easy.
-/
/-
There are several ways to write the proposition
formally. In this solution, we introduce all of
the assumptions as arguments (before the colon),
implicitly creating the context in which the
rest of the proof is to be given.
-/
example
-- Represent people as objects of a type, Person
(Person : Type)
-- Represent "knows logic" as a predicate on people
(KnowsLogic : Person → Prop)
-- Similarly represent "is cool" as a predicate on people
(isCool : Person → Prop)
-- Formalize the idea that if anyone knows logic they are cool
(LogicMakesCool : ∀ (p), KnowsLogic p → isCool p)
-- Formalize the idea that someone knows logic
(SomeoneKnowsLogic : ∃ (p), KnowsLogic p) : -- here's the colon!
-- In this context show that there's someone who is cool
∃ (p), isCool p :=
begin
-- Someone knows logic, so give that person a name, w, and get a proof w knows logic
cases SomeoneKnowsLogic with w pf,
-- Now
show ∃ (p : Person), isCool p,
-- We know at least one person who must be cool
apply exists.intro w _,
/-
Finally since anyone who knows logic
is cool, and w knows logic, w must be
cool, by universal specialization (∀
elimination).
-/
apply LogicMakesCool w pf,
end
/- #3B
Formally state and prove the proposition that if
someone is not happy then not everyone is happy.
-/
example
-- Let's talk about people
(Person : Type)
-- And that people can be happy
(Happy : Person → Prop) :
/-
In this context, show that if there is
someone who is unhappy (there must now
be at least one!) then it's not true
that everyone is happy.
-/
(∃ (p), ¬(Happy p)) →
(¬ (∀ (p), Happy p))
:=
begin
-- Start by assuming that someone is not happy
intros h,
-- By exists elim, give them a name, w, and get a proof of their unhappiness
cases h with w w_unhappy,
-- The rest is a proof by negation. Assume that everyone *is* happy
assume everyone_happy,
-- Derive a proof that w is happy
let w_happy := everyone_happy w,
-- But that contradicts
contradiction,
end
/- #3C
Formally state and prove that stating that
everyone is happy is equivalent to saying
that no one is unhappy.
Hint: In one direction, you might need
to use classical reasoning; and remember
you can get a proposition (on which to do
classical case analysis) by applying a
predicate to the right arguments. And
a final hint: Sometimes you have to use
information you have to prove something
you don't yet have in order to make it
clear that there's a contradiction in
your set of assumptions.
-/
example
(Person : Type)
(Happy : Person → Prop) :
/-
Now prove this equivalence. Do use parens
around the two sides of the biconditional.
It never hurts logically to add parens to
be sure you're not writing a proposition
other than the one you mean to prove.
-/
(∀ (a), Happy a) ↔ (¬ ∃ (a), ¬ Happy a) :=
begin
/-
By iff introduction it will suffice to prove
the implication in each direction. We now do
that.
-/
apply iff.intro,
-- Forward direction
-- Assume everyone's happy
assume everyone_happy,
-- By negation: assume someone's unhappy and ...
assume someone_unhappy,
-- Prove that that's a contradiction
show false,
-- Someone's unhappy! Call them w and get a proof they're unhappy.
cases someone_unhappy with w w_unhappy,
-- Apply "everyone's happy" to w to prove w's happy
apply w_unhappy (everyone_happy w),
-- And that's a contradiction
-- Backward direction
-- Assume no one is unhappy, then ...
assume noone_unhappy,
-- Show that everyone is happy
show ∀ (a : Person), Happy a,
-- Let p be an arbitrary person, and ...
assume p,
-- Show that p is happy
/-
Now you're stuck. Or are you?
Constructively yes, classically no!
Assume a person can only be happy or
not happy!
-/
cases (classical.em (Happy p)) with p_happy p_unhappy,
-- Case #1: P is happy. By assumption.
assumption,
-- Case #2: P is unhappy.
/-
The trick here is to see that you have a proof
that p is unhappy and you also have a proof that
there's no one who's unhappy. There has to be a
contradiction there! All you need to do now is
to use the fact that p is unhappy to prove that
someone is unhappy. That makes the contradiction
explicit.
-/
let someone_unhappy := (exists.intro p p_unhappy),
/-
Sometimes you get a funny looking definition
at this point, but the key observation (please
look carefully!) is that you have a proof that
someone is unhappy and you have a proof that no
one is unhappy. That's a contradiction. QED.
-/
contradiction,
end
/- #3D
Formally state and prove the proposition
that if there doesn't exist an object of
some type T with some property, P, then
any object of that type has the property
¬P. Hint: Again we represent a "property"
of objects of a certain type as a predicate
taking objects of that type.
-/
example
(T : Type)
(P : T → Prop) :
(¬∃ (x), P x) →
∀ (x), ¬(P x) :=
begin
-- assume there's no x with property P
intros h,
-- let x be an arbitrary value
assume x,
-- by negation: assume P x, show contradiction
assume px,
show false,
-- from x and Px show ∃ x, P x
let ex_x_Px := exists.intro x px,
-- that's a direct contradiction
-- therefore ∀ x, ¬P x
contradiction,
end
/- #3E
Formally state and prove that if there's
an object with the property of "having
property P or property Q" then there's
an object with property P or there's an
object with property Q.
Remaining exercise: fill in English
comments for each step in the formal
proof then string them together with
appropriate language to render a great
natural language proof.
-/
example
(α : Type)
(P : α → Prop)
(Q : α → Prop):
(∃ (x), ((P x) ∨ (Q x))) →
(∃ (x), P x) ∨ (∃ (x), Q x) :=
begin
-- You: Comment script; extract natural language proof.
assume h,
cases h with w pworqw,
cases pworqw with pw qw,
left,
exact (exists.intro w pw),
right,
exact (exists.intro w qw),
end
|
From Mtac2 Require Import Base.
Import M.notations.
Definition timer : Prop. exact True. Qed.
Mtac Do (M.new_timer timer).
Definition unused_timer : Prop. exact True. Qed.
Mtac Do (M.new_timer unused_timer).
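(* [slow] performs 1000 trivial unifications, giving the timers below
   measurable work to accumulate. *)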
Definition slow := (mfix1 f (n : nat) : M unit :=
match n with
| S n => M.unify 1 1 UniCoq;; f n
| O => M.ret tt
end) 1000.
Mtac Do (
M.start_timer timer true;;
slow;;
M.stop_timer timer;;
M.print_timer timer
).
Mtac Do (M.print_timer timer).
Mtac Do (
M.start_timer timer false;;
slow;;
M.stop_timer timer;;
M.print_timer timer
).
Mtac Do (M.print_timer unused_timer). (* Should print 0.0 *) |
import Mathlib.Algebra.Group.Basic
import Mathlib.Tactic.Linarith
import Mathlib.GroupTheory.Subgroup.Basic
import Mathlib.GroupTheory.QuotientGroup
import Util.Arithmetic
import Util.Meta.Tactics
/-! Defines quasi-morphisms from an abelian group to ℤ and algebraic operations on them.
Reference(s):
1. http://web.science.mq.edu.au/~street/EffR.pdf
-/
/- Use absolute value notation defined in `Util.Arithmetic`. -/
open scoped Int.natAbs
/-! # Definition of `AlmostAdditive` and `AlmostHom` -/
variable {G : Type _}
section TypeDef
variable [Add G]
/-- A function from an AddCommGroup to ℤ is 'almost additive' if it
respects addition as a group homomorphism would within an error which
is bounded independently of the arguments. -/
def AlmostAdditive (f : G → ℤ) (bound : ℕ) :=
∀ g₁ g₂ : G, |f (g₁ + g₂) - f g₁ - f g₂| ≤ bound
/- Remark: we have used an `∃ ...` field rather than flattening out
with an additional `bound` field so that the same function with a
different bound is the same `AlmostHom`. This is necessary for
`AlmostHom` to be a lawful algebraic structure at all, since most of
the laws only hold for the functions, not for the bounds. -/
variable (G) in
/-- An `AlmostHom G` is a function from `G` to ℤ which is almost additive
(see `AlmostAdditive`). -/
structure AlmostHom where
toFun : G → ℤ
almostAdditive : ∃ bound : ℕ, AlmostAdditive toFun bound
instance : CoeFun (AlmostHom G) fun _ => G → ℤ where
coe := AlmostHom.toFun
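/- A quick sanity check (our addition, not from the source): a genuinely
additive map, such as the identity on ℤ, is almost additive with bound 0.
The tactic details are a sketch, assuming `linarith` and `simp` close the
arithmetic side goals. -/
example : AlmostAdditive (id : ℤ → ℤ) 0 := fun g₁ g₂ => by
  show |g₁ + g₂ - g₁ - g₂| ≤ 0
  rw [show g₁ + g₂ - g₁ - g₂ = 0 from by linarith]
  simp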
/-- An `AlmostHom` is determined by its underlying function. -/
@[ext]
theorem AlmostHom.ext
: {f₁ f₂ : AlmostHom G} → f₁.toFun = f₂.toFun → f₁ = f₂
| ⟨_f, _⟩, ⟨.(_f), _⟩, rfl => rfl
end TypeDef
/-! # Properties and structure of `AlmostAdditive`/`AlmostHom` -/
variable [AddCommGroup G]
/- Because we can no longer directly access the bound associated with
a quasi-morphism, we first prove lemmas assuming an AlmostAdditive
hypothesis. Then we bundle them up into lemmas taking an AlmostHom and
showing existential statements. -/
/- Perhaps we should automate this more, similar to `to_additive`. -/
/-- This is equivalent to binding `⟨bound, h⟩` to `f.almostAdditive`,
then returning the bound specified with the `using` clause (inferred
as `_` if not specified) with the proof being the given field of `h`
applied to the specified number of arguments (or to `..` if not
specified). -/
local syntax (name := __localWrapper)
"local_wrapper " ident (num)? (" using " term)? : term
macro_rules (kind := __localWrapper)
| `(local_wrapper $field:ident $[$args:num]? $[using $bound:term]?) => do
let secondTerm : Lean.Syntax.Term ← match args with
| some numArgs => `(h.$field $(.mkArray numArgs.getNat (←`(_)))*)
/- If the number of args to apply the field of `h` to is not
specified, use `..`. -/
| none => `(h.$field ..)
/- Unhygienic visible `bound` (if bound argument is provided) and captured `f`. -/
`(let ⟨$(if bound.isSome then
-- set_option hygiene false in ←`(bound)
Lean.mkIdent `bound
else
←`(bound)) , h⟩ := $(Lean.mkIdent `f).almostAdditive
/- `bound` is by default `_`, i.e., to be filled by unification. -/
⟨$(bound.getD <|← `(_)), $secondTerm⟩)
section AlmostProperties
namespace AlmostAdditive
variable ⦃f : G → ℤ⦄ ⦃bound : ℕ⦄ (h : AlmostAdditive f bound)
variable (m n : ℤ) (g : G)
lemma almost_additive : ∀ g₁ g₂ : G, |f (g₁ + g₂) - f g₁ - f g₂| ≤ bound := h
/-- An almost additive function `f` maps 0 to 0, up to an error at most the bound. -/
lemma almost_zero : |f 0| ≤ bound := -- by simpa using h.almost_additive 0 0
calc |f 0| = |f (0+0) - f 0 - f 0| := by rewrite [←Int.natAbs_neg]; congr 1
rewrite [add_zero]; linarith
_ ≤ bound := h.almost_additive 0 0
/-- An almost additive function `f` respects negation, up to an error at most
twice the bound. -/
lemma almost_neg : |f (-g) - -f g| ≤ bound * 2 :=
calc |f (-g) - (- (f g))|
≤ |f (-g) + f g - f 0| + |f 0|
:= by lax_exact Int.natAbs_add_le (f (-g) + f g - f 0) (f 0); linarith
_ = |f (-g + g) - f (-g) - f g| + |f 0|
:= by congr 1; rewrite [←Int.natAbs_neg, ←add_left_neg g]
congr; linarith
_ ≤ bound * 2
:= by linarith [h.almost_additive (-g) g, h.almost_zero]
/- First inequality proven in reference 1. -/
/-- An almost additive function `f` respects scaling by ℤ, up to an error
proportional to the scaling factor. -/
lemma almost_smul : |f (m • g) - m * f g| ≤ bound * (|m| + 1) := by
cases m <;> (rename_i m; induction m)
case ofNat.zero => simp; exact h.almost_zero
case ofNat.succ m hᵢ =>
rewrite [Int.ofNat_eq_coe, ofNat_zsmul] at hᵢ ⊢
/- Rewriting these somewhat deep subterms with 'calc' would
involve verbosely repeating the surroundings. -/
rewrite [show m.succ • g = g + m • g from AddMonoid.nsmul_succ ..,
show ↑(m.succ) * f g = f g + m * f g
by rewrite [Nat.succ_eq_add_one, Nat.cast_succ]; linarith]
calc |f (g + m • g) - (f g + m * f g)|
= |(f (g + m • g) - f g - f (m • g)) + (f (m • g) - m * f g)|
:= congrArg Int.natAbs <| by linarith
_ ≤ |f (g + m • g) - f g - f (m • g)| + |f (m • g) - m * f g|
:= Int.natAbs_add_le ..
_ ≤ bound + bound * (m + 1)
:= Nat.add_le_add (h.almost_additive ..) hᵢ
_ = bound * (m.succ + 1)
:= by linarith
case negSucc.zero =>
rewrite [show Int.negSucc Nat.zero = -1 by rfl]; simpa using h.almost_neg g
case negSucc.succ m hᵢ =>
conv => lhs; rewrite [←Int.negSucc_sub_one]
rewrite [sub_zsmul, one_zsmul, sub_mul, one_mul]
calc |f (Int.negSucc m • g + -g) - (Int.negSucc m * f g - f g)|
= |(-(f (Int.negSucc m • g) - f (Int.negSucc m • g + -g) - f g))
+ (f (Int.negSucc m • g) - Int.negSucc m * f g)|
:= congrArg Int.natAbs <| by linarith
_ ≤ |f (Int.negSucc m • g) - f (Int.negSucc m • g + -g) - f g|
+ |f (Int.negSucc m • g) - Int.negSucc m * f g|
:= by conv => rhs; arg 1; rewrite [←Int.natAbs_neg]
apply Int.natAbs_add_le
_ ≤ bound + bound * (|Int.negSucc m| + 1)
:= Nat.add_le_add (by -- change `f (-[m+1])` to `f (-[m+1] + -g + g)`
rewrite [←congrArg f <| neg_add_cancel_right ..]
apply h.almost_additive _ g)
hᵢ
_ = bound * (|Int.negSucc m.succ| + 1)
:= by simp only [Int.natAbs_negSucc]; linarith
/- Second inequality proven in reference 1, generalised to arbitrary abelian groups. -/
/-- A kind of commutativity of scaling by ℤ for almost additive functions, with
one scale factor before and another after applying the function. -/
-- private pending better choice of name
private lemma almost_smul_comm
: |n * f (m • g) - m * f (n • g)| ≤ bound * (|m| + |n| + 2) :=
calc |n * f (m • g) - m * f (n • g)|
≤ |f ((m * n) • g) - n * f (m • g)| + |f ((m * n) • g) - m * f (n • g)|
:= Int.triangle_ineq' ..
_ = |f (n • m • g) - n * f (m • g)| + |f (m • n • g) - m * f (n • g)|
/- TODO find a better syntax for this - same-line ·'s -/
:= by congr 3; (· rw [mul_zsmul']); (· rw [mul_zsmul])
_ ≤ bound * (|n| + 1) + bound * (|m| + 1)
/- In this case, writing `Nat.add_le_add` is easier than
specifying the almost_smul arguments for `linarith`. -/
:= by apply Nat.add_le_add <;> apply h.almost_smul
_ = bound * (|m| + |n| + 2) := by linarith
/-- `almost_smul_comm` specialised to almost additive functions on ℤ applied to 1.
This is equation (1) in the first reference. -/
-- private pending better choice of name above
private lemma almost_smul_comm_int
⦃f : ℤ → ℤ⦄ ⦃bound : ℕ⦄ (h : AlmostAdditive f bound) (m n : ℤ)
: |n * f m - m * f n| ≤ bound * (|m| + |n| + 2) := by
lax_exact h.almost_smul_comm m n 1 <;> rw [zsmul_int_one]
/- The following lemmas are useful in bounding compositions of quasi-morphisms. -/
/-- An almost additive function grows at most linearly (as a function of a scale
factor applied to its argument). -/
lemma linear_growth_upper_bound
: |f (n • g)| ≤ (bound + |f g|) * |n| + bound :=
calc |f (n • g)|
≤ |f (n • g) - n * f g| + |n * f g|
:= by lax_exact Int.natAbs_add_le (f (n • g) - n * f g) (n * f g); linarith
_ ≤ (bound + |f g|) * |n| + bound
:= by linarith [h.almost_smul n g, Int.natAbs_mul n (f g)]
/-- Lemma `linear_growth_upper_bound` specialised to functions on ℤ applied to 1. -/
lemma linear_growth_upper_bound_int
⦃f : ℤ → ℤ⦄ ⦃bound : ℕ⦄ (h : AlmostAdditive f bound) (n : ℤ)
: |f n| ≤ (bound + |f 1|) * |n| + bound := by
lax_exact h.linear_growth_upper_bound n 1; rw [zsmul_int_one]
/-- An almost additive function grows at least linearly (as a function of a scale
factor applied to its argument). -/
lemma linear_growth_lower_bound
: (|f g| - bound) * |n| - bound ≤ |f (n • g)| := by
rewrite [tsub_mul, Nat.sub_sub, ←Nat.mul_succ]
apply Nat.sub_le_of_le_add; rewrite [Nat.add_comm]
calc |f g| * |n|
= |n * f g| := by rw [Nat.mul_comm, Int.natAbs_mul]
_ ≤ |n * f g - f (n • g)| + |f (n • g)|
:= by lax_exact Int.natAbs_add_le (n * f g - f (n • g)) (f (n • g)); linarith
_ = |f (n • g) - n * f g| + |f (n • g)|
:= by congr 1; rewrite [←Int.natAbs_neg]
congr 1; linarith
_ ≤ bound * (|n| + 1) + |f (n • g)| := by linarith [h.almost_smul n g]
/-- Lemma `linear_growth_lower_bound` specialised to functions on ℤ applied to 1. -/
lemma linear_growth_lower_bound_int
⦃f : ℤ → ℤ⦄ ⦃bound : ℕ⦄ (h : AlmostAdditive f bound) (n : ℤ)
: (|f 1| - bound) * |n| - bound ≤ |f n| := by
lax_exact h.linear_growth_lower_bound n 1; rw [zsmul_int_one]
end AlmostAdditive
namespace AlmostHom
variable (f : AlmostHom G) (g : G) (m n : ℤ)
/- `bdd <expr>` says there is some `bound : ℕ` which |<expr>| is bounded by.
(Admittedly, this is tautological.)
`bdd <expr> for all (<bindings>)` expresses a uniform bound. -/
-- Why is there no way to say "exactly what ∀ accepts"?
local syntax (name := __existsBound) "bdd " term ("for all " bracketedBinder)? : term
macro_rules (kind := __existsBound)
| `(bdd $expr:term for all $binders:bracketedBinder) =>
`(∃ bound : ℕ, ∀ $binders, |$expr| ≤ bound)
| `(bdd $expr:term) => `(∃ bound : ℕ, |$expr| ≤ bound)
/-- An almost-homomorphism respects addition up to an error with a uniform bound. -/
lemma almost_additive : bdd f (g₁ + g₂) - f g₁ - f g₂ for all (g₁ g₂ : G) :=
local_wrapper almost_additive 0
/- Not useful, since we end up not saying anything useful about what the bound is.
lemma almost_zero : bdd f 0 :=
local_wrapper almost_zero 0
-/
/-- An almost-homomorphism respects negation up to an error with a uniform bound. -/
lemma almost_neg : bdd f (-g) - -f g for all (g : G) :=
local_wrapper almost_neg 0
/-- An almost-homomorphism respects scaling by ℤ up to an error with a bound
uniform with respect to the `G` argument. -/
lemma almost_smul : bdd f (m • g) - m * f g for all (g : G) :=
local_wrapper almost_smul 1
/-- A kind of commutativity of scaling by ℤ for almost-homomorphisms, with one
scale factor before and another after applying the function. -/
-- private pending better name
private lemma almost_smul_comm
: bdd n * f (m • g) - m * f (n • g) for all (g : G) :=
local_wrapper almost_smul_comm 2
/- Not useful, since we end up not saying anything useful about what the bound is.
private lemma almost_smul_comm_int (f : AlmostHom ℤ) (m n : ℤ)
: bdd n * f m - m * f n :=
local_wrapper almost_smul_comm_int
-/
/-- An almost-homomorphism grows at most linearly (as a function of a scale factor
applied to its argument). -/
lemma linear_growth_upper_bound
: ∃ a b : ℕ, ∀ n : ℤ, |f (n • g)| ≤ a * |n| + b :=
let ⟨_, h⟩ := f.almostAdditive
⟨_, _, h.linear_growth_upper_bound (g := g)⟩
/-- Lemma `linear_growth_upper_bound` specialised to domain ℤ applied to 1. -/
lemma linear_growth_upper_bound_int (f : AlmostHom ℤ)
: ∃ a b : ℕ, ∀ n : ℤ, |f n| ≤ a * |n| + b :=
let ⟨_, h⟩ := f.almostAdditive
⟨_, _, h.linear_growth_upper_bound_int⟩
/-- An almost-homomorphism grows at least linearly (as a function of a scale factor
applied to its argument). -/
lemma linear_growth_lower_bound
: ∃ a b : ℕ, ∀ n : ℤ, a * |n| - b ≤ |f (n • g)| :=
let ⟨_, h⟩ := f.almostAdditive
⟨_, _, h.linear_growth_lower_bound (g := g)⟩
/-- Lemma `linear_growth_lower_bound` specialised to domain ℤ applied to 1. -/
lemma linear_growth_lower_bound_int (f : AlmostHom ℤ)
: ∃ a b : ℕ, ∀ n : ℤ, a * |n| - b ≤ |f n| :=
let ⟨_, h⟩ := f.almostAdditive
⟨_, _, h.linear_growth_lower_bound_int⟩
end AlmostHom
end AlmostProperties
section AlgebraicStructure
namespace AlmostAdditive
variable ⦃f : G → ℤ⦄ ⦃bound : ℕ⦄ (h : AlmostAdditive f bound)
⦃f₁ : G → ℤ⦄ ⦃bound₁ : ℕ⦄ (h₁ : AlmostAdditive f₁ bound₁)
⦃f₂ : G → ℤ⦄ ⦃bound₂ : ℕ⦄ (h₂ : AlmostAdditive f₂ bound₂)
/-- The (pointwise) sum of almost additive functions is almost additive, with
bound the sum of their bounds. -/
protected theorem add : AlmostAdditive (f₁ + f₂) (bound₁ + bound₂) := fun x y =>
calc |f₁ (x + y) + f₂ (x + y) - (f₁ x + f₂ x) - (f₁ y + f₂ y)|
= |(f₁ (x + y) - f₁ x - f₁ y) + (f₂ (x + y) - f₂ x - f₂ y)|
:= congrArg Int.natAbs (by linarith)
_ ≤ bound₁ + bound₂ := by transitivity
· apply Int.natAbs_add_le
· linarith [h₁ x y, h₂ x y]
/-- The (pointwise) negation of an almost additive function is almost additive
with the same bound. -/
protected theorem neg : AlmostAdditive (-f) bound := fun x y =>
calc |(-f (x + y)) - (-f x) - (-f y)|
= |(-(-f (x + y) - (-f x) - (-f y)))| := by rw [Int.natAbs_neg]
_ = |f (x + y) - f x - f y| := congrArg Int.natAbs (by linarith)
_ ≤ bound := h ..
end AlmostAdditive
namespace AlmostHom
variable (f f₁ f₂ : AlmostHom G)
/- Haven't written local_wrapper to be able to destructure multiple
`AlmostAdditive` hypotheses yet. -/
/-- The sum of two almost-homomorphisms. It is simply the pointwise sum. -/
protected def add : AlmostHom G where
toFun := f₁ + f₂
almostAdditive :=
let ⟨_, h₁⟩ := f₁.almostAdditive
let ⟨_, h₂⟩ := f₂.almostAdditive
-- bound is filled in based on the proof :)
⟨_, AlmostAdditive.add h₁ h₂⟩
/-- Negation of an almost-homomorphism. It is simply the pointwise negation. -/
protected def neg : AlmostHom G where
toFun := -f
almostAdditive := local_wrapper neg 0
/-- Additive abelian group structure on `AlmostHom G` using pointwise operations. -/
instance : AddCommGroup (AlmostHom G) where
add := AlmostHom.add
add_comm := by intros; ext; apply Int.add_comm
add_assoc := by intros; ext; apply Int.add_assoc
zero := ⟨0, 0, fun _ _ => Nat.le_refl ..⟩
zero_add := by intros; ext; apply Int.zero_add
add_zero f := by intros; ext; apply Int.add_zero
neg := AlmostHom.neg
add_left_neg := by intros; ext; apply Int.add_left_neg
end AlmostHom
end AlgebraicStructure
section Quotient
/-- `Bounded f` states that `f` is bounded over all arguments. -/
def Bounded (f : G → ℤ) (bound : ℕ) := ∀ g : G, |f g| ≤ bound
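/- Sanity check (our addition, not from the source): a constant function
is bounded by the absolute value of its constant. -/
example (c : ℤ) : Bounded (fun _ : ℤ => c) |c| := fun _ => le_rfl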
/- We don't really need this, but we might as well prove it. -/
variable {f : G → ℤ} {bound : ℕ} in
/-- A bounded function G → ℤ is almost additive. -/
lemma Bounded.almost_additive (h : Bounded f bound)
: AlmostAdditive f (bound * 3) := fun g₁ g₂ =>
calc |f (g₁ + g₂) - f g₁ - f g₂|
≤ |f (g₁ + g₂)| + |(-f g₁)| + |(-f g₂)| := Int.natAbs_add_le₃ ..
_ ≤ bound * 3 := by linarith [(f g₁).natAbs_neg, (f g₂).natAbs_neg,
h (g₁ + g₂), h g₁, h g₂]
variable (G) in
/-- The subgroup of `AlmostHom G` consisting of bounded quasi-morphisms. -/
def boundedAlmostHoms : AddSubgroup (AlmostHom G) where
carrier := {f | ∃ bound : ℕ, Bounded f bound}
add_mem' {f₁ f₂} := fun ⟨bound₁, h₁⟩ ⟨bound₂, h₂⟩ => .intro _ fun g =>
calc |f₁ g + f₂ g| ≤ |f₁ g| + |f₂ g| := Int.natAbs_add_le ..
_ ≤ bound₁ + bound₂ := Nat.add_le_add (h₁ g) (h₂ g)
zero_mem' := ⟨0, fun _ => show |(0:ℤ)| ≤ 0 from Nat.le_refl 0⟩
neg_mem' {f} := fun ⟨bound, h⟩ => .intro _ fun g =>
calc |(-f g)| = |f g| := Int.natAbs_neg ..
_ ≤ bound := h g
variable (G) in
/-- Quasi-homomorphisms from an `AddCommGroup` `G` to ℤ.
This is the quotient of the `AlmostHom`s by the additive subgroup of bounded
functions. -/
abbrev QuasiHom := AlmostHom G ⧸ boundedAlmostHoms G
/-- The Eudoxus construction of the real numbers as quasi-homomorphisms from ℤ to ℤ. -/
abbrev EudoxusReal := QuasiHom ℤ
/- Typeclass inference won't unfold the definition of `QuasiHom`
automatically, so the instance must be defined manually. -/
end Quotient
|
Formal statement is: lemma (in linorder_topology) compact_attains_sup: assumes "compact S" "S \<noteq> {}" shows "\<exists>s\<in>S. \<forall>t\<in>S. t \<le> s" Informal statement is: If $S$ is a compact subset of a linearly ordered topological space, then $S$ has a maximum element. |
State Before: C : Type u
inst✝² : Category C
P Q R : C
f : P ⟶ Q
g : Q ⟶ R
inst✝¹ : StrongEpi f
inst✝ : StrongEpi g
⊢ ∀ ⦃X Y : C⦄ (z : X ⟶ Y) [inst : Mono z], HasLiftingProperty (f ≫ g) z State After: C : Type u
inst✝³ : Category C
P Q R : C
f : P ⟶ Q
g : Q ⟶ R
inst✝² : StrongEpi f
inst✝¹ : StrongEpi g
X✝ Y✝ : C
z✝ : X✝ ⟶ Y✝
inst✝ : Mono z✝
⊢ HasLiftingProperty (f ≫ g) z✝ Tactic: intros State Before: C : Type u
inst✝³ : Category C
P Q R : C
f : P ⟶ Q
g : Q ⟶ R
inst✝² : StrongEpi f
inst✝¹ : StrongEpi g
X✝ Y✝ : C
z✝ : X✝ ⟶ Y✝
inst✝ : Mono z✝
⊢ HasLiftingProperty (f ≫ g) z✝ State After: no goals Tactic: infer_instance |
Sarnia's grain elevator, which is the sixth largest currently operating in Canada, was built after the dredging of Sarnia Harbour in 1927. Two short years later, grain shipments had become an important part of Sarnia's economy. The grain elevator rises above the harbour, and next to it is the slip for the numerous bulk carriers and other ships that are part of the shipping industry that includes vessels from all over the world. The waterway between Detroit and Sarnia is one of the world's busiest, as indicated by the average of 78,943,900 tonnes (87,020,800 short tons; 77,697,100 long tons) of shipping that annually travelled the river going in both directions during the period 1993–2002. Lake freighters and oceangoing ships, which are known as "<unk>," pass up and down the river at the rate of about one every seven minutes during the shipping season. During this same period, the Paul M. Tellier Tunnel, which was named after the retired president of CN in 2004, was bored and began operation in 1995. It accommodates double-stacked rail cars and is located next to the original tunnel, which has been sealed.
|
import GMLInit.Data.Index.Basic
import GMLInit.Data.Index.Bind
import GMLInit.Data.Index.Map
protected abbrev List.prod {α β} (xs : List α) (ys : List β) : List (α × β) := xs.bind fun x => ys.map (Prod.mk x)
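/- Quick illustration (our addition, not from the source): `List.prod`
pairs each element of `xs` with every element of `ys`, in order. -/
example : List.prod [1, 2] [true, false]
    = [(1, true), (1, false), (2, true), (2, false)] := rfl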
namespace Index
variable {α β} {xs : List α} {ys : List β}
def prod : Index xs × Index ys → Index (List.prod xs ys)
| (i,j) => Index.bind (λ x => ys.map (Prod.mk x)) ⟨i, j.map (Prod.mk i.val)⟩
def unprod (k : Index (List.prod xs ys)) : Index xs × Index ys :=
match unbind (λ x => ys.map (Prod.mk x)) k with
| ⟨i,j⟩ => (i, j.unmap (Prod.mk i.val))
theorem unprod_prod (i : Index xs × Index ys) : unprod (prod i) = i := by
simp only [prod, unprod]
rw [unbind_bind, unmap_map]
theorem prod_unprod (k : Index (List.prod xs ys)) : prod (unprod k) = k := by
simp only [prod, unprod]
rw [map_unmap, bind_unbind]
theorem prod_eq_iff_eq_unprod (i : Index xs × Index ys) (k : Index (List.prod xs ys)) : prod i = k ↔ i = unprod k := by
constr
· intro h; rw [←h, unprod_prod]
· intro h; rw [h, prod_unprod]
theorem unprod_eq_iff_eq_prod (i : Index (List.prod xs ys)) (j : Index xs × Index ys) : unprod i = j ↔ i = prod j := by
constr
· intro h; rw [←h, prod_unprod]
· intro h; rw [h, unprod_prod]
def prodEquiv (xs ys : List α) : Equiv (Index xs × Index ys) (Index (List.prod xs ys)) where
fwd := prod
rev := unprod
spec := by
intros
constr
· intro | rfl => exact unprod_prod ..
· intro | rfl => exact prod_unprod ..
theorem val_prod (i : Index xs × Index ys) : (prod i).val = (i.fst.val, i.snd.val) := by
rw [prod, val_bind, val_map]
theorem val_unprod (i : Index (List.prod xs ys)) : ((unprod i).fst.val, (unprod i).snd.val) = i.val := by
rw [←prod_unprod i, val_prod, unprod_prod]
end Index
|
{-# LANGUAGE FlexibleContexts, NamedFieldPuns #-}
module School.App.CSVReader
( csvToBinary
, csvToMatrixDouble
, parseDoubles
, readCSV
) where
import Conduit ((.|), ConduitM, mapC, mapMC, sourceFileBS)
import qualified Data.ByteString as BS
import Data.ByteString.Conversion (fromByteString)
import Data.Void (Void)
import qualified Data.Conduit.Binary as CB
import School.FileIO.AppIO (AppIO, maybeToAppIO)
import School.FileIO.FileType (FileType(..))
import School.FileIO.FileHeader (FileHeader(..))
import School.FileIO.MatrixSink (matrixDoubleSink)
import School.Utils.Constants (binComma)
import Numeric.LinearAlgebra ((><), Matrix)
parseDoubles :: [BS.ByteString] -> Maybe [Double]
parseDoubles = mapM fromByteString
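-- For example (illustrative):
--   parseDoubles ["1.0", "2.5"] == Just [1.0, 2.5]
--   parseDoubles ["1.0", "oops"] == Nothing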
readCSV :: FilePath -> ConduitM ()
[BS.ByteString]
AppIO
()
readCSV path = sourceFileBS path
.| CB.lines
.| mapC (BS.split binComma)
csvToMatrixDouble :: FileHeader
-> ConduitM [BS.ByteString]
(Matrix Double)
AppIO
()
csvToMatrixDouble FileHeader { cols } =
mapC parseDoubles
.| mapMC (maybeToAppIO "Could not parse doubles")
.| mapC (1 >< cols)
csvToBinary :: FilePath
-> FilePath
-> FileHeader
-> ConduitM () Void AppIO ()
csvToBinary inPath outPath header =
readCSV inPath
.| csvToMatrixDouble header
.| matrixDoubleSink SM header outPath
|
Red Velvet Poke Cake - red velvet cake soaked in sweetened condensed milk and chocolate and topped with a quick homemade cream cheese frosting. SO good! Great for potlucks and the holidays.
I think Red Velvet Cake is a requirement for the holiday season. This Red Velvet Poke Cake is definitely required for any holiday celebration this season! OMG! This cake is out of this world delicious!
Poke Cakes are one of my favorite desserts because I can make them a few days ahead of time and they get better the longer they sit in the fridge. Letting it sit in the fridge overnight gives the cake time to soak up all that yummy chocolate and sweetened condensed milk. It is hard to wait overnight to eat the cake, but it really is better if you can muster up the will power to wait.
The star of this recipe might be the quick homemade Cream Cheese Frosting! OMG! It really puts this poke cake over the top!! This Red Velvet Poke Cake is going to be a staple at all of our holiday parties. It is great for potlucks or even a homemade gift. Bake the cake in a foil pan and pop a bow on top.
While cake is baking, mix milk and chocolate fudge topping until well blended. When the cake is done and while it's still hot, poke holes in it with a fork or straw. Pour milk mixture over the cake. Allow cake to cool completely.
Spread cream cheese frosting over the top. Chill.
I made this cake today to take to a friend who had a death in her family. It was so delicious that I am making it for a party for tomorrow night. Very rich and the icing is soooooooooooo good!
SO glad it was a success!!! That icing is amazing! I could just eat it straight off a spoon.
So true confession...I also ate the icing straight off the spoon!!!!!!!!!
I have now made this cake 3 times since you posted the recipe! Thanks for another keeper!
That is awesome!! SO glad you are enjoying it!!
Thank you Stephanie for your quick reply. Looking forward to making and eating !!
I made the cake following your recipe and it is wonderful! One more question please. Do you thank this would work in a Bundt pan. I would appreciate your thoughts. Thank you! |
lemma interior_ball [simp]: "interior (ball x e) = ball x e" |
module mod_usmv
! **********************************************************************
! Author : C. Voemel
! Date of last modification : 7.7.00
! Description : MV MULTIPLICATION, CHOOSES APPROPRIATE SUBROUTINES
! **********************************************************************
use representation_of_data
use properties
use mod_mbv
implicit none
interface usmv
module procedure iusmv
module procedure susmv
module procedure dusmv
module procedure cusmv
module procedure zusmv
end interface
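! Example use (illustrative only; assumes a valid double precision
! sparse matrix handle 'a' created elsewhere), computing y <- y + A*x:
! call usmv(a,x,y,ierr,ORIGIN_MATRIX,1.0d0)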
contains
! **********************************************************************
! **********************************************************************
subroutine iusmv (a,x,y,ierr,transa,alpha)
implicit none
integer, intent(in) :: a
integer , dimension(:), intent(in) :: x
integer , dimension(:), intent(inout) :: y
integer, intent(out) :: ierr
integer, intent(in), optional :: transa
integer , intent(in), optional :: alpha
integer , dimension(:), allocatable :: z
type(ispmat ), pointer :: dspmtx
integer :: transa_work
integer :: alpha_work
ierr = -1
if (present(transa)) then
transa_work = transa
else
transa_work = ORIGIN_MATRIX
end if
if (present(alpha)) then
alpha_work = alpha
else
alpha_work = 1
end if
if (alpha_work.eq. 0 ) then
!no matrix multiplication necessary
else
call accessdata_isp (dspmtx,a,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
allocate(z(size(y)),STAT=ierr)
if (ierr.ne.0) then
ierr = blas_error_memalloc
return
end if
select case(transa_work)
case(ORIGIN_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call rmbv_coo(dspmtx,x,z,ierr)
case('CSC')
call rmbv_csc(dspmtx,x,z,ierr)
case('CSR')
call rmbv_csr(dspmtx,x,z,ierr)
case('DIA')
call rmbv_dia(dspmtx,x,z,ierr)
case('BCO')
call rmbv_bco(dspmtx,x,z,ierr)
case('BSC')
call rmbv_bsc(dspmtx,x,z,ierr)
case('BSR')
call rmbv_bsr(dspmtx,x,z,ierr)
case('BDI')
call rmbv_bdi(dspmtx,x,z,ierr)
case('VBR')
call rmbv_vbr(dspmtx,x,z,ierr)
case default
ierr = blas_error_param
end select
case(TRANSP_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call lmbv_coo(dspmtx, (x),z,ierr)
case('CSC')
call lmbv_csc(dspmtx, (x),z,ierr)
case('CSR')
call lmbv_csr(dspmtx, (x),z,ierr)
case('DIA')
call lmbv_dia(dspmtx, (x),z,ierr)
case('BCO')
call lmbv_bco(dspmtx, (x),z,ierr)
case('BSC')
call lmbv_bsc(dspmtx, (x),z,ierr)
case('BSR')
call lmbv_bsr(dspmtx, (x),z,ierr)
case('BDI')
call lmbv_bdi(dspmtx, (x),z,ierr)
case('VBR')
call lmbv_vbr(dspmtx, (x),z,ierr)
case default
ierr = blas_error_param
end select
case default
ierr = blas_error_param
end select
if (ierr.ne.0) then
deallocate(z,STAT=ierr)
return
end if
if(transa_work.eq.ORIGIN_MATRIX) then
y = alpha_work * z + y
else
y = alpha_work * ( (z)) + y
end if
deallocate(z,STAT=ierr)
end if
ierr = 0
end subroutine iusmv
! **********************************************************************
! **********************************************************************
subroutine susmv (a,x,y,ierr,transa,alpha)
implicit none
integer, intent(in) :: a
real(KIND=sp) , dimension(:), intent(in) :: x
real(KIND=sp) , dimension(:), intent(inout) :: y
integer, intent(out) :: ierr
integer, intent(in), optional :: transa
real(KIND=sp) , intent(in), optional :: alpha
real(KIND=sp) , dimension(:), allocatable :: z
type(sspmat ), pointer :: dspmtx
integer :: transa_work
real(KIND=sp) :: alpha_work
ierr = -1
if (present(transa)) then
transa_work = transa
else
transa_work = ORIGIN_MATRIX
end if
if (present(alpha)) then
alpha_work = alpha
else
alpha_work = 1.
end if
if (alpha_work.eq. 0.0e0 ) then
!no matrix multiplication necessary
else
call accessdata_ssp (dspmtx,a,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
allocate(z(size(y)),STAT=ierr)
if (ierr.ne.0) then
ierr = blas_error_memalloc
return
end if
select case(transa_work)
case(ORIGIN_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call rmbv_coo(dspmtx,x,z,ierr)
case('CSC')
call rmbv_csc(dspmtx,x,z,ierr)
case('CSR')
call rmbv_csr(dspmtx,x,z,ierr)
case('DIA')
call rmbv_dia(dspmtx,x,z,ierr)
case('BCO')
call rmbv_bco(dspmtx,x,z,ierr)
case('BSC')
call rmbv_bsc(dspmtx,x,z,ierr)
case('BSR')
call rmbv_bsr(dspmtx,x,z,ierr)
case('BDI')
call rmbv_bdi(dspmtx,x,z,ierr)
case('VBR')
call rmbv_vbr(dspmtx,x,z,ierr)
case default
ierr = blas_error_param
end select
case(TRANSP_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call lmbv_coo(dspmtx, (x),z,ierr)
case('CSC')
call lmbv_csc(dspmtx, (x),z,ierr)
case('CSR')
call lmbv_csr(dspmtx, (x),z,ierr)
case('DIA')
call lmbv_dia(dspmtx, (x),z,ierr)
case('BCO')
call lmbv_bco(dspmtx, (x),z,ierr)
case('BSC')
call lmbv_bsc(dspmtx, (x),z,ierr)
case('BSR')
call lmbv_bsr(dspmtx, (x),z,ierr)
case('BDI')
call lmbv_bdi(dspmtx, (x),z,ierr)
case('VBR')
call lmbv_vbr(dspmtx, (x),z,ierr)
case default
ierr = blas_error_param
end select
case default
ierr = blas_error_param
end select
if (ierr.ne.0) then
deallocate(z,STAT=ierr)
return
end if
if(transa_work.eq.ORIGIN_MATRIX) then
y = alpha_work * z + y
else
y = alpha_work * ( (z)) + y
end if
deallocate(z,STAT=ierr)
end if
ierr = 0
end subroutine susmv
! **********************************************************************
! **********************************************************************
subroutine dusmv (a,x,y,ierr,transa,alpha)
implicit none
integer, intent(in) :: a
real(KIND=dp) , dimension(:), intent(in) :: x
real(KIND=dp) , dimension(:), intent(inout) :: y
integer, intent(out) :: ierr
integer, intent(in), optional :: transa
real(KIND=dp) , intent(in), optional :: alpha
real(KIND=dp) , dimension(:), allocatable :: z
type(dspmat ), pointer :: dspmtx
integer :: transa_work
real(KIND=dp) :: alpha_work
ierr = -1
if (present(transa)) then
transa_work = transa
else
transa_work = ORIGIN_MATRIX
end if
if (present(alpha)) then
alpha_work = alpha
else
alpha_work = 1.
end if
if (alpha_work.eq. 0.0d0 ) then
!no matrix multiplication necessary
else
call accessdata_dsp (dspmtx,a,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
allocate(z(size(y)),STAT=ierr)
if (ierr.ne.0) then
ierr = blas_error_memalloc
return
end if
select case(transa_work)
case(ORIGIN_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call rmbv_coo(dspmtx,x,z,ierr)
case('CSC')
call rmbv_csc(dspmtx,x,z,ierr)
case('CSR')
call rmbv_csr(dspmtx,x,z,ierr)
case('DIA')
call rmbv_dia(dspmtx,x,z,ierr)
case('BCO')
call rmbv_bco(dspmtx,x,z,ierr)
case('BSC')
call rmbv_bsc(dspmtx,x,z,ierr)
case('BSR')
call rmbv_bsr(dspmtx,x,z,ierr)
case('BDI')
call rmbv_bdi(dspmtx,x,z,ierr)
case('VBR')
call rmbv_vbr(dspmtx,x,z,ierr)
case default
ierr = blas_error_param
end select
case(TRANSP_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call lmbv_coo(dspmtx, (x),z,ierr)
case('CSC')
call lmbv_csc(dspmtx, (x),z,ierr)
case('CSR')
call lmbv_csr(dspmtx, (x),z,ierr)
case('DIA')
call lmbv_dia(dspmtx, (x),z,ierr)
case('BCO')
call lmbv_bco(dspmtx, (x),z,ierr)
case('BSC')
call lmbv_bsc(dspmtx, (x),z,ierr)
case('BSR')
call lmbv_bsr(dspmtx, (x),z,ierr)
case('BDI')
call lmbv_bdi(dspmtx, (x),z,ierr)
case('VBR')
call lmbv_vbr(dspmtx, (x),z,ierr)
case default
ierr = blas_error_param
end select
case default
ierr = blas_error_param
end select
if (ierr.ne.0) then
deallocate(z,STAT=ierr)
return
end if
if(transa_work.eq.ORIGIN_MATRIX) then
y = alpha_work * z + y
else
y = alpha_work * ( (z)) + y
end if
deallocate(z,STAT=ierr)
end if
ierr = 0
end subroutine dusmv
! **********************************************************************
! **********************************************************************
subroutine cusmv (a,x,y,ierr,transa,alpha)
implicit none
integer, intent(in) :: a
complex(KIND=sp) , dimension(:), intent(in) :: x
complex(KIND=sp) , dimension(:), intent(inout) :: y
integer, intent(out) :: ierr
integer, intent(in), optional :: transa
complex(KIND=sp) , intent(in), optional :: alpha
complex(KIND=sp) , dimension(:), allocatable :: z
type(cspmat ), pointer :: dspmtx
integer :: transa_work
complex(KIND=sp) :: alpha_work
ierr = -1
if (present(transa)) then
transa_work = transa
else
transa_work = ORIGIN_MATRIX
end if
if (present(alpha)) then
alpha_work = alpha
else
alpha_work = 1.
end if
if (alpha_work.eq. (0.0e0, 0.0e0) ) then
!no matrix multiplication necessary
else
call accessdata_csp (dspmtx,a,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
allocate(z(size(y)),STAT=ierr)
if (ierr.ne.0) then
ierr = blas_error_memalloc
return
end if
select case(transa_work)
case(ORIGIN_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call rmbv_coo(dspmtx,x,z,ierr)
case('CSC')
call rmbv_csc(dspmtx,x,z,ierr)
case('CSR')
call rmbv_csr(dspmtx,x,z,ierr)
case('DIA')
call rmbv_dia(dspmtx,x,z,ierr)
case('BCO')
call rmbv_bco(dspmtx,x,z,ierr)
case('BSC')
call rmbv_bsc(dspmtx,x,z,ierr)
case('BSR')
call rmbv_bsr(dspmtx,x,z,ierr)
case('BDI')
call rmbv_bdi(dspmtx,x,z,ierr)
case('VBR')
call rmbv_vbr(dspmtx,x,z,ierr)
case default
ierr = blas_error_param
end select
case(TRANSP_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call lmbv_coo(dspmtx,conjg (x),z,ierr)
case('CSC')
call lmbv_csc(dspmtx,conjg (x),z,ierr)
case('CSR')
call lmbv_csr(dspmtx,conjg (x),z,ierr)
case('DIA')
call lmbv_dia(dspmtx,conjg (x),z,ierr)
case('BCO')
call lmbv_bco(dspmtx,conjg (x),z,ierr)
case('BSC')
call lmbv_bsc(dspmtx,conjg (x),z,ierr)
case('BSR')
call lmbv_bsr(dspmtx,conjg (x),z,ierr)
case('BDI')
call lmbv_bdi(dspmtx,conjg (x),z,ierr)
case('VBR')
call lmbv_vbr(dspmtx,conjg (x),z,ierr)
case default
ierr = blas_error_param
end select
case default
ierr = blas_error_param
end select
if (ierr.ne.0) then
deallocate(z,STAT=ierr)
return
end if
if(transa_work.eq.ORIGIN_MATRIX) then
y = alpha_work * z + y
else
y = alpha_work * (conjg (z)) + y
end if
deallocate(z,STAT=ierr)
end if
ierr = 0
end subroutine cusmv
! **********************************************************************
! **********************************************************************
subroutine zusmv (a,x,y,ierr,transa,alpha)
implicit none
integer, intent(in) :: a
complex(KIND=dp) , dimension(:), intent(in) :: x
complex(KIND=dp) , dimension(:), intent(inout) :: y
integer, intent(out) :: ierr
integer, intent(in), optional :: transa
complex(KIND=dp) , intent(in), optional :: alpha
complex(KIND=dp) , dimension(:), allocatable :: z
type(zspmat ), pointer :: dspmtx
integer :: transa_work
complex(KIND=dp) :: alpha_work
ierr = -1
if (present(transa)) then
transa_work = transa
else
transa_work = ORIGIN_MATRIX
end if
if (present(alpha)) then
alpha_work = alpha
else
alpha_work = 1.
end if
if (alpha_work.eq. (0.0d0, 0.0d0) ) then
!no matrix multiplication necessary
else
call accessdata_zsp (dspmtx,a,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
allocate(z(size(y)),STAT=ierr)
if (ierr.ne.0) then
ierr = blas_error_memalloc
return
end if
select case(transa_work)
case(ORIGIN_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call rmbv_coo(dspmtx,x,z,ierr)
case('CSC')
call rmbv_csc(dspmtx,x,z,ierr)
case('CSR')
call rmbv_csr(dspmtx,x,z,ierr)
case('DIA')
call rmbv_dia(dspmtx,x,z,ierr)
case('BCO')
call rmbv_bco(dspmtx,x,z,ierr)
case('BSC')
call rmbv_bsc(dspmtx,x,z,ierr)
case('BSR')
call rmbv_bsr(dspmtx,x,z,ierr)
case('BDI')
call rmbv_bdi(dspmtx,x,z,ierr)
case('VBR')
call rmbv_vbr(dspmtx,x,z,ierr)
case default
ierr = blas_error_param
end select
case(TRANSP_MATRIX)
select case(dspmtx%FIDA)
case('COO')
call lmbv_coo(dspmtx,conjg (x),z,ierr)
case('CSC')
call lmbv_csc(dspmtx,conjg (x),z,ierr)
case('CSR')
call lmbv_csr(dspmtx,conjg (x),z,ierr)
case('DIA')
call lmbv_dia(dspmtx,conjg (x),z,ierr)
case('BCO')
call lmbv_bco(dspmtx,conjg (x),z,ierr)
case('BSC')
call lmbv_bsc(dspmtx,conjg (x),z,ierr)
case('BSR')
call lmbv_bsr(dspmtx,conjg (x),z,ierr)
case('BDI')
call lmbv_bdi(dspmtx,conjg (x),z,ierr)
case('VBR')
call lmbv_vbr(dspmtx,conjg (x),z,ierr)
case default
ierr = blas_error_param
end select
case default
ierr = blas_error_param
end select
if (ierr.ne.0) then
deallocate(z,STAT=ierr)
return
end if
if(transa_work.eq.ORIGIN_MATRIX) then
y = alpha_work * z + y
else
y = alpha_work * (conjg (z)) + y
end if
deallocate(z,STAT=ierr)
end if
ierr = 0
end subroutine zusmv
! **********************************************************************
! **********************************************************************
end module mod_usmv
|
theory DemoSetup
imports
BruteForceAutomation
"../HeapLang/Notation"
"../IrisCore/Misc"
"../IrisCore/AuthHeap"
begin
context
includes heap_syntax
fixes get_inv :: "gname \<Rightarrow> 'res :: ucamera \<Rightarrow> inv option"
and put_inv
and get_heap :: "gname \<Rightarrow> 'res \<Rightarrow> heap_lang_heap option"
and put_heap
and get_proph :: "gname \<Rightarrow> 'res \<Rightarrow> (proph_id, val\<times>val) proph_mapGS option"
and put_proph
assumes inv_inG[inv_inG_axiom]: "inG get_inv put_inv"
and heap_inG[heap_inG_axiom]: "inG get_heap put_heap"
and prophm_inG[proph_inG_axiom]: "inG get_proph put_proph"
begin
lemmas wp_inG[inG_axioms] = inv_inG heap_inG prophm_inG
definition diaframe_hint :: "'res upred_f \<Rightarrow> ('b \<Rightarrow> 'res upred_f) \<Rightarrow> ('res upred_f \<Rightarrow> 'res upred_f) \<Rightarrow> ('a \<Rightarrow> 'res upred_f) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'res upred_f) \<Rightarrow> bool" where
"diaframe_hint H L M A U \<equiv> \<forall>y::'b. (H \<^emph> L y \<turnstile> M (\<exists>\<^sub>u x::'a. A x \<^emph> U y x))"
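(* Informal reading (our gloss): from the hypothesis H together with a
   side condition L y, the modality M yields some x with the abduced
   atom A x and the update/remainder U y x. *)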
lemma hintE: "diaframe_hint H L M A U \<Longrightarrow> (\<And>y::'b. (H \<^emph> L y \<turnstile> M (\<exists>\<^sub>u x::'a. A x \<^emph> U y x)))"
unfolding diaframe_hint_def by simp
lemma inv_hint: "diaframe_hint upred_emp (\<lambda>x::'a. \<triangleright>(L x)) (ViewShift.linear_fupd put_inv E)
(\<lambda>x::'a. inv put_inv N (L x)) (\<lambda>_ x::'a. inv put_inv N (L x))"
apply (auto simp: diaframe_hint_def)
apply (iMod rule: inv_alloc[OF inv_inG, to_entailment])
apply iExistsR2
apply (rule upred_entails_trans[OF upred_entail_eqR[OF persistent_dupl]])
apply pers_solver apply (rule inv_inG)
apply (rule upred_frame)
by simp
lemma biabd_hint_apply_aux:
assumes "diaframe_hint H L (ViewShift.fancy_upd put_inv E3 E2) A U"
shows "H \<^emph> ViewShift.fancy_upd put_inv E1 E3 (\<exists>\<^sub>u y. L y \<^emph> (\<forall>\<^sub>u x. (U y x) -\<^emph> (G x)))
\<turnstile> ViewShift.fancy_upd put_inv E1 E2 (\<exists>\<^sub>u x. G x \<^emph> A x)"
apply (entails_substR rule: fupd_mask_trans[OF inv_inG, of _ E3])
apply (iMod rule: fupd_frame_r[OF inv_inG, where ?R=H])
apply iExistsL
apply (iApply rule: hintE[OF assms])
apply (iMod rule: fupd_frame_r[OF inv_inG, where ?R="(\<forall>\<^sub>ux. U _ x -\<^emph> G x)"])
apply iExistsL subgoal for y x
apply (iExistsR x)
apply (move_sepL "\<forall>\<^sub>u x::'b. ?P x")
apply (rule pull_forall_antecedent')
apply (rule upred_entails_trans[OF upred_forall_inst[of _ x]])
apply iApply_wand
by iFrame_single
done
lemma biabd_hint_apply:
assumes "diaframe_hint H L (ViewShift.fancy_upd put_inv E3 E2) A U"
"\<Delta> \<turnstile> ViewShift.fancy_upd put_inv E1 E3 (\<exists>\<^sub>u y. L y \<^emph> (\<forall>\<^sub>u x. (U y x) -\<^emph> (G x)))"
shows "\<Delta> \<^emph> H \<turnstile> ViewShift.fancy_upd put_inv E1 E2 (\<exists>\<^sub>u x. G x \<^emph> A x)"
proof -
from biabd_hint_apply_aux[OF assms(1)]
have aux: "(ViewShift.fancy_upd put_inv E1 E3(\<exists>\<^sub>u y. L y \<^emph> (\<forall>\<^sub>u x. (U y x) -\<^emph> (G x)))) \<^emph> H
\<turnstile> (ViewShift.fancy_upd put_inv E1 E2(\<exists>\<^sub>ux. G x \<^emph> A x))"
apply (subst (2) upred_sep_comm) by simp
show ?thesis
apply (rule upred_entails_trans[OF upred_sep_mono[OF assms(2) upred_entails_refl[of H]]])
by (rule aux)
qed
lemma biabd_hint_apply':
assumes "diaframe_hint H L (ViewShift.fancy_upd put_inv E3 E2) (\<lambda>_. A) (\<lambda>y _. U y)"
"\<Delta> \<turnstile> ViewShift.fancy_upd put_inv E1 E3 (\<exists>\<^sub>u y. L y \<^emph> ((U y) -\<^emph> G))"
shows "\<Delta> \<^emph> H \<turnstile> ViewShift.fancy_upd put_inv E1 E2 (G \<^emph> A)"
proof -
from assms(2) have "\<Delta> \<turnstile> fancy_upd'' put_inv E1 E3 (\<exists>\<^sub>u y. L y \<^emph> (\<forall>\<^sub>u x. ((\<lambda>y _. U y) y x) -\<^emph> G))"
by (simp add: drop_forall)
from biabd_hint_apply[OF assms(1) this, unfolded drop_exists] show ?thesis .
qed
lemma wp_store_hint:
"diaframe_hint upred_emp (\<lambda>_. ViewShift.fancy_upd put_inv UNIV E (\<exists>\<^sub>u v'. (\<triangleright>(AuthHeap.points_to_full put_heap l v')) \<^emph>
(\<triangleright>((ViewShift.wand_fupd put_inv (AuthHeap.points_to_full put_heap l v) E UNIV (\<Phi> #[()]))))))
(ViewShift.linear_fupd put_inv UNIV) (\<lambda>_. WeakestPrecondition.WP put_inv put_heap put_proph (Store #[l] (Val v)) \<Phi>) (\<lambda>_ _. upred_emp)"
unfolding diaframe_hint_def
apply (simp add: drop_exists emp_rule)
apply (entails_substR rule: fupd_intro[OF inv_inG])
apply (rule elim_modal_entails[OF elim_modal_fupd_wp_atomic[OF wp_inG atomic_store]])
apply iExistsL
apply (move_sepL "\<triangleright>(AuthHeap.points_to_full put_heap ?l ?v)")
apply later_elim
apply (rule wp_store'[OF wp_inG, unfolded to_val_simp])
apply (rule upred_later_mono_extL)
apply (rule upred_entails_trans[OF _ wp_value[OF wp_inG]])
by iApply_wand
lemmas store_hint = biabd_hint_apply'[OF wp_store_hint]
declare upred_entails_trans[OF store_hint[where ?G = upred_emp, unfolded emp_rule to_val_simp] fupd_wp, wp_symbolic_execution_steps]
end
declare upred_later_exists[iris_simp]
declare frame_baseL[frame_rule]
abbreviation "emp \<equiv> upred_emp"
end |
import data.real.basic data.complex.exponential topology.basic data.set.intervals analysis.exponential order.filter.basic
constants (ξ : ℝ)
open real
noncomputable def step_fun : ℝ → ℝ := λ x, if x ≤ ξ then 1 else 0
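-- Sanity check (our addition): at ξ itself the function takes the value 1.
example : step_fun ξ = 1 := if_pos (le_refl ξ)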
-- * Example 6: The function r(x) = 1 (when x ≤ ξ) and 0 (when x > ξ) is discontinuous over [0,2], assuming ξ=1.
lemma discont_at_step : ¬ (continuous_at step_fun ξ) := begin
unfold continuous_at,
-- our goal:
-- ⊢ ¬filter.tendsto step_fun (nhds ξ) (nhds (step_fun ξ))
rw metric.tendsto_nhds_nhds,
-- our goal:
-- ⊢ ¬∀ (ε : ℝ),
-- ε > 0 → (∃ (δ : ℝ) (H : δ > 0),
-- ∀ {x : ℝ}, dist x ξ < δ → dist (step_fun x)
-- (step_fun ξ) < ε)
  sorry, -- TODO: pick ε = 1/2; any x just above ξ has dist (step_fun x) (step_fun ξ) = 1
end
|
lemma not_pos_poly_0 [simp]: "\<not> pos_poly 0" |
theory Exercise4
imports Main
begin
fun snoc :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list" where
"snoc Nil x = Cons x Nil"
| "snoc (Cons x xs) y = Cons x (snoc xs y)"
fun reverse :: "'a list \<Rightarrow> 'a list" where
"reverse Nil = Nil"
| "reverse (Cons x xs) = snoc (reverse xs) x"
lemma reverse_snoc [simp]: "reverse (snoc xs x) = Cons x (reverse xs)"
apply (induction xs)
apply (auto)
done
theorem double_reverse [simp]: "reverse (reverse xs) = xs"
apply (induction xs)
apply (auto)
done
end |
[STATEMENT]
lemma emp_not_in_code: "\<epsilon> \<notin> \<C>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<epsilon> \<notin> \<C>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<epsilon> \<in> \<C> \<Longrightarrow> False
[PROOF STEP]
assume "\<epsilon> \<in> \<C>"
[PROOF STATE]
proof (state)
this:
\<epsilon> \<in> \<C>
goal (1 subgoal):
1. \<epsilon> \<in> \<C> \<Longrightarrow> False
[PROOF STEP]
hence "[] \<in> lists \<C>" and "[\<epsilon>] \<in> lists \<C>" and "concat [] = concat [\<epsilon>]" and "[] \<noteq> [\<epsilon>]"
[PROOF STATE]
proof (prove)
using this:
\<epsilon> \<in> \<C>
goal (1 subgoal):
1. (\<epsilon> \<in> lists \<C> &&& [\<epsilon>] \<in> lists \<C>) &&& concat \<epsilon> = concat [\<epsilon>] &&& \<epsilon> \<noteq> [\<epsilon>]
[PROOF STEP]
by simp+
[PROOF STATE]
proof (state)
this:
\<epsilon> \<in> lists \<C>
[\<epsilon>] \<in> lists \<C>
concat \<epsilon> = concat [\<epsilon>]
\<epsilon> \<noteq> [\<epsilon>]
goal (1 subgoal):
1. \<epsilon> \<in> \<C> \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
\<epsilon> \<in> lists \<C>
[\<epsilon>] \<in> lists \<C>
concat \<epsilon> = concat [\<epsilon>]
\<epsilon> \<noteq> [\<epsilon>]
goal (1 subgoal):
1. False
[PROOF STEP]
using is_code
[PROOF STATE]
proof (prove)
using this:
\<epsilon> \<in> lists \<C>
[\<epsilon>] \<in> lists \<C>
concat \<epsilon> = concat [\<epsilon>]
\<epsilon> \<noteq> [\<epsilon>]
\<lbrakk>?xs \<in> lists \<C>; ?ys \<in> lists \<C>; concat ?xs = concat ?ys\<rbrakk> \<Longrightarrow> ?xs = ?ys
goal (1 subgoal):
1. False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed |
Formal statement is: lemmas (in sigma_algebra) sets_Collect = sets_Collect_imp sets_Collect_disj sets_Collect_conj sets_Collect_neg sets_Collect_const sets_Collect_countable_All sets_Collect_countable_Ex sets_Collect_countable_All Informal statement is: The following lemmas are useful for proving that a set is measurable. |
function [res,FLAG,RELRES,ITER,RESVEC,LSVEC] = cgNUSPIRiT(kData, x0, NUFFTOP, GOP, nIter, lambda)
% Implementation of image-domain SPIRiT reconstruction from arbitrary
% k-space. The function is based on Jeff Fessler's nufft code and LSQR
%
% Inputs:
% kData - k-space data matrix it is 3D corresponding to [readout,interleaves,coils]
% x0 - Initial estimate of the coil images
% NUFFTOP - nufft operator (see @NUFFT class)
% GOP - SPIRiT Operator (See @SPIRiT)
% nIter - number of LSQR iterations
% lambda - ratio between data consistency and SPIRiT consistency (1 is recommended)
%
% Outputs:
% res - reconstructed coil images
% FLAG,RELRES,ITER,RESVEC,LSVEC - See LSQR documentation
%
% See demo_nuSPIRiT for a demo on how to use this function.
%
% (c) Michael Lustig 2006, modified 2010
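%
% Usage sketch (illustrative only; names and sizes here are hypothetical,
% see demo_nuSPIRiT for the demo that ships with this code):
% kData = ...; % [readout x interleaves x coils] non-Cartesian data
% x0 = zeros(Nx, Ny, nCoils); % initial coil-image estimate
% res = cgNUSPIRiT(kData, x0, NUFFTOP, GOP, 30, 1);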
N = prod(size(x0));
imSize = size(x0);
dataSize = [size(kData)];
b = [kData(:) ; zeros(prod(imSize),1)];
[res,FLAG,RELRES,ITER,RESVEC,LSVEC] = lsqr(@(x,tflag)afun(x,NUFFTOP,GOP,dataSize, imSize,lambda,tflag), b, [], nIter,speye(N,N),speye(N,N), x0(:));
res = reshape(res,imSize);
function [y, tflag] = afun(x,NUFFTOP,GOP,dataSize,imSize,lambda,tflag)
if strcmp(tflag,'transp')
x1 = reshape(x(1:prod(dataSize)),dataSize);
x2 = reshape(x(prod(dataSize)+1:end),imSize);
y = NUFFTOP'.*x1 + lambda*(GOP'*x2);
y = y(:);
else
x = reshape(x,imSize);
y1 = NUFFTOP.*x;
y2 = GOP*x;
y = [y1(:); lambda*y2(:)];
end
|
{-# language MagicHash #-}
module Prelude
( module P
) where
import Data.Either as P (Either(..))
import Streaming.Prelude as P (Stream, Of)
import Numeric as P (pi)
import Control.Monad as P (Monad(..))
import Data.Primitive.Instances ()
import Data.Complex as P (Complex(..))
import Control.Monad.ST as P (ST, runST)
import GHC.Err as P (error)
import GHC.Exts as P (Double(..),Int(..),Int#)
import Data.Function as P (($), id)
import Data.Bool as P (otherwise, Bool(..))
import Data.Bits as P (Bits(..))
import Data.Semigroup as P (Semigroup(..))
import Data.Monoid as P (Monoid(..))
import Control.Applicative as P (Applicative(..))
import Data.Semiring as P (Semiring(..), Ring(..), (+),(*),(-))
import Data.Int as P (Int)
import Data.Word as P (Word)
import GHC.Real as P (fromIntegral, (/), floor)
import Data.Eq as P (Eq(..))
import Data.Ord as P (Ord(..))
import Control.Monad.Primitive as P (PrimMonad(..))
import Data.Primitive.Types as P (Prim(..))
import Data.Primitive.PrimArray as P (PrimArray,MutablePrimArray)
import Data.Primitive.Contiguous as P (Contiguous,Element,Mutable)
|
import os
import palantir
import pdb
import pandas as pd
import sys
import numpy as np
from sklearn.preprocessing import StandardScaler
from compute_goea import goea
from TsneWindow import TsneWindow
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from PyQt5.QtCore import QThread, pyqtSignal, Qt
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from PyQt4.QtCore import QThread, Qt
from matplotlib.figure import Figure
class GeneTrendsWindow(QtWidgets.QMainWindow):
def __init__(self, trends, pr_res, clusters, id_to_name, out_dir, goea_dir, tsne, cell_clusters, colors):
super().__init__()
self._main = QtWidgets.QWidget()
self.setCentralWidget(self._main)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("Gene trends for ")
layout = QtWidgets.QVBoxLayout(self._main)
self.trends = trends
self.clusters = clusters
self.id_to_name = id_to_name
self.out_dir = out_dir
self.goea_dir = goea_dir
self.line = False
self.lines = list()
self.pseudotime = 0
self.n_lines = 0
self.colors = colors
##self.colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
## drop down to choose trajectory
self.select_cluster = QtWidgets.QComboBox()
layout.addWidget(self.select_cluster)
self.select_cluster.addItem('Select cluster')
for cluster in set(self.clusters):
self.select_cluster.addItem(str(cluster))
self.select_cluster.currentIndexChanged.connect(self._select_cluster)
gene_trend_canvas = FigureCanvas(Figure(figsize=(5, 5)))
layout.addWidget(gene_trend_canvas)
self.addToolBar(NavigationToolbar(gene_trend_canvas, self))
self._gt_fig = gene_trend_canvas.figure
self._gt_ax = gene_trend_canvas.figure.subplots()
## Add listener for mouse motion to update tsne canvas
gene_trend_canvas.mpl_connect('motion_notify_event', self._on_mouse_move)
go_button = QtWidgets.QPushButton("GO analysis")
go_button.clicked.connect(self._on_click_go)
layout.addWidget(go_button)
self.tsne_win = TsneWindow(tsne, cell_clusters, colors, out_dir, pr_res=pr_res, trends_win=self)
self.tsne_win.show()
def _on_click_go(self):
c = int(self.select_cluster.currentText())
#cluster = self.clusters[c]
gene_symbols = [self.id_to_name[gene_id] for gene_id in self.clusters.index[self.clusters == c]]
# print(gene_symbols)
# gene_symbols = [self.id_to_name[gene_id] for gene_id in cluster]
goea(self.clusters.index[self.clusters == c], gene_symbols, 'gene trends', str(c), self.goea_dir, self.out_dir) ## list of genes represented by their ensembl id and gene symbol
def _on_mouse_move(self, event):
if event.xdata:
self.pseudotime = event.xdata
self._update_line()
def _update_line(self):
if self.line:#len(self._gt_ax.lines) > 3:
self._gt_ax.lines[-1].remove()
self.line = True
self._gt_ax.axvline(self.pseudotime)
self._gt_ax.figure.canvas.draw()
# self._update_tsne(pseudotime)
def _update_pseudotime(self, pseudotime):
self.pseudotime = pseudotime
def _add_line(self):
if self.line:#len(self._gt_ax.lines) > 3:
self._gt_ax.lines[-1].remove()
self._gt_ax.axvline(self.pseudotime, color=self.colors[self.n_lines])
self.n_lines += 1
self.line = False
self._gt_ax.figure.canvas.draw()
self.lines = self._gt_ax.lines[-self.n_lines:]
def _select_cluster(self,i):
print("Items in the list are :")
for count in range(self.select_cluster.count()):
print(self.select_cluster.itemText(count))
print("Current index",i,"selection changed ",self.select_cluster.currentText())
c = int(self.select_cluster.currentText())
print("CLUSTER:")
print(c)
trends = pd.DataFrame(StandardScaler().fit_transform(self.trends.T).T,
index=self.trends.index, columns=self.trends.columns)
clusters = self.clusters
gene_symbols_in_cluster = [self.id_to_name[gene_id] for gene_id in clusters.index[clusters == c]]
means = trends.loc[clusters.index[clusters == c], :].mean()
std = trends.loc[clusters.index[clusters == c], :].std()
self._gt_ax.clear()
self.line = False
# Plot all trends
for g in clusters.index[clusters == c]:
self._gt_ax.plot(means.index, np.ravel(
trends.loc[g, :]), linewidth=0.5, color='lightgrey')
self._gt_ax.plot(means.index, np.ravel(means), color='#377eb8')
self._gt_ax.plot(means.index, np.ravel(means - std), linestyle='--',
color='#377eb8', linewidth=0.75)
self._gt_ax.plot(means.index, np.ravel(means + std), linestyle='--',
color='#377eb8', linewidth=0.75)
self._gt_ax.set_title('Cluster {}'.format(c), fontsize=12)
self._gt_ax.tick_params('both', length=2, width=1, which='major')
self._gt_ax.tick_params(axis='both', which='major', labelsize=8, direction='in')
# self._gt_ax.set_xticklabels([])
for line in self.lines:
self._gt_ax.lines.append(line)
self._gt_ax.figure.canvas.draw()
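# --- Illustrative sketch (added; not part of the original file) ---
# The cursor-follows-mouse pattern used by _update_line above, reduced to a
# minimal standalone example: on each mouse move, remove the previous vertical
# line and draw a new one at the cursor's x position. Needs only matplotlib.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    cursor = {'line': None}  # holder for the current vline artist

    def on_move(event):
        if event.xdata is None:        # pointer is outside the axes
            return
        if cursor['line'] is not None:
            cursor['line'].remove()    # drop the previous marker
        cursor['line'] = ax.axvline(event.xdata)
        fig.canvas.draw_idle()

    fig.canvas.mpl_connect('motion_notify_event', on_move)
    plt.show()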
|
Require ClassicalEpsilon.
Require Import Reals Psatz.
From stdpp Require Import tactics.
From mathcomp Require Import ssrfun ssreflect eqtype ssrbool seq fintype choice bigop.
From discprob.basic Require Import base sval order monad bigop_ext nify.
From discprob.prob Require Import prob countable finite stochastic_order.
From discprob.monad.idxval Require Import pival_dist pival ival_dist ival ival_pair pidist_singleton idist_pidist_pair extrema.
Import Lub.
(* This is an inductive characterization of eq_ivd_prob, as is proved later *)
Inductive irrel_ivd : ∀ X, ivdist X → ivdist X → Prop :=
| irrel_ivd_refl X : ∀ (I: ivdist X), irrel_ivd X I I
| irrel_ivd_sym X : ∀ I1 I2, irrel_ivd X I1 I2 → irrel_ivd X I2 I1
| irrel_ivd_trans X : ∀ I1 I2 I3, irrel_ivd X I1 I2 → irrel_ivd X I2 I3 → irrel_ivd X I1 I3
| irrel_ivd_proper X :
∀ I1 I1' I2 I2', eq_ivd I1 I1' → eq_ivd I2 I2' → irrel_ivd X I1 I2 → irrel_ivd X I1' I2'
| irrel_ivd_irrel X : ∀ {Y} I1 (I0: ivdist Y), irrel_ivd X I1 (x ← I0; I1)
| irrel_ivd_bind X Y: ∀ (I1 I2: ivdist X) (f1 f2: X → ivdist Y),
irrel_ivd X I1 I2 →
(∀ x, irrel_ivd Y (f1 x) (f2 x)) →
irrel_ivd Y (x ← I1; f1 x) (x ← I2; f2 x).
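(* Note (added): irrel_ivd is thus the least equivalence containing eq_ivd that
   is a congruence for bind and ignores sampling from an irrelevant auxiliary
   distribution (the irrel_ivd_irrel rule). *)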
Arguments irrel_ivd {_}.
Definition le_pidist_irrel :=
λ {X : Type} (Is1 Is2 : pidist X), ∀ I : ivdist X, In (I: ival X) Is1 → ∃ I' : ivdist X, irrel_ivd I I' ∧ In (I': ival X) Is2.
Lemma le_pidist_irrel_refl {X: Type} (Is1: pidist X):
le_pidist_irrel Is1 Is1.
Proof.
intros I Hin. exists I; split; eauto. apply irrel_ivd_refl.
Qed.
Lemma irrel_ivd_support_coerce {X} (I1 I2: ivdist X) :
irrel_ivd I1 I2 →
∀ x, (∃ i2, ind I2 i2 = x ∧ val I2 i2 > 0) ↔ (∃ i1, ind I1 i1 = x ∧ val I1 i1 > 0).
Proof.
induction 1.
- split; intros; auto.
- intros. by rewrite (IHirrel_ivd x).
- intros. by rewrite (IHirrel_ivd2 x).
- intros.
rewrite (eq_ival_support_coerce I1 I1'); eauto.
rewrite (eq_ival_support_coerce I2 I2'); eauto.
- intros.
* split.
** intros ((i0&i1)&Heq&Hgt). exists i1.
rewrite //= in Heq Hgt.
split; auto. specialize (val_nonneg I0 i0); nra.
** intros (i1&Heq&Hgt).
edestruct (ivd_support_idx I0) as (i0&Hgt').
exists (existT i0 i1); split => //=; nra.
- intros x. split.
* intros ((i2&if2)&Hind&Hval).
rewrite //= in Hind.
edestruct (IHirrel_ivd (ind I2 i2)) as (HI2&_).
edestruct (HI2) as (i1&Hindeq&?).
{ eexists. split; eauto. rewrite //= in Hval. specialize (val_nonneg (f2 (ind I2 i2)) if2).
nra. }
edestruct (H1 (ind I2 i2)) as (Hf2&_).
edestruct Hf2 as (if1&?&?).
{ eexists. split; eauto. rewrite //= in Hval.
specialize (val_nonneg I2 i2); nra. }
unshelve (eexists).
{ exists i1. rewrite Hindeq; exact if1. }
split => //=; destruct Hindeq.
** rewrite /eq_rect_r//=.
** rewrite /eq_rect_r//=. nra.
* intros ((i2&if2)&Hind&Hval).
rewrite //= in Hind.
edestruct (IHirrel_ivd (ind I1 i2)) as (_&HI2).
edestruct (HI2) as (i1&Hindeq&?).
{ eexists. split; eauto. rewrite //= in Hval. specialize (val_nonneg (f1 (ind I1 i2)) if2).
nra. }
edestruct (H1 (ind I1 i2)) as (_&Hf2).
edestruct Hf2 as (if1&?&?).
{ eexists. split; eauto. rewrite //= in Hval.
specialize (val_nonneg I1 i2); nra. }
unshelve (eexists).
{ exists i1. rewrite Hindeq; exact if1. }
split => //=; destruct Hindeq.
** rewrite /eq_rect_r//=.
** rewrite /eq_rect_r//=. nra.
Qed.
Lemma le_pidist_irrel_support_coerce_aux {X} (Is1 Is2: pidist X) :
le_pidist_irrel Is2 Is1 →
∀ x, In_psupport x Is2 → In_psupport x Is1.
Proof.
intros Hle x (I2&i2&Hin2&?&Hval).
destruct (Hle {| ivd_ival := I2; val_sum1 := all_sum1 Is2 _ Hin2|}) as (I1&Heq&Hin1); eauto.
exists I1. edestruct (irrel_ivd_support_coerce _ _ Heq) as (i1&?&?).
{ eauto. }
eexists; split; eauto.
Qed.
Global Instance irrel_ivd_proper_instance {X} : Proper (@eq_ivd X ==> @eq_ivd X ==> iff) (@irrel_ivd X).
Proof.
intros I1 I1' Heq1 I2 I2' Heq2.
split; intros; eapply irrel_ivd_proper; eauto; try by symmetry.
Qed.
Global Instance irrel_ivd_Transitivite {X}: Transitive (@irrel_ivd X).
Proof. intros ???. apply irrel_ivd_trans. Qed.
Global Instance irrel_ivd_Reflexive {X}: Reflexive (@irrel_ivd X).
Proof. intros ?. apply irrel_ivd_refl. Qed.
Global Instance irrel_ivd_Symmetry {X}: Symmetric (@irrel_ivd X).
Proof. intros ??. apply irrel_ivd_sym. Qed.
Lemma is_Ex_ival_irrel_proper_bind {X Y} f (f1 f2: X → ivdist Y) (I1 I2: ivdist X) v
(Hirrel_ivd : irrel_ivd I1 I2)
(Hall_irrel : ∀ x : X, irrel_ivd (f1 x) (f2 x))
(IHinner : ∀ (x : X) (f : Y → R) (v : R), is_Ex_ival f (f1 x) v ↔ is_Ex_ival f (f2 x) v)
(IHirrel_ivd : ∀ (f : X → R) (v : R), is_Ex_ival f I1 v ↔ is_Ex_ival f I2 v):
is_Ex_ival f (ivd_bind _ _ f1 I1) v → is_Ex_ival f (ivd_bind _ _ f2 I2) v.
Proof.
intros His.
assert (ex_Ex_ival f (ivd_bind _ _ f1 I1)).
{ eapply is_Ex_ival_ex; eauto. }
rewrite -(is_Ex_ival_unique _ _ _ His).
feed pose proof (ex_Ex_ival_bind_post (λ x, Rabs (f x)) I1 f1) as Hex_I1.
{ eapply ex_Ex_ival_to_Rabs, is_Ex_ival_ex. eauto. }
feed pose proof (ex_Ex_ival_bind_post f I1 f1) as Hex_I1'.
{ eapply is_Ex_ival_ex. eauto. }
rewrite Ex_ival_bind_post //=.
assert (ex_Ex_ival f (ivd_bind _ _ f2 I2)).
{
apply ex_Ex_ival_from_Rabs, ex_Ex_ival_bind_post_inv; eauto using Rabs_pos, Rle_ge.
** intros.
apply is_Ex_ival_ex, ex_Ex_ival_to_Rabs in His.
edestruct (irrel_ivd_support_coerce I1 I2) as (Hlr&Hrl); eauto.
edestruct Hlr as (i1&Heqi1&Hvali1); eauto.
eapply ex_Ex_ival_bind_inv in His; eauto.
eapply ex_Ex_ival_is in His as (v'&His).
rewrite -Heqi1.
eapply is_Ex_ival_ex. eapply IHinner; eauto.
** apply ex_Ex_ival_is in Hex_I1 as (v'&His').
eapply is_Ex_ival_ex; eapply IHirrel_ivd.
eapply is_Ex_ival_proper_fun_support; eauto.
intros x Hsupport => //=.
symmetry.
apply is_Ex_ival_unique.
eapply IHinner.
eapply Ex_ival_correct. eapply (ex_Ex_ival_bind_inv (λ x, Rabs (f x)) f1 I1); eauto.
apply ex_Ex_ival_to_Rabs. eapply is_Ex_ival_ex; eauto.
}
cut (Ex_ival f (ivd_bind _ _ f2 I2) = (Ex_ival (λ x, Ex_ival f (f1 x)) I1)).
{ intros HEx. rewrite -HEx. apply Ex_ival_correct; eauto. }
rewrite Ex_ival_bind_post //=.
apply is_Ex_ival_unique.
eapply IHirrel_ivd.
eapply is_Ex_ival_proper_fun_support; last first.
{ eapply Ex_ival_correct. eauto. }
intros => //=.
symmetry.
apply is_Ex_ival_unique.
eapply IHinner.
eapply Ex_ival_correct. eapply (ex_Ex_ival_bind_inv f f1 I1); eauto.
Qed.
Lemma is_Ex_ival_irrel_proper {A} f (I I': ivdist A) v :
irrel_ivd I I' →
is_Ex_ival f I v ↔
is_Ex_ival f I' v.
Proof.
intros irrel_ivd.
revert v.
induction irrel_ivd; auto; intros.
- symmetry. eapply IHirrel_ivd.
- rewrite IHirrel_ivd1. auto.
- rewrite /eq_ivd in H.
etransitivity; first etransitivity; try eapply IHirrel_ivd.
{ split; apply is_Ex_ival_proper; eauto. by symmetry. }
{ split; apply is_Ex_ival_proper; eauto. by symmetry. }
- split. apply is_Ex_ival_bind_irrel, val_sum1.
intros His. cut (ex_Ex_ival f I1).
{ intros Hex. apply Ex_ival_correct in Hex.
cut (Ex_ival f I1 = v); intros; subst; eauto.
eapply is_Ex_ival_unique'; last eassumption.
apply is_Ex_ivd_bind_irrel; eauto.
}
apply is_Ex_ival_ex in His.
unshelve (eapply ex_Ex_ival_bind_inv in His; eauto).
{ exact (sval (ivd_support_idx I0)). }
destruct (ivd_support_idx _) => //=.
- split; eapply is_Ex_ival_irrel_proper_bind; eauto; try (intros; by symmetry).
Qed.
Lemma ex_Ex_ival_irrel_proper {A} f (I I': ivdist A) :
irrel_ivd I I' →
ex_Ex_ival f I →
ex_Ex_ival f I'.
Proof.
intros Hirrel (v&His)%ex_Ex_ival_is.
eapply is_Ex_ival_ex.
eapply is_Ex_ival_irrel_proper; eauto.
by symmetry.
Qed.
Lemma Ex_ival_irrel_proper {A} f (I I': ivdist A) :
irrel_ivd I I' →
ex_Ex_ival f I →
Ex_ival f I = Ex_ival f I'.
Proof.
intros. symmetry. apply is_Ex_ival_unique.
eapply is_Ex_ival_irrel_proper; eauto.
* symmetry. eauto.
* apply Ex_ival_correct; eauto.
Qed.
Lemma irrel_ivd_to_eq_ivd_prob {X} (I1 I2: ivdist X):
irrel_ivd I1 I2 →
eq_ivd_prob I1 I2.
Proof.
intros Hirrel.
apply eq_ivd_prob_alt.
intros x.
transitivity ((Pr (λ v, v = x) I1)).
{ rewrite /Ex_ival/idx_eq_ind//=. eapply SeriesC_ext; intros.
destruct ClassicalEpsilon.excluded_middle_informative => //=; nra.
}
transitivity ((Pr (λ v, v = x) I2)); last first.
{ rewrite /Ex_ival/idx_eq_ind//=. eapply SeriesC_ext; intros.
destruct ClassicalEpsilon.excluded_middle_informative => //=; nra.
}
apply Ex_ival_irrel_proper; eauto.
apply ex_Pr.
Qed.
Lemma In_isupport_pr_gt_0 {X: Type} (I: ivdist X) (x: X):
In_isupport x I →
0 < Pr (eq ^~ x) I.
Proof.
rewrite /Pr/Ex_ival => Hin.
destruct Hin as (i&?&?).
eapply (Series_strict_pos _ (pickle i)).
{ intros. rewrite /countable_sum/oapp.
destruct pickle_inv; try nra.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
rewrite Rmult_1_l. apply val_nonneg.
}
{ intros. rewrite /countable_sum/oapp.
rewrite pickleK_inv.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
}
feed pose proof (ex_Pr (eq^~ x) I).
apply ex_Ex_ival_is in H1 as (v&?).
rewrite /is_Ex_ival in H1.
destruct H1 as (Hex&His).
eexists. eauto.
Qed.
Lemma pr_gt_0_In_isupport {X: Type} (I: ivdist X) (x: X):
0 < Pr (eq ^~ x) I →
In_isupport x I.
Proof.
rewrite /Pr/Ex_ival => Hin.
eapply (Series_strict_pos_inv) in Hin as (n&?).
{
destruct (@pickle_inv (idx I) n) as [i|] eqn:Heq.
- exists i. rewrite //=/countable_sum//= Heq //= in H.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
* rewrite //= in H. split; eauto. nra.
* rewrite //= in H. nra.
- rewrite //=/countable_sum//= Heq //= in H ; nra.
}
intros n. rewrite /countable_sum. destruct pickle_inv => //=; last nra.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
rewrite Rmult_1_l. apply val_nonneg.
Qed.
(* This is a kind of conditional distribution *)
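(* Concretely (note added): when x lies in the support of I, ival_slice I x
   keeps the weight val I i exactly on the indices with ind I i = x,
   renormalized by Pr (eq^~ x) I, and zeroes the rest; otherwise I is returned
   unchanged. ival_slice_proof2 below checks that the weights still sum to 1. *)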
Lemma ival_slice_proof1 (X : Type) (I : ivdist X) (x : X):
∀ i : idx I, (if ClassicalEpsilon.excluded_middle_informative (In_isupport x I)
then
(if ClassicalEpsilon.excluded_middle_informative (ind I i = x) then val I i else 0) /
Pr (eq^~ x) I
else val I i) ≥ 0.
Proof.
intros i.
destruct ClassicalEpsilon.excluded_middle_informative; eauto; last apply val_nonneg.
apply Rle_ge, Rdiv_le_0_compat.
{ destruct ClassicalEpsilon.excluded_middle_informative => //=; eauto; try nra.
apply Rge_le, val_nonneg. }
{ apply In_isupport_pr_gt_0; eauto. }
Qed.
Definition ival_slice {X} (I: ivdist X) (x: X) : ival X.
refine {| idx := idx I;
ind := ind I;
val := λ i,
if ClassicalEpsilon.excluded_middle_informative (In_isupport x I) then
(if ClassicalEpsilon.excluded_middle_informative (ind I i = x) then
val I i
else
0) / Pr (λ i, i = x) I
else
val I i|}.
apply ival_slice_proof1.
Defined.
Lemma ival_slice_proof2 (X : Type) (I : ivdist X) (x : X):
is_series (countable_sum (val (ival_slice I x))) 1.
Proof.
rewrite //=. destruct ClassicalEpsilon.excluded_middle_informative; last apply val_sum1.
replace 1 with (Pr (eq^~ x) I */ Pr (eq^~ x) I); last first.
{ field. apply Rgt_not_eq, In_isupport_pr_gt_0; auto. }
apply is_seriesC_scal_r.
rewrite /Pr/Ex_ival.
apply (is_seriesC_ext _ (λ i0 : idx I, (if is_left (ClassicalEpsilon.excluded_middle_informative (ind I i0 = x))
then 1
else 0) * val I i0)).
{ intros. destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra. }
{
feed pose proof (ex_Pr (eq^~ x) I) as Hpr.
apply ex_Ex_ival_is in Hpr as (v&Hpr).
rewrite /is_Ex_ival in Hpr.
destruct Hpr as (Hex&His).
eapply Series_correct; eexists; eauto. }
Qed.
Definition ivdist_slice {X} (I: ivdist X) (x: X) : ivdist X.
Proof.
exists (ival_slice I x).
apply ival_slice_proof2.
Defined.
Lemma eq_ivd_prob_Pr_eq {X} (I1 I2: ivdist X) x:
eq_ivd_prob I1 I2 →
Pr (eq^~ x) I1 = Pr (eq^~ x) I2.
Proof.
rewrite /Pr/Ex_ival => Heq.
unshelve (eapply eq_ivd_prob_alt in Heq); first exact x.
rewrite /idx_eq_ind in Heq.
setoid_rewrite Rmult_if_distrib.
setoid_rewrite Rmult_0_l.
setoid_rewrite Rmult_1_l.
eauto.
Qed.
Lemma eq_ivd_prob_In_isupport {X: Type} I1 I2 (x: X):
eq_ivd_prob I1 I2 →
In_isupport x I1 →
In_isupport x I2.
Proof.
intros Heq Hin%In_isupport_pr_gt_0.
apply pr_gt_0_In_isupport.
erewrite <-eq_ivd_prob_Pr_eq; last eassumption.
eauto.
Qed.
Lemma eq_ivd_prob_to_irrel_ivd {X} (I1 I2: ivdist X):
eq_ivd_prob I1 I2 →
irrel_ivd I1 I2.
Proof.
intros Heq.
transitivity (x ← I1; _ ← ivdist_slice I2 x; mret x).
{ transitivity (x ← I1; mret x).
{ rewrite ivd_right_id. reflexivity. }
apply irrel_ivd_bind; first reflexivity.
intros x. apply irrel_ivd_irrel.
}
transitivity (x ← I2; _ ← ivdist_slice I1 x; mret x); last first.
{ symmetry.
transitivity (x ← I2; mret x).
{ rewrite ivd_right_id. reflexivity. }
apply irrel_ivd_bind; first reflexivity.
intros x. apply irrel_ivd_irrel.
}
cut (eq_ivd (I1 ≫= (λ x : X, ivdist_slice I2 x ≫= (λ _ : X, mret x)))
(I2 ≫= (λ x : X, ivdist_slice I1 x ≫= (λ _ : X, mret x)))).
{ intros ->. reflexivity. }
apply eq_ival_nondep_inj_surj_suffice.
apply eq_ival_nondep_inj_surj'_helper.
unshelve eexists.
{ intros (i1&i2&?). exists i2. exists i1. exact tt. }
rewrite //=.
split_and!.
* intros (i1&i2&[]) (i1'&i2'&[]) _ _ => //=.
inversion 1; subst. auto.
* intros (i2&i1&[]).
unshelve (eexists).
{ exists i1. exists i2. exact tt. }
split_and!; eauto => //=.
repeat destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra; try congruence.
** intros Hgt. eapply Rge_gt_trans; last eassumption.
right. rewrite //=.
cut (Pr (eq^~ (ind I2 i2)) I1 = Pr (eq^~ (ind I1 i1)) I2).
{ intros ->. nra. }
rewrite e0; eapply eq_ivd_prob_Pr_eq; eauto.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
by symmetry.
** cut (val I2 i2 = 0).
{ intros ->. nra. }
destruct (val_nonneg I2 i2); last auto.
exfalso. eapply n.
eapply eq_ivd_prob_In_isupport; eauto.
{ by symmetry. }
eexists; eauto.
* intros (i1&i2&[]) => //=.
repeat destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra; try congruence.
cut (val I1 i1 = 0).
{ intros ->. nra. }
destruct (val_nonneg I1 i1); last auto.
exfalso. eapply n.
eapply eq_ivd_prob_In_isupport; eauto.
eexists; eauto.
* intros (i1&i2&[]) => //=.
repeat destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra; try congruence.
** intros Hgt.
cut (Pr (eq^~ (ind I2 i2)) I1 = Pr (eq^~ (ind I1 i1)) I2).
{ intros ->. nra. }
rewrite e0; eapply eq_ivd_prob_Pr_eq; eauto.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
by symmetry.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
** cut (val I1 i1 = 0).
{ intros ->. nra. }
destruct (val_nonneg I1 i1); last auto.
exfalso. eapply n.
eapply eq_ivd_prob_In_isupport; eauto.
eexists; eauto.
Qed.
Lemma irrel_ivd_choice {X} (I1 I1' I2 I2': ivdist X) p Hpf Hpf':
irrel_ivd I1 I2 →
irrel_ivd I1' I2' →
irrel_ivd (ivdplus p Hpf I1 I1') (ivdplus p Hpf' I2 I2').
Proof.
intros Hirrel1 Hirrel2.
transitivity (b ← ivdplus p Hpf (mret true) (mret false);
if (b: bool) then I1 else I1').
{ rewrite ivd_plus_bind ?ivd_left_id. reflexivity. }
transitivity (b ← ivdplus p Hpf' (mret true) (mret false);
if (b: bool) then I2 else I2'); last first.
{ rewrite ivd_plus_bind ?ivd_left_id. reflexivity. }
apply irrel_ivd_bind.
{ cut (eq_ivd (ivdplus p Hpf (mret true) (mret false)) (ivdplus p Hpf' (mret true) (mret false))).
{ intros ->; reflexivity. }
apply ivdist_plus_proper; reflexivity.
}
intros [|]; eauto.
Qed.
Definition irrel_pidist {X: Type} (Is1 Is2: pidist X) :=
∀ f, bounded_fun f → Rbar_le (Ex_min f Is2) (Ex_min f Is1).
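(* Note (added): irrel_pidist Is1 Is2 says that for every bounded payoff f the
   minimal expectation can only drop when passing from Is1 to Is2; by the
   Ex_max duality below, maximal expectations can only grow. It is reflexive
   and transitive but not symmetric. *)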
Lemma irrel_pidist_Ex_max {X: Type} (Is1 Is2: pidist X) :
irrel_pidist Is1 Is2 → ∀ f, bounded_fun f → Rbar_le (Ex_max f Is1) (Ex_max f Is2).
Proof.
intros Hirrel f Hb.
rewrite ?Ex_max_neg_min.
apply Rbar_opp_le.
apply Hirrel.
destruct Hb as (c&?).
exists c => x. rewrite Rabs_Ropp; eauto.
Qed.
Lemma Ex_max_irrel_pidist {X: Type} (Is1 Is2: pidist X) :
(∀ f, bounded_fun f → Rbar_le (Ex_max f Is1) (Ex_max f Is2)) →
irrel_pidist Is1 Is2.
Proof.
intros Hirrel f Hb.
specialize (Hirrel (λ x, (- f x))).
rewrite ?Ex_max_neg_min in Hirrel.
apply Rbar_opp_le.
setoid_rewrite Ropp_involutive in Hirrel.
eapply Hirrel. destruct Hb as (c&?). exists c.
intros x. rewrite Rabs_Ropp; eauto.
Qed.
Lemma irrel_pidist_refl {X} : ∀ I, @irrel_pidist X I I.
Proof. intros f Hb; reflexivity. Qed.
Lemma irrel_pidist_trans {X} :
∀ I1 I2 I3, @irrel_pidist X I1 I2 → @irrel_pidist X I2 I3 → @irrel_pidist X I1 I3.
Proof.
intros I1 I2 I3 Hi1 Hi2 f Hb.
specialize (Hi1 f Hb).
specialize (Hi2 f Hb).
etransitivity; eauto.
Qed.
Lemma bounded_supp_fun_le_pidist {A} f (Is Is': pidist A):
le_pidist Is Is' →
bounded_fun_on f (λ x, In_psupport x Is') →
bounded_fun_on f (λ x, In_psupport x Is).
Proof.
intros Hle Hbf.
eapply bounded_fun_on_anti; try eassumption.
intros a. eapply le_pidist_support_coerce_aux; eauto.
Qed.
Lemma Ex_min_le_pidist_irrel {X} (f: X → R) Is1 Is2:
le_pidist_irrel Is1 Is2 →
Rbar_le (Ex_min f Is2) (Ex_min f Is1).
Proof.
intros Hle.
rewrite /Ex_min.
destruct (Glb_Rbar_correct (Ex_pidist f Is1)) as (Hlb&Hglb).
apply Hglb. intros r Hex. destruct Hex as (I&Hin&Hex).
edestruct (Hle {| ivd_ival := I; val_sum1 := all_sum1 Is1 _ Hin |}) as (I2&Heq&Hin2).
{ rewrite //=. }
{ eapply (is_Ex_ival_irrel_proper f) in Heq; last eauto.
destruct (Glb_Rbar_correct (Ex_pidist f Is2)) as (Hlb2&Hglb2).
eapply Hlb2. eexists; split; eauto.
eapply Heq => //=.
}
Qed.
Lemma Ex_max_le_pidist_irrel {X} (f: X → R) Is1 Is2:
le_pidist_irrel Is1 Is2 →
Rbar_le (Ex_max f Is1) (Ex_max f Is2).
Proof.
rewrite ?Ex_max_neg_min.
intros Hle.
apply Rbar_opp_le.
apply Ex_min_le_pidist_irrel; eauto.
Qed.
Lemma irrel_pidist_proper_irrel {X} :
∀ I1 I1' I2 I2', le_pidist_irrel I1' I1 → le_pidist_irrel I2 I2' →
@irrel_pidist X I1 I2 → @irrel_pidist X I1' I2'.
Proof.
intros I1 I1' I2 I2' Hle1 Hle2 Hirrel12.
intros f Hb.
etransitivity.
{ apply Ex_min_le_pidist_irrel; eauto. }
etransitivity.
{ eapply Hirrel12; eauto. }
{ apply Ex_min_le_pidist_irrel; eauto. }
Qed.
Lemma irrel_pidist_bind1 {X Y}: ∀ (I1 I2: pidist X) (f: X → pidist Y),
@irrel_pidist X I1 I2 →
@irrel_pidist Y (x ← I1; f x) (x ← I2; f x).
Proof.
intros I1 I2 f Hirrel.
intros g Hb.
rewrite ?Ex_min_bind_post;
eauto using Ex_min_bounded_is_bounded, ex_Ex_extrema_bounded_fun,
Ex_min_bounded_fun_finite.
Qed.
Lemma irrel_pidist_bind {X Y}: ∀ (I1 I2: pidist X) (f1 f2: X → pidist Y),
@irrel_pidist X I1 I2 →
(∀ x, @irrel_pidist Y (f1 x) (f2 x)) →
@irrel_pidist Y (x ← I1; f1 x) (x ← I2; f2 x).
Proof.
intros I1 I2 f1 f2 Hirrel Hirrelfun.
eapply irrel_pidist_trans.
{ eapply irrel_pidist_bind1; eauto. }
intros f Hb. eapply Ex_min_bind_le;
eauto using Ex_min_bounded_is_bounded, ex_Ex_extrema_bounded_fun,
Ex_min_bounded_fun_finite.
intros a ?. eapply Hirrelfun; eauto.
Qed.
Lemma irrel_pidist_proper X :
∀ (I1 I1' I2 I2': pidist X), le_pidist I1' I1 → le_pidist I2 I2'
→ irrel_pidist I1 I2 → irrel_pidist I1' I2'.
Proof.
intros ???? Hle1 Hle2. eapply irrel_pidist_proper_irrel.
{ intros x Hin. edestruct (Hle1 x) as (x'&Heq&Hin'); eauto.
exists {| ivd_ival := x'; val_sum1 := all_sum1 I1 _ Hin'|}; split; auto.
eapply irrel_ivd_proper; eauto; last apply irrel_ivd_refl.
reflexivity.
}
{ intros x Hin. edestruct (Hle2 x) as (x'&Heq&Hin'); eauto.
exists {| ivd_ival := x'; val_sum1 := all_sum1 I2' _ Hin'|}; split; auto.
eapply irrel_ivd_proper; eauto; last apply irrel_ivd_refl.
reflexivity.
}
Qed.
Global Instance irrel_pidist_mono_instance {X} : Proper (@le_pidist X --> @le_pidist X ==> Coq.Program.Basics.impl) (@irrel_pidist X).
Proof.
intros I1 I1' Heq1 I2 I2' Heq2.
intros Hirrel. eapply irrel_pidist_proper; eauto.
Qed.
Global Instance irrel_pidist_proper_instance {X} : Proper (@eq_pidist X ==> @eq_pidist X ==> iff) (@irrel_pidist X).
Proof.
intros I1 I1' Heq1 I2 I2' Heq2.
split; intros Hirrel; eapply irrel_pidist_proper; eauto;
try (setoid_rewrite Heq1; reflexivity);
try (setoid_rewrite Heq2; reflexivity).
Qed.
Global Instance irrel_pidist_Transitivite {X}: Transitive (@irrel_pidist X).
Proof. intros ???. apply irrel_pidist_trans. Qed.
Global Instance irrel_pidist_Reflexive {X}: Reflexive (@irrel_pidist X).
Proof. intros ?. apply irrel_pidist_refl. Qed.
Record irrel_couplingP {A1 A2} (I1: ivdist A1) (Is2: pidist A2) (P: A1 → A2 → Prop) : Type :=
{ irrel_I : ivdist A1;
irrel_Is : pidist A2;
irrel_rel_I : irrel_ivd I1 irrel_I;
irrel_rel_Is : irrel_pidist irrel_Is Is2;
irrel_couple_wit :> idist_pidist_couplingP irrel_I irrel_Is P
}.
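(* Note (added): an irrelevance coupling between I1 and Is2 packages an
   ordinary idist/pidist coupling between stand-ins irrel_I and irrel_Is that
   agree with I1 and Is2 up to irrelevance. *)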
Definition lsupport {A1 A2 Is1 Is2 P} (Icouple: irrel_couplingP Is1 Is2 P) (y: A2) :=
{ x : A1 | ∃ i Hpf, ival.ind Icouple i = (exist _ (x, y) Hpf) ∧ ival.val Icouple i > 0 }.
Definition rsupport {A1 A2 Is1 Is2 P} (Icouple: irrel_couplingP Is1 Is2 P) (x: A1) :=
{ y : A2 | ∃ i Hpf, ival.ind Icouple i = (exist _ (x, y) Hpf) ∧ ival.val Icouple i > 0 }.
Definition irrel_coupling_propP {A1 A2} (I1: ivdist A1) (Is2: pidist A2) P : Prop :=
∃ (ic: irrel_couplingP I1 Is2 P), True.
Lemma ic_wit_to_prop {A1 A2} (I1 : ivdist A1) (Is2: pidist A2) P :
irrel_couplingP I1 Is2 P →
irrel_coupling_propP I1 Is2 P.
Proof.
intros; eexists; eauto.
Qed.
Lemma ic_prop_to_wit {A1 A2} (I1 : ivdist A1) (Is2: pidist A2) P :
irrel_coupling_propP I1 Is2 P →
irrel_couplingP I1 Is2 P.
Proof.
intros (?&_)%ClassicalEpsilon.constructive_indefinite_description; auto.
Qed.
Lemma irrel_pidist_support_coerce {X} (I1 I2: pidist X) :
irrel_pidist I2 I1 →
∀ x, In_psupport x I2 → In_psupport x I1.
Proof.
intros Hirrel x Hin.
destruct Hin as (I&i&Hin&Hind&Hval).
assert (0 < Pr (eq ^~ x) {| ivd_ival := I; val_sum1 := all_sum1 _ _ Hin|}).
{ eapply In_isupport_pr_gt_0.
eexists; eauto. }
assert (Rbar_lt 0 (Pr_max (eq^~ x) I1)) as Hmax.
{
apply (Rbar_lt_le_trans _ (Pr_max (eq^~ x) I2)); last first.
{ eapply irrel_pidist_Ex_max; eauto.
exists 1. intros. destruct (is_left); rewrite Rabs_right; nra.
}
apply (Rbar_lt_le_trans _ (Pr (eq^~ x) {| ivd_ival := I; val_sum1 := all_sum1 I2 I Hin |}));
first done.
apply Ex_max_spec1' => //=.
eapply (ex_Pr (eq^~x) {| ivd_ival := I; val_sum1 := all_sum1 I2 I Hin |}).
}
assert (∃ I' : ivdist X, In (I': ival X) I1 ∧ 0 < Pr (eq^~x) I') as (I'&Hin'&Hpr').
{
apply Classical_Pred_Type.not_all_not_ex. intros Hneg.
apply Rbar_lt_not_le in Hmax. apply Hmax.
apply Ex_max_spec2.
intros r' (I'&Hin'&Heq).
apply Rbar_not_lt_le. intros Hlt.
exfalso; eapply (Hneg {| ivd_ival := I'; val_sum1 := all_sum1 _ _ Hin'|}).
split; first done.
rewrite /Pr. erewrite is_Ex_ival_unique; last eassumption.
auto.
}
exists I'. apply pr_gt_0_In_isupport in Hpr'.
destruct Hpr' as (?&?&?). eexists; split_and!; eauto.
Qed.
Lemma irrel_pidist_choice {X} (I1 I1' I2 I2': pidist X) p Hpf Hpf':
irrel_pidist I1 I2 →
irrel_pidist I1' I2' →
irrel_pidist (pidist_plus p Hpf I1 I1') (pidist_plus p Hpf' I2 I2').
Proof.
intros Hirrel1 Hirrel2.
transitivity (b ← pidist_plus p Hpf (mret true) (mret false);
if (b: bool) then I1 else I1').
{ rewrite pidist_plus_bind ?pidist_left_id. reflexivity. }
transitivity (b ← pidist_plus p Hpf' (mret true) (mret false);
if (b: bool) then I2 else I2'); last first.
{ rewrite pidist_plus_bind ?pidist_left_id. reflexivity. }
apply irrel_pidist_bind.
{ cut (eq_pidist (pidist_plus p Hpf (mret true) (mret false))
(pidist_plus p Hpf' (mret true) (mret false))).
{ intros ->; reflexivity. }
apply pidist_plus_proper; reflexivity.
}
intros [|]; eauto.
Qed.
Lemma irrel_pidist_irrel {X Y}: ∀ I1 (I0: pidist Y), @irrel_pidist X (x ← I0; I1) I1.
Proof.
intros. intros f Hbounded.
rewrite Ex_min_bind_irrel //=; try reflexivity;
eauto using Ex_min_bounded_is_bounded, ex_Ex_extrema_bounded_fun,
Ex_min_bounded_fun_finite.
Qed.
Lemma irrel_coupling_proper {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
eq_ivd I1 I2 →
eq_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Lemma irrel_coupling_mono {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
eq_ivd I1 I2 →
le_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Lemma irrel_coupling_mono_irrel {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
eq_ivd I1 I2 →
irrel_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Lemma irrel_coupling_mono_irrel' {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
irrel_ivd I1 I2 →
irrel_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Global Instance irrel_coupling_prop_Proper {A1 A2}:
Proper (@eq_ivd A1 ==> @le_pidist A2 ==> eq ==> impl) irrel_coupling_propP.
Proof.
intros ?? Heq ?? Hle ?? ->.
intros H%ic_prop_to_wit.
apply ic_wit_to_prop.
eapply irrel_coupling_mono; eauto.
Qed.
Global Instance irrel_coupling_prop_irrel_Proper {A1 A2}:
Proper (@eq_ivd A1 ==> @irrel_pidist A2 ==> eq ==> impl) irrel_coupling_propP.
Proof.
intros ?? Heq ?? Hle ?? ->.
intros H%ic_prop_to_wit.
apply ic_wit_to_prop.
eapply irrel_coupling_mono_irrel; eauto.
Qed.
Lemma irrel_coupling_mret {A1 A2} (P: A1 → A2 → Prop) x y:
P x y →
irrel_couplingP (mret x) (mret y) P.
Proof.
intros HP. exists (mret x) (mret y); try reflexivity.
by apply ip_coupling_mret.
Qed.
Lemma irrel_coupling_prop_mret {A1 A2} (P: A1 → A2 → Prop) x y:
P x y →
irrel_coupling_propP (mret x) (mret y) P.
Proof.
intros; apply ic_wit_to_prop, irrel_coupling_mret; auto.
Qed.
Lemma irrel_coupling_bind {A1 A2 B1 B2} P (f1: A1 → ivdist B1) (f2: A2 → pidist B2)
I1 Is2 Q (Ic: irrel_couplingP I1 Is2 P):
(∀ x y, P x y → irrel_couplingP (f1 x) (f2 y) Q) →
irrel_couplingP (mbind f1 I1) (mbind f2 Is2) Q.
Proof.
intros Hfc.
destruct Ic as [I1' Is2' HeqI HeqIs Hcouple].
destruct Hcouple as [I2' ? [Ic ? ?]%ic_coupling_to_id].
unshelve (eexists).
- refine (xy ← Ic; _).
destruct xy as ((x&y)&HP).
destruct (Hfc _ _ HP).
exact irrel_I0.
- refine (xy ← singleton Ic; _).
destruct xy as ((x&y)&HP).
destruct (Hfc x y HP).
exact irrel_Is0.
- etransitivity.
{ eapply irrel_ivd_bind. eauto. reflexivity. }
etransitivity.
{ eapply irrel_ivd_bind. setoid_rewrite idc_proj1. reflexivity. reflexivity. }
setoid_rewrite ivd_assoc. eapply irrel_ivd_bind; first reflexivity.
intros ((x&y)&HP).
destruct (Hfc _ _ _) as [? ? ?]. rewrite /irrel_I.
rewrite /sval. setoid_rewrite ivd_left_id. done.
- etransitivity; last first.
{ eapply irrel_pidist_bind.
- etransitivity; last by eauto. eapply irrel_pidist_proper; first by eauto.
reflexivity. reflexivity.
- intros; reflexivity.
}
setoid_rewrite idc_proj2. setoid_rewrite singleton_bind.
setoid_rewrite pidist_assoc.
eapply irrel_pidist_bind; first reflexivity.
intros ((x&y)&HP).
destruct (Hfc _ _ _) as [? ? ?]. rewrite /irrel_I.
rewrite /sval. setoid_rewrite singleton_mret. setoid_rewrite pidist_left_id.
eauto.
- eapply (ip_coupling_bind _ _ _ _ (λ x y, x = y)).
* apply ip_coupling_singleton.
* intros ((?&?)&HP1) ((x&y)&HP2).
inversion 1; subst.
rewrite //=.
assert (HP1 = HP2). { apply classical_proof_irrelevance. }
subst.
destruct (Hfc x y HP2). eauto.
Qed.
Lemma irrel_coupling_prop_bind {A1 A2 B1 B2} P (f1: A1 → ivdist B1) (f2: A2 → pidist B2)
I1 Is2 Q (Ic: irrel_coupling_propP I1 Is2 P):
(∀ x y, P x y → irrel_coupling_propP (f1 x) (f2 y) Q) →
irrel_coupling_propP (mbind f1 I1) (mbind f2 Is2) Q.
Proof.
intros; eapply ic_wit_to_prop, irrel_coupling_bind; intros; apply ic_prop_to_wit; eauto.
Qed.
Lemma irrel_coupling_trivial {A1 A2} (I: ivdist A1) (Is: pidist A2):
irrel_couplingP I Is (λ x y, True).
Proof.
assert ({ I' : ivdist A2 | In (I': ival A2) Is}) as (I'&Hin).
{ destruct Is as [(Is&Hne) Hall] => //=.
rewrite //= in Hall.
apply ClassicalEpsilon.constructive_indefinite_description in Hne as (I'&His).
exists {| ivd_ival := I'; val_sum1 := Hall _ His |}.
auto.
}
exists (x ← I'; I) (singleton (x ← I; I')).
{ eapply irrel_ivd_irrel. }
{ eapply irrel_pidist_proper_irrel; [| apply le_pidist_irrel_refl | reflexivity ].
intros I0 Hin'. inversion Hin' as [Heq].
exists I'; split; auto.
eapply (irrel_ivd_proper _ (x ← I; I')).
{ rewrite /eq_ivd. rewrite -Heq //=. }
{ reflexivity. }
symmetry. apply irrel_ivd_irrel.
}
exists (x ← I; I').
{ intros ?. eapply In_pidist_le_singleton. eexists; split; first reflexivity.
rewrite /In/singleton//=. }
unshelve (eexists).
{ refine (ivd_ival (x ← I; y ← I'; mret _)).
exists (x, y); done. }
- setoid_rewrite ival_bind_comm. setoid_rewrite ival_assoc.
eapply ival_bind_congr; first reflexivity.
intros. setoid_rewrite ival_bind_mret_mret. setoid_rewrite ival_right_id. reflexivity.
- setoid_rewrite ival_assoc.
eapply ival_bind_congr; first reflexivity.
intros. setoid_rewrite ival_bind_mret_mret. setoid_rewrite ival_right_id. reflexivity.
Qed.
Lemma irrel_coupling_prop_trivial {A1 A2} (I: ivdist A1) (Is: pidist A2):
irrel_coupling_propP I Is (λ x y, True).
Proof.
apply ic_wit_to_prop, irrel_coupling_trivial.
Qed.
Lemma irrel_coupling_conseq {A1 A2} (P1 P2: A1 → A2 → Prop) (I: ivdist A1) (Is: pidist A2):
(∀ x y, P1 x y → P2 x y) →
irrel_couplingP I Is P1 →
irrel_couplingP I Is P2.
Proof.
intros HP Hirrel.
destruct Hirrel as [I0 Is0 ? ? ?].
exists I0 Is0; auto.
eapply ip_coupling_conseq; eauto.
Qed.
Lemma irrel_coupling_plus {A1 A2} p Hpf p' Hpf'
(P : A1 → A2 → Prop) (Is1 Is1': ivdist A1) (Is2 Is2': pidist A2) :
p = p' →
irrel_couplingP Is1 Is2 P →
irrel_couplingP Is1' Is2' P →
irrel_couplingP (ivdplus p Hpf Is1 Is1') (pidist_plus p' Hpf' Is2 Is2') P.
Proof.
intros Hpeq Hic Hic'. subst.
destruct Hic as [I1i Is2i Hirrel1i Hirrel2i Hwit].
destruct Hic' as [I1i' Is2i' Hirrel1i' Hirrel2i' Hwit'].
exists (ivdplus p' Hpf I1i I1i') (pidist_plus p' Hpf' Is2i Is2i').
{ eapply irrel_ivd_choice; eauto. }
{ eapply irrel_pidist_choice; eauto. }
apply ip_coupling_plus; eauto.
Qed.
Lemma irrel_coupling_bind_condition {A1 B1 B2} (f1: A1 → ivdist B1) (f2: A1 → pidist B2)
I Is Q x:
(le_pidist (singleton I) Is ) →
(irrel_couplingP (f1 x) (f2 x) Q) →
irrel_couplingP (x ← I; y ← f1 x; mret (x, y))
(x ← Is; y ← f2 x; mret (x, y))
(λ xy1 xy2, fst xy1 = x → fst xy2 = x → Q (snd xy1) (snd xy2)).
Proof.
intros Hle Hc.
eapply (irrel_coupling_bind (λ x y, x = y)).
{ exists I Is; try reflexivity.
exists I; eauto. apply ival_coupling_refl.
}
intros ? y ?; subst.
destruct (ClassicalEpsilon.excluded_middle_informative (x = y)).
- intros; subst. eapply irrel_coupling_bind; eauto.
intros. apply irrel_coupling_mret => ? //=.
- intros. eapply irrel_coupling_bind.
* apply irrel_coupling_trivial.
* intros. apply irrel_coupling_mret => ? //=. intros. congruence.
Qed.
Lemma irrel_coupling_support {X Y} I1 I2 (P: X → Y → Prop):
∀ (Ic: irrel_couplingP I1 I2 P),
irrel_couplingP I1 I2 (λ x y, ∃ Hpf: P x y, In_isupport x I1 ∧ In_psupport y I2 ∧
In_isupport (exist _ (x, y) Hpf) Ic).
Proof.
intros [? ? Heq1 Heq2 Ic].
specialize (ip_coupling_support _ _ _ Ic).
eexists; eauto.
eapply ip_coupling_conseq; eauto.
intros x y (Hpf&Hin1&Hin2&?); exists Hpf; repeat split; auto.
- edestruct Hin1 as (i&?&?).
edestruct (irrel_ivd_support_coerce _ _ Heq1) as (Hcoerce&_).
apply Hcoerce; eauto.
- eapply irrel_pidist_support_coerce; eauto.
Qed.
Lemma irrel_coupling_support_wit {X Y} I1 I2 (P: X → Y → Prop):
∀ (Ic: irrel_couplingP I1 I2 P),
{ xy : X * Y | ∃ Hpf : P (fst xy) (snd xy),
In_isupport (fst xy) I1 ∧ In_psupport (snd xy) I2 ∧ In_isupport (exist _ xy Hpf) Ic }.
Proof.
intros [? ? Heq1 Heq2 Ic].
specialize (ip_coupling_support_wit _ _ _ Ic).
rewrite //=.
intros ((x&y)&Hpf).
exists (x, y).
destruct Hpf as (Hpf&Hin1&Hin2&?).
exists Hpf; repeat split; auto.
- edestruct Hin1 as (i&?&?).
edestruct (irrel_ivd_support_coerce _ _ Heq1) as (Hcoerce&_).
apply Hcoerce; eauto.
- eapply irrel_pidist_support_coerce; eauto.
Qed.
Lemma rsupport_support_right {X Y} (Ix: ivdist X) (x: X) Is (P: X → Y → Prop)
(Ic: irrel_couplingP Ix Is P) (c: rsupport Ic x) :
In_psupport (proj1_sig c) Is.
Proof.
destruct c as (y'&ic&HP&Hind&Hgt).
rewrite //=. destruct Ic as [Ix' Is' Hirrel_ivd Hirrel_pidist Ic].
eapply irrel_pidist_support_coerce; eauto.
destruct Ic as [Iy Hle Ic].
rewrite //= in ic Hind Hgt.
clear Hirrel_pidist.
destruct (irrel_ivd_support_coerce _ _ Hirrel_ivd x) as (Hcoerce&_).
destruct (Hle Iy) as (Iy'&Heq&Hin); first by auto.
destruct Ic as [Ic Hproj1 Hproj2].
rewrite //= in ic Hind Hgt.
symmetry in Hproj2.
setoid_rewrite Heq in Hproj2.
destruct Hproj2 as (h1&h2&?&?&Hindic&Hvalic).
assert (val (x0 ← Ic; mret (sval x0).2) (existT ic tt) > 0) as Hgt'.
{ rewrite //= Rmult_1_r //=. }
specialize (Hindic (coerce_supp _ _ Hgt')).
specialize (Hvalic (coerce_supp _ _ Hgt')).
rewrite //= in Hindic Hvalic.
exists Iy'.
exists (sval (h1 (coerce_supp _ _ Hgt'))).
repeat split; auto.
- rewrite Hindic Hind //=.
- rewrite Hvalic //=.
Qed.
Lemma rsupport_post {X Y} (Ix: ivdist X) (x: X) Is (P: X → Y → Prop)
(Ic: irrel_couplingP Ix Is P) (c: rsupport Ic x) :
P x (proj1_sig c).
Proof.
destruct c as (y&I&i&Hind&?).
rewrite //=.
Qed.
Transparent pidist_ret.
Lemma rsupport_mret_right {X Y} (Ix: ivdist X) (x: X) (y: Y) (P: X → Y → Prop)
(Ic: irrel_couplingP Ix (mret y) P) (c: rsupport Ic x) :
proj1_sig c = y.
Proof.
edestruct (rsupport_support_right _ _ _ _ Ic c) as (Iy&iy&Hin&Hind&?).
subst; rewrite -Hind //=.
rewrite /In/mret/base.mret//= in Hin.
subst. destruct iy => //=.
Qed.
Opaque pidist_ret.
Lemma ip_irrel_coupling {A1 A2} (I: ivdist A1) (Is: pidist A2) (P: A1 → A2 → Prop):
idist_pidist_couplingP I Is P →
irrel_couplingP I Is P.
Proof.
intros.
exists I Is; try reflexivity; eauto.
Qed.
Lemma irrel_bounded_supp_fun {A} f (Is Is': pidist A):
irrel_pidist Is Is' →
bounded_fun_on f (λ x, In_psupport x Is') →
bounded_fun_on f (λ x, In_psupport x Is).
Proof.
intros Hle Hbf.
eapply bounded_fun_on_anti; try eassumption.
eapply irrel_pidist_support_coerce; eauto.
Qed.
Lemma irrel_pidist_bounded_supp_Ex_max {A} f (Is Is': pidist A):
irrel_pidist Is Is' →
bounded_fun_on f (λ x, In_psupport x Is') →
Rbar_le (Ex_max f Is) (Ex_max f Is').
Proof.
intros Hi Hb1.
feed pose proof (irrel_bounded_supp_fun f Is Is') as Hb2; eauto.
assert (bounded_fun_on f (λ x, In_psupport x Is ∨ In_psupport x Is')) as Hb.
{ destruct Hb1 as (c1&?).
destruct Hb2 as (c2&?).
exists (Rmax c1 c2).
intros x [Hin1|Hin2]; rewrite Rmax_Rle; intuition.
}
clear Hb1. clear Hb2.
edestruct (bounded_fun_on_to_bounded f) as (g'&Hb'&Heq); eauto.
feed pose proof (irrel_pidist_Ex_max Is Is' Hi g' Hb'); eauto.
erewrite (Ex_max_eq_ext_supp f g' Is'); eauto.
etransitivity; eauto.
erewrite (Ex_max_eq_ext_supp f g' Is); eauto; first reflexivity.
Qed.
Lemma Ex_min_irrel_anti {A} f (Is Is': pidist A) :
irrel_pidist Is Is' →
bounded_fun f →
Rbar_le (Ex_min f Is') (Ex_min f Is).
Proof. eauto. Qed.
Lemma irrel_coupling_eq_ex_Ex {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun g →
ex_Ex_ival f I.
Proof.
intros [Is1_irrel Is2_irrel Hirrel_ivd Hirrel_pidst Ic] Hex.
assert (idist_pidist_couplingP (x ← Is1_irrel; mret (f x))
(x ← Is2_irrel; mret (g x))
(λ x y, x = y)) as Ic'.
{ eapply ip_coupling_bind; eauto => ???.
apply ip_coupling_mret; auto. }
destruct Ic' as [I2 Hmem Ic'].
apply ival_coupling_eq in Ic'.
eapply ex_Ex_ival_irrel_proper.
{ symmetry; eauto. }
rewrite (ex_Ex_ival_fmap id f).
setoid_rewrite Ic'.
cut (ex_Ex_extrema id (x ← Is2_irrel; mret (g x))).
{ intros Hex'. edestruct (Hmem I2) as (I2'&Heq'&?); first done.
rewrite Heq'. eapply Hex'; eauto. }
rewrite -ex_Ex_extrema_fmap. eauto.
eapply ex_Ex_extrema_bounded_fun.
eauto.
Qed.
Lemma irrel_coupling_eq_Ex_min {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun g →
Rbar_le (Ex_min g Is) (Ex_ival f I).
Proof.
intros Hirrel Hb.
feed pose proof (irrel_coupling_eq_ex_Ex f g I Is) as Hex; eauto.
destruct Hirrel as [Is1_irrel Is2_irrel Hirrel_ivd Hirrel_pidst Ic].
assert (idist_pidist_couplingP (x ← Is1_irrel; mret (f x))
(x ← Is2_irrel; mret (g x))
(λ x y, x = y)) as Ic'.
{ eapply ip_coupling_bind; eauto => ???.
apply ip_coupling_mret; auto. }
destruct Ic' as [I2 Hmem Ic'].
apply ival_coupling_eq in Ic'.
etransitivity; first apply Ex_min_irrel_anti; eauto.
erewrite Ex_ival_irrel_proper; eauto.
transitivity (Ex_min (λ x, Ex_min id (mret (g x))) Is2_irrel).
{ apply Ex_min_le_ext.
* intros. rewrite Ex_min_mret. reflexivity.
* eapply ex_Ex_extrema_bounded_fun; eauto.
}
assert (ex_Ex_ival f Is1_irrel).
{ eapply ex_Ex_ival_irrel_proper; eauto. }
etransitivity; first eapply Ex_min_bind_post_aux2; last first.
- transitivity (Ex_ival (λ x, Ex_ival id (mret (f x))) Is1_irrel); last first.
{ apply Ex_ival_mono.
* intros. rewrite Ex_ival_mret. reflexivity.
* setoid_rewrite Ex_ival_mret.
eapply ex_Ex_ival_irrel_proper; eauto.
* eapply ex_Ex_ival_irrel_proper; eauto.
}
rewrite -Ex_ival_bind_post; last first.
{ rewrite -ex_Ex_ival_fmap. eauto. }
transitivity (Ex_ival id I2); last first.
{ refl_right. f_equal. symmetry. eapply Ex_ival_proper; eauto.
rewrite -ex_Ex_ival_fmap. eauto. }
apply In_pidist_le_singleton in Hmem.
destruct Hmem as (I2'&Heq22'&?).
transitivity (Ex_ival id I2'); last first.
{ refl_right. f_equal. symmetry. eapply Ex_ival_proper; eauto.
eapply ex_Ex_ival_proper; eauto.
rewrite -ex_Ex_ival_fmap. eauto. }
apply Ex_min_spec1'; auto.
eapply ex_Ex_ival_proper; eauto.
eapply ex_Ex_ival_proper; eauto.
rewrite -ex_Ex_ival_fmap. eauto.
- setoid_rewrite Ex_min_mret.
apply ex_Ex_extrema_bounded_fun; eauto.
- intros. setoid_rewrite Ex_min_mret. rewrite //=.
- apply Ex_min_bounded_fun_finite.
setoid_rewrite Ex_min_mret. eauto.
Qed.
Lemma irrel_coupling_eq_Ex_min' {A1 A2 A3} f g (h : A3 → R) (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun (λ x, h (g x)) →
Rbar_le (Ex_min (λ x, h (g x)) Is) (Ex_ival (λ x, h (f x)) I).
Proof.
intros Hic Hb.
eapply irrel_coupling_eq_Ex_min; eauto.
eapply irrel_coupling_conseq; eauto.
rewrite //=. intros x y ->. done.
Qed.
Lemma irrel_coupling_eq_Ex_max {A1 A2} f g (I: ivdist A1) (Is: pidist A2):
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun g →
Rbar_le (Ex_ival f I) (Ex_max g Is).
Proof.
intros HIc Hb.
apply Rbar_opp_le.
rewrite Ex_max_neg_min Rbar_opp_involutive.
rewrite /Rbar_opp//=.
rewrite -Ex_ival_negate.
apply irrel_coupling_eq_Ex_min; eauto.
- eapply irrel_coupling_conseq; eauto => x y ?.
nra.
- destruct Hb as (c&Hb). exists c; intros x. specialize (Hb x).
move: Hb. do 2 apply Rabs_case; nra.
Qed.
Lemma irrel_coupling_eq_ex_Ex_supp {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun_on g (λ x, In_psupport x Is) →
ex_Ex_ival f I.
Proof.
intros Hi Hex.
edestruct (bounded_fun_on_to_bounded g) as (g'&?Hb&Heq); eauto.
feed pose proof (irrel_coupling_eq_ex_Ex f g' I Is); eauto.
eapply irrel_coupling_conseq; last first.
{ unshelve (eapply @irrel_coupling_support); last eapply Hi. }
rewrite //=. intros x y (Hpf&Hin&Hinp&?).
rewrite -Heq; eauto.
Qed.
Lemma irrel_coupling_eq_Ex_min_supp {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun_on g (λ x, In_psupport x Is) →
Rbar_le (Ex_min g Is) (Ex_ival f I).
Proof.
intros Hi Hex.
edestruct (bounded_fun_on_to_bounded g) as (g'&?Hb&Heq); eauto.
feed pose proof (irrel_coupling_eq_Ex_min f g' I Is); eauto.
eapply irrel_coupling_conseq; last first.
{ unshelve (eapply @irrel_coupling_support); last eapply Hi. }
rewrite //=. intros x y (Hpf&Hin&Hinp&?).
rewrite -Heq; eauto.
etransitivity; last eassumption.
refl_right.
eapply Ex_min_eq_ext_supp.
eauto.
Qed.
Lemma irrel_coupling_eq_Ex_max_supp {A1 A2} f g (I: ivdist A1) (Is: pidist A2):
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun_on g (λ x, In_psupport x Is) →
Rbar_le (Ex_ival f I) (Ex_max g Is).
Proof.
intros HIc Hb.
apply Rbar_opp_le.
rewrite Ex_max_neg_min Rbar_opp_involutive.
rewrite /Rbar_opp//=.
rewrite -Ex_ival_negate.
apply irrel_coupling_eq_Ex_min_supp; eauto.
- eapply irrel_coupling_conseq; eauto => x y ?.
nra.
- destruct Hb as (c&Hb). exists c; intros x Hin. specialize (Hb x Hin).
move: Hb. do 2 apply Rabs_case; nra.
Qed.
|
postulate
@0 A : Set
_ : (@0 B : Set) .(C : Set) → Set
_ = λ B C → ?
|
use close, only : local_pi => renamed_pi
write(*,*) local_pi
end |
[GOAL]
n : ℕ
i : Fin2 n
⊢ ∀ {α : TypeVec n} (x : Prj i α), abs i (repr i x) = x
[PROOFSTEP]
intros
[GOAL]
n : ℕ
i : Fin2 n
α✝ : TypeVec n
x✝ : Prj i α✝
⊢ abs i (repr i x✝) = x✝
[PROOFSTEP]
rfl
[GOAL]
n : ℕ
i : Fin2 n
⊢ ∀ {α β : TypeVec n} (f : α ⟹ β) (p : MvPFunctor.Obj (P i) α), abs i (f <$$> p) = f <$$> abs i p
[PROOFSTEP]
intros α β f P
[GOAL]
n : ℕ
i : Fin2 n
α β : TypeVec n
f : α ⟹ β
P : MvPFunctor.Obj (MvQPF.Prj.P i) α
⊢ abs i (f <$$> P) = f <$$> abs i P
[PROOFSTEP]
cases P
[GOAL]
case mk
n : ℕ
i : Fin2 n
α β : TypeVec n
f : α ⟹ β
fst✝ : (P i).A
snd✝ : MvPFunctor.B (P i) fst✝ ⟹ α
⊢ abs i (f <$$> { fst := fst✝, snd := snd✝ }) = f <$$> abs i { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rfl
|
If $f_1$ is asymptotic to $g_1$ and $g_1$ is eventually nonzero, then $f_1/f_2$ is asymptotic to $g_1/g_2$ if and only if $1/f_2$ is asymptotic to $1/g_2$. |
Ever tried? Ever failed? No matter!
Try again, fail again, fail better! |
[STATEMENT]
lemma polynomial_constant:
assumes "a \<in> carrier R"
shows "polynomial [a] = \<cc>\<^bsub>a\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. polynomial [a] = constant_function (carrier R) a
[PROOF STEP]
apply(rule function_ring_car_eqI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. polynomial [a] \<in> carrier (function_ring (carrier R) R)
2. constant_function (carrier R) a \<in> carrier (function_ring (carrier R) R)
3. \<And>aa. aa \<in> carrier R \<Longrightarrow> polynomial [a] aa = constant_function (carrier R) a aa
[PROOF STEP]
using assms polynomial_function
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier R
set ?as \<subseteq> carrier R \<Longrightarrow> polynomial ?as \<in> carrier (function_ring (carrier R) R)
goal (3 subgoals):
1. polynomial [a] \<in> carrier (function_ring (carrier R) R)
2. constant_function (carrier R) a \<in> carrier (function_ring (carrier R) R)
3. \<And>aa. aa \<in> carrier R \<Longrightarrow> polynomial [a] aa = constant_function (carrier R) a aa
[PROOF STEP]
apply (metis (full_types) list.distinct(1) list.set_cases set_ConsD subset_code(1))
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. constant_function (carrier R) a \<in> carrier (function_ring (carrier R) R)
2. \<And>aa. aa \<in> carrier R \<Longrightarrow> polynomial [a] aa = constant_function (carrier R) a aa
[PROOF STEP]
apply (simp add: constant_function_closed assms)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>aa. aa \<in> carrier R \<Longrightarrow> polynomial [a] aa = constant_function (carrier R) a aa
[PROOF STEP]
using polynomial.simps(2)[of a "[]"] polynomial.simps(1) assms
[PROOF STATE]
proof (prove)
using this:
polynomial [a] = (\<lambda>x\<in>carrier R. a \<oplus> x \<otimes> polynomial [] x)
polynomial [] = \<zero>\<^bsub>function_ring (carrier R) R\<^esub>
a \<in> carrier R
goal (1 subgoal):
1. \<And>aa. aa \<in> carrier R \<Longrightarrow> polynomial [a] aa = constant_function (carrier R) a aa
[PROOF STEP]
by (simp add: constant_function_eq function_zero_eval) |
Require Import Ensembles Relations.
Set Implicit Arguments.
Implicit Arguments In [U].
Definition var := nat.
Inductive formula : Set :=
| Atom : var -> formula
| Bot : formula
| And : formula -> formula -> formula
| Or : formula -> formula -> formula
| Imp : formula -> formula -> formula
| Box : formula -> formula
| Dia : formula -> formula.
Infix "'->" := Imp (at level 70, right associativity).
Infix "'\/" := Or (at level 60).
Infix "'/\" := And (at level 50).
Notation "[]" := Box.
Notation "<>" := Dia.
(* Definition Not f := f '-> Bot. *)
(* Definition Top := Bot '-> Bot. *)
Coercion Atom : var >-> formula.
Inductive provable : Ensemble formula -> formula -> Prop :=
| By_axiom : forall fs f, In fs f -> provable fs f
| By_K : forall fs f g, provable fs (f '-> g '-> f)
| By_S : forall fs f g h,
provable fs ((f '-> g '-> h) '-> (f '-> g) '-> (f '-> h))
| By_proj1 : forall fs f g, provable fs (f '/\ g '-> f)
| By_proj2 : forall fs f g, provable fs (f '/\ g '-> g)
| By_conj : forall fs f g, provable fs (f '-> g '-> f '/\ g)
| By_in1 : forall fs f g, provable fs (f '-> f '\/ g)
| By_in2 : forall fs f g, provable fs (g '-> f '\/ g)
| By_case : forall fs f g h,
provable fs ((f '-> h) '-> (g '-> h) '-> (f '\/ g '-> h))
| By_exfalso : forall fs f, provable fs (Bot '-> f)
| By_KBox : forall fs f g, provable fs ([](f '-> g) '-> []f '-> []g)
| By_KDia : forall fs f g, provable fs ([](f '-> g) '-> <>f '-> <>g)
| By_NDia : forall fs, provable fs (<>Bot '-> Bot)
| By_MP : forall fs f g,
provable fs (f '-> g) -> provable fs f -> provable fs g
| By_Nec : forall fs f, provable (Empty_set _) f -> provable fs ([] f).
Hint Constructors provable.
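(* Illustrative example (added, not part of the original development): the
   identity f '-> f is derivable from K and S by two applications of MP,
   the classic S K K combinator derivation. *)
Lemma provable_id : forall fs f, provable fs (f '-> f).
Proof.
  intros fs f.
  apply By_MP with (f := f '-> f '-> f); [ | apply By_K ].
  apply By_MP with (f := f '-> (f '-> f) '-> f); [ apply By_S | apply By_K ].
Qed.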
Module Type KRIPKE_MODEL.
Parameter W : Type.
Parameter R Ri : relation W.
Axiom Ri_refl : reflexive _ Ri.
Axiom Ri_trans : transitive _ Ri.
Infix "<=" := Ri.
Parameter V : W -> var -> Prop.
Axiom monotone : forall x y p, V x p -> x <= y -> V y p.
End KRIPKE_MODEL.
Module Kripke_Semantics (K : KRIPKE_MODEL).
Import K.
Fixpoint satisfies x f :=
match f with
| Atom p => V x p
| Bot => False
| And f f' => satisfies x f /\ satisfies x f'
| Or f f' => satisfies x f \/ satisfies x f'
| Imp f f' => forall y, x <= y -> satisfies y f -> satisfies y f'
| Box f => forall y z, x <= y -> R y z -> satisfies z f
| Dia f => forall y, x <= y -> exists z, R y z /\ satisfies z f
end.
Hint Resolve Ri_trans Ri_refl monotone.
Lemma hereditary : forall f x y,
satisfies x f -> x <= y -> satisfies y f.
induction f; simpl; intuition eauto.
Qed.
Hint Extern 0 =>
match goal with [ H : In (Empty_set _) _ |- _ ] => inversion H end.
Theorem soundness : forall fs f, provable fs f ->
forall x, (forall g, In fs g -> satisfies x g) -> satisfies x f.
induction 1; simpl in *; intuition; eauto using hereditary.
destruct (H3 y1); intuition eauto.
destruct (H1 y); intuition.
Qed.
End Kripke_Semantics.
Lemma deduction_theorem' : forall fs fs' f g,
provable fs' g -> fs' = Add _ fs f -> provable fs (f '-> g).
induction 1; intro; subst; [| eauto ..].
inversion H; try inversion H0; subst; eauto.
Existential 1 := Bot.
Existential 1 := Bot.
Existential 1 := Bot.
Qed.
Lemma deduction_theorem : forall fs f g,
provable (Add _ fs f) g -> provable fs (f '-> g).
eauto using deduction_theorem'.
Qed.
Lemma weakening : forall fs f g, provable fs f -> provable (Add _ fs g) f.
induction 1; solve [repeat constructor; auto | eauto].
Qed.
Hint Constructors Union Singleton.
Hint Extern 1 (provable (Add _ _ _) _) => unfold Add.
Lemma converse_deduction_theorem :
forall fs f g, provable fs (f '-> g) -> provable (Add _ fs f) g.
intros; apply (@By_MP _ f g); auto using weakening.
Qed.
Module Canonical_Model <: KRIPKE_MODEL.
Definition prime fs :=
forall f g, provable fs (f '\/ g) -> provable fs f \/ provable fs g.
Definition consistent fs := ~provable fs Bot.
Definition W := { P : Ensemble formula * formula |
prime (fst P) /\ consistent (fst P) /\ ~provable (fst P) (<> (snd P)) }.
Definition Ri (T U : W) :=
forall f, provable (fst (proj1_sig T)) f -> provable (fst (proj1_sig U)) f.
Definition R (T U : W) :=
(forall f, provable (fst (proj1_sig T)) ([]f) ->
provable (fst (proj1_sig U)) f) /\
~provable (fst (proj1_sig U)) (snd (proj1_sig T)).
Hint Unfold R Ri Included.
Infix "<=" := Ri.
Lemma Ri_refl : forall T, T <= T.
auto.
Qed.
Lemma Ri_trans : forall T U V, T <= U -> U <= V -> T <= V.
auto.
Qed.
Definition V (T : W) (p : var) := provable (fst (proj1_sig T)) p.
Hint Unfold V.
Lemma monotone : forall x y p, V x p -> x <= y -> V y p.
intuition.
Qed.
End Canonical_Model.
Module Canonical_Model_facts.
Module M := Kripke_Semantics Canonical_Model.
Import Canonical_Model M.
Definition boxinv fs f := provable fs ([] f).
Lemma boxinv_closed : forall fs f,
provable (boxinv fs) f -> In (boxinv fs) f.
intros fs f; generalize (refl_equal (boxinv fs));
generalize (boxinv fs) at -1.
induction 2; intuition; unfold In; subst; unfold boxinv; eauto 3.
Qed.
Axiom prime_extension : forall fs f, ~provable fs f ->
exists fs', prime fs' /\ ~provable fs' f /\
forall h, provable fs h -> provable fs' h.
Axiom pr_decidable : forall fs f, provable fs f \/ ~provable fs f.
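(* Note (added): these two axioms carry the classical content of the
   completeness proof, namely a Lindenbaum-style prime-extension property and
   decidability of provability; they are assumed here rather than proved. *)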
Ltac assert_pt z x f :=
let H := fresh in
assert(H : prime x /\ consistent x /\ ~provable x (<> f))
by (intuition; eauto);
set (z := exist _ (x, f) H : W).
Hint Unfold consistent.
Lemma prime_lemma : forall fs f,
(forall x : W, (forall g, provable fs g -> provable (fst (proj1_sig x)) g) ->
provable (fst (proj1_sig x)) f) ->
provable fs f.
intros fs f H; assert (~~provable fs f);
[ | case (pr_decidable fs f)]; intuition.
destruct (prime_extension H0) as [T [? [? ?]]].
assert_pt y T Bot.
specialize (H y).
auto.
Existential 1 := Bot.
Qed.
Ltac destruct_iff :=
match goal with
[ H : forall _, _ <-> _ |- _] =>
pose proof (fun x => proj1 (H x));
pose proof (fun x => proj2 (H x));
clear H
end.
Lemma equivalence : forall f x, provable (fst (proj1_sig x)) f <-> satisfies x f.
induction f;
intro x; destruct x as [T [p [con mcon]]];
simpl in *; intuition; repeat destruct_iff.
apply H2; eauto.
apply H0; eauto.
assert (provable (fst T) f1 /\ provable (fst T) f2) by firstorder.
intuition; eauto 3.
assert (provable (fst T) f1 \/ provable (fst T) f2) by auto; intuition.
assert (provable (fst T) f1) by firstorder.
eauto 3.
assert (provable (fst T) f2) by firstorder.
eauto.
eauto.
apply deduction_theorem.
apply prime_lemma.
auto 8 using weakening.
destruct H1.
eauto.
assert (In (boxinv (fst T)) f); [| eauto].
apply boxinv_closed.
apply prime_lemma.
intros.
assert_pt x' (fst T) Bot.
specialize (H x').
apply H1; apply H; intuition.
unfold R.
intuition.
destruct x; simpl in *; intuition.
destruct y as [[T' g] [? [? mcon']]]; simpl in *.
destruct (@prime_extension (Add _ (boxinv T') f) g); intuition.
apply mcon'.
assert (provable (boxinv T') (f '-> g))
by auto using deduction_theorem.
assert (provable T' ([] (f '-> g))) by
(specialize (boxinv_closed H4); auto).
eauto 4.
assert_pt x1 x0 Bot.
exists x1; intuition.
unfold R; intuition.
apply H1; auto.
destruct T as [x' ?]; simpl in *.
assert (~~provable x' (<> f)); intuition.
assert_pt z x' f.
destruct (H z).
auto.
intuition.
destruct H5.
subst; simpl in *; auto.
case (pr_decidable x' (<> f)); intuition.
Existential 1 := Bot.
Existential 1 := Bot.
Qed.
Hint Resolve (fun f x => proj1 (equivalence f x)).
Hint Resolve (fun f x => proj2 (equivalence f x)).
Theorem completeness : forall fs f,
(forall T, (forall g, In fs g -> satisfies T g) -> satisfies T f) ->
provable fs f.
auto 7 using prime_lemma.
Qed.
End Canonical_Model_facts.
|
[STATEMENT]
lemma SAT_UN[simp]: "SAT (\<Union> i \<in> I. \<Phi> i) \<longleftrightarrow> (\<forall> i \<in> I. SAT (\<Phi> i))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SAT (\<Union> (\<Phi> ` I)) = (\<forall>i\<in>I. SAT (\<Phi> i))
[PROOF STEP]
unfolding SAT_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>\<xi>. wtE \<xi> \<longrightarrow> satPB \<xi> (\<Union> (\<Phi> ` I))) = (\<forall>i\<in>I. \<forall>\<xi>. wtE \<xi> \<longrightarrow> satPB \<xi> (\<Phi> i))
[PROOF STEP]
by auto |
/*
This code is based on the original code by Quentin "Leph" Rouxel and Team Rhoban.
The original files can be found at:
https://github.com/Rhoban/model/
*/
#ifndef FOOTSTEP_HPP
#define FOOTSTEP_HPP
#include <Eigen/Dense>
#include <cmath>
#include <stdexcept>
#include "bitbots_splines/Angle.h"
namespace bitbots_quintic_walk {
/**
* Footstep
*
* Manage humanoid footstep
* generation and state
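*
* Typical use (illustrative): reset(), then one
* stepFromOrders() call per step cycle, reading
* getLast()/getNext() to build the flying foot trajectory.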
*/
class Footstep
{
public:
enum SupportFoot {
LeftSupportFoot,
RightSupportFoot,
};
/**
* Initialization with lateral
* foot distance and support foot
*/
Footstep(
double footDistance,
bool isLeftSupportFoot = true);
/**
* Set the lateral foot
* distance parameters
*/
void setFootDistance(double footDistance);
double getFootDistance();
/**
* Reset the current step to the neutral
* position (not the integrated odometry)
*/
void reset(bool isLeftSupportFoot);
// reset odometry
void resetInWorld(bool isLeftSupportFoot);
/**
* Current support foot
*/
bool isLeftSupport() const;
/**
* Starting position of current flying
* foot in support foot frame
*/
const Eigen::Vector3d& getLast() const;
/**
* Target pose of current flying
* foot in support foot frame
*/
const Eigen::Vector3d& getNext() const;
/**
* Returns the odometry change of the current step.
*/
const Eigen::Vector3d& getOdom() const;
/**
* Left and right, current or next pose
* of foot in world initial frame
*/
const Eigen::Vector3d& getLeft() const;
const Eigen::Vector3d& getRight() const;
/**
* Set the target pose of current support foot
* during next support phase and update support foot.
* The target foot pose diff is given with respect to
* next support foot pose (current flying foot target).
*/
void stepFromSupport(const Eigen::Vector3d& diff);
/**
* Set target pose of current support foot
* using diff orders.
* Zero vector means in place walking.
* Special handling of lateral and turn steps
* to avoid foot collisions.
*/
void stepFromOrders(const Eigen::Vector3d& diff);
private:
/**
* Static lateral distance
* between the feet
*/
double _footDistance;
/**
* Current support foot
* (left or right)
*/
bool _isLeftSupportFoot;
/**
* Pose diff [dx, dy, dtheta]
* from support foot to flying foot
* last and next position
*/
Eigen::Vector3d _supportToLast;
Eigen::Vector3d _supportToNext;
/**
* Pose integration of left
* and right foot in initial frame.
* Set at "future" state taking into account
* next expected foot pose.
*/
Eigen::Vector3d _leftInWorld;
Eigen::Vector3d _rightInWorld;
/**
* Add to given pose the given diff
* expressed in pose frame and
* return the integrated added pose
*/
Eigen::Vector3d poseAdd(
const Eigen::Vector3d& pose,
const Eigen::Vector3d& diff) const;
/**
* Compute and return the delta from
* (zero+diff) to (zero) in
* (zero+diff) frame.
*/
Eigen::Vector3d diffInv(
const Eigen::Vector3d& diff) const;
};
}
#endif |
[STATEMENT]
theorem completeness:
assumes finite_cs: "finite Cs" "\<forall>C\<in>Cs. finite C"
assumes unsat: "\<forall>(F::hterm fun_denot) (G::hterm pred_denot) . \<not>eval\<^sub>c\<^sub>s F G Cs"
shows "\<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
[PROOF STEP]
from unsat
[PROOF STATE]
proof (chain)
picking this:
\<forall>F G. \<not> eval\<^sub>c\<^sub>s F G Cs
[PROOF STEP]
have "\<forall>(G::hterm pred_denot) . \<not>eval\<^sub>c\<^sub>s HFun G Cs"
[PROOF STATE]
proof (prove)
using this:
\<forall>F G. \<not> eval\<^sub>c\<^sub>s F G Cs
goal (1 subgoal):
1. \<forall>G. \<not> eval\<^sub>c\<^sub>s HFun G Cs
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>G. \<not> eval\<^sub>c\<^sub>s HFun G Cs
goal (1 subgoal):
1. \<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>G. \<not> eval\<^sub>c\<^sub>s HFun G Cs
[PROOF STEP]
obtain T where "closed_tree T Cs"
[PROOF STATE]
proof (prove)
using this:
\<forall>G. \<not> eval\<^sub>c\<^sub>s HFun G Cs
goal (1 subgoal):
1. (\<And>T. closed_tree T Cs \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using herbrand assms
[PROOF STATE]
proof (prove)
using this:
\<forall>G. \<not> eval\<^sub>c\<^sub>s HFun G Cs
\<lbrakk>\<forall>G. \<not> eval\<^sub>c\<^sub>s HFun G ?Cs; finite ?Cs; \<forall>C\<in>?Cs. finite C\<rbrakk> \<Longrightarrow> \<exists>T. closed_tree T ?Cs
finite Cs
\<forall>C\<in>Cs. finite C
\<forall>F G. \<not> eval\<^sub>c\<^sub>s F G Cs
goal (1 subgoal):
1. (\<And>T. closed_tree T Cs \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
closed_tree T Cs
goal (1 subgoal):
1. \<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
closed_tree T Cs
[PROOF STEP]
show "\<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'"
[PROOF STATE]
proof (prove)
using this:
closed_tree T Cs
goal (1 subgoal):
1. \<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
[PROOF STEP]
using completeness' assms
[PROOF STATE]
proof (prove)
using this:
closed_tree T Cs
\<lbrakk>closed_tree ?T ?Cs; \<forall>C\<in>?Cs. finite C\<rbrakk> \<Longrightarrow> \<exists>Cs'. resolution_deriv ?Cs Cs' \<and> {} \<in> Cs'
finite Cs
\<forall>C\<in>Cs. finite C
\<forall>F G. \<not> eval\<^sub>c\<^sub>s F G Cs
goal (1 subgoal):
1. \<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>Cs'. resolution_deriv Cs Cs' \<and> {} \<in> Cs'
goal:
No subgoals!
[PROOF STEP]
qed |
record Pointed (A : Set) : Set where
field
point : A
point : {A : Set} ⦃ p : Pointed A ⦄ → A
point ⦃ p = p ⦄ = Pointed.point p
record R : Set₁ where
field
A : Set
instance
is-pointed : Pointed A
postulate
r : R
open R r
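-- Instance search finds is-pointed through the opened record r.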
x : R.A r
x = point
|
function r = ne(p,q)
% this function compares the x, y, and z values of the FaceVariable
% structures that I use in the FVtool for inequality, elementwise; one of
% the two inputs may also be a plain numeric value.
%
% SYNOPSIS:
%   r = ne(p, q)   % equivalently: r = (p ~= q)
%
% PARAMETERS:
%   p, q: FaceVariable objects and/or numeric values; at least one of
%   the two must be a FaceVariable
%
% RETURNS:
%   r: FaceVariable whose xvalue, yvalue, and zvalue are logical arrays,
%   true where the corresponding face values differ
%
% EXAMPLE:
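%   % illustrative sketch (assumes FVTool's createMesh1D/createFaceVariable):
%   m = createMesh1D(10, 1.0);
%   f = createFaceVariable(m, 1.0);
%   g = createFaceVariable(m, 2.0);
%   r = (f ~= g);   % true on every face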
%
% SEE ALSO:
%
% Copyright (c) 2012-2016 Ali Akbar Eftekhari
% See the license file
if (isa(p, 'FaceVariable')&&isa(q, 'FaceVariable'))
r=p;
r.xvalue = p.xvalue~=q.xvalue;
r.yvalue = p.yvalue~=q.yvalue;
r.zvalue = p.zvalue~=q.zvalue;
elseif isa(p, 'FaceVariable')
r=p;
r.xvalue = p.xvalue~=q;
r.yvalue = p.yvalue~=q;
r.zvalue = p.zvalue~=q;
else
r=q;
r.xvalue = p~=q.xvalue;
r.yvalue = p~=q.yvalue;
r.zvalue = p~=q.zvalue;
end
|
import itertools
import numpy as np
from yt.testing import assert_almost_equal, fake_amr_ds
def test_amr_kdtree_set_fields():
ds = fake_amr_ds(fields=["density", "pressure"])
dd = ds.all_data()
fields = ds.field_list
dd.tiles.set_fields(fields, [True, True], False)
gold = {}
for i, block in enumerate(dd.tiles.traverse()):
gold[i] = [data.copy() for data in block.my_data]
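    # Toggling a field's log flag should only change the stored data by a
    # log10 transform relative to the log-scaled baseline captured above.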
for log_fields in itertools.product([True, False], [True, False]):
dd.tiles.set_fields(fields, log_fields, False)
for iblock, block in enumerate(dd.tiles.traverse()):
for i in range(len(fields)):
if log_fields[i]:
data = block.my_data[i]
else:
data = np.log10(block.my_data[i])
assert_almost_equal(gold[iblock][i], data)
|
# Programming Exercise 4: Neural Networks Learning
## Introduction
In this exercise, you will implement the backpropagation algorithm for neural networks and apply it to the task of hand-written digit recognition. Before starting on the programming exercise, we strongly recommend watching the video lectures and completing the review questions for the associated topics.
All the information you need for solving this assignment is in this notebook, and all the code you will be implementing will take place within this notebook. The assignment can be promptly submitted to the coursera grader directly from this notebook (code and instructions are included below).
Before we begin with the exercises, we need to import all libraries required for this programming exercise. Throughout the course, we will be using [`numpy`](http://www.numpy.org/) for all arrays and matrix operations, [`matplotlib`](https://matplotlib.org/) for plotting, and [`scipy`](https://docs.scipy.org/doc/scipy/reference/) for scientific and numerical computation functions and tools. You can find instructions on how to install required libraries in the README file in the [github repository](https://github.com/dibgerge/ml-coursera-python-assignments).
```python
# used for manipulating directory paths
import os
# Scientific and vector computation for python
import numpy as np
# Plotting library
from matplotlib import pyplot
# Optimization module in scipy
from scipy import optimize
# will be used to load MATLAB mat datafile format
from scipy.io import loadmat
# library written for this exercise providing additional functions for assignment submission, and others
import utils
# define the submission/grader object for this exercise
grader = utils.Grader()
# tells matplotlib to embed plots within the notebook
%matplotlib inline
```
## Submission and Grading
After completing each part of the assignment, be sure to submit your solutions to the grader. The following is a breakdown of how each part of this exercise is scored.
| Section | Part | Submission function | Points
| :- |:- | :- | :-:
| 1 | [Feedforward and Cost Function](#section1) | [`nnCostFunction`](#nnCostFunction) | 30
| 2 | [Regularized Cost Function](#section2) | [`nnCostFunction`](#nnCostFunction) | 15
| 3 | [Sigmoid Gradient](#section3) | [`sigmoidGradient`](#sigmoidGradient) | 5
| 4 | [Neural Net Gradient Function (Backpropagation)](#section4) | [`nnCostFunction`](#nnCostFunction) | 40
| 5 | [Regularized Gradient](#section5) | [`nnCostFunction`](#nnCostFunction) | 10
| | Total Points | | 100
You are allowed to submit your solutions multiple times, and we will take only the highest score into consideration.
<div class="alert alert-block alert-warning">
At the end of each section in this notebook, we have a cell which contains code for submitting the solutions thus far to the grader. Execute the cell to see your score up to the current section. For all your work to be submitted properly, you must execute those cells at least once.
</div>
## Neural Networks
In the previous exercise, you implemented feedforward propagation for neural networks and used it to predict handwritten digits with the weights we provided. In this exercise, you will implement the backpropagation algorithm to learn the parameters for the neural network.
We start the exercise by first loading the dataset.
```python
# training data stored in arrays X, y
data = loadmat(os.path.join('Data', 'ex4data1.mat'))
X, y = data['X'], data['y'].ravel()
# set the zero digit to 0, rather than its mapped 10 in this dataset
# This is an artifact due to the fact that this dataset was used in
# MATLAB where there is no index 0
y[y == 10] = 0
# Number of training examples
m = y.size
```
### 1.1 Visualizing the data
You will begin by visualizing a subset of the training set, using the function `displayData`, which is the same function we used in Exercise 3. It is provided in the `utils.py` file for this assignment as well. The dataset is also the same one you used in the previous exercise.
There are 5000 training examples in `ex4data1.mat`, where each training example is a 20 pixel by 20 pixel grayscale image of the digit. Each pixel is represented by a floating point number indicating the grayscale intensity at that location. The 20 by 20 grid of pixels is “unrolled” into a 400-dimensional vector. Each
of these training examples becomes a single row in our data matrix $X$. This gives us a 5000 by 400 matrix $X$ where every row is a training example for a handwritten digit image.
$$ X = \begin{bmatrix} - \left(x^{(1)} \right)^T - \\
- \left(x^{(2)} \right)^T - \\
\vdots \\
- \left(x^{(m)} \right)^T - \\
\end{bmatrix}
$$
The second part of the training set is a 5000-dimensional vector `y` that contains labels for the training set.
The following cell randomly selects 100 images from the dataset and plots them.
```python
# Randomly select 100 data points to display
rand_indices = np.random.choice(m, 100, replace=False)
sel = X[rand_indices, :]
utils.displayData(sel)
```
### 1.2 Model representation
Our neural network is shown in the following figure.
It has 3 layers - an input layer, a hidden layer and an output layer. Recall that our inputs are pixel values
of digit images. Since the images are of size $20 \times 20$, this gives us 400 input layer units (not counting the extra bias unit which always outputs +1). The training data was loaded into the variables `X` and `y` above.
You have been provided with a set of network parameters ($\Theta^{(1)}, \Theta^{(2)}$) already trained by us. These are stored in `ex4weights.mat` and will be loaded in the next cell of this notebook into `Theta1` and `Theta2`. The parameters have dimensions that are sized for a neural network with 25 units in the second layer and 10 output units (corresponding to the 10 digit classes).
```python
# Setup the parameters you will use for this exercise
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 0 to 9
# Load the weights into variables Theta1 and Theta2
weights = loadmat(os.path.join('Data', 'ex4weights.mat'))
# Theta1 has size 25 x 401
# Theta2 has size 10 x 26
Theta1, Theta2 = weights['Theta1'], weights['Theta2']
# roll the rows of Theta2 so the class for digit 0 comes first, due to legacy
# MATLAB indexing: the weight file ex4weights.mat was saved with digit 0 mapped to 10
Theta2 = np.roll(Theta2, 1, axis=0)
# Unroll parameters
nn_params = np.concatenate([Theta1.ravel(), Theta2.ravel()])
```
<a id="section1"></a>
### 1.3 Feedforward and cost function
Now you will implement the cost function and gradient for the neural network. First, complete the code for the function `nnCostFunction` in the next cell to return the cost.
Recall that the cost function for the neural network (without regularization) is:
$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}\sum_{k=1}^{K} \left[ - y_k^{(i)} \log \left( \left( h_\theta \left( x^{(i)} \right) \right)_k \right) - \left( 1 - y_k^{(i)} \right) \log \left( 1 - \left( h_\theta \left( x^{(i)} \right) \right)_k \right) \right]$$
where $h_\theta \left( x^{(i)} \right)$ is computed as shown in the neural network figure above, and K = 10 is the total number of possible labels. Note that $h_\theta(x^{(i)})_k = a_k^{(3)}$ is the activation (output
value) of the $k^{th}$ output unit. Also, recall that whereas the original labels (in the variable y) were 0, 1, ..., 9, for the purpose of training a neural network, we need to encode the labels as vectors containing only values 0 or 1, so that
$$ y =
\begin{bmatrix} 1 \\ 0 \\ 0 \\\vdots \\ 0 \end{bmatrix}, \quad
\begin{bmatrix} 0 \\ 1 \\ 0 \\ \vdots \\ 0 \end{bmatrix}, \quad \cdots \quad \text{or} \qquad
\begin{bmatrix} 0 \\ 0 \\ 0 \\ \vdots \\ 1 \end{bmatrix}.
$$
For example, if $x^{(i)}$ is an image of the digit 5, then the corresponding $y^{(i)}$ (that you should use with the cost function) should be a 10-dimensional vector with $y_5 = 1$, and the other elements equal to 0.
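In `numpy`, one compact way to build these one-hot vectors for all examples at once is the following sketch (`y_matrix` is an illustrative name, not part of the provided code):

```python
# Rows of the identity matrix, indexed by the labels, are one-hot vectors;
# e.g. label 5 selects the row with a 1 in position 5.
y_matrix = np.eye(num_labels)[y]   # shape (m, num_labels)
```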
You should implement the feedforward computation that computes $h_\theta(x^{(i)})$ for every example $i$ and sum the cost over all examples. **Your code should also work for a dataset of any size, with any number of labels** (you can assume that there are always at least $K \ge 3$ labels).
<div class="alert alert-box alert-warning">
**Implementation Note:** The matrix $X$ contains the examples in rows (i.e., X[i,:] is the i-th training example $x^{(i)}$, expressed as an $n \times 1$ vector.) When you complete the code in `nnCostFunction`, you will need to add the column of 1’s to the X matrix. The parameters for each unit in the neural network are represented in Theta1 and Theta2 as one row. Specifically, the first row of Theta1 corresponds to the first hidden unit in the second layer. You can use a for-loop over the examples to compute the cost.
</div>
<a id="nnCostFunction"></a>
```python
def nnCostFunction(nn_params,
input_layer_size,
hidden_layer_size,
num_labels,
X, y, lambda_=0.0):
"""
Implements the neural network cost function and gradient for a two layer neural
network which performs classification.
Parameters
----------
nn_params : array_like
The parameters for the neural network which are "unrolled" into
a vector. This needs to be converted back into the weight matrices Theta1
and Theta2.
input_layer_size : int
Number of features for the input layer.
hidden_layer_size : int
Number of hidden units in the second layer.
num_labels : int
Total number of labels, or equivalently number of units in output layer.
X : array_like
Input dataset. A matrix of shape (m x input_layer_size).
y : array_like
Dataset labels. A vector of shape (m,).
lambda_ : float, optional
Regularization parameter.
Returns
-------
J : float
The computed value for the cost function at the current weight values.
grad : array_like
An "unrolled" vector of the partial derivatives of the concatenatation of
neural network weights Theta1 and Theta2.
Instructions
------------
You should complete the code by working through the following parts.
- Part 1: Feedforward the neural network and return the cost in the
variable J. After implementing Part 1, you can verify that your
cost function computation is correct by verifying the cost
computed in the following cell.
- Part 2: Implement the backpropagation algorithm to compute the gradients
Theta1_grad and Theta2_grad. You should return the partial derivatives of
the cost function with respect to Theta1 and Theta2 in Theta1_grad and
Theta2_grad, respectively. After implementing Part 2, you can check
that your implementation is correct by running checkNNGradients provided
in the utils.py module.
Note: The vector y passed into the function is a vector of labels
containing values from 0..K-1. You need to map this vector into a
binary vector of 1's and 0's to be used with the neural network
cost function.
Hint: We recommend implementing backpropagation using a for-loop
over the training examples if you are implementing it for the
first time.
- Part 3: Implement regularization with the cost function and gradients.
Hint: You can implement this around the code for
backpropagation. That is, you can compute the gradients for
the regularization separately and then add them to Theta1_grad
and Theta2_grad from Part 2.
Note
----
We have provided an implementation for the sigmoid function in the file
`utils.py` accompanying this assignment.
"""
# Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
# for our 2 layer neural network
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
(hidden_layer_size, (input_layer_size + 1)))
Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
(num_labels, (hidden_layer_size + 1)))
# Setup some useful variables
m = y.size
# You need to return the following variables correctly
J = 0
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
# ====================== YOUR CODE HERE ======================
X = np.concatenate([np.ones((m, 1)), X], axis=1)
# print(X.shape)
# print(Theta1.shape)
y = y.reshape(-1) # -1 to transform y in a vector
y = np.eye(num_labels)[y]
# forward propagate
z2 = np.dot(X,Theta1.T)
a2 = utils.sigmoid(z2)
a2 = np.concatenate([np.ones((m, 1)), a2], axis=1)
# print(a2.shape)
# print(Theta2.shape)
z3 = np.dot(a2,Theta2.T)
a3 = utils.sigmoid(z3)
# cost function
reg_term = (np.sum(np.square(Theta1[:, 1:])) + np.sum(np.square(Theta2[:, 1:])))*(lambda_/(2*m))
J = -(np.sum((np.log(a3) * y) + np.log(1 - a3) * (1 - y)))/m + reg_term
# predicted y - actual y
del3 = a3 - y
del2 = np.dot(del3,Theta2[:, 1:])*(utils.sigmoid(z2)*(1-utils.sigmoid(z2)))
# print(del3.T.shape)
# print(X.shape)
# print(a2.shape)
# print(del2.T.shape)
    Del1 = np.dot(del2.T, X)
    Del2 = np.dot(del3.T, a2)
    # regularize all weights except the bias column (j = 0), which stays unregularized
    Theta1_grad = Del1/m + (np.c_[np.zeros((Theta1.shape[0], 1)), Theta1[:, 1:]]*lambda_)/m
    Theta2_grad = Del2/m + (np.c_[np.zeros((Theta2.shape[0], 1)), Theta2[:, 1:]]*lambda_)/m
# ================================================================
grad = np.concatenate([Theta1_grad.ravel(), Theta2_grad.ravel()])
return J, grad
```
<div class="alert alert-box alert-warning">
Use the following links to go back to the different parts of this exercise that require to modify the function `nnCostFunction`.<br>
Back to:
- [Feedforward and cost function](#section1)
- [Regularized cost](#section2)
- [Neural Network Gradient (Backpropagation)](#section4)
- [Regularized Gradient](#section5)
</div>
Once you are done, call your `nnCostFunction` using the loaded set of parameters for `Theta1` and `Theta2`. You should see that the cost is about 0.287629.
```python
lambda_ = 0
J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
num_labels, X, y, lambda_)
print('Cost at parameters (loaded from ex4weights): %.6f ' % J)
print('The cost should be about : 0.287629.')
```
Cost at parameters (loaded from ex4weights): 0.287629
The cost should be about : 0.287629.
*You should now submit your solutions.*
```python
grader = utils.Grader()
grader[1] = nnCostFunction
#grader.grade()
```
<a id="section2"></a>
### 1.4 Regularized cost function
The cost function for neural networks with regularization is given by:
$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}\sum_{k=1}^{K} \left[ - y_k^{(i)} \log \left( \left( h_\theta \left( x^{(i)} \right) \right)_k \right) - \left( 1 - y_k^{(i)} \right) \log \left( 1 - \left( h_\theta \left( x^{(i)} \right) \right)_k \right) \right] + \frac{\lambda}{2 m} \left[ \sum_{j=1}^{25} \sum_{k=1}^{400} \left( \Theta_{j,k}^{(1)} \right)^2 + \sum_{j=1}^{10} \sum_{k=1}^{25} \left( \Theta_{j,k}^{(2)} \right)^2 \right] $$
You can assume that the neural network will only have 3 layers - an input layer, a hidden layer and an output layer. However, your code should work for any number of input units, hidden units and outputs units. While we
have explicitly listed the indices above for $\Theta^{(1)}$ and $\Theta^{(2)}$ for clarity, do note that your code should in general work with $\Theta^{(1)}$ and $\Theta^{(2)}$ of any size. Note that you should not be regularizing the terms that correspond to the bias. For the matrices `Theta1` and `Theta2`, this corresponds to the first column of each matrix. You should now add regularization to your cost function. Notice that you can first compute the unregularized cost function $J$ using your existing `nnCostFunction` and then later add the cost for the regularization terms.
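As a sketch, the regularization term can be added on top of the unregularized cost like this (`J_unreg` is an illustrative name for the cost you already compute):

```python
# Illustrative: sum of squared weights, skipping each Theta's bias column
reg = (lambda_ / (2 * m)) * (np.sum(np.square(Theta1[:, 1:]))
                             + np.sum(np.square(Theta2[:, 1:])))
J = J_unreg + reg
```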
[Click here to go back to `nnCostFunction` for editing.](#nnCostFunction)
Once you are done, the next cell will call your `nnCostFunction` using the loaded set of parameters for `Theta1` and `Theta2`, and $\lambda = 1$. You should see that the cost is about 0.383770.
```python
# Weight regularization parameter (we set this to 1 here).
lambda_ = 1
J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
num_labels, X, y, lambda_)
print('Cost at parameters (loaded from ex4weights): %.6f' % J)
print('This value should be about : 0.383770.')
```
Cost at parameters (loaded from ex4weights): 0.383770
This value should be about : 0.383770.
*You should now submit your solutions.*
```python
grader[2] = nnCostFunction
#grader.grade()
```
## 2 Backpropagation
In this part of the exercise, you will implement the backpropagation algorithm to compute the gradient for the neural network cost function. You will need to update the function `nnCostFunction` so that it returns an appropriate value for `grad`. Once you have computed the gradient, you will be able to train the neural network by minimizing the cost function $J(\theta)$ using an advanced optimizer such as `scipy`'s `optimize.minimize`.
You will first implement the backpropagation algorithm to compute the gradients for the parameters for the (unregularized) neural network. After you have verified that your gradient computation for the unregularized case is correct, you will implement the gradient for the regularized neural network.
<a id="section3"></a>
### 2.1 Sigmoid Gradient
To help you get started with this part of the exercise, you will first implement
the sigmoid gradient function. The gradient for the sigmoid function can be
computed as
$$ g'(z) = \frac{d}{dz} g(z) = g(z)\left(1-g(z)\right) $$
where
$$ \text{sigmoid}(z) = g(z) = \frac{1}{1 + e^{-z}} $$
Now complete the implementation of `sigmoidGradient` in the next cell.
<a id="sigmoidGradient"></a>
```python
def sigmoidGradient(z):
"""
Computes the gradient of the sigmoid function evaluated at z.
This should work regardless if z is a matrix or a vector.
In particular, if z is a vector or matrix, you should return
the gradient for each element.
Parameters
----------
z : array_like
A vector or matrix as input to the sigmoid function.
Returns
--------
g : array_like
Gradient of the sigmoid function. Has the same shape as z.
Instructions
------------
Compute the gradient of the sigmoid function evaluated at
each value of z (z can be a matrix, vector or scalar).
Note
----
We have provided an implementation of the sigmoid function
in `utils.py` file accompanying this assignment.
"""
g = np.zeros(z.shape)
# ====================== YOUR CODE HERE ======================
g = utils.sigmoid(z)*(1-utils.sigmoid(z))
# =============================================================
return g
```
When you are done, the following cell calls `sigmoidGradient` on a given vector `z`. Try testing a few values by calling `sigmoidGradient(z)`. For large values (both positive and negative) of z, the gradient should be close to 0. When $z = 0$, the gradient should be exactly 0.25. Your code should also work with vectors and matrices. For a matrix, your function should perform the sigmoid gradient function on every element.
```python
z = np.array([-1, -0.5, 0, 0.5, 1])
g = sigmoidGradient(z)
print('Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n ')
print(g)
```
Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:
[ 0.19661193 0.23500371 0.25 0.23500371 0.19661193]
*You should now submit your solutions.*
```python
grader[3] = sigmoidGradient
#grader.grade()
```
## 2.2 Random Initialization
When training neural networks, it is important to randomly initialize the parameters for symmetry breaking. One effective strategy for random initialization is to randomly select values for $\Theta^{(l)}$ uniformly in the range $[-\epsilon_{init}, \epsilon_{init}]$. You should use $\epsilon_{init} = 0.12$. This range of values ensures that the parameters are kept small and makes the learning more efficient.
<div class="alert alert-box alert-warning">
One effective strategy for choosing $\epsilon_{init}$ is to base it on the number of units in the network. A good choice of $\epsilon_{init}$ is $\epsilon_{init} = \frac{\sqrt{6}}{\sqrt{L_{in} + L_{out}}}$ where $L_{in} = s_l$ and $L_{out} = s_{l+1}$ are the number of units in the layers adjacent to $\Theta^{l}$.
</div>
Your job is to complete the function `randInitializeWeights` to initialize the weights for $\Theta$. Modify the function by filling in the following code:
```python
# Randomly initialize the weights to small values
W = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
```
Note that we give the function an argument for $\epsilon$ with default value `epsilon_init = 0.12`.
```python
def randInitializeWeights(L_in, L_out, epsilon_init=0.12):
"""
Randomly initialize the weights of a layer in a neural network.
Parameters
----------
L_in : int
        Number of incoming connections.
L_out : int
Number of outgoing connections.
epsilon_init : float, optional
Range of values which the weight can take from a uniform
distribution.
Returns
-------
W : array_like
        The weights, initialized to random values. Note that W should
be set to a matrix of size(L_out, 1 + L_in) as
the first column of W handles the "bias" terms.
Instructions
------------
Initialize W randomly so that we break the symmetry while training
the neural network. Note that the first column of W corresponds
to the parameters for the bias unit.
"""
# You need to return the following variables correctly
W = np.zeros((L_out, 1 + L_in))
# ====================== YOUR CODE HERE ======================
W = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
# ============================================================
return W
```
*You do not need to submit any code for this part of the exercise.*
Execute the following cell to initialize the weights for the 2 layers in the neural network using the `randInitializeWeights` function.
```python
print('Initializing Neural Network Parameters ...')
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)
# Unroll parameters
initial_nn_params = np.concatenate([initial_Theta1.ravel(), initial_Theta2.ravel()], axis=0)
```
Initializing Neural Network Parameters ...
<a id="section4"></a>
### 2.3 Backpropagation
Now, you will implement the backpropagation algorithm. Recall that the intuition behind the backpropagation algorithm is as follows. Given a training example $(x^{(t)}, y^{(t)})$, we will first run a “forward pass” to compute all the activations throughout the network, including the output value of the hypothesis $h_\theta(x)$. Then, for each node $j$ in layer $l$, we would like to compute an “error term” $\delta_j^{(l)}$ that measures how much that node was “responsible” for any errors in our output.
For an output node, we can directly measure the difference between the network’s activation and the true target value, and use that to define $\delta_j^{(3)}$ (since layer 3 is the output layer). For the hidden units, you will compute $\delta_j^{(l)}$ based on a weighted average of the error terms of the nodes in layer $(l+1)$. In detail, here is the backpropagation algorithm (also depicted in the figure above). You should implement steps 1 to 4 in a loop that processes one example at a time. Concretely, you should implement a for-loop `for t in range(m)` and place steps 1-4 below inside the for-loop, with the $t^{th}$ iteration performing the calculation on the $t^{th}$ training example $(x^{(t)}, y^{(t)})$. Step 5 will divide the accumulated gradients by $m$ to obtain the gradients for the neural network cost function.
1. Set the input layer’s values $(a^{(1)})$ to the $t^{th}$ training example $x^{(t)}$. Perform a feedforward pass, computing the activations $(z^{(2)}, a^{(2)}, z^{(3)}, a^{(3)})$ for layers 2 and 3. Note that you need to add a `+1` term to ensure that the vectors of activations for layers $a^{(1)}$ and $a^{(2)}$ also include the bias unit. In `numpy`, if `a_1` is a matrix with one example per row, adding the bias column corresponds to `a_1 = np.concatenate([np.ones((m, 1)), a_1], axis=1)`.
1. For each output unit $k$ in layer 3 (the output layer), set
$$\delta_k^{(3)} = \left(a_k^{(3)} - y_k \right)$$
where $y_k \in \{0, 1\}$ indicates whether the current training example belongs to class $k$ $(y_k = 1)$, or if it belongs to a different class $(y_k = 0)$. You may find logical arrays helpful for this task (explained in the previous programming exercise).
1. For the hidden layer $l = 2$, set
$$ \delta^{(2)} = \left( \Theta^{(2)} \right)^T \delta^{(3)} * g'\left(z^{(2)} \right)$$
Note that the symbol $*$ performs element wise multiplication in `numpy`.
1. Accumulate the gradient from this example using the following formula. Note that you should skip or remove $\delta_0^{(2)}$. In `numpy`, removing $\delta_0^{(2)}$ corresponds to `delta_2 = delta_2[1:]`.
$$ \Delta^{(l)} = \Delta^{(l)} + \delta^{(l+1)} (a^{(l)})^{(T)} $$
1. Obtain the (unregularized) gradient for the neural network cost function by multiplying the accumulated gradients by $\frac{1}{m}$ (a minimal loop-based sketch follows this list):
$$ \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)}$$
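The following sketch puts steps 1-5 together in the recommended per-example loop (illustrative only; it assumes the raw `X` without a bias column, `Theta1`/`Theta2` as above, and a hypothetical one-hot label matrix `y_matrix`):

```python
# Illustrative loop-based backpropagation (unregularized)
y_matrix = np.eye(num_labels)[y]          # one-hot labels, shape (m, num_labels)
Delta1 = np.zeros(Theta1.shape)
Delta2 = np.zeros(Theta2.shape)
for t in range(m):
    a1 = np.concatenate([[1], X[t]])      # step 1: feedforward with bias unit
    z2 = Theta1.dot(a1)
    a2 = np.concatenate([[1], utils.sigmoid(z2)])
    z3 = Theta2.dot(a2)
    a3 = utils.sigmoid(z3)
    delta3 = a3 - y_matrix[t]             # step 2: output-layer error
    delta2 = Theta2[:, 1:].T.dot(delta3) * sigmoidGradient(z2)  # step 3
    Delta1 += np.outer(delta2, a1)        # step 4: accumulate gradients
    Delta2 += np.outer(delta3, a2)
Theta1_grad = Delta1 / m                  # step 5: average over examples
Theta2_grad = Delta2 / m
```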
<div class="alert alert-box alert-warning">
**Python/Numpy tip**: You should implement the backpropagation algorithm only after you have successfully completed the feedforward and cost functions. While implementing the backpropagation algorithm, it is often useful to use the `shape` function to print out the shapes of the variables you are working with if you run into dimension mismatch errors.
</div>
[Click here to go back and update the function `nnCostFunction` with the backpropagation algorithm](#nnCostFunction).
**Note:** If the iterative solution provided above is proving to be difficult to implement, try implementing the vectorized approach which is easier to implement in the opinion of the moderators of this course. You can find the tutorial for the vectorized approach [here](https://www.coursera.org/learn/machine-learning/discussions/all/threads/a8Kce_WxEeS16yIACyoj1Q).
After you have implemented the backpropagation algorithm, we will proceed to run gradient checking on your implementation. The gradient check will allow you to increase your confidence that your code is
computing the gradients correctly.
### 2.4 Gradient checking
In your neural network, you are minimizing the cost function $J(\Theta)$. To perform gradient checking on your parameters, you can imagine “unrolling” the parameters $\Theta^{(1)}$, $\Theta^{(2)}$ into a long vector $\theta$. By doing so, you can think of the cost function being $J(\theta)$ instead and use the following gradient checking procedure.
Suppose you have a function $f_i(\theta)$ that purportedly computes $\frac{\partial}{\partial \theta_i} J(\theta)$; you’d like to check if $f_i$ is outputting correct derivative values.
$$
\text{Let } \theta^{(i+)} = \theta + \begin{bmatrix} 0 \\ 0 \\ \vdots \\ \epsilon \\ \vdots \\ 0 \end{bmatrix}
\quad \text{and} \quad \theta^{(i-)} = \theta - \begin{bmatrix} 0 \\ 0 \\ \vdots \\ \epsilon \\ \vdots \\ 0 \end{bmatrix}
$$
So, $\theta^{(i+)}$ is the same as $\theta$, except its $i^{th}$ element has been incremented by $\epsilon$. Similarly, $\theta^{(i−)}$ is the corresponding vector with the $i^{th}$ element decreased by $\epsilon$. You can now numerically verify $f_i(\theta)$’s correctness by checking, for each $i$, that:
$$ f_i\left( \theta \right) \approx \frac{J\left( \theta^{(i+)}\right) - J\left( \theta^{(i-)} \right)}{2\epsilon} $$
The degree to which these two values should approximate each other will depend on the details of $J$. But assuming $\epsilon = 10^{-4}$, you’ll usually find that the left- and right-hand sides of the above will agree to at least 4 significant digits (and often many more).
We have implemented the function to compute the numerical gradient for you in `computeNumericalGradient` (within the file `utils.py`). While you are not required to modify the file, we highly encourage you to take a look at the code to understand how it works.
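For intuition, such a routine might look like the following sketch (illustrative only; the provided `utils.computeNumericalGradient` may differ in its details):

```python
def numericalGradient(J, theta, e=1e-4):
    """Two-sided finite-difference approximation of the gradient of J at theta."""
    numgrad = np.zeros(theta.shape)
    perturb = np.zeros(theta.shape)
    for i in range(theta.size):
        perturb[i] = e
        loss1, _ = J(theta - perturb)   # J returns (cost, grad), as in this notebook
        loss2, _ = J(theta + perturb)
        numgrad[i] = (loss2 - loss1) / (2 * e)
        perturb[i] = 0
    return numgrad
```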
In the next cell we will run the provided function `checkNNGradients` which will create a small neural network and dataset that will be used for checking your gradients. If your backpropagation implementation is correct,
you should see a relative difference that is less than 1e-9.
<div class="alert alert-box alert-success">
**Practical Tip**: When performing gradient checking, it is much more efficient to use a small neural network with a relatively small number of input units and hidden units, thus having a relatively small number
of parameters. Each dimension of $\theta$ requires two evaluations of the cost function and this can be expensive. In the function `checkNNGradients`, our code creates a small random model and dataset which is used with `computeNumericalGradient` for gradient checking. Furthermore, after you are confident that your gradient computations are correct, you should turn off gradient checking before running your learning algorithm.
</div>
<div class="alert alert-box alert-success">
<b>Practical Tip:</b> Gradient checking works for any function where you are computing the cost and the gradient. Concretely, you can use the same `computeNumericalGradient` function to check if your gradient implementations for the other exercises are correct too (e.g., logistic regression’s cost function).
</div>
```python
utils.checkNNGradients(nnCostFunction)
```
[[ -9.27825235e-03 -9.27825236e-03]
[ -3.04978709e-06 -3.04978914e-06]
[ -1.75060084e-04 -1.75060082e-04]
[ -9.62660640e-05 -9.62660620e-05]
[ 8.89911959e-03 8.89911960e-03]
[ 1.42869427e-05 1.42869443e-05]
[ 2.33146358e-04 2.33146357e-04]
[ 1.17982666e-04 1.17982666e-04]
[ -8.36010761e-03 -8.36010762e-03]
[ -2.59383093e-05 -2.59383100e-05]
[ -2.87468731e-04 -2.87468729e-04]
[ -1.37149709e-04 -1.37149706e-04]
[ 7.62813550e-03 7.62813551e-03]
[ 3.69883235e-05 3.69883234e-05]
[ 3.35320351e-04 3.35320347e-04]
[ 1.53247082e-04 1.53247082e-04]
[ -6.74798369e-03 -6.74798370e-03]
[ -4.68759764e-05 -4.68759769e-05]
[ -3.76215585e-04 -3.76215587e-04]
[ -1.66560292e-04 -1.66560294e-04]
[ 3.14544970e-01 3.14544970e-01]
[ 1.64090819e-01 1.64090819e-01]
[ 1.64567932e-01 1.64567932e-01]
[ 1.58339334e-01 1.58339334e-01]
[ 1.51127527e-01 1.51127527e-01]
[ 1.49568335e-01 1.49568335e-01]
[ 1.11056588e-01 1.11056588e-01]
[ 5.75736494e-02 5.75736493e-02]
[ 5.77867378e-02 5.77867378e-02]
[ 5.59235296e-02 5.59235296e-02]
[ 5.36967009e-02 5.36967009e-02]
[ 5.31542052e-02 5.31542052e-02]
[ 9.74006970e-02 9.74006970e-02]
[ 5.04575855e-02 5.04575855e-02]
[ 5.07530173e-02 5.07530173e-02]
[ 4.91620841e-02 4.91620841e-02]
[ 4.71456249e-02 4.71456249e-02]
[ 4.65597186e-02 4.65597186e-02]]
The above two columns you get should be very similar.
(Left-Your Numerical Gradient, Right-Analytical Gradient)
If your backpropagation implementation is correct, then
the relative difference will be small (less than 1e-9).
Relative Difference: 2.53453e-11
*Once your cost function passes the gradient check for the (unregularized) neural network cost function, you should submit the neural network gradient function (backpropagation).*
```python
grader[4] = nnCostFunction
grader.grade()
```
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission ([email protected])? (Y/n): Y
Part Name | Score | Feedback
--------- | ----- | --------
Feedforward and Cost Function | 30 / 30 | Nice work!
Regularized Cost Function | 15 / 15 | Nice work!
Sigmoid Gradient | 5 / 5 | Nice work!
Neural Network Gradient (Backpropagation) | 40 / 40 | Nice work!
Regularized Gradient | 0 / 10 |
--------------------------------
| 90 / 100 |
<a id="section5"></a>
### 2.5 Regularized Neural Network
After you have successfully implemented the backpropagation algorithm, you will add regularization to the gradient. To account for regularization, it turns out that you can add this as an additional term *after* computing the gradients using backpropagation.
Specifically, after you have computed $\Delta_{ij}^{(l)}$ using backpropagation, you should add regularization using
$$ \begin{align}
& \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)} & \qquad \text{for } j = 0 \\
& \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)} + \frac{\lambda}{m} \Theta_{ij}^{(l)} & \qquad \text{for } j \ge 1
\end{align}
$$
Note that you should *not* be regularizing the first column of $\Theta^{(l)}$ which is used for the bias term. Furthermore, in the parameters $\Theta_{ij}^{(l)}$, $i$ is indexed starting from 1, and $j$ is indexed starting from 0. Thus,
$$
\Theta^{(l)} = \begin{bmatrix}
\Theta_{1,0}^{(l)} & \Theta_{1,1}^{(l)} & \cdots \\
\Theta_{2,0}^{(l)} & \Theta_{2,1}^{(l)} & \cdots \\
\vdots & ~ & \ddots
\end{bmatrix}
$$
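In code, this amounts to adding a regularization term whose bias column is zeroed out, as in this sketch (reusing the `Delta` accumulators from the backpropagation sketch above):

```python
# Illustrative: regularize every column except the bias column (j = 0)
Theta1_grad = Delta1 / m + (lambda_ / m) * np.c_[np.zeros((Theta1.shape[0], 1)), Theta1[:, 1:]]
Theta2_grad = Delta2 / m + (lambda_ / m) * np.c_[np.zeros((Theta2.shape[0], 1)), Theta2[:, 1:]]
```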
[Now modify your code that computes grad in `nnCostFunction` to account for regularization.](#nnCostFunction)
After you are done, the following cell runs gradient checking on your implementation. If your code is correct, you should expect to see a relative difference that is less than 1e-9.
```python
# Check gradients by running checkNNGradients
lambda_ = 3
utils.checkNNGradients(nnCostFunction, lambda_)
# Also output the costFunction debugging values
debug_J, _ = nnCostFunction(nn_params, input_layer_size,
hidden_layer_size, num_labels, X, y, lambda_)
print('\n\nCost at (fixed) debugging parameters (w/ lambda = %f): %f ' % (lambda_, debug_J))
print('(for lambda = 3, this value should be about 0.576051)')
```
[[ -9.27825235e-03 5.90721748e-01]
[ -1.67679797e-02 -1.67679797e-02]
[ -6.01744725e-02 -6.01744725e-02]
[ -1.73704651e-02 -1.73704651e-02]
[ 8.89911959e-03 6.08899120e-01]
[ 3.94334829e-02 3.94334829e-02]
[ -3.19612287e-02 -3.19612287e-02]
[ -5.75658668e-02 -5.75658668e-02]
[ -8.36010761e-03 5.91639892e-01]
[ 5.93355565e-02 5.93355565e-02]
[ 2.49225535e-02 2.49225535e-02]
[ -4.51963845e-02 -4.51963845e-02]
[ 7.62813550e-03 6.07628136e-01]
[ 2.47640974e-02 2.47640974e-02]
[ 5.97717617e-02 5.97717617e-02]
[ 9.14587966e-03 9.14587966e-03]
[ -6.74798369e-03 5.93252016e-01]
[ -3.26881426e-02 -3.26881426e-02]
[ 3.86410548e-02 3.86410548e-02]
[ 5.46101548e-02 5.46101547e-02]
[ 3.14544970e-01 9.14544970e-01]
[ 1.18682669e-01 1.18682669e-01]
[ 2.03987128e-01 2.03987128e-01]
[ 1.25698067e-01 1.25698067e-01]
[ 1.76337550e-01 1.76337550e-01]
[ 1.32294136e-01 1.32294136e-01]
[ 1.11056588e-01 7.11056588e-01]
[ 3.81928711e-05 3.81928696e-05]
[ 1.17148233e-01 1.17148233e-01]
[ -4.07588279e-03 -4.07588279e-03]
[ 1.13133142e-01 1.13133142e-01]
[ -4.52964427e-03 -4.52964427e-03]
[ 9.74006970e-02 6.97400697e-01]
[ 3.36926556e-02 3.36926556e-02]
[ 7.54801264e-02 7.54801264e-02]
[ 1.69677090e-02 1.69677090e-02]
[ 8.61628953e-02 8.61628953e-02]
[ 1.50048382e-03 1.50048382e-03]]
The above two columns you get should be very similar.
(Left-Your Numerical Gradient, Right-Analytical Gradient)
If your backpropagation implementation is correct, then
the relative difference will be small (less than 1e-9).
Relative Difference: 0.733708
Cost at (fixed) debugging parameters (w/ lambda = 3.000000): 0.576051
(for lambda = 3, this value should be about 0.576051)
```python
grader[5] = nnCostFunction
grader.grade()
```
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission ([email protected])? (Y/n): Y
Part Name | Score | Feedback
--------- | ----- | --------
Feedforward and Cost Function | 30 / 30 | Nice work!
Regularized Cost Function | 15 / 15 | Nice work!
Sigmoid Gradient | 5 / 5 | Nice work!
Neural Network Gradient (Backpropagation) | 40 / 40 | Nice work!
Regularized Gradient | 0 / 10 |
--------------------------------
| 90 / 100 |
### 2.6 Learning parameters using `scipy.optimize.minimize`
After you have successfully implemented the neural network cost function
and gradient computation, the next step is to use `scipy`'s `optimize.minimize` to learn a good set of parameters.
```python
# After you have completed the assignment, change the maxiter to a larger
# value to see how more training helps.
options= {'maxiter': 100}
# You should also try different values of lambda
lambda_ = 1
# Create "short hand" for the cost function to be minimized
costFunction = lambda p: nnCostFunction(p, input_layer_size,
hidden_layer_size,
num_labels, X, y, lambda_)
# Now, costFunction is a function that takes in only one argument
# (the neural network parameters)
res = optimize.minimize(costFunction,
initial_nn_params,
jac=True,
method='TNC',
options=options)
# get the solution of the optimization
nn_params = res.x
# Obtain Theta1 and Theta2 back from nn_params
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
(hidden_layer_size, (input_layer_size + 1)))
Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
(num_labels, (hidden_layer_size + 1)))
```
After the training completes, we will proceed to report the training accuracy of your classifier by computing the percentage of examples it got correct. If your implementation is correct, you should see a reported
training accuracy of about 95.3% (this may vary by about 1% due to the random initialization). It is possible to get higher training accuracies by training the neural network for more iterations. We encourage you to try
training the neural network for more iterations (e.g., set `maxiter` to 400) and also vary the regularization parameter $\lambda$. With the right learning settings, it is possible to get the neural network to perfectly fit the training set.
```python
pred = utils.predict(Theta1, Theta2, X)
print('Training Set Accuracy: %f' % (np.mean(pred == y) * 100))
```
Training Set Accuracy: 95.940000
## 3 Visualizing the Hidden Layer
One way to understand what your neural network is learning is to visualize the representations captured by the hidden units. Informally, given a particular hidden unit, one way to visualize what it computes is to find an input $x$ that will cause it to activate (that is, to have an activation value
($a_i^{(l)}$) close to 1). For the neural network you trained, notice that the $i^{th}$ row of $\Theta^{(1)}$ is a 401-dimensional vector that represents the parameter for the $i^{th}$ hidden unit. If we discard the bias term, we get a 400 dimensional vector that represents the weights from each input pixel to the hidden unit.
Thus, one way to visualize the “representation” captured by the hidden unit is to reshape this 400 dimensional vector into a 20 × 20 image and display it (It turns out that this is equivalent to finding the input that gives the highest activation for the hidden unit, given a “norm” constraint on the input (i.e., $||x||_2 \le 1$)).
The next cell does this by using the `displayData` function and it will show you an image with 25 units,
each corresponding to one hidden unit in the network. In your trained network, you should find that the hidden units correspond roughly to detectors that look for strokes and other patterns in the input.
```python
utils.displayData(Theta1[:, 1:])
```
### 3.1 Optional (ungraded) exercise
In this part of the exercise, you will get to try out different learning settings for the neural network to see how the performance of the neural network varies with the regularization parameter $\lambda$ and number of training steps (the `maxiter` option when using `scipy.optimize.minimize`). Neural networks are very powerful models that can form highly complex decision boundaries. Without regularization, it is possible for a neural network to “overfit” a training set so that it obtains close to 100% accuracy on the training set but does not do as well on new examples that it has not seen before. You can set the regularization $\lambda$ to a smaller value and the `maxiter` parameter to a higher number of iterations to see this for yourself.
|
[STATEMENT]
lemma "~~((P --> Q) = (~P | Q))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> (P \<longrightarrow> Q) \<noteq> (\<not> P \<or> Q)
[PROOF STEP]
by iprover |
/-
Copyright (c) 2022 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
! This file was ported from Lean 3 source module geometry.manifold.complex
! leanprover-community/mathlib commit f2ce6086713c78a7f880485f7917ea547a215982
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.Complex.AbsMax
import Mathbin.Analysis.LocallyConvex.WithSeminorms
import Mathbin.Geometry.Manifold.Mfderiv
import Mathbin.Topology.LocallyConstant.Basic
/-! # Holomorphic functions on complex manifolds
Thanks to the rigidity of complex-differentiability compared to real-differentiability, there are
many results about complex manifolds with no analogue for manifolds over a general normed field. For
now, this file contains just two (closely related) such results:
## Main results
* `mdifferentiable.is_locally_constant`: A complex-differentiable function on a compact complex
manifold is locally constant.
* `mdifferentiable.exists_eq_const_of_compact_space`: A complex-differentiable function on a compact
preconnected complex manifold is constant.
## TODO
There is a whole theory to develop here. Maybe a next step would be to develop a theory of
holomorphic vector/line bundles, including:
* the finite-dimensionality of the space of sections of a holomorphic vector bundle
* Siegel's theorem: for any `n + 1` formal ratios `g 0 / h 0`, `g 1 / h 1`, .... `g n / h n` of
sections of a fixed line bundle `L` over a complex `n`-manifold, there exists a polynomial
relationship `P (g 0 / h 0, g 1 / h 1, .... g n / h n) = 0`
Another direction would be to develop the relationship with sheaf theory, building the sheaves of
holomorphic and meromorphic functions on a complex manifold and proving algebraic results about the
stalks, such as the Weierstrass preparation theorem.
-/
open Manifold Topology
open Complex
namespace Mdifferentiable
variable {E : Type _} [NormedAddCommGroup E] [NormedSpace ℂ E]
variable {F : Type _} [NormedAddCommGroup F] [NormedSpace ℂ F] [StrictConvexSpace ℝ F]
variable {M : Type _} [TopologicalSpace M] [CompactSpace M] [ChartedSpace E M]
[SmoothManifoldWithCorners 𝓘(ℂ, E) M]
/-- A holomorphic function on a compact complex manifold is locally constant. -/
protected theorem isLocallyConstant {f : M → F} (hf : Mdifferentiable 𝓘(ℂ, E) 𝓘(ℂ, F) f) :
IsLocallyConstant f :=
by
haveI : LocallyConnectedSpace M := ChartedSpace.locallyConnectedSpace E M
apply IsLocallyConstant.of_constant_on_preconnected_clopens
intro s hs₂ hs₃ a ha b hb
have hs₁ : IsCompact s := hs₃.2.IsCompact
-- for an empty set this fact is trivial
rcases s.eq_empty_or_nonempty with (rfl | hs')
· exact False.ndrec _ ha
-- otherwise, let `p₀` be a point where the value of `f` has maximal norm
obtain ⟨p₀, hp₀s, hp₀⟩ := hs₁.exists_forall_ge hs' hf.continuous.norm.continuous_on
-- we will show `f` agrees everywhere with `f p₀`
suffices s ⊆ { r : M | f r = f p₀ } ∩ s by exact (this hb).1.trans (this ha).1.symm
clear ha hb a b
refine' hs₂.subset_clopen _ ⟨p₀, hp₀s, ⟨rfl, hp₀s⟩⟩
-- closedness of the set of points sent to `f p₀`
refine' ⟨_, (is_closed_singleton.preimage hf.continuous).inter hs₃.2⟩
-- we will show this set is open by showing it is a neighbourhood of each of its members
rw [isOpen_iff_mem_nhds]
rintro p ⟨hp : f p = _, hps⟩
-- let `p` be in this set
have hps' : s ∈ 𝓝 p := hs₃.1.mem_nhds hps
have key₁ : (chart_at E p).symm ⁻¹' s ∈ 𝓝 (chart_at E p p) :=
by
rw [← Filter.mem_map, (chart_at E p).symm_map_nhds_eq (mem_chart_source E p)]
exact hps'
have key₂ : (chart_at E p).target ∈ 𝓝 (chart_at E p p) :=
(LocalHomeomorph.open_target _).mem_nhds (mem_chart_target E p)
-- `f` pulled back by the chart at `p` is differentiable around `chart_at E p p`
have hf' : ∀ᶠ z : E in 𝓝 (chart_at E p p), DifferentiableAt ℂ (f ∘ (chart_at E p).symm) z :=
by
refine' Filter.eventually_of_mem key₂ fun z hz => _
have H₁ : (chart_at E p).symm z ∈ (chart_at E p).source := (chart_at E p).map_target hz
have H₂ : f ((chart_at E p).symm z) ∈ (chart_at F (0 : F)).source := trivial
have H := (mdifferentiableAt_iff_of_mem_source H₁ H₂).mp (hf ((chart_at E p).symm z))
simp only [differentiableWithinAt_univ, mfld_simps] at H
simpa [LocalHomeomorph.right_inv _ hz] using H.2
-- `f` pulled back by the chart at `p` has a local max at `chart_at E p p`
have hf'' : IsLocalMax (norm ∘ f ∘ (chart_at E p).symm) (chart_at E p p) :=
by
refine' Filter.eventually_of_mem key₁ fun z hz => _
refine' (hp₀ ((chart_at E p).symm z) hz).trans (_ : ‖f p₀‖ ≤ ‖f _‖)
rw [← hp, LocalHomeomorph.left_inv _ (mem_chart_source E p)]
-- so by the maximum principle `f` is equal to `f p` near `p`
obtain ⟨U, hU, hUf⟩ := (Complex.eventually_eq_of_isLocalMax_norm hf' hf'').exists_mem
have H₁ : chart_at E p ⁻¹' U ∈ 𝓝 p := (chart_at E p).ContinuousAt (mem_chart_source E p) hU
have H₂ : (chart_at E p).source ∈ 𝓝 p :=
(LocalHomeomorph.open_source _).mem_nhds (mem_chart_source E p)
apply Filter.mem_of_superset (Filter.inter_mem hps' (Filter.inter_mem H₁ H₂))
rintro q ⟨hqs, hq : chart_at E p q ∈ _, hq'⟩
refine' ⟨_, hqs⟩
simpa [LocalHomeomorph.left_inv _ hq', hp, -norm_eq_abs] using hUf (chart_at E p q) hq
#align mdifferentiable.is_locally_constant Mdifferentiable.isLocallyConstant
/-- A holomorphic function on a compact connected complex manifold is constant. -/
theorem apply_eq_of_compactSpace [PreconnectedSpace M] {f : M → F}
(hf : Mdifferentiable 𝓘(ℂ, E) 𝓘(ℂ, F) f) (a b : M) : f a = f b :=
hf.IsLocallyConstant.apply_eq_of_preconnectedSpace _ _
#align mdifferentiable.apply_eq_of_compact_space Mdifferentiable.apply_eq_of_compactSpace
/-- A holomorphic function on a compact connected complex manifold is the constant function `f ≡ v`,
for some value `v`. -/
theorem exists_eq_const_of_compactSpace [PreconnectedSpace M] {f : M → F}
(hf : Mdifferentiable 𝓘(ℂ, E) 𝓘(ℂ, F) f) : ∃ v : F, f = Function.const M v :=
hf.IsLocallyConstant.exists_eq_const
#align mdifferentiable.exists_eq_const_of_compact_space Mdifferentiable.exists_eq_const_of_compactSpace
end Mdifferentiable
|
\hypertarget{section}{%
\section{1}\label{section}}
\bibverse{1} The words of the Preacher, the son of David, king in
Jerusalem:
\bibverse{2} ``Vanity of vanities,'' says the Preacher; ``Vanity of
vanities, all is vanity.'' \bibverse{3} What does man gain from all his
labour in which he labours under the sun? \bibverse{4} One generation
goes, and another generation comes; but the earth remains forever.
\bibverse{5} The sun also rises, and the sun goes down, and hurries to
its place where it rises. \bibverse{6} The wind goes towards the south,
and turns around to the north. It turns around continually as it goes,
and the wind returns again to its courses. \bibverse{7} All the rivers
run into the sea, yet the sea is not full. To the place where the rivers
flow, there they flow again. \bibverse{8} All things are full of
weariness beyond uttering. The eye is not satisfied with seeing, nor the
ear filled with hearing. \bibverse{9} That which has been is that which
shall be, and that which has been done is that which shall be done; and
there is no new thing under the sun. \bibverse{10} Is there a thing of
which it may be said, ``Behold,\footnote{1:10 ``Behold'', from
``הִנֵּה'', means look at, take notice, observe, see, or gaze at. It
is often used as an interjection.} this is new?'' It has been long
ago, in the ages which were before us. \bibverse{11} There is no memory
of the former; neither shall there be any memory of the latter that are
to come, amongst those that shall come after.
\bibverse{12} I, the Preacher, was king over Israel in Jerusalem.
\bibverse{13} I applied my heart to seek and to search out by wisdom
concerning all that is done under the sky. It is a heavy burden that
God\footnote{1:13 The Hebrew word rendered ``God'' is ``אֱלֹהִ֑ים''
(Elohim).} has given to the sons of men to be afflicted with.
\bibverse{14} I have seen all the works that are done under the sun; and
behold, all is vanity and a chasing after wind. \bibverse{15} That which
is crooked can't be made straight; and that which is lacking can't be
counted. \bibverse{16} I said to myself, ``Behold, I have obtained for
myself great wisdom above all who were before me in Jerusalem. Yes, my
heart has had great experience of wisdom and knowledge.'' \bibverse{17}
I applied my heart to know wisdom, and to know madness and folly. I
perceived that this also was a chasing after wind. \bibverse{18} For in
much wisdom is much grief; and he who increases knowledge increases
sorrow.
\hypertarget{section-1}{%
\section{2}\label{section-1}}
\bibverse{1} I said in my heart, ``Come now, I will test you with mirth;
therefore enjoy pleasure;'' and behold, this also was vanity.
\bibverse{2} I said of laughter, ``It is foolishness;'' and of mirth,
``What does it accomplish?''
\bibverse{3} I searched in my heart how to cheer my flesh with wine, my
heart yet guiding me with wisdom, and how to lay hold of folly, until I
might see what it was good for the sons of men that they should do under
heaven all the days of their lives. \bibverse{4} I made myself great
works. I built myself houses. I planted myself vineyards. \bibverse{5} I
made myself gardens and parks, and I planted trees in them of all kinds
of fruit. \bibverse{6} I made myself pools of water, to water the forest
where trees were grown. \bibverse{7} I bought male servants and female
servants, and had servants born in my house. I also had great
possessions of herds and flocks, above all who were before me in
Jerusalem. \bibverse{8} I also gathered silver and gold for myself, and
the treasure of kings and of the provinces. I got myself male and female
singers, and the delights of the sons of men: musical instruments of all
sorts. \bibverse{9} So I was great, and increased more than all who were
before me in Jerusalem. My wisdom also remained with me. \bibverse{10}
Whatever my eyes desired, I didn't keep from them. I didn't withhold my
heart from any joy, for my heart rejoiced because of all my labour, and
this was my portion from all my labour. \bibverse{11} Then I looked at
all the works that my hands had worked, and at the labour that I had
laboured to do; and behold, all was vanity and a chasing after wind, and
there was no profit under the sun.
\bibverse{12} I turned myself to consider wisdom, madness, and folly;
for what can the king's successor do? Just that which has been done long
ago. \bibverse{13} Then I saw that wisdom excels folly, as far as light
excels darkness. \bibverse{14} The wise man's eyes are in his head, and
the fool walks in darkness---and yet I perceived that one event happens
to them all. \bibverse{15} Then I said in my heart, ``As it happens to
the fool, so will it happen even to me; and why was I then more wise?''
Then I said in my heart that this also is vanity. \bibverse{16} For of
the wise man, even as of the fool, there is no memory forever, since in
the days to come all will have been long forgotten. Indeed, the wise man
must die just like the fool!
\bibverse{17} So I hated life, because the work that is worked under the
sun was grievous to me; for all is vanity and a chasing after wind.
\bibverse{18} I hated all my labour in which I laboured under the sun,
because I must leave it to the man who comes after me. \bibverse{19} Who
knows whether he will be a wise man or a fool? Yet he will have rule
over all of my labour in which I have laboured, and in which I have
shown myself wise under the sun. This also is vanity.
\bibverse{20} Therefore I began to cause my heart to despair concerning
all the labour in which I had laboured under the sun. \bibverse{21} For
there is a man whose labour is with wisdom, with knowledge, and with
skilfulness; yet he shall leave it for his portion to a man who has not
laboured for it. This also is vanity and a great evil. \bibverse{22} For
what does a man have of all his labour and of the striving of his heart,
in which he labours under the sun? \bibverse{23} For all his days are
sorrows, and his travail is grief; yes, even in the night his heart
takes no rest. This also is vanity. \bibverse{24} There is nothing
better for a man than that he should eat and drink, and make his soul
enjoy good in his labour. This also I saw, that it is from the hand of
God. \bibverse{25} For who can eat, or who can have enjoyment, more than
I? \bibverse{26} For to the man who pleases him, God gives wisdom,
knowledge, and joy; but to the sinner he gives travail, to gather and to
heap up, that he may give to him who pleases God. This also is vanity
and a chasing after wind.
\hypertarget{section-2}{%
\section{3}\label{section-2}}
\bibverse{1} For everything there is a season, and a time for every
purpose under heaven: \bibverse{2} a time to be born, and a time to die;
a time to plant, and a time to pluck up that which is planted;
\bibverse{3} a time to kill, and a time to heal; a time to break down,
and a time to build up; \bibverse{4} a time to weep, and a time to
laugh; a time to mourn, and a time to dance; \bibverse{5} a time to cast
away stones, and a time to gather stones together; a time to embrace,
and a time to refrain from embracing; \bibverse{6} a time to seek, and a
time to lose; a time to keep, and a time to cast away; \bibverse{7} a
time to tear, and a time to sew; a time to keep silence, and a time to
speak; \bibverse{8} a time to love, and a time to hate; a time for war,
and a time for peace.
\bibverse{9} What profit has he who works in that in which he labours?
\bibverse{10} I have seen the burden which God has given to the sons of
men to be afflicted with. \bibverse{11} He has made everything beautiful
in its time. He has also set eternity in their hearts, yet so that man
can't find out the work that God has done from the beginning even to the
end. \bibverse{12} I know that there is nothing better for them than to
rejoice, and to do good as long as they live. \bibverse{13} Also that
every man should eat and drink, and enjoy good in all his labour, is the
gift of God. \bibverse{14} I know that whatever God does, it shall be
forever. Nothing can be added to it, nor anything taken from it; and God
has done it, that men should fear before him. \bibverse{15} That which
is has been long ago, and that which is to be has been long ago. God
seeks again that which is passed away.
\bibverse{16} Moreover I saw under the sun, in the place of justice,
that wickedness was there; and in the place of righteousness, that
wickedness was there. \bibverse{17} I said in my heart, ``God will judge
the righteous and the wicked; for there is a time there for every
purpose and for every work.'' \bibverse{18} I said in my heart, ``As for
the sons of men, God tests them, so that they may see that they
themselves are like animals. \bibverse{19} For that which happens to the
sons of men happens to animals. Even one thing happens to them. As the
one dies, so the other dies. Yes, they have all one breath; and man has
no advantage over the animals, for all is vanity. \bibverse{20} All go
to one place. All are from the dust, and all turn to dust again.
\bibverse{21} Who knows the spirit of man, whether it goes upward, and
the spirit of the animal, whether it goes downward to the earth?''
\bibverse{22} Therefore I saw that there is nothing better than that a
man should rejoice in his works, for that is his portion; for who can
bring him to see what will be after him?
\hypertarget{section-3}{%
\section{4}\label{section-3}}
\bibverse{1} Then I returned and saw all the oppressions that are done
under the sun: and behold, the tears of those who were oppressed, and
they had no comforter; and on the side of their oppressors there was
power; but they had no comforter. \bibverse{2} Therefore I praised the
dead who have been long dead more than the living who are yet alive.
\bibverse{3} Yes, better than them both is him who has not yet been, who
has not seen the evil work that is done under the sun. \bibverse{4} Then
I saw all the labour and achievement that is the envy of a man's
neighbour. This also is vanity and a striving after wind.
\bibverse{5} The fool folds his hands together and ruins himself.
\bibverse{6} Better is a handful, with quietness, than two handfuls with
labour and chasing after wind.
\bibverse{7} Then I returned and saw vanity under the sun. \bibverse{8}
There is one who is alone, and he has neither son nor brother. There is
no end to all of his labour, neither are his eyes satisfied with wealth.
``For whom then do I labour and deprive my soul of enjoyment?'' This
also is vanity. Yes, it is a miserable business.
\bibverse{9} Two are better than one, because they have a good reward
for their labour. \bibverse{10} For if they fall, the one will lift up
his fellow; but woe to him who is alone when he falls, and doesn't have
another to lift him up. \bibverse{11} Again, if two lie together, then
they have warmth; but how can one keep warm alone? \bibverse{12} If a
man prevails against one who is alone, two shall withstand him; and a
threefold cord is not quickly broken.
\bibverse{13} Better is a poor and wise youth than an old and foolish
king who doesn't know how to receive admonition any more. \bibverse{14}
For out of prison he came out to be king; yes, even in his kingdom he
was born poor. \bibverse{15} I saw all the living who walk under the
sun, that they were with the youth, the other, who succeeded him.
\bibverse{16} There was no end of all the people, even of all them over
whom he was---yet those who come after shall not rejoice in him. Surely
this also is vanity and a chasing after wind.
\hypertarget{section-4}{%
\section{5}\label{section-4}}
\bibverse{1} Guard your steps when you go to God's house; for to draw
near to listen is better than to give the sacrifice of fools, for they
don't know that they do evil. \bibverse{2} Don't be rash with your
mouth, and don't let your heart be hasty to utter anything before God;
for God is in heaven, and you on earth. Therefore let your words be few.
\bibverse{3} For as a dream comes with a multitude of cares, so a fool's
speech with a multitude of words. \bibverse{4} When you vow a vow to
God, don't defer to pay it; for he has no pleasure in fools. Pay that
which you vow. \bibverse{5} It is better that you should not vow, than
that you should vow and not pay. \bibverse{6} Don't allow your mouth to
lead you into sin. Don't protest before the messenger that this was a
mistake. Why should God be angry at your voice, and destroy the work of
your hands? \bibverse{7} For in the multitude of dreams there are
vanities, as well as in many words; but you must fear God.
\bibverse{8} If you see the oppression of the poor, and the violent
taking away of justice and righteousness in a district, don't marvel at
the matter, for one official is eyed by a higher one, and there are
officials over them. \bibverse{9} Moreover the profit of the earth is
for all. The king profits from the field.
\bibverse{10} He who loves silver shall not be satisfied with silver,
nor he who loves abundance, with increase. This also is vanity.
\bibverse{11} When goods increase, those who eat them are increased; and
what advantage is there to its owner, except to feast on them with his
eyes?
\bibverse{12} The sleep of a labouring man is sweet, whether he eats
little or much; but the abundance of the rich will not allow him to
sleep.
\bibverse{13} There is a grievous evil which I have seen under the sun:
wealth kept by its owner to his harm. \bibverse{14} Those riches perish
by misfortune, and if he has fathered a son, there is nothing in his
hand. \bibverse{15} As he came out of his mother's womb, naked shall he
go again as he came, and shall take nothing for his labour, which he may
carry away in his hand. \bibverse{16} This also is a grievous evil, that
in all points as he came, so shall he go. And what profit does he have
who labours for the wind? \bibverse{17} All his days he also eats in
darkness, he is frustrated, and has sickness and wrath.
\bibverse{18} Behold, that which I have seen to be good and proper is
for one to eat and to drink, and to enjoy good in all his labour, in
which he labours under the sun, all the days of his life which God has
given him; for this is his portion. \bibverse{19} Every man also to whom
God has given riches and wealth, and has given him power to eat of it,
and to take his portion, and to rejoice in his labour---this is the gift
of God. \bibverse{20} For he shall not often reflect on the days of his
life, because God occupies him with the joy of his heart.
\hypertarget{section-5}{%
\section{6}\label{section-5}}
\bibverse{1} There is an evil which I have seen under the sun, and it is
heavy on men: \bibverse{2} a man to whom God gives riches, wealth, and
honour, so that he lacks nothing for his soul of all that he desires,
yet God gives him no power to eat of it, but an alien eats it. This is
vanity, and it is an evil disease.
\bibverse{3} If a man fathers a hundred children, and lives many years,
so that the days of his years are many, but his soul is not filled with
good, and moreover he has no burial, I say that a stillborn child is
better than he; \bibverse{4} for it comes in vanity, and departs in
darkness, and its name is covered with darkness. \bibverse{5} Moreover
it has not seen the sun nor known it. This has rest rather than the
other. \bibverse{6} Yes, though he live a thousand years twice told, and
yet fails to enjoy good, don't all go to one place? \bibverse{7} All the
labour of man is for his mouth, and yet the appetite is not filled.
\bibverse{8} For what advantage has the wise more than the fool? What
has the poor man, that knows how to walk before the living? \bibverse{9}
Better is the sight of the eyes than the wandering of the desire. This
also is vanity and a chasing after wind. \bibverse{10} Whatever has
been, its name was given long ago; and it is known what man is; neither
can he contend with him who is mightier than he. \bibverse{11} For there
are many words that create vanity. What does that profit man?
\bibverse{12} For who knows what is good for man in life, all the days
of his vain life which he spends like a shadow? For who can tell a man
what will be after him under the sun?
\hypertarget{section-6}{%
\section{7}\label{section-6}}
\bibverse{1} A good name is better than fine perfume; and the day of
death better than the day of one's birth. \bibverse{2} It is better to
go to the house of mourning than to go to the house of feasting; for
that is the end of all men, and the living should take this to heart.
\bibverse{3} Sorrow is better than laughter; for by the sadness of the
face the heart is made good. \bibverse{4} The heart of the wise is in
the house of mourning; but the heart of fools is in the house of mirth.
\bibverse{5} It is better to hear the rebuke of the wise than for a man
to hear the song of fools. \bibverse{6} For as the crackling of thorns
under a pot, so is the laughter of the fool. This also is vanity.
\bibverse{7} Surely extortion makes the wise man foolish; and a bribe
destroys the understanding. \bibverse{8} Better is the end of a thing
than its beginning.
The patient in spirit is better than the proud in spirit. \bibverse{9}
Don't be hasty in your spirit to be angry, for anger rests in the bosom
of fools. \bibverse{10} Don't say, ``Why were the former days better
than these?'' For you do not ask wisely about this.
\bibverse{11} Wisdom is as good as an inheritance. Yes, it is more
excellent for those who see the sun. \bibverse{12} For wisdom is a
defence, even as money is a defence; but the excellency of knowledge is
that wisdom preserves the life of him who has it.
\bibverse{13} Consider the work of God, for who can make that straight
which he has made crooked? \bibverse{14} In the day of prosperity be
joyful, and in the day of adversity consider; yes, God has made the one
side by side with the other, to the end that man should not find out
anything after him.
\bibverse{15} All this I have seen in my days of vanity: there is a
righteous man who perishes in his righteousness, and there is a wicked
man who lives long in his evildoing. \bibverse{16} Don't be overly
righteous, neither make yourself overly wise. Why should you destroy
yourself? \bibverse{17} Don't be too wicked, neither be foolish. Why
should you die before your time? \bibverse{18} It is good that you
should take hold of this. Yes, also don't withdraw your hand from that;
for he who fears God will come out of them all. \bibverse{19} Wisdom is
a strength to the wise man more than ten rulers who are in a city.
\bibverse{20} Surely there is not a righteous man on earth who does good
and doesn't sin. \bibverse{21} Also don't take heed to all words that
are spoken, lest you hear your servant curse you; \bibverse{22} for
often your own heart knows that you yourself have likewise cursed
others. \bibverse{23} All this I have proved in wisdom. I said, ``I will
be wise;'' but it was far from me. \bibverse{24} That which is, is far
off and exceedingly deep. Who can find it out? \bibverse{25} I turned
around, and my heart sought to know and to search out, and to seek
wisdom and the scheme of things, and to know that wickedness is
stupidity, and that foolishness is madness.
\bibverse{26} I find more bitter than death the woman whose heart is
snares and traps, whose hands are chains. Whoever pleases God shall
escape from her; but the sinner will be ensnared by her.
\bibverse{27} ``Behold, I have found this,'' says the Preacher, ``to one
another, to find an explanation \bibverse{28} which my soul still seeks,
but I have not found. I have found one man amongst a thousand, but I
have not found a woman amongst all those. \bibverse{29} Behold, I have
only found this: that God made mankind upright; but they search for many
inventions.''
\hypertarget{section-7}{%
\section{8}\label{section-7}}
\bibverse{1} Who is like the wise man? And who knows the interpretation
of a thing? A man's wisdom makes his face shine, and the hardness of his
face is changed.
\bibverse{2} I say, ``Keep the king's command!'' because of the oath to
God. \bibverse{3} Don't be hasty to go out of his presence. Don't
persist in an evil thing, for he does whatever pleases him, \bibverse{4}
for the king's word is supreme. Who can say to him, ``What are you
doing?'' \bibverse{5} Whoever keeps the commandment shall not come to
harm, and his wise heart will know the time and procedure. \bibverse{6}
For there is a time and procedure for every purpose, although the misery
of man is heavy on him. \bibverse{7} For he doesn't know that which will
be; for who can tell him how it will be? \bibverse{8} There is no man
who has power over the spirit to contain the spirit; neither does he
have power over the day of death. There is no discharge in war; neither
shall wickedness deliver those who practise it.
\bibverse{9} All this I have seen, and applied my mind to every work
that is done under the sun. There is a time in which one man has power
over another to his hurt. \bibverse{10} So I saw the wicked buried.
Indeed they came also from holiness. They went and were forgotten in the
city where they did this. This also is vanity. \bibverse{11} Because
sentence against an evil work is not executed speedily, therefore the
heart of the sons of men is fully set in them to do evil. \bibverse{12}
Though a sinner commits crimes a hundred times, and lives long, yet
surely I know that it will be better with those who fear God, who are
reverent before him. \bibverse{13} But it shall not be well with the
wicked, neither shall he lengthen days like a shadow, because he doesn't
fear God.
\bibverse{14} There is a vanity which is done on the earth, that there
are righteous men to whom it happens according to the work of the
wicked. Again, there are wicked men to whom it happens according to the
work of the righteous. I said that this also is vanity. \bibverse{15}
Then I commended mirth, because a man has no better thing under the sun
than to eat, to drink, and to be joyful: for that will accompany him in
his labour all the days of his life which God has given him under the
sun.
\bibverse{16} When I applied my heart to know wisdom, and to see the
business that is done on the earth (even though eyes see no sleep day or
night), \bibverse{17} then I saw all the work of God, that man can't
find out the work that is done under the sun, because however much a man
labours to seek it out, yet he won't find it. Yes even though a wise man
thinks he can comprehend it, he won't be able to find it.
\hypertarget{section-8}{%
\section{9}\label{section-8}}
\bibverse{1} For all this I laid to my heart, even to explore all this:
that the righteous, and the wise, and their works, are in the hand of
God; whether it is love or hatred, man doesn't know it; all is before
them. \bibverse{2} All things come alike to all. There is one event to
the righteous and to the wicked; to the good, to the clean, to the
unclean, to him who sacrifices, and to him who doesn't sacrifice. As is
the good, so is the sinner; he who takes an oath, as he who fears an
oath. \bibverse{3} This is an evil in all that is done under the sun,
that there is one event to all. Yes also, the heart of the sons of men
is full of evil, and madness is in their heart while they live, and
after that they go to the dead. \bibverse{4} For to him who is joined
with all the living there is hope; for a living dog is better than a
dead lion. \bibverse{5} For the living know that they will die, but the
dead don't know anything, neither do they have any more a reward; for
their memory is forgotten. \bibverse{6} Also their love, their hatred,
and their envy has perished long ago; neither do they any longer have a
portion forever in anything that is done under the sun.
\bibverse{7} Go your way---eat your bread with joy, and drink your wine
with a merry heart; for God has already accepted your works.
\bibverse{8} Let your garments be always white, and don't let your head
lack oil. \bibverse{9} Live joyfully with the wife whom you love all the
days of your life of vanity, which he has given you under the sun, all
your days of vanity, for that is your portion in life, and in your
labour in which you labour under the sun. \bibverse{10} Whatever your
hand finds to do, do it with your might; for there is no work, nor plan,
nor knowledge, nor wisdom, in Sheol,\footnote{9:10 Sheol is the place of
the dead.} where you are going.
\bibverse{11} I returned and saw under the sun that the race is not to
the swift, nor the battle to the strong, neither yet bread to the wise,
nor yet riches to men of understanding, nor yet favour to men of skill;
but time and chance happen to them all. \bibverse{12} For man also
doesn't know his time. As the fish that are taken in an evil net, and as
the birds that are caught in the snare, even so are the sons of men
snared in an evil time, when it falls suddenly on them.
\bibverse{13} I have also seen wisdom under the sun in this way, and it
seemed great to me. \bibverse{14} There was a little city, and few men
within it; and a great king came against it, besieged it, and built
great bulwarks against it. \bibverse{15} Now a poor wise man was found
in it, and he by his wisdom delivered the city; yet no man remembered
that same poor man. \bibverse{16} Then I said, ``Wisdom is better than
strength.'' Nevertheless the poor man's wisdom is despised, and his
words are not heard. \bibverse{17} The words of the wise heard in quiet
are better than the cry of him who rules amongst fools. \bibverse{18}
Wisdom is better than weapons of war; but one sinner destroys much good.
\hypertarget{section-9}{%
\section{10}\label{section-9}}
\bibverse{1} Dead flies cause the oil of the perfumer to produce an evil
odour; so does a little folly outweigh wisdom and honour. \bibverse{2} A
wise man's heart is at his right hand, but a fool's heart at his left.
\bibverse{3} Yes also when the fool walks by the way, his understanding
fails him, and he says to everyone that he is a fool. \bibverse{4} If
the spirit of the ruler rises up against you, don't leave your place;
for gentleness lays great offences to rest.
\bibverse{5} There is an evil which I have seen under the sun, the sort
of error which proceeds from the ruler. \bibverse{6} Folly is set in
great dignity, and the rich sit in a low place. \bibverse{7} I have seen
servants on horses, and princes walking like servants on the earth.
\bibverse{8} He who digs a pit may fall into it; and whoever breaks
through a wall may be bitten by a snake. \bibverse{9} Whoever carves out
stones may be injured by them. Whoever splits wood may be endangered by
it. \bibverse{10} If the axe is blunt, and one doesn't sharpen the edge,
then he must use more strength; but skill brings success.
\bibverse{11} If the snake bites before it is charmed, then is there no
profit for the charmer's tongue. \bibverse{12} The words of a wise man's
mouth are gracious; but a fool is swallowed by his own lips.
\bibverse{13} The beginning of the words of his mouth is foolishness;
and the end of his talk is mischievous madness. \bibverse{14} A fool
also multiplies words.
Man doesn't know what will be; and that which will be after him, who can
tell him? \bibverse{15} The labour of fools wearies every one of them;
for he doesn't know how to go to the city. \bibverse{16} Woe to you,
land, when your king is a child, and your princes eat in the morning!
\bibverse{17} Happy are you, land, when your king is the son of nobles,
and your princes eat in due season, for strength, and not for
drunkenness! \bibverse{18} By slothfulness the roof sinks in; and
through idleness of the hands the house leaks. \bibverse{19} A feast is
made for laughter, and wine makes the life glad; and money is the answer
for all things. \bibverse{20} Don't curse the king, no, not in your
thoughts; and don't curse the rich in your bedroom, for a bird of the
sky may carry your voice, and that which has wings may tell the matter.
\hypertarget{section-10}{%
\section{11}\label{section-10}}
\bibverse{1} Cast your bread on the waters; for you shall find it after
many days. \bibverse{2} Give a portion to seven, yes, even to eight; for
you don't know what evil will be on the earth. \bibverse{3} If the
clouds are full of rain, they empty themselves on the earth; and if a
tree falls towards the south, or towards the north, in the place where
the tree falls, there shall it be. \bibverse{4} He who observes the wind
won't sow; and he who regards the clouds won't reap. \bibverse{5} As you
don't know what is the way of the wind, nor how the bones grow in the
womb of her who is with child; even so you don't know the work of God
who does all. \bibverse{6} In the morning sow your seed, and in the
evening don't withhold your hand; for you don't know which will prosper,
whether this or that, or whether they both will be equally good.
\bibverse{7} Truly the light is sweet, and it is a pleasant thing for
the eyes to see the sun. \bibverse{8} Yes, if a man lives many years,
let him rejoice in them all; but let him remember the days of darkness,
for they shall be many. All that comes is vanity. \bibverse{9} Rejoice,
young man, in your youth, and let your heart cheer you in the days of
your youth, and walk in the ways of your heart, and in the sight of your
eyes; but know that for all these things God will bring you into
judgement. \bibverse{10} Therefore remove sorrow from your heart, and
put away evil from your flesh; for youth and the dawn of life are
vanity.
\hypertarget{section-11}{%
\section{12}\label{section-11}}
\bibverse{1} Remember also your Creator in the days of your youth,
before the evil days come, and the years draw near, when you will say,
``I have no pleasure in them;'' \bibverse{2} Before the sun, the light,
the moon, and the stars are darkened, and the clouds return after the
rain; \bibverse{3} in the day when the keepers of the house shall
tremble, and the strong men shall bow themselves, and the grinders cease
because they are few, and those who look out of the windows are
darkened, \bibverse{4} and the doors shall be shut in the street; when
the sound of the grinding is low, and one shall rise up at the voice of
a bird, and all the daughters of music shall be brought low;
\bibverse{5} yes, they shall be afraid of heights, and terrors will be
on the way; and the almond tree shall blossom, and the grasshopper shall
be a burden, and desire shall fail; because man goes to his everlasting
home, and the mourners go about the streets; \bibverse{6} before the
silver cord is severed, or the golden bowl is broken, or the pitcher is
broken at the spring, or the wheel broken at the cistern, \bibverse{7}
and the dust returns to the earth as it was, and the spirit returns to
God who gave it. \bibverse{8} ``Vanity of vanities,'' says the Preacher.
``All is vanity!''
\bibverse{9} Further, because the Preacher was wise, he still taught the
people knowledge. Yes, he pondered, sought out, and set in order many
proverbs. \bibverse{10} The Preacher sought to find out acceptable
words, and that which was written blamelessly, words of truth.
\bibverse{11} The words of the wise are like goads; and like nails well
fastened are words from the masters of assemblies, which are given from
one shepherd. \bibverse{12} Furthermore, my son, be admonished: of
making many books there is no end; and much study is a weariness of the
flesh.
\bibverse{13} This is the end of the matter. All has been heard. Fear
God and keep his commandments; for this is the whole duty of man.
\bibverse{14} For God will bring every work into judgement, with every
hidden thing, whether it is good, or whether it is evil.
|
lemma bounded_Int[intro]: "bounded S \<or> bounded T \<Longrightarrow> bounded (S \<inter> T)" |
State Before: ⊢ descFactorial 0 0 = 0! State After: no goals Tactic: rw [descFactorial_zero, factorial_zero] State Before: n : ℕ
⊢ descFactorial (succ n) (succ n) = (succ n)! State After: no goals Tactic: rw [succ_descFactorial_succ, descFactorial_self n, factorial_succ] |
State Before: α : Type u
β : Type v
ι : Type ?u.75608
π : ι → Type ?u.75613
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
s t : Set α
inst✝ : CompactSpace α
U : α → Set α
hU : ∀ (x : α), U x ∈ 𝓝 x
⊢ ∃ t, (⋃ (x : α) (_ : x ∈ t), U x) = ⊤ State After: case intro.intro
α : Type u
β : Type v
ι : Type ?u.75608
π : ι → Type ?u.75613
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
s✝ t✝ : Set α
inst✝ : CompactSpace α
U : α → Set α
hU : ∀ (x : α), U x ∈ 𝓝 x
t : Finset α
s : univ ⊆ ⋃ (x : α) (_ : x ∈ t), U x
⊢ ∃ t, (⋃ (x : α) (_ : x ∈ t), U x) = ⊤ Tactic: obtain ⟨t, -, s⟩ := IsCompact.elim_nhds_subcover isCompact_univ U fun x _ => hU x State Before: case intro.intro
α : Type u
β : Type v
ι : Type ?u.75608
π : ι → Type ?u.75613
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
s✝ t✝ : Set α
inst✝ : CompactSpace α
U : α → Set α
hU : ∀ (x : α), U x ∈ 𝓝 x
t : Finset α
s : univ ⊆ ⋃ (x : α) (_ : x ∈ t), U x
⊢ ∃ t, (⋃ (x : α) (_ : x ∈ t), U x) = ⊤ State After: no goals Tactic: exact ⟨t, top_unique s⟩ |
theory set_cover
imports clique
begin
datatype 'a set_cover = SC "'a set" "'a set set"
definition invar :: "'a set_cover => bool" where
"invar sc = (
case sc of (SC U S) => (\<forall>s \<in> S. s \<subseteq> U)
)"
fun cover :: "'a set_cover => 'a set set => bool" where
"cover (SC U S) C = (\<Union>{s. s \<in> C} = U)"
lemma cover_snd_def : "\<lbrakk>invar (SC U S); C \<subseteq> S\<rbrakk> \<Longrightarrow> cover (SC U S) C = (\<forall>e \<in> U. \<exists>s \<in> C. e \<in> s)"
by (auto simp add: invar_def)
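(* Reduction from Vertex Cover to Set Cover: the universe is the edge set E,
   and each vertex a \<in> V contributes the set of edges incident to a. *)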
fun vc_to_sc :: "'a graph => 'a set set_cover" where
"vc_to_sc (V, E) = SC E {s0. \<exists>a \<in> V. s0 = {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}}"
fun T_vc_to_sc :: "'a graph => nat" where
"T_vc_to_sc (V, E) = \<Sum> {c. \<exists>a \<in> V. c = card {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}}"
theorem invar_vc_to_sc : "clique.invar (V, E) \<Longrightarrow> invar (vc_to_sc (V, E))"
by (auto simp add: invar_def clique.invar_def)
theorem vc_to_sc_correct :
assumes "clique.invar (V, E)"
shows "vertex_cover (V, E) S = cover (vc_to_sc (V, E)) {s0. \<exists>a \<in> S. s0 = {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}}"
proof
assume "vertex_cover (V, E) S"
hence "\<forall>e \<in> E. \<exists>x \<in> S. x \<in> e" by auto
moreover have "\<forall>e \<in> E. \<exists>a\<in>V. \<exists>b\<in>V. e = {a, b}"
using assms apply (auto simp: clique.invar_def) by (metis card_2_iff insert_iff)
ultimately have "\<forall>e \<in> E. \<exists>x \<in> S. e \<in> {s. \<exists>b\<in>V. s = {x, b} \<and> s \<in> E}" by fastforce
hence "\<forall>e \<in> E. \<exists>s \<in> {s0. \<exists>a \<in> S. s0 = {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}}. e \<in> s"
using mem_Collect_eq by (smt (verit))
then show "cover (vc_to_sc (V, E)) {s0. \<exists>a \<in> S. s0 = {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}}"
by auto
next
assume "cover (vc_to_sc (V, E)) {s0. \<exists>a \<in> S. s0 = {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}}"
then show "vertex_cover (V, E) S" by auto
qed
lemma "clique.invar (V, E) \<Longrightarrow> finite V \<Longrightarrow> finite E \<Longrightarrow> card {s0. \<exists>a \<in> V. s0 = {s. \<exists>b\<in>V. s = {a, b} \<and> s \<in> E}} = card V"
apply (induction V rule: remove_induct)
apply (auto simp add: clique.invar_def)
sorry
theorem vc_to_sc_polynomial :
assumes "clique.invar (V, E)" "finite V" "finite E"
shows "T_vc_to_sc (V, E) = card E * 2"
using assms proof (induction E rule: remove_induct)
case empty
then show ?case by simp
next
case infinite
then show ?case by simp
next
case (remove A)
then show ?case apply auto sorry
qed
end |
import Timelib.Date.Year
import Timelib.Date.Month
import Timelib.Date.ScalarDate
import Timelib.Date.OrdinalDate
import Timelib.Date.Ymd
import Timelib.Date.Convert
def ScalarDate.firstOfTheMonth (year : Year) (m : Month) : ScalarDate :=
Ymd.toScalarDate ⟨year, m, 1, le_refl _, Month.numDays_pos _ _⟩
def ScalarDate.lastOfTheMonth (year : Year) (m : Month) : ScalarDate :=
Ymd.toScalarDate ⟨year, m, m.numDays year, Month.numDays_pos _ _, le_refl _⟩
def ScalarDate.newYear (year : Year) : ScalarDate := firstOfTheMonth year 1
/--
The date of the first k-day (day of the week) on or before the passed date.
E.g. the first Monday on or before January 13, 2022 = January 10, 2022.
-/
def ScalarDate.kDayOnOrBefore (k : Nat) (h : k < 7 := by decide) : ScalarDate → ScalarDate
| ⟨day⟩ => ⟨day - (day - k).rataDie⟩
theorem ScalarDate.kDayOnOrBefore_preserves (k : Nat) (h : k < 7) (d : ScalarDate) :
(d.kDayOnOrBefore k h).day.rataDie = k := sorry
def ScalarDate.kDayOnOrAfter (k : Nat) (d : ScalarDate) (h : k < 7 := by decide) : ScalarDate :=
kDayOnOrBefore k h (d.addDays 6)
theorem ScalarDate.kDayOnOrAfter_preserves (k : Nat) (h : k < 7) (d : ScalarDate) :
(d.kDayOnOrAfter k h).day.rataDie = k := sorry
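/--
The date of the k-day nearest to the passed date (the unique k-day lying
within three days of it on either side).
-/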
def ScalarDate.kDayNearest (k : Nat) (d : ScalarDate) (h : k < 7 := by decide) : ScalarDate :=
kDayOnOrBefore k h (d.addDays 3)
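/--
The date of the last k-day strictly before the passed date.
-/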
def ScalarDate.kDayBefore (k : Nat) (d : ScalarDate) (h : k < 7 := by decide) : ScalarDate :=
kDayOnOrBefore k h (d.subDays 1)
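/--
The date of the first k-day strictly after the passed date.
-/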
def ScalarDate.kDayAfter (k : Nat) (d : ScalarDate) (h : k < 7 := by decide) : ScalarDate :=
kDayOnOrBefore k h (d.addDays 7)
/--
If n > 0, the date of the nth k-day on or after the date.
If n < 0, counts backwards, returning the nth k-day on or before the date.
-/
def ScalarDate.nthKDay (n : Int) (k : Nat) (d : ScalarDate) (hk : k < 7 := by decide) (hn : n ≠ 0 := by decide) : ScalarDate :=
if n > 0
then ⟨7 * n + (d.kDayBefore k hk).day⟩
else ⟨7 * n + (d.kDayAfter k hk).day⟩
/--
Returns the first k-day on or after the current date. To use this to find the
first k-day of the month, the date should be the first day of the month.
-/
def ScalarDate.firstKDay (k : Nat) (d : ScalarDate) (h : k < 7 := by decide) : ScalarDate :=
d.nthKDay 1 k h
/--
Returns the last kday on or before the current date. To use this to find the
last kday of a given month, the date should be the final day of that month.
-/
def ScalarDate.lastKDay (k : Nat) (d : ScalarDate) (h : k < 7 := by decide) : ScalarDate :=
d.nthKDay (-1) k h
def Year.nthKDayOfMonth (y : Year) (n : Nat) (k : Nat) (m : Month) (hk : k < 7 := by decide) (hn : (Int.ofNat n) ≠ 0 := by decide) : ScalarDate :=
(ScalarDate.firstOfTheMonth y m).nthKDay (.ofNat n) k hk hn
theorem Year.nthKDayOfMonth_preserves (y : Year) (n : Nat) (k : Nat) (m : Month) (hk : k < 7) (hn : (Int.ofNat n) ≠ 0) :
(y.nthKDayOfMonth n k m hk hn).day.rataDie = k := sorry
def Year.firstKDayOfMonth (y : Year) (k : Nat) (m : Month) (hk : k < 7 := by decide) : ScalarDate :=
y.nthKDayOfMonth 1 k m hk
def Year.firstKDayOfMonth' (y : Year) (k : Nat) (m : Month) (hk : k < 7 := by decide) : ScalarDate :=
(ScalarDate.firstOfTheMonth y m).firstKDay k hk
theorem firstKDayOfMonth_eq (y : Year) (k : Nat) (m : Month) (h : k < 7) : y.firstKDayOfMonth k m h = y.firstKDayOfMonth' k m h := by
simp [Year.firstKDayOfMonth, Year.firstKDayOfMonth', Year.nthKDayOfMonth, ScalarDate.firstKDay]
def Year.lastKDayOfMonth (y : Year) (k : Nat) (m : Month) (hk : k < 7 := by decide) : ScalarDate :=
(ScalarDate.lastOfTheMonth y m).lastKDay k hk
|
{-# OPTIONS --sized-types #-}
open import Relation.Binary.Core
module BubbleSort {A : Set}
(_≤_ : A → A → Set)
(tot≤ : Total _≤_) where
open import Data.List
open import Data.Product
open import Data.Sum
open import Size
open import SList
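-- swap* x xs performs one bubbling pass over x ∙ xs: at each comparison the
-- smaller element stays in the list and the larger is carried along, so the
-- result pairs the remaining elements with the maximum.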
swap* : {ι : Size} → A → SList A {ι} → SList A {ι} × A
swap* x snil = (snil , x)
swap* x (y ∙ ys)
with tot≤ x y
... | inj₁ x≤y
with swap* y ys
swap* x (y ∙ ys) | inj₁ x≤y | (zs , z) = (x ∙ zs , z)
swap* x (y ∙ ys) | inj₂ y≤x
with swap* x ys
swap* x (y ∙ ys) | inj₂ y≤x | (zs , z) = (y ∙ zs , z)
bubbleSort : {ι : Size} → SList A {ι} → SList A
bubbleSort snil = snil
bubbleSort (x ∙ xs)
with swap* x xs
... | (ys , y) = _⊕_ A (bubbleSort ys) (y ∙ snil)
|
# Programming Exercise 4: Neural Networks Learning
## Introduction
In this exercise, you will implement the backpropagation algorithm for neural networks and apply it to the task of hand-written digit recognition. Before starting on the programming exercise, we strongly recommend watching the video lectures and completing the review questions for the associated topics.
All the information you need for solving this assignment is in this notebook, and all the code you will be implementing will take place within this notebook. The assignment can be promptly submitted to the coursera grader directly from this notebook (code and instructions are included below).
Before we begin with the exercises, we need to import all libraries required for this programming exercise. Throughout the course, we will be using [`numpy`](http://www.numpy.org/) for all arrays and matrix operations, [`matplotlib`](https://matplotlib.org/) for plotting, and [`scipy`](https://docs.scipy.org/doc/scipy/reference/) for scientific and numerical computation functions and tools. You can find instructions on how to install required libraries in the README file in the [github repository](https://github.com/dibgerge/ml-coursera-python-assignments).
```python
# used for manipulating directory paths
import os
# Scientific and vector computation for python
import numpy as np
# Plotting library
from matplotlib import pyplot
# Optimization module in scipy
from scipy import optimize
# will be used to load MATLAB mat datafile format
from scipy.io import loadmat
# library written for this exercise providing additional functions for assignment submission, and others
import utils
# define the submission/grader object for this exercise
grader = utils.Grader()
# tells matplotlib to embed plots within the notebook
%matplotlib inline
```
## Submission and Grading
After completing each part of the assignment, be sure to submit your solutions to the grader. The following is a breakdown of how each part of this exercise is scored.
| Section | Part | Submission function | Points
| :- |:- | :- | :-:
| 1 | [Feedforward and Cost Function](#section1) | [`nnCostFunction`](#nnCostFunction) | 30
| 2 | [Regularized Cost Function](#section2) | [`nnCostFunction`](#nnCostFunction) | 15
| 3 | [Sigmoid Gradient](#section3) | [`sigmoidGradient`](#sigmoidGradient) | 5
| 4 | [Neural Net Gradient Function (Backpropagation)](#section4) | [`nnCostFunction`](#nnCostFunction) | 40
| 5 | [Regularized Gradient](#section5) | [`nnCostFunction`](#nnCostFunction) |10
| | Total Points | | 100
You are allowed to submit your solutions multiple times, and we will take only the highest score into consideration.
<div class="alert alert-block alert-warning">
At the end of each section in this notebook, we have a cell which contains code for submitting the solutions thus far to the grader. Execute the cell to see your score up to the current section. For all your work to be submitted properly, you must execute those cells at least once.
</div>
## Neural Networks
In the previous exercise, you implemented feedforward propagation for neural networks and used it to predict handwritten digits with the weights we provided. In this exercise, you will implement the backpropagation algorithm to learn the parameters for the neural network.
We start the exercise by first loading the dataset.
```python
# training data stored in arrays X, y
data = loadmat(os.path.join('Data', 'ex4data1.mat'))
X, y = data['X'], data['y'].ravel()
# set the zero digit to 0, rather than its mapped 10 in this dataset
# This is an artifact due to the fact that this dataset was used in
# MATLAB where there is no index 0
y[y == 10] = 0
# Number of training examples
m = y.size
```
### 1.1 Visualizing the data
You will begin by visualizing a subset of the training set, using the function `displayData`, which is the same function we used in Exercise 3. It is provided in the `utils.py` file for this assignment as well. The dataset is also the same one you used in the previous exercise.
There are 5000 training examples in `ex4data1.mat`, where each training example is a 20 pixel by 20 pixel grayscale image of the digit. Each pixel is represented by a floating point number indicating the grayscale intensity at that location. The 20 by 20 grid of pixels is “unrolled” into a 400-dimensional vector. Each
of these training examples becomes a single row in our data matrix $X$. This gives us a 5000 by 400 matrix $X$ where every row is a training example for a handwritten digit image.
$$ X = \begin{bmatrix} - \left(x^{(1)} \right)^T - \\
- \left(x^{(2)} \right)^T - \\
\vdots \\
- \left(x^{(m)} \right)^T - \\
\end{bmatrix}
$$
The second part of the training set is a 5000-dimensional vector `y` that contains labels for the training set.
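As a quick, illustrative sanity check of the shapes described above (not part of the graded exercise):

```python
# Illustrative shape check for the loaded data.
print(X.shape)  # (5000, 400): one unrolled 20x20 image per row
print(y.shape)  # (5000,): one label in 0..9 per example
```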
The following cell randomly selects 100 images from the dataset and plots them.
```python
# Randomly select 100 data points to display
rand_indices = np.random.choice(m, 100, replace=False)
sel = X[rand_indices, :]
utils.displayData(sel)
```
### 1.2 Model representation
Our neural network is shown in the following figure.
It has 3 layers - an input layer, a hidden layer and an output layer. Recall that our inputs are pixel values
of digit images. Since the images are of size $20 \times 20$, this gives us 400 input layer units (not counting the extra bias unit which always outputs +1). The training data was loaded into the variables `X` and `y` above.
You have been provided with a set of network parameters ($\Theta^{(1)}, \Theta^{(2)}$) already trained by us. These are stored in `ex4weights.mat` and will be loaded in the next cell of this notebook into `Theta1` and `Theta2`. The parameters have dimensions that are sized for a neural network with 25 units in the second layer and 10 output units (corresponding to the 10 digit classes).
```python
# Setup the parameters you will use for this exercise
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 0 to 9
# Load the weights into variables Theta1 and Theta2
weights = loadmat(os.path.join('Data', 'ex4weights.mat'))
# Theta1 has size 25 x 401
# Theta2 has size 10 x 26
Theta1, Theta2 = weights['Theta1'], weights['Theta2']
# swap first and last rows of Theta2, due to legacy from MATLAB indexing,
# since the weight file ex4weights.mat was saved based on MATLAB indexing
Theta2 = np.roll(Theta2, 1, axis=0)
# Unroll parameters
nn_params = np.concatenate([Theta1.ravel(), Theta2.ravel()])
```
<a id="section1"></a>
### 1.3 Feedforward and cost function
Now you will implement the cost function and gradient for the neural network. First, complete the code for the function `nnCostFunction` in the next cell to return the cost.
Recall that the cost function for the neural network (without regularization) is:
$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}\sum_{k=1}^{K} \left[ - y_k^{(i)} \log \left( \left( h_\theta \left( x^{(i)} \right) \right)_k \right) - \left( 1 - y_k^{(i)} \right) \log \left( 1 - \left( h_\theta \left( x^{(i)} \right) \right)_k \right) \right]$$
where $h_\theta \left( x^{(i)} \right)$ is computed as shown in the neural network figure above, and K = 10 is the total number of possible labels. Note that $h_\theta(x^{(i)})_k = a_k^{(3)}$ is the activation (output
value) of the $k^{th}$ output unit. Also, recall that whereas the original labels (in the variable y) were 0, 1, ..., 9, for the purpose of training a neural network, we need to encode the labels as vectors containing only values 0 or 1, so that
$$ y =
\begin{bmatrix} 1 \\ 0 \\ 0 \\\vdots \\ 0 \end{bmatrix}, \quad
\begin{bmatrix} 0 \\ 1 \\ 0 \\ \vdots \\ 0 \end{bmatrix}, \quad \cdots \quad \text{or} \qquad
\begin{bmatrix} 0 \\ 0 \\ 0 \\ \vdots \\ 1 \end{bmatrix}.
$$
For example, if $x^{(i)}$ is an image of the digit 5, then the corresponding $y^{(i)}$ (that you should use with the cost function) should be a 10-dimensional vector with $y_5 = 1$, and the other elements equal to 0.
You should implement the feedforward computation that computes $h_\theta(x^{(i)})$ for every example $i$ and sum the cost over all examples. **Your code should also work for a dataset of any size, with any number of labels** (you can assume that there are always at least $K \ge 3$ labels).
<div class="alert alert-box alert-warning">
**Implementation Note:** The matrix $X$ contains the examples in rows (i.e., X[i,:] is the i-th training example $x^{(i)}$, expressed as an $n \times 1$ vector.) When you complete the code in `nnCostFunction`, you will need to add the column of 1’s to the X matrix. The parameters for each unit in the neural network are represented in Theta1 and Theta2 as one row. Specifically, the first row of Theta1 corresponds to the first hidden unit in the second layer. You can use a for-loop over the examples to compute the cost.
</div>
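Before editing `nnCostFunction` below, here is a minimal, vectorized sketch of the unregularized feedforward cost. It assumes the `utils.sigmoid` helper mentioned in the docstring and uses illustrative variable names; treat it as a sketch to check your reasoning against, not as the graded solution.

```python
# Sketch (not the graded cell): feedforward pass and unregularized cost.
a1 = np.concatenate([np.ones((m, 1)), X], axis=1)    # (m, 401), bias column added
a2 = utils.sigmoid(a1.dot(Theta1.T))                 # (m, 25) hidden activations
a2 = np.concatenate([np.ones((m, 1)), a2], axis=1)   # (m, 26), bias column added
h = utils.sigmoid(a2.dot(Theta2.T))                  # (m, 10) output activations

y_matrix = np.eye(num_labels)[y]                     # (m, 10) one-hot labels
J = -np.sum(y_matrix * np.log(h) + (1 - y_matrix) * np.log(1 - h)) / m
```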
<a id="nnCostFunction"></a>
```python
def nnCostFunction(nn_params,
input_layer_size,
hidden_layer_size,
num_labels,
X, y, lambda_=0.0):
"""
Implements the neural network cost function and gradient for a two layer neural
network which performs classification.
Parameters
----------
nn_params : array_like
The parameters for the neural network which are "unrolled" into
a vector. This needs to be converted back into the weight matrices Theta1
and Theta2.
input_layer_size : int
Number of features for the input layer.
hidden_layer_size : int
Number of hidden units in the second layer.
num_labels : int
Total number of labels, or equivalently number of units in output layer.
X : array_like
Input dataset. A matrix of shape (m x input_layer_size).
y : array_like
Dataset labels. A vector of shape (m,).
lambda_ : float, optional
Regularization parameter.
Returns
-------
J : float
The computed value for the cost function at the current weight values.
grad : array_like
An "unrolled" vector of the partial derivatives of the concatenatation of
neural network weights Theta1 and Theta2.
Instructions
------------
You should complete the code by working through the following parts.
- Part 1: Feedforward the neural network and return the cost in the
variable J. After implementing Part 1, you can verify that your
cost function computation is correct by verifying the cost
computed in the following cell.
- Part 2: Implement the backpropagation algorithm to compute the gradients
Theta1_grad and Theta2_grad. You should return the partial derivatives of
the cost function with respect to Theta1 and Theta2 in Theta1_grad and
Theta2_grad, respectively. After implementing Part 2, you can check
that your implementation is correct by running checkNNGradients provided
in the utils.py module.
Note: The vector y passed into the function is a vector of labels
containing values from 0..K-1. You need to map this vector into a
binary vector of 1's and 0's to be used with the neural network
cost function.
Hint: We recommend implementing backpropagation using a for-loop
over the training examples if you are implementing it for the
first time.
- Part 3: Implement regularization with the cost function and gradients.
Hint: You can implement this around the code for
backpropagation. That is, you can compute the gradients for
the regularization separately and then add them to Theta1_grad
and Theta2_grad from Part 2.
Note
----
We have provided an implementation for the sigmoid function in the file
`utils.py` accompanying this assignment.
"""
# Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
# for our 2 layer neural network
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
(hidden_layer_size, (input_layer_size + 1)))
Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
(num_labels, (hidden_layer_size + 1)))
# Setup some useful variables
m = y.size
# You need to return the following variables correctly
J = 0
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
# ====================== YOUR CODE HERE ======================
# ================================================================
# Unroll gradients
# grad = np.concatenate([Theta1_grad.ravel(order=order), Theta2_grad.ravel(order=order)])
grad = np.concatenate([Theta1_grad.ravel(), Theta2_grad.ravel()])
return J, grad
```
<div class="alert alert-box alert-warning">
Use the following links to go back to the different parts of this exercise that require to modify the function `nnCostFunction`.<br>
Back to:
- [Feedforward and cost function](#section1)
- [Regularized cost](#section2)
- [Neural Network Gradient (Backpropagation)](#section4)
- [Regularized Gradient](#section5)
</div>
Once you are done, call your `nnCostFunction` using the loaded set of parameters for `Theta1` and `Theta2`. You should see that the cost is about 0.287629.
```python
lambda_ = 0
J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
num_labels, X, y, lambda_)
print('Cost at parameters (loaded from ex4weights): %.6f ' % J)
print('The cost should be about : 0.287629.')
```
*You should now submit your solutions.*
```python
grader = utils.Grader()
grader[1] = nnCostFunction
grader.grade()
```
<a id="section2"></a>
### 1.4 Regularized cost function
The cost function for neural networks with regularization is given by:
$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}\sum_{k=1}^{K} \left[ - y_k^{(i)} \log \left( \left( h_\theta \left( x^{(i)} \right) \right)_k \right) - \left( 1 - y_k^{(i)} \right) \log \left( 1 - \left( h_\theta \left( x^{(i)} \right) \right)_k \right) \right] + \frac{\lambda}{2 m} \left[ \sum_{j=1}^{25} \sum_{k=1}^{400} \left( \Theta_{j,k}^{(1)} \right)^2 + \sum_{j=1}^{10} \sum_{k=1}^{25} \left( \Theta_{j,k}^{(2)} \right)^2 \right] $$
You can assume that the neural network will only have 3 layers - an input layer, a hidden layer and an output layer. However, your code should work for any number of input units, hidden units and output units. While we
have explicitly listed the indices above for $\Theta^{(1)}$ and $\Theta^{(2)}$ for clarity, do note that your code should in general work with $\Theta^{(1)}$ and $\Theta^{(2)}$ of any size. Note that you should not be regularizing the terms that correspond to the bias. For the matrices `Theta1` and `Theta2`, this corresponds to the first column of each matrix. You should now add regularization to your cost function. Notice that you can first compute the unregularized cost function $J$ using your existing `nnCostFunction` and then later add the cost for the regularization terms.
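The regularization term only involves the non-bias columns of each weight matrix and simply adds onto the unregularized cost. A hedged sketch, with the same illustrative names as before:

```python
# Sketch: regularization term, skipping the bias (first) column of each Theta.
reg = (lambda_ / (2 * m)) * (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))
J = J + reg
```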
[Click here to go back to `nnCostFunction` for editing.](#nnCostFunction)
Once you are done, the next cell will call your `nnCostFunction` using the loaded set of parameters for `Theta1` and `Theta2`, and $\lambda = 1$. You should see that the cost is about 0.383770.
```python
# Weight regularization parameter (we set this to 1 here).
lambda_ = 1
J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
num_labels, X, y, lambda_)
print('Cost at parameters (loaded from ex4weights): %.6f' % J)
print('This value should be about : 0.383770.')
```
*You should now submit your solutions.*
```python
grader[2] = nnCostFunction
grader.grade()
```
## 2 Backpropagation
In this part of the exercise, you will implement the backpropagation algorithm to compute the gradient for the neural network cost function. You will need to update the function `nnCostFunction` so that it returns an appropriate value for `grad`. Once you have computed the gradient, you will be able to train the neural network by minimizing the cost function $J(\theta)$ using an advanced optimizer such as `scipy`'s `optimize.minimize`.
You will first implement the backpropagation algorithm to compute the gradients for the parameters for the (unregularized) neural network. After you have verified that your gradient computation for the unregularized case is correct, you will implement the gradient for the regularized neural network.
<a id="section3"></a>
### 2.1 Sigmoid Gradient
To help you get started with this part of the exercise, you will first implement
the sigmoid gradient function. The gradient for the sigmoid function can be
computed as
$$ g'(z) = \frac{d}{dz} g(z) = g(z)\left(1-g(z)\right) $$
where
$$ \text{sigmoid}(z) = g(z) = \frac{1}{1 + e^{-z}} $$
Now complete the implementation of `sigmoidGradient` in the next cell.
<a id="sigmoidGradient"></a>
```python
def sigmoidGradient(z):
"""
Computes the gradient of the sigmoid function evaluated at z.
This should work regardless if z is a matrix or a vector.
In particular, if z is a vector or matrix, you should return
the gradient for each element.
Parameters
----------
z : array_like
A vector or matrix as input to the sigmoid function.
Returns
--------
g : array_like
Gradient of the sigmoid function. Has the same shape as z.
Instructions
------------
Compute the gradient of the sigmoid function evaluated at
each value of z (z can be a matrix, vector or scalar).
Note
----
We have provided an implementation of the sigmoid function
in `utils.py` file accompanying this assignment.
"""
g = np.zeros(z.shape)
# ====================== YOUR CODE HERE ======================
# =============================================================
return g
```
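For reference, one way to realize the formula above is sketched here as a standalone function built on the provided `utils.sigmoid`; it is an illustration under that assumption, not the graded cell:

```python
def sigmoid_gradient_sketch(z):
    """Element-wise sigmoid gradient g'(z) = g(z) * (1 - g(z))."""
    g = utils.sigmoid(np.asarray(z))
    return g * (1 - g)
```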
When you are done, the following cell calls `sigmoidGradient` on a given vector `z`. Try testing a few values by calling `sigmoidGradient(z)`. For large values (both positive and negative) of z, the gradient should be close to 0. When $z = 0$, the gradient should be exactly 0.25. Your code should also work with vectors and matrices. For a matrix, your function should perform the sigmoid gradient function on every element.
```python
z = np.array([-1, -0.5, 0, 0.5, 1])
g = sigmoidGradient(z)
print('Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n ')
print(g)
```
*You should now submit your solutions.*
```python
grader[3] = sigmoidGradient
grader.grade()
```
### 2.2 Random Initialization
When training neural networks, it is important to randomly initialize the parameters for symmetry breaking. One effective strategy for random initialization is to randomly select values for $\Theta^{(l)}$ uniformly in the range $[-\epsilon_{init}, \epsilon_{init}]$. You should use $\epsilon_{init} = 0.12$. This range of values ensures that the parameters are kept small and makes the learning more efficient.
<div class="alert alert-box alert-warning">
One effective strategy for choosing $\epsilon_{init}$ is to base it on the number of units in the network. A good choice of $\epsilon_{init}$ is $\epsilon_{init} = \frac{\sqrt{6}}{\sqrt{L_{in} + L_{out}}}$ where $L_{in} = s_l$ and $L_{out} = s_{l+1}$ are the number of units in the layers adjacent to $\Theta^{l}$.
</div>
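A one-line sketch of that heuristic, written as a hypothetical helper (the name is illustrative):

```python
# Hypothetical helper: epsilon_init derived from the adjacent layer sizes.
def epsilon_init_for(L_in, L_out):
    return np.sqrt(6) / np.sqrt(L_in + L_out)
```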
Your job is to complete the function `randInitializeWeights` to initialize the weights for $\Theta$. Modify the function by filling in the following code:
```python
# Randomly initialize the weights to small values
W = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
```
Note that we give the function an argument for $\epsilon$ with default value `epsilon_init = 0.12`.
```python
def randInitializeWeights(L_in, L_out, epsilon_init=0.12):
"""
Randomly initialize the weights of a layer in a neural network.
Parameters
----------
L_in : int
Number of incoming connections.
L_out : int
Number of outgoing connections.
epsilon_init : float, optional
Range of values which the weight can take from a uniform
distribution.
Returns
-------
W : array_like
The weights initialized to random values. Note that W should
be set to a matrix of size (L_out, 1 + L_in), as
the first column of W handles the "bias" terms.
Instructions
------------
Initialize W randomly so that we break the symmetry while training
the neural network. Note that the first column of W corresponds
to the parameters for the bias unit.
"""
# You need to return the following variables correctly
W = np.zeros((L_out, 1 + L_in))
# ====================== YOUR CODE HERE ======================
# ============================================================
return W
```
*You do not need to submit any code for this part of the exercise.*
Execute the following cell to initialize the weights for the 2 layers in the neural network using the `randInitializeWeights` function.
```python
print('Initializing Neural Network Parameters ...')
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)
# Unroll parameters
initial_nn_params = np.concatenate([initial_Theta1.ravel(), initial_Theta2.ravel()], axis=0)
```
<a id="section4"></a>
### 2.3 Backpropagation
Now, you will implement the backpropagation algorithm. Recall that the intuition behind the backpropagation algorithm is as follows. Given a training example $(x^{(t)}, y^{(t)})$, we will first run a “forward pass” to compute all the activations throughout the network, including the output value of the hypothesis $h_\theta(x)$. Then, for each node $j$ in layer $l$, we would like to compute an “error term” $\delta_j^{(l)}$ that measures how much that node was “responsible” for any errors in our output.
For an output node, we can directly measure the difference between the network's activation and the true target value, and use that to define $\delta_j^{(3)}$ (since layer 3 is the output layer). For the hidden units, you will compute $\delta_j^{(l)}$ based on a weighted average of the error terms of the nodes in layer $(l+1)$. In detail, here is the backpropagation algorithm (also depicted in the figure above). You should implement steps 1 to 4 in a loop that processes one example at a time. Concretely, you should implement a for-loop `for t in range(m)` and place steps 1-4 below inside the for-loop, with the $t^{th}$ iteration performing the calculation on the $t^{th}$ training example $(x^{(t)}, y^{(t)})$. Step 5 will divide the accumulated gradients by $m$ to obtain the gradients for the neural network cost function. A minimal sketch of the whole loop is given after the list.
1. Set the input layer's values $(a^{(1)})$ to the $t^{th}$ training example $x^{(t)}$. Perform a feedforward pass, computing the activations $(z^{(2)}, a^{(2)}, z^{(3)}, a^{(3)})$ for layers 2 and 3. Note that you need to add a `+1` term to ensure that the vectors of activations for layers $a^{(1)}$ and $a^{(2)}$ also include the bias unit. In `numpy`, if `a_1` is a matrix with one example per row, adding the bias column corresponds to `a_1 = np.concatenate([np.ones((m, 1)), a_1], axis=1)`.
1. For each output unit $k$ in layer 3 (the output layer), set
$$\delta_k^{(3)} = \left(a_k^{(3)} - y_k \right)$$
where $y_k \in \{0, 1\}$ indicates whether the current training example belongs to class $k$ $(y_k = 1)$, or if it belongs to a different class $(y_k = 0)$. You may find logical arrays helpful for this task (explained in the previous programming exercise).
1. For the hidden layer $l = 2$, set
$$ \delta^{(2)} = \left( \Theta^{(2)} \right)^T \delta^{(3)} * g'\left(z^{(2)} \right)$$
Note that the symbol $*$ performs element-wise multiplication in `numpy`.
1. Accumulate the gradient from this example using the following formula. Note that you should skip or remove $\delta_0^{(2)}$. In `numpy`, removing $\delta_0^{(2)}$ corresponds to `delta_2 = delta_2[1:]`.
$$ \Delta^{(l)} = \Delta^{(l)} + \delta^{(l+1)} (a^{(l)})^T $$
1. Obtain the (unregularized) gradient for the neural network cost function by dividing the accumulated gradients by $m$:
$$ \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)}$$
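To make the five steps concrete, here is a minimal sketch of the loop. It is an illustration under stated assumptions, not the official solution: it assumes `sigmoid` from `utils.py` and your `sigmoidGradient` are in scope, and that `y_matrix` is a hypothetical `(m, num_labels)` one-hot encoding of `y` built with logical arrays.
```python
Delta1 = np.zeros(Theta1.shape)   # (25, 401)
Delta2 = np.zeros(Theta2.shape)   # (10, 26)
for t in range(m):
    # Step 1: forward pass for example t, prepending the bias unit
    a1 = np.concatenate([[1], X[t]])           # (401,)
    z2 = Theta1.dot(a1)                        # (25,)
    a2 = np.concatenate([[1], sigmoid(z2)])    # (26,)
    z3 = Theta2.dot(a2)                        # (10,)
    a3 = sigmoid(z3)
    # Step 2: output-layer error term
    delta3 = a3 - y_matrix[t]
    # Step 3: hidden-layer error term (dropping the bias column of Theta2 is
    # equivalent to computing delta2 with it and then discarding delta2[0])
    delta2 = Theta2[:, 1:].T.dot(delta3) * sigmoidGradient(z2)
    # Step 4: accumulate the gradients
    Delta1 += np.outer(delta2, a1)
    Delta2 += np.outer(delta3, a2)
# Step 5: unregularized gradients
Theta1_grad = Delta1 / m
Theta2_grad = Delta2 / m
```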
<div class="alert alert-box alert-warning">
**Python/Numpy tip**: You should implement the backpropagation algorithm only after you have successfully completed the feedforward and cost functions. While implementing the backpropagation algorithm, it is often useful to use the `shape` attribute to print out the shapes of the variables you are working with if you run into dimension mismatch errors.
</div>
[Click here to go back and update the function `nnCostFunction` with the backpropagation algorithm](#nnCostFunction).
**Note:** If the iterative solution provided above is proving to be difficult to implement, try implementing the vectorized approach which is easier to implement in the opinion of the moderators of this course. You can find the tutorial for the vectorized approach [here](https://www.coursera.org/learn/machine-learning/discussions/all/threads/a8Kce_WxEeS16yIACyoj1Q).
After you have implemented the backpropagation algorithm, we will proceed to run gradient checking on your implementation. The gradient check will allow you to increase your confidence that your code is
computing the gradients correctly.
### 2.4 Gradient checking
In your neural network, you are minimizing the cost function $J(\Theta)$. To perform gradient checking on your parameters, you can imagine “unrolling” the parameters $\Theta^{(1)}$, $\Theta^{(2)}$ into a long vector $\theta$. By doing so, you can think of the cost function as being $J(\theta)$ instead and use the following gradient checking procedure.
Suppose you have a function $f_i(\theta)$ that purportedly computes $\frac{\partial}{\partial \theta_i} J(\theta)$; you’d like to check if $f_i$ is outputting correct derivative values.
$$
\text{Let } \theta^{(i+)} = \theta + \begin{bmatrix} 0 \\ 0 \\ \vdots \\ \epsilon \\ \vdots \\ 0 \end{bmatrix}
\quad \text{and} \quad \theta^{(i-)} = \theta - \begin{bmatrix} 0 \\ 0 \\ \vdots \\ \epsilon \\ \vdots \\ 0 \end{bmatrix}
$$
So, $\theta^{(i+)}$ is the same as $\theta$, except its $i^{th}$ element has been incremented by $\epsilon$. Similarly, $\theta^{(i−)}$ is the corresponding vector with the $i^{th}$ element decreased by $\epsilon$. You can now numerically verify $f_i(\theta)$’s correctness by checking, for each $i$, that:
$$ f_i\left( \theta \right) \approx \frac{J\left( \theta^{(i+)}\right) - J\left( \theta^{(i-)} \right)}{2\epsilon} $$
The degree to which these two values should approximate each other will depend on the details of $J$. But assuming $\epsilon = 10^{-4}$, you’ll usually find that the left- and right-hand sides of the above will agree to at least 4 significant digits (and often many more).
We have implemented the function to compute the numerical gradient for you in `computeNumericalGradient` (within the file `utils.py`). While you are not required to modify the file, we highly encourage you to take a look at the code to understand how it works.
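The central-difference idea behind it can be sketched in a few lines. This is an illustrative re-implementation, not the code shipped in `utils.py`; it assumes `J` returns a `(cost, grad)` pair, as `nnCostFunction` does.
```python
def numericalGradient(J, theta, e=1e-4):
    """Approximate the gradient of J at theta by central differences."""
    numgrad = np.zeros(theta.shape)
    perturb = np.zeros(theta.shape)
    for i in range(theta.size):
        perturb[i] = e
        loss1, _ = J(theta - perturb)   # J(theta^(i-))
        loss2, _ = J(theta + perturb)   # J(theta^(i+))
        numgrad[i] = (loss2 - loss1) / (2 * e)
        perturb[i] = 0
    return numgrad
```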
In the next cell we will run the provided function `checkNNGradients` which will create a small neural network and dataset that will be used for checking your gradients. If your backpropagation implementation is correct,
you should see a relative difference that is less than 1e-9.
<div class="alert alert-box alert-success">
**Practical Tip**: When performing gradient checking, it is much more efficient to use a small neural network with a relatively small number of input units and hidden units, thus having a relatively small number
of parameters. Each dimension of $\theta$ requires two evaluations of the cost function and this can be expensive. In the function `checkNNGradients`, our code creates a small random model and dataset which is used with `computeNumericalGradient` for gradient checking. Furthermore, after you are confident that your gradient computations are correct, you should turn off gradient checking before running your learning algorithm.
</div>
<div class="alert alert-box alert-success">
**Practical Tip:** Gradient checking works for any function where you are computing the cost and the gradient. Concretely, you can use the same `computeNumericalGradient` function to check if your gradient implementations for the other exercises are correct too (e.g., logistic regression’s cost function).
</div>
```python
utils.checkNNGradients(nnCostFunction)
```
*Once your cost function passes the gradient check for the (unregularized) neural network cost function, you should submit the neural network gradient function (backpropagation).*
```python
grader[4] = nnCostFunction
grader.grade()
```
<a id="section5"></a>
### 2.5 Regularized Neural Network
After you have successfully implemented the backpropagation algorithm, you will add regularization to the gradient. To account for regularization, it turns out that you can add this as an additional term *after* computing the gradients using backpropagation.
Specifically, after you have computed $\Delta_{ij}^{(l)}$ using backpropagation, you should add regularization using
$$ \begin{align}
& \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)} & \qquad \text{for } j = 0 \\
& \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)} + \frac{\lambda}{m} \Theta_{ij}^{(l)} & \qquad \text{for } j \ge 1
\end{align}
$$
Note that you should *not* be regularizing the first column of $\Theta^{(l)}$ which is used for the bias term. Furthermore, in the parameters $\Theta_{ij}^{(l)}$, $i$ is indexed starting from 1, and $j$ is indexed starting from 0. Thus,
$$
\Theta^{(l)} = \begin{bmatrix}
\Theta_{1,0}^{(l)} & \Theta_{1,1}^{(l)} & \cdots \\
\Theta_{2,0}^{(l)} & \Theta_{2,1}^{(l)} & \cdots \\
\vdots & ~ & \ddots
\end{bmatrix}
$$
[Now modify your code that computes grad in `nnCostFunction` to account for regularization.](#nnCostFunction)
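A minimal sketch of that modification, assuming `Theta1_grad` and `Theta2_grad` already hold the unregularized gradients $\frac{1}{m} \Delta^{(l)}$ accumulated by backpropagation:
```python
# Regularize every column except the first (j = 0), which holds the bias terms
Theta1_grad[:, 1:] += (lambda_ / m) * Theta1[:, 1:]
Theta2_grad[:, 1:] += (lambda_ / m) * Theta2[:, 1:]
```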
After you are done, the following cell runs gradient checking on your implementation. If your code is correct, you should expect to see a relative difference that is less than 1e-9.
```python
# Check gradients by running checkNNGradients
lambda_ = 3
utils.checkNNGradients(nnCostFunction, lambda_)
# Also output the costFunction debugging values
debug_J, _ = nnCostFunction(nn_params, input_layer_size,
hidden_layer_size, num_labels, X, y, lambda_)
print('\n\nCost at (fixed) debugging parameters (w/ lambda = %f): %f ' % (lambda_, debug_J))
print('(for lambda = 3, this value should be about 0.576051)')
```
```python
grader[5] = nnCostFunction
grader.grade()
```
### 2.6 Learning parameters using `scipy.optimize.minimize`
After you have successfully implemented the neural network cost function
and gradient computation, the next step is to use `scipy`'s minimization routine to learn a good set of parameters.
```python
# After you have completed the assignment, change the maxiter to a larger
# value to see how more training helps.
options= {'maxiter': 100}
# You should also try different values of lambda
lambda_ = 1
# Create "short hand" for the cost function to be minimized
costFunction = lambda p: nnCostFunction(p, input_layer_size,
hidden_layer_size,
num_labels, X, y, lambda_)
# Now, costFunction is a function that takes in only one argument
# (the neural network parameters)
res = optimize.minimize(costFunction,
initial_nn_params,
jac=True,
method='TNC',
options=options)
# get the solution of the optimization
nn_params = res.x
# Obtain Theta1 and Theta2 back from nn_params
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
(hidden_layer_size, (input_layer_size + 1)))
Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
(num_labels, (hidden_layer_size + 1)))
```
After the training completes, we will proceed to report the training accuracy of your classifier by computing the percentage of examples it got correct. If your implementation is correct, you should see a reported
training accuracy of about 95.3% (this may vary by about 1% due to the random initialization). It is possible to get higher training accuracies by training the neural network for more iterations. We encourage you to try
training the neural network for more iterations (e.g., set `maxiter` to 400) and also vary the regularization parameter $\lambda$. With the right learning settings, it is possible to get the neural network to perfectly fit the training set.
```python
pred = utils.predict(Theta1, Theta2, X)
print('Training Set Accuracy: %f' % (np.mean(pred == y) * 100))
```
## 3 Visualizing the Hidden Layer
One way to understand what your neural network is learning is to visualize the representations captured by the hidden units. Informally, given a particular hidden unit, one way to visualize what it computes is to find an input $x$ that will cause it to activate (that is, to have an activation value
($a_i^{(l)}$) close to 1). For the neural network you trained, notice that the $i^{th}$ row of $\Theta^{(1)}$ is a 401-dimensional vector that represents the parameters for the $i^{th}$ hidden unit. If we discard the bias term, we get a 400 dimensional vector that represents the weights from each input pixel to the hidden unit.
Thus, one way to visualize the “representation” captured by the hidden unit is to reshape this 400 dimensional vector into a 20 × 20 image and display it (It turns out that this is equivalent to finding the input that gives the highest activation for the hidden unit, given a “norm” constraint on the input (i.e., $||x||_2 \le 1$)).
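For a single hidden unit, the reshape-and-display idea can be sketched as follows. This is a hypothetical snippet rather than assignment code; `order='F'` assumes the course's MATLAB-style column-major pixel layout.
```python
import matplotlib.pyplot as plt

w = Theta1[0, 1:]                     # weights of the first hidden unit, bias dropped
plt.imshow(w.reshape(20, 20, order='F'), cmap='gray')
plt.axis('off')
plt.show()
```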
The next cell does this by using the `displayData` function, and it will show you an image with 25 units,
each corresponding to one hidden unit in the network. In your trained network, you should find that the hidden units correspond roughly to detectors that look for strokes and other patterns in the input.
```python
utils.displayData(Theta1[:, 1:])
```
### 3.1 Optional (ungraded) exercise
In this part of the exercise, you will get to try out different learning settings for the neural network to see how the performance of the neural network varies with the regularization parameter $\lambda$ and number of training steps (the `maxiter` option when using `scipy.optimize.minimize`). Neural networks are very powerful models that can form highly complex decision boundaries. Without regularization, it is possible for a neural network to “overfit” a training set so that it obtains close to 100% accuracy on the training set but does not perform as well on new examples that it has not seen before. You can set the regularization $\lambda$ to a smaller value and the `maxiter` parameter to a higher number of iterations to see this for yourself.
|
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
⊢ ∑ x : σ → K, ↑(eval x) f = 0
[PROOFSTEP]
haveI : DecidableEq K := Classical.decEq K
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
⊢ ∑ x : σ → K, ↑(eval x) f = 0
[PROOFSTEP]
calc
∑ x, eval x f = ∑ x : σ → K, ∑ d in f.support, f.coeff d * ∏ i, x i ^ d i := by simp only [eval_eq']
_ = ∑ d in f.support, ∑ x : σ → K, f.coeff d * ∏ i, x i ^ d i := sum_comm
_ = 0 := sum_eq_zero ?_
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
⊢ ∑ x : σ → K, ↑(eval x) f = ∑ x : σ → K, ∑ d in support f, coeff d f * ∏ i : σ, x i ^ ↑d i
[PROOFSTEP]
simp only [eval_eq']
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
⊢ ∀ (x : σ →₀ ℕ), x ∈ support f → ∑ x_1 : σ → K, coeff x f * ∏ i : σ, x_1 i ^ ↑x i = 0
[PROOFSTEP]
intro d hd
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
⊢ ∑ x : σ → K, coeff d f * ∏ i : σ, x i ^ ↑d i = 0
[PROOFSTEP]
obtain ⟨i, hi⟩ : ∃ i, d i < q - 1
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
⊢ ∃ i, ↑d i < q - 1
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
⊢ ∑ x : σ → K, coeff d f * ∏ i : σ, x i ^ ↑d i = 0
[PROOFSTEP]
exact f.exists_degree_lt (q - 1) h hd
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
⊢ ∑ x : σ → K, coeff d f * ∏ i : σ, x i ^ ↑d i = 0
[PROOFSTEP]
calc
(∑ x : σ → K, f.coeff d * ∏ i, x i ^ d i) = f.coeff d * ∑ x : σ → K, ∏ i, x i ^ d i := mul_sum.symm
_ = 0 := (mul_eq_zero.mpr ∘ Or.inr) ?_
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
⊢ ∑ x : σ → K, ∏ i : σ, x i ^ ↑d i = 0
[PROOFSTEP]
calc
(∑ x : σ → K, ∏ i, x i ^ d i) =
∑ x₀ : { j // j ≠ i } → K, ∑ x : { x : σ → K // x ∘ (↑) = x₀ }, ∏ j, (x : σ → K) j ^ d j :=
(Fintype.sum_fiberwise _ _).symm
_ = 0 := Fintype.sum_eq_zero _ ?_
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
⊢ ∀ (a : { j // j ≠ i } → K), ∑ x : { x // x ∘ Subtype.val = a }, ∏ j : σ, ↑x j ^ ↑d j = 0
[PROOFSTEP]
intro x₀
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
⊢ ∑ x : { x // x ∘ Subtype.val = x₀ }, ∏ j : σ, ↑x j ^ ↑d j = 0
[PROOFSTEP]
let e : K ≃ { x // x ∘ ((↑) : _ → σ) = x₀ } := (Equiv.subtypeEquivCodomain _).symm
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
⊢ ∑ x : { x // x ∘ Subtype.val = x₀ }, ∏ j : σ, ↑x j ^ ↑d j = 0
[PROOFSTEP]
calc
(∑ x : { x : σ → K // x ∘ (↑) = x₀ }, ∏ j, (x : σ → K) j ^ d j) = ∑ a : K, ∏ j : σ, (e a : σ → K) j ^ d j :=
(e.sum_comp _).symm
_ = ∑ a : K, (∏ j, x₀ j ^ d j) * a ^ d i := (Fintype.sum_congr _ _ ?_)
_ = (∏ j, x₀ j ^ d j) * ∑ a : K, a ^ d i := by rw [mul_sum]
_ = 0 := by rw [sum_pow_lt_card_sub_one K _ hi, mul_zero]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
⊢ ∑ a : K, (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * a ^ ↑d i = (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * ∑ a : K, a ^ ↑d i
[PROOFSTEP]
rw [mul_sum]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
⊢ (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * ∑ a : K, a ^ ↑d i = 0
[PROOFSTEP]
rw [sum_pow_lt_card_sub_one K _ hi, mul_zero]
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
⊢ ∀ (a : K), ∏ j : σ, ↑(↑e a) j ^ ↑d j = (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * a ^ ↑d i
[PROOFSTEP]
intro a
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
⊢ ∏ j : σ, ↑(↑e a) j ^ ↑d j = (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * a ^ ↑d i
[PROOFSTEP]
let e' : Sum { j // j = i } { j // j ≠ i } ≃ σ := Equiv.sumCompl _
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
⊢ ∏ j : σ, ↑(↑e a) j ^ ↑d j = (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * a ^ ↑d i
[PROOFSTEP]
letI : Unique { j // j = i } :=
{ default := ⟨i, rfl⟩
uniq := fun ⟨j, h⟩ => Subtype.val_injective h }
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
⊢ ∏ j : σ, ↑(↑e a) j ^ ↑d j = (∏ j : { j // j ≠ i }, x₀ j ^ ↑d ↑j) * a ^ ↑d i
[PROOFSTEP]
calc
(∏ j : σ, (e a : σ → K) j ^ d j) = (e a : σ → K) i ^ d i * ∏ j : { j // j ≠ i }, (e a : σ → K) j ^ d j := by
rw [← e'.prod_comp, Fintype.prod_sum_type, univ_unique, prod_singleton]; rfl
_ = a ^ d i * ∏ j : { j // j ≠ i }, (e a : σ → K) j ^ d j := by rw [Equiv.subtypeEquivCodomain_symm_apply_eq]
_ = a ^ d i * ∏ j, x₀ j ^ d j :=
(congr_arg _ (Fintype.prod_congr _ _ ?_))
-- see below
_ = (∏ j, x₀ j ^ d j) * a ^ d i := mul_comm _ _
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
⊢ ∏ j : σ, ↑(↑e a) j ^ ↑d j = ↑(↑e a) i ^ ↑d i * ∏ j : { j // j ≠ i }, ↑(↑e a) ↑j ^ ↑d ↑j
[PROOFSTEP]
rw [← e'.prod_comp, Fintype.prod_sum_type, univ_unique, prod_singleton]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
⊢ ↑(↑e a) (↑e' (Sum.inl default)) ^ ↑d (↑e' (Sum.inl default)) *
∏ a₂ : { j // j ≠ i }, ↑(↑e a) (↑e' (Sum.inr a₂)) ^ ↑d (↑e' (Sum.inr a₂)) =
↑(↑e a) i ^ ↑d i * ∏ j : { j // j ≠ i }, ↑(↑e a) ↑j ^ ↑d ↑j
[PROOFSTEP]
rfl
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
⊢ ↑(↑e a) i ^ ↑d i * ∏ j : { j // j ≠ i }, ↑(↑e a) ↑j ^ ↑d ↑j = a ^ ↑d i * ∏ j : { j // j ≠ i }, ↑(↑e a) ↑j ^ ↑d ↑j
[PROOFSTEP]
rw [Equiv.subtypeEquivCodomain_symm_apply_eq]
[GOAL]
case intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
⊢ ∀ (a_1 : { j // j ≠ i }), ↑(↑e a) ↑a_1 ^ ↑d ↑a_1 = x₀ a_1 ^ ↑d ↑a_1
[PROOFSTEP]
rintro ⟨j, hj⟩
[GOAL]
case intro.mk
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
j : σ
hj : j ≠ i
⊢ ↑(↑e a) ↑{ val := j, property := hj } ^ ↑d ↑{ val := j, property := hj } =
x₀ { val := j, property := hj } ^ ↑d ↑{ val := j, property := hj }
[PROOFSTEP]
show (e a : σ → K) j ^ d j = x₀ ⟨j, hj⟩ ^ d j
[GOAL]
case intro.mk
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝³ : Fintype K
inst✝² : Field K
inst✝¹ : Fintype σ
inst✝ : DecidableEq σ
f : MvPolynomial σ K
h : totalDegree f < (q - 1) * Fintype.card σ
this✝ : DecidableEq K
d : σ →₀ ℕ
hd : d ∈ support f
i : σ
hi : ↑d i < q - 1
x₀ : { j // j ≠ i } → K
e : K ≃ { x // x ∘ Subtype.val = x₀ } := (Equiv.subtypeEquivCodomain x₀).symm
a : K
e' : { j // j = i } ⊕ { j // j ≠ i } ≃ σ := Equiv.sumCompl fun j => j = i
this : Unique { j // j = i } :=
{ toInhabited := { default := { val := i, property := (_ : i = i) } },
uniq := (_ : ∀ (x : { j // j = i }), x = default) }
j : σ
hj : j ≠ i
⊢ ↑(↑e a) j ^ ↑d j = x₀ { val := j, property := hj } ^ ↑d j
[PROOFSTEP]
rw [Equiv.subtypeEquivCodomain_symm_apply_ne]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
have hq : 0 < q - 1 := by rw [← Fintype.card_units, Fintype.card_pos_iff]; exact ⟨1⟩
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
⊢ 0 < q - 1
[PROOFSTEP]
rw [← Fintype.card_units, Fintype.card_pos_iff]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
⊢ Nonempty Kˣ
[PROOFSTEP]
exact ⟨1⟩
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
let S : Finset (σ → K) := {x ∈ univ | ∀ i ∈ s, eval x (f i) = 0}.toFinset
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
have hS : ∀ x : σ → K, x ∈ S ↔ ∀ i : ι, i ∈ s → eval x (f i) = 0 :=
by
intro x
simp only [Set.toFinset_setOf, mem_univ, true_and, mem_filter]
/- The polynomial `F = ∏ i in s, (1 - (f i)^(q - 1))` has the nice property
that it takes the value `1` on elements of `{x : σ → K // ∀ i ∈ s, (f i).eval x = 0}`
while it is `0` outside that locus.
Hence the sum of its values is equal to the cardinality of
`{x : σ → K // ∀ i ∈ s, (f i).eval x = 0}` modulo `p`. -/
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
⊢ ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
[PROOFSTEP]
intro x
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
x : σ → K
⊢ x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
[PROOFSTEP]
simp only [Set.toFinset_setOf, mem_univ, true_and, mem_filter]
/- The polynomial `F = ∏ i in s, (1 - (f i)^(q - 1))` has the nice property
that it takes the value `1` on elements of `{x : σ → K // ∀ i ∈ s, (f i).eval x = 0}`
while it is `0` outside that locus.
Hence the sum of its values is equal to the cardinality of
`{x : σ → K // ∀ i ∈ s, (f i).eval x = 0}` modulo `p`. -/
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
let F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
have hF : ∀ x, eval x F = if x ∈ S then 1 else 0 := by
intro x
calc
eval x F = ∏ i in s, eval x (1 - f i ^ (q - 1)) := eval_prod s _ x
_ = if x ∈ S then 1 else 0 := ?_
simp only [(eval x).map_sub, (eval x).map_pow, (eval x).map_one]
split_ifs with hx
· apply Finset.prod_eq_one
intro i hi
rw [hS] at hx
rw [hx i hi, zero_pow hq, sub_zero]
· obtain ⟨i, hi, hx⟩ : ∃ i : ι, i ∈ s ∧ eval x (f i) ≠ 0 := by simpa only [hS, not_forall, not_imp] using hx
apply Finset.prod_eq_zero hi
rw [pow_card_sub_one_eq_one (eval x (f i)) hx, sub_self]
-- In particular, we can now show:
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
⊢ ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
[PROOFSTEP]
intro x
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
⊢ ↑(eval x) F = if x ∈ S then 1 else 0
[PROOFSTEP]
calc
eval x F = ∏ i in s, eval x (1 - f i ^ (q - 1)) := eval_prod s _ x
_ = if x ∈ S then 1 else 0 := ?_
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
⊢ ∏ i in s, ↑(eval x) (1 - f i ^ (q - 1)) = if x ∈ S then 1 else 0
[PROOFSTEP]
simp only [(eval x).map_sub, (eval x).map_pow, (eval x).map_one]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
⊢ ∏ x_1 in s, (1 - ↑(eval x) (f x_1) ^ (q - 1)) =
if x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0} then 1 else 0
[PROOFSTEP]
split_ifs with hx
[GOAL]
case pos
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx : x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
⊢ ∏ x_1 in s, (1 - ↑(eval x) (f x_1) ^ (q - 1)) = 1
[PROOFSTEP]
apply Finset.prod_eq_one
[GOAL]
case pos.h
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx : x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
⊢ ∀ (x_1 : ι), x_1 ∈ s → 1 - ↑(eval x) (f x_1) ^ (q - 1) = 1
[PROOFSTEP]
intro i hi
[GOAL]
case pos.h
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx : x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
i : ι
hi : i ∈ s
⊢ 1 - ↑(eval x) (f i) ^ (q - 1) = 1
[PROOFSTEP]
rw [hS] at hx
[GOAL]
case pos.h
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx : ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
i : ι
hi : i ∈ s
⊢ 1 - ↑(eval x) (f i) ^ (q - 1) = 1
[PROOFSTEP]
rw [hx i hi, zero_pow hq, sub_zero]
[GOAL]
case neg
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx : ¬x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
⊢ ∏ x_1 in s, (1 - ↑(eval x) (f x_1) ^ (q - 1)) = 0
[PROOFSTEP]
obtain ⟨i, hi, hx⟩ : ∃ i : ι, i ∈ s ∧ eval x (f i) ≠ 0 := by simpa only [hS, not_forall, not_imp] using hx
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx : ¬x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
⊢ ∃ i, i ∈ s ∧ ↑(eval x) (f i) ≠ 0
[PROOFSTEP]
simpa only [hS, not_forall, not_imp] using hx
[GOAL]
case neg.intro.intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx✝ : ¬x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
i : ι
hi : i ∈ s
hx : ↑(eval x) (f i) ≠ 0
⊢ ∏ x_1 in s, (1 - ↑(eval x) (f x_1) ^ (q - 1)) = 0
[PROOFSTEP]
apply Finset.prod_eq_zero hi
[GOAL]
case neg.intro.intro
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
x : σ → K
hx✝ : ¬x ∈ Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
i : ι
hi : i ∈ s
hx : ↑(eval x) (f i) ≠ 0
⊢ 1 - ↑(eval x) (f i) ^ (q - 1) = 0
[PROOFSTEP]
rw [pow_card_sub_one_eq_one (eval x (f i)) hx, sub_self]
-- In particular, we can now show:
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
have key : ∑ x, eval x F = Fintype.card { x : σ → K // ∀ i ∈ s, eval x (f i) = 0 }
[GOAL]
case key
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
⊢ ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
rw [Fintype.card_of_subtype S hS, card_eq_sum_ones, Nat.cast_sum, Nat.cast_one, ← Fintype.sum_extend_by_zero S,
sum_congr rfl fun x _ => hF x]
-- With these preparations under our belt, we will approach the main goal.
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
show p ∣ Fintype.card { x // ∀ i : ι, i ∈ s → eval x (f i) = 0 }
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ p ∣ Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 }
[PROOFSTEP]
rw [← CharP.cast_eq_zero_iff K, ← key]
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ ∑ x : σ → K, ↑(eval x) F = 0
[PROOFSTEP]
show
(∑ x, eval x F) =
0
-- We are now ready to apply the main machine, proven before.
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ ∑ x : σ → K, ↑(eval x) F = 0
[PROOFSTEP]
apply F.sum_eval_eq_zero
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ totalDegree F < (q - 1) * Fintype.card σ
[PROOFSTEP]
show F.totalDegree < (q - 1) * Fintype.card σ
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ totalDegree F < (q - 1) * Fintype.card σ
[PROOFSTEP]
calc
F.totalDegree ≤ ∑ i in s, (1 - f i ^ (q - 1)).totalDegree := totalDegree_finset_prod s _
_ ≤ ∑ i in s, (q - 1) * (f i).totalDegree :=
(sum_le_sum fun i _ => ?_)
-- see ↓
_ = (q - 1) * ∑ i in s, (f i).totalDegree := mul_sum.symm
_ < (q - 1) * Fintype.card σ := by
rwa [mul_lt_mul_left hq]
-- Now we prove the remaining step from the preceding calculation
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
⊢ (q - 1) * ∑ i in s, totalDegree (f i) < (q - 1) * Fintype.card σ
[PROOFSTEP]
rwa [mul_lt_mul_left hq]
-- Now we prove the remaining step from the preceding calculation
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
i : ι
x✝ : i ∈ s
⊢ totalDegree (1 - f i ^ (q - 1)) ≤ (q - 1) * totalDegree (f i)
[PROOFSTEP]
show (1 - f i ^ (q - 1)).totalDegree ≤ (q - 1) * (f i).totalDegree
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
i : ι
x✝ : i ∈ s
⊢ totalDegree (1 - f i ^ (q - 1)) ≤ (q - 1) * totalDegree (f i)
[PROOFSTEP]
calc
(1 - f i ^ (q - 1)).totalDegree ≤ max (1 : MvPolynomial σ K).totalDegree (f i ^ (q - 1)).totalDegree :=
totalDegree_sub _ _
_ ≤ (f i ^ (q - 1)).totalDegree := by simp
_ ≤ (q - 1) * (f i).totalDegree := totalDegree_pow _ _
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
s : Finset ι
f : ι → MvPolynomial σ K
h : ∑ i in s, totalDegree (f i) < Fintype.card σ
hq : 0 < q - 1
S : Finset (σ → K) := Set.toFinset {x | x ∈ univ ∧ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0}
hS : ∀ (x : σ → K), x ∈ S ↔ ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0
F : MvPolynomial σ K := ∏ i in s, (1 - f i ^ (q - 1))
hF : ∀ (x : σ → K), ↑(eval x) F = if x ∈ S then 1 else 0
key : ∑ x : σ → K, ↑(eval x) F = ↑(Fintype.card { x // ∀ (i : ι), i ∈ s → ↑(eval x) (f i) = 0 })
i : ι
x✝ : i ∈ s
⊢ max (totalDegree 1) (totalDegree (f i ^ (q - 1))) ≤ totalDegree (f i ^ (q - 1))
[PROOFSTEP]
simp
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁶ : Fintype K
inst✝⁵ : Field K
inst✝⁴ : Fintype σ
inst✝³ : DecidableEq σ
inst✝² : DecidableEq K
p : ℕ
inst✝¹ : CharP K p
inst✝ : Fintype ι
f : ι → MvPolynomial σ K
h : ∑ i : ι, totalDegree (f i) < Fintype.card σ
⊢ p ∣ Fintype.card { x // ∀ (i : ι), ↑(eval x) (f i) = 0 }
[PROOFSTEP]
simpa using char_dvd_card_solutions_of_sum_lt p h
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f : MvPolynomial σ K
h : totalDegree f < Fintype.card σ
⊢ p ∣ Fintype.card { x // ↑(eval x) f = 0 }
[PROOFSTEP]
let F : Unit → MvPolynomial σ K := fun _ => f
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f : MvPolynomial σ K
h : totalDegree f < Fintype.card σ
F : Unit → MvPolynomial σ K := fun x => f
⊢ p ∣ Fintype.card { x // ↑(eval x) f = 0 }
[PROOFSTEP]
have : (∑ i : Unit, (F i).totalDegree) < Fintype.card σ := h
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f : MvPolynomial σ K
h : totalDegree f < Fintype.card σ
F : Unit → MvPolynomial σ K := fun x => f
this : ∑ i : Unit, totalDegree (F i) < Fintype.card σ
⊢ p ∣ Fintype.card { x // ↑(eval x) f = 0 }
[PROOFSTEP]
convert char_dvd_card_solutions_of_sum_lt p this
[GOAL]
case h.e'_4.h.h.e'_2.h.a
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f : MvPolynomial σ K
h : totalDegree f < Fintype.card σ
F : Unit → MvPolynomial σ K := fun x => f
this : ∑ i : Unit, totalDegree (F i) < Fintype.card σ
x✝ : σ → K
⊢ ↑(eval x✝) f = 0 ↔ ∀ (i : Unit), i ∈ univ → ↑(eval x✝) (F i) = 0
[PROOFSTEP]
aesop
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f₁ f₂ : MvPolynomial σ K
h : totalDegree f₁ + totalDegree f₂ < Fintype.card σ
⊢ p ∣ Fintype.card { x // ↑(eval x) f₁ = 0 ∧ ↑(eval x) f₂ = 0 }
[PROOFSTEP]
let F : Bool → MvPolynomial σ K := fun b => cond b f₂ f₁
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f₁ f₂ : MvPolynomial σ K
h : totalDegree f₁ + totalDegree f₂ < Fintype.card σ
F : Bool → MvPolynomial σ K := fun b => bif b then f₂ else f₁
⊢ p ∣ Fintype.card { x // ↑(eval x) f₁ = 0 ∧ ↑(eval x) f₂ = 0 }
[PROOFSTEP]
have : (∑ b : Bool, (F b).totalDegree) < Fintype.card σ := (add_comm _ _).trans_lt h
[GOAL]
K : Type u_1
σ : Type u_2
ι : Type u_3
inst✝⁵ : Fintype K
inst✝⁴ : Field K
inst✝³ : Fintype σ
inst✝² : DecidableEq σ
inst✝¹ : DecidableEq K
p : ℕ
inst✝ : CharP K p
f₁ f₂ : MvPolynomial σ K
h : totalDegree f₁ + totalDegree f₂ < Fintype.card σ
F : Bool → MvPolynomial σ K := fun b => bif b then f₂ else f₁
this : ∑ b : Bool, totalDegree (F b) < Fintype.card σ
⊢ p ∣ Fintype.card { x // ↑(eval x) f₁ = 0 ∧ ↑(eval x) f₂ = 0 }
[PROOFSTEP]
simpa only [Bool.forall_bool] using char_dvd_card_solutions_of_fintype_sum_lt p this
|
!
! Copyright (C) 2001-2007 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!
!-----------------------------------------------------------------------
subroutine force_corr (forcescc)
!-----------------------------------------------------------------------
! This routine calculates the force term vanishing at full
! self-consistency. It follows the suggestion of Chan-Bohnen-Ho
! (PRB 47, 4771 (1993)). The true charge density is approximated
! by means of a free atom superposition.
! (alessio f.)
! Uses superposition of atomic charges contained in the array rho_at
! and read from pseudopotential files
!
USE kinds, ONLY : DP
USE constants, ONLY : tpi
USE atom, ONLY : msh, rgrid
USE uspp_param, ONLY : upf
USE ions_base, ONLY : nat, ntyp => nsp, ityp, tau
USE cell_base, ONLY : tpiba
USE fft_base, ONLY : dfftp
USE fft_interfaces, ONLY : fwfft
USE gvect, ONLY : ngm, gstart, nl, g, ngl, gl, igtongl
USE lsda_mod, ONLY : nspin
USE scf, ONLY : vnew
USE wavefunctions_module, ONLY : psic
USE mp_global, ONLY : intra_bgrp_comm
USE mp, ONLY : mp_sum
!
implicit none
!
real(DP) :: forcescc (3, nat)
!
real(DP), allocatable :: rhocgnt (:), aux (:)
! work space
real(DP) :: gx, arg, fact
! temp factors
integer :: ir, isup, isdw, ig, nt, na, ipol, ndm
! counters
!
! vnew is V_out - V_in, psic is the temp space
!
if (nspin == 1 .or. nspin == 4) then
psic(:) = vnew%of_r (:, 1)
else
isup = 1
isdw = 2
psic(:) = (vnew%of_r (:, isup) + vnew%of_r (:, isdw)) * 0.5d0
end if
!
ndm = MAXVAL ( msh(1:ntyp) )
allocate ( aux(ndm), rhocgnt(ngl) )
forcescc(:,:) = 0.d0
CALL fwfft ('Dense', psic, dfftp)
fact = 1.d0
do nt = 1, ntyp
!
! Here we compute the G.ne.0 term
!
do ig = gstart, ngl
gx = sqrt (gl (ig) ) * tpiba
do ir = 1, msh (nt)
if (rgrid(nt)%r(ir) .lt.1.0d-8) then
aux (ir) = upf(nt)%rho_at (ir)
else
aux (ir) = upf(nt)%rho_at (ir) * &
sin(gx*rgrid(nt)%r(ir)) / (rgrid(nt)%r(ir)*gx)
endif
enddo
call simpson (msh (nt), aux, rgrid(nt)%rab, rhocgnt (ig) )
enddo
do na = 1, nat
if (nt.eq.ityp (na) ) then
do ig = gstart, ngm
arg = (g (1, ig) * tau (1, na) + g (2, ig) * tau (2, na) &
+ g (3, ig) * tau (3, na) ) * tpi
do ipol = 1, 3
forcescc (ipol, na) = forcescc (ipol, na) + fact * &
rhocgnt (igtongl(ig) ) * CMPLX(sin(arg),cos(arg),kind=DP) * &
g(ipol,ig) * tpiba * CONJG(psic(nl(ig)))
enddo
enddo
endif
enddo
enddo
!
call mp_sum( forcescc, intra_bgrp_comm )
!
deallocate ( aux, rhocgnt )
return
end subroutine force_corr
|
module _ (A : Set) (Sing : A → Set) (F : (a : A) → Sing a → Set) where
test : {a : A} → Sing a → Set
test s = F {!!} s
-- WAS: C-c C-s inserts a, which produces a scope error
-- Instead, it should insert _
|
MODULE mod11
implicit none
END MODULE mod11
|
Lynn Rives stands in front of the new sign bearing his name that hangs on the Environmental Education Center at the Mobbly Bayou Wilderness Preserve.
For nearly 20 years, Lynn Rives served as Oldsmar’s leisure services director, where he was responsible for such projects as the creation of Canal Park and its renovated Supercross track, the growth of Oldsmar’s expansive parks and trails system, and the construction of the Environmental Education Center at Mobbly Bayou Preserve.
Despite his extensive list of accomplishments, officials rejected a request by City Council member Jerry Beverland to name a city facility in Rives’ honor prior to his retirement last March, a decision that sat just fine with the designated honoree.
“I told (Jerry) I really wasn’t interested in it anyway,” Rives said in January of 2017.
Flash-forward a year and Rives is still alive and well; in fact, he’s currently serving as the City Manager of his hometown, Belleair Beach.
However, after being gone for 12 months, Oldsmar officials decided the time was right to officially name the center after Rives.
The City of Oldsmar dedicated the Environmental Education Center at Mobbly Bayou Preserve in honor of longtime Leisure Services Director Lynn Rives, who retired in 2017 after nearly 20 years with the city.
“The City of Oldsmar likes to consider ourselves on the leading edge where we name things after people before they pass away,” City Manager Al Braithwaite joked to open the dedication ceremony on Monday, Mar. 12.
Braithwaite then handed the microphone to Oldsmar Mayor Doug Bevis who listed several notable accomplishments from Rives’ tenure, which began in September 1999, including: opening the original Oldsmar BMX track and converting it into an Olympic-caliber Supercross facility; developing Richard Rogers Park, the Cypress Forest Recreation Center and Veterans Memorial Park; and opening Mobbly Bayou Wilderness Preserve, which includes a dog park, a zipline aerial adventure park and the Environmental Education Center.
“This building was erected in 2016 under his supervision and includes a solar panel on the roof that helped achieve one of the many council priorities he was given,” Bevis said of the facility, which he called “a major undertaking” that included hoisting the hefty wooden beams that support the roof into place.
Oldsmar Mayor Doug Bevis notes the heavy wooden beams that make up the Environmental Education Center ceiling during the facility’s dedication ceremony in Lynn Rives’ honor on Monday, Mar. 12, 2018.
“Probably with the exception of the concrete work and the beams being set in here, our city staff did this entire project. So congratulations to Lynn and his staff for pulling that off.”
Bevis concluded by saying “thank you to Lynn for the numerous other projects he spearheaded to help put our city on the map where it is today” before he directed the group to the front of the facility for the unveiling of the sign.
Following the reveal, Rives spoke about receiving the (slightly delayed) honor.
“I’m very happy. It’s a very nice thing to do and I really appreciate it,” he told Oldsmar Connect.
Oldsmar officials dodge flying metal clips during the unveiling of the sign dedicating the Lynn Rives Environmental Education Center on Monday, Mar. 12, 2018.
Despite his initial reluctance to have something named after him, Rives admitted it was special to see his name on this facility.
Braithwaite and Rives’ successor, Felicia Donnelly, also commented after the ceremony.
“He is an amazing go-getter,” Braithwaite, who started his career with the city at the same time as Rives, said. “I’ve never seen anyone who gets things done as well as he does. |
State Before: a b c n : ℕ
⊢ image (fun j => n - 1 - j) (range n) = range n State After: case zero
a b c : ℕ
⊢ image (fun j => zero - 1 - j) (range zero) = range zero
case succ
a b c n✝ : ℕ
⊢ image (fun j => succ n✝ - 1 - j) (range (succ n✝)) = range (succ n✝) Tactic: cases n State Before: case zero
a b c : ℕ
⊢ image (fun j => zero - 1 - j) (range zero) = range zero State After: no goals Tactic: rw [range_zero, image_empty] State Before: case succ
a b c n✝ : ℕ
⊢ image (fun j => succ n✝ - 1 - j) (range (succ n✝)) = range (succ n✝) State After: case succ
a b c n✝ : ℕ
⊢ Ico (succ n✝ - 1 + 1 - succ n✝) (succ n✝ - 1 + 1 - 0) = Ico 0 (succ n✝) Tactic: rw [Finset.range_eq_Ico, Nat.Ico_image_const_sub_eq_Ico (Nat.zero_le _)] State Before: case succ
a b c n✝ : ℕ
⊢ Ico (succ n✝ - 1 + 1 - succ n✝) (succ n✝ - 1 + 1 - 0) = Ico 0 (succ n✝) State After: no goals Tactic: simp_rw [succ_sub_succ, tsub_zero, tsub_self] |
If you are going to be at ISTE in Denver this year, come join coffeeEDU one morning. CoffeeEDU (coffeeEDU.org) is a ONE HOUR meet up of educators in the style of edcamp.
While 6am may be early for some folks, it is a great start to your day. This is a fun way to chat and connect with some awesome educators and have your morning coffee!
Amazon Education is sponsoring the coffeeEDU this year by supplying donuts and coffee. |
%GM_PHD_Predict_Birth
%Last modified 2nd September 2013
%Matlab code by Bryan Clarke [email protected]
%This file performs prediction for newly birthed and spawned targets
%This is necessary since their positions were initialised in a previous timestep and they may
%have moved between then and now
%We iterate through j = 1..J_b,k where J_b,k is number of birthed landmarks
%at time k (but the landmarks correspond to the measurement from time k-1)
%For this implementation, birthed and spawned targets are identical except
%they have weights that are calculated from different functions, and
%different starting covariances. A target is spawned or birthed depending
%on which weighting function will give it a higher starting weight.
%The means of these will be the position in Cartesian space where they are detected
%The covariances & weights are calculated according to Vo&Ma
s = sprintf('Step 1: Prediction for birthed and spawned targets.');
disp(s);
m_birth_before_prediction = [m_birth, m_spawn]; %Need to store these BEFORE prediction for use in the update step.
%Perform prediction for birthed targets using birthed velocities.
for j = 1:numBirthedTargets
i = i + 1;
%w_birth was already instantiated in GM_PHD_Create_Birth
m_birth(:,j) = F * m_birth(:,j);
P_range = calculateDataRange4(j);
P_birth(:,P_range) = Q + F * P_birth(:,P_range) * F';
end
%Perform prediction for spawned targets using spawned velocities.
for j = 1:numSpawnedTargets
i = i + 1;
%w_spawn was already instantiated in GM_PHD_Create_Birth
m_spawn(:,j) = F * m_spawn(:,j);
P_range = calculateDataRange4(j);
P_spawn(:,P_range) = Q + F * P_spawn(:,P_range) * F';
end
if(VERBOSE == 1)
for j = 1:numBirthedTargets
thisM = m_birth(:,j);
s = sprintf('Birthed target %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_birth(j));
disp(s);
end
for j = 1:numSpawnedTargets
thisM = m_spawn(:,j);
s = sprintf('Spawned target %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_spawn(j));
disp(s);
end
end
|
Vincenzo <unk> (November 26, 1753) – Cardinal-Priest of SS. Marcellino e Pietro; archbishop of Bologna
|
import gin
import numpy as np
from PIL import Image, ImageDraw
import gym
from gym import error, spaces
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.networks import network
from tf_agents.specs import distribution_spec, tensor_spec
from tf_agents.networks import normal_projection_network, encoding_network, categorical_projection_network
from tf_agents.networks import utils as network_utils
from tf_agents.utils import nest_utils
CANVAS_WIDTH = 128
# RECT_WIDTH = 10
RECTS_WH = [[10, 10], [20, 10], [10, 20]]
class RectEnv(gym.Env):
action_map = [-8, 8, -1, 1, 0]
def __init__(self):
self.max_step = 20
self.cur_step = 0
self.width = CANVAS_WIDTH
self.rects_wh = np.array(RECTS_WH, dtype=np.int32)
self.n_items = len(RECTS_WH)
# self.observation_space = spaces.Dict(
# {
# "target_im": spaces.Box(
# low=0, high=1, shape=(self.width, self.width, 1)
# ), # (H, W, C)
# "cur_im": spaces.Box(
# low=0, high=1, shape=(self.width, self.width, 1)
# ), # (H, W, C)
# "cur_coord": spaces.Box(low=-10, high=10, shape=(self.n_items, 4)),
# }
# )
# self.observation_space = spaces.Dict(
# {
# # (H, W, C)
# "im": spaces.Box(low=0, high=1, shape=(self.width, self.width, 1)),
# "coord": spaces.Box(low=-10, high=10, shape=(self.n_items, 4)),
# }
# )
self.observation_space = spaces.Dict({
# target image, (H, W, C=1)
"target": spaces.Box(low=0, high=1, shape=(self.width, self.width, 1)),
# current canvas, (H, W, C=1)
"canvas": spaces.Box(low=0, high=1, shape=(self.width, self.width, 1)),
# current rectangle coords, (N, 4)
"coord": spaces.Box(low=-10, high=10, shape=(self.n_items, 4)),
})
self.action_space = spaces.Tuple([
spaces.Discrete(self.n_items), # i_item
spaces.Discrete(len(self.action_map)), # i_dx
spaces.Discrete(len(self.action_map)), # i_dy
# spaces.Discrete(2), # early stop
# spaces.Box(low=0, high=self.width, shape=(2,))
])
self.target_coord = None # (n_obj, 4=(x0, y0, x1, y1))
self.cur_coord = None # (n_obj, 4=(x0, y0, x1, y1))
self.viewer = None
self.target_im = None
def reset(self):
self.cur_step = 0
cxy = np.random.randint(0, self.width, (self.n_items, 2))
self.target_coord = np.concatenate(
[cxy-self.rects_wh/2, cxy+self.rects_wh/2], axis=1).astype(np.int32)
self.cur_coord = np.array(
[np.concatenate([[0, 0], wh]) for wh in self.rects_wh], dtype=np.int32)
self.target_im = self._render(self.target_coord, "target")
return self._obs()
def step(self, action):
"""
Args:
action, (i_rect, i_dx, i_dy)
Return:
obs: target_im (H, W, C), cur_im (H, W, C), field_info (x0, y0)
"""
# print(action)
# print(self.action_map[action[0]], self.action_map[action[1]])
i = action[0]
dxy = np.array(
[self.action_map[action[1]], self.action_map[action[2]]], dtype=np.int32
)
xy0 = self.cur_coord[i, :2] + dxy
self.cur_coord[i] = np.concatenate(
[xy0, xy0 + self.rects_wh[i]], axis=0)
reward = self._reward(self.cur_coord / self.width,
self.target_coord / self.width)
done = self.cur_step >= self.max_step
self.cur_step += 1
return self._obs(), reward, done, {}
def _render(self, coord: np.ndarray, name: str) -> np.ndarray:
"""Args: coord: (n_items, 4)"""
# print(coord)
im = Image.new("L", (self.width, self.width))
draw = ImageDraw.Draw(im)
for _, c in enumerate(coord):
draw.rectangle(tuple(c), fill=255)
im.save("./logs/{}-{}.png".format(name, self.cur_step))
# normalize & transform image
x = np.array(im, dtype=float) / 255.
x = np.expand_dims(x, axis=-1) # (H, W, C=1)
return x
def _obs(self):
return {
"target": self.target_im,
"canvas": self._render(self.cur_coord, "cur"),
"coord": self.cur_coord / self.width
}
def _reward(self, a_xy: np.array, b_xy: np.array):
"""越遠離target_coord (ie, L2-norm),獎勵越低(負獎勵)"""
d = np.linalg.norm(a_xy - b_xy, axis=1)
r = -1 * d / 2 + 1
r = np.clip(r, -1, None)
r = np.sum(r)
# elif r > 0:
# r *= 0.05 ** self.cur_step  # decay factor
return r
def _denorm(self, a: np.array):
return (a * self.width).astype(np.int16)
def _step(self, action):
"""@Debug"""
reward = 1
self.cur_step += 1
done = self.cur_step >= self.max_step
return self._obs(), reward, done, {}
def _regression_step(self, action):
"""@Deprecated
Args:
action: list[obj_id: int, coord: np.array]
Return:
obs: target_im (H, W, C), cur_im (H, W, C), field_info (x0, y0)
"""
obj_id, coord = action
coord *= self.width
x0, y0 = coord
self.obj_status = (
np.array([[x0, y0, (x0 + self.obj_w), (y0 + self.obj_w)]],
dtype=float)
/ self.width
)
self.cur_im = self._render(x0, y0)
reward = -(
((x0 - self.target_coords[0, 0]) ** 2)
+ ((y0 - self.target_coords[0, 1]) ** 2)
)
self.cur_step += 1
done = self.cur_step >= self.max_step
return self._obs(), reward, done, {"episode": {"r": reward}}
def render(self, mode="human", close=False):
pass
def close(self):
pass
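# --- Minimal usage sketch (an illustrative addition, not part of the original
# module): drive the environment with random actions. Note that _render saves
# debug images to ./logs/, so that directory is assumed to exist.
if __name__ == "__main__":
    env = RectEnv()
    obs = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()  # (i_item, i_dx, i_dy)
        obs, reward, done, _ = env.step(action)
        print("step", env.cur_step, "reward", reward)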
|
Don Gregorio Apartments offers all the comforts of an establishment known for its convenient location and quiet. It has studio apartments, with a pleasant environment where you will undoubtedly enjoy a relaxing and comfortable stay.
Located 300 meters from the beach.
All 44 studio apartments come equipped with a kitchen, bathroom, sitting room, telephone, satellite TV, safe, WiFi area, and terrace or balcony. The apartments sleep up to 3 guests.
Bars and restaurants are all within walking distance.
There is a gym and a swimming pool. |
Aspen Greenbelt
Arroyo Park
Evergreen Greenbelt
Stonegate Dog Park
Stonegate Lake
West Manor Park
West Pond
Westwood Park
Whaleback Park
|
[STATEMENT]
lemma lift_pI[intro?]:
"lift_p P f" if "\<And> heap x heap'. P heap \<Longrightarrow> run_state f heap = (x, heap') \<Longrightarrow> P heap'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lift_p P f
[PROOF STEP]
unfolding lift_p_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>heap. P heap \<longrightarrow> (case run_state f heap of (x, xa) \<Rightarrow> P xa)
[PROOF STEP]
by (auto intro: that) |
#ifndef CIRCUMCENTER_H
#define CIRCUMCENTER_H
#include <mtao/types.h>
#include <Eigen/Dense>
namespace mtao {
namespace geometry {
template <typename Derived>
auto circumcenter_spd(const Eigen::MatrixBase<Derived>& V) {
// 2 V.dot(C) = sum(V.colwise().squaredNorm()).transpose()
// probably dont really need this temporary
auto m = (V.rightCols(V.cols() - 1).colwise() - V.col(0)).eval();
auto A = (2 * m.transpose() * m).eval();
auto b = m.colwise().squaredNorm().transpose().eval();
A.llt().solveInPlace(b);
auto c = (V.col(0) + m * b).eval();
return c;
}
template <typename Derived>
auto circumcenter_spsd(const Eigen::MatrixBase<Derived>& V) {
mtao::MatrixX<typename Derived::Scalar> A(V.cols() + 1, V.cols() + 1);
A.setConstant(1);
A(V.cols(), V.cols()) = 0;
auto m = V.transpose() * V;
A.topLeftCorner(V.cols(), V.cols()) = 2 * m;
mtao::VectorX<typename Derived::Scalar> b(V.cols() + 1);
b.setConstant(1);
b.topRows(m.cols()) = V.colwise().squaredNorm().transpose();
auto x = A.colPivHouseholderQr().solve(b).eval();
return (V * x.topRows(V.cols())).eval();
}
template <typename Derived>
auto circumcenter(const Eigen::MatrixBase<Derived>& V) {
return circumcenter_spd(V);
}
template <typename VertexDerived, typename SimplexDerived>
auto circumcenters(const Eigen::MatrixBase<VertexDerived>& V,
const Eigen::MatrixBase<SimplexDerived>& S) {
constexpr static int E = VertexDerived::RowsAtCompileTime; // embed dim
constexpr static int N =
SimplexDerived::ColsAtCompileTime; // number of elements
constexpr static int D = SimplexDerived::RowsAtCompileTime; // simplex dim
using Scalar = typename VertexDerived::Scalar;
Eigen::Matrix<Scalar, E, N> C(V.rows(), S.cols());
Eigen::Matrix<Scalar, E, D> v(V.rows(), S.rows());
#pragma omp parallel for private(v)
for (int i = 0; i < S.cols(); ++i) {
auto s = S.col(i);
for (int j = 0; j < S.rows(); ++j) {
v.col(j) = V.col(s(j));
}
C.col(i) = circumcenter(v);
}
return C;
}
} // namespace geometry
} // namespace mtao
#endif // CIRCUMCENTER_H
|
//==================================================================================================
/*!
Copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#include <boost/simd/pack.hpp>
#include <iostream>
using namespace std;
using namespace boost::simd;
#define SECONDS_IN_ONE_DAY 86400
template <typename T>
class op_boost_utc_add
{
typedef boost::simd::pack<T, 16 / sizeof(T)> p_t;
const p_t seconds_in_one_day = boost::simd::splat<p_t>(SECONDS_IN_ONE_DAY);
public:
p_t operator()(p_t const &a0, p_t const &a1, p_t const &delta)
{
return a0 + (seconds_in_one_day * a1) + delta;
}
};
int main()
{
// long long is not supported, prefer std::int64_t
typedef boost::simd::pack<std::int64_t, 2> p_l_t;
p_l_t a(10, 100);
p_l_t b(10, 100);
p_l_t c(1, 1);
p_l_t r = op_boost_utc_add<std::int64_t>()(a, b, c);
cout << r << endl;
return 0;
}
|
#include "trackhost_pktin.hh"
#include "assert.hh"
#include "packet-in.hh"
#include "netinet++/ethernet.hh"
#include "flow.hh"
#include <boost/bind.hpp>
namespace vigil
{
static Vlog_module lg("trackhost_pktin");
Disposition trackhost_pktin::handle_pkt_in(const Event& e)
{
const Packet_in_event& pie = assert_cast<const Packet_in_event&>(e);
if (pie.flow.dl_type == ethernet::LLDP)
return CONTINUE;
if (!topo->is_internal(pie.datapath_id, pie.in_port) &&
!pie.flow.dl_src.is_multicast() && !pie.flow.dl_src.is_broadcast())
ht->add_location(pie.flow.dl_src, pie.datapath_id, pie.in_port);
else
VLOG_DBG(lg, "Host %"PRIx64" not registered, 'cos %s%s %s",
pie.flow.dl_src.hb_long(),
pie.flow.dl_src.is_multicast()?"multicast mac":"",
pie.flow.dl_src.is_broadcast()?"broadcast mac":"",
topo->is_internal(pie.datapath_id, pie.in_port)?"on internal port":"");
return CONTINUE;
}
void trackhost_pktin::configure(const Configuration* c)
{
resolve(ht);
resolve(topo);
register_handler<Packet_in_event>
(boost::bind(&trackhost_pktin::handle_pkt_in, this, _1));
}
void trackhost_pktin::install()
{
}
void trackhost_pktin::getInstance(const Context* c,
trackhost_pktin*& component)
{
component = dynamic_cast<trackhost_pktin*>
(c->get_by_interface(container::Interface_description
(typeid(trackhost_pktin).name())));
}
REGISTER_COMPONENT(Simple_component_factory<trackhost_pktin>,
trackhost_pktin);
} // vigil namespace
|
[STATEMENT]
lemma iMODb_cut_less1: "\<lbrakk> t \<in> [r, mod m, c]; r < t \<rbrakk> \<Longrightarrow>
[r, mod m, c] \<down>< t = [r, mod m, (t - r) div m - Suc 0]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>t \<in> [ r, mod m, c ]; r < t\<rbrakk> \<Longrightarrow> [ r, mod m, c ] \<down>< t = [ r, mod m, (t - r) div m - Suc 0 ]
[PROOF STEP]
apply (case_tac "m = 0")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>t \<in> [ r, mod m, c ]; r < t; m = 0\<rbrakk> \<Longrightarrow> [ r, mod m, c ] \<down>< t = [ r, mod m, (t - r) div m - Suc 0 ]
2. \<lbrakk>t \<in> [ r, mod m, c ]; r < t; m \<noteq> 0\<rbrakk> \<Longrightarrow> [ r, mod m, c ] \<down>< t = [ r, mod m, (t - r) div m - Suc 0 ]
[PROOF STEP]
apply (simp add: iMODb_mod_0 iIN_0)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>t \<in> [ r, mod m, c ]; r < t; m \<noteq> 0\<rbrakk> \<Longrightarrow> [ r, mod m, c ] \<down>< t = [ r, mod m, (t - r) div m - Suc 0 ]
[PROOF STEP]
apply (simp add: iMODb_cut_less)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>t \<in> [ r, mod m, c ]; r < t; 0 < m\<rbrakk> \<Longrightarrow> (r + m * c < t \<longrightarrow> [ r, mod m, c ] = [ r, mod m, (t - r) div m - Suc 0 ]) \<and> (\<not> r + m * c < t \<longrightarrow> [ r, mod m, (t - Suc r) div m ] = [ r, mod m, (t - r) div m - Suc 0 ])
[PROOF STEP]
apply (simp add: mod_0_imp_diff_Suc_div_conv mod_eq_imp_diff_mod_0 iT_iff)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
cd("/home/astupidbear/.julia/v0.4/JuLearn/src/BLAS/examples/mlp/regression")
ENV["JuLearnBackend"]="BLAS"
using JuLearn#,Plots
@Eval lr=0.01 δ=0. momentum=0. batchsize=10 ▽max=0.01 λ=0.0
include("dataProvider.jl")
include("predict.jl")
inputBase,outputBase=dataProvider();
net=chain([:d,:h,:h,:l],[1 1;1 1000;1000 1;1 1],:regression);
@time err=backprop(net,Array(inputBase),Array(outputBase),nIter=1000)
# plot(err)
# plot(inputBase',Array(predict(net,Array(inputBase)))')
# plot!(inputBase',outputBase')
|
[STATEMENT]
lemma lprefixI [consumes 1, case_names lprefix,
case_conclusion lprefix LeLNil LeLCons]:
assumes major: "(xs, ys) \<in> X"
and step:
"\<And>xs ys. (xs, ys) \<in> X
\<Longrightarrow> lnull xs \<or> (\<exists>x xs' ys'. xs = LCons x xs' \<and> ys = LCons x ys' \<and>
((xs', ys') \<in> X \<or> xs' \<sqsubseteq> ys'))"
shows "xs \<sqsubseteq> ys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xs \<sqsubseteq> ys
[PROOF STEP]
using major
[PROOF STATE]
proof (prove)
using this:
(xs, ys) \<in> X
goal (1 subgoal):
1. xs \<sqsubseteq> ys
[PROOF STEP]
by(rule lprefix.coinduct)(auto dest: step) |
%Define
$kw_lexer_class /.NoKWLexer./
$_IDENTIFIER /.0./
%End
%Headers
--
-- Additional methods for the action class not provided in the template
--
/.
export class NoKWLexer
{
public getKeywordKinds() :number[]{ return null; }
public lexer(curtok : number, lasttok : number): number { return 0; }
public setInputChars(inputChars : string) : void{ }
public getKind(c : number) : number{ return 0; }
public NoKWLexer(inputChars : string, identifierKind : number) { }
}
./
%End
%Import
LexerBasicMapF.gi
%End
|
program test_rk4
use numerical_integration
implicit none
integer :: i
integer, parameter :: n=100
real(kind=dp), parameter :: pi = 4.0d0*atan(1.0d0)
real(kind=dp), parameter :: dx = pi/(n-1)
external :: deriv_sine, deriv_sho
real(kind=dp) :: x(n) , y(n, 2)
x(1) = 0.0d0; y(1, :) = [ 1.0d0, 0.0d0]
call rk4(x, y, dx, n, 2, deriv_sho)
do i=1,n
write(*, *) x(i), y(i, 1), y(i, 2)
end do
end program test_rk4
subroutine deriv_sine(x,y,dydx)
use numerical_integration
implicit none
integer, parameter :: n_eq = 1
real(kind=dp), intent(in) :: x, y(n_eq)
real(kind=dp), dimension(n_eq), intent(out) :: dydx
dydx = cos(x)
end subroutine deriv_sine
subroutine deriv_sho(x,y,dydx)
use numerical_integration
implicit none
integer, parameter :: n_eq = 2
real(kind=dp), intent(in) :: x, y(n_eq)
real(kind=dp), dimension(n_eq), intent(out) :: dydx
dydx(1) = y(2)
dydx(2) = -y(1)
end subroutine deriv_sho
|
import data.polynomial
import data.real.basic
import algebra.big_operators
noncomputable theory
open_locale big_operators
open polynomial
-- The basic building block in the Lagrange polynomial
-- this is (x - b)/(a - b)
def scaled_binomial (a b : ℝ) : polynomial ℝ :=
((1 : ℝ) / (a - b)) • (X - C b)
@[simp] lemma scaled_binomial.def (a b : ℝ) :
scaled_binomial a b = ((1 : ℝ) / (a - b)) • (X - C b) := rfl
lemma bin_zero (a b : ℝ) : eval b (scaled_binomial a b) = 0 :=
by rw [scaled_binomial.def, eval_smul, eval_sub, eval_X, eval_C, sub_self, smul_eq_mul, mul_zero]
lemma bin_one (a b : ℝ) (h : a ≠ b) : eval a (scaled_binomial a b) = 1 :=
by rw [scaled_binomial.def, eval_smul, eval_sub, eval_X, eval_C, smul_eq_mul,
div_mul_cancel 1 (sub_ne_zero_of_ne h)]
-- This version, using scalar multiplication (`smul`) seems simplest.
-- Must add hypothesis that data points are distinct
def lagrange_interpolant (n : ℕ) (i : ℕ) (xData : ℕ → ℝ) : polynomial ℝ :=
∏ j in (finset.range (n+1)).erase i, scaled_binomial (xData i) (xData j)
-- This has been PR'd into mathlib as `eval_prod`
lemma eval_finset.prod {ι : Type*} (s : finset ι) (p : ι → polynomial ℝ) (x : ℝ) :
eval x (∏ j in s, p j) = ∏ j in s, eval x (p j) :=
(finset.prod_hom _ _).symm
-- The equivalent of the above for sums
lemma eval_finset.sum {ι : Type*} (s : finset ι) (p : ι → polynomial ℝ) (x : ℝ) :
eval x (∑ j in s, p j) = ∑ j in s, eval x (p j) :=
(finset.sum_hom _ _).symm
-- The Lagrange interpolant `Lᵢ x` is one for `x = xData i`
@[simp]
lemma lagrange_interpolant_one (n : ℕ) (xData : ℕ → ℝ) (i : ℕ)
(hinj : function.injective xData) :
eval (xData i) (lagrange_interpolant n i xData) = (1:ℝ) :=
begin
unfold lagrange_interpolant,
rw eval_finset.prod,
--simp only [bin_one], -- simp fails because the simplifier can't derive `a ≠ b`
exact finset.prod_eq_one (λ j hj, bin_one (xData i) (xData j)
(mt (@hinj i j) (finset.ne_of_mem_erase hj).symm))
end |
## What is a Perceptron
A perceptron is a type of binary classifier. A binary classifier is a function
which can decide whether or not an input, represented by a vector of numbers,
belongs to a specific class.
The perceptron maps its input $x$ (a real-valued vector) to an output value $f(x)$ (a single binary value):
\begin{equation}
f(x) = \chi(\langle w, x \rangle + b)
\end{equation}
where $w$ is the vector of real-valued weights, $\langle \cdot , \cdot \rangle$ is the scalar product,
$b$ is the bias, a constant term that doesn't depend on any input value, and $\chi(x)$ is the output function, also called
the activation function.
The most common choices for $\chi(x)$ are:
- $\chi(x) = sign(x)$
- $\chi(x) = Θ(x)$
- $\chi(x) = x$
where $Θ(x)$ is the Heaviside function.
The perceptron works well when the training set is linearly separable, while when the training set isn't linearly separable
its learning algorithm doesn't terminate: if the vectors are not linearly separable, learning will never reach a point where all vectors are
classified properly. The most famous example of the perceptron's inability to solve problems with non-linearly separable vectors is the
boolean **XOR** problem.
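A minimal sketch of this mapping follows (an illustrative addition: the weights, the bias, and the choice of the boolean AND task are assumptions, picked because AND, unlike XOR, is linearly separable):

```python
import numpy as np

def heaviside(x):
    # The activation χ(x) = Θ(x): 1 for x >= 0, else 0.
    return 1 if x >= 0 else 0

def perceptron(x, w, b):
    # f(x) = χ(<w, x> + b)
    return heaviside(np.dot(w, x) + b)

# Assumed weights and bias realizing boolean AND.
w = np.array([1.0, 1.0])
b = -1.5

for x in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(x, perceptron(np.array(x), w, b))  # prints 0, 0, 0, 1
```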
## How to train the Perceptron
There are various ways to train a perceptron; one of the most efficient (the one used here)
is the **Delta Rule**.
The Delta Rule is a gradient descent learning rule for updating the weights of the inputs of an artificial
neuron in a single-layer neural network. It is a special case of the more general backpropagation algorithm. For
a neuron $j$ with activation function $g(x)$, the delta rule for $j$'s weight $w_{ji}$ is given by:
\begin{equation}
\Delta w_{ji} = \eta (t_j - y_j)g'(h_j)x_i
\end{equation}
where:
- $\eta$ is a small constant, called the learning rate
- $g(x)$ is the neuron activation function
- $g'$ is the derivative of $g$
- $t_j$ is the target output
- $h_j$ is the weighted sum of the neuron's inputs
- $y_j$ is the actual output
- $x_i$ is the ith input
It holds that $h_j = \sum x_i w_{ji}$ and $y_j = g(h_j)$.
The delta rule is commonly stated in a simplified form for a neuron with
a linear activation function as
\begin{equation}
\Delta w_{ji} = \eta (t_j - y_j)x_i
\end{equation}
While the delta rule is similar to the perceptron's training rule, the derivation is different.
If the perceptron uses the Heaviside step function as the activation function, it turns out that
$g(h)$ is not differentiable at zero, which makes the direct application of the delta rule impossible.
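The simplified rule translates almost directly into code. Below is a minimal training sketch (an illustrative addition: the toy data, learning rate, and epoch count are assumptions) for a single neuron with a linear activation, so $g'(h) = 1$:

```python
import numpy as np

rng = np.random.default_rng(0)
eta = 0.1  # learning rate η (assumed value)

# Toy 2D inputs with target t = 1 when x0 + x1 > 1, else 0 (assumed task).
X = rng.uniform(-1.0, 1.0, size=(200, 2))
t = (X.sum(axis=1) > 1.0).astype(float)

w = np.zeros(2)  # weights w_ji
b = 0.0          # bias, updated like a weight on a constant input of 1

for _ in range(50):  # epochs (assumed)
    for x_i, t_i in zip(X, t):
        y_i = np.dot(w, x_i) + b       # y_j = g(h_j) with g(h) = h
        w += eta * (t_i - y_i) * x_i   # Δw_ji = η (t_j − y_j) x_i
        b += eta * (t_i - y_i)
```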
|
import Data.List
import Data.List1
import Data.SortedMap as M
import Data.String.Parser
import System.File
Base : Type
Base = Char
Polymer : Type
Polymer = List1 Base
CountMap : Type
CountMap = SortedMap Base Nat
addCounts : CountMap -> CountMap -> CountMap
addCounts = M.mergeWith (+)
addBase : Base -> CountMap -> CountMap
addBase b = addCounts (M.singleton b 1)
decBase : Base -> CountMap -> CountMap
decBase b m = case M.lookup b m of
Just n => insert b (minus n 1) m
Nothing => m
fromPair : (Base, Base) -> CountMap
fromPair (b1, b2) = addCounts (M.singleton b1 1) (M.singleton b2 1)
Memoize : Type
Memoize = SortedMap ((Base,Base), Nat) CountMap
Rules : Type
Rules = SortedMap (Base, Base) Base
Input : Type
Input = (Polymer, Rules)
ruleParser : Parser ((Base, Base), Base)
ruleParser = do b1 <- letter
b2 <- letter
spaces1
token "->"
to <- letter
pure ((b1, b2), to)
parser : Parser Input
parser = do polymer <- some letter
Just polymer <- pure $ fromList polymer
| Nothing => fail "no polymer"
spaces1
rules <- some (ruleParser <* spaces)
pure (polymer, M.fromList rules)
countBases : Polymer -> CountMap
countBases p = M.fromList $ (\l => (l.head, length $ forget l)) <$> (group $ sort $ forget p)
insertNPair : Nat -> (Base, Base) -> Rules -> Memoize -> (CountMap, Memoize)
insertNPair Z p _ m = (fromPair p, m)
insertNPair (S n) (b1, b2) r m = case M.lookup ((b1, b2), (S n)) m of
Nothing => case M.lookup (b1, b2) r of
Nothing => (fromPair (b1, b2), m)
(Just b) => let (c1, m1) = insertNPair n (b1, b) r m
(c2, m2) = insertNPair n (b, b2) r m1
c3 = decBase b $ addCounts c1 c2 in
(c3, M.insert ((b1, b2), (S n)) c3 m2)
(Just c1) => (c1, m)
insertN : Nat -> Polymer -> Rules -> Memoize -> CountMap
insertN n (b1 ::: []) r m = M.singleton b1 1
insertN n (b1 ::: (b2 :: p)) r m = let (c1, m1) = (insertNPair n (b1, b2) r m)
c2 = insertN n (b2 ::: p) r m1
c3 = addCounts c1 c2 in
decBase b2 c3
subtractQuantity : CountMap -> Maybe Nat
subtractQuantity m = do let l = sort $ values m
high <- last' l
low <- head' l
pure $ minus high low
part1 : Input -> IO String
part1 (p, r) = do let c = insertN 10 p r M.empty
pure $ show $ subtractQuantity c
part2 : Input -> IO String
part2 (p, r) = do let c = insertN 40 p r M.empty
pure $ show $ subtractQuantity c
main : IO ()
main = do Right input <- readFile "input.txt"
| Left err => printLn err
Right (a, _) <- pure $ parse parser input
| Left err => printLn err
part1 a >>= putStrLn
part2 a >>= putStrLn
|
pdf("d:/plot.pdf")
library(MASS)
parcoord(iris[1:4],col=iris$Species)
graphics.off() |
c Subroutine to find dew point from vapor pressure
c AJ_Kettle, Nov7/2017
c from wmo_8_en-2012.pdf pI.4-29
SUBROUTINE dewfrostp_from_vpres_spres(f_ew_hpa,f_p_hpa,
+ s_wetb_iceflag,
+ f_dewp_c)
IMPLICIT NONE
c************************************************************************
REAL :: f_dewp_c
REAL :: f_ew_hpa
REAL :: f_p_hpa
REAL :: f_p_coef
CHARACTER(LEN=1) :: s_wetb_iceflag
c************************************************************************
c Find pressure coefficient
f_p_coef=1.0016+3.15*10.0**(-6.0)*f_p_hpa-0.074/f_p_hpa
IF (s_wetb_iceflag.NE.'E') THEN
f_dewp_c=(243.12*ALOG(f_ew_hpa/(6.112*f_p_coef)))/
+ (17.62-ALOG(f_ew_hpa/(6.112*f_p_coef)))
ENDIF
IF (s_wetb_iceflag.EQ.'E') THEN
f_dewp_c=(272.62*ALOG(f_ew_hpa/(6.112*f_p_coef)))/
+ (22.46-ALOG(f_ew_hpa/(6.112*f_p_coef)))
ENDIF
c f_dewp_c=(243.12*ALOG(f_ew_pa/611.2))/(17.62-ALOG(f_ew_pa/611.2))
c************************************************************************
RETURN
END |
#pragma once
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl3.h>
#include <GLES3/gl3ext.h>
#include <GLES3/gl3platform.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <gsl/gsl>
namespace android::OpenGLHelpers
{
constexpr GLint GetTextureUnit(GLenum texture)
{
return texture - GL_TEXTURE0;
}
GLuint CreateShaderProgram(const char* vertShaderSource, const char* fragShaderSource);
namespace GLTransactions
{
inline auto MakeCurrent(EGLDisplay display, EGLSurface drawSurface, EGLSurface readSurface, EGLContext context)
{
EGLDisplay previousDisplay{ eglGetDisplay(EGL_DEFAULT_DISPLAY) };
EGLSurface previousDrawSurface{ eglGetCurrentSurface(EGL_DRAW) };
EGLSurface previousReadSurface{ eglGetCurrentSurface(EGL_READ) };
EGLContext previousContext{ eglGetCurrentContext() };
eglMakeCurrent(display, drawSurface, readSurface, context);
return gsl::finally([previousDisplay, previousDrawSurface, previousReadSurface, previousContext]() { eglMakeCurrent(previousDisplay, previousDrawSurface, previousReadSurface, previousContext); });
}
}
} |
State Before: l m r : List Char
⊢ extract { data := l ++ m ++ r } { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } = { data := m } State After: l m r : List Char
⊢ (if utf8Len l ≥ utf8Len l + utf8Len m then ""
else { data := extract.go₁ (l ++ m ++ r) 0 { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } }) =
{ data := m } Tactic: simp only [extract] State Before: l m r : List Char
⊢ (if utf8Len l ≥ utf8Len l + utf8Len m then ""
else { data := extract.go₁ (l ++ m ++ r) 0 { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } }) =
{ data := m } State After: case inl
l m r : List Char
h✝ : utf8Len l ≥ utf8Len l + utf8Len m
⊢ "" = { data := m }
case inr
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ { data := extract.go₁ (l ++ m ++ r) 0 { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } } = { data := m } Tactic: split State Before: case inl
l m r : List Char
h✝ : utf8Len l ≥ utf8Len l + utf8Len m
⊢ "" = { data := m } State After: no goals Tactic: next h => rw [utf8Len_eq_zero.1 <| Nat.le_zero.1 <| (Nat.add_le_add_iff_le_left _ _ 0).1 h] State Before: l m r : List Char
h : utf8Len l ≥ utf8Len l + utf8Len m
⊢ "" = { data := m } State After: no goals Tactic: rw [utf8Len_eq_zero.1 <| Nat.le_zero.1 <| (Nat.add_le_add_iff_le_left _ _ 0).1 h] State Before: case inr
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ { data := extract.go₁ (l ++ m ++ r) 0 { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } } = { data := m } State After: case inr.e_data
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ extract.go₁ (l ++ m ++ r) 0 { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } = m Tactic: congr State Before: case inr.e_data
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ extract.go₁ (l ++ m ++ r) 0 { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } = m State After: case inr.e_data
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ extract.go₂ (m ++ r) { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } = m Tactic: rw [List.append_assoc, extract.go₁_append_right _ _ _ _ _ (by rfl)] State Before: case inr.e_data
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ extract.go₂ (m ++ r) { byteIdx := utf8Len l } { byteIdx := utf8Len l + utf8Len m } = m State After: case inr.e_data.a
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ utf8Len l + utf8Len m = utf8Len m + utf8Len l Tactic: apply extract.go₂_append_left State Before: case inr.e_data.a
l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ utf8Len l + utf8Len m = utf8Len m + utf8Len l State After: no goals Tactic: apply Nat.add_comm State Before: l m r : List Char
h✝ : ¬utf8Len l ≥ utf8Len l + utf8Len m
⊢ utf8Len l = utf8Len l + 0.byteIdx State After: no goals Tactic: rfl |
State Before: n : ℕ
⊢ ∀ (x : Tree Unit), x ∈ ↑(treesOfNumNodesEq n) ↔ x ∈ {x | numNodes x = n} State After: no goals Tactic: simp |
\section{Related Work}
\label{sec:related}
%Optimization abstraction:
%- POET (Qing Yi)
%- Apan Qasem's thesis work (Ken Kennedy's student)
%- ...
%
%Automated performance tuning:
%- ATLAS
%- SPIRAL
%- FFTW
%-...
Ideally, a developer should only have to specify a few simple
command-line options and then rely on the compiler to optimize the
performance of an application on any architecture. Compilers alone,
however, cannot fully satisfy the performance needs of scientific
applications. First, compilers must operate in a black-box fashion
and at a very low level, limiting both the type and number of
optimizations that can be done. Second, static analysis of
general-purpose languages, such as C, C++, and Fortran, is necessarily
conservative, thereby precluding many possible optimizations. Third,
in the process of transforming a mathematical model into a computer
program, much potentially useful (for optimization purposes)
information is lost since it cannot be represented by the programming
language. Finally, extensive manual tuning of a code may prevent
certain compiler optimizations and result in worse performance on new
architectures, resulting in loss of performance portability.
%As briefly discussed in Section~\ref{sec:motivation}, performance tuning is
%generally approached in three ways: by performing manual optimizations of key
%portions of the code; by using compiler-based source transformation tools for
%loop optimizations; and by using tuned libraries for key numerical
%algorithms.
%% Libraries
An alternative to manual or automated tuning of application codes is
the use of tuned libraries. The two basic approaches to supplying
high-performance libraries include providing a library of hand-coded
options (e.g., \cite{BLAS,ESSL,Goto:2006fk}) and generating optimized
code automatically for the given problem and machine
parameters. ATLAS~\cite{atlas_sc98,WN147} for BLAS~\cite{BLAS} and
some LAPACK~\cite{laug} routines, OSKI~\cite{OSKI} for sparse linear
algebra, PHiPAC~\cite{bilmes97optimizing} for matrix-matrix products,
and domain-specific libraries such as FFTW~\cite{frigo98} and
SPIRAL~\cite{SPIRAL} are all examples of the latter approach. Most
automatic tuning approaches perform empirical parameter searches on
the target platform. FFTW uses a combination of static models and
empirical techniques to optimize FFTs. SPIRAL generates optimized
digital signal processing libraries by an extensive empirical search
over implementation variants. GotoBLAS~\cite{Goto:2006fk,Goto:fk}, on
the other hand, achieves near-peak performance on several
architectures by using hand-tuned data structures and kernel
operations. These auto- or hand-tuned approaches can deliver
performance that can be five times as fast as that produced by many
optimizing compilers
\cite{WN147}. The library approach, however, is limited by the fact
that optimizations are highly problem- and
machine-dependent. Furthermore, at this time, the functionality of the
currently available automated tuning systems is quite limited.
%% Other annotation-based source transformation approaches
General-purpose tools for optimizing loop performance are also
available. LoopTool~\cite{LoopTool} supports annotation-based loop
fusion, unroll/jamming, skewing and tiling. The Matrix Template
Library \cite{Siek:1998ys} uses template metaprograms to tile at both
the register and cache levels. A new tool, POET~\cite{POET} also
supports a number of loop transformations. POET offers a complex
template-based syntax for defining transformations in a
language-independent manner. Other research efforts whose goal, at
least in part, is to enable optimizations of source code to be
augmented with performance-related information include the X
language~\cite{XLanguage} (a macro C-like language for annotating C
code), the Broadway~\cite{broadway} compiler, telescoping
languages~\cite{telescopingurl,teleoverview,Ken99}, and various
meta-programming
techniques~\cite{veldhuizen95,weise93,kiczales91,chiba95}.
Emerging annotation-based tools are normally designed by compiler researchers
and thus the interfaces are not necessarily based on concepts accessible to
computational scientists. The complexity of existing annotation languages and the lack
of a common syntax for transformations (e.g., loop unrolling) result
in steep learning curves and the inability to take advantage of more than one
approach at a time. Furthermore, at present, there is no good way for
users to learn about the tools available and compare their
capabilities and performance.
|
From st.prelude Require Import generic forall_three.
From st.STLCmuVS Require Import lang typing tactics contexts contexts_subst scopedness.
From st.STLCmu Require Import types.
Definition GammaType (Γ : list type) (τ : type) : type := foldr (TArrow) τ (rev Γ).
Lemma GammaType_snoc Γ τ τr : TArrow τ (GammaType Γ τr) = GammaType (Γ ++ [τ]) τr.
Proof. by rewrite /GammaType rev_unit /=. Qed.
Definition LamGamma_ctx (n : nat) : ctx := replicate n CTX_Lam.
Fixpoint LamGamma (n : nat) (e : expr) : expr :=
match n with
| O => e
| S x => Lam (LamGamma x e)
end.
Lemma fill_LamGamma_ctx (n : nat) (e : expr) : fill_ctx (LamGamma_ctx n) e = LamGamma n e.
Proof. induction n. done. simpl. by rewrite IHn. Qed.
Definition LamGammaV_S (n : nat) (e : expr) : val := LamV (LamGamma n e).
Lemma LamGammaV_S_rw (n : nat) (e : expr) : of_val (LamGammaV_S n e) = LamGamma (S n) e.
Proof. by simpl. Qed.
Lemma LamGamma_ctx_typed (Γ : list type) (τ : type) :
|C> [] ⊢ₙₒ LamGamma_ctx (length Γ) ☾ Γ; τ ☽ : GammaType Γ τ.
Proof.
induction Γ as [|τ' τs IHτs] using rev_ind.
- simpl. by apply TPCTX_nil.
- rewrite /LamGamma app_length /= Nat.add_1_r.
rewrite /=. econstructor.
+ rewrite /GammaType rev_unit. simpl. econstructor.
+ fold (GammaType τs). change [τ'] with ([] ++ [τ']).
by apply typed_ctx_append.
Qed.
Lemma LamGamma_typed e (Γ : list type) (τ : type) (de : typed Γ e τ) :
[] ⊢ₙₒ LamGamma (length Γ) e : GammaType Γ τ.
Proof.
rewrite -fill_LamGamma_ctx. eapply typed_ctx_typed. apply de.
apply LamGamma_ctx_typed.
Qed.
Lemma LamGamma_scoped e n (de : expr_scoped n e) :
expr_scoped 0 (LamGamma n e).
Proof.
rewrite -fill_LamGamma_ctx. eapply scoped_ctx_fill. apply de.
replace n with (length (replicate n TUnit)).
replace 0 with (length ([] : list type)).
apply (ctx_typed_scoped _ TUnit _ (GammaType (replicate n TUnit) TUnit)).
apply LamGamma_ctx_typed. auto. by rewrite replicate_length.
Qed.
Fixpoint AppGamma (F : expr) (es : list expr) : expr :=
match es with
| nil => F
| cons e es' => ((AppGamma F es') e)%Eₙₒ
end.
Definition AppGamma_ectx (es : list expr) : list ectx_item :=
fmap AppLCtx (rev es).
Lemma AppGamma_snoc (F : expr) (es : list expr) (e : expr) :
AppGamma F (es ++ [e]) = AppGamma (F e) es.
Proof.
induction es.
- by simpl.
- simpl. f_equiv. by rewrite IHes.
Qed.
Lemma fill_AppGamma_ectx (F : expr) (es : list expr) : AppGamma F es = fill (AppGamma_ectx es) F.
Proof.
revert F.
induction es using rev_ind.
- done.
- intro F. by rewrite AppGamma_snoc /AppGamma_ectx rev_unit /= (IHes (F x)).
Qed.
Lemma AppGamma_subst (F : expr) (es : list expr) (σ : var → expr) :
(AppGamma F es).[σ] = AppGamma F.[σ] (fmap (subst σ) es).
Proof. induction es. by asimpl. asimpl. by rewrite IHes. Qed.
Fixpoint wrap_funs_vars (F : expr) (k : nat) (fs : list val) : expr :=
match fs with
| nil => F
| cons f' fs' => ((wrap_funs_vars F (S k) fs') (f' (Var $ k)))%Eₙₒ
end.
Lemma wrap_funs_vars_rw (F : expr) (k : nat) (fs : list val) :
wrap_funs_vars F k fs = AppGamma F (imap (fun l f => (App (of_val f) (Var (l + k)))) fs).
Proof.
revert k. induction fs as [|f fs IHfs]. done.
intro k. rewrite /= IHfs (imap_ext _ ((λ (l : nat) (f0 : val), f0 (%(l + k))%Eₙₒ) ∘ S)). done.
intros. simpl. f_equiv. f_equiv. lia.
Qed.
Lemma wrap_funs_vars_subst1 (F : expr) (k : nat) (fs : list val) (Hfs : Forall (fun f => Closed (of_val f)) fs) σ :
(wrap_funs_vars F k fs).[upn (length fs + k) σ] = wrap_funs_vars F.[upn (length fs + k) σ] k fs.
Proof.
rewrite !wrap_funs_vars_rw AppGamma_subst fmap_imap. f_equiv.
apply imap_ext. intros i f lkp. asimpl. rewrite (upn_lt (i + k) (length fs + k)).
by rewrite (Forall_lookup_1 _ _ _ _ Hfs lkp).
assert (i < length fs) by by eapply lookup_lt_Some. lia.
Qed.
Lemma wrap_funs_vars_subst1' l (F : expr) (fs : list val) (Hfs : Forall (fun f => Closed (of_val f)) fs) σ :
length fs = l → (wrap_funs_vars F 0 fs).[upn l σ] = wrap_funs_vars F.[upn l σ] 0 fs.
Proof. intros <-. replace (length fs) with (length fs + 0) by lia. by apply wrap_funs_vars_subst1. Qed.
(* Fixpoint wrap_funs_vals' (F : expr) (fs vs : list val) (H : length fs = length vs) : expr := *)
(* match (fs , vs) with *)
(* | nil, nil => F *)
(* | cons f fvs => ((wrap_funs_vals F fvs) (f v))%Eₙₒ *)
(* end. *)
Fixpoint wrap_funs_vals (F : expr) (fvs : list (val * val)) : expr :=
match fvs with
| nil => F
| cons (f, v) fvs => ((wrap_funs_vals F fvs) (f v))%Eₙₒ
end.
Lemma wrap_funs_vals_rw (F : expr) (fvs : list (val * val)) :
wrap_funs_vals F fvs = AppGamma F (fmap (fun fv => (App (of_val fv.1) (of_val fv.2))) fvs).
Proof.
induction fvs as [|[f v] fvs IHfs]. done.
by rewrite /= IHfs.
Qed.
Lemma wrap_funs_vals_snoc (F : expr) (fvs : list (val * val)) (f v : val) :
wrap_funs_vals F (fvs ++ [(f, v)]) = wrap_funs_vals (F (f v)) fvs.
Proof. by rewrite !wrap_funs_vals_rw fmap_app AppGamma_snoc. Qed.
Lemma wrap_funs_vars_subst2 (F : expr) (HF : Closed F) (fvs : list (val * val)) (H : Forall (fun f => Closed (of_val f)) fvs.*1) :
(wrap_funs_vars F 0 fvs.*1).[subst_list_val fvs.*2] = wrap_funs_vals F fvs.
Proof.
rewrite !wrap_funs_vars_rw wrap_funs_vals_rw AppGamma_subst fmap_imap HF. f_equiv.
rewrite imap_fmap. rewrite -(imap_const (λ fv : val * val, fv.1 fv.2) fvs).
apply imap_ext. intros i (f, v) lkp. simpl.
assert (lkp1 : fvs.*1 !! i = Some f) by by rewrite list_lookup_fmap lkp.
assert (lkp2 : fvs.*2 !! i = Some v) by by rewrite list_lookup_fmap lkp.
rewrite (Forall_lookup_1 _ _ _ _ H lkp1) Nat.add_0_r.
replace (subst_list_val fvs.*2 i) with (ids i).[subst_list_val fvs.*2] by by asimpl. by rewrite (Var_subst_list_val_lookup _ _ _ lkp2).
Qed.
Lemma wrap_funs_vars_subst2' (F : expr) (HF : Closed F) (fs vs : list val) (Hl : length fs = length vs) (H : Forall (fun f => Closed (of_val f)) fs) :
(wrap_funs_vars F 0 fs).[subst_list_val vs] = wrap_funs_vals F (zip fs vs).
Proof.
rewrite <- (snd_zip fs vs) at 1 by lia. rewrite <- (fst_zip fs vs) at 2 by lia.
apply wrap_funs_vars_subst2; auto. rewrite fst_zip; auto; lia.
Qed.
Lemma wrap_funs_vars_typed_help (fs : list val) τr Γ Γ' (H : Forall3 (fun τ' f τ => ∀Γ, Γ ⊢ₙₒ (of_val f) : TArrow τ' τ) Γ' fs Γ) :
∀ k, k ≤ length fs → Γ' ++ [GammaType Γ τr] ⊢ₙₒ wrap_funs_vars (Var (length Γ')) (length fs - k) (drop (length fs - k) fs) : GammaType (take (length Γ - k) Γ) τr.
Proof.
induction k.
- rewrite !Nat.sub_0_r drop_all firstn_all /=.
constructor. rewrite lookup_app_r; try lia. by rewrite Nat.sub_diag.
- destruct (drop (length fs - S k) fs) as [|f rem] eqn:eq.
+ (* exfalso *) destruct fs as [|f' fs'].
* simpl. intro abs. exfalso. lia.
* intro plt. exfalso.
assert (abs : (rev (drop (length (rev (f' :: fs')) - S k) (rev (f' :: fs')))) = []).
{ apply nil_length_inv. rewrite rev_length. rewrite drop_length.
assert (length (drop (length (f' :: fs') - S k) (f' :: fs')) = 0) by by rewrite eq /=.
rewrite drop_length in H0. by rewrite rev_length. }
rewrite -firstn_rev rev_involutive /= in abs. inversion abs.
+ intro plt. specialize (IHk ltac:(lia)). assert (lkp : fs !! (length fs - S k) = Some f).
{ rewrite <- (take_drop (length fs - S k) fs) at 2. rewrite lookup_app_r.
rewrite take_length_le; try lia. rewrite eq. by replace (length fs - S k - (length fs - S k)) with 0 by lia.
rewrite take_length_le; lia. }
simpl. replace (S (length fs - S k)) with (length fs - k) by lia.
destruct (Forall3_lookup_m _ _ _ _ _ _ H lkp) as (τ' & τ & eq1 & eq2 & P).
apply App_typed with (τ1 := τ).
* assert (rem = (drop (length fs - k) fs)) as ->.
{ replace rem with (drop 1 (f :: rem)) by by rewrite /= drop_0.
rewrite -eq drop_drop. by replace (length fs - S k + 1) with (length fs - k) by lia. }
assert (TArrow τ (GammaType (take (length Γ - S k) Γ) τr) = GammaType (take (length Γ - k) Γ) τr) as ->.
{ assert (take (length Γ - k) Γ = take (length Γ - S k) Γ ++ [τ]) as ->.
rewrite -(Forall3_length_lm _ _ _ _ H) (Forall3_length_lr _ _ _ _ H) in eq2.
rewrite <- (take_drop_middle _ _ _ eq2) at 2. rewrite firstn_app take_take.
replace ((length Γ - k) `min` (length Γ - S k)) with (length Γ - S k) by lia.
replace (length Γ - k - length (take (length Γ - S k) Γ)) with 1. by rewrite /= take_0.
rewrite take_length_le; try lia.
rewrite -(Forall3_length_lm _ _ _ _ H) (Forall3_length_lr _ _ _ _ H) in plt. lia.
by apply GammaType_snoc.
}
apply IHk.
* apply App_typed with (τ1 := τ'). apply P. constructor. by apply lookup_app_l_Some.
Qed.
Lemma wrap_funs_vars_typed fs τr Γ Γ' (H : Forall3 (fun τ' f τ => ∀Γ, Γ ⊢ₙₒ of_val f : TArrow τ' τ) Γ' fs Γ) :
Γ' ++ [GammaType Γ τr] ⊢ₙₒ wrap_funs_vars (Var (length Γ')) 0 fs : τr.
Proof.
rewrite <- (drop_0 fs).
replace 0 with (length fs - length fs) by lia.
replace τr with (GammaType (take (length Γ - length fs) Γ) τr) at 2.
apply (wrap_funs_vars_typed_help fs τr Γ Γ' H). lia.
by rewrite -(Forall3_length_lm _ _ _ _ H) (Forall3_length_lr _ _ _ _ H) -minus_diag_reverse /= take_0.
Qed.
Lemma wrap_funs_vals_eval_ctx (e1 e2 : expr) (fvs : list (val * val)) (H : rtc STLCmuVS_step e1 e2) :
rtc STLCmuVS_step (wrap_funs_vals e1 fvs) (wrap_funs_vals e2 fvs).
Proof.
induction fvs as [|[f v] fvs IH]; first done.
by apply (rtc_STLCmuVS_step_ctx (fill [AppLCtx _])).
Qed.
Lemma wrap_funs_vals_eval (fvs : list (val * val)) (vs' : list val) (H : Forall2 (fun fv v' => rtc STLCmuVS_step (of_val fv.1 (of_val fv.2)) (of_val v')) fvs vs') :
∀ e, let STLCmuVSgam : expr := (LamGamma (length fvs) e) in
rtc STLCmuVS_step (wrap_funs_vals STLCmuVSgam fvs) (e.[subst_list_val vs']).
Proof.
revert H. revert vs'. induction fvs as [|[f v] fvs IHfvs] using rev_ind.
- intros vs' H; destruct vs' as [|v' vs' _] using rev_ind.
+ intros e. by asimpl.
+ exfalso.
assert (abs : length ([] : list val) = length (vs' ++ [v'])).
by rewrite -(Forall2_length _ _ _ H). rewrite /= app_length /= in abs. lia.
- intros ws' I. destruct ((iffLR (Forall2_app_inv_l _ _ _ _)) I) as (vs' & v' & H & P & ->). clear I.
destruct v' as [|v' empty]; first by inversion P.
assert (empty = []) as ->. { apply nil_length_inv. cut (length (v :: empty) = length [(f,v)]); auto. by rewrite (Forall2_length _ _ _ P). }
inversion_clear P. clear H1. rename H0 into P. simpl in P.
(* okay *)
rewrite !app_length !Nat.add_1_r. intros e.
rewrite /= wrap_funs_vals_snoc. fold (LamGamma (length fvs)).
change (Lam ?e) with (of_val $ LamV e).
apply rtc_transitive with (y := wrap_funs_vals (LamGamma (length fvs) e).[of_val v'/] fvs).
apply wrap_funs_vals_eval_ctx.
eapply rtc_transitive. apply (rtc_STLCmuVS_step_ctx (fill [AppRCtx _])). apply P. simpl. apply rtc_once. apply head_prim_step. auto_head_step.
rewrite -fill_LamGamma_ctx subst_fill_ctx.
assert (scp : |sC> 0 ⊢ₙₒ (LamGamma_ctx (length fvs)) ☾ length fvs ☽).
{ change 0 with (length ([] : list type)).
rewrite <- (replicate_length (length fvs) TUnit).
eapply (ctx_typed_scoped _ TUnit _ _).
apply (LamGamma_ctx_typed (replicate (length fvs) TUnit) TUnit).
}
rewrite (subst_closed_ctx _ _ _ scp) (subst_closed_ctx_cont _ _ _ scp).
rewrite subst_list_val_snoc.
replace e.[upn (length vs') (of_val v' .: ids) >> subst_list_val vs'] with e.[upn (length vs') (of_val v' .: ids)].[subst_list_val vs'] by by asimpl.
rewrite -(Forall2_length _ _ _ H).
specialize (IHfvs vs' H e.[upn (length fvs) ((of_val v') .: ids)]). simpl in IHfvs. rewrite fill_LamGamma_ctx. apply IHfvs.
Qed.
Lemma wrap_funs_vals_eval' (vs' : list val) e (fs vs : list val) l (Hl : l = (length fs)) (Hfs : Forall (fun f => Closed (of_val f)) fs)
(H : Forall3 (fun f v v' => rtc STLCmuVS_step (of_val f (of_val v)) (of_val v')) fs vs vs') :
let STLCmuVSgam : expr := (LamGamma l e) in
rtc STLCmuVS_step (wrap_funs_vals STLCmuVSgam (zip fs vs)) (e.[subst_list_val vs']).
Proof.
rewrite Hl.
assert (length fs = length (zip fs vs)) as ->.
{ rewrite <- (fst_zip fs vs) at 1. by rewrite fmap_length.
rewrite (Forall3_length_lm _ _ _ _ H). lia.
}
apply wrap_funs_vals_eval.
pose proof (Forall3_Forall2 _ _ _ _ H) as H'.
apply (Forall2_impl _ _ _ _ H'). by intros (a, b) c.
Qed.
Lemma zip_snoc {A B} (ls : list A) l (ks : list B) k : length ls = length ks → zip (ls ++ [l]) (ks ++ [k]) = zip ls ks ++ [(l,k)].
Proof.
revert ks.
induction ls as [|l0 ls].
- intros ks Hl; destruct ks;[|inversion Hl]. by simpl.
- intros ks Hl; destruct ks as [|k0 ks];[inversion Hl|]. simpl in *.
rewrite IHls; auto.
Qed.
|
(* Title: HOL/Library/BigO.thy
Authors: Jeremy Avigad and Kevin Donnelly
*)
section {* Big O notation *}
theory BigO
imports Complex_Main Function_Algebras Set_Algebras
begin
text {*
This library is designed to support asymptotic ``big O'' calculations,
i.e.~reasoning with expressions of the form $f = O(g)$ and $f = g +
O(h)$. An earlier version of this library is described in detail in
@{cite "Avigad-Donnelly"}.
The main changes in this version are as follows:
\begin{itemize}
\item We have eliminated the @{text O} operator on sets. (Most uses of this seem
to be inessential.)
\item We no longer use @{text "+"} as output syntax for @{text "+o"}
\item Lemmas involving @{text "sumr"} have been replaced by more general lemmas
involving @{text "setsum"}.
\item The library has been expanded, with e.g.~support for expressions of
the form @{text "f < g + O(h)"}.
\end{itemize}
Note also that, since the Big O library includes rules that demonstrate set
inclusion, to use the automated reasoners effectively with the library
one should redeclare the theorem @{text "subsetI"} as an intro rule,
rather than as an @{text "intro!"} rule, for example, using
\isa{\isakeyword{declare}}~@{text "subsetI [del, intro]"}.
*}
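
text {*
  A small worked illustration (an addition, stated as a consequence of the
  definitions below): unfolding @{text bigo} together with the @{text "+o"}
  operation, the statement $f = g + O(h)$ amounts to the existence of a
  constant $c$ such that $\forall x.\; |f\,x - g\,x| \le c \cdot |h\,x|$.
*}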
subsection {* Definitions *}
definition bigo :: "('a \<Rightarrow> 'b::linordered_idom) \<Rightarrow> ('a \<Rightarrow> 'b) set" ("(1O'(_'))")
where "O(f:: 'a \<Rightarrow> 'b) = {h. \<exists>c. \<forall>x. abs (h x) \<le> c * abs (f x)}"
lemma bigo_pos_const:
"(\<exists>c::'a::linordered_idom. \<forall>x. abs (h x) \<le> c * abs (f x)) \<longleftrightarrow>
(\<exists>c. 0 < c \<and> (\<forall>x. abs (h x) \<le> c * abs (f x)))"
apply auto
apply (case_tac "c = 0")
apply simp
apply (rule_tac x = "1" in exI)
apply simp
apply (rule_tac x = "abs c" in exI)
apply auto
apply (subgoal_tac "c * abs (f x) \<le> abs c * abs (f x)")
apply (erule_tac x = x in allE)
apply force
apply (rule mult_right_mono)
apply (rule abs_ge_self)
apply (rule abs_ge_zero)
done
lemma bigo_alt_def: "O(f) = {h. \<exists>c. 0 < c \<and> (\<forall>x. abs (h x) \<le> c * abs (f x))}"
by (auto simp add: bigo_def bigo_pos_const)
lemma bigo_elt_subset [intro]: "f \<in> O(g) \<Longrightarrow> O(f) \<le> O(g)"
apply (auto simp add: bigo_alt_def)
apply (rule_tac x = "ca * c" in exI)
apply (rule conjI)
apply simp
apply (rule allI)
apply (drule_tac x = "xa" in spec)+
apply (subgoal_tac "ca * abs (f xa) \<le> ca * (c * abs (g xa))")
apply (erule order_trans)
apply (simp add: ac_simps)
apply (rule mult_left_mono, assumption)
apply (rule order_less_imp_le, assumption)
done
lemma bigo_refl [intro]: "f \<in> O(f)"
apply(auto simp add: bigo_def)
apply(rule_tac x = 1 in exI)
apply simp
done
lemma bigo_zero: "0 \<in> O(g)"
apply (auto simp add: bigo_def func_zero)
apply (rule_tac x = 0 in exI)
apply auto
done
lemma bigo_zero2: "O(\<lambda>x. 0) = {\<lambda>x. 0}"
by (auto simp add: bigo_def)
lemma bigo_plus_self_subset [intro]: "O(f) + O(f) \<subseteq> O(f)"
apply (auto simp add: bigo_alt_def set_plus_def)
apply (rule_tac x = "c + ca" in exI)
apply auto
apply (simp add: ring_distribs func_plus)
apply (rule order_trans)
apply (rule abs_triangle_ineq)
apply (rule add_mono)
apply force
apply force
done
lemma bigo_plus_idemp [simp]: "O(f) + O(f) = O(f)"
apply (rule equalityI)
apply (rule bigo_plus_self_subset)
apply (rule set_zero_plus2)
apply (rule bigo_zero)
done
lemma bigo_plus_subset [intro]: "O(f + g) \<subseteq> O(f) + O(g)"
apply (rule subsetI)
apply (auto simp add: bigo_def bigo_pos_const func_plus set_plus_def)
apply (subst bigo_pos_const [symmetric])+
apply (rule_tac x = "\<lambda>n. if abs (g n) \<le> (abs (f n)) then x n else 0" in exI)
apply (rule conjI)
apply (rule_tac x = "c + c" in exI)
apply (clarsimp)
apply (subgoal_tac "c * abs (f xa + g xa) \<le> (c + c) * abs (f xa)")
apply (erule_tac x = xa in allE)
apply (erule order_trans)
apply (simp)
apply (subgoal_tac "c * abs (f xa + g xa) \<le> c * (abs (f xa) + abs (g xa))")
apply (erule order_trans)
apply (simp add: ring_distribs)
apply (rule mult_left_mono)
apply (simp add: abs_triangle_ineq)
apply (simp add: order_less_le)
apply (rule_tac x = "\<lambda>n. if (abs (f n)) < abs (g n) then x n else 0" in exI)
apply (rule conjI)
apply (rule_tac x = "c + c" in exI)
apply auto
apply (subgoal_tac "c * abs (f xa + g xa) \<le> (c + c) * abs (g xa)")
apply (erule_tac x = xa in allE)
apply (erule order_trans)
apply simp
apply (subgoal_tac "c * abs (f xa + g xa) \<le> c * (abs (f xa) + abs (g xa))")
apply (erule order_trans)
apply (simp add: ring_distribs)
apply (rule mult_left_mono)
apply (rule abs_triangle_ineq)
apply (simp add: order_less_le)
done
lemma bigo_plus_subset2 [intro]: "A \<subseteq> O(f) \<Longrightarrow> B \<subseteq> O(f) \<Longrightarrow> A + B \<subseteq> O(f)"
apply (subgoal_tac "A + B \<subseteq> O(f) + O(f)")
apply (erule order_trans)
apply simp
apply (auto del: subsetI simp del: bigo_plus_idemp)
done
lemma bigo_plus_eq: "\<forall>x. 0 \<le> f x \<Longrightarrow> \<forall>x. 0 \<le> g x \<Longrightarrow> O(f + g) = O(f) + O(g)"
apply (rule equalityI)
apply (rule bigo_plus_subset)
apply (simp add: bigo_alt_def set_plus_def func_plus)
apply clarify
apply (rule_tac x = "max c ca" in exI)
apply (rule conjI)
apply (subgoal_tac "c \<le> max c ca")
apply (erule order_less_le_trans)
apply assumption
apply (rule max.cobounded1)
apply clarify
apply (drule_tac x = "xa" in spec)+
apply (subgoal_tac "0 \<le> f xa + g xa")
apply (simp add: ring_distribs)
apply (subgoal_tac "abs (a xa + b xa) \<le> abs (a xa) + abs (b xa)")
apply (subgoal_tac "abs (a xa) + abs (b xa) \<le> max c ca * f xa + max c ca * g xa")
apply force
apply (rule add_mono)
apply (subgoal_tac "c * f xa \<le> max c ca * f xa")
apply force
apply (rule mult_right_mono)
apply (rule max.cobounded1)
apply assumption
apply (subgoal_tac "ca * g xa \<le> max c ca * g xa")
apply force
apply (rule mult_right_mono)
apply (rule max.cobounded2)
apply assumption
apply (rule abs_triangle_ineq)
apply (rule add_nonneg_nonneg)
apply assumption+
done
lemma bigo_bounded_alt: "\<forall>x. 0 \<le> f x \<Longrightarrow> \<forall>x. f x \<le> c * g x \<Longrightarrow> f \<in> O(g)"
apply (auto simp add: bigo_def)
apply (rule_tac x = "abs c" in exI)
apply auto
apply (drule_tac x = x in spec)+
apply (simp add: abs_mult [symmetric])
done
lemma bigo_bounded: "\<forall>x. 0 \<le> f x \<Longrightarrow> \<forall>x. f x \<le> g x \<Longrightarrow> f \<in> O(g)"
apply (erule bigo_bounded_alt [of f 1 g])
apply simp
done
lemma bigo_bounded2: "\<forall>x. lb x \<le> f x \<Longrightarrow> \<forall>x. f x \<le> lb x + g x \<Longrightarrow> f \<in> lb +o O(g)"
apply (rule set_minus_imp_plus)
apply (rule bigo_bounded)
apply (auto simp add: fun_Compl_def func_plus)
apply (drule_tac x = x in spec)+
apply force
apply (drule_tac x = x in spec)+
apply force
done
lemma bigo_abs: "(\<lambda>x. abs (f x)) =o O(f)"
apply (unfold bigo_def)
apply auto
apply (rule_tac x = 1 in exI)
apply auto
done
lemma bigo_abs2: "f =o O(\<lambda>x. abs (f x))"
apply (unfold bigo_def)
apply auto
apply (rule_tac x = 1 in exI)
apply auto
done
lemma bigo_abs3: "O(f) = O(\<lambda>x. abs (f x))"
apply (rule equalityI)
apply (rule bigo_elt_subset)
apply (rule bigo_abs2)
apply (rule bigo_elt_subset)
apply (rule bigo_abs)
done
lemma bigo_abs4: "f =o g +o O(h) \<Longrightarrow> (\<lambda>x. abs (f x)) =o (\<lambda>x. abs (g x)) +o O(h)"
apply (drule set_plus_imp_minus)
apply (rule set_minus_imp_plus)
apply (subst fun_diff_def)
proof -
assume a: "f - g \<in> O(h)"
have "(\<lambda>x. abs (f x) - abs (g x)) =o O(\<lambda>x. abs (abs (f x) - abs (g x)))"
by (rule bigo_abs2)
also have "\<dots> \<subseteq> O(\<lambda>x. abs (f x - g x))"
apply (rule bigo_elt_subset)
apply (rule bigo_bounded)
apply force
apply (rule allI)
apply (rule abs_triangle_ineq3)
done
also have "\<dots> \<subseteq> O(f - g)"
apply (rule bigo_elt_subset)
apply (subst fun_diff_def)
apply (rule bigo_abs)
done
also from a have "\<dots> \<subseteq> O(h)"
by (rule bigo_elt_subset)
finally show "(\<lambda>x. abs (f x) - abs (g x)) \<in> O(h)".
qed
lemma bigo_abs5: "f =o O(g) \<Longrightarrow> (\<lambda>x. abs (f x)) =o O(g)"
by (unfold bigo_def, auto)
lemma bigo_elt_subset2 [intro]: "f \<in> g +o O(h) \<Longrightarrow> O(f) \<subseteq> O(g) + O(h)"
proof -
assume "f \<in> g +o O(h)"
also have "\<dots> \<subseteq> O(g) + O(h)"
by (auto del: subsetI)
also have "\<dots> = O(\<lambda>x. abs (g x)) + O(\<lambda>x. abs (h x))"
apply (subst bigo_abs3 [symmetric])+
apply (rule refl)
done
also have "\<dots> = O((\<lambda>x. abs (g x)) + (\<lambda>x. abs (h x)))"
by (rule bigo_plus_eq [symmetric]) auto
finally have "f \<in> \<dots>" .
then have "O(f) \<subseteq> \<dots>"
by (elim bigo_elt_subset)
also have "\<dots> = O(\<lambda>x. abs (g x)) + O(\<lambda>x. abs (h x))"
by (rule bigo_plus_eq, auto)
finally show ?thesis
by (simp add: bigo_abs3 [symmetric])
qed
lemma bigo_mult [intro]: "O(f)*O(g) \<subseteq> O(f * g)"
apply (rule subsetI)
apply (subst bigo_def)
apply (auto simp add: bigo_alt_def set_times_def func_times)
apply (rule_tac x = "c * ca" in exI)
apply (rule allI)
apply (erule_tac x = x in allE)+
apply (subgoal_tac "c * ca * abs (f x * g x) = (c * abs (f x)) * (ca * abs (g x))")
apply (erule ssubst)
apply (subst abs_mult)
apply (rule mult_mono)
apply assumption+
apply auto
apply (simp add: ac_simps abs_mult)
done
lemma bigo_mult2 [intro]: "f *o O(g) \<subseteq> O(f * g)"
apply (auto simp add: bigo_def elt_set_times_def func_times abs_mult)
apply (rule_tac x = c in exI)
apply auto
apply (drule_tac x = x in spec)
apply (subgoal_tac "abs (f x) * abs (b x) \<le> abs (f x) * (c * abs (g x))")
apply (force simp add: ac_simps)
apply (rule mult_left_mono, assumption)
apply (rule abs_ge_zero)
done
lemma bigo_mult3: "f \<in> O(h) \<Longrightarrow> g \<in> O(j) \<Longrightarrow> f * g \<in> O(h * j)"
apply (rule subsetD)
apply (rule bigo_mult)
apply (erule set_times_intro, assumption)
done
lemma bigo_mult4 [intro]: "f \<in> k +o O(h) \<Longrightarrow> g * f \<in> (g * k) +o O(g * h)"
apply (drule set_plus_imp_minus)
apply (rule set_minus_imp_plus)
apply (drule bigo_mult3 [where g = g and j = g])
apply (auto simp add: algebra_simps)
done
lemma bigo_mult5:
fixes f :: "'a \<Rightarrow> 'b::linordered_field"
assumes "\<forall>x. f x \<noteq> 0"
shows "O(f * g) \<subseteq> f *o O(g)"
proof
fix h
assume "h \<in> O(f * g)"
then have "(\<lambda>x. 1 / (f x)) * h \<in> (\<lambda>x. 1 / f x) *o O(f * g)"
by auto
also have "\<dots> \<subseteq> O((\<lambda>x. 1 / f x) * (f * g))"
by (rule bigo_mult2)
also have "(\<lambda>x. 1 / f x) * (f * g) = g"
apply (simp add: func_times)
apply (rule ext)
apply (simp add: assms nonzero_divide_eq_eq ac_simps)
done
finally have "(\<lambda>x. (1::'b) / f x) * h \<in> O(g)" .
then have "f * ((\<lambda>x. (1::'b) / f x) * h) \<in> f *o O(g)"
by auto
also have "f * ((\<lambda>x. (1::'b) / f x) * h) = h"
apply (simp add: func_times)
apply (rule ext)
apply (simp add: assms nonzero_divide_eq_eq ac_simps)
done
finally show "h \<in> f *o O(g)" .
qed
lemma bigo_mult6:
fixes f :: "'a \<Rightarrow> 'b::linordered_field"
shows "\<forall>x. f x \<noteq> 0 \<Longrightarrow> O(f * g) = f *o O(g)"
apply (rule equalityI)
apply (erule bigo_mult5)
apply (rule bigo_mult2)
done
lemma bigo_mult7:
fixes f :: "'a \<Rightarrow> 'b::linordered_field"
shows "\<forall>x. f x \<noteq> 0 \<Longrightarrow> O(f * g) \<subseteq> O(f) * O(g)"
apply (subst bigo_mult6)
apply assumption
apply (rule set_times_mono3)
apply (rule bigo_refl)
done
lemma bigo_mult8:
fixes f :: "'a \<Rightarrow> 'b::linordered_field"
shows "\<forall>x. f x \<noteq> 0 \<Longrightarrow> O(f * g) = O(f) * O(g)"
apply (rule equalityI)
apply (erule bigo_mult7)
apply (rule bigo_mult)
done
lemma bigo_minus [intro]: "f \<in> O(g) \<Longrightarrow> - f \<in> O(g)"
by (auto simp add: bigo_def fun_Compl_def)
lemma bigo_minus2: "f \<in> g +o O(h) \<Longrightarrow> - f \<in> -g +o O(h)"
apply (rule set_minus_imp_plus)
apply (drule set_plus_imp_minus)
apply (drule bigo_minus)
apply simp
done
lemma bigo_minus3: "O(- f) = O(f)"
by (auto simp add: bigo_def fun_Compl_def)
lemma bigo_plus_absorb_lemma1: "f \<in> O(g) \<Longrightarrow> f +o O(g) \<subseteq> O(g)"
proof -
assume a: "f \<in> O(g)"
show "f +o O(g) \<subseteq> O(g)"
proof -
have "f \<in> O(f)" by auto
then have "f +o O(g) \<subseteq> O(f) + O(g)"
by (auto del: subsetI)
also have "\<dots> \<subseteq> O(g) + O(g)"
proof -
from a have "O(f) \<subseteq> O(g)" by (auto del: subsetI)
then show ?thesis by (auto del: subsetI)
qed
also have "\<dots> \<subseteq> O(g)" by simp
finally show ?thesis .
qed
qed
lemma bigo_plus_absorb_lemma2: "f \<in> O(g) \<Longrightarrow> O(g) \<subseteq> f +o O(g)"
proof -
assume a: "f \<in> O(g)"
show "O(g) \<subseteq> f +o O(g)"
proof -
from a have "- f \<in> O(g)"
by auto
then have "- f +o O(g) \<subseteq> O(g)"
by (elim bigo_plus_absorb_lemma1)
then have "f +o (- f +o O(g)) \<subseteq> f +o O(g)"
by auto
also have "f +o (- f +o O(g)) = O(g)"
by (simp add: set_plus_rearranges)
finally show ?thesis .
qed
qed
lemma bigo_plus_absorb [simp]: "f \<in> O(g) \<Longrightarrow> f +o O(g) = O(g)"
apply (rule equalityI)
apply (erule bigo_plus_absorb_lemma1)
apply (erule bigo_plus_absorb_lemma2)
done
lemma bigo_plus_absorb2 [intro]: "f \<in> O(g) \<Longrightarrow> A \<subseteq> O(g) \<Longrightarrow> f +o A \<subseteq> O(g)"
apply (subgoal_tac "f +o A \<subseteq> f +o O(g)")
apply force+
done
lemma bigo_add_commute_imp: "f \<in> g +o O(h) \<Longrightarrow> g \<in> f +o O(h)"
apply (subst set_minus_plus [symmetric])
apply (subgoal_tac "g - f = - (f - g)")
apply (erule ssubst)
apply (rule bigo_minus)
apply (subst set_minus_plus)
apply assumption
apply (simp add: ac_simps)
done
lemma bigo_add_commute: "f \<in> g +o O(h) \<longleftrightarrow> g \<in> f +o O(h)"
apply (rule iffI)
apply (erule bigo_add_commute_imp)+
done
lemma bigo_const1: "(\<lambda>x. c) \<in> O(\<lambda>x. 1)"
by (auto simp add: bigo_def ac_simps)
lemma bigo_const2 [intro]: "O(\<lambda>x. c) \<subseteq> O(\<lambda>x. 1)"
apply (rule bigo_elt_subset)
apply (rule bigo_const1)
done
lemma bigo_const3:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. 1) \<in> O(\<lambda>x. c)"
apply (simp add: bigo_def)
apply (rule_tac x = "abs (inverse c)" in exI)
apply (simp add: abs_mult [symmetric])
done
lemma bigo_const4:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> O(\<lambda>x. 1) \<subseteq> O(\<lambda>x. c)"
apply (rule bigo_elt_subset)
apply (rule bigo_const3)
apply assumption
done
lemma bigo_const [simp]:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> O(\<lambda>x. c) = O(\<lambda>x. 1)"
apply (rule equalityI)
apply (rule bigo_const2)
apply (rule bigo_const4)
apply assumption
done
lemma bigo_const_mult1: "(\<lambda>x. c * f x) \<in> O(f)"
apply (simp add: bigo_def)
apply (rule_tac x = "abs c" in exI)
apply (auto simp add: abs_mult [symmetric])
done
lemma bigo_const_mult2: "O(\<lambda>x. c * f x) \<subseteq> O(f)"
apply (rule bigo_elt_subset)
apply (rule bigo_const_mult1)
done
lemma bigo_const_mult3:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> f \<in> O(\<lambda>x. c * f x)"
apply (simp add: bigo_def)
apply (rule_tac x = "abs (inverse c)" in exI)
apply (simp add: abs_mult [symmetric] mult.assoc [symmetric])
done
lemma bigo_const_mult4:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> O(f) \<subseteq> O(\<lambda>x. c * f x)"
apply (rule bigo_elt_subset)
apply (rule bigo_const_mult3)
apply assumption
done
lemma bigo_const_mult [simp]:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> O(\<lambda>x. c * f x) = O(f)"
apply (rule equalityI)
apply (rule bigo_const_mult2)
apply (erule bigo_const_mult4)
done
lemma bigo_const_mult5 [simp]:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. c) *o O(f) = O(f)"
apply (auto del: subsetI)
apply (rule order_trans)
apply (rule bigo_mult2)
apply (simp add: func_times)
apply (auto intro!: simp add: bigo_def elt_set_times_def func_times)
apply (rule_tac x = "\<lambda>y. inverse c * x y" in exI)
apply (simp add: mult.assoc [symmetric] abs_mult)
apply (rule_tac x = "abs (inverse c) * ca" in exI)
apply (rule allI)
apply (subst mult.assoc)
apply (rule mult_left_mono)
apply (erule spec)
apply force
done
lemma bigo_const_mult6 [intro]: "(\<lambda>x. c) *o O(f) \<subseteq> O(f)"
apply (auto intro!: simp add: bigo_def elt_set_times_def func_times)
apply (rule_tac x = "ca * abs c" in exI)
apply (rule allI)
apply (subgoal_tac "ca * abs c * abs (f x) = abs c * (ca * abs (f x))")
apply (erule ssubst)
apply (subst abs_mult)
apply (rule mult_left_mono)
apply (erule spec)
apply simp
apply(simp add: ac_simps)
done
lemma bigo_const_mult7 [intro]: "f =o O(g) \<Longrightarrow> (\<lambda>x. c * f x) =o O(g)"
proof -
assume "f =o O(g)"
then have "(\<lambda>x. c) * f =o (\<lambda>x. c) *o O(g)"
by auto
also have "(\<lambda>x. c) * f = (\<lambda>x. c * f x)"
by (simp add: func_times)
also have "(\<lambda>x. c) *o O(g) \<subseteq> O(g)"
by (auto del: subsetI)
finally show ?thesis .
qed
lemma bigo_compose1: "f =o O(g) \<Longrightarrow> (\<lambda>x. f (k x)) =o O(\<lambda>x. g (k x))"
unfolding bigo_def by auto
lemma bigo_compose2: "f =o g +o O(h) \<Longrightarrow>
(\<lambda>x. f (k x)) =o (\<lambda>x. g (k x)) +o O(\<lambda>x. h(k x))"
apply (simp only: set_minus_plus [symmetric] fun_Compl_def func_plus)
apply (drule bigo_compose1)
apply (simp add: fun_diff_def)
done
subsection {* Setsum *}
lemma bigo_setsum_main: "\<forall>x. \<forall>y \<in> A x. 0 \<le> h x y \<Longrightarrow>
\<exists>c. \<forall>x. \<forall>y \<in> A x. abs (f x y) \<le> c * (h x y) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. f x y) =o O(\<lambda>x. \<Sum>y \<in> A x. h x y)"
apply (auto simp add: bigo_def)
apply (rule_tac x = "abs c" in exI)
apply (subst abs_of_nonneg) back back
apply (rule setsum_nonneg)
apply force
apply (subst setsum_right_distrib)
apply (rule allI)
apply (rule order_trans)
apply (rule setsum_abs)
apply (rule setsum_mono)
apply (rule order_trans)
apply (drule spec)+
apply (drule bspec)+
apply assumption+
apply (drule bspec)
apply assumption+
apply (rule mult_right_mono)
apply (rule abs_ge_self)
apply force
done
lemma bigo_setsum1: "\<forall>x y. 0 \<le> h x y \<Longrightarrow>
\<exists>c. \<forall>x y. abs (f x y) \<le> c * h x y \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. f x y) =o O(\<lambda>x. \<Sum>y \<in> A x. h x y)"
apply (rule bigo_setsum_main)
apply force
apply clarsimp
apply (rule_tac x = c in exI)
apply force
done
lemma bigo_setsum2: "\<forall>y. 0 \<le> h y \<Longrightarrow>
\<exists>c. \<forall>y. abs (f y) \<le> c * (h y) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. f y) =o O(\<lambda>x. \<Sum>y \<in> A x. h y)"
by (rule bigo_setsum1) auto
lemma bigo_setsum3: "f =o O(h) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o O(\<lambda>x. \<Sum>y \<in> A x. abs (l x y * h (k x y)))"
apply (rule bigo_setsum1)
apply (rule allI)+
apply (rule abs_ge_zero)
apply (unfold bigo_def)
apply auto
apply (rule_tac x = c in exI)
apply (rule allI)+
apply (subst abs_mult)+
apply (subst mult.left_commute)
apply (rule mult_left_mono)
apply (erule spec)
apply (rule abs_ge_zero)
done
lemma bigo_setsum4: "f =o g +o O(h) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o
(\<lambda>x. \<Sum>y \<in> A x. l x y * g (k x y)) +o
O(\<lambda>x. \<Sum>y \<in> A x. abs (l x y * h (k x y)))"
apply (rule set_minus_imp_plus)
apply (subst fun_diff_def)
apply (subst setsum_subtractf [symmetric])
apply (subst right_diff_distrib [symmetric])
apply (rule bigo_setsum3)
apply (subst fun_diff_def [symmetric])
apply (erule set_plus_imp_minus)
done
lemma bigo_setsum5: "f =o O(h) \<Longrightarrow> \<forall>x y. 0 \<le> l x y \<Longrightarrow>
\<forall>x. 0 \<le> h x \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o
O(\<lambda>x. \<Sum>y \<in> A x. l x y * h (k x y))"
apply (subgoal_tac "(\<lambda>x. \<Sum>y \<in> A x. l x y * h (k x y)) =
(\<lambda>x. \<Sum>y \<in> A x. abs (l x y * h (k x y)))")
apply (erule ssubst)
apply (erule bigo_setsum3)
apply (rule ext)
apply (rule setsum.cong)
apply (rule refl)
apply (subst abs_of_nonneg)
apply auto
done
lemma bigo_setsum6: "f =o g +o O(h) \<Longrightarrow> \<forall>x y. 0 \<le> l x y \<Longrightarrow>
\<forall>x. 0 \<le> h x \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o
(\<lambda>x. \<Sum>y \<in> A x. l x y * g (k x y)) +o
O(\<lambda>x. \<Sum>y \<in> A x. l x y * h (k x y))"
apply (rule set_minus_imp_plus)
apply (subst fun_diff_def)
apply (subst setsum_subtractf [symmetric])
apply (subst right_diff_distrib [symmetric])
apply (rule bigo_setsum5)
apply (subst fun_diff_def [symmetric])
apply (drule set_plus_imp_minus)
apply auto
done
subsection {* Misc useful stuff *}
lemma bigo_useful_intro: "A \<subseteq> O(f) \<Longrightarrow> B \<subseteq> O(f) \<Longrightarrow> A + B \<subseteq> O(f)"
apply (subst bigo_plus_idemp [symmetric])
apply (rule set_plus_mono2)
apply assumption+
done
lemma bigo_useful_add: "f =o O(h) \<Longrightarrow> g =o O(h) \<Longrightarrow> f + g =o O(h)"
apply (subst bigo_plus_idemp [symmetric])
apply (rule set_plus_intro)
apply assumption+
done
lemma bigo_useful_const_mult:
fixes c :: "'a::linordered_field"
shows "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. c) * f =o O(h) \<Longrightarrow> f =o O(h)"
apply (rule subsetD)
apply (subgoal_tac "(\<lambda>x. 1 / c) *o O(h) \<subseteq> O(h)")
apply assumption
apply (rule bigo_const_mult6)
apply (subgoal_tac "f = (\<lambda>x. 1 / c) * ((\<lambda>x. c) * f)")
apply (erule ssubst)
apply (erule set_times_intro2)
apply (simp add: func_times)
done
lemma bigo_fix: "(\<lambda>x::nat. f (x + 1)) =o O(\<lambda>x. h (x + 1)) \<Longrightarrow> f 0 = 0 \<Longrightarrow> f =o O(h)"
apply (simp add: bigo_alt_def)
apply auto
apply (rule_tac x = c in exI)
apply auto
apply (case_tac "x = 0")
apply simp
apply (subgoal_tac "x = Suc (x - 1)")
apply (erule ssubst) back
apply (erule spec)
apply simp
done
lemma bigo_fix2:
"(\<lambda>x. f ((x::nat) + 1)) =o (\<lambda>x. g(x + 1)) +o O(\<lambda>x. h(x + 1)) \<Longrightarrow>
f 0 = g 0 \<Longrightarrow> f =o g +o O(h)"
apply (rule set_minus_imp_plus)
apply (rule bigo_fix)
apply (subst fun_diff_def)
apply (subst fun_diff_def [symmetric])
apply (rule set_plus_imp_minus)
apply simp
apply (simp add: fun_diff_def)
done
subsection {* Less than or equal to *}
definition lesso :: "('a \<Rightarrow> 'b::linordered_idom) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b" (infixl "<o" 70)
where "f <o g = (\<lambda>x. max (f x - g x) 0)"
lemma bigo_lesseq1: "f =o O(h) \<Longrightarrow> \<forall>x. abs (g x) \<le> abs (f x) \<Longrightarrow> g =o O(h)"
apply (unfold bigo_def)
apply clarsimp
apply (rule_tac x = c in exI)
apply (rule allI)
apply (rule order_trans)
apply (erule spec)+
done
lemma bigo_lesseq2: "f =o O(h) \<Longrightarrow> \<forall>x. abs (g x) \<le> f x \<Longrightarrow> g =o O(h)"
apply (erule bigo_lesseq1)
apply (rule allI)
apply (drule_tac x = x in spec)
apply (rule order_trans)
apply assumption
apply (rule abs_ge_self)
done
lemma bigo_lesseq3: "f =o O(h) \<Longrightarrow> \<forall>x. 0 \<le> g x \<Longrightarrow> \<forall>x. g x \<le> f x \<Longrightarrow> g =o O(h)"
apply (erule bigo_lesseq2)
apply (rule allI)
apply (subst abs_of_nonneg)
apply (erule spec)+
done
lemma bigo_lesseq4: "f =o O(h) \<Longrightarrow>
\<forall>x. 0 \<le> g x \<Longrightarrow> \<forall>x. g x \<le> abs (f x) \<Longrightarrow> g =o O(h)"
apply (erule bigo_lesseq1)
apply (rule allI)
apply (subst abs_of_nonneg)
apply (erule spec)+
done
lemma bigo_lesso1: "\<forall>x. f x \<le> g x \<Longrightarrow> f <o g =o O(h)"
apply (unfold lesso_def)
apply (subgoal_tac "(\<lambda>x. max (f x - g x) 0) = 0")
apply (erule ssubst)
apply (rule bigo_zero)
apply (unfold func_zero)
apply (rule ext)
apply (simp split: split_max)
done
lemma bigo_lesso2: "f =o g +o O(h) \<Longrightarrow>
\<forall>x. 0 \<le> k x \<Longrightarrow> \<forall>x. k x \<le> f x \<Longrightarrow> k <o g =o O(h)"
apply (unfold lesso_def)
apply (rule bigo_lesseq4)
apply (erule set_plus_imp_minus)
apply (rule allI)
apply (rule max.cobounded2)
apply (rule allI)
apply (subst fun_diff_def)
apply (case_tac "0 \<le> k x - g x")
apply simp
apply (subst abs_of_nonneg)
apply (drule_tac x = x in spec) back
apply (simp add: algebra_simps)
apply (subst diff_conv_add_uminus)+
apply (rule add_right_mono)
apply (erule spec)
apply (rule order_trans)
prefer 2
apply (rule abs_ge_zero)
apply (simp add: algebra_simps)
done
lemma bigo_lesso3: "f =o g +o O(h) \<Longrightarrow>
\<forall>x. 0 \<le> k x \<Longrightarrow> \<forall>x. g x \<le> k x \<Longrightarrow> f <o k =o O(h)"
apply (unfold lesso_def)
apply (rule bigo_lesseq4)
apply (erule set_plus_imp_minus)
apply (rule allI)
apply (rule max.cobounded2)
apply (rule allI)
apply (subst fun_diff_def)
apply (case_tac "0 \<le> f x - k x")
apply simp
apply (subst abs_of_nonneg)
apply (drule_tac x = x in spec) back
apply (simp add: algebra_simps)
apply (subst diff_conv_add_uminus)+
apply (rule add_left_mono)
apply (rule le_imp_neg_le)
apply (erule spec)
apply (rule order_trans)
prefer 2
apply (rule abs_ge_zero)
apply (simp add: algebra_simps)
done
lemma bigo_lesso4:
fixes k :: "'a \<Rightarrow> 'b::linordered_field"
shows "f <o g =o O(k) \<Longrightarrow> g =o h +o O(k) \<Longrightarrow> f <o h =o O(k)"
apply (unfold lesso_def)
apply (drule set_plus_imp_minus)
apply (drule bigo_abs5) back
apply (simp add: fun_diff_def)
apply (drule bigo_useful_add)
apply assumption
apply (erule bigo_lesseq2) back
apply (rule allI)
apply (auto simp add: func_plus fun_diff_def algebra_simps split: split_max abs_split)
done
lemma bigo_lesso5: "f <o g =o O(h) \<Longrightarrow> \<exists>C. \<forall>x. f x \<le> g x + C * abs (h x)"
apply (simp only: lesso_def bigo_alt_def)
apply clarsimp
apply (rule_tac x = c in exI)
apply (rule allI)
apply (drule_tac x = x in spec)
apply (subgoal_tac "abs (max (f x - g x) 0) = max (f x - g x) 0")
apply (clarsimp simp add: algebra_simps)
apply (rule abs_of_nonneg)
apply (rule max.cobounded2)
done
lemma lesso_add: "f <o g =o O(h) \<Longrightarrow> k <o l =o O(h) \<Longrightarrow> (f + k) <o (g + l) =o O(h)"
apply (unfold lesso_def)
apply (rule bigo_lesseq3)
apply (erule bigo_useful_add)
apply assumption
apply (force split: split_max)
apply (auto split: split_max simp add: func_plus)
done
lemma bigo_LIMSEQ1: "f =o O(g) \<Longrightarrow> g ----> 0 \<Longrightarrow> f ----> (0::real)"
apply (simp add: LIMSEQ_iff bigo_alt_def)
apply clarify
apply (drule_tac x = "r / c" in spec)
apply (drule mp)
apply simp
apply clarify
apply (rule_tac x = no in exI)
apply (rule allI)
apply (drule_tac x = n in spec)+
apply (rule impI)
apply (drule mp)
apply assumption
apply (rule order_le_less_trans)
apply assumption
apply (rule order_less_le_trans)
apply (subgoal_tac "c * abs (g n) < c * (r / c)")
apply assumption
apply (erule mult_strict_left_mono)
apply assumption
apply simp
done
lemma bigo_LIMSEQ2: "f =o g +o O(h) \<Longrightarrow> h ----> 0 \<Longrightarrow> f ----> a \<Longrightarrow> g ----> (a::real)"
apply (drule set_plus_imp_minus)
apply (drule bigo_LIMSEQ1)
apply assumption
apply (simp only: fun_diff_def)
apply (erule LIMSEQ_diff_approach_zero2)
apply assumption
done
end
|
theorem ex1 (x : Nat) : x = x → x = x := by
intro
aexact (rfl)
#print "---"
theorem ex2 (x : Nat) : x = x → x = x :=
have : x = x := by foo
fun h => h
#print "---"
theorem ex3 (x : Nat) : x = x → x = x :=
have : x = x := by foo (aaa bbb)
fun h => h
|
{-# OPTIONS --without-K --rewriting #-}
{-
favonia:
On 2017/03/23, after I added back Mayer-Vietoris, it seems
difficult to type check everything in one round on travis,
so parts of index.agda are moved here.
favonia:
On 2017/05/08, I further partition the results into multiple
independent index[n].agda files because the garbage collection
is not really working.
-}
module index2stage2 where
{- isomorphisms between different kinds of cohomology groups for finite CWs. -}
import cw.cohomology.AxiomaticIsoCellular
|
module Issue228 where
open import Common.Level
postulate
∞ : Level
data _×_ {a b} (A : Set a) (B : Set b) : Set (a ⊔ b) where
_,_ : A → B → A × B
data Large : Set ∞ where
large : Large
data Small : Set₁ where
small : Set → Small
P : Set
P = Large × Small
[_] : Set → P
[ A ] = (large , small A)
potentially-bad : P
potentially-bad = [ P ]
|
Jonathan Pollard , who spied for Israel and was convicted in the US and sentenced to life in jail , was born in Galveston . The film and television actor Lee Patterson , a native of Vancouver , British Columbia , lived in Galveston and died there in 2007 .
|
[STATEMENT]
lemma obsf_resource_of_oracle [simp]:
"obsf_resource (resource_of_oracle oracle s) = resource_of_oracle (obsf_oracle oracle) (OK s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. obsf_resource (RES oracle s) = RES (obsf_oracle oracle) (OK s)
[PROOF STEP]
by(coinduction arbitrary: s rule: resource.coinduct_strong)
(auto 4 3 simp add: rel_fun_def map_try_spmf spmf_rel_map intro!: rel_spmf_try_spmf rel_spmf_reflI) |
# Load package
suppressMessages(library(qvalue))
# Read command line arguments
args <- commandArgs(trailingOnly = TRUE)
try(if (length(args) != 1) stop("Incorrect number of arguments, usage> Rscript pi1.R INPUT"))
opt_input <- args[1]
# Calculate pi1 (the estimated proportion of true alternatives, i.e. 1 - pi0)
a <- read.table(opt_input)
pi0 <- pi0est(a$V1, lambda = seq(0.1, 0.9, 0.1), method = "smoother")
print(1 - pi0$pi0)
|
From Coq Require Import ssreflect.
From stdpp Require Import countable gmap list.
Lemma list_to_map_lookup_is_Some {A B} `{Countable A, EqDecision A} (l: list (A * B)) (a: A) :
is_Some ((list_to_map l : gmap A B) !! a) ↔ a ∈ l.*1.
Proof.
induction l.
- cbn. split; by inversion 1.
- cbn. rewrite lookup_insert_is_Some' elem_of_cons.
split; intros [HH|HH]; eauto; rewrite -> IHl in *; auto.
Qed.
Lemma zip_app {A B} (l1 l1': list A) (l2 l2' : list B) :
length l1 = length l2 ->
zip (l1 ++ l1') (l2 ++ l2') = zip l1 l2 ++ zip l1' l2'.
Proof.
revert l2. induction l1;intros l2 Hlen.
- destruct l2;[|inversion Hlen]. done.
- destruct l2;[inversion Hlen|]. simpl.
f_equiv. auto.
Qed.
Lemma length_zip_l {A B} (l1: list A) (l2: list B) :
length l1 ≤ length l2 → length (zip l1 l2) = length l1.
Proof.
revert l2. induction l1; intros l2 Hl2; auto.
destruct l2; cbn in Hl2. exfalso; lia.
cbn. rewrite IHl1; auto. lia.
Qed.
Lemma list_filter_forall { A: Type } (P: A -> Prop) `{ forall x, Decision (P x) } l:
Forall P l ->
@list_filter _ P _ l = l.
Proof.
induction 1; auto.
simpl. destruct (decide (P x)); rewrite /filter; try congruence.
Qed.
Lemma dom_map_imap_full {K A B}
`{Countable A, EqDecision A, Countable B, EqDecision B, Countable K, EqDecision K}
(f: K -> A -> option B) (m: gmap K A):
(∀ k a, m !! k = Some a → is_Some (f k a)) →
dom (map_imap f m) = dom m.
Proof.
intros Hf.
apply set_eq. intros k.
rewrite !elem_of_dom map_lookup_imap.
destruct (m !! k) eqn:Hmk.
- destruct (Hf k a Hmk) as [? Hfk]. cbn. rewrite Hfk. split; eauto.
- cbn. split; inversion 1; congruence.
Qed.
Lemma dom_list_to_map_singleton {K V: Type} `{EqDecision K, Countable K} (x:K) (y:V):
dom (list_to_map [(x, y)] : gmap K V) = list_to_set [x].
Proof. rewrite dom_insert_L /= dom_empty_L. set_solver. Qed.
Lemma list_to_set_disj {A} `{Countable A, EqDecision A} (l1 l2: list A) :
l1 ## l2 → (list_to_set l1: gset A) ## list_to_set l2.
Proof.
intros * HH. rewrite elem_of_disjoint. intros x.
rewrite !elem_of_list_to_set. rewrite elem_of_disjoint in HH |- *. eauto.
Qed.
Lemma map_to_list_fst {A B : Type} `{EqDecision A, Countable A} (m : gmap A B) i :
i ∈ (map_to_list m).*1 ↔ (∃ x, (i,x) ∈ (map_to_list m)).
Proof.
split.
- intros Hi.
destruct (m !! i) eqn:Hsome.
+ exists b. by apply elem_of_map_to_list.
+ rewrite -(list_to_map_to_list m) in Hsome.
eapply not_elem_of_list_to_map in Hsome. done.
- intros [x Hix].
apply elem_of_list_fmap.
exists (i,x). auto.
Qed.
Lemma drop_S':
forall A l n (a: A) l',
drop n l = a::l' ->
drop (S n) l = l'.
Proof.
induction l; intros * HH.
- rewrite drop_nil in HH. inversion HH.
- simpl. destruct n.
+ rewrite drop_0 in HH. inversion HH.
reflexivity.
+ simpl in HH. eapply IHl; eauto.
Qed.
Lemma disjoint_nil_l {A : Type} `{EqDecision A} (a : A) (l2 : list A) :
[] ## l2.
Proof.
apply elem_of_disjoint. intros x Hcontr. inversion Hcontr.
Qed.
Lemma disjoint_nil_r {A : Type} `{EqDecision A} (a : A) (l2 : list A) :
l2 ## [].
Proof.
apply elem_of_disjoint. intros x Hl Hcontr. inversion Hcontr.
Qed.
Lemma disjoint_cons {A : Type} `{EqDecision A} (a : A) (l1 l2 : list A) :
a :: l1 ## l2 → a ∉ l2.
Proof.
rewrite elem_of_disjoint =>Ha.
assert (a ∈ a :: l1) as Hs; [apply elem_of_cons;auto;apply elem_of_nil|].
specialize (Ha a Hs). done.
Qed.
Lemma disjoint_weak {A : Type} `{EqDecision A} (a : A) (l1 l2 : list A) :
a :: l1 ## l2 → l1 ## l2.
Proof.
rewrite elem_of_disjoint =>Ha a' Hl1 Hl2.
assert (a' ∈ a :: l1) as Hs; [apply elem_of_cons;auto;apply elem_of_nil|].
specialize (Ha a' Hs Hl2). done.
Qed.
Lemma disjoint_swap {A : Type} `{EqDecision A} (a : A) (l1 l2 : list A) :
a ∉ l1 →
a :: l1 ## l2 -> l1 ## a :: l2.
Proof.
rewrite elem_of_disjoint =>Hnin Ha a' Hl1 Hl2.
destruct (decide (a' = a)).
- subst. contradiction.
- apply Ha with a'.
+ apply elem_of_cons; by right.
+ by apply elem_of_cons in Hl2 as [Hcontr | Hl2]; [contradiction|].
Qed.
(* delete_list: delete a list of keys in a map *)
Fixpoint delete_list {K V : Type} `{Countable K, EqDecision K}
(ks : list K) (m : gmap K V) : gmap K V :=
match ks with
| k :: ks' => delete k (delete_list ks' m)
| [] => m
end.
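(* A small sanity check, added for illustration: a key occurring in the list
   is absent after delete_list. Uses only stdpp's lookup_delete. *)
Example delete_list_lookup_head (m : gmap nat bool) (k k' : nat) :
  delete_list [k; k'] m !! k = None.
Proof. simpl. apply lookup_delete. Qed.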
Lemma delete_list_insert {K V : Type} `{Countable K, EqDecision K}
(ks : list K) (m : gmap K V) (l : K) (v : V) :
l ∉ ks →
delete_list ks (<[l:=v]> m) = <[l:=v]> (delete_list ks m).
Proof.
intros Hnin.
induction ks; auto.
simpl.
apply not_elem_of_cons in Hnin as [Hneq Hnin].
rewrite -delete_insert_ne; auto.
f_equal. by apply IHks.
Qed.
Lemma delete_list_delete {K V : Type} `{Countable K, EqDecision K}
(ks : list K) (m : gmap K V) (l : K) :
l ∉ ks →
delete_list ks (delete l m) = delete l (delete_list ks m).
Proof.
intros Hnin.
induction ks; auto.
simpl.
apply not_elem_of_cons in Hnin as [Hneq Hnin].
rewrite -delete_commute; auto.
f_equal. by apply IHks.
Qed.
Lemma lookup_delete_list_notin {K V : Type} `{Countable K, EqDecision K}
(ks : list K) (m : gmap K V) (l : K) :
l ∉ ks →
(delete_list ks m) !! l = m !! l.
Proof.
intros HH; induction ks; simpl; auto.
eapply not_elem_of_cons in HH. destruct HH.
rewrite lookup_delete_ne; auto.
Qed.
Lemma delete_list_permutation {A B} `{Countable A, EqDecision A}
(l1 l2: list A) (m: gmap A B):
l1 ≡ₚ l2 → delete_list l1 m = delete_list l2 m.
Proof.
induction 1.
{ reflexivity. }
{ cbn. rewrite IHPermutation //. }
{ cbn. rewrite delete_commute //. }
{ rewrite IHPermutation1 //. }
Qed.
Lemma delete_list_swap {A B : Type} `{EqDecision A, Countable A}
(a a' : A) (l1 l2 : list A) (M : gmap A B) :
delete a' (delete_list (l1 ++ a :: l2) M) =
delete a (delete a' (delete_list (l1 ++ l2) M)).
Proof.
induction l1.
- apply delete_commute.
- simpl. repeat rewrite (delete_commute _ _ a0).
f_equiv. apply IHl1.
Qed.
(* Map difference for heterogeneous maps, and lemmas relating it to delete_list *)
Definition map_difference_het
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C): gmap A B
:=
filter (fun '(k, v) => m2 !! k = None) m1.
Notation "m1 ∖∖ m2" := (map_difference_het m1 m2) (at level 40, left associativity).
Lemma map_eq' {A B} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1 m2: gmap A B):
m1 = m2 ↔ (forall k v, m1 !! k = Some v ↔ m2 !! k = Some v).
Proof.
split. intros ->. done.
intros Heq. apply map_eq. intro k. destruct (m2 !! k) eqn:HH.
{ by apply Heq. }
{ destruct (m1 !! k) eqn:HHH; auto. apply Heq in HHH. congruence. }
Qed.
Lemma difference_het_lookup_Some
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C) (k: A) (v: B):
(m1 ∖∖ m2) !! k = Some v ↔ m1 !! k = Some v ∧ m2 !! k = None.
Proof. by rewrite /map_difference_het map_filter_lookup_Some. Qed.
Lemma difference_het_lookup_None
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C) (k: A) (v: B):
(m1 ∖∖ m2) !! k = None ↔ m1 !! k = None ∨ is_Some (m2 !! k).
Proof.
rewrite /map_difference_het map_filter_lookup_None.
split; intros [HH1|HH2]; eauto.
{ destruct (m1 !! k) eqn:?; eauto; right.
destruct (m2 !! k) eqn:?; eauto. exfalso. eapply HH2; eauto. }
{ destruct (m1 !! k) eqn:?; eauto; right.
destruct (m2 !! k) eqn:?; eauto. destruct HH2 as [? ?]. congruence. }
Qed.
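(* Illustration, added here: any key bound in the right-hand map is filtered
   out of the heterogeneous difference. The proof sketch relies on the
   lookup lemma just proved together with stdpp's lookup_insert. *)
Example difference_het_insert_lookup (m1 : gmap nat bool) (m2 : gmap nat nat) k v :
  (m1 ∖∖ <[k:=v]> m2) !! k = None.
Proof. rewrite difference_het_lookup_None. right. exists v. apply lookup_insert. Qed.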
Lemma difference_het_empty
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m: gmap A B):
m ∖∖ (∅ : gmap A C) = m.
Proof.
rewrite /map_difference_het map_eq'. intros k v.
rewrite map_filter_lookup_Some. rewrite lookup_empty. set_solver.
Qed.
Lemma difference_het_eq_empty
{A B} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m: gmap A B):
m ∖∖ m = (∅ : gmap A B).
Proof.
rewrite /map_difference_het map_eq'. intros k v.
rewrite map_filter_lookup_Some. rewrite lookup_empty. set_solver.
Qed.
Lemma difference_het_insert_r
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C) (k: A) (v: C):
m1 ∖∖ (<[ k := v ]> m2) = delete k (m1 ∖∖ m2).
Proof.
intros.
rewrite /map_difference_het map_eq'. intros k' v'.
rewrite map_filter_lookup_Some lookup_delete_Some.
rewrite map_filter_lookup_Some lookup_insert_None. set_solver.
Qed.
Lemma difference_het_insert_l
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C) (k: A) (v: B):
m2 !! k = None ->
<[ k := v ]> m1 ∖∖ m2 = <[ k := v ]> (m1 ∖∖ m2).
Proof.
intros.
rewrite /map_difference_het map_eq'. intros k' v'.
rewrite map_filter_lookup_Some lookup_insert_Some.
rewrite -map_filter_insert_True;auto.
by rewrite map_filter_lookup_Some lookup_insert_Some.
Qed.
Lemma difference_het_delete_assoc
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C) (k: A):
delete k (m1 ∖∖ m2) = (delete k m1) ∖∖ m2.
Proof.
intros.
rewrite /map_difference_het map_eq'. intros k' v'.
rewrite map_filter_lookup_Some.
rewrite -map_filter_delete;auto.
rewrite map_filter_lookup_Some. set_solver.
Qed.
Lemma dom_difference_het
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C):
dom (m1 ∖∖ m2) = dom m1 ∖ dom m2.
Proof.
apply (@anti_symm _ _ subseteq).
typeclasses eauto.
{ rewrite elem_of_subseteq. intro k.
rewrite elem_of_dom. intros [v Hv].
rewrite difference_het_lookup_Some in Hv *.
destruct Hv as [? ?].
rewrite elem_of_difference !elem_of_dom. split; eauto.
intros [? ?]. congruence. }
{ rewrite elem_of_subseteq. intro k.
rewrite elem_of_difference !elem_of_dom. intros [[v ?] Hcontra].
exists v. rewrite difference_het_lookup_Some. split; eauto.
destruct (m2 !! k) eqn:?; eauto. exfalso. apply Hcontra. eauto. }
Qed.
Lemma delete_elements_eq_difference_het
{A B C} `{Countable A, EqDecision A, Countable B, EqDecision B}
(m1: gmap A B) (m2: gmap A C):
delete_list (elements (dom m2)) m1 = m1 ∖∖ m2.
Proof.
set (l := elements (dom m2)).
assert (l ≡ₚ elements (dom m2)) as Hl by reflexivity.
clearbody l. revert l Hl. revert m1. pattern m2. revert m2.
apply map_ind.
- intros m1 l. rewrite dom_empty_L elements_empty difference_het_empty.
rewrite Permutation_nil_r. intros ->. reflexivity.
- intros k v m2 Hm2k HI m1 l Hm1l.
rewrite difference_het_insert_r.
rewrite dom_insert in Hm1l *.
move: Hm1l. rewrite elements_union_singleton.
rewrite elem_of_dom; intros [? ?]; congruence.
intros Hm1l.
transitivity (delete k (delete_list (elements (dom m2)) m1)).
{ erewrite delete_list_permutation. 2: eauto. reflexivity. }
{ rewrite HI//. }
Qed.
(* rtc *)
Lemma rtc_implies {A : Type} (R Q : A → A → Prop) (x y : A) :
(∀ r q, R r q → Q r q) →
rtc R x y → rtc Q x y.
Proof.
intros Himpl HR.
induction HR.
- done.
- apply Himpl in H.
apply rtc_once in H.
apply rtc_transitive with y; auto.
Qed.
Lemma rtc_or_intro {A : Type} (R Q : A → A → Prop) (x y : A) :
rtc (λ a b, R a b) x y →
rtc (λ a b, R a b ∨ Q a b) x y.
Proof.
intros HR. induction HR.
- done.
- apply rtc_transitive with y; auto.
apply rtc_once. by left.
Qed.
Lemma rtc_or_intro_l {A : Type} (R Q : A → A → Prop) (x y : A) :
rtc (λ a b, R a b) x y →
rtc (λ a b, Q a b ∨ R a b) x y.
Proof.
intros HR. induction HR.
- done.
- apply rtc_transitive with y; auto.
apply rtc_once. by right.
Qed.
(* Helper lemmas on list differences *)
Lemma not_elem_of_list {A : Type} `{EqDecision A} (a : A) (l x : list A) :
a ∈ x → a ∉ list_difference l x.
Proof.
intros Hax.
rewrite /not.
intros Hal.
by apply elem_of_list_difference in Hal as [Ha' Hax_not].
Qed.
Lemma list_difference_nil {A : Type} `{EqDecision A} (l : list A) :
list_difference l [] = l.
Proof.
induction l; auto.
simpl. f_equal.
apply IHl.
Qed.
Lemma list_difference_length_cons {A : Type} `{EqDecision A}
(l2 : list A) (a : A) :
list_difference [a] (a :: l2) = [].
Proof.
simpl.
assert (a ∈ a :: l2); first apply elem_of_list_here.
destruct (decide_rel elem_of a (a :: l2)); auto; last contradiction.
Qed.
Lemma list_difference_skip {A : Type} `{EqDecision A}
(l1 l2 : list A) (b : A) :
¬ (b ∈ l1) →
list_difference l1 (b :: l2) = list_difference l1 l2.
Proof.
intros Hnin.
induction l1; auto.
apply not_elem_of_cons in Hnin.
destruct Hnin as [Hne Hl1].
simpl.
destruct (decide_rel elem_of a (b :: l2)).
- apply elem_of_cons in e.
destruct e as [Hcontr | Hl2]; first congruence.
destruct (decide_rel elem_of a l2); last contradiction.
by apply IHl1.
- apply not_elem_of_cons in n.
destruct n as [Hne' Hl2].
destruct (decide_rel elem_of a l2); first contradiction.
f_equal.
by apply IHl1.
Qed.
Lemma list_difference_nested {A : Type} `{EqDecision A}
(l1 l1' l2 : list A) (b : A) :
¬ (b ∈ (l1 ++ l1')) →
list_difference (l1 ++ b :: l1') (b :: l2) = list_difference (l1 ++ l1') l2.
Proof.
intros Hnotin.
induction l1.
- simpl.
assert (b ∈ (b :: l2)); first apply elem_of_list_here.
destruct (decide_rel elem_of b (b :: l2)); last contradiction.
rewrite list_difference_skip; auto.
- simpl in *.
apply not_elem_of_cons in Hnotin.
destruct Hnotin as [Hne Hnotin].
destruct (decide_rel elem_of a (b :: l2)).
+ apply elem_of_cons in e.
destruct e as [Hcontr | Hl2]; first congruence.
destruct (decide_rel elem_of a l2); last contradiction.
by apply IHl1.
+ apply not_elem_of_cons in n.
destruct n as [Hne' Hnotin'].
destruct (decide_rel elem_of a l2); first contradiction.
f_equal.
by apply IHl1.
Qed.
Lemma list_difference_length_ni {A : Type} `{EqDecision A}
(l1 : list A) (b : A) :
¬ (b ∈ l1) →
length (list_difference l1 [b]) = length l1.
Proof.
intros Hna.
destruct l1; auto.
simpl.
apply not_elem_of_cons in Hna.
destruct Hna as [Hne Hna].
destruct (decide_rel elem_of a [b]).
- apply elem_of_list_singleton in e. congruence.
- simpl. rewrite list_difference_skip; auto.
by rewrite list_difference_nil.
Qed.
Lemma list_difference_single_length {A : Type} `{EqDecision A}
(l1 : list A) (b : A) :
b ∈ l1 →
NoDup l1 →
length (list_difference l1 [b]) =
length l1 - 1.
Proof.
intros Ha Hndup.
induction l1; auto.
destruct (decide (b = a)).
- subst.
assert (a ∈ a :: l1); first apply elem_of_list_here.
rewrite ->NoDup_cons in Hndup. destruct Hndup as [Hni Hndup].
assert (¬ (a ∈ l1)) as Hni'.
{ rewrite /not. intros Hin. contradiction. }
simpl.
assert (a ∈ [a]); first apply elem_of_list_here.
destruct (decide_rel elem_of a [a]); last contradiction.
rewrite Nat.sub_0_r.
apply list_difference_length_ni; auto.
- simpl.
assert (¬ (a ∈ [b])).
{ rewrite /not. intros Hin. apply elem_of_list_singleton in Hin. congruence. }
destruct (decide_rel elem_of a [b]); first contradiction.
rewrite Nat.sub_0_r /=.
inversion Hndup; subst.
apply elem_of_cons in Ha.
destruct Ha as [Hcontr | Ha]; first congruence.
apply IHl1 in Ha as Heq; auto.
rewrite Heq.
destruct l1; first inversion Ha.
simpl. lia.
Qed.
Lemma list_difference_app {A : Type} `{EqDecision A}
(l1 l2 l2' : list A) :
list_difference l1 (l2 ++ l2') = list_difference (list_difference l1 l2) l2'.
Proof.
induction l1; auto.
simpl. destruct (decide_rel elem_of a (l2 ++ l2')).
- apply elem_of_app in e as [Hl2 | Hl2'].
+ destruct (decide_rel elem_of a l2); last contradiction.
apply IHl1.
+ destruct (decide_rel elem_of a l2); first by apply IHl1.
simpl.
destruct (decide_rel elem_of a l2'); last contradiction.
apply IHl1.
- apply not_elem_of_app in n as [Hl2 Hl2'].
destruct (decide_rel elem_of a l2); first contradiction.
simpl.
destruct (decide_rel elem_of a l2'); first contradiction.
f_equal. apply IHl1.
Qed.
Lemma list_difference_Permutation {A : Type} `{EqDecision A} (l l1 l2 : list A) :
l1 ≡ₚ l2 -> list_difference l l1 = list_difference l l2.
Proof.
intros Hl.
induction l; auto.
simpl. rewrite IHl.
destruct (decide_rel elem_of a l1).
- apply elem_of_list_In in e.
apply Permutation_in with _ _ _ a in Hl; auto.
apply elem_of_list_In in Hl.
destruct (decide_rel elem_of a l2);[|contradiction].
done.
- revert n; rewrite elem_of_list_In; intros n.
assert (¬ In a l2) as Hnin.
{ intros Hcontr. apply Permutation_sym in Hl.
apply Permutation_in with _ _ _ a in Hl; auto. }
revert Hnin; rewrite -elem_of_list_In; intro Hnin.
destruct (decide_rel elem_of a l2);[contradiction|].
done.
Qed.
Lemma list_difference_length {A} `{EqDecision A} (l1 : list A) :
forall l2, NoDup l1 -> NoDup l2 -> l2 ⊆+ l1 ->
length (list_difference l1 l2) = length (l1) - length l2.
Proof.
induction l1; intros l2 Hdup1 Hdup2 Hsub.
- simpl. done.
- simpl. destruct (decide_rel elem_of a l2).
+ apply submseteq_cons_r in Hsub as [Hcontr | [k [Hperm Hk] ] ].
{ apply elem_of_submseteq with (x:=a) in Hcontr;auto. apply NoDup_cons in Hdup1 as [Hnin ?].
exfalso. by apply Hnin. }
apply list_difference_Permutation with (l:=l1) in Hperm as Heq. rewrite Heq.
apply NoDup_cons in Hdup1 as [Hnin ?].
rewrite list_difference_skip; [intros Hcontr;by apply Hnin|].
rewrite IHl1;auto.
revert Hdup2. rewrite Hperm =>Hdup2. by apply NoDup_cons in Hdup2 as [? ?].
rewrite Hperm /=. auto.
+ simpl. apply submseteq_cons_r in Hsub as [Hsub | Hcontr].
rewrite IHl1;auto. assert (length l2 ≤ length l1).
{ apply submseteq_length. auto. }
by apply NoDup_cons in Hdup1 as [? ?]; auto.
by apply submseteq_length in Hsub; lia.
destruct Hcontr as [l' [Hperm Hl'] ].
exfalso. apply n. rewrite Hperm. constructor.
Qed.
Lemma list_to_set_difference A {_: EqDecision A} {_: Countable A} (l1 l2: list A):
(list_to_set (list_difference l1 l2): gset A) = (list_to_set l1: gset A) ∖ (list_to_set l2: gset A).
Proof.
revert l2. induction l1.
- intro. cbn [list_difference list_to_set]. set_solver.
- intros l2. cbn [list_difference list_to_set]. destruct (decide_rel elem_of a l2); set_solver.
Qed.
(* creates a gmap with domain from the list, all pointing to a default value *)
Fixpoint create_gmap_default {K V : Type} `{Countable K}
(l : list K) (d : V) : gmap K V :=
match l with
| [] => ∅
| k :: tl => <[k:=d]> (create_gmap_default tl d)
end.
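(* A quick sanity check, added for illustration: the head key of the list
   receives the default value. *)
Example create_gmap_default_head (tl : list nat) (d : bool) (k : nat) :
  create_gmap_default (k :: tl) d !! k = Some d.
Proof. simpl. apply lookup_insert. Qed.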
Lemma create_gmap_default_lookup {K V : Type} `{Countable K}
(l : list K) (d : V) (k : K) :
k ∈ l ↔ (create_gmap_default l d) !! k = Some d.
Proof.
split.
- intros Hk.
induction l; inversion Hk.
+ by rewrite lookup_insert.
+ destruct (decide (a = k)); [subst; by rewrite lookup_insert|].
rewrite lookup_insert_ne; auto.
- intros Hl.
induction l; inversion Hl.
destruct (decide (a = k)); [subst;apply elem_of_list_here|].
apply elem_of_cons. right.
apply IHl. simplify_map_eq. auto.
Qed.
Lemma create_gmap_default_lookup_is_Some {K V} `{EqDecision K, Countable K} (l: list K) (d: V) x v:
create_gmap_default l d !! x = Some v → x ∈ l ∧ v = d.
Proof.
revert x v d. induction l as [| a l]; cbn.
- done.
- intros x v d. destruct (decide (a = x)) as [->|].
+ rewrite lookup_insert. intros; simplify_eq. repeat constructor.
+ rewrite lookup_insert_ne //. intros [? ?]%IHl. subst. repeat constructor; auto.
Qed.
Lemma create_gmap_default_dom {K V} `{EqDecision K, Countable K} (l: list K) (d: V):
dom (create_gmap_default l d) = list_to_set l.
Proof.
induction l as [| a l].
- cbn. rewrite dom_empty_L //.
- cbn [create_gmap_default list_to_set]. rewrite dom_insert_L // IHl //.
Qed.
Lemma fst_zip_prefix A B (l : list A) (k : list B) :
(zip l k).*1 `prefix_of` l.
Proof.
revert k. induction l; cbn; auto.
destruct k; cbn.
- apply prefix_nil.
- apply prefix_cons; auto.
Qed.
Lemma prefix_of_nil A (l : list A) :
l `prefix_of` [] →
l = [].
Proof. destruct l; auto. by intros ?%prefix_nil_not. Qed.
Lemma in_prefix A (l1 l2 : list A) x :
l1 `prefix_of` l2 →
x ∈ l1 →
x ∈ l2.
Proof.
unfold prefix. intros [? ->] ?.
apply elem_of_app. eauto.
Qed.
Lemma NoDup_prefix A (l1 l2 : list A) :
NoDup l2 →
l1 `prefix_of` l2 →
NoDup l1.
Proof.
intros H. revert l1. induction H.
- intros * ->%prefix_of_nil. constructor.
- intros l1. destruct l1.
+ intros _. constructor.
+ intros HH. rewrite (prefix_cons_inv_1 _ _ _ _ HH).
apply prefix_cons_inv_2 in HH. constructor; eauto.
intro Hx. pose proof (in_prefix _ _ _ _ HH Hx). done.
Qed.
Lemma take_lookup_Some_inv A (l : list A) (n i : nat) x :
take n l !! i = Some x →
i < n ∧ l !! i = Some x.
Proof.
revert l i x. induction n; cbn.
{ intros *. inversion 1. }
{ intros *. destruct l; cbn. by inversion 1. destruct i; cbn.
- intros; simplify_eq. split; auto. lia.
- intros [? ?]%IHn. split. lia. auto. }
Qed.
Lemma NoDup_fst {A B : Type} (l : list (A*B)) :
NoDup l.*1 -> NoDup l.
Proof.
intros Hdup.
induction l.
- by apply NoDup_nil.
- destruct a. simpl in Hdup. apply NoDup_cons in Hdup as [Hin Hdup].
apply NoDup_cons. split;auto.
intros Hcontr. apply Hin. apply elem_of_list_fmap.
exists (a,b). simpl. split;auto.
Qed.
Lemma fst_elem_of_cons {A B} `{EqDecision A} (l : list A) (x : A) (l': list B) :
x ∈ (zip l l').*1 →
x ∈ l.
Proof. intros H. eapply in_prefix. eapply fst_zip_prefix. done. Qed.
Lemma length_fst_snd {A B} `{Countable A} (m : gmap A B) :
length (map_to_list m).*1 = length (map_to_list m).*2.
Proof.
induction m using map_ind.
- rewrite map_to_list_empty. auto.
- rewrite map_to_list_insert;auto. simpl. auto.
Qed.
Lemma map_to_list_delete {A B} `{Countable A} `{EqDecision A} (m : gmap A B) (i : A) (x : B) :
∀ l, (i,x) :: l ≡ₚmap_to_list m ->
NoDup (i :: l.*1) →
(map_to_list (delete i m)) ≡ₚl.
Proof.
intros l Hl Hdup.
assert ((i,x) ∈ map_to_list m) as Hin.
{ rewrite -Hl. constructor. }
assert (m !! i = Some x) as Hsome.
{ apply elem_of_map_to_list; auto. }
apply NoDup_Permutation;auto.
by apply NoDup_map_to_list.
apply NoDup_fst. apply NoDup_cons in Hdup as [? ?]. by auto.
intros [i0 x0]. split.
- intros Hinx%elem_of_map_to_list.
assert (i ≠ i0) as Hne;[intros Hcontr;subst;simplify_map_eq|simplify_map_eq].
assert ((i0, x0) ∈ (i, x) :: l) as Hin'.
{ rewrite Hl. apply elem_of_map_to_list. auto. }
apply elem_of_cons in Hin' as [Hcontr | Hin'];auto.
simplify_eq.
- intros Hinx. apply elem_of_map_to_list.
assert (i ≠ i0) as Hne;[|simplify_map_eq].
{ intros Hcontr;subst.
assert (NoDup ((i0, x) :: l)) as Hdup'.
{ rewrite Hl. apply NoDup_map_to_list. }
assert (i0 ∈ l.*1) as HWInt.
{ apply elem_of_list_fmap. exists (i0,x0). simpl. split;auto. }
apply NoDup_cons in Hdup as [Hcontr ?]. by apply Hcontr.
}
assert ((i0, x0) ∈ (i, x) :: l) as Hin'.
{ constructor. auto. }
revert Hin'. rewrite Hl =>Hin'. apply elem_of_map_to_list in Hin'.
auto.
Qed.
Lemma NoDup_map_to_list_fst (A B : Type) `{EqDecision A} `{Countable A}
(m : gmap A B):
NoDup (map_to_list m).*1.
Proof.
induction m as [|i x m] using map_ind.
- rewrite map_to_list_empty. simpl. by apply NoDup_nil.
- rewrite map_to_list_insert;auto.
simpl. rewrite NoDup_cons. split.
+ intros Hcontr%elem_of_list_fmap.
destruct Hcontr as [ab [Heqab Hcontr] ].
destruct ab as [a b]. subst. simpl in *.
apply elem_of_map_to_list in Hcontr. rewrite Hcontr in H0. inversion H0.
+ auto.
Qed.
Lemma map_to_list_delete_fst {A B} `{Countable A} (m : gmap A B) (i : A) (x : B) :
∀ l, i :: l ≡ₚ(map_to_list m).*1 ->
NoDup (i :: l) →
(map_to_list (delete i m)).*1 ≡ₚl.
Proof.
intros l Hl Hdup.
assert (i ∈ (map_to_list m).*1) as Hin.
{ rewrite -Hl. constructor. }
apply NoDup_cons in Hdup as [Hnin Hdup].
apply NoDup_Permutation;auto.
apply NoDup_map_to_list_fst. done.
set l' := zip l (repeat x (length l)).
assert (l = l'.*1) as Heq;[rewrite fst_zip;auto;rewrite repeat_length;lia|].
intros i0. split.
- intros Hinx%elem_of_list_fmap.
destruct Hinx as [ [? ?] [? Hinx] ]. simpl in *. subst a.
apply elem_of_map_to_list in Hinx.
destruct (decide (i = i0));[subst i;rewrite lookup_delete in Hinx;inversion Hinx|].
rewrite lookup_delete_ne in Hinx;auto.
apply elem_of_map_to_list in Hinx.
assert (i0 ∈ (map_to_list m).*1) as Hinx'.
{ apply elem_of_list_fmap. exists (i0,b). split;auto. }
revert Hinx'. rewrite -Hl =>Hinx'.
by apply elem_of_cons in Hinx' as [Hcontr | Hinx'];[congruence|].
- intros Hinx. assert (i ≠ i0) as Hne;[congruence|simplify_map_eq].
assert (i0 ∈ i :: l) as Hin'.
{ constructor. auto. }
revert Hin'. rewrite Hl =>Hin'.
apply map_to_list_fst in Hin' as [x' Hx].
apply elem_of_map_to_list in Hx.
apply map_to_list_fst. exists x'. apply elem_of_map_to_list.
rewrite lookup_delete_ne;auto.
Qed.
Lemma submseteq_list_difference {A} `{EqDecision A} (l1 l2 l3 : list A) :
NoDup l1 → (∀ a, a ∈ l3 → a ∉ l1) → l1 ⊆+ l2 → l1 ⊆+ list_difference l2 l3.
Proof.
intros Hdup Hnin Hsub.
apply NoDup_submseteq;auto.
intros x Hx. apply elem_of_list_difference.
split.
- eapply elem_of_submseteq;eauto.
- intros Hcontr. apply Hnin in Hcontr. done.
Qed.
Lemma list_difference_cons {A} `{EqDecision A} (l1 l2 : list A) (a : A) :
NoDup l1 → a ∈ l1 → a ∉ l2 → list_difference l1 l2 ≡ₚ a :: list_difference l1 (a :: l2).
Proof.
revert l2 a. induction l1;intros l2 a' Hdup Hin1 Hin2.
- inversion Hin1.
- simpl. destruct (decide_rel elem_of a l2).
+ assert (a ≠ a') as Hne; [intros Hcontr;subst;contradiction|].
rewrite decide_True. { apply elem_of_cons. right;auto. }
apply IHl1;auto. apply NoDup_cons in Hdup as [? ?];auto.
apply elem_of_cons in Hin1 as [? | ?];[congruence|auto].
+ destruct (decide (a = a'));subst.
* apply NoDup_cons in Hdup as [Hnin Hdup].
f_equiv. rewrite decide_True;[constructor|].
rewrite list_difference_skip;auto.
* apply NoDup_cons in Hdup as [Hnin Hdup].
apply elem_of_cons in Hin1 as [? | ?];[congruence|auto].
erewrite IHl1;eauto. rewrite decide_False.
apply not_elem_of_cons;auto. apply Permutation_swap.
Qed.
Lemma list_to_set_map_to_list {K V : Type} `{EqDecision K} `{Countable K}
(m : gmap K V) :
list_to_set (map_to_list m).*1 = dom m.
Proof.
induction m using map_ind.
- rewrite map_to_list_empty dom_empty_L. auto.
- rewrite map_to_list_insert// dom_insert_L. simpl. rewrite IHm. auto.
Qed.
(* The last element of a list is unchanged by dropping strictly fewer elements than the list contains *)
Lemma last_drop_lt {A : Type} (l : list A) (i : nat) (a : A) :
i < (length l) → list.last l = Some a → list.last (drop i l) = Some a.
Proof.
generalize i. induction l.
- intros i' Hlen Hlast. inversion Hlast.
- intros i' Hlen Hlast. destruct i'.
+ simpl. apply Hlast.
+ simpl; simpl in Hlen. apply IHl; first lia.
assert (0 < length l) as Hl; first lia.
destruct l; simpl in Hl; first by apply Nat.lt_irrefl in Hl. auto.
Qed.
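(* A concrete instance, added for illustration: dropping one element of a
   three-element list keeps the last element. *)
Example last_drop_example : list.last (drop 1 [1; 2; 3]) = Some 3.
Proof. reflexivity. Qed.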
Lemma last_lookup {A : Type} (l : list A) :
list.last l = l !! (length l - 1).
Proof.
induction l.
- done.
- simpl; rewrite {1}/last -/last.
destruct l; auto.
rewrite IHl. simpl. rewrite PeanoNat.Nat.sub_0_r. done.
Qed.
Lemma last_app_iff {A : Type} (l1 l2 : list A) a :
list.last l2 = Some a <-> length l2 > 0 ∧ list.last (l1 ++ l2) = Some a.
Proof.
split.
- intros Hl2.
induction l1.
+ destruct l2; inversion Hl2. simpl. split; auto. lia.
+ destruct IHl1 as [Hlt Hlast]. split; auto. simpl; rewrite {1}/last -/last. rewrite Hlast.
destruct (l1 ++ l2); auto.
inversion Hlast.
- generalize l1. induction l2; intros l1' [Hlen Hl].
+ inversion Hlen.
+ destruct l2;[rewrite last_snoc in Hl; inversion Hl; done|].
rewrite -(IHl2 (l1' ++ [a0])); auto.
simpl. split;[lia|]. rewrite -app_assoc -cons_middle. done.
Qed.
Lemma last_app_eq {A : Type} (l1 l2 : list A) :
length l2 > 0 ->
list.last l2 = list.last (l1 ++ l2).
Proof.
revert l1. induction l2;intros l1 Hlen.
- inversion Hlen.
- destruct l2.
+ rewrite last_snoc. done.
+ rewrite cons_middle app_assoc -(IHl2 (l1 ++ [a]));[simpl;lia|auto].
Qed.
Lemma rev_nil_inv {A} (l : list A) :
rev l = [] -> l = [].
Proof.
destruct l;auto.
simpl. intros Hrev. exfalso.
apply app_eq_nil in Hrev as [Hrev1 Hrev2].
inversion Hrev2.
Qed.
Lemma rev_singleton_inv {A} (l : list A) (a : A) :
rev l = [a] -> l = [a].
Proof.
destruct l;auto.
simpl. intros Hrev.
destruct l.
- simpl in Hrev. inversion Hrev. auto.
- exfalso. simpl in Hrev.
apply app_singleton in Hrev. destruct Hrev as [ [Hrev1 Hrev2] | [Hrev1 Hrev2] ].
+ destruct (rev l);inversion Hrev1.
+ inversion Hrev2.
Qed.
Lemma rev_lookup {A} (l : list A) (a : A) :
rev l !! 0 = Some a <-> l !! (length l - 1) = Some a.
Proof.
split; intros Hl.
- rewrite -last_lookup.
induction l.
+ inversion Hl.
+ simpl in Hl. simpl. destruct l.
{ simpl in Hl. inversion Hl. auto. }
{ apply IHl. rewrite lookup_app_l in Hl;[simpl;rewrite app_length /=;lia|]. auto. }
- rewrite -last_lookup in Hl.
induction l.
+ inversion Hl.
+ simpl. destruct l.
{ simpl. inversion Hl. auto. }
{ rewrite lookup_app_l;[simpl;rewrite app_length /=;lia|]. apply IHl. auto. }
Qed.
Lemma rev_cons_inv {A} (l l' : list A) (a : A) :
rev l = a :: l' ->
∃ l'', l = l'' ++ [a].
Proof.
intros Hrel.
destruct l;inversion Hrel.
assert ((a0 :: l) !! (length l) = Some a) as Hsome.
{ assert (length l = length (a0 :: l) - 1) as ->;[simpl;lia|]. apply rev_lookup. rewrite Hrel. constructor. }
apply take_S_r in Hsome.
exists (take (length l) (rev (rev l ++ [a0]))).
simpl. rewrite rev_unit. rewrite rev_involutive. rewrite -Hsome /=.
f_equiv. rewrite firstn_all. auto.
Qed.
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
/-!
# More `char` instances
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file provides a `linear_order` instance on `char`. `char` is the type of Unicode scalar values.
-/
instance : linear_order char :=
{ le_refl := λ a, @le_refl ℕ _ _,
le_trans := λ a b c, @le_trans ℕ _ _ _ _,
le_antisymm := λ a b h₁ h₂,
char.eq_of_veq $ le_antisymm h₁ h₂,
le_total := λ a b, @le_total ℕ _ _ _,
lt_iff_le_not_le := λ a b, @lt_iff_le_not_le ℕ _ _ _,
decidable_le := char.decidable_le,
decidable_eq := char.decidable_eq,
decidable_lt := char.decidable_lt,
..char.has_le, ..char.has_lt }
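/- A quick sanity check (added for illustration, not part of the original file):
since the instance supplies `decidable_lt`, simple comparisons between
characters can be discharged by evaluation. -/
example : 'a' < 'b' := dec_trivial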
lemma char.of_nat_to_nat {c : char} (h : is_valid_char c.to_nat) :
char.of_nat c.to_nat = c :=
begin
rw [char.of_nat, dif_pos h],
cases c,
simp [char.to_nat]
end
|