//
// FILE NAME: $HeadURL: svn+ssh://svn.cm.aol.com/advertising/adlearn/gen1/trunk/lib/cpp/DataProxy/AggregateStreamTransformer.hpp $
//
// REVISION: $Revision: 281517 $
//
// COPYRIGHT: (c) 2008 Advertising.com All Rights Reserved.
//
// LAST UPDATED: $Date: 2013-06-19 17:36:40 -0400 (Wed, 19 Jun 2013) $
// UPDATED BY: $Author: esaxe $
#ifndef _AGGREGATE_STREAM_TRANSFORMER_HPP_
#define _AGGREGATE_STREAM_TRANSFORMER_HPP_
#include "MVException.hpp"
#include "ITransformFunction.hpp"
#include <boost/shared_ptr.hpp>
#include <istream>
#include <map>
#include <string>
MV_MAKEEXCEPTIONCLASS( AggregateStreamTransformerException, MVException );
class AggregateStreamTransformer : public ITransformFunction
{
public:
AggregateStreamTransformer();
virtual ~AggregateStreamTransformer();
virtual boost::shared_ptr<std::istream> TransformInput( boost::shared_ptr< std::istream > i_pInput, const std::map< std::string, std::string >& i_rParameters );
};
#endif //_AGGREGATE_STREAM_TRANSFORMER_HPP_
|
{-# LANGUAGE TemplateHaskell #-}
module Euler.Problem011Test (suite) where
import Numeric.LinearAlgebra.Data (Matrix, loadMatrix)
import Test.Tasty (TestTree)
import Test.Tasty.HUnit
import Test.Tasty.TH (testGroupGenerator)
import Euler.Problem011
suite :: TestTree
suite = $(testGroupGenerator)
matrix :: IO (Matrix Double)
matrix = loadMatrix "input/011.txt"
case_count_of_vectors_in_input :: Assertion
case_count_of_vectors_in_input =
matrix >>= \m -> 10 @=? (length . vectors . head $ submatrices 4 m)
case_count_of_4x4_submatrices_in_input :: Assertion
case_count_of_4x4_submatrices_in_input =
matrix >>= \m -> 17 * 17 @=? (length $ submatrices 4 m)
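-- Why these numbers (added note): each 4×4 submatrix contributes 4 rows +
-- 4 columns + 2 diagonals = 10 vectors, and a 20×20 grid admits
-- (20 − 4 + 1)² = 17² four-by-four submatrices (assuming input/011.txt is
-- the 20×20 grid from Euler problem 11).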
|
import Hw7
import Lean.Elab.Print
import Lean.Elab.Command
theorem desiredType : ∀ (α : Type) (A B : α → Prop), (∃ x, A x) → ∃ y, A y ∨ B y := sorry
open Lean
open Lean.Meta
open Lean.Elab.Command
def collectAxiomsOf (constName : Name) : MetaM (List String) := do
let env ← getEnv
let (_, s) := ((CollectAxioms.collect constName).run env).run {}
let a := s.axioms.toList.map toString
return a
#eval isDefEq (Expr.const ``desiredType []) (Expr.const ``problem2 [])
#eval collectAxiomsOf ``problem2
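-- Sanity check (an added sketch, not from the original file): `desiredType`
-- above is proved by `sorry`, so its axiom list should contain `sorryAx`:
#eval collectAxiomsOf ``desiredType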
|
#define BOOST_TEST_MODULE example
#include <boost/test/included/unit_test.hpp>
struct F {
F() : i( 0 ) { BOOST_TEST_MESSAGE( "setup fixture" ); }
~F() { BOOST_TEST_MESSAGE( "teardown fixture" ); }
int i;
};
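// Note: BOOST_FIXTURE_TEST_CASE constructs a fresh F for every test case,
// so i is 0 on entry to each test below; both i == 1 checks fail, which is
// exactly what this example demonstrates -- fixture state is not shared
// between test cases.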
BOOST_FIXTURE_TEST_CASE( test_case1, F )
{
BOOST_CHECK( i == 1 );
++i;
}
BOOST_FIXTURE_TEST_CASE( test_case2, F )
{
BOOST_CHECK_EQUAL( i, 1 );
}
BOOST_AUTO_TEST_CASE( test_case3 )
{
BOOST_CHECK( true );
}
|
open import Numeral.Natural
open import Relator.Equals
open import Type.Properties.Decidable
open import Type
module Formalization.ClassicalPredicateLogic.Syntax.Substitution
{ℓₚ ℓᵥ ℓₒ}
(Prop : ℕ → Type{ℓₚ})
(Var : Type{ℓᵥ}) ⦃ var-eq-dec : Decidable(2)(_≡_ {T = Var}) ⦄
(Obj : ℕ → Type{ℓₒ})
where
open import Data.Boolean
open import Data.ListSized
import Data.ListSized.Functions as List
open import Formalization.ClassicalPredicateLogic.Syntax(Prop)(Var)(Obj)
private variable n : ℕ
substituteTerm : Var → Term → Term → Term
substituteTerm₊ : Var → Term → List(Term)(n) → List(Term)(n)
substituteTerm v t (var x) = if(decide(2)(_≡_) v x) then t else (var x)
substituteTerm v t (func f x) = func f (substituteTerm₊ v t x)
substituteTerm₊ {0} v t ∅ = ∅
substituteTerm₊ {𝐒(n)} v t (x ⊰ xs) = (substituteTerm v t x ⊰ substituteTerm₊ {n} v t xs)
substitute : Var → Term → Formula → Formula
substitute v t (f $ x) = f $ List.map (substituteTerm v t) x
substitute v t ⊤ = ⊤
substitute v t ⊥ = ⊥
substitute v t (φ ∧ ψ) = (substitute v t φ) ∧ (substitute v t ψ)
substitute v t (φ ∨ ψ) = (substitute v t φ) ∨ (substitute v t ψ)
substitute v t (φ ⟶ ψ) = (substitute v t φ) ⟶ (substitute v t ψ)
substitute v t (Ɐ(x) φ) = Ɐ(x) (if(decide(2)(_≡_) v x) then φ else (substitute v t φ))
substitute v t (∃(x) φ) = ∃(x) (if(decide(2)(_≡_) v x) then φ else (substitute v t φ))
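-- Note (added): substitution stops at a binder that rebinds v (shadowing),
-- but bound variables are never renamed, so this substitution is
-- capture-permitting when t mentions a variable bound by Ɐ or ∃.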
|
Ireland has won more medals in boxing than in any other Olympic sport. Boxing is governed by the Irish Amateur Boxing Association. Michael Carruth won a gold medal and Wayne McCullough won a silver medal in the Barcelona Olympic Games, and in 2008 Kenneth Egan won a silver medal in the Beijing Games. Paddy Barnes secured bronze in those games and gold in the 2010 European Amateur Boxing Championships (where Ireland came 2nd in the overall medal table) and 2010 Commonwealth Games. Katie Taylor has won gold in every European and World championship since 2005. In August 2012 at the Olympic Games in London, Katie Taylor created history by becoming the first Irish woman to win a gold medal in boxing, in the 60 kg lightweight division.
|
lemma norm_of_int [simp]: "norm (of_int z::'a::real_normed_algebra_1) = \<bar>of_int z\<bar>" |
using DataFrames, CSV, Statistics, StatsBase, XLSX
df = DataFrame(XLSX.readtable("./input/Risks Data - 1802.xlsx", 2)...)
select!(df, Symbol.(["Risk Factor Pathway", "Sector", "Model", "Scenario", "Geography", "Units", 2020.0, 2040.0, 2060.0, 2080.0, 2100.0]))
df = stack(df, 7:size(df)[2])
rename!(df, [:year, :value, :category, :variable, :model, :scenario, :region, :unit])
df = df[.|(df.category .== "Revenue"), :]
df = df[.|(df.variable .== "Gas", df.variable .== "Coal", df.variable .== "Oil"), :]
df = df[.|(df.model .== "REMIND-MAgPIE 1.7-3.0"), :]
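# Note (added): .|(pred) with a single predicate is an elementwise no-op
# (Julia defines |(x) = x); it is kept here for symmetry with the
# multi-predicate filter on the variable column above.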
# df = df[.|(df.scenario .== "PEP_NoPolicy", df.scenario .== "PEP_1p5C_red_eff", df.scenario .== "PEP_2C_red_eff"), :]
# df = unstack(df, :year, :value)
df.value = df.value / 1000000
CSV.write("../src/assets/data/oil-risks.csv", df)
# df2 = DataFrame(XLSX.readtable("./input/PepData_FinanceLearnModule.xlsx", 1)...)
# df2 = unstack(df2, :period, :value)
# df2.category = map(v -> v[1], split.(df2.variable, "|"))
# df2.var = map(v -> v[size(v)[1]], split.(df2.variable, "|"))
#
# select!(df2, Symbol.(["model", "scenario", "region", "unit", "var", "category", 2020, 2040, 2060, 2080, 2100]))
#
# df2 = stack(df2, 7:size(df2)[2])
# rename!(df2, [:year, :value, :model, :scenario, :region, :unit, :variable, :category])
#
# df2 = df2[.|(df2.category .== "Primary Energy", df2.category .== "Price"), :]
# df2 = df2[.|(df2.variable .== "Gas", df2.variable .== "Coal", df2.variable .== "Oil"), :]
#
# df2 = unstack(df2, :year, :value)
# df1 = unstack(df, :year, :value)
#
# combo = vcat(df1, df2)
# sort!(combo, [order(:scenario), order(:variable), order(:category)])
#
# CSV.write("../src/assets/data/oil-risks-unstacked.csv", combo)
# CSV.write("../src/assets/data/oil-risks-vars.csv", DataFrame(unique(df2.variable)))
#
# # DataFrame(unique(df2.variable))
# # df2 = unstack(df2, :year, :value)
#
# price = df2[df2.variable .== "Price|Primary Energy|Gas", :]
# CSV.write("../src/assets/data/oil-risks-price.csv", price)
# # price.unit
|
[STATEMENT]
lemma length_row_mat_to_cols_list [simp]:
assumes "i < dim_row A"
shows "length (row (mat_to_cols_list A) i) = dim_col A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (Matrix_Legacy.row (mat_to_cols_list A) i) = dim_col A
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
i < dim_row A
goal (1 subgoal):
1. length (Matrix_Legacy.row (mat_to_cols_list A) i) = dim_col A
[PROOF STEP]
by (simp add: row_def) |
lemma holomorphic_on_add [holomorphic_intros]: "\<lbrakk>f holomorphic_on s; g holomorphic_on s\<rbrakk> \<Longrightarrow> (\<lambda>z. f z + g z) holomorphic_on s" |
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
from torch import nn
import numpy as np
from .line_ocr_engine import BaseEngineLineOCR
# scores_probs should be N,C,T, blank is last class
def greedy_decode_ctc(scores_probs, chars):
best = torch.argmax(scores_probs, 1) + 1
mask = best[:, :-1] == best[:, 1:]
best = best[:, 1:]
best[mask] = 0
best[best == scores_probs.shape[1]] = 0
best = best.cpu().numpy() - 1
outputs = []
for line in best:
line = line[np.nonzero(line >= 0)]
outputs.append(''.join([chars[c] for c in line]))
return outputs
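# A minimal, hypothetical smoke test for greedy_decode_ctc (added sketch, not
# part of the original file): one line (N=1), alphabet 'ab' plus the trailing
# blank (C=3), five frames (T=5) holding blank, 'a', 'a', blank, 'b'.
# Repeated symbols collapse and blanks drop, so the decode is 'ab'.
# Kept as an uncalled helper so importing this module stays side-effect free.
def _demo_greedy_decode():
    scores = torch.zeros(1, 3, 5)  # N, C, T with one-hot "logits"
    for t, cls in enumerate([2, 0, 0, 2, 1]):
        scores[0, cls, t] = 1.0
    assert greedy_decode_ctc(scores, 'ab') == ['ab']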
class PytorchEngineLineOCR(BaseEngineLineOCR):
def __init__(self, json_def, gpu_id=0, batch_size=8):
        super(PytorchEngineLineOCR, self).__init__(json_def, gpu_id=gpu_id, batch_size=batch_size)
self.net_subsampling = 4
self.characters = list(self.characters) + ['|']
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = PYTORCH_NETS[self.net_name]
self.model = net[0](num_classes=len(self.characters), in_height=self.line_px_height, **net[1])
self.model.load_state_dict(torch.load(self.checkpoint, map_location=self.device))
self.model = self.model.to(self.device)
self.model = self.model.eval()
def run_ocr(self, batch_data):
with torch.no_grad():
batch_data = torch.from_numpy(batch_data).to(self.device).float() / 255.0
logits = self.model(batch_data)
decoded = greedy_decode_ctc(logits, self.characters)
logits = logits.permute(0, 2, 1).cpu().numpy()
return decoded, logits
def create_vgg_block_2d(in_channels, out_channels, stride=(2,2), layer_count=2, norm='bn'):
layers = []
for i in range(layer_count):
if norm == 'bn':
layers += [
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
torch.nn.BatchNorm2d(out_channels),
torch.nn.LeakyReLU(),
]
elif norm == 'none':
layers += [
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(),
]
        else:
            raise ValueError(f'Normalization "{norm}" is not implemented')
in_channels = out_channels
layers += [nn.MaxPool2d(kernel_size=stride, stride=stride)]
return nn.Sequential(*layers)
def create_vgg_block_1d(in_channels, out_channels, stride=(2,2), layer_count=2, norm='bn'):
layers = []
for i in range(layer_count):
if norm == 'bn':
layers += [
nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
torch.nn.BatchNorm1d(out_channels),
torch.nn.LeakyReLU(),
]
elif norm == 'none':
layers += [
nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(),
]
        else:
            raise ValueError(f'Normalization "{norm}" is not implemented')
in_channels = out_channels
return nn.Sequential(*layers)
class NET_VGG(nn.Module):
def __init__(self, num_classes, in_height=32, base_channels=16, conv_blocks=4, subsampling=4, in_channels=3, layers_2d=None):
super(NET_VGG, self).__init__()
if layers_2d is None:
layers_2d = 16
if type(layers_2d) is int:
import torchvision
vgg = torchvision.models.vgg16(pretrained=True)
layers_2d = list(vgg.features[:layers_2d])
start_level = 0
self.blocks_2d = []
actual_subsampling_h = 1
actual_subsampling_v = 1
for layer in layers_2d:
if type(layer) == torch.nn.modules.pooling.MaxPool2d:
if actual_subsampling_h < subsampling:
stride = (2, 2)
else:
stride = (2, 1)
self.blocks_2d += [nn.MaxPool2d(kernel_size=stride, stride=stride)]
actual_subsampling_h *= stride[1]
actual_subsampling_v *= stride[0]
start_level += 1
else:
self.blocks_2d.append(layer)
if type(layer) == torch.nn.modules.conv.Conv2d:
in_channels = layer.bias.shape[0]
out_channels = in_channels
for i in range(start_level, conv_blocks):
out_channels = base_channels*(2**i)
if actual_subsampling_h < subsampling:
stride=(2, 2)
else:
stride=(2, 1)
actual_subsampling_h *= stride[1]
actual_subsampling_v *= stride[0]
self.blocks_2d += [
create_vgg_block_2d(in_channels, out_channels, stride=stride, norm='none'),
torch.nn.BatchNorm2d(out_channels),
]
in_channels = out_channels
self.blocks_2d = nn.Sequential(*self.blocks_2d)
self.block_1d = create_vgg_block_1d(in_channels , out_channels)
        self.gru = torch.nn.LSTM(out_channels, out_channels // 2, num_layers=2, bidirectional=True)  # named "gru" for checkpoint compatibility, but actually an LSTM
self.output_layer = nn.Conv1d(out_channels, num_classes, kernel_size=3, stride=1, padding=1)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
out = self.blocks_2d(x)
out = torch.mean(out, 2)
out = self.block_1d(out)
out, _ = self.gru(out.permute(2, 0, 1))
out = out.permute(1, 2, 0)
out = self.output_layer(out)
return out
class VGG_conv_module(nn.Module):
def __init__(self, base_channels=16, conv_blocks=4, subsampling=4, in_channels=3, layers_2d=None):
super(VGG_conv_module, self).__init__()
if layers_2d is None:
layers_2d = 16
if type(layers_2d) is int:
import torchvision
vgg = torchvision.models.vgg16(pretrained=True)
layers_2d = list(vgg.features[:layers_2d])
start_level = 0
self.blocks_2d = []
actual_subsampling_h = 1
actual_subsampling_v = 1
for layer in layers_2d:
if type(layer) == torch.nn.modules.pooling.MaxPool2d:
if actual_subsampling_h < subsampling:
stride = (2, 2)
else:
stride = (2, 1)
self.blocks_2d += [nn.MaxPool2d(kernel_size=stride, stride=stride)]
actual_subsampling_h *= stride[1]
actual_subsampling_v *= stride[0]
start_level += 1
else:
self.blocks_2d.append(layer)
if type(layer) == torch.nn.modules.conv.Conv2d:
in_channels = layer.bias.shape[0]
print('Pretrained layers')
print(self.blocks_2d)
out_channels = in_channels
for i in range(start_level, conv_blocks):
out_channels = base_channels*(2**i)
if actual_subsampling_h < subsampling:
stride = (2, 2)
else:
stride = (2, 1)
actual_subsampling_h *= stride[1]
actual_subsampling_v *= stride[0]
self.blocks_2d += [
create_vgg_block_2d(in_channels, out_channels, stride=stride, norm='none'),
torch.nn.BatchNorm2d(out_channels),
]
in_channels = out_channels
self.blocks_2d = nn.Sequential(*self.blocks_2d)
self.out_channels = out_channels
def forward(self, x):
return self.blocks_2d(x.contiguous())
class MultiscaleRecurrentBlock(nn.Module):
def __init__(self, channels, layers_per_scale=2, scales=4):
super(MultiscaleRecurrentBlock, self).__init__()
self.layers = nn.ModuleList([torch.nn.LSTM(channels, channels // 2, num_layers=layers_per_scale, bidirectional=True)
for scale in range(scales)])
self.final_layer = torch.nn.LSTM(channels, channels // 2, num_layers=1, bidirectional=True)
def forward(self, x):
outputs = []
for depth, layer in enumerate(self.layers):
if depth == 0:
scaled_data = x
else:
scaled_data = torch.nn.functional.max_pool1d(scaled_data, kernel_size=2, stride=2)
out, _ = layer(scaled_data.permute(2, 0, 1))
out = out.permute(1, 2, 0)
if depth != 0:
out = torch.nn.functional.interpolate(out, scale_factor=2**depth, mode='nearest')
outputs.append(out)
out = outputs[0]
for output in outputs[1:]:
out = out + output
out, _ = self.final_layer(out.permute(2, 0, 1))
return out.permute(1, 2, 0)
class NET_VGG_LSTM(nn.Module):
def __init__(self, num_classes, in_height=32, in_channels=3, dropout_rate=0.0, base_channels=16, conv_blocks=4,
subsampling=4, layers_2d=None):
super(NET_VGG_LSTM, self).__init__()
self.output_subsampling = subsampling
self.blocks_2d = VGG_conv_module(base_channels=base_channels, conv_blocks=conv_blocks, subsampling=subsampling,
in_channels=in_channels, layers_2d=layers_2d)
rnn_channels = self.blocks_2d.out_channels
self.recurrent_block = MultiscaleRecurrentBlock(rnn_channels, layers_per_scale=2, scales=3)
self.output_layer = nn.Conv1d(rnn_channels, num_classes, kernel_size=3, stride=1, padding=1)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
out = self.blocks_2d(x)
out, _ = torch.max(out, 2)
out = self.recurrent_block(out)
out = self.output_layer(out)
return out
PYTORCH_NETS = {
"VGG_B32_L16_S4_CB4": (NET_VGG, {'in_channels': 3, 'base_channels': 32, 'conv_blocks': 4, 'subsampling': 4, 'layers_2d': 16}),
"VGG_LSTM_B64_L17_S4_CB4": (NET_VGG_LSTM, {'in_channels': 3, 'base_channels': 64, 'conv_blocks': 4, 'subsampling': 4, 'layers_2d': 17})
}
|
#ifndef BIO_GSL_H_
#define BIO_GSL_H_
#include "bio/defs.h"
#include "bio/raii.h"
#include <gsl/gsl_integration.h>
#include <gsl/gsl_machine.h>   // GSL_LOG_DBL_MIN, used by BIO_GSL_EXP
#include <gsl/gsl_sf_exp.h>    // gsl_sf_exp, used by BIO_GSL_EXP
BIO_NS_START
void
gsl_init();
template <>
RAII<gsl_integration_workspace *>::~RAII();
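// Underflow guard (added note): gsl_sf_exp reports an error for arguments
// below GSL_LOG_DBL_MIN, where the result would round to zero anyway, so
// the macro short-circuits to 0.0 instead of calling into GSL.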
#define BIO_GSL_EXP(x) ((x) < GSL_LOG_DBL_MIN ? 0.0 : gsl_sf_exp(x))
BIO_NS_END
#endif //BIO_GSL_H_
|
module IdrisJvm.Data.IORef
import IdrisJvm.IO
export
data IORef a = MkIORef a
public export
interface HasReference (ffi : FFI) where
newIORef' : a -> IO' ffi (IORef a)
readIORef' : IORef a -> IO' ffi a
writeIORef' : IORef a -> a -> IO' ffi ()
export
modifyIORef': HasReference ffi => IORef a -> (a -> a) -> IO' ffi ()
modifyIORef' ref fn =
do
val <- readIORef' ref
writeIORef' ref (fn val)
export
Ref : Type
Ref = JVM_Native (Class ("io/github/mmhelloworld/idrisjvm/runtime/Ref"))
export
implementation HasReference FFI_JVM where
newIORef' val = (MkIORef . believe_me) <$> FFI.new (Object -> JVM_IO Ref) (believe_me val)
readIORef' (MkIORef ref) = believe_me <$> invokeInstance "getValue" (Ref -> JVM_IO Object) (believe_me ref)
writeIORef' (MkIORef ref) val = invokeInstance "setValue" (Ref -> Object -> JVM_IO ()) (believe_me ref) (believe_me val)
||| Build a new IORef
export
newIORef : a -> JVM_IO (IORef a)
newIORef = newIORef'
||| read the value of an IORef
export
readIORef : IORef a -> JVM_IO a
readIORef = readIORef'
||| write the value of an IORef
export
writeIORef : IORef a -> a -> JVM_IO ()
writeIORef = writeIORef'
||| mutate the contents of an IORef
export
modifyIORef : IORef a -> (a -> a) -> JVM_IO ()
modifyIORef = modifyIORef'
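-- A hypothetical usage sketch (not part of the original module):
-- counter : JVM_IO Integer
-- counter = do ref <- newIORef 0
--              modifyIORef ref (\x => x + 1)
--              readIORef ref   -- yields 1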
|
record T where
constructor mkT
|
paintings and objects will be shown.
Fine arts are an important part of creative and culture economy.
to familiarize people with their ideas and critical works.
against evil spirits, especially the evil influences of household spirits -Kikimora.
We as freelance artists still need protection against Kikimoras.
Thus, the name has been chosen.
The charms shall protect the room and the exhibition.
The works will be displayed until October 10th, 2015.
" 5 vor 12 "
5 vor 12 - 7.00 P.M.
as well as art from Nepal.
The complete income will benefit the victims of the Nepal earthquake. |
lemma supp_sum_empty[simp]: "supp_sum f {} = 0" |
theory prop_18
imports Main
"$HIPSTER_HOME/IsaHipster"
begin
datatype Nat = Z | S "Nat"
fun plus :: "Nat => Nat => Nat" where
"plus (Z) y = y"
| "plus (S z) y = S (plus z y)"
fun lt :: "Nat => Nat => bool" where
"lt x (Z) = False"
| "lt (Z) (S z) = True"
| "lt (S x2) (S z) = lt x2 z"
(*hipster plus lt *)
theorem x0 :
"lt i (S (plus i m))"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)
end
|
module RandomAccessList.Zeroless where
open import RandomAccessList.Zeroless.Core
open import RandomAccessList.Zeroless.Core.Properties
open import BuildingBlock.BinaryLeafTree using (BinaryLeafTree; Node; Leaf; split)
import BuildingBlock.BinaryLeafTree as BLT
open import Data.Fin using (Fin; fromℕ; fromℕ≤; reduce≥; toℕ)
import Data.Fin as Fin
open import Data.Nat
open import Data.Nat.DivMod
-- open import Data.Nat.Properties.Simple
open import Data.Nat.Etc
open import Data.Product
open import Function
open import Relation.Nullary using (yes; no)
open import Relation.Nullary.Negation using (contradiction; contraposition)
open import Relation.Binary.PropositionalEquality as PropEq
using (_≡_; _≢_; refl; cong; trans; sym; inspect)
open PropEq.≡-Reasoning
--------------------------------------------------------------------------------
-- Operations
--------------------------------------------------------------------------------
-- cons
consₙ : ∀ {n A} → BinaryLeafTree A n → 1-2-RAL A n → 1-2-RAL A n
consₙ a ( []) = a 1∷ []
consₙ a (x 1∷ xs) = a , x 2∷ xs
consₙ a (x , y 2∷ xs) = a 1∷ consₙ (Node x y) xs
cons : ∀ {A} → A → 1-2-RAL A 0 → 1-2-RAL A 0
cons a xs = consₙ (Leaf a) xs
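-- Note (added): consₙ is the increment of a zeroless (1-2) binary number:
-- an empty list gains a 1-digit, a 1-digit becomes a 2-digit, and a 2-digit
-- overflows -- its two trees are paired into one tree of the next rank and
-- the carry propagates.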
-- head
headₙ : ∀ {n A} → (xs : 1-2-RAL A n) → ⟦ xs ⟧ ≢ 0 → BinaryLeafTree A n
headₙ {n} {A} [] p = contradiction (⟦[]⟧≡0 ([] {A} {n}) refl) p
headₙ (x 1∷ xs) p = x
headₙ (x , y 2∷ xs) p = x
head : ∀ {A} → (xs : 1-2-RAL A 0) → ⟦ xs ⟧ ≢ 0 → A
head xs p = BLT.head (headₙ xs p)
-- tail
tailₙ : ∀ {n A} → (xs : 1-2-RAL A n) → ⟦ xs ⟧ ≢ 0 → 1-2-RAL A n
tailₙ [] p = []
tailₙ (x 1∷ xs) p with ⟦ xs ⟧ ≟ 0
tailₙ (x 1∷ xs) p | yes q = []
tailₙ (x 1∷ xs) p | no ¬q =
let y₀ = proj₁ (split (headₙ xs ¬q))
y₁ = proj₂ (split (headₙ xs ¬q))
in y₀ , y₁ 2∷ tailₙ xs ¬q
tailₙ (x , y 2∷ xs) p = y 1∷ xs
tail : ∀ {A} → (xs : 1-2-RAL A 0) → ⟦ xs ⟧ ≢ 0 → 1-2-RAL A 0
tail = tailₙ
--------------------------------------------------------------------------------
-- Searching
--------------------------------------------------------------------------------
{-
data Occurrence : ℕ → Set where
here : ∀ {n b} → ℕ → Fin (b * 2 ^ n) → Occurrence n
there : ∀ {n} → Occurrence n
transportFin : ∀ {a b} → a ≡ b → Fin a → Fin b
transportFin refl i = i
search : ∀ {n A} → (xs : 1-2-RAL A n) → ℕ → Occurrence n
search [] i = there
search {n} (x 1∷ xs) i with (1 * 2 ^ n) ≤? i
search (x 1∷ xs) i | yes p = there
search {n} (x 1∷ xs) i | no ¬p = here 0 (fromℕ≤ (m≰n⇒n<m (1 * 2 ^ n) i ¬p))
search {n} (x , y 2∷ xs) i with (2 * 2 ^ n) ≤? i
search (x , y 2∷ xs) i | yes p = there
search {n} (x , y 2∷ xs) i | no ¬p with i divMod (2 ^ n)
search (x , y 2∷ xs) i | no ¬p | result zero remainder _ = here zero {! !}
search (x , y 2∷ xs) i | no ¬p | result (suc quotient) remainder _ = {! !}
elemAt : ∀ {n A} → (xs : 1-2-RAL A n) → Fin ⟦ xs ⟧ → A
elemAt {n} {A} [] i = contradiction (transportFin (⟦[]⟧≡0 ([] {A} {n}) refl) i) (λ ())
elemAt (x 1∷ xs) i = {! !}
elemAt (x , y 2∷ xs) i = {! !}
splitIndex1∷ : ∀ {n A} → (x : BinaryLeafTree A n) → (xs : 1-2-RAL A (suc n)) → ⟦ x 1∷ xs ⟧ ≡ (2 ^ n) + ⟦ xs ⟧
splitIndex1∷ {n} x xs =
begin
⟦ x 1∷ xs ⟧
≡⟨ +-*-suc (2 ^ n) (2 * ⟦ xs ⟧ₙ) ⟩
2 ^ n + 2 ^ n * (2 * ⟦ xs ⟧ₙ)
≡⟨ cong (_+_ (2 ^ n)) (sym (*-assoc (2 ^ n) 2 ⟦ xs ⟧ₙ)) ⟩
2 ^ n + 2 ^ n * 2 * ⟦ xs ⟧ₙ
≡⟨ cong (λ w → 2 ^ n + w * ⟦ xs ⟧ₙ) (*-comm (2 ^ n) 2) ⟩
(2 ^ n) + ⟦ xs ⟧
∎
n+n≡2*n : (n : ℕ) → n + n ≡ 2 * n
n+n≡2*n n = cong (_+_ n) (sym (+-right-identity n))
splitIndex2∷ : ∀ {n A}
→ (x : BinaryLeafTree A n)
→ (y : BinaryLeafTree A n)
→ (xs : 1-2-RAL A (suc n)) → ⟦ x , y 2∷ xs ⟧ ≡ 2 * (2 ^ n) + ⟦ xs ⟧
splitIndex2∷ {n} x y xs =
begin
⟦ x , y 2∷ xs ⟧
≡⟨ +-*-suc (2 ^ n) (suc (2 * ⟦ xs ⟧ₙ)) ⟩
2 ^ n + 2 ^ n * suc (2 * ⟦ xs ⟧ₙ)
≡⟨ cong (_+_ (2 ^ n)) (+-*-suc (2 ^ n) (2 * ⟦ xs ⟧ₙ)) ⟩
2 ^ n + (2 ^ n + 2 ^ n * (2 * ⟦ xs ⟧ₙ))
≡⟨ sym (+-assoc (2 ^ n) (2 ^ n) (2 ^ n * (2 * ⟦ xs ⟧ₙ))) ⟩
2 ^ n + 2 ^ n + 2 ^ n * (2 * ⟦ xs ⟧ₙ)
≡⟨ cong (λ w → 2 ^ n + 2 ^ n + w) (sym (*-assoc (2 ^ n) 2 ⟦ xs ⟧ₙ)) ⟩
2 ^ n + 2 ^ n + 2 ^ n * 2 * ⟦ xs ⟧ₙ
≡⟨ cong (λ w → 2 ^ n + 2 ^ n + w * ⟦ xs ⟧ₙ) (*-comm (2 ^ n) 2) ⟩
2 ^ n + 2 ^ n + 2 * 2 ^ n * ⟦ xs ⟧ₙ
≡⟨ cong (λ w → w + 2 * 2 ^ n * ⟦ xs ⟧ₙ) (n+n≡2*n (2 ^ n)) ⟩
2 * 2 ^ n + ⟦ xs ⟧
∎
{-
elemAt : ∀ {n A} → (xs : 1-2-RAL A n) → Fin ⟦ xs ⟧ → A
elemAt ( []) ()
elemAt {n} (x 1∷ xs) i with (2 ^ n) ≤? toℕ i
elemAt {n} (x 1∷ xs) i | yes p rewrite splitIndex1∷ x xs = elemAt xs (reduce≥ i p)
elemAt (x 1∷ xs) i | no ¬p = BLT.elemAt x (fromℕ≤ (BLT.¬a≤b⇒b<a ¬p))
elemAt {n} (x , y 2∷ xs) i with (2 * (2 ^ n)) ≤? toℕ i
elemAt (x , y 2∷ xs) i | yes p rewrite splitIndex2∷ x y xs = elemAt xs (reduce≥ i p)
elemAt {n} (x , y 2∷ xs) i | no ¬p with (2 ^ n) ≤? toℕ i
elemAt (x , y 2∷ xs) i | no ¬p | yes q rewrite splitIndex2∷ x y xs = BLT.elemAt y {! !} -- y
elemAt (x , y 2∷ xs) i | no ¬p | no ¬q = BLT.elemAt x (fromℕ≤ (BLT.¬a≤b⇒b<a ¬q)) -- x
-}
-- reduce≥ : ∀ {m n} (i : Fin (m N+ n)) (i≥m : toℕ i N≥ m) → Fin n
-- i : 2 * 2 ^ n + (2 * 2 ^ n) * ⟦ xs ⟧ₙ
-- m : 2 ^ n
-}
|
1-9. The gakkul vat, the gakkul vat! The gakkul vat, the lamsare vat! The gakkul vat, which puts us in a happy mood! The lamsare vat, which makes the heart rejoice! The ugurbal jar, glory of the house! The šaggub jar, filled with beer! The amam jar, which carries the beer from the lamsare vat! The troughs made with bur grass and the pails for kneading the dough! All the beautiful vessels are ready on their pot stands!
10-20. May the heart of your god be well disposed towards you! Let the eye of the gakkul vat be our eye, and let the heart of the gakkul vat be our heart! What makes your heart feel wonderful in itself also makes our hearts feel wonderful in themselves! We are in a happy mood, our hearts are joyful! You have poured a libation over the fated brick, and you have laid the foundations in peace and prosperity -- now may Ninkasi dwell with you! She should pour beer and wine for you! Let the pouring of the sweet liquor resound pleasantly for you!
21-31. In the troughs made with bur grass, there is sweet beer. I will have the cupbearers, the boys and the brewers stand by. As I spin around the lake of beer, while feeling wonderful, feeling wonderful, while drinking beer, in a blissful mood, while drinking alcohol and feeling exhilarated, with joy in the heart and a contented liver -- my heart is a heart filled with joy! I clothe my contented liver in a garment fit for a queen! The heart of Inana is happy once again; the heart of Inana is happy once again!
32. A …… to Ninkasi. |
import LMT
variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]
example {a1 a2 a3 : A I E} :
((((a1).write i3 (v1)).write i2 (v1)).read i3) ≠ (v1) → False := by
arr
|
"""
Bridges `CP.Conjunction` to reification.
"""
struct Conjunction2ReificationBridge{T} <: MOIBC.AbstractBridge
var::MOI.VariableIndex
var_bin::MOI.ConstraintIndex{MOI.VariableIndex, MOI.ZeroOne}
cons_reif::Vector{MOI.ConstraintIndex}
# Ideally, Vector{MOI.ConstraintIndex{MOI.VectorAffineFunction{T}, CP.Reification{<: MOI.AbstractSet}}},
# but Julia has no notion of type erasure.
con_conjunction::MOI.ConstraintIndex{MOI.VariableIndex, MOI.EqualTo{T}}
end
function MOIBC.bridge_constraint(
::Type{Conjunction2ReificationBridge{T}},
model,
f::MOI.VectorOfVariables,
s::CP.Conjunction{S},
) where {T, S}
return MOIBC.bridge_constraint(
Conjunction2ReificationBridge{T},
model,
MOI.VectorAffineFunction{T}(f),
s,
)
end
function MOIBC.bridge_constraint(
::Type{Conjunction2ReificationBridge{T}},
model,
f::MOI.VectorAffineFunction{T},
s::CP.Conjunction{S},
) where {T, S}
var, var_bin = MOI.add_constrained_variable(model, MOI.ZeroOne())
f_scalars = MOIU.scalarize(f)
cons_reif = Vector{MOI.ConstraintIndex}(undef, length(s.constraints))
cur_dim = 1
for i in 1:length(s.constraints)
cons_reif[i] = MOI.add_constraint(
model,
MOIU.vectorize(
[
one(T) * var,
f_scalars[cur_dim : (cur_dim + MOI.dimension(s.constraints[i]) - 1)]...,
]
),
CP.Reification(s.constraints[i])
)
cur_dim += MOI.dimension(s.constraints[i])
end
con_conjunction = MOI.add_constraint(
model,
var,
MOI.EqualTo(one(T))
)
return Conjunction2ReificationBridge(var, var_bin, cons_reif, con_conjunction)
end
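# Encoding note (added): every sub-constraint of the conjunction is reified
# against the same binary variable, and that variable is then fixed to one,
# which forces all reified sub-constraints to hold simultaneously.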
function MOI.supports_constraint(
::Type{Conjunction2ReificationBridge{T}},
::Union{Type{MOI.VectorOfVariables}, Type{MOI.VectorAffineFunction{T}}},
::Type{CP.Conjunction{S}},
) where {T, S}
return true
# Ideally, ensure that the underlying solver supports all the needed
# reified constraints:
# return all(MOI.supports_constraint(model, type, CP.Reification{C}) for C in S.parameters)
end
function MOIB.added_constrained_variable_types(::Type{Conjunction2ReificationBridge{T}}) where {T}
return [(MOI.ZeroOne,)]
end
function MOIB.added_constraint_types(::Type{Conjunction2ReificationBridge{T}}) where {T}
return [
(MOI.VectorAffineFunction{T}, CP.Reification), # TODO: how to be more precise?
(MOI.VariableIndex, MOI.EqualTo{T}),
]
end
function MOI.get(::Conjunction2ReificationBridge{T}, ::MOI.NumberOfVariables) where {T}
return 1
end
function MOI.get(
::Conjunction2ReificationBridge{T},
::MOI.NumberOfConstraints{
MOI.VariableIndex, MOI.ZeroOne,
},
) where {T}
return 1
end
function MOI.get(
b::Conjunction2ReificationBridge{T},
::MOI.NumberOfConstraints{
MOI.VectorAffineFunction{T}, CP.Reification,
},
) where {T}
return length(b.cons_reif)
end
function MOI.get(
::Conjunction2ReificationBridge{T},
::MOI.NumberOfConstraints{
MOI.VariableIndex, MOI.EqualTo{T},
},
) where {T}
return 1
end
function MOI.get(
b::Conjunction2ReificationBridge{T},
::MOI.ListOfVariableIndices,
) where {T}
return [b.var]
end
function MOI.get(
b::Conjunction2ReificationBridge{T},
::MOI.ListOfConstraintIndices{
MOI.VariableIndex, MOI.ZeroOne,
},
) where {T}
return [b.var_bin]
end
function MOI.get(
b::Conjunction2ReificationBridge{T},
::MOI.ListOfConstraintIndices{
MOI.VectorAffineFunction{T}, CP.Reification,
},
) where {T}
return copy(b.cons_reif)
end
function MOI.get(
b::Conjunction2ReificationBridge{T},
::MOI.ListOfConstraintIndices{
MOI.VariableIndex, MOI.EqualTo{T},
},
) where {T}
return [b.con_conjunction]
end
|
Section Approx2.
Variable Model : Type.
Definition BoolF : Type := Model -> bool.
Definition unsat (x : BoolF) : Prop :=
forall m, ~ eq_true (x m)
.
(* We approximate an element as:
p - condition under which the element is known.
a - value of the element when known.
b - value of the element when unknown.
*)
Definition approx (T : Type) (x : Model -> T) (p : BoolF) (a : Model -> T) (b : Model -> T) : Prop
:= (forall t, x t = if p t then a t else b t)
.
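(* Two limiting cases, for intuition: with p = fun _ => true the element x
   coincides with a everywhere (fully known); with p = fun _ => false it
   coincides with b (nothing known). *)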
Theorem approx_useful_sat : forall x xp xa xb m,
approx x xp xa xb ->
eq_true (andb (xp m) (xa m)) ->
eq_true (x m)
.
Theorem approx_useful_unsat : forall x xp xa xb,
approx x xp xa xb ->
unsat
Lemma i_implies_o : forall xi xo xx,
approx xi xo xx ->
forall m, eq_true (xi m) -> eq_true (xo m)
.
Proof.
intros xi xo xx Ha m Hxi.
unfold approx in Ha.
decompose [and] Ha.
specialize H with m.
specialize H0 with m.
exact (H0 (H Hxi)).
Qed.
(* An approximation for something known completely *)
Theorem approx_known : forall x,
approx x x x
.
Proof.
intros x.
unfold approx.
split ; intros ; assumption.
Qed.
(* An approximation for something completely unknown *)
Theorem approx_unknown : forall x,
approx (fun _ => false) (fun _ => true) x
.
Proof.
intros x.
unfold approx.
split.
intros m Habsurd. inversion Habsurd.
split.
Qed.
Theorem approx_not : forall xi xo x,
approx xi xo x ->
approx (fun m => negb (xo m)) (fun m => negb (xi m)) (fun m => negb (x m))
.
Proof.
intros xi xo x Happrox.
unfold approx in *.
decompose [and] Happrox.
unfold not in *.
split.
intros m Ho. specialize H0 with m. destruct (x m).
set (Hno := H0 is_eq_true). destruct (xo m) ; assumption.
simpl. exact is_eq_true.
intros m Hx. specialize H with m. destruct (xi m).
set (Hnx := H is_eq_true). destruct (x m) ; assumption.
exact is_eq_true.
Qed.
Theorem approx_and : forall xi xo xx yi yo yy,
approx xi xo xx ->
approx yi yo yy ->
approx (fun m => andb (xi m) (yi m)) (fun m => andb (xo m) (yo m)) (fun m => andb (xx m) (yy m))
.
Proof.
intros xi xo xx yi yo yy Hax Hay.
unfold approx in *.
split.
intros m His.
decompose [and] Hax.
specialize H with m.
decompose [and] Hay.
specialize H1 with m.
destruct (xx m).
destruct (yy m).
exact (is_eq_true).
destruct (yi m).
exact (H1 is_eq_true).
destruct (xi m).
simpl in His. elim His. exact is_eq_true.
simpl in His. elim His. exact is_eq_true.
simpl.
destruct (xi m).
exact (H is_eq_true).
apply His.
intros m Hxy.
decompose [and] Hax.
specialize H0 with m.
decompose [and] Hay.
specialize H2 with m.
destruct (xo m).
destruct (yo m).
exact (is_eq_true).
apply H2. destruct (yy m).
exact is_eq_true.
destruct (xx m) ; apply Hxy.
destruct (xx m).
exact (H0 is_eq_true).
apply Hxy.
Qed.
(* Approximation of if then else *)
Theorem approx_ite : forall pi po p xi xo x yi yo y,
approx pi po p ->
approx xi xo x ->
approx yi yo y ->
approx (fun m => orb (andb (pi m) (xi m)) (andb (negb (po m)) (yi m)))
(fun m => orb (andb (po m) (xo m)) (andb (negb (pi m)) (yo m)))
(fun m => if p m then x m else y m)
.
Proof.
intros pi po p xi xo x yi yo y Hap Hax Hay.
unfold approx in *.
split.
intros m Htr.
decompose [and] Hap.
decompose [and] Hax.
decompose [and] Hay.
specialize H with m.
specialize H0 with m.
specialize H1 with m.
specialize H3 with m.
destruct (pi m), (po m), (p m), (xi m), (xo m), (x m), (yi m), (yo m), (y m) ; auto.
intros m Htr.
decompose [and] Hap.
decompose [and] Hax.
decompose [and] Hay.
specialize H with m.
specialize H0 with m.
specialize H2 with m.
specialize H4 with m.
destruct (pi m), (po m), (p m), (xi m), (xo m), (x m), (yi m), (yo m), (y m) ; auto.
Qed.
Variable Elem : Type.
Definition ElemF : Type := Model -> Elem.
(* We approximate an assignment-parameterized element x using:
p - a predicate determining when the approximation is exact
a - the approximation.
*)
Definition eapprox (p : BoolF) (a : ElemF) (x : ElemF) : Prop
:= forall m, eq_true (p m) -> a(m) = x(m)
.
Theorem eapprox_known : forall x,
eapprox (fun _ => true) x x
.
Proof.
intros x m Hp. reflexivity.
Qed.
Theorem eapprox_unknown : forall x bot,
eapprox (fun _ => false) bot x
.
Proof.
intros x bot m Hp.
inversion Hp.
Qed.
Theorem approx_unary : forall f xp xa xx,
eapprox xp xa xx ->
approx (fun m => andb (xp m) (f (xa m)))
(fun m => orb (negb (xp m)) (f (xa m)))
(fun m => f (xx m))
.
intros f xp xa xx Hax.
split.
unfold eapprox in Hax.
intro m.
specialize Hax with m.
destruct (xp m).
simpl.
rewrite (Hax is_eq_true).
trivial.
simpl.
intro Hsilly.
inversion Hsilly.
intros m Htr.
unfold eapprox in Hax.
specialize Hax with m.
destruct (xp m).
rewrite (Hax is_eq_true).
trivial.
exact (is_eq_true).
Qed.
Theorem approx_binary : forall f xp xa xx yp ya yy,
eapprox xp xa xx ->
eapprox yp ya yy ->
approx (fun m => andb (xp m) (andb (yp m) (f (xa m) (ya m))))
(fun m => orb (negb (xp m)) (orb (negb (yp m)) (f (xa m) (ya m))))
(fun m => f (xx m) (yy m))
.
Proof.
intros f xp xa xx yp ya yy Hax Hay.
split ; intros m H; unfold eapprox in *;
specialize Hax with m; specialize Hay with m ;
destruct (xp m), (yp m) ; simpl in H.
rewrite <- (Hax is_eq_true).
rewrite <- (Hay is_eq_true).
assumption.
inversion H.
inversion H.
inversion H.
rewrite (Hax is_eq_true).
rewrite (Hay is_eq_true).
assumption.
exact (is_eq_true).
exact (is_eq_true).
exact (is_eq_true).
Qed.
Theorem eapprox_unary : forall f xp xa xx,
eapprox xp xa xx ->
eapprox xp (fun m => f (xa m)) (fun m => f (xx m))
.
Proof.
intros f xp xa xx Hax.
unfold eapprox in *.
intros m Heq.
specialize Hax with m.
rewrite (Hax Heq).
reflexivity.
Qed.
Theorem eapprox_binary : forall f xp xa xx yp ya yy,
eapprox xp xa xx ->
eapprox yp ya yy ->
eapprox (fun m => andb (xp m) (yp m)) (fun m => f (xa m) (ya m)) (fun m => f (xx m) (yy m))
.
Proof.
intros f xp xa xx yp ya yy Hax Hay.
unfold eapprox in *.
intros m Heq.
specialize Hax with m.
specialize Hay with m.
destruct (xp m), (yp m).
rewrite (Hax is_eq_true).
rewrite (Hay is_eq_true).
reflexivity.
inversion Heq.
inversion Heq.
inversion Heq.
Qed.
Theorem eapprox_ite : forall pi po pp xp xa xx yp ya yy,
approx pi po pp ->
eapprox xp xa xx ->
eapprox yp ya yy ->
eapprox (fun m => orb (andb (pi m) (xp m)) (andb (negb (po m)) (yp m)))
(fun m => if pi m then xa m else ya m)
(fun m => if pp m then xx m else yy m)
.
Proof.
intros pi po pp xp xa xx yp ya yy Hap Hax Hay m H.
unfold approx in Hap.
decompose [and] Hap.
unfold eapprox in *.
specialize H0 with m.
specialize H1 with m.
specialize Hax with m.
specialize Hay with m.
destruct (pi m), (pp m), (xp m).
rewrite (Hax is_eq_true) ; reflexivity.
destruct (po m).
inversion H.
rewrite (Hax (H1 is_eq_true)) ; reflexivity.
set (Hsilly := H0 is_eq_true).
inversion Hsilly.
set (Hsilly := H0 is_eq_true) ; inversion Hsilly.
destruct (po m).
inversion H.
set (Hsilly := H1 is_eq_true) ; inversion Hsilly.
destruct (po m).
inversion H.
set (Hsilly := H1 is_eq_true) ; inversion Hsilly.
destruct (yp m).
exact (Hay is_eq_true).
destruct (po m) ; inversion H.
destruct (yp m).
exact (Hay is_eq_true).
destruct (po m) ; inversion H.
Qed.
End Approx2.
|
Set Implicit Arguments.
Require Import FCF.
Require Import PRF.
Require Import RndInList.
Local Open Scope list_scope.
Inductive in_oc_supp : forall (A B C : Set), C -> OracleComp A B C -> Prop :=
| in_oc_supp_Bind :
forall (A B C C' : Set)(c : OracleComp A B C')(f : C' -> OracleComp A B C) x y,
in_oc_supp x c ->
in_oc_supp y (f x) ->
in_oc_supp y (OC_Bind c f)
| in_oc_supp_Query :
forall (A B : Set)(a : A)(b : B),
in_oc_supp b (OC_Query B a)
| in_oc_supp_Ret :
forall (A B C : Set)(c : Comp C) x,
In x (getSupport c) ->
in_oc_supp x (OC_Ret A B c)
| in_oc_supp_Run :
forall (A A' B B' C S : Set)(eqds : EqDec S)(eqda : EqDec A)(eqdb : EqDec B)
(c : OracleComp A B C)(oc : S -> A -> OracleComp A' B' (B * S)) s x s',
in_oc_supp x c ->
in_oc_supp (x, s') (OC_Run _ _ _ c oc s).
Theorem in_oc_supp_complete :
forall (A B C : Set){eqdc : EqDec C}(c : OracleComp A B C) x
(S : Set)(eqds : EqDec S)(o : S -> A -> Comp (B * S))(s : S) s',
In (x, s') (getSupport (c _ _ o s)) ->
in_oc_supp x c .
induction c; intuition; repeat simp_in_support.
econstructor.
Local Opaque getSupport.
simpl in *.
repeat simp_in_support.
destruct x.
simpl.
destruct p.
simpl.
econstructor.
eapply (@IHc _ _ (S * S0)%type).
eapply H1.
simpl in *.
repeat simp_in_support.
econstructor.
intuition.
assert (EqDec C).
eapply oc_EqDec.
eapply c.
intuition.
assert (B * S).
eapply comp_base_exists.
eapply o0.
trivial.
intuition.
intuition.
intuition.
assert (EqDec (B * S)).
eapply comp_EqDec.
eapply o0; trivial.
eapply EqDec_pair_l.
eapply H2.
trivial.
simpl in *.
repeat simp_in_support.
destruct x0.
econstructor.
eapply (@H0 _ S).
eapply H2.
eapply (@H _ _ _ S).
eapply H3.
Grab Existential Variables.
eapply EqDec_pair_l.
eauto.
trivial.
Qed.
Local Open Scope nat_scope.
Inductive queries_at_most' : forall (A B C : Set), OracleComp A B C -> nat -> Prop :=
| qam_Bind :
forall (A B C C' : Set)(eqdc' : EqDec C')(c : OracleComp A B C')(f : C' -> OracleComp A B C) q1 q2,
queries_at_most' c q1 ->
(forall c',
in_oc_supp c' c ->
queries_at_most' (f c') q2) ->
queries_at_most' (OC_Bind c f) (q1 + q2)
| qam_Query :
forall (A B : Set)(a : A),
queries_at_most' (OC_Query B a) 1
| qam_Ret :
forall (A B C : Set)(c : Comp C),
queries_at_most' (OC_Ret A B c) 0
| qam_Run :
forall (A A' B B' C S : Set)(eqds : EqDec S)(eqda : EqDec A)(eqdb : EqDec B)
(c : OracleComp A B C)(oc : S -> A -> OracleComp A' B' (B * S)) s q1 q2,
queries_at_most' c q1 ->
(forall s a, queries_at_most' (oc s a) q2) ->
queries_at_most' (OC_Run _ _ _ c oc s) (q1 * q2)
| qam_le :
forall (A B C : Set)(c : OracleComp A B C) q1 q2,
queries_at_most' c q1 ->
q1 <= q2 ->
queries_at_most' c q2.
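(* Reading of the counts (added note): Bind charges q1 + q2 because the
   continuation f runs once after c; Run charges q1 * q2 because each of the
   at most q1 queries of c is answered by oc, which itself makes at most q2
   queries. *)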
Theorem qam_count_gen' :
forall (A B C : Set)(c : OracleComp A B C)(q : nat),
queries_at_most' c q ->
forall (S : Set)(count : S -> nat)(eqds : EqDec S)(o : S -> A -> Comp (B * S))(s : S)(n : nat),
(forall a b x y,
In (a, b) (getSupport (o x y)) ->
count b <= n + (count x)) ->
forall a b,
In (a, b) (getSupport (c _ _ o s)) ->
count b <= q * n + (count s).
Opaque getSupport.
induction 1; intuition; simpl in *.
(* Bind case *)
repeat simp_in_support.
destruct x.
rewrite mult_plus_distr_r.
pose proof H4.
eapply (IHqueries_at_most' _ count) in H4.
eapply (H1 _ _ _ count) in H5.
rewrite H5.
rewrite plus_comm.
rewrite <- (plus_assoc).
rewrite (plus_comm (q2 * n)).
rewrite plus_assoc.
eapply plus_le_compat.
eauto.
eapply le_refl.
intuition.
intuition.
(* query case *)
rewrite plus_0_r.
eapply H; eauto.
(* ret case *)
Transparent getSupport.
repeat simp_in_support.
intuition.
(* run case *)
repeat simp_in_support.
destruct x.
simpl in *.
specialize (IHqueries_at_most' (S * S0)%type (fun p => count (snd p)) _
(fun (x : S * S0) (y : A) =>
p <-$ (oc (fst x) y) S0 eqds0 o (snd x);
ret (fst (fst p), (snd (fst p), snd p)))
(s, s0) (q2 * n)
).
eapply le_trans.
eapply IHqueries_at_most'.
intuition.
simpl.
repeat simp_in_support.
destruct x.
simpl in *.
eapply H1.
eauto.
eauto.
eauto.
simpl.
rewrite mult_assoc.
intuition.
(* trans case *)
eapply le_trans.
eapply IHqueries_at_most'.
eauto.
eauto.
eapply plus_le_compat; intuition.
eapply mult_le_compat; intuition.
Grab Existential Variables.
eapply in_oc_supp_complete; eauto.
Qed.
(*
Definition countOracle{A B State : Set}{eqdb : EqDec B}{eqds : EqDec State}(defB : B)(o : State -> A -> Comp (B * State)) p a :=
[s, n] <-2 p;
match n with
| 0 => ret (defB, p)
| S n' =>
[b', s'] <-$2 o s a;
ret (b', (s', n'))
end.
Theorem countOracle_equiv :
forall (A B C : Set)(defB : B)(eqdb : EqDec B)(eqdc : EqDec C)(c : OracleComp A B C) n,
queries_at_most' c n ->
forall (S : Set)(eqd : EqDec S)(o : S -> A -> Comp (B * S))(s : S),
comp_spec
(fun a b => fst a = fst b /\ snd a = fst (snd b))
(c _ _ o s)
(c _ _ (countOracle defB o) (s, n)).
induction 1; intuition; simpl in *.
admit.
admit.
admit.
comp_skip.
admit.
admit.
assert (EqDec C).
eapply EqDec_pair_l.
eauto.
trivial.
assert (B * S).
eapply oc_base_exists.
eapply oc
specialize (@IHqueries_at_most' eqdb0 H2 defB (S * S0)%type).
eapply H1.
eapply oc_comp_spec_eq.
Qed.
*)
Local Open Scope rat_scope.
Theorem evalDist_bind_event_le :
forall (A : Set)(c : Comp A)(f : A -> Comp bool)(evta : A -> bool) (k1 k2 : Rat),
Pr[a <-$ c; ret (evta a)] <= k1 ->
(forall a, In a (getSupport c) -> evta a = false -> Pr[f a] <= k2) ->
Pr[a <-$ c; f a] <= k1 + k2.
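(* A union-bound style step (added note): bound the whole run by the
   probability that the event evta fires on c's output, plus the worst-case
   probability of f on outputs where it does not. *)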
intuition.
simpl in *.
rewrite (sumList_partition evta).
eapply ratAdd_leRat_compat.
assert ( sumList (getSupport c)
(fun a : A => evalDist c a * Pr [f a ] * (if evta a then 1 else 0)) <=
sumList (getSupport c)
(fun b : A =>
evalDist c b * (if EqDec_dec bool_EqDec (evta b) true then 1 else 0)) ).
eapply sumList_le; intuition.
destruct (EqDec_dec bool_EqDec (evta a) true ).
rewrite e.
eapply ratMult_leRat_compat; intuition.
eapply leRat_trans.
eapply ratMult_leRat_compat.
eapply leRat_refl.
eapply evalDist_le_1.
rewrite ratMult_1_r.
intuition.
destruct (evta a); intuition.
repeat rewrite ratMult_0_r.
eapply rat0_le_all.
rewrite H1.
eapply H.
assert ( sumList (getSupport c)
(fun a : A => evalDist c a * Pr [f a ] * (if evta a then 0 else 1)) <=
sumList (getSupport c)
(fun a : A => k2 * (evalDist c a * (if evta a then 0 else 1)))).
eapply sumList_le.
intuition.
case_eq (evta a); intuition.
eapply eqRat_impl_leRat.
repeat rewrite ratMult_0_r.
intuition.
rewrite H0.
eapply eqRat_impl_leRat.
repeat rewrite ratMult_1_r.
eapply ratMult_comm.
trivial.
trivial.
rewrite H1.
rewrite sumList_factor_constant_l.
eapply leRat_trans.
Focus 2.
eapply eqRat_impl_leRat.
eapply ratMult_1_r.
eapply ratMult_leRat_compat; intuition.
assert (sumList (getSupport c)
(fun a : A => evalDist c a * (if evta a then 0 else 1)) <=
sumList (getSupport c)
(fun a : A => evalDist c a)).
eapply sumList_le.
intuition.
eapply leRat_trans.
Focus 2.
eapply eqRat_impl_leRat.
eapply ratMult_1_r.
eapply ratMult_leRat_compat; intuition.
destruct (evta a); intuition.
eapply rat0_le_all.
rewrite H2.
eapply evalDist_sum_le_1.
Qed.
Theorem oc_eventProb :
forall (A B C : Set)(c : OracleComp A B C) n,
queries_at_most' c n ->
forall
(S : Set)(eqds : EqDec S)(o : S -> A -> Comp (B * S))
(count : S -> nat)(evt : S -> bool)(s : S)(k : nat -> Rat) i,
(forall (n1 n2 : nat), (n1 <= n2)%nat -> (k n1 <= k n2)) ->
evt s = false ->
(forall s a, evt s = false -> Pr[p <-$ o s a; ret (evt (snd p))] <= (k (i + (count s))%nat)) ->
(forall s s' a b, In (b, s') (getSupport (o s a)) ->
count s' <= i + (count s))%nat ->
Pr[p <-$ c _ _ o s; ret (evt (snd p))] <= (n / 1) * (k (i * n + (count s))%nat).
Local Opaque evalDist.
induction 1; intuition; simpl in *.
inline_first.
assert (
Pr
[a <-$ c S eqds o s;
p <-$ ([z, s']<-2 a; (f z) S eqds o s'); ret evt (snd p) ] <=
q1 / 1 * k (i * q1 + count s)%nat +
q2 / 1 * k (i * q2 + (i * q1 + count s))%nat
).
eapply evalDist_bind_event_le.
eapply (@IHqueries_at_most' _ _ _ count evt); intuition.
intros.
comp_simp.
eapply leRat_trans.
eapply (@H1 _ _ _ _ _ count evt).
eapply H2.
trivial.
intuition.
intuition.
eapply ratMult_leRat_compat; intuition.
eapply H2.
eapply plus_le_compat; intuition.
Require Import RndInList.
eapply le_trans.
eapply (qam_count_gen' _ _ _ _ _ i).
intuition.
eapply H5.
eauto.
eauto.
rewrite mult_comm.
intuition.
rewrite H6.
clear H6.
eapply leRat_trans.
Focus 2.
eapply eqRat_impl_leRat.
symmetry.
rewrite ratAdd_num.
eapply ratMult_distrib_r.
eapply ratAdd_leRat_compat;
eapply ratMult_leRat_compat; intuition.
eapply H2.
rewrite mult_plus_distr_l.
rewrite plus_comm.
repeat rewrite <- plus_assoc.
eapply plus_le_compat; intuition.
rewrite H1; intuition.
rewrite <- ratMult_1_l.
eapply ratMult_leRat_compat; intuition.
eapply leRat_terms; intuition.
inline_first.
eapply distro_irr_le.
intuition.
comp_simp.
simpl.
rewrite H0.
rewrite evalDist_ret_0.
eapply rat0_le_all.
congruence.
inline_first.
assert (EqDec C).
eapply oc_EqDec; intros.
eapply c.
assert (B * S).
eapply oc_base_exists.
eapply oc.
trivial.
trivial.
intuition.
assert (B' * S0).
eapply comp_base_exists.
eapply o.
trivial.
trivial.
intuition.
intuition.
trivial.
assert
(
(evalDist
(Bind
(c (prod S S0) (pair_EqDec eqds eqds0)
(fun (x : prod S S0) (y : A) =>
Bind ((oc (fst x) y) S0 eqds0 o (snd x))
(fun p : prod (prod B S) S0 =>
Ret (EqDec_dec (pair_EqDec eqdb (pair_EqDec eqds eqds0)))
(pair (fst (fst p)) (pair (snd (fst p)) (snd p)))))
(pair s s0))
(fun a : prod C (prod S S0) =>
Bind
(Ret
(EqDec_dec
(pair_EqDec
(pair_EqDec
(oc_EqDec c
(fun x : A =>
fst
(oc_base_exists (oc s x)
(fun y : A' =>
fst (comp_base_exists (o s0 y)))))
(fun x : A =>
EqDec_pair_l
(oc_EqDec (oc s x)
(fun y : A' =>
fst (comp_base_exists (o s0 y)))
(fun y : A' =>
EqDec_pair_l (comp_EqDec (o s0 y)) s0)) s))
eqds) eqds0))
(pair (pair (fst a) (fst (snd a))) (snd (snd a))))
(fun p : prod (prod C S) S0 =>
Ret (EqDec_dec bool_EqDec) (evt (snd p))))) true)
==
Pr
[a <-$
c (S * S0)%type (pair_EqDec eqds eqds0)
(fun (x : S * S0) (y : A) =>
p <-$ (oc (fst x) y) S0 eqds0 o (snd x);
ret (fst (fst p), (snd (fst p), snd p))) (s, s0);
ret (evt (snd (snd a))) ]
).
comp_skip.
simpl.
intuition.
rewrite H7.
clear H7.
eapply leRat_trans.
eapply (@IHqueries_at_most' _ _
((fun (x : S * S0) (y : A) =>
p <-$ (oc (fst x) y) S0 eqds0 o (snd x);
ret (fst (fst p), (snd (fst p), snd p))))
(fun p => count (snd p)) (fun p => evt (snd p))
_ (fun x => q2 / 1 * k (x)%nat) (i * q2)%nat).
intuition.
eapply ratMult_leRat_compat; intuition.
trivial.
intuition.
comp_inline_l.
assert (
Pr
[a1 <-$ (oc (fst (a, b)) a0) S0 eqds0 o (snd (a, b));
p <-$ ret (fst (fst a1), (snd (fst a1), snd a1)); ret evt (snd (snd p)) ]
==
Pr
[a1 <-$ (oc (fst (a, b)) a0) S0 eqds0 o (snd (a, b));
ret evt (snd a1) ]
).
comp_skip.
simpl.
intuition.
rewrite H8.
clear H8.
simpl.
eapply leRat_trans.
eapply (@H1 ); intuition.
reflexivity.
intuition.
repeat simp_in_support.
destruct x.
simpl in *.
rewrite mult_comm.
eapply qam_count_gen'.
eapply H0.
intuition.
eapply H5.
eapply H7.
eapply H8.
simpl.
rewrite <- ratMult_assoc.
eapply ratMult_leRat_compat; intuition.
eapply eqRat_impl_leRat.
rewrite <- ratMult_num_den.
eapply eqRat_terms; intuition.
eapply H2.
eapply plus_le_compat; intuition.
rewrite <- mult_assoc.
eapply mult_le_compat; intuition.
rewrite mult_comm.
intuition.
rewrite IHqueries_at_most'.
eapply ratMult_leRat_compat; intuition.
eapply leRat_terms; intuition.
intuition.
trivial.
intuition.
intuition.
Grab Existential Variables.
trivial.
eapply in_oc_supp_complete; eauto.
Qed.
Theorem oc_eventProb_0_1 :
forall (S : Set)(count : S -> nat)(evt : S -> bool)(k : nat -> Rat)
(A B C : Set)(c : OracleComp A B C) n,
queries_at_most' c n ->
forall
(eqds : EqDec S)(o : S -> A -> Comp (B * S))
(s : S),
(forall (n1 n2 : nat), (n1 <= n2)%nat -> (k n1 <= k n2)) ->
evt s = false ->
(forall s a, evt s = false -> Pr[p <-$ o s a; ret (evt (snd p))] <= (k (1 + (count s))%nat)) ->
(forall s s' a b, In (b, s') (getSupport (o s a)) ->
count s' <= 1 + (count s))%nat ->
count s = 0%nat ->
Pr[p <-$ c _ _ o s; ret (evt (snd p))] <= (n / 1) * (k n).
intuition.
eapply leRat_trans.
eapply oc_eventProb; intuition.
eauto.
rewrite H4.
rewrite mult_1_l.
rewrite plus_0_r.
intuition.
Qed.
(*
Definition maxCount_o(D R S : Set){eqdr : EqDec R}{eqds : EqDec S}(count : S -> nat)(o : S -> D -> Comp (R * S))
(defR : R)(m : nat)(s : S)(d : D) :=
if (ge_dec (count s) m) then (ret (defR, s)) else
o s d.
Theorem maxCount_o_equiv :
forall (D R C: Set)(A : OracleComp D R C) (n1: nat),
queries_at_most A n1 ->
forall (S : Set)(count : S -> nat)(o : S -> D -> Comp (R * S))(defR : R)(eqdr : EqDec R)(eqds : EqDec S)(RndR : Comp R) s x n2,
(n1 + (count s) <= n2)%nat ->
evalDist (A _ _ o s) x ==
evalDist (A _ _ (maxCount_o count o defR n2) s) x.
Local Opaque evalDist.
induction 1; intuition; simpl in *.
comp_skip.
comp_simp.
eapply H1.
specialize (qam_count_gen H (fun (p : list (A * B)) => length p) _ (randomFunc RndR eqdd) ls); intuition.
eapply le_trans.
eapply plus_le_compat.
eapply le_refl.
eapply H4.
intuition; simpl in *.
unfold randomFunc in *.
destruct (arrayLookup eqdd x y); repeat simp_in_support.
omega.
simpl.
assert ( (S (length x) <= 1+ length x)%nat).
omega.
eapply H5.
eapply H3.
omega.
unfold randomFunc, randomFunc_max.
destruct (arrayLookup eqdd ls a).
eapply evalDist_ret_eq; intuition.
comp_skip.
destruct (ge_dec (length ls) n2).
omega.
eapply evalDist_ret_eq; intuition.
eapply eqRat_refl.
comp_skip.
remember (map
eapply (@IHqueries_at_most _ _ RndR ls.
trivial.
erewrite H4.
Print RndInList.
rewrite <- H2.
eapply plus_le_compat; intuition.
omega.
Qed.
*)
Section RandPermSwitching.
Variable A D R : Set.
Variable RndR : Comp R.
Hypothesis RndR_wf : well_formed_comp RndR.
Variable A2 : OracleComp D R bool.
Hypothesis eqdd : EqDec D.
Hypothesis eqdr : EqDec R.
Variable fBad : list (D * R) -> D -> R -> bool.
Variable badProb : nat -> Rat.
Hypothesis badProb_mono : forall n1 n2, (n1 <= n2)%nat -> badProb n1 <= badProb n2.
Hypothesis A2_wf : well_formed_oc A2.
Variable A2_queries : nat.
Hypothesis A2_queries_correct : queries_at_most' A2 A2_queries.
Hypothesis badProb_correct :
forall d f,
Pr [r <-$ RndR;
ret (fBad f d r)] <= badProb (S (length f)).
Hypothesis goodExists :
forall d f,
(length f <= A2_queries) %nat ->
exists r,
In r (getSupport RndR) /\
fBad f d r = false.
(*
Hypothesis fBad_single :
forall d r,
fBad ((d, r) :: nil) = false.
*)
(*
Hypothesis goodExists :
forall d f,
arrayLookup _ f d = None ->
exists r,
In r (getSupport RndR) /\
fBad f d r = false.
*)
Definition rndNotBad(d : D)(f : list (D * R)):=
Repeat RndR (fun r => negb (fBad f d r)).
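(* rndNotBad rejection-samples RndR until the draw is not bad for the current
   table f; goodExists below supplies a good value for tables within the
   query bound, which is what makes this Repeat well-formed. *)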
Definition GenRP(s : list (D * R))(d : D) : Comp (R * list (D * R)) :=
match arrayLookup _ s d with
| Some r => ret (r, s)
| None => r <-$ (rndNotBad d s); ret (r, (d, r) :: s)
end.
Definition RPS_G0 :=
[b, _] <-$2 A2 _ _ (@randomFunc D R RndR _ ) nil;
ret b.
Definition RPS_G1 :=
[b, _] <-$2 A2 _ _ GenRP nil;
ret b.
Definition randomFunc_bad (s : list (D * R) * bool) d :=
[s, bad] <-2 s;
match (arrayLookup _ s d) with
| Some r => ret (r, (s, bad))
| None =>
r <-$ RndR;
newF <- ((d, r) :: s);
bad' <- fBad s d r;
ret (r, (newF, bad || bad'))
end.
Definition GenRP_bad (s : list (D * R) * bool) d :=
[s, bad] <-2 s;
match (arrayLookup _ s d) with
| Some r => ret (r, (s, bad))
| None =>
r <-$ RndR;
newF <- ((d, r) :: s);
bad' <- fBad s d r;
if (bad') then
(r <-$ rndNotBad d s;
ret (r, ((d, r) :: s, bad || bad')))
else
ret (r, (newF, bad || bad'))
end.
Definition RPS_G_1 :=
[b, s] <-$2 A2 _ _ (randomFunc_bad) (nil, false);
ret (b, snd s).
Definition RPS_G_2 :=
[b, s] <-$2 A2 _ _ (GenRP_bad) (nil, false);
ret (b, snd s).
Theorem randomFunc_bad_wf :
forall a b,
well_formed_comp (randomFunc_bad a b).
intuition.
unfold randomFunc_bad.
destruct (arrayLookup eqdd a0 b0); wftac.
Qed.
Theorem GenRP_bad_wf :
forall a b,
(length (fst a) <= A2_queries)%nat ->
well_formed_comp (GenRP_bad a b).
intuition.
simpl in *.
case_eq (arrayLookup eqdd a0 b0); intuition; wftac.
unfold rndNotBad; wftac.
edestruct goodExists ; eauto.
intuition.
econstructor.
unfold eq_dec; intuition.
eapply (EqDec_dec _).
trivial.
eapply filter_In; intuition.
eauto.
rewrite H4.
trivial.
Qed.
Theorem rf_rp_oc_eq_until_bad :
comp_spec
(fun y1 y2 =>
snd (snd y1) = snd (snd y2) /\
(snd (snd y1) = false -> fst (snd y1) = fst (snd y2) /\ fst y1 = fst y2))
(A2 (list (D * R) * bool)%type _
randomFunc_bad (nil, false))
(A2 (list (D * R) * bool)%type _
GenRP_bad (nil, false)).
eapply comp_spec_consequence.
eapply (@oc_comp_spec_eq_until_bad _ _ _ _ _ _ _ _ _ _ _ _ _
(fun a => snd a)
(fun a => snd a)
(fun a b => (fst a = fst b))).
intuition.
eapply randomFunc_bad_wf.
intuition.
eapply GenRP_bad_wf.
admit.
intuition.
simpl in *.
subst.
case_eq (arrayLookup eqdd a0 a1); intuition.
eapply comp_spec_ret; intuition.
comp_skip.
eapply comp_base_exists; eauto.
eapply comp_base_exists; eauto.
case_eq ( fBad a0 a1 b); intuition.
comp_irr_r.
unfold rndNotBad.
edestruct goodExists.
eauto.
intuition.
econstructor.
unfold eq_dec; intuition.
eapply (EqDec_dec _).
trivial.
eapply filter_In; intuition.
eauto.
rewrite H5.
trivial.
eapply comp_spec_ret.
simpl.
split.
intuition.
intros.
rewrite orb_true_r in H4.
discriminate.
eapply comp_spec_ret; intuition.
intuition.
simpl in *.
case_eq (arrayLookup eqdd a1 d); intuition.
rewrite H1 in H0.
repeat simp_in_support.
trivial.
rewrite H1 in H0.
repeat simp_in_support.
trivial.
intuition.
simpl in *.
case_eq (arrayLookup eqdd a1 d ); intuition;
rewrite H1 in H0;
repeat simp_in_support;
trivial.
trivial.
trivial.
intuition.
Grab Existential Variables.
trivial.
Qed.
Theorem RPS_G_1_2_bad_same :
Pr [x <-$ RPS_G_1; ret snd x ] == Pr [x <-$ RPS_G_2; ret snd x ].
unfold RPS_G_1, RPS_G_2.
inline_first.
eapply comp_spec_impl_eq.
comp_skip.
eapply rf_rp_oc_eq_until_bad.
intuition.
simpl in *.
intuition.
destruct b.
simpl in *.
destruct p; simpl in *.
subst.
comp_simp.
simpl.
eapply comp_spec_ret; intuition.
Qed.
Theorem RPG_G_1_2_eq_until_bad :
forall a : bool,
evalDist RPS_G_1 (a, false) == evalDist RPS_G_2 (a, false).
intuition.
unfold RPS_G_1, RPS_G_2.
eapply comp_spec_impl_eq.
comp_skip.
eapply rf_rp_oc_eq_until_bad.
intuition.
simpl in *.
intuition.
destruct b.
simpl in *.
destruct p; simpl in *.
eapply comp_spec_ret; intuition.
pairInv.
subst.
intuition.
subst.
intuition.
subst.
pairInv.
intuition.
subst.
trivial.
Qed.
Theorem RPS_G_1_bad_small :
Pr [x <-$ RPS_G_1; ret snd x ] <= A2_queries / 1 * badProb A2_queries.
unfold RPS_G_1.
inline_first.
assert (Pr
[a <-$
A2 (list (D * R) * bool)%type _
(randomFunc_bad) (nil, false);
x <-$ ([b, s]<-2 a; ret (b, snd s)); ret snd x ]
==
Pr
[a <-$
A2 (list (D * R) * bool)%type _
(randomFunc_bad) (nil, false);
ret snd (snd a) ]
).
comp_skip.
comp_simp.
reflexivity.
rewrite H.
clear H.
eapply leRat_trans.
eapply (@oc_eventProb_0_1 _ (fun p => length (fst p)) _ badProb).
eauto.
eapply badProb_mono.
trivial.
intuition.
simpl in *.
subst.
case_eq (arrayLookup eqdd a a0); intuition.
comp_simp.
simpl.
rewrite evalDist_ret_0.
eapply rat0_le_all.
congruence.
simpl in *.
eapply leRat_trans.
Focus 2.
eapply badProb_correct.
inline_first.
comp_skip.
simpl.
eapply leRat_refl.
intuition.
simpl in *.
destruct (arrayLookup eqdd a a1);
repeat simp_in_support.
omega.
simpl.
intuition.
trivial.
intuition.
Qed.
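(* Identical-until-bad (added note): the two games agree unless fBad fires,
   the bad event has the same probability in both (RPS_G_1_2_bad_same), and
   that probability is small (RPS_G_1_bad_small); the fundamental lemma
   combines these three facts. *)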
Theorem RPS_G_1_2_close :
| Pr[x <-$ RPS_G_1; ret fst x] - Pr[x <-$ RPS_G_2; ret fst x] | <= (A2_queries / 1) * badProb A2_queries.
intuition.
eapply leRat_trans.
eapply fundamental_lemma_h.
(* badness is the same. *)
eapply RPS_G_1_2_bad_same.
eapply RPG_G_1_2_eq_until_bad.
eapply RPS_G_1_bad_small.
Qed.
Theorem RPS_G0_equiv :
Pr[RPS_G0] == Pr[x <-$ RPS_G_1; ret fst x].
unfold RPS_G0, RPS_G_1.
inline_first.
eapply (comp_spec_eq_impl_eq).
comp_skip.
eapply (@oc_comp_spec_eq _ _ _ _ _ _ _ _ _ _ _ _ _ _ (fun a b => a = fst b)); trivial.
intuition.
subst.
unfold randomFunc, randomFunc_bad.
comp_simp.
simpl.
case_eq ( arrayLookup eqdd l a ); intuition.
eapply comp_spec_ret; intuition.
comp_skip.
eapply comp_base_exists; eauto.
eapply comp_base_exists; eauto.
eapply comp_spec_ret; intuition.
comp_simp.
simpl in *; intuition.
subst.
eapply comp_spec_eq_refl.
Qed.
Theorem RPS_G1_equiv :
Pr[x <-$ RPS_G_2; ret fst x] == Pr[RPS_G1].
unfold RPS_G_2, RPS_G1.
inline_first.
eapply comp_spec_eq_impl_eq.
comp_skip.
eapply (@oc_comp_spec_eq _ _ _ _ _ _ _ _ _ _ _ _ _ _ (fun a b => fst a = b)); trivial.
intuition.
unfold GenRP, GenRP_bad.
subst.
simpl.
case_eq (arrayLookup eqdd a0 a); intuition.
eapply comp_spec_ret; intuition.
unfold rndNotBad.
assert (
comp_spec
eq
(r <-$ (r <-$ RndR; if (negb (fBad a0 a r)) then (ret r) else (Repeat RndR (fun r : R => negb (fBad a0 a r))));
ret (r, (a, r) :: a0))
(r <-$ Repeat RndR (fun r : R => negb (fBad a0 a r));
ret (r, (a, r) :: a0))
).
eapply eq_impl_comp_spec_eq.
intuition.
symmetry.
comp_skip.
eapply repeat_unroll_eq.
trivial.
edestruct goodExists; eauto.
intuition.
econstructor.
eapply filter_In; intuition.
eauto.
rewrite H2.
trivial.
eapply comp_spec_eq_trans_r.
Focus 2.
eapply H0.
clear H0.
inline_first.
comp_skip.
eapply comp_base_exists; eauto.
eapply comp_base_exists; eauto.
case_eq (fBad a0 a b0); intuition.
simpl.
comp_skip.
eapply comp_spec_ret; intuition.
simpl.
comp_simp.
eapply comp_spec_ret; intuition.
comp_simp.
simpl in *.
intuition; subst.
eapply comp_spec_eq_refl.
Qed.
Theorem RPS_G0_1_close :
| Pr[RPS_G0] - Pr[RPS_G1] | <= (A2_queries / 1) * badProb A2_queries.
rewrite RPS_G0_equiv.
rewrite <- RPS_G1_equiv.
eapply RPS_G_1_2_close.
Qed.
End RandPermSwitching. |
! { dg-do compile }
! { dg-options "-Warray-temporaries" }
! PR 44235
! No temporary should be created for this, as the upper bounds
! are effectively identical: both 1:10:4 and 1:9:4 denote elements 1, 5 and 9.
program main
real a(10)
a = 0.
a(1:10:4) = a(1:9:4)
end program main
|
-- | This module provides generic functionality to deal with ensembles in
-- statistical mechanics.
module StatisticalMechanics.Ensemble where
import Numeric.Log
import Statistics.Probability
-- | The state probability functions provide conversion from some types @a@
-- into non-normalized probabilities. For "real" applications, using the
-- @stateLogProbability@ function is preferred. These functions allow for easy
-- abstraction when types @a@ are given as fractions of some actual value (say:
-- deka-cal), or are discretized.
--
-- The returned values are not normalized, because we do not know the total
-- evidence @Z@ until integration over all states has happened -- which is not
-- feasible in a number of problems.
--
-- TODO replace @()@ with temperature and results with non-normalized @P@ or
-- @LogP@, depending. At some point we want to have type-level physical
-- quantities, hence the need for the second type.
class StateProbability a where
-- | Given a temperature and a state "energy", return the corresponding
-- non-normalized probability.
stateProbability
∷ Double
-- ^ this is @k*T@
→ a
-- ^ the energy (or discretized energy)
→ Probability NotNormalized Double
-- ^ probability of being in state @a@, but only proportional up to @1/Z@.
stateLogProbability
∷ Double
-- ^ this is @k * T@
→ a
-- ^ the energy (or discretized energy)
→ Log (Probability NotNormalized Double)
-- ^ resulting probability
instance StateProbability Double where
stateProbability kT x = Prob . exp . negate $ x/kT
{-# Inline stateProbability #-}
--stateLogProbability kT x = Exp . log . Prob . exp . negate $ x/kT
stateLogProbability kT x = Exp . Prob . negate $ x/kT
{-# Inline stateLogProbability #-}
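-- A minimal usage sketch (illustrative; @exampleWeight@ is not part of the
-- original module): the non-normalized Boltzmann weight of a state with
-- energy 1.5 at kT = 0.6, kept in log-space to avoid underflow.
exampleWeight ∷ Log (Probability NotNormalized Double)
exampleWeight = stateLogProbability 0.6 (1.5 ∷ Double)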
|
import data.list tools.super .library_dev.data.list.comb .library_dev.data.list.set
open nat function tactic
-- lemmas of list
namespace list
variables {A B: Type}
private theorem mem_cons (x : A) (l : list A) : x ∈ x :: l :=
or.inl rfl
-- def upto : ℕ → list ℕ
-- | 0 := []
-- | (succ n) := n :: upto n
-- theorem lt_of_mem_upto {n i : nat} : i ∈ upto n → i < n :=
-- nat.rec_on n (λ h, absurd h (not_mem_nil i))
-- (λ a ih h, or.elim h
-- (begin intro l, rw l, apply lt_succ_self end)
-- (λ r, lt_trans (ih r) (lt_succ_self a)))
-- theorem upto_succ (n : nat) : upto (succ n) = n :: upto n := rfl
-- theorem length_upto : ∀ n, length (upto n) = n
-- | 0 := rfl
-- | (succ n) := begin simp [upto_succ, length_cons, length_upto] end
/- map -/
theorem map_nil (f : A → B) : list.map f [] = [] := rfl
end list
open list
-- lemmas of fin
namespace fin
attribute [simp]
private theorem length_nil {A : Type} : length (@nil A) = 0 :=
rfl
lemma val_mk (n i : nat) (Plt : i < n) : fin.val (fin.mk i Plt) = i := rfl
def upto (n : ℕ) : list (fin n) :=
dmap (λ i, i < n) fin.mk (list.upto n)
lemma map_val_upto (n : nat) : map fin.val (upto n) = list.upto n :=
map_dmap_of_inv_of_pos (val_mk n) (@lt_of_mem_upto n)
lemma length_upto (n : nat) : length (fin.upto n) = n :=
calc
length (fin.upto n) = length (list.upto n) : (map_val_upto n ▸ eq.symm (length_map fin.val (upto n)))
... = n : length_upto n
lemma upto_ne_nil_of_ne_zero (n : nat) (Hn : n ≠ 0) : fin.upto n ≠ [] :=
begin
intro Hup,
apply Hn,
rewrite [-(@length_nil (fin n)), -Hup],
apply eq.symm (length_upto _)
end
lemma mem_upto (n : nat) : ∀ (i : fin n), i ∈ upto n :=
take i, fin.rec_on i
(take ival Piltn,
have ival ∈ list.upto n, from mem_upto_of_lt Piltn,
mem_dmap Piltn this)
end fin
-- lemmas of bigops
-- namespace group_bigops
-- variables {A B : Type}
-- definition Suml (f : A → ℕ) : list A → ℕ
-- | [] := 0
-- | (a :: ls) := f a + Suml ls
-- definition addf {A B : Type} [sgB : add_semigroup B] (f : A → B) : B → A → B :=
-- λ b a, b + f a
-- definition Suml [add_monoid B] (l : list A) (f : A → B) : B :=
-- list.foldl (addf f) 0 l
-- theorem Suml_nil (f : A → B) : Suml [] f = 0 := Prodl_nil f
-- -- check add_monoid
-- end group_bigops
-- definitions
section
-- Suml
variables {A : Type}
definition Suml (f : A → ℕ) : list A → ℕ
| [] := 0
| (a :: ls) := f a + Suml ls
theorem le_add_of_le {a b c : ℕ} : a ≤ b → a ≤ b + c :=
begin
induction c with n ih, intro, super, intro h2,
apply le_succ_of_le, exact ih h2
end
theorem le_of_mem_Suml {f : A → ℕ} {a : A} {l : list A} :
a ∈ l → f a ≤ Suml f l :=
begin
induction l with b ls ih, intro h, exact absurd h (not_mem_nil _),
dsimp [Suml], intro h, assert h' : a = b ∨ a ∈ ls, exact h,
cases h' with l r, rw l, apply le_add_right,
rw add_comm,apply le_add_of_le, exact ih r
end
end
open list fin
inductive finite_tree : Type
| cons : Π {n : ℕ}, (fin n → finite_tree) → finite_tree
namespace finite_tree
-- definition size : finite_tree → ℕ
-- | (@cons n ts) := Suml (fin.upto n) (λ i, size (ts i)) + 1
def size : finite_tree → ℕ
| (@cons n ts) := Suml (λ i, size (ts i)) (fin.upto n) + 1
-- theorem exists_eq_cons_of_ne_nil {A : Type} {l : list A} : l ≠ [] → ∃ a, ∃ l', l = a::l' :=
-- list.rec_on l (λ H, absurd rfl H) (λ a l' IH H, ⟨a, ⟨l',rfl⟩⟩)
theorem pos_of_size (t : finite_tree) : 0 < size t :=
finite_tree.rec_on t (λ n ts ih, dec_trivial)
theorem lt_of_size_branches_aux {n : ℕ} (ts : fin n → finite_tree) (k : fin n) : size (ts k) < Suml (λ i, size (ts i)) (upto n) + 1 :=
begin
assert kin : k ∈ upto n, exact mem_upto n k,
assert h : size (ts k) ≤ Suml (λ i, size (ts i)) (upto n),
apply le_of_mem_Suml kin,
apply lt_succ_of_le, assumption
end
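-- `embeds` is homeomorphic embedding on finite trees: a tree embeds into
-- another if it embeds into one of the other's immediate subtrees, or if
-- its branches embed injectively, branch by branch, into the other's.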
def embeds : finite_tree → finite_tree → Prop
| (@cons _ ts) (@cons _ us) := (∃ j, embeds (cons ts) (us j)) ∨
(∃ f, injective f ∧ ∀ i, embeds (ts i) (us (f i)))
infix ` ≼ `:50 := embeds -- \preceq
def node {ts : fin 0 → finite_tree} : finite_tree := @cons 0 ts
def {u} fin_zero_absurd {α : Sort u} (i : fin 0) : α :=
absurd i.2 (not_lt_zero i.1)
theorem node_embeds {ts : fin 0 → finite_tree} (t : finite_tree) : @cons 0 ts ≼ t :=
begin
induction t with n a ih,
dsimp [embeds],
pose f : fin 0 → fin n := λ i : fin 0, fin_zero_absurd i,
apply or.inr,
existsi f,
split,
intros i j hij, exact fin_zero_absurd i,
intro i, exact fin_zero_absurd i
end
theorem not_embeds_node {n : ℕ} {ts : fin (succ n) → finite_tree}
{tt : fin 0 → finite_tree}: ¬ cons ts ≼ cons tt :=
begin
intro h,
dsimp [embeds] at h,
cases h with h₁ h₂,
{ cases h₁ with i hi, exact fin_zero_absurd i },
cases h₂ with f hf, exact fin_zero_absurd (f ⟨0, zero_lt_succ _⟩)
end
theorem cons_embeds_cons_left {m n : ℕ} {ss : fin m → finite_tree} {ts : fin n → finite_tree}
{j : fin n} (H : cons ss ≼ ts j) :
cons ss ≼ cons ts :=
begin
cases m,
apply node_embeds, cases n,
exact fin_zero_absurd j,
apply or.inl (exists.intro j H)
end
theorem cons_embeds_cons_right {m n : ℕ} {ss : fin m → finite_tree} {ts : fin n → finite_tree}
{f : fin m → fin n} (injf : injective f) (Hf : ∀ i, ss i ≼ ts (f i)) :
cons ss ≼ cons ts :=
begin cases m, apply node_embeds, cases n,
exact fin_zero_absurd (f 0),
apply or.inr (exists.intro f (and.intro injf Hf))
end
theorem embeds_refl (t : finite_tree) : t ≼ t :=
begin
induction t with n a ih,
cases n, apply node_embeds,
apply cons_embeds_cons_right,
apply injective_id, exact ih
end
theorem embeds_trans_aux : ∀ {u s t}, t ≼ u → s ≼ t → s ≼ u :=
begin
intro u,
induction u with ul us ihu,
intros s t, cases s with sl ss,
cases t with tl ts,
intro H₁, dsimp [embeds] at H₁, cases H₁ with H₁₁ H₁₂,
cases H₁₁ with i H₁₁, intro H₂,
apply cons_embeds_cons_left (ihu _ H₁₁ H₂),
cases H₁₂ with f Hf, cases Hf with injf Hf,
intro H₂, dsimp [embeds] at H₂, cases H₂ with H₂₁ H₂₂,
cases H₂₁ with j H₂₁,
apply cons_embeds_cons_left (ihu _ (Hf j) H₂₁),
cases H₂₂ with g Hg, cases Hg with injg Hg,
apply cons_embeds_cons_right,
apply injective_comp injf injg,
intro i, apply ihu _ (Hf (g i)) (Hg i)
end
theorem embeds_trans {s t u : finite_tree} (H₁ : s ≼ t) (H₂ : t ≼ u) : s ≼ u :=
embeds_trans_aux H₂ H₁
-- proposition cons_embeds_iff {m : ℕ} (ss : fin m → finite_tree) (t : finite_tree) :
-- cons ss ≼ t ↔ ∃ n (ts : fin n → finite_tree), t = cons ts ∧
-- ((∃ j, cons ss ≼ ts j) ∨ (∃ f, injective f ∧ ∀ i, ss i ≼ ts (f i))) :=
-- begin
-- apply iff.intro,
-- intro H, cases t with n ts,
-- contradiction,
-- existsi n, existsi ts, split, reflexivity, apply cons_embeds_cons_dest H,
-- intro H, cases H with n H, cases H with ts H, cases H with teq H,
-- rewrite teq, exact H
-- end
end finite_tree
|
(* Title: statecharts/HA/HA.thy
Author: Steffen Helke, Software Engineering Group
Copyright 2010 Technische Universitaet Berlin
*)
section \<open>Syntax of Hierarchical Automata\<close>
theory HA
imports SA
begin
subsection \<open>Definitions\<close>
(* unique root automaton *)
definition
RootEx :: "[(('s,'e,'d)seqauto) set,
's \<rightharpoonup> ('s,'e,'d) seqauto set] => bool" where
"RootEx F G = (\<exists>! A. A \<in> F \<and> A \<notin> \<Union> (ran G))"
definition
Root :: "[(('s,'e,'d)seqauto) set,
's \<rightharpoonup> ('s,'e,'d) seqauto set]
=> ('s,'e,'d) seqauto" where
"Root F G = (@ A. A \<in> F \<and> A \<notin> \<Union> (ran G))"
(* mutually distinct state spaces *)
definition
MutuallyDistinct :: "(('s,'e,'d)seqauto) set => bool" where
"MutuallyDistinct F =
(\<forall> a \<in> F. \<forall> b \<in> F. a \<noteq> b \<longrightarrow> (States a) \<inter> (States b) = {})"
(* exactly one ancestor for every non root automaton *)
definition
OneAncestor :: "[(('s,'e,'d)seqauto) set,
's \<rightharpoonup> ('s,'e,'d) seqauto set] => bool" where
"OneAncestor F G =
(\<forall> A \<in> F - {Root F G} .
\<exists>! s. s \<in> (\<Union> A' \<in> F - {A} . States A') \<and>
A \<in> the (G s))"
(* composition function contains no cycles *)
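(* every non-empty set of states contains a state whose child states all lie
   outside the set, so the composition function admits no infinite descent *)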
definition
NoCycles :: "[(('s,'e,'d)seqauto) set,
's \<rightharpoonup> ('s,'e,'d) seqauto set] => bool" where
"NoCycles F G =
(\<forall> S \<in> Pow (\<Union> A \<in> F. States A).
S \<noteq> {} \<longrightarrow> (\<exists> s \<in> S. S \<inter> (\<Union> A \<in> the (G s). States A) = {}))"
(* properties of composition functions *)
definition
IsCompFun :: "[(('s,'e,'d)seqauto) set,
's \<rightharpoonup> ('s,'e,'d) seqauto set] => bool" where
"IsCompFun F G = ((dom G = (\<Union> A \<in> F. States A)) \<and>
(\<Union> (ran G) = (F - {Root F G})) \<and>
(RootEx F G) \<and>
(OneAncestor F G) \<and>
(NoCycles F G))"
subsubsection \<open>Well-formedness for the syntax of HA\<close>
definition
HierAuto :: "['d data,
(('s,'e,'d)seqauto) set,
'e set,
's \<rightharpoonup> (('s,'e,'d)seqauto) set]
=> bool" where
"HierAuto D F E G = ((\<Union> A \<in> F. SAEvents A) \<subseteq> E \<and>
MutuallyDistinct F \<and>
finite F \<and>
IsCompFun F G)"
lemma HierAuto_EmptySet:
"((@x. True),{Abs_seqauto ({@x. True}, (@x. True), {}, {})}, {},
Map.empty ( @x. True \<mapsto> {})) \<in> {(D,F,E,G) | D F E G. HierAuto D F E G}"
apply (unfold HierAuto_def IsCompFun_def Root_def RootEx_def MutuallyDistinct_def
OneAncestor_def NoCycles_def)
apply auto
done
definition
"hierauto =
{(D,F,E,G) |
(D::'d data)
(F::(('s,'e,'d) seqauto) set)
(E::('e set))
(G::('s \<rightharpoonup> (('s,'e,'d) seqauto) set)).
HierAuto D F E G}"
typedef ('s,'e,'d) hierauto =
"hierauto :: ('d data * ('s,'e,'d) seqauto set * 'e set * ('s \<rightharpoonup> ('s,'e,'d) seqauto set)) set"
unfolding hierauto_def
apply (rule exI)
apply (rule HierAuto_EmptySet)
done
definition
SAs :: "(('s,'e,'d) hierauto) => (('s,'e,'d) seqauto) set" where
"SAs = fst o snd o Rep_hierauto"
definition
HAEvents :: "(('s,'e,'d) hierauto) => ('e set)" where
"HAEvents = fst o snd o snd o Rep_hierauto"
definition
CompFun :: "(('s,'e,'d) hierauto) => ('s \<rightharpoonup> ('s,'e,'d) seqauto set)" where
"CompFun = (snd o snd o snd o Rep_hierauto)"
definition
HAStates :: "(('s,'e,'d) hierauto) => ('s set)" where
"HAStates HA = (\<Union> A \<in> (SAs HA). States A)"
definition
HADelta :: "(('s,'e,'d) hierauto) => (('s,'e,'d)trans)set" where
"HADelta HA = (\<Union> F \<in> (SAs HA). Delta F)"
definition
HAInitValue :: "(('s,'e,'d) hierauto) => 'd data" where
"HAInitValue == fst o Rep_hierauto"
definition
HAInitStates :: "(('s,'e,'d) hierauto) => 's set" where
"HAInitStates HA == \<Union> A \<in> (SAs HA). { InitState A }"
definition
HARoot :: "(('s,'e,'d) hierauto) => ('s,'e,'d)seqauto" where
"HARoot HA == Root (SAs HA) (CompFun HA)"
definition
HAInitState :: "(('s,'e,'d) hierauto) => 's" where
"HAInitState HA == InitState (HARoot HA)"
subsubsection \<open>State successor function\<close>
(* state successor function Chi *)
definition
Chi :: "('s,'e,'d)hierauto => 's => 's set" where
"Chi A == (\<lambda> S \<in> (HAStates A) .
{S'. \<exists> SA \<in> (SAs A) . SA \<in> the ((CompFun A) S) \<and> S' \<in> States SA })"
(* direct state successor relation ChiRel *)
definition
ChiRel :: "('s,'e,'d)hierauto => ('s *'s) set" where
"ChiRel A == { (S,S'). S \<in> HAStates A \<and> S' \<in> HAStates A \<and> S' \<in> (Chi A) S }"
(* indirect state successor relation ChiPlus *)
definition
ChiPlus :: "('s,'e,'d)hierauto => ('s *'s) set" where
"ChiPlus A == (ChiRel A) ^+"
definition
ChiStar :: "('s,'e,'d)hierauto => ('s *'s) set" where
"ChiStar A == (ChiRel A) ^*"
(* priority on transitions that are successors *)
definition
HigherPriority :: "[('s,'e,'d)hierauto,
('s,'e,'d)trans * ('s,'e,'d)trans] => bool" where
"HigherPriority A ==
\<lambda> (t,t') \<in> (HADelta A) \<times> (HADelta A).
(source t',source t) \<in> ChiPlus A"
subsubsection \<open>Configurations\<close>
(* initial configuration *)
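(* all initial states reachable from the root's initial state along the
   child relation restricted to initial states *)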
definition
InitConf :: "('s,'e,'d)hierauto => 's set" where
"InitConf A == (((((HAInitStates A) \<times> (HAInitStates A)) \<inter> (ChiRel A))^* )
`` {HAInitState A})"
(* -------------------------------------------------------------- *)
(* First, the original definition calculating a step on *)
(* configurations given by *)
(* *)
(* E. Mikk, Y. Lakhnech, and M. Siegel. Hierarchical automata as *)
(* model for statecharts. In Asian Computing Science Conference *)
(* (ASIAN~97), Springer LNCS, 1345, 1997. *)
(*
"StepConf A C TS ==
(C - ((ChiStar A) `` (Source TS))) \<union>
(Target TS) \<union> (((ChiPlus A) `` (Target TS))
\<inter> (HAInitStates A))"
*)
(* *)
(* Note, that this semantic definition not preserves the *)
(* well-formedness of a Statecharts. Hence we use our definition. *)
(* -------------------------------------------------------------- *)
(* step on configurations *)
definition
StepConf :: "[('s,'e,'d)hierauto, 's set,
('s,'e,'d)trans set] => 's set" where
"StepConf A C TS ==
(C - ((ChiStar A) `` (Source TS))) \<union>
(Target TS) \<union>
((ChiRel A) `` (Target TS)) \<inter> (HAInitStates A) \<union>
((((ChiRel A) \<inter> ((HAInitStates A) \<times> (HAInitStates A)))\<^sup>+)
`` (((ChiRel A)`` (Target TS)) \<inter> (HAInitStates A)))"
subsection \<open>Lemmas\<close>
lemma Rep_hierauto_tuple:
"Rep_hierauto HA = (HAInitValue HA, SAs HA, HAEvents HA, CompFun HA)"
by (unfold SAs_def HAEvents_def CompFun_def HAInitValue_def, simp)
lemma Rep_hierauto_select:
"(HAInitValue HA, SAs HA, HAEvents HA, CompFun HA): hierauto"
by (rule Rep_hierauto_tuple [THEN subst], rule Rep_hierauto)
lemma HierAuto_select [simp]:
"HierAuto (HAInitValue HA) (SAs HA) (HAEvents HA) (CompFun HA)"
by (cut_tac Rep_hierauto_select, unfold hierauto_def, simp)
subsubsection \<open>\<open>HAStates\<close>\<close>
lemma finite_HAStates [simp]:
"finite (HAStates HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def)
apply auto
apply (simp add: HAStates_def)
apply (rule finite_UN_I)
apply fast
apply (rule finite_States)
done
lemma HAStates_SA_mem:
"\<lbrakk> SA \<in> SAs A; S \<in> States SA \<rbrakk> \<Longrightarrow> S \<in> HAStates A"
by (unfold HAStates_def, auto)
lemma ChiRel_HAStates [simp]:
"(a,b) \<in> ChiRel A \<Longrightarrow> a \<in> HAStates A"
apply (unfold ChiRel_def)
apply auto
done
lemma ChiRel_HAStates2 [simp]:
"(a,b) \<in> ChiRel A \<Longrightarrow> b \<in> HAStates A"
apply (unfold ChiRel_def)
apply auto
done
subsubsection \<open>\<open>HAEvents\<close>\<close>
lemma HAEvents_SAEvents_SAs:
"\<Union>(SAEvents ` (SAs HA)) \<subseteq> HAEvents HA"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def)
apply fast
done
subsubsection \<open>\<open>NoCycles\<close>\<close>
lemma NoCycles_EmptySet [simp]:
"NoCycles {} S"
by (unfold NoCycles_def, auto)
lemma NoCycles_HA [simp]:
"NoCycles (SAs HA) (CompFun HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def IsCompFun_def)
apply auto
done
subsubsection \<open>\<open>OneAncestor\<close>\<close>
lemma OneAncestor_HA [simp]:
"OneAncestor (SAs HA) (CompFun HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def IsCompFun_def)
apply auto
done
subsubsection \<open>\<open>MutuallyDistinct\<close>\<close>
lemma MutuallyDistinct_Single [simp]:
"MutuallyDistinct {SA}"
by (unfold MutuallyDistinct_def, auto)
lemma MutuallyDistinct_EmptySet [simp]:
"MutuallyDistinct {}"
by (unfold MutuallyDistinct_def, auto)
lemma MutuallyDistinct_Insert:
"\<lbrakk> MutuallyDistinct S; (States A) \<inter> (\<Union> B \<in> S. States B) = {} \<rbrakk>
\<Longrightarrow> MutuallyDistinct (insert A S)"
by (unfold MutuallyDistinct_def, safe, fast+)
lemma MutuallyDistinct_Union:
"\<lbrakk> MutuallyDistinct A; MutuallyDistinct B;
(\<Union> C \<in> A. States C) \<inter> (\<Union> C \<in> B. States C) = {} \<rbrakk>
\<Longrightarrow> MutuallyDistinct (A \<union> B)"
by (unfold MutuallyDistinct_def, safe, blast+)
lemma MutuallyDistinct_HA [simp]:
"MutuallyDistinct (SAs HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def IsCompFun_def)
apply auto
done
subsubsection \<open>\<open>RootEx\<close>\<close>
lemma RootEx_Root [simp]:
"RootEx F G \<Longrightarrow> Root F G \<in> F"
apply (unfold RootEx_def Root_def)
apply (erule ex1E)
apply (erule conjE)
apply (rule someI2)
apply blast+
done
lemma RootEx_Root_ran [simp]:
"RootEx F G \<Longrightarrow> Root F G \<notin> \<Union> (ran G)"
apply (unfold RootEx_def Root_def)
apply (erule ex1E)
apply (erule conjE)
apply (rule someI2)
apply blast+
done
lemma RootEx_States_Subset [simp]:
"(RootEx F G) \<Longrightarrow> States (Root F G) \<subseteq> (\<Union> x \<in> F . States x)"
apply (unfold RootEx_def Root_def)
apply (erule ex1E)
apply (erule conjE)
apply (rule someI2)
apply fast
apply (unfold UNION_eq)
apply (simp add: subset_eq)
apply auto
done
lemma RootEx_States_notdisjunct [simp]:
"RootEx F G \<Longrightarrow> States (Root F G) \<inter> (\<Union> x \<in> F . States x) \<noteq> {}"
apply (frule RootEx_States_Subset)
apply (case_tac "States (Root F G)={}")
prefer 2
apply fast
apply simp
done
lemma Root_neq_SA [simp]:
"\<lbrakk> RootEx F G; (\<Union> x \<in> F . States x) \<inter> States SA = {} \<rbrakk> \<Longrightarrow> Root F G \<noteq> SA"
apply (rule SA_States_disjunct)
apply (frule RootEx_States_Subset)
apply fast
done
lemma RootEx_HA [simp]:
"RootEx (SAs HA) (CompFun HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def IsCompFun_def)
apply fast
done
subsubsection \<open>\<open>HARoot\<close>\<close>
lemma HARoot_SAs [simp]:
"(HARoot HA) \<in> SAs HA"
apply (unfold HARoot_def)
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def)
apply auto
done
lemma States_HARoot_HAStates:
"States (HARoot HA) \<subseteq> HAStates HA"
apply (unfold HAStates_def)
apply auto
apply (rule_tac x="HARoot HA" in bexI)
apply auto
done
lemma SAEvents_HARoot_HAEvents:
"SAEvents (HARoot HA) \<subseteq> HAEvents HA"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def)
apply auto
apply (rename_tac S)
apply (unfold UNION_eq)
apply (simp add: subset_eq)
apply (erule_tac x=S in allE)
apply auto
done
lemma HARoot_ran_CompFun:
"HARoot HA \<notin> Union (ran (CompFun HA))"
apply (unfold HARoot_def)
apply (cut_tac Rep_hierauto_select)
apply (unfold IsCompFun_def hierauto_def HierAuto_def)
apply fast
done
lemma HARoot_ran_CompFun2:
"S \<in> ran (CompFun HA) \<longrightarrow> HARoot HA \<notin> S"
apply (unfold HARoot_def)
apply (cut_tac Rep_hierauto_select)
apply (unfold IsCompFun_def hierauto_def HierAuto_def)
apply fast
done
subsubsection \<open>\<open>CompFun\<close>\<close>
lemma IsCompFun_HA [simp]:
"IsCompFun (SAs HA) (CompFun HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def)
apply auto
done
lemma dom_CompFun [simp]:
"dom (CompFun HA) = HAStates HA"
apply (cut_tac HA=HA in IsCompFun_HA)
apply (unfold IsCompFun_def HAStates_def)
apply auto
done
lemma ran_CompFun [simp]:
"Union (ran (CompFun HA)) = ((SAs HA) - {Root (SAs HA)(CompFun HA)})"
apply (cut_tac HA=HA in IsCompFun_HA)
apply (unfold IsCompFun_def)
apply fast
done
lemma ran_CompFun_subseteq:
"Union (ran (CompFun HA)) \<subseteq> (SAs HA)"
apply (cut_tac HA=HA in IsCompFun_HA)
apply (unfold IsCompFun_def)
apply fast
done
lemma ran_CompFun_is_not_SA:
"\<not> Sas \<subseteq> (SAs HA) \<Longrightarrow> Sas \<notin> (ran (CompFun HA))"
apply (cut_tac HA=HA in IsCompFun_HA)
apply (unfold IsCompFun_def)
apply fast
done
lemma HAStates_HARoot_CompFun [simp]:
"S \<in> HAStates HA \<Longrightarrow> HARoot HA \<notin> the (CompFun HA S)"
apply (rule ran_dom_the)
back
apply (simp add: HARoot_ran_CompFun2 HARoot_def HAStates_def)+
done
lemma HAStates_CompFun_SAs:
"S \<in> HAStates A \<Longrightarrow> the (CompFun A S) \<subseteq> SAs A"
apply auto
apply (rename_tac T)
apply (cut_tac HA=A in ran_CompFun)
apply (erule equalityE)
apply (erule_tac c=T in subsetCE)
apply (drule ran_dom_the)
apply auto
done
lemma HAStates_CompFun_notmem [simp]:
"\<lbrakk> S \<in> HAStates A; SA \<in> the (CompFun A S) \<rbrakk> \<Longrightarrow> S \<notin> States SA"
apply (unfold HAStates_def)
apply auto
apply (rename_tac T)
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x=SA in ballE)
apply (erule_tac x=T in ballE)
apply auto
prefer 2
apply (cut_tac A=A and S=S in HAStates_CompFun_SAs)
apply (unfold HAStates_def)
apply simp
apply fast
apply fast
apply (cut_tac HA=A in NoCycles_HA)
apply (unfold NoCycles_def)
apply (erule_tac x="{S}" in ballE)
apply auto
done
lemma CompFun_Int_disjoint:
"\<lbrakk> S \<noteq> T; S \<in> HAStates A; T \<in> HAStates A \<rbrakk> \<Longrightarrow> the (CompFun A T) \<inter> the (CompFun A S) = {}"
apply auto
apply (rename_tac U)
apply (cut_tac HA=A in OneAncestor_HA)
apply (unfold OneAncestor_def)
apply (erule_tac x=U in ballE)
prefer 2
apply simp
apply (fold HARoot_def)
apply (frule HAStates_HARoot_CompFun)
apply simp
apply (frule HAStates_CompFun_SAs)
apply auto
apply (erule_tac x=S in allE)
apply (erule_tac x=T in allE)
apply auto
apply (cut_tac HA=A in NoCycles_HA)
apply (unfold NoCycles_def)
apply (simp only: HAStates_def)
apply safe
apply (erule_tac x="{S}" in ballE)
apply simp
apply fast
apply simp
apply (cut_tac HA=A in NoCycles_HA)
apply (unfold NoCycles_def)
apply (simp only: HAStates_def)
apply safe
apply (erule_tac x="{T}" in ballE)
apply simp
apply fast
apply simp
done
subsubsection \<open>\<open>SAs\<close>\<close>
lemma finite_SAs [simp]:
"finite (SAs HA)"
apply (cut_tac Rep_hierauto_select)
apply (unfold hierauto_def HierAuto_def)
apply fast
done
lemma HAStates_SAs_disjunct:
"HAStates HA1 \<inter> HAStates HA2 = {} \<Longrightarrow> SAs HA1 \<inter> SAs HA2 = {}"
apply (unfold UNION_eq HAStates_def Int_def)
apply auto
apply (rename_tac SA)
apply (cut_tac SA=SA in EX_State_SA)
apply (erule exE)
apply auto
done
lemma HAStates_CompFun_SAs_mem [simp]:
"\<lbrakk> S \<in> HAStates A; T \<in> the (CompFun A S) \<rbrakk> \<Longrightarrow> T \<in> SAs A"
apply (cut_tac A=A and S=S in HAStates_CompFun_SAs)
apply auto
done
lemma SAs_States_HAStates:
"SA \<in> SAs A \<Longrightarrow> States SA \<subseteq> HAStates A"
by (unfold HAStates_def, auto)
subsubsection \<open>\<open>HAInitState\<close>\<close>
lemma HAInitState_HARoot [simp]:
"HAInitState A \<in> States (HARoot A)"
by (unfold HAInitState_def, auto)
lemma HAInitState_HARoot2 [simp]:
"HAInitState A \<in> States (Root (SAs A) (CompFun A))"
by (fold HARoot_def, simp)
lemma HAInitStates_HAStates [simp]:
"HAInitStates A \<subseteq> HAStates A"
apply (unfold HAInitStates_def HAStates_def)
apply auto
done
lemma HAInitStates_HAStates2 [simp]:
"S \<in> HAInitStates A \<Longrightarrow> S \<in> HAStates A"
apply (cut_tac A=A in HAInitStates_HAStates)
apply fast
done
lemma HAInitState_HAStates [simp]:
"HAInitState A \<in> HAStates A"
apply (unfold HAStates_def)
apply auto
apply (rule_tac x="HARoot A" in bexI)
apply auto
done
lemma HAInitState_HAInitStates [simp]:
"HAInitState A \<in> HAInitStates A"
by (unfold HAInitStates_def HAInitState_def, auto)
lemma CompFun_HAInitStates_HAStates [simp]:
"\<lbrakk> S \<in> HAStates A; SA \<in> the (CompFun A S) \<rbrakk> \<Longrightarrow> (InitState SA) \<in> HAInitStates A"
apply (unfold HAInitStates_def)
apply auto
done
lemma CompFun_HAInitState_HAInitStates [simp]:
"\<lbrakk> SA \<in> the (CompFun A (HAInitState A)) \<rbrakk> \<Longrightarrow> (InitState SA) \<in> HAInitStates A"
apply (unfold HAInitStates_def)
apply auto
apply (rule_tac x=SA in bexI)
apply auto
apply (cut_tac A=A and S="HAInitState A" in HAStates_CompFun_SAs)
apply auto
done
lemma HAInitState_notmem_States [simp]:
"\<lbrakk> S \<in> HAStates A; SA \<in> the (CompFun A S) \<rbrakk> \<Longrightarrow> HAInitState A \<notin> States SA"
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x=SA in ballE)
apply (erule_tac x="HARoot A" in ballE)
apply auto
done
lemma InitState_notmem_States [simp]:
"\<lbrakk> S \<in> HAStates A; SA \<in> the (CompFun A S);
T \<in> HAInitStates A; T \<noteq> InitState SA \<rbrakk>
\<Longrightarrow> T \<notin> States SA"
apply (unfold HAInitStates_def)
apply auto
apply (rename_tac SAA)
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x=SA in ballE)
apply (erule_tac x=SAA in ballE)
apply auto
done
lemma InitState_States_notmem [simp]:
"\<lbrakk> B \<in> SAs A; C \<in> SAs A; B \<noteq> C \<rbrakk> \<Longrightarrow> InitState B \<notin> States C"
apply auto
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply force
done
lemma OneHAInitState_SAStates:
"\<lbrakk> S \<in> HAInitStates A; T \<in> HAInitStates A;
S \<in> States SA; T \<in> States SA; SA \<in> SAs A \<rbrakk> \<Longrightarrow>
S = T"
apply (unfold HAInitStates_def)
apply auto
apply (rename_tac AA AAA)
apply (case_tac "AA = SA")
apply auto
apply (case_tac "AAA = SA")
apply auto
done
subsubsection \<open>\<open>Chi\<close>\<close>
lemma HARootStates_notmem_Chi [simp]:
"\<lbrakk> S \<in> HAStates A; T \<in> States (HARoot A) \<rbrakk> \<Longrightarrow> T \<notin> Chi A S"
apply (unfold Chi_def restrict_def, auto)
apply (rename_tac SA)
apply (cut_tac HA="A" in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x="HARoot A" in ballE)
apply (erule_tac x="SA" in ballE)
apply auto
done
lemma SAStates_notmem_Chi [simp]:
"\<lbrakk> S \<in> States SA; T \<in> States SA;
SA \<in> SAs A \<rbrakk> \<Longrightarrow> T \<notin> Chi A S"
apply (unfold Chi_def restrict_def, auto)
apply (rename_tac SAA)
apply (cut_tac HA="A" in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x="SAA" in ballE)
apply (erule_tac x="SA" in ballE)
apply auto
apply (unfold HAStates_def)
apply auto
done
lemma HAInitState_notmem_Chi [simp]:
"S \<in> HAStates A \<Longrightarrow> HAInitState A \<notin> Chi A S"
by (unfold Chi_def restrict_def, auto)
lemma Chi_HAStates [simp]:
"T \<in> HAStates A \<Longrightarrow> (Chi A T) \<subseteq> HAStates A"
apply (unfold Chi_def restrict_def)
apply (auto)
apply (cut_tac A=A and S=T in HAStates_CompFun_SAs)
apply (unfold HAStates_def)
apply auto
done
lemma Chi_HAStates_Self [simp]:
"s \<in> HAStates a \<Longrightarrow> s \<notin> (Chi a s)"
by (unfold Chi_def restrict_def, auto)
lemma ChiRel_HAStates_Self [simp]:
"(s,s) \<notin> (ChiRel a)"
by( unfold ChiRel_def, auto)
lemma HAStates_Chi_NoCycles:
"\<lbrakk> s \<in> HAStates a; t \<in> HAStates a; s \<in> Chi a t \<rbrakk> \<Longrightarrow> t \<notin> Chi a s"
apply (unfold Chi_def restrict_def)
apply auto
apply (cut_tac HA=a in NoCycles_HA)
apply (unfold NoCycles_def)
apply (erule_tac x="{s,t}" in ballE)
apply auto
done
lemma HAStates_Chi_NoCycles_trans:
"\<lbrakk> s \<in> HAStates a; t \<in> HAStates a; u \<in> HAStates a;
t \<in> Chi a s; u \<in> Chi a t \<rbrakk> \<Longrightarrow> s \<notin> Chi a u"
apply (unfold Chi_def restrict_def)
apply auto
apply (cut_tac HA=a in NoCycles_HA)
apply (unfold NoCycles_def)
apply (erule_tac x="{s,t,u}" in ballE)
prefer 2
apply simp
apply (unfold HAStates_def)
apply auto
done
lemma SAStates_Chi_trans [rule_format]:
"\<lbrakk> U \<in> Chi A T; S \<in> Chi A U; T \<in> States SA;
SA \<in> SAs A; U \<in> HAStates A \<rbrakk> \<Longrightarrow> S \<notin> States SA"
apply (frule HAStates_SA_mem)
apply auto
apply (unfold Chi_def restrict_def)
apply auto
apply (rename_tac SAA SAAA)
apply (cut_tac HA=A in NoCycles_HA)
apply (unfold NoCycles_def)
apply (erule_tac x="{U,T}" in ballE)
prefer 2
apply (simp only: HAStates_def)
apply auto
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (rotate_tac -1)
apply (erule_tac x=SA in ballE)
apply (rotate_tac -1)
apply (erule_tac x=SAAA in ballE)
apply auto
done
subsubsection \<open>\<open>ChiRel\<close>\<close>
lemma finite_ChiRel [simp]:
"finite (ChiRel A)"
apply (rule_tac B="HAStates A \<times> HAStates A" in finite_subset)
apply auto
done
lemma ChiRel_HAStates_subseteq [simp]:
"(ChiRel A) \<subseteq> (HAStates A \<times> HAStates A)"
apply (unfold ChiRel_def Chi_def restrict_def)
apply auto
done
lemma ChiRel_CompFun:
"s \<in> HAStates a \<Longrightarrow> ChiRel a `` {s} = (\<Union> x \<in> the (CompFun a s). States x)"
apply (unfold ChiRel_def Chi_def restrict_def Image_def)
apply simp
apply auto
apply (frule HAStates_CompFun_SAs_mem)
apply fast
apply (unfold HAStates_def)
apply fast
done
lemma ChiRel_HARoot:
"\<lbrakk> (x,y) \<in> ChiRel A \<rbrakk> \<Longrightarrow> y \<notin> States (HARoot A)"
apply (unfold ChiRel_def Chi_def)
apply auto
apply (unfold restrict_def)
apply auto
apply (rename_tac SA)
apply (frule HAStates_HARoot_CompFun)
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply auto
apply (erule_tac x=SA in ballE)
apply (erule_tac x="HARoot A" in ballE)
apply auto
done
lemma HAStates_CompFun_States_ChiRel:
"S \<in> HAStates A \<Longrightarrow> \<Union> (States ` the (CompFun A S)) = ChiRel A `` {S}"
apply (unfold ChiRel_def Chi_def restrict_def)
apply auto
apply (drule HAStates_CompFun_SAs)
apply (subst HAStates_def)
apply fast
done
lemma HAInitState_notmem_Range_ChiRel [simp]:
"HAInitState A \<notin> Range (ChiRel A)"
by (unfold ChiRel_def, auto)
lemma HAInitState_notmem_Range_ChiRel2 [simp]:
"(S,HAInitState A) \<notin> (ChiRel A)"
by (unfold ChiRel_def, auto)
lemma ChiRel_OneAncestor_notmem:
"\<lbrakk> S \<noteq> T; (S,U) \<in> ChiRel A\<rbrakk> \<Longrightarrow> (T,U) \<notin> ChiRel A"
apply (unfold ChiRel_def)
apply auto
apply (simp only: Chi_range_disjoint)
done
lemma ChiRel_OneAncestor:
"\<lbrakk> (S1,U) \<in> ChiRel A; (S2,U) \<in> ChiRel A \<rbrakk> \<Longrightarrow> S1 = S2"
apply (rule notnotD, rule notI)
apply (simp add: ChiRel_OneAncestor_notmem)
done
lemma CompFun_ChiRel:
"\<lbrakk> S1 \<in> HAStates A; SA \<in> the (CompFun A S1);
S2 \<in> States SA \<rbrakk> \<Longrightarrow> (S1,S2) \<in> ChiRel A"
apply (unfold ChiRel_def Chi_def restrict_def)
apply auto
apply (cut_tac A=A and S=S1 in HAStates_CompFun_SAs)
apply (unfold HAStates_def)
apply auto
done
lemma CompFun_ChiRel2:
"\<lbrakk> (S,T) \<in> ChiRel A; T \<in> States SA; SA \<in> SAs A \<rbrakk> \<Longrightarrow> SA \<in> the (CompFun A S)"
apply (unfold ChiRel_def Chi_def restrict_def)
apply auto
apply (rename_tac SAA)
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x=SA in ballE)
apply (rotate_tac -1)
apply (erule_tac x=SAA in ballE)
apply auto
done
lemma ChiRel_HAStates_NoCycles:
"(s,t) \<in> (ChiRel a) \<Longrightarrow> (t,s) \<notin> (ChiRel a)"
apply (unfold ChiRel_def)
apply auto
apply (frule HAStates_Chi_NoCycles)
apply auto
done
lemma HAStates_ChiRel_NoCycles_trans:
"\<lbrakk> (s,t) \<in> (ChiRel a); (t,u) \<in> (ChiRel a) \<rbrakk> \<Longrightarrow> (u,s) \<notin> (ChiRel a)"
apply (unfold ChiRel_def)
apply auto
apply (frule HAStates_Chi_NoCycles_trans)
apply fast
back
back
prefer 3
apply fast
apply auto
done
lemma SAStates_ChiRel:
"\<lbrakk> S \<in> States SA; T \<in> States SA;
SA \<in> SAs A \<rbrakk> \<Longrightarrow> (S,T) \<notin> (ChiRel A)"
by (unfold ChiRel_def, auto)
lemma ChiRel_SA_OneAncestor:
"\<lbrakk> (S,T) \<in> ChiRel A; T \<in> States SA;
U \<in> States SA; SA \<in> SAs A \<rbrakk> \<Longrightarrow>
(S,U) \<in> ChiRel A"
apply (frule CompFun_ChiRel2)
apply auto
apply (rule CompFun_ChiRel)
apply auto
done
lemma ChiRel_OneAncestor2:
"\<lbrakk> S \<in> HAStates A; S \<notin> States (HARoot A) \<rbrakk> \<Longrightarrow>
\<exists>! T. (T,S) \<in> ChiRel A"
apply (unfold ChiRel_def)
apply auto
prefer 2
apply (rename_tac T U)
prefer 2
apply (unfold Chi_def restrict_def)
apply auto
prefer 2
apply (rename_tac SA SAA)
prefer 2
apply (cut_tac HA=A in OneAncestor_HA)
apply (unfold OneAncestor_def)
apply (fold HARoot_def)
apply auto
apply (simp cong: rev_conj_cong)
apply (unfold HAStates_def)
apply auto
apply (rename_tac SA)
apply (erule_tac x=SA in ballE)
apply auto
apply (case_tac "T = U")
apply auto
apply (frule CompFun_Int_disjoint)
apply (unfold HAStates_def)
apply auto
apply (case_tac "SA=SAA")
apply auto
apply (cut_tac HA=A in MutuallyDistinct_HA)
apply (unfold MutuallyDistinct_def)
apply (erule_tac x=SAA in ballE)
apply (erule_tac x=SA in ballE)
apply auto
apply (cut_tac S=T and A=A in HAStates_CompFun_SAs)
apply (unfold HAStates_def)
apply fast
apply fast
apply (cut_tac S=U and A=A in HAStates_CompFun_SAs)
apply (unfold HAStates_def)
apply fast
apply fast
done
lemma HARootStates_notmem_Range_ChiRel [simp]:
"S \<in> States (HARoot A) \<Longrightarrow> S \<notin> Range (ChiRel A)"
by (unfold ChiRel_def, auto)
lemma ChiRel_int_disjoint:
"S \<noteq> T \<Longrightarrow> (ChiRel A `` {S}) \<inter> (ChiRel A `` {T}) = {}"
apply (unfold ChiRel_def)
apply auto
apply (simp only: Chi_range_disjoint)
done
lemma SAStates_ChiRel_trans [rule_format]:
"\<lbrakk> (S,U) \<in> (ChiRel A); (U,T) \<in> ChiRel A;
S \<in> States SA; SA \<in> SAs A \<rbrakk> \<Longrightarrow> T \<notin> States SA"
apply auto
apply (unfold ChiRel_def)
apply auto
apply (frule SAStates_Chi_trans)
back
apply fast+
done
lemma HAInitStates_InitState_trancl:
" \<lbrakk> S \<in> HAInitStates (HA ST); A \<in> the (CompFun (HA ST) S) \<rbrakk> \<Longrightarrow>
(S, InitState A) \<in> (ChiRel (HA ST) \<inter> HAInitStates (HA ST) \<times> HAInitStates (HA ST))\<^sup>+"
apply (case_tac "S \<in> HAStates (HA ST)")
apply (frule CompFun_ChiRel)
apply fast+
apply (rule InitState_States)
apply auto
apply (rule r_into_trancl')
apply auto
apply (rule CompFun_HAInitStates_HAStates)
apply auto
done
lemma HAInitStates_InitState_trancl2:
"\<lbrakk> S \<in> HAStates (HA ST); A \<in> the (CompFun (HA ST) S);
(x, S) \<in> (ChiRel (HA ST) \<inter> HAInitStates (HA ST) \<times> HAInitStates (HA ST))\<^sup>+ \<rbrakk>
\<Longrightarrow> (x, InitState A) \<in> (ChiRel (HA ST) \<inter> HAInitStates (HA ST) \<times> HAInitStates (HA ST))\<^sup>+"
apply (rule_tac a="x" and b="S" and r="ChiRel (HA ST) \<inter> HAInitStates (HA ST) \<times> HAInitStates (HA ST)" in converse_trancl_induct)
apply auto
prefer 2
apply (rename_tac T U)
prefer 2
apply (case_tac "S \<in> HAStates (HA ST)")
apply (frule CompFun_ChiRel)
apply fast
apply (rule InitState_States)
apply simp
apply (rule trancl_trans [of _ S])
apply (rule r_into_trancl')
apply auto
apply (rule r_into_trancl')
apply auto
apply (rule CompFun_HAInitStates_HAStates)
prefer 2
apply fast
apply (cut_tac A="HA ST" in HAInitStates_HAStates, fast)
apply (rule_tac y = U in trancl_trans)
apply (rule r_into_trancl')
apply auto
done
subsubsection \<open>\<open>ChiPlus\<close>\<close>
lemma ChiPlus_ChiRel [simp]:
"(S,T) \<in> ChiRel A \<Longrightarrow> (S,T) \<in> ChiPlus A"
apply (unfold ChiPlus_def)
apply (frule r_into_trancl)
apply auto
done
lemma ChiPlus_HAStates [simp]:
"(ChiPlus A) \<subseteq> (HAStates A \<times> HAStates A)"
apply (unfold ChiPlus_def)
apply (rule trancl_subset_Sigma)
apply auto
done
lemma ChiPlus_subset_States:
"ChiPlus a `` {t} \<subseteq> \<Union>(States ` (SAs a))"
apply (cut_tac A=a in ChiPlus_HAStates)
apply (unfold HAStates_def)
apply auto
done
lemma finite_ChiPlus [simp]:
"finite (ChiPlus A)"
apply (rule_tac B="HAStates A \<times> HAStates A" in finite_subset)
apply auto
done
lemma ChiPlus_OneAncestor:
"\<lbrakk> S \<in> HAStates A; S \<notin> States (HARoot A) \<rbrakk> \<Longrightarrow>
\<exists> T. (T,S) \<in> ChiPlus A"
apply (unfold ChiPlus_def)
apply (frule ChiRel_OneAncestor2)
apply auto
done
lemma ChiPlus_HAStates_Left:
"(S,T) \<in> ChiPlus A \<Longrightarrow> S \<in> HAStates A"
apply (cut_tac A=A in ChiPlus_HAStates)
apply (unfold HAStates_def)
apply auto
done
lemma ChiPlus_HAStates_Right:
"(S,T) \<in> ChiPlus A \<Longrightarrow> T \<in> HAStates A"
apply (cut_tac A=A in ChiPlus_HAStates)
apply (unfold HAStates_def)
apply auto
done
lemma ChiPlus_ChiRel_int [rule_format]:
"\<lbrakk> (T,S) \<in> (ChiPlus A)\<rbrakk> \<Longrightarrow> (ChiPlus A `` {T}) \<inter> (ChiRel A `` {S}) = (ChiRel A `` {S})"
apply (unfold ChiPlus_def)
apply (rule_tac a="T" and b="S" and r="(ChiRel A)" in converse_trancl_induct)
apply auto
done
lemma ChiPlus_ChiPlus_int [rule_format]:
"\<lbrakk> (T,S) \<in> (ChiPlus A)\<rbrakk> \<Longrightarrow> (ChiPlus A `` {T}) \<inter> (ChiPlus A `` {S}) = (ChiPlus A `` {S})"
apply (unfold ChiPlus_def)
apply (rule_tac a="T" and b="S" and r="(ChiRel A)" in converse_trancl_induct)
apply auto
done
lemma ChiPlus_ChiRel_NoCycle_2 [rule_format]:
"\<lbrakk> (T,S) \<in> ChiPlus A\<rbrakk> \<Longrightarrow> (S,T) \<in> (ChiRel A) \<longrightarrow>
(insert S (insert T ({U. (T,U) \<in> ChiPlus A \<and> (U,S) \<in> ChiPlus A}))) \<inter> (ChiRel A `` {S}) \<noteq> {}"
apply (unfold ChiPlus_def)
apply (rule_tac a="T" and b="S" and r="(ChiRel A)" in converse_trancl_induct)
apply (unfold Image_def Int_def)
apply auto
done
lemma ChiPlus_ChiRel_NoCycle_3 [rule_format]:
"\<lbrakk> (T,S) \<in> ChiPlus A\<rbrakk> \<Longrightarrow> (S,T) \<in> (ChiRel A) \<longrightarrow> (T,U) \<in> ChiPlus A \<longrightarrow> (U, S) \<in> ChiPlus A \<longrightarrow>
(insert S (insert T ({U. (T,U) \<in> ChiPlus A \<and> (U,S) \<in> ChiPlus A}))) \<inter> (ChiRel A `` {U}) \<noteq> {}"
apply (unfold ChiPlus_def)
apply (rule_tac a="T" and b="S" and r="(ChiRel A)" in trancl_induct)
apply (unfold Image_def Int_def, simp)
apply (rename_tac V)
prefer 2
apply (rename_tac V W)
prefer 2
apply (simp, safe)
apply (simp only: ChiRel_HAStates_NoCycles)
apply simp
apply (case_tac "(U,W) \<in> (ChiRel A)", fast, rotate_tac 5, frule tranclD3, fast, blast intro: trancl_into_trancl)+
done
lemma ChiPlus_ChiRel_NoCycle_4 [rule_format]:
"\<lbrakk> (T,S) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (S,T) \<in> (ChiRel A) \<longrightarrow> ((ChiPlus A ``{T}) \<inter> (ChiRel A `` {S})) \<noteq> {}"
apply (unfold ChiPlus_def)
apply (rule_tac a="T" and b="S" and r="(ChiRel A)" in trancl_induct)
apply (unfold Image_def Int_def)
apply auto
apply (simp only: ChiRel_HAStates_NoCycles)
apply (rule_tac x=T in exI)
apply simp
apply (rule_tac x=T in exI)
apply simp
done
lemma ChiRel_ChiPlus_NoCycles:
"(S,T) \<in> (ChiRel A) \<Longrightarrow> (T,S) \<notin> (ChiPlus A)"
apply (cut_tac HA=A in NoCycles_HA)
apply (unfold NoCycles_def)
apply (erule_tac x="insert S (insert T ({U. (T,U) \<in> ChiPlus A \<and> (U,S) \<in> ChiPlus A}))" in ballE)
prefer 2
apply (simp add: ChiPlus_subset_States)
apply (cut_tac A=A in ChiPlus_HAStates)
apply (unfold HAStates_def)
apply auto
apply (frule ChiPlus_ChiRel_NoCycle_2)
apply fast
apply (simp add:ChiRel_CompFun)
apply (frule ChiPlus_ChiRel_NoCycle_1)
apply (simp add:ChiRel_CompFun)
apply (frule ChiPlus_ChiRel_NoCycle_3)
apply fast
apply fast
back
apply fast
apply (rename_tac V)
apply (case_tac "V \<in> HAStates A")
apply (simp add: ChiRel_CompFun)
apply (simp only: ChiPlus_HAStates_Right)
apply fast
done
lemma ChiPlus_ChiPlus_NoCycles:
"(S,T) \<in> (ChiPlus A) \<Longrightarrow> (T,S) \<notin> (ChiPlus A)"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="T" and r="(ChiRel A)" in trancl_induct)
apply fast
apply (frule ChiRel_ChiPlus_NoCycles)
apply (auto intro: trancl_into_trancl2 simp add:ChiPlus_def)
done
lemma ChiPlus_NoCycles [rule_format]:
"(S,T) \<in> (ChiPlus A) \<Longrightarrow> S \<noteq> T"
apply (frule ChiPlus_ChiPlus_NoCycles)
apply auto
done
lemma ChiPlus_NoCycles_2 [simp]:
"(S,S) \<notin> (ChiPlus A)"
apply (rule notI)
apply (frule ChiPlus_NoCycles)
apply fast
done
lemma ChiPlus_ChiPlus_NoCycles_2:
"\<lbrakk> (S,U) \<in> ChiPlus A; (U,T) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (T,S) \<notin> ChiPlus A"
apply (rule ChiPlus_ChiPlus_NoCycles)
apply (auto intro: trancl_trans simp add: ChiPlus_def)
done
lemma ChiRel_ChiPlus_trans:
"\<lbrakk> (U,S) \<in> ChiPlus A; (S,T) \<in> ChiRel A\<rbrakk> \<Longrightarrow> (U,T) \<in> ChiPlus A"
apply (unfold ChiPlus_def)
apply auto
done
lemma ChiRel_ChiPlus_trans2:
"\<lbrakk> (U,S) \<in> ChiRel A; (S,T) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (U,T) \<in> ChiPlus A"
apply (unfold ChiPlus_def)
apply auto
done
lemma ChiPlus_ChiRel_Ex [rule_format]:
"\<lbrakk> (S,T) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (S,T) \<notin> ChiRel A \<longrightarrow>
(\<exists> U. (S,U) \<in> ChiPlus A \<and> (U,T) \<in> ChiRel A)"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="T" and r="(ChiRel A)" in converse_trancl_induct)
apply auto
apply (rename_tac U)
apply (rule_tac x=U in exI)
apply auto
done
lemma ChiPlus_ChiRel_Ex2 [rule_format]:
"\<lbrakk> (S,T) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (S,T) \<notin> ChiRel A \<longrightarrow>
(\<exists> U. (S,U) \<in> ChiRel A \<and> (U,T) \<in> ChiPlus A)"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="T" and r="(ChiRel A)" in converse_trancl_induct)
apply auto
done
lemma HARootStates_Range_ChiPlus [simp]:
"\<lbrakk> S \<in> States (HARoot A) \<rbrakk> \<Longrightarrow> S \<notin> Range (ChiPlus A)"
by (unfold ChiPlus_def, auto)
lemma SAStates_ChiPlus_ChiRel_NoCycle_2 [rule_format]:
"\<lbrakk> (S,U) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (U,T) \<in> (ChiRel A) \<longrightarrow>
(insert S (insert U ({V. (S,V) \<in> ChiPlus A \<and> (V,U) \<in> ChiPlus A}))) \<inter> (ChiRel A `` {S}) \<noteq> {}"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="U" and r="(ChiRel A)" in converse_trancl_induct)
apply (unfold Image_def Int_def)
apply auto
done
(* TO DO *)
lemma SAStates_ChiPlus_ChiRel_NoCycle_3 [rule_format]:
"\<lbrakk> (S,U) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (U,T) \<in> (ChiRel A) \<longrightarrow> (S,s) \<in> ChiPlus A \<longrightarrow> (s,U) \<in> ChiPlus A \<longrightarrow>
(insert S (insert U ({V. (S,V) \<in> ChiPlus A \<and> (V,U) \<in> ChiPlus A}))) \<inter> (ChiRel A `` {s}) \<noteq> {}"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="U" and r="(ChiRel A)" in trancl_induct)
apply fast
apply (rename_tac W)
prefer 2
apply (rename_tac W X)
prefer 2
apply (unfold Image_def Int_def)
apply (simp, safe)
apply (fold ChiPlus_def)
apply (case_tac "(s,W) \<in> ChiRel A")
apply fast
apply (frule_tac S=s and T=W in ChiPlus_ChiRel_Ex2)
apply simp
apply safe
apply (rename_tac X)
apply (rule_tac x=X in exI)
apply (fast intro: ChiRel_ChiPlus_trans)
apply simp
apply (case_tac "(s,X) \<in> ChiRel A")
apply force
apply (frule_tac S=s and T=X in ChiPlus_ChiRel_Ex2)
apply simp
apply safe
apply (rename_tac Y)
apply (erule_tac x=Y in allE)
apply simp
apply (fast intro: ChiRel_ChiPlus_trans)
apply simp
apply (case_tac "(s,X) \<in> ChiRel A")
apply force
apply (frule_tac S=s and T=X in ChiPlus_ChiRel_Ex2)
apply simp
apply safe
apply (rename_tac Y)
apply (erule_tac x=Y in allE)
apply simp
apply (fast intro: ChiRel_ChiPlus_trans)
apply fastforce
apply simp
apply (erule_tac x=W in allE)
apply simp
apply simp
apply (rename_tac Y)
apply (erule_tac x=Y in allE)
apply simp
apply (fast intro: ChiRel_ChiPlus_trans)
done
lemma SAStates_ChiPlus2 [rule_format]:
"\<lbrakk> (S,T) \<in> ChiPlus A; SA \<in> SAs A \<rbrakk> \<Longrightarrow> S \<in> States SA \<longrightarrow> T \<notin> States SA"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="T" and r="(ChiRel A)" in trancl_induct)
apply auto
apply (rename_tac U)
apply (frule_tac S=S and T=U in SAStates_ChiRel)
apply auto
apply (fold ChiPlus_def)
apply (simp only: SAStates_ChiPlus_ChiRel_trans)
done
lemma SAStates_ChiPlus [rule_format]:
"\<lbrakk> S \<in> States SA; T \<in> States SA; SA \<in> SAs A \<rbrakk> \<Longrightarrow> (S,T) \<notin> ChiPlus A"
apply auto
apply (simp only: SAStates_ChiPlus2)
done
lemma SAStates_ChiPlus_ChiRel_OneAncestor [rule_format]:
"\<lbrakk> T \<in> States SA; SA \<in> SAs A; (S,U) \<in> ChiPlus A\<rbrakk> \<Longrightarrow> S \<noteq> T \<longrightarrow> S \<in> States SA \<longrightarrow> (T,U) \<notin> ChiRel A"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="U" and r="(ChiRel A)" in trancl_induct)
apply auto
apply (simp add: ChiRel_OneAncestor_notmem)
apply (rename_tac V W)
apply (fold ChiPlus_def)
apply (case_tac "V=T")
apply (simp add: ChiRel_OneAncestor_notmem SAStates_ChiPlus)+
done
lemma SAStates_ChiPlus_OneAncestor [rule_format]:
"\<lbrakk> T \<in> States SA; SA \<in> SAs A; (S,U) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> S \<noteq> T \<longrightarrow>
S \<in> States SA \<longrightarrow> (T,U) \<notin> ChiPlus A"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="U" and r="(ChiRel A)" in trancl_induct)
apply auto
apply (fold ChiPlus_def)
apply (rename_tac V)
apply (frule_tac T=S and S=T and U=V in SAStates_ChiPlus_ChiRel_OneAncestor)
apply auto
apply (rename_tac V W)
apply (frule_tac S=T and T=W in ChiPlus_ChiRel_Ex)
apply auto
apply (frule_tac T=T and S=S and U=W in SAStates_ChiPlus_ChiRel_OneAncestor)
apply auto
apply (rule ChiRel_ChiPlus_trans)
apply auto
apply (rename_tac X)
apply (case_tac "V=X")
apply simp
apply (simp add: ChiRel_OneAncestor_notmem)
done
lemma ChiRel_ChiPlus_OneAncestor [rule_format]:
"\<lbrakk> (T,U) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> T \<noteq> S \<longrightarrow> (S,U) \<in> ChiRel A \<longrightarrow> (T,S) \<in> ChiPlus A"
apply (unfold ChiPlus_def)
apply (rule_tac a="T" and b="U" and r="(ChiRel A)" in trancl_induct)
apply auto
apply (fast intro:ChiRel_OneAncestor)
apply (rename_tac V W)
apply (case_tac "S=V")
apply auto
apply (fast intro:ChiRel_OneAncestor)
done
lemma ChiPlus_SA_OneAncestor [rule_format]:
"\<lbrakk> (S,T) \<in> ChiPlus A;
U \<in> States SA; SA \<in> SAs A \<rbrakk> \<Longrightarrow> T \<in> States SA \<longrightarrow>
(S,U) \<in> ChiPlus A"
apply (unfold ChiPlus_def)
apply (rule_tac a="S" and b="T" and r="(ChiRel A)" in converse_trancl_induct)
apply auto
apply (frule ChiRel_SA_OneAncestor)
apply fast+
done
subsubsection \<open>\<open>ChiStar\<close>\<close>
lemma ChiPlus_ChiStar [simp]:
"\<lbrakk> (S,T) \<in> ChiPlus A \<rbrakk> \<Longrightarrow> (S,T) \<in> ChiStar A"
by (unfold ChiPlus_def ChiStar_def, auto)
lemma HARootState_Range_ChiStar [simp]:
"\<lbrakk> x \<noteq> S; S \<in> States (HARoot A) \<rbrakk> \<Longrightarrow> (x,S) \<notin> (ChiStar A)"
apply (unfold ChiStar_def)
apply (subst rtrancl_eq_or_trancl)
apply (fold ChiPlus_def)
apply auto
done
lemma ChiStar_Self [simp]:
"(S,S) \<in> ChiStar A"
apply (unfold ChiStar_def)
apply simp
done
lemma ChiStar_Image [simp]:
"S \<in> M \<Longrightarrow> S \<in> (ChiStar A `` M)"
apply (unfold Image_def)
apply (auto intro: ChiStar_Self)
done
lemma ChiStar_ChiPlus_noteq:
"\<lbrakk> S \<noteq> T; (S,T) \<in> ChiStar A \<rbrakk> \<Longrightarrow> (S,T) \<in> ChiPlus A"
apply (unfold ChiPlus_def ChiStar_def)
apply (simp add: rtrancl_eq_or_trancl)
done
lemma ChiRel_ChiStar_trans:
"\<lbrakk> (S,U) \<in> ChiStar A; (U,T) \<in> ChiRel A \<rbrakk> \<Longrightarrow> (S,T) \<in> ChiStar A"
apply (unfold ChiStar_def)
apply auto
done
subsubsection \<open>\<open>InitConf\<close>\<close>
lemma InitConf_HAStates [simp]:
"InitConf A \<subseteq> HAStates A"
apply (unfold InitConf_def HAStates_def)
apply auto
apply (rule rtrancl_induct)
back
apply auto
apply (rule_tac x="HARoot A" in bexI)
apply auto
apply (unfold HAStates_def ChiRel_def)
apply auto
done
lemma InitConf_HAStates2 [simp]:
"S \<in> InitConf A \<Longrightarrow> S \<in> HAStates A"
apply (cut_tac A=A in InitConf_HAStates)
apply fast
done
lemma HAInitState_InitConf [simp]:
"HAInitState A \<in> InitConf A"
by (unfold HAInitState_def InitConf_def, auto)
lemma InitConf_HAInitState_HARoot:
"[| S \<in> InitConf A; S \<noteq> HAInitState A |] ==> S \<notin> States (HARoot A)"
apply (unfold InitConf_def)
apply auto
apply (rule mp)
prefer 2
apply fast
back
apply (rule mp)
prefer 2
apply fast
back
back
apply (rule_tac b=S in rtrancl_induct)
apply auto
apply (simp add: ChiRel_HARoot)+
done
lemma InitConf_HARoot_HAInitState [simp]:
"\<lbrakk> S \<in> InitConf A; S \<in> States (HARoot A) \<rbrakk> \<Longrightarrow> S = HAInitState A"
apply (subst not_not [THEN sym])
apply (rule notI)
apply (simp add:InitConf_HAInitState_HARoot)
done
lemma HAInitState_CompFun_InitConf [simp]:
"[|SA \<in> the (CompFun A (HAInitState A)) |] ==> (InitState SA) \<in> InitConf A"
apply (unfold InitConf_def HAStates_def)
apply auto
apply (rule rtrancl_Int)
apply auto
apply (cut_tac A=A and S="HAInitState A" in HAStates_CompFun_States_ChiRel)
apply auto
apply (rule Image_singleton_iff [THEN subst])
apply (rotate_tac -1)
apply (drule sym)
apply simp
apply (rule_tac x=SA in bexI)
apply auto
done
lemma InitConf_HAInitStates:
"InitConf A \<subseteq> HAInitStates A"
apply (unfold InitConf_def)
apply (rule subsetI)
apply auto
apply (frule rtrancl_Int1)
apply (case_tac "x = HAInitState A")
apply simp
apply (rule rtrancl_mem_Sigma)
apply auto
done
lemma InitState_notmem_InitConf:
"[| SA \<in> the (CompFun A S); S \<in> InitConf A; T \<in> States SA;
T \<noteq> InitState SA |] ==> T \<notin> InitConf A"
apply (frule InitConf_HAStates2)
apply (unfold InitConf_def)
apply auto
apply (rule mp)
prefer 2
apply fast
apply (rule mp)
prefer 2
apply fast
back
apply (rule mp)
prefer 2
apply fast
back
back
apply (rule mp)
prefer 2
apply fast
back
back
back
apply (rule mp)
prefer 2
apply fast
back
back
back
back
apply (rule mp)
prefer 2
apply fast
back
back
back
back
back
apply (rule_tac b=T in rtrancl_induct)
apply auto
done
lemma InitConf_CompFun_InitState [simp]:
"\<lbrakk> SA \<in> the (CompFun A S); S \<in> InitConf A; T \<in> States SA;
T \<in> InitConf A \<rbrakk> \<Longrightarrow> T = InitState SA"
apply (subst not_not [THEN sym])
apply (rule notI)
apply (frule InitState_notmem_InitConf)
apply auto
done
lemma InitConf_ChiRel_Ancestor:
"\<lbrakk> T \<in> InitConf A; (S,T) \<in> ChiRel A \<rbrakk> \<Longrightarrow> S \<in> InitConf A"
apply (unfold InitConf_def)
apply auto
apply (erule rtranclE)
apply auto
apply (rename_tac U)
apply (cut_tac A=A in HAInitState_notmem_Range_ChiRel)
apply auto
apply (case_tac "U = S")
apply (auto simp add: ChiRel_OneAncestor)
done
lemma InitConf_CompFun_Ancestor:
"\<lbrakk> S \<in> HAStates A; SA \<in> the (CompFun A S); T \<in> InitConf A; T \<in> States SA \<rbrakk>
\<Longrightarrow> S \<in> InitConf A"
apply (rule InitConf_ChiRel_Ancestor)
apply auto
apply (rule CompFun_ChiRel)
apply auto
done
subsubsection \<open>\<open>StepConf\<close>\<close>
lemma StepConf_EmptySet [simp]:
"StepConf A C {} = C"
by (unfold StepConf_def, auto)
end
|
-- Andreas, 2017-10-04, issue #2752, report and test case by nad
--
-- Problem was: instance does not distribute into mutual blocks.
open import Agda.Builtin.List
open import Agda.Builtin.Size
mutual
data Rose (i : Size) (A : Set) : Set where
node : List (Rose′ i A) → Rose i A
data Rose′ (i : Size) (A : Set) : Set where
delay : {j : Size< i} → Rose j A → Rose′ i A
record Map (F : Set → Set) : Set₁ where
field
map : {A B : Set} → (A → B) → F A → F B
open Map ⦃ … ⦄ public
instance
Map-List : Map List
Map.map Map-List = λ where
f [] → []
f (x ∷ xs) → f x ∷ map f xs
instance
mutual
Map-Rose : ∀ {i} → Map (Rose i)
Map.map Map-Rose f (node xs) = node (map (map f) xs)
Map-Rose′ : ∀ {i} → Map (Rose′ i)
Map.map Map-Rose′ f (delay t) = delay (map f t)
-- Was: unresolved instance arguments.
-- Should succeed.
|
function filum_test02 ( )
%*****************************************************************************80
%
%% TEST02 tests FILE_CHAR_COUNT.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 06 June 2007
%
% Author:
%
% John Burkardt
%
fprintf ( 1, '\n' );
fprintf ( 1, 'TEST02\n' );
fprintf ( 1, ' FILE_CHAR_COUNT counts the characters in a file.\n' );
filename = 'story.txt';
fprintf ( 1, '\n' );
fprintf ( 1, ' Examining file:\n' );
fprintf ( 1, ' "%s".', filename );
fprintf ( 1, '\n' );
char_num = file_char_count ( filename );
fprintf ( 1, ' Number of characters in file: %d\n', char_num );
return
end
|
subroutine hruaa(years)
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine writes average annual HRU output to the output.hru file
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! bio_aams(:) |metric tons/ha|average annual biomass (dry weight) in HRU
!! cpnm(:) |NA |four character code to represent crop name
!! deepst(:) |mm H2O |depth of water in deep aquifer
!! hru_km(:) |km^2 |area of HRU in square kilometers
!! hru_sub(:) |none |subbasin in which HRU is located
!! hruaao(1,:) |mm H2O |precipitation in HRU during simulation
!! hruaao(2,:) |mm H2O |amount of precipitation falling as freezing
!! |rain/snow in HRU during simulation
!! hruaao(3,:) |mm H2O |amount of snow melt in HRU during simulation
!! hruaao(4,:) |mm H2O |amount of surface runoff to main channel
!! |from HRU during simulation (ignores impact of
!! |transmission losses)
!! hruaao(5,:) |mm H2O |amount of lateral flow contribution to main
!! |channel from HRU during simulation
!! hruaao(6,:) |mm H2O |amount of groundwater flow contribution to
!! |main channel from HRU during simulation
!! hruaao(7,:) |mm H2O |amount of water moving from shallow aquifer
!! |to plants or soil profile in HRU during
!! |simulation
!! hruaao(8,:) |mm H2O |amount of water recharging deep aquifer in
!! |HRU during simulation
!! hruaao(9,:) |mm H2O |total amount of water entering both aquifers
!! |from HRU during simulation
!! hruaao(10,:) |mm H2O |water yield (total amount of water entering
!! |main channel) from HRU during simulation
!! hruaao(11,:) |mm H2O |amount of water percolating out of the soil
!! |profile and into the vadose zone in HRU
!! |during simulation
!! hruaao(12,:) |mm H2O |actual evapotranspiration in HRU during
!! |simulation
!! hruaao(13,:) |mm H2O |amount of transmission losses from tributary
!! |channels in HRU for simulation
!! hruaao(14,:) |metric tons/ha|sediment yield from HRU for simulation
!! hruaao(17,:) |kg N/ha |amount of nitrogen applied in continuous
!! |fertilizer operation in HRU for simulation
!! hruaao(18,:) |kg P/ha |amount of phosphorus applied in continuous
!! |fertilizer operation in HRU for simulation
!! hruaao(23,:) |mm H2O |amount of water removed from shallow aquifer
!! |in HRU for irrigation during simulation
!! hruaao(24,:) |mm H2O |amount of water removed from deep aquifer
!! |in HRU for irrigation during simulation
!! hruaao(25,:) |mm H2O |potential evapotranspiration in HRU during
!! |simulation
!! hruaao(26,:) |kg N/ha |annual amount of N (organic & mineral)
!! |applied in HRU during grazing
!! hruaao(27,:) |kg P/ha |annual amount of P (organic & mineral)
!! |applied in HRU during grazing
!! hruaao(28,:) |kg N/ha |average annual amount of N (organic &
!! |mineral) auto-applied in HRU
!! hruaao(29,:) |kg P/ha |average annual amount of P (organic &
!! |mineral) auto-applied in HRU
!! hruaao(31,:) |stress days |water stress days in HRU during simulation
!! hruaao(32,:) |stress days |temperature stress days in HRU during
!! |simulation
!! hruaao(33,:) |stress days |nitrogen stress days in HRU during simulation
!! hruaao(34,:) |stress days |phosphorus stress days in HRU during
!! |simulation
!! hruaao(35,:) |kg N/ha |organic nitrogen in surface runoff in HRU
!! |during simulation
!! hruaao(36,:) |kg P/ha |organic phosphorus in surface runoff in HRU
!! |during simulation
!! hruaao(37,:) |kg N/ha |nitrate in surface runoff in HRU during
!! |simulation
!! hruaao(38,:) |kg N/ha |nitrate in lateral flow in HRU during
!! |simulation
!! hruaao(39,:) |kg P/ha |soluble phosphorus in surface runoff in HRU
!! |during simulation
!! hruaao(40,:) |kg N/ha |amount of nitrogen removed from soil by plant
!! |uptake in HRU during simulation
!! hruaao(41,:) |kg N/ha |nitrate percolating past bottom of soil
!! |profile in HRU during simulation
!! hruaao(42,:) |kg P/ha |amount of phosphorus removed from soil by
!! |plant uptake in HRU during simulation
!! hruaao(43,:) |kg P/ha |amount of phosphorus moving from labile
!! |mineral to active mineral pool in HRU during
!! |simulation
!! hruaao(44,:) |kg P/ha |amount of phosphorus moving from active
!! |mineral to stable mineral pool in HRU during
!! |simulation
!! hruaao(45,:) |kg N/ha |amount of nitrogen applied to HRU in
!! |fertilizer and grazing operations during
!! |simulation
!! hruaao(46,:) |kg P/ha |amount of phosphorus applied to HRU in
!! |fertilizer and grazing operations during
!! |simulation
!! hruaao(47,:) |kg N/ha |amount of nitrogen added to soil by fixation
!! |in HRU during simulation
!! hruaao(48,:) |kg N/ha |amount of nitrogen lost by denitrification
!! |in HRU during simulation
!! hruaao(49,:) |kg N/ha |amount of nitrogen moving from active organic
!! |to nitrate pool in HRU during simulation
!! hruaao(50,:) |kg N/ha |amount of nitrogen moving from active organic
!! |to stable organic pool in HRU during
!! |simulation
!! hruaao(51,:) |kg P/ha |amount of phosphorus moving from organic to
!! |labile mineral pool in HRU during simulation
!! hruaao(52,:) |kg N/ha |amount of nitrogen moving from fresh organic
!! |to nitrate and active organic pools in HRU
!! |during simulation
!! hruaao(53,:) |kg P/ha |amount of phosphorus moving from fresh
!! |organic to the labile mineral and organic
!! |pools in HRU during simulation
!! hruaao(54,:) |kg N/ha |amount of nitrogen added to soil in rain
!! |during simulation
!! hruaao(61,:) |metric tons/ha|daily soil loss predicted with USLE equation
!! hruaao(63,:) |# bacteria/ha |less persistent bacteria transported to main
!! |channel from HRU during simulation
!! hruaao(64,:) |# bacteria/ha |persistent bacteria transported to main
!! |channel from HRU during simulation
!! hruaao(65,:) |kg N/ha |nitrate loading from groundwater in HRU to
!! |main channel during simulation
!! hruaao(66,:) |kg P/ha |soluble P loading from groundwater in HRU to
!! |main channel during simulation
!! hruaao(67,:) |kg P/ha |loading of mineral P attached to sediment
!! |in HRU to main channel during simulation
!! hrugis(:) |none |GIS code printed to output files(output.hru,.rch)
!! icr(:) |none |sequence number of crop grown within the
!! |current year
!! idplt(:) |none |land cover code from crop.dat
!! ipdvas(:) |none |output variable codes for output.hru file
!! isproj |none |special project code:
!! |1 test rewind (run simulation twice)
!! itots |none |number of output variables printed (output.hru)
!! lai_aamx(:) |none |average annual maximum leaf area index in
!! |HRU
!! mhruo |none |maximum number of variables written to
!! |HRU output file (output.hru)
!! nhru |none |number of HRUs in watershed
!! nmgt(:) |none |management code (for GIS output only)
!! nro(:) |none |sequence number of year in rotation
!! shallst(:) |mm H2O |depth of water in shallow aquifer
!! sol_sw(:) |mm H2O |amount of water stored in the soil profile
!! |on any given day
!! yldaa(:) |metric tons/ha|average annual yield (dry weight) in HRU
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ii |none |counter
!! j |none |HRU number
!! pdvas(:) |varies |array to hold HRU output values
!! pdvs(:) |varies |array to hold selected HRU output values
!! |when user doesn't want to print all
!! sb |none |subbasin number
!! years |years |length of simulation
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use parm
real, intent (in) :: years
integer :: j, sb, ii, iflag
real, dimension (mhruo) :: pdvas, pdvs
character (len=4) :: cropname
do j = 1, nhru
sb = hru_sub(j)
iflag = 0
do ii = 1, itoth
if (ipdhru(ii) == j) iflag = 1
end do
if (iflag == 1) then
pdvas = 0.
pdvs = 0.
pdvas(1) = hruaao(1,j)
pdvas(2) = hruaao(2,j)
pdvas(3) = hruaao(3,j)
pdvas(4) = hruaao(22,j)
pdvas(5) = hruaao(25,j)
pdvas(6) = hruaao(12,j)
pdvas(7) = hruaao(21,j) / 365.4
pdvas(8) = sol_sw(j)
pdvas(9) = hruaao(11,j)
pdvas(10) = hruaao(9,j)
pdvas(11) = hruaao(8,j)
pdvas(12) = hruaao(7,j)
pdvas(13) = hruaao(23,j)
pdvas(14) = hruaao(24,j)
pdvas(15) = shallst(j)
pdvas(16) = deepst(j)
pdvas(17) = hruaao(19,j)
pdvas(18) = hruaao(4,j)
pdvas(19) = hruaao(13,j)
pdvas(20) = hruaao(5,j)
pdvas(21) = hruaao(6,j)
pdvas(22) = hruaao(10,j)
pdvas(23) = hruaao(20,j) / 365.4
pdvas(24) = hruaao(57,j) / 365.4
pdvas(25) = hruaao(55,j) / 365.4
pdvas(26) = hruaao(56,j) / 365.4
pdvas(27) = hruaao(30,j) / 365.4
pdvas(28) = hruaao(58,j) / 365.4
pdvas(29) = hruaao(14,j)
pdvas(30) = hruaao(61,j)
pdvas(31) = hruaao(45,j)
pdvas(32) = hruaao(46,j)
pdvas(33) = hruaao(28,j)
pdvas(34) = hruaao(29,j)
pdvas(35) = hruaao(26,j)
pdvas(36) = hruaao(27,j)
pdvas(37) = hruaao(17,j)
pdvas(38) = hruaao(18,j)
pdvas(39) = hruaao(54,j)
pdvas(40) = hruaao(47,j)
pdvas(41) = hruaao(52,j)
pdvas(42) = hruaao(49,j)
pdvas(43) = hruaao(50,j)
pdvas(44) = hruaao(53,j)
pdvas(45) = hruaao(51,j)
pdvas(46) = hruaao(43,j)
pdvas(47) = hruaao(44,j)
pdvas(48) = hruaao(48,j)
pdvas(49) = hruaao(40,j)
pdvas(50) = hruaao(42,j)
pdvas(51) = hruaao(35,j)
pdvas(52) = hruaao(36,j)
pdvas(53) = hruaao(67,j)
pdvas(54) = hruaao(37,j)
pdvas(55) = hruaao(38,j)
pdvas(56) = hruaao(41,j)
pdvas(57) = hruaao(65,j)
pdvas(58) = hruaao(39,j)
pdvas(59) = hruaao(66,j)
pdvas(60) = hruaao(31,j)
pdvas(61) = hruaao(32,j)
pdvas(62) = hruaao(33,j)
pdvas(63) = hruaao(34,j)
pdvas(64) = bio_aams(j)
pdvas(65) = lai_aamx(j)
pdvas(66) = yldaa(j)
pdvas(67) = hruaao(63,j)
pdvas(68) = hruaao(64,j)
!! the following two variables are values at the end of the year
!! they are not summed each day
pdvas(69) = wtab(j) !! based on 30 day antecedent climate(mm) (prec,et)
pdvas(70) = wtabelo !! based on depth from soil surface(mm)
!! added current snow content in the hru (not summed)
pdvas(71) = sno_hru(j)
!! added current soil carbon for first layer
pdvas(72) = cmup_kgh(j) !! first soil layer only
!! added current soil carbon integrated - aggregating all soil layers
pdvas(73) = cmtot_kgh(j)
!! adding qtile to output.hru write 3/2/2010 gsm
pdvas(74) = hruaao(62,j)
!! tileno3 - output.hru
pdvas(75) = hruaao(68,j)
!! latno3 - output.hru
pdvas(76) = hruaao(69,j)
!! gw deep
pdvas(77) = hruaao(70,j)
!! latq contribution
pdvas(78) = hruaao(71,j)
!! phos due to crack flow (tvap)
pdvas(79) = hruaao(72,j)
pdvas(80) = hruaao(73,j) / 365.4
pdvas(81) = hruaao(74,j) / 365.4
pdvas(82) = hruaao(75,j) / 365.4
pdvas(83) = hruaao(76,j) / 365.4
pdvas(84) = hruaao(77,j) / 365.4
pdvas(85) = hruaao(78,j) / 365.4
pdvas(86) = hruaao(79,j) / 365.4
pdvas(87) = hruaao(80,j) / 365.4
pdvas(88) = hruaao(81,j) / 365.4
pdvas(89) = hruaao(82,j) / 365.4
!! set crop name before branching so it is defined for both output paths
idplant = idplt(j)
if (idplant > 0) then
cropname = cpnm(idplant)
else
cropname = "NOCR"
endif
if (ipdvas(1) > 0) then
do ii = 1, itots
pdvs(ii) = pdvas(ipdvas(ii))
end do
if (iscen == 1 .and. isproj == 0) then
write (28,1000) cropname, j, subnum(j), hruno(j), sb,
& nmgt(j), years, hru_km(j), (pdvs(ii), ii = 1, itots)
else if (isproj == 1) then
write (21,1000) cropname, j, subnum(j), hruno(j),
& sb, nmgt(j), years, hru_km(j), (pdvs(ii), ii = 1, itots)
else if (iscen == 1 .and. isproj == 2) then
write (28,2000) cropname, j, subnum(j), hruno(j), sb,
& nmgt(j), years, hru_km(j), (pdvs(ii), ii = 1, itots), iyr
endif
else
if (iscen == 1 .and. isproj == 0) then
write (28,1001) cropname, j, subnum(j), hruno(j), sb,
& nmgt(j), years, hru_km(j), (pdvas(ii), ii = 1, mhruo)
else if (isproj == 1) then
write (21,1001) cropname, j, subnum(j), hruno(j),
& sb, nmgt(j), years, hru_km(j), (pdvas(ii), ii = 1, mhruo)
else if (iscen == 1 .and. isproj == 2) then
write (28,1001) cropname, j, subnum(j), hruno(j), sb,
& nmgt(j), years, hru_km(j), (pdvas(ii), ii = 1, mhruo), iyr
endif
end if
end if
end do
return
1000 format (a4,i5,1x,a5,a4,i5,1x,i4,1x,f4.1,e10.5,66f10.3,1x,
*e10.5,1x,e10.5,8e10.3,3f10.3)
2000 format (a4,i5,1x,a5,a4,i5,1x,i4,1x,f4.1,e10.5,66f10.3,1x,
*e10.5,1x,e10.5,5e10.3,6f10.3,1x,i4)
1001 format (a4,i7,1x,a5,a4,i5,1x,i4,1x,f4.1,e10.5,66f10.3,1x,
*e10.5,1x,e10.5,8e10.3,3f10.3,1x,i4)
end |
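A minimal Python sketch of the column-selection step in the routine above (not SWAT source; the helper name and toy data are hypothetical): when the user supplies output codes in ipdvas, only those entries of the full annual array pdvas are written; otherwise all mhruo columns go out.
# Sketch of the HRU output-column selection; ipdvas holds 1-based
# codes of the requested columns, and ipdvas(1) <= 0 means "write all".
def select_output(pdvas, ipdvas):
    if ipdvas and ipdvas[0] > 0:
        # subset path: pdvs(ii) = pdvas(ipdvas(ii)) in the Fortran
        return [pdvas[code - 1] for code in ipdvas]
    return list(pdvas)  # full path: write every column
print(select_output([1.5, 2.5, 3.5, 4.5], [3, 1]))  # -> [3.5, 1.5]
print(select_output([1.5, 2.5, 3.5, 4.5], [0]))     # -> all four values
|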
-- Moved from the successful test-suite. See Issue 1481.
module tests.Nat where
data Nat : Set where
Z : Nat
S : Nat → Nat
{-# BUILTIN NATURAL Nat #-}
_+_ : Nat → Nat → Nat
Z + m = m
S n + m = S (n + m)
{-# BUILTIN NATPLUS _+_ #-}
_*_ : Nat → Nat → Nat
Z * m = Z
S n * m = m + (n * m)
{-# BUILTIN NATTIMES _*_ #-}
data Unit : Set where
unit : Unit
postulate
IO : Set → Set
String : Set
natToString : Nat → String
putStr : String → IO Unit
printNat : Nat → IO Unit
printNat n = putStr (natToString n)
{-# COMPILED_TYPE IO IO #-}
{-# COMPILED_EPIC natToString (n : Any) -> String = bigToStr(n) #-}
{-# COMPILED_EPIC putStr (a : String, u : Unit) ->
Unit = foreign Int "wputStr" (a : String); primUnit #-}
main : IO Unit
main = printNat (7 * 191)
-- should print 1337
|
{-# OPTIONS --without-K #-}
module Hmm where
-- using the HoTT-Agda library leads to a highlighting error (missing metadata)
-- open import lib.Base
-- open import lib.types.Nat
-- using this library does not, yet the code is copied/pasted from HoTT-Agda
open import Base
S= : {m n : ℕ} → m == n → S m == S n
S= idp = idp
|
[STATEMENT]
lemma of_char_eq_iff [simp]:
\<open>of_char c = of_char d \<longleftrightarrow> c = d\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (of_char c = of_char d) = (c = d)
[PROOF STEP]
by (auto intro: of_char_eqI) |
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9.
Copyright (c) 2020 Oracle and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl
-}
open import LibraBFT.Prelude
open import LibraBFT.Hash
open import LibraBFT.Abstract.Types
open import LibraBFT.Impl.NetworkMsg
open import LibraBFT.Impl.Consensus.Types
open import LibraBFT.Concrete.System.Parameters
open import LibraBFT.Concrete.Obligations
import LibraBFT.Concrete.Properties.VotesOnce as VO
import LibraBFT.Concrete.Properties.LockedRound as LR
open import LibraBFT.Yasm.System ConcSysParms
open import LibraBFT.Yasm.Properties ConcSysParms
-- In this module, we assume that the implementation meets its
-- obligations, and use this assumption to prove that the
-- implementation enjoys one of the per-epoch correctness conditions
-- proved in Abstract.Properties. It can be extended to other
-- properties later.
module LibraBFT.Concrete.Properties (impl-correct : ImplObligations) where
open ImplObligations impl-correct
-- For any reachable state,
module _ {e}(st : SystemState e)(r : ReachableSystemState st)(eid : Fin e) where
open import LibraBFT.Concrete.System sps-cor
open PerState st r
open PerEpoch eid
import LibraBFT.Abstract.Records 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔) as Abs
open import LibraBFT.Abstract.RecordChain 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔)
open import LibraBFT.Abstract.System 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔)
open import LibraBFT.Abstract.Properties 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔)
open import LibraBFT.Abstract.Obligations.VotesOnce 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔)
open import LibraBFT.Abstract.Obligations.LockedRound 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔)
validState : ValidSysState ConcSystemState
validState = record
{ vss-votes-once = VO.Proof.voo sps-cor vo₁ vo₂ st r eid
; vss-locked-round = LR.Proof.lrr sps-cor lr₁ st r eid
}
open All-InSys-props (AbsSystemState.InSys ConcSystemState)
-- committed blocks do not conflict.
S5 : ∀{q q'}
→ {rc : RecordChain (Abs.Q q)} → All-InSys rc
→ {rc' : RecordChain (Abs.Q q')} → All-InSys rc'
→ {b b' : Abs.Block}
→ CommitRule rc b
→ CommitRule rc' b'
→ NonInjective-≡ Abs.bId ⊎ ((Abs.B b) ∈RC rc' ⊎ (Abs.B b') ∈RC rc)
S5 = CommitsDoNotConflict ConcSystemState validState
|
module GRIN.Opts.CopyPropogation
import Data.SortedMap
import GRIN.AST
import GRIN.GrinM
0 CopyMap : Type
CopyMap = SortedMap Var Var
isValVar : (Val name) -> Maybe Var
isValVar (SimpleVal (SVar var)) = Just var
isValVar _ = Nothing
isPureVar : SExp name -> Maybe Var
isPureVar (Pure val) = isValVar val
isPureVar _ = Nothing
copyPropVar : CopyMap -> Var -> Var
copyPropVar cm var = case lookup var cm of
Nothing => var
Just var' => var'
copyPropSVal : CopyMap -> SVal -> SVal
copyPropSVal _ (SLit lit) = SLit lit
copyPropSVal cm (SVar var) = SVar (copyPropVar cm var)
copyPropVal : CopyMap -> Val name -> Val name
copyPropVal cm (SimpleVal val) = SimpleVal (copyPropSVal cm val)
copyPropVal cm (ConstTagNode tag args) = ConstTagNode tag (copyPropSVal cm <$> args)
copyPropVal _ (ConstTag tag) = ConstTag tag
copyPropVal cm (VarTagNode var args) = VarTagNode var (copyPropSVal cm <$> args)
copyPropVal _ VUnit = VUnit
mutual
copyPropSExp : CopyMap -> SExp name -> SExp name
copyPropSExp cm (Do exp) = Do (copyPropExp cm exp)
copyPropSExp cm (App fn vars) = App fn (copyPropVar cm <$> vars)
copyPropSExp cm (Pure val) = Pure (copyPropVal cm val)
copyPropSExp cm (Store val) = Store (copyPropVal cm val)
copyPropSExp cm (Fetch i var) = Fetch i (copyPropVar cm var)
copyPropSExp cm (FetchI i n var) = FetchI i n (copyPropVar cm var)
copyPropSExp cm (Update var val) = Update (copyPropVar cm var) (copyPropVal cm val)
copyPropExp : CopyMap -> Exp name -> Exp name
copyPropExp cm (Bind val rhs rest) =
let rhs' = copyPropSExp cm rhs in
case isValVar val of
Nothing => Bind val rhs' (copyPropExp cm rest)
Just copy => case isPureVar rhs of
Nothing => Bind val rhs' (copyPropExp cm rest)
Just var => copyPropExp (insert copy var cm) rest
copyPropExp cm (Case val alts) = Case (copyPropVal cm val) (copyPropAlt cm <$> alts)
copyPropExp cm (SimpleExp exp) = SimpleExp (copyPropSExp cm exp)
copyPropAlt : CopyMap -> Alt name -> Alt name
copyPropAlt cm (MkAlt pat exp) = MkAlt pat (copyPropExp cm exp)
export
copyProp : Monad m => GrinT name m ()
copyProp = mapProg $ mapExpProg (copyPropExp empty)
|
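A rough Python illustration of the copy propagation implemented above (a sketch over an invented toy IR, not the Idris/GRIN code): a binding of the shape y <- pure x is recorded in a map, and later uses of y are rewritten to x, mirroring CopyMap and the isPureVar/isValVar checks.
# Toy copy propagation: instructions are (dest, op, args) triples,
# and "pure" of a single variable counts as a copy. The IR is hypothetical.
def copy_prop(instrs):
    copies = {}  # dest -> original var, the analogue of CopyMap
    out = []
    for dest, op, args in instrs:
        args = [copies.get(a, a) for a in args]  # rewrite uses first
        if op == "pure" and len(args) == 1 and isinstance(args[0], str):
            copies[dest] = args[0]  # record the copy, drop the bind
        else:
            out.append((dest, op, args))
    return out
prog = [("y", "pure", ["x"]), ("z", "add", ["y", "y"])]
print(copy_prop(prog))  # -> [('z', 'add', ['x', 'x'])]
|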
MODULE el2INT_I
INTERFACE
! *
SUBROUTINE EL2INT(JJA,JJB,JA,JB,ICOLBREI,INTERACT)
INTEGER, INTENT(IN) :: JJA,JJB,JA,JB,ICOLBREI
INTEGER, INTENT(OUT) :: INTERACT
END SUBROUTINE
END INTERFACE
END MODULE
|
The derivative of the circlepath function is given by the formula $2 \pi i r e^{2 \pi i x}$. |
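A short derivation, assuming the usual HOL-Analysis definition $\mathrm{circlepath}\ z\ r\ x = z + r e^{2 \pi i x}$ (the definition is an assumption here, not quoted from the source): $\frac{d}{dx}\left(z + r e^{2 \pi i x}\right) = r \cdot 2 \pi i \, e^{2 \pi i x} = 2 \pi i r e^{2 \pi i x}$, which is the stated formula. |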
State Before: K : Type u
V✝ V₁ V₂ V₃ : Type v
V' V'₁ : Type v'
V'' : Type v''
ι : Type w
ι' : Type w'
η✝ : Type u₁'
φ : η✝ → Type ?u.526682
inst✝¹⁸ : Ring K
inst✝¹⁷ : StrongRankCondition K
inst✝¹⁶ : AddCommGroup V✝
inst✝¹⁵ : Module K V✝
inst✝¹⁴ : Module.Free K V✝
inst✝¹³ : AddCommGroup V'
inst✝¹² : Module K V'
inst✝¹¹ : Module.Free K V'
inst✝¹⁰ : AddCommGroup V₁
inst✝⁹ : Module K V₁
inst✝⁸ : Module.Free K V₁
inst✝⁷ : (i : η✝) → AddCommGroup (φ i)
inst✝⁶ : (i : η✝) → Module K (φ i)
inst✝⁵ : ∀ (i : η✝), Module.Free K (φ i)
inst✝⁴ : Fintype η✝
V η : Type u
inst✝³ : Fintype η
inst✝² : AddCommGroup V
inst✝¹ : Module K V
inst✝ : Module.Free K V
⊢ Module.rank K (η → V) = ↑(Fintype.card η) * Module.rank K V State After: no goals Tactic: rw [rank_pi, Cardinal.sum_const', Cardinal.mk_fintype] |
Increase quality and safety with Furnace tap-out actuators.
Stabilize the molten metal flow and control your launder levels.
This actuator is primarily used to control the flow of molten metal through the exit tap hole of a stationary furnace. The actuator is equipped with built-in 4-20 mA position feedback. Adjustable speed, travel length, holding force, closing force and opening force. |
State Before: α : Type u
β : Type v
p : α → Prop
f : Perm α
⊢ (∀ (x : α), p (↑f⁻¹ x) ↔ p (↑f (↑f⁻¹ x))) ↔ ∀ (x : α), p x ↔ p (↑f⁻¹ x) State After: no goals Tactic: simp_rw [f.apply_inv_self, Iff.comm] |
theory Testing
imports "HOL-Analysis.Multivariate_Analysis"
begin
lemma interior_ball: "(x \<in> interior S) \<longleftrightarrow> (\<exists> e. 0 < e & (ball x e) \<subseteq> S)"
proof-
{ assume "x \<in> interior S"
from this obtain T where T_def: "open T & x \<in> T & T \<subseteq> S" using interior_def by auto
hence "\<exists> e. 0 < e & (ball x e) \<subseteq> T" using open_contains_ball by auto
hence "\<exists> e. 0 < e & (ball x e) \<subseteq> S" using T_def by auto
} note imp1 = this
{ assume "(\<exists> e. 0 < e & (ball x e) \<subseteq> S)"
from this obtain e where e_def: "0 < e & (ball x e) \<subseteq> S" by auto
obtain T where T_def: "T = ball x e" by auto
then have "open T & x \<in> T & T \<subseteq> S" using open_ball e_def by auto
hence "x \<in> interior S" using interior_def by auto
} from this show ?thesis using imp1 by auto
qed
end |
[GOAL]
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
⊢ (∑ x in s, f x) ^ (n + 1) / ↑(card s) ^ n ≤ ∑ x in s, f x ^ (n + 1)
[PROOFSTEP]
rcases s.eq_empty_or_nonempty with (rfl | hs)
[GOAL]
case inl
ι : Type u
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ ∅ → 0 ≤ f a
⊢ (∑ x in ∅, f x) ^ (n + 1) / ↑(card ∅) ^ n ≤ ∑ x in ∅, f x ^ (n + 1)
[PROOFSTEP]
simp_rw [Finset.sum_empty, zero_pow' _ (Nat.succ_ne_zero n), zero_div]
[GOAL]
case inl
ι : Type u
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ ∅ → 0 ≤ f a
⊢ 0 ≤ 0
[PROOFSTEP]
rfl
[GOAL]
case inr
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
⊢ (∑ x in s, f x) ^ (n + 1) / ↑(card s) ^ n ≤ ∑ x in s, f x ^ (n + 1)
[PROOFSTEP]
have hs0 : 0 < (s.card : ℝ) := Nat.cast_pos.2 hs.card_pos
[GOAL]
case inr
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
hs0 : 0 < ↑(card s)
⊢ (∑ x in s, f x) ^ (n + 1) / ↑(card s) ^ n ≤ ∑ x in s, f x ^ (n + 1)
[PROOFSTEP]
suffices (∑ x in s, f x / s.card) ^ (n + 1) ≤ ∑ x in s, f x ^ (n + 1) / s.card by
rwa [← Finset.sum_div, ← Finset.sum_div, div_pow, pow_succ' (s.card : ℝ), ← div_div, div_le_iff hs0, div_mul,
div_self hs0.ne', div_one] at this
[GOAL]
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
hs0 : 0 < ↑(card s)
this : (∑ x in s, f x / ↑(card s)) ^ (n + 1) ≤ ∑ x in s, f x ^ (n + 1) / ↑(card s)
⊢ (∑ x in s, f x) ^ (n + 1) / ↑(card s) ^ n ≤ ∑ x in s, f x ^ (n + 1)
[PROOFSTEP]
rwa [← Finset.sum_div, ← Finset.sum_div, div_pow, pow_succ' (s.card : ℝ), ← div_div, div_le_iff hs0, div_mul,
div_self hs0.ne', div_one] at this
[GOAL]
case inr
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
hs0 : 0 < ↑(card s)
⊢ (∑ x in s, f x / ↑(card s)) ^ (n + 1) ≤ ∑ x in s, f x ^ (n + 1) / ↑(card s)
[PROOFSTEP]
have :=
@ConvexOn.map_sum_le ℝ ℝ ℝ ι _ _ _ _ _ _ (Set.Ici 0) (fun x => x ^ (n + 1)) s (fun _ => 1 / s.card) ((↑) ∘ f)
(convexOn_pow (n + 1)) ?_ ?_ fun i hi => Set.mem_Ici.2 (hf i hi)
[GOAL]
case inr.refine_3
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
hs0 : 0 < ↑(card s)
this :
(fun x => x ^ (n + 1)) (∑ i in s, (fun x => 1 / ↑(card s)) i • ((fun x => x) ∘ f) i) ≤
∑ i in s, (fun x => 1 / ↑(card s)) i • (fun x => x ^ (n + 1)) (((fun x => x) ∘ f) i)
⊢ (∑ x in s, f x / ↑(card s)) ^ (n + 1) ≤ ∑ x in s, f x ^ (n + 1) / ↑(card s)
[PROOFSTEP]
simpa only [inv_mul_eq_div, one_div, Algebra.id.smul_eq_mul] using this
[GOAL]
case inr.refine_1
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
hs0 : 0 < ↑(card s)
⊢ ∀ (i : ι), i ∈ s → 0 ≤ (fun x => 1 / ↑(card s)) i
[PROOFSTEP]
simp only [one_div, inv_nonneg, Nat.cast_nonneg, imp_true_iff]
[GOAL]
case inr.refine_2
ι : Type u
s : Finset ι
f : ι → ℝ
n : ℕ
hf : ∀ (a : ι), a ∈ s → 0 ≤ f a
hs : Finset.Nonempty s
hs0 : 0 < ↑(card s)
⊢ ∑ i in s, (fun x => 1 / ↑(card s)) i = 1
[PROOFSTEP]
simpa only [one_div, Finset.sum_const, nsmul_eq_mul] using mul_inv_cancel hs0.ne'
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
⊢ ∑ i in s, w i * z i ≤ (∑ i in s, w i * z i ^ p) ^ (1 / p)
[PROOFSTEP]
have : 0 < p := by positivity
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
⊢ 0 < p
[PROOFSTEP]
positivity
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ ∑ i in s, w i * z i ≤ (∑ i in s, w i * z i ^ p) ^ (1 / p)
[PROOFSTEP]
rw [← rpow_le_rpow_iff _ _ this, ← rpow_mul, one_div_mul_cancel (ne_of_gt this), rpow_one]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ ∑ i in s, w i * z i ^ p
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ ∑ i in s, w i * z i
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ (∑ i in s, w i * z i ^ p) ^ (1 / p)
[PROOFSTEP]
exact rpow_arith_mean_le_arith_mean_rpow s w z hw hw' hz hp
[GOAL]
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ ∑ i in s, w i * z i ^ p
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ ∑ i in s, w i * z i
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ (∑ i in s, w i * z i ^ p) ^ (1 / p)
[PROOFSTEP]
all_goals
apply_rules [sum_nonneg, rpow_nonneg_of_nonneg]
intro i hi
apply_rules [mul_nonneg, rpow_nonneg_of_nonneg, hw i hi, hz i hi]
[GOAL]
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
apply_rules [sum_nonneg, rpow_nonneg_of_nonneg]
[GOAL]
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
hw'_symm : 1 = ∑ i in s, w i
⊢ ∀ (i : ι), i ∈ s → 0 ≤ w i * z i ^ p
[PROOFSTEP]
intro i hi
[GOAL]
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
hw'_symm : 1 = ∑ i in s, w i
i : ι
hi : i ∈ s
⊢ 0 ≤ w i * z i ^ p
[PROOFSTEP]
apply_rules [mul_nonneg, rpow_nonneg_of_nonneg, hw i hi, hz i hi]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ ∑ i in s, w i * z i
[PROOFSTEP]
apply_rules [sum_nonneg, rpow_nonneg_of_nonneg]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
hw'_symm : 1 = ∑ i in s, w i
⊢ ∀ (i : ι), i ∈ s → 0 ≤ w i * z i
[PROOFSTEP]
intro i hi
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
hw'_symm : 1 = ∑ i in s, w i
i : ι
hi : i ∈ s
⊢ 0 ≤ w i * z i
[PROOFSTEP]
apply_rules [mul_nonneg, rpow_nonneg_of_nonneg, hw i hi, hz i hi]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
⊢ 0 ≤ (∑ i in s, w i * z i ^ p) ^ (1 / p)
[PROOFSTEP]
apply_rules [sum_nonneg, rpow_nonneg_of_nonneg]
[GOAL]
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
hw'_symm : 1 = ∑ i in s, w i
⊢ ∀ (i : ι), i ∈ s → 0 ≤ w i * z i ^ p
[PROOFSTEP]
intro i hi
[GOAL]
case hx
ι : Type u
s : Finset ι
w z : ι → ℝ
hw : ∀ (i : ι), i ∈ s → 0 ≤ w i
hw' : ∑ i in s, w i = 1
hz : ∀ (i : ι), i ∈ s → 0 ≤ z i
p : ℝ
hp : 1 ≤ p
this : 0 < p
hw'_symm : 1 = ∑ i in s, w i
i : ι
hi : i ∈ s
⊢ 0 ≤ w i * z i ^ p
[PROOFSTEP]
apply_rules [mul_nonneg, rpow_nonneg_of_nonneg, hw i hi, hz i hi]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0
hw' : ∑ i in s, w i = 1
n : ℕ
⊢ (∑ i in s, w i * z i) ^ n ≤ ∑ i in s, w i * z i ^ n
[PROOFSTEP]
exact_mod_cast
Real.pow_arith_mean_le_arith_mean_pow s _ _ (fun i _ => (w i).coe_nonneg) (by exact_mod_cast hw')
(fun i _ => (z i).coe_nonneg) n
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0
hw' : ∑ i in s, w i = 1
n : ℕ
⊢ ∑ i in s, ↑(w i) = 1
[PROOFSTEP]
exact_mod_cast hw'
[GOAL]
ι : Type u
s : Finset ι
f : ι → ℝ≥0
n : ℕ
⊢ ↑((∑ x in s, f x) ^ (n + 1)) / ↑(card s) ^ n ≤ ↑(∑ x in s, f x ^ (n + 1))
[PROOFSTEP]
simpa only [← NNReal.coe_le_coe, NNReal.coe_sum, Nonneg.coe_div, NNReal.coe_pow] using
@Real.pow_sum_div_card_le_sum_pow ι s (((↑) : ℝ≥0 → ℝ) ∘ f) n fun _ _ => NNReal.coe_nonneg _
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
exact_mod_cast
Real.rpow_arith_mean_le_arith_mean_rpow s _ _ (fun i _ => (w i).coe_nonneg) (by exact_mod_cast hw')
(fun i _ => (z i).coe_nonneg) hp
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
⊢ ∑ i in s, ↑(w i) = 1
[PROOFSTEP]
exact_mod_cast hw'
[GOAL]
ι : Type u
s : Finset ι
w₁ w₂ z₁ z₂ : ℝ≥0
hw' : w₁ + w₂ = 1
p : ℝ
hp : 1 ≤ p
⊢ (w₁ * z₁ + w₂ * z₂) ^ p ≤ w₁ * z₁ ^ p + w₂ * z₂ ^ p
[PROOFSTEP]
have h := rpow_arith_mean_le_arith_mean_rpow univ ![w₁, w₂] ![z₁, z₂] ?_ hp
[GOAL]
case refine_2
ι : Type u
s : Finset ι
w₁ w₂ z₁ z₂ : ℝ≥0
hw' : w₁ + w₂ = 1
p : ℝ
hp : 1 ≤ p
h :
(∑ i : Fin (Nat.succ (Nat.succ 0)), Matrix.vecCons w₁ ![w₂] i * Matrix.vecCons z₁ ![z₂] i) ^ p ≤
∑ i : Fin (Nat.succ (Nat.succ 0)), Matrix.vecCons w₁ ![w₂] i * Matrix.vecCons z₁ ![z₂] i ^ p
⊢ (w₁ * z₁ + w₂ * z₂) ^ p ≤ w₁ * z₁ ^ p + w₂ * z₂ ^ p
[PROOFSTEP]
simpa [Fin.sum_univ_succ] using h
[GOAL]
case refine_1
ι : Type u
s : Finset ι
w₁ w₂ z₁ z₂ : ℝ≥0
hw' : w₁ + w₂ = 1
p : ℝ
hp : 1 ≤ p
⊢ ∑ i : Fin (Nat.succ (Nat.succ 0)), Matrix.vecCons w₁ ![w₂] i = 1
[PROOFSTEP]
simp [hw', Fin.sum_univ_succ]
[GOAL]
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
p : ℝ
hp : 1 ≤ p
⊢ (z₁ + z₂) ^ p ≤ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p)
[PROOFSTEP]
rcases eq_or_lt_of_le hp with (rfl | h'p)
[GOAL]
case inl
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
hp : 1 ≤ 1
⊢ (z₁ + z₂) ^ 1 ≤ 2 ^ (1 - 1) * (z₁ ^ 1 + z₂ ^ 1)
[PROOFSTEP]
simp only [rpow_one, sub_self, rpow_zero, one_mul]
[GOAL]
case inl
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
hp : 1 ≤ 1
⊢ z₁ + z₂ ≤ z₁ + z₂
[PROOFSTEP]
rfl
[GOAL]
case inr
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
⊢ (z₁ + z₂) ^ p ≤ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p)
[PROOFSTEP]
convert rpow_arith_mean_le_arith_mean2_rpow (1 / 2) (1 / 2) (2 * z₁) (2 * z₂) (add_halves 1) hp using 1
[GOAL]
case h.e'_3
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
⊢ (z₁ + z₂) ^ p = (1 / 2 * (2 * z₁) + 1 / 2 * (2 * z₂)) ^ p
[PROOFSTEP]
simp only [one_div, inv_mul_cancel_left₀, Ne.def, mul_eq_zero, two_ne_zero, one_ne_zero, not_false_iff]
[GOAL]
case h.e'_4
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
⊢ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p) = 1 / 2 * (2 * z₁) ^ p + 1 / 2 * (2 * z₂) ^ p
[PROOFSTEP]
have A : p - 1 ≠ 0 := ne_of_gt (sub_pos.2 h'p)
[GOAL]
case h.e'_4
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
A : p - 1 ≠ 0
⊢ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p) = 1 / 2 * (2 * z₁) ^ p + 1 / 2 * (2 * z₂) ^ p
[PROOFSTEP]
simp only [mul_rpow, rpow_sub' _ A, div_eq_inv_mul, rpow_one, mul_one]
[GOAL]
case h.e'_4
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
A : p - 1 ≠ 0
⊢ 2⁻¹ * 2 ^ p * (z₁ ^ p + z₂ ^ p) = 2⁻¹ * (2 ^ p * z₁ ^ p) + 2⁻¹ * (2 ^ p * z₂ ^ p)
[PROOFSTEP]
ring
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
⊢ ∑ i in s, w i * z i ≤ (∑ i in s, w i * z i ^ p) ^ (1 / p)
[PROOFSTEP]
exact_mod_cast
Real.arith_mean_le_rpow_mean s _ _ (fun i _ => (w i).coe_nonneg) (by exact_mod_cast hw') (fun i _ => (z i).coe_nonneg)
hp
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
⊢ ∑ i in s, ↑(w i) = 1
[PROOFSTEP]
exact_mod_cast hw'
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hab : a + b ≤ 1
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ 1
[PROOFSTEP]
have h_le_one : ∀ x : ℝ≥0, x ≤ 1 → x ^ p ≤ x := fun x hx => rpow_le_self_of_le_one hx hp1
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hab : a + b ≤ 1
hp1 : 1 ≤ p
h_le_one : ∀ (x : ℝ≥0), x ≤ 1 → x ^ p ≤ x
⊢ a ^ p + b ^ p ≤ 1
[PROOFSTEP]
have ha : a ≤ 1 := (self_le_add_right a b).trans hab
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hab : a + b ≤ 1
hp1 : 1 ≤ p
h_le_one : ∀ (x : ℝ≥0), x ≤ 1 → x ^ p ≤ x
ha : a ≤ 1
⊢ a ^ p + b ^ p ≤ 1
[PROOFSTEP]
have hb : b ≤ 1 := (self_le_add_left b a).trans hab
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hab : a + b ≤ 1
hp1 : 1 ≤ p
h_le_one : ∀ (x : ℝ≥0), x ≤ 1 → x ^ p ≤ x
ha : a ≤ 1
hb : b ≤ 1
⊢ a ^ p + b ^ p ≤ 1
[PROOFSTEP]
exact (add_le_add (h_le_one a ha) (h_le_one b hb)).trans hab
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have hp_pos : 0 < p := by positivity
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
⊢ 0 < p
[PROOFSTEP]
positivity
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
by_cases h_zero : a + b = 0
[GOAL]
case pos
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : a + b = 0
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
simp [add_eq_zero_iff.mp h_zero, hp_pos.ne']
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have h_nonzero : ¬(a = 0 ∧ b = 0) := by rwa [add_eq_zero_iff] at h_zero
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
⊢ ¬(a = 0 ∧ b = 0)
[PROOFSTEP]
rwa [add_eq_zero_iff] at h_zero
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have h_add : a / (a + b) + b / (a + b) = 1 := by rw [div_add_div_same, div_self h_zero]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
⊢ a / (a + b) + b / (a + b) = 1
[PROOFSTEP]
rw [div_add_div_same, div_self h_zero]
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have h := add_rpow_le_one_of_add_le_one (a / (a + b)) (b / (a + b)) h_add.le hp1
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : (a / (a + b)) ^ p + (b / (a + b)) ^ p ≤ 1
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
rw [div_rpow a (a + b), div_rpow b (a + b)] at h
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have hab_0 : (a + b) ^ p ≠ 0 := by simp [hp_pos, h_nonzero]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
⊢ (a + b) ^ p ≠ 0
[PROOFSTEP]
simp [hp_pos, h_nonzero]
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
hab_0 : (a + b) ^ p ≠ 0
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have hab_0' : 0 < (a + b) ^ p := zero_lt_iff.mpr hab_0
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
hab_0 : (a + b) ^ p ≠ 0
hab_0' : 0 < (a + b) ^ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have h_mul : (a + b) ^ p * (a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p) ≤ (a + b) ^ p :=
by
nth_rw 4 [← mul_one ((a + b) ^ p)]
exact (mul_le_mul_left hab_0').mpr h
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
hab_0 : (a + b) ^ p ≠ 0
hab_0' : 0 < (a + b) ^ p
⊢ (a + b) ^ p * (a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p) ≤ (a + b) ^ p
[PROOFSTEP]
nth_rw 4 [← mul_one ((a + b) ^ p)]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
hab_0 : (a + b) ^ p ≠ 0
hab_0' : 0 < (a + b) ^ p
⊢ (a + b) ^ p * (a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p) ≤ (a + b) ^ p * 1
[PROOFSTEP]
exact (mul_le_mul_left hab_0').mpr h
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
hp_pos : 0 < p
h_zero : ¬a + b = 0
h_nonzero : ¬(a = 0 ∧ b = 0)
h_add : a / (a + b) + b / (a + b) = 1
h : a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p ≤ 1
hab_0 : (a + b) ^ p ≠ 0
hab_0' : 0 < (a + b) ^ p
h_mul : (a + b) ^ p * (a ^ p / (a + b) ^ p + b ^ p / (a + b) ^ p) ≤ (a + b) ^ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
rwa [div_eq_mul_inv, div_eq_mul_inv, mul_add, mul_comm (a ^ p), mul_comm (b ^ p), ← mul_assoc, ← mul_assoc,
mul_inv_cancel hab_0, one_mul, one_mul] at h_mul
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
⊢ (a ^ p + b ^ p) ^ (1 / p) ≤ a + b
[PROOFSTEP]
rw [← @NNReal.le_rpow_one_div_iff _ _ (1 / p) (by simp [lt_of_lt_of_le zero_lt_one hp1])]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
⊢ 0 < 1 / p
[PROOFSTEP]
simp [lt_of_lt_of_le zero_lt_one hp1]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ (1 / (1 / p))
[PROOFSTEP]
rw [one_div_one_div]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
exact add_rpow_le_rpow_add _ _ hp1
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
⊢ (a ^ q + b ^ q) ^ (1 / q) ≤ (a ^ p + b ^ p) ^ (1 / p)
[PROOFSTEP]
have h_rpow : ∀ a : ℝ≥0, a ^ q = (a ^ p) ^ (q / p) := fun a => by
rw [← NNReal.rpow_mul, div_eq_inv_mul, ← mul_assoc, _root_.mul_inv_cancel hp_pos.ne.symm, one_mul]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a✝ b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
a : ℝ≥0
⊢ a ^ q = (a ^ p) ^ (q / p)
[PROOFSTEP]
rw [← NNReal.rpow_mul, div_eq_inv_mul, ← mul_assoc, _root_.mul_inv_cancel hp_pos.ne.symm, one_mul]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0), a ^ q = (a ^ p) ^ (q / p)
⊢ (a ^ q + b ^ q) ^ (1 / q) ≤ (a ^ p + b ^ p) ^ (1 / p)
[PROOFSTEP]
have h_rpow_add_rpow_le_add : ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p :=
by
refine' rpow_add_rpow_le_add (a ^ p) (b ^ p) _
rwa [one_le_div hp_pos]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0), a ^ q = (a ^ p) ^ (q / p)
⊢ ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p
[PROOFSTEP]
refine' rpow_add_rpow_le_add (a ^ p) (b ^ p) _
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0), a ^ q = (a ^ p) ^ (q / p)
⊢ 1 ≤ q / p
[PROOFSTEP]
rwa [one_le_div hp_pos]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0), a ^ q = (a ^ p) ^ (q / p)
h_rpow_add_rpow_le_add : ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p
⊢ (a ^ q + b ^ q) ^ (1 / q) ≤ (a ^ p + b ^ p) ^ (1 / p)
[PROOFSTEP]
rw [h_rpow a, h_rpow b, NNReal.le_rpow_one_div_iff hp_pos, ← NNReal.rpow_mul, mul_comm, mul_one_div]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0), a ^ q = (a ^ p) ^ (q / p)
h_rpow_add_rpow_le_add : ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p
⊢ ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (p / q) ≤ a ^ p + b ^ p
[PROOFSTEP]
rwa [one_div_div] at h_rpow_add_rpow_le_add
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rcases hp.eq_or_lt with (rfl | hp_pos)
[GOAL]
case inl
ι : Type u
s : Finset ι
a b : ℝ≥0
hp : 0 ≤ 0
hp1 : 0 ≤ 1
⊢ (a + b) ^ 0 ≤ a ^ 0 + b ^ 0
[PROOFSTEP]
simp
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
have h := rpow_add_rpow_le a b hp_pos hp1
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : (a ^ 1 + b ^ 1) ^ (1 / 1) ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [one_div_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : (a ^ 1 + b ^ 1) ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
repeat' rw [NNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : (a ^ 1 + b ^ 1) ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [NNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a ^ 1 + b ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [NNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a + b ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [NNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a + b ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [NNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a + b ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
exact (NNReal.le_rpow_one_div_iff hp_pos).mp h
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
have hp_pos : 0 < p
[GOAL]
case hp_pos
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
⊢ 0 < p
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
positivity
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
have hp_nonneg : 0 ≤ p
[GOAL]
case hp_nonneg
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
⊢ 0 ≤ p
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
positivity
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
have hp_not_neg : ¬p < 0 := by simp [hp_nonneg]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
⊢ ¬p < 0
[PROOFSTEP]
simp [hp_nonneg]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
have h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤) := by
simp [ENNReal.mul_eq_top, hp_pos, hp_nonneg, hp_not_neg]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
⊢ ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
[PROOFSTEP]
simp [ENNReal.mul_eq_top, hp_pos, hp_nonneg, hp_not_neg]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
⊢ (∑ i in s, w i * z i) ^ p ≤ ∑ i in s, w i * z i ^ p
[PROOFSTEP]
refine' le_of_top_imp_top_of_toNNReal_le _ _
[GOAL]
case refine'_1
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
⊢ (∑ i in s, w i * z i) ^ p = ⊤ → ∑ i in s, w i * z i ^ p = ⊤
[PROOFSTEP]
rw [rpow_eq_top_iff, sum_eq_top_iff, sum_eq_top_iff]
[GOAL]
case refine'_1
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
⊢ ∑ i in s, w i * z i = 0 ∧ p < 0 ∨ (∃ a, a ∈ s ∧ w a * z a = ⊤) ∧ 0 < p → ∃ a, a ∈ s ∧ w a * z a ^ p = ⊤
[PROOFSTEP]
intro h
[GOAL]
case refine'_1
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h : ∑ i in s, w i * z i = 0 ∧ p < 0 ∨ (∃ a, a ∈ s ∧ w a * z a = ⊤) ∧ 0 < p
⊢ ∃ a, a ∈ s ∧ w a * z a ^ p = ⊤
[PROOFSTEP]
simp only [and_false_iff, hp_not_neg, false_or_iff] at h
[GOAL]
case refine'_1
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h : (∃ a, a ∈ s ∧ w a * z a = ⊤) ∧ 0 < p
⊢ ∃ a, a ∈ s ∧ w a * z a ^ p = ⊤
[PROOFSTEP]
rcases h.left with ⟨a, H, ha⟩
[GOAL]
case refine'_1.intro.intro
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h : (∃ a, a ∈ s ∧ w a * z a = ⊤) ∧ 0 < p
a : ι
H : a ∈ s
ha : w a * z a = ⊤
⊢ ∃ a, a ∈ s ∧ w a * z a ^ p = ⊤
[PROOFSTEP]
use a, H
[GOAL]
case right
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h : (∃ a, a ∈ s ∧ w a * z a = ⊤) ∧ 0 < p
a : ι
H : a ∈ s
ha : w a * z a = ⊤
⊢ w a * z a ^ p = ⊤
[PROOFSTEP]
rwa [← h_top_iff_rpow_top a H]
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
⊢ (∑ i in s, w i * z i) ^ p ≠ ⊤ →
∑ i in s, w i * z i ^ p ≠ ⊤ →
ENNReal.toNNReal ((∑ i in s, w i * z i) ^ p) ≤ ENNReal.toNNReal (∑ i in s, w i * z i ^ p)
[PROOFSTEP]
intro h_top_rpow_sum _
-- show hypotheses needed to put the `.toNNReal` inside the sums.
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
⊢ ENNReal.toNNReal ((∑ i in s, w i * z i) ^ p) ≤ ENNReal.toNNReal (∑ i in s, w i * z i ^ p)
[PROOFSTEP]
have h_top : ∀ a : ι, a ∈ s → w a * z a ≠ ⊤ :=
haveI h_top_sum : ∑ i : ι in s, w i * z i ≠ ⊤ := by
intro h
rw [h, top_rpow_of_pos hp_pos] at h_top_rpow_sum
exact h_top_rpow_sum rfl
fun a ha => (lt_top_of_sum_ne_top h_top_sum ha).ne
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
⊢ ∑ i in s, w i * z i ≠ ⊤
[PROOFSTEP]
intro h
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h : ∑ i in s, w i * z i = ⊤
⊢ False
[PROOFSTEP]
rw [h, top_rpow_of_pos hp_pos] at h_top_rpow_sum
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : ⊤ ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h : ∑ i in s, w i * z i = ⊤
⊢ False
[PROOFSTEP]
exact h_top_rpow_sum rfl
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
⊢ ENNReal.toNNReal ((∑ i in s, w i * z i) ^ p) ≤ ENNReal.toNNReal (∑ i in s, w i * z i ^ p)
[PROOFSTEP]
have h_top_rpow : ∀ a : ι, a ∈ s → w a * z a ^ p ≠ ⊤ :=
by
intro i hi
specialize h_top i hi
rwa [Ne.def, ← h_top_iff_rpow_top i hi]
-- put the `.toNNReal` inside the sums.
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
⊢ ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
[PROOFSTEP]
intro i hi
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
i : ι
hi : i ∈ s
⊢ w i * z i ^ p ≠ ⊤
[PROOFSTEP]
specialize h_top i hi
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
i : ι
hi : i ∈ s
h_top : w i * z i ≠ ⊤
⊢ w i * z i ^ p ≠ ⊤
[PROOFSTEP]
rwa [Ne.def, ← h_top_iff_rpow_top i hi]
-- put the `.toNNReal` inside the sums.
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
⊢ ENNReal.toNNReal ((∑ i in s, w i * z i) ^ p) ≤ ENNReal.toNNReal (∑ i in s, w i * z i ^ p)
[PROOFSTEP]
simp_rw [toNNReal_sum h_top_rpow, ← toNNReal_rpow, toNNReal_sum h_top, toNNReal_mul, ← toNNReal_rpow]
-- use corresponding nnreal result
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
⊢ (∑ x in s, ENNReal.toNNReal (w x) * ENNReal.toNNReal (z x)) ^ p ≤
∑ x in s, ENNReal.toNNReal (w x) * ENNReal.toNNReal (z x) ^ p
[PROOFSTEP]
refine' NNReal.rpow_arith_mean_le_arith_mean_rpow s (fun i => (w i).toNNReal) (fun i => (z i).toNNReal) _ hp
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
⊢ ∑ i in s, (fun i => ENNReal.toNNReal (w i)) i = 1
[PROOFSTEP]
have h_sum_nnreal : ∑ i in s, w i = ↑(∑ i in s, (w i).toNNReal) :=
by
rw [coe_finset_sum]
refine' sum_congr rfl fun i hi => (coe_toNNReal _).symm
refine' (lt_top_of_sum_ne_top _ hi).ne
exact hw'.symm ▸ ENNReal.one_ne_top
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
⊢ ∑ i in s, w i = ↑(∑ i in s, ENNReal.toNNReal (w i))
[PROOFSTEP]
rw [coe_finset_sum]
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
⊢ ∑ i in s, w i = ∑ a in s, ↑(ENNReal.toNNReal (w a))
[PROOFSTEP]
refine' sum_congr rfl fun i hi => (coe_toNNReal _).symm
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
i : ι
hi : i ∈ s
⊢ w i ≠ ⊤
[PROOFSTEP]
refine' (lt_top_of_sum_ne_top _ hi).ne
[GOAL]
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
i : ι
hi : i ∈ s
⊢ ∑ x in s, w x ≠ ⊤
[PROOFSTEP]
exact hw'.symm ▸ ENNReal.one_ne_top
[GOAL]
case refine'_2
ι : Type u
s : Finset ι
w z : ι → ℝ≥0∞
hw' : ∑ i in s, w i = 1
p : ℝ
hp : 1 ≤ p
hp_pos : 0 < p
hp_nonneg : 0 ≤ p
hp_not_neg : ¬p < 0
h_top_iff_rpow_top : ∀ (i : ι), i ∈ s → (w i * z i = ⊤ ↔ w i * z i ^ p = ⊤)
h_top_rpow_sum : (∑ i in s, w i * z i) ^ p ≠ ⊤
a✝ : ∑ i in s, w i * z i ^ p ≠ ⊤
h_top : ∀ (a : ι), a ∈ s → w a * z a ≠ ⊤
h_top_rpow : ∀ (a : ι), a ∈ s → w a * z a ^ p ≠ ⊤
h_sum_nnreal : ∑ i in s, w i = ↑(∑ i in s, ENNReal.toNNReal (w i))
⊢ ∑ i in s, (fun i => ENNReal.toNNReal (w i)) i = 1
[PROOFSTEP]
rwa [← coe_eq_coe, ← h_sum_nnreal]
[GOAL]
ι : Type u
s : Finset ι
w₁ w₂ z₁ z₂ : ℝ≥0∞
hw' : w₁ + w₂ = 1
p : ℝ
hp : 1 ≤ p
⊢ (w₁ * z₁ + w₂ * z₂) ^ p ≤ w₁ * z₁ ^ p + w₂ * z₂ ^ p
[PROOFSTEP]
have h := rpow_arith_mean_le_arith_mean_rpow univ ![w₁, w₂] ![z₁, z₂] ?_ hp
[GOAL]
case refine_2
ι : Type u
s : Finset ι
w₁ w₂ z₁ z₂ : ℝ≥0∞
hw' : w₁ + w₂ = 1
p : ℝ
hp : 1 ≤ p
h :
(∑ i : Fin (Nat.succ (Nat.succ 0)), Matrix.vecCons w₁ ![w₂] i * Matrix.vecCons z₁ ![z₂] i) ^ p ≤
∑ i : Fin (Nat.succ (Nat.succ 0)), Matrix.vecCons w₁ ![w₂] i * Matrix.vecCons z₁ ![z₂] i ^ p
⊢ (w₁ * z₁ + w₂ * z₂) ^ p ≤ w₁ * z₁ ^ p + w₂ * z₂ ^ p
[PROOFSTEP]
simpa [Fin.sum_univ_succ] using h
[GOAL]
case refine_1
ι : Type u
s : Finset ι
w₁ w₂ z₁ z₂ : ℝ≥0∞
hw' : w₁ + w₂ = 1
p : ℝ
hp : 1 ≤ p
⊢ ∑ i : Fin (Nat.succ (Nat.succ 0)), Matrix.vecCons w₁ ![w₂] i = 1
[PROOFSTEP]
simp [hw', Fin.sum_univ_succ]
[GOAL]
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
p : ℝ
hp : 1 ≤ p
⊢ (z₁ + z₂) ^ p ≤ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p)
[PROOFSTEP]
rcases eq_or_lt_of_le hp with (rfl | h'p)
[GOAL]
case inl
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
hp : 1 ≤ 1
⊢ (z₁ + z₂) ^ 1 ≤ 2 ^ (1 - 1) * (z₁ ^ 1 + z₂ ^ 1)
[PROOFSTEP]
simp only [rpow_one, sub_self, rpow_zero, one_mul, le_refl]
[GOAL]
case inr
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
⊢ (z₁ + z₂) ^ p ≤ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p)
[PROOFSTEP]
convert rpow_arith_mean_le_arith_mean2_rpow (1 / 2) (1 / 2) (2 * z₁) (2 * z₂) (ENNReal.add_halves 1) hp using 1
[GOAL]
case h.e'_3
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
⊢ (z₁ + z₂) ^ p = (1 / 2 * (2 * z₁) + 1 / 2 * (2 * z₂)) ^ p
[PROOFSTEP]
simp [← mul_assoc, ENNReal.inv_mul_cancel two_ne_zero two_ne_top]
[GOAL]
case h.e'_4
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
⊢ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p) = 1 / 2 * (2 * z₁) ^ p + 1 / 2 * (2 * z₂) ^ p
[PROOFSTEP]
have _ : p - 1 ≠ 0 := ne_of_gt (sub_pos.2 h'p)
[GOAL]
case h.e'_4
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
x✝ : p - 1 ≠ 0
⊢ 2 ^ (p - 1) * (z₁ ^ p + z₂ ^ p) = 1 / 2 * (2 * z₁) ^ p + 1 / 2 * (2 * z₂) ^ p
[PROOFSTEP]
simp only [mul_rpow_of_nonneg _ _ (zero_le_one.trans hp), rpow_sub _ _ two_ne_zero two_ne_top, ENNReal.div_eq_inv_mul,
rpow_one, mul_one]
[GOAL]
case h.e'_4
ι : Type u
s : Finset ι
z₁ z₂ : ℝ≥0∞
p : ℝ
hp : 1 ≤ p
h'p : 1 < p
x✝ : p - 1 ≠ 0
⊢ 2⁻¹ * 2 ^ p * (z₁ ^ p + z₂ ^ p) = 2⁻¹ * (2 ^ p * z₁ ^ p) + 2⁻¹ * (2 ^ p * z₂ ^ p)
[PROOFSTEP]
ring
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
have hp_pos : 0 < p := by positivity
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
⊢ 0 < p
[PROOFSTEP]
positivity
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
by_cases h_top : a + b = ⊤
[GOAL]
case pos
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
h_top : a + b = ⊤
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
rw [← @ENNReal.rpow_eq_top_iff_of_pos (a + b) p hp_pos] at h_top
[GOAL]
case pos
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
h_top : (a + b) ^ p = ⊤
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
rw [h_top]
[GOAL]
case pos
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
h_top : (a + b) ^ p = ⊤
⊢ a ^ p + b ^ p ≤ ⊤
[PROOFSTEP]
exact le_top
[GOAL]
case neg
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
h_top : ¬a + b = ⊤
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
obtain ⟨ha_top, hb_top⟩ := add_ne_top.mp h_top
[GOAL]
case neg.intro
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
h_top : ¬a + b = ⊤
ha_top : a ≠ ⊤
hb_top : b ≠ ⊤
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
lift a to ℝ≥0 using ha_top
[GOAL]
case neg.intro.intro
ι : Type u
s : Finset ι
p : ℝ
b : ℝ≥0∞
hp1 : 1 ≤ p
hp_pos : 0 < p
hb_top : b ≠ ⊤
a : ℝ≥0
h_top : ¬↑a + b = ⊤
⊢ ↑a ^ p + b ^ p ≤ (↑a + b) ^ p
[PROOFSTEP]
lift b to ℝ≥0 using hb_top
[GOAL]
case neg.intro.intro.intro
ι : Type u
s : Finset ι
p : ℝ
hp1 : 1 ≤ p
hp_pos : 0 < p
a b : ℝ≥0
h_top : ¬↑a + ↑b = ⊤
⊢ ↑a ^ p + ↑b ^ p ≤ (↑a + ↑b) ^ p
[PROOFSTEP]
simpa [← ENNReal.coe_rpow_of_nonneg _ hp_pos.le] using ENNReal.coe_le_coe.2 (NNReal.add_rpow_le_rpow_add a b hp1)
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
⊢ (a ^ p + b ^ p) ^ (1 / p) ≤ a + b
[PROOFSTEP]
rw [← @ENNReal.le_rpow_one_div_iff _ _ (1 / p) (by simp [lt_of_lt_of_le zero_lt_one hp1])]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
⊢ 0 < 1 / p
[PROOFSTEP]
simp [lt_of_lt_of_le zero_lt_one hp1]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ (1 / (1 / p))
[PROOFSTEP]
rw [one_div_one_div]
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp1 : 1 ≤ p
⊢ a ^ p + b ^ p ≤ (a + b) ^ p
[PROOFSTEP]
exact add_rpow_le_rpow_add _ _ hp1
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
⊢ (a ^ q + b ^ q) ^ (1 / q) ≤ (a ^ p + b ^ p) ^ (1 / p)
[PROOFSTEP]
have h_rpow : ∀ a : ℝ≥0∞, a ^ q = (a ^ p) ^ (q / p) := fun a => by
rw [← ENNReal.rpow_mul, _root_.mul_div_cancel' _ hp_pos.ne']
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a✝ b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
a : ℝ≥0∞
⊢ a ^ q = (a ^ p) ^ (q / p)
[PROOFSTEP]
rw [← ENNReal.rpow_mul, _root_.mul_div_cancel' _ hp_pos.ne']
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0∞), a ^ q = (a ^ p) ^ (q / p)
⊢ (a ^ q + b ^ q) ^ (1 / q) ≤ (a ^ p + b ^ p) ^ (1 / p)
[PROOFSTEP]
have h_rpow_add_rpow_le_add : ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p :=
by
refine' rpow_add_rpow_le_add (a ^ p) (b ^ p) _
rwa [one_le_div hp_pos]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0∞), a ^ q = (a ^ p) ^ (q / p)
⊢ ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p
[PROOFSTEP]
refine' rpow_add_rpow_le_add (a ^ p) (b ^ p) _
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0∞), a ^ q = (a ^ p) ^ (q / p)
⊢ 1 ≤ q / p
[PROOFSTEP]
rwa [one_le_div hp_pos]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0∞), a ^ q = (a ^ p) ^ (q / p)
h_rpow_add_rpow_le_add : ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p
⊢ (a ^ q + b ^ q) ^ (1 / q) ≤ (a ^ p + b ^ p) ^ (1 / p)
[PROOFSTEP]
rw [h_rpow a, h_rpow b, ENNReal.le_rpow_one_div_iff hp_pos, ← ENNReal.rpow_mul, mul_comm, mul_one_div]
[GOAL]
ι : Type u
s : Finset ι
p q : ℝ
a b : ℝ≥0∞
hp_pos : 0 < p
hpq : p ≤ q
h_rpow : ∀ (a : ℝ≥0∞), a ^ q = (a ^ p) ^ (q / p)
h_rpow_add_rpow_le_add : ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (1 / (q / p)) ≤ a ^ p + b ^ p
⊢ ((a ^ p) ^ (q / p) + (b ^ p) ^ (q / p)) ^ (p / q) ≤ a ^ p + b ^ p
[PROOFSTEP]
rwa [one_div_div] at h_rpow_add_rpow_le_add
[GOAL]
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rcases hp.eq_or_lt with (rfl | hp_pos)
[GOAL]
case inl
ι : Type u
s : Finset ι
a b : ℝ≥0∞
hp : 0 ≤ 0
hp1 : 0 ≤ 1
⊢ (a + b) ^ 0 ≤ a ^ 0 + b ^ 0
[PROOFSTEP]
simp
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
have h := rpow_add_rpow_le a b hp_pos hp1
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : (a ^ 1 + b ^ 1) ^ (1 / 1) ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [one_div_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : (a ^ 1 + b ^ 1) ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
repeat' rw [ENNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : (a ^ 1 + b ^ 1) ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [ENNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a ^ 1 + b ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [ENNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a + b ^ 1 ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [ENNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a + b ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
rw [ENNReal.rpow_one] at h
[GOAL]
case inr
ι : Type u
s : Finset ι
p : ℝ
a b : ℝ≥0∞
hp : 0 ≤ p
hp1 : p ≤ 1
hp_pos : 0 < p
h : a + b ≤ (a ^ p + b ^ p) ^ (1 / p)
⊢ (a + b) ^ p ≤ a ^ p + b ^ p
[PROOFSTEP]
exact (ENNReal.le_rpow_one_div_iff hp_pos).mp h
|
" Fear of Flying " is the eleventh episode of The Simpsons ' sixth season . It was first broadcast on the Fox network in the United States on December 18 , 1994 . In the episode , Homer is banned from Moe 's Tavern and struggles to find a new bar . When he destroys a plane after being mistaken for a pilot at a pilots @-@ only bar , the airline buys the Simpsons ' silence with free tickets . The family discovers that Marge is afraid of flying .
|
[STATEMENT]
lemma MGFn_free_wt_da_NormalConformI:
"(\<forall> T L C B. \<lparr>prg=G,cls=C,lcl=L\<rparr>\<turnstile>t\<Colon>T
\<longrightarrow> G,(A::state triple set)
\<turnstile>{Normal((\<lambda>Y' s' s. s'=s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. (\<lambda> s. s\<Colon>\<preceq>(G, L))
\<and>. (\<lambda> s. \<lparr>prg=G,cls=C,lcl=L\<rparr>\<turnstile>dom (locals (store s))\<guillemotright>t\<guillemotright>B)}
t\<succ>
{\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y,s')})
\<Longrightarrow> G,A\<turnstile>{=:n}t\<succ>{G\<rightarrow>}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. (\<lambda>s. s\<Colon>\<preceq>(G, L)) \<and>. (\<lambda>s. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B)} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')} \<Longrightarrow> G,A\<turnstile>{=:n} t\<succ> {G\<rightarrow>}
[PROOF STEP]
apply (rule MGFn_NormalI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. (\<lambda>s. s\<Colon>\<preceq>(G, L)) \<and>. (\<lambda>s. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B)} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')} \<Longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n)} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}
[PROOF STEP]
apply (rule ax_no_hazard)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. (\<lambda>s. s\<Colon>\<preceq>(G, L)) \<and>. (\<lambda>s. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B)} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')} \<Longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. type_ok G t} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}
[PROOF STEP]
apply (rule ax_escape)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. (\<lambda>s. s\<Colon>\<preceq>(G, L)) \<and>. (\<lambda>s. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B)} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')} \<Longrightarrow> \<forall>Y s Z. (Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. type_ok G t) Y s Z \<longrightarrow> G,A\<turnstile>{\<lambda>Y' s' Z'. (Y', s') = (Y, s)} t\<succ> {\<lambda>Y s Z'. G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y, s)}
[PROOF STEP]
apply (intro strip)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>Y s Z. \<lbrakk>\<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. (\<lambda>s. s\<Colon>\<preceq>(G, L)) \<and>. (\<lambda>s. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B)} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}; (Normal ((\<lambda>Y' s' s. s' = s \<and> normal s) \<and>. G\<turnstile>init\<le>n) \<and>. type_ok G t) Y s Z\<rbrakk> \<Longrightarrow> G,A\<turnstile>{\<lambda>Y' s' Z'. (Y', s') = (Y, s)} t\<succ> {\<lambda>Y s Z'. G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y, s)}
[PROOF STEP]
apply (simp only: type_ok_def peek_and_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>Y s Z. \<lbrakk>\<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{\<lambda>Y s Z. ((((s = Z \<and> normal Z) \<and> (G\<turnstile>init\<le>n) s) \<and> normal s) \<and> s\<Colon>\<preceq>(G, L)) \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}; (((s = Z \<and> normal Z) \<and> (G\<turnstile>init\<le>n) s) \<and> normal s) \<and> (\<exists>L T C A. (normal s \<longrightarrow> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> A) \<and> s\<Colon>\<preceq>(G, L))\<rbrakk> \<Longrightarrow> G,A\<turnstile>{\<lambda>Y' s' Z'. (Y', s') = (Y, Z)} t\<succ> {\<lambda>Y s Z'. G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y, s)}
[PROOF STEP]
apply (erule conjE)+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>Y s Z. \<lbrakk>\<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{\<lambda>Y s Z. ((((s = Z \<and> normal Z) \<and> (G\<turnstile>init\<le>n) s) \<and> normal s) \<and> s\<Colon>\<preceq>(G, L)) \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}; \<exists>L T C A. (normal s \<longrightarrow> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> A) \<and> s\<Colon>\<preceq>(G, L); normal s; (G\<turnstile>init\<le>n) s; s = Z; normal Z\<rbrakk> \<Longrightarrow> G,A\<turnstile>{\<lambda>Y' s' Z'. (Y', s') = (Y, Z)} t\<succ> {\<lambda>Y s Z'. G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y, s)}
[PROOF STEP]
apply (erule exE,erule exE, erule exE, erule exE,erule conjE,drule (1) mp,
erule conjE)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>Y s Z L T C Aa. \<lbrakk>\<forall>T L C B. \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T \<longrightarrow> G,A\<turnstile>{\<lambda>Y s Z. ((((s = Z \<and> normal Z) \<and> (G\<turnstile>init\<le>n) s) \<and> normal s) \<and> s\<Colon>\<preceq>(G, L)) \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> B} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}; normal s; (G\<turnstile>init\<le>n) s; s = Z; normal Z; s\<Colon>\<preceq>(G, L); \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T; \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> Aa\<rbrakk> \<Longrightarrow> G,A\<turnstile>{\<lambda>Y' s' Z'. (Y', s') = (Y, Z)} t\<succ> {\<lambda>Y s Z'. G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y, s)}
[PROOF STEP]
apply (drule spec,drule spec, drule spec,drule spec, drule (1) mp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>Y s Z L T C Aa. \<lbrakk>normal s; (G\<turnstile>init\<le>n) s; s = Z; normal Z; s\<Colon>\<preceq>(G, L); \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T; \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> Aa; G,A\<turnstile>{\<lambda>Ya sa Za. ((((sa = Za \<and> normal Za) \<and> (G\<turnstile>init\<le>n) sa) \<and> normal sa) \<and> sa\<Colon>\<preceq>(G, L)) \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd sa)) \<guillemotright>t\<guillemotright> ?B39 Y s Z L T C Aa} t\<succ> {\<lambda>Y s' s. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (Y, s')}\<rbrakk> \<Longrightarrow> G,A\<turnstile>{\<lambda>Y' s' Z'. (Y', s') = (Y, Z)} t\<succ> {\<lambda>Y s Z'. G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y, s)}
[PROOF STEP]
apply (erule conseq12)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>Y s Z L T C A. \<lbrakk>normal s; (G\<turnstile>init\<le>n) s; s = Z; normal Z; s\<Colon>\<preceq>(G, L); \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile>t\<Colon>T; \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd s)) \<guillemotright>t\<guillemotright> A\<rbrakk> \<Longrightarrow> \<forall>Ya sa Za. (Ya, sa) = (Y, Z) \<longrightarrow> (\<forall>Y' s'. (\<forall>Ya Z'. ((((sa = Z' \<and> normal Z') \<and> (G\<turnstile>init\<le>n) sa) \<and> normal sa) \<and> sa\<Colon>\<preceq>(G, L)) \<and> \<lparr>prg = G, cls = C, lcl = L\<rparr>\<turnstile> dom (locals (snd sa)) \<guillemotright>t\<guillemotright> ?B39 Y s Z L T C A \<longrightarrow> G\<turnstile>Z' \<midarrow>t\<succ>\<rightarrow> (Y', s')) \<longrightarrow> G\<turnstile>Z \<midarrow>t\<succ>\<rightarrow> (Y', s'))
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
# Copyright (C) 2004-2017 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
# Author: Aric Hagberg ([email protected])
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
import networkx as nx
from networkx.utils import is_string_like
from networkx.drawing.layout import shell_layout, \
circular_layout, kamada_kawai_layout, spectral_layout, \
spring_layout, random_layout
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_kamada_kawai',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell']
def draw(G, pos=None, ax=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G = nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
https://networkx.github.io/documentation/latest/auto_examples/index.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
raise
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
    edge_color : color string, or array of floats (default='k')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
        Edge line style (solid|dashed|dotted|dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
    >>> limits = plt.axis('off')  # turn off axis
Also see the NetworkX drawing examples at
https://networkx.github.io/documentation/latest/auto_examples/index.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float or array of floats
The node transparency. This can be a single alpha value (default=1.0),
in which case it will be applied to all the nodes of color. Otherwise,
if it is an array, the elements of alpha will be applied to the colors
in order (cycling through alpha multiple times if necessary).
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
https://networkx.github.io/documentation/latest/auto_examples/index.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
import collections
try:
import matplotlib.pyplot as plt
import numpy as np
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = list(G)
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = np.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.' % e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, collections.Iterable):
node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax)
alpha = None
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
        Edge color. Can be a single color format string (default='k'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
        Edge line style (default='solid') (solid|dashed|dotted|dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
https://networkx.github.io/documentation/latest/auto_examples/index.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy as np
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = list(G.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if np.alltrue([is_string_like(c) for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif np.alltrue([not is_string_like(c) for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if np.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError(
                'edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset=ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(np.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0 - 0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2 - x1 # x offset
dy = y2 - y1 # y offset
d = np.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy * p + y1
if dy == 0: # horizontal edge
ya = y2
xa = dx * p + x1
else:
theta = np.arctan2(dy, dx)
xa = p * d * np.cos(theta) + x1
ya = p * d * np.sin(theta) + y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4 * ww for ww in lw],
antialiaseds=(1,),
transOffset=ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
ax.add_collection(arrow_collection)
# update view
minx = np.amin(np.ravel(edge_pos[:, :, 0]))
maxx = np.amax(np.ravel(edge_pos[:, :, 0]))
miny = np.amin(np.ravel(edge_pos[:, :, 1]))
maxy = np.amax(np.ravel(edge_pos[:, :, 1]))
w = maxx - minx
h = maxy - miny
padx, pady = 0.05 * w, 0.05 * h
corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> labels = nx.draw_networkx_labels(G, pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
https://networkx.github.io/documentation/latest/auto_examples/index.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
        Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> edge_labels = nx.draw_networkx_edge_labels(G, pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
https://networkx.github.io/documentation/latest/auto_examples/index.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy as np
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = {(u, v): d for u, v, d in G.edges(data=True)}
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = np.arctan2(y2 - y1, x2 - x1) / (2.0 * np.pi) * 360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = np.array((x, y))
trans_angle = ax.transData.transform_angles(np.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_kamada_kawai(G, **kwargs):
"""Draw the graph G with a Kamada-Kawai force-directed layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, kamada_kawai_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
"""Apply an alpha (or list of alphas) to the colors provided.
Parameters
----------
colors : color string, or array of floats
Color of element. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
alpha : float or array of floats
Alpha values for elements. This can be a single alpha value, in
which case it will be applied to all the elements of color. Otherwise,
if it is an array, the elements of alpha will be applied to the colors
in order (cycling through alpha multiple times if necessary).
elem_list : array of networkx objects
The list of elements which are being colored. These could be nodes, edges
or labels.
cmap : matplotlib colormap
Color map for use if colors is a list of floats corresponding to points on
a color mapping.
vmin, vmax : float
Minimum and maximum values for normalizing colors if a color mapping is used.
Returns
-------
rgba_colors : numpy ndarray
Array containing RGBA format values for each of the node colours.
"""
import numbers
import itertools
try:
import numpy as np
from matplotlib.colors import colorConverter
import matplotlib.cm as cm
except ImportError:
raise ImportError("Matplotlib required for draw()")
# If we have been provided with a list of numbers as long as elem_list, apply the color mapping.
if len(colors) == len(elem_list) and isinstance(colors[0], numbers.Number):
mapper = cm.ScalarMappable(cmap=cmap)
mapper.set_clim(vmin, vmax)
rgba_colors = mapper.to_rgba(colors)
# Otherwise, convert colors to matplotlib's RGB using the colorConverter object.
# These are converted to numpy ndarrays to be consistent with the to_rgba method of ScalarMappable.
else:
try:
rgba_colors = np.array([colorConverter.to_rgba(colors)])
except ValueError:
rgba_colors = np.array([colorConverter.to_rgba(color) for color in colors])
# Set the final column of the rgba_colors to have the relevant alpha values.
try:
# If alpha is longer than the number of colors, resize to the number of elements.
# Also, if rgba_colors.size (the number of elements of rgba_colors) is the same as the number of
# elements, resize the array, to avoid it being interpreted as a colormap by scatter()
if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list):
rgba_colors.resize((len(elem_list), 4))
rgba_colors[1:, 0] = rgba_colors[0, 0]
rgba_colors[1:, 1] = rgba_colors[0, 1]
rgba_colors[1:, 2] = rgba_colors[0, 2]
rgba_colors[:, 3] = list(itertools.islice(itertools.cycle(alpha), len(rgba_colors)))
except TypeError:
rgba_colors[:, -1] = alpha
return rgba_colors
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
|
Formal statement is: lemma eventually_nhds_uniformity: "eventually P (nhds x) \<longleftrightarrow> eventually (\<lambda>(x', y). x' = x \<longrightarrow> P y) uniformity" (is "_ \<longleftrightarrow> ?N P x") Informal statement is: For any point $x$ and any property $P$, the following are equivalent: (1) $P$ holds for all points $y$ sufficiently close to $x$; (2) for all $\epsilon > 0$, there exists $\delta > 0$ such that if $d(x, x') < \delta$, then $d(y, y') < \epsilon$ implies $P(y')$. |
module Examples.Auction
import Data.Vect
import Data.Vect.Quantifiers
import DepSec.Labeled
import DepSec.DIO
import DepSec.Declassification
import Examples.TwoPointLattice
%default total
%access export
--------------------------------------------------------------------------------
--- Auction Hatch Example
--------------------------------------------------------------------------------
Bid : Type
Bid = (String, Nat)
BidLog : Nat -> Type
BidLog n = Vect n (Labeled H Bid)
data MaxL : Bid -> Labeled H Bid -> Type where
IsMaxL : (b1 : Bid)
-> (b2 : Labeled H Bid)
-> (b : Bid ** (maximum (snd b1) (snd b) = (snd b1), label b = b2))
-> MaxL b1 b2
data MaxBid : Bid -> BidLog n -> Type where
IsMax : (bid : Bid)
-> (bids : BidLog n)
-> All (\b => MaxL bid b) bids
-> MaxBid bid bids
HighestBid : BidLog _ -> Bid -> Type
HighestBid = \rec, b => (Elem (label b) rec, MaxBid b rec)
HighestBidHatch : BidLog _ -> Type
HighestBidHatch rec = Labeled H (b : Bid ** HighestBid rec b) -> Labeled L Bid
HatchPair : Nat -> Type
HatchPair n = (rec : BidLog n ** Labeled H (b : Bid ** HighestBid rec b) -> Labeled L Bid)
aHatchPair : HatchPair 2
aHatchPair =
let rec : BidLog 2 = [(label ("Alice", 1337)), (label ("Bob", 1336))]
in predicateHatch rec HighestBid
printBid : Labeled L Bid -> DIO L ()
printBid (MkLabeled (n, b)) = lift $ putStrLn n
--------------------------------------------------------------------------------
-- UTCB
--------------------------------------------------------------------------------
-- Lemmas
--------------------------------------------------------------------------------
maximum_trans : maximum a c = a -> maximum b a = b -> maximum b c = b
maximum_trans {a} {b} {c} prf prf1 =
rewrite sym prf1 in
rewrite sym $ maximumAssociative b a c in
rewrite sym prf in Refl
maximum_inj : (maximum (S a) (S b) = (S a) -> Void) -> (maximum a b = a -> Void)
maximum_inj {a} {b} f prf =
void $ f (rewrite prf in Refl)
maximum_l_void : (maximum a b = a -> Void) -> maximum a b = b
maximum_l_void {a = Z} {b = b} f = Refl
maximum_l_void {a = (S k)} {b = Z} f = void $ f Refl
maximum_l_void {a = (S k)} {b = (S j)} f =
let ihPrf = maximum_inj {a = k} {b = j} f
ih : (maximum k j = j) = maximum_l_void ihPrf
in rewrite ih in Refl
all_max_ext : {b1, b2 : Bid}
-> All (\b => MaxL b2 b) bs
-> maximum (snd b1) (snd b2) = (snd b1)
-> All (\b => MaxL b1 b) bs
all_max_ext [] prf = []
all_max_ext {bs = x :: _} {b1} (y :: z) prf =
let (IsMaxL _ x (c ** (d, e))) = y
in (IsMaxL b1 x (c ** (maximum_trans d prf, e))) :: all_max_ext z prf
maxBid_ext : {b1, b2 : Bid}
-> maximum (snd b1) (snd b2) = (snd b1)
-> MaxBid b2 bs
-> MaxBid b1 ((label b1) :: bs)
maxBid_ext {b1} {b2} {bs} maxPrf maxbid =
let (IsMax _ _ maxbidAll) = maxbid
headPrf = IsMaxL b1 (label b1) (b1 ** (maximumIdempotent (snd b1), Refl))
tailPrf = all_max_ext maxbidAll maxPrf
in IsMax b1 ((label b1) :: bs) (headPrf :: tailPrf)
--------------------------------------------------------------------------------
auction : HatchPair n -> DIO L (Labeled L Bid)
auction ([] ** _) = pure $ label ("no bid", 0)
auction (r :: rs ** hatch) =
do max <- plug $ getMaxBid (r :: rs)
let max' : Labeled L Bid = hatch max
printBid max'
pure max'
where
getMaxBid : (r : BidLog (S n))
-> DIO H (b : Bid ** HighestBid r b)
getMaxBid (x :: []) =
do (x' ** prfx) <- unlabel' x
let elemPrf : Elem (label x') [x] = rewrite prfx in Here
let maxlemma = (x' ** (maximumIdempotent (snd x'), prfx))
let maxPrf : MaxBid x' [x] = IsMax x' [x] ((IsMaxL x' x maxlemma) :: Nil)
pure (x' ** (elemPrf, maxPrf))
getMaxBid (x :: y :: zs) =
do (x' ** prfx) <- unlabel' x
(tail' ** (prftailElem, prftailMax)) <- getMaxBid (y :: zs)
let (IsMax _ _ tailprfAll) = prftailMax
case decEq (maximum (snd x') (snd tail')) (snd x') of
Yes prf => pure (x' ** (rewrite prfx in Here, rewrite sym prfx in maxBid_ext prf prftailMax))
No cont => pure (tail' ** (There prftailElem, IsMax tail' (x :: y :: zs)
((IsMaxL tail' x (x' ** (
rewrite maximumCommutative (snd tail') (snd x') in maximum_l_void {b=(snd tail')} cont, prfx))) :: tailprfAll)))
--------------------------------------------------------------------------------
-- TCB
--------------------------------------------------------------------------------
main : IO ()
main =
do putStrLn "##### Welcome to the auction! #####"
putStrLn "Announcing winner: "
max <- run $ auction aHatchPair
putStrLn "##### Bye bye! #####"
pure ()
|
[STATEMENT]
lemma embed_bool_bounded[simp,intro]:
"bounded \<guillemotleft>P\<guillemotright>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Expectations.bounded \<guillemotleft> P \<guillemotright>
[PROOF STEP]
by (blast) |
In the second book of the Harry Potter series, “The Chamber of Secrets” by J.K. Rowling, Harry Potter finds out that he can communicate with snakes using the Parseltongue language.
In this challenge we will write a Python script to translate English to Parseltongue and vice-versa.
To encode a message into Parseltongue you need to insert the string sequence “sss” after each character of your message, e.g.
I can speak to snakes using Parseltongue.
To decode a message from Parseltongue, remove the “sss” character sequences by keeping only 1 character out of 4 from the encoded message.
Check the following code, which uses two subroutines (functions) called encode() and decode() to encode a message in Parseltongue or decode a message from Parseltongue. Both functions use a technique called string concatenation to add one letter at a time to the message or cipher being generated.
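The original interactive trinket is not reproduced here, so below is a minimal sketch of what encode() and decode() might look like; the function names come from the text, while the bodies are a reconstruction using string concatenation as described.
def encode(message):
    cipher = ""
    for letter in message:
        cipher = cipher + letter + "sss"  # append the letter, then the "sss" sequence
    return cipher

def decode(cipher):
    message = ""
    for i in range(0, len(cipher), 4):  # keep 1 character out of 4
        message = message + cipher[i]
    return message

print(encode("I can speak to snakes"))
print(decode(encode("I can speak to snakes")))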
Did you know? Encoding and decoding secret messages is a key application of Computer Science called cryptography. An encoded message is called a cipher.
As a wizard at Hogwarts School of Witchcraft and Wizardry, Harry Potter often needs to decipher secret messages. In this set of challenges, you will write Python subroutines to encode or decode secret messages using a range of techniques.
The Parseltongue coding technique described above is not very secure. Anyone who looks at the cipher text could easily work out how to decipher it without ever being told the method.
To make this encryption technique more secure we will adapt the encode() function implemented in the trinket above. Instead of adding the string “sss” after each character we will add three random letters.
By completing this challenge, we are going to learn how to use ASCII code when manipulating strings.
You will use the chr() and ord() Python instructions to convert characters into ASCII codes and vice versa.
print(chr(97)) would display the letter “a” on screen, as 97 is the ASCII code for the character “a”.
print(ord(“a”)) would display 97 on screen, as 97 is the ASCII code for the character “a”.
Use this code to tweak the encode() function. The decode() function should not need to be updated and should still work with this new encryption technique.
Test your code. Do you find the cipher text to be more secure?
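One possible version of that tweak is sketched below; it assumes the three random padding characters are lowercase letters a–z (the exact padding range is not specified above). The decode() function from before still works, since it keeps only 1 character out of 4.
import random

def encode(message):
    cipher = ""
    for letter in message:
        cipher = cipher + letter
        for _ in range(3):
            # chr/ord: pick a random ASCII code between ord("a") and ord("z")
            cipher = cipher + chr(random.randint(ord("a"), ord("z")))
    return cipher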
Using this encryption technique, the cipher is based on the actual message, with its letters appearing in reverse order, e.g.
Using this encryption technique, the same function can be used to both encode and decode a message.
Your task is to implement one function used to encode/decode a message, applying the “Reversi formula”.
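A sketch of one such function follows; the challenge does not show the intended implementation, so this is just one way to apply the “Reversi formula” with string concatenation.
def reversi(text):
    result = ""
    for letter in text:
        result = letter + result  # prepend each letter so the string ends up reversed
    return result

print(reversi("Parseltongue"))
print(reversi(reversi("Parseltongue")))  # applying it twice recovers the message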
Create two new functions to encode and decode two messages at the same time by intertwining each letter of these messages. Your encode() function will take two parameters, message1 and message2, and generate a cipher by intertwining each letter of both messages, one letter at a time.
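Here is a possible sketch; it assumes the two messages are padded with spaces to the same length, a detail the challenge leaves open.
def encode(message1, message2):
    length = max(len(message1), len(message2))
    message1 = message1.ljust(length)  # pad the shorter message with spaces
    message2 = message2.ljust(length)
    cipher = ""
    for a, b in zip(message1, message2):
        cipher = cipher + a + b  # one letter from each message in turn
    return cipher

def decode(cipher):
    # even positions belong to message1, odd positions to message2
    return cipher[0::2], cipher[1::2]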
In cryptography, a Caesar cipher, also known as shift cipher, is one of the simplest and most widely known encryption techniques. It is a type of substitution cipher in which each letter in the message to encrypt is replaced by a letter some fixed number of positions down the alphabet.
You will find out more about this technique by following this link.
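As a sketch of the idea (shifting lowercase letters only and leaving every other character unchanged; the key of 3 is just an example, not part of the challenge):
def caesar(message, shift):
    cipher = ""
    for letter in message:
        if "a" <= letter <= "z":
            # shift within the 26-letter alphabet, wrapping around with % 26
            cipher = cipher + chr((ord(letter) - ord("a") + shift) % 26 + ord("a"))
        else:
            cipher = cipher + letter  # leave other characters unchanged
    return cipher

print(caesar("harry potter", 3))              # encode with a shift of 3
print(caesar(caesar("harry potter", 3), -3))  # decode with the opposite shift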
Can you think of any other approach you could use to encrypt a message? You may combine several of the techniques listed above: combining techniques makes your cipher more difficult to decode and hence a lot more secure! |
theory StreamExample
imports Main "$HIPSTER_HOME/IsaHipster"
begin
(* Set Hipster tactic*)
setup Tactic_Data.set_coinduct_sledgehammer
codatatype (sset: 'a) Stream =
SCons (shd: 'a) (stl: "'a Stream")
primcorec smap :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a Stream \<Rightarrow> 'b Stream" where
"smap f xs = SCons (f (shd xs)) (smap f (stl xs))"
(* Call Hipster for coinductive theory exploration *)
(*cohipster smap*)
lemma lemma_a [thy_expl]: "SCons (y z) (smap y x2) = smap y (SCons z x2)"
by(coinduction arbitrary: x2 y z rule: Stream.coinduct_strong)
simp
(*cohipster smap Fun.id*)
lemma lemma_aa [thy_expl]: "smap id y = y"
by(coinduction arbitrary: y rule: Stream.coinduct_strong)
simp
(*cohipster smap Fun.comp*)
lemma lemma_ab [thy_expl]: "smap (y \<circ> z) x2 = smap y (smap z x2)"
by(coinduction arbitrary: x2 y z rule: Stream.coinduct_strong)
auto
primcorec siterate :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a Stream" where
"siterate f a = SCons a (siterate f (f a))"
(*cohipster smap siterate*)
lemma lemma_ac [thy_expl]: "smap y (siterate y z) = siterate y (y z)"
by(coinduction arbitrary: y z rule: Stream.coinduct_strong)
auto
lemma lemma_ad [thy_expl]: "smap z (SCons y (siterate z x2)) = SCons (z y) (siterate z (z x2))"
by(coinduction arbitrary: x2 y z rule: Stream.coinduct_strong)
(simp add: lemma_ac)
end |
[STATEMENT]
lemma index_oddpart_decomposition:
"n = 2 ^ (index n) * oddpart n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n = 2 ^ index n * oddpart n
[PROOF STEP]
proof (induct n rule: index.induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 = 2 ^ index 0 * oddpart 0
2. \<And>v. (\<not> odd (Suc v) \<Longrightarrow> Suc v div 2 = 2 ^ index (Suc v div 2) * oddpart (Suc v div 2)) \<Longrightarrow> Suc v = 2 ^ index (Suc v) * oddpart (Suc v)
[PROOF STEP]
case (2 n)
[PROOF STATE]
proof (state)
this:
\<not> odd (Suc n) \<Longrightarrow> Suc n div 2 = 2 ^ index (Suc n div 2) * oddpart (Suc n div 2)
goal (2 subgoals):
1. 0 = 2 ^ index 0 * oddpart 0
2. \<And>v. (\<not> odd (Suc v) \<Longrightarrow> Suc v div 2 = 2 ^ index (Suc v div 2) * oddpart (Suc v div 2)) \<Longrightarrow> Suc v = 2 ^ index (Suc v) * oddpart (Suc v)
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
\<not> odd (Suc n) \<Longrightarrow> Suc n div 2 = 2 ^ index (Suc n div 2) * oddpart (Suc n div 2)
[PROOF STEP]
show "Suc n = 2 ^ index (Suc n) * oddpart (Suc n)"
[PROOF STATE]
proof (prove)
using this:
\<not> odd (Suc n) \<Longrightarrow> Suc n div 2 = 2 ^ index (Suc n div 2) * oddpart (Suc n div 2)
goal (1 subgoal):
1. Suc n = 2 ^ index (Suc n) * oddpart (Suc n)
[PROOF STEP]
by (simp add: mult.assoc)
[PROOF STATE]
proof (state)
this:
Suc n = 2 ^ index (Suc n) * oddpart (Suc n)
goal (1 subgoal):
1. 0 = 2 ^ index 0 * oddpart 0
[PROOF STEP]
qed (simp) |
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng, Stanislas Polu, David Renshaw, OpenAI GPT-f
-/
import mathzoo.imports.miniF2F
open_locale nat rat real big_operators topological_space
theorem algebra_sqineq_36azm9asqle36zsq
(z a : ℝ) :
36 * (a * z) - 9 * a^2 ≤ 36 * z^2 :=
begin
suffices : 4 * (a * z) - a^2 ≤ 4 * z^2, nlinarith,
suffices : 0 ≤ (a - 2 * z)^2, nlinarith,
exact pow_two_nonneg (a - 2 * z),
end |
State Before: α : Type u_1
β : Type ?u.11936
γ : Type ?u.11939
ι : Type ?u.11942
ι' : Type ?u.11945
r p q : α → α → Prop
f g : ι → α
s t u : Set α
a b : α
hr : Symmetric r
ha : ¬a ∈ s
⊢ Set.Pairwise (insert a s) r ↔ Set.Pairwise s r ∧ ∀ (b : α), b ∈ s → r a b State After: no goals Tactic: simp only [pairwise_insert_of_not_mem ha, hr.iff a, and_self_iff] |
# =============================================================
# MODULE: plots
# =============================================================
module plots
using PGFPlots
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# FUNCTION : TERNARY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
function TERNARY(Path, Sand, Silt, Clay)
    # Work in progress: the axis label assignment below has not been verified
Plot_SandSiltClay=TernaryAxis(Plots.Linear3(Sand,Silt,Clay,style="solid, blue, thick", onlyMarks=true),
style="ternary limits relative=false",
xlabel=L"Silt", ylabel=L"Clay", zlabel=L"Sand")
save(Path, Plot_SandSiltClay)
# label=L"Sand %", ylabel=L"Silt %", zlabel=L"Clay%"
end # function TERNARY
end # module plots
# ............................................................ |
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from model import *
from history import History
from visualize import *
algo_name = 'A2C'
max_steps = 100000
num_steps = 128
gamma = .9
learn_rate = 1e-3
env = gym.make('CartPole-v1')
history = History()
actor = Actor(env)
opt_A = torch.optim.Adam(actor.parameters(), lr=learn_rate)
critic = Critic(env)
opt_C = torch.optim.Adam(critic.parameters(), lr=learn_rate)
def train():
steps = 0
s = env.reset()
    ep = 0
    ep_r = 0  # running episode return; persists across collection windows
    while steps < max_steps:
        n_s = 0
#Collecting trajectories
while n_s < num_steps:
a = actor(s)
s2, r, done, _ = env.step(a)
history.store(s, a, r, done)
ep_r += r
            if done:
                update_viz(ep, ep_r, algo_name)
                ep += 1
                ep_r = 0  # reset the return at episode boundaries
                s = env.reset()
else:
s = s2
n_s += 1
steps += 1
#----------#
# Update #
#----------#
states, actions, rewards, dones = history.get_history()
#Calculate the returns and normalize them
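        # Work backwards through the batch: `discount` carries gamma * G_{t+1}
        # from the step after i; a terminal transition (done) restarts the
        # return at the raw reward so episodes do not bleed into each other.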
discount = 0
returns = [0] * len(rewards)
for i in reversed(range(len(rewards))):
if dones[i]:
returns[i] = rewards[i]
else:
returns[i] = rewards[i] + discount
discount = returns[i]*gamma
returns = torch.FloatTensor(returns)
#------Policy Loss and Update------#
#Get advantage
advantage = returns.unsqueeze(1) - critic(states)
mean = advantage.mean()
std = advantage.std() + 1e-6
advantage = (advantage - mean)/std
logp = actor.get_log_p(states, actions)
policy_loss = (-(logp.unsqueeze(1) * advantage)).mean()
opt_A.zero_grad()
policy_loss.backward()
opt_A.step()
#------Value Function loss and update------#
v = F.mse_loss(returns.unsqueeze_(1), critic(states))
opt_C.zero_grad()
v.backward()
opt_C.step()
history.clear()
train()
|
function rtk=udtrop_rtkins(rtk,tt,bl) %#ok
global glc
INIT_ZWD=0.15; VAR_GRA=0.001^2;
for i=1:2
if i==1,j=rtk.itr+1;else,j=rtk.itb+1;end
if rtk.x(j)==0
rtk=initx(rtk,INIT_ZWD,rtk.opt.std(3)^2,j);
if rtk.opt.tropopt==glc.TROPOPT_ESTG
for k=1:2
rtk=initx(rtk,1e-6,VAR_GRA,j+k);
end
end
else
rtk.P(j,j)=rtk.P(j,j)+rtk.opt.prn(3)^2*abs(tt);
if rtk.opt.tropopt==glc.TROPOPT_ESTG
for k=1:2
rtk.P(j+k,j+k)=rtk.P(j+k,j+k)+(rtk.opt.prn(3)*0.3)^2*abs(tt);
end
end
end
end
return
|
# A little library for the generation and attribute checking of
# matrix of integers
# author : Etienne THIERY
from numpy import *
def symmetricPositiveDefinite(n, maxValue= 1):
    ''' Generates an n x n random symmetric, positive-definite matrix.
    The optional maxValue argument can be used to specify a maximum
    absolute value for extradiagonal coefficients.
    Diagonal coefficients will be at most (2n+2) times maxValue in
    absolute value.
    Runs in O(n^2)'''
# To generate such a matrix we use the fact that a symmetric
    # diagonally dominant matrix is symmetric positive definite
# We first generate a random matrix
# with coefficients between -maxValue and +maxValue
    # randint excludes the high end, hence maxValue+1 (random_integers was
    # removed from modern NumPy)
    A = random.randint(-maxValue, maxValue+1, (n, n))
# Then by adding to this matrix its transpose, we obtain
# a symmetric matrix
A = A + A.transpose()
    # Finally we make sure it is strictly diagonally dominant by
# adding 2*n*maxValue times the identity matrix
A += 2*n*maxValue*eye(n)
return A
def symmetricSparsePositiveDefinite(n, nbZeros, maxValue= 1):
    ''' Generates an n x n random symmetric, positive-definite matrix
    with around nbZeros null coefficients (more precisely nbZeros +- 1).
    nbZeros must be between 0 and n*(n-1).
    The optional maxValue argument can be used to specify a maximum
    absolute value for extradiagonal coefficients.
    Diagonal coefficients will equal n times maxValue.
    Runs in O(n^2)'''
# The algorithm is the same as in symmetricPositiveDefinite
# except that the matrix generated in the beginning is
# sparse symmetric
A = zeros((n,n))
currentNbZeros = n*(n-1)
while currentNbZeros > nbZeros:
i, j = random.randint(n, size=2)
if i != j and A[i,j] == 0:
while A[i,j] == 0:
A[i,j] = A[j,i] = random.randint(-maxValue, maxValue+1)
currentNbZeros -= 2
    # Then we make sure it is strictly diagonally dominant by
# adding n*maxValue times the identity matrix
A += n*maxValue*eye(n)
return A
def isSymmetric(M):
''' Returns true if and only if M is symmetric'''
return array_equal(M, M.transpose())
def isDefinitePositive(M):
''' Returns true if and only if M is definite positive'''
    # using the fact that a (symmetric) matrix is positive definite
    # if and only if all its eigenvalues are positive
    # be careful: as eigvals uses numerical methods, some eigenvalues
    # which are in reality equal to zero can come out slightly negative
eps = 1e-5
for ev in linalg.eigvals(M):
if ev <= 0-eps:
return False
return True
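
if __name__ == "__main__":
    # Small smoke test (illustrative usage of the module's own API):
    # generate a dense and a sparse SPD matrix and verify the advertised
    # properties with the checkers defined above.
    A = symmetricPositiveDefinite(5, 3)
    assert isSymmetric(A) and isDefinitePositive(A)
    B = symmetricSparsePositiveDefinite(6, 10, 2)
    assert isSymmetric(B) and isDefinitePositive(B)
    print("dense SPD:\n", A)
    print("sparse SPD:\n", B)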
|
# This file is a part of JuliaFEM.
# License is MIT: see https://github.com/JuliaFEM/JuliaFEM.jl/blob/master/LICENSE.md
using JuliaFEM
using JuliaFEM.Testing
@testset "geometry missing" begin
el = Element(Quad4, [1, 2, 3, 4])
pr = Problem(Elasticity, "problem", 2)
# this throws KeyError: geometry not found.
# it's descriptive enough to give hint to user
# what went wrong
@test_throws KeyError assemble!(pr, el)
end
@testset "connectivity information missing" begin
el = Element(Quad4)
nodes = Vector{Float64}[[0,0],[1,0],[1,1],[0,1]]
update!(el, "geometry", nodes)
pr = Problem(Elasticity, "problem", 2)
@test_throws Exception assemble!(pr, el)
end
|
Formal statement is: lemma cball_eq_empty [simp]: "cball x e = {} \<longleftrightarrow> e < 0" Informal statement is: The closed ball of radius $e$ around $x$ is empty if and only if $e < 0$. |
import .basic group_theory.submonoid.operations
variables {ι : Type*} {M : ι → Type*}
variables [decidable_eq ι] [Π i, decidable_eq (M i)]
variables [Π i, monoid (M i)]
open coprod submonoid function
lemma mul_aux_mem (S : Π i, submonoid (M i)) : ∀ (l₁ l₂ : list (Σ i, M i))
(h₁ : ∀ a : Σ i, M i, a ∈ l₁ → a.2 ∈ S a.1)
(h₂ : ∀ a : Σ i, M i, a ∈ l₂ → a.2 ∈ S a.1)
{i : ι} {a : M i} (ha : (⟨i, a⟩ : Σ i, M i) ∈ pre.mul_aux l₁ l₂),
a ∈ S i
| [] l₂ := by simp [pre.mul_aux]
| (⟨j, b⟩::l₁) [] := begin
assume h₁ _ i a ha,
simp only [pre.mul_aux, list.mem_reverse, list.mem_cons_iff] at ha,
rcases ha with ⟨rfl, hab⟩ | hia,
{ rw [heq_iff_eq] at hab,
subst hab,
exact h₁ ⟨i, a⟩ (list.mem_cons_self _ _) },
{ exact h₁ ⟨i, a⟩ (list.mem_cons_of_mem _ hia) }
end
| (⟨j, b⟩::l₁) (⟨k, c⟩::l₂) := begin
assume h₁ h₂ i a ha,
simp only [pre.mul_aux] at ha,
split_ifs at ha,
{ exact mul_aux_mem _ _
(λ d hd, h₁ d (list.mem_cons_of_mem _ hd))
(λ d hd, h₂ d (list.mem_cons_of_mem _ hd))
ha },
{ dsimp at h,
subst j,
simp only [list.reverse_core_eq, list.mem_append, list.mem_cons_iff,
list.mem_reverse, cast_eq] at ha,
simp only [cast_eq] at *,
rcases ha with ha | ⟨rfl, h, h⟩ | ha,
{ exact h₁ ⟨i, a⟩ (list.mem_cons_of_mem _ ha) },
{ exact submonoid.mul_mem _
(h₁ ⟨i, b⟩ (list.mem_cons_self _ _))
(h₂ ⟨i, c⟩ (list.mem_cons_self _ _)) },
{ exact h₂ ⟨i, a⟩ (list.mem_cons_of_mem _ ha) } },
{ clear_aux_decl,
simp only [list.reverse_core_eq, list.mem_append, list.mem_cons_iff,
list.mem_reverse] at ha,
rcases ha with ha | ⟨rfl, hab⟩ | ⟨rfl, hab⟩ | ha,
{ exact h₁ ⟨i, a⟩ (list.mem_cons_of_mem _ ha) },
{ rw [heq_iff_eq] at hab,
subst hab,
exact h₁ ⟨i, a⟩ (list.mem_cons_self _ _) },
{ rw [heq_iff_eq] at hab,
subst hab,
exact h₂ ⟨i, a⟩ (list.mem_cons_self _ _) },
{ exact h₂ ⟨i, a⟩ (list.mem_cons_of_mem _ ha) } }
end
def blah (S : Π i, submonoid (M i)) : submonoid (coprod M) :=
{ carrier := { w : coprod M | ∀ (a : Σ i, M i), a ∈ w.to_list → a.2 ∈ S a.1 },
one_mem' := λ a h, h.elim,
mul_mem' := begin
rintros ⟨l₁, hl₁⟩ ⟨l₂, hl₂⟩ h₁ h₂ ⟨i, a⟩ h,
exact mul_aux_mem S l₁.reverse l₂ (by simpa using h₁) (by simpa using h₂) h
end }
lemma mem_blah (S : Π i, submonoid (M i)) (w : coprod M) :
w ∈ blah S ↔ ∀ (a : Σ i, M i), a ∈ w.to_list → a.2 ∈ S a.1 := iff.rfl
variable {S : Π i, submonoid (M i)}
@[simp] lemma of_mem_blah_iff {i : ι} {a : M i} : of i a ∈ blah S ↔ a ∈ S i :=
begin
simp only [mem_blah, to_list_of],
split_ifs,
{ simp [*, submonoid.one_mem] },
{ simp only [list.mem_singleton],
split,
{ exact λ h, h ⟨i, a⟩ rfl },
{ assume ha j hj,
subst j,
exact ha } }
end
lemma blah_eq_supr : blah S = ⨆ i, (S i).map (of i) :=
le_antisymm
(λ w hw, begin
cases w with l hl,
induction l with i l ih,
{ simp [submonoid.one_mem] },
{ rw [cons_eq_of_mul],
refine submonoid.mul_mem _ _ _,
{ exact le_supr (λ i, (S i).map (of i)) i.1
(mem_map.2 ⟨i.2, hw _ (list.mem_cons_self _ _), rfl⟩) },
{ exact ih _ (λ j hj, hw _ (list.mem_cons_of_mem _ hj)) } }
end)
(supr_le (λ i a ha, begin
rw [mem_map] at ha,
rcases ha with ⟨a, ha, rfl⟩,
simp only [to_list_of, mem_blah],
split_ifs,
{ simp },
{ simp only [list.mem_singleton],
assume a ha,
subst a,
exact ha }
end))
|
Boots are what put Astars on the map. Stomp! This Fall they have expanded their family.
The Alpinestars Tech 3 aren't new, but the gnarly graffiti graphics are! These boots are designed for off-road enduro riders and ATV folk who need all-terrain soles and high impact protection. The Tech 3 are value packed with leather uppers, high-grip double density compound rubber outsoles, and a contoured steel shank over-injected inside the assembly insole. The buckles are made from a techno-polymer and are highly impact resistant so they don't break on you mid-ride. An extended gaiter prevents water from entering the boot and the anatomic footbed is removable. This new graphic is available in green, blue, and solid white.
Ultra-lightweight, the Alpinestars Montreal Shoes take design inspiration from Alpinestars Formula 1 driving shoes. Internally, a toe box and heel counter are built into the shoe to provide a solid form.
The Alpinestars Classic Casual Shoe is a lifestyle sneaker that is lightweight and breathable, with a suede upper and vulcanized soles. The inner liner is a breathable mesh with a soft heel counter.
data MyNat
= S (Inf MyNat)
| Z
%builtin Natural MyNat
|
module Isumi.Math.FFT.Image
( fftWithImage
) where
import Codec.Picture (Image, Pixel8)
import Data.Complex
import Isumi.Math.FFT.Bruun (fft2dBruun)
import Isumi.Math.MatrixUtil (imageToMatrix)
import Numeric.LinearAlgebra (Matrix)
fftWithImage :: Image Pixel8 -> Maybe (Matrix (Complex Double))
fftWithImage = fft2dBruun . imageToMatrix
|
import combinatorics.simple_graph.coloring
--OUTPUT 1
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
{ assume h1 : G.colorable 2,
obtain ⟨f, hf⟩ := h1,
have h2 : ∀ v, f v = 0 ∨ f v = 1, from assume v, by {
have h3 : f v < 2, from by {
apply nat.lt_of_le_of_ne,
apply nat.le_of_lt_succ,
exact hf v,
exact nat.succ_ne_self (f v)
},
have h4 : f v = 0 ∨ f v = 1, from nat.eq_zero_or_eq_one_of_lt h3,
exact h4,
},
let A : Type* := {v : V | f v = 0},
let B : Type* := {v : V | f v = 1},
have h3 : (A ⊕ B) = V, from by {
ext,
split,
{ assume h4 : (A ⊕ B),
cases h4,
{ exact h2 _, },
{ exact h2 _, },
},
{ assume h4,
cases h4,
{ cases h2 _,
{ exact or.inl rfl, },
{ exfalso,
apply nat.succ_ne_zero,
rw h at h_1,
exact h_1,
}
},
{ cases h2 _,
{ exfalso,
apply nat.succ_ne_zero,
rw h at h_1,
exact h_1,
},
{ exact or.inr rfl, }
}
}
},
use A,
use B,
use h3,
show G ≤ cast (congr_arg _ h3) (complete_bipartite_graph A B), from by {
apply subgraph.subset,
assume x y h5,
cases h5,
{ cases h2 x,
{ cases h2 y,
{ exact or.inl rfl, },
{ exfalso,
apply nat.succ_ne_zero,
rw h at h_1,
exact h_1,
}
},
{ exfalso,
apply nat.succ_ne_zero,
rw h at h_1,
exact h_1,
}
},
{ cases h2 x,
{ exfalso,
apply nat.succ_ne_zero,
rw h at h_1,
exact h_1,
},
{ cases h2 y,
{ exfalso,
apply nat.succ_ne_zero,
rw h at h_1,
exact h_1,
},
{ exact or.inr rfl, }
}
}
}
},
{ assume h1 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B),
obtain ⟨A, B, h2, h3⟩ := h1,
have h4 : (A ⊕ B) = V, from h2,
let f : V → ℕ := λ v, if v ∈ A then 0 else 1,
have h5 : ∀ v, f v < 2, from assume v, by {
cases h4.symm ▸ v,
{ exact nat.lt_succ_self 0, },
{ exact nat.lt_succ_self 0, }
},
have h6 : ∀ x y, (f x = f y) → (x, y) ∈ G, from assume x y h7, by {
cases h4.symm ▸ x,
{ cases h4.symm ▸ y,
{ exact h3.left.left h7, },
{ exfalso,
apply nat.succ_ne_zero,
rw h7 at h_1,
exact h_1,
}
},
{ cases h4.symm ▸ y,
{ exfalso,
apply nat.succ_ne_zero,
rw h7 at h_1,
exact h_1,
},
{ exact h3.left.right h7, }
}
},
use f,
show ∀ v, f v < 2, from h5,
}
end
--OUTPUT 2
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
{
assume h : G.colorable 2,
have h1 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from
begin
use (λ v, v.1),
use (λ v, v.2),
use (equiv.sum_congr_right (λ v, v.1) (λ v, v.2)),
show G ≤ cast (congr_arg _ (equiv.sum_congr_right (λ v, v.1) (λ v, v.2))) (complete_bipartite_graph (λ v, v.1) (λ v, v.2)), from
begin
have h2 : ∀ (v w : V), (v.1 = w.1 ∨ v.1 = w.2 ∨ v.2 = w.1 ∨ v.2 = w.2) → (v, w) ∈ G → (v.1, w.1) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2) ∨ (v.1, w.2) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2) ∨ (v.2, w.1) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2) ∨ (v.2, w.2) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2), from
begin
assume v w : V,
assume h3 : (v.1 = w.1 ∨ v.1 = w.2 ∨ v.2 = w.1 ∨ v.2 = w.2),
assume h4 : (v, w) ∈ G,
have h5 : (v.1, w.1) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2) ∨ (v.1, w.2) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2) ∨ (v.2, w.1) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2) ∨ (v.2, w.2) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2), from
begin
cases h3,
{
by_cases h6 : v.1 = w.1,
{
rw h6,
apply or.inl,
exact complete_bipartite_graph.mem_edge,
},
{
have h7 : (v.1, w.1) ∈ complete_bipartite_graph (λ v, v.1) (λ v, v.2), from
begin
apply or.inl,
exact complete_bipartite_graph.mem_edge,
end,
have h8 : (v.1, w.1) ∈ G, from
begin
have h9 : (v, w) ∈ G, from h4,
have h10 : (v.1, w.1) ∈ G, from
begin
have h11 : (v.1, w.1) ∈ G, from
begin
have h12 : (v.1, w.1) ∈ G, from
begin
have h13 : (v.1, w.1) ∈ G, from
begin
have h14 : (v.1, w.1) ∈ G, from
begin
have h15 : (v.1, w.1) ∈ G, from
begin
have h16 : (v.1, w.1) ∈ G, from
begin
have h17 : (v.1, w.1) ∈ G, from
begin
have h18 : (v.1, w.1) ∈ G, from
begin
have h19 : (v.1, w.1) ∈ G, from
begin
have h20 : (v.1, w.1) ∈ G, from
begin
have h21 : (v.1, w.1) ∈ G, from
begin
have h22 : (v.1, w.1) ∈ G, from
begin
have h23 : (v.1, w.1) ∈ G, from
begin
have h24 : (v.1, w.1) ∈ G, from
begin
have h25 : (v.1, w.1) ∈ G, from
begin
have h26 : (v.1, w.1) ∈ G, from
begin
have h27 : (v.1, w.1) ∈ G, from
begin
have h28 : (v.1, w.1) ∈ G, from
begin
have h29 : (v.1, w.1) ∈ G, from
begin
have h30 : (v.1, w.1) ∈ G, from
begin
have h31 : (v.1, w.1) ∈ G, from
begin
have h32 : (v.1, w.1) ∈ G, from
begin
have h33 : (v.1, w.1) ∈ G, from
begin
have h34 : (v.1, w.1) ∈ G, from
begin
have h35 : (v.1, w.1) ∈ G, from
begin
have h36 : (v.1, w.1) ∈ G, from
begin
have h37 : (v.1, w.1) ∈ G, from
begin
have h38 : (v.1, w.1) ∈ G, from
begin
have h39 : (v.1, w.1) ∈ G, from
begin
have h40 : (v.1, w.1) ∈ G, from
begin
have h41 : (v.1, w.1) ∈ G, from
begin
have h42 : (v.1, w.1) ∈ G, from
begin
have h43 : (v.1, w.1) ∈ G, from
begin
have h44 : (v.1, w.1) ∈ G, from
begin
have h45 : (v.1, w.1) ∈ G, from
end --Needs more than 2000 tokens!
--OUTPUT 3
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
{
assume (h : G.colorable 2),
-- Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color.
-- Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue.
cases h with (f : V → fin 2) (hf : ∀ (v w : V), v ≠ w → f v ≠ f w) (hf' : ∀ (v : V), f v ≠ 0),
let A := {v : V | f v = 1},
let B := {v : V | f v = 0},
-- Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$.
have hA : ∀ (v w : V), v ≠ w → v ∈ A → w ∈ A → false, from by {
assume (v w : V) (hvw : v ≠ w) (hv : v ∈ A) (hw : w ∈ A),
have hv0 : f v = 0, from by {apply hf v w hvw, rw hv, rw hw, refl},
have hv1 : f v = 1, from by {apply eq_of_mem_singleton hv},
have hw0 : f w = 0, from by {apply hf v w hvw, rw hv, rw hw, refl},
have hw1 : f w = 1, from by {apply eq_of_mem_singleton hw},
have h1 : f v = f w, from by {rw hv0, rw hw0, refl},
have h2 : v = w, from by {apply hf v w hvw, rw h1},
have h3 : f v = 1, from by {rw hv1, refl},
have h4 : f w = 1, from by {rw h2, rw h3, refl},
have h5 : f w = 0, from by {rw hw0, refl},
have h6 : f v = 0, from by {rw h2, rw h5, refl},
show false, from by {rw h6, rw h3},
},
have hB : ∀ (v w : V), v ≠ w → v ∈ B → w ∈ B → false, from by {
assume (v w : V) (hvw : v ≠ w) (hv : v ∈ B) (hw : w ∈ B),
have hv0 : f v = 0, from by {apply eq_of_mem_singleton hv},
have hv1 : f v = 1, from by {apply hf v w hvw, rw hv, rw hw, refl},
have hw0 : f w = 0, from by {apply eq_of_mem_singleton hw},
have hw1 : f w = 1, from by {apply hf v w hvw, rw hv, rw hw, refl},
have h1 : f v = f w, from by {rw hv0, rw hw0, refl},
have h2 : v = w, from by {apply hf v w hvw, rw h1},
have h3 : f v = 0, from by {rw hv0, refl},
have h4 : f w = 0, from by {rw h2, rw h3, refl},
have h5 : f w = 1, from by {rw hw1, refl},
have h6 : f v = 1, from by {rw h2, rw h5, refl},
show false, from by {rw h6, rw h3},
},
-- This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
have hAB : ∀ (v w : V), v ≠ w → (v ∈ A ∧ w ∈ B) ∨ (v ∈ B ∧ w ∈ A), from by {
assume (v w : V) (hvw : v ≠ w),
have hv0 : f v = 0, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have hv1 : f v = 1, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have hw0 : f w = 0, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have hw1 : f w = 1, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have h1 : f v ≠ f w, from by {apply hf v w hvw},
have h2 : f v = 0 ∨ f v = 1, from by {rw hv0, rw hv1, left, refl},
have h3 : f w = 0 ∨ f w = 1, from by {rw hw0, rw hw1, left, refl},
have h4 : f v = 0 ∧ f w = 1 ∨ f v = 1 ∧ f w = 0, from by {
rw h2, rw h3,
left,
exact ⟨hv0, hw1⟩,
right,
exact ⟨hv1, hw0⟩,
},
have h5 : f v = 0 → f w = 1, from by {rw h4, left, exact and.left},
have h6 : f v = 1 → f w = 0, from by {rw h4, right, exact and.left},
cases h4 with h4 h4,
{
left,
exact ⟨by {apply exists_unique.unique h2 h4.left, rw h4.left, apply mem_singleton_iff.mpr, exact h4.right},
by {apply exists_unique.unique h3 h4.right, rw h4.right, apply mem_singleton_iff.mpr, exact h4.left}⟩,
},
{
right,
exact ⟨by {apply exists_unique.unique h2 h4.left, rw h4.left, apply mem_singleton_iff.mpr, exact h4.right},
by {apply exists_unique.unique h3 h4.right, rw h4.right, apply mem_singleton_iff.mpr, exact h4.left}⟩,
}
},
-- This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
have hAB' : ∀ (v w : V), v ≠ w → (v ∈ A ∧ w ∈ B) ∨ (v ∈ B ∧ w ∈ A), from by {
assume (v w : V) (hvw : v ≠ w),
have hv0 : f v = 0, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have hv1 : f v = 1, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have hw0 : f w = 0, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have hw1 : f w = 1, from by {apply hf v w hvw, rw hf' v, rw hf' w, refl},
have h1 : f v ≠ f w, from by {apply hf v w hvw},
have h2 : f v = 0 ∨ f v = 1, from by {rw hv0, rw hv1, left, refl},
have h3 : f w
end --Needs more than 2000 tokens!
--OUTPUT 4
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
-- Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color.
split,
{
assume hcolorable : G.colorable 2,
-- Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue.
let A := {v : V | G.color v = 0},
let B := {v : V | G.color v = 1},
-- Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$.
have h1 : ∀ u v : V, u ∈ A → v ∈ A → ¬(u,v) ∈ G.E, from by {
assume (u v : V) (hu : u ∈ A) (hv : v ∈ A),
assume (huv : (u,v) ∈ G.E),
have h2 : G.color u = G.color v, from by {
apply hcolorable,
exact ⟨huv,hu,hv⟩,
},
have h3 : G.color u = 0, from by {
apply set.mem_def.mp hu,
},
have h4 : G.color v = 0, from by {
apply set.mem_def.mp hv,
},
exact h2 h3 h4,
},
have h2 : ∀ u v : V, u ∈ B → v ∈ B → ¬(u,v) ∈ G.E, from by {
assume (u v : V) (hu : u ∈ B) (hv : v ∈ B),
assume (huv : (u,v) ∈ G.E),
have h2 : G.color u = G.color v, from by {
apply hcolorable,
exact ⟨huv,hu,hv⟩,
},
have h3 : G.color u = 1, from by {
apply set.mem_def.mp hu,
},
have h4 : G.color v = 1, from by {
apply set.mem_def.mp hv,
},
exact h2 h3 h4,
},
-- This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
have h3 : ∀ u v : V, (u,v) ∈ G.E → (u ∈ A ∧ v ∈ B) ∨ (u ∈ B ∧ v ∈ A), from by {
assume (u v : V) (huv : (u,v) ∈ G.E),
have h4 : G.color u = G.color v, from by {
apply hcolorable,
exact ⟨huv,_,_⟩,
},
have h5 : G.color u = 0 ∨ G.color u = 1, from by {
apply classical.em,
},
cases h5 with h5 h5,
{
have h6 : G.color v = 0, from by {
exact h4 h5,
},
have h7 : u ∈ A, from by {
apply set.mem_def.mpr h5,
},
have h8 : v ∈ A, from by {
apply set.mem_def.mpr h6,
},
have h9 : ¬(u,v) ∈ G.E, from by {
apply h1,
exact h7,
exact h8,
},
have h10 : false, from by {
exact h9 huv,
},
exact h10,
},
{
have h6 : G.color v = 1, from by {
exact h4 h5,
},
have h7 : u ∈ B, from by {
apply set.mem_def.mpr h5,
},
have h8 : v ∈ B, from by {
apply set.mem_def.mpr h6,
},
have h9 : ¬(u,v) ∈ G.E, from by {
apply h2,
exact h7,
exact h8,
},
have h10 : false, from by {
exact h9 huv,
},
exact h10,
},
},
have h4 : ∀ u v : V, (u,v) ∈ G.E → (u ∈ A ∧ v ∈ B), from by {
assume (u v : V) (huv : (u,v) ∈ G.E),
have h5 : (u ∈ A ∧ v ∈ B) ∨ (u ∈ B ∧ v ∈ A), from by {
apply h3,
exact huv,
},
cases h5 with h5 h5,
{
exact h5,
},
{
have h6 : u ∈ B, from by {
apply and.elim_left h5,
},
have h7 : v ∈ A, from by {
apply and.elim_right h5,
},
have h8 : ¬(u,v) ∈ G.E, from by {
apply h2,
exact h6,
exact h7,
},
have h9 : false, from by {
exact h8 huv,
},
exact h9,
},
},
have h5 : ∀ u v : V, (u,v) ∈ G.E → (u ∈ B ∧ v ∈ A), from by {
assume (u v : V) (huv : (u,v) ∈ G.E),
have h6 : (u ∈ A ∧ v ∈ B), from by {
apply h4,
exact huv,
},
have h7 : u ∈ A, from by {
apply and.elim_left h6,
},
have h8 : v ∈ B, from by {
apply and.elim_right h6,
},
have h9 : ¬(u,v) ∈ G.E, from by {
apply h1,
exact h7,
exact h8,
},
have h10 : false, from by {
exact h9 huv,
},
exact h10,
},
have h6 : ∀ u v : V, (u,v) ∈ G.E ↔ (u ∈ A ∧ v ∈ B), from by {
assume (u v : V),
split,
{
assume huv : (u,v) ∈ G.E,
apply h4,
exact huv,
},
{
assume huv : (u ∈ A ∧ v ∈ B),
have h7 : u ∈ A, from by {
apply and.elim_left huv,
},
have h8 : v ∈ B, from by {
apply and.elim_right huv,
},
have h9 : ¬(u,v) ∈ G.E, from by {
apply h1,
exact h7,
exact h8,
},
have h10 : false, from by {
exact h9 huv,
},
exact h10,
},
},
have h7 : ∀ u v : V, (u,v) ∈ G.E ↔ (u ∈ B ∧ v ∈ A), from by {
assume (u v : V),
split,
{
assume huv : (u,v) ∈ G.E,
apply h5,
exact huv,
},
{
assume huv : (u ∈ B ∧ v ∈ A),
have h8 : u ∈ B, from by {
apply and.elim_left huv,
},
have h9 : v ∈ A, from by
end --Needs more than 2000 tokens!
--OUTPUT 5
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
-- Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color. Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue. Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$. This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
have h1 : (G.colorable 2) → (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)), from
assume (h : G.colorable 2),
have h2 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h3 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h4 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h5 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h6 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h7 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h8 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h9 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h10 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h11 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h12 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h13 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h14 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h15 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h16 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h17 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h18 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h19 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h20 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h21 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h22 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h23 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h24 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h25 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h26 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h27 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h28 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h29 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h30 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h31 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h32 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h33 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h34 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
have h35 : ∃ (A B
end --Needs more than 2000 tokens!
--OUTPUT 6
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
{
assume h1 : G.colorable 2,
have h2 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
obtain ⟨f,hf⟩ : ∃ f : V → fin 2, G.is_coloring f, from h1,
let f' : V → fin 2 := f,
have hf' : G.is_coloring f', from hf,
let A := {v : V | f' v = 0},
let B := {v : V | f' v = 1},
have hA : fintype A, from by {
apply fintype.of_equiv,
exact ⟨V,λ v, ⟨f' v,by {rw fin.val_eq_coe,rw fin.coe_eq_zero,exact dec_trivial,}⟩,λ v, ⟨f' v,by {rw fin.val_eq_coe,rw fin.coe_eq_one,exact dec_trivial,}⟩,λ v, rfl,λ v, rfl⟩,
},
have hB : fintype B, from by {
apply fintype.of_equiv,
exact ⟨V,λ v, ⟨f' v,by {rw fin.val_eq_coe,rw fin.coe_eq_zero,exact dec_trivial,}⟩,λ v, ⟨f' v,by {rw fin.val_eq_coe,rw fin.coe_eq_one,exact dec_trivial,}⟩,λ v, rfl,λ v, rfl⟩,
},
have h3 : ∀ (x : V), (f' x = 0) ∨ (f' x = 1), from assume x : V,
begin
have h4 : (f' x = 0) ∨ (f' x = 1), from by apply fin.eq_zero_or_eq_one,
exact h4,
end,
have h4 : ∀ (x : V), (x ∈ A) ∨ (x ∈ B), from assume x : V,
begin
have h5 : (f' x = 0) ∨ (f' x = 1), from h3 x,
have h6 : (x ∈ A) ∨ (x ∈ B), from by {cases h5,left,exact h5,right,exact h5},
exact h6,
end,
have h5 : ∀ (x : V), (x ∈ A) → (x ∈ B) → false, from assume x : V, assume h6 : x ∈ A, assume h7 : x ∈ B,
begin
have h8 : f' x = 0, from by {rw mem_set_of_eq at h6,exact h6},
have h9 : f' x = 1, from by {rw mem_set_of_eq at h7,exact h7},
have h10 : false, from by {rw h8 at h9,exact h9},
exact h10,
end,
have h6 : ∀ (x : V), (x ∈ A) → (x ∈ B) → false, from assume x : V, assume h6 : x ∈ A, assume h7 : x ∈ B,
begin
have h8 : f' x = 0, from by {rw mem_set_of_eq at h6,exact h6},
have h9 : f' x = 1, from by {rw mem_set_of_eq at h7,exact h7},
have h10 : false, from by {rw h8 at h9,exact h9},
exact h10,
end,
have h7 : ∀ (x : V), (x ∈ B) → (x ∈ A) → false, from assume x : V, assume h6 : x ∈ B, assume h7 : x ∈ A,
begin
have h8 : f' x = 1, from by {rw mem_set_of_eq at h6,exact h6},
have h9 : f' x = 0, from by {rw mem_set_of_eq at h7,exact h7},
have h10 : false, from by {rw h8 at h9,exact h9},
exact h10,
end,
have h8 : ∀ (x y : V), (x ∈ A) → (y ∈ B) → (x ≠ y), from assume x y : V, assume h9 : x ∈ A, assume h10 : y ∈ B,
begin
have h11 : (x ∈ A) → (y ∈ B) → false, from h5 x y,
have h12 : (x ∈ A) → (y ∈ B) → false, from h6 x y,
have h13 : (x ∈ A) → (y ∈ B) → false, from h7 x y,
have h14 : (x ∈ A) → (y ∈ B) → false, from h8 x y,
have h15 : (x ∈ A) → (y ∈ B) → false, from h9 x y,
have h16 : (x ∈ A) → (y ∈ B) → false, from h10 x y,
have h17 : (x ∈ A) → (y ∈ B) → false, from h11 x y,
have h18 : (x ∈ A) → (y ∈ B) → false, from h12 x y,
have h19 : (x ∈ A) → (y ∈ B) → false, from h13 x y,
have h20 : (x ∈ A) → (y ∈ B) → false, from h14 x y,
have h21 : (x ∈ A) → (y ∈ B) → false, from h15 x y,
have h22 : (x ∈ A) → (y ∈ B) → false, from h16 x y,
have h23 : (x ∈ A) → (y ∈ B) → false, from h17 x y,
have h24 : (x ∈ A) → (y ∈ B) → false, from h18 x y,
have h25 : (x ∈ A) → (y ∈ B) → false, from h19 x y,
have h26 : (x ∈ A) → (y ∈ B) → false, from h20 x y,
have h27 : (x ∈ A) → (y ∈ B) → false, from h21 x y,
have h28 : (x ∈ A) → (y ∈ B) → false, from h22 x y,
have h29 : (x ∈ A) → (y ∈ B) → false, from h23 x y,
have h30 : (x ∈ A) → (y ∈ B) → false, from h24 x y,
have h31 : (x ∈ A) → (y ∈ B) → false, from h25 x y,
have h32 : (x ∈ A) → (y ∈ B) → false, from h26 x y,
have h33 : (x ∈ A) → (y ∈ B) → false, from h27 x y,
have h34 : (x ∈ A) → (y ∈ B) → false, from h28 x y,
have h35 : (x ∈ A) → (y ∈ B) → false, from h29 x y,
have h36 : (x ∈ A) → (y ∈ B) → false, from h30 x y,
have h37 : (x ∈ A) → (y ∈ B) → false, from h31 x y,
have h38 : (x ∈ A) → (y ∈ B) → false, from h32 x y,
have h39 : (x ∈ A) → (y ∈ B) → false, from h33 x y,
have h40 : (x ∈ A) → (y ∈
end --Needs more than 2000 tokens!
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
-- $A$ and $B$ are sets. $A$ and $B$ belong to power set of $S$
assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
-- Then $A ⊆ S$ and $B ⊆ S$, by power set definition
have h1 : (A ⊆ S) ∧ (B ⊆ S), from by {split,apply set.subset_of_mem_powerset,exact hA,apply set.subset_of_mem_powerset,exact hB},
-- Then $(A ∩ B) ⊆ A$, by intersection of set is a subset
have h2 : (A ∩ B) ⊆ A, from by apply set.inter_subset_left,
-- Then $(A ∩ B) ⊆ S$, by subset relation is transitive
have h3 : (A ∩ B) ⊆ S, from by {apply set.subset.trans h2 h1.left},
-- Hence $(A ∩ B) ∈ 𝒫 S$, by power set definition
show (A ∩ B) ∈ 𝒫 S, from by {apply set.mem_powerset h3},
end
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
-- expand the power
calc (x + y)^2 = (x+y)*(x+y) : by rw sq
-- distributive property of multiplication over addition gives:
... = x*(x+y) + y*(x+y) : by rw add_mul
-- applying the above property further gives:
... = x*x + x*y + y*x + y*y : by {rw [mul_comm x (x+y),mul_comm y (x+y)], rw [add_mul,add_mul], ring}
-- rearranging the terms using commutativity and adding gives:
... = x^2 + 2*x*y + y^2 : by {repeat {rw ← sq}, rw mul_comm y x, ring}
end
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
-- Group has Latin Square Property
have h1 : ∀ a b : G, ∃! x : G, a * x = b, from by {
assume a b : G, use a⁻¹ * b, obviously, },
have h2 : ∀ a b : G, ∃! y : G, y * a = b, from by {
assume a b : G, use b * a⁻¹, obviously, },
-- Setting $b = a$, this becomes:
have h3 : ∀ a : G, ∃! x : G, a * x = a, from
assume a : G, h1 a a,
have h4 : ∀ a : G, ∃! y : G, y * a = a, from
assume a : G, h2 a a,
-- These $x$ and $y$ are both $(1 : G)$, by definition of identity element
have h5 : ∀ a : G, classical.some (h3 a).exists = (1 : G), from assume a :G,
exists_unique.unique (h3 a) (classical.some_spec (exists_unique.exists (h3 a)))
(mul_one a),
have h6 : ∀ a : G, classical.some (h4 a).exists = (1 : G), from assume a : G,
exists_unique.unique (h4 a) (classical.some_spec (exists_unique.exists (h4 a))) (one_mul a),
show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by {
use (1 : G),
have h7 : ∀ e : G, (∀ a : G, e * a = a ∧ a * e = a) → e = 1, from by {
assume (e : G) (hident : ∀ a : G, e * a = a ∧ a * e = a),
have h8 : ∀ a : G, e = classical.some (h3 a).exists, from assume (a : G),
exists_unique.unique (h3 a) (hident a).right
(classical.some_spec (exists_unique.exists (h3 a))),
have h9 : ∀ a : G, e = classical.some (h4 a).exists, from assume (a : G),
exists_unique.unique (h4 a) (hident a).left
(classical.some_spec (exists_unique.exists (h4 a))),
show e = (1 : G), from eq.trans (h9 e) (h6 _),
},
exact ⟨by obviously, h7⟩,
}
end
/--`theorem`
Bipartite Graph is two colorable
Let $G$ be a graph. Then $G$ is 2-colorable if and only if $G$ is bipartite.
`proof`
Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color. Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue. Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$. This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
Conversely, suppose $G$ is bipartite, that is, we can partition the vertices into two subsets $V_{1}, V_{2}$ every edge has one endpoint in $V_{1}$ and the other in $V_{2}$. Then coloring every vertex of $V_{1}$ red and every vertex of $V_{2}$ blue yields a valid coloring, so $G$ is 2-colorable.
QED
-/
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
FEW SHOT PROMPTS TO CODEX(END)-/
|
## run "../flow_embedding.py" first to generate data
using Plots
using SparseArrays
using LinearAlgebra
using TextParse
using DelimitedFiles
using PyCall
using ColorSchemes
function read_graph(filename)
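    # Read a whitespace-delimited edge list (0-based indices), shift to
    # 1-based, symmetrize with max.(A, A'), and return the (src, dst)
    # pairs of the strict upper triangle.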
src, dst, val = csvread(filename;
spacedelim=true, colparsers=[Int,Int,Float64], header_exists=false)[1]
src .+= 1
dst .+= 1
n = maximum(src)
n = max(maximum(dst),n)
A = sparse(src, dst, val, n, n)
A = max.(A,A')
return findnz(triu(A,1))[1:2]
end
src, dst = read_graph("../../dataset/lawlor-spectra-k32.edgelist.gz")
# x, y = csvread("$../../dataset/lawlor-spectra-k32.edgelist";
# spacedelim=true, colparsers=[Float64,Float64],
# header_exists=false)[1]
V = readdlm("../../dataset/lawlor-spectra-k32.coords")
V[:,1] *= -10
V[:,2] *= -4
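
# plotperm maps each entry of v to its rank (1 = smallest); it is used
# below to spread the embedding coordinates out uniformly for plotting.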
function plotperm(v)
return invperm(sortperm(v))
end
# function myscatter(x,y,color;kwargs...)
# p = sortperm(color)
# scatter(x[p],y[p], marker_z = color[p],
# label="", alpha=0.1, markerstrokewidth=0,
# colorbar=false, framestyle=:none; kwargs...)
# end
##
pfile = "embeddings/spectral_3_bfs.p.gz"
pickle = pyimport("pickle")
gzip = pyimport("gzip")
f = gzip.open(pfile)
data= pickle.load(f)
f.close()
function _pycsr_to_sparse(rowptr, colinds, vals, m, n)
rowinds = zeros(eltype(colinds),0)
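    # Expand the CSR row pointer into one explicit row index per stored
    # entry: row i owns rowptr[i+1]-rowptr[i] entries, so i is pushed
    # that many times (only the length of the inner range matters).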
for i in 1:length(rowptr)-1
for j in rowptr[i]:rowptr[i+1]-1
push!(rowinds, i)
end
end
return sparse(rowinds, colinds, vals, m, n)
end
function pycsr_to_sparse(pycsr)
rowptr = pycsr.indptr
colinds = pycsr.indices
vals = pycsr.data
colinds .+= 1
m, n = data.shape
    # put in a function so we can do type inference
return _pycsr_to_sparse(rowptr, colinds, vals, m, n )
end
X = pycsr_to_sparse(data)
flipexp = findall(X[517173,:] .> 0) # this is at the opposite end
for f in flipexp
X[:,f] = 1.0 .-X[:,f]
end
X = Matrix(X) # convert to Matrix
X ./= 10 # convert to 0, 1
subset = vec((sum(X,dims=2) .> 0))
subsetnodes = findall(subset)
subset_c = vec((sum(X,dims=2) .<= 0))
subsetnodes_c = findall(subset_c)
U,sig,Vt = svd(X.-1)
new_coords = zeros(size(X,1),2)
new_coords[subsetnodes,1] = plotperm(-1 * U[subsetnodes,1])
new_coords[subsetnodes,2] = plotperm(-1 * U[subsetnodes,2])
i = argmax(new_coords[subsetnodes,1])
new_coords[subsetnodes_c,1] .= new_coords[subsetnodes[i],1] + 10000
new_coords[subsetnodes_c,2] .= new_coords[subsetnodes[i],2] + 10000
using Clustering
Xs = transpose(new_coords[subsetnodes,:])
R = kmeans(Xs,100; maxiter=200, display=:iter)
clusters = Dict(i=>[] for i = 1:100)
for i = 1:length(subsetnodes)
push!(clusters[R.assignments[i]],subsetnodes[i]-1)
end
pushfirst!(PyVector(pyimport("sys")."path"), "/homes/liu1740/Research/LocalGraphClustering/")
lgc = pyimport("localgraphclustering")
G = lgc.GraphLocal("../../dataset/lawlor-spectra-k32.edgelist","edgelist")
conds = []
for i = 1:100
push!(conds,G.compute_conductance(clusters[i]))
end
|
Formal statement is: lemma diameter_empty [simp]: "diameter{} = 0" Informal statement is: The diameter of the empty set is $0$. |
\label{problem_definition}
Clustering is the task of grouping items so that elements belonging
to the same group (the \emph{cluster}) are more similar to each other than to
the elements assigned to the other groups.\\
More formally, given as input:
\begin{itemize}
\item $X = \{x_0, \dots ,x_n\}$, the initial set of elements.
\item $d: X \times X \to \mathbb{R}$, a \emph{metric} measuring the dissimilarity between elements.
\end{itemize}
The final goal is to find the cluster configuration
\begin{equation*}
C = \left\{ c_0, \dots , c_m \right\} \mid \bigcup_{c \in C} c = X
\end{equation*}
partitioning $X$ into $m$ clusters, minimizing the intra-cluster distance
(the dual problem of maximizing the inter-cluster distance):
\begin{equation}
	\underset{C}{\mathrm{argmin}}
	\sum_{c \in C}
	\sum_{i,j}^{|c|}
	d(c_i,c_j)
\end{equation}
\subsection*{Challenges}
The concept of clustering is simple and powerful; moreover, its versatility
and generality are its greatest strength and, at the same time, the source of many difficulties.
%% Metrics identification
Since the only strict requirement is the definition of a metric over the
domain of $X$, clustering is applied to a wide variety of problems.
Clearly, each domain offers different optimization opportunities and
particular challenges.
In particular, the choice of the metric heavily influences the final outcome quality.
As a result, even the medical~\cite{siless2013comparison}, mechanical
engineering~\cite{wilding2011clustering} and mobile networks~\cite{cheng2009stability}
literatures feature studies that address this particular challenge by
suggesting highly specialized distance functions.
%% Inter/Intra cluster distance measure
Once the proper metric is identified, the next major influencing factors
are the mathematical definitions of the ``intra-cluster'' and ``inter-cluster''
distances. They vary a lot across implementations, leading to
completely different clustering configurations.
For example, three methods are widely used when performing agglomerative
clustering to define the distance between two clusters:
the average, the minimum, and the maximum distance.
The average approach uses the cluster barycenters, while the minimum (maximum)
relies upon the minimal (maximal) distance between any two points
belonging to different clusters.
\subsection*{Choosing the $k$ parameter}
The first main issue of the k-means described in Section \ref{related} is the choice
of the number of clusters the dataset has to be divided into.
The original k-means does not address this problem at all. Thus, some heuristic has to
be applied. One possibility is to rely on deep knowledge of the structure underlying the
dataset, which gives an idea of the target number of groups. On the other hand, especially
in exploratory data analysis, this value cannot be known in advance.
Hence, the most common solution is to run the algorithm with increasing values of $k$ and
finally keep the value that produces the best-quality clusters.
Choosing the right value for this parameter is crucial, since a wrong one may
instruct the algorithm to collapse entities that are actually very far from each other
(or vice versa).
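
A minimal sketch of this heuristic (illustrative Python, assuming a
k-means implementation such as scikit-learn's that reports the
within-cluster sum of squared distances as \texttt{inertia\_}):
\begin{verbatim}
from sklearn.cluster import KMeans

def choose_k(X, k_max=10, rel_improvement=0.15):
    # Run k-means for k = 1..k_max and record the within-cluster
    # sum of squared distances (the "inertia").
    inertias = [KMeans(n_clusters=k, n_init=10).fit(X).inertia_
                for k in range(1, k_max + 1)]
    # Keep the first k after which adding a cluster no longer
    # improves the inertia by more than rel_improvement ("elbow").
    for k in range(1, k_max):
        if (inertias[k-1] - inertias[k]) / inertias[k-1] < rel_improvement:
            return k
    return k_max
\end{verbatim}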
\subsection*{Positioning the initial centroids}
Assuming an optimal value for $k$ has been found, the other big problem is how to position the $k$
initial centroids.
Given enough time, the k-means algorithm always converges, but possibly only to a local
minimum. The outcome is highly dependent on the initialization of the centroids. The bootstrapping
phase can be addressed in various ways. For example, the
\emph{scikit-learn}\footnote{http://scikit-learn.org} Python library
by default uses an approach that positions the initial centroids to be (generally) distant
from each other. This procedure provides provably better results than using a random
one~\cite{arthur2007k}.
Using a proper and efficient bootstrapping heuristic is very important, since misplacing
the initial centroids may prevent the algorithm from finding the real clusters underlying
the data.
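
A minimal sketch of such a seeding procedure, in the spirit of
k-means++~\cite{arthur2007k} (illustrative Python, not scikit-learn's
actual implementation):
\begin{verbatim}
import numpy as np

def kmeanspp_init(X, k, seed=0):
    rng = np.random.default_rng(seed)
    # First centroid: a uniformly random data point.
    centroids = [X[rng.integers(len(X))]]
    for _ in range(k - 1):
        # Squared distance from every point to its nearest centroid.
        d2 = np.min(((X[:, None, :] - np.array(centroids)[None, :, :]) ** 2)
                    .sum(axis=2), axis=1)
        # Sample the next centroid with probability proportional to d2,
        # so far-away points are preferred and the seeds spread out.
        centroids.append(X[rng.choice(len(X), p=d2 / d2.sum())])
    return np.array(centroids)
\end{verbatim}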
|
\section{More refined sparse grid and improved estimates}
This section presents a modified estimate \cite{bungartz2004sparse} of the approximation property of sparse grids. Recall
$$
\mathcal V_{\mathbf{k}} = \mathcal V_{k_1} \bigotimes \mathcal V_{k_2}\bigotimes\cdots \bigotimes \mathcal V_{k_d},
$$
where $\mathbf{k}=(k_1,k_2,\cdots,k_d)$ is a multi-index. Let
$$
\mathbf{I_l}=\{\mathbf{i}\in \mathbb{N}^d: \mathbf{1}\le \mathbf{i}\le 2^\mathbf{l}-1, i_j \text{ is odd for all }1\le j\le d\},
$$
and the hierarchical basis
$$
\{\psi_\mathbf{k, i}^d: \mathbf{i}\in \mathbf{I_k}, |\mathbf{k}|_\infty\le J\}.
$$
Recall $\mathcal V_{\mathbf{k}}$ in \eqref{sparseVk}. Let
$$
R_J=\left\{\mathbf{k}: |\mathbf{k}|_1 - \frac25 \log_2\|2^{\mathbf{k}}\|_{l^2}\le J+d-1- \frac15 \log_2 (4^J +4d-4)\right\},
$$
\begin{equation}
\label{sparseVJ}
S_J =\bigoplus_{\mathbf{k}\in R_J} \mathcal V_{\mathbf{k}}.
\end{equation}
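As a quick sanity check of the selection rule (a direct computation, not
taken from \cite{bungartz2004sparse}): for $d=1$ one has
$\|2^{\mathbf{k}}\|_{l^2}=2^{k}$, so the condition defining $R_J$ becomes
$$
k-\frac{2}{5}k \;\le\; J-\frac{1}{5}\log_2 4^{J} \;=\; J-\frac{2}{5}J,
$$
i.e.\ $k\le J$, and $S_J$ reduces to the full one-dimensional space of level $J$.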
The following result is analyzed in \cite{bungartz2004sparse}.
\begin{theorem}\Label{lm:modifySparse1}
Suppose that $u\in W^{2\mathbf{e}, \infty}(D_d)$. Then
$$
\inf_{\chi\in S_J} |u-\chi|_{1, D_d}\le C h_J \|u\|_{2\mathbf{e},\infty},
$$
and the dimension of $S_J$ is $\mathcal{O}(de^dh_J^{-1})$.
\end{theorem}
\begin{proof}
First, we prove that $S_J$ is a subspace of $S_{J,J+d-1}^d$.
If $|\mathbf{k}|_{1}=J+d-1+i, i \in \mathbb{N}$, we can prove
$$
\|2^{\mathbf{k}}\|_{l^2}^2=\sum_{s=1}^{d} 4^{k_{s}}\le 4^{J+i}+4 d-4
$$
by induction with respect to $d$.
% It is trivial when $d=1$. For $\mathbf{k}\in \mathbb{R}^d$ and $|\mathbf{k}|_{1}=J+d-1+i$, there exists $1\le j\le d$ such that $k_j\ge 1$, thus
%\begin{equation}
%\begin{split}
%\sum_{s=1}^{d} 4^{k_{s}} &= \sum_{s=1, s\neq i}^{d} 4^{k_{s}} + 4^{k_{j}}\le 4^{J+i+1-k_j}+4 (d-1)-4 + 4^{k_j}\le 4^{J+i} + 4d-4.
%\end{split}
%\end{equation}
For subspaces $\mathcal V_{\mathbf{k}}$ with $|\mathbf{k}|_{1}=J+d-1+i, i \in \mathbb{N},$ we have
\begin{equation}
\begin{split}
|\mathbf{k}|_{1}-\frac{1}{5} \cdot \log _{2}\left(\sum_{s=1}^{d} 4^{k_{s}}\right) & \geq J+d-1+i-\frac{1}{5} \cdot \log _{2}\left(4^{J+i}+4 d-4\right) \\
& \geq J+d-1+i-\frac{1}{5} \cdot \log _{2}\left(4^{i}\left(4^{J}+4 d-4\right)\right) \\
&>J+d-1-\frac{1}{5} \cdot \log _{2}\left(4^{J}+4 d-4\right)
\end{split}
\end{equation}
Therefore, no $\mathcal V_{\mathbf{k}}$ with $|\mathbf{k}|_{1}>J+d-1$ can belong to $S_J$. Consequently,
\begin{equation}
S_J\subset S_{J,J+d-1}^d,\qquad \left|S_J\right| \leq\left|S_{J,J+d-1}^d\right|.
\end{equation}
Then, we prove that the dimension of $S_J$ is $\mathcal{O}(h_J^{-1})$.
Note that $\left|\mathcal V_{\mathbf{k}}\right| = 2^{|\mathbf{k}|_{1}-d}$. For any $\mathbf{k}\in R_J$ with $|\mathbf{k}|_{1}=J+d-1-i$, $ \sum_{j=1}^d 4^{k_j}\ge {4^J+4d-4\over 32^i}$. By \eqref{sparseVJ},
\begin{equation}
\begin{split}
\mbox{dim}S_J&=\sum_{i=0}^{J-1} \sum_{|\mathbf{k}|_{1}=J+d-1-i, \sum_{j=1}^d 4^{k_j}\ge {4^J+4d-4\over 32^i}} \mbox{dim}\mathcal V_{\mathbf{k}}
\\
&=2^{J-1} \cdot \sum_{i=0}^{J-1} 2^{-i} \sum_{|\mathbf{k}|_{1}=J+d-1-i, \sum_{j=1}^d 4^{k_j}\ge {4^J+4d-4\over 32^i}}1
\\
& \leq 2^{J-1} \cdot \lim _{J \rightarrow \infty} \sum_{i=0}^{J-1} 2^{-i} \sum_{|\mathbf{k}|_{1}=J+d-1-i, \sum_{j=1}^d 4^{k_j}\ge {4^J+4d-4\over 32^i}}1
\\
&=2^{J-1} d \lim _{J \rightarrow \infty} \sum_{i=0}^{J-1} 2^{-i} \left(\begin{array}{c}
d-1+\lfloor 1.5 i\rfloor \\
d-1
\end{array}\right).
\end{split}
\end{equation}
Let $j=\lfloor 1.5 i\rfloor $. Since $\displaystyle\sum_{i=0}^{\infty} x^{i} \cdot\left(\begin{array}{c}k+i \\ k\end{array}\right)=(1-x)^{-k-1}$ for $k \in \mathbb{N}_{0}$ and $0<x<1$,
\begin{equation}
\begin{split}
\mbox{dim}S_J
& \leq 2^{J} \cdot \frac{d}{2} \cdot \sum_{j=0}^{\infty} 2^{-\frac{2}{3} j} \cdot\left(\begin{array}{c}
d-1+j \\
d-1
\end{array}\right)
\\
&=2^{J} \cdot \frac{d}{2} \cdot\left(1-2^{-\frac{2}{3}}\right)^{-d}
\\
& \leq 2^{J} \cdot \frac{d}{2} \cdot \mathrm{e}^{d}=\mathcal{O}(h_J^{-1}).
\end{split}
\end{equation}
Next we consider the approximation property of $S_J$. Let $\displaystyle u_J=\sum_{\mathbf{k}\in R_J}u_{\mathbf{k}}$. Note that
$$
\left|u-u_J\right|_1 \leq\left|u-u_{J,J+d-1}\right|_1+\left|u_{J,J+d-1}-u_J\right|_1.
$$
Since $\left|u-u_{J,J+d-1}\right|_1=O\left(h_{J}\right)$, we can restrict ourselves to $\left|u_{J,J+d-1}-u_J\right|_1$. Note that for $i \in \mathbb{N}_{0},$
\begin{equation}
\mathcal V_{\mathbf{k}}\subset S_J, \quad \mbox{if } |\mathbf{k}|_{1}=J+d-1-i \ \mbox{ and }\ |\mathbf{k}|_{\infty} \geq J-2.5 i.
\end{equation}
Note that
$$
\left|u_{J,J+d-1}-u_J\right|_1\leq \sum_{\mathcal V_{\mathbf k} \in S_{J,J+d-1}^d \setminus S_J}\left|u_{\mathbf{k}}\right|_1
\leq \sum_{i=0}^{i^{*}} \sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i}\left|u_{\mathbf{k}}\right|_1,
$$
where $i^*$ is the maximum value of $i$ for which the set of indices $\mathbf{k}$ with $|\mathbf{k}|_1=J+d-1-i$ and $|\mathbf{k}|_\infty<J-2.5i$ is non-empty.
Therefore, we obtain with \eqref{sparseuk} that
\begin{equation*}
\begin{aligned}
\sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i}\left|u_{\mathbf{k}}\right|_1
\leq \frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i} 4^{-|\mathbf{k}|_{1}} \cdot\left(\sum_{j=1}^{d} 4^{k_{j}}\right)^{1 / 2}.
\end{aligned}
\end{equation*}
Since
$
\displaystyle \sum_{j=1}^{d} 4^{k_{j}}=\sum_{j=1}^{d} 2^{2k_{j}}\leq \left(\sum_{j=1}^{d} 2^{k_{j}}\right)^2,
$
\begin{equation*}
\begin{aligned}
\sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i}\left|u_{\mathbf{k}}\right|_1
&\leq \frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot 4^{-J-d+1+i} \sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i}\left(\sum_{j=1}^{d} 2^{k_{j}}\right)
\\
&\leq \frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot 4^{-J-d+1+i} \sum_{j=1}^{J-1-\lfloor 2.5 i\rfloor} d \cdot\left(\begin{array}{c}
J+d-2-i-j \\
d-2
\end{array}\right) \cdot 2^{j}
\\
&=\frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} 4^{-J-d+1+i} \sum_{k=1}^{J-1-\lfloor 2.5 i\rfloor} d\left(\begin{array}{c}
d-2+\lfloor 1.5 i\rfloor+k \\
d-2
\end{array}\right) 2^{J-\lfloor 2.5 i\rfloor-k}
\\
&=\frac{d \cdot|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} 4^{-(d-1)} 2^{-J-\left\lfloor\frac{i}{2}\right\rfloor} \sum_{k=1}^{J-1-\lfloor 2.5 i\rfloor}\left(\begin{array}{c}
d-2+\lfloor 1.5 i\rfloor+k \\
d-2
\end{array}\right) 2^{-k} .
\end{aligned}
\end{equation*}
Thus,
\begin{equation*}
\begin{aligned}
\left|u_{J,J+d-1}-u_J\right|_1
&\leq \frac{d \cdot|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot 4^{-(d-1)} \cdot 2^{-J} \cdot 2 \cdot 5^{d-1}\\
&=\frac{d \cdot|u|_{2\mathbf{e},\infty}}{3^{(d-1) / 2} \cdot 4^{d-1}} \cdot\left(\frac{5}{2}\right)^{d-1} \cdot 2^{-J}\le Ch_J,
\end{aligned}
\end{equation*}
which completes the proof.
%\begin{equation*}
%\begin{aligned}
%\left|u_{J,J+d-1}-u_J\right|_1&\leq \sum_{\mathcal V_{\mathbf k} \in S_{J,J+d-1}^d \setminus S_J}\left|u_{\mathbf{k}}\right|_1
%\\
%&\leq \sum_{i=0}^{i^{*}} \sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i}\left|u_{\mathbf{k}}\right|_1
%\\
%&\leq \frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot \sum_{i=0}^{i^{*}} \sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i} 4^{-|\mathbf{k}|_{1}} \cdot\left(\sum_{j=1}^{d} 4^{k_{j}}\right)^{1 / 2}
%\\
%&\leq \frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot 4^{-J-d+1} \cdot \sum_{i=0}^{i^{*}} 4^{i} \cdot \sum_{\left.|\mathbf{k}\right|_{1}=J+d-1-i,\left.|\mathbf{k}\right|_{\infty}<J-2.5 i}\left(\sum_{j=1}^{d} 2^{k_{j}}\right)
%\\
%&\leq \frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot 4^{-J-d+1} \cdot \sum_{i=0}^{i^{*}} 4^{i} \cdot \sum_{j=1}^{J-1-\lfloor 2.5 i\rfloor} d \cdot\left(\begin{array}{c}
%J+d-2-i-j \\
%d-2
%\end{array}\right) \cdot 2^{j}
%\\
%&=\frac{|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} 4^{-J-d+1} \sum_{i=0}^{i^{*}} 4^{i} \sum_{k=1}^{J-1-\lfloor 2.5 i\rfloor} d\left(\begin{array}{c}
%d-2+\lfloor 1.5 i\rfloor+k \\
%d-2
%\end{array}\right) 2^{J-\lfloor 2.5 i\rfloor-k}
%\\
%&=\frac{d \cdot|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} 4^{-(d-1)} 2^{-J} \sum_{i=0}^{i^{*}} 2^{-\left\lfloor\frac{i}{2}\right\rfloor} \sum_{k=1}^{J-1-\lfloor 2.5 i\rfloor}\left(\begin{array}{c}
%d-2+\lfloor 1.5 i\rfloor+k \\
%d-2
%\end{array}\right) 2^{-k}
%\\
%&\leq \frac{d \cdot|u|_{2\mathbf{e},\infty}}{2 \cdot 12^{(d-1) / 2}} \cdot 4^{-(d-1)} \cdot 2^{-J} \cdot 2 \cdot 5^{d-1}\\
%&=\frac{d \cdot|u|_{2\mathbf{e},\infty}}{3^{(d-1) / 2} \cdot 4^{d-1}} \cdot\left(\frac{5}{2}\right)^{d-1} \cdot 2^{-J}\le Ch_J,
%\end{aligned}
%\end{equation*}
\end{proof}
|
{-# LANGUAGE ViewPatterns #-}
-- |
-- Module : Statistics.Test.WilcoxonT
-- Copyright : (c) 2010 Neil Brown
-- License : BSD3
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- The Wilcoxon matched-pairs signed-rank test is non-parametric test
-- which could be used to test whether two related samples have
-- different means.
module Statistics.Test.WilcoxonT (
-- * Wilcoxon signed-rank matched-pair test
-- ** Test
wilcoxonMatchedPairTest
-- ** Building blocks
, wilcoxonMatchedPairSignedRank
, wilcoxonMatchedPairSignificant
, wilcoxonMatchedPairSignificance
, wilcoxonMatchedPairCriticalValue
, module Statistics.Test.Types
-- * References
-- $references
) where
-- Note that: wilcoxonMatchedPairSignedRank == (\(x, y) -> (y, x)) . flip wilcoxonMatchedPairSignedRank
-- The samples are zipped together: if one is longer than the other, both are truncated
-- to the length of the shorter sample.
-- The value returned is the pair (T+, T-). T+ is the sum of positive ranks (the
-- ranks of the differences where the first parameter is higher) whereas T- is
-- the sum of negative ranks (the ranks of the differences where the second parameter is higher).
-- These values mean little by themselves, and should be combined with the 'wilcoxonSignificant'
-- function in this module to get a meaningful result.
import Data.Function (on)
import Data.List (findIndex)
import Data.Ord (comparing)
import qualified Data.Vector.Unboxed as U
import Prelude hiding (sum)
import Statistics.Function (sortBy)
import Statistics.Sample.Internal (sum)
import Statistics.Test.Internal (rank, splitByTags)
import Statistics.Test.Types
import Statistics.Types -- (CL,pValue,getPValue)
import Statistics.Distribution
import Statistics.Distribution.Normal
-- | Calculate (n,T⁺,T⁻) values for both samples. Where /n/ is reduced
-- sample where equal pairs are removed.
wilcoxonMatchedPairSignedRank :: (Ord a, Num a, U.Unbox a) => U.Vector (a,a) -> (Int, Double, Double)
wilcoxonMatchedPairSignedRank ab
= (nRed, sum ranks1, negate (sum ranks2))
where
-- Positive and negative ranks
(ranks1, ranks2) = splitByTags
$ U.zip tags (rank ((==) `on` abs) diffs)
-- Sorted list of differences
diffsSorted = sortBy (comparing abs) -- Sort the differences by absolute difference
$ U.filter (/= 0) -- Remove equal elements
$ U.map (uncurry (-)) ab -- Work out differences
nRed = U.length diffsSorted
-- Sign tags and differences
(tags,diffs) = U.unzip
$ U.map (\x -> (x>0 , x)) -- Attach tags to distribution elements
$ diffsSorted
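
-- A worked example for 'wilcoxonMatchedPairSignedRank' (informal
-- illustration; the numbers below are computed by hand):
--
-- > wilcoxonMatchedPairSignedRank (U.fromList
-- >   [(125,110),(115,122),(130,125),(140,120),(140,140)]
-- >   :: U.Vector (Double,Double))
--
-- The zero difference (140,140) is removed, leaving n = 4. Sorting the
-- remaining differences 15, -7, 5, 20 by absolute value assigns ranks
-- 1 (to 5), 2 (to -7), 3 (to 15) and 4 (to 20), so T+ = 1+3+4 = 8,
-- T- = -2, and the expected result is (4, 8.0, -2.0).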
-- | The coefficients for x^0, x^1, x^2, etc, in the expression
-- \prod_{r=1}^s (1 + x^r). See the Mitic paper for details.
--
-- We can define:
-- f(1) = 1 + x
-- f(r) = (1 + x^r)*f(r-1)
-- = f(r-1) + x^r * f(r-1)
-- The effect of multiplying the equation by x^r is to shift
-- all the coefficients by r down the list.
--
-- This list will be processed lazily from the head.
coefficients :: Int -> [Integer]
coefficients 1 = [1, 1] -- 1 + x
coefficients r = let coeffs = coefficients (r-1)
(firstR, rest) = splitAt r coeffs
in firstR ++ add rest coeffs
where
add (x:xs) (y:ys) = x + y : add xs ys
add xs [] = xs
add [] ys = ys
-- This list will be processed lazily from the head.
summedCoefficients :: Int -> [Double]
summedCoefficients n
| n < 1 = error "Statistics.Test.WilcoxonT.summedCoefficients: nonpositive sample size"
| n > 1023 = error "Statistics.Test.WilcoxonT.summedCoefficients: sample is too large (see bug #18)"
| otherwise = map fromIntegral $ scanl1 (+) $ coefficients n
-- | Tests whether a given result from a Wilcoxon signed-rank matched-pairs test
-- is significant at the given level.
--
-- This function can perform a one-tailed or two-tailed test. If the first
-- parameter to this function is 'TwoTailed', the test is performed two-tailed to
-- check if the two samples differ significantly. If the first parameter is
-- 'OneTailed', the check is performed one-tailed to decide whether the first sample
-- (i.e. the first sample you passed to 'wilcoxonMatchedPairSignedRank') is
-- greater than the second sample (i.e. the second sample you passed to
-- 'wilcoxonMatchedPairSignedRank'). If you wish to perform a one-tailed test
-- in the opposite direction, you can either pass the parameters in a different
-- order to 'wilcoxonMatchedPairSignedRank', or simply swap the values in the resulting
-- pair before passing them to this function.
wilcoxonMatchedPairSignificant
:: PositionTest -- ^ How to compare two samples
-> PValue Double -- ^ The p-value at which to test (e.g. @mkPValue 0.05@)
-> (Int, Double, Double) -- ^ The (n,T⁺, T⁻) values from 'wilcoxonMatchedPairSignedRank'.
-> Maybe TestResult -- ^ Return 'Nothing' if the sample was too
-- small to make a decision.
wilcoxonMatchedPairSignificant test pVal (sampleSize, tPlus, tMinus) =
case test of
-- According to my nearest book (Understanding Research Methods and Statistics
-- by Gary W. Heiman, p590), to check that the first sample is bigger you must
-- use the absolute value of T- for a one-tailed check:
AGreater -> do crit <- wilcoxonMatchedPairCriticalValue sampleSize pVal
return $ significant $ abs tMinus <= fromIntegral crit
BGreater -> do crit <- wilcoxonMatchedPairCriticalValue sampleSize pVal
return $ significant $ abs tPlus <= fromIntegral crit
-- Otherwise you must use the value of T+ and T- with the smallest absolute value:
--
-- Note that in absence of ties sum of |T+| and |T-| is constant
-- so by selecting minimal we are performing two-tailed test and
-- look and both tails of distribution of T.
SamplesDiffer -> do crit <- wilcoxonMatchedPairCriticalValue sampleSize (mkPValue $ p/2)
return $ significant $ t <= fromIntegral crit
where
t = min (abs tPlus) (abs tMinus)
p = pValue pVal
-- | Obtains the critical value of T to compare against, given a sample size
-- and a p-value (significance level). Your T value must be less than or
-- equal to the return of this function in order for the test to work out
-- significant. If there is a Nothing return, the sample size is too small to
-- make a decision.
--
-- 'wilcoxonSignificant' tests the return value of 'wilcoxonMatchedPairSignedRank'
-- for you, so you should use 'wilcoxonSignificant' for determining test results.
-- However, this function is useful, for example, for generating lookup tables
-- for Wilcoxon signed rank critical values.
--
-- The return values of this function are generated using the method
-- detailed in the Mitic's paper. According to that paper, the results
-- may differ from other published lookup tables, but (Mitic claims)
-- the values obtained by this function will be the correct ones.
wilcoxonMatchedPairCriticalValue ::
Int -- ^ The sample size
-> PValue Double -- ^ The p-value (e.g. @mkPValue 0.05@) for which you want the critical value.
-> Maybe Int -- ^ The critical value (of T), or Nothing if
-- the sample is too small to make a decision.
wilcoxonMatchedPairCriticalValue n pVal
| n < 100 =
case subtract 1 <$> findIndex (> m) (summedCoefficients n) of
Just k | k < 0 -> Nothing
| otherwise -> Just k
Nothing -> error "Statistics.Test.WilcoxonT.wilcoxonMatchedPairCriticalValue: impossible happened"
| otherwise =
case quantile (normalApprox n) p of
z | z < 0 -> Nothing
| otherwise -> Just (round z)
where
p = pValue pVal
m = (2 ** fromIntegral n) * p
-- | Works out the significance level (p-value) of a T value, given a sample
-- size and a T value from the Wilcoxon signed-rank matched-pairs test.
--
-- See the notes on 'wilcoxonCriticalValue' for how this is calculated.
wilcoxonMatchedPairSignificance
:: Int -- ^ The sample size
-> Double -- ^ The value of T for which you want the significance.
-> PValue Double -- ^ The significance (p-value).
wilcoxonMatchedPairSignificance n t
= mkPValue p
where
p | n < 100 = (summedCoefficients n !! floor t) / 2 ** fromIntegral n
| otherwise = cumulative (normalApprox n) t
-- | Normal approximation for Wilcoxon T statistics
normalApprox :: Int -> NormalDistribution
normalApprox ni
= normalDistr m s
where
m = n * (n + 1) / 4
s = sqrt $ (n * (n + 1) * (2*n + 1)) / 24
n = fromIntegral ni
-- | The Wilcoxon matched-pairs signed-rank test. The samples are
-- zipped together: if one is longer than the other, both are
-- truncated to the length of the shorter sample.
--
-- For the one-tailed variant it tests whether the first sample is
-- significantly greater than the second. For the two-tailed variant
-- it checks whether they differ significantly.
--
-- Check 'wilcoxonMatchedPairSignedRank' and
-- 'wilcoxonMatchedPairSignificant' for additional information.
wilcoxonMatchedPairTest
:: (Ord a, Num a, U.Unbox a)
=> PositionTest -- ^ Perform one-tailed test.
-> U.Vector (a,a) -- ^ Sample of pairs
-> Test ()     -- ^ The test result.
wilcoxonMatchedPairTest test pairs =
Test { testSignificance = pVal
, testStatistics = t
, testDistribution = ()
}
where
(n,tPlus,tMinus) = wilcoxonMatchedPairSignedRank pairs
(t,pVal) = case test of
AGreater -> (abs tMinus, wilcoxonMatchedPairSignificance n (abs tMinus))
BGreater -> (abs tPlus, wilcoxonMatchedPairSignificance n (abs tPlus ))
-- Since we take the minimum of T+ and T- we can't get more
-- than p=0.5 and can multiply it by 2 without risk
-- of error.
SamplesDiffer -> let t' = min (abs tMinus) (abs tPlus)
p = wilcoxonMatchedPairSignificance n t'
in (t', mkPValue $ min 1 $ 2 * pValue p)
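-- A hedged usage example (the literal pairs below are illustrative
-- only, not taken from a real dataset):
--
-- > let pairs = U.fromList [(125,110), (115,122), (130,125)] :: U.Vector (Double, Double)
-- > pValue . testSignificance $ wilcoxonMatchedPairTest SamplesDiffer pairs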
-- $references
--
-- * \"Critical Values for the Wilcoxon Signed Rank Statistic\", Peter
-- Mitic, The Mathematica Journal, volume 6, issue 3, 1996
-- (<http://www.mathematica-journal.com/issue/v6i3/article/mitic/contents/63mitic.pdf>)
|
\section*{Time Series data \footnotesize\emph{Fabian Brix, Anton Ovchinnikov}}
The basis for the prediction part of this project is formed by time series of price data. We acquired these for different stages of pricing in the supply chain and at different time granularities from various Indian government ministries. The data had to be downloaded from the websites of these ministries and processed into usable formats. The sources comprise the Ministry of Agriculture and the Ministry of Trade and their various departments. In terms of price types we differentiate between wholesale market prices and retail prices, and we subsequently structure our documentation of data sources and data processing according to these two types.
\subsection*{Wholesale prices}
\subsubsection*{Wholesale price index}
An index that measures and tracks the changes in the price of goods in the stages before the retail level. Wholesale price indexes (WPIs) are reported monthly to show the average price changes of goods sold in bulk, and they are one group of indicators used to follow growth in the economy.\par
%(taken from investopedia.com)\\
\subsubsection*{Data sources}
We came across a \href{http://agmarknet.nic.in/}{website} maintained by the Indian Ministry of Agriculture that tracks \emph{daily} wholesale prices for units of 100 kilograms at a huge number of market places across the country. The data reaches back to 2005 and is supplemented by stock arrival numbers.\par
Due to the large amount of data available, as well as the time and resources needed to pull it, we constrained ourselves to several major products (Onion, Rice, Wheat, Apple, Banana, Coriander, Potato, Paddy (Dhan), Tomato). A Python script was used to download all available data for the selected products directly from the website, using simple HTTP GET requests. The raw HTML data was then converted to a CSV format with a predefined structure and saved for later processing.
\subsection*{Retail prices}
\subsubsection*{Data sources}
\begin{itemize}
\item[1.] Daily retail prices
Daily retail prices were found on the \href{http://fcainfoweb.nic.in/}{website} created by the Department of Consumer Affairs of India. One can choose the type of report (price report, variation report, average/month end report) and the desired date, and then receive the requested data in HTML format. This website was used to query price reports from 2009 to 2014, for 59 cities and 22 products. As with the daily wholesale prices, the raw HTML data was converted to CSV files in a normalized format, which was intended to facilitate further processing.
\item[2.] Weekly retail prices
The \href{http://rpms.dacnet.nic.in/}{Retail Price Information System} of the Indian Ministry of Agriculture was queried to obtain weekly retail prices.\par
Unlike the daily prices, the only available data format that could be parsed was Microsoft Excel's .xls format. So all the required .xls files were downloaded using a Python script, and 'xls2csv' was then used to convert the .xls documents to the CSV format. For some reason, the majority of product names were missing in the downloaded .xls files, so a basic heuristic, which relied on the order of product names, was used to reconstruct the data.
The data obtained contained price information for a large number of markets, around 60 products and hundreds of subproducts, dating back to 2005. Unfortunately, the data was far from complete, especially for the 2005--2007 time frame.
\end{itemize}
\subsection*{Price sequences \footnotesize\emph{Ching-Chia Wang}}
We used Pandas, a powerful and fast Python data analysis library, to load the csv files of the datasets, to clean them up, and to organize them into usable formats. The prices of all products in these datasets are collected through a human reporting mechanism from local institutions to a central ministry. Due to the nature of this data collecting process, the quality of these three datasets suffers from human negligence and mistakes. We thus needed to conduct two phases of work prior to time series analysis and price prediction: data clean-up and dataset usability analysis.
\subsubsection*{Price Data Clean-up}
In data clean-up, we fixed several defects in the original datasets. Each series originally had a different portion of missing dates, and some had multiple data points for the same date with different values. The first stage of clean-up was to align each series to a unique sequence of dates with constant date frequency by inserting NaNs at the missing and duplicated dates.
Next, we discovered that there were many outliers with extreme values in the series. For example, a price of 10 rupees today jumps to 100 tomorrow, and then goes back to 10 the day after. Such cases are most likely caused by human mistakes in reporting prices to the Indian ministries, so we used a heuristic to remove these suspicious spikes. By taking the daily differences in percentage of a series, we were able to remove data points on dates which show more than a 100\% price change in one time step (daily or weekly).
Finally, we patched the missing parts of the series with common methods such as linear or cubic spline interpolation.
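The clean-up heuristic can be sketched in a few lines of Pandas (the function name and the exact threshold are ours, not taken from the project code):
\begin{verbatim}
import numpy as np
import pandas as pd

def clean_series(s, freq='D', max_change=1.0):
    # Align to a unique date index with constant frequency;
    # missing and duplicated dates become NaN.
    s = s[~s.index.duplicated(keep=False)].asfreq(freq)
    # Drop points whose one-step price change exceeds 100%.
    s[s.pct_change().abs() > max_change] = np.nan
    # Patch the remaining gaps by interpolation.
    return s.interpolate(method='linear')
\end{verbatim}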
\begin{figure}[!ht]
\centering
\begin{subfigure}[b]{.45\textwidth}
\centering
\includegraphics[width=\textwidth]{./img/before.png}
\caption{Sample price series after date alignment during data cleaning}
\label{subfig:sc1}
\end{subfigure}
\quad
\begin{subfigure}[b]{.45\textwidth}
\centering
\includegraphics[width=\textwidth]{./img/after.png}
\caption{Sample price series after spikes-removing and interpolation}
\label{subfig:sc2}
\end{subfigure}
\caption{Price series clean-up: date alignment, spike removal and interpolation}
\label{fig:series_cleaning}
\end{figure}
\subsubsection*{Price Dataset Usability Analysis}
The dataset usability analysis phase was intended to explore the data and find usable material for analysis and prediction in later stages. We first filtered out all series with less than 60\% of non-missing data, then computed some indicators of the data availability of each product in each region. Unfortunately, we found that none of the datasets is fully satisfactory.
The data availability in the daily wholesale dataset varies tremendously among different products and regions. Although there are about 15 regions, 7 candidate products, and more than 10 sub-categories for each product after filtering, most combinations of region, product and sub-category do not have enough available data for analysis and prediction. Fortunately, we did find a few very good series which have more than 80\%--90\% of valid data and also exhibit the desired traits of high volatility and a rising trend across time. These became good candidates for later analysis and prediction.
On the other hand, the daily retail dataset appears to have consistent data availability among products and regions, but in fact all products have only about 60\% of valid data in each region. Moreover, after examining each series via plots, we discovered that most of the daily retail series behave strangely. For instance, the price in a series may keep an identical value for a long period and suddenly jump to another value, which makes the series look like the steps of a ladder. These series are not useful, since such a pattern is probably caused by a dysfunctional price reporting process. Last, the weekly retail dataset has too many incorrect data points due to the heuristic for parsing Excel files mentioned in the previous section. We thus concluded that the weekly retail dataset could be discarded with no loss.
Considering the limitations of the current price datasets, we have concluded the following potential usage of them: (1) Select a few representative series of each product with very good data quality. By using these individual series as a starting point of analysis and prediction, we can find out general characteristics and also the feasibility of predicting the prices of these highly volatile commodities. (2) Merge the series of the same product in each region to construct an extended dataset containing a uniform profile for each product in each region. In this way, we can gain a national overview of trends and price variations of different products.
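A merged regional profile of the kind described in (2) can be sketched as follows (assuming the individual series have already been cleaned and share a date index):
\begin{verbatim}
import pandas as pd

def merge_product_region(series_list):
    # Average the per-market series of one product in one region
    # into a single regional profile.
    return pd.concat(series_list, axis=1).mean(axis=1)
\end{verbatim}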
\begin{figure}
\centering
\includegraphics[width=.7\textwidth]{./img/merged_west_bengal_products.png}
\caption{Merged series of foods in West Bengal from the wholesale daily dataset}
\end{figure}
\subsection*{Other sources \footnotesize\emph{Joseph Boyd}}
After some research we decided to include additional data that especially affects agricultural commodity prices, such as the price of crude oil, climate data and inflation in the form of the Consumer Price Index (CPI). The monthly international price of crude oil was downloaded from a US government website. All other types of data were ``scraped'' from two online sources using the HTML parsing library `BeautifulSoup' for Python. The first of these sources was the meteorological web site Tu Tiempo (http://www.tutiempo.net). Daily climate data (comprising temperature, humidity, precipitation etc.) for the last 16 years (1999--2014) was collected for over 100 locations in India. The script for this is get\_climate\_data.py. The second source was monthly inflation (CPI) data for India for the past 16 years from inflation.eu. The script for this purpose is get\_inflation\_data.py.
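A minimal sketch of the scraping approach (the URL and tag structure are illustrative, not the exact ones handled by get\_climate\_data.py):
\begin{verbatim}
import requests
from bs4 import BeautifulSoup

def scrape_rows(url):
    # Fetch a page and pull the text of every table row.
    soup = BeautifulSoup(requests.get(url).text)
    rows = []
    for tr in soup.find_all('tr'):
        cells = [td.get_text(strip=True) for td in tr.find_all('td')]
        if cells:
            rows.append(cells)
    return rows
\end{verbatim}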
%exchange rate
%crude oil
\section*{Social Media data \footnotesize\emph{Joseph Boyd}}
For the other significant part of the project we decided to limit ourselves to a basic analysis of conversations on Twitter. Originally we planned to also look into other platforms such as Facebook, but quickly came to realize that the data publicly available through the API was of little use to us. Obtaining data from the Twitter APIs turned out to be harder than expected and involved a number of heuristics. Twitter provides different APIs for different purposes. The two free ones are the Streaming API, which allows real-time streaming of only 1\% of newly posted tweets, and the REST API. Since we wanted 'historical' tweets we had to find a workaround by using the REST API and fetching tweets by user.\par
Twitter is a rich resource for studying cutting-edge trends on a global scale. Given a sufficiently large data collection effort, Twitter user discourse indicating changes in commodity prices may be obtained. This discourse supplies us with predictors. The Humanitas project harvests huge amounts of user activity from this social media platform in order to capture the sparsely distributed activity pertaining to food prices. It is this aspect of the project which promotes it to the domain of `big data'.
\subsection*{Historical tweets}
\subsubsection*{Approach 1: Fetching "historical" tweets through Twitter API \footnotesize\emph{Joseph Boyd}}
Using the Twython package for Python we are able to interface with the Twitter API. Our methodology (figure \ref{fig:methodology}) is to select the Twitter accounts of a number of regional celebrities as starting points. These are likely to be `followed' by large numbers of local users. In a first phase (tweet\_collection.py.get\_followers()), from each of these sources we extract a list of followers and filter it by various characteristics. Once a substantial list has been constructed and merged (merge.py and remove\_intersection.py), we may proceed to download the tweet activity (up to the 3200 most recent tweets) of each of these users in a second phase (tweet\_collection.py.get\_tweets()).
Despite recent updates allowing developers greater access, Twitter still imposes troublesome constraints on the number of requests per unit time window (15 minutes) and, consequently, the data collection rate. It is therefore necessary to: 1) optimise the use of each request; and 2) parallelise the data collection effort.
As far as optimisation is concerned, the \textbf{GET statuses/user\_timeline} call may be made 300 times per 15-minute time window, with up to 200 tweets returned per request. This sets a hard upper bound of 60000 tweets per time window, which is why the filtering stage of the first phase is so crucial. Using the \textbf{GET followers/list} call (30 calls/time window), we may discard in advance the majority of Twitter users with low numbers of tweets (often zero), so as to avoid burning the limited user timeline requests on fruitless users, thus increasing the data collection rate. In this way we come close to optimality and achieve 4-5 million tweets daily per process. However, it may be prudent to strike a balance between tweets per day and tweets per user; therefore a nominal filter is currently set to 50 tweets minimum rather than 200. It is furthermore necessary to install dynamic time-tracking mechanisms in the source code so as to monitor the request rates and impose a process `sleep' when required.
Parallelisation begins with obtaining N ($\approx 10$) sets of developer credentials from Twitter (https://dev.twitter.com/). These N credentials may then be used to launch N processes (get\_users.sh) collecting user data in parallel. Given the decision to divide the follower collection and tweet collection into separate phases (this may alternatively be done simultaneously), there is no need for distributed interaction between the processes to control overlap, as each process will simply take $1/N$ th of the follower list produced in phase 1 and process it accordingly. It should be relatively simple to initiate this parallel computation given the design of the scripts.
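The follower-filtering step of phase 1 can be sketched as follows (the helper name and thresholds are illustrative; the real logic lives in tweet\_collection.py):
\begin{verbatim}
import time
from twython import Twython, TwythonRateLimitError

def good_followers(api, source, min_tweets=50):
    # Walk the follower list of a source account, keeping only
    # users promising enough to spend timeline requests on.
    kept, cursor = [], -1
    while cursor != 0:
        try:
            page = api.get_followers_list(screen_name=source,
                                          count=200, cursor=cursor)
        except TwythonRateLimitError:
            time.sleep(15 * 60)  # wait out the 15-minute window
            continue
        kept += [u['id'] for u in page['users']
                 if u['statuses_count'] >= min_tweets]
        cursor = page['next_cursor']
    return kept
\end{verbatim}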
\begin{table}
\begin{center}
\begin{tabular}{ | c | c | c | c | c | }
\hline
Phase 1 \\ \hline
Users & Duration (s) & Sleep (s) & User Rate & Type \\ \hline
334 & 2795 & 2047 & - & Total \\ \hline
299 & 2700 & 2047 & 99.7 & Normalised (3 windows) \\ \hline
Phase 2 \\ \hline
Tweets (Users) & Duration (s) & Sleep (s) & Tweet Rate & Type \\ \hline
171990 (334) & 3108 & 922 & - & Total \\ \hline
150008 (309) & 2700 & 922 & 50002.7 & Normalised (3 windows) \\ \hline
\end{tabular}
\end{center}
\caption{Trial run results}
\label{table:benchmark}
\end{table}
A benchmarking test (table \ref{table:benchmark}) was performed in order to support configuration choices for the parallelisation. The test involved collecting the tweets from all good users within the first 20000 followers of @KareenaOnline, the account of a local celebrity. The following observations can be made:
\begin{itemize}
\item only 1.5-2\% of users are considered "good" under the current choice of filters (location, min. 50 tweets etc.);
\item Despite different levels of sleeping, phase 2 reads from users at roughly the same rate that phase 1 collects them (approximately 100 per time window in both cases);
\item Phase 2 produces around 50000 tweets per time window.
\end{itemize}
It is important to note, however, that the rate of "good" users varies depending on the notoriety of the source account outside of India. To ensure good coverage for user collection, a wide variety of source users was chosen, including rival politicians, musicians, sportspersons, film stars, journalists and entrepreneurs.
Tweet collection for Humanitas occurred in two main waves. In the first wave 180 000 user identifiers were collected. This amounted to 110 million tweets, collected over about three days, totalling 288GB of information (note that a tweet response comprises the textual content as well as a substantial amount of metadata). In the second wave of collection we encountered diminishing returns, as many of the newly harvested users had already featured in the first wave. Despite a lengthier collection effort, only 110 000 new users were collected, leading to 70 million additional tweets and a grand total for the two waves of about 500GB of data. Future collection work for Humanitas would benefit from a more sophisticated approach to collecting users (phase 1), for example, by constructing a Twitter user graph.
\begin{figure}
\includegraphics[width=.7\textwidth]{./img/CollectionProcess.pdf}
\caption{Tweet collection methodology.}
\label{fig:methodology}
\end{figure}
\subsubsection*{Approach 2: Filtering tweets provided by webarchive.org \footnotesize\emph{Gabriel Grill}}
Before we were sure we could get tweets from the Twitter APIs, we explored a set of archive files made available on \href{https://archive.org/details/twitterstream}{archive.org}. These archives were recorded via the Twitter Streaming API and aggregated by month.
They contain 1\% of all tweets from 2011 to 2013 (and not only from India!). The collection of tweets was done via a Python script which was executed on all 8 Azure nodes in parallel. Beforehand, the respective archives were downloaded to the nodes at a download speed of approx. 20 MB/s. Storage was, to our surprise, no problem, because every Azure node has 133 GB of temporary storage at its disposal. About 550 GB of compressed tweets were processed and filtered in about 36 hours per node.
\newline
The applied filter discarded tweets not containing at least one food commodity specific word (e.g. rice, curry, food, ...) and at least one predictive word (e.g. increase, decrease, price, ...); a sketch of this filter follows the list below. Retweets were also filtered out, because sophisticated duplicate detection across 8 nodes can be costly, and some exploration of the data showed that almost all duplicates were caught by this simple check. Since we want to predict food prices for certain regions, the location of the tweets is very important. We came up with a simple scheme to detect locations:
\begin{itemize}
\item \textbf{Geo-location:} Some tweet objects contain a field 'coords' which states the exact location coordinates the tweet was sent from.
\item \textbf{Mentioned regions:} In the associated tweet text regions can be mentioned, which can also give clues on the regions affected.
\item \textbf{Places:} When submitting a tweet an user can also specify a place associated with the tweet. This information can be extracted from the tweet objects.
\item \textbf{User location:} Most tweets objects also have an associated user object, which contains a user location sometimes. The textual information of this field is tokenized and compared with a list of known regions.
\end{itemize}
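The core of the keyword filter can be sketched like this (the word lists are abbreviated; the real lists were longer):
\begin{verbatim}
COMMODITY  = {'rice', 'onion', 'wheat', 'potato', 'curry', 'food'}
PREDICTIVE = {'price', 'increase', 'decrease', 'cost', 'shortage'}

def keep(tweet):
    text = tweet.get('text', '').lower()
    if text.startswith('rt @'):        # drop retweets
        return False
    tokens = set(text.split())
    return bool(tokens & COMMODITY) and bool(tokens & PREDICTIVE)
\end{verbatim}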
According to the categories mentioned above, the tweets were split up into several files. Since Approach 1 yielded far more tweets, the archive sample was only used for data exploration and for testing the more refined tweet processing.
\subsubsection*{Daily tweet aggregator}
Our first idea was to build a continuously running process which fetches the newest data from the Twitter stream for India. But after applying a simple filter, we came to the conclusion that the data is too sparse for this approach, since the Twitter Streaming API supplies only 1\% of all available tweets. As the number of Twitter users in India is growing rapidly, this could become a promising approach in the future.
\subsubsection*{Clustering according to keywords}
Since relevant data was really sparse, we didn't expect much gain from unsupervised learning techniques and decided to omit clustering. Instead we decided to manually explore a sample of the tweet data and create a list of indicator words used for detecting e.g. poverty, price increases/decreases, sentiment and so on. Performing occurrence checks for all crafted words and storing the results yields feature vectors for every tweet, usable for prediction.
\subsubsection*{Issue of storage}
At first we believed that there wouldn't be enough space to store all tweets, but after setting up an Azure node, we found that there is about 133 GB of temporary storage associated with it. We used this space to store the huge amount of tweets, but since the storage is temporary we lost tweets several times. This was due to configuration mistakes on our part and to Azure's 'healing': when Azure detects anomalies, internal errors or simply has to move images around, the affected nodes are restarted, which resulted in a loss of tweets for us. Because of that, all filtered tweets were later stored on the main disk to avoid further losses.
\subsection*{Issue of localization}
It was very important for us to detect the location of tweets, because we wanted to predict volatile food prices at regional granularity. Since Twitter is not widespread in India and localized tweets are rare, we had to come up with heuristics to deal with this.
\subsubsection*{Geolocalized tweets}
Filtering the available archives of tweets taken from the API yielded next to no geolocalized tweets from India matching our set of keywords. The reason is evident: the Twitter API only allows extraction of 1\% of tweets, and only 2\% of tweets are actually geolocalized. In effect, getting tweets that match our keywords specific to food commodities is very unlikely. We had more luck with tweets from Indonesia; however, as already explained, we were unable to obtain enough price sequences from Indonesia to actually train a model. Furthermore, the time constraints didn't allow us to fetch tweets from India and Indonesia in parallel in order to do some "stand-alone" clustering analysis.
\subsubsection*{Approximation: Mapping tweets to user location}
To get as many tweets as possible associated with a location, we decided to use the locations of user accounts as a simple heuristic. We created a mapping between city and region names and used it to identify valid locations, which were then used during later processing.
\section*{Processing}
After all tweets were collected, we had to process and reformat their content for the neural networks (prediction) and the web visualisation.
\subsection*{Crafting indicators from tweets \footnotesize\emph{Gabriel Grill}}
To use the collected tweets for prediction in the neural network, aggregated indicators had to be extracted from the collection. The final result of this processing was then stored in \emph{csv} files.\\
Every word occurrence check (either for filtering or feature extraction) in tweet texts was done by iterating over the list of tokens generated from the text. Several NLP techniques were used to improve the word comparison. For every tweet a category/indicator counter was kept to track the number of occurrences of certain predictive words. The processed tweets and their extracted features (indicator counts) were stored in a Cassandra cluster and later queried with Shark. The results of the Shark queries were then converted to \emph{csv} files.
\subsubsection*{Sentiment analysis \footnotesize\emph{Anton Ovchinnikov}}
Sentiment analysis, or opinion mining, is the concept of using different computer science techniques (mainly machine learning and natural language processing) in order to extract sentiment information and subjective opinions from data. In our context this may help us to find out how circumstances related to commodity prices affect the overall mood of the population.
From the start we decided that we did not want to build our own sentiment analysis system, since a proper implementation with testing and evaluation would require considerable effort compared with the total project workload. Instead, we planned to use some already developed solutions and tune them to our needs.
\newline \newline
Several sentiment analysis frameworks were tested, including:
\begin{itemize}
\item SentiStrength \par (http://sentistrength.wlv.ac.uk/)
\item Stanford CoreNLP \par (http://nlp.stanford.edu/sentiment/code.html)
\item 'Pattern' library from the University of Antwerp \par (http://www.clips.ua.ac.be/pages/pattern-en)
\end{itemize}
All of these software packages produced reasonable results on short and simple sentences, but the sentiment grades looked almost random on a set of real collected tweets. Most likely, factors such as misspelled words, acronyms, slang and irony contributed to the overall ambiguity of the assigned sentiment grades.
\newline \newline
Therefore, we decided to build and use our own simple system, which incorporated basic natural language processing and opinion mining techniques, but was mainly focused on extracting relevant keywords that could help to assess tweets from a specific point of view. This approach, which also takes into account issues originating from word misspelling, is described in the next two paragraphs.
\subsubsection*{Extracting predictor categories \footnotesize\emph{Anton Ovchinnikov}}
First, several "predictor categories" were selected. These categories represent different aspects of price variation, and each category includes several sets of words of different polarities. For example, the category \emph{"price"} has two polarities: \emph{"high"} and \emph{"low"}.
\\
The following word list belongs to \emph{"high"} polarity:
\emph{'high', 'expensive', 'costly', 'pricey', 'overpriced'},
\\
and these are words from \emph{"low"} list:
\emph{'low', 'low-cost', 'cheap', 'low-budget', 'dirt-cheap', 'bargain'}.
\\ \\
Likewise, a category "supply" has "high" polarity:
\emph{'available', 'full', 'enough', 'sustain', 'access', 'convenient'},
\\
and "low":
\emph{'run-out', 'empty', 'depleted', 'rotting'}.
\\ \\ \\
A dictionary with a total of 6 categories (each having at least two polarity word lists) was built (\emph{"predict", "price", "sentiment", "poverty", "needs", "supply"}); let us call it $D$.
\\ \\
Then, for each tweet a feature vector is built, representing the number of words from each category and polarity. Several cases have to be taken into account. First of all, a word may not be in its base form ("price" -> "prices", "increase" -> "increasing"), which will prevent an incoming word from matching one in $D$. Therefore, we use stemming to reduce each word to its stem (or root) form. Another problem is misspelled words ("increase" -> "incrase", "incraese"), and in tweets this happens more often than usual due to the widespread use of mobile devices with tiny keyboards. Our solution to this problem is covered in the next section.
Here is an overview of the predictor category extraction algorithm we implemented:\\ \\
\textbf{Preprocessing}: For each relevant word in $D$ a stem is computed using the Lancaster stemming method, and the stem is added to reverse index $RI$, which maps a stem to a tuple: (category, polarity). \\
\textbf{function get\_category(w):}
\indent
\begin{algorithm}[H]
Compute a stem $s$ from $w$ \par
Check if $s$ is present in $RI$.
\eIf{yes}{
\textbf{return} the corresponding tuple.
}{
ask spell checker for a suggestion \par
is suggestion stem returned? \par
\eIf{yes}{
\textbf{return} the corresponding tuple from $RI$
}{
\textbf{return} None;
}
}
% \caption{Algo}
\end{algorithm}
\noindent \\
On a high level, every tweet is split into words, and then each word (token) is passed through the 'get\_category' function. But here is another problem we face with this approach: each relevant word we encounter may have a negation word (particle) before it, which subverts the meaning: "increases" -> "doesn't increase", "have food" -> "have no food", etc. To deal with this problem, we employed the following method: we added a special 'negation' category with a list of negation words ("not", "haven't", "won't", etc.), and if there is a negation word before some relevant word (to be more precise, within some constant distance from it, say 2), then we change the polarity of the relevant word's category. For example, if a word is from category "poverty" and has "high" polarity (like "starving"), then a negation word right before it (such as "aren't") will turn the polarity to "low".
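A condensed sketch of this extraction (the dictionary is abbreviated and the helper names are ours):
\begin{verbatim}
from nltk.stem.lancaster import LancasterStemmer

stemmer = LancasterStemmer()
D = {'price': {'high': ['high', 'expensive', 'costly'],
               'low':  ['low', 'cheap', 'bargain']}}
NEGATIONS = {'not', "haven't", "won't", 'no'}
FLIP = {'high': 'low', 'low': 'high'}

# Reverse index RI: stem -> (category, polarity).
RI = {stemmer.stem(w): (cat, pol)
      for cat, pols in D.items()
      for pol, words in pols.items()
      for w in words}

def features(tokens, window=2):
    counts = {}
    for i, tok in enumerate(tokens):
        hit = RI.get(stemmer.stem(tok))
        if hit is None:
            continue
        cat, pol = hit
        # A negation word shortly before the match flips polarity.
        if NEGATIONS & set(tokens[max(0, i - window):i]):
            pol = FLIP[pol]
        counts[(cat, pol)] = counts.get((cat, pol), 0) + 1
    return counts
\end{verbatim}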
\subsubsection*{Tweets spell checking \footnotesize\emph{Anton Ovchinnikov}}
People often do not pay much attention to proper word spelling while communicating over the Internet and using social networks, but misspelled words may introduce mistakes into the processing pipeline and significantly reduce the number of filtered tweets, since a relevant but incorrectly written word might not be recognized by the algorithm.
Several spell checking libraries were evaluated (Aspell and Enchant, to name a few), but their 'suggest' method lacked both performance (several seconds to generate suggestions for thousands of words, which is very slow) and flexibility (it is not possible to specify the number of generated suggestions, nor a threshold such as the maximal edit distance between words). Therefore, we decided to use a simple approach which involves computing edit distances between a given word and the words from the predictor categories dictionary ($D$).
\\
For each given word $w$ we compute its stem $s$ and then the edit distance (also known as Levenshtein distance) to each word (stem) from $D$. This can be done really fast thanks to the C extensions of the \textit{python-levenshtein} module.
\\
After that, we choose the stem with minimal edit distance (using a heap to store the correspondence between distances and words and to speed up selection of the minimal one), and check whether the resulting number of "errors" (which is equal to the distance) is excusable for the length of the word $w$. For example, we don't allow any errors for words of length 5 or less, only one error is allowed for lengths from 6 to 8, etc. If everything is alright, the suggestion is returned; otherwise the word is discarded.
The approach proved to be fast and tweakable, and was successfully used for tweet processing.
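A sketch of the distance-based suggestion step (with the length thresholds described above; for brevity we take a plain minimum over (distance, stem) pairs instead of an explicit heap):
\begin{verbatim}
import Levenshtein  # the python-levenshtein module

def suggest(word, stems):
    # Pick the dictionary stem closest to the word in edit
    # distance, and accept it only if the number of "errors"
    # is excusable for the word's length.
    dist, best = min((Levenshtein.distance(word, s), s)
                     for s in stems)
    allowed = 0 if len(word) <= 5 else 1 if len(word) <= 8 else 2
    return best if dist <= allowed else None
\end{verbatim}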
\subsection*{Generation of time series \footnotesize\emph{Gabriel Grill}}
All these indicators are crafted into a single time series of the form \emph{'Product', 'date', 'region', 'indicator1', ... , 'indicatorN'} via Shark queries. The indicators represent the number of tweets matching a certain predictive category (as mentioned previously), normalized by the total number of tweets mentioning the product that day. If a tweet is part of multiple categories, it is not clear what the intention behind the tweet was. That is why, as a heuristic, we divide the sum of mentions of each predictive category by the total number of identified mentioned categories for that tweet. We have a script to generate queries for all products and a script to parse the output into \emph{csv} format. Figure \ref{fig:architecture} illustrates the whole processing pipeline and at the same time gives a good overview of the architecture.
\begin{figure}
\includegraphics[width=.7\textwidth]{./img/architecture.png}
\caption{Tweet processing pipeline.}
\label{fig:architecture}
\end{figure}
\subsection*{Infrastructure \& Architecture \footnotesize\emph{Gabriel Grill}}
Almost all downloading and processing of tweets was done in parallel on 8 Azure nodes. A script was written to upload public keys to the nodes for seamless access. The same script was used to execute various tasks on all or specific nodes. To handle the huge amounts of data and do efficient OLAP, we decided to use Spark/Shark. The data was first stored on each node separately on disk, then processed and afterwards inserted into a Cassandra DB cluster running on the nodes. The Apache Cassandra project is an open-source implementation of a NoSQL, column-oriented database. It is known for being fault tolerant, which was very useful during the various restarts, and for processing many inserts quickly. Since we had very limited memory on the permanent disk, we experienced 'out of memory' errors, but because all data had been uploaded to the whole cluster, the scripts could continue running smoothly although some nodes were down. We also experienced node faults whenever the Azure node management 'healed' nodes.
Setting up the Cassandra cluster across multiple Azure subscriptions was tedious, since all used ports had to be opened on all machines manually via the Azure management web interface. Opening ports was regrettably not enough to get a fully functioning Spark cluster running, because Spark uses random ports for communication between running jobs. We tried setting up a VPN, but gave up after several hours due to time constraints and because it was not essential for the result that the Spark cluster comprise all nodes. Luckily, Shark on top of Spark with only one node was still fully functional and executed queries at reasonable speed. To connect Shark with Cassandra we used the 'Hive support for Cassandra' driver \emph{cash} by Tuplejump. We experienced several problems (some undocumented) while installing it (e.g. missing libraries, wrong paths, ...), but still managed to get it running. To improve speed between the nodes, all VMs were created in the same region.
|
(*
chronos@localhost / $ sudo enter-chroot
Entering /mnt/stateful_partition/crouton/chroots/precise...
(precise)administrator@localhost:~$ coqtop
Welcome to Coq 8.3pl4 (April 2012)
Coq < Section Exercise_Nine.
Coq < Goal forall b f a g e k h:Prop, (((b \/ f) -> (a -> g)) /\ ((b \/ e) -> (g -> k)) /\ (b /\ ~h)) -> (a -> k).
1 subgoal
============================
forall b f a g e k h : Prop,
(b \/ f -> a -> g) /\ (b \/ e -> g -> k) /\ b /\ ~ h -> a -> k
Unnamed_thm < intros.
1 subgoal
b : Prop
f : Prop
a : Prop
g : Prop
e : Prop
k : Prop
h : Prop
H : (b \/ f -> a -> g) /\ (b \/ e -> g -> k) /\ b /\ ~ h
H0 : a
============================
k
Unnamed_thm < elim H.
1 subgoal
b : Prop
f : Prop
a : Prop
g : Prop
e : Prop
k : Prop
h : Prop
H : (b \/ f -> a -> g) /\ (b \/ e -> g -> k) /\ b /\ ~ h
H0 : a
============================
(b \/ f -> a -> g) -> (b \/ e -> g -> k) /\ b /\ ~ h -> k
Unnamed_thm < intro.
1 subgoal
b : Prop
f : Prop
a : Prop
g : Prop
e : Prop
k : Prop
h : Prop
H : (b \/ f -> a -> g) /\ (b \/ e -> g -> k) /\ b /\ ~ h
H0 : a
H1 : b \/ f -> a -> g
============================
(b \/ e -> g -> k) /\ b /\ ~ h -> k
Unnamed_thm < intro.
1 subgoal
b : Prop
f : Prop
a : Prop
g : Prop
e : Prop
k : Prop
h : Prop
H : (b \/ f -> a -> g) /\ (b \/ e -> g -> k) /\ b /\ ~ h
H0 : a
H1 : b \/ f -> a -> g
H2 : (b \/ e -> g -> k) /\ b /\ ~ h
============================
k
Unnamed_thm < firstorder.
Proof completed.
Unnamed_thm < Qed.
intros.
elim H.
intro.
intro.
firstorder .
Unnamed_thm is defined
Coq <
*)
Section Exercise_Nine.
Goal forall b f a g e k h:Prop, (((b \/ f) -> (a -> g)) /\ ((b \/ e) -> (g -> k)) /\ (b /\ ~h)) -> (a -> k).
intros.
elim H.
intro.
intro.
firstorder.
Qed.
End Exercise_Nine.
|
# =================================#
# Dictionaries store mappings #
# key => value pairs #
# key is indexed and unique #
# =================================#
d = Dict()
# append elements
d['a']=1; d['b']=4; d['c']=8
println("d = ",d)
# Create a dictionary using a literal
numbers = Dict( "one" => 1,
"two" => 2,
"three" => 3)
println("numbers = ", numbers)
# Access elements
println("first =", numbers["one"])
println("last =", numbers["three"]) |
(* Title: Jive Data and Store Model
Author: Norbert Schirmer <schirmer at informatik.tu-muenchen.de> and
Nicole Rauch <rauch at informatik.uni-kl.de>, 2005
Maintainer: Nicole Rauch <rauch at informatik.uni-kl.de>
License: LGPL
*)
section \<open>Program-Independent Lemmas on Attributes\<close>
theory AttributesIndep
imports "../Isa_Counter_Store/Attributes"
begin
text \<open>The following lemmas validate the functions defined in the Attributes theory.
They also aid in subsequent proving tasks. Since they are
program-independent, it is of no use to add them to the generation process of
Attributes.thy. Therefore, they have been extracted to this theory.
\<close>
lemma cls_catt [simp]:
"CClassT c \<le> dtype f \<Longrightarrow> cls (catt c f) = c"
apply (case_tac c)
apply (case_tac [!] f)
apply simp_all
\<comment> \<open>solves all goals where @{text "CClassT c \<le> dtype f"}\<close>
apply (fastforce elim: subtype_wrong_elims simp add: subtype_defs)+
\<comment> \<open>solves all the rest where @{text "\<not> CClassT c \<le> dtype f"} can be derived\<close>
done
lemma att_catt [simp]:
"CClassT c \<le> dtype f \<Longrightarrow> att (catt c f) = f"
apply (case_tac c)
apply (case_tac [!] f)
apply simp_all
\<comment> \<open>solves all goals where @{text "CClassT c \<le> dtype f"}\<close>
apply (fastforce elim: subtype_wrong_elims simp add: subtype_defs)+
\<comment> \<open>solves all the rest where @{text "\<not> CClassT c \<le> dtype f"} can be
derived\<close>
done
text \<open>The following lemmas are just a demonstration of simplification.\<close>
lemma rtype_att_catt:
"CClassT c \<le> dtype f \<Longrightarrow> rtype (att (catt c f)) = rtype f"
by simp
lemma widen_cls_dtype_att [simp,intro]:
"(CClassT (cls cf) \<le> dtype (att cf)) "
by (cases cf, simp_all)
end
|
\documentclass{article}
\usepackage{fullpage}
\usepackage{nopageno}
\usepackage{amsmath}
\usepackage{MnSymbol}
\allowdisplaybreaks
\newcommand{\abs}[1]{\left\lvert #1 \right\rvert}
\newcommand{\degree}{\ensuremath{^\circ}}
\begin{document}
Jon Allen
January 29, 2014
\section*{1}
For each of the four subsets of the two properties (a) and (b), count the number of four-digit numbers whose digits are either 1,2,3,4, or 5:
(a) The digits are distinct. (b) The number is even.
\subsection*{$\emptyset$}
By the multiplication principle there are $5^4$ four-digit numbers, or 625.
\subsection*{a}
Every choice we make reduces the number of options for the next choice by one. So if we have $n$ options and have to choose $r$ times, then by the multiplication principle we have $n\cdot(n-1)\cdot...\cdot(n-r+1)=\frac{n!}{(n-r)!}$ possibilities, and our answer is $\frac{5!}{(5-4)!}=\frac{5!}{1!}=5!=120$.
\subsection*{b}
Another way of saying this is: how many ways can you make an ordered pair of a three-digit number (with digits from 1 to 5) and a final digit $\in\{2,4\}$? By the multiplication principle, $5^3\cdot2=250$.
\subsection*{a and b}
So for our units digit we have 2 or 4 for choices. For our thousands we have 4 choices left, then 3 for our hundreds, 2 for our tens. So by the multiplication principle $2\cdot4\cdot3\cdot2=48$
\section*{2}
We can order the cards of each suit in $(52/4)!=13!$ ways, so the four suits contribute $(13!)^4$ orderings, and we can order the suits themselves in $4!$ ways. So we can order a deck with all the cards of the same suit together in $4!\,(13!)^4\approx3.61\times10^{40}$ ways.
\section*{11}
There are ${20 \choose 3}=\frac{20!}{3!17!}=1140$ different 3-element subsets of $\{1,...,20\}$. There are 18 subsets consisting of 3 consecutive numbers. There are 17 subsets that contain 1 and 2 but not 3, and likewise 17 that contain 19 and 20 but not 18. There are $19-2=17$ remaining pairs of consecutive numbers, and each of these pairs leaves 16 choices for the last element of the set. So, subtracting all of these from the total, we have $\frac{20!}{17!3!}-18-2\cdot17-17\cdot16=816$
\section*{17}
Since the number of rooks is the same as the number of rows and columns, we can say that each column gets a rook and only count the ways of assigning rooks to rows. Each rook we place removes an available row, so we have $6!=720$ ways of placing the rooks. Now we color them: counting the unordered choices of which two rooks are red gives $5+4+3+2+1=15$ ways (this is just ${6 \choose 2}$), so there are $720\cdot15=10800$ ways to place the colored rooks.
\section*{18}
Let us choose 6 columns and 6 rows from the 8 where the rooks will be placed. That gives us ${8 \choose 6}^2=(\frac{8!}{2!6!})^2=28^2=784$ row and column combinations to put the rooks down. We already know from the previous problem that there are 10800 ways to put these rooks down into these 784 different rows and columns so we have $784*10800=8467200$ ways to place these rooks.
\section*{20}
Fixing 0 in place there are $9!$ circular permutations of the set. If we then also fix the 9 in place we have $8!$ permutations. So the number of circular permutations where they are not opposite is $9!-8!=322560$.
\section*{26}
Pretty straightforward application of Theorem 2.4.3: $\frac{(mn)!}{(n!)^m}$ if the teams have names, and $\frac{(mn)!}{m!\,(n!)^m}$ if the teams are not named.
\section*{29}
We observe that the size of $S$ is $n_1+n_2+\cdots+n_k=n_1+n=n+1$. Applying Theorem 2.4.2 we see that the number of linear permutations of $S$ is $\frac{(n+1)!}{n_1!n_2!\cdots n_k!}$. Now let's partition the linear permutations into parts so that two linear permutations of $S$ correspond to the same circular permutation of $S$ if and only if they are in the same part. Then the number of parts will be the number of circular permutations. We also notice that each part will contain $n+1$ linear permutations. So the number of permutations is:
\[\frac{\frac{(n+1)!}{n_1!n_2!\cdots n_k!}}{n+1}=\frac{(n+1)n!}{(n+1)\cdot 1\cdot n_2!\cdots n_k!}=\frac{n!}{n_1!\cdots n_k!}\]
And we have our result.$\Box$
\section*{31}
First we have a 3-permutation of the winners. Each of these permutations can be paired with any of the 3 combinations of the remaining 12 teams. Our answer then is $P(15,3){12 \choose 3}=\frac{15!}{12!}\cdot\frac{12!}{3!9!}=\frac{15!}{3!9!}=600600$
\section*{36}
For each type $i$ we can have $n_i+1$ different repetition numbers in the submultiset. We add one to account for not having any of type $i$ in the submultiset. By the multiplication principal then the number of submultisets is $(n_1+1)(n_2+1)\dots(n_k+1)$.
\section*{38}
\begin{align*}
30&=x_1+x_2+x_3+x_4\\
y_1&=x_1-2, y_2=x_2, y_3=x_3+5, y_4=x_4-8\\
25&=x_1+x_2+x_3+x_4\\
\end{align*}
And by Theorem 2.5.1 the number of solutions are ${25+4-1 \choose 25}=\frac{28!}{25!3!}=3276$
\section*{45}
\subsection*{a}
Think of this as a 20 combination of a set with 5 types and infinite repetition. So by theorem 2.5.1 we have ${20+5-1 \choose 20}=\frac{24!}{20!4!}=10626$
\subsection*{b}
Lets imagine we assign a number to each book, according to the shelf it goes on. Then the number of ways we can put the books on each shelf is the number of permutations of length 20 of the numbers 1-5 which is $5^{20}=95367431640625$
\subsection*{c}
We can order the books in $20!$ different ways. Let's imagine the books are stacked up and we need to slot the five shelves into the 21 spaces between the books and at the ends of the stack. Since all the books are on shelves, we know that the fifth shelf sits at the 21st slot. We can also have empty shelves, so any shelf can occupy the same slot as another shelf. So we need to find how many 4-combinations of 21 items with repetition there are, namely ${4+21-1 \choose 4}={24 \choose 4}=10626$, and putting it all together with the multiplication principle we have $20!\cdot\frac{24!}{4!20!}=\frac{24!}{4!}=25852016738884976640000$
\section*{50}
What we really need to do is figure out how many ways we can place 2 non-attacking rooks on the board; this constrains the next two rooks to only one possibility. Then we pick any one of the remaining 60 squares for the fifth rook. For the first two rooks we have ${8 \choose 2}$ ways of choosing the rows and ${8 \choose 2}$ ways of choosing the columns, and then $2!$ ways of placing the rooks into those rows and columns. But we would get duplicates after placing the next two rooks, so we eliminate them by placing the first two in only $2!/2=1$ way. Putting it all together with the multiplication principle, we have ${8 \choose 2}^2\cdot60=47040$
\section*{55}
\subsection*{a}
TRISKAIDEKAPHOBIA rearranged to make counting easier AAABDEHIIIKKOPRST. And just copy the MISSISSIPPI example to get $\frac{17!}{3!1!1!1!1!3!2!1!1!1!1!1!}=\frac{17!}{3!3!2!}=4940103168000$
\subsection*{b}
FLOCCINAUCINIHILIPILIFICATION rearranged to make counting easier AACCCCFFHIIIIIIIIILLLNNNOOPTU and $\frac{29!}{2!4!2!9!3!3!2!}=3525105002372553600000$
\subsection*{c}
PNEUMONOULTRAMICROSCOPICSILICOVOLCANOCONIOSIS rearranged to make counting easier AACCCCCCEIIIIIILLLMMNNNNOOOOOOOOOPPRRSSSSTUUV and $\frac{45!}{2!6!1!6!3!2!4!9!2!2!4!1!2!1!}=\frac{45!}{2!^56!^23!4!^29!}=5749897770076560698733077346243840000000$
\subsection*{d}
DERMATOGLYPHICS rearranged to make counting easier ACDEGHIOLMPRSTY and there are no duplicate letters so it's just the number of permutations of 15 digits which is $15!=1307674368000$
\end{document}
|
[STATEMENT]
lemma hermitean_eye [simp]:
shows "hermitean eye"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hermitean eye
[PROOF STEP]
by (auto simp add: hermitean_def mat_adj_def mat_cnj_def) |
Our deepest sympathy is extended to the family of Evione Wright, who recently passed away.
She was the daughter of the late Otha and Willie Martin.
She was the mother of several children and had 18 grandchildren and 24 great-grandchildren.
Her service was held last Saturday at Evans Mortuary. The Rev. Alvin Douglass officiated.
Our sincere sympathy to the family and friends in the passing of Marlvin H. Bolton, who passed away last week.
He was born in Atlanta to the late Rev. James and Emma Bolton. |
import mynat.definition
import mynat.add
namespace mynat
theorem add_right_cancel (a t b : mynat) : a + t = b + t → a = b :=
begin [nat_num_game]
induction t with n hd,
{
rw add_zero,
rw add_zero,
intro h,
exact h,
},
{
rw add_succ,
rw add_succ,
intro h,
apply hd,
apply succ_inj,
exact h,
},
end
end mynat |
This year, B&Y took home three gold awards at the oldest, largest and most widely respected healthcare advertising awards competition. Our big winners included our Parkview “GO: Make a Difference” campaign, our PPG “More Than A Doctor” campaign and our Parkview “GO: Laugh” outdoor.
We also snagged four additional awards for other work for Parkview Health. All winning pieces were chosen based on creativity, quality, message effectiveness, consumer appeal, graphic design and overall impact.
Congrats, Parkview! And thank you to all of our clients and partners for trusting B&Y. |
[STATEMENT]
theorem invpst_mkNode[simp]: "invpst (mkNode c l a r) \<longleftrightarrow> invpst l \<and> invpst r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. invpst (mkNode c l a r) = (invpst l \<and> invpst r)
[PROOF STEP]
apply (cases l rule: invpst.cases;
cases r rule: invpst.cases;
simp add: mkNode_def min_kp_def)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>la x uu_ mkp ra. \<lbrakk>l = \<langle>\<rangle>; r = \<langle>la, (x, uu_, mkp), ra\<rangle>\<rbrakk> \<Longrightarrow> (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> is_min2 (min2 a mkp) (insert a (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)))) = (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)))
2. \<And>la x uu_ mkp ra. \<lbrakk>l = \<langle>la, (x, uu_, mkp), ra\<rangle>; r = \<langle>\<rangle>\<rbrakk> \<Longrightarrow> (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> is_min2 (min2 a mkp) (insert x (insert a (Tree2.set_tree la \<union> Tree2.set_tree ra)))) = (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)))
3. \<And>la x uu_ mkp ra laa xa uua_ mkpa raa. \<lbrakk>l = \<langle>la, (x, uu_, mkp), ra\<rangle>; r = \<langle>laa, (xa, uua_, mkpa), raa\<rangle>\<rbrakk> \<Longrightarrow> (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> invpst laa \<and> invpst raa \<and> is_min2 mkpa (insert xa (Tree2.set_tree laa \<union> Tree2.set_tree raa)) \<and> is_min2 (min2 a (min2 mkp mkpa)) (insert x (insert a (insert xa (Tree2.set_tree la \<union> (Tree2.set_tree ra \<union> (Tree2.set_tree laa \<union> Tree2.set_tree raa))))))) = (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> invpst laa \<and> invpst raa \<and> is_min2 mkpa (insert xa (Tree2.set_tree laa \<union> Tree2.set_tree raa)))
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>l = \<langle>\<rangle>; r = \<langle>la_, (x_, uu_, mkp_), ra_\<rangle>\<rbrakk> \<Longrightarrow> (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)) \<and> is_min2 (min2 a mkp_) (insert a (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)))) = (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)))
[PROOF STEP]
using is_min2_min2_insI
[PROOF STATE]
proof (prove)
using this:
is_min2 ?y ?ys \<Longrightarrow> is_min2 (min2 ?x ?y) (insert ?x ?ys)
goal (1 subgoal):
1. \<lbrakk>l = \<langle>\<rangle>; r = \<langle>la_, (x_, uu_, mkp_), ra_\<rangle>\<rbrakk> \<Longrightarrow> (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)) \<and> is_min2 (min2 a mkp_) (insert a (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)))) = (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)))
[PROOF STEP]
by blast
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>la x uu_ mkp ra. \<lbrakk>l = \<langle>la, (x, uu_, mkp), ra\<rangle>; r = \<langle>\<rangle>\<rbrakk> \<Longrightarrow> (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> is_min2 (min2 a mkp) (insert x (insert a (Tree2.set_tree la \<union> Tree2.set_tree ra)))) = (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)))
2. \<And>la x uu_ mkp ra laa xa uua_ mkpa raa. \<lbrakk>l = \<langle>la, (x, uu_, mkp), ra\<rangle>; r = \<langle>laa, (xa, uua_, mkpa), raa\<rangle>\<rbrakk> \<Longrightarrow> (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> invpst laa \<and> invpst raa \<and> is_min2 mkpa (insert xa (Tree2.set_tree laa \<union> Tree2.set_tree raa)) \<and> is_min2 (min2 a (min2 mkp mkpa)) (insert x (insert a (insert xa (Tree2.set_tree la \<union> (Tree2.set_tree ra \<union> (Tree2.set_tree laa \<union> Tree2.set_tree raa))))))) = (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> invpst laa \<and> invpst raa \<and> is_min2 mkpa (insert xa (Tree2.set_tree laa \<union> Tree2.set_tree raa)))
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>l = \<langle>la_, (x_, uu_, mkp_), ra_\<rangle>; r = \<langle>\<rangle>\<rbrakk> \<Longrightarrow> (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)) \<and> is_min2 (min2 a mkp_) (insert x_ (insert a (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)))) = (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)))
[PROOF STEP]
by (auto intro!: is_min2_min2_insI simp: insert_commute)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>la x uu_ mkp ra laa xa uua_ mkpa raa. \<lbrakk>l = \<langle>la, (x, uu_, mkp), ra\<rangle>; r = \<langle>laa, (xa, uua_, mkpa), raa\<rangle>\<rbrakk> \<Longrightarrow> (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> invpst laa \<and> invpst raa \<and> is_min2 mkpa (insert xa (Tree2.set_tree laa \<union> Tree2.set_tree raa)) \<and> is_min2 (min2 a (min2 mkp mkpa)) (insert x (insert a (insert xa (Tree2.set_tree la \<union> (Tree2.set_tree ra \<union> (Tree2.set_tree laa \<union> Tree2.set_tree raa))))))) = (invpst la \<and> invpst ra \<and> is_min2 mkp (insert x (Tree2.set_tree la \<union> Tree2.set_tree ra)) \<and> invpst laa \<and> invpst raa \<and> is_min2 mkpa (insert xa (Tree2.set_tree laa \<union> Tree2.set_tree raa)))
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>l = \<langle>la_, (x_, uu_, mkp_), ra_\<rangle>; r = \<langle>laa_, (xa_, uua_, mkpa_), raa_\<rangle>\<rbrakk> \<Longrightarrow> (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)) \<and> invpst laa_ \<and> invpst raa_ \<and> is_min2 mkpa_ (insert xa_ (Tree2.set_tree laa_ \<union> Tree2.set_tree raa_)) \<and> is_min2 (min2 a (min2 mkp_ mkpa_)) (insert x_ (insert a (insert xa_ (Tree2.set_tree la_ \<union> (Tree2.set_tree ra_ \<union> (Tree2.set_tree laa_ \<union> Tree2.set_tree raa_))))))) = (invpst la_ \<and> invpst ra_ \<and> is_min2 mkp_ (insert x_ (Tree2.set_tree la_ \<union> Tree2.set_tree ra_)) \<and> invpst laa_ \<and> invpst raa_ \<and> is_min2 mkpa_ (insert xa_ (Tree2.set_tree laa_ \<union> Tree2.set_tree raa_)))
[PROOF STEP]
by (smt Un_insert_left Un_insert_right is_min2_mergeI is_min2_min2_insI
sup_assoc)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
function rgb_display ( rgb, header )
%*****************************************************************************80
%
%% RGB_DISPLAY displays an RGB dataset.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 09 August 2013
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Nick Berry,
% A "Practical" Use for Genetic Programming,
% http://www.datagenetics.com/blog.html
%
% Parameters:
%
% Input, uint8 RGB(256,256,3), the RGB information.
%
% Input, string HEADER, an identifier used for the title and filename.
%
imshow ( rgb )
title ( header, 'FontSize', 16 )
filename = sprintf ( '%s.png', header );
print ( '-dpng', filename );
return
end
|
[STATEMENT]
lemma LIMSEQ_power_zero [tendsto_intros]: "norm x < 1 \<Longrightarrow> (\<lambda>n. x ^ n) \<longlonglongrightarrow> 0"
for x :: "'a::real_normed_algebra_1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. norm x < 1 \<Longrightarrow> (^) x \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
apply (drule LIMSEQ_realpow_zero [OF norm_ge_zero])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (^) (norm x) \<longlonglongrightarrow> 0 \<Longrightarrow> (^) x \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
by (simp add: Zfun_le norm_power_ineq tendsto_Zfun_iff) |
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE ImplicitParams #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Microbenchmark where
import Test.HUnit hiding (State)
import Test.Framework
import Test.Framework.Providers.HUnit
import System.Random
import Control.Monad.State
import Control.Monad
import Control.Lens
import GHC.Conc.Sync (setNumCapabilities)
import qualified Data.HashMap.Strict as HM
import qualified Data.HashSet as Set
import Data.List
import qualified Data.Vector as V
import Data.IORef
import Data.Time.Clock.POSIX
import Data.Aeson as AE
import Data.ByteString.Lazy as BS (writeFile)
import Kvservice_Types
import Kvstore.KVSTypes
import Requests
import TestSetup
import ServiceConfig
import Versions
import Options.Applicative
import Data.Semigroup ((<>))
import Statistics.Sample (mean)
import Debug.Trace
valueTemplate v = "value-" ++ show v
fieldTemplate f = "field-" ++ show f
keyTemplate k = "key-" ++ show k
tableTemplate t = "table-" ++ show t
data RangeGen = RangeGen Int Int StdGen
instance RandomGen RangeGen where
next (RangeGen lo hi g) = let (i,g') = randomR (lo,hi) g in (i, RangeGen lo hi g')
split (RangeGen lo hi g) = let (g1,g2) = split g in (RangeGen lo hi g1, RangeGen lo hi g2)
newtype LinearGen = LinearGen Int
instance RandomGen LinearGen where
next (LinearGen i) = (i, LinearGen $ i+1)
split (LinearGen i) = (LinearGen i, LinearGen i)
data BenchmarkState g = BenchmarkState {
_fieldCount :: Int,
_fieldSelection :: RangeGen,
_valueSizeGen :: RangeGen,
_tableCount :: Int,
_tableSelection :: RangeGen,
_keySelection :: g,
_operationSelection :: RangeGen,
_fieldCountSelection :: RangeGen,
_scanCountSelection :: RangeGen
}
makeLenses ''BenchmarkState
-- lens does not work with RankNTypes :(
createValue :: forall g. RandomGen g => StateT (BenchmarkState g) IO String
createValue = do
bmState <- get
let valSizeGenerator = view valueSizeGen bmState
(size,valSizeGenerator') = next valSizeGenerator
put $ over valueSizeGen (const valSizeGenerator') bmState
(return . concatMap valueTemplate . take size) [1,2..]
createRequest :: forall g. RandomGen g => StateT (BenchmarkState g) IO KVRequest
createRequest = do
bmState <- get
let opSelector = view operationSelection bmState
(op,opSelector') = next opSelector
bmState' = over operationSelection (const opSelector') bmState
tableSelector = view tableSelection bmState
(tableId,tableSelector') = next tableSelector
table = tableTemplate tableId
bmState'' = over tableSelection (const tableSelector') bmState'
keySelector = view keySelection bmState
(keyId,keySelector') = next keySelector
key = keyTemplate keyId
bmState''' = over keySelection (const keySelector') bmState''
put bmState'''
case op of
0 -> prepareINSERT table key <$> createINSERTEntry
1 -> prepareUPDATE table key <$> createUPDATEEntry
2 -> prepareREAD table key . Set.map fieldTemplate <$> getFields
3 -> prepareSCAN table key <$> getScanCount <*> (Set.map fieldTemplate <$> getFields)
4 -> return $ prepareDELETE table key
_ -> error $ "No such operation: " ++ show op
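    -- Operation codes drawn by operationSelection above:
    -- 0 = INSERT, 1 = UPDATE, 2 = READ, 3 = SCAN, 4 = DELETE.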
where
getFieldsAndValues :: forall g. RandomGen g => [Int] -> StateT (BenchmarkState g) IO (HM.HashMap String String)
getFieldsAndValues = fmap HM.fromList . mapM (\ i -> (,) <$> pure (fieldTemplate i) <*> createValue)
createINSERTEntry :: forall g. RandomGen g => StateT (BenchmarkState g) IO (HM.HashMap String String)
createINSERTEntry = getFieldsAndValues . flip take [1,2..] . view fieldCount =<< get
getFields :: forall g. RandomGen g => StateT (BenchmarkState g) IO (Set.HashSet Int)
getFields = do
s <- get
let fieldCountGen = view fieldCountSelection s
(fieldCount,fieldCountGen') = next fieldCountGen
s' = over fieldCountSelection (const fieldCountGen') s
fieldSel = view fieldSelection s'
(fieldSel', fields) = foldl (\ (sel,l) _ -> let (f,sel') = next sel in (sel',l ++ [f]) )
(fieldSel,[])
$ take fieldCount [1,2..]
s'' = over fieldSelection (const fieldSel') s'
put s''
return $ Set.fromList fields
createUPDATEEntry :: forall g. RandomGen g => StateT (BenchmarkState g) IO (HM.HashMap String String)
createUPDATEEntry = getFieldsAndValues . Set.toList =<< getFields
getScanCount = do
s <- get
let scanCountSel = view scanCountSelection s
(scanCount,scanCountSel') = next scanCountSel
put $ over scanCountSelection (const scanCountSel') s
return scanCount
workload :: forall g. RandomGen g => Int -> StateT (BenchmarkState g) IO (V.Vector KVRequest)
workload operationCount = V.fromList <$> mapM (const createRequest) [1..operationCount]
showState :: KVSState MockDB -> IO String
showState (KVSState cache dbRef _ _) = do
db <- readIORef dbRef
return $ "Cache:\n" ++ show cache ++ "\nDB:\n" ++ show db
loadDB keyCount = do
s <- initState
-- fill the db first
(requests,_) <- runStateT (workload keyCount) $ BenchmarkState
10 -- _fieldCount
(RangeGen 0 10 $ mkStdGen 0) -- _fieldSelection
(RangeGen 5 10 $ mkStdGen 0) -- _valueSizeGen
1 -- _tableCount
(RangeGen 1 1 $ mkStdGen 0) -- _tableSelection
(LinearGen 1) -- _keySelection
(RangeGen 0 0 $ mkStdGen 0) -- _operationSelection (INSERT only)
(RangeGen 3 10 $ mkStdGen 0) -- _fieldCountSelection
(RangeGen 5 10 $ mkStdGen 0) -- _scanCountSelection
-- traceM "requests (INSERT):"
-- mapM (\i -> traceM $ show i ++ "\n" ) requests
(responses, s') <- flip runStateT s $ ?execRequests requests
responses `seq` assertEqual "wrong number of responses" keyCount $ length responses
  -- traceM "state after init:"
  -- traceM =<< showState s'
  -- traceM "done with insert."
  return s'
reqBenchmarkState keyCount = BenchmarkState
10 -- _fieldCount
(RangeGen 0 10 $ mkStdGen 0) -- _fieldSelection
(RangeGen 5 10 $ mkStdGen 0) -- _valueSizeGen
1 -- _tableCount
(RangeGen 1 1 $ mkStdGen 0) -- _tableSelection
(RangeGen 1 keyCount $ mkStdGen 0) -- _keySelection
(RangeGen 1 3 $ mkStdGen 0) -- _operationSelection (no INSERT, no DELETE)
(RangeGen 3 10 $ mkStdGen 0) -- _fieldCountSelection
(RangeGen 5 10 $ mkStdGen 0) -- _scanCountSelection
currentTimeMillis = round . (* 1000) <$> getPOSIXTime
-- then run some requests
-- runRequests :: (?execRequests :: ExecReqFn)
-- => Int -> Int -> BenchmarkState RangeGen -> KVSState MockDB -> IO (KVSState MockDB, Integer)
runRequests operationCount keyCount bmState s = do
(requests,_) <- runStateT (workload operationCount) bmState
-- traceM $ "requests:"
-- mapM (\i -> traceM $ show i ++ "\n" ) requests
start <- currentTimeMillis
(responses, s') <- requests `seq` flip runStateT s $ ?execRequests requests
stop <- currentTimeMillis
let execTime = stop - start
-- traceM "???????????????????????????????????????"
-- traceM $ "responses:"
-- mapM (\i -> traceM $ show i ++ "\n" ) responses
responses `seq` assertEqual "wrong number of responses" operationCount $ length responses
return (s',execTime)
runSingleBatch :: (?execRequests :: ExecReqFn) => Int -> Int -> IO ()
runSingleBatch keyCount reqCount = do
(_, execTime) <- runRequests reqCount keyCount (reqBenchmarkState keyCount) =<< loadDB keyCount
traceM $ "exec time: " ++ show execTime
return ()
runMultipleBatches :: (?execRequests :: ExecReqFn) => Int -> Int -> Int -> IO Double
runMultipleBatches keyCount reqCount batchCount = do
s <- loadDB keyCount
let bmState = reqBenchmarkState keyCount
(_, execTimes) <- foldM (\ (st,execTimes) _ -> do
(st',execTime) <- runRequests reqCount keyCount bmState st
return $ st' `seq` (st',execTimes ++ [execTime]))
(s,[])
$ take batchCount [1,2..]
let meanExecTime = mean $ V.fromList $ map fromIntegral execTimes
-- traceM $ "mean execution time: " ++ show meanExecTime ++ " ms"
return meanExecTime
runMultipleBatches_ :: (?execRequests :: ExecReqFn) => Int -> Int -> Int -> IO ()
runMultipleBatches_ keyCount reqCount batchCount = (\e -> traceM $ "mean execution time: " ++ show e ++ " ms") =<< runMultipleBatches keyCount reqCount batchCount
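-- Example (illustrative): `runMultipleBatches_ 2000 20 20` loads a store with
-- 2000 keys, then times 20 batches of 20 requests each and prints the mean
-- batch execution time in milliseconds.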
data ScalabilityResult = ScalabilityResult { version :: String
, results :: [Double] }
scalability name numThreads = do
results <- foldM (\res n -> do
_ <- setNumCapabilities n
r <- runMultipleBatches 2000 20 20
return $ res ++ [r])
[]
$ take numThreads [1,2..]
return (name,results)
buildSuite (BenchmarkConfig maxThreadCount) =
[testCase "Done!" runAsSingleTest]
where
runAsSingleTest = do
results <- forM versions $ \(version, name) -> let ?execRequests = version in scalability name maxThreadCount
BS.writeFile "scalability.json" $ AE.encode $ HM.fromList results
data BenchmarkConfig = BenchmarkConfig { maxThreadCount :: Int }
benchmarkOptionsParser = BenchmarkConfig <$> (read <$> strOption
( short 't'
<> long "num-threads"
<> metavar "NUM_THREADS"
<> help "Maximum number of threads."
<> showDefault
<> value "8" ))
|
Rebol [
Title: "Callback demonstration for FFI extension: Quicksort"
File: %qsort.r
Description: {
qsort() is defined in the standard C library in <stdlib.h>. It can be
used to sort an array of arbitrary data by passing it a pointer to a
C function which serves as the comparison operator. Using only the
logical result of the comparison, the fixed-size elements of the
array can be put in order by the generic quicksort algorithm.
https://en.wikipedia.org/wiki/Qsort
An FFI "routine" can provide a bridge function for Rebol to invoke
qsort(), translating Rebol datatypes to corresponding values that can
be read by the C code. *But* the comparison operation needs to be
passed to qsort() as a C function pointer. Because while the C code
is running, it doesn't know how to invoke the Rebol interpreter to
run an ACTION! directly.
LibFFI provides a "closure" facility, which allows the dynamic
creation of an artificial C function pointer at runtime:
http://www.chiark.greenend.org.uk/doc/libffi-dev/html/The-Closure-API.html
The Rebol FFI interface uses this in WRAP-CALLBACK, which lets a
Rebol function be called from C, with particular expectations of the
C data types used to invoke it. The parameter language and supported
types used is the same as that in MAKE-ROUTINE.
}
Notes: {
The C language does not have strict typing rules for the arguments
to functions passed by pointer. This means when a function takes a
function pointer as an argument, there's not enough information in
that function's annotated specification to automatically "callbackify"
a Rebol ACTION! used as an argument.
While Rebol's routine spec language could try to remedy this by
forcing function pointer arguments to state precise typing, this
would be at odds with how such C functions may work: e.g. another
one of the function's parameters may dictate the choice of what
type of parameter the callback receives.
}
See-Also: {
"user natives", which embed a TCC compiler into the Rebol
executable. This provides an alternative for those who would prefer
to write their callbacks directly in C, yet still include that C
source in a Rebol module.
}
]
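; For reference, the C prototype being bridged here (from <stdlib.h>) is:
;
;     void qsort(void *base, size_t nmemb, size_t size,
;                int (*compar)(const void *, const void *));
;
; The comparator receives raw pointers to two array elements and returns a
; negative, zero, or positive integer; the function F below plays that role.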
recycle/torture
f: function [
a [integer!]
b [integer!]
][
i: make struct! compose/deep [
[raw-memory: (a)]
i [int32]
]
j: make struct! compose/deep [
[raw-memory: (b)]
i [int32]
]
case [
i/i = j/i [0]
i/i < j/i [-1]
i/i > j/i [1]
]
]
; This test uses WRAP-CALLBACK and not MAKE-CALLBACK (which creates the
; function and wraps it in one step). This is in order to be easier to
; debug if something is going wrong.
;
cb: wrap-callback :f [
return: [int64]
a [pointer]
b [pointer]
]
libc: make library! %libc.so.6
x64?: 40 = fifth system/version
size_t: either x64? ['int64]['int32]
qsort: make-routine libc "qsort" compose/deep [
base [pointer]
nmemb [(size_t)]
size [(size_t)]
comp [pointer]
]
array: make vector! [integer! 32 5 [10 8 2 9 5]]
print ["before:" mold array]
probe (addr-of :cb)
qsort array 5 4 (addr-of :cb)
print ["after:" mold array]
assert [array = make vector! [integer! 32 5 [2 5 8 9 10]]]
close libc
|
module FinancialTimeseries.Statistics.Statistics where
import qualified Data.Vector as Vec
import Data.Vector (Vector)
import qualified Data.Vector.Algorithms.Merge as Merge
import qualified Statistics.Sample as Sample
import FinancialTimeseries.Type.Chart (Chart(..), LChart)
import FinancialTimeseries.Type.Labeled (Labeled)
import FinancialTimeseries.Type.Table (Table(..), Cell(..), Row, row)
data Quantiles a = Quantiles {
q05 :: Maybe a
, q25 :: Maybe a
, q50 :: Maybe a
, q75 :: Maybe a
, q95 :: Maybe a
} deriving (Show)
instance Row Quantiles where
row (Quantiles a b c d e) = map (CString . maybe "n/a" show) [a, b, c, d, e]
data Probabilities a = Probabilities {
p0'50 :: a
, p0'75 :: a
, p1'00 :: a
, p1'25 :: a
, p1'50 :: a
, p1'75 :: a
, p2'00 :: a
} deriving (Show)
instance Row Probabilities where
row (Probabilities a b c d e f g) = map Cell [a, b, c, d, e, f, g]
data TradeMoments a = TradeMoments {
trMaxYield :: a
, trMinYield :: a
, trMeanYield :: a
, trStdDevYield :: a
} deriving (Show)
instance Row TradeMoments where
row (TradeMoments a b c d) = map Cell [a, b, c, d]
data TimeseriesMoments a = TimeseriesMoments {
tsMaxYield :: a
, tsMinYield :: a
, tsMeanYield :: a
, tsStdDevYield :: a
} deriving (Show)
instance Row TimeseriesMoments where
row (TimeseriesMoments a b c d) = map Cell [a, b, c, d]
data Stats moments a = Stats {
sampleSize :: Int
, quantiles :: Quantiles a
, probabilities :: Probabilities a
, moments :: moments a
, cdf :: Vector (Double, a)
} deriving (Show)
tradeMoments ::
(Real a, Fractional a) =>
Vector a -> TradeMoments a
tradeMoments sorted =
let sortedFrac = Vec.map realToFrac sorted
in TradeMoments {
trMaxYield = Vec.last sorted
, trMinYield = Vec.head sorted
, trMeanYield = realToFrac $ exp (Sample.mean (Vec.map log sortedFrac))
, trStdDevYield = realToFrac $ exp (Sample.stdDev (Vec.map log sortedFrac))
}
timeseriesMoments ::
(Real a, Fractional a) =>
Vector a -> TimeseriesMoments a
timeseriesMoments sorted =
let sortedFrac = Vec.map realToFrac sorted
in TimeseriesMoments {
tsMaxYield = Vec.last sorted
, tsMinYield = Vec.head sorted
, tsMeanYield = realToFrac $ Sample.mean sortedFrac
, tsStdDevYield = realToFrac $ Sample.stdDev sortedFrac
}
stats2list ::
(Row moments, Show a) =>
[Labeled params (Stats moments a)] -> [Table params a]
stats2list xs =
let qheaders = map CString ["Q05", "Q25", "Q50", "Q75", "Q95", "Sample Size"]
pheaders = map CString ["P(X < 0.5)", "P(X < 0.75)", "P(X < 1.0)", "P(X < 1.25)", "P(X < 1.5)", "P(X < 1.75)", "P(X < 2.0)", "Sample Size"]
mheaders = map CString ["Max.", "Min.", "Mean", "StdDev.", "Sample Size"]
mkRow g x = row (g x) ++ [CInt (sampleSize x)]
in Table "Quantiles" qheaders (map (fmap (mkRow quantiles)) xs)
: Table "Moments" mheaders (map (fmap (mkRow moments)) xs)
: Table "Probabilities" pheaders (map (fmap (mkRow probabilities)) xs)
: []
stats2cdfChart :: [Labeled params (Stats moments a)] -> LChart params Double a
stats2cdfChart = Chart "CDF" . map (fmap ((:[]) . cdf))
statisticsWithMoments ::
(Ord a, Num a, Fractional a, Real a) =>
(Vector a -> moments a) -> Vector a -> Stats moments a
statisticsWithMoments mms vs =
let noe = fromIntegral (Vec.length vs)
sorted = Vec.modify Merge.sort vs
quart s = sorted Vec.!? (round (s * noe :: Double))
q = Quantiles {
q05 = quart 0.05
, q25 = quart 0.25
, q50 = quart 0.50
, q75 = quart 0.75
, q95 = quart 0.95
}
prob s = fromIntegral (Vec.length (Vec.takeWhile (<s) sorted)) / noe
p = Probabilities {
p0'50 = prob 0.5
, p0'75 = prob 0.75
, p1'00 = prob 1.0
, p1'25 = prob 1.25
, p1'50 = prob 1.5
, p1'75 = prob 1.75
, p2'00 = prob 2.00
}
in Stats {
sampleSize = Vec.length vs
, quantiles = q
, probabilities = p
, moments = mms sorted
, cdf = Vec.imap (\i x -> (fromIntegral i / noe, x)) sorted
}
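-- A small worked example (illustrative only): for the sample [1.0,2.0,3.0,4.0]
-- we have noe = 4 and sorted = [1.0,2.0,3.0,4.0], so `quart 0.5` looks up index
-- round (0.5 * 4) = 2, i.e. q50 = Just 3.0, while `prob 2.5` would count the
-- 2 elements below 2.5, giving 0.5.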
timeseriesStatistics ::
(Real a, Fractional a) =>
Vector a -> Stats TimeseriesMoments a
timeseriesStatistics = statisticsWithMoments timeseriesMoments
tradeStatistics ::
(Real a, Fractional a) =>
Vector a -> Stats TradeMoments a
tradeStatistics = statisticsWithMoments tradeMoments
yield :: (Fractional a) => Vector a -> a
yield v = Vec.last v / Vec.head v
absoluteDrawdown :: (Ord a, Fractional a) => Vector a -> a
absoluteDrawdown v = Vec.minimum v / Vec.head v
relativeDrawdown :: (Ord a, Num a, Fractional a) => Vector a -> a
relativeDrawdown v = Vec.minimum (Vec.zipWith (/) v (Vec.postscanl max 0 v))
|
import hilbert.wr.dc_neg
import hilbert.wr.proofs.dc
namespace clfrags
namespace hilbert
namespace wr
namespace dc_neg
theorem dcn₁_dc {a b c d e f : Prop} (h₁ : dc e f (dc c d (dc b a (neg a))))
: dc e f (dc c d b) :=
have h₂ : dc (dc e f c) (dc e f d) (dc b a (neg a)), from dc.dc₆' h₁,
have h₃ : dc (dc e f c) (dc e f d) b, from dcn₁ h₂,
show dc e f (dc c d b), from dc.dc₇' h₃
theorem dcn₂_dc {a b c d e f : Prop} (h₁ : dc e f (dc c d b))
: dc e f (dc c d (dc b a (neg a))) :=
have h₂ : dc (dc e f c) (dc e f d) b, from dc.dc₆' h₁,
have h₃ : dc (dc e f c) (dc e f d) (dc b a (neg a)), from dcn₂ h₂,
show dc e f (dc c d (dc b a (neg a))), from dc.dc₇' h₃
theorem n₁ {a b : Prop} (h₁ : a) (h₂ : neg a) : b :=
have h₂ : dc a (neg a) b, from dc.dc₁ h₁ h₂,
have h₃ : dc (dc a (neg a) b) a b, from dc.dc₁ h₂ h₁,
have h₄ : dc a b (dc a (neg a) b), from dc.dc₅' (dc.dc₄' h₃),
have h₅ : dc a b (dc b a (neg a)), from dc.dc₄ (dc.dc₅ h₄),
have h₆ : dc a b b, from dcn₁ h₅,
show b, from dc.dc₂ h₆
theorem dcn₃ {a b : Prop} (h₁ : b) : dc b a (neg a) :=
have h₂ : dc a b b, from dc.dc₃ h₁,
have h₃ : dc a b (dc b a (neg a)), from dcn₂ h₂,
have h₄ : dc (dc a b b) (dc a b a) (neg a), from dc.dc₆' h₃,
have h₅ : dc (neg a) (dc a b b) (dc a b a), from dc.dc₄' (dc.dc₅' h₄),
have h₆ : dc (neg a) (dc a b b) (dc b a a), from dc.dc₅ (dc.dc₄ h₅),
have h₇ : dc (neg a) (dc a b b) a, from dc.dc₂_dc h₆,
have h₈ : dc (neg a) a (dc a b b), from dc.dc₅' h₇,
have h₉ : dc (neg a) a b, from dc.dc₂_dc h₈,
show dc b a (neg a), from dc.dc₅' (dc.dc₄' (dc.dc₅' h₉))
end dc_neg
end wr
end hilbert
end clfrags
|
@testset "Wall Constraint" begin
# Test WallConstraint
T = Float64
n = 4
P = 5
x = 4
y = 2
X = SVector{n,T}([13.0, 1.0, -12.0, 1.0])
x1 = SVector{P,T}([ 0.0, 0.0, 1.0, 3.0, -2.0])
y1 = SVector{P,T}([ 1.0, -1.0, 2.0, 2.0, 0.0])
x2 = SVector{P,T}([ 1.0, 1.0, 2.0, 2.0, 0.0])
y2 = SVector{P,T}([ 0.0, 0.0, 1.0, 1.0, 0.0])
xv = SVector{P,T}([ 1.0, 1.0, 1.0, 1.0, 0.0])./sqrt(2)
yv = SVector{P,T}([ 1.0, -1.0, 1.0, -1.0, sqrt(2)])./sqrt(2)
con = WallConstraint(n,x1,y1,x2,y2,xv,yv,x,y)
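    # Reading of the geometry (consistent with the expected values below): each
    # wall i is the segment from (x1[i],y1[i]) to (x2[i],y2[i]) with unit normal
    # (xv[i],yv[i]); the constraint value is the signed distance of the point
    # (X[x],X[y]) = (1.0,1.0) along the normal when the point projects onto the
    # segment, and 0 otherwise. E.g. wall 1 is the line x + y = 1, giving the
    # value (1 + 1 - 1)/√2 = √2/2.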
@test norm(TrajectoryOptimization.evaluate(con,X) - [sqrt(2)/2, 0.0, -sqrt(2)/2, 0.0, 0.0], 1) < 1e-10
@test (@ballocated TrajectoryOptimization.evaluate($con,$X)) == 0
function easy_jacobian(con,X)
function local_evaluate(X)
return TrajectoryOptimization.evaluate(con,X)
end
return ForwardDiff.jacobian(local_evaluate, X)
end
∇c_easy = easy_jacobian(con, X)
∇c = zeros(MMatrix{P,n,T,P*n})
jacobian!(∇c,con,X)
@test norm(∇c - ∇c_easy, 1) < 1e-10
@test (@ballocated jacobian!($∇c,$con,$X)) == 0
# Test Wall3DConstraint
T = Float64
n = 6
P = 5
x = 4
y = 2
z = 1
X = 1
X0 = SVector{n,T}([0.0, 0.10, -12.0, 0.10, 12.0, 11.0])
X1 = SVector{n,T}([1.0, 0.55, -12.0, 0.55, 12.0, 11.0])
X2 = SVector{n,T}([1.0, 1.25, -12.0, 0.75, 12.0, 11.0])
x1 = SVector{P,T}([ 0.0, 0.0, 0.0, 0.0, 0.0])
y1 = SVector{P,T}([ 0.0, 0.0, 0.0, 0.0, 0.0])
z1 = SVector{P,T}([ 0.0, 0.0, 0.0, 0.0, 0.0])
x2 = SVector{P,T}([ 1.0, 1.0, 1.0, 1.0, 1.0])
y2 = SVector{P,T}([ 0.0, 0.0, 0.0, 0.0, 0.0])
z2 = SVector{P,T}([ 0.0, 0.0, 1.0, 0.0, 0.0])
x3 = SVector{P,T}([ 1.0, 1.0, 1.0, 0.0, 0.0])
y3 = SVector{P,T}([ 1.0, 1.0, 1.0, 1.0, 1.0])
z3 = SVector{P,T}([ 0.0, 1.0, 1.0, 0.0, 1.0])
xv = SVector{P,T}([ 0.0, 0.0, -1.0/sqrt(2), 0.0, 0.0])
yv = SVector{P,T}([ 0.0, -1.0/sqrt(2), 0.0, 0.0, -1.0/sqrt(2)])
zv = SVector{P,T}([ 1.0, 1.0/sqrt(2), 1.0/sqrt(2), 1.0, 1.0/sqrt(2)])
con = Wall3DConstraint(n,x1,y1,z1,x2,y2,z2,x3,y3,z3,xv,yv,zv,x,y,z)
@test norm(TrajectoryOptimization.evaluate(con,X0) - [0.0, -0.1/sqrt(2), -0.1/sqrt(2), 0.0, -0.1/sqrt(2)], 1) < 1e-10
@test norm(TrajectoryOptimization.evaluate(con,X1) - [1.0, 0.45/sqrt(2), 0.45/sqrt(2), 1.0, 0.45/sqrt(2)], 1) < 1e-10
@test norm(TrajectoryOptimization.evaluate(con,X2) - [0.0, 0.0, 0.0, 1.0, -0.25/sqrt(2)], 1) < 1e-10
@test (@ballocated TrajectoryOptimization.evaluate($con,$X0)) == 0
∇c_easy = easy_jacobian(con, X0)
∇c = zeros(MMatrix{P,n,T,P*n})
jacobian!(∇c,con,X0)
@test norm(∇c - ∇c_easy, 1) < 1e-10
@test (@ballocated jacobian!($∇c,$con,$X0)) == 0
end
|
theorem tst1 {α : Type} {p : Prop} (xs : List α) (h₁ : (a : α) → (as : List α) → xs = a :: as → p) (h₂ : xs = [] → p) : p :=
by match (generalizing := false) h:xs with
| [] => exact h₂ h
| z::zs => apply h₁ z zs; assumption
theorem tst1' {α : Type} {p : Prop} (xs : List α) (h₁ : (a : α) → (as : List α) → xs = a :: as → p) (h₂ : xs = [] → p) : p :=
by match xs with
| [] => exact h₂ rfl
| z::zs => exact h₁ z zs rfl
theorem tst2 {α : Type} {p : Prop} (xs : List α) (h₁ : (a : α) → (as : List α) → xs = a :: as → p) (h₂ : xs = [] → p) : p :=
by match (generalizing := false) h:xs with
| [] => ?nilCase
| z::zs => ?consCase;
case consCase => exact h₁ z zs h;
case nilCase => exact h₂ h
def tst3 {α β γ : Type} (h : α × β × γ) : β × α × γ :=
by {
match h with
| (a, b, c) => exact (b, a, c)
}
theorem tst4 {α : Type} {p : Prop} (xs : List α) (h₁ : (a : α) → (as : List α) → xs = a :: as → p) (h₂ : xs = [] → p) : p := by
match (generalizing := false) h:xs with
| [] => _
| z::zs => _
case match_2 => exact h₁ z zs h
exact h₂ h
theorem tst5 {p q r} (h : p ∨ q ∨ r) : r ∨ q ∨ p:= by
match h with
| Or.inl h => exact Or.inr (Or.inr h)
| Or.inr (Or.inl h) => ?c1
| Or.inr (Or.inr h) => ?c2
case c2 =>
apply Or.inl
assumption
case c1 =>
apply Or.inr
apply Or.inl
assumption
theorem tst6 {p q r} (h : p ∨ q ∨ r) : r ∨ q ∨ p:= by
match h with
| Or.inl h => exact Or.inr (Or.inr h)
| Or.inr (Or.inl h) => ?c1
| Or.inr (Or.inr h) =>
apply Or.inl
assumption
case c1 => apply Or.inr; apply Or.inl; assumption
theorem tst7 {p q r} (h : p ∨ q ∨ r) : r ∨ q ∨ p:=
by match h with
| Or.inl h =>
exact Or.inr (Or.inr h)
| Or.inr (Or.inl h) =>
apply Or.inr;
apply Or.inl;
assumption
| Or.inr (Or.inr h) =>
apply Or.inl;
assumption
inductive ListLast.{u} {α : Type u} : List α → Type u
| empty : ListLast []
| nonEmpty : (as : List α) → (a : α) → ListLast (as ++ [a])
axiom last {α} (xs : List α) : ListLast xs
axiom back {α} [Inhabited α] (xs : List α) : α
axiom popBack {α} : List α → List α
axiom backEq {α} [Inhabited α] : (xs : List α) → (x : α) → back (xs ++ [x]) = x
axiom popBackEq {α} : (xs : List α) → (x : α) → popBack (xs ++ [x]) = xs
theorem tst8 {α} [Inhabited α] (xs : List α) : xs ≠ [] → xs = popBack xs ++ [back xs] :=
match (generalizing := false) xs, h:last xs with
| _, ListLast.empty => fun h => absurd rfl h
| _, ListLast.nonEmpty ys y => fun _ => sorry
theorem tst9 {α} [Inhabited α] (xs : List α) : xs ≠ [] → xs = popBack xs ++ [back xs] := by
match (generalizing := false) xs, h:last xs with
| _, ListLast.empty => intro h; exact absurd rfl h
| _, ListLast.nonEmpty ys y => intro; rw [popBackEq, backEq]
theorem tst8' {α} [Inhabited α] (xs : List α) : xs ≠ [] → xs = popBack xs ++ [back xs] :=
match xs, last xs with
| _, ListLast.empty => fun h => absurd rfl h
| _, ListLast.nonEmpty ys y => fun _ => sorry
theorem tst8'' {α} [Inhabited α] (xs : List α) (h : xs ≠ []) : xs = popBack xs ++ [back xs] :=
match xs, last xs with
| _, ListLast.empty => absurd rfl h
| _, ListLast.nonEmpty ys y => sorry
|
/-
Copyright (c) 2015, 2017 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Metric spaces.
Authors: Jeremy Avigad, Robert Y. Lewis, Johannes Hölzl, Mario Carneiro, Sébastien Gouëzel
Many definitions and theorems expected on metric spaces are already introduced on uniform spaces and
topological spaces. For example:
open and closed sets, compactness, completeness, continuity and uniform continuity
-/
import data.real.nnreal topology.metric_space.emetric_space topology.algebra.ordered
open lattice set filter classical topological_space
noncomputable theory
local notation `𝓤` := uniformity
universes u v w
variables {α : Type u} {β : Type v} {γ : Type w}
/-- Construct a uniform structure from a distance function and metric space axioms -/
def uniform_space_of_dist
(dist : α → α → ℝ)
(dist_self : ∀ x : α, dist x x = 0)
(dist_comm : ∀ x y : α, dist x y = dist y x)
(dist_triangle : ∀ x y z : α, dist x z ≤ dist x y + dist y z) : uniform_space α :=
uniform_space.of_core {
uniformity := (⨅ ε>0, principal {p:α×α | dist p.1 p.2 < ε}),
refl := le_infi $ assume ε, le_infi $
by simp [set.subset_def, id_rel, dist_self, (>)] {contextual := tt},
comp := le_infi $ assume ε, le_infi $ assume h, lift'_le
(mem_infi_sets (ε / 2) $ mem_infi_sets (div_pos_of_pos_of_pos h two_pos) (subset.refl _)) $
have ∀ (a b c : α), dist a c < ε / 2 → dist c b < ε / 2 → dist a b < ε,
from assume a b c hac hcb,
calc dist a b ≤ dist a c + dist c b : dist_triangle _ _ _
... < ε / 2 + ε / 2 : add_lt_add hac hcb
... = ε : by rw [div_add_div_same, add_self_div_two],
by simpa [comp_rel],
symm := tendsto_infi.2 $ assume ε, tendsto_infi.2 $ assume h,
tendsto_infi' ε $ tendsto_infi' h $ tendsto_principal_principal.2 $ by simp [dist_comm] }
/-- The distance function (given an ambient metric space on `α`), which returns
a nonnegative real number `dist x y` given `x y : α`. -/
class has_dist (α : Type*) := (dist : α → α → ℝ)
export has_dist (dist)
/-- Metric space
Each metric space induces a canonical `uniform_space` and hence a canonical `topological_space`.
This is enforced in the type class definition, by extending the `uniform_space` structure. When
instantiating a `metric_space` structure, the uniformity fields are not necessary, they will be
filled in by default. In the same way, each metric space induces an emetric space structure.
It is included in the structure, but filled in by default.
When one instantiates a metric space structure, for instance a product structure,
this makes it possible to use a uniform structure and an edistance that are exactly
the ones for the uniform spaces product and the emetric spaces products, thereby
ensuring that everything is defeq in diamonds. -/
class metric_space (α : Type u) extends has_dist α : Type u :=
(dist_self : ∀ x : α, dist x x = 0)
(eq_of_dist_eq_zero : ∀ {x y : α}, dist x y = 0 → x = y)
(dist_comm : ∀ x y : α, dist x y = dist y x)
(dist_triangle : ∀ x y z : α, dist x z ≤ dist x y + dist y z)
(edist : α → α → ennreal := λx y, ennreal.of_real (dist x y))
(edist_dist : ∀ x y : α, edist x y = ennreal.of_real (dist x y) . control_laws_tac)
(to_uniform_space : uniform_space α := uniform_space_of_dist dist dist_self dist_comm dist_triangle)
(uniformity_dist : 𝓤 α = ⨅ ε>0, principal {p:α×α | dist p.1 p.2 < ε} . control_laws_tac)
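/- A minimal sketch (not part of this file): thanks to the default fields, an
instance only has to provide the four core axioms, e.g. for a one-point type

  instance : metric_space punit :=
  { dist               := λ _ _, 0,
    dist_self          := λ _, rfl,
    eq_of_dist_eq_zero := λ a b _, subsingleton.elim a b,
    dist_comm          := λ _ _, rfl,
    dist_triangle      := λ _ _ _, by simp }

`edist`, `to_uniform_space` and the compatibility proofs are then filled in
automatically by the default values and `control_laws_tac`. -/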
variables [metric_space α]
instance metric_space.to_uniform_space' : uniform_space α :=
metric_space.to_uniform_space α
instance metric_space.to_has_edist : has_edist α := ⟨metric_space.edist⟩
@[simp] theorem dist_self (x : α) : dist x x = 0 := metric_space.dist_self x
theorem eq_of_dist_eq_zero {x y : α} : dist x y = 0 → x = y :=
metric_space.eq_of_dist_eq_zero
theorem dist_comm (x y : α) : dist x y = dist y x := metric_space.dist_comm x y
theorem edist_dist (x y : α) : edist x y = ennreal.of_real (dist x y) :=
metric_space.edist_dist _ x y
@[simp] theorem dist_eq_zero {x y : α} : dist x y = 0 ↔ x = y :=
iff.intro eq_of_dist_eq_zero (assume : x = y, this ▸ dist_self _)
@[simp] theorem zero_eq_dist {x y : α} : 0 = dist x y ↔ x = y :=
by rw [eq_comm, dist_eq_zero]
theorem dist_triangle (x y z : α) : dist x z ≤ dist x y + dist y z :=
metric_space.dist_triangle x y z
theorem dist_triangle_left (x y z : α) : dist x y ≤ dist z x + dist z y :=
by rw dist_comm z; apply dist_triangle
theorem dist_triangle_right (x y z : α) : dist x y ≤ dist x z + dist y z :=
by rw dist_comm y; apply dist_triangle
lemma dist_triangle4 (x y z w : α) :
dist x w ≤ dist x y + dist y z + dist z w :=
calc
dist x w ≤ dist x z + dist z w : dist_triangle x z w
... ≤ (dist x y + dist y z) + dist z w : add_le_add_right (metric_space.dist_triangle x y z) _
lemma dist_triangle4_left (x₁ y₁ x₂ y₂ : α) :
dist x₂ y₂ ≤ dist x₁ y₁ + (dist x₁ x₂ + dist y₁ y₂) :=
by rw [add_left_comm, dist_comm x₁, ← add_assoc]; apply dist_triangle4
lemma dist_triangle4_right (x₁ y₁ x₂ y₂ : α) :
dist x₁ y₁ ≤ dist x₁ x₂ + dist y₁ y₂ + dist x₂ y₂ :=
by rw [add_right_comm, dist_comm y₁]; apply dist_triangle4
theorem swap_dist : function.swap (@dist α _) = dist :=
by funext x y; exact dist_comm _ _
theorem abs_dist_sub_le (x y z : α) : abs (dist x z - dist y z) ≤ dist x y :=
abs_sub_le_iff.2
⟨sub_le_iff_le_add.2 (dist_triangle _ _ _),
sub_le_iff_le_add.2 (dist_triangle_left _ _ _)⟩
theorem dist_nonneg {x y : α} : 0 ≤ dist x y :=
have 2 * dist x y ≥ 0,
from calc 2 * dist x y = dist x y + dist y x : by rw [dist_comm x y, two_mul]
... ≥ 0 : by rw ← dist_self x; apply dist_triangle,
nonneg_of_mul_nonneg_left this two_pos
@[simp] theorem dist_le_zero {x y : α} : dist x y ≤ 0 ↔ x = y :=
by simpa [le_antisymm_iff, dist_nonneg] using @dist_eq_zero _ _ x y
@[simp] theorem dist_pos {x y : α} : 0 < dist x y ↔ x ≠ y :=
by simpa [-dist_le_zero] using not_congr (@dist_le_zero _ _ x y)
@[simp] theorem abs_dist {a b : α} : abs (dist a b) = dist a b :=
abs_of_nonneg dist_nonneg
theorem eq_of_forall_dist_le {x y : α} (h : ∀ε, ε > 0 → dist x y ≤ ε) : x = y :=
eq_of_dist_eq_zero (eq_of_le_of_forall_le_of_dense dist_nonneg h)
def nndist (a b : α) : nnreal := ⟨dist a b, dist_nonneg⟩
/--Express `nndist` in terms of `edist`-/
lemma nndist_edist (x y : α) : nndist x y = (edist x y).to_nnreal :=
by simp [nndist, edist_dist, nnreal.of_real, max_eq_left dist_nonneg, ennreal.of_real]
/--Express `edist` in terms of `nndist`-/
lemma edist_nndist (x y : α) : edist x y = ↑(nndist x y) :=
by simp [nndist, edist_dist, nnreal.of_real, max_eq_left dist_nonneg, ennreal.of_real]
/--In a metric space, the extended distance is always finite-/
lemma edist_ne_top (x y : α) : edist x y ≠ ⊤ :=
by rw [edist_dist x y]; apply ennreal.coe_ne_top
/--`nndist x x` vanishes-/
@[simp] lemma nndist_self (a : α) : nndist a a = 0 := (nnreal.coe_eq_zero _).1 (dist_self a)
/--Express `dist` in terms of `nndist`-/
lemma dist_nndist (x y : α) : dist x y = ↑(nndist x y) := rfl
/--Express `nndist` in terms of `dist`-/
lemma nndist_dist (x y : α) : nndist x y = nnreal.of_real (dist x y) :=
by rw [dist_nndist, nnreal.of_real_coe]
/--Deduce the equality of points with the vanishing of the nonnegative distance-/
theorem eq_of_nndist_eq_zero {x y : α} : nndist x y = 0 → x = y :=
by simp only [nnreal.eq_iff.symm, (dist_nndist _ _).symm, imp_self, nnreal.coe_zero, dist_eq_zero]
theorem nndist_comm (x y : α) : nndist x y = nndist y x :=
by simpa [nnreal.eq_iff.symm] using dist_comm x y
/--Characterize the equality of points with the vanishing of the nonnegative distance-/
@[simp] theorem nndist_eq_zero {x y : α} : nndist x y = 0 ↔ x = y :=
by simp only [nnreal.eq_iff.symm, (dist_nndist _ _).symm, imp_self, nnreal.coe_zero, dist_eq_zero]
@[simp] theorem zero_eq_nndist {x y : α} : 0 = nndist x y ↔ x = y :=
by simp only [nnreal.eq_iff.symm, (dist_nndist _ _).symm, imp_self, nnreal.coe_zero, zero_eq_dist]
/--Triangle inequality for the nonnegative distance-/
theorem nndist_triangle (x y z : α) : nndist x z ≤ nndist x y + nndist y z :=
by simpa [nnreal.coe_le] using dist_triangle x y z
theorem nndist_triangle_left (x y z : α) : nndist x y ≤ nndist z x + nndist z y :=
by simpa [nnreal.coe_le] using dist_triangle_left x y z
theorem nndist_triangle_right (x y z : α) : nndist x y ≤ nndist x z + nndist y z :=
by simpa [nnreal.coe_le] using dist_triangle_right x y z
/--Express `dist` in terms of `edist`-/
lemma dist_edist (x y : α) : dist x y = (edist x y).to_real :=
by rw [edist_dist, ennreal.to_real_of_real (dist_nonneg)]
namespace metric
/- instantiate metric space as a topology -/
variables {x y z : α} {ε ε₁ ε₂ : ℝ} {s : set α}
/-- `ball x ε` is the set of all points `y` with `dist y x < ε` -/
def ball (x : α) (ε : ℝ) : set α := {y | dist y x < ε}
@[simp] theorem mem_ball : y ∈ ball x ε ↔ dist y x < ε := iff.rfl
theorem mem_ball' : y ∈ ball x ε ↔ dist x y < ε := by rw dist_comm; refl
/-- `closed_ball x ε` is the set of all points `y` with `dist y x ≤ ε` -/
def closed_ball (x : α) (ε : ℝ) := {y | dist y x ≤ ε}
@[simp] theorem mem_closed_ball : y ∈ closed_ball x ε ↔ dist y x ≤ ε := iff.rfl
theorem ball_subset_closed_ball : ball x ε ⊆ closed_ball x ε :=
assume y, by simp; intros h; apply le_of_lt h
theorem pos_of_mem_ball (hy : y ∈ ball x ε) : ε > 0 :=
lt_of_le_of_lt dist_nonneg hy
theorem mem_ball_self (h : ε > 0) : x ∈ ball x ε :=
show dist x x < ε, by rw dist_self; assumption
theorem mem_closed_ball_self (h : ε ≥ 0) : x ∈ closed_ball x ε :=
show dist x x ≤ ε, by rw dist_self; assumption
theorem mem_ball_comm : x ∈ ball y ε ↔ y ∈ ball x ε :=
by simp [dist_comm]
theorem ball_subset_ball (h : ε₁ ≤ ε₂) : ball x ε₁ ⊆ ball x ε₂ :=
λ y (yx : _ < ε₁), lt_of_lt_of_le yx h
theorem closed_ball_subset_closed_ball {α : Type u} [metric_space α] {ε₁ ε₂ : ℝ} {x : α} (h : ε₁ ≤ ε₂) :
closed_ball x ε₁ ⊆ closed_ball x ε₂ :=
λ y (yx : _ ≤ ε₁), le_trans yx h
theorem ball_disjoint (h : ε₁ + ε₂ ≤ dist x y) : ball x ε₁ ∩ ball y ε₂ = ∅ :=
eq_empty_iff_forall_not_mem.2 $ λ z ⟨h₁, h₂⟩,
not_lt_of_le (dist_triangle_left x y z)
(lt_of_lt_of_le (add_lt_add h₁ h₂) h)
theorem ball_disjoint_same (h : ε ≤ dist x y / 2) : ball x ε ∩ ball y ε = ∅ :=
ball_disjoint $ by rwa [← two_mul, ← le_div_iff' two_pos]
theorem ball_subset (h : dist x y ≤ ε₂ - ε₁) : ball x ε₁ ⊆ ball y ε₂ :=
λ z zx, by rw ← add_sub_cancel'_right ε₁ ε₂; exact
lt_of_le_of_lt (dist_triangle z x y) (add_lt_add_of_lt_of_le zx h)
theorem ball_half_subset (y) (h : y ∈ ball x (ε / 2)) : ball y (ε / 2) ⊆ ball x ε :=
ball_subset $ by rw sub_self_div_two; exact le_of_lt h
theorem exists_ball_subset_ball (h : y ∈ ball x ε) : ∃ ε' > 0, ball y ε' ⊆ ball x ε :=
⟨_, sub_pos.2 h, ball_subset $ by rw sub_sub_self⟩
theorem ball_eq_empty_iff_nonpos : ε ≤ 0 ↔ ball x ε = ∅ :=
(eq_empty_iff_forall_not_mem.trans
⟨λ h, le_of_not_gt $ λ ε0, h _ $ mem_ball_self ε0,
λ ε0 y h, not_lt_of_le ε0 $ pos_of_mem_ball h⟩).symm
theorem uniformity_dist : 𝓤 α = (⨅ ε>0, principal {p:α×α | dist p.1 p.2 < ε}) :=
metric_space.uniformity_dist _
theorem uniformity_dist' : 𝓤 α = (⨅ε:{ε:ℝ // ε>0}, principal {p:α×α | dist p.1 p.2 < ε.val}) :=
by simp [infi_subtype]; exact uniformity_dist
theorem mem_uniformity_dist {s : set (α×α)} :
s ∈ 𝓤 α ↔ (∃ε>0, ∀{a b:α}, dist a b < ε → (a, b) ∈ s) :=
begin
rw [uniformity_dist', mem_infi],
simp [subset_def],
exact assume ⟨r, hr⟩ ⟨p, hp⟩, ⟨⟨min r p, lt_min hr hp⟩, by simp [lt_min_iff, (≥)] {contextual := tt}⟩,
exact ⟨⟨1, zero_lt_one⟩⟩
end
theorem dist_mem_uniformity {ε:ℝ} (ε0 : 0 < ε) :
{p:α×α | dist p.1 p.2 < ε} ∈ 𝓤 α :=
mem_uniformity_dist.2 ⟨ε, ε0, λ a b, id⟩
theorem uniform_continuous_iff [metric_space β] {f : α → β} :
uniform_continuous f ↔ ∀ ε > 0, ∃ δ > 0,
∀{a b:α}, dist a b < δ → dist (f a) (f b) < ε :=
uniform_continuous_def.trans
⟨λ H ε ε0, mem_uniformity_dist.1 $ H _ $ dist_mem_uniformity ε0,
λ H r ru,
let ⟨ε, ε0, hε⟩ := mem_uniformity_dist.1 ru, ⟨δ, δ0, hδ⟩ := H _ ε0 in
mem_uniformity_dist.2 ⟨δ, δ0, λ a b h, hε (hδ h)⟩⟩
theorem uniform_embedding_iff [metric_space β] {f : α → β} :
uniform_embedding f ↔ function.injective f ∧ uniform_continuous f ∧
∀ δ > 0, ∃ ε > 0, ∀ {a b : α}, dist (f a) (f b) < ε → dist a b < δ :=
uniform_embedding_def'.trans $ and_congr iff.rfl $ and_congr iff.rfl
⟨λ H δ δ0, let ⟨t, tu, ht⟩ := H _ (dist_mem_uniformity δ0),
⟨ε, ε0, hε⟩ := mem_uniformity_dist.1 tu in
⟨ε, ε0, λ a b h, ht _ _ (hε h)⟩,
λ H s su, let ⟨δ, δ0, hδ⟩ := mem_uniformity_dist.1 su, ⟨ε, ε0, hε⟩ := H _ δ0 in
⟨_, dist_mem_uniformity ε0, λ a b h, hδ (hε h)⟩⟩
theorem totally_bounded_iff {s : set α} :
totally_bounded s ↔ ∀ ε > 0, ∃t : set α, finite t ∧ s ⊆ ⋃y∈t, ball y ε :=
⟨λ H ε ε0, H _ (dist_mem_uniformity ε0),
λ H r ru, let ⟨ε, ε0, hε⟩ := mem_uniformity_dist.1 ru,
⟨t, ft, h⟩ := H ε ε0 in
⟨t, ft, subset.trans h $ Union_subset_Union $ λ y, Union_subset_Union $ λ yt z, hε⟩⟩
/-- A metric space is totally bounded if, up to any ε > 0, any element of the
space can be reconstructed from finitely many data. -/
lemma totally_bounded_of_finite_discretization {α : Type u} [metric_space α] {s : set α}
(H : ∀ε > (0 : ℝ), ∃ (β : Type u) [fintype β] (F : s → β),
∀x y, F x = F y → dist (x:α) y < ε) :
totally_bounded s :=
begin
classical, by_cases hs : s = ∅,
{ rw hs, exact totally_bounded_empty },
rcases exists_mem_of_ne_empty hs with ⟨x0, hx0⟩,
haveI : inhabited s := ⟨⟨x0, hx0⟩⟩,
refine totally_bounded_iff.2 (λ ε ε0, _),
rcases H ε ε0 with ⟨β, fβ, F, hF⟩,
let Finv := function.inv_fun F,
refine ⟨range (subtype.val ∘ Finv), finite_range _, λ x xs, _⟩,
let x' := Finv (F ⟨x, xs⟩),
have : F x' = F ⟨x, xs⟩ := function.inv_fun_eq ⟨⟨x, xs⟩, rfl⟩,
simp only [set.mem_Union, set.mem_range],
exact ⟨_, ⟨F ⟨x, xs⟩, rfl⟩, hF _ _ this.symm⟩
end
protected lemma cauchy_iff {f : filter α} :
cauchy f ↔ f ≠ ⊥ ∧ ∀ ε > 0, ∃ t ∈ f, ∀ x y ∈ t, dist x y < ε :=
cauchy_iff.trans $ and_congr iff.rfl
⟨λ H ε ε0, let ⟨t, tf, ts⟩ := H _ (dist_mem_uniformity ε0) in
⟨t, tf, λ x y xt yt, @ts (x, y) ⟨xt, yt⟩⟩,
λ H r ru, let ⟨ε, ε0, hε⟩ := mem_uniformity_dist.1 ru,
⟨t, tf, h⟩ := H ε ε0 in
⟨t, tf, λ ⟨x, y⟩ ⟨hx, hy⟩, hε (h x y hx hy)⟩⟩
theorem nhds_eq : nhds x = (⨅ε:{ε:ℝ // ε>0}, principal (ball x ε.val)) :=
begin
rw [nhds_eq_uniformity, uniformity_dist', lift'_infi],
{ apply congr_arg, funext ε,
rw [lift'_principal],
{ simp [ball, dist_comm] },
{ exact monotone_preimage } },
{ exact ⟨⟨1, zero_lt_one⟩⟩ },
{ intros, refl }
end
theorem mem_nhds_iff : s ∈ nhds x ↔ ∃ε>0, ball x ε ⊆ s :=
begin
rw [nhds_eq, mem_infi],
{ simp },
{ intros y z, cases y with y hy, cases z with z hz,
refine ⟨⟨min y z, lt_min hy hz⟩, _⟩,
simp [ball_subset_ball, min_le_left, min_le_right, (≥)] },
{ exact ⟨⟨1, zero_lt_one⟩⟩ }
end
theorem is_open_iff : is_open s ↔ ∀x∈s, ∃ε>0, ball x ε ⊆ s :=
by simp [is_open_iff_nhds, mem_nhds_iff]
theorem is_open_ball : is_open (ball x ε) :=
is_open_iff.2 $ λ y, exists_ball_subset_ball
theorem ball_mem_nhds (x : α) {ε : ℝ} (ε0 : 0 < ε) : ball x ε ∈ nhds x :=
mem_nhds_sets is_open_ball (mem_ball_self ε0)
theorem tendsto_nhds_nhds [metric_space β] {f : α → β} {a b} :
tendsto f (nhds a) (nhds b) ↔
∀ ε > 0, ∃ δ > 0, ∀{x:α}, dist x a < δ → dist (f x) b < ε :=
⟨λ H ε ε0, mem_nhds_iff.1 (H (ball_mem_nhds _ ε0)),
λ H s hs,
let ⟨ε, ε0, hε⟩ := mem_nhds_iff.1 hs, ⟨δ, δ0, hδ⟩ := H _ ε0 in
mem_nhds_iff.2 ⟨δ, δ0, λ x h, hε (hδ h)⟩⟩
theorem continuous_iff [metric_space β] {f : α → β} :
continuous f ↔
∀b (ε > 0), ∃ δ > 0, ∀a, dist a b < δ → dist (f a) (f b) < ε :=
continuous_iff_continuous_at.trans $ forall_congr $ λ b, tendsto_nhds_nhds
theorem exists_delta_of_continuous [metric_space β] {f : α → β} {ε : ℝ}
(hf : continuous f) (hε : ε > 0) (b : α) :
∃ δ > 0, ∀a, dist a b ≤ δ → dist (f a) (f b) < ε :=
let ⟨δ, δ_pos, hδ⟩ := continuous_iff.1 hf b ε hε in
⟨δ / 2, half_pos δ_pos, assume a ha, hδ a $ lt_of_le_of_lt ha $ div_two_lt_of_pos δ_pos⟩
theorem tendsto_nhds {f : filter β} {u : β → α} {a : α} :
tendsto u f (nhds a) ↔ ∀ ε > 0, ∃ n ∈ f, ∀x ∈ n, dist (u x) a < ε :=
by simp only [metric.nhds_eq, tendsto_infi, subtype.forall, tendsto_principal, mem_ball];
exact forall_congr (assume ε, forall_congr (assume hε, exists_sets_subset_iff.symm))
theorem continuous_iff' [topological_space β] {f : β → α} :
continuous f ↔ ∀a (ε > 0), ∃ n ∈ nhds a, ∀b ∈ n, dist (f b) (f a) < ε :=
continuous_iff_continuous_at.trans $ forall_congr $ λ b, tendsto_nhds
theorem tendsto_at_top [nonempty β] [semilattice_sup β] {u : β → α} {a : α} :
tendsto u at_top (nhds a) ↔ ∀ε>0, ∃N, ∀n≥N, dist (u n) a < ε :=
by simp only [metric.nhds_eq, tendsto_infi, subtype.forall, tendsto_at_top_principal]; refl
end metric
open metric
instance metric_space.to_separated : separated α :=
separated_def.2 $ λ x y h, eq_of_forall_dist_le $
λ ε ε0, le_of_lt (h _ (dist_mem_uniformity ε0))
/- Instantiate a metric space as an emetric space. Before we can state the
instance, we need to show that the uniform structures coming from the edistance
and from the distance coincide. -/
/-- Expressing the uniformity in terms of `edist` -/
protected lemma metric.mem_uniformity_edist {s : set (α×α)} :
s ∈ 𝓤 α ↔ (∃ε>0, ∀{a b:α}, edist a b < ε → (a, b) ∈ s) :=
begin
refine mem_uniformity_dist.trans ⟨_, _⟩; rintro ⟨ε, ε0, Hε⟩,
{ refine ⟨ennreal.of_real ε, _, λ a b, _⟩,
{ rwa [gt, ennreal.of_real_pos] },
{ rw [edist_dist, ennreal.of_real_lt_of_real_iff ε0],
exact Hε } },
{ rcases ennreal.lt_iff_exists_real_btwn.1 ε0 with ⟨ε', _, ε0', hε⟩,
rw [ennreal.of_real_pos] at ε0',
refine ⟨ε', ε0', λ a b h, Hε (lt_trans _ hε)⟩,
rwa [edist_dist, ennreal.of_real_lt_of_real_iff ε0'] }
end
protected theorem metric.uniformity_edist' : 𝓤 α = (⨅ε:{ε:ennreal // ε>0}, principal {p:α×α | edist p.1 p.2 < ε.val}) :=
begin
ext s, rw mem_infi,
{ simp [metric.mem_uniformity_edist, subset_def] },
{ rintro ⟨r, hr⟩ ⟨p, hp⟩, use ⟨min r p, lt_min hr hp⟩,
simp [lt_min_iff, (≥)] {contextual := tt} },
{ exact ⟨⟨1, ennreal.zero_lt_one⟩⟩ }
end
theorem uniformity_edist : 𝓤 α = (⨅ ε>0, principal {p:α×α | edist p.1 p.2 < ε}) :=
by simpa [infi_subtype] using @metric.uniformity_edist' α _
/-- A metric space induces an emetric space -/
instance metric_space.to_emetric_space : emetric_space α :=
{ edist := edist,
edist_self := by simp [edist_dist],
eq_of_edist_eq_zero := assume x y h, by simpa [edist_dist] using h,
edist_comm := by simp only [edist_dist, dist_comm]; simp,
edist_triangle := assume x y z, begin
simp only [edist_dist, (ennreal.of_real_add _ _).symm, dist_nonneg],
rw ennreal.of_real_le_of_real_iff _,
{ exact dist_triangle _ _ _ },
{ simpa using add_le_add (dist_nonneg : 0 ≤ dist x y) dist_nonneg }
end,
uniformity_edist := uniformity_edist,
..‹metric_space α› }
/-- Balls defined using the distance or the edistance coincide -/
lemma metric.emetric_ball {x : α} {ε : ℝ} : emetric.ball x (ennreal.of_real ε) = ball x ε :=
begin
classical, by_cases h : 0 < ε,
{ ext y, by simp [edist_dist, ennreal.of_real_lt_of_real_iff h] },
{ have h' : ε ≤ 0, by simpa using h,
have A : ball x ε = ∅, by simpa [ball_eq_empty_iff_nonpos.symm],
have B : emetric.ball x (ennreal.of_real ε) = ∅,
by simp [ennreal.of_real_eq_zero.2 h', emetric.ball_eq_empty_iff],
rwa [A, B] }
end
/-- Closed balls defined using the distance or the edistance coincide -/
lemma metric.emetric_closed_ball {x : α} {ε : ℝ} (h : 0 ≤ ε) :
emetric.closed_ball x (ennreal.of_real ε) = closed_ball x ε :=
by ext y; simp [edist_dist]; rw ennreal.of_real_le_of_real_iff h
def metric_space.replace_uniformity {α} [U : uniform_space α] (m : metric_space α)
(H : @uniformity _ U = @uniformity _ (metric_space.to_uniform_space α)) :
metric_space α :=
{ dist := @dist _ m.to_has_dist,
dist_self := dist_self,
eq_of_dist_eq_zero := @eq_of_dist_eq_zero _ _,
dist_comm := dist_comm,
dist_triangle := dist_triangle,
edist := edist,
edist_dist := edist_dist,
to_uniform_space := U,
uniformity_dist := H.trans (metric_space.uniformity_dist α) }
/-- One gets a metric space from an emetric space if the edistance
is everywhere finite. We set it up so that the edist and the uniformity are
defeq in the metric space and the emetric space. -/
def emetric_space.to_metric_space {α : Type u} [e : emetric_space α] (h : ∀x y: α, edist x y ≠ ⊤) :
metric_space α :=
let m : metric_space α :=
{ dist := λx y, ennreal.to_real (edist x y),
eq_of_dist_eq_zero := λx y hxy, by simpa [dist, ennreal.to_real_eq_zero_iff, h x y] using hxy,
dist_self := λx, by simp,
dist_comm := λx y, by simp [emetric_space.edist_comm],
dist_triangle := λx y z, begin
rw [← ennreal.to_real_add (h _ _) (h _ _), ennreal.to_real_le_to_real (h _ _)],
{ exact edist_triangle _ _ _ },
{ simp [ennreal.add_eq_top, h] }
end,
edist := λx y, edist x y,
edist_dist := λx y, by simp [ennreal.of_real_to_real, h] } in
metric_space.replace_uniformity m (by rw [uniformity_edist, uniformity_edist']; refl)
section real
/-- Instantiate the reals as a metric space. -/
instance real.metric_space : metric_space ℝ :=
{ dist := λx y, abs (x - y),
dist_self := by simp [abs_zero],
eq_of_dist_eq_zero := by simp [add_neg_eq_zero],
dist_comm := assume x y, abs_sub _ _,
dist_triangle := assume x y z, abs_sub_le _ _ _ }
theorem real.dist_eq (x y : ℝ) : dist x y = abs (x - y) := rfl
theorem real.dist_0_eq_abs (x : ℝ) : dist x 0 = abs x :=
by simp [real.dist_eq]
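-- e.g. `dist (1 : ℝ) 4 = 3`, since `abs (1 - 4) = abs (-3) = 3`.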
instance : orderable_topology ℝ :=
orderable_topology_of_nhds_abs $ λ x, begin
simp only [show ∀ r, {b : ℝ | abs (x - b) < r} = ball x r,
by simp [-sub_eq_add_neg, abs_sub, ball, real.dist_eq]],
apply le_antisymm,
{ simp [le_infi_iff],
exact λ ε ε0, mem_nhds_sets (is_open_ball) (mem_ball_self ε0) },
{ intros s h,
rcases mem_nhds_iff.1 h with ⟨ε, ε0, ss⟩,
exact mem_infi_sets _ (mem_infi_sets ε0 (mem_principal_sets.2 ss)) },
end
lemma closed_ball_Icc {x r : ℝ} : closed_ball x r = Icc (x-r) (x+r) :=
by ext y; rw [mem_closed_ball, dist_comm, real.dist_eq,
abs_sub_le_iff, mem_Icc, ← sub_le_iff_le_add', sub_le]
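-- e.g. `closed_ball (0 : ℝ) 1 = Icc (-1) 1`, the usual closed unit interval.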
lemma squeeze_zero {α} {f g : α → ℝ} {t₀ : filter α} (hf : ∀t, 0 ≤ f t) (hft : ∀t, f t ≤ g t)
(g0 : tendsto g t₀ (nhds 0)) : tendsto f t₀ (nhds 0) :=
begin
apply tendsto_of_tendsto_of_tendsto_of_le_of_le (tendsto_const_nhds) g0;
simp [*]; exact filter.univ_mem_sets
end
theorem metric.uniformity_eq_comap_nhds_zero :
𝓤 α = comap (λp:α×α, dist p.1 p.2) (nhds (0 : ℝ)) :=
begin
simp only [uniformity_dist', nhds_eq, comap_infi, comap_principal],
congr, funext ε,
rw [principal_eq_iff_eq],
ext ⟨a, b⟩,
simp [real.dist_0_eq_abs]
end
lemma cauchy_seq_iff_tendsto_dist_at_top_0 [inhabited β] [semilattice_sup β] {u : β → α} :
cauchy_seq u ↔ tendsto (λ (n : β × β), dist (u n.1) (u n.2)) at_top (nhds 0) :=
by rw [cauchy_seq_iff_prod_map, metric.uniformity_eq_comap_nhds_zero, ← map_le_iff_le_comap,
filter.map_map, tendsto, prod.map_def]
end real
section cauchy_seq
variables [inhabited β] [semilattice_sup β]
/-- In a metric space, Cauchy sequences are characterized by the fact that, eventually,
the distance between their elements is arbitrarily small. -/
theorem metric.cauchy_seq_iff {u : β → α} :
cauchy_seq u ↔ ∀ε>0, ∃N, ∀m n≥N, dist (u m) (u n) < ε :=
begin
unfold cauchy_seq,
rw metric.cauchy_iff,
simp only [true_and, exists_prop, filter.mem_at_top_sets, filter.at_top_ne_bot,
filter.mem_map, ne.def, filter.map_eq_bot_iff, not_false_iff, set.mem_set_of_eq],
split,
{ intros H ε εpos,
rcases H ε εpos with ⟨t, ⟨N, hN⟩, ht⟩,
exact ⟨N, λm n hm hn, ht _ _ (hN _ hm) (hN _ hn)⟩ },
{ intros H ε εpos,
rcases H (ε/2) (half_pos εpos) with ⟨N, hN⟩,
existsi ball (u N) (ε/2),
split,
{ exact ⟨N, λx hx, hN _ _ hx (le_refl N)⟩ },
{ exact λx y hx hy, calc
dist x y ≤ dist x (u N) + dist y (u N) : dist_triangle_right _ _ _
... < ε/2 + ε/2 : add_lt_add hx hy
... = ε : add_halves _ } }
end
/-- A variation around the metric characterization of Cauchy sequences -/
theorem metric.cauchy_seq_iff' {u : β → α} :
cauchy_seq u ↔ ∀ε>0, ∃N, ∀n≥N, dist (u n) (u N) < ε :=
begin
rw metric.cauchy_seq_iff,
split,
{ intros H ε εpos,
rcases H ε εpos with ⟨N, hN⟩,
exact ⟨N, λn hn, hN _ _ hn (le_refl N)⟩ },
{ intros H ε εpos,
rcases H (ε/2) (half_pos εpos) with ⟨N, hN⟩,
exact ⟨N, λ m n hm hn, calc
dist (u m) (u n) ≤ dist (u m) (u N) + dist (u n) (u N) : dist_triangle_right _ _ _
... < ε/2 + ε/2 : add_lt_add (hN _ hm) (hN _ hn)
... = ε : add_halves _⟩ }
end
/-- A Cauchy sequence on the natural numbers is bounded. -/
theorem cauchy_seq_bdd {u : ℕ → α} (hu : cauchy_seq u) :
∃ R > 0, ∀ m n, dist (u m) (u n) < R :=
begin
rcases metric.cauchy_seq_iff'.1 hu 1 zero_lt_one with ⟨N, hN⟩,
suffices : ∃ R > 0, ∀ n, dist (u n) (u N) < R,
{ rcases this with ⟨R, R0, H⟩,
exact ⟨_, add_pos R0 R0, λ m n,
lt_of_le_of_lt (dist_triangle_right _ _ _) (add_lt_add (H m) (H n))⟩ },
let R := finset.sup (finset.range N) (λ n, nndist (u n) (u N)),
refine ⟨↑R + 1, add_pos_of_nonneg_of_pos R.2 zero_lt_one, λ n, _⟩,
cases le_or_lt N n,
{ exact lt_of_lt_of_le (hN _ h) (le_add_of_nonneg_left R.2) },
{ have : _ ≤ R := finset.le_sup (finset.mem_range.2 h),
exact lt_of_le_of_lt this (lt_add_of_pos_right _ zero_lt_one) }
end
/-- Yet another metric characterization of Cauchy sequences on integers. This one is often the
most efficient. -/
lemma cauchy_seq_iff_le_tendsto_0 {s : ℕ → α} : cauchy_seq s ↔ ∃ b : ℕ → ℝ,
(∀ n, 0 ≤ b n) ∧
(∀ n m N : ℕ, N ≤ n → N ≤ m → dist (s n) (s m) ≤ b N) ∧
tendsto b at_top (nhds 0) :=
⟨λ hs, begin
/- `s` is a Cauchy sequence. The sequence `b` will be constructed by taking
the supremum of the distances between `s n` and `s m` for `n, m ≥ N`.
First, we prove that all these distances are bounded, as otherwise the Sup
would not make sense. -/
let S := λ N, (λ(p : ℕ × ℕ), dist (s p.1) (s p.2)) '' {p | p.1 ≥ N ∧ p.2 ≥ N},
have hS : ∀ N, ∃ x, ∀ y ∈ S N, y ≤ x,
{ rcases cauchy_seq_bdd hs with ⟨R, R0, hR⟩,
refine λ N, ⟨R, _⟩, rintro _ ⟨⟨m, n⟩, _, rfl⟩,
exact le_of_lt (hR m n) },
have bdd : bdd_above (range (λ(p : ℕ × ℕ), dist (s p.1) (s p.2))),
{ rcases cauchy_seq_bdd hs with ⟨R, R0, hR⟩,
use R, rintro _ ⟨⟨m, n⟩, rfl⟩, exact le_of_lt (hR m n) },
-- Prove that it bounds the distances of points in the Cauchy sequence
have ub : ∀ m n N, N ≤ m → N ≤ n → dist (s m) (s n) ≤ real.Sup (S N) :=
λ m n N hm hn, real.le_Sup _ (hS N) ⟨⟨_, _⟩, ⟨hm, hn⟩, rfl⟩,
have S0m : ∀ n, (0:ℝ) ∈ S n := λ n, ⟨⟨n, n⟩, ⟨le_refl _, le_refl _⟩, dist_self _⟩,
have S0 := λ n, real.le_Sup _ (hS n) (S0m n),
-- Prove that it tends to `0`, by using the Cauchy property of `s`
refine ⟨λ N, real.Sup (S N), S0, ub, metric.tendsto_at_top.2 (λ ε ε0, _)⟩,
refine (metric.cauchy_seq_iff.1 hs (ε/2) (half_pos ε0)).imp (λ N hN n hn, _),
rw [real.dist_0_eq_abs, abs_of_nonneg (S0 n)],
refine lt_of_le_of_lt (real.Sup_le_ub _ ⟨_, S0m _⟩ _) (half_lt_self ε0),
rintro _ ⟨⟨m', n'⟩, ⟨hm', hn'⟩, rfl⟩,
exact le_of_lt (hN _ _ (le_trans hn hm') (le_trans hn hn'))
end,
λ ⟨b, _, b_bound, b_lim⟩, metric.cauchy_seq_iff.2 $ λ ε ε0,
(metric.tendsto_at_top.1 b_lim ε ε0).imp $ λ N hN m n hm hn,
calc dist (s m) (s n) ≤ b N : b_bound m n N hm hn
... ≤ abs (b N) : le_abs_self _
... = dist (b N) 0 : by rw real.dist_0_eq_abs; refl
... < ε : (hN _ (le_refl N)) ⟩
end cauchy_seq
def metric_space.induced {α β} (f : α → β) (hf : function.injective f)
(m : metric_space β) : metric_space α :=
{ dist := λ x y, dist (f x) (f y),
dist_self := λ x, dist_self _,
eq_of_dist_eq_zero := λ x y h, hf (dist_eq_zero.1 h),
dist_comm := λ x y, dist_comm _ _,
dist_triangle := λ x y z, dist_triangle _ _ _,
edist := λ x y, edist (f x) (f y),
edist_dist := λ x y, edist_dist _ _,
to_uniform_space := uniform_space.comap f m.to_uniform_space,
uniformity_dist := begin
apply @uniformity_dist_of_mem_uniformity _ _ _ _ _ (λ x y, dist (f x) (f y)),
refine λ s, mem_comap_sets.trans _,
split; intro H,
{ rcases H with ⟨r, ru, rs⟩,
rcases mem_uniformity_dist.1 ru with ⟨ε, ε0, hε⟩,
refine ⟨ε, ε0, λ a b h, rs (hε _)⟩, exact h },
{ rcases H with ⟨ε, ε0, hε⟩,
exact ⟨_, dist_mem_uniformity ε0, λ ⟨a, b⟩, hε⟩ }
end }
instance subtype.metric_space {p : α → Prop} [t : metric_space α] : metric_space (subtype p) :=
metric_space.induced subtype.val (λ x y, subtype.eq) t
theorem subtype.dist_eq {p : α → Prop} [t : metric_space α] (x y : subtype p) :
dist x y = dist x.1 y.1 := rfl
section nnreal
instance : metric_space nnreal := by unfold nnreal; apply_instance
end nnreal
section prod
instance prod.metric_space_max [metric_space β] : metric_space (α × β) :=
{ dist := λ x y, max (dist x.1 y.1) (dist x.2 y.2),
dist_self := λ x, by simp,
eq_of_dist_eq_zero := λ x y h, begin
cases max_le_iff.1 (le_of_eq h) with h₁ h₂,
exact prod.ext_iff.2 ⟨dist_le_zero.1 h₁, dist_le_zero.1 h₂⟩
end,
dist_comm := λ x y, by simp [dist_comm],
dist_triangle := λ x y z, max_le
(le_trans (dist_triangle _ _ _) (add_le_add (le_max_left _ _) (le_max_left _ _)))
(le_trans (dist_triangle _ _ _) (add_le_add (le_max_right _ _) (le_max_right _ _))),
edist := λ x y, max (edist x.1 y.1) (edist x.2 y.2),
edist_dist := assume x y, begin
have : monotone ennreal.of_real := assume x y h, ennreal.of_real_le_of_real h,
rw [edist_dist, edist_dist, (max_distrib_of_monotone this).symm]
end,
uniformity_dist := begin
refine uniformity_prod.trans _,
simp [uniformity_dist, comap_infi],
rw ← infi_inf_eq, congr, funext,
rw ← infi_inf_eq, congr, funext,
simp [inf_principal, ext_iff, max_lt_iff]
end,
to_uniform_space := prod.uniform_space }
lemma prod.dist_eq [metric_space β] {x y : α × β} :
dist x y = max (dist x.1 y.1) (dist x.2 y.2) := rfl
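-- Note this is the sup metric, not the Euclidean one:
-- e.g. in `ℝ × ℝ`, `dist (0, 0) (3, 4) = max 3 4 = 4` rather than `5`.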
end prod
theorem uniform_continuous_dist' : uniform_continuous (λp:α×α, dist p.1 p.2) :=
metric.uniform_continuous_iff.2 (λ ε ε0, ⟨ε/2, half_pos ε0,
begin
suffices,
{ intros p q h, cases p with p₁ p₂, cases q with q₁ q₂,
cases max_lt_iff.1 h with h₁ h₂, clear h,
dsimp at h₁ h₂ ⊢,
rw real.dist_eq,
refine abs_sub_lt_iff.2 ⟨_, _⟩,
{ revert p₁ p₂ q₁ q₂ h₁ h₂, exact this },
{ apply this; rwa dist_comm } },
intros p₁ p₂ q₁ q₂ h₁ h₂,
have := add_lt_add
(abs_sub_lt_iff.1 (lt_of_le_of_lt (abs_dist_sub_le p₁ q₁ p₂) h₁)).1
(abs_sub_lt_iff.1 (lt_of_le_of_lt (abs_dist_sub_le p₂ q₂ q₁) h₂)).1,
rwa [add_halves, dist_comm p₂, sub_add_sub_cancel, dist_comm q₂] at this
end⟩)
theorem uniform_continuous_dist [uniform_space β] {f g : β → α}
(hf : uniform_continuous f) (hg : uniform_continuous g) :
uniform_continuous (λb, dist (f b) (g b)) :=
(hf.prod_mk hg).comp uniform_continuous_dist'
theorem continuous_dist' : continuous (λp:α×α, dist p.1 p.2) :=
uniform_continuous_dist'.continuous
theorem continuous_dist [topological_space β] {f g : β → α}
(hf : continuous f) (hg : continuous g) : continuous (λb, dist (f b) (g b)) :=
(hf.prod_mk hg).comp continuous_dist'
theorem tendsto_dist {f g : β → α} {x : filter β} {a b : α}
(hf : tendsto f x (nhds a)) (hg : tendsto g x (nhds b)) :
tendsto (λx, dist (f x) (g x)) x (nhds (dist a b)) :=
have tendsto (λp:α×α, dist p.1 p.2) (nhds (a, b)) (nhds (dist a b)),
from continuous_iff_continuous_at.mp continuous_dist' (a, b),
(hf.prod_mk hg).comp (by rw [nhds_prod_eq] at this; exact this)
lemma nhds_comap_dist (a : α) : (nhds (0 : ℝ)).comap (λa', dist a' a) = nhds a :=
have h₁ : ∀ε, (λa', dist a' a) ⁻¹' ball 0 ε ⊆ ball a ε,
by simp [subset_def, real.dist_0_eq_abs],
have h₂ : tendsto (λa', dist a' a) (nhds a) (nhds (dist a a)),
from tendsto_dist tendsto_id tendsto_const_nhds,
le_antisymm
(by simp [h₁, nhds_eq, infi_le_infi, principal_mono,
-le_principal_iff, -le_infi_iff])
(by simpa [map_le_iff_le_comap.symm, tendsto] using h₂)
lemma tendsto_iff_dist_tendsto_zero {f : β → α} {x : filter β} {a : α} :
(tendsto f x (nhds a)) ↔ (tendsto (λb, dist (f b) a) x (nhds 0)) :=
by rw [← nhds_comap_dist a, tendsto_comap_iff]
lemma uniform_continuous_nndist' : uniform_continuous (λp:α×α, nndist p.1 p.2) :=
uniform_continuous_subtype_mk uniform_continuous_dist' _
lemma continuous_nndist' : continuous (λp:α×α, nndist p.1 p.2) :=
uniform_continuous_nndist'.continuous
lemma tendsto_nndist' (a b :α) :
tendsto (λp:α×α, nndist p.1 p.2) (filter.prod (nhds a) (nhds b)) (nhds (nndist a b)) :=
by rw [← nhds_prod_eq]; exact continuous_iff_continuous_at.1 continuous_nndist' _
namespace metric
variables {x y z : α} {ε ε₁ ε₂ : ℝ} {s : set α}
theorem is_closed_ball : is_closed (closed_ball x ε) :=
is_closed_le (continuous_dist continuous_id continuous_const) continuous_const
/-- ε-characterization of the closure in metric spaces -/
theorem mem_closure_iff' {α : Type u} [metric_space α] {s : set α} {a : α} :
a ∈ closure s ↔ ∀ε>0, ∃b ∈ s, dist a b < ε :=
⟨begin
intros ha ε hε,
have A : ball a ε ∩ s ≠ ∅ := mem_closure_iff.1 ha _ is_open_ball (mem_ball_self hε),
cases ne_empty_iff_exists_mem.1 A with b hb,
simp,
exact ⟨b, ⟨hb.2, by have B := hb.1; simpa [mem_ball'] using B⟩⟩
end,
begin
intros H,
apply mem_closure_iff.2,
intros o ho ao,
rcases is_open_iff.1 ho a ao with ⟨ε, ⟨εpos, hε⟩⟩,
rcases H ε εpos with ⟨b, ⟨bs, bdist⟩⟩,
have B : b ∈ o ∩ s := ⟨hε (by simpa [dist_comm]), bs⟩,
apply ne_empty_of_mem B
end⟩
theorem mem_of_closed' {α : Type u} [metric_space α] {s : set α} (hs : is_closed s)
{a : α} : a ∈ s ↔ ∀ε>0, ∃b ∈ s, dist a b < ε :=
by simpa only [closure_eq_of_is_closed hs] using @mem_closure_iff' _ _ s a
end metric
section pi
open finset lattice
variables {π : β → Type*} [fintype β] [∀b, metric_space (π b)]
instance has_dist_pi : has_dist (Πb, π b) :=
⟨λf g, ((finset.sup univ (λb, nndist (f b) (g b)) : nnreal) : ℝ)⟩
lemma dist_pi_def (f g : Πb, π b) :
dist f g = (finset.sup univ (λb, nndist (f b) (g b)) : nnreal) := rfl
instance metric_space_pi : metric_space (Πb, π b) :=
{ dist := dist,
dist_self := assume f, (nnreal.coe_eq_zero _).2 $ bot_unique $ finset.sup_le $ by simp,
dist_comm := assume f g, nnreal.eq_iff.2 $ by congr; ext a; exact nndist_comm _ _,
dist_triangle := assume f g h, show dist f h ≤ (dist f g) + (dist g h), from
begin
simp only [dist_pi_def, (nnreal.coe_add _ _).symm, nnreal.coe_le.symm,
finset.sup_le_iff],
assume b hb,
exact le_trans (nndist_triangle _ (g b) _) (add_le_add (le_sup hb) (le_sup hb))
end,
eq_of_dist_eq_zero := assume f g eq0,
begin
simp only [dist_pi_def, nnreal.coe_eq_zero, nnreal.bot_eq_zero.symm, eq_bot_iff,
finset.sup_le_iff] at eq0,
exact (funext $ assume b, eq_of_nndist_eq_zero $ bot_unique $ eq0 b $ mem_univ b),
end,
edist := λ f g, finset.sup univ (λb, edist (f b) (g b)),
edist_dist := assume x y, begin
have A : sup univ (λ (b : β), ((nndist (x b) (y b)) : ennreal)) = ↑(sup univ (λ (b : β), nndist (x b) (y b))),
{ refine eq.symm (comp_sup_eq_sup_comp _ _ _),
exact (assume x y h, ennreal.coe_le_coe.2 h), refl },
simp [dist, edist_nndist, ennreal.of_real, A]
end }
end pi
section compact
/-- Any compact set in a metric space can be covered by finitely many balls of a given positive
radius -/
lemma finite_cover_balls_of_compact {α : Type u} [metric_space α] {s : set α}
(hs : compact s) {e : ℝ} (he : e > 0) :
∃t ⊆ s, finite t ∧ s ⊆ ⋃x∈t, ball x e :=
begin
apply compact_elim_finite_subcover_image hs,
{ simp [is_open_ball] },
{ intros x xs,
simp,
exact ⟨x, ⟨xs, by simpa⟩⟩ }
end
end compact
section proper_space
open metric
/-- A metric space is proper if all closed balls are compact. -/
class proper_space (α : Type u) [metric_space α] : Prop :=
(compact_ball : ∀x:α, ∀r, compact (closed_ball x r))
/- A compact metric space is proper -/
instance proper_of_compact [metric_space α] [compact_space α] : proper_space α :=
⟨assume x r, compact_of_is_closed_subset compact_univ is_closed_ball (subset_univ _)⟩
/-- A proper space is locally compact -/
instance locally_compact_of_proper [metric_space α] [proper_space α] :
locally_compact_space α :=
begin
apply locally_compact_of_compact_nhds,
intros x,
existsi closed_ball x 1,
split,
{ apply mem_nhds_iff.2,
existsi (1 : ℝ),
simp,
exact ⟨zero_lt_one, ball_subset_closed_ball⟩ },
{ apply proper_space.compact_ball }
end
/-- A proper space is complete -/
instance complete_of_proper {α : Type u} [metric_space α] [proper_space α] : complete_space α :=
⟨begin
intros f hf,
/- We want to show that the Cauchy filter `f` is converging. It suffices to find a closed
ball (therefore compact by properness) where it is nontrivial. -/
have A : ∃ t ∈ f, ∀ x y ∈ t, dist x y < 1 := (metric.cauchy_iff.1 hf).2 1 zero_lt_one,
rcases A with ⟨t, ⟨t_fset, ht⟩⟩,
rcases inhabited_of_mem_sets hf.1 t_fset with ⟨x, xt⟩,
have : t ⊆ closed_ball x 1 := by intros y yt; simp [dist_comm]; apply le_of_lt (ht x y xt yt),
have : closed_ball x 1 ∈ f := f.sets_of_superset t_fset this,
rcases (compact_iff_totally_bounded_complete.1 (proper_space.compact_ball x 1)).2 f hf (le_principal_iff.2 this)
with ⟨y, _, hy⟩,
exact ⟨y, hy⟩
end⟩
/-- A proper metric space is separable, and therefore second countable. Indeed, any ball is
compact, and therefore admits a countable dense subset. Taking a countable union over the balls
centered at a fixed point and with integer radius, one obtains a countable set which is
dense in the whole space. -/
instance second_countable_of_proper [metric_space α] [proper_space α] :
second_countable_topology α :=
begin
/- We show that the space admits a countable dense subset. The case where the space is empty
is special, and trivial. -/
have A : (univ : set α) = ∅ → ∃(s : set α), countable s ∧ closure s = (univ : set α) :=
assume H, ⟨∅, ⟨by simp, by simp; exact H.symm⟩⟩,
have B : (univ : set α) ≠ ∅ → ∃(s : set α), countable s ∧ closure s = (univ : set α) :=
begin
/- When the space is not empty, we take a point `x` in the space, and then a countable set
`T r` which is dense in the closed ball `closed_ball x r` for each `r`. Then the set
`t = ⋃ T n` (where the union is over all integers `n`) is countable, as a countable union
of countable sets, and dense in the space by construction. -/
assume non_empty,
rcases ne_empty_iff_exists_mem.1 non_empty with ⟨x, x_univ⟩,
choose T a using show ∀ (r:ℝ), ∃ t ⊆ closed_ball x r, (countable (t : set α) ∧ closed_ball x r = closure t),
from assume r, emetric.countable_closure_of_compact (proper_space.compact_ball _ _),
let t := (⋃n:ℕ, T (n : ℝ)),
have T₁ : countable t := by finish [countable_Union],
have T₂ : closure t ⊆ univ := by simp,
have T₃ : univ ⊆ closure t :=
begin
intros y y_univ,
rcases exists_nat_gt (dist y x) with ⟨n, n_large⟩,
have h : y ∈ closed_ball x (n : ℝ) := by simp; apply le_of_lt n_large,
have h' : closed_ball x (n : ℝ) = closure (T (n : ℝ)) := by finish,
have : y ∈ closure (T (n : ℝ)) := by rwa h' at h,
show y ∈ closure t, from mem_of_mem_of_subset this (by apply closure_mono; apply subset_Union (λ(n:ℕ), T (n:ℝ))),
end,
exact ⟨t, ⟨T₁, subset.antisymm T₂ T₃⟩⟩
end,
haveI : separable_space α := ⟨by_cases A B⟩,
apply emetric.second_countable_of_separable,
end
end proper_space
namespace metric
section second_countable
open topological_space
/-- A metric space is second countable if, for every ε > 0, there is a countable set which is ε-dense. -/
lemma second_countable_of_almost_dense_set
(H : ∀ε > (0 : ℝ), ∃ s : set α, countable s ∧ (∀x, ∃y ∈ s, dist x y ≤ ε)) :
second_countable_topology α :=
begin
choose T T_dense using H,
have I1 : ∀n:ℕ, (n:ℝ) + 1 > 0 :=
λn, lt_of_lt_of_le zero_lt_one (le_add_of_nonneg_left (nat.cast_nonneg _)),
have I : ∀n:ℕ, (n+1 : ℝ)⁻¹ > 0 := λn, inv_pos'.2 (I1 n),
let t := ⋃n:ℕ, T (n+1)⁻¹ (I n),
have count_t : countable t := by finish [countable_Union],
have clos_t : closure t = univ,
{ refine subset.antisymm (subset_univ _) (λx xuniv, mem_closure_iff'.2 (λε εpos, _)),
rcases exists_nat_gt ε⁻¹ with ⟨n, hn⟩,
have : ε⁻¹ < n + 1 := lt_of_lt_of_le hn (le_add_of_nonneg_right zero_le_one),
have nε : ((n:ℝ)+1)⁻¹ < ε := (inv_lt (I1 n) εpos).2 this,
rcases (T_dense (n+1)⁻¹ (I n)).2 x with ⟨y, yT, Dxy⟩,
have : y ∈ t := mem_of_mem_of_subset yT (by apply subset_Union (λ (n:ℕ), T (n+1)⁻¹ (I n))),
exact ⟨y, this, lt_of_le_of_lt Dxy nε⟩ },
haveI : separable_space α := ⟨⟨t, ⟨count_t, clos_t⟩⟩⟩,
exact emetric.second_countable_of_separable α
end
/-- A metric space is second countable if one can reconstruct, up to any ε > 0, any element of
the space from countably many data. -/
lemma second_countable_of_countable_discretization {α : Type u} [metric_space α]
(H : ∀ε > (0 : ℝ), ∃ (β : Type u) [encodable β] (F : α → β), ∀x y, F x = F y → dist x y ≤ ε) :
second_countable_topology α :=
begin
classical, by_cases hs : (univ : set α) = ∅,
{ haveI : compact_space α := ⟨by rw hs; exact compact_of_finite (set.finite_empty)⟩, by apply_instance },
rcases exists_mem_of_ne_empty hs with ⟨x0, hx0⟩,
letI : inhabited α := ⟨x0⟩,
refine second_countable_of_almost_dense_set (λε ε0, _),
rcases H ε ε0 with ⟨β, fβ, F, hF⟩,
let Finv := function.inv_fun F,
refine ⟨range Finv, ⟨countable_range _, λx, _⟩⟩,
let x' := Finv (F x),
have : F x' = F x := function.inv_fun_eq ⟨x, rfl⟩,
exact ⟨x', mem_range_self _, hF _ _ this.symm⟩
end
end second_countable
end metric
lemma lebesgue_number_lemma_of_metric
{s : set α} {ι} {c : ι → set α} (hs : compact s)
(hc₁ : ∀ i, is_open (c i)) (hc₂ : s ⊆ ⋃ i, c i) :
∃ δ > 0, ∀ x ∈ s, ∃ i, ball x δ ⊆ c i :=
let ⟨n, en, hn⟩ := lebesgue_number_lemma hs hc₁ hc₂,
⟨δ, δ0, hδ⟩ := mem_uniformity_dist.1 en in
⟨δ, δ0, assume x hx, let ⟨i, hi⟩ := hn x hx in
⟨i, assume y hy, hi (hδ (mem_ball'.mp hy))⟩⟩
lemma lebesgue_number_lemma_of_metric_sUnion
{s : set α} {c : set (set α)} (hs : compact s)
(hc₁ : ∀ t ∈ c, is_open t) (hc₂ : s ⊆ ⋃₀ c) :
∃ δ > 0, ∀ x ∈ s, ∃ t ∈ c, ball x δ ⊆ t :=
by rw sUnion_eq_Union at hc₂;
simpa using lebesgue_number_lemma_of_metric hs (by simpa) hc₂
namespace metric
/-- Boundedness of a subset of a metric space. We formulate the definition to work
even in the empty space. -/
def bounded (s : set α) : Prop :=
∃C, ∀x y ∈ s, dist x y ≤ C
section bounded
variables {x : α} {s t : set α} {r : ℝ}
@[simp] lemma bounded_empty : bounded (∅ : set α) :=
⟨0, by simp⟩
lemma bounded_iff_mem_bounded : bounded s ↔ ∀ x ∈ s, bounded s :=
⟨λ h _ _, h, λ H, begin
classical, by_cases s = ∅,
{ subst s, exact ⟨0, by simp⟩ },
{ rcases exists_mem_of_ne_empty h with ⟨x, hx⟩,
exact H x hx }
end⟩
/-- Subsets of a bounded set are also bounded -/
lemma bounded.subset (incl : s ⊆ t) : bounded t → bounded s :=
Exists.imp $ λ C hC x y hx hy, hC x y (incl hx) (incl hy)
/-- Closed balls are bounded -/
lemma bounded_closed_ball : bounded (closed_ball x r) :=
⟨r + r, λ y z hy hz, begin
simp only [mem_closed_ball] at *,
calc dist y z ≤ dist y x + dist z x : dist_triangle_right _ _ _
... ≤ r + r : add_le_add hy hz
end⟩
/-- Open balls are bounded -/
lemma bounded_ball : bounded (ball x r) :=
bounded_closed_ball.subset ball_subset_closed_ball
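/- Usage sketch: any set contained in an open ball is bounded, combining `bounded_ball`
with `bounded.subset`. -/
example (h : s ⊆ ball x r) : bounded s := bounded_ball.subset h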
/-- Given a point, a bounded subset is included in some ball around this point -/
lemma bounded_iff_subset_ball (c : α) : bounded s ↔ ∃r, s ⊆ closed_ball c r :=
begin
split; rintro ⟨C, hC⟩,
{ classical, by_cases s = ∅,
{ subst s, exact ⟨0, by simp⟩ },
{ rcases exists_mem_of_ne_empty h with ⟨x, hx⟩,
exact ⟨C + dist x c, λ y hy, calc
dist y c ≤ dist y x + dist x c : dist_triangle _ _ _
... ≤ C + dist x c : add_le_add_right (hC y x hy hx) _⟩ } },
{ exact bounded_closed_ball.subset hC }
end
/-- The union of two bounded sets is bounded iff each of the sets is bounded -/
@[simp] lemma bounded_union :
bounded (s ∪ t) ↔ bounded s ∧ bounded t :=
⟨λh, ⟨h.subset (by simp), h.subset (by simp)⟩,
begin
rintro ⟨hs, ht⟩,
refine bounded_iff_mem_bounded.2 (λ x _, _),
rw bounded_iff_subset_ball x at hs ht ⊢,
rcases hs with ⟨Cs, hCs⟩, rcases ht with ⟨Ct, hCt⟩,
exact ⟨max Cs Ct, union_subset
(subset.trans hCs $ closed_ball_subset_closed_ball $ le_max_left _ _)
(subset.trans hCt $ closed_ball_subset_closed_ball $ le_max_right _ _)⟩,
end⟩
/-- A finite union of bounded sets is bounded -/
lemma bounded_bUnion {I : set β} {s : β → set α} (H : finite I) :
bounded (⋃i∈I, s i) ↔ ∀i ∈ I, bounded (s i) :=
finite.induction_on H (by simp) $ λ x I _ _ IH,
by simp [or_imp_distrib, forall_and_distrib, IH]
/-- A compact set is bounded -/
lemma bounded_of_compact {s : set α} (h : compact s) : bounded s :=
-- We cover the compact set by finitely many balls of radius 1,
-- and then argue that a finite union of bounded sets is bounded
let ⟨t, ht, fint, subs⟩ := finite_cover_balls_of_compact h zero_lt_one in
bounded.subset subs $ (bounded_bUnion fint).2 $ λ i hi, bounded_ball
/-- A finite set is bounded -/
lemma bounded_of_finite {s : set α} (h : finite s) : bounded s :=
bounded_of_compact $ compact_of_finite h
/-- A singleton is bounded -/
lemma bounded_singleton {x : α} : bounded ({x} : set α) :=
bounded_of_finite $ finite_singleton _
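/- A small consequence (sketch): two-point sets are bounded, via `bounded_union`. -/
example {y : α} : bounded ({x} ∪ {y} : set α) :=
bounded_union.2 ⟨bounded_singleton, bounded_singleton⟩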
/-- Characterization of the boundedness of the range of a function -/
lemma bounded_range_iff {f : β → α} : bounded (range f) ↔ ∃C, ∀x y, dist (f x) (f y) ≤ C :=
exists_congr $ λ C, ⟨
λ H x y, H _ _ ⟨x, rfl⟩ ⟨y, rfl⟩,
by rintro H _ _ ⟨x, rfl⟩ ⟨y, rfl⟩; exact H x y⟩
/-- In a compact space, all sets are bounded -/
lemma bounded_of_compact_space [compact_space α] : bounded s :=
(bounded_of_compact compact_univ).subset (subset_univ _)
/-- In a proper space, a set is compact if and only if it is closed and bounded -/
lemma compact_iff_closed_bounded [proper_space α] :
compact s ↔ is_closed s ∧ bounded s :=
⟨λ h, ⟨closed_of_compact _ h, bounded_of_compact h⟩, begin
rintro ⟨hc, hb⟩,
classical, by_cases s = ∅, {simp [h, compact_empty]},
rcases exists_mem_of_ne_empty h with ⟨x, hx⟩,
rcases (bounded_iff_subset_ball x).1 hb with ⟨r, hr⟩,
exact compact_of_is_closed_subset (proper_space.compact_ball x r) hc hr
end⟩
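/- The usable direction (sketch): in a proper space, closed and bounded sets are
compact. -/
example [proper_space α] (hc : is_closed s) (hb : bounded s) : compact s :=
compact_iff_closed_bounded.2 ⟨hc, hb⟩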
end bounded
section diam
variables {s : set α} {x y : α}
/-- The diameter of a set in a metric space. To get controllable behavior even when the diameter
should be infinite, we express it in terms of `emetric.diam`. -/
def diam (s : set α) : ℝ := ennreal.to_real (emetric.diam s)
/-- The diameter of a set is always nonnegative -/
lemma diam_nonneg : 0 ≤ diam s :=
by simp [diam]
/-- The empty set has zero diameter -/
@[simp] lemma diam_empty : diam (∅ : set α) = 0 :=
by simp [diam]
/-- A singleton has zero diameter -/
@[simp] lemma diam_singleton : diam ({x} : set α) = 0 :=
by simp [diam]
/-- Characterize the boundedness of a set in terms of the finiteness of its `emetric.diam`. -/
lemma bounded_iff_diam_ne_top : bounded s ↔ emetric.diam s ≠ ⊤ :=
begin
classical, by_cases hs : s = ∅,
{ simp [hs] },
{ rcases ne_empty_iff_exists_mem.1 hs with ⟨x, hx⟩,
split,
{ assume bs,
rcases (bounded_iff_subset_ball x).1 bs with ⟨r, hr⟩,
have r0 : 0 ≤ r := by simpa [closed_ball] using hr hx,
have : emetric.diam s < ⊤ := calc
emetric.diam s ≤ emetric.diam (emetric.closed_ball x (ennreal.of_real r)) :
by rw emetric_closed_ball r0; exact emetric.diam_mono hr
... ≤ 2 * (ennreal.of_real r) : emetric.diam_closed_ball
... < ⊤ : begin apply ennreal.lt_top_iff_ne_top.2, simp [ennreal.mul_eq_top], end,
exact ennreal.lt_top_iff_ne_top.1 this },
{ assume ds,
have : s ⊆ closed_ball x (ennreal.to_real (emetric.diam s)),
{ rw [← emetric_closed_ball ennreal.to_real_nonneg, ennreal.of_real_to_real ds],
exact λy hy, emetric.edist_le_diam_of_mem hy hx },
exact bounded.subset this (bounded_closed_ball) }}
end
/-- An unbounded set has zero diameter. If you would prefer to get the value ∞, use `emetric.diam`.
This lemma makes it possible to avoid side conditions in some situations -/
lemma diam_eq_zero_of_unbounded (h : ¬(bounded s)) : diam s = 0 :=
begin
simp only [bounded_iff_diam_ne_top, not_not, ne.def] at h,
simp [diam, h]
end
/-- If `s ⊆ t`, then the diameter of `s` is bounded by that of `t`, provided `t` is bounded. -/
lemma diam_mono {s t : set α} (h : s ⊆ t) (ht : bounded t) : diam s ≤ diam t :=
begin
unfold diam,
rw ennreal.to_real_le_to_real (bounded_iff_diam_ne_top.1 (bounded.subset h ht)) (bounded_iff_diam_ne_top.1 ht),
exact emetric.diam_mono h
end
/-- The distance between two points in a set is controlled by the diameter of the set. -/
lemma dist_le_diam_of_mem (h : bounded s) (hx : x ∈ s) (hy : y ∈ s) : dist x y ≤ diam s :=
begin
rw [diam, dist_edist],
rw ennreal.to_real_le_to_real (edist_ne_top _ _) (bounded_iff_diam_ne_top.1 h),
exact emetric.edist_le_diam_of_mem hx hy
end
/-- If the distance between any two points in a set is bounded by some constant, this constant
bounds the diameter. -/
lemma diam_le_of_forall_dist_le {d : real} (hd : d ≥ 0) (h : ∀x y ∈ s, dist x y ≤ d) : diam s ≤ d :=
begin
have I : emetric.diam s ≤ ennreal.of_real d,
{ refine emetric.diam_le_of_forall_edist_le (λx y hx hy, _),
rw [edist_dist],
exact ennreal.of_real_le_of_real (h x y hx hy) },
have A : emetric.diam s ≠ ⊤ :=
ennreal.lt_top_iff_ne_top.1 (lt_of_le_of_lt I (ennreal.lt_top_iff_ne_top.2 (by simp))),
rw [← ennreal.to_real_of_real hd, diam, ennreal.to_real_le_to_real A],
{ exact I },
{ simp }
end
/-- The diameter of a union is controlled by the sum of the diameters, and the distance between
any two points in each of the sets. This lemma is true without any side condition, since it is
obviously true if `s ∪ t` is unbounded. -/
lemma diam_union {t : set α} (xs : x ∈ s) (yt : y ∈ t) : diam (s ∪ t) ≤ diam s + dist x y + diam t :=
have I1 : ¬(bounded (s ∪ t)) → diam (s ∪ t) ≤ diam s + dist x y + diam t := λh, calc
diam (s ∪ t) = 0 + 0 + 0 : by simp [diam_eq_zero_of_unbounded h]
... ≤ diam s + dist x y + diam t : add_le_add (add_le_add diam_nonneg dist_nonneg) diam_nonneg,
have I2 : (bounded (s ∪ t)) → diam (s ∪ t) ≤ diam s + dist x y + diam t := λh,
begin
have : bounded s := bounded.subset (subset_union_left _ _) h,
have : bounded t := bounded.subset (subset_union_right _ _) h,
have A : ∀a ∈ s, ∀b ∈ t, dist a b ≤ diam s + dist x y + diam t := λa ha b hb, calc
dist a b ≤ dist a x + dist x y + dist y b : dist_triangle4 _ _ _ _
... ≤ diam s + dist x y + diam t :
add_le_add (add_le_add (dist_le_diam_of_mem ‹bounded s› ha xs) (le_refl _)) (dist_le_diam_of_mem ‹bounded t› yt hb),
have B : ∀a b ∈ s ∪ t, dist a b ≤ diam s + dist x y + diam t := λa b ha hb,
begin
cases (mem_union _ _ _).1 ha with h'a h'a; cases (mem_union _ _ _).1 hb with h'b h'b,
{ calc dist a b ≤ diam s : dist_le_diam_of_mem ‹bounded s› h'a h'b
... = diam s + (0 + 0) : by simp
... ≤ diam s + (dist x y + diam t) : add_le_add (le_refl _) (add_le_add dist_nonneg diam_nonneg)
... = diam s + dist x y + diam t : by simp only [add_comm, eq_self_iff_true, add_left_comm] },
{ exact A a h'a b h'b },
{ have Z := A b h'b a h'a, rwa [dist_comm] at Z },
{ calc dist a b ≤ diam t : dist_le_diam_of_mem ‹bounded t› h'a h'b
... = (0 + 0) + diam t : by simp
... ≤ (diam s + dist x y) + diam t : add_le_add (add_le_add diam_nonneg dist_nonneg) (le_refl _) }
end,
have C : 0 ≤ diam s + dist x y + diam t := calc
0 = 0 + 0 + 0 : by simp
... ≤ diam s + dist x y + diam t : add_le_add (add_le_add diam_nonneg dist_nonneg) diam_nonneg,
exact diam_le_of_forall_dist_le C B
end,
classical.by_cases I2 I1
/-- If two sets intersect, the diameter of the union is bounded by the sum of the diameters. -/
lemma diam_union' {t : set α} (h : s ∩ t ≠ ∅) : diam (s ∪ t) ≤ diam s + diam t :=
begin
rcases ne_empty_iff_exists_mem.1 h with ⟨x, ⟨xs, xt⟩⟩,
simpa using diam_union xs xt
end
/-- The diameter of a closed ball of radius `r` is at most `2 r`. -/
lemma diam_closed_ball {r : ℝ} (h : r ≥ 0) : diam (closed_ball x r) ≤ 2 * r :=
diam_le_of_forall_dist_le (mul_nonneg (by norm_num) h) $ λa b ha hb, calc
dist a b ≤ dist a x + dist b x : dist_triangle_right _ _ _
... ≤ r + r : add_le_add ha hb
... = 2 * r : by simp [mul_two, mul_comm]
/-- The diameter of a ball of radius `r` is at most `2 r`. -/
lemma diam_ball {r : ℝ} (h : r ≥ 0) : diam (ball x r) ≤ 2 * r :=
le_trans (diam_mono ball_subset_closed_ball bounded_closed_ball) (diam_closed_ball h)
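/- Combining the two previous lemmas (sketch): a set inside a ball of radius `r ≥ 0`
has diameter at most `2 * r`. -/
example {r : ℝ} (h : r ≥ 0) (hs : s ⊆ ball x r) : diam s ≤ 2 * r :=
le_trans (diam_mono hs bounded_ball) (diam_ball h)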
end diam
end metric
|
[STATEMENT]
lemma is_TNil_tfilter[simp]:
"is_TNil (tfilter y P xs) \<longleftrightarrow> (\<forall>x \<in> tset xs. \<not> P x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_TNil (tfilter y P xs) = (\<forall>x\<in>tset xs. \<not> P x)
[PROOF STEP]
by transfer auto |
c compute Zeeman components and strengths from the g-values
c of the lower and upper levels provided directly by the
c user. This routine is useful for calculating the Zeeman
c pattern of the Fe I line at 15652 A.
c The total angular momentum (tam) of the two levels must
c be provided in the atomic parameter file. The other values
c specified in the transition will not be used here.
SUBROUTINE ZEEMAN_jk(MC,MULT,DESIGN,TAM,JI,JF,DL0,NP,NL,NR,DLP,DLL,
*DLR,SP,SL,SR,xg1,xg2)
CHARACTER DESIGN*1
character*1 CODE(13)/'S','P','D','F','G','H','I','K','L','M','N',
*'O','Q'/
dimension G(2),MULT(*),DESIGN(*),TAM(*),JI(*),JF(*),DLP(*),DLL(*),
*DLR(*),SP(*),SL(*),SR(*)
character*1 code1(21)/'p','1','f','2','h','3','k','4','m','5','o'
& ,'6','r','7','t','8','u','9','v','0','w'/
OAM=-1
JI1=JI(1)
JI2=JI(2)
JF1=JF(1)
JF2=JF(2)
DO 58 I=1,MC
DLL(I)=0.
SL(I)=0.
DLR(I)=0.
58 SR(I)=0.
G(1)=xg1
G(2)=xg2
LEVEL=0
IF((JI1+JF1).EQ.0) LEVEL=1
IF((JI2+JF2).EQ.0) LEVEL=-1
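c arithmetic IF: branch to 3 if LEVEL<0 (upper level has J=0),
c to 4 if LEVEL=0 (general case), or to 5 if LEVEL>0 (lower
c level has J=0)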
IF(LEVEL) 3,4,5
3 I=1 ! TRIPLETS WITH G=0
GO TO 6
5 I=2
6 TAM(I)=1. ! TOTAL ANGULAR MOMENTUM
SPIN=0.5*FLOAT(MULT(I)-1)
DO 7 J=1,13
IF(DESIGN(I).NE.CODE(J)) GO TO 7
OAM=J-1 ! ORBITAL ANGULAR MOMENTUM
GO TO 8
7 CONTINUE
do 70 j=1,21
if(design(I).ne.code1(j)) goto 70
oam=float(j)/2. ! ORBITAL ANGULAR MOMENTUM (half-integer)
go to 8
70 continue
print*,' '
print*,'STOP: Check the transitions in the file containing'
print*,'the atomic parameters. Some letter used to code the'
print*,'orbital angular momentum is erroneous or missing.'
print*,' '
print*,'____________________________________________________'
stop
8 G(I)=g(i) !1.5+(SPIN*(1.+SPIN)-OAM*(1.+OAM))/4.
13 NP=1 ! TRIPLETS WITH G=0 OR G1=G2
NL=NP
NR=NP
SP(1)=1.
SL(1)=1.
SR(1)=1.
DLP(1)=0.
DLL(1)=G(I)
DLR(1)=-DLL(1)
GO TO 9
4 DO 10 I=1,2
SPIN=0.5*FLOAT(MULT(I)-1)
DO 11 J=1,13
IF(DESIGN(I).NE.CODE(J)) GO TO 11
OAM=J-1
GO TO 10
11 CONTINUE
do 110 j=1,21
if(design(I).ne.code1(j)) goto 110
oam=float(j)/2. ! ORBITAL ANGULAR MOMENTUM (half-integer)
go to 10
110 continue
C LANDE FACTOR FOR EACH LEVEL
10 G(I)=g(i) !1.5+(SPIN*(1.+SPIN)-OAM*(1.+OAM))/(2.*TAM(I)*(1.+TAM(I)))
IF(ABS(G(2)-G(1)).GT.5.E-6) GO TO 12
I=2
GO TO 13
12 LEVEL=JI2-JI1
IF(JF1.EQ.5) GO TO 14
IF(LEVEL) 15,16,17 ! INTEGRAL J'S
15 NP=2*JI2+1
19 NL=NP
NR=NP
IF(NP.LE.MC) GO TO 18
STOP 'EXIT ZEEMAN'
16 NP=2*JI2
GO TO 19
17 NP=2*JI2-1
GO TO 19
18 MUMIN=-JI2
IF(JI1.LT.JI2) MUMIN=-JI1
MUMAX=-MUMIN
I=0
DO 20 MU=MUMIN,MUMAX ! PI COMPONENTS
IF(MU.EQ.0.AND.LEVEL.EQ.0) GO TO 20
I=I+1
DLP(I)=FLOAT(MU)*(G(1)-G(2))
J=MU**2
IF(LEVEL) 21,22,23
21 SP(I)=2*(JI1**2-J)
GO TO 20
22 SP(I)=2*J
GO TO 20
23 SP(I)=2*(JI2**2-J)
20 CONTINUE
MUMIN=1-JI1
MUMAX=JI2
I=0
DO 24 MU=MUMIN,MUMAX ! R-SIGMA COMPONENTS
I=I+1
DLR(I)=FLOAT(MU)*(G(1)-G(2))-G(1)
IF(LEVEL) 25,26,27
25 SR(I)=(JI1-MU)*(JI2-MU+2)
GO TO 24
26 SR(I)=(JI2+MU)*(JI2-MU+1)
GO TO 24
27 SR(I)=(JI2+MU)*(JI1+MU)
24 CONTINUE
MUMIN=-JI2
MUMAX=JI1-1
I=0
DO 28 MU=MUMIN,MUMAX ! L-SIGMA COMPONENTS
I=I+1
DLL(I)=FLOAT(MU)*(G(1)-G(2))+G(1)
IF(LEVEL) 29,30,31
29 SL(I)=(JI1+MU)*(JI2+MU+2)
GO TO 28
30 SL(I)=(JI2-MU)*(JI2+MU+1)
GO TO 28
31 SL(I)=(JI2-MU)*(JI1-MU)
28 CONTINUE
GO TO 57
14 I=2*JI1+1 ! HALF-INTEGRAL J'S
IF(LEVEL) 32,33,34
32 NP=I-1
NL=NP
NR=NP
36 IF(NP.LE.MC) GO TO 35
STOP 'EXIT ZEEMAN'
33 NP=I+1
NL=I
NR=I
GO TO 36
34 NP=I+1
NL=NP
NR=NP
GO TO 36
35 MUMIN=-JI2
IF(JI1.LT.JI2) MUMIN=-JI1
MUMAX=1-MUMIN
I=0
DO 37 MU=MUMIN,MUMAX ! PI COMPONENTS
I=I+1
SPIN=FLOAT(MU)-0.5
DLP(I)=(G(1)-G(2))*SPIN
SPIN=SPIN**2
IF(LEVEL) 38,39,40
38 SP(I)=2.*((FLOAT(JI1)+0.5)**2-SPIN)
GO TO 37
39 SP(I)=2.*SPIN
GO TO 37
40 SP(I)=2.*((FLOAT(JI2)+0.5)**2-SPIN)
37 CONTINUE
MUMIN=-JI1
MUMAX=JI2
I=0
DO 41 MU=MUMIN,MUMAX ! R-SIGMA COMPONENTS
I=I+1
DLR(I)=(FLOAT(MU)+0.5)*(G(1)-G(2))-G(1)
IF(LEVEL) 42,43,44
42 SR(I)=(JI1-MU)*(JI2-MU+2)
GO TO 41
43 SR(I)=(JI2+MU+1)*(JI2-MU+1)
GO TO 41
44 SR(I)=(JI2+MU+1)*(JI2+MU)
41 CONTINUE
MUMIN=-MUMAX
MUMAX=JI1
I=0
DO 45 MU=MUMIN,MUMAX ! L-SIGMA COMPONENTS
I=I+1
DLL(I)=(FLOAT(MU)-0.5)*(G(1)-G(2))+G(1)
IF(LEVEL) 46,47,48
46 SL(I)=(JI1+MU)*(JI2+MU+2)
GO TO 45
47 SL(I)=(JI2-MU+1)*(JI2+MU+1)
GO TO 45
48 SL(I)=(JI2-MU+1)*(JI1-MU+1)
45 CONTINUE
57 SUM=0.
DO 49 I=1,NP
49 SUM=SUM+SP(I)
DO 50 I=1,NP
50 SP(I)=SP(I)/SUM
SPIN=0.
SUM=0.
DO 51 I=1,NL ! NL=NR ASSUMED.
SPIN=SPIN+SL(I)
51 SUM=SUM+SR(I)
DO 52 I=1,NL
SL(I)=SL(I)/SPIN
52 SR(I)=SR(I)/SUM
9 CONTINUE
c IF(MC.LE.18) GO TO 59 ! SKIP PRINTING IF SYNTHESIS
c WRITE(6,53) (MULT(I),DESIGN(I),JI(I),JF(I),I=1,2)
c 53 FORMAT(3X,'Transition:',I3,A1,I2,'.',I1,' ----',I3,A1,I2,'.',I1/
c *3X,'Zeeman Pattern (Lorentz units) and normalized Intensities:'/
c *11X,'Pi',17X,'Sigma+',15X,'Sigma-')
59 DO 54 I=1,NP
c IF(MC.LE.18) GO TO 60
c WRITE(6,55) DLP(I),SP(I),DLL(I),SL(I),DLR(I),SR(I)
55 FORMAT(2X,3(F10.6,F11.8))
60 DLP(I)=DLP(I)*DL0
DLL(I)=DLL(I)*DL0
54 DLR(I)=DLR(I)*DL0
IF(MC.LE.18) RETURN
c WRITE(6,56) G
56 FORMAT(3X,'Lande Factors: g(lower)=',F11.8,', g(upper)=',F11.8)
RETURN
END
|
using PyCall: pyimport_conda, pycall
using RCall, Conda
function installpypackage()
try
pyimport_conda("sklearn", "scikit-learn")
catch
try
Conda.add("scikit-learn")
catch
println("scikit-learn failed to install")
end
end
end
function installrpackage(package::AbstractString)
try
rcall(:library,package,"lib=.libPaths()")
#rcall(:library,package,"lib=Sys.getenv('R_LIBS_USER')")
catch
try
R"dir.create(path = Sys.getenv('R_LIBS_USER'), showWarnings = FALSE, recursive = TRUE)"
R"install.packages($package,lib=Sys.getenv('R_LIBS_USER'),repos='https://cloud.r-project.org',type='binary')"
catch xerror
println(xerror)
println("package "*package*" failed to install")
end
end
end
function installrml()
#packages=["caret", "earth","mda","e1071","gam","randomForest","nnet","kernlab","grid","MASS","pls"]
#packages=["caret", "e1071","gam","randomForest"]
packages=["caret","e1071","randomForest"]
for pk in packages
installrpackage(pk)
end
end
installrml()
installpypackage()
|
using FFMPEG
using Dates # Hour, Minute, Second, Millisecond, TimePeriod used below
tosecond(x::Hour) = Float64(Dates.value(Second(x)))
tosecond(x::Minute) = Float64(Dates.value(Second(x)))
tosecond(x::T) where {T<:TimePeriod} = x/convert(T, Second(1))
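# Illustrative conversions (a sketch): with `Dates` loaded above,
# tosecond(Minute(2)) == 120.0 and tosecond(Millisecond(1500)) == 1.5.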
# extract(_, __::Missing, ___) = nothing
ffmpeg(start, sourcefile, targetfile) = ffmpeg_exe(` -y -nostats -loglevel 8 -ss $start -i $sourcefile -vf 'yadif=1,format=gray,scale=sar*iw:ih' -pix_fmt gray -vframes 1 $targetfile`)
extract(targetpathfile, t::Temporal{WholeVideo, I}, coffeesource) where {I <: Instantaneous} = ffmpeg(tosecond(start(t.time)), joinpath(coffeesource, t.video.file.name), targetpathfile)
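# For an instantaneous time point in a fragmented video: walk the fragments in order,
# shifting the offset by each fragment's duration until it falls inside one, then
# grab a single frame from that fragment.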
function extract(targetpathfile, t::Temporal{<:FragmentedVideo, I}, coffeesource) where {I <: Instantaneous}
x = start(t.time)
for vf in files(t.video)
d = duration(vf)
if x ≤ d
ffmpeg(tosecond(x), joinpath(coffeesource, vf.name), targetpathfile)
break
end
x -= d
end
end
ffmpeg(start, duration, sourcefile, targetpathfile) = ffmpeg_exe(` -y -nostats -loglevel 8 -ss $start -i $sourcefile -t $duration -r 2 -vf 'yadif=1,format=gray,scale=sar*iw:ih' -pix_fmt gray $targetpathfile`)
getsourcefile(coffeesource, t) = joinpath(coffeesource, t)
function getsourcetarget(coffeesource, t, targetpath)
sourcefile = getsourcefile(coffeesource, t)
sourcefile, joinpath(targetpath, "img%03d.png")
end
extract(targetpath, t::Temporal{WholeVideo, P}, coffeesource) where {P <: Prolonged} = ffmpeg(tosecond(start(t.time)), tosecond(duration(t.time)), getsourcetarget(coffeesource, t.video.file.name, targetpath)...)
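# For a prolonged interval over a fragmented video: skip fragments until the start of
# the interval falls inside one; extract up to the interval's end if it fits in the
# same fragment, otherwise extract to the fragment's end and keep extracting from the
# start of each following fragment until the remaining duration is exhausted.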
function extract(targetpath, t::Temporal{<:FragmentedVideo, P}, coffeesource) where {P <: Prolonged}
t1 = start(t.time)
t2 = stop(t.time)
files = Iterators.Stateful(t.video.files)
for vf in files
d = duration(vf)
if t1 ≤ d
if t2 ≤ d
ffmpeg(tosecond(t1), tosecond(t2 - t1), getsourcetarget(coffeesource, vf.name, targetpath)...)
break
else
ffmpeg(tosecond(t1), tosecond(d), getsourcetarget(coffeesource, vf.name, targetpath)...)
t2 -= d
for _vf in files
_d = duration(_vf)
if t2 ≤ _d
ffmpeg(tosecond(Millisecond(0)), tosecond(t2), getsourcetarget(coffeesource, _vf.name, targetpath)...)
break
end
ffmpeg(tosecond(Millisecond(0)), tosecond(_d), getsourcetarget(coffeesource, _vf.name, targetpath)...)
t2 -= _d
end
end
end
t1 -= d
t2 -= d
end
end
|
using SIMDHints: ivdep
using BenchmarkTools
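# `f!` tags the broadcast with `ivdep`, asserting there are no loop-carried
# dependencies, so the compiler may vectorize even though `x` and `y` alias in the
# benchmark below; `g!` is the plain broadcast for comparison.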
f!(y, x) = y .= ivdep.(x .+ y)
g!(y, x) = y .= x .+ y
a = randn(2^10)
y = zero(a)
@btime f!($y, $y) setup=(fill!($y, 0))
@btime g!($y, $y) setup=(fill!($y, 0))
nothing
|
using Zygote
using Zygote: @adjoint, literal_getproperty
import Zygote: accum
import Distances: pairwise, colwise
const dtol = 1e-12 # threshold value for precise recalculation of distances
@nograd MersenneTwister, propertynames, broadcast_shape
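# `accum` merges gradient contributions. A `Diagonal` adjoint is first widened to a
# dense `Matrix{Union{T, Nothing}}` (off-diagonal entries are `nothing`, i.e. no
# contribution) so that the generic dense `accum` can take over.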
function accum(D::Diagonal{T}, B::AbstractMatrix) where {T}
A = Matrix{Union{T, Nothing}}(undef, size(D))
A[diagind(A)] .= D.diag
return accum(A, B)
end
accum(A::AbstractMatrix, D::Diagonal) = accum(D, A)
accum(A::Diagonal, B::Diagonal) = Diagonal(accum(diag(A), diag(B)))
@adjoint function ZygoteRules.literal_getproperty(C::Cholesky, ::Val{:factors})
error("@adjoint not implemented for :factors as is unsafe.")
return literal_getproperty(C, Val(:factors)), function(Δ)
error("@adjoint not implemented for :factors. (I couldn't make it work...)")
end
end
import LinearAlgebra: HermOrSym, diag, Diagonal
diag(S::Symmetric{T, <:Diagonal{T}} where T) = S.data.diag
Zygote._symmetric_back(Δ::Diagonal) = Δ
# Diagonal matrices are always symmetric...
cholesky(A::HermOrSym{T, <:Diagonal{T}} where T) = cholesky(Diagonal(diag(A)))
#
# Some very specific broadcasting hacks while Zygote has crappy broadcasting.
#
import Base.Broadcast: broadcasted
function rrule(::typeof(broadcasted), ::typeof(-), x::AbstractArray)
function broadcasted_minus_pullback(Δ)
return (NO_FIELDS, DoesNotExist(), .-Δ)
end
return .-x, broadcasted_minus_pullback
end
function rrule(::typeof(broadcasted), ::typeof(exp), x::AbstractArray)
y = exp.(x)
return y, Δ->(NO_FIELDS, DoesNotExist(), Δ .* y)
end
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Jeremy Avigad
-/
import order.filter.ultrafilter
import order.filter.partial
import algebra.support
/-!
# Basic theory of topological spaces.
The main definition is the type class `topological_space α`, which endows a type `α` with a topology.
Then `set α` gets predicates `is_open`, `is_closed` and functions `interior`, `closure` and
`frontier`. Each point `x` of `α` gets a neighborhood filter `𝓝 x`. A filter `F` on `α` has
`x` as a cluster point if `cluster_pt x F : 𝓝 x ⊓ F ≠ ⊥`. A map `f : ι → α` clusters at `x`
along `F : filter ι` if `map_cluster_pt x F f : cluster_pt x (map f F)`. In particular
the notion of cluster point of a sequence `u` is `map_cluster_pt x at_top u`.
This file also defines locally finite families of subsets of `α`.
For topological spaces `α` and `β`, a function `f : α → β` and a point `a : α`,
`continuous_at f a` means `f` is continuous at `a`, and global continuity is
`continuous f`. There is also a version of continuity `pcontinuous` for
partially defined functions.
## Notation
* `𝓝 x`: the filter of neighborhoods of a point `x`;
* `𝓟 s`: the principal filter of a set `s`;
* `𝓝[s] x`: the filter `nhds_within x s` of neighborhoods of a point `x` within a set `s`.
## Implementation notes
Topology in mathlib heavily uses filters (even more than in Bourbaki). See explanations in
<https://leanprover-community.github.io/theories/topology.html>.
## References
* [N. Bourbaki, *General Topology*][bourbaki1966]
* [I. M. James, *Topologies and Uniformities*][james1999]
## Tags
topological space, interior, closure, frontier, neighborhood, continuity, continuous function
-/
noncomputable theory
open set filter classical
open_locale classical filter
universes u v w
/-!
### Topological spaces
-/
/-- A topology on `α`. -/
@[protect_proj] structure topological_space (α : Type u) :=
(is_open : set α → Prop)
(is_open_univ : is_open univ)
(is_open_inter : ∀s t, is_open s → is_open t → is_open (s ∩ t))
(is_open_sUnion : ∀s, (∀t∈s, is_open t) → is_open (⋃₀ s))
attribute [class] topological_space
/-- A constructor for topologies by specifying the closed sets,
and showing that they satisfy the appropriate conditions. -/
def topological_space.of_closed {α : Type u} (T : set (set α))
(empty_mem : ∅ ∈ T) (sInter_mem : ∀ A ⊆ T, ⋂₀ A ∈ T) (union_mem : ∀ A B ∈ T, A ∪ B ∈ T) :
topological_space α :=
{ is_open := λ X, Xᶜ ∈ T,
is_open_univ := by simp [empty_mem],
is_open_inter := λ s t hs ht, by simpa [set.compl_inter] using union_mem sᶜ tᶜ hs ht,
is_open_sUnion := λ s hs,
by rw set.compl_sUnion; exact sInter_mem (set.compl '' s)
(λ z ⟨y, hy, hz⟩, by simpa [hz.symm] using hs y hy) }
section topological_space
variables {α : Type u} {β : Type v} {ι : Sort w} {a : α} {s s₁ s₂ : set α} {p p₁ p₂ : α → Prop}
@[ext]
lemma topological_space_eq : ∀ {f g : topological_space α}, f.is_open = g.is_open → f = g
| ⟨a, _, _, _⟩ ⟨b, _, _, _⟩ rfl := rfl
section
variables [t : topological_space α]
include t
/-- `is_open s` means that `s` is open in the ambient topological space on `α` -/
def is_open (s : set α) : Prop := topological_space.is_open t s
@[simp]
lemma is_open_univ : is_open (univ : set α) := topological_space.is_open_univ t
lemma is_open.inter (h₁ : is_open s₁) (h₂ : is_open s₂) : is_open (s₁ ∩ s₂) :=
topological_space.is_open_inter t s₁ s₂ h₁ h₂
lemma is_open_sUnion {s : set (set α)} (h : ∀t ∈ s, is_open t) : is_open (⋃₀ s) :=
topological_space.is_open_sUnion t s h
end
lemma topological_space_eq_iff {t t' : topological_space α} :
t = t' ↔ ∀ s, @is_open α t s ↔ @is_open α t' s :=
⟨λ h s, h ▸ iff.rfl, λ h, by { ext, exact h _ }⟩
lemma is_open_fold {s : set α} {t : topological_space α} : t.is_open s = @is_open α t s :=
rfl
variables [topological_space α]
lemma is_open_Union {f : ι → set α} (h : ∀i, is_open (f i)) : is_open (⋃i, f i) :=
is_open_sUnion $ by rintro _ ⟨i, rfl⟩; exact h i
lemma is_open_bUnion {s : set β} {f : β → set α} (h : ∀i∈s, is_open (f i)) :
is_open (⋃i∈s, f i) :=
is_open_Union $ assume i, is_open_Union $ assume hi, h i hi
lemma is_open.union (h₁ : is_open s₁) (h₂ : is_open s₂) : is_open (s₁ ∪ s₂) :=
by rw union_eq_Union; exact is_open_Union (bool.forall_bool.2 ⟨h₂, h₁⟩)
@[simp] lemma is_open_empty : is_open (∅ : set α) :=
by rw ← sUnion_empty; exact is_open_sUnion (assume a, false.elim)
lemma is_open_sInter {s : set (set α)} (hs : finite s) : (∀t ∈ s, is_open t) → is_open (⋂₀ s) :=
finite.induction_on hs (λ _, by rw sInter_empty; exact is_open_univ) $
λ a s has hs ih h, by rw sInter_insert; exact
is_open.inter (h _ $ mem_insert _ _) (ih $ λ t, h t ∘ mem_insert_of_mem _)
lemma is_open_bInter {s : set β} {f : β → set α} (hs : finite s) :
(∀i∈s, is_open (f i)) → is_open (⋂i∈s, f i) :=
finite.induction_on hs
(λ _, by rw bInter_empty; exact is_open_univ)
(λ a s has hs ih h, by rw bInter_insert; exact
is_open.inter (h a (mem_insert _ _)) (ih (λ i hi, h i (mem_insert_of_mem _ hi))))
lemma is_open_Inter [fintype β] {s : β → set α}
(h : ∀ i, is_open (s i)) : is_open (⋂ i, s i) :=
suffices is_open (⋂ (i : β) (hi : i ∈ @univ β), s i), by simpa,
is_open_bInter finite_univ (λ i _, h i)
lemma is_open_Inter_prop {p : Prop} {s : p → set α}
(h : ∀ h : p, is_open (s h)) : is_open (Inter s) :=
by by_cases p; simp *
lemma is_open_const {p : Prop} : is_open {a : α | p} :=
by_cases
(assume : p, begin simp only [this]; exact is_open_univ end)
(assume : ¬ p, begin simp only [this]; exact is_open_empty end)
lemma is_open.and : is_open {a | p₁ a} → is_open {a | p₂ a} → is_open {a | p₁ a ∧ p₂ a} :=
is_open.inter
/-- A set is closed if its complement is open -/
class is_closed (s : set α) : Prop :=
(is_open_compl : is_open sᶜ)
@[simp] lemma is_open_compl_iff {s : set α} : is_open sᶜ ↔ is_closed s :=
⟨λ h, ⟨h⟩, λ h, h.is_open_compl⟩
@[simp] lemma is_closed_empty : is_closed (∅ : set α) :=
by { rw [← is_open_compl_iff, compl_empty], exact is_open_univ }
@[simp] lemma is_closed_univ : is_closed (univ : set α) :=
by { rw [← is_open_compl_iff, compl_univ], exact is_open_empty }
lemma is_closed.union : is_closed s₁ → is_closed s₂ → is_closed (s₁ ∪ s₂) :=
λ h₁ h₂, by { rw [← is_open_compl_iff] at *, rw compl_union, exact is_open.inter h₁ h₂ }
lemma is_closed_sInter {s : set (set α)} : (∀t ∈ s, is_closed t) → is_closed (⋂₀ s) :=
by simpa only [← is_open_compl_iff, compl_sInter, sUnion_image] using is_open_bUnion
lemma is_closed_Inter {f : ι → set α} (h : ∀i, is_closed (f i)) : is_closed (⋂i, f i ) :=
is_closed_sInter $ assume t ⟨i, (heq : f i = t)⟩, heq ▸ h i
lemma is_closed_bInter {s : set β} {f : β → set α} (h : ∀ i ∈ s, is_closed (f i)) :
is_closed (⋂ i ∈ s, f i) :=
is_closed_Inter $ λ i, is_closed_Inter $ h i
@[simp] lemma is_closed_compl_iff {s : set α} : is_closed sᶜ ↔ is_open s :=
by rw [←is_open_compl_iff, compl_compl]
lemma is_open.is_closed_compl {s : set α} (hs : is_open s) : is_closed sᶜ :=
is_closed_compl_iff.2 hs
lemma is_open.sdiff {s t : set α} (h₁ : is_open s) (h₂ : is_closed t) : is_open (s \ t) :=
is_open.inter h₁ $ is_open_compl_iff.mpr h₂
lemma is_closed.inter (h₁ : is_closed s₁) (h₂ : is_closed s₂) : is_closed (s₁ ∩ s₂) :=
by { rw [← is_open_compl_iff] at *, rw compl_inter, exact is_open.union h₁ h₂ }
lemma is_closed.sdiff {s t : set α} (h₁ : is_closed s) (h₂ : is_open t) : is_closed (s \ t) :=
is_closed.inter h₁ (is_closed_compl_iff.mpr h₂)
lemma is_closed_bUnion {s : set β} {f : β → set α} (hs : finite s) :
(∀i∈s, is_closed (f i)) → is_closed (⋃i∈s, f i) :=
finite.induction_on hs
(λ _, by rw bUnion_empty; exact is_closed_empty)
(λ a s has hs ih h, by rw bUnion_insert; exact
is_closed.union (h a (mem_insert _ _)) (ih (λ i hi, h i (mem_insert_of_mem _ hi))))
lemma is_closed_Union [fintype β] {s : β → set α}
(h : ∀ i, is_closed (s i)) : is_closed (Union s) :=
suffices is_closed (⋃ (i : β) (hi : i ∈ @univ β), s i),
by convert this; simp [set.ext_iff],
is_closed_bUnion finite_univ (λ i _, h i)
lemma is_closed_Union_prop {p : Prop} {s : p → set α}
(h : ∀ h : p, is_closed (s h)) : is_closed (Union s) :=
by by_cases p; simp *
lemma is_closed_imp {p q : α → Prop} (hp : is_open {x | p x})
(hq : is_closed {x | q x}) : is_closed {x | p x → q x} :=
have {x | p x → q x} = {x | p x}ᶜ ∪ {x | q x}, from set.ext $ λ x, imp_iff_not_or,
by rw [this]; exact is_closed.union (is_closed_compl_iff.mpr hp) hq
lemma is_closed.not : is_closed {a | p a} → is_open {a | ¬ p a} :=
is_open_compl_iff.mpr
/-!
### Interior of a set
-/
/-- The interior of a set `s` is the largest open subset of `s`. -/
def interior (s : set α) : set α := ⋃₀ {t | is_open t ∧ t ⊆ s}
lemma mem_interior {s : set α} {x : α} :
x ∈ interior s ↔ ∃ t ⊆ s, is_open t ∧ x ∈ t :=
by simp only [interior, mem_set_of_eq, exists_prop, and_assoc, and.left_comm]
@[simp] lemma is_open_interior {s : set α} : is_open (interior s) :=
is_open_sUnion $ assume t ⟨h₁, h₂⟩, h₁
lemma interior_subset {s : set α} : interior s ⊆ s :=
sUnion_subset $ assume t ⟨h₁, h₂⟩, h₂
lemma interior_maximal {s t : set α} (h₁ : t ⊆ s) (h₂ : is_open t) : t ⊆ interior s :=
subset_sUnion_of_mem ⟨h₂, h₁⟩
lemma is_open.interior_eq {s : set α} (h : is_open s) : interior s = s :=
subset.antisymm interior_subset (interior_maximal (subset.refl s) h)
lemma interior_eq_iff_open {s : set α} : interior s = s ↔ is_open s :=
⟨assume h, h ▸ is_open_interior, is_open.interior_eq⟩
lemma subset_interior_iff_open {s : set α} : s ⊆ interior s ↔ is_open s :=
by simp only [interior_eq_iff_open.symm, subset.antisymm_iff, interior_subset, true_and]
lemma subset_interior_iff_subset_of_open {s t : set α} (h₁ : is_open s) :
s ⊆ interior t ↔ s ⊆ t :=
⟨assume h, subset.trans h interior_subset, assume h₂, interior_maximal h₂ h₁⟩
@[mono] lemma interior_mono {s t : set α} (h : s ⊆ t) : interior s ⊆ interior t :=
interior_maximal (subset.trans interior_subset h) is_open_interior
@[simp] lemma interior_empty : interior (∅ : set α) = ∅ :=
is_open_empty.interior_eq
@[simp] lemma interior_univ : interior (univ : set α) = univ :=
is_open_univ.interior_eq
@[simp] lemma interior_interior {s : set α} : interior (interior s) = interior s :=
is_open_interior.interior_eq
@[simp] lemma interior_inter {s t : set α} : interior (s ∩ t) = interior s ∩ interior t :=
subset.antisymm
(subset_inter (interior_mono $ inter_subset_left s t) (interior_mono $ inter_subset_right s t))
(interior_maximal (inter_subset_inter interior_subset interior_subset) $
is_open.inter is_open_interior is_open_interior)
@[simp] lemma finset.interior_Inter {ι : Type*} (s : finset ι) (f : ι → set α) :
interior (⋂ i ∈ s, f i) = ⋂ i ∈ s, interior (f i) :=
begin
classical,
refine s.induction_on (by simp) _,
intros i s h₁ h₂,
simp [h₂],
end
@[simp] lemma interior_Inter_of_fintype {ι : Type*} [fintype ι] (f : ι → set α) :
interior (⋂ i, f i) = ⋂ i, interior (f i) :=
by { convert finset.univ.interior_Inter f; simp, }
lemma interior_union_is_closed_of_interior_empty {s t : set α} (h₁ : is_closed s)
(h₂ : interior t = ∅) :
interior (s ∪ t) = interior s :=
have interior (s ∪ t) ⊆ s, from
assume x ⟨u, ⟨(hu₁ : is_open u), (hu₂ : u ⊆ s ∪ t)⟩, (hx₁ : x ∈ u)⟩,
classical.by_contradiction $ assume hx₂ : x ∉ s,
have u \ s ⊆ t,
from assume x ⟨h₁, h₂⟩, or.resolve_left (hu₂ h₁) h₂,
have u \ s ⊆ interior t,
by rwa subset_interior_iff_subset_of_open (is_open.sdiff hu₁ h₁),
have u \ s ⊆ ∅,
by rwa h₂ at this,
this ⟨hx₁, hx₂⟩,
subset.antisymm
(interior_maximal this is_open_interior)
(interior_mono $ subset_union_left _ _)
lemma is_open_iff_forall_mem_open : is_open s ↔ ∀ x ∈ s, ∃ t ⊆ s, is_open t ∧ x ∈ t :=
by rw ← subset_interior_iff_open; simp only [subset_def, mem_interior]
lemma interior_Inter_subset (s : ι → set α) : interior (⋂ i, s i) ⊆ ⋂ i, interior (s i) :=
subset_Inter $ λ i, interior_mono $ Inter_subset _ _
lemma interior_bInter_subset (p : ι → Sort*) (s : Π i, p i → set α) :
interior (⋂ i (hi : p i), s i hi) ⊆ ⋂ i (hi : p i), interior (s i hi) :=
(interior_Inter_subset _).trans $ Inter_subset_Inter $ λ i, interior_Inter_subset _
lemma interior_sInter_subset (S : set (set α)) : interior (⋂₀ S) ⊆ ⋂ s ∈ S, interior s :=
calc interior (⋂₀ S) = interior (⋂ s ∈ S, s) : by rw sInter_eq_bInter
... ⊆ ⋂ s ∈ S, interior s : interior_bInter_subset _ _
/-!
### Closure of a set
-/
/-- The closure of `s` is the smallest closed set containing `s`. -/
def closure (s : set α) : set α := ⋂₀ {t | is_closed t ∧ s ⊆ t}
@[simp] lemma is_closed_closure {s : set α} : is_closed (closure s) :=
is_closed_sInter $ assume t ⟨h₁, h₂⟩, h₁
lemma subset_closure {s : set α} : s ⊆ closure s :=
subset_sInter $ assume t ⟨h₁, h₂⟩, h₂
lemma not_mem_of_not_mem_closure {s : set α} {P : α} (hP : P ∉ closure s) : P ∉ s :=
λ h, hP (subset_closure h)
lemma closure_minimal {s t : set α} (h₁ : s ⊆ t) (h₂ : is_closed t) : closure s ⊆ t :=
sInter_subset_of_mem ⟨h₂, h₁⟩
lemma is_closed.closure_eq {s : set α} (h : is_closed s) : closure s = s :=
subset.antisymm (closure_minimal (subset.refl s) h) subset_closure
lemma is_closed.closure_subset {s : set α} (hs : is_closed s) : closure s ⊆ s :=
closure_minimal (subset.refl _) hs
lemma is_closed.closure_subset_iff {s t : set α} (h₁ : is_closed t) :
closure s ⊆ t ↔ s ⊆ t :=
⟨subset.trans subset_closure, assume h, closure_minimal h h₁⟩
@[mono] lemma closure_mono {s t : set α} (h : s ⊆ t) : closure s ⊆ closure t :=
closure_minimal (subset.trans h subset_closure) is_closed_closure
lemma monotone_closure (α : Type*) [topological_space α] : monotone (@closure α _) :=
λ _ _, closure_mono
lemma diff_subset_closure_iff {s t : set α} :
s \ t ⊆ closure t ↔ s ⊆ closure t :=
by rw [diff_subset_iff, union_eq_self_of_subset_left subset_closure]
lemma closure_inter_subset_inter_closure (s t : set α) :
closure (s ∩ t) ⊆ closure s ∩ closure t :=
(monotone_closure α).map_inf_le s t
lemma is_closed_of_closure_subset {s : set α} (h : closure s ⊆ s) : is_closed s :=
by rw subset.antisymm subset_closure h; exact is_closed_closure
lemma closure_eq_iff_is_closed {s : set α} : closure s = s ↔ is_closed s :=
⟨assume h, h ▸ is_closed_closure, is_closed.closure_eq⟩
lemma closure_subset_iff_is_closed {s : set α} : closure s ⊆ s ↔ is_closed s :=
⟨is_closed_of_closure_subset, is_closed.closure_subset⟩
@[simp] lemma closure_empty : closure (∅ : set α) = ∅ :=
is_closed_empty.closure_eq
@[simp] lemma closure_empty_iff (s : set α) : closure s = ∅ ↔ s = ∅ :=
⟨subset_eq_empty subset_closure, λ h, h.symm ▸ closure_empty⟩
@[simp] lemma closure_nonempty_iff {s : set α} : (closure s).nonempty ↔ s.nonempty :=
by simp only [← ne_empty_iff_nonempty, ne.def, closure_empty_iff]
alias closure_nonempty_iff ↔ set.nonempty.of_closure set.nonempty.closure
@[simp] lemma closure_univ : closure (univ : set α) = univ :=
is_closed_univ.closure_eq
@[simp] lemma closure_closure {s : set α} : closure (closure s) = closure s :=
is_closed_closure.closure_eq
@[simp] lemma closure_union {s t : set α} : closure (s ∪ t) = closure s ∪ closure t :=
subset.antisymm
(closure_minimal (union_subset_union subset_closure subset_closure) $
is_closed.union is_closed_closure is_closed_closure)
((monotone_closure α).le_map_sup s t)
@[simp] lemma finset.closure_Union {ι : Type*} (s : finset ι) (f : ι → set α) :
closure (⋃ i ∈ s, f i) = ⋃ i ∈ s, closure (f i) :=
begin
classical,
refine s.induction_on (by simp) _,
intros i s h₁ h₂,
simp [h₂],
end
@[simp] lemma closure_Union_of_fintype {ι : Type*} [fintype ι] (f : ι → set α) :
closure (⋃ i, f i) = ⋃ i, closure (f i) :=
by { convert finset.univ.closure_Union f; simp, }
lemma interior_subset_closure {s : set α} : interior s ⊆ closure s :=
subset.trans interior_subset subset_closure
lemma closure_eq_compl_interior_compl {s : set α} : closure s = (interior sᶜ)ᶜ :=
begin
rw [interior, closure, compl_sUnion, compl_image_set_of],
simp only [compl_subset_compl, is_open_compl_iff],
end
@[simp] lemma interior_compl {s : set α} : interior sᶜ = (closure s)ᶜ :=
by simp [closure_eq_compl_interior_compl]
@[simp] lemma closure_compl {s : set α} : closure sᶜ = (interior s)ᶜ :=
by simp [closure_eq_compl_interior_compl]
theorem mem_closure_iff {s : set α} {a : α} :
a ∈ closure s ↔ ∀ o, is_open o → a ∈ o → (o ∩ s).nonempty :=
⟨λ h o oo ao, classical.by_contradiction $ λ os,
have s ⊆ oᶜ, from λ x xs xo, os ⟨x, xo, xs⟩,
closure_minimal this (is_closed_compl_iff.2 oo) h ao,
λ H c ⟨h₁, h₂⟩, classical.by_contradiction $ λ nc,
let ⟨x, hc, hs⟩ := (H _ h₁.is_open_compl nc) in hc (h₂ hs)⟩
/-- A set is dense in a topological space if every point belongs to its closure. -/
def dense (s : set α) : Prop := ∀ x, x ∈ closure s
lemma dense_iff_closure_eq {s : set α} : dense s ↔ closure s = univ :=
eq_univ_iff_forall.symm
lemma dense.closure_eq {s : set α} (h : dense s) : closure s = univ :=
dense_iff_closure_eq.mp h
lemma interior_eq_empty_iff_dense_compl {s : set α} : interior s = ∅ ↔ dense sᶜ :=
by rw [dense_iff_closure_eq, closure_compl, compl_univ_iff]
lemma dense.interior_compl {s : set α} (h : dense s) : interior sᶜ = ∅ :=
interior_eq_empty_iff_dense_compl.2 $ by rwa compl_compl
/-- The closure of a set `s` is dense if and only if `s` is dense. -/
@[simp] lemma dense_closure {s : set α} : dense (closure s) ↔ dense s :=
by rw [dense, dense, closure_closure]
alias dense_closure ↔ dense.of_closure dense.closure
@[simp] lemma dense_univ : dense (univ : set α) := λ x, subset_closure trivial
/-- A set is dense if and only if it has a nonempty intersection with each nonempty open set. -/
lemma dense_iff_inter_open {s : set α} :
dense s ↔ ∀ U, is_open U → U.nonempty → (U ∩ s).nonempty :=
begin
split ; intro h,
{ rintros U U_op ⟨x, x_in⟩,
exact mem_closure_iff.1 (by simp only [h.closure_eq]) U U_op x_in },
{ intro x,
rw mem_closure_iff,
intros U U_op x_in,
exact h U U_op ⟨_, x_in⟩ },
end
alias dense_iff_inter_open ↔ dense.inter_open_nonempty _
lemma dense.exists_mem_open {s : set α} (hs : dense s) {U : set α} (ho : is_open U)
(hne : U.nonempty) :
∃ x ∈ s, x ∈ U :=
let ⟨x, hx⟩ := hs.inter_open_nonempty U ho hne in ⟨x, hx.2, hx.1⟩
lemma dense.nonempty_iff {s : set α} (hs : dense s) :
s.nonempty ↔ nonempty α :=
⟨λ ⟨x, hx⟩, ⟨x⟩, λ ⟨x⟩,
let ⟨y, hy⟩ := hs.inter_open_nonempty _ is_open_univ ⟨x, trivial⟩ in ⟨y, hy.2⟩⟩
lemma dense.nonempty [h : nonempty α] {s : set α} (hs : dense s) : s.nonempty :=
hs.nonempty_iff.2 h
@[mono]
lemma dense.mono {s₁ s₂ : set α} (h : s₁ ⊆ s₂) (hd : dense s₁) : dense s₂ :=
λ x, closure_mono h (hd x)
/-- Complement to a singleton is dense if and only if the singleton is not an open set. -/
lemma dense_compl_singleton_iff_not_open {x : α} : dense ({x}ᶜ : set α) ↔ ¬is_open ({x} : set α) :=
begin
fsplit,
{ intros hd ho,
exact (hd.inter_open_nonempty _ ho (singleton_nonempty _)).ne_empty (inter_compl_self _) },
{ refine λ ho, dense_iff_inter_open.2 (λ U hU hne, inter_compl_nonempty_iff.2 $ λ hUx, _),
obtain rfl : U = {x}, from eq_singleton_iff_nonempty_unique_mem.2 ⟨hne, hUx⟩,
exact ho hU }
end
/-!
### Frontier of a set
-/
/-- The frontier of a set is the set of points between the closure and interior. -/
def frontier (s : set α) : set α := closure s \ interior s
lemma frontier_eq_closure_inter_closure {s : set α} :
frontier s = closure s ∩ closure sᶜ :=
by rw [closure_compl, frontier, diff_eq]
lemma frontier_subset_closure {s : set α} : frontier s ⊆ closure s := diff_subset _ _
/-- The complement of a set has the same frontier as the original set. -/
@[simp] lemma frontier_compl (s : set α) : frontier sᶜ = frontier s :=
by simp only [frontier_eq_closure_inter_closure, compl_compl, inter_comm]
@[simp] lemma frontier_univ : frontier (univ : set α) = ∅ := by simp [frontier]
@[simp] lemma frontier_empty : frontier (∅ : set α) = ∅ := by simp [frontier]
lemma frontier_inter_subset (s t : set α) :
frontier (s ∩ t) ⊆ (frontier s ∩ closure t) ∪ (closure s ∩ frontier t) :=
begin
simp only [frontier_eq_closure_inter_closure, compl_inter, closure_union],
convert inter_subset_inter_left _ (closure_inter_subset_inter_closure s t),
simp only [inter_distrib_left, inter_distrib_right, inter_assoc],
congr' 2,
apply inter_comm
end
lemma frontier_union_subset (s t : set α) :
frontier (s ∪ t) ⊆ (frontier s ∩ closure tᶜ) ∪ (closure sᶜ ∩ frontier t) :=
by simpa only [frontier_compl, ← compl_union]
using frontier_inter_subset sᶜ tᶜ
lemma is_closed.frontier_eq {s : set α} (hs : is_closed s) : frontier s = s \ interior s :=
by rw [frontier, hs.closure_eq]
lemma is_open.frontier_eq {s : set α} (hs : is_open s) : frontier s = closure s \ s :=
by rw [frontier, hs.interior_eq]
lemma is_open.inter_frontier_eq {s : set α} (hs : is_open s) : s ∩ frontier s = ∅ :=
by rw [hs.frontier_eq, inter_diff_self]
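/- A quick consequence (sketch): a set that is both open and closed has empty
frontier. -/
example (ho : is_open s) (hc : is_closed s) : frontier s = ∅ :=
by rw [hc.frontier_eq, ho.interior_eq, diff_self]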
/-- The frontier of a set is closed. -/
lemma is_closed_frontier {s : set α} : is_closed (frontier s) :=
by rw frontier_eq_closure_inter_closure; exact is_closed.inter is_closed_closure is_closed_closure
/-- The frontier of a closed set has no interior point. -/
lemma interior_frontier {s : set α} (h : is_closed s) : interior (frontier s) = ∅ :=
begin
have A : frontier s = s \ interior s, from h.frontier_eq,
have B : interior (frontier s) ⊆ interior s, by rw A; exact interior_mono (diff_subset _ _),
have C : interior (frontier s) ⊆ frontier s := interior_subset,
have : interior (frontier s) ⊆ (interior s) ∩ (s \ interior s) :=
subset_inter B (by simpa [A] using C),
rwa [inter_diff_self, subset_empty_iff] at this,
end
lemma closure_eq_interior_union_frontier (s : set α) : closure s = interior s ∪ frontier s :=
(union_diff_cancel interior_subset_closure).symm
lemma closure_eq_self_union_frontier (s : set α) : closure s = s ∪ frontier s :=
(union_diff_cancel' interior_subset subset_closure).symm
lemma is_open.inter_frontier_eq_empty_of_disjoint {s t : set α} (ht : is_open t)
(hd : disjoint s t) :
t ∩ frontier s = ∅ :=
begin
rw [inter_comm, ← subset_compl_iff_disjoint],
exact subset.trans frontier_subset_closure (closure_minimal (λ _, disjoint_left.1 hd)
(is_closed_compl_iff.2 ht))
end
lemma frontier_eq_inter_compl_interior {s : set α} :
frontier s = (interior s)ᶜ ∩ (interior (sᶜ))ᶜ :=
by { rw [←frontier_compl, ←closure_compl], refl }
lemma compl_frontier_eq_union_interior {s : set α} :
(frontier s)ᶜ = interior s ∪ interior sᶜ :=
begin
rw frontier_eq_inter_compl_interior,
simp only [compl_inter, compl_compl],
end
/-!
### Neighborhoods
-/
/-- A set is called a neighborhood of `a` if it contains an open set around `a`. The set of all
neighborhoods of `a` forms a filter, the neighborhood filter at `a`; it is here defined as the
infimum over the principal filters of all open sets containing `a`. -/
@[irreducible] def nhds (a : α) : filter α := (⨅ s ∈ {s : set α | a ∈ s ∧ is_open s}, 𝓟 s)
localized "notation `𝓝` := nhds" in topological_space
/-- The "neighborhood within" filter. Elements of `𝓝[s] a` are sets containing the
intersection of `s` and a neighborhood of `a`. -/
def nhds_within (a : α) (s : set α) : filter α := 𝓝 a ⊓ 𝓟 s
localized "notation `𝓝[` s `] ` x:100 := nhds_within x s" in topological_space
lemma nhds_def (a : α) : 𝓝 a = (⨅ s ∈ {s : set α | a ∈ s ∧ is_open s}, 𝓟 s) := by rw nhds
/-- The open sets containing `a` are a basis for the neighborhood filter. See `nhds_basis_opens'`
for a variant using open neighborhoods instead. -/
lemma nhds_basis_opens (a : α) : (𝓝 a).has_basis (λ s : set α, a ∈ s ∧ is_open s) (λ x, x) :=
begin
rw nhds_def,
exact has_basis_binfi_principal
(λ s ⟨has, hs⟩ t ⟨hat, ht⟩, ⟨s ∩ t, ⟨⟨has, hat⟩, is_open.inter hs ht⟩,
⟨inter_subset_left _ _, inter_subset_right _ _⟩⟩)
⟨univ, ⟨mem_univ a, is_open_univ⟩⟩
end
/-- A filter lies below the neighborhood filter at `a` iff it contains every open set around `a`. -/
lemma le_nhds_iff {f a} : f ≤ 𝓝 a ↔ ∀ s : set α, a ∈ s → is_open s → s ∈ f :=
by simp [nhds_def]
/-- To show a filter is above the neighborhood filter at `a`, it suffices to show that it is above
the principal filter of some open set `s` containing `a`. -/
lemma nhds_le_of_le {f a} {s : set α} (h : a ∈ s) (o : is_open s) (sf : 𝓟 s ≤ f) : 𝓝 a ≤ f :=
by rw nhds_def; exact infi_le_of_le s (infi_le_of_le ⟨h, o⟩ sf)
lemma mem_nhds_iff {a : α} {s : set α} :
s ∈ 𝓝 a ↔ ∃t⊆s, is_open t ∧ a ∈ t :=
(nhds_basis_opens a).mem_iff.trans
⟨λ ⟨t, ⟨hat, ht⟩, hts⟩, ⟨t, hts, ht, hat⟩, λ ⟨t, hts, ht, hat⟩, ⟨t, ⟨hat, ht⟩, hts⟩⟩
/-- A predicate is true in a neighborhood of `a` iff it is true for all the points in an open set
containing `a`. -/
lemma eventually_nhds_iff {a : α} {p : α → Prop} :
(∀ᶠ x in 𝓝 a, p x) ↔ ∃ (t : set α), (∀ x ∈ t, p x) ∧ is_open t ∧ a ∈ t :=
mem_nhds_iff.trans $ by simp only [subset_def, exists_prop, mem_set_of_eq]
lemma map_nhds {a : α} {f : α → β} :
map f (𝓝 a) = (⨅ s ∈ {s : set α | a ∈ s ∧ is_open s}, 𝓟 (image f s)) :=
((nhds_basis_opens a).map f).eq_binfi
lemma mem_of_mem_nhds {a : α} {s : set α} : s ∈ 𝓝 a → a ∈ s :=
λ H, let ⟨t, ht, _, hs⟩ := mem_nhds_iff.1 H in ht hs
/-- If a predicate is true in a neighborhood of `a`, then it is true for `a`. -/
lemma filter.eventually.self_of_nhds {p : α → Prop} {a : α}
(h : ∀ᶠ y in 𝓝 a, p y) : p a :=
mem_of_mem_nhds h
lemma is_open.mem_nhds {a : α} {s : set α} (hs : is_open s) (ha : a ∈ s) :
s ∈ 𝓝 a :=
mem_nhds_iff.2 ⟨s, subset.refl _, hs, ha⟩
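/- For instance (a quick sketch): `univ` is a neighborhood of every point. -/
example {a : α} : (univ : set α) ∈ 𝓝 a := is_open_univ.mem_nhds (mem_univ a)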
lemma is_closed.compl_mem_nhds {a : α} {s : set α} (hs : is_closed s) (ha : a ∉ s) : sᶜ ∈ 𝓝 a :=
hs.is_open_compl.mem_nhds (mem_compl ha)
lemma is_open.eventually_mem {a : α} {s : set α} (hs : is_open s) (ha : a ∈ s) :
∀ᶠ x in 𝓝 a, x ∈ s :=
is_open.mem_nhds hs ha
/-- The open neighborhoods of `a` are a basis for the neighborhood filter. See `nhds_basis_opens`
for a variant using open sets around `a` instead. -/
lemma nhds_basis_opens' (a : α) : (𝓝 a).has_basis (λ s : set α, s ∈ 𝓝 a ∧ is_open s) (λ x, x) :=
begin
convert nhds_basis_opens a,
ext s,
split,
{ rintros ⟨s_in, s_op⟩,
exact ⟨mem_of_mem_nhds s_in, s_op⟩ },
{ rintros ⟨a_in, s_op⟩,
exact ⟨is_open.mem_nhds s_op a_in, s_op⟩ },
end
/-- If `U` is a neighborhood of each point of a set `s` then it is a neighborhood of `s`:
it contains an open set containing `s`. -/
lemma exists_open_set_nhds {s U : set α} (h : ∀ x ∈ s, U ∈ 𝓝 x) :
∃ V : set α, s ⊆ V ∧ is_open V ∧ V ⊆ U :=
begin
have := λ x hx, (nhds_basis_opens x).mem_iff.1 (h x hx),
choose! Z hZ hZ' using this,
refine ⟨⋃ x ∈ s, Z x, _, _, bUnion_subset hZ'⟩,
{ intros x hx,
simp only [mem_Union],
exact ⟨x, hx, (hZ x hx).1⟩ },
{ apply is_open_Union,
intros x,
by_cases hx : x ∈ s ; simp [hx],
exact (hZ x hx).2 }
end
/-- If `U` is a neighborhood of each point of a set `s` then it is a neighborhood of `s`:
it contains an open set containing `s`. -/
lemma exists_open_set_nhds' {s U : set α} (h : U ∈ ⨆ x ∈ s, 𝓝 x) :
∃ V : set α, s ⊆ V ∧ is_open V ∧ V ⊆ U :=
exists_open_set_nhds (by simpa using h)
/-- If a predicate is true in a neighbourhood of `a`, then for `y` sufficiently close
to `a` this predicate is true in a neighbourhood of `y`. -/
lemma filter.eventually.eventually_nhds {p : α → Prop} {a : α} (h : ∀ᶠ y in 𝓝 a, p y) :
∀ᶠ y in 𝓝 a, ∀ᶠ x in 𝓝 y, p x :=
let ⟨t, htp, hto, ha⟩ := eventually_nhds_iff.1 h in
eventually_nhds_iff.2 ⟨t, λ x hx, eventually_nhds_iff.2 ⟨t, htp, hto, hx⟩, hto, ha⟩
@[simp] lemma eventually_eventually_nhds {p : α → Prop} {a : α} :
(∀ᶠ y in 𝓝 a, ∀ᶠ x in 𝓝 y, p x) ↔ ∀ᶠ x in 𝓝 a, p x :=
⟨λ h, h.self_of_nhds, λ h, h.eventually_nhds⟩
@[simp] lemma nhds_bind_nhds : (𝓝 a).bind 𝓝 = 𝓝 a := filter.ext $ λ s, eventually_eventually_nhds
@[simp] lemma eventually_eventually_eq_nhds {f g : α → β} {a : α} :
(∀ᶠ y in 𝓝 a, f =ᶠ[𝓝 y] g) ↔ f =ᶠ[𝓝 a] g :=
eventually_eventually_nhds
lemma filter.eventually_eq.eq_of_nhds {f g : α → β} {a : α} (h : f =ᶠ[𝓝 a] g) : f a = g a :=
h.self_of_nhds
@[simp] lemma eventually_eventually_le_nhds [has_le β] {f g : α → β} {a : α} :
(∀ᶠ y in 𝓝 a, f ≤ᶠ[𝓝 y] g) ↔ f ≤ᶠ[𝓝 a] g :=
eventually_eventually_nhds
/-- If two functions are equal in a neighbourhood of `a`, then for `y` sufficiently close
to `a` these functions are equal in a neighbourhood of `y`. -/
lemma filter.eventually_eq.eventually_eq_nhds {f g : α → β} {a : α} (h : f =ᶠ[𝓝 a] g) :
∀ᶠ y in 𝓝 a, f =ᶠ[𝓝 y] g :=
h.eventually_nhds
/-- If `f x ≤ g x` in a neighbourhood of `a`, then for `y` sufficiently close to `a` we have
`f x ≤ g x` in a neighbourhood of `y`. -/
lemma filter.eventually_le.eventually_le_nhds [has_le β] {f g : α → β} {a : α} (h : f ≤ᶠ[𝓝 a] g) :
∀ᶠ y in 𝓝 a, f ≤ᶠ[𝓝 y] g :=
h.eventually_nhds
theorem all_mem_nhds (x : α) (P : set α → Prop) (hP : ∀ s t, s ⊆ t → P s → P t) :
(∀ s ∈ 𝓝 x, P s) ↔ (∀ s, is_open s → x ∈ s → P s) :=
((nhds_basis_opens x).forall_iff hP).trans $ by simp only [and_comm (x ∈ _), and_imp]
theorem all_mem_nhds_filter (x : α) (f : set α → set β) (hf : ∀ s t, s ⊆ t → f s ⊆ f t)
(l : filter β) :
(∀ s ∈ 𝓝 x, f s ∈ l) ↔ (∀ s, is_open s → x ∈ s → f s ∈ l) :=
all_mem_nhds _ _ (λ s t ssubt h, mem_of_superset h (hf s t ssubt))
theorem rtendsto_nhds {r : rel β α} {l : filter β} {a : α} :
rtendsto r l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → r.core s ∈ l) :=
all_mem_nhds_filter _ _ (λ s t, id) _
theorem rtendsto'_nhds {r : rel β α} {l : filter β} {a : α} :
rtendsto' r l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → r.preimage s ∈ l) :=
by { rw [rtendsto'_def], apply all_mem_nhds_filter, apply rel.preimage_mono }
theorem ptendsto_nhds {f : β →. α} {l : filter β} {a : α} :
ptendsto f l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → f.core s ∈ l) :=
rtendsto_nhds
theorem ptendsto'_nhds {f : β →. α} {l : filter β} {a : α} :
ptendsto' f l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → f.preimage s ∈ l) :=
rtendsto'_nhds
theorem tendsto_nhds {f : β → α} {l : filter β} {a : α} :
tendsto f l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → f ⁻¹' s ∈ l) :=
all_mem_nhds_filter _ _ (λ s t h, preimage_mono h) _
lemma tendsto_const_nhds {a : α} {f : filter β} : tendsto (λb:β, a) f (𝓝 a) :=
tendsto_nhds.mpr $ assume s hs ha, univ_mem' $ assume _, ha
lemma tendsto_at_top_of_eventually_const {ι : Type*} [semilattice_sup ι] [nonempty ι]
{x : α} {u : ι → α} {i₀ : ι} (h : ∀ i ≥ i₀, u i = x) : tendsto u at_top (𝓝 x) :=
tendsto.congr' (eventually_eq.symm (eventually_at_top.mpr ⟨i₀, h⟩)) tendsto_const_nhds
lemma tendsto_at_bot_of_eventually_const {ι : Type*} [semilattice_inf ι] [nonempty ι]
{x : α} {u : ι → α} {i₀ : ι} (h : ∀ i ≤ i₀, u i = x) : tendsto u at_bot (𝓝 x) :=
tendsto.congr' (eventually_eq.symm (eventually_at_bot.mpr ⟨i₀, h⟩)) tendsto_const_nhds
lemma pure_le_nhds : pure ≤ (𝓝 : α → filter α) :=
assume a s hs, mem_pure.2 $ mem_of_mem_nhds hs
lemma tendsto_pure_nhds {α : Type*} [topological_space β] (f : α → β) (a : α) :
tendsto f (pure a) (𝓝 (f a)) :=
(tendsto_pure_pure f a).mono_right (pure_le_nhds _)
lemma order_top.tendsto_at_top_nhds {α : Type*} [partial_order α] [order_top α]
[topological_space β] (f : α → β) : tendsto f at_top (𝓝 $ f ⊤) :=
(tendsto_at_top_pure f).mono_right (pure_le_nhds _)
@[simp] instance nhds_ne_bot {a : α} : ne_bot (𝓝 a) :=
ne_bot_of_le (pure_le_nhds a)
/-!
### Cluster points
In this section we define [cluster points](https://en.wikipedia.org/wiki/Limit_point)
(also known as limit points and accumulation points) of a filter and of a sequence.
-/
/-- A point `x` is a cluster point of a filter `F` if `𝓝 x ⊓ F ≠ ⊥`. Also known as
an accumulation point or a limit point. -/
def cluster_pt (x : α) (F : filter α) : Prop := ne_bot (𝓝 x ⊓ F)
lemma cluster_pt.ne_bot {x : α} {F : filter α} (h : cluster_pt x F) : ne_bot (𝓝 x ⊓ F) := h
lemma filter.has_basis.cluster_pt_iff {ιa ιF} {pa : ιa → Prop} {sa : ιa → set α}
{pF : ιF → Prop} {sF : ιF → set α} {F : filter α}
(ha : (𝓝 a).has_basis pa sa) (hF : F.has_basis pF sF) :
cluster_pt a F ↔ ∀ ⦃i⦄ (hi : pa i) ⦃j⦄ (hj : pF j), (sa i ∩ sF j).nonempty :=
ha.inf_basis_ne_bot_iff hF
lemma cluster_pt_iff {x : α} {F : filter α} :
cluster_pt x F ↔ ∀ ⦃U : set α⦄ (hU : U ∈ 𝓝 x) ⦃V⦄ (hV : V ∈ F), (U ∩ V).nonempty :=
inf_ne_bot_iff
/-- `x` is a cluster point of a set `s` if every neighbourhood of `x` meets `s` on a nonempty
set. -/
lemma cluster_pt_principal_iff {x : α} {s : set α} :
cluster_pt x (𝓟 s) ↔ ∀ U ∈ 𝓝 x, (U ∩ s).nonempty :=
inf_principal_ne_bot_iff
lemma cluster_pt_principal_iff_frequently {x : α} {s : set α} :
cluster_pt x (𝓟 s) ↔ ∃ᶠ y in 𝓝 x, y ∈ s :=
by simp only [cluster_pt_principal_iff, frequently_iff, set.nonempty, exists_prop, mem_inter_iff]
lemma cluster_pt.of_le_nhds {x : α} {f : filter α} (H : f ≤ 𝓝 x) [ne_bot f] : cluster_pt x f :=
by rwa [cluster_pt, inf_eq_right.mpr H]
lemma cluster_pt.of_le_nhds' {x : α} {f : filter α} (H : f ≤ 𝓝 x) (hf : ne_bot f) :
cluster_pt x f :=
cluster_pt.of_le_nhds H
lemma cluster_pt.of_nhds_le {x : α} {f : filter α} (H : 𝓝 x ≤ f) : cluster_pt x f :=
by simp only [cluster_pt, inf_eq_left.mpr H, nhds_ne_bot]
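/- Illustrative sketch (not from the original file): every point clusters at its own
neighbourhood filter, as a direct application of `cluster_pt.of_nhds_le`. -/
example {x : α} : cluster_pt x (𝓝 x) := cluster_pt.of_nhds_le le_rfl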
lemma cluster_pt.mono {x : α} {f g : filter α} (H : cluster_pt x f) (h : f ≤ g) :
cluster_pt x g :=
⟨ne_bot_of_le_ne_bot H.ne $ inf_le_inf_left _ h⟩
lemma cluster_pt.of_inf_left {x : α} {f g : filter α} (H : cluster_pt x $ f ⊓ g) :
cluster_pt x f :=
H.mono inf_le_left
lemma cluster_pt.of_inf_right {x : α} {f g : filter α} (H : cluster_pt x $ f ⊓ g) :
cluster_pt x g :=
H.mono inf_le_right
lemma ultrafilter.cluster_pt_iff {x : α} {f : ultrafilter α} : cluster_pt x f ↔ ↑f ≤ 𝓝 x :=
⟨f.le_of_inf_ne_bot', λ h, cluster_pt.of_le_nhds h⟩
/-- A point `x` is a cluster point of a sequence `u` along a filter `F` if it is a cluster point
of `map u F`. -/
def map_cluster_pt {ι : Type*} (x : α) (F : filter ι) (u : ι → α) : Prop := cluster_pt x (map u F)
lemma map_cluster_pt_iff {ι : Type*} (x : α) (F : filter ι) (u : ι → α) :
map_cluster_pt x F u ↔ ∀ s ∈ 𝓝 x, ∃ᶠ a in F, u a ∈ s :=
by { simp_rw [map_cluster_pt, cluster_pt, inf_ne_bot_iff_frequently_left, frequently_map], refl }
lemma map_cluster_pt_of_comp {ι δ : Type*} {F : filter ι} {φ : δ → ι} {p : filter δ}
{x : α} {u : ι → α} [ne_bot p] (h : tendsto φ p F) (H : tendsto (u ∘ φ) p (𝓝 x)) :
map_cluster_pt x F u :=
begin
have := calc
map (u ∘ φ) p = map u (map φ p) : map_map
... ≤ map u F : map_mono h,
have : map (u ∘ φ) p ≤ 𝓝 x ⊓ map u F,
from le_inf H this,
exact ne_bot_of_le this
end
/-!
### Interior, closure and frontier in terms of neighborhoods
-/
lemma interior_eq_nhds' {s : set α} : interior s = {a | s ∈ 𝓝 a} :=
set.ext $ λ x, by simp only [mem_interior, mem_nhds_iff, mem_set_of_eq]
lemma interior_eq_nhds {s : set α} : interior s = {a | 𝓝 a ≤ 𝓟 s} :=
interior_eq_nhds'.trans $ by simp only [le_principal_iff]
lemma mem_interior_iff_mem_nhds {s : set α} {a : α} :
a ∈ interior s ↔ s ∈ 𝓝 a :=
by rw [interior_eq_nhds', mem_set_of_eq]
@[simp] lemma interior_mem_nhds {s : set α} {a : α} :
interior s ∈ 𝓝 a ↔ s ∈ 𝓝 a :=
⟨λ h, mem_of_superset h interior_subset,
λ h, is_open.mem_nhds is_open_interior (mem_interior_iff_mem_nhds.2 h)⟩
lemma interior_set_of_eq {p : α → Prop} :
interior {x | p x} = {x | ∀ᶠ y in 𝓝 x, p y} :=
interior_eq_nhds'
lemma is_open_set_of_eventually_nhds {p : α → Prop} :
is_open {x | ∀ᶠ y in 𝓝 x, p y} :=
by simp only [← interior_set_of_eq, is_open_interior]
lemma subset_interior_iff_nhds {s V : set α} : s ⊆ interior V ↔ ∀ x ∈ s, V ∈ 𝓝 x :=
show (∀ x, x ∈ s → x ∈ _) ↔ _, by simp_rw mem_interior_iff_mem_nhds
lemma is_open_iff_nhds {s : set α} : is_open s ↔ ∀a∈s, 𝓝 a ≤ 𝓟 s :=
calc is_open s ↔ s ⊆ interior s : subset_interior_iff_open.symm
... ↔ (∀a∈s, 𝓝 a ≤ 𝓟 s) : by rw [interior_eq_nhds]; refl
lemma is_open_iff_mem_nhds {s : set α} : is_open s ↔ ∀a∈s, s ∈ 𝓝 a :=
is_open_iff_nhds.trans $ forall_congr $ λ _, imp_congr_right $ λ _, le_principal_iff
theorem is_open_iff_ultrafilter {s : set α} :
is_open s ↔ (∀ (x ∈ s) (l : ultrafilter α), ↑l ≤ 𝓝 x → s ∈ l) :=
by simp_rw [is_open_iff_mem_nhds, ← mem_iff_ultrafilter]
lemma mem_closure_iff_frequently {s : set α} {a : α} : a ∈ closure s ↔ ∃ᶠ x in 𝓝 a, x ∈ s :=
by rw [filter.frequently, filter.eventually, ← mem_interior_iff_mem_nhds,
closure_eq_compl_interior_compl]; refl
alias mem_closure_iff_frequently ↔ _ filter.frequently.mem_closure
/-- The set of cluster points of a filter is closed. In particular, the set of limit points
of a sequence is closed. -/
lemma is_closed_set_of_cluster_pt {f : filter α} : is_closed {x | cluster_pt x f} :=
begin
simp only [cluster_pt, inf_ne_bot_iff_frequently_left, set_of_forall, imp_iff_not_or],
refine is_closed_Inter (λ p, is_closed.union _ _); apply is_closed_compl_iff.2,
exacts [is_open_set_of_eventually_nhds, is_open_const]
end
theorem mem_closure_iff_cluster_pt {s : set α} {a : α} : a ∈ closure s ↔ cluster_pt a (𝓟 s) :=
mem_closure_iff_frequently.trans cluster_pt_principal_iff_frequently.symm
lemma mem_closure_iff_nhds_ne_bot {s : set α} : a ∈ closure s ↔ 𝓝 a ⊓ 𝓟 s ≠ ⊥ :=
mem_closure_iff_cluster_pt.trans ne_bot_iff
lemma mem_closure_iff_nhds_within_ne_bot {s : set α} {x : α} :
x ∈ closure s ↔ ne_bot (𝓝[s] x) :=
mem_closure_iff_cluster_pt
/-- If `x` is not an isolated point of a topological space, then `{x}ᶜ` is dense in the whole
space. -/
lemma dense_compl_singleton (x : α) [ne_bot (𝓝[{x}ᶜ] x)] : dense ({x}ᶜ : set α) :=
begin
intro y,
unfreezingI { rcases eq_or_ne y x with rfl|hne },
{ rwa mem_closure_iff_nhds_within_ne_bot },
{ exact subset_closure hne }
end
/-- If `x` is not an isolated point of a topological space, then the closure of `{x}ᶜ` is the whole
space. -/
@[simp] lemma closure_compl_singleton (x : α) [ne_bot (𝓝[{x}ᶜ] x)] :
closure {x}ᶜ = (univ : set α) :=
(dense_compl_singleton x).closure_eq
/-- If `x` is not an isolated point of a topological space, then the interior of `{x}` is empty. -/
@[simp] lemma interior_singleton (x : α) [ne_bot (𝓝[{x}ᶜ] x)] :
interior {x} = (∅ : set α) :=
interior_eq_empty_iff_dense_compl.2 (dense_compl_singleton x)
lemma closure_eq_cluster_pts {s : set α} : closure s = {a | cluster_pt a (𝓟 s)} :=
set.ext $ λ x, mem_closure_iff_cluster_pt
theorem mem_closure_iff_nhds {s : set α} {a : α} :
a ∈ closure s ↔ ∀ t ∈ 𝓝 a, (t ∩ s).nonempty :=
mem_closure_iff_cluster_pt.trans cluster_pt_principal_iff
theorem mem_closure_iff_nhds' {s : set α} {a : α} :
a ∈ closure s ↔ ∀ t ∈ 𝓝 a, ∃ y : s, ↑y ∈ t :=
by simp only [mem_closure_iff_nhds, set.nonempty_inter_iff_exists_right]
theorem mem_closure_iff_comap_ne_bot {A : set α} {x : α} :
x ∈ closure A ↔ ne_bot (comap (coe : A → α) (𝓝 x)) :=
by simp_rw [mem_closure_iff_nhds, comap_ne_bot_iff, set.nonempty_inter_iff_exists_right]
theorem mem_closure_iff_nhds_basis' {a : α} {p : ι → Prop} {s : ι → set α} (h : (𝓝 a).has_basis p s)
{t : set α} :
a ∈ closure t ↔ ∀ i, p i → (s i ∩ t).nonempty :=
mem_closure_iff_cluster_pt.trans $ (h.cluster_pt_iff (has_basis_principal _)).trans $
by simp only [exists_prop, forall_const]
theorem mem_closure_iff_nhds_basis {a : α} {p : ι → Prop} {s : ι → set α} (h : (𝓝 a).has_basis p s)
{t : set α} :
a ∈ closure t ↔ ∀ i, p i → ∃ y ∈ t, y ∈ s i :=
(mem_closure_iff_nhds_basis' h).trans $
by simp only [set.nonempty, mem_inter_eq, exists_prop, and_comm]
/-- `x` belongs to the closure of `s` if and only if some ultrafilter
supported on `s` converges to `x`. -/
lemma mem_closure_iff_ultrafilter {s : set α} {x : α} :
x ∈ closure s ↔ ∃ (u : ultrafilter α), s ∈ u ∧ ↑u ≤ 𝓝 x :=
by simp [closure_eq_cluster_pts, cluster_pt, ← exists_ultrafilter_iff, and.comm]
lemma is_closed_iff_cluster_pt {s : set α} : is_closed s ↔ ∀a, cluster_pt a (𝓟 s) → a ∈ s :=
calc is_closed s ↔ closure s ⊆ s : closure_subset_iff_is_closed.symm
... ↔ (∀a, cluster_pt a (𝓟 s) → a ∈ s) : by simp only [subset_def, mem_closure_iff_cluster_pt]
lemma is_closed_iff_nhds {s : set α} : is_closed s ↔ ∀ x, (∀ U ∈ 𝓝 x, (U ∩ s).nonempty) → x ∈ s :=
by simp_rw [is_closed_iff_cluster_pt, cluster_pt, inf_principal_ne_bot_iff]
lemma closure_inter_open {s t : set α} (h : is_open s) : s ∩ closure t ⊆ closure (s ∩ t) :=
begin
rintro a ⟨hs, ht⟩,
have : s ∈ 𝓝 a := is_open.mem_nhds h hs,
rw mem_closure_iff_nhds_ne_bot at ht ⊢,
rwa [← inf_principal, ← inf_assoc, inf_eq_left.2 (le_principal_iff.2 this)],
end
lemma closure_inter_open' {s t : set α} (h : is_open t) : closure s ∩ t ⊆ closure (s ∩ t) :=
by simpa only [inter_comm] using closure_inter_open h
lemma dense.open_subset_closure_inter {s t : set α} (hs : dense s) (ht : is_open t) :
t ⊆ closure (t ∩ s) :=
calc t = t ∩ closure s : by rw [hs.closure_eq, inter_univ]
... ⊆ closure (t ∩ s) : closure_inter_open ht
lemma mem_closure_of_mem_closure_union {s₁ s₂ : set α} {x : α} (h : x ∈ closure (s₁ ∪ s₂))
(h₁ : s₁ᶜ ∈ 𝓝 x) : x ∈ closure s₂ :=
begin
rw mem_closure_iff_nhds_ne_bot at *,
rwa ← calc
𝓝 x ⊓ principal (s₁ ∪ s₂) = 𝓝 x ⊓ (principal s₁ ⊔ principal s₂) : by rw sup_principal
... = (𝓝 x ⊓ principal s₁) ⊔ (𝓝 x ⊓ principal s₂) : inf_sup_left
... = ⊥ ⊔ 𝓝 x ⊓ principal s₂ : by rw inf_principal_eq_bot.mpr h₁
... = 𝓝 x ⊓ principal s₂ : bot_sup_eq
end
/-- The intersection of an open dense set with a dense set is a dense set. -/
lemma dense.inter_of_open_left {s t : set α} (hs : dense s) (ht : dense t) (hso : is_open s) :
dense (s ∩ t) :=
λ x, (closure_minimal (closure_inter_open hso) is_closed_closure) $
by simp [hs.closure_eq, ht.closure_eq]
/-- The intersection of a dense set with an open dense set is a dense set. -/
lemma dense.inter_of_open_right {s t : set α} (hs : dense s) (ht : dense t) (hto : is_open t) :
dense (s ∩ t) :=
inter_comm t s ▸ ht.inter_of_open_left hs hto
lemma dense.inter_nhds_nonempty {s t : set α} (hs : dense s) {x : α} (ht : t ∈ 𝓝 x) :
(s ∩ t).nonempty :=
let ⟨U, hsub, ho, hx⟩ := mem_nhds_iff.1 ht in
(hs.inter_open_nonempty U ho ⟨x, hx⟩).mono $ λ y hy, ⟨hy.2, hsub hy.1⟩
lemma closure_diff {s t : set α} : closure s \ closure t ⊆ closure (s \ t) :=
calc closure s \ closure t = (closure t)ᶜ ∩ closure s : by simp only [diff_eq, inter_comm]
... ⊆ closure ((closure t)ᶜ ∩ s) : closure_inter_open $ is_open_compl_iff.mpr $ is_closed_closure
... = closure (s \ closure t) : by simp only [diff_eq, inter_comm]
... ⊆ closure (s \ t) : closure_mono $ diff_subset_diff (subset.refl s) subset_closure
lemma filter.frequently.mem_of_closed {a : α} {s : set α} (h : ∃ᶠ x in 𝓝 a, x ∈ s)
(hs : is_closed s) : a ∈ s :=
hs.closure_subset h.mem_closure
lemma is_closed.mem_of_frequently_of_tendsto {f : β → α} {b : filter β} {a : α} {s : set α}
(hs : is_closed s) (h : ∃ᶠ x in b, f x ∈ s) (hf : tendsto f b (𝓝 a)) : a ∈ s :=
(hf.frequently $ show ∃ᶠ x in b, (λ y, y ∈ s) (f x), from h).mem_of_closed hs
lemma is_closed.mem_of_tendsto {f : β → α} {b : filter β} {a : α} {s : set α}
[ne_bot b] (hs : is_closed s) (hf : tendsto f b (𝓝 a)) (h : ∀ᶠ x in b, f x ∈ s) : a ∈ s :=
hs.mem_of_frequently_of_tendsto h.frequently hf
lemma mem_closure_of_tendsto {f : β → α} {b : filter β} {a : α} {s : set α}
[ne_bot b] (hf : tendsto f b (𝓝 a)) (h : ∀ᶠ x in b, f x ∈ s) : a ∈ closure s :=
is_closed_closure.mem_of_tendsto hf $ h.mono (preimage_mono subset_closure)
/-- Suppose that `f` sends the complement of `s` to a single point `a`, and `l` is some filter.
Then `f` tends to `a` along `l` restricted to `s` if and only if it tends to `a` along `l`. -/
lemma tendsto_inf_principal_nhds_iff_of_forall_eq {f : β → α} {l : filter β} {s : set β}
{a : α} (h : ∀ x ∉ s, f x = a) :
tendsto f (l ⊓ 𝓟 s) (𝓝 a) ↔ tendsto f l (𝓝 a) :=
begin
rw [tendsto_iff_comap, tendsto_iff_comap],
replace h : 𝓟 sᶜ ≤ comap f (𝓝 a),
{ rintros U ⟨t, ht, htU⟩ x hx,
have : f x ∈ t, from (h x hx).symm ▸ mem_of_mem_nhds ht,
exact htU this },
refine ⟨λ h', _, le_trans inf_le_left⟩,
have := sup_le h' h,
rw [sup_inf_right, sup_principal, union_compl_self, principal_univ,
inf_top_eq, sup_le_iff] at this,
exact this.1
end
/-!
### Limits of filters in topological spaces
-/
section lim
/-- If `f` is a filter, then `Lim f` is a limit of the filter, if it exists. -/
noncomputable def Lim [nonempty α] (f : filter α) : α := epsilon $ λa, f ≤ 𝓝 a
/--
If `f` is a filter satisfying `ne_bot f`, then `Lim' f` is a limit of the filter, if it exists.
-/
def Lim' (f : filter α) [ne_bot f] : α := @Lim _ _ (nonempty_of_ne_bot f) f
/--
If `F` is an ultrafilter, then `filter.ultrafilter.Lim F` is a limit of the filter, if it exists.
Note that dot notation `F.Lim` can be used for `F : ultrafilter α`.
-/
def ultrafilter.Lim : ultrafilter α → α := λ F, Lim' F
/-- If `f` is a filter in `β` and `g : β → α` is a function, then `lim f` is a limit of `g` at `f`,
if it exists. -/
noncomputable def lim [nonempty α] (f : filter β) (g : β → α) : α :=
Lim (f.map g)
/-- If a filter `f` is majorated by some `𝓝 a`, then it is majorated by `𝓝 (Lim f)`. We formulate
this lemma with a `[nonempty α]` argument of `Lim` derived from `h` to make it useful for types
without a `[nonempty α]` instance. Because of the built-in proof irrelevance, Lean will unify
this instance with any other instance. -/
lemma le_nhds_Lim {f : filter α} (h : ∃a, f ≤ 𝓝 a) : f ≤ 𝓝 (@Lim _ _ (nonempty_of_exists h) f) :=
epsilon_spec h
/-- If `g` tends to some `𝓝 a` along `f`, then it tends to `𝓝 (lim f g)`. We formulate
this lemma with a `[nonempty α]` argument of `lim` derived from `h` to make it useful for types
without a `[nonempty α]` instance. Because of the built-in proof irrelevance, Lean will unify
this instance with any other instance. -/
lemma tendsto_nhds_lim {f : filter β} {g : β → α} (h : ∃ a, tendsto g f (𝓝 a)) :
tendsto g f (𝓝 $ @lim _ _ _ (nonempty_of_exists h) f g) :=
le_nhds_Lim h
end lim
/-!
### Locally finite families
-/
/- locally finite family [General Topology (Bourbaki, 1995)] -/
section locally_finite
/-- A family of sets `f : β → set α` is locally finite if, at every point `x : α`,
there is a neighborhood of `x` which meets only finitely many sets of the family. -/
def locally_finite (f : β → set α) :=
∀x:α, ∃t ∈ 𝓝 x, finite {i | (f i ∩ t).nonempty }
lemma locally_finite.point_finite {f : β → set α} (hf : locally_finite f) (x : α) :
finite {b | x ∈ f b} :=
let ⟨t, hxt, ht⟩ := hf x in ht.subset $ λ b hb, ⟨x, hb, mem_of_mem_nhds hxt⟩
lemma locally_finite_of_fintype [fintype β] (f : β → set α) : locally_finite f :=
assume x, ⟨univ, univ_mem, finite.of_fintype _⟩
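/- Illustrative sketch (not from the original file): a family indexed by a finite type,
here a single set indexed by `unit`, is locally finite by `locally_finite_of_fintype`. -/
example (s : set α) : locally_finite (λ _ : unit, s) := locally_finite_of_fintype _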
lemma locally_finite.subset
{f₁ f₂ : β → set α} (hf₂ : locally_finite f₂) (hf : ∀b, f₁ b ⊆ f₂ b) : locally_finite f₁ :=
assume a,
let ⟨t, ht₁, ht₂⟩ := hf₂ a in
⟨t, ht₁, ht₂.subset $ assume i hi, hi.mono $ inter_subset_inter (hf i) $ subset.refl _⟩
lemma locally_finite.comp_injective {ι} {f : β → set α} {g : ι → β} (hf : locally_finite f)
(hg : function.injective g) : locally_finite (f ∘ g) :=
λ x, let ⟨t, htx, htf⟩ := hf x in ⟨t, htx, htf.preimage (hg.inj_on _)⟩
lemma locally_finite.closure {f : β → set α} (hf : locally_finite f) :
locally_finite (λ i, closure (f i)) :=
begin
intro x,
rcases hf x with ⟨s, hsx, hsf⟩,
refine ⟨interior s, interior_mem_nhds.2 hsx, hsf.subset $ λ i hi, _⟩,
exact (hi.mono (closure_inter_open' is_open_interior)).of_closure.mono
(inter_subset_inter_right _ interior_subset)
end
lemma locally_finite.is_closed_Union {f : β → set α}
(h₁ : locally_finite f) (h₂ : ∀i, is_closed (f i)) : is_closed (⋃i, f i) :=
begin
simp only [← is_open_compl_iff, compl_Union, is_open_iff_mem_nhds, mem_Inter],
intros a ha,
replace ha : ∀ i, (f i)ᶜ ∈ 𝓝 a := λ i, (h₂ i).is_open_compl.mem_nhds (ha i),
rcases h₁ a with ⟨t, h_nhds, h_fin⟩,
have : t ∩ (⋂ i ∈ {i | (f i ∩ t).nonempty}, (f i)ᶜ) ∈ 𝓝 a,
from inter_mem h_nhds ((bInter_mem h_fin).2 (λ i _, ha i)),
filter_upwards [this],
simp only [mem_inter_eq, mem_Inter],
rintros b ⟨hbt, hn⟩ i hfb,
exact hn i ⟨b, hfb, hbt⟩ hfb
end
lemma locally_finite.closure_Union {f : β → set α} (h : locally_finite f) :
closure (⋃ i, f i) = ⋃ i, closure (f i) :=
subset.antisymm
(closure_minimal (Union_subset_Union $ λ _, subset_closure) $
h.closure.is_closed_Union $ λ _, is_closed_closure)
(Union_subset $ λ i, closure_mono $ subset_Union _ _)
end locally_finite
end topological_space
/-!
### Continuity
-/
section continuous
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
variables [topological_space α] [topological_space β] [topological_space γ]
open_locale topological_space
/-- A function between topological spaces is continuous if the preimage
of every open set is open. Registered as a structure to make sure it is not unfolded by Lean. -/
structure continuous (f : α → β) : Prop :=
(is_open_preimage : ∀s, is_open s → is_open (f ⁻¹' s))
lemma continuous_def {f : α → β} : continuous f ↔ (∀s, is_open s → is_open (f ⁻¹' s)) :=
⟨λ hf s hs, hf.is_open_preimage s hs, λ h, ⟨h⟩⟩
lemma is_open.preimage {f : α → β} (hf : continuous f) {s : set β} (h : is_open s) :
is_open (f ⁻¹' s) :=
hf.is_open_preimage s h
/-- A function between topological spaces is continuous at a point `x₀`
if `f x` tends to `f x₀` when `x` tends to `x₀`. -/
def continuous_at (f : α → β) (x : α) := tendsto f (𝓝 x) (𝓝 (f x))
lemma continuous_at.tendsto {f : α → β} {x : α} (h : continuous_at f x) :
tendsto f (𝓝 x) (𝓝 (f x)) :=
h
lemma continuous_at_congr {f g : α → β} {x : α} (h : f =ᶠ[𝓝 x] g) :
continuous_at f x ↔ continuous_at g x :=
by simp only [continuous_at, tendsto_congr' h, h.eq_of_nhds]
lemma continuous_at.congr {f g : α → β} {x : α} (hf : continuous_at f x) (h : f =ᶠ[𝓝 x] g) :
continuous_at g x :=
(continuous_at_congr h).1 hf
lemma continuous_at.preimage_mem_nhds {f : α → β} {x : α} {t : set β} (h : continuous_at f x)
(ht : t ∈ 𝓝 (f x)) : f ⁻¹' t ∈ 𝓝 x :=
h ht
lemma eventually_eq_zero_nhds {M₀} [has_zero M₀] {a : α} {f : α → M₀} :
f =ᶠ[𝓝 a] 0 ↔ a ∉ closure (function.support f) :=
by rw [← mem_compl_eq, ← interior_compl, mem_interior_iff_mem_nhds, function.compl_support]; refl
lemma cluster_pt.map {x : α} {la : filter α} {lb : filter β} (H : cluster_pt x la)
{f : α → β} (hfc : continuous_at f x) (hf : tendsto f la lb) :
cluster_pt (f x) lb :=
⟨ne_bot_of_le_ne_bot ((map_ne_bot_iff f).2 H).ne $ hfc.tendsto.inf hf⟩
/-- See also `interior_preimage_subset_preimage_interior`. -/
lemma preimage_interior_subset_interior_preimage {f : α → β} {s : set β}
(hf : continuous f) : f⁻¹' (interior s) ⊆ interior (f⁻¹' s) :=
interior_maximal (preimage_mono interior_subset) (is_open_interior.preimage hf)
lemma continuous_id : continuous (id : α → α) :=
continuous_def.2 $ assume s h, h
lemma continuous.comp {g : β → γ} {f : α → β} (hg : continuous g) (hf : continuous f) :
continuous (g ∘ f) :=
continuous_def.2 $ assume s h, (h.preimage hg).preimage hf
lemma continuous.iterate {f : α → α} (h : continuous f) (n : ℕ) : continuous (f^[n]) :=
nat.rec_on n continuous_id (λ n ihn, ihn.comp h)
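/- Illustrative sketch (not from the original file): `continuous.iterate` in action on
the identity map. -/
example : continuous (id^[3] : α → α) := continuous_id.iterate 3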
lemma continuous_at.comp {g : β → γ} {f : α → β} {x : α}
(hg : continuous_at g (f x)) (hf : continuous_at f x) :
continuous_at (g ∘ f) x :=
hg.comp hf
lemma continuous.tendsto {f : α → β} (hf : continuous f) (x) :
tendsto f (𝓝 x) (𝓝 (f x)) :=
((nhds_basis_opens x).tendsto_iff $ nhds_basis_opens $ f x).2 $
λ t ⟨hxt, ht⟩, ⟨f ⁻¹' t, ⟨hxt, ht.preimage hf⟩, subset.refl _⟩
/-- A version of `continuous.tendsto` that allows one to specify a simpler form of the limit.
E.g., one can write `continuous_exp.tendsto' 0 1 exp_zero`. -/
lemma continuous.tendsto' {f : α → β} (hf : continuous f) (x : α) (y : β) (h : f x = y) :
tendsto f (𝓝 x) (𝓝 y) :=
h ▸ hf.tendsto x
lemma continuous.continuous_at {f : α → β} {x : α} (h : continuous f) :
continuous_at f x :=
h.tendsto x
lemma continuous_iff_continuous_at {f : α → β} : continuous f ↔ ∀ x, continuous_at f x :=
⟨continuous.tendsto,
assume hf : ∀x, tendsto f (𝓝 x) (𝓝 (f x)),
continuous_def.2 $
assume s, assume hs : is_open s,
have ∀a, f a ∈ s → s ∈ 𝓝 (f a),
from λ a ha, is_open.mem_nhds hs ha,
show is_open (f ⁻¹' s),
from is_open_iff_nhds.2 $ λ a ha, le_principal_iff.2 $ hf _ (this a ha)⟩
lemma continuous_at_const {x : α} {b : β} : continuous_at (λ a:α, b) x :=
tendsto_const_nhds
lemma continuous_const {b : β} : continuous (λa:α, b) :=
continuous_iff_continuous_at.mpr $ assume a, continuous_at_const
lemma filter.eventually_eq.continuous_at {x : α} {f : α → β} {y : β} (h : f =ᶠ[𝓝 x] (λ _, y)) :
continuous_at f x :=
(continuous_at_congr h).2 tendsto_const_nhds
lemma continuous_of_const {f : α → β} (h : ∀ x y, f x = f y) : continuous f :=
continuous_iff_continuous_at.mpr $ λ x, filter.eventually_eq.continuous_at $
eventually_of_forall (λ y, h y x)
lemma continuous_at_id {x : α} : continuous_at id x :=
continuous_id.continuous_at
lemma continuous_at.iterate {f : α → α} {x : α} (hf : continuous_at f x) (hx : f x = x) (n : ℕ) :
continuous_at (f^[n]) x :=
nat.rec_on n continuous_at_id $ λ n ihn,
show continuous_at (f^[n] ∘ f) x,
from continuous_at.comp (hx.symm ▸ ihn) hf
lemma continuous_iff_is_closed {f : α → β} :
continuous f ↔ (∀s, is_closed s → is_closed (f ⁻¹' s)) :=
⟨assume hf s hs, by simpa using (continuous_def.1 hf sᶜ hs.is_open_compl).is_closed_compl,
assume hf, continuous_def.2 $ assume s,
by rw [←is_closed_compl_iff, ←is_closed_compl_iff]; exact hf _⟩
lemma is_closed.preimage {f : α → β} (hf : continuous f) {s : set β} (h : is_closed s) :
is_closed (f ⁻¹' s) :=
continuous_iff_is_closed.mp hf s h
lemma mem_closure_image {f : α → β} {x : α} {s : set α} (hf : continuous_at f x)
(hx : x ∈ closure s) : f x ∈ closure (f '' s) :=
begin
rw [mem_closure_iff_nhds_ne_bot] at hx ⊢,
rw ← bot_lt_iff_ne_bot,
haveI : ne_bot _ := ⟨hx⟩,
calc
⊥ < map f (𝓝 x ⊓ principal s) : bot_lt_iff_ne_bot.mpr ne_bot.ne'
... ≤ (map f $ 𝓝 x) ⊓ (map f $ principal s) : map_inf_le
... = (map f $ 𝓝 x) ⊓ (principal $ f '' s) : by rw map_principal
... ≤ 𝓝 (f x) ⊓ (principal $ f '' s) : inf_le_inf hf le_rfl
end
lemma continuous_at_iff_ultrafilter {f : α → β} {x} : continuous_at f x ↔
∀ g : ultrafilter α, ↑g ≤ 𝓝 x → tendsto f g (𝓝 (f x)) :=
tendsto_iff_ultrafilter f (𝓝 x) (𝓝 (f x))
lemma continuous_iff_ultrafilter {f : α → β} :
continuous f ↔ ∀ x (g : ultrafilter α), ↑g ≤ 𝓝 x → tendsto f g (𝓝 (f x)) :=
by simp only [continuous_iff_continuous_at, continuous_at_iff_ultrafilter]
lemma continuous.closure_preimage_subset {f : α → β}
(hf : continuous f) (t : set β) :
closure (f ⁻¹' t) ⊆ f ⁻¹' (closure t) :=
begin
rw ← (is_closed_closure.preimage hf).closure_eq,
exact closure_mono (preimage_mono subset_closure),
end
lemma continuous.frontier_preimage_subset
{f : α → β} (hf : continuous f) (t : set β) :
frontier (f ⁻¹' t) ⊆ f ⁻¹' (frontier t) :=
diff_subset_diff (hf.closure_preimage_subset t) (preimage_interior_subset_interior_preimage hf)
/-! ### Continuity and partial functions -/
/-- Continuity of a partial function -/
def pcontinuous (f : α →. β) := ∀ s, is_open s → is_open (f.preimage s)
lemma open_dom_of_pcontinuous {f : α →. β} (h : pcontinuous f) : is_open f.dom :=
by rw [←pfun.preimage_univ]; exact h _ is_open_univ
lemma pcontinuous_iff' {f : α →. β} :
pcontinuous f ↔ ∀ {x y} (h : y ∈ f x), ptendsto' f (𝓝 x) (𝓝 y) :=
begin
split,
{ intros h x y h',
simp only [ptendsto'_def, mem_nhds_iff],
rintros s ⟨t, tsubs, opent, yt⟩,
exact ⟨f.preimage t, pfun.preimage_mono _ tsubs, h _ opent, ⟨y, yt, h'⟩⟩ },
intros hf s os,
rw is_open_iff_nhds,
rintros x ⟨y, ys, fxy⟩ t,
rw [mem_principal],
assume h : f.preimage s ⊆ t,
change t ∈ 𝓝 x,
apply mem_of_superset _ h,
have h' : ∀ s ∈ 𝓝 y, f.preimage s ∈ 𝓝 x,
{ intros s hs,
have : ptendsto' f (𝓝 x) (𝓝 y) := hf fxy,
rw ptendsto'_def at this,
exact this s hs },
show f.preimage s ∈ 𝓝 x,
apply h', rw mem_nhds_iff, exact ⟨s, set.subset.refl _, os, ys⟩
end
/-- If a continuous map `f` maps `s` to `t`, then it maps `closure s` to `closure t`. -/
lemma set.maps_to.closure {s : set α} {t : set β} {f : α → β} (h : maps_to f s t)
(hc : continuous f) : maps_to f (closure s) (closure t) :=
begin
simp only [maps_to, mem_closure_iff_cluster_pt],
exact λ x hx, hx.map hc.continuous_at (tendsto_principal_principal.2 h)
end
lemma image_closure_subset_closure_image {f : α → β} {s : set α} (h : continuous f) :
f '' closure s ⊆ closure (f '' s) :=
((maps_to_image f s).closure h).image_subset
lemma closure_subset_preimage_closure_image {f : α → β} {s : set α} (h : continuous f) :
closure s ⊆ f ⁻¹' (closure (f '' s)) :=
by { rw ← set.image_subset_iff, exact image_closure_subset_closure_image h }
lemma map_mem_closure {s : set α} {t : set β} {f : α → β} {a : α}
(hf : continuous f) (ha : a ∈ closure s) (ht : ∀a∈s, f a ∈ t) : f a ∈ closure t :=
set.maps_to.closure ht hf ha
/-!
### Function with dense range
-/
section dense_range
variables {κ ι : Type*} (f : κ → β) (g : β → γ)
/-- `f : κ → β` has dense range if its range (image) is a dense subset of `β`. -/
def dense_range := dense (range f)
variables {f}
/-- A surjective map has dense range. -/
lemma function.surjective.dense_range (hf : function.surjective f) : dense_range f :=
λ x, by simp [hf.range_eq]
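/- Illustrative sketch (not from the original file): the identity map is surjective and
therefore has dense range. -/
example : dense_range (id : α → α) := function.surjective_id.dense_range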
lemma dense_range_iff_closure_range : dense_range f ↔ closure (range f) = univ :=
dense_iff_closure_eq
lemma dense_range.closure_range (h : dense_range f) : closure (range f) = univ :=
h.closure_eq
lemma dense.dense_range_coe {s : set α} (h : dense s) : dense_range (coe : s → α) :=
by simpa only [dense_range, subtype.range_coe_subtype]
lemma continuous.range_subset_closure_image_dense {f : α → β} (hf : continuous f)
{s : set α} (hs : dense s) :
range f ⊆ closure (f '' s) :=
by { rw [← image_univ, ← hs.closure_eq], exact image_closure_subset_closure_image hf }
/-- The image of a dense set under a continuous map with dense range is a dense set. -/
lemma dense_range.dense_image {f : α → β} (hf' : dense_range f) (hf : continuous f)
{s : set α} (hs : dense s) :
dense (f '' s) :=
(hf'.mono $ hf.range_subset_closure_image_dense hs).of_closure
/-- If `f` has dense range and `s` is an open set in the codomain of `f`, then the image of the
preimage of `s` under `f` is dense in `s`. -/
lemma dense_range.subset_closure_image_preimage_of_is_open (hf : dense_range f) {s : set β}
(hs : is_open s) : s ⊆ closure (f '' (f ⁻¹' s)) :=
by { rw image_preimage_eq_inter_range, exact hf.open_subset_closure_inter hs }
/-- If a continuous map with dense range maps a dense set to a subset of `t`, then `t` is a dense
set. -/
lemma dense_range.dense_of_maps_to {f : α → β} (hf' : dense_range f) (hf : continuous f)
{s : set α} (hs : dense s) {t : set β} (ht : maps_to f s t) :
dense t :=
(hf'.dense_image hf hs).mono ht.image_subset
/-- Composition of a continuous map with dense range and a function with dense range has dense
range. -/
lemma dense_range.comp {g : β → γ} {f : κ → β} (hg : dense_range g) (hf : dense_range f)
(cg : continuous g) :
dense_range (g ∘ f) :=
by { rw [dense_range, range_comp], exact hg.dense_image cg hf }
lemma dense_range.nonempty_iff (hf : dense_range f) : nonempty κ ↔ nonempty β :=
range_nonempty_iff_nonempty.symm.trans hf.nonempty_iff
lemma dense_range.nonempty [h : nonempty β] (hf : dense_range f) : nonempty κ :=
hf.nonempty_iff.mpr h
/-- Given a function `f : κ → β` with dense range and `b : β`, returns some `a : κ`. -/
def dense_range.some (hf : dense_range f) (b : β) : κ :=
classical.choice $ hf.nonempty_iff.mpr ⟨b⟩
lemma dense_range.exists_mem_open (hf : dense_range f) {s : set β} (ho : is_open s)
(hs : s.nonempty) :
∃ a, f a ∈ s :=
exists_range_iff.1 $ hf.exists_mem_open ho hs
lemma dense_range.mem_nhds {f : κ → β} (h : dense_range f) {b : β} {U : set β}
(U_in : U ∈ nhds b) : ∃ a, f a ∈ U :=
begin
rcases (mem_closure_iff_nhds.mp
((dense_range_iff_closure_range.mp h).symm ▸ mem_univ b : b ∈ closure (range f)) U U_in)
with ⟨_, h, a, rfl⟩,
exact ⟨a, h⟩
end
end dense_range
end continuous
/--
The library contains many lemmas stating that functions/operations are continuous. There are many
ways to formulate the continuity of operations. Some are more convenient than others.
Note: for the most part this note also applies to other properties
(`measurable`, `differentiable`, `continuous_on`, ...).
### The traditional way
As an example, let's look at addition `(+) : M → M → M`. We can state that this is continuous
in different definitionally equal ways (omitting some typing information)
* `continuous (λ p, p.1 + p.2)`;
* `continuous (function.uncurry (+))`;
* `continuous ↿(+)`. (`↿` is notation for recursively uncurrying a function)
However, lemmas with this conclusion are not nice to use in practice because
1. They confuse the elaborator. The following two examples fail, because of limitations in the
elaboration process.
```
variables {M : Type*} [has_mul M] [topological_space M] [has_continuous_mul M]
example : continuous (λ x : M, x + x) :=
continuous_add.comp _
example : continuous (λ x : M, x + x) :=
continuous_add.comp (continuous_id.prod_mk continuous_id)
```
The second is a valid proof, which is accepted if you write it as
`continuous_add.comp (continuous_id.prod_mk continuous_id : _)`
2. If the operation has more than 2 arguments, they are impractical to use, because in your
application the arguments in the domain might be in a different order or associated differently.
### The convenient way
A much more convenient way to write continuity lemmas is like `continuous.add`:
```
continuous.add {f g : X → M} (hf : continuous f) (hg : continuous g) : continuous (λ x, f x + g x)
```
The conclusion can be `continuous (f + g)`, which is definitionally equal.
This has the following advantages
* It supports projection notation, so is shorter to write.
* `continuous.add _ _` is recognized correctly by the elaborator and gives useful new goals.
* It works generally, since the domain is a variable.
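For instance, such lemmas compose cleanly with projection notation (a sketch, assuming
`X` is a topological space and `M` has `has_add` and `has_continuous_add` instances):
```
example {f g : X → M} (hf : continuous f) (hg : continuous g) :
  continuous (λ x, f x + g x + g x) :=
(hf.add hg).add hg
```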
As an example of a unary operation, we have `continuous.neg`.
```
continuous.neg {f : α → G} (hf : continuous f) : continuous (λ x, -f x)
```
For unary functions, the elaborator is not confused when applying the traditional lemma
(like `continuous_neg`), but it's still convenient to have the short version available (compare
`hf.neg.neg.neg` with `continuous_neg.comp $ continuous_neg.comp $ continuous_neg.comp hf`).
As a harder example, consider an operation of the following type:
```
def strans {x : F} (γ γ' : path x x) (t₀ : I) : path x x
```
The precise definition is not important, only its type.
The correct continuity principle for this operation is something like this:
```
{f : X → F} {γ γ' : ∀ x, path (f x) (f x)} {t₀ s : X → I}
(hγ : continuous ↿γ) (hγ' : continuous ↿γ')
(ht : continuous t₀) (hs : continuous s) :
  continuous (λ x, strans (γ x) (γ' x) (t₀ x) (s x))
```
Note that *all* arguments of `strans` are indexed over `X`, even the basepoint `x`, and the last
argument `s` that arises since `path x x` has a coercion to `I → F`. The paths `γ` and `γ'` (which
are unary functions from `I`) become binary functions in the continuity lemma.
### Summary
* Make sure that your continuity lemmas are stated in the most general way, and in a convenient
form. That means that:
- The conclusion has a variable `X` as domain (not something like `Y × Z`);
- Wherever possible, all point arguments `c : Y` are replaced by functions `c : X → Y`;
- All `n`-ary function arguments are replaced by `n+1`-ary functions
(`f : Y → Z` becomes `f : X → Y → Z`);
- All (relevant) arguments have continuity assumptions, and perhaps there are additional
assumptions needed to make the operation continuous;
- The function in the conclusion is fully applied.
* These remarks are mostly about the format of the *conclusion* of a continuity lemma.
In assumptions it's fine to state that a function with more than 1 argument is continuous using
`↿` or `function.uncurry`.
### Functions with discontinuities
In some cases, you want to work with discontinuous functions, and in certain expressions they are
still continuous. For example, consider the fractional part of a number, `fract : ℝ → ℝ`.
In this case, you want to add conditions to when a function involving `fract` is continuous, so you
get something like this: (assumption `hf` could be weakened, but the important thing is the shape
of the conclusion)
```
lemma continuous_on.comp_fract {X Y : Type*} [topological_space X] [topological_space Y]
{f : X → ℝ → Y} {g : X → ℝ} (hf : continuous ↿f) (hg : continuous g) (h : ∀ s, f s 0 = f s 1) :
continuous (λ x, f x (fract (g x)))
```
With `continuous_at` you can be even more precise about what to prove in case of discontinuities;
see e.g. `continuous_at.comp_div_cases`.
-/
library_note "continuity lemma statement"
|
State Before:
K : Type u_1
ι : Type u_3
R : ι → Type u_2
inst✝ : (i : ι) → SMul K (R i)
r : K
t : (i : ι) → Set (R i)
x : (i : ι) → R i
h : x ∈ pi univ (r • t)
⊢ x ∈ r • pi univ t

State After:
case refine'_1
K : Type u_1
ι : Type u_3
R : ι → Type u_2
inst✝ : (i : ι) → SMul K (R i)
r : K
t : (i : ι) → Set (R i)
x : (i : ι) → R i
h : x ∈ pi univ (r • t)
i : ι
x✝ : i ∈ univ
⊢ (fun i => Classical.choose (_ : x i ∈ (r • t) i)) i ∈ t i

case refine'_2
K : Type u_1
ι : Type u_3
R : ι → Type u_2
inst✝ : (i : ι) → SMul K (R i)
r : K
t : (i : ι) → Set (R i)
x : (i : ι) → R i
h : x ∈ pi univ (r • t)
i : ι
⊢ (fun x => r • x) (fun i => Classical.choose (_ : x i ∈ (r • t) i)) i = x i

Tactic: refine' ⟨fun i ↦ Classical.choose (h i <| Set.mem_univ _), fun i _ ↦ _, funext fun i ↦ _⟩

State Before:
case refine'_1
K : Type u_1
ι : Type u_3
R : ι → Type u_2
inst✝ : (i : ι) → SMul K (R i)
r : K
t : (i : ι) → Set (R i)
x : (i : ι) → R i
h : x ∈ pi univ (r • t)
i : ι
x✝ : i ∈ univ
⊢ (fun i => Classical.choose (_ : x i ∈ (r • t) i)) i ∈ t i

State After: no goals

Tactic: exact (Classical.choose_spec (h i <| Set.mem_univ i)).left

State Before:
case refine'_2
K : Type u_1
ι : Type u_3
R : ι → Type u_2
inst✝ : (i : ι) → SMul K (R i)
r : K
t : (i : ι) → Set (R i)
x : (i : ι) → R i
h : x ∈ pi univ (r • t)
i : ι
⊢ (fun x => r • x) (fun i => Classical.choose (_ : x i ∈ (r • t) i)) i = x i

State After: no goals

Tactic: exact (Classical.choose_spec (h i <| Set.mem_univ i)).right
|
(* Distributed under the terms of the MIT license. *)
From Coq Require Import ssreflect Morphisms.
From MetaCoq.Utils Require Import utils.
From MetaCoq.Common Require Export Universes BasicAst Environment Reflect.
From MetaCoq.Common Require EnvironmentTyping.
From MetaCoq.PCUIC Require Export PCUICPrimitive.
From Equations Require Import Equations.
(** * AST of the Polymorphic Cumulative Calculus of Inductive Constructions
This AST is a cleaned-up version of Coq's internal AST better suited for
reasoning.
In particular, it has binary applications and all terms are well-formed.
Casts are absent as well. *)
Declare Scope pcuic.
Delimit Scope pcuic with pcuic.
Open Scope pcuic.
(** DO NOT USE firstorder: since the introduction of Ints and Floats, it has become unusable. *)
Ltac pcuicfo_gen tac :=
simpl in *; intuition (simpl; intuition tac).
Tactic Notation "pcuicfo" := pcuicfo_gen auto.
Tactic Notation "pcuicfo" tactic(tac) := pcuicfo_gen tac.
(* This allows not relying on lemma names to get a length equality out of some type. *)
Class HasLen (A : Type) (x y : nat) := len : A -> x = y.
(** Note the use of a global reference to avoid capture. *)
Notation length_of t := ltac:(let lemma := constr:(PCUICAst.len t) in exact lemma) (only parsing).
(* Defined here since BasicAst does not have access to universe instances.
Parameterized by term types as they are not yet defined. *)
Record predicate {term} := mk_predicate {
pparams : list term; (* The parameters *)
puinst : Instance.t; (* The universe instance *)
pcontext : list (context_decl term);
(* The predicate context,
initially built from params and puinst *)
preturn : term; (* The return type *) }.
Derive NoConfusion for predicate.
Arguments predicate : clear implicits.
Arguments mk_predicate {_}.
Section map_predicate.
Context {term term' : Type}.
Context (uf : Instance.t -> Instance.t).
Context (paramf preturnf : term -> term').
Context (pcontextf : list (context_decl term) -> list (context_decl term')).
Definition map_predicate (p : predicate term) :=
{| pparams := map paramf p.(pparams);
puinst := uf p.(puinst);
pcontext := pcontextf p.(pcontext);
preturn := preturnf p.(preturn) |}.
Lemma map_pparams (p : predicate term) :
map paramf (pparams p) = pparams (map_predicate p).
Proof using Type. reflexivity. Qed.
Lemma map_preturn (p : predicate term) :
preturnf (preturn p) = preturn (map_predicate p).
Proof using Type. reflexivity. Qed.
Lemma map_pcontext (p : predicate term) :
pcontextf (pcontext p) = pcontext (map_predicate p).
Proof using Type. reflexivity. Qed.
Lemma map_puinst (p : predicate term) :
uf (puinst p) = puinst (map_predicate p).
Proof using Type. reflexivity. Qed.
End map_predicate.
Definition shiftf {A B} (f : nat -> A -> B) k := (fun k' => f (k' + k)).
Section map_predicate_k.
Context {term : Type}.
Context (uf : Instance.t -> Instance.t).
Context (f : nat -> term -> term).
Definition map_predicate_k k (p : predicate term) :=
{| pparams := map (f k) p.(pparams);
puinst := uf p.(puinst);
pcontext := p.(pcontext);
preturn := f (#|p.(pcontext)| + k) p.(preturn) |}.
Lemma map_k_pparams k (p : predicate term) :
map (f k) (pparams p) = pparams (map_predicate_k k p).
Proof using Type. reflexivity. Qed.
Lemma map_k_preturn k (p : predicate term) :
f (#|p.(pcontext)| + k) (preturn p) = preturn (map_predicate_k k p).
Proof using Type. reflexivity. Qed.
Lemma map_k_pcontext k (p : predicate term) :
(pcontext p) = pcontext (map_predicate_k k p).
Proof using Type. reflexivity. Qed.
Lemma map_k_puinst k (p : predicate term) :
uf (puinst p) = puinst (map_predicate_k k p).
Proof using Type. reflexivity. Qed.
Definition test_predicate (instp : Instance.t -> bool) (p : term -> bool)
(pred : predicate term) :=
instp pred.(puinst) && forallb p pred.(pparams) &&
test_context p pred.(pcontext) && p pred.(preturn).
Definition test_predicate_k (instp : Instance.t -> bool)
(p : nat -> term -> bool) k (pred : predicate term) :=
instp pred.(puinst) && forallb (p k) pred.(pparams) &&
test_context_k p #|pred.(pparams)| pred.(pcontext) &&
p (#|pred.(pcontext)| + k) pred.(preturn).
Definition test_predicate_ku (instp : nat -> Instance.t -> bool)
(p : nat -> term -> bool) k (pred : predicate term) :=
instp k pred.(puinst) && forallb (p k) pred.(pparams) &&
test_context (p #|pred.(puinst)|) pred.(pcontext) &&
p k pred.(preturn).
End map_predicate_k.
Section Branch.
Context {term : Type}.
(* Parameterized by term types as they are not yet defined. *)
Record branch := mk_branch {
bcontext : list (context_decl term);
(* Context of binders of the branch, including lets.
This context is open w.r.t. an instance of the parameters of the inductive type only;
it is NOT subject to substitution/lifting.
*)
bbody : term; (* The branch body *) }.
Derive NoConfusion for branch.
Definition string_of_branch (f : term -> string) (b : branch) :=
"([" ^ String.concat "," (map (string_of_name ∘ binder_name ∘ decl_name) (bcontext b)) ^ "], "
^ f (bbody b) ^ ")".
Definition pretty_string_of_branch (f : term -> string) (b : branch) :=
String.concat " " (map (string_of_name ∘ binder_name ∘ decl_name) (bcontext b)) ^ " => " ^ f (bbody b).
Definition test_branch (pctx : term -> bool) (p : term -> bool) (b : branch) :=
test_context pctx b.(bcontext) && p b.(bbody).
Definition test_branch_k (pred : predicate term) (p : nat -> term -> bool) k (b : branch) :=
test_context_k p #|pred.(pparams)| b.(bcontext) && p (#|b.(bcontext)| + k) b.(bbody).
End Branch.
Arguments branch : clear implicits.
Section map_branch.
Context {term term' : Type}.
Context (f : term -> term').
Context (g : list (BasicAst.context_decl term) -> list (BasicAst.context_decl term')).
Definition map_branch (b : branch term) :=
{| bcontext := g b.(bcontext);
bbody := f b.(bbody) |}.
Lemma map_bbody (b : branch term) :
f (bbody b) = bbody (map_branch b).
Proof using Type. reflexivity. Qed.
Lemma map_bcontext (b : branch term) :
g (bcontext b) = bcontext (map_branch b).
Proof using Type. reflexivity. Qed.
End map_branch.
Definition map_branches {term B} (f : term -> B) h l := List.map (map_branch f h) l.
Section map_branch_k.
Context {term term' : Type}.
Context (f : nat -> term -> term').
Context (g : list (BasicAst.context_decl term) -> list (BasicAst.context_decl term')).
Definition map_branch_k k (b : branch term) :=
{| bcontext := g b.(bcontext);
bbody := f (#|b.(bcontext)| + k) b.(bbody) |}.
Lemma map_k_bbody k (b : branch term) :
f (#|b.(bcontext)| + k) (bbody b) = bbody (map_branch_k k b).
Proof using Type. reflexivity. Qed.
Lemma map_k_bcontext k (b : branch term) :
g (bcontext b) = bcontext (map_branch_k k b).
Proof using Type. reflexivity. Qed.
End map_branch_k.
Notation map_branches_k f h k brs :=
(List.map (map_branch_k f h k) brs).
Notation test_branches_k p test k brs :=
(List.forallb (test_branch_k p test k) brs).
Inductive term :=
| tRel (n : nat)
| tVar (i : ident) (* For free variables (e.g. in a goal) *)
| tEvar (n : nat) (l : list term)
| tSort (u : Universe.t)
| tProd (na : aname) (A B : term)
| tLambda (na : aname) (A t : term)
| tLetIn (na : aname) (b B t : term) (* let na := b : B in t *)
| tApp (u v : term)
| tConst (k : kername) (ui : Instance.t)
| tInd (ind : inductive) (ui : Instance.t)
| tConstruct (ind : inductive) (n : nat) (ui : Instance.t)
| tCase (indn : case_info) (p : predicate term) (c : term) (brs : list (branch term))
| tProj (p : projection) (c : term)
| tFix (mfix : mfixpoint term) (idx : nat)
| tCoFix (mfix : mfixpoint term) (idx : nat)
| tPrim (prim : prim_val term).
Derive NoConfusion for term.
Notation prim_val := (prim_val term).
Fixpoint mkApps t us :=
match us with
| nil => t
| u :: us => mkApps (tApp t u) us
end.
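(* Illustrative sketch (not from the original file): [mkApps] left-nests binary
   applications. *)
Example mkApps_left_nested :
  mkApps (tRel 0) [tRel 1; tRel 2] = tApp (tApp (tRel 0) (tRel 1)) (tRel 2).
Proof. reflexivity. Qed.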
Definition isApp t :=
match t with
| tApp _ _ => true
| _ => false
end.
Definition isLambda t :=
match t with
| tLambda _ _ _ => true
| _ => false
end.
Lemma isLambda_inv t : isLambda t -> exists na ty bod, t = tLambda na ty bod.
Proof. destruct t => //; eauto. Qed.
(** Basic operations on the AST: lifting, substitution and tests for variable occurrences. *)
Fixpoint lift n k t : term :=
match t with
| tRel i => tRel (if Nat.leb k i then (n + i) else i)
| tEvar ev args => tEvar ev (List.map (lift n k) args)
| tLambda na T M => tLambda na (lift n k T) (lift n (S k) M)
| tApp u v => tApp (lift n k u) (lift n k v)
| tProd na A B => tProd na (lift n k A) (lift n (S k) B)
| tLetIn na b t b' => tLetIn na (lift n k b) (lift n k t) (lift n (S k) b')
| tCase ind p c brs =>
let p' := map_predicate_k id (lift n) k p in
let brs' := map_branches_k (lift n) id k brs in
tCase ind p' (lift n k c) brs'
| tProj p c => tProj p (lift n k c)
| tFix mfix idx =>
let k' := List.length mfix + k in
let mfix' := List.map (map_def (lift n k) (lift n k')) mfix in
tFix mfix' idx
| tCoFix mfix idx =>
let k' := List.length mfix + k in
let mfix' := List.map (map_def (lift n k) (lift n k')) mfix in
tCoFix mfix' idx
| x => x
end.
Notation lift0 n := (lift n 0).
(** Parallel substitution: it assumes that all terms in the substitution live in the
same context *)
Fixpoint subst s k u :=
match u with
| tRel n =>
if Nat.leb k n then
match nth_error s (n - k) with
| Some b => lift0 k b
| None => tRel (n - List.length s)
end
else tRel n
| tEvar ev args => tEvar ev (List.map (subst s k) args)
| tLambda na T M => tLambda na (subst s k T) (subst s (S k) M)
| tApp u v => tApp (subst s k u) (subst s k v)
| tProd na A B => tProd na (subst s k A) (subst s (S k) B)
| tLetIn na b ty b' => tLetIn na (subst s k b) (subst s k ty) (subst s (S k) b')
| tCase ind p c brs =>
let p' := map_predicate_k id (subst s) k p in
let brs' := map_branches_k (subst s) id k brs in
tCase ind p' (subst s k c) brs'
| tProj p c => tProj p (subst s k c)
| tFix mfix idx =>
let k' := List.length mfix + k in
let mfix' := List.map (map_def (subst s k) (subst s k')) mfix in
tFix mfix' idx
| tCoFix mfix idx =>
let k' := List.length mfix + k in
let mfix' := List.map (map_def (subst s k) (subst s k')) mfix in
tCoFix mfix' idx
| x => x
end.
(** Substitutes [t1 ; .. ; tn] in u for [Rel 0; .. Rel (n-1)] *in parallel* *)
Notation subst0 t := (subst t 0).
Definition subst1 t k u := subst [t] k u.
Notation subst10 t := (subst1 t 0).
Notation "M { j := N }" := (subst1 N j M) (at level 10, right associativity).
Fixpoint closedn k (t : term) : bool :=
match t with
| tRel i => Nat.ltb i k
| tEvar ev args => List.forallb (closedn k) args
| tLambda _ T M | tProd _ T M => closedn k T && closedn (S k) M
| tApp u v => closedn k u && closedn k v
| tLetIn na b t b' => closedn k b && closedn k t && closedn (S k) b'
| tCase ind p c brs =>
let p' := test_predicate_k (fun _ => true) closedn k p in
let brs' := test_branches_k p closedn k brs in
p' && closedn k c && brs'
| tProj p c => closedn k c
| tFix mfix idx =>
let k' := List.length mfix + k in
List.forallb (test_def (closedn k) (closedn k')) mfix
| tCoFix mfix idx =>
let k' := List.length mfix + k in
List.forallb (test_def (closedn k) (closedn k')) mfix
| _ => true
end.
Section fix_test.
Variable test : term -> bool.
Fixpoint test_context_nlict (bcontext : list (context_decl term)) :=
match bcontext with
| nil => true
| d :: bcontext => test_context_nlict bcontext &&
test (decl_type d) && match decl_body d with Some _ => false | None => true end
end.
End fix_test.
Definition test_branch_nlict test b :=
test_context_nlict test (bcontext b) && test (bbody b).
Definition test_branches_nlict test brs :=
forallb (test_branch_nlict test) brs.
Fixpoint nlict (t : term) : bool :=
match t with
| tEvar ev args => List.forallb nlict args
| tLambda _ T M | tProd _ T M => nlict T && nlict M
| tApp u v => nlict u && nlict v
| tLetIn na b t b' => nlict b && nlict t && nlict b'
| tCase ind p c brs =>
let p' := test_predicate_k (fun _ => true) (fun _ => nlict) 0 p in
let brs' := test_branches_nlict nlict brs in
p' && nlict c && brs'
| tProj p c => nlict c
| tFix mfix idx =>
List.forallb (test_def nlict nlict) mfix
| tCoFix mfix idx =>
List.forallb (test_def nlict nlict) mfix
| _ => true
end.
Notation closed t := (closedn 0 t).
Notation closed_decl n := (test_decl (closedn n)).
Notation closedn_ctx := (test_context_k closedn).
Notation closed_ctx := (closedn_ctx 0).
Fixpoint noccur_between k n (t : term) : bool :=
match t with
| tRel i => Nat.ltb i k || Nat.leb (k + n) i
| tEvar ev args => List.forallb (noccur_between k n) args
| tLambda _ T M | tProd _ T M => noccur_between k n T && noccur_between (S k) n M
| tApp u v => noccur_between k n u && noccur_between k n v
| tLetIn na b t b' => noccur_between k n b && noccur_between k n t && noccur_between (S k) n b'
| tCase ind p c brs =>
let p' := test_predicate_k (fun _ => true) (fun k' => noccur_between k' n) k p in
let brs' := test_branches_k p (fun k => noccur_between k n) k brs in
p' && noccur_between k n c && brs'
| tProj p c => noccur_between k n c
| tFix mfix idx =>
let k' := List.length mfix + k in
List.forallb (test_def (noccur_between k n) (noccur_between k' n)) mfix
| tCoFix mfix idx =>
let k' := List.length mfix + k in
List.forallb (test_def (noccur_between k n) (noccur_between k' n)) mfix
| _ => true
end.
(** * Universe substitution
Substitution of universe levels for universe level variables, used to
implement universe polymorphism. *)
#[global]
Instance subst_instance_constr : UnivSubst term :=
fix subst_instance_constr u c {struct c} : term :=
match c with
| tRel _ | tVar _ => c
| tEvar ev args => tEvar ev (List.map (subst_instance_constr u) args)
| tSort s => tSort (subst_instance_univ u s)
| tConst c u' => tConst c (subst_instance_instance u u')
| tInd i u' => tInd i (subst_instance_instance u u')
| tConstruct ind k u' => tConstruct ind k (subst_instance_instance u u')
| tLambda na T M => tLambda na (subst_instance_constr u T) (subst_instance_constr u M)
| tApp f v => tApp (subst_instance_constr u f) (subst_instance_constr u v)
| tProd na A B => tProd na (subst_instance_constr u A) (subst_instance_constr u B)
| tLetIn na b ty b' => tLetIn na (subst_instance_constr u b) (subst_instance_constr u ty)
(subst_instance_constr u b')
| tCase ind p c brs =>
let p' := map_predicate (subst_instance_instance u) (subst_instance_constr u) (subst_instance_constr u) id p in
let brs' := List.map (map_branch (subst_instance_constr u) id) brs in
tCase ind p' (subst_instance_constr u c) brs'
| tProj p c => tProj p (subst_instance_constr u c)
| tFix mfix idx =>
let mfix' := List.map (map_def (subst_instance_constr u) (subst_instance_constr u)) mfix in
tFix mfix' idx
| tCoFix mfix idx =>
let mfix' := List.map (map_def (subst_instance_constr u) (subst_instance_constr u)) mfix in
tCoFix mfix' idx
| tPrim _ => c
end.
(** Tests that the term is closed over [k] universe variables *)
Fixpoint closedu (k : nat) (t : term) : bool :=
match t with
| tSort univ => closedu_universe k univ
| tInd _ u => closedu_instance k u
| tConstruct _ _ u => closedu_instance k u
| tConst _ u => closedu_instance k u
| tRel i => true
| tEvar ev args => forallb (closedu k) args
| tLambda _ T M | tProd _ T M => closedu k T && closedu k M
| tApp u v => closedu k u && closedu k v
| tLetIn na b t b' => closedu k b && closedu k t && closedu k b'
| tCase ind p c brs =>
let p' := test_predicate_ku closedu_instance closedu k p in
let brs' := forallb (test_branch (closedu #|p.(puinst)|) (closedu k)) brs in
p' && closedu k c && brs'
| tProj p c => closedu k c
| tFix mfix idx =>
forallb (test_def (closedu k) (closedu k)) mfix
| tCoFix mfix idx =>
forallb (test_def (closedu k) (closedu k)) mfix
| _ => true
end.
Module PCUICTerm <: Term.
Definition term := term.
Definition tRel := tRel.
Definition tSort := tSort.
Definition tProd := tProd.
Definition tLambda := tLambda.
Definition tLetIn := tLetIn.
Definition tInd := tInd.
Definition tProj := tProj.
Definition mkApps := mkApps.
Definition lift := lift.
Definition subst := subst.
Definition closedn := closedn.
Definition noccur_between := noccur_between.
Definition subst_instance_constr := subst_instance.
End PCUICTerm.
(* These functors derive the notion of local context, and lift substitution, term
lifting, and the closed predicate to it. *)
Module PCUICEnvironment := Environment PCUICTerm.
Export PCUICEnvironment.
(* Do NOT `Include` this module, as this would sadly duplicate the rewrite database... *)
(** Decompose an arity into a context and a sort *)
Fixpoint destArity Γ (t : term) :=
match t with
| tProd na t b => destArity (Γ ,, vass na t) b
| tLetIn na b b_ty b' => destArity (Γ ,, vdef na b b_ty) b'
| tSort s => Some (Γ, s)
| _ => None
end.
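(* Illustrative sketch (not from the original file): an arity that is already a sort
   decomposes into the ambient context and that sort. *)
Example destArity_sort (u : Universe.t) : destArity [] (tSort u) = Some ([], u).
Proof. reflexivity. Qed.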
(** Inductive substitution, to produce a constructor's type *)
Definition inds ind u (l : list one_inductive_body) :=
let fix aux n :=
match n with
| 0 => []
| S n => tInd (mkInd ind n) u :: aux n
end
in aux (List.length l).
Module PCUICTermUtils <: TermUtils PCUICTerm PCUICEnvironment.
Definition destArity := destArity.
Definition inds := inds.
End PCUICTermUtils.
Ltac unf_term := unfold PCUICTerm.term in *; unfold PCUICTerm.tRel in *;
unfold PCUICTerm.tSort in *; unfold PCUICTerm.tProd in *;
unfold PCUICTerm.tLambda in *; unfold PCUICTerm.tLetIn in *;
unfold PCUICTerm.tInd in *; unfold PCUICTerm.tProj in *;
unfold PCUICTerm.lift in *; unfold PCUICTerm.subst in *;
unfold PCUICTerm.closedn in *; unfold PCUICTerm.noccur_between in *;
unfold PCUICTerm.subst_instance_constr in *;
unfold PCUICTermUtils.destArity in *; unfold PCUICTermUtils.inds in *.
Lemma context_assumptions_mapi_context f (ctx : context) :
context_assumptions (mapi_context f ctx) = context_assumptions ctx.
Proof.
now rewrite mapi_context_fold; len.
Qed.
#[global]
Hint Rewrite context_assumptions_mapi_context : len.
Module PCUICEnvTyping := EnvironmentTyping.EnvTyping PCUICTerm PCUICEnvironment PCUICTermUtils.
(** Included in PCUICTyping only *)
Module PCUICConversion := EnvironmentTyping.Conversion PCUICTerm PCUICEnvironment PCUICTermUtils PCUICEnvTyping.
Global Instance context_reflect`(ReflectEq term) :
ReflectEq (list (BasicAst.context_decl term)) := _.
Local Ltac finish :=
let h := fresh "h" in
right ;
match goal with
| e : ?t <> ?u |- _ =>
intro h ; apply e ; now inversion h
end.
Local Ltac fcase c :=
let e := fresh "e" in
case c ; intro e ; [ subst ; try (left ; reflexivity) | finish ].
Definition string_of_predicate {term} (f : term -> string) (p : predicate term) :=
"(" ^ "(" ^ String.concat "," (map f (pparams p)) ^ ")"
^ "," ^ string_of_universe_instance (puinst p)
^ ",(" ^ String.concat "," (map (string_of_name ∘ binder_name ∘ decl_name) (pcontext p)) ^ ")"
^ "," ^ f (preturn p) ^ ")".
Definition eqb_predicate_gen (eqb_univ_instance : Instance.t -> Instance.t -> bool)
(eqdecl : context_decl -> context_decl -> bool)
(eqterm : term -> term -> bool) (p p' : predicate term) :=
forallb2 eqterm p.(pparams) p'.(pparams) &&
eqb_univ_instance p.(puinst) p'.(puinst) &&
forallb2 eqdecl p.(pcontext) p'.(pcontext) &&
eqterm p.(preturn) p'.(preturn).
(** Syntactic equality *)
Definition eqb_predicate (eqterm : term -> term -> bool) (p p' : predicate term) :=
eqb_predicate_gen eqb (eqb_context_decl eqterm) eqterm p p'.
(** Theory of [map] variants on branches and predicates. *)
(* The [map] rewrite database gathers all the map composition rewrite lemmas
on these types. *)
#[global]
Hint Rewrite map_map_compose @compose_map_def map_length : map.
#[global]
Hint Rewrite @forallb_map : map.
Lemma map_predicate_map_predicate
{term term' term''}
(finst finst' : Instance.t -> Instance.t)
(f g : term' -> term'')
(f' g' : term -> term')
(h : list (BasicAst.context_decl term') -> list (BasicAst.context_decl term''))
(h' : list (BasicAst.context_decl term) -> list (BasicAst.context_decl term'))
(p : predicate term) :
map_predicate finst f g h (map_predicate finst' f' g' h' p) =
map_predicate (finst ∘ finst') (f ∘ f') (g ∘ g') (h ∘ h') p.
Proof.
unfold map_predicate. destruct p; cbn.
f_equal.
apply map_map.
Qed.
Lemma map_predicate_id x : map_predicate (@id _) (@id term) (@id term) (@id _) x = id x.
Proof.
unfold map_predicate; destruct x; cbn; unfold id.
f_equal. apply map_id.
Qed.
#[global]
Hint Rewrite @map_predicate_id : map.
Definition tCasePredProp_k
(P : nat -> term -> Type)
k (p : predicate term) :=
All (P k) p.(pparams) × onctx_k P #|p.(pparams)| p.(pcontext) ×
P (#|p.(pcontext)| + k) p.(preturn).
Definition tCasePredProp {term}
(Pparams Preturn : term -> Type)
(p : predicate term) :=
All Pparams p.(pparams) ×
onctx Pparams p.(pcontext) ×
Preturn p.(preturn).
Lemma map_predicate_eq_spec {A B} (finst finst' : Instance.t -> Instance.t)
(f f' g g' : A -> B) h h' (p : predicate A) :
finst (puinst p) = finst' (puinst p) ->
map f (pparams p) = map g (pparams p) ->
h =1 h' ->
f' (preturn p) = g' (preturn p) ->
map_predicate finst f f' h p = map_predicate finst' g g' h' p.
Proof.
intros. unfold map_predicate; f_equal; auto.
Qed.
#[global] Hint Resolve map_predicate_eq_spec : all.
Lemma map_predicate_k_eq_spec {A} (finst finst' : Instance.t -> Instance.t)
(f g : nat -> A -> A) k k' (p : predicate A) :
finst (puinst p) = finst' (puinst p) ->
map (f k) (pparams p) = map (g k') (pparams p) ->
shiftf f k #|pcontext p| (preturn p) = shiftf g k' #|pcontext p| (preturn p) ->
map_predicate_k finst f k p = map_predicate_k finst' g k' p.
Proof.
intros. unfold map_predicate_k; f_equal; auto.
Qed.
#[global] Hint Resolve map_predicate_k_eq_spec : all.
Lemma map_decl_id_spec P f d :
ondecl P d ->
(forall x : term, P x -> f x = x) ->
map_decl f d = d.
Proof.
intros Hc Hf.
destruct Hc.
unfold map_decl; destruct d; cbn in *. f_equal; eauto.
destruct decl_body; simpl; eauto. f_equal.
eauto.
Qed.
Lemma map_decl_id_spec_cond P p f d :
ondecl P d ->
test_decl p d ->
(forall x : term, P x -> p x -> f x = x) ->
map_decl f d = d.
Proof.
intros [].
unfold map_decl; destruct d; cbn in *.
unfold test_decl; simpl.
intros [pty pbody]%andb_and. intros Hx.
f_equal; eauto.
destruct decl_body; simpl; eauto. f_equal.
eauto.
Qed.
Lemma map_context_id_spec P f ctx :
onctx P ctx ->
(forall x : term, P x -> f x = x) ->
map_context f ctx = ctx.
Proof.
intros Hc Hf. induction Hc; simpl; auto.
rewrite IHHc. f_equal; eapply map_decl_id_spec; eauto.
Qed.
#[global] Hint Resolve map_context_id_spec : all.
Lemma map_context_id_spec_cond P p f ctx :
onctx P ctx ->
test_context p ctx ->
(forall x : term, P x -> p x -> f x = x) ->
map_context f ctx = ctx.
Proof.
intros Hc Hc' Hf. induction Hc in Hc' |- *; simpl; auto.
revert Hc'; simpl; intros [hx hl]%andb_and.
rewrite IHHc; auto. f_equal. eapply map_decl_id_spec_cond; eauto.
Qed.
#[global] Hint Resolve map_context_id_spec_cond : all.
Lemma map_predicate_id_spec {A} finst (f f' : A -> A) h (p : predicate A) :
finst (puinst p) = puinst p ->
map f (pparams p) = pparams p ->
h (pcontext p) = pcontext p ->
f' (preturn p) = preturn p ->
map_predicate finst f f' h p = p.
Proof.
unfold map_predicate.
intros -> -> -> ->; destruct p; auto.
Qed.
#[global] Hint Resolve map_predicate_id_spec : all.
Lemma map_predicate_k_id_spec {A} finst (f : nat -> A -> A) k (p : predicate A) :
finst (puinst p) = puinst p ->
map (f k) (pparams p) = pparams p ->
shiftf f k #|p.(pcontext)| (preturn p) = preturn p ->
map_predicate_k finst f k p = p.
Proof.
unfold map_predicate_k, shiftf.
intros -> -> ->; destruct p; auto.
Qed.
#[global] Hint Resolve map_predicate_k_id_spec : all.
#[global]
Instance map_predicate_proper {term} :
Proper (`=1` ==> `=1` ==> `=1` ==> Logic.eq ==> Logic.eq)%signature (@map_predicate term term id).
Proof.
intros eqf0 eqf1 eqf.
intros eqf'0 eqf'1 eqf' h h' eqh'.
intros x y ->.
apply map_predicate_eq_spec; auto.
now apply map_ext => x.
Qed.
#[global]
Instance map_predicate_proper' {term} f : Proper (`=1` ==> `=1` ==> Logic.eq ==> Logic.eq)
(@map_predicate term term id f).
Proof.
intros eqf0 eqf1 eqf h h' eqh'.
intros x y ->.
apply map_predicate_eq_spec; auto.
Qed.
Lemma shiftf0 {A B} (f : nat -> A -> B) : shiftf f 0 =2 f.
Proof. intros x. unfold shiftf. now rewrite Nat.add_0_r. Qed.
#[global]
Hint Rewrite @shiftf0 : map.
Lemma map_predicate_k_map_predicate_k
(finst finst' : Instance.t -> Instance.t)
(f f' : nat -> term -> term)
k k' (p : predicate term) :
map_predicate_k finst f k (map_predicate_k finst' f' k' p) =
map_predicate_k (finst ∘ finst') (fun i => f (i + k) ∘ f' (i + k')) 0 p.
Proof.
unfold map_predicate, map_predicate_k. destruct p; cbn.
f_equal.
now rewrite map_map.
now len.
Qed.
#[global]
Hint Rewrite map_predicate_k_map_predicate_k : map.
Lemma map_predicate_map_predicate_k
(finst finst' : Instance.t -> Instance.t)
(f : term -> term) (f' : nat -> term -> term)
k (p : predicate term) :
map_predicate finst f f id (map_predicate_k finst' f' k p) =
map_predicate_k (finst ∘ finst') (fun k => f ∘ f' k) k p.
Proof.
unfold map_predicate, map_predicate_k. destruct p; cbn.
f_equal.
apply map_map.
Qed.
#[global]
Hint Rewrite map_predicate_map_predicate_k : map.
Lemma map_predicate_k_map_predicate
(finst finst' : Instance.t -> Instance.t)
(f' : term -> term) (f : nat -> term -> term)
k (p : predicate term) :
map_predicate_k finst f k (map_predicate finst' f' f' id p) =
map_predicate_k (finst ∘ finst') (fun k => (f k) ∘ f') k p.
Proof.
unfold map_predicate, map_predicate_k. destruct p; cbn.
f_equal; len; auto.
* apply map_map.
Qed.
#[global]
Hint Rewrite map_predicate_k_map_predicate : map.
Lemma map_branch_map_branch
{term term' term''}
(f : term' -> term'')
(f' : term -> term')
h h'
(b : branch term) :
map_branch f h (map_branch f' h' b) =
map_branch (f ∘ f') (h ∘ h') b.
Proof.
unfold map_branch; destruct b; cbn.
f_equal.
Qed.
#[global]
Hint Rewrite @map_branch_map_branch : map.
Lemma map_branch_k_map_branch_k (f f' : nat -> term -> term) h h' k k' (b : branch term) :
#|h' b.(bcontext)| = #|b.(bcontext)| ->
map_branch_k f h k (map_branch_k f' h' k' b) =
map_branch_k (fun i => f (i + k) ∘ f' (i + k')) (h ∘ h') 0 b.
Proof.
unfold map_branch, map_branch_k; destruct b; cbn. len.
intros ->.
f_equal.
Qed.
Lemma map_branch_k_map_branch_k_id (f f' : nat -> term -> term) k k' (b : branch term) :
map_branch_k f id k (map_branch_k f' id k' b) =
map_branch_k (fun i => f (i + k) ∘ f' (i + k')) id 0 b.
Proof.
unfold map_branch, map_branch_k; destruct b; cbn. len.
f_equal.
Qed.
#[global]
Hint Rewrite map_branch_k_map_branch_k_id : map.
Lemma map_branch_map_branch_k
(f : term -> term)
(f' : nat -> term -> term) k
(b : branch term) :
map_branch f id (map_branch_k f' id k b) =
map_branch_k (fun k => f ∘ (f' k)) id k b.
Proof.
unfold map_branch, map_branch_k; destruct b; cbn.
f_equal.
Qed.
#[global]
Hint Rewrite map_branch_map_branch_k : map.
Lemma map_branch_k_map_branch
(f' : term -> term)
(f : nat -> term -> term) k
(b : branch term) :
map_branch_k f id k (map_branch f' id b) =
map_branch_k (fun k => f k ∘ f') id k b.
Proof.
unfold map_branch, map_branch_k; destruct b; cbn. len.
f_equal.
Qed.
#[global]
Hint Rewrite map_branch_k_map_branch : map.
Lemma map_branch_id x : map_branch (@id term) id x = id x.
Proof.
unfold map_branch, id; destruct x; cbn.
f_equal.
Qed.
#[global]
Hint Rewrite @map_branch_id : map.
Lemma map_decl_eq_spec {A B} {P : A -> Type} {d} {f g : A -> B} :
ondecl P d ->
(forall x, P x -> f x = g x) ->
map_decl f d = map_decl g d.
Proof.
destruct d; cbn; intros [Pty Pbod] Hfg.
unfold map_decl; cbn in *; f_equal.
* destruct decl_body; cbn in *; eauto. f_equal.
eauto.
* eauto.
Qed.
Lemma map_context_eq_spec {A B} P (f g : A -> B) ctx :
onctx P ctx ->
(forall x, P x -> f x = g x) ->
map_context f ctx = map_context g ctx.
Proof.
intros onc Hfg.
induction onc; simpl; auto.
rewrite IHonc. f_equal.
eapply map_decl_eq_spec; eauto.
Qed.
Lemma map_branch_eq_spec {A B} (f g : A -> B) h h' (x : branch A) :
f (bbody x) = g (bbody x) ->
h =1 h' ->
map_branch f h x = map_branch g h' x.
Proof.
intros. unfold map_branch; f_equal; auto.
Qed.
#[global] Hint Resolve map_branch_eq_spec : all.
Lemma map_branch_k_eq_spec {A B} (f g : nat -> A -> B) h h' k k' (x : branch A) :
shiftf f k #|x.(bcontext)| (bbody x) = shiftf g k' #|x.(bcontext)| (bbody x) ->
h =1 h' ->
map_branch_k f h k x = map_branch_k g h' k' x.
Proof.
intros. unfold map_branch_k; f_equal; auto.
Qed.
#[global] Hint Resolve map_branch_eq_spec : all.
#[global]
Instance map_branch_proper {term} : Proper (`=1` ==> `=1` ==> Logic.eq ==> Logic.eq)
(@map_branch term term).
Proof.
intros eqf0 eqf1 eqf h h' eqh'.
intros x y ->.
apply map_branch_eq_spec; auto.
Qed.
Lemma id_id {A} : @id A =1 id.
Proof. now intros x. Qed.
#[global] Hint Resolve id_id : core.
Lemma map_branch_id_spec (f : term -> term) (x : branch term) :
f (bbody x) = (bbody x) ->
map_branch f id x = x.
Proof.
intros. rewrite (map_branch_eq_spec _ id id id); auto.
now rewrite map_branch_id.
Qed.
#[global] Hint Resolve map_branch_id_spec : all.
Lemma map_branch_k_id_spec (f : nat -> term -> term) h k (x : branch term) :
h (bcontext x) = bcontext x ->
shiftf f k #|x.(bcontext)| (bbody x) = (bbody x) ->
map_branch_k f h k x = x.
Proof.
intros. unfold map_branch_k.
destruct x; simpl in *; f_equal; eauto.
Qed.
#[global] Hint Resolve map_branch_k_id_spec : all.
Lemma map_branches_map_branches
{term term' term''}
(f : term' -> term'')
(f' : term -> term')
h h'
(l : list (branch term)) :
map (fun b => map_branch f h (map_branch f' h' b)) l =
map (map_branch (f ∘ f') (h ∘ h')) l.
Proof.
eapply map_ext => b. apply map_branch_map_branch.
Qed.
Definition tCaseBrsProp {A} (P : A -> Type) (l : list (branch A)) :=
All (fun x => onctx P (bcontext x) × P (bbody x)) l.
Definition tCaseBrsProp_k (P : nat -> term -> Type) (p : predicate term) k (l : list (branch term)) :=
All (fun x => onctx_k P (#|p.(pparams)|) (bcontext x) × P (#|x.(bcontext)| + k) (bbody x)) l.
Lemma map_branches_k_map_branches_k
{term term' term''}
(f : nat -> term' -> term'')
(g : term -> term')
(f' : nat -> term -> term')
h h' k
(l : list (branch term)) :
(forall ctx, #|h' ctx| = #|ctx|) ->
map (fun b => map_branch (f #|bcontext (map_branch g h' b)|) h (map_branch (f' k) h' b)) l =
map (fun b => map_branch (f #|bcontext b|) h (map_branch (f' k) h' b)) l.
Proof.
intros Hh.
eapply map_ext => b. rewrite map_branch_map_branch.
rewrite map_branch_map_branch.
simpl; autorewrite with len. now rewrite Hh.
Qed.
Lemma case_brs_map_spec {A B} {P : A -> Type} {l} {f g : A -> B}
{h h' : list (BasicAst.context_decl A) -> list (BasicAst.context_decl B)} :
tCaseBrsProp P l -> (forall x, P x -> f x = g x) -> h =1 h' ->
map_branches f h l = map_branches g h' l.
Proof.
intros. red in X.
eapply All_map_eq. eapply All_impl; eauto. simpl; intros.
destruct X0.
apply map_branch_eq_spec; eauto.
Qed.
Lemma map_decl_eqP_spec {A B} {P : A -> Type} {p : A -> bool}
{d} {f g : A -> B} :
ondecl P d ->
test_decl p d ->
(forall x, P x -> p x -> f x = g x) ->
map_decl f d = map_decl g d.
Proof.
destruct d; cbn; intros [Pty Pbod] [pty pbody]%andb_and Hfg.
unfold map_decl; cbn in *; f_equal.
* destruct decl_body; cbn in *; eauto. f_equal.
eauto.
* eauto.
Qed.
Lemma map_context_eqP_spec {A B} {P : A -> Type} {p : A -> bool}
{ctx} {f g : A -> B} :
All (ondecl P) ctx ->
test_context p ctx ->
(forall x, P x -> p x -> f x = g x) ->
map_context f ctx = map_context g ctx.
Proof.
intros Ha Hctx Hfg. induction Ha; simpl; auto.
revert Hctx; simpl; intros [Hx Hl]%andb_and.
rewrite IHHa; f_equal; auto.
eapply map_decl_eqP_spec; eauto.
Qed.
Lemma mapi_context_eqP_spec {A B} {P : A -> Type} {ctx} {f g : nat -> A -> B} :
All (ondecl P) ctx ->
(forall k x, P x -> f k x = g k x) ->
mapi_context f ctx = mapi_context g ctx.
Proof.
intros Ha Hfg. induction Ha; simpl; auto.
rewrite IHHa; f_equal.
destruct p as [Hty Hbody].
unfold map_decl; destruct x ; cbn in *; f_equal.
* destruct decl_body; cbn in *; auto.
f_equal. eauto.
* eauto.
Qed.
Lemma mapi_context_eqP_id_spec {A} {P : A -> Type} {ctx} {f : nat -> A -> A} :
All (ondecl P) ctx ->
(forall k x, P x -> f k x = x) ->
mapi_context f ctx = ctx.
Proof.
intros Ha Hfg. induction Ha; simpl; auto.
rewrite IHHa; f_equal.
destruct p as [Hty Hbody].
unfold map_decl; destruct x ; cbn in *; f_equal.
* destruct decl_body; cbn in *; auto.
f_equal. eauto.
* eauto.
Qed.
Lemma mapi_context_eqP_test_id_spec {A} {P : A -> Type} (p : nat -> A -> bool)
k {ctx} {f : nat -> A -> A} :
All (ondecl P) ctx ->
test_context_k p k ctx ->
(forall k (x : A), P x -> p k x -> f k x = x) ->
mapi_context (shiftf f k) ctx = ctx.
Proof.
intros Ha ht Hfg. revert ht.
induction Ha; simpl; auto.
intros [hl [hty hbod]%andb_and]%andb_and.
rewrite IHHa; auto; f_equal.
destruct p0 as [Hty Hbody].
unfold map_decl; destruct x ; cbn in *; f_equal; eauto.
destruct decl_body; cbn in *; auto.
f_equal. unfold shiftf. eapply Hfg; auto.
Qed.
Lemma test_context_k_eqP_id_spec {A} {P : A -> Type} (p q : nat -> A -> bool) k k' {ctx} :
All (ondecl P) ctx ->
test_context_k p k ctx ->
(forall i (x : A), P x -> p (i + k) x -> q (i + k') x) ->
test_context_k q k' ctx.
Proof.
intros Ha ht Hfg. revert ht.
induction Ha; simpl; auto.
intros [hl [hty hbod]%andb_and]%andb_and.
rewrite IHHa; simpl; auto.
destruct p0 as [Hty Hbody].
unfold test_decl; destruct x ; cbn in *; eauto.
destruct decl_body; cbn in *; auto.
rewrite !Hfg; auto.
Qed.
Lemma test_context_k_eqP_eq_spec {A} {P : A -> Type} (p q : nat -> A -> bool) k k' {ctx} :
All (ondecl P) ctx ->
(forall i (x : A), P x -> p (i + k) x = q (i + k') x) ->
test_context_k p k ctx = test_context_k q k' ctx.
Proof.
intros Ha Hfg.
induction Ha; simpl; auto.
rewrite IHHa; auto; f_equal.
destruct p0 as [Hty Hbody].
unfold test_decl; destruct x ; cbn in *; f_equal; eauto.
destruct decl_body; cbn in *; auto;
rewrite !Hfg; auto.
Qed.
Lemma test_context_k_eq_spec (p q : nat -> term -> bool) k k' {ctx} :
(p =2 q) ->
k = k' ->
test_context_k p k ctx = test_context_k q k' ctx.
Proof.
intros Hfg <-.
induction ctx as [|[na [b|] ty] ctx]; simpl; auto; now rewrite IHctx Hfg.
Qed.
Lemma test_context_k_eq (p : nat -> term -> bool) n ctx :
test_context_k p n ctx = alli (fun k d => test_decl (p (n + k)) d) 0 (List.rev ctx).
Proof.
induction ctx; simpl; auto.
rewrite IHctx alli_app /= andb_comm andb_true_r andb_comm. f_equal.
len. now rewrite Nat.add_comm.
Qed.
#[global]
Instance test_context_k_Proper : Proper (`=2` ==> Logic.eq ==> `=1`) (@test_context_k term).
Proof.
intros f g Hfg k k' <- ctx.
now apply test_context_k_eq_spec.
Qed.
#[global]
Instance test_predicate_k_Proper : Proper (`=1` ==> `=2` ==> Logic.eq ==> `=1`) (@test_predicate_k term).
Proof.
intros hi hi' eqhi f g Hfg k k' <- ctx.
unfold test_predicate_k. rewrite eqhi.
now setoid_rewrite Hfg.
Qed.
#[global]
Instance test_predicate_ku_Proper : Proper (`=2` ==> `=2` ==> Logic.eq ==> `=1`) (@test_predicate_ku term).
Proof.
intros hi hi' eqhi f g Hfg k k' <- ctx.
unfold test_predicate_ku. rewrite eqhi.
now setoid_rewrite Hfg.
Qed.
#[global]
Instance test_branch_k_Proper p : Proper (`=2` ==> Logic.eq ==> `=1`) (@test_branch_k term p).
Proof.
intros f g Hfg k k' <- ctx.
unfold test_branch_k.
now setoid_rewrite Hfg.
Qed.
Lemma case_brs_map_spec_cond {A B} {P : A -> Type} pctx p {l} {f g : A -> B} {h h'}:
tCaseBrsProp P l ->
forallb (test_branch pctx p) l ->
(forall x, P x -> p x -> f x = g x) ->
(* (forall ctx, onctx P ctx -> test_context pctx ctx -> h ctx = h' ctx) -> *)
h =1 h' ->
map_branches f h l = map_branches g h' l.
Proof.
intros. red in X.
eapply forallb_All in H.
eapply All_map_eq.
eapply All_prod in X; tea. clear H.
eapply All_impl; eauto. simpl; intros br [[]%andb_and []].
apply map_branch_eq_spec; eauto.
(*eapply map_context_eqP_spec; eauto.*)
Qed.
Lemma case_brs_map_k_spec {A B} {P : A -> Type} {k l} {f g : nat -> A -> B} {h h'} :
tCaseBrsProp P l ->
(forall k x, P x -> f k x = g k x) ->
h =1 h' ->
map_branches_k f h k l = map_branches_k g h' k l.
Proof.
intros. red in X.
eapply All_map_eq. eapply All_impl; eauto. simpl; intros.
apply map_branch_k_eq_spec; eauto.
unfold shiftf. now apply H.
Qed.
Lemma case_brs_forallb_map_spec {A B} {P : A -> Type} {pctx p : A -> bool}
{l} {f g : A -> B} {h h'} :
tCaseBrsProp P l ->
forallb (test_branch pctx p) l ->
(forall x, P x -> p x -> f x = g x) ->
h =1 h' ->
map (map_branch f h) l = map (map_branch g h') l.
Proof.
intros.
eapply All_map_eq. red in X. apply forallb_All in H.
eapply All_impl. eapply All_prod. exact X. exact H. simpl.
intros [bctx bbod] [Hbr hb]. cbn in *.
unfold map_branch; cbn. f_equal.
- apply H1.
- eapply H0; eauto. apply Hbr.
now move/andb_and: hb => [].
Qed.
Lemma test_context_map (p : term -> bool) f (ctx : context) :
test_context p (map_context f ctx) = test_context (p ∘ f) ctx.
Proof.
induction ctx; simpl; auto.
rewrite IHctx. f_equal.
now rewrite test_decl_map_decl.
Qed.
#[global]
Hint Rewrite test_context_map : map.
Lemma onctx_test P (p q : term -> bool) ctx :
onctx P ctx ->
test_context p ctx ->
(forall t, P t -> p t -> q t) ->
test_context q ctx.
Proof.
intros Hc tc HP. revert tc.
induction Hc; simpl; auto.
destruct p0.
intros [pl [pbod pty]%andb_and]%andb_and.
rewrite (IHHc pl); simpl.
unfold test_decl.
rewrite (HP _ p0 pty) andb_true_r; simpl.
destruct (decl_body x); simpl in *; eauto.
Qed.
(** Useful for inductions *)
Lemma onctx_k_rev {P : nat -> term -> Type} {k} {ctx} :
onctx_k P k ctx <~>
Alli (fun i => ondecl (P (i + k))) 0 (List.rev ctx).
Proof.
split.
- unfold onctx_k.
intros Hi.
eapply forall_nth_error_Alli => i x hx.
pose proof (nth_error_Some_length hx).
rewrite nth_error_rev // in hx.
rewrite List.rev_involutive in hx.
len in hx.
eapply Alli_nth_error in Hi; tea.
simpl in Hi. simpl.
replace (Nat.pred #|ctx| - (#|ctx| - S i) + k) with (i + k) in Hi => //.
len in H; by lia.
- intros Hi.
eapply forall_nth_error_Alli => i x hx.
eapply Alli_rev_nth_error in Hi; tea.
simpl.
replace (#|ctx| - S i + k) with (Nat.pred #|ctx| - i + k) in Hi => //.
lia.
Qed.
Lemma onctx_k_shift {P : nat -> term -> Type} {k} {ctx} :
onctx_k P k ctx ->
onctx_k (fun k' => P (k' + k)) 0 ctx.
Proof.
intros Hi%onctx_k_rev.
eapply onctx_k_rev.
eapply Alli_impl; tea => /= n x.
now rewrite Nat.add_0_r.
Qed.
Lemma onctx_k_P {P : nat -> term -> Type} {p : nat -> term -> bool} {k} {ctx : context} :
(forall x y, reflectT (P x y) (p x y)) ->
reflectT (onctx_k P k ctx) (test_context_k p k ctx).
Proof.
intros HP.
eapply equiv_reflectT.
- intros Hi%onctx_k_rev.
rewrite test_context_k_eq.
induction Hi; simpl; auto.
rewrite Nat.add_comm.
rewrite IHHi /= //.
now move/(ondeclP (HP _)): p0 => ->.
- intros Hi. eapply onctx_k_rev.
move: ctx Hi. induction ctx.
* constructor.
* move => /= /andb_and [Hctx Hd].
eapply Alli_app_inv; eauto. constructor.
+ move/(ondeclP (HP _)): Hd. now len.
+ constructor.
Qed.
Module PCUICLookup := EnvironmentTyping.Lookup PCUICTerm PCUICEnvironment.
Include PCUICLookup.
Derive NoConfusion for global_decl.
Module PCUICGlobalMaps := EnvironmentTyping.GlobalMaps
PCUICTerm
PCUICEnvironment
PCUICTermUtils
PCUICEnvTyping
PCUICConversion
PCUICLookup
.
Include PCUICGlobalMaps.
(** ** Entries
The kernel accepts these inputs and typechecks them to produce
declarations. Reflects [kernel/entries.mli].
*)
(** *** Constant and axiom entries *)
Record parameter_entry := {
parameter_entry_type : term;
parameter_entry_universes : universes_decl }.
Record definition_entry := {
definition_entry_type : term;
definition_entry_body : term;
definition_entry_universes : universes_decl;
definition_entry_opaque : bool }.
Inductive constant_entry :=
| ParameterEntry (p : parameter_entry)
| DefinitionEntry (def : definition_entry).
Derive NoConfusion for parameter_entry definition_entry constant_entry.
(** *** Inductive entries *)
(** This is the representation of mutual inductives.
nearly copied from [kernel/entries.mli]
Assume the following definition in concrete syntax:
[[
Inductive I1 (x1:X1) ... (xn:Xn) : A1 := c11 : T11 | ... | c1n1 : T1n1
...
with Ip (x1:X1) ... (xn:Xn) : Ap := cp1 : Tp1 ... | cpnp : Tpnp.
]]
then, in [i]th block, [mind_entry_params] is [xn:Xn;...;x1:X1];
[mind_entry_arity] is [Ai], defined in context [x1:X1;...;xn:Xn];
[mind_entry_lc] is [Ti1;...;Tini], defined in context
[A'1;...;A'p;x1:X1;...;xn:Xn] where [A'i] is [Ai] generalized over
[x1:X1;...;xn:Xn].
*)
Inductive local_entry :=
| LocalDef : term -> local_entry (* local let binding *)
| LocalAssum : term -> local_entry.
Record one_inductive_entry := {
mind_entry_typename : ident;
mind_entry_arity : term;
mind_entry_template : bool; (* template polymorphism *)
mind_entry_consnames : list ident;
mind_entry_lc : list term (* constructor list *) }.
Record mutual_inductive_entry := {
mind_entry_record : option (option ident);
(* Is this mutual inductive defined as a record?
If so, is it primitive, using binder name [ident]
for the record in primitive projections ? *)
mind_entry_finite : recursivity_kind;
mind_entry_params : context;
mind_entry_inds : list one_inductive_entry;
mind_entry_universes : universes_decl;
mind_entry_private : option bool
(* Private flag for sealing an inductive definition in an enclosing
module. Not handled by Coq yet. *) }.
Derive NoConfusion for local_entry one_inductive_entry mutual_inductive_entry.
|
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Kalman Filter Math
```python
#format the book
%matplotlib inline
from __future__ import division, print_function
from book_format import load_style
load_style()
```
If you've gotten this far I hope that you are thinking that the Kalman filter's fearsome reputation is somewhat undeserved. Sure, I hand waved some equations away, but I hope implementation has been fairly straightforward for you. The underlying concept is quite straightforward - take two measurements, or a measurement and a prediction, and choose the output to be somewhere between the two. If you believe the measurement more, your guess will be closer to the measurement; if you believe the prediction is more accurate, your guess will lie closer to it. That's not rocket science (little joke - it is exactly this math that got Apollo to the moon and back!).
To be honest I have been choosing my problems carefully. For an arbitrary problem designing the Kalman filter matrices can be extremely difficult. I haven't been *too tricky*, though. Equations like Newton's equations of motion can be trivially computed for Kalman filter applications, and they make up the bulk of the kind of problems that we want to solve.
I have illustrated the concepts with code and reasoning, not math. But there are topics that do require more mathematics than I have used so far. This chapter presents the math that you will need for the rest of the book.
## Modeling a Dynamic System
A *dynamic system* is a physical system whose state (position, temperature, etc) evolves over time. Calculus is the math of changing values, so we use differential equations to model dynamic systems. Some systems cannot be modeled with differential equations, but we will not encounter those in this book.
Modeling dynamic systems is properly the topic of several college courses. To an extent there is no substitute for a few semesters of ordinary and partial differential equations followed by a graduate course in control system theory. If you are a hobbyist, or trying to solve one very specific filtering problem at work you probably do not have the time and/or inclination to devote a year or more to that education.
Fortunately, I can present enough of the theory to allow us to create the system equations for many different Kalman filters. My goal is to get you to the stage where you can read a publication and understand it well enough to implement the algorithms. The background math is deep, but in practice we end up using a few simple techniques over and over again.
This is the longest section of pure math in this book. You will need to master everything in this section to understand the Extended Kalman filter (EKF), the workhorse nonlinear filter. I do cover more modern filters that do not require as much of this math. You can choose to skim now, and come back to this if you decide to learn the EKF.
We need to start by understanding the underlying equations and assumptions that the Kalman filter uses. We are trying to model real world phenomena, so what do we have to consider?
Each physical system has a process. For example, a car traveling at a certain velocity goes so far in a fixed amount of time, and its velocity varies as a function of its acceleration. We describe that behavior with the well known Newtonian equations that we learned in high school.
$$
\begin{aligned}
v&=at\\
x &= \frac{1}{2}at^2 + v_0t + x_0
\end{aligned}
$$
Once we learned calculus we saw them in this form:
$$ \mathbf v = \frac{d \mathbf x}{d t},
\quad \mathbf a = \frac{d \mathbf v}{d t} = \frac{d^2 \mathbf x}{d t^2}
$$
A typical automobile tracking problem would have you compute the distance traveled given a constant velocity or acceleration as we did in previous chapters. But, of course we know this is not all that is happening. No car travels on a perfect road. There are bumps that cause the car to slow down, there is wind drag, there are hills that raise and lower the speed. The suspension is a mechanical system with friction and imperfect springs. Gusts of wind alter the car's state.
Accurately modeling a system with linear equations is impossible except for the most trivial problems. So control theory is forced to make a simplification. At any time $t$ we say that the true state (such as the position of our car) is the predicted value from the imperfect model plus some unknown *process noise*:
$$
x(t) = x_{pred}(t) + noise(t)
$$
This is not meant to imply that $noise(t)$ is a function that we can derive analytically. It is merely a statement of fact - we can always describe the true value as the predicted value plus the process noise. "Noise" does not imply random events. If we are tracking a thrown ball in the atmosphere, and our model assumes the ball is in a vacuum, then the effect of air drag is process noise in this context.
In the next section we will learn techniques to convert a set of differential equations into a set of first-order differential equations. Assuming we have done so, we can say that our model of the system without noise is:
$$ \dot{\mathbf x} = \mathbf{Ax}$$
$\mathbf A$ is known as the *system dynamics matrix* as it describes the dynamics of the system. Now we need to model the noise. We will call that $\mathbf w$, and add it to the equation.
$$ \dot{\mathbf x} = \mathbf{Ax} + \mathbf w$$
$\mathbf w$ may strike you as a poor choice for the name, but you will soon see that the Kalman filter assumes *white* noise.
Finally, we need to consider any inputs into the system. We assume an input $\mathbf u$, and that there exists a linear model that defines how that input changes the system. For example, pressing the accelerator in your car makes it accelerate, and gravity causes balls to fall. Both are control inputs. We will need a matrix $\mathbf B$ to convert $\mathbf u$ into the effect on the system. We add that into our equation:
$$ \dot{\mathbf x} = \mathbf{Ax} + \mathbf{Bu} + \mathbf{w}$$
And that's it. That is one of the equations that Dr. Kalman set out to solve, and he found an optimal estimator if we assume certain properties of $\mathbf w$.
## State-Space Representation of Dynamic Systems
In the last section we derived the equation
$$ \dot{\mathbf x} = \mathbf{Ax}+ \mathbf{Bu} + \mathbf{w}$$.
However, for our filters we are not interested in the derivative of $\mathbf x$, but in $\mathbf x$ itself. Ignoring the noise for a moment, we want an equation that recursively finds the value of $\mathbf x$ at time $t_k$ in terms of $\mathbf x$ at time $t_{k-1}$:
$$\mathbf x(t_k) = \mathbf F(\Delta t)\mathbf x(t_{k-1}) + \mathbf B(t_k)\mathbf u(t_k)$$
Convention allows us to write $\mathbf x(t_k)$ as $\mathbf x_k$, which means the value of $\mathbf x$ at the k$^{th}$ value of $t$.
$$\mathbf x_k = \mathbf{Fx}_{k-1} + \mathbf B_k\mathbf u_k$$
$\mathbf F$ is the familiar *state transition matrix*, named due to its ability to transition the state from the previous time step to the current time step. It is very similar to the system dynamics matrix $\mathbf A$. The difference is that the system dynamics matrix $\mathbf A$ models a set of linear differential equations, and is continuous. $\mathbf F$ is discrete, and represents a set of linear equations (not differential equations) which transition $\mathbf x_{k-1}$ to $\mathbf x_k$ over a discrete time step $\Delta t$.
Normally finding this equation is quite difficult. The equation $\dot x = v$ is the simplest possible differential equation and we trivially integrate it as:
$$ \int\limits_{x_{k-1}}^{x_k} \mathrm{d}x = \int\limits_{0}^{\Delta t} v\, \mathrm{d}t \\
x_k-x_{k-1} = v \Delta t \\
x_k = v \Delta t + x_{k-1}$$
This equation is *recursive*: we compute the value of $x$ at time $t$ based on its value at time $t-1$. This recursive form enables us to represent the system (process model) in the form required by the Kalman filter:
$$\begin{aligned}
\mathbf x_k &= \mathbf{Fx}_{k-1} \\
&= \begin{bmatrix} 1 & \Delta t \\ 0 & 1\end{bmatrix}
\begin{bmatrix}x_{k-1} \\ \dot x_{k-1}\end{bmatrix}
\end{aligned}$$
We can do that only because $\dot x = v$ is the simplest differential equation possible. Almost all other physical systems result in more complicated differential equations which do not yield to this approach.
*State-space* methods became popular around the time of the Apollo missions, largely due to the work of Dr. Kalman. The idea is simple. Model a system with a set of $n^{th}$-order differential equations. Convert them into an equivalent set of first-order differential equations. Put them into the vector-matrix form used in the previous section: $\dot{\mathbf x} = \mathbf{Ax} + \mathbf{Bu}$. Once in this form we use one of several techniques to convert these linear differential equations into the recursive equation:
$$ \mathbf x_k = \mathbf{Fx}_{k-1} + \mathbf B_k\mathbf u_k$$
Some books call the state transition matrix the *fundamental matrix*. Many use $\mathbf \Phi$ instead of $\mathbf F$. Sources based heavily on control theory tend to use these forms.
These are called *state-space* methods because we are expressing the solution of the differential equations in terms of the system state.
### Forming First Order Equations from Higher Order Equations
Many models of physical systems require second or higher order differential equations with control input $u$:
$$a_n \frac{d^ny}{dt^n} + a_{n-1} \frac{d^{n-1}y}{dt^{n-1}} + \dots + a_2 \frac{d^2y}{dt^2} + a_1 \frac{dy}{dt} + a_0 y = u$$
State-space methods require first-order equations. Any higher order system of equations can be reduced to first-order by defining extra variables for the derivatives and then solving.
Let's do an example. Given the system $\ddot{x} - 6\dot x + 9x = t$ find the equivalent first order equations. I've used the dot notation for the time derivatives for clarity.
The first step is to isolate the highest order term onto one side of the equation.
$$\ddot{x} = 6\dot x - 9x + t$$
We define two new variables:
$$ x_1(t) = x \\
x_2(t) = \dot x
$$
Now we will substitute these into the original equation and solve. The solution yields a set of first-order equations in terms of these new variables. It is conventional to drop the $(t)$ for notational convenience.
We know that $\dot x_1 = x_2$ and that $\dot x_2 = \ddot{x}$. Therefore
$$\begin{aligned}
\dot x_2 &= \ddot{x} \\
&= 6\dot x - 9x + t\\
&= 6x_2-9x_1 + t
\end{aligned}$$
Therefore our first-order system of equations is
$$\begin{aligned}\dot x_1 &= x_2 \\
\dot x_2 &= 6x_2-9x_1 + t\end{aligned}$$
If you practice this a bit you will become adept at it. Isolate the highest term, define a new variable and its derivatives, and then substitute.
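Let's verify that reduction numerically. This is just a quick sketch, not part of the derivation; it assumes a reasonably recent SciPy is installed, and the initial conditions are made up for illustration:

```python
# Integrate the first-order system [x1' = x2, x2' = 6*x2 - 9*x1 + t]
# numerically; x1 is then a solution of the original equation
# x'' - 6x' + 9x = t.
from scipy.integrate import solve_ivp

def deriv(t, x):
    x1, x2 = x
    return [x2, 6*x2 - 9*x1 + t]

# made-up initial conditions: x(0) = 1, x'(0) = 0
sol = solve_ivp(deriv, (0, 1), [1., 0.])
print(sol.y[0][-1])   # approximate x(t=1)
```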
### First Order Differential Equations In State-Space Form
Substituting the newly defined variables from the previous section:
$$\frac{dx_1}{dt} = x_2,\,
\frac{dx_2}{dt} = x_3, \, ..., \,
\frac{dx_{n-1}}{dt} = x_n$$
into the higher order equation and solving for the highest derivative yields:
$$\frac{dx_n}{dt} = -\frac{1}{a_n}\sum\limits_{i=0}^{n-1}a_ix_{i+1} + \frac{1}{a_n}u
$$
Using vector-matrix notation we have:
$$\begin{bmatrix}\frac{dx_1}{dt} \\ \frac{dx_2}{dt} \\ \vdots \\ \frac{dx_n}{dt}\end{bmatrix} =
\begin{bmatrix}\dot x_1 \\ \dot x_2 \\ \vdots \\ \dot x_n\end{bmatrix}=
\begin{bmatrix}0 & 1 & 0 &\cdots & 0 \\
0 & 0 & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
-\frac{a_0}{a_n} & -\frac{a_1}{a_n} & -\frac{a_2}{a_n} & \cdots & -\frac{a_{n-1}}{a_n}\end{bmatrix}
\begin{bmatrix}x_1 \\ x_2 \\ \vdots \\ x_n\end{bmatrix} +
\begin{bmatrix}0 \\ 0 \\ \vdots \\ \frac{1}{a_n}\end{bmatrix}u$$
which we then write as $\dot{\mathbf x} = \mathbf{Ax} + \mathbf{B}u$.
### Finding the Fundamental Matrix for Time Invariant Systems
We express the system equations in state-space form with
$$ \dot{\mathbf x} = \mathbf{Ax}$$
where $\mathbf A$ is the system dynamics matrix, and want to find the *fundamental matrix* $\mathbf F$ that propagates the state $\mathbf x$ over the interval $\Delta t$ with the equation
$$\begin{aligned}
\mathbf x(t_k) = \mathbf F(\Delta t)\mathbf x(t_{k-1})\end{aligned}$$
In other words, $\mathbf A$ is a set of continuous differential equations, and we need $\mathbf F$ to be a set of discrete linear equations that computes the change in $\mathbf A$ over a discrete time step.
It is conventional to drop the $t_k$ and $(\Delta t)$ and use the notation
$$\mathbf x_k = \mathbf {Fx}_{k-1}$$
$\mathbf x_k$ does not mean the k$^{th}$ value of $\mathbf x$, but the value of $\mathbf x$ at the k$^{th}$ value of $t$. In this book I go even further and drop the suffixes entirely in favor of an overline to denote the prediction: $\overline{\mathbf x} = \mathbf{Fx}$.
Broadly speaking there are three common ways to find this matrix for Kalman filters. The technique most often used with Kalman filters is to use a matrix exponential. Linear Time Invariant Theory, also known as LTI System Theory, is a second technique. Finally, there are numerical techniques. You may know of others, but these three are what you will most likely encounter in the Kalman filter literature and praxis.
### The Matrix Exponential
The solution to the equation $\frac{dx}{dt} = kx$ can be found by:
$$\frac{dx}{dt} = kx \\
\frac{dx}{x} = k\, dt \\
\int \frac{1}{x}\, dx = \int k\, dt \\
\log x = kt + c \\
x = e^{kt+c} \\
x = e^ce^{kt} \\
x = c_0e^{kt}$$
Using similar math, the solution to the first-order equation
$$\dot{\mathbf x} = \mathbf{Ax} ,\, \, \, \mathbf x(0) = \mathbf x_0$$
where $\mathbf A$ is a constant matrix, is
$$\mathbf x = e^{\mathbf At}\mathbf x_0$$
Substituting $\mathbf F = e^{\mathbf At}$, we can write
$$\mathbf x_k = \mathbf F\mathbf x_{k-1}$$
which is the form we are looking for! We have reduced the problem of finding the fundamental matrix to one of finding the value for $e^{\mathbf At}$.
$e^{\mathbf At}$ is known as the matrix exponential. It can be computed with this power series:
$$e^{\mathbf At} = \mathbf{I} + \mathbf{A}t + \frac{(\mathbf{A}t)^2}{2!} + \frac{(\mathbf{A}t)^3}{3!} + ... $$
That series is found by doing a Taylor series expansion of $e^{\mathbf At}$, which I will not cover here.
Let's use this to find the solution to Newton's equations. Using $v$ as a substitution for $\dot x$, and assuming constant velocity we get the linear matrix-vector form
$$\begin{bmatrix}\dot x \\ \dot v\end{bmatrix} =\begin{bmatrix}0&1\\0&0\end{bmatrix} \begin{bmatrix}x \\ v\end{bmatrix}$$
This is a first order differential equation, so we can set $\mathbf{A}=\begin{bmatrix}0&1\\0&0\end{bmatrix}$ and solve the following equation. I have substituted the interval $\Delta t$ for $t$ to emphasize that the fundamental matrix is discrete:
$$\mathbf F = e^{\mathbf A\Delta t} = \mathbf{I} + \mathbf A\Delta t + \frac{(\mathbf A\Delta t)^2}{2!} + \frac{(\mathbf A\Delta t)^3}{3!} + ... $$
If you perform the multiplication you will find that $\mathbf{A}^2=\begin{bmatrix}0&0\\0&0\end{bmatrix}$, which means that all higher powers of $\mathbf{A}$ are also $\mathbf{0}$. Thus we get an exact answer without an infinite number of terms:
$$
\begin{aligned}
\mathbf F &=\mathbf{I} + \mathbf A \Delta t + \mathbf{0} \\
&= \begin{bmatrix}1&0\\0&1\end{bmatrix} + \begin{bmatrix}0&1\\0&0\end{bmatrix}\Delta t\\
&= \begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}
\end{aligned}$$
We plug this into $\mathbf x_k= \mathbf{Fx}_{k-1}$ to get
$$
\begin{aligned}
\mathbf x_k &= \begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}\mathbf x_{k-1}
\end{aligned}$$
You will recognize this as the matrix we derived analytically for the constant velocity Kalman filter in the **Multivariate Kalman Filter** chapter.
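If you want to double check a derivation like this numerically, SciPy's `expm()` computes the matrix exponential directly. A minimal sanity-check sketch, assuming SciPy is available (the filter does not need this at run time):

```python
import numpy as np
from scipy.linalg import expm

dt = 0.1
A = np.array([[0., 1.],
              [0., 0.]])
# expm(A*dt) should match the analytic F = [[1, dt], [0, 1]]
print(expm(A*dt))
```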
### Time Invariance
If the behavior of the system depends on time we can say that a dynamic system is described by the first-order differential equation
$$ g(t) = \dot x$$
However, if the system is *time invariant* the equation is of the form:
$$ f(x) = \dot x$$
What does *time invariant* mean? Consider a home stereo. If you input a signal $x$ into it at time $t$, it will output some signal $f(x)$. If you instead perform the input at time $t + \Delta t$ the output signal will be the same $f(x)$, shifted in time.
A counter-example is $x(t) = \sin(t)$, with the system $f(x) = t\, x(t) = t \sin(t)$. This is not time invariant; the value will be different at different times due to the multiplication by t. An aircraft is not time invariant. If you make a control input to the aircraft at a later time its behavior will be different because it will have burned fuel and thus lost weight. Lower weight results in different behavior.
We can solve these equations by integrating each side. I demonstrated integrating the time invariant system $v = \dot x$ above. However, integrating the time invariant equation $\dot x = f(x)$ is not so straightforward. Using the *separation of variables* technique we divide by $f(x)$ and move the $dt$ term to the right so we can integrate each side:
$$
\frac{dx}{dt} = f(x) \\
\int^x_{x_0} \frac{1}{f(x)} dx = \int^t_{t_0} dt\\
$$
If we let $F(x) = \int \frac{1}{f(x)} dx$ we get
$$F(x) - F(x_0) = t-t_0$$
We then solve for x with
$$F(x) = t - t_0 + F(x_0) \\
x = F^{-1}[t-t_0 + F(x_0)]$$
In other words, we need to find the inverse of $F$. This is not trivial, and a significant amount of coursework in a STEM education is devoted to finding tricky, analytic solutions to this problem.
However, they are tricks, and many simple forms of $f(x)$ either have no closed form solution or pose extreme difficulties. Instead, the practicing engineer turns to state-space methods to find approximate solutions.
The advantage of the matrix exponential is that we can use it for any arbitrary set of differential equations which are *time invariant*. However, we often use this technique even when the equations are not time invariant. As an aircraft flies it burns fuel and loses weight. However, the weight loss over one second is negligible, and so the system is nearly linear over that time step. Our answers will still be reasonably accurate so long as the time step is short.
#### Example: Mass-Spring-Damper Model
Suppose we wanted to track the motion of a weight on a spring connected to a damper, such as an automobile's suspension. The equation for the motion with $m$ being the mass, $k$ the spring constant, and $c$ the damping coefficient, under some input $u$ is
$$m\frac{d^2x}{dt^2} + c\frac{dx}{dt} +kx = u$$
For notational convenience I will write that as
$$m\ddot x + c\dot x + kx = u$$
I can turn this into a system of first order equations by setting $x_1(t)=x(t)$, and then substituting as follows:
$$\begin{aligned}
x_1 &= x \\
x_2 &= \dot x_1 \\
\dot x_2 &= \ddot x_1 = \ddot x
\end{aligned}$$
As is common I dropped the $(t)$ for notational convenience. This gives the equation
$$m\dot x_2 + c x_2 +kx_1 = u$$
Solving for $\dot x_2$ we get a first order equation:
$$\dot x_2 = -\frac{c}{m}x_2 - \frac{k}{m}x_1 + \frac{1}{m}u$$
We put this into matrix form:
$$\begin{bmatrix} \dot x_1 \\ \dot x_2 \end{bmatrix} =
\begin{bmatrix}0 & 1 \\ -k/m & -c/m \end{bmatrix}
\begin{bmatrix} x_1 \\ x_2 \end{bmatrix} +
\begin{bmatrix} 0 \\ 1/m \end{bmatrix}u$$
Now we use the matrix exponential to find the state transition matrix:
$$\Phi(t) = e^{\mathbf At} = \mathbf{I} + \mathbf At + \frac{(\mathbf At)^2}{2!} + \frac{(\mathbf At)^3}{3!} + ... $$
The first two terms give us
$$\mathbf F = \begin{bmatrix}1 & t \\ -(k/m) t & 1-(c/m) t \end{bmatrix}$$
This may or may not give you enough precision. You can easily check this by computing $\frac{(\mathbf At)^2}{2!}$ for your constants and seeing how much this matrix contributes to the results.
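As a sketch of that check (the constants $m$, $c$, $k$, and $\Delta t$ here are made up for illustration, and SciPy is assumed), we can compare the truncated series against the exact matrix exponential:

```python
import numpy as np
from scipy.linalg import expm

m, c, k, dt = 1., 0.4, 2., 0.1    # made-up constants
A = np.array([[0., 1.],
              [-k/m, -c/m]])
F_truncated = np.eye(2) + A*dt    # first two terms of the series
neglected = (A @ A) * dt**2 / 2.  # the (A*dt)^2/2! term we dropped
print(neglected)
print(expm(A*dt) - F_truncated)   # actual truncation error
```

If the neglected term is small relative to your state uncertainties, the two-term approximation is probably adequate.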
### Linear Time Invariant Theory
*Linear Time Invariant Theory*, also known as LTI System Theory, gives us a way to find $\Phi$ using the inverse Laplace transform. You are either nodding your head now, or completely lost. Don't worry, I will not be using the Laplace transform in this book except in this paragraph, as the computation can be quite difficult. LTI system theory tells us that
$$ \Phi(t) = \mathcal{L}^{-1}[(s\mathbf{I} - \mathbf{A})^{-1}]$$
I have no intention of going into this other than to say that the Laplace transform $\mathcal{L}$ converts a signal into a space $s$ that excludes time, but finding a solution to the equation above is non-trivial. If you are interested, the Wikipedia article on LTI system theory provides an introduction [2]. I mention LTI because you will find some literature using it to design the Kalman filter matrices for difficult problems.
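For the curious, SymPy can carry out this computation for small systems. This sketch (just an illustration, not something we will use again) recovers $\Phi(t)$ for the constant velocity model:

```python
from sympy import Matrix, symbols, eye, inverse_laplace_transform

s, t = symbols('s t', positive=True)
A = Matrix([[0, 1],
            [0, 0]])
resolvent = (s*eye(2) - A).inv()     # (sI - A)^-1
Phi = resolvent.applyfunc(
    lambda e: inverse_laplace_transform(e, s, t))
print(Phi)   # Matrix([[1, t], [0, 1]])
```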
### Numerical Solutions
Finally, there are numerous numerical techniques to find $\mathbf F$. As filters get larger finding analytical solutions becomes very tedious (though packages like SymPy make it easier). C. F. van Loan [3] has developed a technique that finds both $\Phi$ and $\mathbf Q$ numerically. Given the continuous model
$$ \dot x = Ax + Gw$$
where $w$ is white noise with unit variance, van Loan's method computes both $\mathbf F_k$ and $\mathbf Q_k$.
I have implemented van Loan's method in `FilterPy`. You may use it as follows:
```python
import numpy as np
from filterpy.common import van_loan_discretization

A = np.array([[0., 1.], [-1., 0.]])
G = np.array([[0.], [2.]]) # white noise scaling
F, Q = van_loan_discretization(A, G, dt=0.1)
```
In the section *Numeric Integration of Differential Equations* I present alternative methods which are very commonly used in Kalman filtering.
## Design of the Process Noise Matrix
In general the design of the $\mathbf Q$ matrix is among the most difficult aspects of Kalman filter design. This is due to several factors. First, the math requires a good foundation in signal theory. Second, we are trying to model the noise in something for which we have little information. Consider trying to model the process noise for a thrown baseball. We can model it as a sphere moving through the air, but that leaves many unknown factors - the wind, ball rotation and spin decay, the coefficient of drag of a ball with stitches, the effects of air density, and so on. We develop the equations for an exact mathematical solution for a given process model, but since the process model is incomplete the result for $\mathbf Q$ will also be incomplete. This has a lot of ramifications for the behavior of the Kalman filter. If $\mathbf Q$ is too small then the filter will be overconfident in its prediction model and will diverge from the actual solution. If $\mathbf Q$ is too large then the filter will be unduly influenced by the noise in the measurements and perform sub-optimally. In practice we spend a lot of time running simulations and evaluating collected data to try to select an appropriate value for $\mathbf Q$. But let's start by looking at the math.
Let's assume a kinematic system - some system that can be modeled using Newton's equations of motion. We can make a few different assumptions about this process.
We have been using a process model of
$$ \dot{\mathbf x} = \mathbf{Ax} + \mathbf{Bu} + \mathbf{w}$$
where $\mathbf{w}$ is the process noise. Kinematic systems are *continuous* - their inputs and outputs can vary at any arbitrary point in time. However, our Kalman filters are *discrete*. We sample the system at regular intervals. Therefore we must find the discrete representation for the noise term in the equation above. This depends on what assumptions we make about the behavior of the noise. We will consider two different models for the noise.
### Continuous White Noise Model
We model kinematic systems using Newton's equations. We have either used position and velocity, or position, velocity, and acceleration as the models for our systems. There is nothing stopping us from going further - we can model jerk, jounce, snap, and so on. We don't do that normally because adding terms beyond the dynamics of the real system degrades the estimate.
Let's say that we need to model the position, velocity, and acceleration. We can then assume that acceleration is constant for each discrete time step. Of course, there is process noise in the system and so the acceleration is not actually constant. The tracked object will alter the acceleration over time due to external, unmodeled forces. In this section we will assume that the acceleration changes by a continuous time zero-mean white noise $w(t)$. In other words, we are assuming that the acceleration changes by small amounts that over time average to 0 (zero-mean).
Since the noise is changing continuously we will need to integrate to get the discrete noise for the discretization interval that we have chosen. We will not prove it here, but the equation for the discretization of the noise is
$$\mathbf Q = \int_0^{\Delta t} \mathbf F(t)\mathbf{Q_c}\mathbf F^\mathsf{T}(t) dt$$
where $\mathbf{Q_c}$ is the continuous noise. This gives us
$$\Phi = \begin{bmatrix}1 & \Delta t & {\Delta t}^2/2 \\ 0 & 1 & \Delta t\\ 0& 0& 1\end{bmatrix}$$
for the fundamental matrix, and
$$\mathbf{Q_c} = \begin{bmatrix}0&0&0\\0&0&0\\0&0&1\end{bmatrix} \Phi_s$$
for the continuous process noise matrix, where $\Phi_s$ is the spectral density of the white noise.
We could carry out these computations ourselves, but I prefer using SymPy to solve the equation.
```python
import sympy
from sympy import (init_printing, Matrix,MatMul,
integrate, symbols)
init_printing(use_latex='mathjax')
dt, phi = symbols('\Delta{t} \Phi_s')
F_k = Matrix([[1, dt, dt**2/2],
[0, 1, dt],
[0, 0, 1]])
Q_c = Matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 1]])*phi
Q=sympy.integrate(F_k * Q_c * F_k.T, (dt, 0, dt))
# factor phi out of the matrix to make it more readable
Q = Q / phi
sympy.MatMul(Q, phi)
```
$$\left[\begin{matrix}\frac{\Delta{t}^{5}}{20} & \frac{\Delta{t}^{4}}{8} & \frac{\Delta{t}^{3}}{6}\\\frac{\Delta{t}^{4}}{8} & \frac{\Delta{t}^{3}}{3} & \frac{\Delta{t}^{2}}{2}\\\frac{\Delta{t}^{3}}{6} & \frac{\Delta{t}^{2}}{2} & \Delta{t}\end{matrix}\right] \Phi_{s}$$
For completeness, let us compute the discrete process noise for the 0th order and 1st order systems.
```python
F_k = sympy.Matrix([[1]])
Q_c = sympy.Matrix([[phi]])
print('0th order discrete process noise')
sympy.integrate(F_k*Q_c*F_k.T,(dt, 0, dt))
```
0th order discrete process noise
$$\left[\begin{matrix}\Delta{t} \Phi_{s}\end{matrix}\right]$$
```python
F_k = sympy.Matrix([[1, dt],
[0, 1]])
Q_c = sympy.Matrix([[0, 0],
[0, 1]])*phi
Q = sympy.integrate(F_k * Q_c * F_k.T, (dt, 0, dt))
print('1st order discrete process noise')
# factor phi out of the matrix to make it more readable
Q = Q / phi
sympy.MatMul(Q, phi)
```
1st order discrete process noise
$$\left[\begin{matrix}\frac{\Delta{t}^{3}}{3} & \frac{\Delta{t}^{2}}{2}\\\frac{\Delta{t}^{2}}{2} & \Delta{t}\end{matrix}\right] \Phi_{s}$$
### Piecewise White Noise Model
Another model for the noise assumes that the highest order term (say, acceleration) is constant for the duration of each time period, but differs for each time period, and each of these is uncorrelated between time periods. In other words there is a discontinuous jump in acceleration at each time step. This is subtly different than the model above, where we assumed that the last term had a continuously varying noisy signal applied to it.
We will model this as
$$f(x)=Fx+\Gamma w$$
where $\Gamma$ is the *noise gain* of the system, and $w$ is the constant piecewise acceleration (or velocity, or jerk, etc).
Let's start by looking at a first order system. In this case we have the state transition function
$$\mathbf{F} = \begin{bmatrix}1&\Delta t \\ 0& 1\end{bmatrix}$$
In one time period, the change in velocity will be $w(t)\Delta t$, and the change in position will be $w(t)\Delta t^2/2$, giving us
$$\Gamma = \begin{bmatrix}\frac{1}{2}\Delta t^2 \\ \Delta t\end{bmatrix}$$
The covariance of the process noise is then
$$Q = \mathbb E[\Gamma w(t) w(t) \Gamma^\mathsf{T}] = \Gamma\sigma^2_v\Gamma^\mathsf{T}$$.
We can compute that with SymPy as follows
```python
var=symbols('sigma^2_v')
v = Matrix([[dt**2 / 2], [dt]])
Q = v * var * v.T
# factor variance out of the matrix to make it more readable
Q = Q / var
sympy.MatMul(Q, var)
```
$$\left[\begin{matrix}\frac{\Delta{t}^{4}}{4} & \frac{\Delta{t}^{3}}{2}\\\frac{\Delta{t}^{3}}{2} & \Delta{t}^{2}\end{matrix}\right] \sigma^{2}_{v}$$
The second order system proceeds with the same math.
$$\mathbf{F} = \begin{bmatrix}1 & \Delta t & {\Delta t}^2/2 \\ 0 & 1 & \Delta t\\ 0& 0& 1\end{bmatrix}$$
Here we will assume that the white noise is a discrete time Wiener process. This gives us
$$\Gamma = \begin{bmatrix}\frac{1}{2}\Delta t^2 \\ \Delta t\\ 1\end{bmatrix}$$
There is no 'truth' to this model; it is just convenient and provides good results. For example, we could assume that the noise is applied to the jerk at the cost of a more complicated equation.
The covariance of the process noise is then
$$Q = \mathbb E[\Gamma w(t) w(t) \Gamma^\mathsf{T}] = \Gamma\sigma^2_v\Gamma^\mathsf{T}$$.
We can compute that with SymPy as follows
```python
var=symbols('sigma^2_v')
v = Matrix([[dt**2 / 2], [dt], [1]])
Q = v * var * v.T
# factor variance out of the matrix to make it more readable
Q = Q / var
sympy.MatMul(Q, var)
```
$$\left[\begin{matrix}\frac{\Delta{t}^{4}}{4} & \frac{\Delta{t}^{3}}{2} & \frac{\Delta{t}^{2}}{2}\\\frac{\Delta{t}^{3}}{2} & \Delta{t}^{2} & \Delta{t}\\\frac{\Delta{t}^{2}}{2} & \Delta{t} & 1\end{matrix}\right] \sigma^{2}_{v}$$
We cannot say that this model is more or less correct than the continuous model - both are approximations to what is happening to the actual object. Only experience and experiments can guide you to the appropriate model. In practice you will usually find that either model provides reasonable results, but typically one will perform better than the other.
The advantage of the second model is that we can model the noise in terms of $\sigma^2$ which we can describe in terms of the motion and the amount of error we expect. The first model requires us to specify the spectral density, which is not very intuitive, but it handles varying time samples much more easily since the noise is integrated across the time period. However, these are not fixed rules - use whichever model (or a model of your own devising) based on testing how the filter performs and/or your knowledge of the behavior of the physical model.
A good rule of thumb is to set $\sigma$ somewhere from $\frac{1}{2}\Delta a$ to $\Delta a$, where $\Delta a$ is the maximum amount that the acceleration will change between sample periods. In practice we pick a number, run simulations on data, and choose a value that works well.
### Using FilterPy to Compute Q
FilterPy offers several routines to compute the $\mathbf Q$ matrix. The function `Q_continuous_white_noise()` computes $\mathbf Q$ for a given value for $\Delta t$ and the spectral density.
```python
from filterpy.common import Q_continuous_white_noise
from filterpy.common import Q_discrete_white_noise
Q = Q_continuous_white_noise(dim=2, dt=1, spectral_density=1)
print(Q)
```
[[ 0.333 0.5 ]
[ 0.5 1. ]]
```python
Q = Q_continuous_white_noise(dim=3, dt=1, spectral_density=1)
print(Q)
```
[[ 0.05 0.125 0.167]
[ 0.125 0.333 0.5 ]
[ 0.167 0.5 1. ]]
The function `Q_discrete_white_noise()` computes $\mathbf Q$ assuming a piecewise model for the noise.
```python
Q = Q_discrete_white_noise(2, var=1.)
print(Q)
```
[[ 0.25 0.5 ]
[ 0.5 1. ]]
```python
Q = Q_discrete_white_noise(3, var=1.)
print(Q)
```
[[ 0.25 0.5 0.5 ]
[ 0.5 1. 1. ]
[ 0.5 1. 1. ]]
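Tying this back to the rule of thumb for $\sigma$ given at the end of the previous section: a minimal sketch, with a made-up maximum change in acceleration of $0.2\, m/s^2$ between samples:

```python
delta_a = 0.2         # made-up maximum change in acceleration
sigma = delta_a / 2   # rule of thumb: between delta_a/2 and delta_a
Q = Q_discrete_white_noise(2, dt=0.1, var=sigma**2)
print(Q)
```

From there, simulations and collected data should guide the final choice.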
### Simplification of Q
Many treatments use a much simpler form for $\mathbf Q$, setting it to zero except for a noise term in the lower rightmost element. Is this justified? Well, consider the value of $\mathbf Q$ for a small $\Delta t$:
```python
import numpy as np
np.set_printoptions(precision=8)
Q = Q_continuous_white_noise(
dim=3, dt=0.05, spectral_density=1)
print(Q)
np.set_printoptions(precision=3)
```
[[ 0.00000002 0.00000078 0.00002083]
[ 0.00000078 0.00004167 0.00125 ]
[ 0.00002083 0.00125 0.05 ]]
We can see that most of the terms are very small. Recall that the only Kalman filter equation that uses this matrix is
$$ \overline{\mathbf P}=\mathbf{FPF}^\mathsf{T} + \mathbf Q$$
If the values for $\mathbf Q$ are small relative to $\mathbf P$
then it will contribute almost nothing to the computation of $\overline{\mathbf P}$. Setting $\mathbf Q$ to the zero matrix except for the lower right term
$$\mathbf Q=\begin{bmatrix}0&0&0\\0&0&0\\0&0&\sigma^2\end{bmatrix}$$
while not correct, is often a useful approximation. If you do this you will have to perform quite a few studies to guarantee that your filter works in a variety of situations.
If you do this, 'lower right term' means the most rapidly changing term for each variable. If the state is $\mathbf x=\begin{bmatrix}x & \dot x & \ddot{x} & y & \dot{y} & \ddot{y}\end{bmatrix}^\mathsf{T}$ then $\mathbf Q$ will be $6\times 6$; the elements for both $\ddot{x}$ and $\ddot{y}$ will have to be set to non-zero in $\mathbf Q$.
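A minimal sketch of building such a $6\times 6$ matrix, assuming independent, identical noise on each axis (the variance is made up for illustration):

```python
import numpy as np
from scipy.linalg import block_diag

sigma2 = 0.1                  # made-up variance for the highest order terms
q_axis = np.zeros((3, 3))
q_axis[2, 2] = sigma2         # noise only on the acceleration term
Q = block_diag(q_axis, q_axis)
print(Q)
```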
## Numeric Integration of Differential Equations
We've been exposed to several numerical techniques to solve linear differential equations. These include state-space methods, the Laplace transform, and van Loan's method.
These work well for linear ordinary differential equations (ODEs), but do not work well for nonlinear equations. For example, consider trying to predict the position of a rapidly turning car. Cars maneuver by turning the front wheels. This makes them pivot around their rear axle as it moves forward. Therefore the path will be continuously varying and a linear prediction will necessarily produce an incorrect value. If the change in the system is small enough relative to $\Delta t$ this can often produce adequate results, but that will rarely be the case with the nonlinear Kalman filters we will be studying in subsequent chapters.
For these reasons we need to know how to numerically integrate ODEs. This can be a vast topic that requires several books. If you need to explore this topic in depth *Computational Physics in Python* by Dr. Eric Ayars is excellent, and available for free here:
http://phys.csuchico.edu/ayars/312/Handouts/comp-phys-python.pdf
However, I will cover a few simple techniques which will work for a majority of the problems you encounter.
### Euler's Method
Let's say we have the initial value problem
$$ y' = y, \\ y(0) = 1$$
We happen to know the exact answer is $y=e^t$ because we solved it earlier, but for an arbitrary ODE we will not know the exact solution. In general all we know is the derivative of the equation, which is equal to the slope. We also know the initial value: at $t=0$, $y=1$. If we know these two pieces of information we can predict the value at $y(t=1)$ using the slope at $t=0$ and the value of $y(0)$. I've plotted this below.
```python
import matplotlib.pyplot as plt
t = np.linspace(-1, 1, 10)
plt.plot(t, np.exp(t))
t = np.linspace(-1, 1, 2)
plt.plot(t,t+1, ls='--', c='k');
```
You can see that the slope is very close to the curve at $t=0.1$, but far from it
at $t=1$. But let's continue with a step size of 1 for a moment. We can see that at $t=1$ the estimated value of $y$ is 2. Now we can compute the value at $t=2$ by taking the slope of the curve at $t=1$ and adding it to our initial estimate. The slope is computed with $y'=y$, so the slope is 2.
```python
import book_plots
t = np.linspace(-1, 2, 20)
plt.plot(t, np.exp(t))
plt.plot([0, 1, 2], [1, 2, 4], ls='--', c='k')
book_plots.set_labels(x='x', y='y');
```
Here we see the next estimate for y is 4. The errors are getting large quickly, and you might be unimpressed. But 1 is a very large step size. Let's put this algorithm in code, and verify that it works by using a small step size.
```python
def euler(t, tmax, y, dx, step=1.):
    """Integrate dy/dt = dx(t, y) from t to tmax using Euler's method."""
    ys = []
    while t < tmax:
        y = y + step * dx(t, y)
        ys.append(y)
        t += step
    return ys
```
```python
def dx(t, y): return y
print(euler(0, 1, 1, dx, step=1.)[-1])
print(euler(0, 2, 1, dx, step=1.)[-1])
```
2.0
4.0
This looks correct. So now let's plot the result of a much smaller step size.
```python
ys = euler(0, 4, 1, dx, step=0.00001)
plt.subplot(1, 2, 1)
plt.title('Computed')
plt.plot(np.linspace(0, 4, len(ys)), ys)
plt.subplot(1, 2, 2)
t = np.linspace(0, 4, 20)
plt.title('Exact')
plt.plot(t, np.exp(t));
```
```python
print('exact answer=', np.exp(4))
print('euler answer=', ys[-1])
print('difference =', np.exp(4) - ys[-1])
print('iterations =', len(ys))
```
exact answer= 54.5981500331
euler answer= 54.59705808834125
difference = 0.00109194480299
iterations = 400000
Here we see that the error is reasonably small, but it took a very large number of iterations to get three digits of precision. In practice Euler's method is too slow for most problems, and we use more sophisticated methods.
Before we go on, let's formally derive Euler's method, as it is the basis for the more advanced Runge Kutta methods used in the next section. In fact, Euler's method is the simplest form of Runge Kutta.
Here are the first terms of the Taylor expansion of $y$. An infinite expansion would give an exact answer, so $O(h^4)$ denotes the error due to truncating the expansion.
$$y(t_0 + h) = y(t_0) + h y'(t_0) + \frac{1}{2!}h^2 y''(t_0) + \frac{1}{3!}h^3 y'''(t_0) + O(h^4)$$
Here we can see that Euler's method is using only the first two terms of the Taylor expansion. For a sufficiently small step size $h$ each subsequent term is smaller than the previous ones, so we are assured that the estimate will not be too far off from the correct value.
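Because Euler's method truncates after the $h$ term, its global error shrinks roughly in proportion to the step size. A quick sketch using the `euler()` function defined above illustrates this; the ratios are approximate since the loop's endpoint handling introduces some slop:

```python
# halving the step size should roughly halve the error (first-order method)
for step in (0.1, 0.05, 0.025):
    ys = euler(0, 4, 1, dx, step=step)
    print('step={:6.3f}  error={:8.4f}'.format(step, np.exp(4) - ys[-1]))
```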
### Runge Kutta Methods
Runge Kutta integration is the workhorse of numerical integration. There are a vast number of methods in the literature. In practice, using the Runge Kutta algorithm that I present here will solve most any problem you will face. It offers a very good balance of speed, precision, and stability, and it is the 'go to' numerical integration method unless you have a very good reason to choose something different.
Let's dive in. We start with some first-order differential equation

$$\dot{y} = f(y,t)$$

where $f$ is a function that computes the derivative of $y$ at time $t$.
Deriving the method is outside the scope of this book, but the fourth-order Runge Kutta method (RK4) is defined by these equations.
$$y(t+\Delta t) = y(t) + \frac{1}{6}(k_1 + 2k_2 + 2k_3 + k_4) + O(\Delta t^4)$$
$$\begin{aligned}
k_1 &= f(y,t)\Delta t \\
k_2 &= f(y+\frac{1}{2}k_1, t+\frac{1}{2}\Delta t)\Delta t \\
k_3 &= f(y+\frac{1}{2}k_2, t+\frac{1}{2}\Delta t)\Delta t \\
k_4 &= f(y+k_3, t+\Delta t)\Delta t
\end{aligned}
$$
Here is the corresponding code:
```python
def runge_kutta4(y, x, dx, f):
"""computes 4th order Runge-Kutta for dy/dx.
y is the initial value for y
x is the initial value for x
dx is the difference in x (e.g. the time step)
f is a callable function (y, x) that you supply
to compute dy/dx for the specified values.
"""
k1 = dx * f(y, x)
k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
k4 = dx * f(y + k3, x + dx)
return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
```
Let's use this for a simple example. Let
$$\dot{y} = t\sqrt{y(t)}$$
with the initial values
$$\begin{aligned}t_0 &= 0\\y_0 &= y(t_0) = 1\end{aligned}$$
```python
import math
import numpy as np

t = 0.
y = 1.
dt = .1
ys, ts = [], []

def func(y, t):
    return t * math.sqrt(y)

while t <= 10:
    y = runge_kutta4(y, t, dt, func)
    t += dt
    ys.append(y)
    ts.append(t)

# analytic solution of dy/dt = t*sqrt(y) with y(0) = 1
exact = [(t**2 + 4)**2 / 16. for t in ts]
plt.plot(ts, ys)
plt.plot(ts, exact)

error = np.array(exact) - np.array(ys)
print("max error {}".format(max(error)))
```
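To see why RK4 earns its 'workhorse' status, here is a rough comparison against Euler's method on the same problem at the same step size. This reuses `dt` and `func` from the cell above; the exact figures will vary with the floating point accumulation of `t`:

```python
t, y_rk, y_eu = 0., 1., 1.
while t <= 10:
    y_rk = runge_kutta4(y_rk, t, dt, func)
    y_eu = y_eu + dt * func(y_eu, t)  # single Euler step for comparison
    t += dt

exact_final = (t**2 + 4)**2 / 16.
print('RK4 error  ', abs(exact_final - y_rk))
print('Euler error', abs(exact_final - y_eu))
```

RK4's error should be several orders of magnitude smaller than Euler's, at a cost of only four derivative evaluations per step instead of one.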
## Bayesian Filtering
Starting in the Discrete Bayes chapter I used a Bayesian formulation for filtering. Suppose we are tracking an object. We define its *state* at a specific time as its position, velocity, and so on. For example, we might write the state at time $t$ as $\mathbf x_t = \begin{bmatrix}x_t &\dot x_t \end{bmatrix}^\mathsf T$.
When we take a measurement of the object we are measuring the state. Sensors are noisy, so the measurement is corrupted with noise. Clearly though, the measurement is determined by the state. That is, a change in state may change the measurement, but a change in measurement will not change the state.
In filtering our goal is to compute an optimal estimate for a set of states $\mathbf x_{0:t}$ from time 0 to time $t$. If we knew $\mathbf x_{0:t}$ then it would be trivial to compute a set of measurements $\mathbf z_{0:t}$ corresponding to those states. However, we receive a set of measurements $\mathbf z_{0:t}$, and want to compute the corresponding states $\mathbf x_{0:t}$. This is called *statistical inversion* because we are trying to compute the input from the output.
Inversion is a difficult problem because there is typically no unique solution. For a given set of states $\mathbf x_{0:t}$ there is only one possible set of measurements (plus noise), but for a given set of measurements there are many different sets of states that could have led to those measurements.
Recall Bayes Theorem:
$$P(x \mid z) = \frac{P(z \mid x)P(x)}{P(z)}$$
where $P(z \mid x)$ is the *likelihood* of the measurement $z$, $P(x)$ is the *prior* based on our process model, and $P(z)$ is a normalization constant, also called the *evidence*. $P(x \mid z)$ is the *posterior*: the distribution after incorporating the measurement $z$.
This is a *statistical inversion* as it goes from $P(z \mid x)$ to $P(x \mid z)$. The solution to our filtering problem can be expressed as:
$$P(\mathbf x_{0:t} \mid \mathbf z_{0:t}) = \frac{P(\mathbf z_{0:t} \mid \mathbf x_{0:t})P(\mathbf x_{0:t})}{P(\mathbf z_{0:t})}$$
That is all well and good until the next measurement $\mathbf z_{t+1}$ comes in, at which point we need to recompute the entire expression for the range $0:t+1$.
In practice this is intractable because we are trying to compute the posterior distribution $P(\mathbf x_{0:t} \mid \mathbf z_{0:t})$ for the state over the full range of time steps. But do we really care about the probability distribution at the third step (say) when we just received the tenth measurement? Not usually. So we relax our requirements and only compute the distributions for the current time step.
The first simplification is we describe our process (e.g., the motion model for a moving object) as a *Markov chain*. That is, we say that the current state is solely dependent on the previous state and a transition probability $P(\mathbf x_k \mid \mathbf x_{k-1})$, which is just the probability of going from the last state to the current one. We write:
$$\mathbf x_k \sim P(\mathbf x_k \mid \mathbf x_{k-1})$$
The next simplification we make is to define the *measurement model* as depending on the current state $\mathbf x_k$ with the conditional probability of the measurement given the current state: $P(\mathbf z_k \mid \mathbf x_k)$. We write:

$$\mathbf z_k \sim P(\mathbf z_k \mid \mathbf x_k)$$
We have a recurrence now, so we need an initial condition to terminate it. Therefore we say that the initial distribution is the probability of the state $\mathbf x_0$:
$$\mathbf x_0 \sim P(\mathbf x_0)$$
These terms are plugged into Bayes' theorem. If we have the state $\mathbf x_0$ and the first measurement we can estimate $P(\mathbf x_1 \mid \mathbf z_1)$. The motion model creates the prior $P(\mathbf x_2 \mid \mathbf x_1)$. We feed this back into Bayes' theorem to compute $P(\mathbf x_2 \mid \mathbf z_2)$. We continue this predictor-corrector algorithm, recursively computing the state and distribution at time $t$ based solely on the state and distribution at time $t-1$ and the measurement at time $t$.
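A minimal sketch of this recursion for a finite, discrete state space may make the structure concrete. The array names are hypothetical: `transition[i, j]` holds $P(\mathbf x_k = j \mid \mathbf x_{k-1} = i)$ and `likelihood[j]` holds $P(\mathbf z_k \mid \mathbf x_k = j)$:

```python
import numpy as np

def predict(belief, transition):
    # push the belief through the Markov chain to get the prior
    return transition.T @ belief

def update(prior, likelihood):
    # Bayes' theorem: posterior is proportional to likelihood * prior
    posterior = likelihood * prior
    return posterior / posterior.sum()  # normalize (the P(z) term)
```

Each new measurement triggers one predict/update pair; the full history $\mathbf x_{0:t}$ never has to be revisited.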
The details of the mathematics for this computation vary based on the problem. The **Discrete Bayes** and **Univariate Kalman Filter** chapters gave two different formulations which you should have been able to reason through. The univariate Kalman filter assumes a scalar state for which both the process and measurement models are linear and affected by zero-mean, uncorrelated Gaussian noise.
The multivariate Kalman filter makes the same assumptions but for states and measurements that are vectors, not scalars. Dr. Kalman was able to prove that if these assumptions hold true then the Kalman filter is *optimal*. Colloquially this means there is no way to derive more information from the noisy measurements. In the remainder of the book I will present filters that relax the constraints on linearity and Gaussian noise.
Before I go on, a few more words about statistical inversion. As Calvetti and Somersalo write in *Introduction to Bayesian Scientific Computing*, "we adopt the Bayesian point of view: *randomness simply means lack of information*."[4] Our states parameterize physical phenomena that we could in principle measure or compute: velocity, air drag, and so on. We lack enough information to compute or measure their values, so we opt to consider them as random variables. Strictly speaking they are not random; thus this is a subjective position.
They devote a full chapter to this topic. I can spare a paragraph. Bayesian filters are possible because we ascribe statistical properties to unknown parameters. In the case of the Kalman filter we have closed-form solutions to find an optimal estimate. Other filters, such as the discrete Bayes filter or the particle filter which we cover in a later chapter, model the probability in a more ad-hoc, non-optimal manner. The power of our technique comes from treating lack of information as a random variable, describing that random variable as a probability distribution, and then using Bayes Theorem to solve the statistical inference problem.
## Converting the Multivariate Equations to the Univariate Case
The multivariate Kalman filter equations do not resemble the equations for the univariate filter. However, if we use one dimensional states and measurements the equations do reduce to the univariate equations. This section will provide you with a strong intuition into what the Kalman filter equations are actually doing. While reading this section is not required to understand the rest of the book, I recommend reading this section carefully as it should make the rest of the material easier to understand.
Here are the multivariate equations for the prediction.
$$
\begin{aligned}
\mathbf{\bar{x}} &= \mathbf{F x} + \mathbf{B u} \\
\mathbf{\bar{P}} &= \mathbf{FPF}^\mathsf{T} + \mathbf Q
\end{aligned}
$$
For a univariate problem the state $\mathbf x$ only has one variable, so it is a $1\times 1$ matrix. Our motion $\mathbf{u}$ is also a $1\times 1$ matrix. Therefore, $\mathbf{F}$ and $\mathbf B$ must also be $1\times 1$ matrices. That means that they are all scalars, and we can write
$$\bar{x} = Fx + Bu$$
Here the variables are not bold, denoting that they are not matrices or vectors.
Our state transition is simple - the next state is the same as this state, so $F=1$. The same holds for the motion transition, so $B=1$. Thus we have

$$\bar{x} = x + u$$
which is equivalent to the Gaussian equation from the last chapter
$$ \mu = \mu_1+\mu_2$$
Hopefully the general process is clear, so now I will go a bit faster on the rest. We have
$$\mathbf{\bar{P}} = \mathbf{FPF}^\mathsf{T} + \mathbf Q$$
Again, since our state only has one variable, $\mathbf P$ and $\mathbf Q$ must also be $1\times 1$ matrices, which we can treat as scalars, yielding
$$\bar{P} = FPF^\mathsf{T} + Q$$
We already know $F=1$. The transpose of a scalar is the scalar, so $F^\mathsf{T} = 1$. This yields
$$\bar{P} = P + Q$$
which is equivalent to the Gaussian equation of
$$\sigma^2 = \sigma_1^2 + \sigma_2^2$$
This proves that the multivariate prediction equations are performing the same math as the univariate equations for the case of the dimension being 1.
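You can also confirm the equivalence numerically with $1\times 1$ matrices; the values below are arbitrary, chosen only for illustration:

```python
import numpy as np

F = np.array([[1.]])
P = np.array([[4.]])   # sigma_1^2
Q = np.array([[2.]])   # sigma_2^2

multivariate = F @ P @ F.T + Q
univariate = 4. + 2.   # sigma_1^2 + sigma_2^2
assert np.isclose(multivariate[0, 0], univariate)
```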
These are the equations for the update step:
$$
\begin{aligned}
\mathbf{K}&= \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} \\
\textbf{y} &= \mathbf z - \mathbf{H \bar{x}}\\
\mathbf x&=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} \\
\mathbf P&= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}}
\end{aligned}
$$
As above, all of the matrices become scalars. $H$ defines how we convert from a position to a measurement. Both are positions, so there is no conversion, and thus $H=1$. Let's substitute in our known values and convert to scalars in one step. The inverse of a $1\times 1$ matrix is the reciprocal of its value, so we will convert the matrix inversion to division.
$$
\begin{aligned}
K &=\frac{\bar{P}}{\bar{P} + R} \\
y &= z - \bar{x}\\
x &=\bar{x}+Ky \\
P &= (1-K)\bar{P}
\end{aligned}
$$
Before we continue with the proof, I want you to look at those equations to recognize what a simple concept these equations implement. The residual $y$ is nothing more than the measurement minus the prediction. The gain $K$ is scaled based on how certain we are about the last prediction vs how certain we are about the measurement. We choose a new state $x$ based on the old value of $x$ plus the scaled value of the residual. Finally, we update the uncertainty based on how certain we are about the measurement. Algorithmically this should sound exactly like what we did in the last chapter.
Let's finish off the algebra to prove this. Recall that the univariate equations for the update step are:
$$
\begin{aligned}
\mu &=\frac{\sigma_1^2 \mu_2 + \sigma_2^2 \mu_1} {\sigma_1^2 + \sigma_2^2}, \\
\sigma^2 &= \frac{1}{\frac{1}{\sigma_1^2} + \frac{1}{\sigma_2^2}}
\end{aligned}
$$
Here we will say that $\mu_1$ is the state $x$, and $\mu_2$ is the measurement $z$. Thus it follows that $\sigma_1^2$ is the state uncertainty $P$, and $\sigma_2^2$ is the measurement noise $R$. Let's substitute those in.
$$ \mu = \frac{Pz + Rx}{P+R} \\
\sigma^2 = \frac{1}{\frac{1}{P} + \frac{1}{R}}
$$
I will handle $\mu$ first. The corresponding equation in the multivariate case is
$$
\begin{aligned}
x &= x + Ky \\
&= x + \frac{P}{P+R}(z-x) \\
&= \frac{P+R}{P+R}x + \frac{Pz - Px}{P+R} \\
&= \frac{Px + Rx + Pz - Px}{P+R} \\
&= \frac{Pz + Rx}{P+R}
\end{aligned}
$$
Now let's look at $\sigma^2$. The corresponding equation in the multivariate case is
$$
\begin{aligned}
P &= (1-K)P \\
&= (1-\frac{P}{P+R})P \\
&= (\frac{P+R}{P+R}-\frac{P}{P+R})P \\
&= (\frac{P+R-P}{P+R})P \\
&= \frac{RP}{P+R}\\
&= \frac{1}{\frac{P+R}{RP}}\\
&= \frac{1}{\frac{R}{RP} + \frac{P}{RP}} \\
&= \frac{1}{\frac{1}{P} + \frac{1}{R}}
\quad\blacksquare
\end{aligned}
$$
We have proven that the multivariate equations are equivalent to the univariate equations when we only have one state variable. I'll close this section by recognizing one quibble - I hand waved my assertion that $H=1$ and $F=1$. In general we know this is not true. For example, a digital thermometer may provide measurement in volts, and we need to convert that to temperature, and we use $H$ to do that conversion. I left that issue out to keep the explanation as simple and streamlined as possible. It is very straightforward to add that generalization to the equations above, redo the algebra, and still have the same results.
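As a quick sanity check on the algebra, the scalar update equations and the Gaussian multiplication form agree numerically; the values are arbitrary, chosen only for illustration:

```python
P_bar, R, x_bar, z = 3., 2., 10., 11.  # arbitrary prior, noise, estimate, measurement

# multivariate form reduced to scalars
K = P_bar / (P_bar + R)
x = x_bar + K * (z - x_bar)
P = (1 - K) * P_bar

# univariate Gaussian multiplication form
mu = (P_bar*z + R*x_bar) / (P_bar + R)
var = 1. / (1./P_bar + 1./R)

assert abs(x - mu) < 1e-12 and abs(P - var) < 1e-12
```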
## Converting Kalman Filter to a g-h Filter
I've stated that the Kalman filter is a form of the g-h filter. It just takes some algebra to prove it. It's more straightforward to do with the one dimensional case, so I will do that. Recall
$$
\mu_{x}=\frac{\sigma_1^2 \mu_2 + \sigma_2^2 \mu_1} {\sigma_1^2 + \sigma_2^2}
$$
which I will make more friendly for our eyes as:
$$
\mu_{x}=\frac{ya + xb} {a+b}
$$
We can easily put this into the g-h form with the following algebra
$$
\begin{aligned}
\mu_{x}&=(x-x) + \frac{ya + xb} {a+b} \\
\mu_{x}&=x-\frac{a+b}{a+b}x + \frac{ya + xb} {a+b} \\
\mu_{x}&=x +\frac{-x(a+b) + xb+ya}{a+b} \\
\mu_{x}&=x+ \frac{-xa+ya}{a+b} \\
\mu_{x}&=x+ \frac{a}{a+b}(y-x)\\
\end{aligned}
$$
We are almost done, but recall that the variance of the estimate is given by

$${\sigma_{x}^2} = \frac{1}{ \frac{1}{\sigma_1^2} + \frac{1}{\sigma_2^2}} = \frac{1}{ \frac{1}{a} + \frac{1}{b}}$$
We can incorporate that term into our equation above by observing that
$$
\begin{aligned}
\frac{a}{a+b} &= \frac{a/a}{(a+b)/a} = \frac{1}{(a+b)/a} \\
&= \frac{1}{1 + \frac{b}{a}} = \frac{1}{\frac{b}{b} + \frac{b}{a}} \\
&= \frac{1}{b}\frac{1}{\frac{1}{b} + \frac{1}{a}} \\
&= \frac{\sigma^2_{x'}}{b}
\end{aligned}
$$
We can tie all of this together with
$$
\begin{aligned}
\mu_{x}&=x+ \frac{a}{a+b}(y-x) \\
&= x + \frac{\sigma^2_{x'}}{b}(y-x) \\
&= x + g_n(y-x)
\end{aligned}
$$
where
$$g_n = \frac{\sigma^2_{x}}{\sigma^2_{y}}$$
The end result is multiplying the residual between the measurement and the prior estimate by a constant and adding it to our previous value, which is the $g$ equation for the g-h filter. $g$ is the variance of the new estimate divided by the variance of the measurement. Of course in this case $g$ is not a constant, as it varies with each time step as the variance changes. We can also derive the formula for $h$ in the same way. It is not a particularly illuminating derivation and I will skip it. The end result is
$$h_n = \frac{\mathrm{COV}(x,\dot x)}{\sigma^2_{y}}$$
The takeaway point is that $g$ and $h$ are specified fully by the variance and covariances of the measurement and predictions at time $n$. In other words, we are picking a point between the measurement and prediction by a scale factor determined by the quality of each of those two inputs.
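A short sketch makes the time-varying nature of $g$ visible. Running a bare-bones scalar Kalman filter (the noise values are assumed for illustration) shows the gain shrinking as the estimate's variance converges:

```python
P, R, Q = 500., 5., 0.1  # assumed initial variance, measurement noise, process noise

for k in range(5):
    P = P + Q             # predict: variance grows by the process noise
    g = P / (P + R)       # the Kalman gain plays the role of g at this step
    P = (1 - g) * P       # update: variance shrinks
    print('step {}: g = {:.3f}'.format(k, g))
```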
## References
* [1] *Matrix Exponential* http://en.wikipedia.org/wiki/Matrix_exponential
* [2] *LTI System Theory* http://en.wikipedia.org/wiki/LTI_system_theory
* [3] C.F. van Loan, "Computing Integrals Involving the Matrix Exponential," IEEE Transactions on Automatic Control, June 1978.
* [4] Calvetti, D. and Somersalo, E., "Introduction to Bayesian Scientific Computing: Ten Lectures on Subjective Computing," Springer, 2007.
|
/-
Copyright (c) 2022 Arthur Paulino. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Arthur Paulino, Gabriel Ebner
-/
import Lean
/-!
Defines the `use` tactic.
TODO: This does not match the full functionality of `use` from mathlib3.
See failing tests in `test/Use.lean`.
-/
open Lean.Elab.Tactic
namespace Mathlib.Tactic
/--
`use e₁, e₂, ⋯` applies the tactic `refine ⟨e₁, e₂, ⋯, ?_⟩` and then tries
to close the goal with `with_reducible rfl` (which may or may not close it). It's
useful, for example, to advance on existential goals, for which terms as
well as proofs of some claims about them are expected.
Examples:
```lean
example : ∃ x : Nat, x = x := by use 42
example : ∃ x : Nat, ∃ y : Nat, x = y := by use 42, 42
example : ∃ x : String × String, x.1 = x.2 := by use ("forty-two", "forty-two")
```
-/
-- TODO extend examples in doc-string once mathlib3 parity is achieved.
macro "use " es:term,+ : tactic =>
`(tactic|(refine ⟨$es,*, ?_⟩; try with_reducible rfl))
|