{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiWayIf #-}
{-# LANGUAGE TupleSections #-}
{-|
Module : AutoBench.Internal.IO
Description : AutoBench's internal IO.
Copyright : (c) 2018 Martin Handley
License : BSD-style
Maintainer : [email protected]
Stability : Experimental
Portability : GHC
This module deals with all AutoBench's internal IO, including:
* System saving to/loading from files;
* Generating/compiling/executing benchmarking files;
* Cleaning up temporary files created for/during benchmarking;
-}
{-
----------------------------------------------------------------------------
<TO-DO>:
----------------------------------------------------------------------------
- generateBenchmarks error handling?
- It would be nice for the generated benchmarking file to be nicely
formatted;
- Add more validation to generateBenchmarkingReport;
- Handle CTRL-C?
-}
module AutoBench.Internal.IO
(
-- * IO for benchmarking files
generateBenchmarkingFile -- Generate a benchmarking file to benchmark all the test programs in a given test suite.
, generateTestReport -- Generate a 'TestReport' that summarises the system's testing phase.
, compileBenchmarkingFile -- Compile benchmarking file using zero or more user-specified compiler flags.
, deleteAllFiles -- Delete all files created by the system.
, deleteBenchmarkingFiles -- Delete all files created for/during the benchmarking phase.
, deleteTemporarySystemFiles -- Delete temporary system files created for/during the benchmarking phase.
-- * Helper functions
, anyKeyExit -- Press any key to exit.
, discoverInputFiles -- Discover potential input files in the working directory.
, execute -- Execute a file, capturing its output to STDOUT and printing it to the command line.
, generateBenchmarkingFilename -- Generate a valid filename for the benchmarking file from the filename of the user input file.
, nestPutStr -- Output documents at a specific level of nesting using 'putStr'.
, nestPutStrLn -- Output documents at a specific level of nesting using 'putStrLn'.
, spacer -- Output @n@ line spaces to the command line.
) where
import Control.Exception (catch)
import Control.Exception.Base (throwIO)
import Control.Monad (unless, void)
import Control.Monad.Catch (throwM)
import Criterion.IO (readJSONReports)
import qualified Data.ByteString as BS
import qualified Data.ByteString.Char8 as C
import Data.List ( groupBy, isInfixOf, partition, sort
, sortBy )
import qualified Data.Map as Map
import Data.Ord (comparing)
import qualified Data.Vector as V
import qualified DynFlags as GHC
import qualified GHC as GHC
import qualified GHC.Paths as GHC
import Statistics.Types (estPoint)
import System.Directory ( doesFileExist, getDirectoryContents
, removeFile )
import System.FilePath.Posix ( dropExtension, takeBaseName
, takeDirectory, takeExtension )
import System.IO (Handle, hFlush, stdout)
import System.IO.Error (isDoesNotExistError)
import qualified Text.PrettyPrint.HughesPJ as PP
import qualified Text.Megaparsec as MP
import System.Process
( ProcessHandle
, StdStream(..)
, createProcess
, getProcessExitCode
, proc
, std_out
)
import Criterion.Types
( Report
, anMean
, anOutlierVar
, anRegress
, anStdDev
, ovEffect
, ovFraction
, regCoeffs
, regResponder
, reportAnalysis
, reportName
, reportMeasured
)
import AutoBench.Internal.AbstractSyntax (Id, ModuleName, prettyPrint, qualIdt)
import AutoBench.Internal.UserIO (printGoodbyeMessage)
import AutoBench.Internal.Utils (Parser, allEq, integer, symbol)
import AutoBench.Internal.Types
( BenchReport(..)
, DataOpts(..)
, DataSize(..)
, InputError(..)
, SimpleReport(..)
, SystemError(..)
, TestReport(..)
, TestSuite(..)
, UserInputs(..)
)
-- * IO for benchmarking files:
-- | Generate a benchmarking file to benchmark all the test programs in a
-- given test suite. This includes generating/supplying necessary test data.
--
-- Note that the names of test programs and sizes of test inputs are encoded
-- into each benchmark's (i.e., test case's) title to keep track of
-- measurements. This allows for simple but necessary validation checks to be
-- performed on the JSON report file created by Criterion to ensure that test
-- results agree with test inputs. (The checks are done by
-- 'generateBenchmarkingReport'.)
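--
-- As a rough illustration (module, program, and test suite names here are
-- hypothetical), for a user file @Input.hs@ with unary test programs @p1@ and
-- @p2@ and a test suite @ts@ that uses generated test data and 'nf' results,
-- the generated file looks something like:
--
-- > module Main (main) where
-- >
-- > import qualified AutoBench.Internal.Benchmarking
-- > import qualified Input
-- >
-- > main :: IO ()
-- > main = AutoBench.Internal.Benchmarking.runBenchmarks
-- >   (AutoBench.Internal.Benchmarking.genBenchmarksGenNfUn
-- >     [("Input.p1", Input.p1), ("Input.p2", Input.p2)] Input.ts)
-- >   Input.ts
--
-- (In practice the @main@ equation is emitted on a single line; see the
-- <TO-DO> note above about formatting.)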
generateBenchmarkingFile
:: FilePath -- ^ Filepath to save benchmarking file.
-> ModuleName -- ^ User input file's module name.
-> UserInputs -- ^ Parsed/categorised user inputs (to cross-reference).
-> Id -- ^ The chosen test suite's identifier.
-> TestSuite -- ^ The chosen test suite.
-> IO ()
generateBenchmarkingFile fp mn inps tsIdt ts = do
-- Generate the function call that will in turn generate the appropriate
-- benchmarks.
gFunc <- genFunc gen nf unary
-- Generate file contents and write to file.
writeFile fp (PP.render $ contents gFunc)
where
---------------------------------------------------------------------------
-- ** CHANGING THE CONTENTS WILL BREAK THE SYSTEM **
---------------------------------------------------------------------------
-- Note: imports/definitions are qualified to avoid ambiguity.
contents gFunc = PP.vcat
[ PP.text ""
, PP.text "module Main (main) where"
, PP.text ""
, PP.text "import qualified AutoBench.Internal.Benchmarking" -- Import all generation functions.
, PP.text "import qualified" PP.<+> PP.text mn -- Import user input file.
, PP.text ""
, PP.text "main :: IO ()" -- Generate a main function.
, PP.text "main = AutoBench.Internal.Benchmarking.runBenchmarks" -- Run benchmarks.
PP.<+> PP.char '(' PP.<> gFunc PP.<> PP.char ')' -- Generate benchmarks.
PP.<+> PP.text (prettyPrint . qualIdt mn $ tsIdt) -- Identifier of chosen test suite (for run cfg).
]
---------------------------------------------------------------------------
-- ** CHANGING THE NAMES OF THESE FUNCTIONS WILL BREAK THE SYSTEM **
---------------------------------------------------------------------------
-- Important note: each function below encodes the names of test programs
-- and input sizes into the titles of benchmark test cases.
---------------------------------------------------------------------------
-- Generate benchmarking function call.
-- genFunc gen? nf? unary?
genFunc :: Bool -> Bool -> Bool -> IO PP.Doc
-- genBenchmarksGenNfUn:
-- Generated test data, results to nf, unary test programs.
genFunc True True True = return
(genGenFunc "AutoBench.Internal.Benchmarking.genBenchmarksGenNfUn")
-- genBenchmarksGenWhnfUn:
-- Generated test data, results to whnf, unary test programs.
genFunc True False True = return
(genGenFunc "AutoBench.Internal.Benchmarking.genBenchmarksGenWhnfUn")
-- genBenchmarksGenNfBin:
-- Generated test data, results to nf, binary test programs.
genFunc True True False = return
(genGenFunc "AutoBench.Internal.Benchmarking.genBenchmarksGenNfBin")
-- genBenchmarksGenWhnfBin:
-- Generated test data, results to whnf, binary test programs.
genFunc True False False = return
(genGenFunc "AutoBench.Internal.Benchmarking.genBenchmarksGenWhnfBin")
-- genBenchmarksManNfUn:
-- User-specified test data, results to nf, unary test programs.
genFunc False True True =
genManFunc "AutoBench.Internal.Benchmarking.genBenchmarksManNfUn"
-- genBenchmarksManWhnfUn:
-- User-specified test data, results to whnf, unary test programs.
genFunc False False True =
genManFunc "AutoBench.Internal.Benchmarking.genBenchmarksManWhnfUn"
-- genBenchmarksManNfBin:
-- User-specified test data, results to nf, binary test programs.
genFunc False True False =
genManFunc "AutoBench.Internal.Benchmarking.genBenchmarksManNfBin"
-- genBenchmarksManWhnfBin:
-- User-specified test data, results to whnf, binary test programs.
genFunc False False False =
genManFunc "AutoBench.Internal.Benchmarking.genBenchmarksManWhnfBin"
-- Generate function call for benchmarks requiring automatically generated
-- test data.
genGenFunc :: Id -> PP.Doc
genGenFunc func = PP.hsep $
[ PP.text func
, ppList $ fmap ppTuple qualProgs
, PP.text (prettyPrint . qualIdt mn $ tsIdt)
]
-- Generate function call for benchmarks using user-specified test data.
genManFunc :: Id -> IO PP.Doc
genManFunc func = do
dat <- getManualDatIdt (_dataOpts ts)
return $ PP.hsep
[ PP.text func
, ppList $ fmap ppTuple qualProgs
, PP.text (prettyPrint . qualIdt mn $ tsIdt)
, PP.text (prettyPrint . qualIdt mn $ dat)
]
-- Pretty print an (identifier, program) tuple.
ppTuple :: Id -> PP.Doc
ppTuple idt = PP.char '('
PP.<> PP.text (show idt)
PP.<> PP.text ", "
PP.<> PP.text idt
PP.<> PP.char ')'
-- Pretty print a comma-separated list.
ppList :: [PP.Doc] -> PP.Doc
ppList docs = PP.hcat $
PP.char '[' : (PP.punctuate (PP.text ", ") docs) ++ [PP.char ']']
-- Helpers
-- Classifiers:
unary = head (_progs ts) `elem` fmap fst (_unaryFuns inps) -- Unary test programs?
nf = _nf ts -- NF test results?
gen = case _dataOpts ts of -- Generate test data?
Manual{} -> False
Gen{} -> True
-- All test programs are qualified with the module name.
qualProgs = fmap (prettyPrint . qualIdt mn) (_progs ts)
-- Get the identifier of user-specified test data from 'DataOpts'.
-- The 'throwM' error handling here is questionable, but it's better than a partial function.
getManualDatIdt :: DataOpts -> IO Id
getManualDatIdt (Manual s) = return s
getManualDatIdt Gen{} =
throwM (InternalErr $ "generateBenchmarks: unexpected 'Gen' setting.")
-- | Use GHC to compile the benchmarking file. Compile using the flags
-- specified in the 'TestSuite' used to generate the benchmarking file.
-- Includes the location of the user input file as a source directory in case
-- it is not the working directory. Any invalid compiler flags are returned
-- in case they affect test results.
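--
-- A typical call (filenames and flags here are illustrative only) might be:
--
-- > invalidFlags <- compileBenchmarkingFile "./BenchInput.hs" "./Input.hs" ["-O2"]
--
-- which compiles @./BenchInput.hs@ with @-O2@, adds the directory of
-- @./Input.hs@ to GHC's import paths, and returns any flags GHC did not
-- recognise.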
compileBenchmarkingFile
:: FilePath -- ^ Benchmarking filepath.
-> FilePath -- ^ User input filepath.
-> [String] -- ^ GHC compiler flags.
-> IO [String] -- ^ Invalid compiler flags.
compileBenchmarkingFile benchFP userFP flags = do
(success, invalidFlags) <- GHC.runGhc (Just GHC.libdir) $ do
dflags <- GHC.getSessionDynFlags
-- Add flags specified in 'TestSuite's '_ghcFlags' list.
(dflags', invalidFlags, _) <- GHC.parseDynamicFlagsCmdLine dflags (GHC.noLoc <$> flags)
-- Include location of input file in import paths.
let dflags'' = dflags' { GHC.importPaths = GHC.importPaths dflags ++ [takeDirectory userFP] }
void $ GHC.setSessionDynFlags dflags''
target <- GHC.guessTarget benchFP Nothing
GHC.setTargets [target]
success <- GHC.succeeded <$> GHC.load GHC.LoadAllTargets
return (success, fmap GHC.unLoc invalidFlags)
-- Throw an error if compilation fails.
unless success (throwIO $ FileErr "Compilation failed.")
-- Notify user of any invalid flags in case they affect test results.
return invalidFlags
-- | Delete all files created for/during the benchmarking phase
-- of testing, including:
--
-- * Benchmarking Haskell module;
-- * Benchmarking binary;
-- * *.o, *.hi files;
-- * Temporary system files e.g., Criterion JSON report file.
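--
-- For instance (filenames here are purely illustrative), with a user file
-- @./Input.hs@, a benchmarking file @./BenchInput.hs@, and a temporary
-- Criterion report @./report.json@, this would attempt to remove
-- @./Input.o@, @./Input.hi@, @./BenchInput.hs@, @./BenchInput@,
-- @./BenchInput.o@, @./BenchInput.hi@, and @./report.json@, silently
-- skipping any that do not exist.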
deleteAllFiles :: FilePath -> FilePath -> [FilePath] -> IO ()
deleteAllFiles fBench fUser sysTmps =
mapM_ removeIfExists (fUsers ++ fBenchs ++ sysTmps)
where
fUsers = fmap (dropExtension fUser ++) exts
fBench' = dropExtension fBench
fBenchs = fBench : fBench' : fmap (fBench' ++ ) exts
exts = [".o", ".hi"]
removeIfExists fp = removeFile fp `catch` handleExists
where handleExists e | isDoesNotExistError e = return ()
| otherwise = throwIO e
-- | Delete files created for/during the benchmarking phase of testing,
-- including:
--
-- * Benchmarking Haskell module;
-- * Benchmarking binary;
-- * *.o, *.hi files;
deleteBenchmarkingFiles :: FilePath -> FilePath -> IO ()
deleteBenchmarkingFiles fBench fUser =
mapM_ removeIfExists (fUsers ++ fBenchs)
where
fUsers = fmap (dropExtension fUser ++) exts
fBench' = dropExtension fBench
fBenchs = fBench : fBench' : fmap (fBench' ++ ) exts
exts = [".o", ".hi"]
removeIfExists fp = removeFile fp `catch` handleExists
where handleExists e | isDoesNotExistError e = return ()
| otherwise = throwIO e
-- | Delete temporary files created for/during the benchmarking phase
-- of testing, e.g., Criterion JSON report file.
deleteTemporarySystemFiles :: [FilePath] -> IO ()
deleteTemporarySystemFiles systemTemps =
mapM_ removeIfExists systemTemps
where
removeIfExists fp = removeFile fp `catch` handleExists
where handleExists e | isDoesNotExistError e = return ()
| otherwise = throwIO e
-- | Generate a test report to summarise the system's testing phase. This
-- includes parsing the JSON benchmark report file created by Criterion
-- in order to generate a 'BenchReport': see 'generateBenchmarkingReport'.
generateTestReport
:: ModuleName -- ^ Module name of user input file.
-> TestSuite -- ^ TestSuite used to generate benchmarking file.
-> FilePath -- ^ Filepath of Criterion's JSON report.
-> Bool -- ^ Whether test programs are semantically equal according to QuickCheck testing.
-> IO TestReport
generateTestReport mn ts fp eql = do
-- Generate benchmarking report.
benchRep <- generateBenchmarkingReport mn ts fp
return TestReport
{ -- Copy 'TestSuite' settings:
_tProgs = sort $ fmap (prettyPrint . qualIdt mn) (_progs ts)
, _tDataOpts = _dataOpts ts
, _tNf = _nf ts
, _tGhcFlags = _ghcFlags ts
-- Other test results:
, _eql = eql
, _br = benchRep
}
-- | Parse a Criterion JSON report file and use the parsed '[Report]'s to
-- generate a 'BenchReport' that summarises the benchmarking phase of testing.
-- The 'BenchReport' includes a 'SimpleReport' for each test case and baseline
-- measurements, if applicable.
--
-- Some background information:
-- When generating the benchmarking file using 'generateBenchmarkingFile',
-- the names of test programs and input sizes are encoded into the titles of
-- benchmarks. The 'Report' titles are decoded here and checked against the
-- settings of the 'TestSuite' used to generate the benchmarks initially.
generateBenchmarkingReport -- <TO-DO>: Check input size against the 'UserInputs' data structure.
:: ModuleName -- ^ Module name of user input file.
-> TestSuite -- ^ TestSuite used to generate benchmarking file. -- <TO-DO>: Are these checks sufficient?
-> FilePath -- ^ Filepath of Criterion's JSON report.
-> IO BenchReport
generateBenchmarkingReport mn ts fp = do
-- Check file exists.
exists <- doesFileExist fp
unless exists (throwIO $ FileErr $ "Cannot locate Criterion report: " ++ fp)
-- Generate 'BenchReport' largely from Criterion's JSON results.
-- Parse Criterion JSON report.
readJSONReports fp >>= \case
-- Parse error.
Left err -> throwIO (FileErr $ "Invalid Criterion report: " ++ err)
-- Parsed 'ReportFileContents', only care about 'Report's.
Right (_, _, reps) ->
-- Reports are organised differently depending on whether baseline
-- measurements were taken, so separate out the baseline measurements first.
let (bls, nonBls) = partition (("Baseline for" `isInfixOf`) . reportName) reps
in case bls of
-- If there aren't any baseline measurements, all reports are
-- test program measurements, with titles such as
-- "Input Size 5/p1", "Input Size 5/p2", "Input Size 5/p3" for unary and
-- "Input Sizes (5, 5)/p1", "Input Sizes (5, 5)/p2" for binary test programs.
[] -> case noBaselines reps of -- Use 'parseRepName' to parse titles to
-- (Id, DataSize) tuples.
Nothing -> throwIO $ FileErr $ "Incompatible Criterion report."
Just xs -> return $ convertReps [] (zip reps xs)
-- If baseline measurements have been taken, then reports relating
-- to test program measurements have titles such as:
-- With Baseline/Input Size 5/p1 etc.
-- And baseline measurements have titles such as:
-- With Baseline/Baseline Measurement for Input Size 5.
_ -> case withBaselines bls nonBls of
-- Use 'parseRepName' to parse titles to (Id, DataSize) tuples
-- and 'parseBaseline' to parse baseline measurements to same format.
Nothing -> throwIO $ FileErr $ "Incompatible Criterion report."
Just (nBls, nNonBls) ->
return $ convertReps (zip bls nBls) (zip nonBls nNonBls)
where
-- Qualified '_progs' list
progs = sort $ fmap (prettyPrint . qualIdt mn) (_progs ts)
-- The overall idea is that the titles of the Criterion reports encode
-- the names of test programs and the sizes of test inputs. The titles
-- are decoded and the parsed data is checked against the settings of the
-- 'TestSuite' used to generate the benchmarks in the first place. This is
-- perhaps a little over-cautious, but I think it's worth it.
-- Parse the report titles of the baseline and test program measurements.
withBaselines
:: [Report] -- Baseline measurements.
-> [Report] -- Test program measurements.
-> Maybe ([(Id, DataSize)], [(Id, DataSize)])
withBaselines _ [] = Nothing
withBaselines bls nonBls = do
-- Parse titles of baseline measurements. dropWhile (/= 'I') "With
-- Baseline/Baseline Measurement for Input Size 5" ===> "Input Size 5",
-- then can use 'parseBaseline'.
nBls <- sequence $ fmap (MP.parseMaybe parseBaseline .
dropWhile (/= 'I') . reportName) bls
-- Parse titles of test program measurements.
-- dropWhile (/= 'I') "With Baseline/Input Size 5/p1" ===> "Input Size
-- 5/p1", then can use 'parseRepName'.
nNonBls <- sequence $ fmap (MP.parseMaybe parseRepName .
dropWhile (/= 'I') . reportName) nonBls
-- Group parse results relating to the same test program by identifier.
let nNonBlss = groupBy (\x1 x2 -> fst x1 == fst x2) $
sortBy (comparing fst) nNonBls
-- The size range of test data for each test program.
sizes = fmap (sort . fmap snd) nNonBlss
-- Validation checks:
if | not (allEq $ sort (fmap snd nBls) : sizes) -> Nothing -- (1) Make sure same number of measurements for each program
-- and all have same input sizes.
| not $ (sort $ fmap (fst . head) nNonBlss) == progs -> Nothing -- (2) Make sure test programs match those in the 'TestSuite's '_progs' list.
-- (3) <TO-DO>: Some form of input size check against 'UserInputs' data structure.
| otherwise -> Just (nBls, nNonBls)
-- Parse the report titles of just the test program measurements.
noBaselines :: [Report] -> Maybe [(Id, DataSize)]
noBaselines [] = Nothing
noBaselines reps = do
-- Parse titles of test program measurements.
xs <- sequence $ fmap (MP.parseMaybe parseRepName . reportName) reps
-- Group parse results relating to the same test program.
let xss = groupBy (\x1 x2 -> fst x1 == fst x2) $ sortBy (comparing fst) xs
-- The size range of test data for each test program.
sizes = fmap (sort . fmap snd) xss
-- Validation checks:
if | not (allEq sizes) -> Nothing -- (1) Make sure same number of measurements for each program
-- and all have same input sizes.
| not $ (sort $ fmap (fst . head) xss) == progs -> Nothing -- (2) Make sure test programs match those in the 'TestSuite's '_progs' list.
-- (3) <TO-DO>: Some form of input size check against 'UserInputs' data structure.
| otherwise -> Just xs
-- Convert a set of Criterion 'Report's from the same test into a
-- 'BenchReport' by generating a 'SimpleReport' for each test case
-- and baseline measurement. Copy over some settings from the 'TestSuite'
-- used to generate the benchmarks initially.
convertReps
:: [(Report, (Id, DataSize))] -- Baseline measurements.
-> [(Report, (Id, DataSize))] -- Test program measurements.
-> BenchReport
convertReps bls nonBls =
BenchReport
{ -- Generate test program 'SimpleReport's.
_reports = fmap (fmap $ uncurry toSimpleReport) .
-- Group by test program's identifier.
groupBy (\(_, (idt1, _)) (_, (idt2, _)) -> idt1 == idt2) .
-- Sort by test program's identifier.
sortBy (\(_, (idt1, _)) (_, (idt2, _)) -> compare idt1 idt2) $ nonBls
-- Generate 'SimpleReport's for baseline measurements.
, _baselines = fmap (uncurry toSimpleReport) $ sortBy (comparing snd) bls
}
where
-- Convert a Criterion 'Report' to a 'SimpleReport' for a given
-- (test program identifier, input size).
toSimpleReport :: Report -> (Id, DataSize) -> SimpleReport
toSimpleReport rep (idt, size) =
SimpleReport
{ _name = idt
, _size = size
, _runtime = getRegressTime -- Use the runtime predicted by linear regression, /not mean/.
-- ** But fall back on mean if something goes wrong **.
-- Note: Criterion uses a large number of samples to calculate its statistics.
-- Each sample itself is a number of iterations, but then the measurements are
-- standardised, so length here should work(?)
, _samples = V.length (reportMeasured rep)
, _stdDev = estPoint . anStdDev . reportAnalysis $ rep
, _outVarEff = ovEffect . anOutlierVar . reportAnalysis $ rep
, _outVarFrac = ovFraction . anOutlierVar . reportAnalysis $ rep
}
where
-- Lookup the runtime predicted by linear regression.
getRegressTime = case filter (\reg -> regResponder reg == "time")
(anRegress $ reportAnalysis rep) of
[x] -> case estPoint <$> Map.lookup "iters" (regCoeffs x) of
Just d -> d
-- Fall back on mean.
Nothing -> mean
-- Fall back on mean.
_ -> mean
where mean = estPoint . anMean . reportAnalysis $ rep
-- Parser helpers to decode report titles: ------------------------------
-- Parse a report's title into the corresponding test
-- program's identifier and input size.
parseRepName :: Parser (Id, DataSize)
parseRepName = do
-- E.g., "Input Sizes (5, 5)/p1"
-- E.g., "Input Size 5/p2"
void $ (symbol "Input Sizes") MP.<|> (symbol "Input Size")
ds <- parseDataSize
void $ symbol "/"
idt <- MP.manyTill MP.anySingle MP.eof
return (idt, ds)
-- Parse the encoded baseline size from the name of a Criterion report.
-- E.g., Input Sizes (5, 5) or Input Size 5
parseBaseline :: Parser (Id, DataSize)
parseBaseline = do
void $ (symbol "Input Sizes") MP.<|> (symbol "Input Size")
("Baseline measurements",) <$> parseDataSize -- <TO-DO>: This is too important to be a string.
-- Parse the encoded data size from the name of a Criterion report.
-- E.g., (5, 5) or 5.
parseDataSize :: Parser DataSize
parseDataSize = (do
void $ symbol "("
n1 <- integer
void $ symbol ","
n2 <- integer
void $ symbol ")"
return (SizeBin n1 n2)) MP.<|> (SizeUn <$> integer)
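-- As a rough sanity check (illustrative only, and assuming 'symbol' and
-- 'integer' behave like standard Megaparsec lexeme parsers), the parsers
-- above should decode titles as follows, where the program names are
-- hypothetical:
--
-- > MP.parseMaybe parseRepName "Input Size 100/Input.p1"
-- >   == Just ("Input.p1", SizeUn 100)
-- > MP.parseMaybe parseRepName "Input Sizes (5, 5)/Input.p2"
-- >   == Just ("Input.p2", SizeBin 5 5)
-- > MP.parseMaybe parseBaseline "Input Size 100"
-- >   == Just ("Baseline measurements", SizeUn 100)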
-- * Helper functions
-- | Execute a file, capturing its output to STDOUT and printing it to the
-- command line.
execute :: FilePath -> IO ()
execute fp = do
let p = (proc fp []) { std_out = CreatePipe }
(_, Just out, _, ph) <- createProcess p
printOutput ph out
where
printOutput :: ProcessHandle -> Handle -> IO ()
printOutput ph h = go
where
go = do
-- Read whatever output is currently available (up to 64 KiB) without blocking.
bs <- BS.hGetNonBlocking h (64 * 1024)
printLine bs
ec <- getProcessExitCode ph
-- If the process is still running, poll again; otherwise drain and print any
-- remaining output before returning.
maybe go (const $ do
end <- BS.hGetContents h
printLine end) ec
printLine bs = unless (BS.null bs) (C.putStr bs)
-- | Generate a valid filename for the benchmarking file from the filename of
-- the user input file.
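--
-- For example (hypothetical filenames), an input file @./Input.hs@ typically
-- maps to @./BenchInput.hs@; if that name (or @./BenchInput@) is already
-- taken, the system falls back to @./BenchInput0.hs@, @./BenchInput1.hs@,
-- and so on.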
generateBenchmarkingFilename :: String -> IO String
generateBenchmarkingFilename s = do
b1 <- doesFileExist s'
b2 <- doesFileExist (addSuffix s')
if b1 || b2
then go s' 0
else return (addSuffix s')
where
go :: String -> Int -> IO String
go s_ i = do
let s_' = s_ ++ show i
b1 <- doesFileExist s_'
b2 <- doesFileExist (addSuffix s_')
if b1 || b2
then go s_ (i + 1)
else return (addSuffix s_')
addSuffix = (++ ".hs")
s' = takeDirectory s ++ "/Bench" ++ takeBaseName s
-- | Discover potential input files in the working directory.
discoverInputFiles :: IO [FilePath]
discoverInputFiles = filter ((== ".hs") . takeExtension) <$> getDirectoryContents "."
-- | Press any key to exit.
anyKeyExit :: IO ()
anyKeyExit = do
putStr "Press any key to exit... "
hFlush stdout
-- Want to use Haskeline here but can't.
-- https://github.com/judah/haskeline/issues/74
void getChar
printGoodbyeMessage
-- | Output @n@ line spaces to the command line.
spacer :: Int -> IO ()
spacer n = putStrLn (replicate (n - 1) '\n') >> hFlush stdout
-- | Output documents at a specific level of nesting using 'putStr'.
nestPutStr :: Int -> PP.Doc -> IO ()
nestPutStr n doc = putStr (PP.render $ PP.nest n doc) >> hFlush stdout
-- | Output documents at a specific level of nesting using 'putStrLn'.
nestPutStrLn :: Int -> PP.Doc -> IO ()
nestPutStrLn n doc = putStrLn (PP.render $ PP.nest n doc) >> hFlush stdout
|
-- An ATP axiom must be used with postulates or data constructors.
-- This error is detected by TypeChecking.Rules.Decl.
module ATPBadAxiom where
foo : Set → Set
foo A = A
{-# ATP axiom foo #-}
|
From Coqprime Require Import PocklingtonRefl.
Local Open Scope positive_scope.
Lemma primo114:
prime 2224424442783039323682545799822955619261761489->
prime 157702795295546355891797783928828805431555932027233.
Proof.
intro H.
apply (Pocklington_refl
(Ell_certif
157702795295546355891797783928828805431555932027233
70896
((2224424442783039323682545799822955619261761489,1)::nil)
0
2058000
280
4900)
((Proof_certif _ H) :: nil)).
native_cast_no_check (refl_equal true).
Time Qed.
|
/-
Copyright (c) 2021 Yourong Zang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yourong Zang
-/
import analysis.normed_space.conformal_linear_map
import analysis.inner_product_space.basic
/-!
# Conformal maps between inner product spaces
In an inner product space, a map is conformal iff it preserves inner products up to a scalar factor.
-/
variables {E F : Type*}
variables [normed_add_comm_group E] [normed_add_comm_group F]
variables [inner_product_space ℝ E] [inner_product_space ℝ F]
open linear_isometry continuous_linear_map
open_locale real_inner_product_space
/-- A map between two inner product spaces is a conformal map if and only if it preserves inner
products up to a scalar factor, i.e., there exists a positive `c : ℝ` such that `⟪f u, f v⟫ = c *
⟪u, v⟫` for all `u`, `v`. -/
lemma is_conformal_map_iff (f : E →L[ℝ] F) :
is_conformal_map f ↔ ∃ (c : ℝ), 0 < c ∧ ∀ (u v : E), ⟪f u, f v⟫ = c * ⟪u, v⟫ :=
begin
split,
{ rintros ⟨c₁, hc₁, li, rfl⟩,
refine ⟨c₁ * c₁, mul_self_pos.2 hc₁, λ u v, _⟩,
simp only [real_inner_smul_left, real_inner_smul_right, mul_assoc, coe_smul',
coe_to_continuous_linear_map, pi.smul_apply, inner_map_map] },
{ rintros ⟨c₁, hc₁, huv⟩,
obtain ⟨c, hc, rfl⟩ : ∃ c : ℝ, 0 < c ∧ c₁ = c * c,
from ⟨real.sqrt c₁, real.sqrt_pos.2 hc₁, (real.mul_self_sqrt hc₁.le).symm⟩,
refine ⟨c, hc.ne', (c⁻¹ • f : E →ₗ[ℝ] F).isometry_of_inner (λ u v, _), _⟩,
{ simp only [real_inner_smul_left, real_inner_smul_right, huv, mul_assoc, coe_smul,
inv_mul_cancel_left₀ hc.ne', linear_map.smul_apply, continuous_linear_map.coe_coe] },
{ ext1 x,
exact (smul_inv_smul₀ hc.ne' (f x)).symm } }
end
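/- A quick sanity check, not part of the original file: any nonzero real
multiple of the identity, `f = c₀ • continuous_linear_map.id ℝ E` with
`c₀ ≠ 0`, satisfies the right-hand side with `c = c₀ * c₀`, since
`⟪c₀ • u, c₀ • v⟫ = c₀ * c₀ * ⟪u, v⟫`. -/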
|
import tactic
import data.nat.basic
open nat
lemma div_3_le_9 (x : ℕ) (h9 : x ≤ 9) (h3 : 3 ∣ x) : x = 0 ∨ x = 3 ∨ x = 6 ∨ x = 9 :=
begin
cases h3 with k h3k,
have : k ≤ 3 := by linarith,
interval_cases k; dec_trivial!,
end
|
example (P : Prop) : ∀ x ∈ (∅ : set ℕ), P :=
begin
intro x,
intro hx,
cases hx,
end
example (P : Prop) : ∀ x ∈ (∅ : set ℕ), P :=
begin
intros x hx, cases hx,
end
|
The recent ruling makes important points regarding share-class selection, the timing of fund changes, and comparisons to the S&P 500 index.
Some advisors are fired because they are not helping the plan sponsor with fiduciary issues.
Is it beneficial to periodically re-enroll participants in the 401(k) plan? |
Nominees for the 31st Grammy Awards were the Hampton String Quartet for "Get a Job", George Harrison for "When We Was Fab", the American rock band Talking Heads for Storytelling Giant, "Weird Al" Yankovic for "Fat", and Neil Young for "This Note's for You". "Get a Job", a song recorded originally by the American group The Silhouettes, appears on the Hampton String Quartet's album What If Mozart Wrote "Roll Over Beethoven", a collection of 1950s R&B and pop music songs performed in the styles of Beethoven, Debussy, Mozart, and other composers. "When We Was Fab", a song from the album Cloud Nine, is constructed from quotations written when The Beatles were at the height of their fame and features Harrison playing a sitar. The music video shows Elton John dressed as a walrus, a reference to the 1967 song "I Am the Walrus". Storytelling Giant is a collection of Talking Heads' music videos and additional material linking them together. Two of the nominated music videos had connections to Michael Jackson; "Fat" is a parody of Jackson's song "Bad", and the video for "This Note's for You" depicts a Jackson look-alike's hair catching fire, a parody of an incident that occurred during a shoot for a Pepsi television advertisement in 1984. In the "Fat" video, Yankovic becomes a "grossly overweight guy" through the use of cosmetics and special effects, and leads a group of overweight people on a parade. The award was presented to Yankovic as the performing artist, along with Jay Levey as the video director and Susan <unk> as the video producer.
|
/-
Copyright (c) 2021 Ashvni Narayanan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ashvni Narayanan
-/
import data.polynomial.algebra_map
import data.nat.choose.cast
import number_theory.bernoulli
/-!
# Bernoulli polynomials
The Bernoulli polynomials (defined here : https://en.wikipedia.org/wiki/Bernoulli_polynomials)
are an important tool obtained from Bernoulli numbers.
## Mathematical overview
The $n$-th Bernoulli polynomial is defined as
$$ B_n(X) = \sum_{k = 0}^n {n \choose k} (-1)^k * B_k * X^{n - k} $$
where $B_k$ is the $k$-th Bernoulli number. The Bernoulli polynomials are generating functions,
$$ t * e^{tX} / (e^t - 1) = \sum_{n = 0}^{\infty} B_n(X) * \frac{t^n}{n!} $$
## Implementation detail
Bernoulli polynomials are defined using `bernoulli`, the Bernoulli numbers.
## Main theorems
- `sum_bernoulli`: The sum of the $k^\mathrm{th}$ Bernoulli polynomial with binomial
coefficients up to n is `(n + 1) * X^n`.
- `bernoulli_generating_function`: The Bernoulli polynomials act as generating functions
for the exponential.
## TODO
- `bernoulli_eval_one_neg` : $$ B_n(1 - x) = (-1)^n*B_n(x) $$
-/
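/-
A quick sanity check (not part of the original file): unfolding the definition
below for small `n` gives the familiar first few Bernoulli polynomials, using
the "negative" convention `_root_.bernoulli 1 = -1/2`:
  B₀(X) = 1
  B₁(X) = X - 1/2
  B₂(X) = X^2 - X + 1/6
-/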
noncomputable theory
open_locale big_operators
open_locale nat polynomial
open nat finset
namespace polynomial
/-- The Bernoulli polynomials are defined in terms of the negative Bernoulli numbers. -/
def bernoulli (n : ℕ) : ℚ[X] :=
∑ i in range (n + 1), polynomial.monomial (n - i) ((_root_.bernoulli i) * (choose n i))
lemma bernoulli_def (n : ℕ) : bernoulli n =
∑ i in range (n + 1), polynomial.monomial i ((_root_.bernoulli (n - i)) * (choose n i)) :=
begin
rw [←sum_range_reflect, add_succ_sub_one, add_zero, bernoulli],
apply sum_congr rfl,
rintros x hx,
rw mem_range_succ_iff at hx, rw [choose_symm hx, tsub_tsub_cancel_of_le hx],
end
/-
### examples
-/
section examples
@[simp] lemma bernoulli_zero : bernoulli 0 = 1 :=
by simp [bernoulli]
@[simp] lemma bernoulli_eval_zero (n : ℕ) : (bernoulli n).eval 0 = _root_.bernoulli n :=
begin
rw [bernoulli, polynomial.eval_finset_sum, sum_range_succ],
have : ∑ (x : ℕ) in range n, _root_.bernoulli x * (n.choose x) * 0 ^ (n - x) = 0,
{ apply sum_eq_zero (λ x hx, _),
have h : 0 < n - x := tsub_pos_of_lt (mem_range.1 hx),
simp [h] },
simp [this],
end
@[simp] lemma bernoulli_eval_one (n : ℕ) : (bernoulli n).eval 1 = _root_.bernoulli' n :=
begin
simp only [bernoulli, polynomial.eval_finset_sum],
simp only [←succ_eq_add_one, sum_range_succ, mul_one, cast_one, choose_self,
(_root_.bernoulli _).mul_comm, sum_bernoulli, one_pow, mul_one, polynomial.eval_C,
polynomial.eval_monomial],
by_cases h : n = 1,
{ norm_num [h], },
{ simp [h],
exact bernoulli_eq_bernoulli'_of_ne_one h, }
end
end examples
@[simp] theorem sum_bernoulli (n : ℕ) :
∑ k in range (n + 1), ((n + 1).choose k : ℚ) • bernoulli k =
polynomial.monomial n (n + 1 : ℚ) :=
begin
simp_rw [bernoulli_def, finset.smul_sum, finset.range_eq_Ico, ←finset.sum_Ico_Ico_comm,
finset.sum_Ico_eq_sum_range],
simp only [cast_succ, add_tsub_cancel_left, tsub_zero, zero_add, linear_map.map_add],
simp_rw [polynomial.smul_monomial, mul_comm (_root_.bernoulli _) _, smul_eq_mul, ←mul_assoc],
conv_lhs { apply_congr, skip, conv
{ apply_congr, skip,
rw [← nat.cast_mul, choose_mul ((le_tsub_iff_left $ mem_range_le H).1
$ mem_range_le H_1) (le.intro rfl), nat.cast_mul, add_comm x x_1, add_tsub_cancel_right,
mul_assoc, mul_comm, ←smul_eq_mul, ←polynomial.smul_monomial] },
rw [←sum_smul], },
rw [sum_range_succ_comm],
simp only [add_right_eq_self, cast_succ, mul_one, cast_one, cast_add, add_tsub_cancel_left,
choose_succ_self_right, one_smul, _root_.bernoulli_zero, sum_singleton, zero_add,
linear_map.map_add, range_one],
apply sum_eq_zero (λ x hx, _),
have f : ∀ x ∈ range n, ¬ n + 1 - x = 1,
{ rintros x H, rw [mem_range] at H,
rw [eq_comm],
exact ne_of_lt (nat.lt_of_lt_of_le one_lt_two (le_tsub_of_add_le_left (succ_le_succ H))) },
rw [sum_bernoulli],
have g : (ite (n + 1 - x = 1) (1 : ℚ) 0) = 0,
{ simp only [ite_eq_right_iff, one_ne_zero],
intro h₁,
exact (f x hx) h₁, },
rw [g, zero_smul],
end
open power_series
variables {A : Type*} [comm_ring A] [algebra ℚ A]
-- TODO: define exponential generating functions, and use them here
-- This name should probably be updated afterwards
/-- The theorem that `(∑ Bₙ(t)X^n/n!)(e^X-1) = Xe^{tX}` -/
theorem bernoulli_generating_function (t : A) :
mk (λ n, aeval t ((1 / n! : ℚ) • bernoulli n)) * (exp A - 1) =
power_series.X * rescale t (exp A) :=
begin
-- check equality of power series by checking coefficients of X^n
ext n,
-- n = 0 case solved by `simp`
cases n, { simp },
-- for n ≥ 1, the coefficient is a sum of n+2 terms, so use `sum_range_succ` to write it as
-- last term plus sum to n+1
rw [coeff_succ_X_mul, coeff_rescale, coeff_exp, power_series.coeff_mul,
nat.sum_antidiagonal_eq_sum_range_succ_mk, sum_range_succ],
-- last term is zero so kill with `add_zero`
simp only [ring_hom.map_sub, tsub_self, constant_coeff_one, constant_coeff_exp,
coeff_zero_eq_constant_coeff, mul_zero, sub_self, add_zero],
-- Let's multiply both sides by (n+1)! (OK because it's a unit)
set u : units ℚ := ⟨(n+1)!, (n+1)!⁻¹,
mul_inv_cancel (by exact_mod_cast factorial_ne_zero (n+1)),
inv_mul_cancel (by exact_mod_cast factorial_ne_zero (n+1))⟩ with hu,
rw ←units.mul_right_inj (units.map (algebra_map ℚ A).to_monoid_hom u),
-- now tidy up unit mess and generally do trivial rearrangements
-- to make RHS (n+1)*t^n
rw [units.coe_map, mul_left_comm, ring_hom.to_monoid_hom_eq_coe,
ring_hom.coe_monoid_hom, ←ring_hom.map_mul, hu, units.coe_mk],
change _ = t^n * algebra_map ℚ A (((n+1)*n! : ℚ)*(1/n!)),
rw [cast_mul, mul_assoc, mul_one_div_cancel
(show (n! : ℚ) ≠ 0, from cast_ne_zero.2 (factorial_ne_zero n)), mul_one, mul_comm (t^n),
← polynomial.aeval_monomial, cast_add, cast_one],
-- But this is the RHS of `sum_bernoulli_poly`
rw [← sum_bernoulli, finset.mul_sum, alg_hom.map_sum],
-- and now we have to prove a sum is a sum, but all the terms are equal.
apply finset.sum_congr rfl,
-- The rest is just trivialities, hampered by the fact that we're coercing
-- factorials and binomial coefficients between ℕ and ℚ and A.
intros i hi,
-- deal with coefficients of e^X-1
simp only [nat.cast_choose ℚ (mem_range_le hi), coeff_mk,
if_neg (mem_range_sub_ne_zero hi), one_div, alg_hom.map_smul, power_series.coeff_one,
units.coe_mk, coeff_exp, sub_zero, linear_map.map_sub, algebra.smul_mul_assoc, algebra.smul_def,
mul_right_comm _ ((aeval t) _), ←mul_assoc, ← ring_hom.map_mul, succ_eq_add_one],
-- finally cancel the Bernoulli polynomial and the algebra_map
congr',
apply congr_arg,
rw [mul_assoc, div_eq_mul_inv, ← mul_inv₀],
end
end polynomial
|
# using Base.Test
# using MIToS.Utils
print("""
Test eachline for ASCIIString
=============================
""")
let example = ""
@test_throws ErrorException eachline(example)
end
let example = "Hola"
@test collect(eachline(example)) == ["Hola"]
end
let example = "Hola\n"
@test collect(eachline(example)) == ["Hola\n"]
end
let example = "\n"
@test collect(eachline(example)) == ["\n"]
end
let example = "Hola\nMundo"
@test collect(eachline(example)) == ["Hola\n", "Mundo"]
end
let example = "Hola\nMundo\n"
@test collect(eachline(example)) == ["Hola\n", "Mundo\n"]
end
let example = "Hola\nMundo\n\n"
@test collect(eachline(example)) == ["Hola\n", "Mundo\n", "\n"]
end
|
Formal statement is: lemma fold_coeffs_0_eq [simp]: "fold_coeffs f 0 = id" Informal statement is: The fold function over the coefficients of a polynomial with zero as the initial value is the identity function. |
import LMT
variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]
example {a1 a2 a3 : A I E} :
(v2) ≠ ((((a1).write i2 (v2)).write i3 (v2)).read i2) → False := by
arr
|
Require Import String Omega List FunctionalExtensionality Ensembles
Computation ADT ADTRefinement ADTNotation QueryStructureSchema
BuildADTRefinements QueryStructure
QuerySpecs.QueryQSSpecs QuerySpecs.InsertQSSpecs EnsembleListEquivalence
ConstraintChecksRefinements.
(* Facts about implementing insert operations. *)
Section InsertRefinements.
Hint Resolve AC_eq_nth_In AC_eq_nth_NIn crossConstr.
Hint Unfold SatisfiesCrossRelationConstraints
SatisfiesSchemaConstraints.
Arguments GetUnConstrRelation : simpl never.
Arguments UpdateUnConstrRelation : simpl never.
Arguments replace_BoundedIndex : simpl never.
Arguments BuildQueryStructureConstraints : simpl never.
Arguments BuildQueryStructureConstraints' : simpl never.
Program
Definition Insert_Valid
(qsSchema : QueryStructureSchema)
(qs : QueryStructure qsSchema)
(Ridx : _)
(tup : @IndexedTuple (QSGetNRelSchemaHeading qsSchema Ridx))
(schConstr : forall tup',
GetRelation qs Ridx tup' ->
SatisfiesSchemaConstraints Ridx tup tup')
(schConstr' : forall tup',
GetRelation qs Ridx tup' ->
SatisfiesSchemaConstraints Ridx tup' tup)
(schConstr_self :
@SatisfiesSchemaConstraints qsSchema Ridx tup tup)
(qsConstr : forall Ridx',
SatisfiesCrossRelationConstraints Ridx Ridx' tup (GetRelation qs Ridx'))
(qsConstr' : forall Ridx',
Ridx' <> Ridx ->
forall tup',
GetRelation qs Ridx' tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx tup'
(EnsembleInsert tup (GetRelation qs Ridx)))
: QueryStructure qsSchema :=
{| rels :=
UpdateRelation _ (rels qs) Ridx {| rel := EnsembleInsert tup (GetRelation qs Ridx)|}
|}.
Next Obligation.
unfold GetRelation.
unfold SatisfiesSchemaConstraints, QSGetNRelSchema, GetNRelSchema,
GetRelation in *.
set ((ith_Bounded _ (rels qs) Ridx )) as X in *; destruct X; simpl in *.
destruct (schemaConstraints
(relSchema (nth_Bounded relName (qschemaSchemas qsSchema) Ridx))); eauto.
unfold EnsembleInsert in *; simpl in *; intuition; subst; eauto.
Defined.
Next Obligation.
caseEq (BuildQueryStructureConstraints qsSchema idx idx'); eauto.
unfold SatisfiesCrossRelationConstraints, UpdateRelation in *;
destruct (BoundedString_eq_dec Ridx idx'); subst.
- rewrite ith_replace_BoundIndex_eq; simpl.
rewrite ith_replace_BoundIndex_neq in H1; eauto using string_dec.
generalize (qsConstr' idx H0 _ H1); rewrite H; eauto.
- rewrite ith_replace_BoundIndex_neq in *; eauto using string_dec.
destruct (BoundedString_eq_dec Ridx idx); subst.
+ rewrite ith_replace_BoundIndex_eq in H1; simpl in *; eauto.
unfold EnsembleInsert in H1; destruct H1; subst; eauto.
* generalize (qsConstr idx'); rewrite H; eauto.
* pose proof (crossConstr qs idx idx') as X; rewrite H in X; eauto.
+ rewrite ith_replace_BoundIndex_neq in H1; eauto using string_dec.
pose proof (crossConstr qs idx idx') as X; rewrite H in X; eauto.
Qed.
Lemma QSInsertSpec_refine' :
forall qsSchema (qs : QueryStructure qsSchema) Ridx tup default,
refine
(Pick (QSInsertSpec {| qsHint := qs |} Ridx tup))
(schConstr_self <-
{b |
decides b
(SatisfiesSchemaConstraints Ridx tup tup)};
schConstr <-
{b |
decides
b
(forall tup',
GetRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup tup')};
schConstr' <-
{b |
decides
b
(forall tup',
GetRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup' tup)};
qsConstr <- {b | decides b
(forall Ridx', SatisfiesCrossRelationConstraints Ridx Ridx' tup (GetRelation qs Ridx'))};
qsConstr' <- {b | decides
b
(forall Ridx',
Ridx' <> Ridx ->
forall tup',
(GetRelation qs Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx tup'
(EnsembleInsert tup (GetRelation qs Ridx)))};
match schConstr_self, schConstr, schConstr', qsConstr, qsConstr' with
| true, true, true, true, true =>
{qs' |
(forall Ridx',
Ridx <> Ridx' ->
GetRelation qsHint Ridx' =
GetRelation qs' Ridx')
/\ forall t,
GetRelation qs' Ridx t <->
(EnsembleInsert tup (GetRelation qsHint Ridx) t)
}
| _, _ , _, _, _ => default
end).
Proof.
intros qsSchema qs Ridx tup default v Comp_v.
do 5 (apply_in_hyp computes_to_inv; destruct_ex; split_and);
destruct x;
[ destruct x0;
[ destruct x1;
[ destruct x2;
[ destruct x3;
[ repeat (apply_in_hyp computes_to_inv; destruct_ex; split_and); simpl in *;
econstructor; unfold QSInsertSpec; eauto |
]
| ]
| ]
| ]
| ];
cbv delta [decides] beta in *; simpl in *;
repeat (apply_in_hyp computes_to_inv; destruct_ex); eauto;
econstructor; unfold QSInsertSpec; intros;
solve [elimtype False; intuition].
Qed.
Lemma QSInsertSpec_refine :
forall qsSchema (qs : QueryStructure qsSchema) Ridx tup default,
refine
(Pick (QSInsertSpec {| qsHint := qs |} Ridx tup))
(schConstr_self <- {b | decides b
(SatisfiesSchemaConstraints Ridx tup tup)};
schConstr <-
{b |
decides
b
(forall tup',
GetRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup tup')};
schConstr' <-
{b |
decides
b
(forall tup',
GetRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup' tup)};
qsConstr <- (@Iterate_Decide_Comp _
(fun Ridx' =>
SatisfiesCrossRelationConstraints
Ridx Ridx' tup
(GetRelation qsHint Ridx')));
qsConstr' <- (@Iterate_Decide_Comp _
(fun Ridx' =>
Ridx' <> Ridx
-> forall tup',
(GetRelation qsHint Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx tup'
(EnsembleInsert tup (GetRelation qs Ridx))));
match schConstr_self, schConstr, schConstr', qsConstr, qsConstr' with
| true, true, true, true, true =>
{qs' |
(forall Ridx',
Ridx <> Ridx' ->
GetRelation qsHint Ridx' =
GetRelation qs' Ridx')
/\ forall t,
GetRelation qs' Ridx t <->
(EnsembleInsert tup (GetRelation qsHint Ridx) t)
}
| _, _, _, _, _ => default
end).
Proof.
intros.
rewrite QSInsertSpec_refine'; f_equiv.
unfold pointwise_relation; intros.
setoid_rewrite Iterate_Decide_Comp_BoundedIndex; f_equiv.
Qed.
Lemma QSInsertSpec_UnConstr_refine' :
forall qsSchema (qs : UnConstrQueryStructure qsSchema)
(Ridx : @BoundedString (map relName (qschemaSchemas qsSchema)))
(tup : @IndexedTuple (schemaHeading (QSGetNRelSchema qsSchema Ridx)))
(or : QueryStructure qsSchema)
(NIntup : ~ GetUnConstrRelation qs Ridx tup),
DropQSConstraints_AbsR or qs ->
refine
(or' <- (qs' <- Pick (QSInsertSpec {| qsHint := or |} Ridx tup);
b <- Pick (SuccessfulInsertSpec {| qsHint := or |} Ridx qs' tup);
ret (qs', b));
nr' <- {nr' | DropQSConstraints_AbsR (fst or') nr'};
ret (nr', snd or'))
(schConstr_self <- {b | decides b (SatisfiesSchemaConstraints Ridx tup tup)};
schConstr <-
{b | decides b
(forall tup',
GetUnConstrRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup tup')};
schConstr' <-
{b |
decides
b
(forall tup',
GetUnConstrRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup' tup)};
qsConstr <- (@Iterate_Decide_Comp _
(fun Ridx' =>
SatisfiesCrossRelationConstraints
Ridx Ridx' tup
(GetUnConstrRelation qs Ridx')));
qsConstr' <- (@Iterate_Decide_Comp _
(fun Ridx' =>
Ridx' <> Ridx
-> forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx tup'
(EnsembleInsert tup (GetUnConstrRelation qs Ridx))));
ret match schConstr_self, schConstr, schConstr', qsConstr, qsConstr' with
| true, true, true, true, true =>
(UpdateUnConstrRelation qs Ridx (EnsembleInsert tup (GetUnConstrRelation qs Ridx)), true)
| _, _, _, _, _ => (qs, false)
end).
Proof.
intros.
setoid_rewrite refineEquiv_pick_eq'.
unfold DropQSConstraints_AbsR in *; intros; subst.
rewrite QSInsertSpec_refine with (default := ret or).
unfold refine; intros; subst.
do 5 (apply_in_hyp computes_to_inv; destruct_ex; split_and).
repeat rewrite GetRelDropConstraints in *.
(* These asserts are gross. Need to eliminate them. *)
assert ((fun Ridx' : BoundedIndex (map relName (qschemaSchemas qsSchema)) =>
SatisfiesCrossRelationConstraints Ridx Ridx' tup
(GetUnConstrRelation (DropQSConstraints or) Ridx')) =
(fun Ridx' : BoundedIndex (map relName (qschemaSchemas qsSchema)) =>
SatisfiesCrossRelationConstraints Ridx Ridx' tup
(GetRelation or Ridx'))) as rewriteSat
by (apply functional_extensionality; intros; rewrite GetRelDropConstraints;
reflexivity); rewrite rewriteSat in H3; clear rewriteSat.
assert ((fun Ridx' : BoundedIndex (map relName (qschemaSchemas qsSchema)) =>
Ridx' <> Ridx ->
forall
tup' : @IndexedTuple
(schemaHeading
(relSchema
(nth_Bounded relName (qschemaSchemas qsSchema)
Ridx'))),
GetUnConstrRelation (DropQSConstraints or) Ridx' tup' ->
SatisfiesCrossRelationConstraints Ridx' Ridx tup'
(EnsembleInsert tup (GetRelation or Ridx))) =
(fun Ridx' : BoundedIndex (map relName (qschemaSchemas qsSchema)) =>
Ridx' <> Ridx ->
forall
tup' : @IndexedTuple
(schemaHeading
(relSchema
(nth_Bounded relName (qschemaSchemas qsSchema) Ridx'))),
GetRelation or Ridx' tup' ->
SatisfiesCrossRelationConstraints Ridx' Ridx tup'
(EnsembleInsert tup (GetRelation or Ridx))))
as rewriteSat
by (apply functional_extensionality; intros; rewrite GetRelDropConstraints;
reflexivity); rewrite rewriteSat in H4; clear rewriteSat.
(* Resume not-terribleness *)
generalize (Iterate_Decide_Comp_BoundedIndex _ _ _ H3) as H3';
generalize (Iterate_Decide_Comp_BoundedIndex _ _ _ H4) as H4'; intros.
revert H3 H4.
repeat apply_in_hyp computes_to_inv.
econstructor 2 with
(comp_a_value := match x as x', x0 as x0', x1 as x1', x2 as x2', x3 as x3'
return decides x' _ ->
decides x0' _ ->
decides x1' _ ->
decides x2' _ ->
decides x3' _ -> _
with
| true, true, true, true, true =>
fun H H0 H1 H2 H3 => (@Insert_Valid _ or Ridx tup H0 H1 H H2 H3, true)
| _, _, _, _, _ => fun _ _ _ _ _ => (or, false)
end H0 H1 H2 H3' H4').
econstructor 2 with (comp_a_value := match x as x', x0 as x0', x1 as x1', x2 as x2', x3 as x3'
return decides x' _ ->
decides x0' _ ->
decides x1' _ ->
decides x2' _ ->
decides x3' _ -> _
with
| true, true, true, true, true =>
fun H H0 H1 H2 H3 => @Insert_Valid _ or Ridx tup H0 H1 H H2 H3
| _, _, _, _, _ => fun _ _ _ _ _ => or
end H0 H1 H2 H3' H4').
repeat (econstructor; eauto).
repeat find_if_inside; try econstructor; simpl in *.
unfold GetRelation, Insert_Valid, UpdateUnConstrRelation,
UpdateRelation, EnsembleInsert ; simpl; split; intros; eauto.
rewrite ith_replace_BoundIndex_neq; eauto using string_dec; simpl.
rewrite ith_replace_BoundIndex_eq; unfold EnsembleInsert, GetRelation;
simpl; intuition.
econstructor.
econstructor 3 with (v := match x as x', x0 as x0', x1 as x1', x2 as x2', x3 as x3'
return decides x' _ ->
decides x0' _ ->
decides x1' _ ->
decides x2' _ ->
decides x3' _ -> _
with
| true, true, true, true, true =>
fun H H0 H1 H2 H3 => true
| _, _, _, _, _ => fun _ _ _ _ _ => false
end H0 H1 H2 H3' H4').
repeat find_if_inside; simpl;
try (solve [unfold not; let H := fresh in intros H; eapply NIntup; eapply H;
unfold EnsembleInsert; eauto]).
intros; rewrite <- GetRelDropConstraints.
unfold Insert_Valid, GetUnConstrRelation, DropQSConstraints,
UpdateRelation; simpl.
rewrite <- ith_Bounded_imap, ith_replace_BoundIndex_eq; simpl;
tauto.
repeat find_if_inside; subst; repeat econstructor.
simpl.
repeat find_if_inside; subst; repeat econstructor.
unfold DropQSConstraints, Insert_Valid, EnsembleInsert; simpl.
unfold GetRelation, Insert_Valid, UpdateUnConstrRelation,
UpdateRelation; rewrite imap_replace_BoundedIndex; simpl; eauto using string_dec.
Qed.
Lemma freshIdx2UnConstr {qsSchema} qs Ridx
: refine {bound | forall tup : IndexedTuple,
@GetUnConstrRelation qsSchema qs Ridx tup ->
tupleIndex tup <> bound}
{bound | UnConstrFreshIdx (GetUnConstrRelation qs Ridx) bound}.
Proof.
unfold UnConstrFreshIdx; intros v Comp_v; econstructor.
inversion_by computes_to_inv; intros.
apply Comp_v in H; omega.
Qed.
Lemma QSInsertSpec_UnConstr_refine :
forall qsSchema (qs : UnConstrQueryStructure qsSchema )
(Ridx : @BoundedString (map relName (qschemaSchemas qsSchema)))
(tup : @Tuple (schemaHeading (QSGetNRelSchema qsSchema Ridx)))
(or : QueryStructure qsSchema)
refined_schConstr_self refined_schConstr refined_schConstr'
refined_qsConstr refined_qsConstr',
refine {b | decides b (SatisfiesSchemaConstraints Ridx tup tup)}
refined_schConstr_self
-> refine {b | decides b
(forall tup',
GetUnConstrRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup tup')}
refined_schConstr
-> refine
{b |
decides
b
(forall tup',
GetUnConstrRelation qs Ridx tup'
-> SatisfiesSchemaConstraints Ridx tup' tup)}
refined_schConstr'
-> refine
(@Iterate_Decide_Comp _
(fun Ridx' =>
SatisfiesCrossRelationConstraints
Ridx Ridx' tup
(GetUnConstrRelation qs Ridx')))
refined_qsConstr
-> (forall idx,
refine
(@Iterate_Decide_Comp _
(fun Ridx' =>
Ridx' <> Ridx
-> forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx tup'
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx))))
(refined_qsConstr' idx))
-> DropQSConstraints_AbsR or qs ->
refine
(or' <- (idx <- Pick (freshIdx {| qsHint := or |} Ridx);
qs' <- Pick (QSInsertSpec {| qsHint := or |} Ridx
{| tupleIndex := idx;
indexedTuple := tup |});
b <- Pick (SuccessfulInsertSpec {| qsHint := or |} Ridx qs'
{| tupleIndex := idx;
indexedTuple := tup |});
ret (qs', b));
nr' <- {nr' | DropQSConstraints_AbsR (fst or') nr'};
ret (nr', snd or'))
(idx <- {idx | UnConstrFreshIdx (GetUnConstrRelation qs Ridx) idx};
(schConstr_self <- refined_schConstr_self;
schConstr <- refined_schConstr;
schConstr' <- refined_schConstr';
qsConstr <- refined_qsConstr ;
qsConstr' <- (refined_qsConstr' idx);
ret match schConstr_self, schConstr, schConstr', qsConstr, qsConstr' with
| true, true, true, true, true =>
(UpdateUnConstrRelation qs Ridx
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx)), true)
| _, _, _, _, _ => (qs, false)
end)).
Proof.
intros.
simplify with monad laws.
unfold freshIdx.
simpl.
rewrite <- GetRelDropConstraints.
unfold DropQSConstraints_AbsR in *; subst.
rewrite freshIdx2UnConstr.
apply refine_bind_pick; intros.
setoid_rewrite <- H; setoid_rewrite <- H0; setoid_rewrite <- H1;
setoid_rewrite <- H2; setoid_rewrite <- (H3 a).
setoid_rewrite <- (QSInsertSpec_UnConstr_refine' _ {| tupleIndex := a; indexedTuple := tup |}).
repeat setoid_rewrite refineEquiv_bind_bind.
setoid_rewrite refineEquiv_bind_unit; simpl.
unfold DropQSConstraints_AbsR in *; subst.
f_equiv; intros.
unfold UnConstrFreshIdx, not in *; intros.
apply H4 in H5; simpl in *; omega.
reflexivity.
Qed.
Lemma refine_SatisfiesCrossConstraints'
: forall qsSchema qs
(Ridx : @BoundedString (map relName (qschemaSchemas qsSchema)))
(tup : @Tuple (schemaHeading (QSGetNRelSchema qsSchema Ridx))),
forall idx,
refine
(@Iterate_Decide_Comp _
(fun Ridx' =>
Ridx' <> Ridx
-> forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx tup'
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx))))
(@Iterate_Decide_Comp_opt' _ _ []
(fun Ridx' =>
if (BoundedString_eq_dec Ridx Ridx') then
None
else
match (BuildQueryStructureConstraints qsSchema Ridx' Ridx) with
| Some CrossConstr =>
Some (
forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> CrossConstr (indexedTuple tup') (
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx))))
| None => None
end)).
Proof.
intros.
setoid_rewrite <- refine_Iterate_Decide_Comp.
unfold SatisfiesCrossRelationConstraints.
apply refine_Iterate_Decide_Comp_equiv; simpl; intros.
apply string_dec.
destruct (BoundedString_eq_dec Ridx idx0); subst.
congruence.
destruct (BuildQueryStructureConstraints qsSchema idx0 Ridx); eauto.
intro; eapply H.
destruct (BoundedString_eq_dec Ridx idx0); subst; eauto.
destruct (BuildQueryStructureConstraints qsSchema idx0 Ridx); eauto.
Qed.
Local Transparent QSInsert.
Lemma QSInsertSpec_UnConstr_refine_opt :
forall qsSchema (qs : UnConstrQueryStructure qsSchema )
(Ridx : @BoundedString (map relName (qschemaSchemas qsSchema)))
(tup : @Tuple (schemaHeading (QSGetNRelSchema qsSchema Ridx)))
(or : QueryStructure qsSchema),
DropQSConstraints_AbsR or qs ->
refine
(or' <- (idx <- Pick (freshIdx {| qsHint := or |} Ridx);
qs' <- Pick (QSInsertSpec {| qsHint := or |} Ridx
{| tupleIndex := idx;
indexedTuple := tup |});
b <- Pick (SuccessfulInsertSpec {| qsHint := or |} Ridx qs'
{| tupleIndex := idx;
indexedTuple := tup |});
ret (qs', b));
nr' <- {nr' | DropQSConstraints_AbsR (fst or') nr'};
ret (nr', snd or'))
match (schemaConstraints (QSGetNRelSchema qsSchema Ridx)) with
Some Constr =>
idx <- {idx | UnConstrFreshIdx (GetUnConstrRelation qs Ridx) idx} ;
(schConstr_self <- {b | decides b (Constr tup tup) };
schConstr <- {b | decides b
(forall tup',
GetUnConstrRelation qs Ridx tup'
-> Constr tup tup')};
schConstr' <- {b | decides b
(forall tup',
GetUnConstrRelation qs Ridx tup'
-> Constr tup' tup)};
qsConstr <- (@Iterate_Decide_Comp_opt' _ _ []
(fun Ridx' =>
match (BuildQueryStructureConstraints qsSchema Ridx Ridx') with
| Some CrossConstr =>
Some (CrossConstr tup (GetUnConstrRelation qs Ridx'))
| None => None
end));
qsConstr' <- (@Iterate_Decide_Comp_opt' _ _ []
(fun Ridx' =>
if (BoundedString_eq_dec Ridx Ridx') then
None
else
match (BuildQueryStructureConstraints qsSchema Ridx' Ridx) with
| Some CrossConstr =>
Some (
forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> CrossConstr (indexedTuple tup') (
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx))))
| None => None
end));
ret match schConstr_self, schConstr, schConstr', qsConstr, qsConstr' with
| true, true, true, true, true =>
(UpdateUnConstrRelation qs Ridx
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx)), true)
| _, _, _, _, _ => (qs, false)
end)
| None =>
idx <- {idx | UnConstrFreshIdx (GetUnConstrRelation qs Ridx) idx};
(qsConstr <- (@Iterate_Decide_Comp_opt' _ _ []
(fun Ridx' =>
match (BuildQueryStructureConstraints qsSchema Ridx Ridx') with
| Some CrossConstr =>
Some (CrossConstr tup (GetUnConstrRelation qs Ridx'))
| None => None
end));
qsConstr' <- (@Iterate_Decide_Comp_opt' _ _ []
(fun Ridx' =>
if (BoundedString_eq_dec Ridx Ridx') then
None
else
match (BuildQueryStructureConstraints qsSchema Ridx' Ridx) with
| Some CrossConstr =>
Some (
forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> CrossConstr (indexedTuple tup') (
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx))))
| None => None
end));
ret match qsConstr, qsConstr' with
| true, true =>
(UpdateUnConstrRelation qs Ridx
(EnsembleInsert
{| tupleIndex := idx;
indexedTuple := tup |}
(GetUnConstrRelation qs Ridx)), true)
| _, _ => (qs, false)
end)
end.
unfold QSInsert.
intros; rewrite QSInsertSpec_UnConstr_refine;
eauto using
refine_SatisfiesSchemaConstraints_self,
refine_SatisfiesSchemaConstraints,
refine_SatisfiesSchemaConstraints',
refine_SatisfiesCrossConstraints;
[
| intros; eapply refine_SatisfiesCrossConstraints'].
destruct (schemaConstraints (QSGetNRelSchema qsSchema Ridx)).
reflexivity.
f_equiv; unfold pointwise_relation; intros.
repeat setoid_rewrite refineEquiv_bind_bind.
repeat setoid_rewrite refineEquiv_bind_unit; f_equiv.
Qed.
End InsertRefinements.
(* When we insert a tuple into a relation which another relation has
a foreign key into, we need to show that we haven't messed up any
references (which is, of course, trivial). We should bake this into
the [QSInsertSpec_refine'] refinement itself by filtering out the
irrelevant constraints somehow, but for now we can use the following
tactic to rewrite them away. *)
Ltac remove_trivial_insertion_constraints :=
repeat match goal with
|- context[EnsembleInsert _ (GetUnConstrRelation _ _) ] =>
match goal with
AbsR : @DropQSConstraints_AbsR ?schm ?or ?nr
|- context [
Pick
(fun b =>
decides
b
(forall tup' : @IndexedTuple ?heading,
(@GetUnConstrRelation ?schm ?r ?Ridx) tup' ->
ForeignKey_P ?attr ?attr' ?tup_map
(indexedTuple tup')
(EnsembleInsert ?tup (GetUnConstrRelation ?r ?Ridx'))))] =>
let neq := fresh in
assert (Ridx <> Ridx') by (subst_strings; discriminate);
let refine_trivial := fresh in
assert (refine {b' |
decides b'
(forall tup' : IndexedTuple,
(GetUnConstrRelation r Ridx) tup' ->
@ForeignKey_P heading
(schemaHeading (GetNRelSchema (qschemaSchemas schm) Ridx'))
attr attr' tup_map
(indexedTuple tup')
(EnsembleInsert tup (GetUnConstrRelation r Ridx')))}
(ret true)) as refine_trivial;
[ unfold ForeignKey_P;
let v := fresh in
let Comp_v := fresh in
intros v Comp_v;
apply computes_to_inv in Comp_v;
rewrite <- AbsR; subst;
repeat rewrite GetRelDropConstraints;
let tup' := fresh in
let In_tup' := fresh in
econstructor; simpl map; simpl; intros tup' In_tup';
unfold EnsembleInsert;
let H' := fresh in
pose proof (@crossConstr _ or Ridx Ridx' tup' neq In_tup') as H';
simpl map in *; simpl in *;
destruct H' as [? [? ?]]; eauto
| subst_strings; setoid_rewrite refine_trivial;
clear refine_trivial;
pose_string_ids; simplify with monad laws
] end end.
Tactic Notation "remove" "trivial" "insertion" "checks" :=
(* Move all the binds we can outside the exists / computes
used for abstraction, stopping when we've rewritten
the bind in [QSInsertSpec]. *)
repeat rewrite refineEquiv_bind_bind;
etransitivity;
[ repeat (apply refine_bind;
[reflexivity
| match goal with
| |- context [Bind (Insert _ into _)%QuerySpec _] =>
unfold pointwise_relation; intros
end
] );
(* Pull out the relation we're inserting into and then
rewrite [QSInsertSpec] *)
match goal with
H : DropQSConstraints_AbsR _ ?r_n
|- context [(Insert ?n into ?R)%QuerySpec] =>
let H' := fresh in
(* If we try to eapply [QSInsertSpec_UnConstr_refine] directly
after we've drilled under a bind, this tactic will fail because
typeclass resolution breaks down. Generalizing and applying gets
around this problem for reasons unknown. *)
let H' := fresh in
pose proof (@QSInsertSpec_UnConstr_refine_opt
_ r_n {| bindex := R |} n _ H) as H';
apply H'
end
| cbv beta; simpl schemaConstraints; cbv iota;
simpl map; simpl app;
simpl relName in *; simpl schemaHeading in *;
pose_string_ids; simpl;
simplify with monad laws;
try rewrite <- GetRelDropConstraints;
repeat match goal with
| H : DropQSConstraints_AbsR ?qs ?uqs |- _ =>
rewrite H in *
end
].
Tactic Notation "Split" "Constraint" "Checks" :=
repeat (let b := match goal with
| [ |- context[if ?X then _ else _] ] => constr:(X)
| [ H : context[if ?X then _ else _] |- _ ]=> constr:(X)
end in
let b_eq := fresh in
eapply (@refine_if _ _ b); intros b_eq;
simpl in *; repeat rewrite b_eq; simpl).
Tactic Notation "implement" "failed" "insert" :=
repeat (rewrite refine_pick_val, refineEquiv_bind_unit; eauto);
reflexivity.
Tactic Notation "drop" "constraints" "from" "insert" constr(methname) :=
hone method methname;
[ remove trivial insertion checks;
(* The trivial insertion checks involve the fresh id,
so we need to drill under the binder before
attempting to remove them. *)
rewrite refine_bind;
[ | reflexivity |
unfold pointwise_relation; intros;
repeat remove_trivial_insertion_constraints;
higher_order_1_reflexivity ];
finish honing
| ].
|
{-# OPTIONS --omega-in-omega --no-termination-check --overlapping-instances #-}
module Light.Implementation where
module Data where
module Empty where open import Light.Implementation.Data.Empty public
module Either where open import Light.Implementation.Data.Either public
module Natural where open import Light.Implementation.Data.Natural public
module Unit where open import Light.Implementation.Data.Unit public
module Integer where open import Light.Implementation.Data.Integer public
module Boolean where open import Light.Implementation.Data.Boolean public
module Product where open import Light.Implementation.Data.Product public
module These where open import Light.Implementation.Data.These public
module Relation where
module Sets where open import Light.Implementation.Relation.Sets public
module Boolean where open import Light.Implementation.Relation.Boolean public
module Binary where
module Equality where
module Propositional where
open import Light.Implementation.Relation.Binary.Equality.Propositional public
module Decidable where open import Light.Implementation.Relation.Binary.Equality.Propositional.Decidable public
module Decidable where open import Light.Implementation.Relation.Decidable public
module Action where open import Light.Implementation.Action public
|
If $f(y, x)$ is $O(g(x))$ for every $y$ in the finite set $A$, then $\sum_{y \in A} f(y, x)$ is $O(g(x))$.
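A minimal sketch of the standard argument, assuming each hypothesis is witnessed by a constant $C_y$ and a threshold $x_y$ (names introduced here only for illustration): for all $x \ge \max_{y \in A} x_y$,
\[
\Bigl|\sum_{y \in A} f(y, x)\Bigr| \;\le\; \sum_{y \in A} \lvert f(y, x)\rvert \;\le\; \sum_{y \in A} C_y \,\lvert g(x)\rvert \;=\; \Bigl(\sum_{y \in A} C_y\Bigr)\lvert g(x)\rvert,
\]
so the sum is $O(g(x))$ with constant $\sum_{y \in A} C_y$; finiteness of $A$ is what keeps this constant (and the maximum of the thresholds) finite.
|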
section \<open> Programs with memory \<close>
theory utp_mem_prog
imports utp_mem_prelim
begin
text \<open> Heap allocation takes a lens $x$ within the store that holds an address, and an expression
$e$ over store variables whose return type is countable. The semantics of allocation selects
an arbitrary memory location not currently allocated in the heap, places the said address
in $x$, and injects the expression $e$ into the heap at this memory location. \<close>
definition
heap_alloc :: "(addr \<Longrightarrow> 's) \<Rightarrow> ('a::{countable,infinite}, 's) uexpr \<Rightarrow> 's mprog"
("_ := alloc'(_')" [74,0] 75)
where [upred_defs, ndes_simp]:
"x := alloc(e) =
(true \<turnstile>\<^sub>n (\<Sqinter> l \<bullet> ?[\<guillemotleft>l\<guillemotright> \<notin> dom(hp)] ;; str:x := \<guillemotleft>l\<guillemotright> ;; hp := &hp(&str:x \<mapsto> uop to_nat_bij (e \<oplus>\<^sub>p str))\<^sub>u))"
text \<open> Heap lookup retrieves data from the heap and places it into a store variable. If the memory
location $l$ is unallocated then an abort is the result. \<close>
definition
heap_lookup :: "('a::{countable,infinite} \<Longrightarrow> 's) \<Rightarrow> (addr, 's) uexpr \<Rightarrow> 's mprog"
("_ := *_" [74,75] 75)
where [upred_defs, ndes_simp]:
"x := *l = (((l \<oplus>\<^sub>p str) \<in>\<^sub>u dom\<^sub>u(&hp)) \<turnstile>\<^sub>n str:x := uop from_nat_bij (&hp(l \<oplus>\<^sub>p str)\<^sub>a))"
text \<open> Heap mutation updates the value of an already allocated address in the heap. \<close>
definition
heap_mutate :: "(addr, 's) uexpr \<Rightarrow> ('a::{countable,infinite}, 's) uexpr \<Rightarrow> 's mprog"
("*_ := _" [0, 74] 75)
where [upred_defs, ndes_simp]:
"*l := e = (((l \<oplus>\<^sub>p str) \<in>\<^sub>u dom\<^sub>u(&hp)) \<turnstile>\<^sub>n hp := &hp((l \<oplus>\<^sub>p str) \<mapsto> uop to_nat_bij (e \<oplus>\<^sub>p str))\<^sub>u)"
text \<open> Heap deallocation removes an area of memory from the heap. \<close>
definition
heap_dealloc :: "(addr, 's) uexpr \<Rightarrow> 's mprog" ("dealloc'(_')")
where [upred_defs, ndes_simp]:
"dealloc(l) = (U((l \<oplus>\<^sub>p str) \<in> fdom(&hp)) \<turnstile>\<^sub>n hp := (fdom(&hp) - {l \<oplus>\<^sub>p str}) \<lhd>\<^sub>f &hp)"
subsection \<open> Weakest Precondition Semantics \<close>
lemma wp_heap_alloc [wp]:
"\<lbrakk> vwb_lens x; x \<sharp> e \<rbrakk> \<Longrightarrow>
x := alloc(e) wp\<^sub>D p = (\<^bold>\<forall> l \<in> -dom\<^sub>u(&hp) \<bullet> p\<lbrakk>\<guillemotleft>l\<guillemotright>,&hp(\<guillemotleft>l\<guillemotright> \<mapsto> uop to_nat_bij (e \<oplus>\<^sub>p str))\<^sub>u/&str:x,&hp\<rbrakk>)"
by (simp add: ndes_simp wp usubst unrest, rel_auto)
lemma wp_heap_dealloc [wp]:
"dealloc(e) wp\<^sub>D p = U(e \<oplus>\<^sub>p str \<in> fdom(&hp) \<and> p\<lbrakk>(fdom(&hp) - {e \<oplus>\<^sub>p str}) \<lhd>\<^sub>f &hp/&hp\<rbrakk>)"
by (rel_auto)
subsection \<open> Example properties \<close>
lemma least_nat_Compl_not_in [simp]:
fixes A :: "nat set"
assumes "finite A"
shows "Inf(- A) \<notin> A"
by (metis ComplD ComplI Inf_nat_def assms ex_new_if_finite infinite_UNIV wellorder_Least_lemma(1))
lemma "vwb_lens x \<Longrightarrow> (x := alloc(\<guillemotleft>u\<guillemotright>) ;; *x :=\<^sub>\<D> \<guillemotleft>Some v\<guillemotright>) = (x := alloc(\<guillemotleft>v\<guillemotright>))"
apply (ndes_simp, rel_auto') oops
lemma "dealloc(e) ;; dealloc(e) = \<bottom>\<^sub>D"
by (ndes_simp, rel_auto)
lemma "vwb_lens x \<Longrightarrow> dealloc(&x) ;; *x :=\<^sub>\<D> \<guillemotleft>v\<guillemotright> = \<bottom>\<^sub>D"
by (ndes_simp, rel_auto)
lemma "vwb_lens x \<Longrightarrow> (x := alloc(\<guillemotleft>5 :: int\<guillemotright>) ;; dealloc(&x) ;; str:x :=\<^sub>D 0) = str:x :=\<^sub>D 0"
apply (ndes_simp, rel_auto)
apply (rename_tac ok hp st ok')
oops
(*
apply (rule_tac x="hp(Inf(- fdom(hp)) \<mapsto> to_nat_bij 5)\<^sub>f" in exI)
apply (rule_tac x="put\<^bsub>x\<^esub> st (\<Squnion>(- fdom(hp)))" in exI, simp)
apply (auto)
done
*)
end
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
! This file was ported from Lean 3 source module category_theory.limits.presheaf
! leanprover-community/mathlib commit 70fd9563a21e7b963887c9360bd29b2393e6225a
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.Adjunction.Limits
import Mathbin.CategoryTheory.Adjunction.Opposites
import Mathbin.CategoryTheory.Elements
import Mathbin.CategoryTheory.Limits.FunctorCategory
import Mathbin.CategoryTheory.Limits.KanExtension
import Mathbin.CategoryTheory.Limits.Shapes.Terminal
import Mathbin.CategoryTheory.Limits.Types
/-!
# Colimit of representables
This file constructs an adjunction `yoneda_adjunction` between `(Cᵒᵖ ⥤ Type u)` and `ℰ` given a
functor `A : C ⥤ ℰ`, where the right adjoint sends `(E : ℰ)` to `c ↦ (A.obj c ⟶ E)` (provided `ℰ`
has colimits).
This adjunction is used to show that every presheaf is a colimit of representables.
Further, the left adjoint `colimit_adj.extend_along_yoneda : (Cᵒᵖ ⥤ Type u) ⥤ ℰ` satisfies
`yoneda ⋙ L ≅ A`, that is, an extension of `A : C ⥤ ℰ` to `(Cᵒᵖ ⥤ Type u) ⥤ ℰ` through
`yoneda : C ⥤ Cᵒᵖ ⥤ Type u`. It is the left Kan extension of `A` along the yoneda embedding,
sometimes known as the Yoneda extension, as proved in `extend_along_yoneda_iso_Kan`.
`unique_extension_along_yoneda` shows `extend_along_yoneda` is unique amongst cocontinuous functors
with this property, establishing the presheaf category as the free cocompletion of a small category.
## Tags
colimit, representable, presheaf, free cocompletion
## References
* [S. MacLane, I. Moerdijk, *Sheaves in Geometry and Logic*][MM92]
* https://ncatlab.org/nlab/show/Yoneda+extension
-/
namespace CategoryTheory
noncomputable section
open Category Limits
universe uβ uβ
variable {C : Type uβ} [SmallCategory C]
variable {β° : Type uβ} [Category.{uβ} β°]
variable (A : C β₯€ β°)
namespace ColimitAdj
/--
The functor taking `(E : ℰ) (c : Cᵒᵖ)` to the homset `(A.obj C ⟶ E)`. It is shown in `L_adjunction`
that this functor has a left adjoint (provided `E` has colimits) given by taking colimits over
categories of elements.
In the case where `ℰ = Cᵒᵖ ⥤ Type u` and `A = yoneda`, this functor is isomorphic to the identity.
Defined as in [MM92], Chapter I, Section 5, Theorem 2.
-/
@[simps]
def restrictedYoneda : β° β₯€ Cα΅α΅ β₯€ Type uβ :=
yoneda β (whiskeringLeft _ _ (Type uβ)).obj (Functor.op A)
#align category_theory.colimit_adj.restricted_yoneda CategoryTheory.ColimitAdj.restrictedYoneda
/--
The functor `restricted_yoneda` is isomorphic to the identity functor when evaluated at the yoneda
embedding.
-/
def restrictedYonedaYoneda : restrictedYoneda (yoneda : C β₯€ Cα΅α΅ β₯€ Type uβ) β
π _ :=
NatIso.ofComponents
(fun P =>
NatIso.ofComponents (fun X => yonedaSectionsSmall X.unop _) fun X Y f =>
funext fun x => by
dsimp
rw [β functor_to_types.naturality _ _ x f (π _)]
dsimp
simp)
fun _ _ _ => rfl
#align category_theory.colimit_adj.restricted_yoneda_yoneda CategoryTheory.ColimitAdj.restrictedYonedaYoneda
/-- (Implementation). The equivalence of homsets which helps construct the left adjoint to
`colimit_adj.restricted_yoneda`.
It is shown in `restrict_yoneda_hom_equiv_natural` that this is a natural bijection.
-/
def restrictYonedaHomEquiv (P : Cα΅α΅ β₯€ Type uβ) (E : β°)
{c : Cocone ((CategoryOfElements.Ο P).leftOp β A)} (t : IsColimit c) :
(c.pt βΆ E) β (P βΆ (restrictedYoneda A).obj E) :=
((uliftTrivial _).symm βͺβ« t.homIso' E).toEquiv.trans
{ toFun := fun k =>
{ app := fun c p => k.1 (Opposite.op β¨_, pβ©)
naturality' := fun c c' f =>
funext fun p =>
(k.2
(Quiver.Hom.op β¨f, rflβ© :
(Opposite.op β¨c', P.map f pβ© : P.Elementsα΅α΅) βΆ Opposite.op β¨c, pβ©)).symm }
invFun := fun Ο =>
{ val := fun p => Ο.app p.unop.1 p.unop.2
property := fun p p' f => by
simp_rw [β f.unop.2]
apply (congr_fun (Ο.naturality f.unop.1) p'.unop.2).symm }
left_inv := by
rintro β¨kβ, kββ©
ext
dsimp
congr 1
simp
right_inv := by
rintro β¨_, _β©
rfl }
#align category_theory.colimit_adj.restrict_yoneda_hom_equiv CategoryTheory.ColimitAdj.restrictYonedaHomEquiv
/--
(Implementation). Show that the bijection in `restrict_yoneda_hom_equiv` is natural (on the right).
-/
theorem restrictYonedaHomEquiv_natural (P : Cα΅α΅ β₯€ Type uβ) (Eβ Eβ : β°) (g : Eβ βΆ Eβ) {c : Cocone _}
(t : IsColimit c) (k : c.pt βΆ Eβ) :
restrictYonedaHomEquiv A P Eβ t (k β« g) =
restrictYonedaHomEquiv A P Eβ t k β« (restrictedYoneda A).map g :=
by
ext (_ X p)
apply (assoc _ _ _).symm
#align category_theory.colimit_adj.restrict_yoneda_hom_equiv_natural CategoryTheory.ColimitAdj.restrictYonedaHomEquiv_natural
variable [HasColimits β°]
/--
The left adjoint to the functor `restricted_yoneda` (shown in `yoneda_adjunction`). It is also an
extension of `A` along the yoneda embedding (shown in `is_extension_along_yoneda`), in particular
it is the left Kan extension of `A` through the yoneda embedding.
-/
def extendAlongYoneda : (Cα΅α΅ β₯€ Type uβ) β₯€ β° :=
Adjunction.leftAdjointOfEquiv (fun P E => restrictYonedaHomEquiv A P E (colimit.isColimit _))
fun P E E' g => restrictYonedaHomEquiv_natural A P E E' g _
#align category_theory.colimit_adj.extend_along_yoneda CategoryTheory.ColimitAdj.extendAlongYoneda
@[simp]
theorem extendAlongYoneda_obj (P : Cα΅α΅ β₯€ Type uβ) :
(extendAlongYoneda A).obj P = colimit ((CategoryOfElements.Ο P).leftOp β A) :=
rfl
#align category_theory.colimit_adj.extend_along_yoneda_obj CategoryTheory.ColimitAdj.extendAlongYoneda_obj
theorem extendAlongYoneda_map {X Y : Cα΅α΅ β₯€ Type uβ} (f : X βΆ Y) :
(extendAlongYoneda A).map f =
colimit.pre ((CategoryOfElements.Ο Y).leftOp β A) (CategoryOfElements.map f).op :=
by
ext J
erw [colimit.ΞΉ_pre ((category_of_elements.Ο Y).leftOp β A) (category_of_elements.map f).op]
dsimp only [extend_along_yoneda, restrict_yoneda_hom_equiv, is_colimit.hom_iso',
is_colimit.hom_iso, ulift_trivial]
simpa
#align category_theory.colimit_adj.extend_along_yoneda_map CategoryTheory.ColimitAdj.extendAlongYoneda_map
/-- Show `extend_along_yoneda` is left adjoint to `restricted_yoneda`.
The construction of [MM92], Chapter I, Section 5, Theorem 2.
-/
def yonedaAdjunction : extendAlongYoneda A β£ restrictedYoneda A :=
Adjunction.adjunctionOfEquivLeft _ _
#align category_theory.colimit_adj.yoneda_adjunction CategoryTheory.ColimitAdj.yonedaAdjunction
/--
The initial object in the category of elements for a representable functor. In `is_initial` it is
shown that this is initial.
-/
def Elements.initial (A : C) : (yoneda.obj A).Elements :=
β¨Opposite.op A, π _β©
#align category_theory.colimit_adj.elements.initial CategoryTheory.ColimitAdj.Elements.initial
/-- Show that `elements.initial A` is initial in the category of elements for the `yoneda` functor.
-/
def isInitial (A : C) : IsInitial (Elements.initial A)
where
desc s := β¨s.pt.2.op, comp_id _β©
uniq s m w := by
simp_rw [β m.2]
dsimp [elements.initial]
simp
fac := by rintro s β¨β¨β©β©
#align category_theory.colimit_adj.is_initial CategoryTheory.ColimitAdj.isInitial
/--
`extend_along_yoneda A` is an extension of `A` to the presheaf category along the yoneda embedding.
`unique_extension_along_yoneda` shows it is unique among functors preserving colimits with this
property (up to isomorphism).
The first part of [MM92], Chapter I, Section 5, Corollary 4.
See Property 1 of <https://ncatlab.org/nlab/show/Yoneda+extension#properties>.
-/
def isExtensionAlongYoneda : (yoneda : C β₯€ Cα΅α΅ β₯€ Type uβ) β extendAlongYoneda A β
A :=
NatIso.ofComponents
(fun X =>
(colimit.isColimit _).coconePointUniqueUpToIso
(colimitOfDiagramTerminal (terminalOpOfInitial (isInitial _)) _))
(by
intro X Y f
change colimit.desc _ β¨_, _β© β« colimit.desc _ _ = colimit.desc _ _ β« _
apply colimit.hom_ext
intro j
rw [colimit.ΞΉ_desc_assoc, colimit.ΞΉ_desc_assoc]
change (colimit.ΞΉ _ _ β« π _) β« colimit.desc _ _ = _
rw [comp_id, colimit.ΞΉ_desc]
dsimp
rw [β A.map_comp]
congr 1)
#align category_theory.colimit_adj.is_extension_along_yoneda CategoryTheory.ColimitAdj.isExtensionAlongYoneda
/-- See Property 2 of https://ncatlab.org/nlab/show/Yoneda+extension#properties. -/
instance : PreservesColimits (extendAlongYoneda A) :=
(yonedaAdjunction A).leftAdjointPreservesColimits
/-- Show that the images of `X` after `extend_along_yoneda` and `Lan yoneda` are indeed isomorphic.
This follows from `category_theory.category_of_elements.costructured_arrow_yoneda_equivalence`.
-/
@[simps]
def extendAlongYonedaIsoKanApp (X) :
(extendAlongYoneda A).obj X β
((lan yoneda : (_ β₯€ β°) β₯€ _).obj A).obj X :=
let eq := CategoryOfElements.costructuredArrowYonedaEquivalence X
{ Hom := colimit.pre (Lan.diagram (yoneda : C β₯€ _ β₯€ Type uβ) A X) Eq.Functor
inv := colimit.pre ((CategoryOfElements.Ο X).leftOp β A) Eq.inverse
hom_inv_id' :=
by
erw [colimit.pre_pre ((category_of_elements.Ο X).leftOp β A) eq.inverse]
trans colimit.pre ((category_of_elements.Ο X).leftOp β A) (π _)
congr
Β· exact congr_arg functor.op (category_of_elements.from_to_costructured_arrow_eq X)
Β· ext
simp only [colimit.ΞΉ_pre]
erw [category.comp_id]
congr
inv_hom_id' :=
by
erw [colimit.pre_pre (Lan.diagram (yoneda : C β₯€ _ β₯€ Type uβ) A X) eq.functor]
trans colimit.pre (Lan.diagram (yoneda : C β₯€ _ β₯€ Type uβ) A X) (π _)
congr
Β· exact category_of_elements.to_from_costructured_arrow_eq X
Β· ext
simp only [colimit.ΞΉ_pre]
erw [category.comp_id]
congr }
#align category_theory.colimit_adj.extend_along_yoneda_iso_Kan_app CategoryTheory.ColimitAdj.extendAlongYonedaIsoKanApp
/-- Verify that `extend_along_yoneda` is indeed the left Kan extension along the yoneda embedding.
-/
@[simps]
def extendAlongYonedaIsoKan : extendAlongYoneda A β
(lan yoneda : (_ β₯€ β°) β₯€ _).obj A :=
NatIso.ofComponents (extendAlongYonedaIsoKanApp A)
(by
intro X Y f; simp
rw [extend_along_yoneda_map]
erw [colimit.pre_pre (Lan.diagram (yoneda : C β₯€ _ β₯€ Type uβ) A Y) (costructured_arrow.map f)]
erw [colimit.pre_pre (Lan.diagram (yoneda : C β₯€ _ β₯€ Type uβ) A Y)
(category_of_elements.costructured_arrow_yoneda_equivalence Y).Functor]
congr 1
apply category_of_elements.costructured_arrow_yoneda_equivalence_naturality)
#align category_theory.colimit_adj.extend_along_yoneda_iso_Kan CategoryTheory.ColimitAdj.extendAlongYonedaIsoKan
/-- extending `F β yoneda` along the yoneda embedding is isomorphic to `Lan F.op`. -/
@[simps]
def extendOfCompYonedaIsoLan {D : Type uβ} [SmallCategory D] (F : C β₯€ D) :
extendAlongYoneda (F β yoneda) β
lan F.op :=
Adjunction.natIsoOfRightAdjointNatIso (yonedaAdjunction (F β yoneda))
(Lan.adjunction (Type uβ) F.op)
(isoWhiskerRight curriedYonedaLemma' ((whiskeringLeft Cα΅α΅ Dα΅α΅ (Type uβ)).obj F.op : _))
#align category_theory.colimit_adj.extend_of_comp_yoneda_iso_Lan CategoryTheory.ColimitAdj.extendOfCompYonedaIsoLan
end ColimitAdj
open ColimitAdj
/-- `F β yoneda` is naturally isomorphic to `yoneda β Lan F.op`. -/
@[simps]
def compYonedaIsoYonedaCompLan {D : Type uβ} [SmallCategory D] (F : C β₯€ D) :
F β yoneda β
yoneda β lan F.op :=
(isExtensionAlongYoneda (F β yoneda)).symm βͺβ« isoWhiskerLeft yoneda (extendOfCompYonedaIsoLan F)
#align category_theory.comp_yoneda_iso_yoneda_comp_Lan CategoryTheory.compYonedaIsoYonedaCompLan
/-- Since `extend_along_yoneda A` is adjoint to `restricted_yoneda A`, if we use `A = yoneda`
then `restricted_yoneda A` is isomorphic to the identity, and so `extend_along_yoneda A` is as well.
-/
def extendAlongYonedaYoneda : extendAlongYoneda (yoneda : C β₯€ _) β
π _ :=
Adjunction.natIsoOfRightAdjointNatIso (yonedaAdjunction _) Adjunction.id restrictedYonedaYoneda
#align category_theory.extend_along_yoneda_yoneda CategoryTheory.extendAlongYonedaYoneda
-- Maybe this should be reducible or an abbreviation?
/-- A functor to the presheaf category in which everything in the image is representable (witnessed
by the fact that it factors through the yoneda embedding).
`cocone_of_representable` gives a cocone for this functor which is a colimit and has point `P`.
-/
def functorToRepresentables (P : Cα΅α΅ β₯€ Type uβ) : P.Elementsα΅α΅ β₯€ Cα΅α΅ β₯€ Type uβ :=
(CategoryOfElements.Ο P).leftOp β yoneda
#align category_theory.functor_to_representables CategoryTheory.functorToRepresentables
/-- This is a cocone with point `P` for the functor `functor_to_representables P`. It is shown in
`colimit_of_representable P` that this cocone is a colimit: that is, we have exhibited an arbitrary
presheaf `P` as a colimit of representables.
The construction of [MM92], Chapter I, Section 5, Corollary 3.
-/
def coconeOfRepresentable (P : Cα΅α΅ β₯€ Type uβ) : Cocone (functorToRepresentables P) :=
Cocone.extend (colimit.cocone _) (extendAlongYonedaYoneda.Hom.app P)
#align category_theory.cocone_of_representable CategoryTheory.coconeOfRepresentable
@[simp]
theorem coconeOfRepresentable_pt (P : Cα΅α΅ β₯€ Type uβ) : (coconeOfRepresentable P).pt = P :=
rfl
#align category_theory.cocone_of_representable_X CategoryTheory.coconeOfRepresentable_pt
-- Marking this as a simp lemma seems to make things more awkward.
/-- An explicit formula for the legs of the cocone `cocone_of_representable`. -/
theorem coconeOfRepresentable_ΞΉ_app (P : Cα΅α΅ β₯€ Type uβ) (j : P.Elementsα΅α΅) :
(coconeOfRepresentable P).ΞΉ.app j = (yonedaSectionsSmall _ _).inv j.unop.2 :=
colimit.ΞΉ_desc _ _
#align category_theory.cocone_of_representable_ΞΉ_app CategoryTheory.coconeOfRepresentable_ΞΉ_app
/-- The legs of the cocone `cocone_of_representable` are natural in the choice of presheaf. -/
theorem coconeOfRepresentable_naturality {Pβ Pβ : Cα΅α΅ β₯€ Type uβ} (Ξ± : Pβ βΆ Pβ) (j : Pβ.Elementsα΅α΅) :
(coconeOfRepresentable Pβ).ΞΉ.app j β« Ξ± =
(coconeOfRepresentable Pβ).ΞΉ.app ((CategoryOfElements.map Ξ±).op.obj j) :=
by
ext (T f)
simpa [cocone_of_representable_ΞΉ_app] using functor_to_types.naturality _ _ Ξ± f.op _
#align category_theory.cocone_of_representable_naturality CategoryTheory.coconeOfRepresentable_naturality
/-- The cocone with point `P` given by `the_cocone` is a colimit: that is, we have exhibited an
arbitrary presheaf `P` as a colimit of representables.
The result of [MM92], Chapter I, Section 5, Corollary 3.
-/
def colimitOfRepresentable (P : Cα΅α΅ β₯€ Type uβ) : IsColimit (coconeOfRepresentable P) :=
by
apply is_colimit.of_point_iso (colimit.is_colimit (functor_to_representables P))
change is_iso (colimit.desc _ (cocone.extend _ _))
rw [colimit.desc_extend, colimit.desc_cocone]
infer_instance
#align category_theory.colimit_of_representable CategoryTheory.colimitOfRepresentable
/-- Given two functors Lβ and Lβ which preserve colimits, if they agree when restricted to the
representable presheaves then they agree everywhere.
-/
def natIsoOfNatIsoOnRepresentables (Lβ Lβ : (Cα΅α΅ β₯€ Type uβ) β₯€ β°) [PreservesColimits Lβ]
[PreservesColimits Lβ] (h : yoneda β Lβ β
yoneda β Lβ) : Lβ β
Lβ :=
by
apply nat_iso.of_components _ _
Β· intro P
refine'
(is_colimit_of_preserves Lβ (colimit_of_representable P)).coconePointsIsoOfNatIso
(is_colimit_of_preserves Lβ (colimit_of_representable P)) _
apply functor.associator _ _ _ βͺβ« _
exact iso_whisker_left (category_of_elements.Ο P).leftOp h
Β· intro Pβ Pβ f
apply (is_colimit_of_preserves Lβ (colimit_of_representable Pβ)).hom_ext
intro j
dsimp only [id.def, is_colimit.cocone_points_iso_of_nat_iso_hom, iso_whisker_left_hom]
have :
(Lβ.map_cocone (cocone_of_representable Pβ)).ΞΉ.app j β« Lβ.map f =
(Lβ.map_cocone (cocone_of_representable Pβ)).ΞΉ.app
((category_of_elements.map f).op.obj j) :=
by
dsimp
rw [β Lβ.map_comp, cocone_of_representable_naturality]
rfl
rw [reassoc_of this, is_colimit.ΞΉ_map_assoc, is_colimit.ΞΉ_map]
dsimp
rw [β Lβ.map_comp, cocone_of_representable_naturality]
rfl
#align category_theory.nat_iso_of_nat_iso_on_representables CategoryTheory.natIsoOfNatIsoOnRepresentables
variable [HasColimits β°]
/-- Show that `extend_along_yoneda` is the unique colimit-preserving functor which extends `A` to
the presheaf category.
The second part of [MM92], Chapter I, Section 5, Corollary 4.
See Property 3 of https://ncatlab.org/nlab/show/Yoneda+extension#properties.
-/
def uniqueExtensionAlongYoneda (L : (Cα΅α΅ β₯€ Type uβ) β₯€ β°) (hL : yoneda β L β
A)
[PreservesColimits L] : L β
extendAlongYoneda A :=
natIsoOfNatIsoOnRepresentables _ _ (hL βͺβ« (isExtensionAlongYoneda _).symm)
#align category_theory.unique_extension_along_yoneda CategoryTheory.uniqueExtensionAlongYoneda
/-- If `L` preserves colimits and `β°` has them, then it is a left adjoint. This is a special case of
`is_left_adjoint_of_preserves_colimits` used to prove that.
-/
def isLeftAdjointOfPreservesColimitsAux (L : (Cα΅α΅ β₯€ Type uβ) β₯€ β°) [PreservesColimits L] :
IsLeftAdjoint L where
right := restrictedYoneda (yoneda β L)
adj := (yonedaAdjunction _).ofNatIsoLeft (uniqueExtensionAlongYoneda _ L (Iso.refl _)).symm
#align category_theory.is_left_adjoint_of_preserves_colimits_aux CategoryTheory.isLeftAdjointOfPreservesColimitsAux
/-- If `L` preserves colimits and `β°` has them, then it is a left adjoint. Note this is a (partial)
converse to `left_adjoint_preserves_colimits`.
-/
def isLeftAdjointOfPreservesColimits (L : (C β₯€ Type uβ) β₯€ β°) [PreservesColimits L] :
IsLeftAdjoint L :=
let e : _ β₯€ Type uβ β _ β₯€ Type uβ := (opOpEquivalence C).congr_left
let t := isLeftAdjointOfPreservesColimitsAux (e.Functor β L : _)
adjunction.left_adjoint_of_nat_iso (e.inv_fun_id_assoc _)
#align category_theory.is_left_adjoint_of_preserves_colimits CategoryTheory.isLeftAdjointOfPreservesColimits
end CategoryTheory
|
function p05_story ( )
%*****************************************************************************80
%
%% P05_STORY prints the "story" for problem p05.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 01 August 2011
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Larry Irvine, Samuel Marin, Philip Smith,
% Constrained Interpolation and Smoothing,
% Constructive Approximation,
% Volume 2, Number 1, December 1986, pages 129-151.
%
% Parameters:
%
% None
%
fprintf ( 1, '\n' );
fprintf ( 1, ' This example is due to Larry Irvine, Samuel Marin and Philip Smith.\n' );
fprintf ( 1, ' This data can cause problems for interpolation methods.\n' );
fprintf ( 1, ' There are sudden changes in direction, and at the same time,\n' );
fprintf ( 1, ' sparsely-placed data. This can cause an interpolant to overshoot\n' );
fprintf ( 1, ' the data in a way that seems implausible.\n' );
return
end
|
lemma content_eq_zero_iff [simp]: "content p = 0 \<longleftrightarrow> p = 0" |
/-
Copyright (c) 2022 Jireh Loreaux. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jireh Loreaux
! This file was ported from Lean 3 source module analysis.normed_space.star.exponential
! leanprover-community/mathlib commit 1e3201306d4d9eb1fd54c60d7c4510ad5126f6f9
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.NormedSpace.Exponential
/-! # The exponential map from selfadjoint to unitary
In this file, we establish various properties related to the map `λ a, exp ℂ A (I • a)` between the
subtypes `self_adjoint A` and `unitary A`.
## TODO
* Show that any exponential unitary is path-connected in `unitary A` to `1 : unitary A`.
* Prove any unitary whose distance to `1 : unitary A` is less than `1` can be expressed as an
exponential unitary.
* A unitary is in the path component of `1` if and only if it is a finite product of exponential
unitaries.
-/
section Star
variable {A : Type _} [NormedRing A] [NormedAlgebra β A] [StarRing A] [ContinuousStar A]
[CompleteSpace A] [StarModule β A]
open Complex
/-- The map from the selfadjoint real subspace to the unitary group. This map only makes sense
over ℂ. -/
@[simps]
noncomputable def selfAdjoint.expUnitary (a : selfAdjoint A) : unitary A :=
β¨exp β (I β’ a), exp_mem_unitary_of_mem_skewAdjoint _ (a.Prop.smul_mem_skewAdjoint conj_I)β©
#align self_adjoint.exp_unitary selfAdjoint.expUnitary
open selfAdjoint
theorem Commute.expUnitary_add {a b : selfAdjoint A} (h : Commute (a : A) (b : A)) :
expUnitary (a + b) = expUnitary a * expUnitary b :=
by
ext
have hcomm : Commute (I β’ (a : A)) (I β’ (b : A))
calc
_ = _ := by simp only [h.eq, Algebra.smul_mul_assoc, Algebra.mul_smul_comm]
simpa only [exp_unitary_coe, AddSubgroup.coe_add, smul_add] using exp_add_of_commute hcomm
#align commute.exp_unitary_add Commute.expUnitary_add
theorem Commute.expUnitary {a b : selfAdjoint A} (h : Commute (a : A) (b : A)) :
Commute (expUnitary a) (expUnitary b) :=
calc
expUnitary a * expUnitary b = expUnitary b * expUnitary a := by
rw [β h.exp_unitary_add, β h.symm.exp_unitary_add, add_comm]
#align commute.exp_unitary Commute.expUnitary
end Star
|
[GOAL]
x y : β
h :
((β (k : β€), x β (2 * βk + 1) * Ο / 2) β§ β (l : β€), y β (2 * βl + 1) * Ο / 2) β¨
(β k, x = (2 * βk + 1) * Ο / 2) β§ β l, y = (2 * βl + 1) * Ο / 2
β’ tan (x + y) = (tan x + tan y) / (1 - tan x * tan y)
[PROOFSTEP]
simpa only [β Complex.ofReal_inj, Complex.ofReal_sub, Complex.ofReal_add, Complex.ofReal_div, Complex.ofReal_mul,
Complex.ofReal_tan] using @Complex.tan_add (x : β) (y : β) (by convert h <;> norm_cast)
[GOAL]
x y : β
h :
((β (k : β€), x β (2 * βk + 1) * Ο / 2) β§ β (l : β€), y β (2 * βl + 1) * Ο / 2) β¨
(β k, x = (2 * βk + 1) * Ο / 2) β§ β l, y = (2 * βl + 1) * Ο / 2
β’ ((β (k : β€), βx β (2 * βk + 1) * βΟ / 2) β§ β (l : β€), βy β (2 * βl + 1) * βΟ / 2) β¨
(β k, βx = (2 * βk + 1) * βΟ / 2) β§ β l, βy = (2 * βl + 1) * βΟ / 2
[PROOFSTEP]
convert h
[GOAL]
case h.e'_1.h.e'_1.h.h.a
x y : β
h :
((β (k : β€), x β (2 * βk + 1) * Ο / 2) β§ β (l : β€), y β (2 * βl + 1) * Ο / 2) β¨
(β k, x = (2 * βk + 1) * Ο / 2) β§ β l, y = (2 * βl + 1) * Ο / 2
aβ : β€
β’ βx = (2 * βaβ + 1) * βΟ / 2 β x = (2 * βaβ + 1) * Ο / 2
[PROOFSTEP]
norm_cast
[GOAL]
case h.e'_1.h.e'_2.h.h.a
x y : β
h :
((β (k : β€), x β (2 * βk + 1) * Ο / 2) β§ β (l : β€), y β (2 * βl + 1) * Ο / 2) β¨
(β k, x = (2 * βk + 1) * Ο / 2) β§ β l, y = (2 * βl + 1) * Ο / 2
aβ : β€
β’ βy = (2 * βaβ + 1) * βΟ / 2 β y = (2 * βaβ + 1) * Ο / 2
[PROOFSTEP]
norm_cast
[GOAL]
case h.e'_2.h.e'_1.h.e'_2.h.a
x y : β
h :
((β (k : β€), x β (2 * βk + 1) * Ο / 2) β§ β (l : β€), y β (2 * βl + 1) * Ο / 2) β¨
(β k, x = (2 * βk + 1) * Ο / 2) β§ β l, y = (2 * βl + 1) * Ο / 2
xβ : β€
β’ βx = (2 * βxβ + 1) * βΟ / 2 β x = (2 * βxβ + 1) * Ο / 2
[PROOFSTEP]
norm_cast
[GOAL]
case h.e'_2.h.e'_2.h.e'_2.h.a
x y : β
h :
((β (k : β€), x β (2 * βk + 1) * Ο / 2) β§ β (l : β€), y β (2 * βl + 1) * Ο / 2) β¨
(β k, x = (2 * βk + 1) * Ο / 2) β§ β l, y = (2 * βl + 1) * Ο / 2
xβ : β€
β’ βy = (2 * βxβ + 1) * βΟ / 2 β y = (2 * βxβ + 1) * Ο / 2
[PROOFSTEP]
norm_cast
[GOAL]
x : β
β’ tan (2 * x) = β2 * tan x / (β1 - tan x ^ 2)
[PROOFSTEP]
have := @Complex.tan_two_mul x
[GOAL]
x : β
this : Complex.tan (2 * βx) = 2 * Complex.tan βx / (1 - Complex.tan βx ^ 2)
β’ tan (2 * x) = β2 * tan x / (β1 - tan x ^ 2)
[PROOFSTEP]
norm_cast at *
[GOAL]
ΞΈ : β
β’ tan ΞΈ β 0 β β (k : β€), ΞΈ β βk * Ο / 2
[PROOFSTEP]
rw [β Complex.ofReal_ne_zero, Complex.ofReal_tan, Complex.tan_ne_zero_iff]
[GOAL]
ΞΈ : β
β’ (β (k : β€), βΞΈ β βk * βΟ / 2) β β (k : β€), ΞΈ β βk * Ο / 2
[PROOFSTEP]
norm_cast
[GOAL]
ΞΈ : β
β’ tan ΞΈ = 0 β β k, ΞΈ = βk * Ο / 2
[PROOFSTEP]
rw [β not_iff_not, not_exists, β Ne, tan_ne_zero_iff]
[GOAL]
n : β€
β’ β k, βn * Ο / 2 = βk * Ο / 2
[PROOFSTEP]
use n
[GOAL]
β’ ContinuousOn tan {x | cos x β 0}
[PROOFSTEP]
suffices ContinuousOn (fun x => sin x / cos x) {x | cos x β 0}
by
have h_eq : (fun x => sin x / cos x) = tan := by ext1 x; rw [tan_eq_sin_div_cos]
rwa [h_eq] at this
[GOAL]
this : ContinuousOn (fun x => sin x / cos x) {x | cos x β 0}
β’ ContinuousOn tan {x | cos x β 0}
[PROOFSTEP]
have h_eq : (fun x => sin x / cos x) = tan := by ext1 x; rw [tan_eq_sin_div_cos]
[GOAL]
this : ContinuousOn (fun x => sin x / cos x) {x | cos x β 0}
β’ (fun x => sin x / cos x) = tan
[PROOFSTEP]
ext1 x
[GOAL]
case h
this : ContinuousOn (fun x => sin x / cos x) {x | cos x β 0}
x : β
β’ sin x / cos x = tan x
[PROOFSTEP]
rw [tan_eq_sin_div_cos]
[GOAL]
this : ContinuousOn (fun x => sin x / cos x) {x | cos x β 0}
h_eq : (fun x => sin x / cos x) = tan
β’ ContinuousOn tan {x | cos x β 0}
[PROOFSTEP]
rwa [h_eq] at this
[GOAL]
β’ ContinuousOn (fun x => sin x / cos x) {x | cos x β 0}
[PROOFSTEP]
exact continuousOn_sin.div continuousOn_cos fun x => id
[GOAL]
β’ ContinuousOn tan (Ioo (-(Ο / 2)) (Ο / 2))
[PROOFSTEP]
refine' ContinuousOn.mono continuousOn_tan fun x => _
[GOAL]
x : β
β’ x β Ioo (-(Ο / 2)) (Ο / 2) β x β {x | cos x β 0}
[PROOFSTEP]
simp only [and_imp, mem_Ioo, mem_setOf_eq, Ne.def]
[GOAL]
x : β
β’ -(Ο / 2) < x β x < Ο / 2 β Β¬cos x = 0
[PROOFSTEP]
rw [cos_eq_zero_iff]
[GOAL]
x : β
β’ -(Ο / 2) < x β x < Ο / 2 β Β¬β k, x = (2 * βk + 1) * Ο / 2
[PROOFSTEP]
rintro hx_gt hx_lt β¨r, hxr_eqβ©
[GOAL]
case intro
x : β
hx_gt : -(Ο / 2) < x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
β’ False
[PROOFSTEP]
cases' le_or_lt 0 r with h h
[GOAL]
case intro.inl
x : β
hx_gt : -(Ο / 2) < x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : 0 β€ r
β’ False
[PROOFSTEP]
rw [lt_iff_not_ge] at hx_lt
[GOAL]
case intro.inl
x : β
hx_gt : -(Ο / 2) < x
hx_lt : Β¬x β₯ Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : 0 β€ r
β’ False
[PROOFSTEP]
refine' hx_lt _
[GOAL]
case intro.inl
x : β
hx_gt : -(Ο / 2) < x
hx_lt : Β¬x β₯ Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : 0 β€ r
β’ x β₯ Ο / 2
[PROOFSTEP]
rw [hxr_eq, β one_mul (Ο / 2), mul_div_assoc, ge_iff_le, mul_le_mul_right (half_pos pi_pos)]
[GOAL]
case intro.inl
x : β
hx_gt : -(Ο / 2) < x
hx_lt : Β¬x β₯ Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : 0 β€ r
β’ 1 β€ 2 * βr + 1
[PROOFSTEP]
simp [h]
[GOAL]
case intro.inr
x : β
hx_gt : -(Ο / 2) < x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
β’ False
[PROOFSTEP]
rw [lt_iff_not_ge] at hx_gt
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
β’ False
[PROOFSTEP]
refine' hx_gt _
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
β’ -(Ο / 2) β₯ x
[PROOFSTEP]
rw [hxr_eq, β one_mul (Ο / 2), mul_div_assoc, ge_iff_le, neg_mul_eq_neg_mul, mul_le_mul_right (half_pos pi_pos)]
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
β’ 2 * βr + 1 β€ -1
[PROOFSTEP]
have hr_le : r β€ -1 := by rwa [Int.lt_iff_add_one_le, β le_neg_iff_add_nonpos_right] at h
[GOAL]
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
β’ r β€ -1
[PROOFSTEP]
rwa [Int.lt_iff_add_one_le, β le_neg_iff_add_nonpos_right] at h
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
hr_le : r β€ -1
β’ 2 * βr + 1 β€ -1
[PROOFSTEP]
rw [β le_sub_iff_add_le, mul_comm, β le_div_iff]
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
hr_le : r β€ -1
β’ βr β€ (-1 - 1) / 2
[PROOFSTEP]
norm_num
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
hr_le : r β€ -1
β’ βr β€ -1
[PROOFSTEP]
rw [β Int.cast_one, β Int.cast_neg]
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
hr_le : r β€ -1
β’ βr β€ β(-1)
[PROOFSTEP]
norm_cast
[GOAL]
case intro.inr
x : β
hx_gt : Β¬-(Ο / 2) β₯ x
hx_lt : x < Ο / 2
r : β€
hxr_eq : x = (2 * βr + 1) * Ο / 2
h : r < 0
hr_le : r β€ -1
β’ 0 < 2
[PROOFSTEP]
exact zero_lt_two
[GOAL]
this : -(Ο / 2) < Ο / 2
β’ Tendsto (fun x => tan βx) atBot atBot
[PROOFSTEP]
rw [tendsto_comp_coe_Ioo_atBot this]
[GOAL]
this : -(Ο / 2) < Ο / 2
β’ Tendsto tan (π[Ioi (-(Ο / 2))] (-(Ο / 2))) atBot
[PROOFSTEP]
exact tendsto_tan_neg_pi_div_two
[GOAL]
this : -(Ο / 2) < Ο / 2
β’ Tendsto (fun x => tan βx) atTop atTop
[PROOFSTEP]
rw [tendsto_comp_coe_Ioo_atTop this]
[GOAL]
this : -(Ο / 2) < Ο / 2
β’ Tendsto tan (π[Iio (Ο / 2)] (Ο / 2)) atTop
[PROOFSTEP]
exact tendsto_tan_pi_div_two
[GOAL]
x : β
β’ cos (arctan x) ^ 2 = β1 / (β1 + x ^ 2)
[PROOFSTEP]
rw_mod_cast [one_div, β inv_one_add_tan_sq (cos_arctan_pos x).ne', tan_arctan]
[GOAL]
x : β
β’ sin (arctan x) = x / sqrt (β1 + x ^ 2)
[PROOFSTEP]
rw_mod_cast [β tan_div_sqrt_one_add_tan_sq (cos_arctan_pos x), tan_arctan]
[GOAL]
x : β
β’ cos (arctan x) = β1 / sqrt (β1 + x ^ 2)
[PROOFSTEP]
rw_mod_cast [one_div, β inv_sqrt_one_add_tan_sq (cos_arctan_pos x), tan_arctan]
[GOAL]
x : β
h : x β Ioo (-1) 1
β’ arcsin x = arctan (x / sqrt (β1 - x ^ 2))
[PROOFSTEP]
rw_mod_cast [arctan_eq_arcsin, div_pow, sq_sqrt, one_add_div, div_div, β sqrt_mul, mul_div_cancel', sub_add_cancel,
sqrt_one, div_one]
[GOAL]
case hb
x : β
h : x β Ioo (β(Int.negSucc 0)) 1
β’ 1 - x ^ 2 β 0
[PROOFSTEP]
simp at h
[GOAL]
case hx
x : β
h : x β Ioo (β(Int.negSucc 0)) 1
β’ 0 β€ 1 - x ^ 2
[PROOFSTEP]
simp at h
[GOAL]
x : β
h : x β Ioo (β(Int.negSucc 0)) 1
β’ 1 - x ^ 2 β 0
[PROOFSTEP]
simp at h
[GOAL]
x : β
h : x β Ioo (β(Int.negSucc 0)) 1
β’ 0 β€ 1 - x ^ 2
[PROOFSTEP]
simp at h
[GOAL]
case hb
x : β
h : -1 < x β§ x < 1
β’ 1 - x ^ 2 β 0
[PROOFSTEP]
nlinarith [h.1, h.2]
[GOAL]
case hx
x : β
h : -1 < x β§ x < 1
β’ 0 β€ 1 - x ^ 2
[PROOFSTEP]
nlinarith [h.1, h.2]
[GOAL]
x : β
h : -1 < x β§ x < 1
β’ 1 - x ^ 2 β 0
[PROOFSTEP]
nlinarith [h.1, h.2]
[GOAL]
x : β
h : -1 < x β§ x < 1
β’ 0 β€ 1 - x ^ 2
[PROOFSTEP]
nlinarith [h.1, h.2]
[GOAL]
β’ arctan 0 = 0
[PROOFSTEP]
simp [arctan_eq_arcsin]
[GOAL]
x y : β
h : tan x = y
hx : x β Ioo (-(Ο / 2)) (Ο / 2)
β’ tan (arctan y) = tan x
[PROOFSTEP]
rw [tan_arctan, h]
[GOAL]
β’ Ο / 4 β Ioo (-(Ο / 2)) (Ο / 2)
[PROOFSTEP]
constructor
[GOAL]
case left
β’ -(Ο / 2) < Ο / 4
[PROOFSTEP]
linarith [pi_pos]
[GOAL]
case right
β’ Ο / 4 < Ο / 2
[PROOFSTEP]
linarith [pi_pos]
[GOAL]
x : β
β’ arctan (-x) = -arctan x
[PROOFSTEP]
simp [arctan_eq_arcsin, neg_div]
[GOAL]
x : β
h : 0 β€ x
β’ arctan x = arccos (sqrt (β1 + x ^ 2))β»ΒΉ
[PROOFSTEP]
rw [arctan_eq_arcsin, arccos_eq_arcsin]
[GOAL]
x : β
h : 0 β€ x
β’ arcsin (x / sqrt (β1 + x ^ 2)) = arcsin (sqrt (1 - (sqrt (β1 + x ^ 2))β»ΒΉ ^ 2))
x : β h : 0 β€ x β’ 0 β€ (sqrt (β1 + x ^ 2))β»ΒΉ
[PROOFSTEP]
swap
[GOAL]
x : β
h : 0 β€ x
β’ 0 β€ (sqrt (β1 + x ^ 2))β»ΒΉ
[PROOFSTEP]
exact inv_nonneg.2 (sqrt_nonneg _)
[GOAL]
x : β
h : 0 β€ x
β’ arcsin (x / sqrt (β1 + x ^ 2)) = arcsin (sqrt (1 - (sqrt (β1 + x ^ 2))β»ΒΉ ^ 2))
[PROOFSTEP]
congr 1
[GOAL]
case e_a
x : β
h : 0 β€ x
β’ x / sqrt (β1 + x ^ 2) = sqrt (1 - (sqrt (β1 + x ^ 2))β»ΒΉ ^ 2)
[PROOFSTEP]
rw_mod_cast [β sqrt_inv, sq_sqrt, β one_div, one_sub_div, add_sub_cancel', sqrt_div, sqrt_sq h]
[GOAL]
case e_a.hx
x : β
h : 0 β€ x
β’ 0 β€ x ^ 2
case e_a
x : β
h : 0 β€ x
β’ 1 + x ^ 2 β 0
case e_a x : β h : 0 β€ x β’ 0 β€ (1 + x ^ 2)β»ΒΉ
[PROOFSTEP]
all_goals positivity
[GOAL]
case e_a.hx
x : β
h : 0 β€ x
β’ 0 β€ x ^ 2
[PROOFSTEP]
positivity
[GOAL]
case e_a
x : β
h : 0 β€ x
β’ 1 + x ^ 2 β 0
[PROOFSTEP]
positivity
[GOAL]
case e_a
x : β
h : 0 β€ x
β’ 0 β€ (1 + x ^ 2)β»ΒΉ
[PROOFSTEP]
positivity
[GOAL]
x : β
h : 0 < x
β’ arccos x = arctan (sqrt (β1 - x ^ 2) / x)
[PROOFSTEP]
rw [arccos, eq_comm]
[GOAL]
x : β
h : 0 < x
β’ arctan (sqrt (β1 - x ^ 2) / x) = Ο / 2 - arcsin x
[PROOFSTEP]
refine' arctan_eq_of_tan_eq _ β¨_, _β©
[GOAL]
case refine'_1
x : β
h : 0 < x
β’ tan (Ο / 2 - arcsin x) = sqrt (β1 - x ^ 2) / x
[PROOFSTEP]
rw_mod_cast [tan_pi_div_two_sub, tan_arcsin, inv_div]
[GOAL]
case refine'_2
x : β
h : 0 < x
β’ -(Ο / 2) < Ο / 2 - arcsin x
[PROOFSTEP]
linarith only [arcsin_le_pi_div_two x, pi_pos]
[GOAL]
case refine'_3
x : β
h : 0 < x
β’ Ο / 2 - arcsin x < Ο / 2
[PROOFSTEP]
linarith only [arcsin_pos.2 h]
|
/-
Copyright (c) 2022 Ivan Sadofschi Costa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ivan Sadofschi Costa
! This file was ported from Lean 3 source module topology.continuous_function.t0_sierpinski
! leanprover-community/mathlib commit dc6c365e751e34d100e80fe6e314c3c3e0fd2988
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Topology.Order
import Mathlib.Topology.Sets.Opens
import Mathlib.Topology.ContinuousFunction.Basic
/-!
# Any T0 space embeds in a product of copies of the Sierpinski space.
We consider `Prop` with the Sierpinski topology. If `X` is a topological space, there is a
continuous map `productOfMemOpens` from `X` to `Opens X → Prop` which is the product of the maps
`X → Prop` given by `x ↦ x ∈ u`.
The map `productOfMemOpens` is always inducing. Whenever `X` is T0, `productOfMemOpens` is
also injective and therefore an embedding.
-/
noncomputable section
namespace TopologicalSpace
theorem eq_induced_by_maps_to_sierpinski (X : Type _) [t : TopologicalSpace X] :
    t = ⨅ u : Opens X, sierpinskiSpace.induced (· ∈ u) := by
  apply le_antisymm
  · rw [le_infᵢ_iff]
    exact fun u => Continuous.le_induced (isOpen_iff_continuous_mem.mp u.2)
  · intro u h
    rw [← generateFrom_unionᵢ_isOpen]
    apply isOpen_generateFrom_of_mem
    simp only [Set.mem_unionᵢ, Set.mem_setOf_eq, isOpen_induced_iff]
    exact ⟨⟨u, h⟩, {True}, isOpen_singleton_true, by simp [Set.preimage]⟩
#align topological_space.eq_induced_by_maps_to_sierpinski TopologicalSpace.eq_induced_by_maps_to_sierpinski
variable (X : Type _) [TopologicalSpace X]
/-- The continuous map from `X` to the product of copies of the Sierpinski space, (one copy for each
open subset `u` of `X`). The `u` coordinate of `productOfMemOpens x` is given by `x ∈ u`.
-/
def productOfMemOpens : C(X, Opens X → Prop) where
  toFun x u := x ∈ u
continuous_toFun := continuous_pi_iff.2 fun u => continuous_Prop.2 u.isOpen
#align topological_space.product_of_mem_opens TopologicalSpace.productOfMemOpens
theorem productOfMemOpens_inducing : Inducing (productOfMemOpens X) := by
  convert inducing_infᵢ_to_pi fun (u : Opens X) (x : X) => x ∈ u
apply eq_induced_by_maps_to_sierpinski
#align topological_space.product_of_mem_opens_inducing TopologicalSpace.productOfMemOpens_inducing
theorem productOfMemOpens_injective [T0Space X] : Function.Injective (productOfMemOpens X) := by
intro x1 x2 h
apply Inseparable.eq
  rw [← Inducing.inseparable_iff (productOfMemOpens_inducing X), h]
#align topological_space.product_of_mem_opens_injective TopologicalSpace.productOfMemOpens_injective
theorem productOfMemOpens_embedding [T0Space X] : Embedding (productOfMemOpens X) :=
Embedding.mk (productOfMemOpens_inducing X) (productOfMemOpens_injective X)
#align topological_space.product_of_mem_opens_embedding TopologicalSpace.productOfMemOpens_embedding
end TopologicalSpace
|
lemma normalize_monom [simp]: "normalize (monom a n) = monom (normalize a) n" |
using Smoothing
using Test
# Smoothing already-linear (counting-number) data should return it unchanged.
@test Smoothing.binomial([1.0,2.0,3.0,4.0,5.0], 1) == [1.0,2.0,3.0,4.0,5.0]
@test Smoothing.binomial([1.0,2.0,3.0,4.0,5.0], 3) == [1.0,2.0,3.0,4.0,5.0]
println("Passed smoothing of counting numbers tests (arguments 1 and 3)")
# Smoothing a manually created triangular pulse rounds its peak from 6.0 down to 5.0.
@test Smoothing.binomial([0.0,2.0,4.0,6.0,4.0, 2.0, 0.0], 1) == [0.0, 2.0, 4.0, 5.0, 4.0, 2.0, 0.0]
println("Passed smoothing of manually created triangle pulse")
println("All 3 tests passed!")
|
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
Casts and heterogeneous equality. See also init.datatypes and init.logic.
-/
import logic.eq logic.quantifiers
open eq.ops
namespace heq
universe variable u
variables {A B C : Type.{u}} {a a' : A} {b b' : B} {c : C}
theorem drec_on {C : Ξ {B : Type} (b : B), a == b β Type} (Hβ : a == b) (Hβ : C a (refl a)) :
C b Hβ :=
heq.rec (Ξ» Hβ : a == a, show C a Hβ, from Hβ) Hβ Hβ
theorem to_cast_eq (H : a == b) : cast (type_eq_of_heq H) a = b :=
drec_on H !cast_eq
end heq
section
universe variables u v
variables {A A' B C : Type.{u}} {P P' : A β Type.{v}} {a a' : A} {b : B}
theorem hcongr_fun {f : Ξ x, P x} {f' : Ξ x, P' x} (a : A) (Hβ : f == f') (Hβ : P = P') :
f a == f' a :=
begin
cases Hβ, cases Hβ, reflexivity
end
theorem hcongr {P' : A' β Type} {f : Ξ a, P a} {f' : Ξ a', P' a'} {a : A} {a' : A'}
(Hf : f == f') (HP : P == P') (Ha : a == a') : f a == f' a' :=
begin
cases Ha, cases HP, cases Hf, reflexivity
end
theorem hcongr_arg (f : Ξ x, P x) {a b : A} (H : a = b) : f a == f b :=
H βΈ (heq.refl (f a))
end
section
variables {A : Type} {B : A β Type} {C : Ξ a, B a β Type} {D : Ξ a b, C a b β Type}
variables {a a' : A} {b : B a} {b' : B a'} {c : C a b} {c' : C a' b'}
theorem hcongr_arg2 (f : Ξ a b, C a b) (Ha : a = a') (Hb : b == b') : f a b == f a' b' :=
hcongr (hcongr_arg f Ha) (hcongr_arg C Ha) Hb
theorem hcongr_arg3 (f : Ξ a b c, D a b c) (Ha : a = a') (Hb : b == b') (Hc : c == c')
: f a b c == f a' b' c' :=
hcongr (hcongr_arg2 f Ha Hb) (hcongr_arg2 D Ha Hb) Hc
end
section
universe variables u v
variables {A A' B C : Type.{u}} {P P' : A β Type.{v}} {a a' : A} {b : B}
-- should Hβ be explicit (useful in e.g. hproof_irrel)
theorem eq_rec_to_heq {Hβ : a = a'} {p : P a} {p' : P a'} (Hβ : eq.rec_on Hβ p = p') : p == p' :=
by subst Hβ; subst Hβ
theorem cast_to_heq {Hβ : A = B} (Hβ : cast Hβ a = b) : a == b :=
eq_rec_to_heq Hβ
theorem hproof_irrel {a b : Prop} (H : a = b) (Hβ : a) (Hβ : b) : Hβ == Hβ :=
eq_rec_to_heq (proof_irrel (cast H Hβ) Hβ)
--TODO: generalize to eq.rec. This is a special case of rec_on_comp in eq.lean
theorem cast_trans (Hab : A = B) (Hbc : B = C) (a : A) :
cast Hbc (cast Hab a) = cast (Hab β¬ Hbc) a :=
by subst Hab
theorem pi_eq (H : P = P') : (Ξ x, P x) = (Ξ x, P' x) :=
by subst H
theorem rec_on_app (H : P = P') (f : Ξ x, P x) (a : A) : eq.rec_on H f a == f a :=
by subst H
theorem rec_on_pull (H : P = P') (f : Ξ x, P x) (a : A) :
eq.rec_on H f a = eq.rec_on (congr_fun H a) (f a) :=
eq_of_heq (calc
eq.rec_on H f a == f a : rec_on_app H f a
... == eq.rec_on (congr_fun H a) (f a) : heq.symm (eq_rec_heq (congr_fun H a) (f a)))
theorem cast_app (H : P = P') (f : Ξ x, P x) (a : A) : cast (pi_eq H) f a == f a :=
by subst H
end
-- function extensionality wrt heterogeneous equality
theorem hfunext {A : Type} {B : A β Type} {B' : A β Type} {f : Ξ x, B x} {g : Ξ x, B' x}
(H : β a, f a == g a) : f == g :=
cast_to_heq (funext (Ξ» a, eq_of_heq (heq.trans (cast_app (funext (Ξ» x, type_eq_of_heq (H x))) f a) (H a))))
section
variables {A : Type} {B : A β Type} {C : Ξ a, B a β Type} {D : Ξ a b, C a b β Type}
{E : Ξ a b c, D a b c β Type} {F : Type}
variables {a a' : A}
{b : B a} {b' : B a'}
{c : C a b} {c' : C a' b'}
{d : D a b c} {d' : D a' b' c'}
theorem hcongr_arg4 (f : Ξ a b c d, E a b c d)
(Ha : a = a') (Hb : b == b') (Hc : c == c') (Hd : d == d') : f a b c d == f a' b' c' d' :=
hcongr (hcongr_arg3 f Ha Hb Hc) (hcongr_arg3 E Ha Hb Hc) Hd
theorem dcongr_arg2 (f : Ξ a, B a β F) (Ha : a = a') (Hb : eq.rec_on Ha b = b')
: f a b = f a' b' :=
eq_of_heq (hcongr_arg2 f Ha (eq_rec_to_heq Hb))
theorem dcongr_arg3 (f : Ξ a b, C a b β F) (Ha : a = a') (Hb : eq.rec_on Ha b = b')
(Hc : cast (dcongr_arg2 C Ha Hb) c = c') : f a b c = f a' b' c' :=
eq_of_heq (hcongr_arg3 f Ha (eq_rec_to_heq Hb) (eq_rec_to_heq Hc))
theorem dcongr_arg4 (f : Ξ a b c, D a b c β F) (Ha : a = a') (Hb : eq.rec_on Ha b = b')
(Hc : cast (dcongr_arg2 C Ha Hb) c = c')
(Hd : cast (dcongr_arg3 D Ha Hb Hc) d = d') : f a b c d = f a' b' c' d' :=
eq_of_heq (hcongr_arg4 f Ha (eq_rec_to_heq Hb) (eq_rec_to_heq Hc) (eq_rec_to_heq Hd))
-- mixed versions (we want them for example if C a' b' is a subsingleton, like a proposition.
-- Then proving eq is easier than proving heq)
theorem hdcongr_arg3 (f : Ξ a b, C a b β F) (Ha : a = a') (Hb : b == b')
(Hc : cast (eq_of_heq (hcongr_arg2 C Ha Hb)) c = c')
: f a b c = f a' b' c' :=
eq_of_heq (hcongr_arg3 f Ha Hb (eq_rec_to_heq Hc))
theorem hhdcongr_arg4 (f : Ξ a b c, D a b c β F) (Ha : a = a') (Hb : b == b')
(Hc : c == c')
(Hd : cast (dcongr_arg3 D Ha (!eq.rec_on_irrel_arg β¬ heq.to_cast_eq Hb)
(!eq.rec_on_irrel_arg β¬ heq.to_cast_eq Hc)) d = d')
: f a b c d = f a' b' c' d' :=
eq_of_heq (hcongr_arg4 f Ha Hb Hc (eq_rec_to_heq Hd))
theorem hddcongr_arg4 (f : Ξ a b c, D a b c β F) (Ha : a = a') (Hb : b == b')
(Hc : cast (eq_of_heq (hcongr_arg2 C Ha Hb)) c = c')
(Hd : cast (hdcongr_arg3 D Ha Hb Hc) d = d')
: f a b c d = f a' b' c' d' :=
eq_of_heq (hcongr_arg4 f Ha Hb (eq_rec_to_heq Hc) (eq_rec_to_heq Hd))
end
|
/-
Copyright (c) 2020 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser, Utensil Song
-/
import algebra.ring_quot
import linear_algebra.tensor_algebra
import linear_algebra.exterior_algebra
import linear_algebra.quadratic_form.basic
/-!
# Clifford Algebras
We construct the Clifford algebra of a module `M` over a commutative ring `R`, equipped with
a quadratic_form `Q`.
## Notation
The Clifford algebra of the `R`-module `M` equipped with a quadratic_form `Q` is denoted as
`clifford_algebra Q`.
Given a linear morphism `f : M → A` from a module `M` to another `R`-algebra `A`, such that
`cond : ∀ m, f m * f m = algebra_map _ _ (Q m)`, there is a (unique) lift of `f` to an `R`-algebra
morphism, which is denoted `clifford_algebra.lift Q f cond`.
The canonical linear map `M → clifford_algebra Q` is denoted `clifford_algebra.ι Q`.
## Theorems
The main theorems proved ensure that `clifford_algebra Q` satisfies the universal property
of the Clifford algebra.
1. `ι_comp_lift` is the fact that the composition of `ι Q` with `lift Q f cond` agrees with `f`.
2. `lift_unique` ensures the uniqueness of `lift Q f cond` with respect to 1.
Additionally, when `Q = 0` an `alg_equiv` to the `exterior_algebra` is provided as `as_exterior`.
## Implementation details
The Clifford algebra of `M` is constructed as a quotient of the tensor algebra, as follows.
1. We define a relation `clifford_algebra.rel Q` on `tensor_algebra R M`.
This is the smallest relation which identifies squares of elements of `M` with `Q m`.
2. The Clifford algebra is the quotient of the tensor algebra by this relation.
This file is almost identical to `linear_algebra/exterior_algebra.lean`.
-/
variables {R : Type*} [comm_ring R]
variables {M : Type*} [add_comm_group M] [module R M]
variables (Q : quadratic_form R M)
variable {n : β}
namespace clifford_algebra
open tensor_algebra
/-- `rel` relates each `ι m * ι m`, for `m : M`, with `Q m`.
The Clifford algebra of `M` is defined as the quotient modulo this relation.
-/
inductive rel : tensor_algebra R M → tensor_algebra R M → Prop
| of (m : M) : rel (ι R m * ι R m) (algebra_map R _ (Q m))
end clifford_algebra
/--
The Clifford algebra of an `R`-module `M` equipped with a quadratic_form `Q`.
-/
@[derive [inhabited, ring, algebra R]]
def clifford_algebra := ring_quot (clifford_algebra.rel Q)
namespace clifford_algebra
/--
The canonical linear map `M ββ[R] clifford_algebra Q`.
-/
def ΞΉ : M ββ[R] clifford_algebra Q :=
(ring_quot.mk_alg_hom R _).to_linear_map.comp (tensor_algebra.ΞΉ R)
/-- As well as being linear, `ΞΉ Q` squares to the quadratic form -/
@[simp]
theorem ΞΉ_sq_scalar (m : M) : ΞΉ Q m * ΞΉ Q m = algebra_map R _ (Q m) :=
begin
erw [βalg_hom.map_mul, ring_quot.mk_alg_hom_rel R (rel.of m), alg_hom.commutes],
refl,
end
variables {Q} {A : Type*} [semiring A] [algebra R A]
@[simp]
theorem comp_ΞΉ_sq_scalar (g : clifford_algebra Q ββ[R] A) (m : M) :
g (ΞΉ Q m) * g (ΞΉ Q m) = algebra_map _ _ (Q m) :=
by rw [βalg_hom.map_mul, ΞΉ_sq_scalar, alg_hom.commutes]
variables (Q)
/--
Given a linear map `f : M ββ[R] A` into an `R`-algebra `A`, which satisfies the condition:
`cond : β m : M, f m * f m = Q(m)`, this is the canonical lift of `f` to a morphism of `R`-algebras
from `clifford_algebra Q` to `A`.
-/
@[simps symm_apply]
def lift :
{f : M ββ[R] A // β m, f m * f m = algebra_map _ _ (Q m)} β (clifford_algebra Q ββ[R] A) :=
{ to_fun := Ξ» f,
ring_quot.lift_alg_hom R β¨tensor_algebra.lift R (f : M ββ[R] A),
(Ξ» x y (h : rel Q x y), by
{ induction h,
rw [alg_hom.commutes, alg_hom.map_mul, tensor_algebra.lift_ΞΉ_apply, f.prop], })β©,
inv_fun := Ξ» F, β¨F.to_linear_map.comp (ΞΉ Q), Ξ» m, by rw [
linear_map.comp_apply, alg_hom.to_linear_map_apply, comp_ΞΉ_sq_scalar]β©,
left_inv := Ξ» f, by { ext,
simp only [ΞΉ, alg_hom.to_linear_map_apply, function.comp_app, linear_map.coe_comp,
subtype.coe_mk, ring_quot.lift_alg_hom_mk_alg_hom_apply,
tensor_algebra.lift_ΞΉ_apply] },
right_inv := Ξ» F, by { ext,
simp only [ΞΉ, alg_hom.comp_to_linear_map, alg_hom.to_linear_map_apply, function.comp_app,
linear_map.coe_comp, subtype.coe_mk, ring_quot.lift_alg_hom_mk_alg_hom_apply,
tensor_algebra.lift_ΞΉ_apply] } }
variables {Q}
@[simp]
theorem ΞΉ_comp_lift (f : M ββ[R] A) (cond : β m, f m * f m = algebra_map _ _ (Q m)) :
(lift Q β¨f, condβ©).to_linear_map.comp (ΞΉ Q) = f :=
(subtype.mk_eq_mk.mp $ (lift Q).symm_apply_apply β¨f, condβ©)
@[simp]
theorem lift_ΞΉ_apply (f : M ββ[R] A) (cond : β m, f m * f m = algebra_map _ _ (Q m)) (x) :
lift Q β¨f, condβ© (ΞΉ Q x) = f x :=
(linear_map.ext_iff.mp $ ΞΉ_comp_lift f cond) x
@[simp]
theorem lift_unique (f : M ββ[R] A) (cond : β m : M, f m * f m = algebra_map _ _ (Q m))
(g : clifford_algebra Q ββ[R] A) :
g.to_linear_map.comp (ΞΉ Q) = f β g = lift Q β¨f, condβ© :=
begin
convert (lift Q).symm_apply_eq,
rw lift_symm_apply,
simp only,
end
attribute [irreducible] clifford_algebra ΞΉ lift
@[simp]
theorem lift_comp_ΞΉ (g : clifford_algebra Q ββ[R] A) :
lift Q β¨g.to_linear_map.comp (ΞΉ Q), comp_ΞΉ_sq_scalar _β© = g :=
begin
convert (lift Q).apply_symm_apply g,
rw lift_symm_apply,
refl,
end
/-- See note [partially-applied ext lemmas]. -/
@[ext]
theorem hom_ext {A : Type*} [semiring A] [algebra R A] {f g : clifford_algebra Q ββ[R] A} :
f.to_linear_map.comp (ΞΉ Q) = g.to_linear_map.comp (ΞΉ Q) β f = g :=
begin
intro h,
apply (lift Q).symm.injective,
rw [lift_symm_apply, lift_symm_apply],
simp only [h],
end
/-- If `C` holds for the `algebra_map` of `r : R` into `clifford_algebra Q`, the `ΞΉ` of `x : M`,
and is preserved under addition and muliplication, then it holds for all of `clifford_algebra Q`.
-/
-- This proof closely follows `tensor_algebra.induction`
@[elab_as_eliminator]
lemma induction {C : clifford_algebra Q β Prop}
(h_grade0 : β r, C (algebra_map R (clifford_algebra Q) r))
(h_grade1 : β x, C (ΞΉ Q x))
(h_mul : β a b, C a β C b β C (a * b))
(h_add : β a b, C a β C b β C (a + b))
(a : clifford_algebra Q) :
C a :=
begin
-- the arguments are enough to construct a subalgebra, and a mapping into it from M
let s : subalgebra R (clifford_algebra Q) :=
{ carrier := C,
mul_mem' := h_mul,
add_mem' := h_add,
algebra_map_mem' := h_grade0, },
let of : { f : M ββ[R] s // β m, f m * f m = algebra_map _ _ (Q m) } :=
β¨(ΞΉ Q).cod_restrict s.to_submodule h_grade1,
Ξ» m, subtype.eq $ ΞΉ_sq_scalar Q m β©,
-- the mapping through the subalgebra is the identity
have of_id : alg_hom.id R (clifford_algebra Q) = s.val.comp (lift Q of),
{ ext,
simp [of], },
-- finding a proof is finding an element of the subalgebra
convert subtype.prop (lift Q of a),
exact alg_hom.congr_fun of_id a,
end
/-- A Clifford algebra with a zero quadratic form is isomorphic to an `exterior_algebra` -/
def as_exterior : clifford_algebra (0 : quadratic_form R M) ββ[R] exterior_algebra R M :=
alg_equiv.of_alg_hom
(clifford_algebra.lift 0 β¨(exterior_algebra.ΞΉ R),
by simp only [forall_const, ring_hom.map_zero,
exterior_algebra.ΞΉ_sq_zero, quadratic_form.zero_apply]β©)
(exterior_algebra.lift R β¨(ΞΉ (0 : quadratic_form R M)),
by simp only [forall_const, ring_hom.map_zero,
quadratic_form.zero_apply, ΞΉ_sq_scalar]β©)
(exterior_algebra.hom_ext $ linear_map.ext $
by simp only [alg_hom.comp_to_linear_map, linear_map.coe_comp,
function.comp_app, alg_hom.to_linear_map_apply,
exterior_algebra.lift_ΞΉ_apply, clifford_algebra.lift_ΞΉ_apply,
alg_hom.to_linear_map_id, linear_map.id_comp, eq_self_iff_true, forall_const])
(clifford_algebra.hom_ext $ linear_map.ext $
by simp only [alg_hom.comp_to_linear_map, linear_map.coe_comp,
function.comp_app, alg_hom.to_linear_map_apply,
clifford_algebra.lift_ΞΉ_apply, exterior_algebra.lift_ΞΉ_apply,
alg_hom.to_linear_map_id, linear_map.id_comp, eq_self_iff_true, forall_const])
/-- The symmetric product of vectors is a scalar -/
lemma ΞΉ_mul_ΞΉ_add_swap (a b : M) :
ΞΉ Q a * ΞΉ Q b + ΞΉ Q b * ΞΉ Q a = algebra_map R _ (quadratic_form.polar Q a b) :=
calc ΞΉ Q a * ΞΉ Q b + ΞΉ Q b * ΞΉ Q a
= ΞΉ Q (a + b) * ΞΉ Q (a + b) - ΞΉ Q a * ΞΉ Q a - ΞΉ Q b * ΞΉ Q b :
by { rw [(ΞΉ Q).map_add, mul_add, add_mul, add_mul], abel, }
... = algebra_map R _ (Q (a + b)) - algebra_map R _ (Q a) - algebra_map R _ (Q b) :
by rw [ΞΉ_sq_scalar, ΞΉ_sq_scalar, ΞΉ_sq_scalar]
... = algebra_map R _ (Q (a + b) - Q a - Q b) :
by rw [βring_hom.map_sub, βring_hom.map_sub]
... = algebra_map R _ (quadratic_form.polar Q a b) : rfl
section map
variables {Mβ Mβ Mβ : Type*}
variables [add_comm_group Mβ] [add_comm_group Mβ] [add_comm_group Mβ]
variables [module R Mβ] [module R Mβ] [module R Mβ]
variables (Qβ : quadratic_form R Mβ) (Qβ : quadratic_form R Mβ) (Qβ : quadratic_form R Mβ)
/-- Any linear map that preserves the quadratic form lifts to an `alg_hom` between algebras.
See `clifford_algebra.equiv_of_isometry` for the case when `f` is a `quadratic_form.isometry`. -/
def map (f : Mβ ββ[R] Mβ) (hf : β m, Qβ (f m) = Qβ m) :
clifford_algebra Qβ ββ[R] clifford_algebra Qβ :=
clifford_algebra.lift Qβ β¨(clifford_algebra.ΞΉ Qβ).comp f,
Ξ» m, (ΞΉ_sq_scalar _ _).trans $ ring_hom.congr_arg _ $ hf mβ©
@[simp]
lemma map_comp_ΞΉ (f : Mβ ββ[R] Mβ) (hf) :
(map Qβ Qβ f hf).to_linear_map.comp (ΞΉ Qβ) = (ΞΉ Qβ).comp f :=
ΞΉ_comp_lift _ _
@[simp]
lemma map_apply_ΞΉ (f : Mβ ββ[R] Mβ) (hf) (m : Mβ):
map Qβ Qβ f hf (ΞΉ Qβ m) = ΞΉ Qβ (f m) :=
lift_ΞΉ_apply _ _ m
@[simp]
lemma map_id :
map Qβ Qβ (linear_map.id : Mβ ββ[R] Mβ) (Ξ» m, rfl) = alg_hom.id R (clifford_algebra Qβ) :=
by { ext m, exact map_apply_ΞΉ _ _ _ _ m }
@[simp]
lemma map_comp_map (f : Mβ ββ[R] Mβ) (hf) (g : Mβ ββ[R] Mβ) (hg) :
(map Qβ Qβ f hf).comp (map Qβ Qβ g hg) = map Qβ Qβ (f.comp g) (Ξ» m, (hf _).trans $ hg m) :=
begin
ext m,
dsimp only [linear_map.comp_apply, alg_hom.comp_apply, alg_hom.to_linear_map_apply,
alg_hom.id_apply],
rw [map_apply_ΞΉ, map_apply_ΞΉ, map_apply_ΞΉ, linear_map.comp_apply],
end
variables {Qβ Qβ Qβ}
/-- Two `clifford_algebra`s are equivalent as algebras if their quadratic forms are
equivalent. -/
@[simps apply]
def equiv_of_isometry (e : Qβ.isometry Qβ) :
clifford_algebra Qβ ββ[R] clifford_algebra Qβ :=
alg_equiv.of_alg_hom
(map Qβ Qβ e e.map_app)
(map Qβ Qβ e.symm e.symm.map_app)
((map_comp_map _ _ _ _ _ _ _).trans $ begin
convert map_id _ using 2,
ext m,
exact e.to_linear_equiv.apply_symm_apply m,
end)
((map_comp_map _ _ _ _ _ _ _).trans $ begin
convert map_id _ using 2,
ext m,
exact e.to_linear_equiv.symm_apply_apply m,
end)
@[simp]
lemma equiv_of_isometry_symm (e : Qβ.isometry Qβ) :
(equiv_of_isometry e).symm = equiv_of_isometry e.symm := rfl
@[simp]
lemma equiv_of_isometry_trans (eββ : Qβ.isometry Qβ) (eββ : Qβ.isometry Qβ) :
(equiv_of_isometry eββ).trans (equiv_of_isometry eββ) = equiv_of_isometry (eββ.trans eββ) :=
by { ext x, exact alg_hom.congr_fun (map_comp_map Qβ Qβ Qβ _ _ _ _) x }
@[simp]
lemma equiv_of_isometry_refl :
(equiv_of_isometry $ quadratic_form.isometry.refl Qβ) = alg_equiv.refl :=
by { ext x, exact alg_hom.congr_fun (map_id Qβ) x }
end map
end clifford_algebra
namespace tensor_algebra
variables {Q}
/-- The canonical image of the `tensor_algebra` in the `clifford_algebra`, which maps
`tensor_algebra.ΞΉ R x` to `clifford_algebra.ΞΉ Q x`. -/
def to_clifford : tensor_algebra R M ββ[R] clifford_algebra Q :=
tensor_algebra.lift R (clifford_algebra.ΞΉ Q)
@[simp] lemma to_clifford_ΞΉ (m : M) : (tensor_algebra.ΞΉ R m).to_clifford = clifford_algebra.ΞΉ Q m :=
by simp [to_clifford]
end tensor_algebra
|
# BitVector
# ---------
# This is slow, but simple and valuable as a reference implementation.
function rank1(b::BitVector, i::Int)
n = 0
for iβ² in 1:i
n += b[iβ²]
end
return n
end
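# Illustrative usage (1-based positions; rank1(b, i) counts the set bits in b[1:i]):
#   b = BitVector([true, false, true, true])
#   rank1(b, 3) == 2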
|
import implementation.model.predicate
import implementation.model.sys_state
import implementation.spec.main
import implementation.proof.misc
-- This file contains proofs about proposers (or what can be associated with
-- them in our co-located Paxos implementation).
--
-- NOTE(gnanabit): comments are omitted for facts that are "obvious" or not
-- particularly revealing about paxos.
variables {pid_t : Type} [linear_order pid_t] [fintype pid_t] {value_t : Type}
{is_quorum : finset pid_t β Prop} [decidable_pred is_quorum]
[quorum_assumption is_quorum] {vals : pid_t β value_t}
-- The ballot b has been proposed with value v if there is a server who has sent
-- a p2a with this proposal.
def proposed
(s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t))
(b : ballot pid_t) (v : value_t) :=
β (proposer : pid_t) (e β s.network proposer),
(envelope.msg e) = message.p2a {bal := b, val := v}
lemma proposed_stable (b : ballot pid_t) (v : value_t) : predicate.stable
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
proposed s b v) :=
begin
intros u w holds_at_u u_pn_w,
rcases holds_at_u with β¨proposer, proposal_env, sent_by_proposer, is_proposalβ©,
exact β¨proposer, proposal_env, sys_state.ntwk_subset sent_by_proposer u_pn_w, is_proposalβ©,
end
def proposed_by
(s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t))
(b : ballot pid_t) (proposer : pid_t) :=
β (v : value_t) (e β s.network proposer),
(envelope.msg e) = message.p2a {bal := b, val := v}
lemma none_proposed_at_init
(s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t))
(hs : s.is_initial) : β (b : ballot pid_t) (p : pid_t), Β¬proposed_by s b p :=
begin
intros b p,
rintros β¨v, e, he, e_is_proposalβ©,
specialize hs p,
unfold protocol.init at hs,
injection hs with unused key, clear unused,
rw β key at he,
cases he,
{ rw he at e_is_proposal, injection e_is_proposal },
rw set.mem_singleton_iff at he,
rw he at e_is_proposal,
injection e_is_proposal
end
-- If a proposer p proposed ballot b, then p is the address of b.
lemma proposer_is_ballot_address : predicate.invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (p : pid_t), proposed_by s b p β b.address = p) :=
begin
suffices key : predicate.inductive_invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (p : pid_t), proposed_by s b p β b.address = p),
by { exact predicate.ind_inv_is_inv key },
split,
{ intros s hs b p wrong,
exact (none_proposed_at_init s hs b p wrong).elim },
intros u w hu,
rintros β¨receiver, sender, e, _h1, _h2, _h3, w_receiver, _h4, w_not_receiverβ©,
clear_,
unfold proposed_by,
intros b p,
cases decidable.em (p = receiver),
swap,
{ rw w_not_receiver p h,
intro hyp,
exact hu b p hyp },
cases e with msg __;
unfold envelope.msg at w_receiver,
clear __,
rintros β¨v, e, he, e_is_proposalβ©,
rw h at he,
rw w_receiver at he,
cases he,
{ apply hu,
rw h,
unfold proposed_by,
use [v, e, he, e_is_proposal] },
rw h,
rcases p2a_emitted e_is_proposal he with β¨p_or, __, receiver_ballot_address, __, __, e_isβ©,
clear_,
rw e_is at e_is_proposal,
injection e_is_proposal with proposals_match,
injection proposals_match with ballots_match,
rw ballots_match at receiver_ballot_address,
exact receiver_ballot_address
end
private lemma proposer_sends_to_all_but_self : predicate.invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (proposer : pid_t) (e β s.network proposer)
(v : value_t),
(envelope.msg e) = message.p2a {bal := b, val := v} β
e.sent_to = target.exclude proposer) :=
begin
suffices key : predicate.inductive_invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (proposer : pid_t) (e β s.network proposer)
(v : value_t),
(envelope.msg e) = message.p2a {bal := b, val := v} β
e.sent_to = target.exclude proposer),
by { exact predicate.ind_inv_is_inv key },
split,
{ intros s hs b proposer e he v e_msg_eq,
exact (none_proposed_at_init s hs b proposer β¨v, e, he, e_msg_eqβ©).elim },
intros u w hu u_pn_w b proposer e he v e_msg_eq,
rcases u_pn_w with β¨receiver, sender, e', he', deliverable, proc_change, ntwk_change, rest_sameβ©,
cases decidable.em (proposer = receiver),
swap,
{ rw rest_same.right proposer h at he,
exact hu b proposer e he v e_msg_eq },
clear rest_same proc_change,
rw h at he β’,
clear h proposer,
rw ntwk_change at he, clear ntwk_change,
cases he,
{ exact hu b receiver e he v e_msg_eq },
rcases p2a_emitted e_msg_eq he with β¨_, _, _, _, _, keyβ©,
rw key
end
-- If a process p proposed ballot b, then p's current ballot is at least as
-- large as b.
lemma proposer_ballot_ge : predicate.invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (p : pid_t), proposed_by s b p β b β€ (s.procs p).curr) :=
begin
suffices key : predicate.inductive_invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (p : pid_t), proposed_by s b p β b β€ (s.procs p).curr),
by { exact predicate.ind_inv_is_inv key },
split,
{ intros s hs b p wrong,
exact (none_proposed_at_init s hs b p wrong).elim },
intros u w inv_u u_pn_w,
intros b p,
have ballot_le := ballot_nondecreasing p u_pn_w,
rcases u_pn_w with β¨receiver, sender, e, he, deliverable, proc_change, ntwk_change, proc_same, ntwk_sameβ©,
rintros β¨v, e_del, he_del, e_del_is_voteβ©,
cases decidable.em (p = receiver),
swap,
{ rw ntwk_same p h at he_del,
unfold proposed_by at inv_u,
calc b β€ (u.procs p).curr : inv_u b p β¨v, e_del, he_del, e_del_is_voteβ©
... β€ (w.procs p).curr : ballot_le },
rw h at he_del ballot_le β’,
clear h p,
rw ntwk_change at he_del,
cases he_del,
{ calc b β€ (u.procs receiver).curr : inv_u b receiver β¨v, e_del, he_del, e_del_is_voteβ©
... β€ (w.procs receiver).curr : ballot_le },
rcases p2a_emitted e_del_is_vote he_del with β¨p_or, _, _, _, _, keyβ©,
rw key at e_del_is_vote,
injection e_del_is_vote with contents_same,
injection contents_same with ballots_same,
calc b = (u.procs receiver).curr : eq.symm ballots_same
... β€ (w.procs receiver).curr : ballot_le
end
private lemma not_proposed_without_quorum : predicate.invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (p : pid_t), Β¬is_quorum (s.procs p).followers β Β¬proposed_by s (s.procs p).curr p) :=
begin
rw predicate.use_any_invariant,
split,
{ intros s hs p unimportant wrong,
exact (none_proposed_at_init s hs (s.procs p).curr p wrong).elim },
intros u w u_reachable not_proposed_wo_quorum_at_u,
rintros β¨receiver, sender, e, he, deliverable, proc_change, ntwk_change, rest_sameβ©,
intro w_reachable,
have ballot_le := ballot_nondecreasing receiver
β¨receiver, sender, e, he, deliverable, proc_change, ntwk_change, rest_sameβ©,
have proposed_le_at_u := proposer_ballot_ge u u_reachable,
intro p,
cases decidable.em (p = receiver),
swap,
{ rw rest_same.left p h,
unfold proposed_by,
rw rest_same.right p h,
exact not_proposed_wo_quorum_at_u p },
clear rest_same,
rw h,
clear h p,
intro hyp,
rintros β¨v, e', he', e'_msg_eqβ©,
rw ntwk_change at he', clear ntwk_change,
rw le_iff_lt_or_eq at ballot_le,
cases ballot_le with ballot_lt ballot_eq,
{ have key : Β¬proposed_by u (w.procs receiver).curr receiver,
by { intro wrong,
exact not_lt_of_ge (proposed_le_at_u (w.procs receiver).curr receiver wrong) ballot_lt },
cases he',
{ exact key β¨v, e', he', e'_msg_eqβ© },
clear key,
have key := network_change receiver (u.procs receiver) e.msg sender e' he',
cases e.msg,
case p1a : mb {
cases key;
rw key.right at e'_msg_eq;
injection e'_msg_eq
},
case p1b : mb p_or {
cases key.right.right.right.right with e'_eq e'_eq,
{ clear key,
rw e'_eq at e'_msg_eq,
injection e'_msg_eq with props_same,
injection props_same with ballots_same,
exact ne_of_lt ballot_lt ballots_same },
rw e'_eq at e'_msg_eq,
injection e'_msg_eq
},
case p2a : p {
cases key;
rw key.right at e'_msg_eq;
injection e'_msg_eq
},
case p2b : mb acc {
exact key.elim
},
case preempt : {
rw key.right at e'_msg_eq,
injection e'_msg_eq
}
},
rw β ballot_eq at e'_msg_eq,
cases he',
{ suffices cond : Β¬is_quorum (u.procs receiver).followers,
by {
apply not_proposed_wo_quorum_at_u receiver cond,
exact β¨v, e', he', e'_msg_eqβ© },
have key := state_change receiver (u.procs receiver) e.msg sender,
cases key,
{ rw key at proc_change,
rw proc_change at hyp,
exact hyp },
cases e,
cases e_msg,
case p1a : b {
cases key with ballot_increased made_larger,
rw made_larger at proc_change,
rw proc_change at ballot_eq,
exact (ne_of_lt ballot_increased ballot_eq).elim,
},
case p1b : b p_or {
cases key,
{ cases key with ballot_increased made_larger,
rw made_larger at proc_change,
rw proc_change at ballot_eq,
exact (ne_of_lt ballot_increased ballot_eq).elim },
cases key;
exact key.right.right.left
},
case p2a : p {
cases key with ballot_ge made_larger,
clear ballot_ge,
have p_bal_eq : p.bal = (u.procs receiver).curr,
by {
rw made_larger at proc_change,
rw proc_change at ballot_eq,
exact eq.symm ballot_eq
},
have p_bal_addr_ne_receiver: p.bal.address β receiver,
by {
have ballot_address_is_sender : p.bal.address = sender,
by {
apply proposer_is_ballot_address u u_reachable p.bal sender,
exact β¨p.val, {msg := message.p2a p, sent_to := e_sent_to}, he,
by { cases p, refl }β©,
},
suffices key : sender β receiver,
by {
rw ballot_address_is_sender,
exact key
},
suffices key : e_sent_to = target.exclude sender,
by { rw key at deliverable, exact deliverable },
apply proposer_sends_to_all_but_self u u_reachable p.bal sender
{msg := message.p2a p, sent_to := e_sent_to} he p.val,
cases p, refl
},
have p_bal_addr_eq_receiver := proposer_is_ballot_address u u_reachable
(u.procs receiver).curr receiver β¨v, e', he', e'_msg_eqβ©,
rw β p_bal_eq at p_bal_addr_eq_receiver,
exact (p_bal_addr_ne_receiver p_bal_addr_eq_receiver).elim
},
case p2b : mb acc {
cases key with ballot_gt updated,
rw updated at proc_change,
rw proc_change at ballot_eq,
exact (ne_of_lt ballot_gt ballot_eq).elim
},
case preempt : {
rw key.right at proc_change,
clear key,
rw proc_change at ballot_eq,
exact (ne_of_lt (ballot.next_larger receiver (u.procs receiver).curr) ballot_eq).elim
}
},
rcases p2a_emitted e'_msg_eq he' with
β¨p_or, e_msg_is, receiver_still_leader, began_wo_quorum, ended_w_quorum, emitted_justβ©,
suffices key : (w.procs receiver).followers = (u.procs receiver).followers βͺ {sender},
by { rw key at hyp, exact hyp ended_w_quorum },
rw proc_change,
rw e_msg_is,
unfold protocol.handler server.handle_p1b,
rw if_neg (lt_irrefl _),
rw if_neg (not_not.mpr receiver_still_leader),
rw if_neg (lt_irrefl _),
rw if_neg (show Β¬(is_quorum (u.procs receiver).followers β¨
sender β (u.procs receiver).followers), by {
intro cond,
cases cond with cond cond,
{ exact began_wo_quorum cond },
have key : (u.procs receiver).followers βͺ {sender} = (u.procs receiver).followers,
by {
rw finset.union_eq_left_iff_subset,
rw finset.singleton_subset_iff,
exact cond
},
rw key at ended_w_quorum,
exact began_wo_quorum ended_w_quorum
}),
rw if_pos ended_w_quorum
end
-- If two proposals have the same ballot, they have the same value.
--
-- The proof is by induction using predicates we've already proven.
--
-- Nothing is proposed at the initial state.
--
-- If the fact holds at u and two values are proposed with b at some state w
-- following from u, then the proposals are issued by the same proposer. Either
-- the proposer issued both proposals in u, in which case we apply the inductive
-- hypothesis, or one proposal was issued in u and the other was just issued in
-- the step (there are two ways in which that might happen, and they're
-- symmetric), or both proposals were issued in the last step.
--
-- * The case where one proposal was issued at u and the other was only issued
-- as a result of the last step is impossible: when a server proposes a value
-- in a step, it has no quorum before it sends the proposal, and therefore it
-- could not already have sent a proposal with the required ballot at state u.
--
-- * In the case that both proposals were issued in the last step, only one
-- proposal is issued at a time, so both are the same proposal.
theorem proposals_unique : predicate.invariant
(Ξ» (s : sys_state pid_t (server pid_t value_t is_quorum vals) (message pid_t value_t)),
β (b : ballot pid_t) (vβ vβ : value_t), proposed s b vβ β proposed s b vβ β vβ = vβ) :=
begin
rw predicate.use_any_invariant,
split,
{ intros s hs b v _ proposed_at_initial,
rcases proposed_at_initial with β¨proposer, e, he, e_is_proposalβ©,
exfalso, apply none_proposed_at_init s hs b proposer,
exact β¨v, e, he, e_is_proposalβ© },
intros u w u_reachable unique_at_u u_pn_w w_reachable b vβ vβ hpβ hpβ,
specialize unique_at_u b vβ vβ,
rcases u_pn_w with β¨receiver, sender, e, he, deliverable, proc_change, ntwk_change, rest_sameβ©,
cases hpβ with proposerβ hpβ,
cases hpβ with proposerβ hpβ,
have propβ_eq := proposer_is_ballot_address w w_reachable b proposerβ β¨vβ, hpββ©,
rw β propβ_eq at hpβ,
have propβ_eq := proposer_is_ballot_address w w_reachable b proposerβ β¨vβ, hpββ©,
rw β propβ_eq at hpβ,
clear propβ_eq propβ_eq proposerβ proposerβ,
cases decidable.em (b.address = receiver),
swap,
{ rw rest_same.right b.address h at hpβ hpβ,
exact unique_at_u β¨b.address, hpββ© β¨b.address, hpββ© },
clear rest_same,
rw h at hpβ hpβ, clear h,
rcases hpβ with β¨eβ, heβ, eβ_msg_eqβ©,
rcases hpβ with β¨eβ, heβ, eβ_msg_eqβ©,
rw ntwk_change at heβ heβ,
cases heβ; cases heβ,
{ exact unique_at_u β¨receiver, eβ, heβ, eβ_msg_eqβ© β¨receiver, eβ, heβ, eβ_msg_eqβ© },
{ rcases p2a_emitted eβ_msg_eq heβ with β¨_, _, _, no_quorum, _, eβ_isβ©,
have ballots_match : (u.procs receiver).curr = b,
by { rw eβ_is at eβ_msg_eq, injection eβ_msg_eq with key, injection key },
clear eβ_is eβ_msg_eq heβ eβ,
rw β ballots_match at eβ_msg_eq,
exfalso,
apply not_proposed_without_quorum u u_reachable receiver no_quorum,
exact β¨vβ, eβ, heβ, eβ_msg_eqβ© },
{ rcases p2a_emitted eβ_msg_eq heβ with β¨_, _, _, no_quorum, _, eβ_isβ©,
have ballots_match : (u.procs receiver).curr = b,
by { rw eβ_is at eβ_msg_eq, injection eβ_msg_eq with key, injection key },
clear eβ_is eβ_msg_eq heβ eβ,
rw β ballots_match at eβ_msg_eq,
exfalso,
apply not_proposed_without_quorum u u_reachable receiver no_quorum,
exact β¨vβ, eβ, heβ, eβ_msg_eqβ© },
rcases p2a_emitted eβ_msg_eq heβ with β¨pβ, e_has_pβ, __, __, __, keyββ©,
rcases p2a_emitted eβ_msg_eq heβ with β¨pβ, e_has_pβ, __, __, __, keyββ©,
clear_,
have proposals_match : pβ = pβ,
by { injection eq.trans (eq.symm e_has_pβ) e_has_pβ },
have valβ : vβ = proposal.value_or_default (proposal.merge (u.procs receiver).accepted pβ)
(vals receiver),
by {
rw keyβ at eβ_msg_eq,
injection eβ_msg_eq with fact,
injection fact with _ goal,
exact eq.symm goal },
have valβ : vβ = proposal.value_or_default (proposal.merge (u.procs receiver).accepted pβ)
(vals receiver),
by {
rw keyβ at eβ_msg_eq,
injection eβ_msg_eq with fact,
injection fact with _ goal,
exact eq.symm goal },
rw valβ,
rw valβ,
rw proposals_match
end
|
[STATEMENT]
lemma top_on_acom_simps:
"top_on_acom (SKIP {Q}) X = top_on_opt Q X"
"top_on_acom (x ::= e {Q}) X = top_on_opt Q X"
"top_on_acom (C1;;C2) X = (top_on_acom C1 X \<and> top_on_acom C2 X)"
"top_on_acom (IF b THEN {P1} C1 ELSE {P2} C2 {Q}) X =
(top_on_opt P1 X \<and> top_on_acom C1 X \<and> top_on_opt P2 X \<and> top_on_acom C2 X \<and> top_on_opt Q X)"
"top_on_acom ({I} WHILE b DO {P} C {Q}) X =
(top_on_opt I X \<and> top_on_acom C X \<and> top_on_opt P X \<and> top_on_opt Q X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (top_on\<^sub>c (SKIP {Q}) X = top_on\<^sub>o Q X &&& top_on\<^sub>c (x ::= e {Q}) X = top_on\<^sub>o Q X) &&& top_on\<^sub>c (C1;;
C2) X = (top_on\<^sub>c C1 X \<and> top_on\<^sub>c C2 X) &&& top_on\<^sub>c (IF b THEN {P1} C1 ELSE {P2} C2
{Q}) X = (top_on\<^sub>o P1 X \<and> top_on\<^sub>c C1 X \<and> top_on\<^sub>o P2 X \<and> top_on\<^sub>c C2 X \<and> top_on\<^sub>o Q X) &&& top_on\<^sub>c ({I}
WHILE b
DO {P}
C
{Q}) X = (top_on\<^sub>o I X \<and> top_on\<^sub>c C X \<and> top_on\<^sub>o P X \<and> top_on\<^sub>o Q X)
[PROOF STEP]
by(auto simp add: top_on_acom_def) |
{-# OPTIONS --cubical-compatible #-}
module WithoutK-PatternMatchingLambdas2 where
-- Equality defined with two indices.
data _β‘_ {A : Set} : A β A β Set where
refl : β x β x β‘ x
-- The --cubical-compatible option works with pattern matching lambdas.
K : (A : Set) (x : A) (P : x β‘ x β Set) β P (refl x) β (p : x β‘ x ) β P p
K = Ξ» { A .x P pr (refl x) β pr }
|
-- Convergent sequences of analytic functions
import analysis.analytic.basic
import analysis.calculus.fderiv_analytic
import analysis.complex.cauchy_integral
import analysis.complex.re_im_topology
import data.complex.basic
import data.real.basic
import data.real.ennreal
import data.real.nnreal
import data.real.pi.bounds
import data.set.basic
import measure_theory.integral.interval_integral
import order.bounded_order
import order.filter.at_top_bot
import topology.metric_space.basic
import topology.uniform_space.uniform_convergence
import analytic
import bounds
import simple
import tactics
import topology
open complex (abs I)
open filter (at_top)
open measure_theory.measure_space (volume)
open metric (ball closed_ball sphere)
open_locale real nnreal topological_space
noncomputable theory
def power_series := formal_multilinear_series β β β
lemma cauchy_on_cball_radius {f : β β β} {z : β} {r : ββ₯0} (rp : r > 0)
(h : analytic_on β f (closed_ball z r))
: has_fpower_series_on_ball f (cauchy_power_series f z r) z r := begin
have hd : differentiable_on β f (closed_ball z r), {
intros x H, exact analytic_at.differentiable_within_at (h x H)
},
set p : power_series := cauchy_power_series f z r,
exact differentiable_on.has_fpower_series_on_ball hd rp
end
lemma analytic_on_cball_radius {f : β β β} {z : β} {r : ββ₯0} (rp : r > 0) (h : analytic_on β f (closed_ball z r))
: β p : power_series, has_fpower_series_on_ball f p z r :=
β¨cauchy_power_series f z r, cauchy_on_cball_radius rp hβ©
lemma analytic_on_small_cball {f : β β β} {z : β} {r : ββ₯0} (h : analytic_on β f (ball z r))
(s : ββ₯0) (sr : s < r) : analytic_on β f (closed_ball z s) := begin
intros x hx,
rw closed_ball at hx, simp at hx,
have hb : x β ball z r, {
rw ball, simp,
calc nndist x z β€ s : hx
... < r : sr
},
exact h x hb
end
lemma analytic_on_ball_radius {f : β β β} {z : β} {r : ββ₯0} (rp : r > 0) (h : analytic_on β f (ball z r))
: β p : power_series, has_fpower_series_on_ball f p z r := begin
have h0 := analytic_on_small_cball h (r/2) (nnreal.half_lt_self $ ne_of_gt rp),
rcases analytic_on_cball_radius (nnreal.half_pos rp) h0 with β¨p, phβ©,
set R := formal_multilinear_series.radius p,
refine β¨p, { r_le := _, r_pos := ennreal.coe_pos.mpr rp, has_sum := _ }β©, {
apply ennreal.le_of_forall_pos_nnreal_lt,
intros t tp tr,
have ht := analytic_on_small_cball h t (ennreal.coe_lt_coe.elim_left tr),
rcases analytic_on_cball_radius tp ht with β¨p',hp'β©,
have pp : p = p' := has_fpower_series_at.eq_formal_multilinear_series β¨β(r/2),phβ© β¨t,hp'β©,
rw βpp at hp',
refine hp'.r_le
}, {
intros y yr,
rw [emetric.ball, set.mem_set_of] at yr,
rcases exists_between yr with β¨t,t0,t1β©,
have ht := analytic_on_small_cball h t.to_nnreal (simple.nnreal_ennreal_coe_lt t1),
rcases analytic_on_cball_radius _ ht with β¨p',hp'β©,
have pp : p = p' := has_fpower_series_at.eq_formal_multilinear_series β¨β(r/2),phβ© β¨t.to_nnreal,hp'β©,
rw βpp at hp',
refine hp'.has_sum _,
rw [emetric.ball, set.mem_set_of],
calc edist y 0 < t : t0
... = β(t.to_nnreal) : (ennreal.coe_to_nnreal $ ne_top_of_lt t1).symm,
exact simple.to_nnreal_pos t0 t1
}
end
lemma cauchy_bound {f : β β β} {c : β} {r : ββ₯0} {d : ββ₯0} {w : β} {n : β}
(rp : r > 0) (h : β w β closed_ball c r, abs (f w) β€ d)
: abs (cauchy_power_series f c r n (Ξ» _, w)) β€ abs w ^ n * rβ»ΒΉ ^ n * d :=
begin
set wr := abs w ^ n * rβ»ΒΉ ^ n * d,
rw [cauchy_power_series_apply f c r n w],
rw [smul_eq_mul, complex.abs_mul],
generalize hg : (Ξ» z, (w / (z - c)) ^ n β’ (z - c)β»ΒΉ β’ f z) = g,
have gs : β z β sphere c r, β₯g zβ₯ β€ wr * rβ»ΒΉ, {
intro, simp, intro, rw βhg, simp, rw H,
have zb : z β closed_ball c r, simp, rw [βnnreal.coe_le_coe, coe_nndist, complex.dist_eq], linarith,
have zs := h z zb,
calc abs w ^ n / βr ^ n * (rβ»ΒΉ * abs (f z)) = abs w ^ n * rβ»ΒΉ ^ n * (rβ»ΒΉ * abs (f z))
: by rw simple.div_pow_inv (abs w ^ n) βr n
... β€ abs w ^ n * rβ»ΒΉ ^ n * (rβ»ΒΉ * d) : by bound
... = abs w ^ n * rβ»ΒΉ ^ n * d * rβ»ΒΉ : by ring
... = wr * rβ»ΒΉ : rfl
},
have cn := circle_integral.norm_integral_le_of_norm_le_const (nnreal.coe_nonneg r) gs,
rw complex.norm_eq_abs at cn,
simp at hg cn β’,
have p3 : |Ο| = Ο := abs_eq_self.elim_right (by linarith [real.pi_gt_three]),
calc |Ο|β»ΒΉ * 2β»ΒΉ * abs (circle_integral g c βr) β€ |Ο|β»ΒΉ * 2β»ΒΉ * (2*Ο*r * (wr * rβ»ΒΉ)) : by bound
... = (Ο * |Ο|β»ΒΉ) * (r * rβ»ΒΉ) * wr : by ring
... = (Ο * Οβ»ΒΉ) * (r * rβ»ΒΉ) * wr : by rw p3
... = 1 * (r * rβ»ΒΉ) * wr : by rw mul_inv_cancel real.pi_ne_zero
... = 1 * 1 * wr : by rw mul_inv_cancel (nnreal.coe_ne_zero.elim_right $ ne_of_gt rp)
... = wr : by ring
end
lemma circle_integral_sub {f g : β β β} {c : β} {r : β}
(fi : circle_integrable f c r) (gi : circle_integrable g c r)
: circle_integral f c r - circle_integral g c r = circle_integral (f - g) c r := begin
rw circle_integral, generalize hf : (Ξ» ΞΈ : β, deriv (circle_map c r) ΞΈ β’ f (circle_map c r ΞΈ)) = fc,
rw circle_integral, generalize hg : (Ξ» ΞΈ : β, deriv (circle_map c r) ΞΈ β’ g (circle_map c r ΞΈ)) = gc,
rw circle_integral, generalize hfg : (Ξ» ΞΈ : β, deriv (circle_map c r) ΞΈ β’ (f-g) (circle_map c r ΞΈ)) = fgc,
have hs : fc - gc = fgc, {
rw [βhf, βhg, βhfg], apply funext, simp, intro, rw mul_sub_left_distrib,
},
rw βhs, clear hfg hs fgc, symmetry,
have fci := circle_integrable.out fi, rw hf at fci,
have gci := circle_integrable.out gi, rw hg at gci,
exact interval_integral.integral_sub fci gci
end
lemma circle_map_nz {c : β} {r : ββ₯0} {ΞΈ : β} (rp : r > 0) : circle_map c r ΞΈ - c β 0 := begin
simp, intro h, rw h at rp, simp at rp, exact rp
end
lemma cauchy_is_circle_integrable {f : β β β} {c : β} {r : ββ₯0}
(n : β) (w : β) (rp : r > 0) (h : continuous_on f (closed_ball c r))
: circle_integrable (Ξ» z, w^n / (z - c)^n * ((z - c)β»ΒΉ * f z)) c r := begin
refine continuous_on.interval_integrable _,
refine continuous_on.mul _ _,
refine continuous_on.mul continuous_on_const _,
apply continuous.continuous_on,
refine continuous.invβ (by continuity) (Ξ» x, pow_ne_zero n (circle_map_nz rp)),
refine continuous_on.mul _ _,
apply continuous.continuous_on,
refine continuous.invβ (by continuity) (Ξ» x, circle_map_nz rp),
refine continuous_on.comp h (continuous.continuous_on (by continuity)) _,
intros ΞΈ hΞΈ, exact circle_map_mem_closed_ball c (nnreal.coe_nonneg r) ΞΈ,
end
lemma cauchy_sub {f g : β β β} {c : β} {r : ββ₯0}
(n : β) (w : β) (rp : r > 0) (cf : continuous_on f (closed_ball c r)) (cg : continuous_on g (closed_ball c r))
: cauchy_power_series f c r n (Ξ» _, w) - cauchy_power_series g c r n (Ξ» _, w)
= cauchy_power_series (f - g) c r n (Ξ» _, w) :=
begin
rw [cauchy_power_series_apply f c r n w],
rw [cauchy_power_series_apply g c r n w],
rw [cauchy_power_series_apply (f-g) c r n w],
set s : β := (2 * Ο * I)β»ΒΉ,
simp,
have hfg : (Ξ» z, w^n / (z - c)^n * ((z - c)β»ΒΉ * (f - g) z))
= (Ξ» z, w^n / (z - c)^n * ((z - c)β»ΒΉ * f z))
- (Ξ» z, w^n / (z - c)^n * ((z - c)β»ΒΉ * g z)), {
apply funext, simp, intro, ring,
},
have fi := cauchy_is_circle_integrable n w rp cf,
have gi := cauchy_is_circle_integrable n w rp cg,
have cia := circle_integral_sub fi gi,
rw [βmul_sub_left_distrib, cia],
clear cia fi gi hfg cf cg rp,
have flip : (Ξ» z, w^n/(z-c)^n * ((z-c)β»ΒΉ * f z)) - (Ξ» z, w^n/(z-c)^n * ((z-c)β»ΒΉ * g z))
= (Ξ» z, w^n/(z-c)^n * ((z-c)β»ΒΉ * f z) - w^n/(z-c)^n * ((z-c)β»ΒΉ * g z)) := rfl,
rw flip, clear flip, ring_nf
end
lemma cauchy_dist {f g : β β β} {c : β} {r : ββ₯0} {d : ββ₯0}
(n : β) (w : β) (rp : r > 0) (cf : continuous_on f (closed_ball c r)) (cg : continuous_on g (closed_ball c r))
(h : β z, z β closed_ball c r β abs (f z - g z) β€ d)
: dist (cauchy_power_series f c r n (Ξ» _, w)) (cauchy_power_series g c r n (Ξ» _, w)) β€ abs w ^ n * rβ»ΒΉ ^ n * d :=
begin
rw [complex.dist_eq, cauchy_sub n w rp cf cg],
refine cauchy_bound rp _, intros z zr, simp at h zr, refine h z zr
end
-- Uniform limits of analytic functions are analytic.
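-- Proof sketch: on a closed ball inside `s`, `g` is written as the sum of its Cauchy power
-- series, and partial sums of the series for `f n` and for `g` are compared via `cauchy_dist`
-- together with a geometric series bound.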
theorem uniform_analytic_lim {I : Type} [lattice I] [nonempty I] {f : I β β β β} {g : β β β} {s : set β}
(o : is_open s) (h : β n, analytic_on β (f n) s) (u : tendsto_uniformly_on f g at_top s)
: analytic_on β g s := begin
intros c hc,
rcases open_has_cball o c hc with β¨r, rp, cbβ©,
have hb : β n, analytic_on β (f n) (closed_ball c r) := Ξ» n, (h n).mono cb,
set pr := Ξ» n, cauchy_power_series (f n) c r,
have hpf : β n, has_fpower_series_on_ball (f n) (pr n) c r, {
intro,
have cs := cauchy_on_cball_radius rp (hb n),
have pn : pr n = cauchy_power_series (f n) c r := rfl,
rw βpn at cs, exact cs
},
have cfs : β n, continuous_on (f n) s := Ξ» n, analytic_on.continuous_on (h n),
have cf : β n, continuous_on (f n) (closed_ball c r) := Ξ» n, continuous_on.mono (cfs n) cb,
have cg : continuous_on g (closed_ball c r)
:= continuous_on.mono (tendsto_uniformly_on.continuous_on u (filter.eventually_of_forall cfs)) cb,
clear h hb hc o cfs,
set p := cauchy_power_series g c r,
exact has_fpower_series_on_ball.analytic_at {
r_le := le_radius_cauchy_power_series g c r,
r_pos := ennreal.coe_pos.mpr rp,
has_sum := _
},
intros y yb,
have yr := yb, simp at yr,
set a := abs y / r,
have a0 : a β₯ 0 := by bound,
have a1 : a < 1 := (div_lt_one (nnreal.coe_pos.mpr rp)).mpr yr,
have a1p : 1 - a > 0 := by bound,
rw [has_sum, metric.tendsto_at_top],
intros e ep,
generalize d4 : (1-a)*(e/4) = d,
have dp : d > 0, { rw βd4, bound },
rcases filter.eventually_at_top.mp (metric.tendsto_uniformly_on_iff.mp u d dp) with β¨n,hn'β©,
set hn := hn' n, simp at hn, clear hn' u,
have dfg : dist (f n (c + y)) (g (c + y)) β€ d, {
apply le_of_lt, rw dist_comm,
refine hn (c + y) _,
apply cb,
simp, exact le_of_lt yr
},
set hs := (hpf n).has_sum yb, rw [has_sum, metric.tendsto_at_top] at hs,
rcases hs d dp with β¨N,NMβ©, clear hs,
existsi N, intros M NlM,
have dpf := le_of_lt (NM M NlM), clear NM NlM N yb,
have dppr : dist (M.sum (Ξ» (k : β), p k (Ξ» _, y))) (M.sum (Ξ» (k : β), pr n k (Ξ» _, y))) β€ e/4, {
transitivity M.sum (Ξ» (k : β), dist (p k (Ξ» _, y)) (pr n k (Ξ» _, y))),
apply dist_sum_sum_le M (Ξ» (k : β), p k (Ξ» _, y)) (Ξ» (k : β), pr n k (Ξ» _, y)),
transitivity M.sum (Ξ» k, a^k * d), {
apply finset.sum_le_sum, intros k hk,
have hak : a^k = abs y^k * rβ»ΒΉ^k, {
calc (abs y / r)^k = (abs y * rβ»ΒΉ)^k : by ring
... = abs y^k * rβ»ΒΉ^k : mul_pow _ _ _
},
rw hak,
generalize hd' : d.to_nnreal = d',
have dd : (d' : β) = d, { rw βhd', exact real.coe_to_nnreal d (le_of_lt dp) },
have hcb : β z, z β closed_ball c r β abs (g z - f n z) β€ d', {
intros z zb, exact trans (le_of_lt (hn z (cb zb))) (le_of_eq dd.symm)
},
exact trans (cauchy_dist k y rp cg (cf n) hcb) (mul_le_mul_of_nonneg_left (le_of_eq dd) (by bound))
}, {
have pgb : M.sum (Ξ» k, a^k) β€ (1-a)β»ΒΉ := partial_geometric_bound M a0 a1,
calc M.sum (Ξ» k, a^k * d) = M.sum (Ξ» k, a^k) * d : by rw βfinset.sum_mul
... β€ (1-a)β»ΒΉ * d : by bound
... = (1-a)β»ΒΉ * ((1-a) * (e/4)) : by rw βd4
... = (1-a) * (1-a)β»ΒΉ * (e/4) : by ring
... = 1 * (e/4) : by rw mul_inv_cancel (ne_of_gt a1p)
... = e/4 : by ring
}
},
generalize hMp : M.sum (Ξ» k : β, p k (Ξ» _, y)) = Mp, rw hMp at dppr,
generalize hMpr : M.sum (Ξ» k, pr n k (Ξ» _, y)) = Mpr, rw hMpr at dpf dppr,
calc dist Mp (g (c + y)) β€ dist Mp (f n (c + y)) + dist (f n (c + y)) (g (c + y)) : dist_triangle _ _ _
... β€ dist Mp Mpr + dist Mpr (f n (c + y)) + d : by bound
... β€ e/4 + d + d : by bound
... = e/4 + 2*(1-a)*(e/4) : by { rw βd4, ring }
... β€ e/4 + 2*(1-0)*(e/4) : by bound
... = (3/4)*e : by ring
... < 1*e : mul_lt_mul_of_pos_right (by norm_num) ep
... = e : by simp
end |
[STATEMENT]
theorem "\<forall>(i::int). 8 \<le> i --> (\<exists>x y. 0 \<le> x & 0 \<le> y & 3 * x + 5 * y = i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>i\<ge>8. \<exists>x y. 0 \<le> x \<and> 0 \<le> y \<and> 3 * x + 5 * y = i
[PROOF STEP]
by presburger |
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MagicHash #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE UnboxedTuples #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- |
-- Module : Internal.Vector
-- Copyright : (c) Alberto Ruiz 2007-15
-- License : BSD3
-- Maintainer : Alberto Ruiz
-- Stability : provisional
--
module Internal.Vector(
I,Z,R,C,
fi,ti,
Vector, fromList, unsafeToForeignPtr, unsafeFromForeignPtr, unsafeWith,
createVector, avec, inlinePerformIO,
toList, dim, (@>), at', (|>),
vjoin, subVector, takesV, idxs,
buildVector,
asReal, asComplex,
toByteString,fromByteString,
zipVector, unzipVector, zipVectorWith, unzipVectorWith,
foldVector, foldVectorG, foldVectorWithIndex, foldLoop,
mapVector, mapVectorM, mapVectorM_,
mapVectorWithIndex, mapVectorWithIndexM, mapVectorWithIndexM_
) where
import Data.Complex
import Data.Int (Int64)
import Data.Vector.Storable (Vector, fromList, unsafeFromForeignPtr,
unsafeToForeignPtr, unsafeWith)
import qualified Data.Vector.Storable as Vector
import Foreign.C.Types (CInt)
import Foreign.ForeignPtr
import Foreign.Marshal.Array
import Foreign.Ptr
import Foreign.Storable
import GHC.Base (IO (IO), realWorld#, when)
import GHC.ForeignPtr (mallocPlainForeignPtrBytes)
import System.IO.Unsafe (unsafePerformIO)
import Control.Monad (replicateM)
import Data.Binary
import Data.Binary.Put
import qualified Data.ByteString.Internal as BS
import Data.Vector.Storable.Internal (updPtr)
type I = CInt
type Z = Int64
type R = Float
type C = Complex Float
-- | specialized fromIntegral
fi :: Int -> CInt
fi = fromIntegral
-- | specialized fromIntegral
ti :: CInt -> Int
ti = fromIntegral
-- | Number of elements
dim :: (Storable t) => Vector t -> Int
dim = Vector.length
{-# INLINE dim #-}
-- C-Haskell vector adapter
{-# INLINE avec #-}
avec :: Storable a => Vector a -> (f -> IO r) -> ((CInt -> Ptr a -> f) -> IO r)
avec v f g = unsafeWith v $ \ptr -> f (g (fromIntegral (Vector.length v)) ptr)
-- allocates memory for a new vector
createVector :: Storable a => Int -> IO (Vector a)
createVector n = do
when (n < 0) $ error ("trying to createVector of negative dim: "++show n)
fp <- doMalloc undefined
return $ unsafeFromForeignPtr fp 0 n
where
--
-- Use the much cheaper Haskell heap allocated storage
-- for foreign pointer space we control
--
doMalloc :: Storable b => b -> IO (ForeignPtr b)
doMalloc dummy = do
mallocPlainForeignPtrBytes (n * sizeOf dummy)
{- | creates a Vector from a list:
@> fromList [2,3,5,7]
4 |> [2.0,3.0,5.0,7.0]@
-}
safeRead :: Storable a => Vector a -> (Ptr a -> IO c) -> c
safeRead v = inlinePerformIO . unsafeWith v
{-# INLINE safeRead #-}
inlinePerformIO :: IO a -> a
inlinePerformIO (IO m) = case m realWorld# of (# _, r #) -> r
{-# INLINE inlinePerformIO #-}
{- extracts the Vector elements to a list
>>> toList (linspace 5 (1,10))
[1.0,3.25,5.5,7.75,10.0]
-}
toList :: Storable a => Vector a -> [a]
toList v = safeRead v $ peekArray (dim v)
{- | Create a vector from a list of elements and explicit dimension. The input
list is truncated if it is too long, so it may safely
be used, for instance, with infinite lists.
>>> 5 |> [1..]
[1.0,2.0,3.0,4.0,5.0]
it :: (Enum a, Num a, Foreign.Storable.Storable a) => Vector a
-}
(|>) :: (Storable a) => Int -> [a] -> Vector a
infixl 9 |>
n |> l
| length l' == n = fromList l'
| otherwise = error "list too short for |>"
where
l' = take n l
-- | Create a vector of indexes, useful for matrix extraction using '(??)'
idxs :: [Int] -> Vector I
idxs js = fromList (map fromIntegral js) :: Vector I
{- | takes a number of consecutive elements from a Vector
>>> subVector 2 3 (fromList [1..10])
[3.0,4.0,5.0]
it :: (Enum t, Num t, Foreign.Storable.Storable t) => Vector t
-}
subVector :: Storable t => Int -- ^ index of the starting element
-> Int -- ^ number of elements to extract
-> Vector t -- ^ source
-> Vector t -- ^ result
subVector = Vector.slice
{-# INLINE subVector #-}
{- | Reads a vector position:
>>> fromList [0..9] @> 7
7.0
-}
(@>) :: Storable t => Vector t -> Int -> t
infixl 9 @>
v @> n
| n >= 0 && n < dim v = at' v n
| otherwise = error "vector index out of range"
{-# INLINE (@>) #-}
-- | access to Vector elements without range checking
at' :: Storable a => Vector a -> Int -> a
at' v n = safeRead v $ flip peekElemOff n
{-# INLINE at' #-}
{- | concatenate a list of vectors
>>> vjoin [fromList [1..5::Float], konst 1 3]
[1.0,2.0,3.0,4.0,5.0,1.0,1.0,1.0]
it :: Vector Float
-}
vjoin :: Storable t => [Vector t] -> Vector t
vjoin [] = fromList []
vjoin [v] = v
vjoin as = unsafePerformIO $ do
let tot = sum (map dim as)
r <- createVector tot
unsafeWith r $ \ptr ->
joiner as tot ptr
return r
where joiner [] _ _ = return ()
joiner (v:cs) _ p = do
let n = dim v
unsafeWith v $ \pb -> copyArray p pb n
joiner cs 0 (advancePtr p n)
{- | Extract consecutive subvectors of the given sizes.
>>> takesV [3,4] (linspace 10 (1,10::Float))
[[1.0,2.0,3.0],[4.0,5.0,6.0,7.0]]
it :: [Vector Float]
-}
takesV :: Storable t => [Int] -> Vector t -> [Vector t]
takesV ms w | sum ms > dim w = error $ "takesV " ++ show ms ++ " on dim = " ++ (show $ dim w)
| otherwise = go ms w
where go [] _ = []
go (n:ns) v = subVector 0 n v
: go ns (subVector n (dim v - n) v)
---------------------------------------------------------------
-- | transforms a complex vector into a real vector with alternating real and imaginary parts
asReal :: (RealFloat a, Storable a) => Vector (Complex a) -> Vector a
asReal v = unsafeFromForeignPtr (castForeignPtr fp) (2*i) (2*n)
where (fp,i,n) = unsafeToForeignPtr v
-- | transforms a real vector into a complex vector with alternating real and imaginary parts
asComplex :: (RealFloat a, Storable a) => Vector a -> Vector (Complex a)
asComplex v = unsafeFromForeignPtr (castForeignPtr fp) (i `div` 2) (n `div` 2)
where (fp,i,n) = unsafeToForeignPtr v
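-- Illustrative behaviour (not a doctest from the original source):
-- asReal (fromList [1 :+ 2, 3 :+ 4]) is [1.0,2.0,3.0,4.0], and asComplex inverts
-- this on even-length vectors.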
--------------------------------------------------------------------------------
-- | map on Vectors
mapVector :: (Storable a, Storable b) => (a-> b) -> Vector a -> Vector b
mapVector f v = unsafePerformIO $ do
w <- createVector (dim v)
unsafeWith v $ \p ->
unsafeWith w $ \q -> do
let go (-1) = return ()
go !k = do x <- peekElemOff p k
pokeElemOff q k (f x)
go (k-1)
go (dim v -1)
return w
{-# INLINE mapVector #-}
-- | zipWith for Vectors
zipVectorWith :: (Storable a, Storable b, Storable c) => (a-> b -> c) -> Vector a -> Vector b -> Vector c
zipVectorWith f u v = unsafePerformIO $ do
let n = min (dim u) (dim v)
w <- createVector n
unsafeWith u $ \pu ->
unsafeWith v $ \pv ->
unsafeWith w $ \pw -> do
let go (-1) = return ()
go !k = do x <- peekElemOff pu k
y <- peekElemOff pv k
pokeElemOff pw k (f x y)
go (k-1)
go (n -1)
return w
{-# INLINE zipVectorWith #-}
-- | unzipWith for Vectors
unzipVectorWith :: (Storable (a,b), Storable c, Storable d)
=> ((a,b) -> (c,d)) -> Vector (a,b) -> (Vector c,Vector d)
unzipVectorWith f u = unsafePerformIO $ do
let n = dim u
v <- createVector n
w <- createVector n
unsafeWith u $ \pu ->
unsafeWith v $ \pv ->
unsafeWith w $ \pw -> do
let go (-1) = return ()
go !k = do z <- peekElemOff pu k
let (x,y) = f z
pokeElemOff pv k x
pokeElemOff pw k y
go (k-1)
go (n-1)
return (v,w)
{-# INLINE unzipVectorWith #-}
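-- | right-associated fold over a Vector; illustratively,
-- foldVector (+) 0 (fromList [1,2,3]) == 6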
foldVector :: Storable a => (a -> b -> b) -> b -> Vector a -> b
foldVector f x v = unsafePerformIO $
unsafeWith v $ \p -> do
let go (-1) s = return s
go !k !s = do y <- peekElemOff p k
go (k-1::Int) (f y s)
go (dim v -1) x
{-# INLINE foldVector #-}
-- the zero-indexed index is passed to the folding function
foldVectorWithIndex :: Storable a => (Int -> a -> b -> b) -> b -> Vector a -> b
foldVectorWithIndex f x v = unsafePerformIO $
unsafeWith v $ \p -> do
let go (-1) s = return s
go !k !s = do y <- peekElemOff p k
go (k-1::Int) (f k y s)
go (dim v -1) x
{-# INLINE foldVectorWithIndex #-}
foldLoop :: (Int -> t -> t) -> t -> Int -> t
foldLoop f s0 d = go (d - 1) s0
where
go 0 s = f (0::Int) s
go !j !s = go (j - 1) (f j s)
foldVectorG :: Storable t1 => (Int -> (Int -> t1) -> t -> t) -> t -> Vector t1 -> t
foldVectorG f s0 v = foldLoop g s0 (dim v)
where g !k !s = f k (safeRead v . flip peekElemOff) s
{-# INLINE g #-} -- Thanks to Ryan Ingram (http://permalink.gmane.org/gmane.comp.lang.haskell.cafe/46479)
{-# INLINE foldVectorG #-}
-------------------------------------------------------------------
-- | monadic map over Vectors
-- the monad @m@ must be strict
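-- (illustrative) e.g. mapVectorM (\x -> do { print x; return (x + 1) }) v works in IO,
-- a strict monad; with a lazy monad the interleaved writes that build the result
-- vector may never be forced.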
mapVectorM :: (Storable a, Storable b, Monad m) => (a -> m b) -> Vector a -> m (Vector b)
mapVectorM f v = do
w <- return $! unsafePerformIO $! createVector (dim v)
mapVectorM' w 0 (dim v -1)
return w
where mapVectorM' w' !k !t
| k == t = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
y <- f x
return $! inlinePerformIO $! unsafeWith w' $! \q -> pokeElemOff q k y
| otherwise = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
y <- f x
_ <- return $! inlinePerformIO $! unsafeWith w' $! \q -> pokeElemOff q k y
mapVectorM' w' (k+1) t
{-# INLINE mapVectorM #-}
-- | monadic map over Vectors
mapVectorM_ :: (Storable a, Monad m) => (a -> m ()) -> Vector a -> m ()
mapVectorM_ f v = do
mapVectorM' 0 (dim v -1)
where mapVectorM' !k !t
| k == t = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
f x
| otherwise = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
_ <- f x
mapVectorM' (k+1) t
{-# INLINE mapVectorM_ #-}
-- | monadic map over Vectors with the zero-indexed index passed to the mapping function
-- the monad @m@ must be strict
mapVectorWithIndexM :: (Storable a, Storable b, Monad m) => (Int -> a -> m b) -> Vector a -> m (Vector b)
mapVectorWithIndexM f v = do
w <- return $! unsafePerformIO $! createVector (dim v)
mapVectorM' w 0 (dim v -1)
return w
where mapVectorM' w' !k !t
| k == t = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
y <- f k x
return $! inlinePerformIO $! unsafeWith w' $! \q -> pokeElemOff q k y
| otherwise = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
y <- f k x
_ <- return $! inlinePerformIO $! unsafeWith w' $! \q -> pokeElemOff q k y
mapVectorM' w' (k+1) t
{-# INLINE mapVectorWithIndexM #-}
-- | monadic map over Vectors with the zero-indexed index passed to the mapping function
mapVectorWithIndexM_ :: (Storable a, Monad m) => (Int -> a -> m ()) -> Vector a -> m ()
mapVectorWithIndexM_ f v = do
mapVectorM' 0 (dim v -1)
where mapVectorM' !k !t
| k == t = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
f k x
| otherwise = do
x <- return $! inlinePerformIO $! unsafeWith v $! \p -> peekElemOff p k
_ <- f k x
mapVectorM' (k+1) t
{-# INLINE mapVectorWithIndexM_ #-}
mapVectorWithIndex :: (Storable a, Storable b) => (Int -> a -> b) -> Vector a -> Vector b
--mapVectorWithIndex g = head . mapVectorWithIndexM (\a b -> [g a b])
mapVectorWithIndex f v = unsafePerformIO $ do
w <- createVector (dim v)
unsafeWith v $ \p ->
unsafeWith w $ \q -> do
let go (-1) = return ()
go !k = do x <- peekElemOff p k
pokeElemOff q k (f k x)
go (k-1)
go (dim v -1)
return w
{-# INLINE mapVectorWithIndex #-}
--------------------------------------------------------------------------------
-- a 64K cache, with a Float taking 13 bytes in Bytestring,
-- implies a chunk size of 5041
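-- (64 * 1024) / 13 is roughly 5041, so chunk = 5000 keeps each chunk just under 64K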
chunk :: Int
chunk = 5000
chunks :: Int -> [Int]
chunks d = let c = d `div` chunk
m = d `mod` chunk
in if m /= 0 then reverse (m:(replicate c chunk)) else (replicate c chunk)
putVector :: (Storable t, Binary t) => Vector t -> Data.Binary.Put.PutM ()
putVector v = mapM_ put $! toList v
getVector :: (Storable a, Binary a) => Int -> Get (Vector a)
getVector d = do
xs <- replicateM d get
return $! fromList xs
--------------------------------------------------------------------------------
toByteString :: Storable t => Vector t -> BS.ByteString
toByteString v = BS.PS (castForeignPtr fp) (sz*o) (sz * dim v)
where
(fp,o,_n) = unsafeToForeignPtr v
sz = sizeOf (v@>0)
fromByteString :: Storable t => BS.ByteString -> Vector t
fromByteString (BS.PS fp o n) = r
where
r = unsafeFromForeignPtr (castForeignPtr (updPtr (`plusPtr` o) fp)) 0 n'
n' = n `div` sz
sz = sizeOf (r@>0)
--------------------------------------------------------------------------------
instance (Binary a, Storable a) => Binary (Vector a) where
put v = do
let d = dim v
put d
mapM_ putVector $! takesV (chunks d) v
-- put = put . v2bs
get = do
d <- get
vs <- mapM getVector $ chunks d
return $! vjoin vs
-- get = fmap bs2v get
-------------------------------------------------------------------
{- | creates a Vector of the specified length using the supplied function
to map the index to the value at that index.
@> buildVector 4 fromIntegral
4 |> [0.0,1.0,2.0,3.0]@
-}
buildVector :: Storable a => Int -> (Int -> a) -> Vector a
buildVector len f =
fromList $ map f [0 .. (len - 1)]
-- | zip for Vectors
zipVector :: (Storable a, Storable b, Storable (a,b)) => Vector a -> Vector b -> Vector (a,b)
zipVector = zipVectorWith (,)
-- | unzip for Vectors
unzipVector :: (Storable a, Storable b, Storable (a,b)) => Vector (a,b) -> (Vector a,Vector b)
unzipVector = unzipVectorWith id
-------------------------------------------------------------------
|
function test_mrvConvertVAnat
%Validate that mrVista conversion of vAnatomy.dat to nifti works
%
% test_mrvConvertVAnat()
%
% Tests: mrAnatConvertVAnatToT1Nifti
%
% INPUTS
% No inputs
%
% RETURNS
% No returns
%
% Example: test_mrvConvertVAnat()
%
% See also MRVTEST
%
% Copyright NYU team, mrVista, 2017
%% Convert vAnatomy.dat to nifti
% Get the vAnatomy.dat sample data file
vAnatFileName = mrtInstallSampleData('anatomy/anatomyV','vAnatomy', [], [], 'filetype', 'dat');
% Convert it to nifti
newNiftiFileName = fullfile(tempdir,'vAnatomy.nii.gz');
mrAnatConvertVAnatToT1Nifti(vAnatFileName,newNiftiFileName);
% Get a stored nifti file that corresponds to the vAnatomy
oldNiftiFileName = mrtInstallSampleData('anatomy/anatomyV','t1.nii', [], [], 'filetype', 'gz');
%% Compare the nifti to some stored values
n0 = niftiRead(oldNiftiFileName);
n1 = niftiRead(newNiftiFileName);
n0 = niftiApplyCannonicalXform(n0);
n1 = niftiApplyCannonicalXform(n1);
assertEqual(niftiGet(n0, 'qto_xyz'), niftiGet(n1, 'qto_xyz'));
assertEqual(niftiGet(n0, 'data'), niftiGet(n1, 'data'));
assertEqual(niftiGet(n0, 'dim'), niftiGet(n1, 'dim'));
|
There exists a countable set $D$ such that for any set $X$ satisfying $p(X)$, if $X$ is nonempty, then there exists $d \in D$ such that $d \in X$. |
# Project Primer
# Author: Zhaocheng Zhu
import io
import re
import sys
import unittest
from unittest import mock
import multiprocessing as mp
import numpy as np
import primer
from primer import performance
from primer import profile
class PerformanceTest(unittest.TestCase):
def setUp(self):
primer.config("stdout")
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_slot(self, mock_stdout):
class DictClass(object):
def __init__(self, x=1, y=2, z=3):
self.a = x
self.b = y
self.c = z
SlotClass = performance.slot(DictClass)
with profile.memory():
x = [DictClass() for _ in range(100000)]
with profile.memory():
y = [SlotClass() for _ in range(100000)]
output = mock_stdout.getvalue()
normal_memory, slot_memory = re.findall("[0-9.]+", output)
normal_memory = float(normal_memory)
slot_memory = float(slot_memory)
self.assertGreater((normal_memory - slot_memory) / normal_memory, 0.1,
"Slot doesn't reduce memory consumption")
@unittest.skipIf(sys.platform != "linux", "SharedNDArray only works on Linux")
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_shared_ndarray(self, mock_stdout):
arrays = [np.random.rand(100000) for _ in range(4)]
pool = mp.Pool(4)
shareds = [performance.SharedNDArray(a) for a in arrays]
with profile.time():
array_result = sum(pool.map(np.sum, arrays))
with profile.time():
shared_result = sum(pool.map(np.sum, shareds))
output = mock_stdout.getvalue()
array_time, shared_time = re.findall("[0-9.]+", output)
array_time = float(array_time)
shared_time = float(shared_time)
        self.assertAlmostEqual(array_result, shared_result, msg="Incorrect result from SharedNDArray")
self.assertGreater((array_time - shared_time) / array_time, 0.1, "SharedNDArray doesn't improve speed")
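    # `performance.SharedNDArray` is assumed to expose the array through shared
    # memory, so the pool workers read it in place instead of unpickling a copy;
    # that difference is what the timing comparison above is meant to show.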
if __name__ == "__main__":
unittest.main() |
namespace SciLean
-- This is like `ExactSolution` but it is intended to be used in automation.
inductive AutoExactSolution {Ξ± : Type _} : (Ξ± β Prop) β Type _ where
| exact {spec : Ξ± β Prop} (a : Ξ±) (h : spec a) : AutoExactSolution spec
def AutoImpl {Ξ±} (a : Ξ±) := AutoExactSolution Ξ» x => x = a
@[inline]
def AutoImpl.val {Ξ±} {a : Ξ±} (x : AutoImpl a) : Ξ± :=
match x with
| .exact val _ => val
def AutoImpl.finish {Ξ±} {a : Ξ±} : AutoImpl a := .exact a rfl
theorem AutoImpl.impl_eq_spec (x : AutoImpl a) : a = x.val :=
by
cases x; rename_i a' h;
simp[AutoImpl.val, val, h]
done
-- I don't think this can be proven. Can it lead to a contradiction?
axiom AutoImpl.injectivity_axiom {Ξ±} (a b : Ξ±) : (AutoImpl a = AutoImpl b) β (a = b)
-- Do we really need AutoImpl.injectivity_axiom?
@[simp] theorem AutoImpl.normalize_val {Ξ± : Type u} (a b : Ξ±) (h : (AutoImpl a = AutoImpl b))
: AutoImpl.val (Eq.mpr h (AutoImpl.finish (a:=b))) = b :=
by
have h' : a = b := by apply AutoImpl.injectivity_axiom; apply h
revert h; rw[h']
simp[val,finish,Eq.mpr]
done
-- This is a new version of `AutoImpl.normalize_val`; some tactics use `cast` instead of `Eq.mpr` now
-- TODO: clean this up
@[simp] theorem AutoImpl.normalize_val' {Ξ± : Type u} (a b : Ξ±) (h : (AutoImpl a = AutoImpl b))
: AutoImpl.val (cast h (AutoImpl.finish (a:=a))) = a :=
by sorry
-- have h' : a = b := by apply AutoImpl.injectivity_axiom; apply h
-- revert h; rw[h']
-- simp[val,finish,Eq.mpr]
-- done
example {Ξ± : Type} (a b : Ξ±) (A : (Ξ£' x, x = a)) (h : (Ξ£' x, x = a) = (Ξ£' x, x = b))
: (a = b) β (h βΈ A).1 = A.1 :=
by
constructor
{
intro eq; rw[A.2]; conv => rhs; rw [eq]
apply (h βΈ A).2
}
{
intro eq; rw[β A.2]; rw[β eq]
apply (h βΈ A).2
}
open Lean.Parser.Tactic.Conv
-- TODO: turn `rewrite_by` to an elaborator and do not use `AutoImpl`
syntax term:max "rewrite_by" convSeq : term
macro_rules
| `($x rewrite_by $rw:convSeq) =>
`((by (conv => enter[1]; ($rw)); (apply AutoImpl.finish) : AutoImpl $x).val)
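-- Hypothetical usage sketch (expression chosen only for illustration): a term such as
-- `(x + 0) rewrite_by simp` elaborates, roughly, to the `.val` of an `AutoImpl (x + 0)`
-- witness whose value is the simplified expression; `AutoImpl.impl_eq_spec` then recovers
-- the equation with the original `x + 0`.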
|
Formal statement is: lemma contour_integral_0 [simp]: "contour_integral g (\<lambda>x. 0) = 0" Informal statement is: The contour integral of the zero function is zero. |
# quiver
#JL Plots
using Plots
n = 7
qf(x,y) = 1 / hypot(x, y)
x = repeat(-3:(2*3)/n:3, 1, n) |> vec
y = repeat(-3:(2*3)/n:3, 1, n)' |> vec
vx = qf.(x,y) .* cos.(atan.(y,x)) |> vec
vy = qf.(x,y) .* sin.(atan.(y,x)) |> vec
Plots.quiver(x, y, quiver=(vx, vy), aspect_ratio=:equal, c=:steelblue, linewidth=2)
|
[STATEMENT]
lemma regions_part_ex[intro]:
assumes "u \<in> V"
shows "u \<in> [u]\<^sub>\<R>" "[u]\<^sub>\<R> \<in> \<R>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<in> [u]\<^sub>\<R> &&& [u]\<^sub>\<R> \<in> \<R>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. u \<in> [u]\<^sub>\<R>
2. [u]\<^sub>\<R> \<in> \<R>
[PROOF STEP]
from assms regions_partition[OF meta_eq_to_obj_eq[OF \<R>_def]]
[PROOF STATE]
proof (chain)
picking this:
u \<in> V
\<forall>x\<in>\<X>. 0 \<le> ?u x \<Longrightarrow> \<exists>!R. R \<in> \<R> \<and> ?u \<in> R
[PROOF STEP]
have
"\<exists>!R. R \<in> \<R> \<and> u \<in> R"
[PROOF STATE]
proof (prove)
using this:
u \<in> V
\<forall>x\<in>\<X>. 0 \<le> ?u x \<Longrightarrow> \<exists>!R. R \<in> \<R> \<and> ?u \<in> R
goal (1 subgoal):
1. \<exists>!R. R \<in> \<R> \<and> u \<in> R
[PROOF STEP]
unfolding V_def
[PROOF STATE]
proof (prove)
using this:
u \<in> {v. \<forall>x\<in>\<X>. 0 \<le> v x}
\<forall>x\<in>\<X>. 0 \<le> ?u x \<Longrightarrow> \<exists>!R. R \<in> \<R> \<and> ?u \<in> R
goal (1 subgoal):
1. \<exists>!R. R \<in> \<R> \<and> u \<in> R
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>!R. R \<in> \<R> \<and> u \<in> R
goal (2 subgoals):
1. u \<in> [u]\<^sub>\<R>
2. [u]\<^sub>\<R> \<in> \<R>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>!R. R \<in> \<R> \<and> u \<in> R
[PROOF STEP]
show "[u]\<^sub>\<R> \<in> \<R>" "u \<in> [u]\<^sub>\<R>"
[PROOF STATE]
proof (prove)
using this:
\<exists>!R. R \<in> \<R> \<and> u \<in> R
goal (1 subgoal):
1. [u]\<^sub>\<R> \<in> \<R> &&& u \<in> [u]\<^sub>\<R>
[PROOF STEP]
using alpha_interp.region_unique_spec
[PROOF STATE]
proof (prove)
using this:
\<exists>!R. R \<in> \<R> \<and> u \<in> R
\<lbrakk>?u \<in> ?R; ?R \<in> \<R>\<rbrakk> \<Longrightarrow> [?u]\<^sub>\<R> = ?R
goal (1 subgoal):
1. [u]\<^sub>\<R> \<in> \<R> &&& u \<in> [u]\<^sub>\<R>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
[u]\<^sub>\<R> \<in> \<R>
u \<in> [u]\<^sub>\<R>
goal:
No subgoals!
[PROOF STEP]
qed |
State Before: R : Type u
S : Type ?u.780222
Ο : Type v
M : Type w
instββ΄ : CommRing R
instβΒ³ : CommRing S
instβΒ² : AddCommGroup M
instβΒΉ : Module R M
instβ : IsNoetherianRing R
β’ IsNoetherianRing (MvPolynomial (Fin 0) R)
State After: case f
R : Type u
S : Type ?u.780222
Ο : Type v
M : Type w
instββ΄ : CommRing R
instβΒ³ : CommRing S
instβΒ² : AddCommGroup M
instβΒΉ : Module R M
instβ : IsNoetherianRing R
β’ R β+* MvPolynomial (Fin 0) R
Tactic: apply isNoetherianRing_of_ringEquiv R
State Before: case f
R : Type u
S : Type ?u.780222
Ο : Type v
M : Type w
instββ΄ : CommRing R
instβΒ³ : CommRing S
instβΒ² : AddCommGroup M
instβΒΉ : Module R M
instβ : IsNoetherianRing R
β’ R β+* MvPolynomial (Fin 0) R
State After: case f
R : Type u
S : Type ?u.780222
Ο : Type v
M : Type w
instββ΄ : CommRing R
instβΒ³ : CommRing S
instβΒ² : AddCommGroup M
instβΒΉ : Module R M
instβ : IsNoetherianRing R
β’ MvPolynomial (Fin 0) R β+* R
Tactic: symm
State Before: case f
R : Type u
S : Type ?u.780222
Ο : Type v
M : Type w
instββ΄ : CommRing R
instβΒ³ : CommRing S
instβΒ² : AddCommGroup M
instβΒΉ : Module R M
instβ : IsNoetherianRing R
β’ MvPolynomial (Fin 0) R β+* R
State After: no goals
Tactic: apply MvPolynomial.isEmptyRingEquiv R (Fin 0)
```
import pandas as pd
import statsmodels.api as sm
import numpy as np
from scipy.stats import logistic
import math
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
WARNING: pylab import has clobbered these variables: ['logistic']
`%matplotlib` prevents importing * from pylab and numpy
```
dob = pd.read_csv('snapshot_data/2014-09-17/property_indexes/dob-index.csv', index_col=0)
```
```
dob.fillna(value=0, inplace=True)
```
```
dob['total'] = dob.sum(axis=1)
```
```
dob.ix[1990:]['total']
```
1990 12987
1991 10692
1992 9168
1993 7059
1994 5114
1995 3019
1996 1674
1997 836
1998 471
1999 307
2000 199
2001 122
2002 100
2003 71
2004 57
2005 35
2006 28
2007 24
2008 13
2009 17
2010 23
2011 19
2012 20
2013 32
2014 7
2411 1
2426 2
Name: total, dtype: float64
```
dob['ratio'] = (dob['total'] - dob['male']) / dob['total']
```
```
dob['year'] = dob.index
dob['shift-year'] = dob['year'] - 1800
```
```
dob.ix[1800:1980]['ratio'].plot(kind='line')
```
```
logit = sm.Logit(dob.ix[1800:1990]['ratio'], dob.ix[1800:1990]['shift-year'])
```
```
result = logit.fit()
```
Optimization terminated successfully.
Current function value: 0.344498
Iterations 5
```
result.summary()
```
                           Logit Regression Results
    ==============================================================================
    Dep. Variable:                  ratio   No. Observations:                  191
    Model:                          Logit   Df Residuals:                      190
    Method:                           MLE   Df Model:                            0
    Date:                Thu, 08 Jan 2015   Pseudo R-squ.:                 -0.6871
    Time:                        15:17:15   Log-Likelihood:                -65.799
    converged:                       True   LL-Null:                       -39.002
                                            LLR p-value:                     1.000
    ==============================================================================
                     coef    std err          z      P>|z|      [95.0% Conf. Int.]
    ------------------------------------------------------------------------------
    shift-year    -0.0146      0.002     -7.109      0.000        -0.019    -0.011
    ==============================================================================
```
result.params[0]
```
-0.014622907346901763
```
result.model
```
<statsmodels.discrete.discrete_model.Logit at 0x7f09d06269d0>
```
def sigmoid(x):
b0 = 1
b1 = -result.params[0]
exponent = (b0 + ((x)*b1))
return 1 / (1 + math.exp(-1 * exponent) )
def invsigmoid(x):
return 1 / sigmoid(x)
```
```
dob['logistic'] = dob['shift-year'].apply(sigmoid)
```
```
dob.ix[1800:1990][['logistic','ratio']].plot()
```
```
5*math.e**2
```
36.94528049465325
```
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
import sympy as sym
"""
create a function to fit with your data. a, b, c and d are the coefficients
that curve_fit will calculate for you.
In this part you need to guess and/or use mathematical knowledge to find
a function that resembles your data
"""
def mypoly(x, a, b, c, d):
return a*x**3 + b*x**2 +c*x + d
def myexp(x, a, b,c, d):
return (a**((b*x)+c)) +d
def mypow(x, a,b,c):
return ((x)**(b)) +c
"""
make the curve_fit
"""
for func in [mypoly, myexp, mypow]:
x = list(dob.ix[1800:1980]['ratio'].index)
y = list(dob.ix[1800:1980]['ratio'])
popt, pcov = curve_fit(func, x, y, maxfev=1000000)
print 'pcov', pcov
"""
Plot your data
"""
plt.plot(x, y, 'ro',label="Original Data")
"""
brute force to avoid errors
"""
x = [float(xn) for xn in x] #every element (xn) in x becomes a float
y = [float(yn) for yn in y] #every element (yn) in y becomes a float
x = np.array(x) #transform your data in a numpy array,
y = np.array(y) #so the curve_fit can work
"""
The result is:
popt[0] = a, popt[1] = b, popt[2] = c and popt[3] = d of the function,
so f(x) = popt[0]*x**3 + popt[1]*x**2 + popt[2]*x + popt[3].
"""
print "a = %s , b = %s, c = %s, d = %s" % (popt[0], popt[1], popt[2], popt[3] if len(popt)==4 else None)
"""
Use sympy to generate the latex syntax of the function
"""
xs = sym.Symbol('\lambda')
tex = sym.latex(func(xs,*popt)).replace('$', '')
plt.title(r'$f(\lambda)= %s$' %(tex),fontsize=16)
"""
Print the coefficients and plot the function.
"""
plt.plot(x, func(x, *popt), label="Fitted Curve") #same as line above \/
#plt.plot(x, popt[0]*x**3 + popt[1]*x**2 + popt[2]*x + popt[3], label="Fitted Curve")
plt.legend(loc='upper left')
plt.show()
```
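One small addition that often helps when judging these fits (a sketch, reusing the `pcov` already printed inside the loop): the diagonal of the covariance matrix holds the parameter variances, so their square roots give one-standard-deviation uncertainties on the fitted coefficients.
```
perr = np.sqrt(np.diag(pcov))  # 1-sigma uncertainties on the fitted parameters
```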
```
myexp_f(a = 0.987088150409 , b = -1.10594731976, c = 2309.16849805, d = 0.0383932763027)(2034)
```
```
def myexp_f( a, b,c, d):
return lambda x: (a**((b*x)+c)) +d
def mypoly_f(a,b,c,d):
return lambda x: a*x**3 + b*x**2 +c*x + d
myexp_at_zero = lambda x: abs(myexp_f(a = 0.987088150409 , b = -1.10594731976, c = 2309.16849805, d = 0.0383932763027)(x) - 0.5)
mypoly_at_zero = lambda x: abs(mypoly_f(a = 4.87416698802e-08 , b = -0.000269974433142, c = 0.498921947871, d = -307.558030013)(x) - 0.5)
from scipy.optimize import minimize
print minimize(myexp_at_zero, (2100))
print minimize(mypoly_at_zero,(2100))
```
status: 2
success: False
njev: 44
nfev: 144
hess_inv: array([[ 643.99976828]])
fun: 3.212685673048554e-11
x: array([ 2034.17023164])
message: 'Desired error not necessarily achieved due to precision loss.'
jac: array([ 0.00232247])
status: 2
success: False
njev: 49
nfev: 159
hess_inv: array([[ 16654.7000672]])
fun: 7.298694981727749e-11
x: array([ 2037.01251129])
message: 'Desired error not necessarily achieved due to precision loss.'
jac: array([-0.00400543])
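The two minimizers above stop with `success: False` (precision-loss messages) even though the returned points are essentially exact zeros of `|f(x) - 0.5|`. For a crossing point like this, a bracketing root finder is the more natural tool; a minimal sketch, reusing the `myexp_f` coefficients from the cell above and assuming the curve crosses 0.5 exactly once between 2000 and 2100:
```
from scipy.optimize import brentq

f = myexp_f(a=0.987088150409, b=-1.10594731976, c=2309.16849805, d=0.0383932763027)
year_at_half = brentq(lambda x: f(x) - 0.5, 2000, 2100)  # ~2034, matching the minimize result
```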
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Notation for freely adding an infimum to any set
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Relation.Nullary.Construct.Add.Infimum where
open import Relation.Nullary.Construct.Add.Point
renaming (Pointed to _₋; ∙ to ⊥₋)
public
|
theorem exo (a b c: nat):
a > 0 -> b > 0 -> c > 0
-> a^2 + b^2 = c^2
-> ((c/a + c/b)^2 > 8) /\ (forall n, (c/a + c/b)^2 = n -> false)
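A sketch of why the first conjunct holds, read over the rationals (as written, `c/a` and `c/b` are truncating `nat` divisions, so the statement is presumably intended for a rational or real quotient): from a^2 + b^2 = c^2 together with a^2 + b^2 >= 2ab and (a + b)^2 >= 4ab,

$$\Big(\frac{c}{a}+\frac{c}{b}\Big)^2 \;=\; \frac{c^2\,(a+b)^2}{a^2 b^2} \;=\; \frac{(a^2+b^2)(a+b)^2}{a^2 b^2} \;\ge\; \frac{2ab \cdot 4ab}{a^2 b^2} \;=\; 8,$$

and equality would force a = b, which is impossible because c^2 = 2a^2 has no solution in positive integers, so the inequality is strict. The second conjunct (the square is never a natural number) is the harder half and is not covered by this sketch.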
|
I am happy with the detailing done on the wig, which makes it look so splendid. The lace wig pattern is easy to wear and comfortable to carry. The wig gives me a nice look and has great density. Thank you! |
import tactic
namespace blueprint
lemma first_test (h : false) : true :=
begin
tauto,
end
lemma second_test (h : false) : true :=
begin
tauto,
end
end blueprint |
Fixpoint iter {A} (n : nat) (f : A -> A) (x : A) :=
match n with
| 0 => x
| S n => iter n f (f x)
end.
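(* The goals below exercise vm_compute on goals that still contain an
   uninstantiated evar: [evar (f : nat -> nat)] introduces ?f, [vm_compute]
   is run while ?f is unresolved, and only afterwards does [instantiate]
   fix f := fun x => x so that [reflexivity] can close the cut-in equation. *)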
Goal nat -> True.
Proof.
intros n.
evar (f : nat -> nat).
cut (iter 10 f 0 = 0).
vm_compute.
intros; constructor.
instantiate (f := (fun x => x)).
reflexivity.
Qed.
Goal exists x, x = 5 + 5.
Proof.
eexists.
vm_compute.
reflexivity.
Qed.
|
> module Identity.Operations
> import Control.Monad.Identity
> import Sigma.Sigma
> %default total
> %access public export
> %auto_implicits on
> |||
> unwrap : Identity a -> a
> unwrap {a} (Id x) = x
* |Identity| is a functor:
> ||| fmap
> fmap : {A, B : Type} -> (A -> B) -> Identity A -> Identity B
> fmap = map {f = Identity}
* |Identity| is a monad:
> ||| ret
> ret : {A : Type} -> A -> Identity A
> ret = pure
> ||| bind
> bind : {A, B : Type} -> Identity A -> (A -> Identity B) -> Identity B
> bind = (>>=)
* |Identity| is a container monad:
> ||| Membership
> Elem : {A : Type} -> A -> Identity A -> Type
> Elem a1 (Id a2) = a1 = a2
> ||| Non emptiness
> NonEmpty : {A : Type} -> Identity A -> Type
> NonEmpty _ = Unit
> |||
> All : {A : Type} -> (P : A -> Type) -> Identity A -> Type
> All P = P . unwrap
> ||| Tagging
> tagElem : {A : Type} -> (ia : Identity A) -> Identity (Sigma A (\ a => a `Elem` ia))
> tagElem (Id a) = Id (MkSigma a Refl)
> |||
> unwrapElemLemma : (ia : Identity a) -> Elem (unwrap ia) ia
> unwrapElemLemma (Id a) = Refl
|
constant β : Type
@[instance] constant real_field : linear_ordered_field β
axiom lem {P: Prop} : P β¨ Β¬P
axiom set.ext {Ξ±: Type} {A B:set Ξ±} : (β(x:Ξ±), x β A β x β B) β A = B
theorem dne {P: Prop} : ¬¬P β P :=
begin
intro HnnP,
have H: P β¨ Β¬P,
apply lem,
cases H with HP HnP,
assumption,
apply false.elim,
apply HnnP,
exact HnP
end
theorem diff_equal {Ξ±: Type} (A:set Ξ±) : A \ β
= A :=
set.ext (Ξ» x, iff.intro
and.left
(Ξ» H, and.intro H id)
)
theorem int_empty {Ξ±: Type} (A: set Ξ±) : A β© β
= β
:=
set.ext (Ξ» x, iff.intro
and.right
false.elim
)
theorem empty_int {Ξ±: Type} (A: set Ξ±) : β
β© A = β
:=
set.ext (Ξ» x, iff.intro
and.left
false.elim
)
theorem int_univ {Ξ±: Type} (A: set Ξ±) : A β© set.univ = A :=
set.ext (Ξ» x, iff.intro
and.left
(Ξ» H, β¨H, true.introβ©)
)
theorem univ_int {Ξ±: Type} (A: set Ξ±) : set.univ β© A = A :=
set.ext (Ξ» x, iff.intro
and.right
(Ξ» H, β¨true.intro, Hβ©)
)
theorem empty_union {Ξ±: Type} (A: set Ξ±) : β
βͺ A = A :=
set.ext (Ξ» x, iff.intro
(Ξ» H, or.elim H false.elim id)
or.inr
)
theorem union_empty {Ξ±: Type} (A: set Ξ±) : A βͺ β
= A :=
set.ext (Ξ» x, iff.intro
(Ξ» H, or.elim H id false.elim)
or.inl
)
theorem univ_union {Ξ±: Type} (A: set Ξ±) : set.univ βͺ A = set.univ :=
set.ext (Ξ» x, iff.intro
(Ξ» H, true.intro)
or.inl
)
theorem union_univ {Ξ±: Type} (A: set Ξ±) : A βͺ set.univ = set.univ :=
set.ext (Ξ» x, iff.intro
(Ξ» H, true.intro)
or.inr
)
theorem comp_univ {Ξ±: Type} : -(β
:set Ξ±) = set.univ :=
set.ext (Ξ» x, iff.intro
(Ξ» H, true.intro)
(Ξ» H, Ξ» H2, H2)
)
theorem comp_comp {Ξ± : Type} {A : set Ξ±} : -(-A) = A :=
set.ext (Ξ» x, iff.intro
dne
(Ξ» p q, q p)
)
-- sigma field
class sigma_field {Ξ± : Type} (F : set (set Ξ±)) :=
(empty_sigma : β
β F)
(comp_sigma : β A, A β F β - A β F)
(union_sigma : β {A B}, A β F β B β F β A βͺ B β F)
export sigma_field
theorem sample_sigma {Ξ± : Type} {F : set (set Ξ±)} [s : sigma_field F] :
set.univ β F :=
begin
rw βcomp_univ,
exact comp_sigma β
s.empty_sigma,
end
theorem diff_sigma {Ξ± : Type} {F : set (set Ξ±)} [s : sigma_field F] {A B:set Ξ±} :
A β F β B β F β B \ A β F :=
begin
intros p q,
have H1: -(A βͺ -B) = B \ A,
apply set.ext,
intro x,
apply iff.intro,
intro xL,
constructor,
apply dne,
intro xnB,
apply xL,
right,
exact xnB,
intro xA,
apply xL,
left, assumption,
intro xBmA,
cases xBmA with xB xnA,
intro xLR,
cases xLR with xA xnB,
apply xnA xA,
apply xnB xB,
rw βH1,
simp [s.comp_sigma,s.union_sigma,p,q]
end
theorem int_sigma {Ξ± : Type} {F : set (set Ξ±)} [s : sigma_field F] {A B:set Ξ±} :
A β F β B β F β A β© B β F :=
begin
intros p q,
have H1: -(-A βͺ -B) = A β© B,
apply set.ext,
intro x,
apply iff.intro,
intro xL,
constructor,
apply dne,
intro xnA,
exact xL (or.inl xnA),
apply dne,
intro xnB,
exact xL (or.inr xnB),
intro xAiB,
cases xAiB with xA xB,
intro xnL,
cases xnL with xmA xmB,
exact xmA xA,
exact xmB xB,
rw βH1,
simp [s.comp_sigma,s.union_sigma,p,q]
end
-- probability
class probability {Ξ± : Type} {F : set (set Ξ±)} (P : Ξ (x:set Ξ±), x β F β β) extends sigma_field F :=
(prob_nonneg : β {A:set Ξ±} {p:A β F}, 0 β€ (P A p))
(prob_sample_one : P set.univ sample_sigma = 1)
(prob_disjoint : β {A B} (p:A β F) (q:B β F),
A β© B = β
β P (A βͺ B) (union_sigma p q) = (P A p) + (P B q))
export probability
theorem prob_union {Ξ± : Type} {A B C : set Ξ±} {F : set (set Ξ±)} (P : Ξ (x:set Ξ±), x β F β β) [c : probability P]
(p:A β F) (q:B β F) (r:A βͺ B = C) (s:A β© B = β
) :
P C (r βΈ union_sigma p q) = (P A p) + (P B q) :=
let PA := P A p,
PB := P B q,
PAUB := P (A βͺ B) (union_sigma p q)
in
begin
have H1: PAUB = PA+PB,
exact prob_disjoint P p q s,
have H2: C β F,
rw βr,
exact union_sigma p q,
have H3: β c:C β F, P C c = PAUB,
rw H1,
rw βr,
rw βc.prob_disjoint,
intro c,
exact rfl,
exact s,
rw βc.prob_disjoint,
exact H3 H2,
exact s
end
theorem prob_empty_zero {Ξ± : Type} {A B C : set Ξ±} {F : set (set Ξ±)} {P : Ξ (x:set Ξ±), x β F β β} [c : probability P] :
P β
c.empty_sigma = 0 :=
let PE := P β
c.empty_sigma,
PΞ© := P set.univ sample_sigma,
PEUΞ© := P (β
βͺ set.univ) (union_sigma c.empty_sigma sample_sigma)
in
begin
have H1: PE + PΞ© = PΞ©,
apply eq.symm,
apply prob_union P,
exact empty_union set.univ,
exact empty_int set.univ,
apply add_right_cancel,
rw zero_add,
exact H1
end
theorem prob_comp {Ξ± : Type} {F : set (set Ξ±)} {P : Ξ (x:set Ξ±), x β F β β} [c : probability P]
{A} {p:A β F} :
(P A p) + (P (-A) (comp_sigma A p)) = 1 :=
let PAC := P (-A) (comp_sigma A p)
in
begin
have H1: A βͺ (-A) = set.univ,
apply set.ext,
intro x,
apply iff.intro,
intro xAUAC,
exact true.intro,
intro xS,
cases lem with xA xnA,
left, assumption,
right, exact xnA,
have H2: A β© (-A) = β
,
apply set.ext,
intro x,
apply iff.intro,
intro xAiAC,
cases xAiAC with xA xAC,
apply xAC,
exact xA,
apply false.elim,
apply eq.trans,
apply eq.symm,
apply prob_union,
exact H1,
exact H2,
exact prob_sample_one P
end
theorem prob_subset_le {Ξ± : Type} {F : set (set Ξ±)} {P : Ξ (x:set Ξ±), x β F β β} [c : probability P]
{A B} {p:A β F} {q:B β F} :
A β B β (P A p) β€ (P B q) :=
let BmA := B \ A,
AiB := A β© B,
BmAUA := B \ A βͺ A,
PA := P A p,
PB := P B q,
PBmA := P BmA (diff_sigma p q),
PAiB := P AiB (int_sigma p q)
in
begin
intro HAB,
have H1: A βͺ BmA = B,
apply set.ext,
intro x,
apply iff.intro,
intro xAUBmA,
cases xAUBmA with xA xBmA,
exact HAB xA,
exact xBmA.left,
intro xB,
cases lem with xA xnA,
left, assumption,
right, exact β¨xB, xnAβ©,
have H2: A β© BmA = β
,
apply set.ext,
intro x,
apply iff.intro,
intro xAiBmA,
cases xAiBmA with xA xBmA,
exact xBmA.right xA,
apply false.elim,
rw prob_union P _ _ H1,
apply le_add_of_nonneg_right,
apply prob_nonneg,
exact H2,
exact diff_sigma p q
end
theorem inclusion_exclusion {Ξ± : Type} {F : set (set Ξ±)} {P : Ξ (x:set Ξ±), x β F β β} [c : probability P]
{A B} {p:A β F} {q:B β F} :
P (A βͺ B) (union_sigma p q) = (P A p) + (P B q) - (P (A β© B) (int_sigma p q)) :=
let BmA := B \ A,
AiB := A β© B,
AUB := A βͺ B,
PA := P A p,
PB := P B q,
PBmA := P BmA (diff_sigma p q),
PAiB := P AiB (int_sigma p q),
PAUB := P (A βͺ B) (union_sigma p q)
in
begin
have H1: A βͺ BmA = AUB,
apply set.ext,
intro x,
apply iff.intro,
intro xAUBmA,
cases xAUBmA with xA xBmA,
left, assumption,
right, exact xBmA.left,
intro xAUB,
cases xAUB with xA xB,
left, assumption,
cases lem with _ xnA,
left, assumption,
right, exact β¨xB, xnAβ©,
have H2: A β© BmA = β
,
apply set.ext,
intro x,
apply iff.intro,
intro xAiBmA,
cases xAiBmA with xA xBmA,
exact xBmA.right xA,
apply false.elim,
have H3: P (A βͺ B) _ = PA+PBmA,
exact prob_union P p (diff_sigma p q) H1 H2,
rw H3,
rw βadd_sub,
apply congr_arg,
apply eq_sub_of_add_eq,
have H4: BmA βͺ AiB = B,
apply set.ext,
intro x,
apply iff.intro,
intro h,
cases h with xBmA xAiB,
exact xBmA.left,
exact xAiB.right,
intro xB,
cases lem with xA xnA,
right, exact β¨xA, xBβ©,
left, exact β¨xB, xnAβ©,
have H5: BmA β© AiB = β
,
apply set.ext,
intro x,
apply iff.intro,
intro h,
exact h.left.right h.right.left,
apply false.elim,
rw βprob_union P,
exact H4,
exact H5
end
theorem total_prob {Ξ± : Type} {F : set (set Ξ±)} {P : Ξ (x:set Ξ±), x β F β β} [c : probability P]
{A B C} {p:A β F} {q:B β F} {r:C β F}
{s:A βͺ B = set.univ} {t:A β© B = β
}:
(P (C β© A) (int_sigma r p)) + (P (C β© B) (int_sigma r q)) = P C r :=
let PA := P A p,
PB := P B q,
PC := P C r,
CA := C β© A,
CB := C β© B,
PCA := P CA (int_sigma r p),
PCB := P CB (int_sigma r q)
in
begin
have H1: CA βͺ CB = C,
apply set.ext,
intro x,
apply iff.intro,
intro xCAUCB,
cases xCAUCB with xCA xCB,
exact xCA.left,
exact xCB.left,
intro xC,
have xAB: x β A βͺ B,
rw s,
exact true.intro,
cases xAB with xA xB,
left, exact β¨xC, xAβ©,
right, exact β¨xC, xBβ©,
have H2: CA β© CB = β
,
apply set.ext,
intro x,
apply iff.intro,
intro xCAiCB,
rw βt,
exact β¨xCAiCB.left.right, xCAiCB.right.rightβ©,
apply false.elim,
apply eq.symm,
apply prob_union,
exact H1,
exact H2
end |
[STATEMENT]
theorem to_from_fm [simp]: \<open>to_fm (from_fm p) = p\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. to_fm (from_fm p) = p
[PROOF STEP]
by (induct p) simp_all |
[STATEMENT]
lemma abs_msg_Un [simp]:
"abs_msg (G \<union> H) = abs_msg G \<union> abs_msg H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. abs_msg (G \<union> H) = abs_msg G \<union> abs_msg H
[PROOF STEP]
by (auto) |
[STATEMENT]
lemma poly_rel_monic[transfer_rule]: "(poly_rel ===> (=)) (monic_i ops) monic"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (poly_rel ===> (=)) (monic_i ops) monic
[PROOF STEP]
unfolding monic_i_def lead_coeff_i_def'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (poly_rel ===> (=)) (\<lambda>pp. coeff_i ops pp (degree_i pp) = arith_ops_record.one ops) monic
[PROOF STEP]
by transfer_prover |
Formal statement is: lemma real_sgn_eq: "sgn x = x / \<bar>x\<bar>" for x :: real Informal statement is: The sign function is the quotient of $x$ and its absolute value. |
[STATEMENT]
lemma euler_64:
"\<bar>exp 1 - 50143449209799256683 / 2 ^ 64\<bar> \<le> inverse (2 ^ 64 :: real)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<bar>exp 1 - 50143449209799256683 / 2 ^ 64\<bar> \<le> inverse (2 ^ 64)
[PROOF STEP]
by (rule approx_coarsen[OF euler_128]) simp_all |
Require Import FP.Core.
Class Galois (A B:qtype) :=
{ galoisΞ± : dom (A β B)
; galoisΞ³ : dom (B β A)
; galois_id_Ξ±Ξ³ : galoisΞ± β galoisΞ³ β id
; galois_id_Ξ³Ξ± : id β galoisΞ³ β galoisΞ±
}.
Global Opaque galoisΞ±.
Global Opaque galoisΞ³.
Ltac GaloisRewrite :=
match goal with
| |- β¨ (galoisΞ± (A:=?A) (B:=?B)) β (galoisΞ³ (A:=?A) (B:=?B)) IN _ |LTE| _ β© =>
WeakenBy (galois_id_Ξ±Ξ³ (A:=A) (B:=B))
| |- β¨ galoisΞ± β (galoisΞ³ β ?e) IN _ |LTE| _ β© =>
WeakenBy (qmonotonic_elim galois_id_Ξ±Ξ³ e e libReflexivity)
| |- β¨ (galoisΞ³ (A:=?A) (B:=?B)) β (galoisΞ± (A:=?A) (B:=?B)) IN _ |GTE| _ β© =>
StrengthenBy (galois_id_Ξ³Ξ± (A:=A) (B:=B))
| |- β¨ galoisΞ³ β (galoisΞ± β ?e) IN _ |GTE| _ β© =>
StrengthenBy (qmonotonic_elim galois_id_Ξ³Ξ± e e libReflexivity)
end.
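(* Sketch of the instance below (notation: ⊑ for the ordering, ∘ for composition,
   assuming the lifted maps are applied to monotone functions): the connection is
   lifted to function spaces by α# f = α ∘ f ∘ γ and γ# g = γ ∘ g ∘ α, and the two
   laws transfer pointwise:
     α# (γ# g) = (α ∘ γ) ∘ g ∘ (α ∘ γ) ⊑ id ∘ g ∘ id = g        using α ∘ γ ⊑ id
     γ# (α# f) = (γ ∘ α) ∘ f ∘ (γ ∘ α) ⊒ id ∘ f ∘ id = f        using id ⊑ γ ∘ α
   which correspond to the two obligations discharged with GaloisRewrite. *)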
Instance : forall A B `{! Galois A B }, Galois (A β A) (B β B) :=
{ galoisΞ± := Ξ» f β galoisΞ± β f β galoisΞ³
; galoisΞ³ := Ξ» g β galoisΞ³ β g β galoisΞ±
}.
Proof.
- Re fail || GaloisRewrite.
- Re fail || GaloisRewrite.
Defined. |
{-# OPTIONS --omega-in-omega --no-termination-check --overlapping-instances #-}
module Light.Subtyping where
open import Light.Level using (_β_ ; ++_)
open import Light.Variable.Levels
open import Light.Variable.Sets
record DirectSubtyping (π : Set aβ) (π : Set bβ) : Set (aβ β bβ) where
constructor #_
field castβ : π β π
open DirectSubtyping β¦ ... β¦ public
record StrictSubtyping (π : Set aβ) (π : Set aβ) : Set (++ aβ)
data Subtyping {aβ} : β (π : Set aβ) (π : Set aβ) β Set (++ aβ)
record StrictSubtyping {aβ} π π where
inductive
instance constructor transitivityβ²
field {Thing} : Set aβ
field β¦ direct β¦ : DirectSubtyping π Thing
field β¦ indirect β¦ : Subtyping Thing π
data Subtyping where
instance reflexivity : Subtyping π π
instance fromβstrict : β β¦ strict : StrictSubtyping π π β¦ β Subtyping π π
cast : β β¦ subtyping : Subtyping π π β¦ β π β π
cast β¦ subtyping = reflexivity β¦ a = a
cast β¦ subtyping = fromβstrict β¦ a = cast (castβ a)
-- Note: These cannot be instances, they must be written explicitly.
transitivityβ²β² : β β¦ aβtoβb : Subtyping π π β¦ β¦ bβtoβc : Subtyping π π β¦ β DirectSubtyping π π
transitivityβ²β² β¦ aβtoβb = aβtoβb β¦ β¦ bβtoβc = bβtoβc β¦ = # Ξ» a β cast β¦ subtyping = bβtoβc β¦ (cast β¦ subtyping = aβtoβb β¦ a)
transitivity : β β¦ aβtoβb : Subtyping π π β¦ β¦ bβtoβc : Subtyping π π β¦ β Subtyping π π
transitivity β¦ aβtoβb = aβtoβb β¦ β¦ bβtoβc = bβtoβc β¦ = fromβstrict where instance _ = transitivityβ²β² β¦ aβtoβb = aβtoβb β¦ β¦ bβtoβc = bβtoβc β¦
module _ β¦ cβtoβa : Subtyping π π β¦ β¦ bβtoβd : Subtyping π π β¦ where
instance
explicitβvariance : DirectSubtyping (π β π) (π β π)
implicitβvariance : DirectSubtyping (β {a : π} β π) (β {c : π} β π)
instanceβvariance : DirectSubtyping (β β¦ a : π β¦ β π) (β β¦ c : π β¦ β π)
explicitβvariance = # Ξ» f c β cast β¦ subtyping = bβtoβd β¦ (f (cast c))
implicitβvariance = # Ξ» f {c = c} β cast β¦ subtyping = bβtoβd β¦ (f {a = cast c})
instanceβvariance = # Ξ» f β¦ c = c β¦ β cast β¦ subtyping = bβtoβd β¦ (f β¦ a = cast c β¦)
|
Wrapped in Red is the sixth studio album by American recording artist Kelly Clarkson, released on October 25, 2013, by RCA Records. The album is a follow-up to her first greatest hits album, Greatest Hits – Chapter One, and its companion extended play, The <unk> Sessions Vol. 2. Produced by Greg Kurstin, it is her first Christmas album and her first record to be solely released by RCA. Wrapped in Red consists of sixteen tracks, featuring five co-penned original songs and eleven cover versions of Christmas standards and carols, two of which are duets featuring recording artists Ronnie Dunn, Reba McEntire and Trisha Yearwood.
|
import Tidying
import InfoDisplayTactics
example {a b : Nat} (h : a + b = b + a) : (a + b = b + a) := by
trace_state
tidy_target
trace_state
tidy_declarations
trace_state
apply h
example {a b c : Nat} (h : a + b = b + a) :
((a + b) + c = a + (b + c)) ∧ (a + b = b + a) := by
apply And.intro
trace_state
tidy_all
trace_state
tidy_everything
trace_state
apply Nat.add_assoc
apply h
|
Emil Carlsen : Venetian scene, ca.1908.
ca.1939 Private collection of [unknown].
ca.1908 Emil Carlsen, the artist.
1939 American Art Association, New York, NY, “Oil paintings : earlier works ultra-modern works drawings American & French bronze sculptures”, April 12.
– American Art Association, New York, NY, exhibition catalog, sale #4446, “Oil paintings : earlier works ultra-modern works drawings American & French bronze sculptures”, April 12, 1939, #87, page 30, not illustrated.
– Akron Art Institute, Akron, OH, “exhibition”, dates unknown.
– We believe this would be a variation of Venice, 1908.
– American Art Association, New York, NY, exhibition catalog, sale #4446, “Oil paintings : earlier works ultra-modern works drawings American & French bronze sculptures”, April 12, 1939, #87, page 30, not illustrated. |
module Problem01
import Data.Vect
-- Find the last element of a list.
myLastV : Vect (S n) a -> a
myLastV [x] = x
myLastV (_::x::xs) = myLastV (x::xs)
myIsCons : List a -> Bool
myIsCons [] = False
myIsCons (x::xs) = True
myLastL : (l : List a) -> (ok : isCons l = True) -> a
myLastL [x] _ = x
myLastL (y::x::xs) _ = myLastL (x::xs) Refl
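-- A minimal usage sketch (hypothetical values, not part of the exercise):
--   myLastV (the (Vect 3 Int) [1, 2, 3])   -- = 3
--   myLastL [1, 2, 3] Refl                 -- = 3, with Refl : isCons [1, 2, 3] = True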
|
import linear_algebra.tensor_product
import deprecated.subring
-- Swap these ↑ two imports, and then `foo` will always be happy.
-- This was not the case on commit `df4500242eb6aa6ee20b315b185b0f97a9b359c5`.
-- You would get a timeout.
import algebra.module.submodule.basic
variables {R M N P Q : Type*} [comm_ring R]
variables [add_comm_group M] [module R M]
variables [add_comm_group N] [module R N]
open function
lemma injective_iff (f : M →ₗ[R] N) : function.injective f ↔ ∀ m, f m = 0 → m = 0 :=
injective_iff_map_eq_zero f
lemma foo (L : submodule R (unit → R))
(H : ∀ (m : tensor_product R ↥L ↥L), (tensor_product.map L.subtype L.subtype) m = 0 → m = 0) :
injective (tensor_product.map L.subtype L.subtype) :=
(injective_iff _).mpr H
|
Formal statement is: lemma degree_of_nat [simp]: "degree (of_nat n) = 0" Informal statement is: The degree of a natural number is zero. |
{-# OPTIONS --warning=error #-}
module UselessAbstractAbstract where
A : Set₁
abstract
abstract
A = Set
|
Formal statement is: lemma degree_1 [simp]: "degree 1 = 0" Informal statement is: The degree of the polynomial $1$ is $0$. |
From mathcomp Require Import ssreflect ssrfun ssrbool ssrnat eqtype seq div prime.
From mathcomp Require Import path fintype bigop.
Add LoadPath "~/git/git.graillo.tf/stage/2019-06/src".
Require Import seq2 arith.
Lemma legendre_formula :
forall n p, prime p
-> logn p (n`!) = \sum_(1 <= a < (trunc_log p n).+1) n %/ p ^ a.
Proof.
move=> n p p_prime.
assert (
\sum_(1 <= a < (trunc_log p n).+1) n %/ p ^ a
= \sum_(1 <= a < (trunc_log p n).+1) \sum_(1 <= i < n.+1) (p ^ a %| i)
) as step1.
apply eq_big ; first by [].
move=> i _ ; rewrite divn_count_dvd //.
rewrite step1.
rewrite exchange_big /= fact_prod logn_prod_f.
apply eq_big_nat.
move=> m H.
move/andP in H.
destruct H as [m_gt_0 m_lt_Sn].
rewrite (big_cat_nat _ _ _ (ltn0Sn (trunc_log p m))) /=.
rewrite -(addn0 (logn p m)).
congr addn.
rewrite logn_count_dvd //.
rewrite (big_cat_nat _ _ _ (ltn0Sn (trunc_log p m))) /=.
rewrite -{2}(addn0 (\sum_(1 <= i < (trunc_log p m).+1) (p ^ i %| m))).
congr addn.
apply/eqP.
rewrite eqn_0_sum.
apply/allP.
move=> f f_in.
move/nthP in f_in.
destruct (f_in 0) as [i Hi Hf].
rewrite size_map in Hi.
rewrite (nth_map 0) // in Hf.
rewrite size_iota in Hi.
rewrite nth_iota // in Hf.
rewrite eqnE eq_sym -Hf pfactor_dvdn //.
assert (forall b : bool, (nat_of_bool b == 0) = ~~ b) as H by by case.
rewrite H -ltnNge.
apply ltn_addr.
rewrite ltnS.
apply leq_logn_trunc_log ; first by [].
by apply prime_gt1.
apply expn_ltn_exp with p ; first by apply prime_gt1.
by apply trunc_logP ; first by apply prime_gt1.
apply/eqP.
rewrite eq_sym eqn_0_sum.
apply/allP.
move=> f f_in.
move/nthP in f_in.
destruct (f_in 0) as [i Hi Hf].
rewrite size_map in Hi.
rewrite (nth_map 0) // in Hf.
rewrite size_iota in Hi.
rewrite nth_iota // in Hf.
rewrite eqnE eq_sym -Hf pfactor_dvdn //.
assert (forall b : bool, (nat_of_bool b == 0) = ~~ b) as H by by case.
rewrite H -ltnNge.
apply ltn_addr.
rewrite ltnS.
apply leq_logn_trunc_log ; first by [].
by apply prime_gt1.
apply leq_trunc_log ; first by apply prime_gt1.
apply/andP ; by split.
apply/allP.
move=> i Hi.
rewrite map_id mem_iota in Hi.
move/andP in Hi.
by destruct Hi.
Qed.
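(* In standard notation this is Legendre's formula for the p-adic valuation of n!:
     v_p(n!) = \sum_{a >= 1} floor(n / p^a),
   where only the terms with p^a <= n are nonzero, which is why the sum above can be
   truncated at (trunc_log p n).+1. *)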
|
_ : Set₁
_ = Set
_ : Set₁
_ = Set
module _ where
_ : Set₁
_ = Set
|
import combinatorics.simple_graph.coloring
--OUTPUT 1
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
assume hc : G.colorable 2,
have h1 : β (a : subtype V), is_coloring a.val 2, from hc,
have h2 : β (a : V β fin 2), is_coloring a 2, from by {
assume hc : G.colorable 2,
have h1 : β (a : subtype V), is_coloring a.val 2, from hc,
rcases h1 with β¨a,h2β©,
use a.val,
exact h2,
},
rcases h2 with β¨a,h2β©,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have h3 : β (A B : Type*), (V β fin 2) β (A β B), from by {
assume h2 : is_coloring (a : V β fin 2) 2,
have
end --Needs more than 2000 tokens!
--OUTPUT 2
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
sorry,
end
--OUTPUT 3
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
rw two_colorable_iff_two_partite,
apply iff.intro,
{ assume h1,
use (G.colors 2).fst.type,
use (G.colors 2).snd.type,
use (eq.symm (prod.mk.inj_iff.mpr (show (G.colors 2).fst.type β (G.colors 2).snd.type = (G.colors 2).fst.type Γ (G.colors 2).snd.type,
by {rw prod.mk_def, rw sum.mk_def, rw sum.rec_on, rw sum.rec_on, refl, }))),
show G β€ cast (congr_arg _ (eq.symm (prod.mk.inj_iff.mpr (show (G.colors 2).fst.type β (G.colors 2).snd.type = (G.colors 2).fst.type Γ (G.colors 2).snd.type,
by {rw prod.mk_def, rw sum.mk_def, rw sum.rec_on, rw sum.rec_on, refl, })))) (complete_bipartite_graph (G.colors 2).fst.type (G.colors 2).snd.type),
{ rw cast_le, rw le_antisymm_iff, rw subgraph.le_iff_subset_edges,
rw subgraph.edges_iff, rw bipartite_graph.edges_iff, rw prod.fst_def, rw prod.snd_def,
rw sum.cases_on, rw sum.cases_on, rw set.union_def, rw subgraph.edges_iff, rw subgraph.edges_iff,
rw subgraph.edges_iff, rw set.union_def, rw set.union_def, rw set.union_def, rw set.union_def,
split, show (G.colors 2).fst.edges β (G.colors 2).fst.edges, from set.subset.refl _,
show (G.colors 2).snd.edges β (G.colors 2).snd.edges, from set.subset.refl _,
show (G.colors 2).fst.edges β (G.colors 2).snd.edges, from by {rw h1, show β
β (G.colors 2).snd.edges, from set.subset.refl _, },
show (G.colors 2).snd.edges β (G.colors 2).fst.edges, from by {rw h1, show β
β (G.colors 2).fst.edges, from set.subset.refl _, },
show set.univ β set.univ, from set.subset.refl _,
show set.univ β set.univ, from set.subset.refl _,
show set.univ β set.univ, from set.subset.refl _,
show set.univ β set.univ, from set.subset.refl _,
},
{ rw cast_le, rw le_antisymm_iff, rw subgraph.le_iff_subset_edges,
rw subgraph.edges_iff, rw bipartite_graph.edges_iff, rw prod.fst_def, rw prod.snd_def,
rw sum.cases_on, rw sum.cases_on, rw set.union_def, rw subgraph.edges_iff, rw subgraph.edges_iff,
rw subgraph.edges_iff, rw set.union_def, rw set.union_def, rw set.union_def, rw set.union_def,
split, show (G.colors 2).fst.edges β (G.colors 2).fst.edges, from set.subset.refl _,
show (G.colors 2).snd.edges β (G.colors 2).snd.edges, from set.subset.refl _,
show (G.colors 2).fst.edges β (G.colors 2).snd.edges, from by {rw h1, show β
β (G.colors 2).snd.edges, from set.subset.refl _, },
show (G.colors 2).snd.edges β (G.colors 2).fst.edges, from by {rw h1, show β
β (G.colors 2).fst.edges, from set.subset.refl _, },
show set.univ β set.univ, from set.subset.refl _,
show set.univ β set.univ, from set.subset.refl _,
show set.univ β set.univ, from set.subset.refl _,
show set.univ β set.univ, from set.subset.refl _,
}
},
{ assume h1,
have h2 : ((G.colors 2).fst.type β (G.colors 2).snd.type) = fintype.card (G.colors 2).fst.type * fintype.card (G.colors 2).snd.type, from fintype.card_prod,
have h3 : (G.colors 2).fst.edges = β
, from by {rw (h1.left.right.right.right.right.right.right.left),
show ((G.colors 2).fst.edges β (G.colors 2).snd.edges) β (G.colors 2).fst.edges = β
, from
iff.intro (assume h, eq_empty_of_subset_empty h) (assume h, subset_empty _), },
have h4 : (G.colors 2).snd.edges = β
, from by {rw (h1.right.right.right.right.right.right.right.right),
show ((G.colors 2).snd.edges β (G.colors 2).fst.edges) β (G.colors 2).snd.edges = β
, from
iff.intro (assume h, eq_empty_of_subset_empty h) (assume h, subset_empty _), },
show β a b : V, a β b β Β¬ G.edge a b, from assume a b, assume hneq, assume h,
or.elim (eq_or_mem_of_mem_edges h)
(assume h1, absurd (eq_of_mem_edges h1) hneq)
(assume h1, have h2 : (a, b) β (G.colors 2).fst.edges β¨ (a, b) β (G.colors 2).snd.edges, from h1,
or.elim h2 (assume h3, absurd h3 h3) (assume h3, absurd h3 h3)),
}
end
--OUTPUT 4
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
{ assume h,
let A := Ξ» x : V, x.1 = (1 : fin 2),
let B := Ξ» x : V, x.1 = (0 : fin 2),
let h1 : (A β B) = V := by {apply funext, assume x, cases x, cases x_1, refl, cases x_1, refl},
let h2 : β x y : V, x β A β y β B β Β¬ G.adj x y := by {
assume x y hx hy hxy,
have h3 : x.1 = y.1, from by {apply nat.eq_of_succ_eq_succ, apply nat.succ_inj, apply hxy},
have h4 : x.1 = (1 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h5 : x.1 = (0 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h6 : x.1 = (1 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h7 : x.1 = (0 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.zero_eq_zero,
},
cases hx, cases hy, contradiction, contradiction, cases hx, cases hy, contradiction, contradiction,
},
let h3 : β x y : V, x β A β y β A β Β¬ G.adj x y := by {
assume x y hx hy hxy,
have h3 : x.1 = y.1, from by {apply nat.eq_of_succ_eq_succ, apply nat.succ_inj, apply hxy},
have h4 : x.1 = (1 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h5 : x.1 = (0 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h6 : x.1 = (1 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h7 : x.1 = (0 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.zero_eq_zero,
},
cases hx, cases hy, contradiction, contradiction, cases hx, cases hy, contradiction, contradiction,
},
let h4 : β x y : V, x β B β y β B β Β¬ G.adj x y := by {
assume x y hx hy hxy,
have h3 : x.1 = y.1, from by {apply nat.eq_of_succ_eq_succ, apply nat.succ_inj, apply hxy},
have h4 : x.1 = (1 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h5 : x.1 = (0 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h6 : x.1 = (1 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h7 : x.1 = (0 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.zero_eq_zero,
},
cases hx, cases hy, contradiction, contradiction, cases hx, cases hy, contradiction, contradiction,
},
let h5 : β x y : V, x β A β y β A β Β¬ complete_bipartite_graph A B x y := by {
assume x y hx hy hxy,
have h3 : x.1 = y.1, from by {apply nat.eq_of_succ_eq_succ, apply nat.succ_inj, apply hxy},
have h4 : x.1 = (1 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h5 : x.1 = (0 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h6 : x.1 = (1 : fin 2) β y.1 = (1 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.succ_eq_of_pos, apply dec_trivial, rw fin.succ_eq_of_pos, apply dec_trivial,
},
have h7 : x.1 = (0 : fin 2) β y.1 = (0 : fin 2), from by {
assume h5, apply h3, rw h5, rw fin.zero_eq_zero, rw fin.zero_eq_zero,
},
cases hx, cases hy, contradiction, contradiction, cases hx, cases hy, contradiction, contradiction,
},
let h6 : β x y : V, x β B β y β B β
end --Needs more than 2000 tokens!
--OUTPUT 5
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
have h1 : β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B) β (G.colorable 2), from by {
assume (A B : Type*) (h : (A β B) = V) (h2 : G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)),
have h3 : (complete_bipartite_graph A B).colorable 2, from by {
use (Ξ» (a : A β B), sum.inl a),
obviously,
},
have h4 : β (a b : A β B), (a,b) β G.to_rel β (sum.inl a, sum.inl b) β (complete_bipartite_graph A B).to_rel, from assume (a b : A β B),
have h5 : (a, b) β (cast (congr_arg _ h) (complete_bipartite_graph A B)).to_rel, from h2,
show (sum.inl a, sum.inl b) β (complete_bipartite_graph A B).to_rel, from h5,
have h6 : β (a b : A β B), (a,b) β G.to_rel β (sum.inr a, sum.inr b) β (complete_bipartite_graph A B).to_rel, from assume (a b : A β B),
have h5 : (a, b) β (cast (congr_arg _ h) (complete_bipartite_graph A B)).to_rel, from h2,
show (sum.inr a, sum.inr b) β (complete_bipartite_graph A B).to_rel, from h5,
have h7 : β (a b : A β B), (a,b) β G.to_rel β ((sum.inl a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inr b) β (complete_bipartite_graph A B).to_rel), from assume (a b : A β B) (h7 : (a,b) β G.to_rel),
or.inl (h4 a b h7),
have h8 : β (a b : A β B), (a,b) β G.to_rel β ((sum.inl a, sum.inr b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inl b) β (complete_bipartite_graph A B).to_rel), from assume (a b : A β B) (h7 : (a,b) β G.to_rel),
or.inr (h6 a b h7),
have h9 : β (a b : A β B), (a,b) β G.to_rel β ((sum.inl a, sum.inr b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inl a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inr b) β (complete_bipartite_graph A B).to_rel), from assume (a b : A β B) (h7 : (a,b) β G.to_rel),
or.inr (h7),
have h10 : β (a b : A β B), (a,b) β G.to_rel β ((sum.inl a, sum.inr b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inl a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inr b) β (complete_bipartite_graph A B).to_rel) β¨ false, from assume (a b : A β B) (h7 : (a,b) β G.to_rel),
or.inr (h7),
show G.colorable 2, from by {
use (Ξ» (a : A β B), sum.inl a),
show (Ξ» (a : A β B), sum.inl a) β (complete_bipartite_graph A B).colorings 2, from by {
have h11 : β (a b : A β B), (a,b) β G.to_rel β ((sum.inl a, sum.inr b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inl a, sum.inl b) β (complete_bipartite_graph A B).to_rel) β¨ ((sum.inr a, sum.inr b) β (complete_bipartite_graph A B).to_rel) β¨ false, from assume (a b : A β B) (h7 : (a,b) β G.to_rel),
or.inr (h7),
use h11,
obviously,
},
},
rw β h3,
obviously,
},
have h2 : β (A B : Type*) (h : (A β B) = V), (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) β (G.colorable 2), from by {
assume (A B : Type*) (h : (A β B) = V),
assume (h2 : β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)),
have h3 : (A β B) = V, from by {
rw β h,
exact h2.left,
},
have h4 : G β€ cast (congr_arg _ h3) (complete_bipartite_graph A B), from by {
rw β h3,
exact h2.right,
},
show (G.colorable 2), from h1 A B h3 h4,
},
show (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)), from iff.intro h2 h1,
end
--OUTPUT 6
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
assume hcolorable : G.colorable 2,
obtain β¨f, hf1, hf2β© := hcolorable,
let A := f '' {1},
let B := f '' {2},
have h1 : A β V, from by {apply image_subset_iff.mp, apply set.subset.trans (set.singleton_subset_iff.mpr 1) (set.subset_univ 1), },
have h2 : B β V, from by {apply image_subset_iff.mp, apply set.subset.trans (set.singleton_subset_iff.mpr 2) (set.subset_univ 2), },
have h3 : A β© B = β
, from by {
let ha : A β© B β β
, from by {
assume (x : A β© B) (hx : x β A β© B),
obtain β¨h1, h2β© := hx,
let y : A, from h1,
let z : B, from h2,
obtain β¨w1, hw1β© := y,
obtain β¨w2, hw2β© := z,
have hw1' : w1 = 1, from by {
have hw1'' : w1 β {1}, from by {apply image_subset_iff.mpr hw1, apply set.subset.trans (set.singleton_subset_iff.mpr 1) (set.subset_univ 1), },
exact set.mem_singleton_iff.mp hw1'',
},
have hw2' : w2 = 2, from by {
have hw2'' : w2 β {2}, from by {apply image_subset_iff.mpr hw2, apply set.subset.trans (set.singleton_subset_iff.mpr 2) (set.subset_univ 2), },
exact set.mem_singleton_iff.mp hw2'',
},
have hw : w1 = w2, from by {
rw β hw2',
rw β hw1',
},
rw hw at hw1,
exact absurd hw1 (set.mem_inter_iff.mpr β¨h1,h2β©).2,
},
exact set.subset_empty_iff.mpr ha,
},
have h4 : A βͺ B = V, from by {
let h : A βͺ B β V, from by {
assume (x : A βͺ B) (hx : x β A βͺ B),
have h1 : x β A β¨ x β B, from set.mem_union_iff.mp hx,
cases h1,
have h2 : x β A, from h1,
rw β image_subset_iff at h2,
cases h2,
exact h1_h,
cases h2,
exact absurd h2.right (set.mem_singleton_iff.mp h2.left).symm,
have h2 : x β B, from h1,
rw β image_subset_iff at h2,
cases h2,
exact h1_h,
cases h2,
exact absurd h2.right (set.mem_singleton_iff.mp h2.left).symm,
},
exact set.subset.antisymm h (set.subset_univ V),
},
have h5 : A β B = V, from by {
rw β h4,
exact sum.union_is_disjoint A B h3,
},
have h6 : β x, β y, f x = y, from by {
intros x,
have h : x β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h1 : x = 1 β¨ x = 2, from set.mem_singleton_iff.mp h,
cases h1,
use 1,
rw h1,
use 2,
rw h1,
},
have h7 : β x, x β V β β y, f x = y, from by {
intros x h,
have h1 : x β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h2 : x = 1 β¨ x = 2, from set.mem_singleton_iff.mp h1,
cases h2,
use 1,
rw h2,
use 2,
rw h2,
},
have h8 : β x, x β V β β! y, f x = y, from by {
intros x h,
have h1 : x β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h2 : x = 1 β¨ x = 2, from set.mem_singleton_iff.mp h1,
cases h2,
use 1,
rw h2,
obviously,
use 2,
rw h2,
obviously,
},
have h9 : β x y, x β V β y β V β f x = f y β x = y, from by {
intros x y hx hy hxy,
have h1 : x β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h2 : x = 1 β¨ x = 2, from set.mem_singleton_iff.mp h1,
cases h2,
have h3 : y β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h4 : y = 1 β¨ y = 2, from set.mem_singleton_iff.mp h3,
cases h4,
rw h2,
rw h4,
refl,
rw h2,
rw h4,
exact absurd hxy (eq.symm (set.mem_singleton_iff.mp h4)).symm,
have h3 : y β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h4 : y = 1 β¨ y = 2, from set.mem_singleton_iff.mp h3,
cases h4,
rw h2,
rw h4,
exact absurd hxy (eq.symm (set.mem_singleton_iff.mp h2)).symm,
rw h2,
rw h4,
refl,
},
have h10 : β x y, x β V β y β V β f x β f y β x β y, from by {
intros x y hx hy hxy,
have h1 : x β {1,2}, from set.mem_union_iff.mpr β¨set.mem_singleton_iff.mpr 1,set.mem_singleton_iff.mpr 2β©,
have h2 : x = 1 β¨ x = 2, from set.mem_singleton_iff.mp h1,
cases h2,
have h3 : y β {1,
end --Needs more than 2000 tokens!
--OUTPUT 7
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
assume hcol : G.colorable 2,
have h1 : β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
let A := {v : V | v.color = 1},
let B := {v : V | v.color = 2},
let h : (A β B) = V :=
begin
apply equiv.ext,
assume v : V,
have h1 : (v.color = 1) β¨ (v.color = 2), from by {apply hcol,exact v.2},
cases h1 with hc1 hc2,
exact (sum.inl β¨v,hc1β©),
exact (sum.inr β¨v,hc2β©),
end,
show β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
use A,
use B,
use h,
show G β€ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
apply subgraph.mono,
assume a b : V,
assume hab : a -- b,
have h1 : (a : A β B) β (b : A β B), from by {
assume h2 : (a : A β B) = b,
rw β h2 at hab,
rw β h at hab,
have h1 : a.color = b.color, from hab.2,
have h2 : a.color = 1, from h1.symm βΈ hab.1,
have h3 : b.color = 2, from hab.2.symm βΈ hab.1,
exact (ne.symm h3.symm).elim,
},
exact (complete_bipartite_graph A B).edge_iff.2 β¨h1,hab.2β©,
},
},
},
exact h1,
assume hbip : β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B),
have h1 : β (A B : Type*) (h : (A β B) = V), β (v : V), β! (a : A β B), a = v, from by {
let (A B : Type*) (h : (A β B) = V) (h1 : G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) := hbip,
let h2 : β (v : V), β! (a : A β B), a = v :=
begin
assume v : V,
have h2 : β! (a : A β B), a = v, from by {
have h3 : v β V, from by {exact (v.2).1},
have h4 : v β A β B, from by {rw β h, exact h3},
cases h4 with ha hb,
use sum.inl ha,
obviously,
use sum.inr hb,
obviously,
},
exact h2,
end,
use A,
use B,
use h,
use h2,
},
have h2 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of_fintype h1},
have h3 : β (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact classical.some h2},
have h4 : β (v : V), β! (a : A β B), a = v, from some_spec h2,
have h5 : β (v : V), β! (a : A β B), a = v, from
by {exact h3.right},
have h6 : β (v : V), β! (a : A β B), a = v, from h5,
have h7 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of_fintype h6},
have h8 : V β fin 2, from classical.some h7,
have h9 : β (v : V), β! (a : A β B), a = v, from some_spec h7,
have h10 : β (v : V), β! (a : A β B), a = v, from h9,
have h11 : β (v : V), β! (a : A β B), a = v, from h10,
have h12 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of_fintype h11},
have h13 : V β fin 2, from classical.some h12,
have h14 : β (v : V), β! (a : A β B), a = v, from some_spec h12,
have h15 : β (v : V), β! (a : A β B), a = v, from h14,
have h16 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of_fintype h15},
have h17 : V β fin 2, from classical.some h16,
have h18 : β (v : V), β! (a : A β B), a = v, from some_spec h16,
have h19 : β (v : V), β! (a : A β B), a = v, from h18,
have h20 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of_fintype h19},
have h21 : V β fin 2, from classical.some h20,
have h22 : β (v : V), β! (a : A β B), a = v, from some_spec h20,
have h23 : β (v : V), β! (a : A β B), a = v, from h22,
have h24 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of_fintype h23},
have h25 : V β fin 2, from classical.some h24,
have h26 : β (v : V), β! (a : A β B), a = v, from some_spec h24,
have h27 : β (v : V), β! (a : A β B), a = v, from h26,
have h28 : β! (f : V β fin 2), β (v : V), β! (a : A β B), a = v, from
by {exact exists_unique_of_exists_unique_of
end --Needs more than 2000 tokens!
--OUTPUT 8
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
split,
{ assume h1 : (G.colorable 2),
have h2 : β (f : V β fin 2), G.is_coloring f, from h1,
cases h2 with f h2,
have h3 : (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)), from by {
let A : Type* := {x : V | f x = 0},
let B : Type* := {x : V | f x = 1},
let h : (A β B) = V := by {
ext x,
split,
{ assume hx : x β A,
have hh1 : f x = 0, from hx,
show x β (A β B), from or.inl β¨x,hh1β©, },
{ assume hx : x β B,
have hh1 : f x = 1, from hx,
show x β (A β B), from or.inr β¨x,hh1β©, },
{ assume hx : x β (A β B),
cases hx,
{ assume hx1 : x β A,
have hh1 : f x = 0, from hx1,
show x β A, from β¨x,hh1β©, },
{ assume hx1 : x β B,
have hh1 : f x = 1, from hx1,
show x β B, from β¨x,hh1β©, }
}
},
have h4 : G β€ cast (congr_arg _ h) (complete_bipartite_graph A B), from by {
simp only [has_edge, subgraph.has_edge, cast, subtype.coe_mk, subtype.coe_mk, complete_bipartite_graph],
assume x y hxy,
cases hxy,
{ assume h1,
have h2 : f x = f y, from h2 β¨x,h1β© β¨y,h1β©,
rw h2 at h1,
exact h1.elim, },
{ assume h1,
have h2 : f x = f y, from h2 β¨x,h1β© β¨y,h1β©,
rw h2 at h1,
exact h1.elim, },
},
use [A,B,h,h4],
},
exact h3,
},
{ assume h1 : (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)),
cases h1 with A h1, cases h1 with B h1, cases h1 with h h1,
let f : V β fin 2 := Ξ» (x : V), if x β A then 0 else 1,
have h2 : G.is_coloring f, from by {
simp only [has_edge, subgraph.has_edge, cast, subtype.coe_mk, subtype.coe_mk, complete_bipartite_graph],
assume x y hxy,
cases hxy,
{ assume h1,
have h2 : f x = f y, from by {rw [f,f], simp only [hxy.left,hxy.right]},
have h3 : x β A, from h1.left,
have h4 : y β A, from h1.right,
rw h2 at h3,
exact h3.elim,
},
{ assume h1,
have h2 : f x = f y, from by {rw [f,f], simp only [hxy.left,hxy.right]},
have h3 : x β B, from h1.left,
have h4 : y β B, from h1.right,
rw h2 at h3,
exact h3.elim,
},
},
exact β¨f,h2β©,
},
end
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {Ξ± : Type*} (S : set Ξ±) : β A B β π« S, (A β© B) β π« S :=
begin
assume (A : set Ξ±) (hA : A β π« S) (B : set Ξ±) (hB : B β π« S),
have h1 : (A β S) β§ (B β S), from by {split,apply set.subset_of_mem_powerset,exact hA,apply set.subset_of_mem_powerset,exact hB},
have h2 : (A β© B) β A, from by apply set.inter_subset_left,
have h3 : (A β© B) β S, from by {apply set.subset.trans h2 h1.left},
show (A β© B) β π« S, from by {apply set.mem_powerset h3},
end
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : β) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
calc (x + y)^2 = (x+y)*(x+y) : by rw sq
... = x*(x+y) + y*(x+y) : by rw add_mul
... = x*x + x*y + y*x + y*y : by {rw [mul_comm x (x+y),mul_comm y (x+y)], rw [add_mul,add_mul], ring}
... = x^2 + 2*x*y + y^2 : by {repeat {rw β sq}, rw mul_comm y x, ring}
end
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : β! e : G, β a : G, e * a = a β§ a * e = a :=
begin
have h1 : β a b : G, β! x : G, a * x = b, from by {
assume a b : G, use aβ»ΒΉ * b, obviously, },
have h2 : β a b : G, β! y : G, y * a = b, from by {
assume a b : G, use b * aβ»ΒΉ, obviously, },
have h3 : β a : G, β! x : G, a * x = a, from
assume a : G, h1 a a,
have h4 : β a : G, β! y : G, y * a = a, from
assume a : G, h2 a a,
have h5 : β a : G, classical.some (h3 a).exists = (1 : G), from assume a :G,
exists_unique.unique (h3 a) (classical.some_spec (exists_unique.exists (h3 a)))
(mul_one a),
have h6 : β a : G, classical.some (h4 a).exists = (1 : G), from assume a : G,
exists_unique.unique (h4 a) (classical.some_spec (exists_unique.exists (h4 a))) (one_mul a),
show β! e : G, β a : G, e * a = a β§ a * e = a, from by {
use (1 : G),
have h7 : β e : G, (β a : G, e * a = a β§ a * e = a) β e = 1, from by {
assume (e : G) (hident : β a : G, e * a = a β§ a * e = a),
have h8 : β a : G, e = classical.some (h3 a).exists, from assume (a : G),
exists_unique.unique (h3 a) (hident a).right
(classical.some_spec (exists_unique.exists (h3 a))),
have h9 : β a : G, e = classical.some (h4 a).exists, from assume (a : G),
exists_unique.unique (h4 a) (hident a).left
(classical.some_spec (exists_unique.exists (h4 a))),
show e = (1 : G), from eq.trans (h9 e) (h6 _),
},
exact β¨by obviously, h7β©,
}
end
/--`theorem`
Bipartite Graph is two colorable
Let $G$ be a graph. Then $G$ is 2-colorable if and only if $G$ is bipartite.
`proof`
Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color. Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue. Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$. This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
Conversely, suppose $G$ is bipartite, that is, we can partition the vertices into two subsets $V_{1}, V_{2}$ every edge has one endpoint in $V_{1}$ and the other in $V_{2}$. Then coloring every vertex of $V_{1}$ red and every vertex of $V_{2}$ blue yields a valid coloring, so $G$ is 2-colorable.
QED
-/
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] : (G.colorable 2) β (β (A B : Type*) (h : (A β B) = V), G β€ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
FEW SHOT PROMPTS TO CODEX(END)-/
|
(*
This file contains the update semantics to C correspondence lemmas for word array functions
*)
theory WordArray_UpdCCorres
imports WordArray_UAbsFun
begin
context update_sem_init begin
definition
"abs_fun_rel \<Xi>' srel afun_name \<xi>' afun_mon \<sigma> st x x'
= (proc_ctx_wellformed \<Xi>' \<longrightarrow> (\<xi>' matches-u \<Xi>') \<longrightarrow> (\<sigma>,st) \<in> srel \<longrightarrow>
(\<forall>r' w'. val_rel x x'
\<and> (\<Xi>', \<sigma> \<turnstile> x :u prod.fst (prod.snd (prod.snd (prod.snd (\<Xi>' afun_name)))) \<langle>r', w'\<rangle>)
\<longrightarrow> \<not> prod.snd (afun_mon x' st)
\<and> (\<forall>st' y'. (y', st') \<in> prod.fst (afun_mon x' st)
\<longrightarrow> (\<exists>\<sigma>' y. \<xi>' afun_name (\<sigma>, x) (\<sigma>', y)
\<and> val_rel y y' \<and> (\<sigma>', st') \<in> srel))))"
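(* Informally: under a well-formed context, an abstract-function environment that
   matches \<Xi>', and related update/C states, the C monad afun_mon never fails on a
   value related to a well-typed update-semantics input, and every result it can
   return is matched by a step of \<xi>' afun_name that preserves val_rel and srel. *)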
lemma absfun_corres:
"abs_fun_rel \<Xi>' srel s \<xi>' afun' \<sigma> st (\<gamma> ! i) v'
\<Longrightarrow> i < length \<gamma> \<Longrightarrow> val_rel (\<gamma> ! i) v'
\<Longrightarrow> \<Gamma>' ! i = option.Some (prod.fst (prod.snd (prod.snd (prod.snd (\<Xi>' s)))))
\<Longrightarrow> corres srel
(App (AFun s [] ls) (Var i))
(do x \<leftarrow> afun' v'; gets (\<lambda>s. x) od)
\<xi>' \<gamma> \<Xi>' \<Gamma>' \<sigma> st"
apply (clarsimp simp: corres_def abs_fun_rel_def)
apply (frule matches_ptrs_length, simp)
apply (frule(2) matches_ptrs_proj_single')
apply clarsimp
apply (erule impE, blast)
apply clarsimp
apply (elim allE, drule mp, blast)
apply clarsimp
apply (intro exI conjI[rotated], assumption+)
apply (rule u_sem_abs_app)
apply (rule u_sem_afun)
apply (rule u_sem_var)
apply simp
done
lemma abs_fun_rel_def':
"abs_fun_rel \<Xi>' srel afun_name \<xi>' afun_mon \<sigma> st x x'
= (proc_ctx_wellformed \<Xi>' \<longrightarrow> \<xi>' matches-u \<Xi>' \<longrightarrow> (\<sigma>,st) \<in> srel \<longrightarrow>
(\<forall>r' w'. val_rel x x' \<and>
\<Xi>', \<sigma> \<turnstile> x :u prod.fst (prod.snd (prod.snd (prod.snd (\<Xi>' afun_name)))) \<langle>r', w'\<rangle>
\<longrightarrow> \<lbrace>\<lambda>s0. s0 = st\<rbrace>
afun_mon x'
\<lbrace>\<lambda>y' s'. \<exists>\<sigma>' y. \<xi>' afun_name (\<sigma>, x) (\<sigma>', y) \<and> (\<sigma>',s') \<in> srel \<and> val_rel y y'\<rbrace>!))"
by (fastforce simp: abs_fun_rel_def validNF_def valid_def no_fail_def)
end (* of context *)
sublocale WordArray \<subseteq> Generated _ wa_abs_typing_u wa_abs_repr
by (unfold_locales)
context WordArray begin
section "Correspondence Lemmas Between Update Semantics and C"
lemma upd_C_wordarray_put2_corres_gen:
"\<And>i \<gamma> v' \<Gamma>' \<sigma> st.
\<lbrakk>i < length \<gamma>; val_rel (\<gamma> ! i) v';
\<Gamma>' ! i = option.Some (prod.fst (prod.snd (prod.snd (prod.snd (\<Xi> ''wordarray_put2_0'')))));
\<xi>0' ''wordarray_put2_0'' = upd_wa_put2_0\<rbrakk>
\<Longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr
(Generated.state_rel wa_abs_repr) (App (AFun ''wordarray_put2_0'' [] ls) (Var i))
(do x <- main_pp_inferred.wordarray_put2_0' v';
gets (\<lambda>s. x)
od)
\<xi>0' \<gamma> \<Xi> \<Gamma>' \<sigma> st"
apply (rule absfun_corres; simp?)
apply (thin_tac "\<Gamma>' ! i = _")
apply (clarsimp simp: abs_fun_rel_def; rename_tac r w)
apply (rotate_tac -1)
apply (subst (asm) \<Xi>_def)
apply (subst (asm) \<Xi>_def)
apply (clarsimp simp: val_rel_simp wordarray_put2_0_type_def abbreviated_type_defs)
apply (erule u_t_recE)
apply (erule u_t_r_consE; clarsimp)+
apply (erule u_t_primE)+
apply (subst (asm) lit_type.simps)+
apply clarsimp
apply (erule u_t_r_emptyE)
apply (erule u_t_ptrE; clarsimp)
apply (frule wa_abs_typing_u_elims(1); clarsimp; rename_tac len arr)
apply (rule conjI)
apply (monad_eq simp: wordarray_put2_0'_def)
apply (clarsimp simp: state_rel_def heap_rel_def)
apply (erule_tac x = "t2_C.arr_C v'" in allE)
apply (erule_tac x = "values_C (heap_WordArray_u32_C st (t2_C.arr_C v')) +\<^sub>p uint (t2_C.idx_C v')" in allE)
apply (clarsimp simp: heap_rel_ptr_def heap_rel_meta_def wa_abs_repr_def is_valid_simp type_rel_simp)
apply (frule wa_abs_typing_u_elims(5))
apply (erule_tac x = "t2_C.idx_C v'" in allE)+
apply (clarsimp simp: val_rel_simp heap_simp type_rel_simp)
apply clarsimp
apply (monad_eq simp: upd_wa_put2_0_def)
apply (case_tac "idx_C v' < len"; clarsimp)
apply (rule conjI)
apply (monad_eq simp: wordarray_put2_0'_def)
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (monad_eq simp: wordarray_put2_0'_def)
apply (frule_tac p = "t2_C.arr_C v'" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: val_rel_simp heap_simp is_valid_simp)
apply (drule wa_abs_typing_u_elims(5))
apply (erule_tac x = "t2_C.idx_C v'" in allE; clarsimp)
apply (drule_tac upd_h = "(heap_w32_update
(\<lambda>x. x(values_C (heap_WordArray_u32_C st (t2_C.arr_C v')) +\<^sub>p uint (idx_C v') := val_C v')) st)" and
x = "(values_C (heap_WordArray_u32_C st (t2_C.arr_C v'))) +\<^sub>p uint (idx_C v')" and
uv' = "UPrim (LU32 (val_C v'))" and uv = "UPrim x"
in all_heap_rel_updE; simp?; clarsimp?)
apply (clarsimp simp: type_rel_simp)
apply (drule_tac upd_h = "(heap_w32_update
(\<lambda>x. x(values_C (heap_WordArray_u32_C st (t2_C.arr_C v')) +\<^sub>p uint (idx_C v') := val_C v')) st)" and
x = "(values_C (heap_WordArray_u32_C st (t2_C.arr_C v'))) +\<^sub>p uint (idx_C v')" and
uv' = "UPrim (LU32 (val_C v'))" and uv = "UPrim x"
in all_heap_rel_updE; simp?; clarsimp?)
apply (rule conjI, clarsimp simp: val_rel_simp)
apply clarsimp
apply (rule FalseE)
apply (cut_tac p = p and q = "((values_C (heap_WordArray_u32_C st (t2_C.arr_C v'))) +\<^sub>p uint (idx_C v'))" in ptr_val_inj)
apply (clarsimp simp: ptr_add_def)
apply (monad_eq simp: wordarray_put2_0'_def)
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (frule_tac p = "t2_C.arr_C v'" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: val_rel_simp heap_simp is_valid_simp)
done
lemmas upd_C_wordarray_put2_corres = upd_C_wordarray_put2_corres_gen[rotated -1, of \<xi>0, simplified fun_eq_iff]
lemma upd_C_wordarray_length_corres_gen:
"\<And>i \<gamma> v' \<Gamma>' \<sigma> st.
\<lbrakk>i < length \<gamma>; val_rel (\<gamma> ! i) v';
\<Gamma>' ! i = option.Some (prod.fst (prod.snd (prod.snd (prod.snd (\<Xi> ''wordarray_length_0'')))));
\<xi>0' ''wordarray_length_0'' = upd_wa_length_0\<rbrakk>
\<Longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr
(Generated.state_rel wa_abs_repr) (App (AFun ''wordarray_length_0'' [] ls) (Var i))
(do x <- main_pp_inferred.wordarray_length_0' v';
gets (\<lambda>s. x)
od)
\<xi>0' \<gamma> \<Xi> \<Gamma>' \<sigma> st"
apply (rule absfun_corres; simp?)
apply (clarsimp simp: abs_fun_rel_def; rename_tac r w)
apply (thin_tac "\<Gamma>' ! i = _")
apply (rotate_tac -1)
apply (subst (asm) \<Xi>_def)
apply (subst (asm) \<Xi>_def)
apply (clarsimp simp: val_rel_simp wordarray_length_0_type_def)
apply (erule u_t_ptrE; clarsimp)
apply (frule wa_abs_typing_u_elims(1); clarsimp; rename_tac len arr)
apply (rule conjI)
apply (monad_eq simp: wordarray_length_0'_def)
apply (clarsimp simp: state_rel_def heap_rel_def)
apply (erule_tac x = v' in allE)
apply (clarsimp simp: heap_rel_ptr_def type_rel_simp wa_abs_repr_def is_valid_simp)
apply clarsimp
apply (rule_tac x = \<sigma> in exI)
apply (rule conjI)
apply (clarsimp simp: upd_wa_length_0_def)
apply (monad_eq simp: wordarray_length_0'_def)
apply (clarsimp simp: state_rel_def heap_rel_def)
apply (erule_tac x = v' in allE)
apply (clarsimp simp: heap_rel_ptr_def type_rel_simp wa_abs_repr_def heap_simp val_rel_simp)
apply (monad_eq simp: wordarray_length_0'_def)
done
lemmas upd_C_wordarray_length_corres = upd_C_wordarray_length_corres_gen[rotated -1, of \<xi>0, simplified fun_eq_iff]
lemma upd_C_wordarray_get_corres_gen:
"\<And>i \<gamma> v' \<Gamma>' \<sigma> st.
\<lbrakk>i < length \<gamma>; val_rel (\<gamma> ! i) v';
\<Gamma>' ! i = option.Some (prod.fst (prod.snd (prod.snd (prod.snd (\<Xi> ''wordarray_get_0'')))));
\<xi>0' ''wordarray_get_0'' = upd_wa_get_0\<rbrakk>
\<Longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr
(Generated.state_rel wa_abs_repr) (App (AFun ''wordarray_get_0'' [] ls) (Var i))
(do x <- main_pp_inferred.wordarray_get_0' v';
gets (\<lambda>s. x)
od)
\<xi>0' \<gamma> \<Xi> \<Gamma>' \<sigma> st"
apply (rule absfun_corres; simp?)
apply (clarsimp simp: abs_fun_rel_def; rename_tac r w)
apply (thin_tac "\<Gamma>' ! i = _")
apply (rotate_tac -1)
apply (subst (asm) \<Xi>_def)
apply (subst (asm) \<Xi>_def)
apply (clarsimp simp: val_rel_simp wordarray_get_0_type_def abbreviated_type_defs)
apply (erule u_t_recE)
apply (erule u_t_r_consE; clarsimp)+
apply (erule u_t_r_emptyE)
apply (erule u_t_primE; subst (asm) lit_type.simps; clarsimp)
apply (erule u_t_ptrE; clarsimp)
apply (frule wa_abs_typing_u_elims(1); clarsimp; rename_tac len arr)
apply (rule conjI)
apply (monad_eq simp: wordarray_get_0'_def)
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (drule_tac p = "t1_C.p1_C v'" and uv = "UAbstract (UWA (TPrim (Num U32)) len arr)" in all_heap_rel_ptrD;
clarsimp simp: type_rel_simp wa_abs_repr_def val_rel_simp is_valid_simp heap_simp)
apply (drule not_le_imp_less)
apply (frule wa_abs_typing_u_elims(5))
apply (erule_tac x = "t1_C.p2_C v'" in allE; clarsimp)
apply (drule_tac p = "values_C (heap_WordArray_u32_C st (t1_C.p1_C v')) +\<^sub>p uint (t1_C.p2_C v')" and
uv = "UPrim x" in all_heap_rel_ptrD; simp add: type_rel_simp)
apply clarsimp
apply (rule_tac x = \<sigma> in exI)
apply (rule conjI)
apply (clarsimp simp: upd_wa_get_0_def)
apply (frule wa_abs_typing_u_elims(5))
apply (erule_tac x = "t1_C.p2_C v'" in allE)
apply (monad_eq simp: wordarray_get_0'_def word_less_nat_alt word_le_nat_alt)
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (drule_tac p = "t1_C.p1_C v'" and uv = "UAbstract (UWA (TPrim (Num U32)) len arr)" in all_heap_rel_ptrD;
clarsimp simp: type_rel_simp wa_abs_repr_def val_rel_simp is_valid_simp heap_simp)
apply (drule_tac p = "values_C (heap_WordArray_u32_C st (t1_C.p1_C v')) +\<^sub>p uint (t1_C.p2_C v')" and
uv = "UPrim x" in all_heap_rel_ptrD;
clarsimp simp: type_rel_simp val_rel_simp)
apply (monad_eq simp: wordarray_get_0'_def)
apply blast
done
lemmas upd_C_wordarray_get_corres = upd_C_wordarray_get_corres_gen[rotated -1, of \<xi>0, simplified fun_eq_iff]
abbreviation "mk_urecord xs \<equiv> URecord (map (\<lambda>x. (x, uval_repr x)) xs) None"
definition "foldmap_measure i end \<equiv> unat end - unat i"
definition "foldmap_bounds frm to len i e
\<equiv> frm \<le> i \<and> e = min to len \<and> (frm < e \<longrightarrow> i \<le> e) \<and> ((\<not>(frm < e)) \<longrightarrow> frm = i)"
definition "foldmap_inv foldmap n \<xi>' \<sigma> p frm i f acc obsv \<sigma>' res s' res'
\<equiv> foldmap \<xi>' \<sigma> p frm i f (foldmap_acc_type n) acc (foldmap_obsv_type n) obsv (\<sigma>', res) \<and>
val_rel res res' \<and> (\<sigma>', s') \<in> state_rel"
definition "foldmap_inv_stat obsv obsv' \<equiv> val_rel obsv obsv'"
lemma whileLoop_add_invI:
assumes "\<lbrace> P \<rbrace> whileLoop_inv c b init I (measure M) \<lbrace> Q \<rbrace>!"
shows "\<lbrace> P \<rbrace> whileLoop c b init \<lbrace> Q \<rbrace>!"
by (metis assms whileLoop_inv_def)
lemma validNF_select_UNIV:
"\<lbrace>\<lambda>s. \<forall>x. Q x s\<rbrace> select UNIV \<lbrace>Q\<rbrace>!"
apply (subst select_UNIV_unknown)
apply (rule validNF_unknown)
done
lemma \<Xi>_wordarray_fold_no_break_0:
"\<Xi> ''wordarray_fold_no_break_0'' = wordarray_fold_no_break_0_type"
by (clarsimp simp: \<Xi>_def)
abbreviation "elem_type x \<equiv> (present_type \<circ> (\<lambda>xs. xs ! 0) \<circ> rec_type_list) x"
lemma fold_dispatch_wp:
"\<lbrakk>proc_ctx_wellformed \<Xi>; proc_env_matches_ptrs \<xi>0' \<Xi>;
wa_abs_typing_u \<Xi> (UWA (TPrim (Num num)) len arr) ''WordArray'' [TPrim (Num num)] (Boxed ReadOnly ptrl) r w \<sigma>;
\<sigma> p = option.Some (UAbstract (UWA (TPrim (Num num)) len arr));
uval_typing \<Xi> \<sigma> acc (foldmap_acc_type ''wordarray_fold_no_break_0'') ra wa;
uval_typing \<Xi> \<sigma> obsv (foldmap_obsv_type ''wordarray_fold_no_break_0'') ro {};
wa \<inter> r = {}; wa \<inter> ro = {}; p \<notin> wa;
(\<Xi>, 0, [], {}, [option.Some (foldmap_funarg_type ''wordarray_fold_no_break_0'')] \<turnstile>
(App f (Var 0)) : (foldmap_funret_type ''wordarray_fold_no_break_0''));
\<forall>x x' \<sigma> s. val_rel x x' \<longrightarrow>
update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App f (Var 0)) (do ret <- dispatch f_num x'; gets (\<lambda>s. ret) od)
\<xi>0' [x] \<Xi> [option.Some (foldmap_funarg_type ''wordarray_fold_no_break_0'')] \<sigma> s;
elem_type (foldmap_funarg_type ''wordarray_fold_no_break_0'') = TPrim (Num num)\<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>sa. (a', n') = (a, n) \<and> n < e \<and>
(\<exists>\<sigma>' res x v. args = t3_C.elem_C_update (\<lambda>_. v) a \<and>
\<sigma>' (arr + size_of_num_type num * n) = option.Some x \<and> val_rel x v \<and>
foldmap_inv upd_wa_foldnb_bod ''wordarray_fold_no_break_0'' \<xi>0' \<sigma> p frm n f acc obsv \<sigma>' res sa (t3_C.acc_C args)) \<and>
foldmap_bounds frm to len n e \<and> foldmap_inv_stat obsv (t3_C.obsv_C args)\<rbrace>
dispatch f_num args
\<lbrace>\<lambda>ret sb. (\<exists>\<sigma>' res.
foldmap_inv upd_wa_foldnb_bod ''wordarray_fold_no_break_0'' \<xi>0' \<sigma> p frm (n + 1) f acc obsv \<sigma>' res sb ret) \<and>
foldmap_inv_stat obsv (t3_C.obsv_C args) \<and> foldmap_bounds frm to len (n + 1) e \<and>
foldmap_measure (n + 1) to < foldmap_measure n' to\<rbrace>!"
apply (subst validNF_def)
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)
apply (subst valid_def)
apply (subst no_fail_def)
apply clarsimp
apply (subst all_imp_conj_distrib[symmetric])
apply (clarsimp simp: foldmap_inv_def)
apply (rename_tac sa \<sigma>' res x v)
apply (erule_tac x = "mk_urecord [x, res, obsv]" in allE)
apply (erule_tac x = args in allE)
apply (erule impE)
apply (subst val_rel_simp; simp add: foldmap_inv_stat_def)
apply (erule_tac x = \<sigma>' in allE)
apply (erule_tac x = sa in allE)
apply (clarsimp simp: corres_def)
apply (frule_tac t = "TPrim (Num num)" and
ptrl = ptrl and
ra = r and
wa = w and
rb = ra and
wb = wa and
rc = ro in upd_wa_foldnb_bod_preservation[rotated -1];
(simp add: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)?;
(clarsimp simp: Int_commute)?)
apply (frule wa_abs_typing_u_elims(2); clarsimp)
apply (frule wa_abs_typing_u_elims(2); clarsimp)
apply (frule wa_abs_typing_u_elims(2); clarsimp)
apply (frule wa_abs_typing_u_elims(2); clarsimp)
apply (rename_tac r' w')
apply (subgoal_tac "matches_ptrs \<Xi> \<sigma>' [(mk_urecord [x, res, obsv])]
[option.Some (foldmap_funarg_type ''wordarray_fold_no_break_0'')] (r' \<union> ro) w'")
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)
apply (erule impE, blast)
apply clarsimp
apply (rename_tac ret sb)
apply (erule_tac x = ret in allE)
apply (erule_tac x = sb in allE)
apply clarsimp
apply (frule update_sem.preservation
[OF update_sem_axioms, where \<tau>s = "[]" and \<epsilon> = "[]" and K = "[]" and L = 0 and C = "{}",
simplified subst_wellformed_nothing, simplified]; simp?)
apply clarsimp
apply (rename_tac \<sigma>'' res' rb wb)
apply (clarsimp simp: foldmap_bounds_def)
apply (case_tac "frm < to \<and> frm < len"; clarsimp)
apply (frule wa_abs_typing_u_elims(5))
apply (erule_tac x = n in allE; clarsimp)
apply (frule_tac p = "arr + (size_of_num_type num) * n" in valid_ptr_not_in_frame_same; simp?)
apply (drule_tac x = "arr + 4 * n" and S = r in orthD1; simp?)
apply (drule_tac wa_abs_typing_u_elims(2)[where \<tau>s = "[_]", simplified]; simp)
apply blast
apply clarsimp
apply (drule_tac ta = num and
arr = arr and
len = len and
res' = res' and
\<sigma>' = \<sigma>' and
\<sigma>'' = \<sigma>'' and
ptrl = ptrl and
ra = r and
wa = w and
rc = ro in upd_wa_foldnb_bod_step[rotated -5]; simp?; clarsimp?)
apply (drule_tac v = obsv in type_repr_uval_repr(1); simp)
apply (drule_tac v = res in type_repr_uval_repr(1); simp)
apply (drule wa_abs_typing_u_elims(2); clarsimp)
apply (drule wa_abs_typing_u_elims(2); clarsimp)
apply (drule wa_abs_typing_u_elims(2); clarsimp)
apply (drule wa_abs_typing_u_elims(2); clarsimp)
apply (rule conjI)
apply (intro exI conjI; simp)
apply (frule_tac a = n and k = to in less_is_non_zero_p1)
apply (drule unatSuc2; clarsimp simp: word_less_nat_alt word_le_nat_alt foldmap_measure_def)
apply linarith
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)
apply (rule matches_ptrs_some[where r' = "{}" and w' = "{}", simplified])
apply (rule u_t_struct; simp?)
apply (rule u_t_r_cons1[where r = "{}" and w = "{}", simplified]; simp?)
apply (subst (asm) val_rel_word; clarsimp)
apply (rule u_t_prim'; clarsimp)
apply (rule u_t_r_cons1[where r' = ro and w' = "{}", simplified]; simp?)
apply (drule_tac u = obsv in uval_typing_frame(1); simp?)
apply (rule u_t_r_cons1[where r' = "{}" and w' = "{}", simplified]; simp?)
apply (rule u_t_r_empty)
apply (drule_tac v = obsv in type_repr_uval_repr(1); simp)
apply (drule_tac v = obsv in frame_noalias_uval_typing(2)[rotated 1]; simp?)
apply blast
apply (drule_tac v = res in type_repr_uval_repr(1); simp)
apply (subst (asm) val_rel_word; clarsimp)
apply (rule matches_ptrs_empty[where \<tau>s = "[]" and \<epsilon> = "[]", simplified])
done
lemma upd_C_wordarray_fold_no_break_corres_gen:
"\<lbrakk>proc_env_matches_ptrs \<xi>0' \<Xi>; i < length \<gamma>; val_rel (\<gamma> ! i) v';
\<Gamma>' ! i = option.Some (prod.fst (prod.snd (prod.snd(prod.snd (\<Xi> ''wordarray_fold_no_break_0'')))));
D \<in> k \<or> S \<in> k; 0, K',{} \<turnstile> (foldmap_obsv_type ''wordarray_fold_no_break_0'') :\<kappa> k;
\<gamma> ! i = URecord fs None; f = prod.fst (fs ! 3);
(\<Xi>, 0, [], {}, [option.Some (foldmap_funarg_type ''wordarray_fold_no_break_0'')] \<turnstile>
(App (uvalfun_to_exprfun f) (Var 0)) : (foldmap_funret_type ''wordarray_fold_no_break_0''));
\<forall>x x' \<sigma> s. val_rel x x' \<longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App (uvalfun_to_exprfun f) (Var 0)) (do ret <- dispatch_t4' (t5_C.f_C v') x'; gets (\<lambda>s. ret) od)
\<xi>0' [x] \<Xi> [option.Some (foldmap_funarg_type ''wordarray_fold_no_break_0'')] \<sigma> s;
\<xi>1' ''wordarray_fold_no_break_0'' = upd_wa_foldnb \<Xi> \<xi>0' (foldmap_funarg_type ''wordarray_fold_no_break_0'');
elem_type (foldmap_funarg_type ''wordarray_fold_no_break_0'') = TPrim (Num num)\<rbrakk>
\<Longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App (AFun ''wordarray_fold_no_break_0'' [] []) (Var i)) (do x <- main_pp_inferred.wordarray_fold_no_break_0' v';
gets (\<lambda>s. x)
od)
\<xi>1' \<gamma> \<Xi> \<Gamma>' \<sigma> s"
apply (rule absfun_corres; simp)
apply (clarsimp simp: abs_fun_rel_def'; rename_tac r w)
apply (thin_tac "\<Gamma>' ! i = _")
apply (subst (asm) val_rel_simp; clarsimp)
apply (subst (asm) val_rel_ptr_def; clarsimp)
apply (subst (asm) val_rel_fun_tag)
apply (subst (asm) val_rel_word)
apply (subst (asm) val_rel_word)
apply (clarsimp simp: upd_wa_foldnb_def wordarray_fold_no_break_0'_def)
apply (rename_tac pwa_rep frm_rep to_rep f_rep acc a_rep obsv o_rep wa_rep)
apply (erule u_t_recE; clarsimp)
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)
apply (erule u_t_r_consE; clarsimp)
apply (erule u_t_ptrE; clarsimp)
apply (frule wa_abs_typing_u_elims(1); clarsimp)
apply (rename_tac r len arr)
apply (erule u_t_r_consE; simp)
apply (erule u_t_r_consE; simp)
apply (erule conjE)+
apply (drule_tac t = "type_repr _" in sym)+
apply clarsimp
apply (erule u_t_primE)+
apply (drule_tac t = "lit_type _" in sym)+
apply clarsimp
apply (erule u_t_r_consE; clarsimp)+
apply (erule u_t_r_emptyE; clarsimp)
apply (frule tfun_no_pointers(1))
apply (frule tfun_no_pointers(2))
apply clarsimp
apply (rename_tac acc ra wa obsv ro wo)
apply (drule discardable_or_shareable_not_writable; simp?)
apply clarsimp
apply (subst unknown_bind_ignore)+
apply (clarsimp simp: join_guards)
apply wp
apply (clarsimp simp: unknown_bind_ignore split: prod.split)
apply (rename_tac var e)
apply (rule_tac M = "\<lambda>((_, i), _). foldmap_measure i (t5_C.to_C v')" and
I = "\<lambda>(a, b) s. (\<exists>\<sigma>' res.
foldmap_inv upd_wa_foldnb_bod ''wordarray_fold_no_break_0'' \<xi>0' \<sigma> (ptr_val (t5_C.arr_C v'))
(t5_C.frm_C v') b (uvalfun_to_exprfun f) acc obsv \<sigma>' res s (t3_C.acc_C a)) \<and>
foldmap_inv_stat obsv (t3_C.obsv_C a) \<and>
foldmap_bounds (t5_C.frm_C v') (t5_C.to_C v') len b e" in whileLoop_add_invI; simp?)
apply (wp; clarsimp simp: unknown_bind_ignore split: prod.splits)
apply (rename_tac sa a n args a' n')
apply (rule_tac a = a and wa = wa and ra = ra and ro = ro and w = "{}" and r = r and
ptrl = None in fold_dispatch_wp[rotated 2]; simp?)
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)+
apply wp
apply wp
apply clarsimp
apply (rename_tac args j \<sigma>' res)
apply (clarsimp simp: foldmap_bounds_def foldmap_inv_def)
apply (frule wa_abs_typing_u_elims(5))
apply (erule_tac x = j in allE; clarsimp)
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)
apply (drule_tac acc = acc and
obsv = obsv and
rb = ra and
wb = wa and
rc = ro and
ptrl = None and
ra = r and
wa = "{}" in upd_wa_foldnb_bod_preservation; simp?; (clarsimp simp: Int_commute)?)
apply (frule_tac p = "ptr_val (t5_C.arr_C v')" in valid_ptr_not_in_frame_same; simp?)
apply (drule_tac p = "arr + (size_of_num_type num) * j" in valid_ptr_not_in_frame_same; simp?)
apply (drule_tac x = "arr + (size_of_num_type num) * j" and S' = wa and S = r in orthD1; simp?)
apply (drule wa_abs_typing_u_elims(2); clarsimp)
apply blast
apply (thin_tac "_ \<in> state_rel")
apply (rule conjI)
apply (rule_tac x = \<sigma>' in exI)
apply (rule_tac x = res in exI)
apply (rule_tac x = "UPrim x" in exI)
apply clarsimp
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (rotate_tac -1)
apply (drule_tac p = "Ptr(arr + (size_of_num_type num) * j)" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp)
apply (rule_tac x = "heap_w32 s (PTR(32 word) (arr + (size_of_num_type num) * j))" in exI)
apply (drule_tac p = "t5_C.arr_C v'" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: val_rel_WordArray_u32_C_def ptr_add_def heap_simp mult.commute)
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (rotate_tac -1)
apply (drule_tac p = "Ptr(arr + (size_of_num_type num) * j)" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp)
apply (drule_tac p = "t5_C.arr_C v'" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: is_valid_simp heap_simp val_rel_WordArray_u32_C_def ptr_add_def mult.commute)
apply (rule conjI)
apply (erule u_t_funafuntE; clarsimp)
apply (rename_tac x' j \<sigma>' res)
apply (rule_tac x = \<sigma>' in exI)
apply (rule_tac x = res in exI)
apply (clarsimp simp: foldmap_inv_def)
apply (clarsimp simp: \<Xi>_wordarray_fold_no_break_0 wordarray_fold_no_break_0_type_def abbreviated_type_defs)
apply (unfold foldmap_bounds_def)
apply (erule conjE)+
apply (case_tac "t5_C.frm_C v' < e")
apply (erule impE, assumption)
apply (thin_tac "_ \<longrightarrow> _")
apply clarsimp
apply (case_tac "j < t5_C.to_C v'"; clarsimp)
apply (rule_tac ptrl = None and
ra = r and
wa = "{}" and
rb = ra and
wb = wa and
rc = ro in upd_wa_foldnb_bod_to_geq_len; (simp add: Int_commute)?; clarsimp?)
apply (erule impE, assumption)
apply (thin_tac "_ \<longrightarrow> _")
apply clarsimp
apply (erule upd_wa_foldnb_bod.elims; clarsimp)
apply (case_tac "t5_C.frm_C v' < t5_C.to_C v'"; subst upd_wa_foldnb_bod.simps; clarsimp)
apply wp
apply wp
apply (rule validNF_select_UNIV)
apply (clarsimp simp: foldmap_inv_stat_def)
apply (clarsimp simp: order.strict_implies_order)
apply (clarsimp simp: foldmap_inv_def state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (frule_tac p = "t5_C.arr_C v'" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: is_valid_simp val_rel_WordArray_u32_C_def heap_simp)
apply (subst upd_wa_foldnb_bod.simps; clarsimp)+
apply (rule conjI; clarsimp)
apply (subst min_def)
apply (subst (asm) not_le[symmetric])+
apply (subst if_not_P; simp?)
apply (subst min_def)
apply (clarsimp simp: not_le)
done
lemma \<Xi>_wordarray_map_no_break_0:
"\<Xi> ''wordarray_map_no_break_0'' = wordarray_map_no_break_0_type"
by (clarsimp simp: \<Xi>_def)
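(* map_dispatch_wp plays the same role for the map/accumulate loop: one iteration preserves the
   invariant for upd_wa_mapAccumnb_bod, updates only the array cell currently being visited in
   the C heap, and strictly decreases the measure. *)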
lemma map_dispatch_wp:
"\<lbrakk>proc_ctx_wellformed \<Xi>; proc_env_matches_ptrs \<xi>0' \<Xi>;
wa_abs_typing_u \<Xi> (UWA (TPrim (Num num)) len arr) ''WordArray'' [TPrim (Num num)] (Boxed Writable ptrl) r w \<sigma>;
\<sigma> p = option.Some (UAbstract (UWA (TPrim (Num num)) len arr));
uval_typing \<Xi> \<sigma> acc (foldmap_acc_type ''wordarray_map_no_break_0'') ra wa;
uval_typing \<Xi> \<sigma> obsv (foldmap_obsv_type ''wordarray_map_no_break_0'') ro {}; wa \<inter> r = {}; p \<notin> w;
p \<notin> r; w \<inter> wa = {}; w \<inter> (ra \<union> ro) = {}; wa \<inter> ro = {}; p \<notin> wa; p \<notin> ra; p \<notin> ro; p = ptr_val p';
(\<Xi>, 0, [], {}, [option.Some (foldmap_funarg_type ''wordarray_map_no_break_0'')] \<turnstile>
(App f (Var 0)) : (foldmap_funret_type ''wordarray_map_no_break_0''));
\<forall>x x' \<sigma> s. val_rel x x' \<longrightarrow>
update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App f (Var 0)) (do ret <- dispatch f_num x'; gets (\<lambda>s. ret) od)
\<xi>0' [x] \<Xi> [option.Some (foldmap_funarg_type ''wordarray_map_no_break_0'')] \<sigma> s;
elem_type (foldmap_funarg_type ''wordarray_map_no_break_0'') = TPrim (Num num)\<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>sa. (a', n') = (a, n) \<and> n < e \<and>
(\<exists>\<sigma>' res x v. args = t6_C.elem_C_update (\<lambda>_. v) a \<and>
\<sigma>' (arr + size_of_num_type U32 * n) = option.Some x \<and> val_rel x v \<and>
foldmap_inv upd_wa_mapAccumnb_bod ''wordarray_map_no_break_0'' \<xi>0' \<sigma> p frm n f acc obsv \<sigma>'
res sa (t10_C p' (t6_C.acc_C args))) \<and>
foldmap_bounds frm to len n e \<and> foldmap_inv_stat obsv (t6_C.obsv_C args)\<rbrace>
dispatch f_num args
\<lbrace>\<lambda>ret sb. (\<exists>\<sigma>' res.
foldmap_inv upd_wa_mapAccumnb_bod ''wordarray_map_no_break_0'' \<xi>0' \<sigma> p frm (n + 1) f acc obsv \<sigma>' res
(heap_w32_update (\<lambda>x a. if a = values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n
then t7_C.p1_C ret else x a) sb) (t10_C p' (t7_C.p2_C ret))) \<and>
foldmap_inv_stat obsv (t6_C.obsv_C args) \<and>
foldmap_bounds frm to len (n + 1) e \<and>
foldmap_measure (n + 1) to < foldmap_measure n' to \<and>
is_valid_WordArray_u32_C sb p' \<and>
is_valid_w32 sb (values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n)\<rbrace>!"
apply (subst validNF_def)
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (subst valid_def)
apply (subst no_fail_def)
apply clarsimp
apply (subst all_imp_conj_distrib[symmetric])
apply (clarsimp simp: foldmap_inv_def)
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (rename_tac sa \<sigma>' res x v)
apply (subst (asm) Int_Un_distrib; clarsimp)
apply (frule_tac t = "TPrim (Num num)" and
ra = r and
wa = w and
rb = ra and
wb = wa and
rc = ro in upd_wa_mapAccumnb_bod_preservation; simp?; (clarsimp simp: Int_commute)?)
apply (rename_tac sa \<sigma>' x v racc r' w')
apply (erule_tac x = "mk_urecord [x, racc, obsv]" in allE)
apply (erule_tac x = args in allE)
apply clarsimp
apply (erule impE)
apply (clarsimp simp: val_rel_simp foldmap_inv_stat_def)
apply (erule_tac x = \<sigma>' in allE)
apply (erule_tac x = sa in allE)
apply (clarsimp simp: corres_def)
apply (subgoal_tac "matches_ptrs \<Xi> \<sigma>' [(mk_urecord [x, racc, obsv])]
[option.Some (foldmap_funarg_type ''wordarray_map_no_break_0'')] (r' \<union> ro) w'")
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (erule impE, blast)
apply clarsimp
apply (rename_tac ret sb)
apply (erule_tac x = ret in allE)
apply (erule_tac x = sb in allE)
apply clarsimp
apply (frule update_sem.preservation[OF update_sem_axioms,
where \<tau>s = "[]" and \<epsilon>="[]"and K = "[]" and L=0 and C="{}",
simplified subst_wellformed_nothing, simplified]; simp?)
apply clarsimp
apply (rename_tac \<sigma>'' res' r'a w'a)
apply (erule u_t_rectE; clarsimp)
apply (erule u_t_r_contE; clarsimp)
apply (erule u_t_r_contE; clarsimp)
apply (erule u_t_r_contE; clarsimp)
apply (rename_tac x' rd wd racc' r'a w'a)
apply (frule_tac v = x' in tprim_no_pointers(1); clarsimp)
apply (frule_tac v = x' in tprim_no_pointers(2); clarsimp)
apply (clarsimp simp: foldmap_bounds_def)
apply (case_tac "frm < to \<and> frm < len"; clarsimp)
apply (drule_tac ta = num and
arr = arr and
len = len and
va = x and
racc' = racc' and
va' = x' and
ra = r and
wa = w and
ptrl = ptrl and
\<sigma>' = \<sigma>' and
\<sigma>'' = \<sigma>'' and
racc = racc and
rc = ro in upd_wa_mapAccumnb_bod_step; simp?; clarsimp?)
apply (drule_tac v = obsv in type_repr_uval_repr(1))
apply (drule_tac v = racc in type_repr_uval_repr(1))
apply (clarsimp simp: val_rel_word)
apply (thin_tac "_ \<in> state_rel")
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (frule_tac p = p and \<sigma> = \<sigma> in valid_ptr_not_in_frame_same; simp?)
apply (drule wa_abs_typing_u_elims(3); clarsimp)
apply (drule_tac p = "ptr_val p'" and \<sigma> = \<sigma>' and \<sigma>' = \<sigma>'' and w = w' in valid_ptr_not_in_frame_same[rotated -1]; simp?)
apply (drule_tac p = "arr + (size_of_num_type num) * n" and \<sigma> = \<sigma>' in valid_ptr_not_in_frame_same; simp?)
apply (drule_tac x = "arr + (size_of_num_type num) * n" and S = w' in orthD2; simp?)
apply (drule wa_abs_typing_u_elims(3); clarsimp)
apply (intro exI conjI; simp)
apply (frule_tac p = p' in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (frule_tac p = "values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n" and uv = x in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: val_rel_simp heap_simp)
apply (clarsimp simp: type_rel_simp val_rel_simp)
apply (clarsimp simp: is_valid_simp heap_simp)
apply (rule conjI)
apply (intro exI conjI, assumption, clarsimp simp: val_rel_simp)
apply (drule_tac upd_h = "(heap_w32_update
(\<lambda>x a. if a = values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n then t7_C.p1_C ret else x a) sb)" and
uv = x and uv' = x' and x = "values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n"
in all_heap_rel_updE; simp?; clarsimp?)
apply (clarsimp simp: val_rel_simp)
apply (drule_tac v = x' in type_repr_uval_repr(1); simp add: val_rel_simp)
apply (clarsimp simp: val_rel_simp type_rel_simp)
apply (clarsimp simp: val_rel_simp)
apply (drule_tac upd_h = "(heap_w32_update
(\<lambda>x a. if a = values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n then t7_C.p1_C ret else x a) sb)" and
uv = x and uv' = x' and x = "values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n" and
is_v = is_valid_w32 in all_heap_rel_updE; simp?; clarsimp?)
apply (clarsimp simp: val_rel_simp)
apply (drule_tac v = x' in type_repr_uval_repr(1); simp add: val_rel_simp)
apply (rule conjI)
apply (clarsimp simp: val_rel_simp)
apply (rename_tac pa)
apply (cut_tac p = pa and q = "values_C (heap_WordArray_u32_C sb p') +\<^sub>p uint n" in ptr_val_inj)
apply clarsimp
apply (clarsimp simp: ptr_add_def mult.commute val_rel_simp)
apply (frule_tac a = n and k = to in less_is_non_zero_p1)
apply (drule unatSuc2; clarsimp simp: word_less_nat_alt word_le_nat_alt foldmap_measure_def)
apply linarith
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (rule matches_ptrs_some[where r' = "{}" and w' = "{}", simplified])
apply (rule u_t_struct; simp?)
apply (rule u_t_r_cons1[where r = "{}" and w = "{}", simplified]; simp?)
apply (subst (asm) val_rel_word; clarsimp)
apply (rule u_t_prim'; clarsimp)
apply (rule u_t_r_cons1[where r' = ro and w' = "{}", simplified]; simp?)
apply (drule_tac u = obsv in uval_typing_frame(1); simp?)
apply (drule wa_abs_typing_u_elims(3); clarsimp)
apply blast
apply (rule u_t_r_cons1[where r' = "{}" and w' = "{}", simplified]; simp?)
apply (rule u_t_r_empty)
apply (drule_tac v = obsv in type_repr_uval_repr(1); simp)
apply (frule_tac v = obsv in frame_noalias_uval_typing'(2); (simp add: Int_Un_distrib)?; clarsimp?)
apply (drule wa_abs_typing_u_elims(3); clarsimp)
apply blast
apply (subst Int_commute; blast)
apply (drule_tac v = racc in type_repr_uval_repr(1); simp)
apply (subst (asm) val_rel_word; clarsimp)
apply (rule matches_ptrs_empty[where \<tau>s = "[]" and \<epsilon>="[]", simplified])
done
lemma upd_C_wordarray_map_no_break_corres_gen:
"\<lbrakk>proc_env_matches_ptrs \<xi>0' \<Xi>; i < length \<gamma>; val_rel (\<gamma> ! i) v';
\<Gamma>' ! i = option.Some (prod.fst (prod.snd (prod.snd (prod.snd (\<Xi> ''wordarray_map_no_break_0'')))));
D \<in> k \<or> S \<in> k; 0, K' , {} \<turnstile> (foldmap_obsv_type ''wordarray_map_no_break_0'') :\<kappa> k;
\<gamma> ! i = URecord fs None; f = prod.fst (fs ! 3);
(\<Xi>, 0, [], {}, [option.Some (foldmap_funarg_type ''wordarray_map_no_break_0'')] \<turnstile>
(App (uvalfun_to_exprfun f) (Var 0)) : (foldmap_funret_type ''wordarray_map_no_break_0''));
\<forall>x x' \<sigma> s. val_rel x x' \<longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App (uvalfun_to_exprfun f) (Var 0)) (do ret <- dispatch_t8' (t9_C.f_C v') x'; gets (\<lambda>s. ret) od)
\<xi>0' [x] \<Xi> [option.Some (foldmap_funarg_type ''wordarray_map_no_break_0'')] \<sigma> s;
\<xi>1' ''wordarray_map_no_break_0'' = upd_wa_mapAccumnb \<Xi> \<xi>0'
(foldmap_funarg_type ''wordarray_map_no_break_0'') (foldmap_funret_type ''wordarray_map_no_break_0'');
elem_type (foldmap_funarg_type ''wordarray_map_no_break_0'') = TPrim (Num num)\<rbrakk>
\<Longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App (AFun ''wordarray_map_no_break_0'' [] []) (Var i)) (do x <- main_pp_inferred.wordarray_map_no_break_0' v';
gets (\<lambda>s. x)
od)
\<xi>1' \<gamma> \<Xi> \<Gamma>' \<sigma> s"
apply (rule absfun_corres; simp)
apply (clarsimp simp: abs_fun_rel_def'; rename_tac r w)
apply (thin_tac "\<Gamma>' ! i = _")
apply (subst (asm) val_rel_simp; clarsimp)
apply (subst (asm) val_rel_ptr_def; clarsimp)
apply (subst (asm) val_rel_fun_tag)
apply (subst (asm) val_rel_word)
apply (subst (asm) val_rel_word)
apply (clarsimp simp: upd_wa_mapAccumnb_def wordarray_map_no_break_0'_def)
apply (rename_tac pwa_rep frm_rep to_rep f_rep acc a_rep o_rep wa_rep)
apply (erule u_t_recE; clarsimp)
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (erule u_t_r_consE; clarsimp)
apply (rename_tac r w r' w')
apply (erule u_t_ptrE; clarsimp)
apply (rename_tac w)
apply (frule wa_abs_typing_u_elims(1); clarsimp)
apply (rename_tac len arr)
apply (erule u_t_r_consE; simp)
apply (erule u_t_r_consE; simp)
apply (erule conjE)+
apply (drule_tac t = "type_repr _" in sym)+
apply clarsimp
apply (erule u_t_primE)+
apply (drule_tac t = "lit_type _" in sym)+
apply clarsimp
apply (erule u_t_r_consE; clarsimp)+
apply (erule u_t_r_emptyE; clarsimp)
apply (frule tfun_no_pointers(1))
apply (frule tfun_no_pointers(2))
apply clarsimp
apply (rename_tac acc ra wa obsv ro wo)
apply (drule_tac v = obsv in discardable_or_shareable_not_writable(1); simp?)
apply (subst unknown_bind_ignore)+
apply (clarsimp simp: join_guards)
apply wp
apply (clarsimp simp: unknown_bind_ignore split: prod.split)
apply (rename_tac var e)
apply (rule_tac M = "\<lambda>((_, i), _). foldmap_measure i (t9_C.to_C v')" and
I = "\<lambda>(a, b) s. (\<exists>\<sigma>' res.
foldmap_inv upd_wa_mapAccumnb_bod ''wordarray_map_no_break_0'' \<xi>0' \<sigma> (ptr_val (t9_C.arr_C v'))
(t9_C.frm_C v') b (uvalfun_to_exprfun f) acc obsv \<sigma>' res s (t10_C (t9_C.arr_C v') (t6_C.acc_C a))) \<and>
foldmap_inv_stat obsv (t6_C.obsv_C a) \<and>
foldmap_bounds (t9_C.frm_C v') (t9_C.to_C v') len b e" in whileLoop_add_invI; simp?)
apply (wp; clarsimp simp: unknown_bind_ignore split: prod.splits)
apply (rename_tac sa a n args a' n')
apply (clarsimp simp: conj_left_commute[of "is_valid_w32 _ _", simplified])
apply (clarsimp simp: conj_commute[of "is_valid_w32 _ _", simplified])
apply (rule_tac a = a and wa = wa and ra = ra and ro = ro and w = w and r = r and
ptrl = None in map_dispatch_wp; simp?)
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)+
apply wp
apply wp
apply clarsimp
apply (rename_tac args j \<sigma>' res)
apply (clarsimp simp: foldmap_inv_def foldmap_bounds_def)
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (subst (asm) Int_Un_distrib; clarsimp)
apply (drule_tac ptrl = None and
ra = r and
wa = w and
rb = ra and
wb = wa and
rc = ro in upd_wa_mapAccumnb_bod_preservation; simp?; (clarsimp simp: Int_commute)?)
apply (subst conj_commute[of "is_valid_w32 _ _"]; clarsimp)
apply (subst conj_commute[of "is_valid_w32 _ _"]; clarsimp)
apply (frule_tac \<sigma> = \<sigma>' in wa_abs_typing_u_elims(5), erule_tac x = j in allE, clarsimp)
apply (clarsimp simp: state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (frule_tac p = "ptr_val (t9_C.arr_C v')" in valid_ptr_not_in_frame_same; simp?)
apply (drule wa_abs_typing_u_elims(3); clarsimp)
apply (frule_tac p = "t9_C.arr_C v'" and \<sigma> = \<sigma>' in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: is_valid_simp heap_simp)
apply (frule_tac p = "Ptr(arr + (size_of_num_type num) * j)" and
\<sigma> = \<sigma>' and
is_v = is_valid_w32 in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp)
apply clarsimp
apply (intro conjI exI; simp?)
apply (clarsimp simp: val_rel_simp ptr_add_def mult.commute)
apply (clarsimp simp: val_rel_simp ptr_add_def mult.commute)
apply (rule conjI)
apply (erule u_t_funafuntE; clarsimp)
apply (rename_tac args j \<sigma>' res)
apply (rule_tac x = \<sigma>' in exI)
apply (rule_tac x = res in exI)
apply (clarsimp simp: foldmap_inv_def)
apply (clarsimp simp: \<Xi>_wordarray_map_no_break_0 wordarray_map_no_break_0_type_def abbreviated_type_defs)
apply (unfold foldmap_bounds_def)
apply (erule conjE)+
apply (rule conjI)
apply (case_tac "t9_C.frm_C v' < e")
apply (erule impE, assumption)
apply (thin_tac "_ \<longrightarrow> _")
apply clarsimp
apply (case_tac "j < t9_C.to_C v'"; clarsimp)
apply (subst (asm) Int_Un_distrib; clarsimp)
apply (rule_tac ptrl = None and
ra = r and
wa = w and
rb = ra and
wb = wa and
rc = ro in upd_wa_mapAccumnb_bod_to_geq_len; (simp add: Int_commute)?; clarsimp?)
apply (erule impE, assumption)
apply (thin_tac "_ \<longrightarrow> _")
apply clarsimp
apply (erule upd_wa_mapAccumnb_bod.elims; clarsimp)
apply (case_tac "t9_C.frm_C v' < t9_C.to_C v'"; subst upd_wa_mapAccumnb_bod.simps; clarsimp)
apply (clarsimp simp: val_rel_simp)
apply wp
apply wp
apply (rule validNF_select_UNIV)
apply (rule validNF_select_UNIV)
apply (clarsimp simp: foldmap_inv_stat_def)
apply (clarsimp simp: order.strict_implies_order)
apply (clarsimp simp: foldmap_inv_def state_rel_def heap_rel_def heap_rel_ptr_meta)
apply (frule_tac p = "t9_C.arr_C v'" in all_heap_rel_ptrD; simp?)
apply (clarsimp simp: type_rel_simp wa_abs_repr_def)
apply (clarsimp simp: is_valid_simp val_rel_WordArray_u32_C_def heap_simp)
apply (subst upd_wa_mapAccumnb_bod.simps; clarsimp)+
apply (clarsimp simp: val_rel_simp)
apply (rule conjI; clarsimp)
apply (subst min_def)
apply (subst (asm) not_le[symmetric])+
apply (subst if_not_P; simp?)
apply (subst min_def)
apply (clarsimp simp: not_le)
done
section "Specialised Lemmas for Cogent Functions"
lemma typing_mono_app_cogent_fun:
"\<Xi>', 0, [], {}, [option.Some a] \<turnstile> f : b \<Longrightarrow> \<Xi>', 0, [], {}, [option.Some a] \<turnstile> App (Fun f [] []) (Var 0) : b"
apply (frule typing_to_kinding_env(1); simp?)
apply (rule typing_app[where x = a and y = b and ?\<Gamma>1.0 = "[option.None]" and ?\<Gamma>2.0 = "[option.Some a]"]; simp?)
apply (clarsimp simp: split_conv_all_nth)
apply (rule right; simp)
apply (rule typing_fun[where \<delta> = "[]", OF _ _ _ _]; (simp add: Cogent.empty_def weakening_conv_all_nth)?)
apply (rule weakening_comp.none)
apply simp
apply (rule subst_wellformed_nothing)
apply (rule typing_var; simp add: Cogent.empty_def weakening_conv_all_nth)
apply (rule keep; simp)
done
lemma typing_mono_fun_cogent_fun:
"\<Xi>', 0, [], {}, [option.Some a] \<turnstile> f : b \<Longrightarrow> \<Xi>', 0, [], {}, [option.None] \<turnstile> Fun f [] [] : TFun a b"
apply (frule typing_to_kinding_env(1); simp?)
apply (rule typing_fun[where \<delta>= "[]"];
(simp add: Cogent.empty_def weakening_conv_all_nth )?)
apply(rule weakening_comp.none)
apply simp
apply (rule subst_wellformed_nothing)
done
lemma typing_mono_fun_imp_appfun:
"\<Xi>', 0, [], {}, [option.None] \<turnstile> Fun f [] []: TFun a b \<Longrightarrow> \<Xi>', 0, [], {}, [option.Some a] \<turnstile> App (Fun f [] []) (Var 0) : b"
apply (frule typing_to_wellformed(1))
apply (rule typing_app[where x = a and y = b and ?\<Gamma>1.0 = "[option.None]" and ?\<Gamma>2.0 = "[option.Some a]"]; simp?)
apply (clarsimp simp: split_conv_all_nth)
apply (rule right; simp)
apply (rule typing_var; simp add: Cogent.empty_def weakening_conv_all_nth)
apply (rule keep; simp)
done
lemma upd_C_wordarray_fold_no_break_corres_cog:
"\<lbrakk>i < length \<gamma>; val_rel (\<gamma> ! i) v';
\<Gamma>' ! i = option.Some (prod.fst (prod.snd(prod.snd(prod.snd (\<Xi> ''wordarray_fold_no_break_0'')))));
proc_env_matches_ptrs \<xi>0' \<Xi>;
\<Xi> ''wordarray_fold_no_break_0'' = (0, [],{}, \<tau>, \<tau>acc);
\<tau> = TRecord [(''arr'', TCon ''WordArray'' [TPrim (Num num)] (Boxed ReadOnly None), Present),
(''frm'', TPrim (Num U32), Present), (''to'', TPrim (Num U32), Present),
(''f'', TFun \<tau>f \<tau>acc, Present), (''acc'', \<tau>acc, Present), (''obsv'', \<tau>obsv, Present)] Unboxed;
\<tau>f = TRecord [(''elem'', TPrim (Num num), Present), (''acc'', \<tau>acc, Present),
(''obsv'', \<tau>obsv, Present)] Unboxed;
D \<in> k \<or> S \<in> k; 0, K', {} \<turnstile> \<tau>obsv :\<kappa> k;
\<gamma> ! i = URecord fs None; UFunction f [] [] = prod.fst (fs ! 3);
\<Xi>, 0, [], {}, [option.Some \<tau>f] \<turnstile> f : \<tau>acc;
\<forall>x x' \<sigma> s. val_rel x x' \<longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App (Fun f [] []) (Var 0)) (do ret <- dispatch_t4' (t5_C.f_C v') x'; gets (\<lambda>s. ret) od)
\<xi>0' [x] \<Xi> [option.Some \<tau>f] \<sigma> s;
\<xi>1' ''wordarray_fold_no_break_0'' = upd_wa_foldnb \<Xi> \<xi>0' \<tau>f\<rbrakk>
\<Longrightarrow> update_sem_init.corres wa_abs_typing_u wa_abs_repr (Generated.state_rel wa_abs_repr)
(App (AFun ''wordarray_fold_no_break_0'' [] []) (Var i)) (do x <- main_pp_inferred.wordarray_fold_no_break_0' v';
gets (\<lambda>s. x)
od)
\<xi>1' \<gamma> \<Xi> \<Gamma>' \<sigma> s"
apply (drule typing_mono_app_cogent_fun)
apply (rule upd_C_wordarray_fold_no_break_corres_gen; simp?)
apply (clarsimp simp: val_rel_simp)
apply (clarsimp simp: val_rel_simp)
done
section "Specialised Lemmas for Abstract Functions"
lemma typing_mono_app_cogent_absfun:
"\<lbrakk>proc_ctx_wellformed \<Xi>'; \<Xi>' f = (0,[],{}, a, b)\<rbrakk> \<Longrightarrow> \<Xi>', 0, [], {}, [option.Some a] \<turnstile> App (AFun f [] []) (Var 0) : b"
apply (unfold proc_ctx_wellformed_def)
apply (erule_tac x = f in allE; clarsimp)
apply (rule typing_app[where x = a and y = b and ?\<Gamma>1.0 = "[option.None]" and ?\<Gamma>2.0 = "[option.Some a]"]; simp?)
apply (clarsimp simp: split_conv_all_nth)
apply (rule right; simp)
apply (rule typing_afun[where ts = "[]", OF _ _ _ _]; (simp add: Cogent.empty_def weakening_conv_all_nth)?)
apply (rule weakening_comp.none)
apply simp
apply (rule subst_wellformed_nothing)
apply (rule typing_var; simp add: Cogent.empty_def weakening_conv_all_nth)
apply (rule keep; simp)
done
lemma typing_mono_afun_cogent_absfun:
"\<lbrakk>proc_ctx_wellformed \<Xi>'; \<Xi>' f = (0, [], {}, a, b)\<rbrakk> \<Longrightarrow> \<Xi>', 0, [], {}, [option.None] \<turnstile> AFun f [] [] : TFun a b"
apply (unfold proc_ctx_wellformed_def)
apply (erule_tac x = f in allE; clarsimp)
apply (rule typing_afun[where ts = "[]", OF _ _ _ _]; (simp add: Cogent.empty_def weakening_conv_all_nth)?)
apply (rule weakening_comp.none)
apply simp
apply (rule subst_wellformed_nothing)
done
lemma typing_mono_afun_imp_appafun:
"\<Xi>', 0, [], {}, [option.None] \<turnstile> AFun f [] []: TFun a b \<Longrightarrow> \<Xi>', 0, [], {}, [option.Some a] \<turnstile> App (AFun f [] []) (Var 0) : b"
apply (frule typing_to_wellformed(1))
apply (rule typing_app[where x = a and y = b and ?\<Gamma>1.0 = "[option.None]" and ?\<Gamma>2.0 = "[option.Some a]"]; simp?)
apply (clarsimp simp: split_conv_all_nth)
apply (rule right; simp)
apply (rule typing_var; simp add: Cogent.empty_def weakening_conv_all_nth)
apply (rule keep; simp)
done
end (* of context *)
end |
lemma LIMSEQ_binaryset: assumes f: "f {} = 0" shows "(\<lambda>n. \<Sum>i<n. f (binaryset A B i)) \<longlonglongrightarrow> f A + f B" |
function sinogram = myRadon(image,thetas)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% radon transformation -> schlegel & bille 9.1.1
% written by Mark Bangert
% [email protected] 2011
numOfAngularProjections = length(thetas);
numOfParallelProjections = size(image,1);
sinogram = zeros(numOfParallelProjections,numOfAngularProjections);
% loop over the number of angles
for i = 1:length(thetas)
% rotate image
tmpImage = imrotate(image,-thetas(i),'bilinear','crop');
% fill sinogram
sinogram(:,i) = sum(tmpImage,2);
% visualization on the fly
imagesc(sinogram);
drawnow
end
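% Example usage (illustrative sketch only; assumes the Image Processing Toolbox
% is available, since phantom and imrotate come from it):
%   img = phantom(256);            % standard Shepp-Logan test image
%   thetas = 0:179;                % projection angles in degrees
%   sino = myRadon(img, thetas);   % one column per projection angle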
|
MODULE shape_mod
USE input_mod,ONLY: runend
USE esets_mod,ONLY: nnode
IMPLICIT NONE
!INTEGER(kind=4),PARAMETER :: shape_nodes=3
CONTAINS
!******************************************************************************
!** 1-Dimension Shape Function Selector **
!******************************************************************************
! SUBROUTINE shape_1d_select(xi,N_shape,dN_shape)
! IMPLICIT NONE
! !--Dummy arguments
! REAL(kind=8),INTENT(IN) :: xi
! REAL(kind=8),ALLOCATABLE,DIMENSION(:),INTENT(OUT) :: N_shape,dN_shape
! CHARACTER(len=5),INTENT(IN) :: ORDER
!
! SELECT CASE (nnode)
! CASE (2)
! ALLOCATE(N_shape(2),dN_shape(2))
! CALL linear_shape_1d(xi,N_shape,dN_shape)
! CASE (3)
! ALLOCATE(N_shape(3),dN_shape(3))
! CALL quadratic_shape_1d(xi,N_shape,dN_shape)
! CASE DEFAULT
! CALL runend('SHAPE_FUNCTIONS: ORDER not implemented')
! END SELECT
!******************************************************************************
!** Linear Shape Functions 1-dimension **
!******************************************************************************
SUBROUTINE linear_shape_1d(xi,N_shape,dN_shape)
IMPLICIT NONE
!--Dummy arguments
REAL(kind=8),INTENT(IN) :: xi
REAL(kind=8),INTENT(OUT) :: N_shape(2),dN_shape(2)
!--Local arguments
!functions
N_shape(1) = 0.5d0*(1.d0-xi)
N_shape(2) = 0.5d0*(1.d0+xi)
!derivatives
dN_shape(1) = -0.5d0
dN_shape(2) = 0.5d0
END SUBROUTINE linear_shape_1d
!******************************************************************************
!** Quadratic Shape Functions 1-dimension **
!******************************************************************************
SUBROUTINE quadratic_shape_1d(xi,N_shape,dN_shape)
IMPLICIT NONE
!--Dummy arguments
REAL(kind=8),INTENT(IN) :: xi
REAL(kind=8),INTENT(OUT) :: N_shape(3),dN_shape(3)
!--Local arguments
!functions
N_shape(1) = 0.5d0*xi*(xi-1.d0)
N_shape(2) = 1.d0-xi**2.d0
N_shape(3) = 0.5d0*xi*(xi+1.d0)
!derivatives
dN_shape(1) = 0.5d0*(2.d0*xi-1.d0)
dN_shape(2) = -2.d0*xi
dN_shape(3) = 0.5d0*(2.d0*xi+1.d0)
END SUBROUTINE quadratic_shape_1d
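! Quick check (worked example): for any xi both shape function sets form a
! partition of unity, i.e. SUM(N_shape) = 1 and SUM(dN_shape) = 0. For the
! quadratic case at xi = 0 this gives N_shape = (0, 1, 0) and
! dN_shape = (-0.5, 0, 0.5).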
END MODULE shape_mod
|
import data.nat.basic
import tactic.library_search
-- Turn off trace messages so they don't pollute the test build:
set_option trace.silence_library_search true
-- For debugging purposes, we can display the list of lemmas:
-- set_option trace.library_search true
-- Check that `library_search` fails if there are no goals.
example : true :=
begin
trivial,
success_if_fail { library_search },
end
example (a b : ℕ) : a + b = b + a :=
by library_search -- says: `exact add_comm a b`
example {a b : ℕ} : a ≤ a + b :=
by library_search -- says: `exact le_add_right a b`
example (n m k : ℕ) : n * (m - k) = n * m - n * k :=
by library_search -- says: `exact nat.mul_sub_left_distrib n m k`
example {n m : ℕ} (h : m < n) : m ≤ n - 1 :=
by library_search -- says: `exact nat.le_pred_of_lt h`
example {α : Type} (x y : α) : x = y ↔ y = x :=
by library_search -- says: `exact eq_comm`
example (a b : ℕ) (ha : 0 < a) (hb : 0 < b) : 0 < a + b :=
by library_search -- says: `exact add_pos ha hb`
example (a b : ℕ) : 0 < a → 0 < b → 0 < a + b :=
by library_search -- says: `exact add_pos`
example (a b : ℕ) (h : a ∣ b) (w : b > 0) : a ≤ b :=
by library_search -- says: `exact nat.le_of_dvd w h`
-- We even find `iff` results:
example {b : ℕ} (w : b > 0) : b ≥ 1 :=
by library_search -- says: `exact nat.succ_le_iff.mpr w`
example : ∀ P : Prop, ¬(P ↔ ¬P) :=
by library_search -- says: `λ (a : Prop), (iff_not_self a).mp`
example {a b c : ℕ} (h₁ : a ∣ c) (h₂ : a ∣ b + c) : a ∣ b :=
by library_search -- says `exact (nat.dvd_add_left h₁).mp h₂`
example {a b c : ℕ} (h₁ : a ∣ b) (h₂ : a ∣ b + c) : a ∣ c :=
by library_search -- says `exact (nat.dvd_add_left h₁).mp h₂`
|
module mod1
#ifdef BLA
integer, parameter :: bla = 10
#endif
end module mod1
|
syntax "tac" : tactic
theorem a : True := by tac
#check a -- should be declared
theorem a' : True β§ True := β¨by tac, by tacβ©
#check a' -- should be declared
syntax "term" : term
def b (n : Nat) : Nat := term
#print b -- should be declared
|
[STATEMENT]
theorem TBtheorem1a:
assumes "ine PQ E"
and "subcomponents PQ = {P,Q}"
and "correctCompositionIn PQ"
shows "ine P E \<or> ine Q E"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ine P E \<or> ine Q E
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
ine PQ E
subcomponents PQ = {P, Q}
correctCompositionIn PQ
goal (1 subgoal):
1. ine P E \<or> ine Q E
[PROOF STEP]
by (simp add: ine_def correctCompositionIn_def, auto) |
# Automatically generated using Clang.jl
const GSW_INVALID_VALUE = 9.0e15
const GSW_ERROR_LIMIT = 1.0e10
const INTERP_METHOD_LINEAR = 1
const INTERP_METHOD_PCHIP = 2
|
module Common.Weakening where
-- open import Agda.Primitive
import Data.List as List
open import Data.List.Base
open import Data.List.Membership.Propositional
open import Data.List.Relation.Unary.All as All
open import Data.List.Prefix
open import Function
open import Level
{-
The following `Weakenable` record defines a class of weakenable (or
monotone) predicates over lists.
We can make the definitions of `Weakenable` available using the
syntax:
open Weakenable ⦃...⦄ public
Whenever we use `wk e p` where `e : x ⊑ x'`, `p : P x` where `x x' :
List X`, Agda will use instance argument search to find a defined
instance of `Weakenable {X} P`. See also
http://agda.readthedocs.io/en/v2.5.3/language/instance-arguments.html
-}
record Weakenable {i j}{A : Set i}(p : List A → Set j) : Set (i ⊔ j) where
  field wk : ∀ {w w'} → w ⊑ w' → p w → p w'
{-
In general, weakenable predicates can be defined over *any* preorder.
See `Experiments.Category` for a more general definition and
treatment.
The definition of `Weakenable` above is specialized to the
interpreters in our paper which are all defined in terms of
weakenable predicates over lists.
-}
open Weakenable ⦃...⦄ public
{-
We define a few derived instances of `Weakenable` that appear
commonly.
-}
module _ {i} {A : Set i} where
  instance
    any-weakenable : ∀ {x : A} → Weakenable (λ xs → x ∈ xs)
    any-weakenable = record { wk = λ ext l → ∈-⊒ l ext }

    all-weakenable : ∀ {j} {B : Set j} {xs : List B}
                     → ∀ {k} {C : B → List A → Set k} {{w₁ : ∀ {x} → Weakenable (C x)}}
                     → Weakenable (λ ys → All (λ x → C x ys) xs)
    all-weakenable {{w₁}} = record {
      wk = λ ext v → All.map (λ {a} y → Weakenable.wk w₁ ext y) v }

    -- const-weakenable : ∀ {j}{I : Set j} → Weakenable {A = I} (λ _ → A)
    -- const-weakenable = record { wk = λ ext c → c }

    list-weakenable : ∀ {b}{B : List A → Set b}
                      → {{wb : Weakenable B}} → Weakenable (λ W → List (B W))
    list-weakenable {{ w₁ }} = record {wk = λ ext v → List.map (wk ext) v }
-- Nicer syntax for transitivity of prefixes:
infixl 30 _⊚_
_⊚_ : ∀ {i} {A : Set i} {W W' W'' : List A} → W' ⊑ W → W'' ⊑ W' → W'' ⊑ W
_⊚_ co₁ co₂ = ⊑-trans co₂ co₁
{-
Another common construction is that of products of weakenable
predicates. Section 3.4 defines this type, which corresponds to
`_∩_` from the Agda Standard Library:
-}
open import Relation.Unary
open import Data.Product
_⊗_ : ∀ {a}{i j}{W : Set a}(p : W → Set i)(q : W → Set j)(w : W) → Set (i ⊔ j)
_⊗_ = _∩_
-- We prove that when `_⊗_` is a product of two weakenable predicates,
-- then `_⊗_` is an instance of `Weakenable`:
instance
  weaken-pair : ∀ {a}{A : Set a}{i j}{p : List A → Set i}{q : List A → Set j}
                → {{wp : Weakenable p}} {{wq : Weakenable q}}
                → Weakenable (p ⊗ q)
  weaken-pair = record { wk = λ{ ext (x , y) → (wk ext x , wk ext y) } }
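-- For example, given a membership proof i : x ∈ W and an extension ext : W ⊑ W',
-- instance search picks any-weakenable so that `wk ext i : x ∈ W'`; weaken-pair
-- does the same componentwise for predicates combined with _⊗_.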
|
#include "IllegalMultiplicationOperationException.h"
#include <boost/format.hpp>
using namespace thewizardplusplus::wizard_basic_2::compiler::translator::
exceptions;
using namespace boost;
IllegalMultiplicationOperationException::
IllegalMultiplicationOperationException(MultiplicationType::Types
multiplication_type, ValueType::Types operand1_type, ValueType::Types
operand2_type)
:
IllegalOperationException((format("multiplication operation \"%1%\"") %
MultiplicationType::convertToCppCode(multiplication_type)).str(),
operand1_type, operand2_type)
{}
|
lemma prime_elem_iff_prime_abs [simp]: "prime_elem k \<longleftrightarrow> prime \<bar>k\<bar>" for k :: int |
[STATEMENT]
lemma l5:
"--x \<le> --y \<Longrightarrow> -y \<le> -x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - - x \<le> - - y \<Longrightarrow> - y \<le> - x
[PROOF STEP]
by (metis (mono_tags, opaque_lifting) l3 a_stone bot_least il_bot_unit il_inf_left_unit il_inf_right_dist_sup il_inf_right_unit il_sub_inf_right_isotone sup_right_isotone) |
[STATEMENT]
lemma HMA_diagonal_to_Smith_aux_PQ_JNF[transfer_rule]:
"((=) ===> (HMA_M3 :: (_ \<Rightarrow> (_ \<times> ('a::bezout_ring^'nc::mod_type^'nr::mod_type) \<times> _) \<Rightarrow> _)) ===> HMA_M3)
(diagonal_to_Smith_aux_PQ_JNF xs) (diagonal_to_Smith_aux_PQ xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((=) ===> HMA_M3 ===> HMA_M3) (diagonal_to_Smith_aux_PQ_JNF xs) (diagonal_to_Smith_aux_PQ xs)
[PROOF STEP]
proof (intro rel_funI, clarify, goal_cases)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y a aa b ab ac ba. HMA_M3 (a, aa, b) (ab, ac, ba) \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs y (a, aa, b)) (diagonal_to_Smith_aux_PQ xs y (ab, ac, ba))
[PROOF STEP]
case (1 _ bezout P A Q P' A' Q')
[PROOF STATE]
proof (state)
this:
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. \<And>x y a aa b ab ac ba. HMA_M3 (a, aa, b) (ab, ac, ba) \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs y (a, aa, b)) (diagonal_to_Smith_aux_PQ xs y (ab, ac, ba))
[PROOF STEP]
note HMA_M3[transfer_rule] = 1
[PROOF STATE]
proof (state)
this:
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. \<And>x y a aa b ab ac ba. HMA_M3 (a, aa, b) (ab, ac, ba) \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs y (a, aa, b)) (diagonal_to_Smith_aux_PQ xs y (ab, ac, ba))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q'))
[PROOF STEP]
using xs HMA_M3
[PROOF STATE]
proof (prove)
using this:
\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc)
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q'))
[PROOF STEP]
proof (induct xs arbitrary: P' A' Q' P A Q)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set []. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF [] bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ [] bezout (P', A', Q'))
2. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
\<forall>j\<in>set []. j < min CARD('nr) CARD('nc)
HMA_M3 (P, A, Q) (P', A', Q')
goal (2 subgoals):
1. \<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set []. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF [] bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ [] bezout (P', A', Q'))
2. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>j\<in>set []. j < min CARD('nr) CARD('nc)
HMA_M3 (P, A, Q) (P', A', Q')
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<forall>j\<in>set []. j < min CARD('nr) CARD('nc)
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. HMA_M3 (diagonal_to_Smith_aux_PQ_JNF [] bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ [] bezout (P', A', Q'))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
HMA_M3 (diagonal_to_Smith_aux_PQ_JNF [] bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ [] bezout (P', A', Q'))
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
case (Cons i xs)
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (?P, ?A, ?Q) (?P', ?A', ?Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (?P, ?A, ?Q)) (diagonal_to_Smith_aux_PQ xs bezout (?P', ?A', ?Q'))
\<forall>j\<in>set (i # xs). j < min CARD('nr) CARD('nc)
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
note IH = Cons(1)
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (?P, ?A, ?Q) (?P', ?A', ?Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (?P, ?A, ?Q)) (diagonal_to_Smith_aux_PQ xs bezout (?P', ?A', ?Q'))
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
note HMA_M3 = Cons.prems(2)
[PROOF STATE]
proof (state)
this:
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
have i: "i < min CARD('nr) CARD('nc)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i < min CARD('nr) CARD('nc)
[PROOF STEP]
using Cons.prems
[PROOF STATE]
proof (prove)
using this:
\<forall>j\<in>set (i # xs). j < min CARD('nr) CARD('nc)
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. i < min CARD('nr) CARD('nc)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
i < min CARD('nr) CARD('nc)
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
let ?D_JNF = "(Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q))"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
let ?D_HA = "(Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
have rw_1: "diagonal_to_Smith_aux_PQ_JNF (i # xs) bezout (P, A, Q)
= diagonal_to_Smith_aux_PQ_JNF xs bezout ?D_JNF"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. diagonal_to_Smith_aux_PQ_JNF (i # xs) bezout (P, A, Q) = diagonal_to_Smith_aux_PQ_JNF xs bezout (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
diagonal_to_Smith_aux_PQ_JNF (i # xs) bezout (P, A, Q) = diagonal_to_Smith_aux_PQ_JNF xs bezout (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q))
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
have rw_2: "diagonal_to_Smith_aux_PQ (i # xs) bezout (P', A', Q')
= diagonal_to_Smith_aux_PQ xs bezout ?D_HA"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. diagonal_to_Smith_aux_PQ (i # xs) bezout (P', A', Q') = diagonal_to_Smith_aux_PQ xs bezout (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
diagonal_to_Smith_aux_PQ (i # xs) bezout (P', A', Q') = diagonal_to_Smith_aux_PQ xs bezout (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
have "HMA_M3 ?D_JNF ?D_HA"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q)) (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
[PROOF STEP]
using HMA_Diagonal_to_Smith_row_i_PQ[OF i] HMA_M3
[PROOF STATE]
proof (prove)
using this:
((=) ===> HMA_M3 ===> HMA_M3) (Diagonal_to_Smith_row_i_PQ_JNF i) (Diagonal_to_Smith_row_i_PQ i)
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q)) (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
[PROOF STEP]
unfolding rel_fun_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x y. x = y \<longrightarrow> (\<forall>xa ya. HMA_M3 xa ya \<longrightarrow> HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i x xa) (Diagonal_to_Smith_row_i_PQ i y ya))
HMA_M3 (P, A, Q) (P', A', Q')
goal (1 subgoal):
1. HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q)) (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q)) (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
goal (1 subgoal):
1. \<And>a xs P' A' Q' P A Q. \<lbrakk>\<And>P' A' Q' P A Q. \<lbrakk>\<forall>j\<in>set xs. j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q')); \<forall>j\<in>set (a # xs). j < min CARD('nr) CARD('nc); HMA_M3 (P, A, Q) (P', A', Q')\<rbrakk> \<Longrightarrow> HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (a # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (a # xs) bezout (P', A', Q'))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q)) (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
HMA_M3 (Diagonal_to_Smith_row_i_PQ_JNF i bezout (P, A, Q)) (Diagonal_to_Smith_row_i_PQ i bezout (P', A', Q'))
goal (1 subgoal):
1. HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (i # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (i # xs) bezout (P', A', Q'))
[PROOF STEP]
by (auto, smt Cons.hyps HMA_M3.elims(2) list.set_intros(2) local.Cons(2))
[PROOF STATE]
proof (state)
this:
HMA_M3 (diagonal_to_Smith_aux_PQ_JNF (i # xs) bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ (i # xs) bezout (P', A', Q'))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
HMA_M3 (diagonal_to_Smith_aux_PQ_JNF xs bezout (P, A, Q)) (diagonal_to_Smith_aux_PQ xs bezout (P', A', Q'))
goal:
No subgoals!
[PROOF STEP]
qed |
Formal statement is: lemma snd_o_paired [simp]: "snd \<circ> (\<lambda>(x,y). (f x y, g x y)) = (\<lambda>(x,y). g x y)" Informal statement is: $\mathrm{snd} \circ ((x,y) \mapsto (f(x,y), g(x,y))) = ((x,y) \mapsto g(x,y))$. |
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Yury Kudryashov

! This file was ported from Lean 3 source module topology.algebra.order.extend_from
! leanprover-community/mathlib commit 0a0ec35061ed9960bf0e7ffb0335f44447b58977
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Topology.Order.Basic
import Mathlib.Topology.ExtendFrom

/-!
# Lemmas about `extendFrom` in an order topology.
-/

open Filter Set TopologicalSpace

open Topology Classical

theorem continuousOn_Icc_extendFrom_Ioo [TopologicalSpace α] [LinearOrder α] [DenselyOrdered α]
    [OrderTopology α] [TopologicalSpace β] [RegularSpace β] {f : α → β} {a b : α} {la lb : β}
    (hab : a ≠ b) (hf : ContinuousOn f (Ioo a b)) (ha : Tendsto f (𝓝[>] a) (𝓝 la))
    (hb : Tendsto f (𝓝[<] b) (𝓝 lb)) : ContinuousOn (extendFrom (Ioo a b) f) (Icc a b) := by
  apply continuousOn_extendFrom
  · rw [closure_Ioo hab]
  · intro x x_in
    rcases eq_endpoints_or_mem_Ioo_of_mem_Icc x_in with (rfl | rfl | h)
    · exact ⟨la, ha.mono_left <| nhdsWithin_mono _ Ioo_subset_Ioi_self⟩
    · exact ⟨lb, hb.mono_left <| nhdsWithin_mono _ Ioo_subset_Iio_self⟩
    · exact ⟨f x, hf x h⟩
#align continuous_on_Icc_extend_from_Ioo continuousOn_Icc_extendFrom_Ioo

theorem eq_lim_at_left_extendFrom_Ioo [TopologicalSpace α] [LinearOrder α] [DenselyOrdered α]
    [OrderTopology α] [TopologicalSpace β] [T2Space β] {f : α → β} {a b : α} {la : β} (hab : a < b)
    (ha : Tendsto f (𝓝[>] a) (𝓝 la)) : extendFrom (Ioo a b) f a = la := by
  apply extendFrom_eq
  · rw [closure_Ioo hab.ne]
    simp only [le_of_lt hab, left_mem_Icc, right_mem_Icc]
  · simpa [hab]
#align eq_lim_at_left_extend_from_Ioo eq_lim_at_left_extendFrom_Ioo

theorem eq_lim_at_right_extendFrom_Ioo [TopologicalSpace α] [LinearOrder α] [DenselyOrdered α]
    [OrderTopology α] [TopologicalSpace β] [T2Space β] {f : α → β} {a b : α} {lb : β} (hab : a < b)
    (hb : Tendsto f (𝓝[<] b) (𝓝 lb)) : extendFrom (Ioo a b) f b = lb := by
  apply extendFrom_eq
  · rw [closure_Ioo hab.ne]
    simp only [le_of_lt hab, left_mem_Icc, right_mem_Icc]
  · simpa [hab]
#align eq_lim_at_right_extend_from_Ioo eq_lim_at_right_extendFrom_Ioo

theorem continuousOn_Ico_extendFrom_Ioo [TopologicalSpace α] [LinearOrder α] [DenselyOrdered α]
    [OrderTopology α] [TopologicalSpace β] [RegularSpace β] {f : α → β} {a b : α} {la : β}
    (hab : a < b) (hf : ContinuousOn f (Ioo a b)) (ha : Tendsto f (𝓝[>] a) (𝓝 la)) :
    ContinuousOn (extendFrom (Ioo a b) f) (Ico a b) := by
  apply continuousOn_extendFrom
  · rw [closure_Ioo hab.ne]
    exact Ico_subset_Icc_self
  · intro x x_in
    rcases eq_left_or_mem_Ioo_of_mem_Ico x_in with (rfl | h)
    · use la
      simpa [hab]
    · exact ⟨f x, hf x h⟩
#align continuous_on_Ico_extend_from_Ioo continuousOn_Ico_extendFrom_Ioo

theorem continuousOn_Ioc_extendFrom_Ioo [TopologicalSpace α] [LinearOrder α] [DenselyOrdered α]
    [OrderTopology α] [TopologicalSpace β] [RegularSpace β] {f : α → β} {a b : α} {lb : β}
    (hab : a < b) (hf : ContinuousOn f (Ioo a b)) (hb : Tendsto f (𝓝[<] b) (𝓝 lb)) :
    ContinuousOn (extendFrom (Ioo a b) f) (Ioc a b) := by
  have := @continuousOn_Ico_extendFrom_Ioo αᵒᵈ _ _ _ _ _ _ _ f _ _ lb hab
  erw [dual_Ico, dual_Ioi, dual_Ioo] at this
  exact this hf hb
#align continuous_on_Ioc_extend_from_Ioo continuousOn_Ioc_extendFrom_Ioo
|
(*
Title: Dual_Order.thy
Author: Jose Divasón <jose.divasonm at unirioja.es>
Author: Jesús Aransay <jesus-maria.aransay at unirioja.es>
*)
header "Dual Order"
theory Dual_Order
imports Main
begin
subsection{*Interpretation of dual order based on order*}
text{*Computable Greatest value operator for finite linorder classes. Based on @{thm "Least_def"}*}
interpretation dual_order: order "(op \<ge>)::('a::{order}=>'a=>bool)" "(op >)"
proof
fix x y::"'a::{order}" show "(y < x) = (y \<le> x \<and> \<not> x \<le> y)" using less_le_not_le .
show "x \<le> x" using order_refl .
fix z show "y \<le> x \<Longrightarrow> z \<le> y \<Longrightarrow> z \<le> x" using order_trans .
next
fix x y::"'a::{order}" show "y \<le> x \<Longrightarrow> x \<le> y \<Longrightarrow> x = y" by (metis eq_iff)
qed
interpretation dual_linorder: linorder "(op \<ge>)::('a::{linorder}=>'a=>bool)" "(op >)"
proof
fix x y::'a show "y \<le> x \<or> x \<le> y" using linear .
qed
lemma wf_wellorderI2:
assumes wf: "wf {(x::'a::ord, y). y < x}"
assumes lin: "class.linorder (\<lambda>(x\<Colon>'a) y\<Colon>'a. y \<le> x) (\<lambda>(x\<Colon>'a) y\<Colon>'a. y < x)"
shows "class.wellorder (\<lambda>(x\<Colon>'a) y\<Colon>'a. y \<le> x) (\<lambda>(x\<Colon>'a) y\<Colon>'a. y < x)"
using lin unfolding class.wellorder_def apply (rule conjI)
apply (rule class.wellorder_axioms.intro) by (blast intro: wf_induct_rule [OF wf])
lemma (in preorder) tranclp_less': "op >\<^sup>+\<^sup>+ = op >"
by(auto simp add: fun_eq_iff intro: less_trans elim: tranclp.induct)
interpretation dual_wellorder: wellorder "(op \<ge>)::('a::{linorder, finite}=>'a=>bool)" "(op >)"
proof (rule wf_wellorderI2)
show "wf {(x :: 'a, y). y < x}"
by(auto simp add: trancl_def tranclp_less' intro!: finite_acyclic_wf acyclicI)
show "class.linorder (\<lambda>(x\<Colon>'a) y\<Colon>'a. y \<le> x) (\<lambda>(x\<Colon>'a) y\<Colon>'a. y < x)"
unfolding class.linorder_def unfolding class.linorder_axioms_def unfolding class.order_def
unfolding class.preorder_def unfolding class.order_axioms_def by auto
qed
subsection{*Computable greatest operator*}
definition Greatest' :: "('a\<Colon>order \<Rightarrow> bool) \<Rightarrow> 'a\<Colon>order" (binder "GREATEST' " 10)
where "Greatest' P = dual_order.Least P"
text{*The following THE operator will be computable when the underlying type belongs to a suitable
class (for example, Enum).*}
lemmas Greatest'I2_order = dual_order.LeastI2_order[folded Greatest'_def]
lemmas Greatest'_equality = dual_order.Least_equality[folded Greatest'_def]
lemmas Greatest'I = dual_wellorder.LeastI[folded Greatest'_def]
lemmas Greatest'I2_ex = dual_wellorder.LeastI2_ex[folded Greatest'_def]
lemmas Greatest'I2_wellorder = dual_wellorder.LeastI2_wellorder[folded Greatest'_def]
lemmas Greatest'I_ex = dual_wellorder.LeastI_ex[folded Greatest'_def]
lemmas not_greater_Greatest' = dual_wellorder.not_less_Least[folded Greatest'_def]
lemmas Greatest'I2 = dual_wellorder.LeastI2[folded Greatest'_def]
lemmas Greatest'_ge = dual_wellorder.Least_le[folded Greatest'_def]
end
|
import requests as req
import datetime
try:
    import numpy as np
    import pandas as pd
except ImportError:
    # numpy/pandas are optional at import time; get_price_list requires pandas.
    pass
from bs4 import BeautifulSoup
from io import StringIO, BytesIO
from nsepy.archives import date_to_str, __raw_zip_data_to_str

PRICE_LIST_URL = 'http://www1.nseindia.com/content/historical/DERIVATIVES/%s/%s/fo%sbhav.csv.zip'
DERIVATIVE_ARCHIVES = 'http://www1.nseindia.com/products/dynaContent/common/productsSymbolMapping.jsp?instrumentType=OPTIDX&symbol=NIFTY&expiryDate=27-07-2006&optionType=CE&strikePrice=&dateRange=week&fromDate=&toDate=&segmentLink=9&symbolCount='


def get_price_list(dt, proxies={}):
    """Download the F&O bhavcopy (daily price list) for date ``dt`` as a DataFrame."""
    dt_str = date_to_str(dt, style='ddMMMyyyy')   # e.g. '01JAN2015'
    yy = dt_str[5:9]                              # four-digit year
    mm = dt_str[2:5].upper()                      # three-letter month
    url = PRICE_LIST_URL % (yy, mm, dt_str.upper())
    resp = req.get(url=url, proxies=proxies)
    # The archive is a zipped CSV; unzip it to text and parse it with pandas.
    # (Python 3: the helper is assumed to return text, so the old `unicode` wrapper is dropped.)
    df = pd.read_csv(StringIO(__raw_zip_data_to_str(resp.content)))
    del df['Unnamed: 15']                         # drop the trailing empty column
    return df
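

# A minimal usage sketch (hypothetical date; assumes pandas is installed and the
# NSE archive URL above is still reachable): fetch one day's F&O bhavcopy and
# peek at the first rows.
if __name__ == '__main__':
    bhavcopy = get_price_list(datetime.date(2015, 1, 1))
    print(bhavcopy.head())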
|
lemma emeasure_lborel_box[simp]: assumes [simp]: "\<And>b. b \<in> Basis \<Longrightarrow> l \<bullet> b \<le> u \<bullet> b" shows "emeasure lborel (box l u) = (\<Prod>b\<in>Basis. (u - l) \<bullet> b)" |
Require Import Iso.Isomorphism.
Require Import Cat.Category.
Require Import FunctionalExtensionality.
Inductive RNA := rA | rC | rG | rU.
Inductive DNA := dA | dC | dG | dT.
Theorem iso_rna_dna : Isomorphic CoqIsCat RNA DNA.
exists (fun x => match x with rA => dT | rC => dG | rG => dC | rU => dA end).
exists (fun x => match x with dA => rU | dC => rG | dG => rC | dT => rA end).
split;
apply functional_extensionality;
intros x;
destruct x;
reflexivity.
Qed.
|
Require Import ZArith.
Require Import Psatz.
Require Import Coq.Classes.RelationClasses.
Require Import TLC.LibTactics TLC.LibIntTactics.
Require Import TLC.LibOrder TLC.LibLogic.
Require Import LibFunOrd LibZExtra Cumul.
Require Import Filter.
(* todo: move *)
Lemma preorder_of_PreOrder A (R : A -> A -> Prop) :
PreOrder R ->
preorder R.
Proof.
intros (HR & HT). constructor.
- apply HR.
- intros y x z H1 H2. apply HT with (y := y); assumption.
Qed.
Section General_facts.
Context {A} (leA : A -> A -> Prop) {OA : preorder leA}.
Context {B} (leB : B -> B -> Prop) {OB : preorder leB}.
Context {C} (leC : C -> C -> Prop) {OC : preorder leC}.
Lemma monotonic_eq : forall f g,
monotonic leA leB f ->
(forall a, f a = g a) ->
monotonic leA leB g.
Proof.
introv M E.
intros a1 a2 H. rewrite <-!E. apply M. auto.
Qed.
Lemma monotonic_id :
monotonic leA leA (fun (x: A) => x).
Proof. intros a1 a2. auto. Qed.
Lemma monotonic_comp : forall (f : B -> C) (g : A -> B),
monotonic leB leC f ->
monotonic leA leB g ->
monotonic leA leC (fun x => f (g x)).
Proof. introv Hf Hg. intros a1 a2 ?. auto. Qed.
Lemma monotonic_cst : forall b,
monotonic leA leB (fun (_ : A) => b).
Proof. intros b a1 a2 ?. applys OB. Qed.
Lemma monotonic_after_of_monotonic : forall (f : A -> B),
monotonic leA leB f ->
(forall (a : A), monotonic (le_after a leA) leB f).
Proof.
intros f fmon a.
intros a1 a2 H. apply fmon. apply H.
Qed.
Lemma monotonic_of_monotonic_after : forall (f : A -> B),
(forall a, monotonic (le_after a leA) leB f) ->
monotonic leA leB f.
Proof.
intros f fmon.
intros a1 a2 H. eapply fmon.
splits; try (apply preorder_refl; typeclass); assumption.
Qed.
Lemma monotonic_after_weaken : forall (f : A -> B) (a1 a2 : A),
leA a1 a2 ->
monotonic (le_after a1 leA) leB f ->
monotonic (le_after a2 leA) leB f.
Proof.
intros f a1 a2 H fmon.
intros x y Hxy. apply fmon.
splits; try apply Hxy;
eapply (preorder_trans a2); first [apply H | apply Hxy].
Unshelve. typeclass. typeclass.
Qed.
End General_facts.
Lemma ultimately_monotonic_of_monotonic :
forall (A : filterType) B
(leA : A -> A -> Prop) (leB : B -> B -> Prop)
(f : A -> B),
monotonic leA leB f ->
ultimately A (fun a => monotonic (le_after a leA) leB f).
Proof.
introv fmon. applys filter_universe_alt.
eapply monotonic_after_of_monotonic. apply fmon.
Qed.
Section Z_facts.
Local Open Scope Z.
Context {A} (leA: A -> A -> Prop) {OA: preorder leA}.
Lemma monotonic_sum : forall (f g : A -> Z),
monotonic leA Z.le f ->
monotonic leA Z.le g ->
monotonic leA Z.le (fun x => f x + g x).
Proof.
introv Hf Hg. intros a1 a2 ?.
forwards~: Hf a1 a2. forwards~: Hg a1 a2. lia.
Qed.
Lemma monotonic_max : forall (f g : A -> Z),
monotonic leA Z.le f ->
monotonic leA Z.le g ->
monotonic leA Z.le (fun x => Z.max (f x) (g x)).
Proof.
introv Hf Hg. intros a1 a2 ?.
forwards~: Hf a1 a2. forwards~: Hg a1 a2. lia.
Qed.
Lemma monotonic_mul : forall (f g : A -> Z),
(forall x, 0 <= f x) ->
(forall x, 0 <= g x) ->
monotonic leA Z.le f ->
monotonic leA Z.le g ->
monotonic leA Z.le (fun x => f x * g x).
Proof.
introv Pf Pg Hf Hg. intros a1 a2 ?.
forwards~: Pf a1. forwards~: Pf a2. forwards~: Pg a1. forwards~: Pg a2.
forwards~: Hf a1 a2. forwards~: Hg a1 a2. nia.
Qed.
Lemma monotonic_mul_cst_l : forall (f : A -> Z) (c : Z),
0 <= c ->
monotonic leA Z.le f ->
monotonic leA Z.le (fun x => c * f x).
Proof.
introv cpos Hf. intros a1 a2 H.
forwards~: Hf a1 a2. nia.
Qed.
Lemma monotonic_mul_cst_r : forall (f : A -> Z) (c : Z),
0 <= c ->
monotonic leA Z.le f ->
monotonic leA Z.le (fun x => f x * c).
Proof.
introv cpos Hf.
intros a1 a2 H. forwards~: Hf a1 a2. nia.
Qed.
Lemma monotonic_log2 : monotonic Z.le Z.le Z.log2.
Proof.
intros a1 a2 H. apply~ Z.log2_le_mono.
Qed.
Lemma monotonic_pow_l : forall e,
monotonic (le_after 0 Z.le) Z.le (fun b => b ^ e).
Proof.
intros e a1 a2 H. apply~ Z.pow_le_mono_l. splits; apply H.
Qed.
Lemma monotonic_pow_r : forall b,
0 < b ->
monotonic Z.le Z.le (Z.pow b).
Proof.
intros b a1 a2 H. apply~ Z.pow_le_mono_r.
Qed.
Lemma monotonic_Z_to_ZZ : forall f g,
monotonic Z.le Z.le f ->
monotonic Z.le Z.le g ->
monotonic Z.le ZZle (fun x => (f x, g x)).
Proof.
intros f g Hf Hg x y H. unfold ZZle. auto.
Qed.
Lemma monotonic_cumul_Z : forall (f : Z -> Z) (lo : Z),
(forall x, lo <= x -> 0 <= f x) ->
monotonic Z.le Z.le (fun n => cumul lo n f).
Proof.
intros * Hf x1 x2 ?.
tests: (lo <= x1); cycle 1.
{ rewrite cumulP at 1. rewrite interval_empty. 2: math. cbn.
apply cumul_nonneg. intros. apply~ Hf. }
rewrite~ (@cumul_split x1 lo x2).
rewrite <-(@cumul_nonneg x1 x2 f). math.
intros. apply~ Hf. math.
Qed.
End Z_facts.
Section Ultimately_Z_facts.
Local Open Scope Z.
Lemma ultimately_monotonic_mul :
forall (A : filterType) (leA : A -> A -> Prop)
{OA: preorder leA}
(f g : A -> Z),
ultimately A (fun x => 0 <= f x) ->
ultimately A (fun x => 0 <= g x) ->
ultimately A (fun a => monotonic (le_after a leA) Z.le f) ->
ultimately A (fun a => monotonic (le_after a leA) Z.le g) ->
ultimately A (fun a => monotonic (le_after a leA) Z.le (fun x => f x * g x)).
Proof.
introv OA. introv. filter_closed_under_intersection.
introv (Pf & Pg & Hf & Hg). intros a1 a2 (a_le_a1 & a_le_a2 & a1_le_a2).
forwards~: Hf a a1; forwards~: Hg a a1;
forwards~: Hf a1 a2; forwards~: Hg a1 a2;
try (repeat split; first [apply OA | assumption]).
nia.
Qed.
End Ultimately_Z_facts.
(******************************************************************************)
(* Add lemmas to a [monotonic] hint base. *)
Hint Resolve monotonic_cst : monotonic.
Hint Resolve monotonic_id : monotonic.
Hint Resolve monotonic_sum : monotonic.
Hint Extern 1 (monotonic _ _ (fun _ => Z.sub _ ?y)) =>
apply monotonic_sum : monotonic.
Hint Extern 1 (monotonic _ _ (Z.sub ?y)) =>
apply monotonic_sum : monotonic.
Hint Resolve monotonic_max : monotonic.
Hint Resolve monotonic_mul : monotonic.
Hint Resolve monotonic_mul_cst_l : monotonic.
Hint Resolve monotonic_mul_cst_r : monotonic.
Hint Resolve monotonic_log2 : monotonic.
Hint Resolve monotonic_pow_l : monotonic.
Hint Resolve monotonic_pow_r : monotonic.
Hint Extern 1 (monotonic _ _ (fun _ => Z.log2 _)) =>
apply monotonic_comp with (leB := Z.le);
[ apply monotonic_log2 | ] : monotonic.
Hint Extern 2 (monotonic _ _ (fun _ => Z.pow ?b _)) =>
apply monotonic_comp with (leB := Z.le);
[ apply monotonic_pow_r | ].
Hint Extern 2 (monotonic _ _ (Z.pow ?b)) =>
apply monotonic_comp with (leB := Z.le);
[ apply monotonic_pow_r | ].
(* todo: Z.pow _ ?e *)
Hint Resolve monotonic_Z_to_ZZ : monotonic.
Hint Resolve monotonic_cumul_Z : monotonic.
Hint Extern 1 (monotonic _ _ (fun _ => ?f _)) =>
match goal with
| H: monotonic ?le _ f |- _ =>
apply monotonic_comp with (leB := le); [ apply H | ]
end : monotonic.
Hint Extern 0 (preorder _) =>
first [ typeclass
| apply preorder_of_PreOrder; typeclass ]
: monotonic.
Hint Extern 100 => try (intros; omega) : monotonic_sidegoals.
Hint Extern 999 (monotonic _ _ _) => shelve : monotonic_fallback.
Hint Extern 999 (preorder _) => shelve : monotonic_fallback.
(******************************************************************************)
(* TODO: make the search depth customisable *)
Ltac monotonic :=
unshelve (auto 20 with zarith typeclass_instances
monotonic
monotonic_sidegoals
monotonic_fallback).
Ltac monotonic_trysolve :=
auto 20 with zarith typeclass_instances
monotonic_sidegoals
monotonic.
(* TODO: extend monotonic_Z to handle monotonic (le_after ..) .., and ultimately
(fun a => monotonic (le_after a ..) ..) *)
(* Ltac monotonic_Z_auto_step := *)
(* match goal with *)
(* | |- @monotonic _ _ _ _ _ => *)
(* let a := fresh "a" in *)
(* apply monotonic_of_monotonic_after; [ *)
(* eauto; typeclass *)
(* | intro a; *)
(* monotonic_after_Z_auto; *)
(* try apply monotonic_after_of_monotonic ] *)
(* | |- @monotonic_after _ _ _ _ _ _ => *)
(* eapply monotonic_after_of_monotonic; eauto *)
(* | |- context [ @monotonic_after _ _ _ _ _ ] => *)
(* eapply ultimately_monotonic_of_monotonic; *)
(* eauto; try typeclass *)
(* end. *)
|
import Lean
open Lean.Elab.Term
open Lean.Elab.Command

elab "∃'" b:term "," P:term : term => do
  let ex ← `(Exists (fun $b => $P));
  elabTerm ex none

elab "#check2" b:term : command => do
  let cmd ← `(#check $b #check $b);
  elabCommand cmd

#check ∃' x, x > 0
#check ∃' (x : UInt32), x > 0
#check2 10

elab "try" t:tactic : tactic => do
  let t' ← `(tactic| first | $t | skip);
  Lean.Elab.Tactic.evalTactic t'

theorem tst (x y z : Nat) : y = z → x = x → x = y → x = z :=
by {
  intro h1; intro h2; intro h3;
  apply @Eq.trans;
  try exact h1; -- `exact h1` fails
  traceState;
  try exact h3;
  traceState;
  try exact h1;
}
|
(*:maxLineLen=78:*)
section \<open> Finite Boolean Algebra Probability \label{sec:finite-boolean-algebra-probability} \<close>
(* TODO: Cite Boole's Laws of Thought, Dempster, Nils Nilsson's Probabilistic Logic *)
theory Finite_Probability
imports
"../Logic/Probability_Logic"
Finite_Boolean_Algebra
begin
sledgehammer_params [smt_proofs = false]
no_notation
verum ("\<top>") and
falsum ("\<bottom>") and
disjunction (infixr "\<squnion>" 67) and
conjunction (infixr "\<sqinter>" 67) and
arbitrary_conjunction ("\<Sqinter>") and
arbitrary_disjunction ("\<Squnion>")
class \<P> =
fixes \<P> :: "'a \<Rightarrow> real"
subsection \<open> Definition of Finitely Additive Probability \<close>
text \<open> TODO: cite @{cite booleChapterXVIIGeneral1853}, @{cite broderickBooleanAlgebraProbability1940}, ``Elementary Theory of Probability'' @{cite kolmogoroffChapterElementareWahrscheinlichkeitsrechnung1933} \<close>
class finitely_additive_probability = \<P> + boolean_algebra +
assumes probability_non_negative: "\<P> \<phi> \<ge> 0"
assumes probability_unity: "\<P> \<top> = 1"
assumes finite_additivity: "\<phi> \<sqinter> \<psi> = \<bottom> \<Longrightarrow> \<P> (\<phi> \<squnion> \<psi>) = \<P> \<phi> + \<P> \<psi>"
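text \<open> Spelled out in conventional notation, the three axioms above say that a
finitely additive probability on a Boolean algebra is a map $\mathcal{P}$ with
$\mathcal{P}(\varphi) \geq 0$, $\mathcal{P}(\top) = 1$, and
$\mathcal{P}(\varphi \sqcup \psi) = \mathcal{P}(\varphi) + \mathcal{P}(\psi)$
whenever $\varphi \sqcap \psi = \bot$. \<close>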
context boolean_algebra begin
subsection \<open> Equivalence With Probability Logic \label{subsec:boolean-algebra-probability}\<close>
text \<open> The Boolean algebra formulation of finitely additive probability is
in fact a special case of probability logic as presented in \S\ref{sec:definition-of-probability-logic}.\<close>
definition residual (infixr "\<Rightarrow>" 70) where
"\<phi> \<Rightarrow> \<psi> \<equiv> - \<phi> \<squnion> \<psi>"
lemma residual_galois_connection:
"A \<sqinter> B \<le> C \<longleftrightarrow> B \<le> A \<Rightarrow> C"
proof
assume "A \<sqinter> B \<le> C"
have "B \<squnion> (A \<Rightarrow> C) = A \<Rightarrow> C \<squnion> B \<sqinter> \<top>"
unfolding residual_def
using inf_top.right_neutral
sup_commute
by presburger
moreover have "\<top> = A \<Rightarrow> C \<squnion> A"
unfolding residual_def
using sup_commute sup_compl_top_left2
by fastforce
ultimately have "B \<squnion> (A \<Rightarrow> C) = A \<Rightarrow> C \<squnion> B \<sqinter> A"
unfolding residual_def
by (simp add: sup_inf_distrib1)
moreover have "A \<sqinter> B \<squnion> C = C"
using \<open>A \<sqinter> B \<le> C\<close> sup.absorb_iff2 by blast
ultimately show "B \<le> A \<Rightarrow> C"
unfolding residual_def
by (metis
inf_commute
sup.absorb_iff2
sup.semigroup_axioms
sup_commute
semigroup.assoc)
next
assume "B \<le> A \<Rightarrow> C"
hence "B \<sqinter> (A \<Rightarrow> C) = B"
using inf_absorb1
unfolding residual_def
by fastforce
moreover have "A \<Rightarrow> C = C \<squnion> - A"
unfolding residual_def
by (simp add: abel_semigroup.commute sup.abel_semigroup_axioms)
moreover have "A \<sqinter> B \<sqinter> C = A \<sqinter> (B \<sqinter> C)"
by (simp add: inf.semigroup_axioms semigroup.assoc)
ultimately show "A \<sqinter> B \<le> C"
unfolding residual_def
by (metis
(no_types)
inf.orderI
inf_compl_bot_right
inf_sup_distrib1
sup_bot.right_neutral)
qed
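text \<open> As an informal gloss, the residual $\varphi \Rightarrow \psi = -\varphi \sqcup \psi$
is material implication, and the lemma above is the usual adjunction between meet and
implication: $a \sqcap b \leq c$ holds exactly when $b \leq a \Rightarrow c$. \<close>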
interpretation classical_logic "(=) \<top>" "(\<Rightarrow>)" \<bottom>
proof standard
fix \<phi> \<psi>
show "\<top> = \<phi> \<Rightarrow> \<psi> \<Rightarrow> \<phi>"
unfolding residual_def
by (simp add: sup.commute)
next
fix \<phi> \<psi> \<chi>
show "\<top> = (\<phi> \<Rightarrow> \<psi> \<Rightarrow> \<chi>) \<Rightarrow> (\<phi> \<Rightarrow> \<psi>) \<Rightarrow> \<phi> \<Rightarrow> \<chi>"
proof -
have "\<top> = (\<phi> \<Rightarrow> \<chi>) \<Rightarrow> \<phi> \<Rightarrow> \<chi>"
unfolding residual_def
by (metis compl_sup_top)
moreover have "- \<phi> \<Rightarrow> \<phi> \<Rightarrow> \<chi> = - \<phi> \<Rightarrow> (\<phi> \<Rightarrow> \<psi> \<Rightarrow> \<chi>) \<Rightarrow> \<phi> \<Rightarrow> \<chi>"
unfolding residual_def
by (metis sup_compl_top_left2 sup_left_commute)
moreover have "\<psi> \<Rightarrow> (\<phi> \<Rightarrow> \<psi> \<Rightarrow> \<chi>) \<Rightarrow> \<phi> \<Rightarrow> \<chi> = \<chi> \<Rightarrow> \<phi> \<Rightarrow> \<chi>"
unfolding residual_def
by (metis compl_sup_top sup_compl_top_left2 sup_left_commute)
ultimately have "\<top> = (\<phi> \<Rightarrow> \<psi> \<Rightarrow> \<chi>) \<Rightarrow> (\<phi> \<Rightarrow> \<chi>) \<squnion> - (\<phi> \<Rightarrow> \<psi>)"
unfolding residual_def
using
abel_semigroup.commute
sup.abel_semigroup_axioms
sup_inf_distrib1
by fastforce
hence "\<top> = (\<phi> \<Rightarrow> \<psi>) \<Rightarrow> (\<phi> \<Rightarrow> \<psi> \<Rightarrow> \<chi>) \<Rightarrow> \<phi> \<Rightarrow> \<chi>"
unfolding residual_def
by (simp add: abel_semigroup.commute sup.abel_semigroup_axioms)
thus ?thesis
unfolding residual_def
by (simp add: sup_left_commute)
qed
next
fix \<phi> \<psi>
show "\<top> = \<phi> \<Rightarrow> \<psi> \<Longrightarrow> \<top> = \<phi> \<Longrightarrow> \<top> = \<psi>"
unfolding residual_def
using compl_top_eq
by auto
next
fix \<phi>
show "\<top> = ((\<phi> \<Rightarrow> \<bottom>) \<Rightarrow> \<bottom>) \<Rightarrow> \<phi>"
unfolding residual_def
by simp
qed
lemmas axiom_k = axiom_k
lemmas axiom_s = axiom_s
lemmas double_negation = double_negation
lemmas modus_ponens = modus_ponens
lemmas probabilities_def = probabilities_def
lemma probabilities_def':
"probabilities =
{ \<P>. class.finitely_additive_probability
\<P> (-) uminus (\<sqinter>) (\<le>) (<) (\<squnion>) \<bottom> \<top> }"
(is "_ = ?ba_probabilities")
proof
show "?ba_probabilities \<subseteq> probabilities"
proof
fix \<P>
assume "\<P> \<in> ?ba_probabilities"
from this interpret
finitely_additive_probability \<P>
unfolding probabilities_def
by auto
have "class.probability_logic ((=) \<top>) (\<Rightarrow>) \<bottom> \<P>"
proof standard
fix \<phi>
show "0 \<le> \<P> \<phi>"
by (simp add: probability_non_negative)
next
fix \<phi>
show "\<top> = \<phi> \<Longrightarrow> \<P> \<phi> = 1"
using probability_unity by blast
next
fix \<phi> \<psi>
assume "\<top> = (\<phi> \<Rightarrow> \<psi> \<Rightarrow> \<bottom>)"
hence "\<phi> \<sqinter> \<psi> = \<bottom>"
unfolding residual_def
using compl_top_eq by auto
thus "\<P> ((\<phi> \<Rightarrow> \<bottom>) \<Rightarrow> \<psi>) = \<P> \<phi> + \<P> \<psi>"
unfolding residual_def
by (simp add: finite_additivity)
qed
thus "\<P> \<in> probabilities"
unfolding probabilities_def
by auto
qed
next
show "probabilities \<subseteq> ?ba_probabilities"
proof
fix \<P>
assume "\<P> \<in> probabilities"
from this interpret probability_logic "(=) \<top>" "(\<Rightarrow>)" \<bottom> \<P>
unfolding probabilities_def
by auto
have
"class.finitely_additive_probability
\<P> (-) uminus (\<sqinter>) (\<le>) (<) (\<squnion>) \<bottom> \<top>"
proof standard
fix \<phi>
show "0 \<le> \<P> \<phi>"
by (simp add: probability_non_negative)
next
show "\<P> \<top> = 1"
using probability_unity by blast
next
fix \<phi> \<psi>
assume "\<phi> \<sqinter> \<psi> = \<bottom>"
thus "\<P> (\<phi> \<squnion> \<psi>) = \<P> \<phi> + \<P> \<psi>"
using
probability_implicational_additivity
compl_bot_eq
sup_bot.right_neutral
residual_def
by force
qed
thus "\<P> \<in> ?ba_probabilities"
by auto
qed
qed
lemma join_prime_to_dirac_measure:
assumes "\<alpha> \<in> \<J>"
shows "(\<lambda> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) \<in> dirac_measures"
(is "?\<delta> \<in> dirac_measures")
proof -
have "class.probability_logic ((=) \<top>) (\<Rightarrow>) \<bottom> ?\<delta>"
proof standard
fix \<phi>
show "0 \<le> ?\<delta> \<phi>"
by fastforce
next
fix \<phi>
show "\<top> = \<phi> \<Longrightarrow> (if \<alpha> \<le> \<phi> then 1 else 0) = 1"
using top_greatest by auto
next
fix \<phi> \<psi>
assume "\<top> = \<phi> \<Rightarrow> \<psi> \<Rightarrow> \<bottom>"
hence "\<phi> \<sqinter> \<psi> = \<bottom>"
using compl_top_eq residual_def by auto
hence "\<not> \<alpha> \<le> \<phi> \<or> \<not> \<alpha> \<le> \<psi>"
using \<open>\<alpha> \<in> \<J>\<close>
unfolding join_primes_def join_prime_def
using bot_unique inf.boundedI by blast
moreover have "\<alpha> \<le> \<phi> \<squnion> \<psi> \<longleftrightarrow> \<alpha> \<le> \<phi> \<or> \<alpha> \<le> \<psi>"
using \<open>\<alpha> \<in> \<J>\<close>
unfolding join_primes_def join_prime_def
using le_supI1 le_supI2 by blast
ultimately show "?\<delta> ((\<phi> \<Rightarrow> \<bottom>) \<Rightarrow> \<psi>) = ?\<delta> \<phi> + ?\<delta> \<psi>"
unfolding residual_def
by auto
qed
thus ?thesis
unfolding dirac_measures_def
by simp
qed
lemma conditional_probability_measure:
fixes \<P> :: "'a \<Rightarrow> real"
assumes "\<P> \<in> probabilities" and "\<P> \<psi> \<noteq> 0"
shows "(\<lambda> \<phi>. \<P> (\<phi> \<sqinter> \<psi>) / \<P> \<psi>) \<in> probabilities"
proof -
from assms interpret
finitely_additive_probability \<P>
unfolding probabilities_def'
by auto
have "\<P> \<psi> > 0"
using
\<open>\<P> \<psi> \<noteq> 0\<close>
probability_non_negative
order_class.dual_order.order_iff_strict
by blast
let ?\<P>' = "\<lambda> \<phi>. \<P> (\<phi> \<sqinter> \<psi>) / \<P> \<psi>"
have "class.finitely_additive_probability
?\<P>' (-) uminus (\<sqinter>) (\<le>) (<) (\<squnion>) \<bottom> \<top>"
proof standard
fix \<phi>
show "0 \<le> \<P> (\<phi> \<sqinter> \<psi>) / \<P> \<psi>"
by (simp add: probability_non_negative)
next
show "\<P> (\<top> \<sqinter> \<psi>) / \<P> \<psi> = 1"
using \<open>0 < \<P> \<psi>\<close> inf_top_left by auto
next
fix \<phi> \<chi>
assume "\<phi> \<sqinter> \<chi> = \<bottom>"
hence "\<P> ((\<phi> \<squnion> \<chi>) \<sqinter> \<psi>) = \<P> (\<phi> \<sqinter> \<psi>) + \<P> (\<chi> \<sqinter> \<psi>)"
by (metis
finite_additivity
inf.assoc
inf.commute
inf_bot_right
inf_sup_distrib2)
thus "\<P> ((\<phi> \<squnion> \<chi>) \<sqinter> \<psi>) / \<P> \<psi> = \<P> (\<phi> \<sqinter> \<psi>) / \<P> \<psi> + \<P> (\<chi> \<sqinter> \<psi>) / \<P> \<psi>"
by (simp add: add_divide_distrib)
qed
thus ?thesis
unfolding probabilities_def'
by blast
qed
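text \<open> Informally, when $\mathcal{P}(\psi) \neq 0$ the map
$\varphi \mapsto \mathcal{P}(\varphi \sqcap \psi) / \mathcal{P}(\psi)$ is the usual
conditional probability $\mathcal{P}(\varphi \mid \psi)$, so the lemma above states
that conditioning on $\psi$ again yields a finitely additive probability. \<close>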
lemma probabilities_convex:
fixes \<P> \<Q> :: "'a \<Rightarrow> real" and \<alpha> :: real
assumes "{\<P>,\<Q>} \<subseteq> probabilities" and "0 \<le> \<alpha>" and "\<alpha> \<le> 1"
shows "(\<lambda> \<phi>. \<alpha> * \<P> \<phi> + (1 - \<alpha>) * \<Q> \<phi>) \<in> probabilities"
proof -
let ?\<M> = "\<lambda> \<phi>. \<alpha> * \<P> \<phi> + (1 - \<alpha>) * \<Q> \<phi>"
from assms interpret finitely_additive_probability \<P>
unfolding probabilities_def'
by auto
note \<P>_probability_non_negative = probability_non_negative
note \<P>_probability_unity = probability_unity
note \<P>_finite_additivity = finite_additivity
from assms interpret finitely_additive_probability \<Q>
unfolding probabilities_def'
by auto
have "class.finitely_additive_probability
?\<M> (-) uminus (\<sqinter>) (\<le>) (<) (\<squnion>) \<bottom> \<top>"
proof standard
fix \<phi>
show "0 \<le> \<alpha> * \<P> \<phi> + (1 - \<alpha>) * \<Q> \<phi>"
by (simp add:
\<P>_probability_non_negative
probability_non_negative
\<open>0 \<le> \<alpha>\<close>
\<open>\<alpha> \<le> 1\<close>)
next
show "\<alpha> * \<P> \<top> + (1 - \<alpha>) * \<Q> \<top> = 1"
using \<P>_probability_unity probability_unity by auto
next
fix \<phi> \<psi>
assume "\<phi> \<sqinter> \<psi> = \<bottom>"
thus " \<alpha> * \<P> (\<phi> \<squnion> \<psi>) + (1 - \<alpha>) * \<Q> (\<phi> \<squnion> \<psi>)
= \<alpha> * \<P> \<phi> + (1 - \<alpha>) * \<Q> \<phi> + (\<alpha> * \<P> \<psi> + (1 - \<alpha>) * \<Q> \<psi>)"
by (simp add: \<P>_finite_additivity distrib_left finite_additivity)
qed
thus ?thesis
unfolding probabilities_def'
by auto
qed
end
context finitely_additive_probability begin
interpretation classical_logic "(=) \<top>" "(\<Rightarrow>)" \<bottom>
by (standard,
simp add: axiom_k,
simp add: axiom_s,
metis modus_ponens,
simp add: double_negation)
interpretation probability_logic "(=) \<top>" "(\<Rightarrow>)" \<bottom> \<P>
proof -
have "class.finitely_additive_probability
\<P> (-) uminus (\<sqinter>) (\<le>) (<) (\<squnion>) \<bottom> \<top>"
by standard
hence "\<P> \<in> probabilities"
unfolding probabilities_def'
by auto
thus "class.probability_logic ((=) \<top>) (\<Rightarrow>) \<bottom> \<P>"
unfolding probabilities_def
by blast
qed
lemma sum_rule: "\<P> a + \<P> b = \<P> (a \<sqinter> b) + \<P> (a \<squnion> b)"
by (metis compl_inf
conjunction_def
disjunction_def
double_compl
residual_def
sum_rule
sup.commute
sup_bot.left_neutral)
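text \<open> Rearranged, the lemma above is the familiar inclusion--exclusion identity
$\mathcal{P}(a \sqcup b) = \mathcal{P}(a) + \mathcal{P}(b) - \mathcal{P}(a \sqcap b)$. \<close>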
lemma conditional_probability_join_prime:
assumes "\<alpha> \<in> \<J>" and "\<P> \<alpha> \<noteq> 0"
shows "\<P> (\<phi> \<sqinter> \<alpha>) / \<P> \<alpha> = (if \<alpha> \<le> \<phi> then 1 else 0)"
proof (cases "\<alpha> \<le> \<phi>")
case True
hence "\<P> (\<phi> \<sqinter> \<alpha>) = \<P> \<alpha>"
by (simp add: inf_absorb2)
hence "\<P> (\<phi> \<sqinter> \<alpha>) / \<P> \<alpha> = 1"
using \<open>\<P> \<alpha> \<noteq> 0\<close> right_inverse_eq by blast
then show ?thesis
using \<open>\<alpha> \<le> \<phi>\<close> by simp
next
case False
hence "\<alpha> \<le> - \<phi>"
using \<open>\<alpha> \<in> \<J>\<close> top_greatest
unfolding join_primes_def join_prime_def
by force
hence "\<phi> \<sqinter> \<alpha> = \<bottom>"
by (metis inf_absorb1 inf_compl_bot_right)
hence "\<P> (\<phi> \<sqinter> \<alpha>) / \<P> \<alpha> = 0"
using finite_additivity inf_bot_right sup_bot.right_neutral by fastforce
then show ?thesis
using \<open>\<not> \<alpha> \<le> \<phi>\<close> by auto
qed
lemma join_prime_conditional_probability:
assumes "\<forall> \<phi>. \<P> (\<phi> \<sqinter> \<alpha>) / \<P> \<alpha> = (if \<alpha> \<le> \<phi> then 1 else 0)"
shows "\<alpha> \<in> \<J>"
proof -
have "\<P> (\<top> \<sqinter> \<alpha>) / \<P> \<alpha> = 1"
using assms top_greatest by auto
hence "\<P> \<alpha> > 0"
using less_eq_real_def probability_non_negative by fastforce
hence "\<alpha> \<noteq> \<bottom>"
using gaines_weatherson_antithesis by auto
moreover
have \<star>: "\<forall> \<phi>. \<P> (\<phi> \<sqinter> \<alpha>) = (if \<alpha> \<le> \<phi> then \<P> \<alpha> else 0)"
by (metis \<open>\<P> (\<top> \<sqinter> \<alpha>) / \<P> \<alpha> = 1\<close>
\<open>\<forall> \<phi>. \<P> (\<phi> \<sqinter> \<alpha>) / \<P> \<alpha> = (if \<alpha> \<le> \<phi> then 1 else 0)\<close>
divide_eq_0_iff
inf.absorb2 zero_neq_one)
{
fix \<phi> \<psi>
assume "\<alpha> \<le> \<phi> \<squnion> \<psi>"
have "\<alpha> \<le> \<phi> \<or> \<alpha> \<le> \<psi>"
proof (rule ccontr)
assume "\<not> (\<alpha> \<le> \<phi> \<or> \<alpha> \<le> \<psi>)"
hence "\<P> (\<phi> \<sqinter> \<alpha>) = 0"
"\<P> (\<psi> \<sqinter> \<alpha>) = 0"
using \<star> by auto
hence "0 = \<P> ((\<phi> \<sqinter> \<alpha>) \<sqinter> (\<psi> \<sqinter> \<alpha>)) + \<P> ((\<phi> \<sqinter> \<alpha>) \<squnion> (\<psi> \<sqinter> \<alpha>))"
using sum_rule by auto
hence "0 = \<P> (\<phi> \<sqinter> \<psi> \<sqinter> \<alpha>) + \<P> ((\<phi> \<squnion> \<psi>) \<sqinter> \<alpha>)"
by (simp add: inf.commute inf.left_commute inf_sup_distrib1)
hence "0 = \<P> (\<phi> \<sqinter> \<psi> \<sqinter> \<alpha>) + \<P> \<alpha>"
by (simp add: \<open>\<alpha> \<le> \<phi> \<squnion> \<psi>\<close> inf.absorb2)
hence "0 > \<P> (\<phi> \<sqinter> \<psi> \<sqinter> \<alpha>)"
using \<open>0 < \<P> \<alpha>\<close> by linarith
thus False
using probability_non_negative not_le by blast
qed
}
ultimately show ?thesis
unfolding join_primes_def join_prime_def
by blast
qed
lemma monotonicity: "a \<le> b \<Longrightarrow> \<P> a \<le> \<P> b"
by (metis
monotonicity
residual_def
sup.commute
sup.left_commute
sup_absorb1
sup_cancel_left1)
lemmas gaines_weatherson_antithesis = gaines_weatherson_antithesis
lemma complementation: "\<P> (- \<phi>) = 1 - \<P> \<phi>"
by (metis add_diff_cancel_left'
finite_additivity
probability_unity
inf_compl_bot
sup_compl_top)
lemma finite_certainty:
assumes "finite A" and "\<forall> a \<in> A. \<P> a = 1"
shows "\<P> (Finite_Set.fold (\<sqinter>) \<top> A) = 1"
using assms
proof (induct A rule: finite_induct)
case empty
show "\<P> (Finite_Set.fold (\<sqinter>) \<top> {}) = 1"
by (simp add: probability_unity)
next
case (insert a A)
have \<star>: "\<P> (Finite_Set.fold (\<sqinter>) \<top> (insert a A))
= \<P> (a \<sqinter> Finite_Set.fold (\<sqinter>) \<top> A)"
(is "\<P> ?A' = \<P> (a \<sqinter> ?A)")
by (simp add:
comp_fun_idem.fold_insert_idem
insert.hyps(1)
comp_fun_idem_inf)
have "\<P> ?A = 1"
using insert.hyps(3) insert.prems by blast
moreover have "\<P> a = 1"
by (simp add: insert.prems)
moreover
have "a \<le> a \<squnion> ?A" by simp
hence "1 \<le> \<P> (a \<squnion> ?A)"
using monotonicity \<open>\<P> a = 1\<close>
by fastforce
hence "\<P> (a \<squnion> ?A) = 1"
using unity_upper_bound [of "a \<squnion> ?A"]
by linarith
ultimately have "\<P> (a \<sqinter> ?A) = 1"
using sum_rule [where a="a" and b="?A"]
by linarith
thus "\<P> ?A' = 1"
using \<star> by auto
qed
lemma full_finite_additivity:
assumes "finite A" and "\<forall> a \<in> A. \<forall> a' \<in> A. a \<noteq> a' \<longrightarrow> a \<sqinter> a' = \<bottom>"
shows "\<P> (Finite_Set.fold (\<squnion>) \<bottom> A) = (\<Sum> a \<in> A. \<P> a)"
using assms
proof (induct A rule: finite_induct)
case empty
then show ?case
using gaines_weatherson_antithesis by fastforce
next
case (insert a A)
hence "\<forall>a' \<in> A. a \<sqinter> a' = \<bottom>"
by auto
with \<open>finite A\<close> \<open>a \<notin> A\<close>
have "a \<sqinter> (Finite_Set.fold (\<squnion>) \<bottom> A) = \<bottom>" (is "a \<sqinter> ?UA = \<bottom>")
proof (induct A rule: finite_induct)
case empty
then show ?case by auto
next
case (insert a' A)
hence "a \<sqinter> (Finite_Set.fold (\<squnion>) \<bottom> A) = \<bottom>" (is "a \<sqinter> ?UA = \<bottom>")
"a \<sqinter> a' = \<bottom>"
by auto
moreover
have "Finite_Set.fold (\<squnion>) \<bottom> ({a'} \<union> A) = a' \<squnion> ?UA"
(is "?UA' = _")
by (simp add:
comp_fun_idem.fold_insert_idem
\<open>finite A\<close>
comp_fun_idem_sup)
hence "a \<sqinter> ?UA' = (a \<sqinter> a') \<squnion> (a \<sqinter> ?UA)"
using inf_sup_distrib1 by auto
ultimately show ?case
by auto
qed
moreover have "Finite_Set.fold (\<squnion>) \<bottom> ({a} \<union> A) = a \<squnion> ?UA"
by (simp add: comp_fun_idem.fold_insert_idem \<open>finite A\<close> comp_fun_idem_sup)
moreover have "\<P> ?UA = (\<Sum> a \<in> A. \<P> a)"
using insert by blast
ultimately show ?case
by (simp add: \<open>finite A\<close> \<open>a \<notin> A\<close> finite_additivity)
qed
end
subsection \<open> Collapse Theorem For Finite Boolean Algebras \label{subsec:finite-boolean-algebra-collapse} \<close>
context finite_boolean_algebra begin
interpretation classical_logic "(=) \<top>" "(\<Rightarrow>)" \<bottom>
by (standard,
simp add: axiom_k,
simp add: axiom_s,
metis modus_ponens,
simp add: double_negation)
lemma join_prime_decomposition:
fixes \<P> :: "'a \<Rightarrow> real"
assumes "\<P> \<in> probabilities"
shows "\<P> \<phi> = (\<Sum> \<alpha> \<in> \<J>. \<P> \<alpha> * (if \<alpha> \<le> \<phi> then 1 else 0))"
proof -
interpret finitely_additive_probability \<P>
using \<open>\<P> \<in> probabilities\<close>
unfolding probabilities_def'
by blast
have \<star>: "\<phi> = \<Squnion> { \<alpha> \<in> \<J>. \<alpha> \<le> \<phi> }" (is "\<phi> = \<Squnion> ?\<J>\<phi>")
using
join_prime_embedding_def
sup_join_prime_embedding_ident
by auto
have "\<forall> \<alpha> \<in> ?\<J>\<phi>. \<forall> \<alpha>' \<in> ?\<J>\<phi>. \<alpha> \<noteq> \<alpha>' \<longrightarrow> \<alpha> \<sqinter> \<alpha>' = \<bottom>"
unfolding join_primes_def
by (metis inf.cobounded1 inf.commute join_prime_def' mem_Collect_eq)
hence "\<P> (\<Squnion> ?\<J>\<phi>) = (\<Sum> \<alpha> \<in> ?\<J>\<phi>. \<P> \<alpha>)"
by (simp add: Sup_def full_finite_additivity)
with \<star> have \<dagger>: "\<P> \<phi> = (\<Sum> \<alpha> \<in> ?\<J>\<phi>. \<P> \<alpha>)" by auto
have "finite ?\<J>\<phi>" by auto
hence "(\<Sum> \<alpha> \<in> ?\<J>\<phi>. \<P> \<alpha>) = (\<Sum> \<alpha> \<in> ?\<J>\<phi>. \<P> \<alpha> * (if \<alpha> \<le> \<phi> then 1 else 0))"
by (induct ?\<J>\<phi> rule: finite_induct, auto)
with \<dagger> have "\<P> \<phi> = (\<Sum> \<alpha> \<in> ?\<J>\<phi>. \<P> \<alpha> * (if \<alpha> \<le> \<phi> then 1 else 0))"
(is "_ = ?\<Sigma>1")
by presburger
moreover
let ?n\<J>\<phi> = "{ \<alpha> \<in> \<J>. \<not> \<alpha> \<le> \<phi> }"
have "finite ?n\<J>\<phi>" by auto
hence "0 = (\<Sum> \<alpha> \<in> ?n\<J>\<phi>. \<P> \<alpha> * (if \<alpha> \<le> \<phi> then 1 else 0))"
(is "_ = ?\<Sigma>2")
by (induct ?n\<J>\<phi> rule: finite_induct, auto)
with \<dagger> have \<ddagger>: "\<P> \<phi> = ?\<Sigma>1 + ?\<Sigma>2" by auto
have "\<forall>\<alpha> \<in> ?\<J>\<phi> \<inter> ?n\<J>\<phi>. \<P> \<alpha> * (if \<alpha> \<le> \<phi> then 1 else 0) = 0" by auto
with \<ddagger> have "\<P> \<phi> = (\<Sum> \<alpha> \<in> ?\<J>\<phi> \<union> ?n\<J>\<phi>. \<P> \<alpha> * (if \<alpha> \<le> \<phi> then 1 else 0))"
by (simp add: sum.union_inter_neutral [where A="?\<J>\<phi>" and B="?n\<J>\<phi>"])
moreover have "\<J> = ?\<J>\<phi> \<union> ?n\<J>\<phi>" by auto
ultimately show ?thesis
by auto
qed
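text \<open> Writing $[\alpha \leq \varphi]$ for the indicator that is $1$ when
$\alpha \leq \varphi$ and $0$ otherwise, the lemma above reads
$\mathcal{P}(\varphi) = \sum_{\alpha \in \mathcal{J}} \mathcal{P}(\alpha)\,[\alpha \leq \varphi]$:
on a finite Boolean algebra every probability is a weighted sum of the Dirac measures
attached to the join-prime elements, and taking $\varphi = \top$ shows the weights sum
to $1$, so the sum is in fact a convex combination. \<close>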
lemma dirac_measure_to_join_prime:
assumes "\<delta> \<in> dirac_measures"
shows "\<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<in> \<J>"
(is "?\<alpha> \<in> \<J>")
proof -
have "\<delta> \<in> probabilities"
using
\<open>\<delta> \<in> dirac_measures\<close>
probabilities_def
unfolding dirac_measures_def
by blast
interpret finitely_additive_probability \<delta>
using \<open>\<delta> \<in> probabilities\<close>
unfolding probabilities_def'
by auto
have "\<forall> \<phi> \<in> { \<phi> . \<delta> \<phi> = 1 }. \<delta> \<phi> = 1"
(is "\<forall> \<phi> \<in> ?A. \<delta> \<phi> = 1")
by auto
hence "\<delta> ?\<alpha> = 1"
using finite_certainty Inf_def finite
by presburger
hence "?\<alpha> \<noteq> \<bottom>"
using gaines_weatherson_antithesis
by auto
moreover
{
fix y z
assume "?\<alpha> \<le> y \<squnion> z"
hence "1 \<le> \<delta> (y \<squnion> z)"
using \<open>\<delta> ?\<alpha> = 1\<close> monotonicity
by fastforce
hence "\<delta> (y \<squnion> z) = 1"
by (metis
probability_unity
monotonicity
sup.cobounded2
sup_top_left
order_class.eq_iff)
moreover have "\<delta> y = 0 \<Longrightarrow> \<delta> z = 0 \<Longrightarrow> \<delta> (y \<squnion> z) = 0"
by (metis
add.right_neutral
add_diff_cancel_left'
diff_ge_0_iff_ge
probability_non_negative
sum_rule
order_class.eq_iff)
ultimately have "\<delta> y \<noteq> 0 \<or> \<delta> z \<noteq> 0"
by linarith
hence "\<delta> y = 1 \<or> \<delta> z = 1"
using \<open>\<delta> \<in> dirac_measures\<close>
unfolding dirac_measures_def
by auto
hence "y \<in> ?A \<or> z \<in> ?A"
by auto
hence "?\<alpha> \<le> y \<or> ?\<alpha> \<le> z"
using Inf_lower by auto
}
ultimately show ?thesis
unfolding join_primes_def join_prime_def
by auto
qed
lemma dirac_to_join_prime_ident:
assumes "\<delta> \<in> dirac_measures"
shows "(\<lambda> \<phi>. if \<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> \<phi> then 1 else 0) = \<delta>"
proof
have "\<delta> \<in> probabilities"
using
\<open>\<delta> \<in> dirac_measures\<close>
probabilities_def
unfolding dirac_measures_def
by blast
interpret finitely_additive_probability \<delta>
using \<open>\<delta> \<in> probabilities\<close>
unfolding probabilities_def'
by auto
fix \<phi>
show "(if \<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> \<phi> then 1 else 0) = \<delta> \<phi>"
proof (cases "\<delta> \<phi> = 1")
case True
hence "\<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> \<phi>"
by (fastforce simp add: Inf_lower)
hence "(if \<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> \<phi> then 1 else 0) = 1"
by auto
then show ?thesis
using \<open>\<delta> \<phi> = 1\<close>
by simp
next
have "join_prime (\<Sqinter> { \<phi> . \<delta> \<phi> = 1 })"
using
\<open>\<delta> \<in> dirac_measures\<close>
dirac_measure_to_join_prime
unfolding join_primes_def
by blast
case False
hence "\<delta> \<phi> = 0"
using \<open>\<delta> \<in> dirac_measures\<close>
unfolding dirac_measures_def
by auto
hence "\<delta> (- \<phi>) = 1"
using complementation
by auto
hence "\<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> - \<phi>"
by (fastforce simp add: Inf_lower)
hence "\<not> (\<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> \<phi>)"
using \<open>join_prime (\<Sqinter> { \<phi> . \<delta> \<phi> = 1 })\<close>
unfolding join_prime_def
by (metis inf.boundedI inf_compl_bot le_bot)
hence "(if \<Sqinter> { \<phi> . \<delta> \<phi> = 1 } \<le> \<phi> then 1 else 0) = 0"
by auto
then show ?thesis
using \<open>\<delta> \<phi> = 0\<close>
by auto
qed
qed
lemma join_prime_to_dirac_ident:
assumes "\<alpha> \<in> \<J>"
shows "\<Sqinter>{ \<phi>. (\<lambda> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) \<phi> = (1 :: real)} = \<alpha>"
(is "?\<alpha> = \<alpha>")
proof (rule antisym)
have "\<alpha> \<in> { \<phi>. (\<lambda> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) \<phi> = 1 }"
by simp
thus "?\<alpha> \<le> \<alpha>"
by (simp add: Inf_lower)
next
{
fix \<phi>
assume "\<phi> \<in> { \<phi>. (\<lambda> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) \<phi> = (1 :: real) }"
hence "(if \<alpha> \<le> \<phi> then 1 else 0) = (1 :: real)"
by fastforce
hence "\<alpha> \<le> \<phi>"
by (meson zero_neq_one)
}
hence "\<forall> \<phi> \<in> { \<phi>. (\<lambda> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) \<phi> = (1 :: real) } . \<alpha> \<le> \<phi>"
by blast
thus "\<alpha> \<le> ?\<alpha>"
using Inf_greatest by blast
qed
lemma dirac_join_prime_bij_betw:
"bij_betw (\<lambda> \<alpha> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0 :: real) \<J> dirac_measures"
unfolding bij_betw_def
proof
obtain to_\<delta> where to_\<delta>_def:
"to_\<delta> = (\<lambda> \<alpha> \<phi> . if \<alpha> \<le> \<phi> then 1 else 0 :: real)" by auto
{
fix \<alpha>\<^sub>1 \<alpha>\<^sub>2
assume
"\<alpha>\<^sub>1 \<in> \<J>"
"\<alpha>\<^sub>2 \<in> \<J>"
"to_\<delta> \<alpha>\<^sub>1 = to_\<delta> \<alpha>\<^sub>2"
moreover from this have
" \<Sqinter>{ \<phi>. (\<lambda> \<phi>. if \<alpha>\<^sub>1 \<le> \<phi> then 1 else 0) \<phi> = (1 :: real) }
= \<Sqinter>{ \<phi>. (\<lambda> \<phi>. if \<alpha>\<^sub>2 \<le> \<phi> then 1 else 0) \<phi> = (1 :: real) }"
unfolding to_\<delta>_def
by metis
ultimately have "\<alpha>\<^sub>1 = \<alpha>\<^sub>2"
using
join_prime_to_dirac_ident [of \<alpha>\<^sub>1]
join_prime_to_dirac_ident [of \<alpha>\<^sub>2]
by presburger
}
hence "inj_on to_\<delta> \<J>"
unfolding inj_on_def
by blast
thus "inj_on (\<lambda> \<alpha> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0 :: real) \<J>"
unfolding to_\<delta>_def
by blast
next
show "(\<lambda>\<alpha> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) ` \<J> = dirac_measures"
proof
{
fix \<alpha>
assume "\<alpha> \<in> \<J>"
hence "(\<lambda>\<phi>. if \<alpha> \<le> \<phi> then 1 else 0) \<in> dirac_measures"
using join_prime_to_dirac_measure by blast
}
thus "(\<lambda>\<alpha> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) ` \<J> \<subseteq> dirac_measures" by blast
next
{
fix \<delta>
assume "\<delta> \<in> dirac_measures"
let ?\<alpha> = "\<Sqinter> { \<phi> . \<delta> \<phi> = 1 }"
have "?\<alpha> \<in> \<J>"
using \<open>\<delta> \<in> dirac_measures\<close> dirac_measure_to_join_prime by blast
moreover have "(\<lambda>\<phi>. if ?\<alpha> \<le> \<phi> then 1 else 0) = \<delta>"
using \<open>\<delta> \<in> dirac_measures\<close> dirac_to_join_prime_ident by blast
ultimately have "\<delta> \<in> (\<lambda>\<alpha> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) ` \<J>"
using image_iff by fastforce
}
thus "dirac_measures \<subseteq> (\<lambda>\<alpha> \<phi>. if \<alpha> \<le> \<phi> then 1 else 0) ` \<J>"
using subsetI
by blast
qed
qed
lemma dirac_join_prime_bij_betw_alt:
"bij_betw (\<lambda> \<delta>. \<Sqinter> { \<phi> . \<delta> \<phi> = 1 }) dirac_measures \<J>"
(is "bij_betw ?to_\<J> _ _")
unfolding bij_betw_def
proof
{
fix \<delta>\<^sub>1 \<delta>\<^sub>2
assume
"\<delta>\<^sub>1 \<in> dirac_measures"
"\<delta>\<^sub>2 \<in> dirac_measures"
"?to_\<J> \<delta>\<^sub>1 = ?to_\<J> \<delta>\<^sub>2"
moreover from this have
"(\<lambda> \<phi>. if ?to_\<J> \<delta>\<^sub>1 \<le> \<phi> then 1 else 0) = \<delta>\<^sub>1"
"(\<lambda> \<phi>. if ?to_\<J> \<delta>\<^sub>2 \<le> \<phi> then 1 else 0) = \<delta>\<^sub>2"
using dirac_to_join_prime_ident by blast+
ultimately have "\<delta>\<^sub>1 = \<delta>\<^sub>2"
by presburger
}
thus "inj_on ?to_\<J> dirac_measures"
unfolding inj_on_def
by auto
next
show "?to_\<J> ` dirac_measures = \<J>"
proof
show "(\<lambda>\<delta>. \<Sqinter> {\<phi>. \<delta> \<phi> = 1}) ` dirac_measures \<subseteq> \<J>"
using dirac_measure_to_join_prime by blast
next
{
fix \<alpha> :: 'a
assume "\<alpha> \<in> \<J>"
hence "(\<lambda>\<phi>. if \<alpha> \<le> \<phi> then 1 else 0 :: real) \<in> dirac_measures"
using join_prime_to_dirac_measure by blast
moreover have "?to_\<J> (\<lambda>\<phi>. if \<alpha> \<le> \<phi> then 1 else 0 :: real) = \<alpha>"
by (simp add: \<open>\<alpha> \<in> \<J>\<close> join_prime_to_dirac_ident)
ultimately have "\<alpha> \<in> ?to_\<J> ` dirac_measures"
using image_iff by fastforce
}
thus "\<J> \<subseteq> (\<lambda>\<delta>. \<Sqinter> {\<phi>. \<delta> \<phi> = 1}) ` dirac_measures"
using subsetI
by blast
qed
qed
lemma special_dirac_collapse:
"(\<forall> \<P> \<in> probabilities. (\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) + c \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<P> \<gamma>))
= (\<forall> \<P> \<in> dirac_measures. (\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) + \<lceil>c\<rceil> \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<P> \<gamma>))"
proof
assume \<star>: "\<forall> \<P> \<in> probabilities. (\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) + c \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<P> \<gamma>)"
{
fix \<delta>
assume "\<delta> \<in> dirac_measures"
hence "\<forall> \<phi>. \<delta> \<phi> = 1 \<or> \<delta> \<phi> = 0"
using dirac_measures_def by blast
have A: "(\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>) = \<lceil>\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>\<rceil>"
proof (induct \<Phi>)
case Nil
then show ?case using \<open>\<forall> \<phi>. \<delta> \<phi> = 1 \<or> \<delta> \<phi> = 0\<close> by simp
next
case (Cons \<phi> \<Phi>)
then show ?case
proof (cases "\<delta> \<phi> = 0")
case True
then show ?thesis
using Cons.hyps by fastforce
next
case False
hence "\<delta> \<phi> = 1"
using \<open>\<forall>\<phi>. \<delta> \<phi> = 1 \<or> \<delta> \<phi> = 0\<close> by blast
then show ?thesis
by (simp,
metis
Cons.hyps
add.commute
ceiling_add_one
of_int_1
of_int_add)
qed
qed
have B: "(\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>) = \<lceil>\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>\<rceil>"
proof (induct \<Gamma>)
case Nil
then show ?case using \<open>\<forall> \<phi>. \<delta> \<phi> = 1 \<or> \<delta> \<phi> = 0\<close> by simp
next
case (Cons \<gamma> \<Gamma>)
then show ?case
proof (cases "\<delta> \<gamma> = 0")
case True
then show ?thesis
using Cons.hyps by fastforce
next
case False
hence "\<delta> \<gamma> = 1"
using \<open>\<forall>\<phi>. \<delta> \<phi> = 1 \<or> \<delta> \<phi> = 0\<close> by blast
then show ?thesis
by (simp,
metis
Cons.hyps
add.commute
ceiling_add_one
of_int_1
of_int_add)
qed
qed
have "\<delta> \<in> probabilities"
using \<open>\<delta> \<in> dirac_measures\<close> dirac_measures_subset by auto
hence C: "(\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>) + c \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>)"
using \<star>
by blast
from A B C have "\<lceil>(\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>)\<rceil> + c \<le> \<lceil>(\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>)\<rceil>"
by simp
hence "\<lceil>(\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>)\<rceil> + \<lceil>c\<rceil> \<le> \<lceil>(\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>)\<rceil>"
by linarith
hence "(\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>) + \<lceil>c\<rceil> \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>)"
using A B C by simp
}
thus "\<forall> \<delta> \<in> dirac_measures. (\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>) + \<lceil>c\<rceil> \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>)"
by auto
next
assume \<star>: "\<forall> \<delta> \<in> dirac_measures. (\<Sum>\<phi>\<leftarrow>\<Phi>. \<delta> \<phi>) + \<lceil>c\<rceil> \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<delta> \<gamma>)"
let ?to_\<delta> = "\<lambda> \<alpha> \<phi> :: 'a. if \<alpha> \<le> \<phi> then 1 :: real else 0"
{
fix \<P>
assume "\<P> \<in> probabilities"
from this interpret
finitely_additive_probability \<P>
unfolding probabilities_def'
by auto
have "finite \<J>" by simp
{
fix \<Phi> :: "'a list"
{
fix A :: "'a set"
assume "finite A"
have "(\<Sum>\<phi> \<leftarrow> \<Phi>. (\<Sum>\<alpha> \<in> A. \<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>))
= (\<Sum>\<alpha> \<in> A. \<P> \<alpha> * (\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>))"
proof (induct \<Phi>)
case Nil
then show ?case by simp
next
case (Cons \<phi>' \<Phi>)
with \<open>finite A\<close> show ?case
proof (induct A rule: finite_induct)
case empty
then show ?case by simp
next
case (insert a A)
have
"(\<Sum>\<phi>\<leftarrow>\<phi>' # \<Phi>. \<Sum>\<alpha>\<in>insert a A. \<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>)
= (\<Sum>\<alpha>\<in>insert a A. \<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>')
+ (\<Sum>\<phi>\<leftarrow>\<Phi>. \<Sum>\<alpha>\<in>insert a A. \<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>)"
by simp
also have
"\<dots> = (\<Sum>\<alpha>\<in>insert a A. \<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>')
+ (\<Sum>\<alpha>\<in>insert a A. \<P> \<alpha> * (\<Sum>\<phi>\<leftarrow>\<Phi>. ?to_\<delta> \<alpha> \<phi>))"
using insert.prems by linarith
also have
"\<dots> = (\<Sum>\<alpha>\<in>insert a A. (\<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>')
+ \<P> \<alpha> * (\<Sum>\<phi>\<leftarrow>\<Phi>. ?to_\<delta> \<alpha> \<phi>))"
by (simp add: sum.distrib)
also have
"\<dots> = (\<Sum>\<alpha>\<in>insert a A. \<P> \<alpha> * (\<Sum>\<phi>\<leftarrow>\<phi>' # \<Phi>. ?to_\<delta> \<alpha> \<phi>))"
by (simp add: distrib_left)
finally show ?case by simp
qed
qed
}
note \<dagger> = this
have "(\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) = (\<Sum>\<phi>\<leftarrow>\<Phi>. (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * ?to_\<delta> \<alpha> \<phi>))"
by (induct \<Phi>,
auto,
metis join_prime_decomposition [OF \<open>\<P> \<in> probabilities\<close>])
hence "(\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) = (\<Sum> \<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>))"
unfolding \<dagger> [OF \<open>finite \<J>\<close>] by auto
}
hence X: "(\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) = (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>))"
and Y: "(\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<P> \<gamma>) = (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<gamma> \<leftarrow> \<Gamma>. ?to_\<delta> \<alpha> \<gamma>))"
by auto
{
fix A :: "'a set"
assume "A \<subseteq> \<J>"
hence "finite A"
by simp
hence "(\<Sum>\<alpha> \<in> A. \<P> \<alpha> * ((\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>) + \<lceil>c\<rceil>))
\<le> (\<Sum>\<alpha> \<in> A. \<P> \<alpha> * (\<Sum>\<gamma> \<leftarrow> \<Gamma>. ?to_\<delta> \<alpha> \<gamma>))"
using \<open>A \<subseteq> \<J>\<close>
proof (induct A rule: finite_induct)
case empty
then show ?case by auto
next
case (insert \<alpha>' A)
hence "\<alpha>' \<in> \<J>"
by blast
hence "?to_\<delta> \<alpha>' \<in> dirac_measures"
using dirac_join_prime_bij_betw
unfolding bij_betw_def
by blast
hence "(\<Sum>\<phi>\<leftarrow>\<Phi>. ?to_\<delta> \<alpha>' \<phi>) + \<lceil>c\<rceil> \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. ?to_\<delta> \<alpha>' \<gamma>)"
using \<star> by blast
moreover have "0 \<le> \<P> \<alpha>'"
by (simp add: probability_non_negative)
ultimately have
"\<P> \<alpha>' * ((\<Sum>\<phi>\<leftarrow>\<Phi>. ?to_\<delta> \<alpha>' \<phi>) + \<lceil>c\<rceil>) \<le> \<P> \<alpha>' * (\<Sum>\<gamma>\<leftarrow>\<Gamma>. ?to_\<delta> \<alpha>' \<gamma>)"
using mult_left_mono by blast
moreover have
"(\<Sum>\<alpha> \<in> A. \<P> \<alpha> * ((\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>) + \<lceil>c\<rceil>))
\<le> (\<Sum>\<alpha> \<in> A. \<P> \<alpha> * (\<Sum>\<gamma> \<leftarrow> \<Gamma>. ?to_\<delta> \<alpha> \<gamma>))"
using insert.hyps insert.prems by blast
ultimately show ?case
using insert.hyps(2) by auto
qed
}
hence A:
"(\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * ((\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>) + \<lceil>c\<rceil>))
\<le> (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<gamma> \<leftarrow> \<Gamma>. ?to_\<delta> \<alpha> \<gamma>))"
by blast
{
fix A :: "'a set"
assume "finite A"
hence
"(\<Sum>\<alpha> \<in> A. \<P> \<alpha> * ((\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>) + \<lceil>c\<rceil>))
= (\<Sum>\<alpha> \<in> A. \<P> \<alpha> * (\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>)) + \<lceil>c\<rceil> * (\<Sum>\<alpha> \<in> A. \<P> \<alpha>)"
by (induct A rule: finite_induct, simp, simp add: distrib_left)
}
with A \<open>finite \<J>\<close> have B:
"(\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>)) + \<lceil>c\<rceil> * (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha>)
\<le> (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<gamma> \<leftarrow> \<Gamma>. ?to_\<delta> \<alpha> \<gamma>))"
by auto
have "(\<Sum>\<alpha>\<in>\<J>. \<P> \<alpha>) = 1"
using
join_prime_decomposition [OF \<open>\<P> \<in> probabilities\<close>, where \<phi>="\<top>"]
top_greatest
unfolding probability_unity
by auto
hence "(\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<phi> \<leftarrow> \<Phi>. ?to_\<delta> \<alpha> \<phi>)) + \<lceil>c\<rceil>
\<le> (\<Sum>\<alpha> \<in> \<J>. \<P> \<alpha> * (\<Sum>\<gamma> \<leftarrow> \<Gamma>. ?to_\<delta> \<alpha> \<gamma>))"
using B by auto
hence "(\<Sum>\<phi> \<leftarrow> \<Phi>. \<P> \<phi>) + c \<le> (\<Sum>\<gamma> \<leftarrow> \<Gamma>. \<P> \<gamma>)"
using X Y
by linarith
}
thus "\<forall> \<P> \<in> probabilities. (\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>) + c \<le> (\<Sum>\<gamma>\<leftarrow>\<Gamma>. \<P> \<gamma>)" by auto
qed
end
end
|
Require Import Nijn.Nijn.
Open Scope poly_scope.
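(* A small higher-order AFS for map over lists: the two rewrite rules below are
   the usual map equations, and map_fun_poly gives a polynomial interpretation
   from which solve_poly_SN certifies strong normalisation. *)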
Inductive base_types :=
| Ca
| Clist.
Global Instance decEq_base_types : decEq base_types.
Proof.
decEq_finite.
Defined.
Definition a := Base Ca.
Definition list := Base Clist.
Inductive fun_symbols :=
| Tcons
| Tmap
| Tnil.
Global Instance decEq_fun_symbols : decEq fun_symbols.
Proof.
decEq_finite.
Defined.
Definition fn_arity fn_symbols :=
match fn_symbols with
| Tcons => a ⟶ list ⟶ list
| Tmap => list ⟶ (a ⟶ a) ⟶ list
| Tnil => list
end.
Definition cons {C} : tm fn_arity C _ := BaseTm Tcons.
Definition map {C} : tm fn_arity C _ := BaseTm Tmap.
Definition nil {C} : tm fn_arity C _ := BaseTm Tnil.
Program Definition rule_0 :=
make_rewrite
(_ ,, ∙) _
(map · nil · V 0)
nil.
Program Definition rule_1 :=
make_rewrite
(_ ,, _ ,, _ ,, ∙) _
(map · (cons · V 0 · V 1) · V 2)
(cons · (V 2 · V 0) · (map · V 1 · V 2))
Definition trs :=
make_afs
fn_arity
(rule_0 :: rule_1 :: List.nil).
Definition map_fun_poly fn_symbols : poly ∙ (arity trs fn_symbols) :=
match fn_symbols with
| Tnil => to_Poly (P_const 3)
| Tcons => λP λP let y1 := P_var Vz in
to_Poly (P_const 3
+ P_const 2 * y1)
| Tmap => λP let y0 := P_var (Vs Vz) in λP let G1 := P_var Vz in
to_Poly (P_const 3 * y0
+ P_const 3 * y0 * (G1 ·P (y0)))
end.
Definition trs_isSN : isSN trs.
Proof.
solve_poly_SN map_fun_poly.
Qed.
|
record Pack (a : Type) where
constructor MkPack
nested : a
nat : Nat
proj : List (Pack Bool) -> List (Bool, Nat)
proj [] = []
proj (x :: xs) = MkPair x.nested (nat x) :: proj xs
ununpack : List (Pack (Pack a)) -> List a
ununpack = map (.nested.nested)
deepNats : List (Pack (Pack a)) -> List Nat
deepNats = map ((.nat) . (.nested))
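-- Illustrative evaluations (example values assumed, not part of the original file):
--   proj [MkPack True 1, MkPack False 2]  -- [(True, 1), (False, 2)]
--   ununpack [MkPack (MkPack "x" 3) 7]    -- ["x"]
--   deepNats [MkPack (MkPack "x" 3) 7]    -- [3]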
|
Set Implicit Arguments.
Unset Strict Implicit.
Require Import Coq.micromega.Lia.
Require Import List.
Import ListNotations.
Definition surjective (X Y : Type) (f : X -> Y) : Prop :=
forall y, exists x, f x = y.
Theorem Cantor X : ~ exists f : X -> X -> Prop,
surjective f.
Proof.
intros [f A].
pose (g := fun x => ~ f x x).
destruct (A g) as [x B].
assert (C : g x <-> f x x).
{ rewrite B. intuition. }
unfold g in C.
intuition.
Qed.
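(* The usual diagonal argument: g is the predicate fun x => ~ f x x; a witness x
   with f x = g would give f x x <-> ~ f x x, which is contradictory. *)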
|
module Issue175 where
data List (A : Set) : Set where
[] : List A
_∷_ : A → List A → List A
{-# BUILTIN LIST List #-}
{-# BUILTIN NIL [] #-}
{-# BUILTIN CONS _∷_ #-}
data _≡_ {A : Set} (x : A) : A → Set where
refl : x ≡ x
postulate
Char : Set
String : Set
{-# BUILTIN CHAR Char #-}
{-# BUILTIN STRING String #-}
primitive primStringToList : String → List Char
lemma : primStringToList "0" ≡ ('0' ∷ [])
lemma = refl
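-- The BUILTIN bindings let primStringToList compute on the literal "0" during
-- type checking, so refl suffices for the proof.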
|
[STATEMENT]
lemma nat_of_integer_of_nat [simp]:
"nat_of_integer (of_nat n) = n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nat_of_integer (of_nat n) = n
[PROOF STEP]
by transfer simp
|
[GOAL]
p n k : ℕ
⊢ ↑(factorization (choose n k)) p ≤ log p n
[PROOFSTEP]
by_cases h : (choose n k).factorization p = 0
[GOAL]
case pos
p n k : ℕ
h : ↑(factorization (choose n k)) p = 0
⊢ ↑(factorization (choose n k)) p ≤ log p n
[PROOFSTEP]
simp [h]
[GOAL]
case neg
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
⊢ ↑(factorization (choose n k)) p ≤ log p n
[PROOFSTEP]
have hp : p.Prime := Not.imp_symm (choose n k).factorization_eq_zero_of_non_prime h
[GOAL]
case neg
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
hp : Prime p
⊢ ↑(factorization (choose n k)) p ≤ log p n
[PROOFSTEP]
have hkn : k ≤ n := by
refine' le_of_not_lt fun hnk => h _
simp [choose_eq_zero_of_lt hnk]
[GOAL]
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
hp : Prime p
⊢ k ≤ n
[PROOFSTEP]
refine' le_of_not_lt fun hnk => h _
[GOAL]
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
hp : Prime p
hnk : n < k
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
simp [choose_eq_zero_of_lt hnk]
[GOAL]
case neg
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
hp : Prime p
hkn : k ≤ n
⊢ ↑(factorization (choose n k)) p ≤ log p n
[PROOFSTEP]
rw [factorization_def _ hp, @padicValNat_def _ ⟨hp⟩ _ (choose_pos hkn)]
[GOAL]
case neg
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
hp : Prime p
hkn : k ≤ n
⊢ Part.get (multiplicity p (choose n k)) (_ : multiplicity.Finite p (choose n k)) ≤ log p n
[PROOFSTEP]
simp only [hp.multiplicity_choose hkn (lt_add_one _), PartENat.get_natCast]
[GOAL]
case neg
p n k : ℕ
h : ¬↑(factorization (choose n k)) p = 0
hp : Prime p
hkn : k ≤ n
⊢ Finset.card (Finset.filter (fun i => p ^ i ≤ k % p ^ i + (n - k) % p ^ i) (Finset.Ico 1 (log p n + 1))) ≤ log p n
[PROOFSTEP]
refine (Finset.card_filter_le _ _).trans (le_of_eq (Nat.card_Ico _ _))
[GOAL]
p n k : ℕ
p_large : n < p ^ 2
⊢ ↑(factorization (choose n k)) p ≤ 1
[PROOFSTEP]
apply factorization_choose_le_log.trans
[GOAL]
p n k : ℕ
p_large : n < p ^ 2
⊢ log p n ≤ 1
[PROOFSTEP]
rcases eq_or_ne n 0 with (rfl | hn0)
[GOAL]
case inl
p k : ℕ
p_large : 0 < p ^ 2
⊢ log p 0 ≤ 1
[PROOFSTEP]
simp
[GOAL]
case inr
p n k : ℕ
p_large : n < p ^ 2
hn0 : n ≠ 0
⊢ log p n ≤ 1
[PROOFSTEP]
exact lt_succ_iff.1 (log_lt_of_lt_pow hn0 p_large)
[GOAL]
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
cases' em' p.Prime with hp hp
[GOAL]
case inl
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : ¬Prime p
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
exact factorization_eq_zero_of_non_prime (choose n k) hp
[GOAL]
case inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
cases' lt_or_le n k with hnk hkn
[GOAL]
case inr.inl
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hnk : n < k
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
simp [choose_eq_zero_of_lt hnk]
[GOAL]
case inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
rw [factorization_def _ hp, @padicValNat_def _ ⟨hp⟩ _ (choose_pos hkn)]
[GOAL]
case inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
⊢ Part.get (multiplicity p (choose n k)) (_ : multiplicity.Finite p (choose n k)) = 0
[PROOFSTEP]
simp only [hp.multiplicity_choose hkn (lt_add_one _), PartENat.get_natCast, Finset.card_eq_zero,
Finset.filter_eq_empty_iff, not_le]
[GOAL]
case inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
⊢ ∀ (x : ℕ), x ∈ Finset.Ico 1 (log p n + 1) → k % p ^ x + (n - k) % p ^ x < p ^ x
[PROOFSTEP]
intro i hi
[GOAL]
case inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
i : ℕ
hi : i ∈ Finset.Ico 1 (log p n + 1)
⊢ k % p ^ i + (n - k) % p ^ i < p ^ i
[PROOFSTEP]
rcases eq_or_lt_of_le (Finset.mem_Ico.mp hi).1 with (rfl | hi)
[GOAL]
case inr.inr.inl
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
hi : 1 ∈ Finset.Ico 1 (log p n + 1)
⊢ k % p ^ 1 + (n - k) % p ^ 1 < p ^ 1
[PROOFSTEP]
rw [pow_one, ← add_lt_add_iff_left (2 * p), ← succ_mul, two_mul, add_add_add_comm]
[GOAL]
case inr.inr.inl
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
hi : 1 ∈ Finset.Ico 1 (log p n + 1)
⊢ p + k % p + (p + (n - k) % p) < succ 2 * p
[PROOFSTEP]
exact
lt_of_le_of_lt
(add_le_add (add_le_add_right (le_mul_of_one_le_right' ((one_le_div_iff hp.pos).mpr hk)) (k % p))
(add_le_add_right (le_mul_of_one_le_right' ((one_le_div_iff hp.pos).mpr hk')) ((n - k) % p)))
(by rwa [div_add_mod, div_add_mod, add_tsub_cancel_of_le hkn])
[GOAL]
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
hi : 1 ∈ Finset.Ico 1 (log p n + 1)
⊢ p * (k / p) + k % p + (p * ((n - k) / p) + (n - k) % p) < succ 2 * p
[PROOFSTEP]
rwa [div_add_mod, div_add_mod, add_tsub_cancel_of_le hkn]
[GOAL]
case inr.inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
i : ℕ
hi✝ : i ∈ Finset.Ico 1 (log p n + 1)
hi : 1 < i
⊢ k % p ^ i + (n - k) % p ^ i < p ^ i
[PROOFSTEP]
replace hn : n < p ^ i
[GOAL]
case hn
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
i : ℕ
hi✝ : i ∈ Finset.Ico 1 (log p n + 1)
hi : 1 < i
⊢ n < p ^ i
case inr.inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hp : Prime p
hkn : k ≤ n
i : ℕ
hi✝ : i ∈ Finset.Ico 1 (log p n + 1)
hi : 1 < i
hn : n < p ^ i
⊢ k % p ^ i + (n - k) % p ^ i < p ^ i
[PROOFSTEP]
have : 3 ≤ p := lt_of_le_of_ne hp.two_le hp'.symm
[GOAL]
case hn
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hn : n < 3 * p
hp : Prime p
hkn : k ≤ n
i : ℕ
hi✝ : i ∈ Finset.Ico 1 (log p n + 1)
hi : 1 < i
this : 3 ≤ p
⊢ n < p ^ i
[PROOFSTEP]
calc
n < 3 * p := hn
_ ≤ p * p := (mul_le_mul_right' this p)
_ = p ^ 2 := (sq p).symm
_ ≤ p ^ i := pow_le_pow hp.one_lt.le hi
[GOAL]
case inr.inr.inr
p n k : ℕ
hp' : p ≠ 2
hk : p ≤ k
hk' : p ≤ n - k
hp : Prime p
hkn : k ≤ n
i : ℕ
hi✝ : i ∈ Finset.Ico 1 (log p n + 1)
hi : 1 < i
hn : n < p ^ i
⊢ k % p ^ i + (n - k) % p ^ i < p ^ i
[PROOFSTEP]
rwa [mod_eq_of_lt (lt_of_le_of_lt hkn hn), mod_eq_of_lt (lt_of_le_of_lt tsub_le_self hn), add_tsub_cancel_of_le hkn]
[GOAL]
p n k : ℕ
n_big : 2 < n
p_le_n : p ≤ n
big : 2 * n < 3 * p
⊢ ↑(factorization (centralBinom n)) p = 0
[PROOFSTEP]
refine' factorization_choose_of_lt_three_mul _ p_le_n (p_le_n.trans _) big
[GOAL]
case refine'_1
p n k : ℕ
n_big : 2 < n
p_le_n : p ≤ n
big : 2 * n < 3 * p
⊢ p ≠ 2
[PROOFSTEP]
rintro rfl
[GOAL]
case refine'_1
n k : ℕ
n_big : 2 < n
p_le_n : 2 ≤ n
big : 2 * n < 3 * 2
⊢ False
[PROOFSTEP]
linarith
[GOAL]
case refine'_2
p n k : ℕ
n_big : 2 < n
p_le_n : p ≤ n
big : 2 * n < 3 * p
⊢ n ≤ 2 * n - n
[PROOFSTEP]
rw [two_mul, add_tsub_cancel_left]
[GOAL]
p n k : ℕ
h : n < p
⊢ ↑(factorization n !) p = 0
[PROOFSTEP]
induction' n with n hn
[GOAL]
case zero
p n k : ℕ
h✝ : n < p
h : zero < p
⊢ ↑(factorization zero !) p = 0
[PROOFSTEP]
simp
[GOAL]
case succ
p n✝ k : ℕ
h✝ : n✝ < p
n : ℕ
hn : n < p → ↑(factorization n !) p = 0
h : succ n < p
⊢ ↑(factorization (succ n)!) p = 0
[PROOFSTEP]
rw [factorial_succ, factorization_mul n.succ_ne_zero n.factorial_ne_zero, Finsupp.coe_add, Pi.add_apply,
hn (lt_of_succ_lt h), add_zero, factorization_eq_zero_of_lt h]
[GOAL]
p n k : ℕ
h : n < p
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
by_cases hnk : n < k
[GOAL]
case pos
p n k : ℕ
h : n < p
hnk : n < k
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
simp [choose_eq_zero_of_lt hnk]
[GOAL]
case neg
p n k : ℕ
h : n < p
hnk : ¬n < k
⊢ ↑(factorization (choose n k)) p = 0
[PROOFSTEP]
rw [choose_eq_factorial_div_factorial (le_of_not_lt hnk),
factorization_div (factorial_mul_factorial_dvd_factorial (le_of_not_lt hnk)), Finsupp.coe_tsub, Pi.sub_apply,
factorization_factorial_eq_zero_of_lt h, zero_tsub]
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
⊢ ∏ p in Finset.range (n + 1), p ^ ↑(factorization (choose n k)) p = choose n k
[PROOFSTEP]
conv =>
-- Porting note: was `nth_rw_rhs`
rhs
rw [← factorization_prod_pow_eq_self (choose_pos hkn).ne']
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
| ∏ p in Finset.range (n + 1), p ^ ↑(factorization (choose n k)) p = choose n k
[PROOFSTEP]
rhs
rw [← factorization_prod_pow_eq_self (choose_pos hkn).ne']
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
| ∏ p in Finset.range (n + 1), p ^ ↑(factorization (choose n k)) p = choose n k
[PROOFSTEP]
rhs
rw [← factorization_prod_pow_eq_self (choose_pos hkn).ne']
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
| ∏ p in Finset.range (n + 1), p ^ ↑(factorization (choose n k)) p = choose n k
[PROOFSTEP]
rhs
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
| choose n k
[PROOFSTEP]
rw [← factorization_prod_pow_eq_self (choose_pos hkn).ne']
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
⊢ ∏ p in Finset.range (n + 1), p ^ ↑(factorization (choose n k)) p =
Finsupp.prod (factorization (choose n k)) fun x x_1 => x ^ x_1
[PROOFSTEP]
rw [eq_comm]
[GOAL]
p n✝ k✝ n k : ℕ
hkn : k ≤ n
⊢ (Finsupp.prod (factorization (choose n k)) fun x x_1 => x ^ x_1) =
∏ p in Finset.range (n + 1), p ^ ↑(factorization (choose n k)) p
[PROOFSTEP]
apply Finset.prod_subset
[GOAL]
case h
p n✝ k✝ n k : ℕ
hkn : k ≤ n
⊢ (factorization (choose n k)).support ⊆ Finset.range (n + 1)
[PROOFSTEP]
intro p hp
[GOAL]
case h
p✝ n✝ k✝ n k : ℕ
hkn : k ≤ n
p : ℕ
hp : p ∈ (factorization (choose n k)).support
⊢ p ∈ Finset.range (n + 1)
[PROOFSTEP]
rw [Finset.mem_range]
[GOAL]
case h
p✝ n✝ k✝ n k : ℕ
hkn : k ≤ n
p : ℕ
hp : p ∈ (factorization (choose n k)).support
⊢ p < n + 1
[PROOFSTEP]
contrapose! hp
[GOAL]
case h
p✝ n✝ k✝ n k : ℕ
hkn : k ≤ n
p : ℕ
hp : n + 1 ≤ p
⊢ ¬p ∈ (factorization (choose n k)).support
[PROOFSTEP]
rw [Finsupp.mem_support_iff, Classical.not_not, factorization_choose_eq_zero_of_lt hp]
[GOAL]
case hf
p n✝ k✝ n k : ℕ
hkn : k ≤ n
⊢ ∀ (x : ℕ),
x ∈ Finset.range (n + 1) →
¬x ∈ (factorization (choose n k)).support → (fun x x_1 => x ^ x_1) x (↑(factorization (choose n k)) x) = 1
[PROOFSTEP]
intro p _ h2
[GOAL]
case hf
p✝ n✝ k✝ n k : ℕ
hkn : k ≤ n
p : ℕ
a✝ : p ∈ Finset.range (n + 1)
h2 : ¬p ∈ (factorization (choose n k)).support
⊢ (fun x x_1 => x ^ x_1) p (↑(factorization (choose n k)) p) = 1
[PROOFSTEP]
simp [Classical.not_not.1 (mt Finsupp.mem_support_iff.2 h2)]
[GOAL]
p n✝ k n : ℕ
⊢ ∏ p in Finset.range (2 * n + 1), p ^ ↑(factorization (centralBinom n)) p = centralBinom n
[PROOFSTEP]
apply prod_pow_factorization_choose
[GOAL]
case hkn
p n✝ k n : ℕ
⊢ n ≤ 2 * n
[PROOFSTEP]
linarith
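-- Each [GOAL] block above records the proof state (hypotheses and ⊢ goal) just
-- before the tactic shown in the [PROOFSTEP] block that follows it.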
|
#eval [1, 2, 3].map (·, 1)
#eval (·, ·) 1 2
#eval (·, ·, ·) 1 2 3
theorem ex1 : [1, 2, 3].map (·, 1) = [(1, 1), (2, 1), (3, 1)] :=
rfl
theorem ex2 : (·, ·) 1 2 = (1, 2) :=
rfl
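-- `(·, 1)` abbreviates `fun x => (x, 1)` and `(·, ·, ·)` abbreviates
-- `fun x y z => (x, y, z)`, so the `#eval`s above print
-- `[(1, 1), (2, 1), (3, 1)]`, `(1, 2)` and `(1, 2, 3)`.
-- A further check in the same style, added here as an illustration:
theorem ex3 : (·, ·, ·) 1 2 3 = (1, 2, 3) :=
  rfl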
|