//-------------------------------------------------------------------
// MetaInfo Framework (MIF)
// https://github.com/tdv/mif
// Created: 03.2017
// Copyright (C) 2016-2021 tdv
//-------------------------------------------------------------------
// STD
#include <istream>
#include <memory>
#include <stdexcept>
// BOOST
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
// MIF
#include "mif/application/iconfig.h"
#include "mif/application/id/config.h"
#include "mif/service/creator.h"
#include "mif/service/make.h"
namespace Mif
{
namespace Application
{
namespace Detail
{
namespace
{
class Collection
: public Service::Inherit<Common::ICollection>
{
public:
Collection(boost::property_tree::ptree const &array)
: m_array{array}
, m_cur{std::begin(m_array)}
, m_end{std::end(m_array)}
{
}
private:
boost::property_tree::ptree m_array;
boost::property_tree::ptree::const_iterator m_cur;
boost::property_tree::ptree::const_iterator m_end;
// ICollection
virtual bool Next() override final
{
if (m_cur != m_end)
++m_cur;
return m_cur != m_end;
}
virtual bool IsEmpty() const override final
{
return m_array.empty();
}
virtual void Reset() override final
{
m_cur = std::begin(m_array);
}
virtual Service::IServicePtr Get() override final;
};
class Config
: public Service::Inherit<IConfig>
{
public:
using StreamPtr = std::shared_ptr<std::istream>;
Config(StreamPtr stream)
{
if (!stream)
throw std::invalid_argument{"[Mif::Application::Detail::Config] Empty input stream."};
try
{
boost::property_tree::ptree tree;
boost::property_tree::xml_parser::read_xml(*stream, tree);
if (auto document = tree.get_child_optional("document"))
m_tree = document.get();
}
catch (std::exception const &e)
{
throw std::invalid_argument{"[Mif::Application::Detail::Config] Empty input stream. "
"Failed to parse xml. Error: " + std::string{e.what()}};
}
}
Config(boost::property_tree::ptree const &tree)
: m_tree{tree}
{
}
private:
boost::property_tree::ptree m_tree;
// IConfig
virtual bool Exists(std::string const &path) const override final
{
return m_tree.find(path) != m_tree.not_found();
}
virtual std::string GetValue(std::string const &path) const override final
{
try
{
if (auto value = m_tree.get_optional<std::string>(path))
return value.get();
}
catch (std::exception const &e)
{
throw std::runtime_error{"[Mif::Application::Detail::Config::GetValue] "
"Failed to get value by path \"" + path + "\". Error: " + std::string{e.what()}};
}
throw std::invalid_argument{"[Mif::Application::Detail::Config::GetValue] "
"Failed to get value. Bad path \"" + path + "\""};
}
virtual Service::TIntrusivePtr<IConfig> GetConfig(std::string const &path) const override final
{
try
{
if (auto config = m_tree.get_child_optional(path))
return Service::Make<Config, IConfig>(config.get());
}
catch (std::exception const &e)
{
throw std::runtime_error{"[Mif::Application::Detail::Config::GetConfig] "
"Failed to get config by path \"" + path + "\". Error: " + std::string{e.what()}};
}
return {};
}
virtual Common::ICollectionPtr GetCollection(std::string const &path) const override final
{
try
{
if (auto array = m_tree.get_child_optional(path))
return Service::Make<Collection, Common::ICollection>(array.get());
}
catch (std::exception const &e)
{
throw std::runtime_error{"[Mif::Application::Detail::Config::GetCollection] "
"Failed to get collection by path \"" + path + "\". Error: " + std::string{e.what()}};
}
return {};
}
};
Service::IServicePtr Collection::Get()
{
if (IsEmpty() || m_cur == m_end)
{
throw std::runtime_error{"[Mif::Application::Detail::Collection::Get] "
"Failed to get item. No item."};
}
return Service::Make<Config>(m_cur->second);
}
} // namespace
} // namespace Detail
} // namespace Application
} // namespace Mif
MIF_SERVICE_CREATOR
(
Mif::Application::Id::Service::Config::Xml,
Mif::Application::Detail::Config,
Mif::Application::Detail::Config::StreamPtr
)
|
For a countable family of properties $P_i$: $P_i$ holds almost everywhere for every $i$ if and only if, almost everywhere, $P_i$ holds for every $i$. |
[STATEMENT]
lemma narrow_RefT: "G\<turnstile>RefT R\<succ>T \<Longrightarrow> \<exists>t. T=RefT t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. G\<turnstile>RefT R\<succ>T \<Longrightarrow> \<exists>t. T = RefT t
[PROOF STEP]
apply (ind_cases "G\<turnstile>RefT R\<succ>T")
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. \<And>C D. \<lbrakk>R = ClassT D; T = Class C; G\<turnstile>C\<preceq>\<^sub>C D\<rbrakk> \<Longrightarrow> \<exists>t. T = RefT t
2. \<And>C I. \<lbrakk>R = ClassT C; T = Iface I; \<not> G\<turnstile>C\<leadsto>I\<rbrakk> \<Longrightarrow> \<exists>t. T = RefT t
3. \<And>Ta. \<lbrakk>R = ClassT Object; T = Ta.[]\<rbrakk> \<Longrightarrow> \<exists>t. T = RefT t
4. \<And>I C. \<lbrakk>R = IfaceT I; T = Class C\<rbrakk> \<Longrightarrow> \<exists>t. T = RefT t
5. \<And>I J mrt. \<lbrakk>R = IfaceT I; T = Iface J; imethds G I hidings imethds G J entails \<lambda>(md, mh) (md', mh'). G\<turnstile>mrt mh\<preceq>mrt mh'; (I, J) \<notin> (subint1 G)\<^sup>*\<rbrakk> \<Longrightarrow> \<exists>t. T = RefT t
6. \<And>S Ta. \<lbrakk>R = ArrayT (RefT S); T = RefT Ta.[]; G\<turnstile>RefT S\<succ>RefT Ta\<rbrakk> \<Longrightarrow> \<exists>t. T = RefT t
[PROOF STEP]
by auto |
\documentclass{standalone}
\begin{document}
\subsection{Thresholding}
The thresholding approach is very simple: it segments a scalar image by creating a binary partitioning of the image intensities~\cite{ART:Pham}. It can be applied to an image to distinguish regions with contrasting intensities and thus differentiate between the tissue regions represented within the image~\cite{INP:Withey}. \figurename\,\ref{fig:Histogram} shows the histogram of a scalar image with two classes; the threshold-based approach attempts to determine an intensity value, called the \emph{threshold}, which splits the desired classes~\cite{ART:Pham}. To achieve the segmentation we group all the pixels with intensity higher than the threshold into one class and all the remaining ones into the other class.
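In symbols (the notation $T$ for the threshold and $f$, $g$ for the input and output images is introduced here only for illustration), the binary segmentation can be written as $g(x) = 1$ if $f(x) > T$ and $g(x) = 0$ otherwise, where $x$ ranges over the pixel locations.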
\begin{figure}[h!]
\centering
\includegraphics[scale=.35]{hist.png}
\caption{Histogram of a GL image with two well delineated regions. The threshold value (red line) was set visually at -400 HU.}\label{fig:Histogram}
\end{figure}
The threshold value is usually set by visual assessment, but it can also be determined automatically by algorithms such as Otsu's method.
Sometimes more than two classes are present in the image; in this case we can set more than one threshold value to achieve a multi-class segmentation. Also in this case there are algorithms to automate the process, such as an extension of the previous one called the \emph{multi Otsu threshold}.
This is a simple but very effective approach to segment images when different structures have high contrast in intensities. Thresholding does not take into account the spatial characteristics of the image, so it is sensitive to noise and intensity inhomogeneity, which corrupt the image histogram and make the separation more difficult~\cite{ART:Pham}. To overcome these issues, several variations of thresholding have been proposed based on local intensities and connectivity.
Thresholding is often used as an initial step in a sequence of image processing operations, followed by other segmentation techniques that improve the segmentation quality.
Since thresholding uses only intensity information, it can be considered a pixel classification technique.
\end{document}
|
import category_theory.lifting_properties.basic
import category_theory.over
namespace category_theory
open category
variables {C : Type*} [category C] {M : C} {A B X Y : under M}
{i : A ⟶ B} {p : X ⟶ Y} {f : A ⟶ X} {g : B ⟶ Y}
(sq : comm_sq f i p g)
namespace comm_sq
def lift_struct.map {D : Type*} [category D] (F : C ⥤ D)
{A B X Y : C} {i : A ⟶ B} {p : X ⟶ Y} {f : A ⟶ X} {g : B ⟶ Y}
{sq : comm_sq f i p g} (l : sq.lift_struct) : (F.map_comm_sq sq).lift_struct :=
{ l := F.map l.l,
fac_left' := by { rw [← F.map_comp, l.fac_left], },
fac_right' := by { rw [← F.map_comp, l.fac_right], }, }
variable (sq)
def under_forget_equiv_lift_struct :
sq.lift_struct ≃ ((under.forget M).map_comm_sq sq).lift_struct :=
{ to_fun := lift_struct.map (under.forget M),
inv_fun := λ l,
{ l := structured_arrow.hom_mk l.l begin
simp only [functor.id_map],
have w₁ := f.w,
have w₂ := i.w,
dsimp at w₁ w₂,
rw [id_comp] at w₁ w₂,
have h := l.fac_left,
dsimp at h,
rw [w₂, assoc, h, w₁],
end,
fac_left' := by { ext, exact l.fac_left, },
fac_right' := by { ext, exact l.fac_right, }, },
left_inv := by tidy,
right_inv := by tidy, }
lemma under_sq_has_lift_iff :
sq.has_lift ↔ ((under.forget M).map_comm_sq sq).has_lift :=
by simpa only [comm_sq.has_lift.iff] using nonempty.congr
(under_forget_equiv_lift_struct sq).to_fun (under_forget_equiv_lift_struct sq).inv_fun
end comm_sq
variables (i p)
instance has_lifting_property_under
[has_lifting_property ((under.forget M).map i) ((under.forget M).map p)] :
has_lifting_property i p :=
⟨λ f g sq, by { rw comm_sq.under_sq_has_lift_iff, apply_instance, }⟩
end category_theory
|
-- Andreas, 2016-11-03, issue #2291 reported by Aerate
test = let {{_}} = _ in _
-- WAS: Internal error
-- NOW: Could not parse the left-hand side {{_}}
|
import matplotlib.pyplot as plt
import numpy as np
from log import logger
from settings import mask_dir
plt.rcParams['font.sans-serif'] = ['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # display minus signs correctly
def plot_background_noise():
try:
ary = np.load(mask_dir.joinpath("background_noise.npy"))
ary[ary < 0] = 0
m = ary.max()
print(m)
ary = ary / m * 255
plt.imsave(mask_dir.joinpath("background_noise.png"), ary, cmap="gray_r")
except FileNotFoundError as e:
logger.error(f"文件{mask_dir.joinpath('background_noise.npy')}不存在!")
if __name__ == '__main__':
plot_background_noise()
|
-- ------------------------------------------------------------- [ Char.idr ]
-- Module : Lightyear.Char
-- Description : Character-related parsers.
--
-- This code is distributed under the BSD 2-clause license.
-- See the file LICENSE in the root directory for its full text.
--
-- This code is (mostly) a port of Daan Leijen's Text.Parsec.Char library.
-- --------------------------------------------------------------------- [ EOH ]
module Lightyear.Char
import Data.Strings
import public Data.Vect
import public Data.Fin
import public Control.Monad.Identity
import Lightyear.Core
import Lightyear.Combinators
-- Maybe somehow return proof that returned Char is the one passed as arg?
-- `char a <|> char b` should return `Either (c = a) (c = b)`
-- char : (Monad m, Stream Char str) => (c : Char) -> Covers (Subset Char (Equal c)) t => ParserT str m t
||| A parser that matches some particular character
export
char : (Monad m, Stream Char str) => Char -> ParserT str m Char
char c = satisfy (== c) <?> "character '" ++ singleton c ++ "'"
||| oneOf cs succeeds if the current character is in the supplied
||| list of characters @cs@. Returns the parsed character. See also
||| 'satisfy'.
|||
||| vowel = oneOf "aeiou"
export
oneOf : (Monad m, Stream Char str) => String -> ParserT str m Char
oneOf cs = satisfy (\c => elem c $ unpack cs)
||| As the dual of 'oneOf', @noneOf cs@ succeeds if the current
||| character is /not/ in the supplied list of characters @cs@. Returns the
||| parsed character.
|||
||| consonant = noneOf "aeiou"
export
noneOf : (Monad m, Stream Char str) => String -> ParserT str m Char
noneOf cs = satisfy (\c => not $ elem c $ unpack cs)
||| Parses a white space character (any character which satisfies 'isSpace')
||| Returns the parsed character.
export
space : (Monad m, Stream Char s) => ParserT s m Char
space = satisfy isSpace <?> "space"
||| Skips /zero/ or more white space characters. See also 'skipMany'.
export
spaces : (Monad m, Stream Char s) => ParserT s m ()
spaces = skip (many Lightyear.Char.space) <?> "white space"
||| Parses a newline character (\'\\n\'). Returns a newline character.
export
newline : (Monad m, Stream Char s) => ParserT s m Char
newline = char '\n' <?> "lf new-line"
||| Parses a carriage return character (\'\\r\') followed by a newline character (\'\\n\').
||| Returns a newline character.
export
crlf : (Monad m, Stream Char s) => ParserT s m Char
crlf = char '\r' *> char '\n' <?> "crlf new-line"
||| Parses a CRLF (see 'crlf') or LF (see 'newline') end-of-line.
||| Returns a newline character (\'\\n\').
|||
||| endOfLine = newline <|> crlf
export
endOfLine : (Monad m, Stream Char s) => ParserT s m Char
endOfLine = newline <|> crlf <?> "new-line"
||| Parses a tab character (\'\\t\'). Returns a tab character.
export
tab : (Monad m, Stream Char s) => ParserT s m Char
tab = char '\t' <?> "tab"
||| Parses an upper case letter (a character between \'A\' and \'Z\').
||| Returns the parsed character.
export
upper : (Monad m, Stream Char s) => ParserT s m Char
upper = satisfy isUpper <?> "uppercase letter"
||| Parses a lower case character (a character between \'a\' and \'z\').
||| Returns the parsed character.
export
lower : (Monad m, Stream Char s) => ParserT s m Char
lower = satisfy isLower <?> "lowercase letter"
||| Parses a letter or digit.
||| Returns the parsed character.
export
alphaNum : (Monad m, Stream Char s) => ParserT s m Char
alphaNum = satisfy isAlphaNum <?> "letter or digit"
||| Parses a letter (an upper case or lower case character). Returns the
||| parsed character.
export
letter : (Monad m, Stream Char s) => ParserT s m Char
letter = satisfy isAlpha <?> "letter"
||| Matches a single digit
export
digit : (Monad m, Stream Char s) => ParserT s m (Fin 10)
digit = satisfyMaybe fromChar
where fromChar : Char -> Maybe (Fin 10)
fromChar '0' = Just FZ
fromChar '1' = Just (FS (FZ))
fromChar '2' = Just (FS (FS (FZ)))
fromChar '3' = Just (FS (FS (FS (FZ))))
fromChar '4' = Just (FS (FS (FS (FS (FZ)))))
fromChar '5' = Just (FS (FS (FS (FS (FS (FZ))))))
fromChar '6' = Just (FS (FS (FS (FS (FS (FS (FZ)))))))
fromChar '7' = Just (FS (FS (FS (FS (FS (FS (FS (FZ))))))))
fromChar '8' = Just (FS (FS (FS (FS (FS (FS (FS (FS (FZ)))))))))
fromChar '9' = Just (FS (FS (FS (FS (FS (FS (FS (FS (FS (FZ))))))))))
fromChar _ = Nothing
||| Matches an integer literal
export
integer : (Num n, Monad m, Stream Char s) => ParserT s m n
integer = do minus <- opt (char '-')
ds <- some digit
let theInt = getInteger ds
case minus of
Nothing => pure (fromInteger theInt)
Just _ => pure (fromInteger ((-1) * theInt))
where getInteger : List (Fin 10) -> Integer
getInteger = foldl (\a => \b => 10 * a + cast b) 0
||| Parses a hexadecimal digit (a digit or a letter between \'a\' and
||| \'f\' or \'A\' and \'F\'). Returns the parsed character.
export
hexDigit : (Monad m, Stream Char s) => ParserT s m Char
hexDigit = satisfy isHexDigit <?> "hexadecimal digit"
||| Parses an octal digit (a character between \'0\' and \'7\'). Returns
||| the parsed character.
export
octDigit : (Monad m, Stream Char s) => ParserT s m Char
octDigit = satisfy isOctDigit <?> "octal digit"
||| This parser succeeds for any character. Returns the parsed character.
export
anyChar : (Monad m, Stream Char s) => ParserT s m Char
anyChar = anyToken <?> "any character"
|
[STATEMENT]
lemma defs_uses_disjoint'[simp]: "n \<in> set (\<alpha>n g) \<Longrightarrow> v \<in> defs g n \<Longrightarrow> v \<in> uses g n \<Longrightarrow> False"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>n \<in> set (\<alpha>n g); v \<in> defs g n; v \<in> uses g n\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
using defs_uses_disjoint
[PROOF STATE]
proof (prove)
using this:
?n \<in> set (\<alpha>n ?g) \<Longrightarrow> defs ?g ?n \<inter> uses ?g ?n = {}
goal (1 subgoal):
1. \<lbrakk>n \<in> set (\<alpha>n g); v \<in> defs g n; v \<in> uses g n\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
by auto |
\section{Previous work}
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e389m21_7limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
module Solve (
verboseSolve
) where
import qualified Data.DList as D
import qualified Data.IntMap.Lazy as M
import Data.Complex (realPart, Complex((:+)))
import Control.Monad.Trans.Writer
import Control.Monad.Trans.State
import Data.MyPolynomial
import Data.MyPolynomial.Print
type ComplexF = Complex Float
type ShowD = D.DList Char
data Solvability b = IsBroad b | Degree | Clear
sToShowD :: String -> D.DList ShowD
sToShowD s = (D.singleton . D.fromList) s
toShowD :: [String] -> D.DList ShowD
toShowD ls = ( D.fromList . ( fmap D.fromList ) ) ls
discriminantSpeech :: Float -> Writer (D.DList ShowD) ()
discriminantSpeech d = let s = case (compare d 0) of LT -> "strictly negative, the two complex solutions are:\n"
GT -> "strictly positive, the two real solutions are:\n"
EQ -> "null, the unique solution is:\n"
in tell $ toShowD ["The discriminant is ", s]
unrootableSpeech :: Solvability BroadSol -> Writer (D.DList ShowD) ()
unrootableSpeech (IsBroad Absurd) = tell $ sToShowD "Cannot solve: absurd.\n"
unrootableSpeech (IsBroad Real) = tell $ sToShowD "The solution is |R itself.\n"
unrootableSpeech Degree = tell $ sToShowD "The polynomial degree is strictly greater than 2, I can't solve."
unrootableSpeech _ = return ()
degSolvability :: Int -> Solvability b
degSolvability d | d >= 0 && d <= 2 = Clear
| otherwise = Degree
-- Tells solution/root(s) with mention of exclusion.
tellSolutions :: Maybe Float -> Roots -> [ComplexF] -> Writer (D.DList ShowD) ()
tellSolutions delta (c1, c2) badRoots
| delta == Just 0 = tell $ toShowD $ tellBad [c1] badRoots "root" []
| Nothing <- delta = tell $ toShowD $ tellBad [c1] badRoots "solution" []
| otherwise = tell $ toShowD $ tellBad [c1, c2] badRoots "root" []
where
tellBad (rt:rts) bad w =
case rt `elem` bad of True -> ([prettyComplex rt, " (excluded " ++ w ++ ")"] ++) . next
_ -> ([prettyComplex rt] ++) . next
where
next = case rts of (r:_) -> (["\n"] ++) . tellBad rts bad w
[] -> ([] ++)
tellReduced :: Int -> M.IntMap Float -> Writer (D.DList ShowD) ()
tellReduced deg map = tell $ toShowD ["Reduced form: ", porcelainPolynomialSM dMap "", " = 0\n"]
where dMap = M.filterWithKey (\k _ -> k <= deg) map
tellNatural :: M.IntMap Float -> Writer (D.DList ShowD) ()
tellNatural map = tell $ toShowD ["Natural reduced form: ", prettyPolynomialM map, " = 0\n"]
tellForbidden :: [ComplexF] -> Writer (D.DList ShowD) ()
tellForbidden [] = return ()
tellForbidden frts = tell $ toShowD $ ["Excluded roots/solutions:\n"] ++ (showForbidden frts [])
where
showForbidden [] = ([] ++)
showForbidden (fr:fs) = ([prettyComplex fr, "\n"]++) . showForbidden fs
verboseSolve :: Equation -> Writer (D.DList ShowD) (Equation, Maybe Solution)
verboseSolve eq = do
tellReduced deg mapPol
tellNatural mapPol
tell $ toShowD ["Polynomial degree: ", show deg, "\n"]
tellForbidden badRoots
case degSolvability deg of Clear -> doSolve
Degree -> quit Degree
where
Eq (cl, cr) = canonify eq
(straightP, badRoots) = runState ( handleNegativePowers cl ) []
ceq = Eq (straightP, cr) -- canonified equation with negative powers lifted
mapPol = toMap straightP -- left polynomial as an IntMap
deg = degree mapPol -- left polynomial degree
-- doSolve :: Writer (D.DList ShowD) (Equation, Maybe Solution)
doSolve = do
let sol = solveEquation ceq
in case sol of Quadratic _ -> result sol
Simple _ -> result sol
Broad b -> quit (IsBroad b)
-- quit :: Solvability b -> Writer (D.DList ShowD) (Equation, Maybe Solution)
quit sb = do
unrootableSpeech sb
let ret = case sb of Degree -> Nothing
IsBroad b -> Just (Broad b)
return (ceq, ret)
-- result :: Solution s -> Writer (D.DList ShowD) (Equation, Maybe Solution)
result sol@(Quadratic rts) = do
let delta = discriminantQuadratic mapPol
discriminantSpeech delta
tellSolutions (Just delta) rts badRoots
return ( ceq, Just sol )
result sol@(Simple fl) = do
tell $ toShowD [ "The solution is:\n" ]
tellSolutions Nothing (fl :+ 0, 0 :+ 0) badRoots
return ( ceq, Just sol )
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
hcth_a_beta := 0.0042:
hcth_a_gamma := 6:
hcth_a_c0 := 1.09878:
hcth_a_c1 := -2.51173:
hcth_a_c2 := 0.0156233:
hcth_a_aux := x -> 1 + hcth_a_gamma*hcth_a_beta*x*arcsinh(x):
hcth_a_f := x -> hcth_a_c0 + hcth_a_beta/X_FACTOR_C*x^2*(hcth_a_c1/hcth_a_aux(x)
+ hcth_a_c2/(hcth_a_beta*hcth_a_aux(x)^2)):
f := (rs, z, xt, xs0, xs1) -> gga_exchange(hcth_a_f, rs, z, xs0, xs1):
|
struct MutualInformationContainer{H}
hist::H
pxy::Array{Float32,2}
px::Array{Float32,2}
py::Array{Float32,2}
px_py::Array{Float32,2}
nzs::BitArray{2}
function MutualInformationContainer(hist::H) where {H}
pxy = counts(hist) ./ sum(counts(hist))
px = sum(pxy, dims = 2)
py = sum(pxy, dims = 1)
px_py = px * py
nzs = pxy .> 0
new{H}(hist, pxy, px, py, px_py, nzs)
end
end
function _mutual_information!(mi::MutualInformationContainer)
mi.pxy .= counts(mi.hist) ./ sum(counts(mi.hist))
sum!(mi.px, mi.pxy)
sum!(mi.py, mi.pxy)
mi.px_py .= mi.px * mi.py
mi.nzs .= mi.pxy .> 0
pxys = mi.pxy[mi.nzs]
sum(pxys .* log.(pxys ./ mi.px_py[mi.nzs]))
end
"""
mutual_information!(mi::MutualInformationContainer, x, y)
Computes the mutual information between the two variables `x` and `y`. The histogram within `mi` must be of the correct
type to handle the formats of `x` and `y`.
"""
function mutual_information!(mi::MutualInformationContainer, x, y)
zero!(mi.hist)
increment_bins!(mi.hist, x, y)
_mutual_information!(mi)
end
"""
mutual_information!(
mi::MutualInformationContainer,
fixed,
buffer,
full_image,
moving_bbox,
range_x,
range_y,
::Missing;
set_buffer!,
get_buffer_crop,
prefilter_frame_crop! = x -> nothing,
)
Calculates the mutual information of two images at all shifts within the `range_x` and `range_y`. The `fixed` image
must already be filtered. This will set the `buffer` and filter its contents using `prefilter_frame_crop!`.
"""
function mutual_information!(
mi::MutualInformationContainer,
fixed,
buffer,
full_image,
moving_bbox,
range_x,
range_y,
::Missing;
set_buffer!,
get_buffer_crop,
prefilter_frame_crop! = x -> nothing,
)
w, h = size(fixed)
mis = OffsetArray(
Array{Float32}(undef, length(range_x), length(range_y)),
range_x,
range_y,
)
fixed_vec = vec(fixed)
# Crop and prefilter a section of `current_frame` big enough to handle the shift extents.
set_buffer!(buffer, full_image, moving_bbox)
prefilter_frame_crop!(buffer)
@inbounds for shift_x in range_x
@inbounds for shift_y in range_y
moving_vec = vec(get_buffer_crop(buffer, moving_bbox, shift_x, shift_y))
mis[shift_x, shift_y] = mutual_information!(mi, fixed_vec, moving_vec)
end
end
return mis
end
"""
mutual_information!(
mi::MutualInformationContainer,
fixed,
buffer,
::Any,
moving_bbox,
range_x,
range_y,
prev_mis::AbstractArray{Float32,2};
get_buffer_crop,
kwargs...
)
Calculates the mutual information of two images at all shifts within the `range_x` and `range_y`. Warm-starts the
evaluation using previous results (`prev_mis`; the return value from a previous call of this function) and using the
previously set and filtered contents of the `buffer`.
"""
function mutual_information!(
mi::MutualInformationContainer,
fixed,
buffer,
::Any,
moving_bbox,
range_x,
range_y,
prev_mis::AbstractArray{Float32,2};
get_buffer_crop,
kwargs...,
)
w, h = size(fixed)
mis = OffsetArray(
Array{Float32}(undef, length(range_x), length(range_y)),
range_x,
range_y,
)
prev_range_x = axes(prev_mis, 1)
prev_range_y = axes(prev_mis, 2)
fixed_vec = vec(fixed)
# No need to extract and prefilter a crop to fill the buffer here because it is done in `mutual_information!`
# where `prev_mis` is of type `Missing`. We just reuse the prefiltered crop here.
@inbounds for shift_x in range_x
@inbounds for shift_y in range_y
if shift_x ∈ prev_range_x && shift_y ∈ prev_range_y
mis[shift_x, shift_y] = prev_mis[shift_x, shift_y]
else
moving_vec = vec(get_buffer_crop(buffer, moving_bbox, shift_x, shift_y))
mis[shift_x, shift_y] = mutual_information!(mi, fixed_vec, moving_vec)
end
end
end
return mis
end
|
(1) Acknowledge your sin and ask God for forgiveness.
(2) Believe in Jesus Christ.
(3) Confess Jesus Christ as your personal Savior and Lord. |
import game.world7.level9 -- hide
import tactic.tauto
local attribute [instance, priority 10] classical.prop_decidable -- we are mathematicians
/-
# Advanced proposition world.
## Level 10: the law of the excluded middle.
We proved earlier that `(P → Q) → (¬ Q → ¬ P)`. The converse,
that `(¬ Q → ¬ P) → (P → Q)` is certainly true, but trying to prove
it using what we've learnt so far is impossible (because it is not provable in
constructive logic). For example, after
```
intro h,
intro p,
repeat {rw not_iff_imp_false at h},
```
in the below, you are left with
```
P Q : Prop,
h : (Q → false) → P → false
p : P
⊢ Q
```
The tools you have are not sufficient to continue. But you can just
prove this, and any other basic lemmas of this form like `¬ ¬ P → P`,
using the `by_cases` tactic. Instead of starting with all the `intro`s,
try this instead:
`by_cases p : P; by_cases q : Q,`
**Note the semicolon**! It means "do the next tactic to all the goals, not just the top one".
After it, there are four goals, one for each of the four possibilities PQ=TT, TF, FT, FF.
You can see that `p` is a proof of `P` in some of the goals, and a proof of `¬ P` in others.
Similar comments apply to `q`.
`repeat {cc}` then finishes the job.
This approach assumed that `P ∨ ¬ P` was true; the `by_cases` tactic just does `cases` on
this result. This is called the law of the excluded middle, and it cannot be proved just
using tactics such as `intro` and `apply`.
-/
/- Lemma : no-side-bar
If $P$ and $Q$ are true/false statements, then
$$(\lnot Q\implies \lnot P)\implies(P\implies Q).$$
-/
lemma contrapositive2 (P Q : Prop) : (¬ Q → ¬ P) → (P → Q) :=
begin
by_cases p : P; by_cases q : Q,
repeat {cc},
end
/-
OK that's enough logic -- now perhaps it's time to go on to Advanced Addition World!
Get to it via the main menu.
-/
/-
## Pro tip
In fact the tactic `tauto!` just kills this goal (and many other logic goals) immediately.
-/
/- Tactic : by_cases
## Summary
`by_cases h : P` does a cases split on whether `P` is true or false.
## Details
Some logic goals cannot be proved with `intro` and `apply` and `exact`.
The simplest example is the law of the excluded middle `¬ ¬ P → P`.
You can prove this using truth tables but not with `intro`, `apply` etc.
To do a truth table proof, the tactic `by_cases h : P` will turn a goal of
`⊢ ¬ ¬ P → P` into two goals
```
P : Prop,
h : P
⊢ ¬¬P → P
P : Prop,
h : ¬P
⊢ ¬¬P → P
```
Each of these can now be proved using `intro`, `apply`, `exact` and `exfalso`.
Remember though that in these simple logic cases, high-powered logic
tactics like `cc` and `tauto!` will just prove everything.
-/
/- Tactic : tauto
## Summary
The `tauto` tactic (and its variant `tauto!`) will close various logic
goals.
## Details
`tauto` is an all-purpose logic tactic which will try to solve goals using pure
logical reasoning -- for example it will close the following goal:
```
P Q : Prop,
hP : P,
hQ : Q
⊢ P ∧ Q
```
`tauto` is supposed to only use constructive logic, but its big brother `tauto!` uses classical logic
and hence closes more goals.
-/
|
# Moran Processes
The evolutionary models discussed in the previous chapters assume an infinite population that can be divided into infinitesimal parts. Finite populations can also be studied using a model called a Moran Process (first described in 1958).
---
## Moran process with neutral drift
[Video](https://youtu.be/OeMku85hwEc?list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
Consider a population of two types of fixed size $N$. This can be represented as a vector of the form: $(i, N-i)$ where $i\geq 0$ represents the number of individuals of the first type.
The term **neutral** drift refers to the fact that the two types reproduce at the same rate.
The Moran process is as follows:
- At a given time step: select a random individual for reproduction and a random individual for elimination
- The eliminated individual is replaced by a new individual of the same type as the individual chosen for reproduction.
- Proceed to the next time step.
- The process terminates when there is only one type of individual in the population.
---
Here is some simple Python code that simulates such a Process assuming an initial population of $(3, 3)$:
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def neutral_moran(N, i=1, seed=0):
"""
Return the population counts for the Moran process with neutral drift.
"""
population = [0 for _ in range(i)] + [1 for _ in range(N - i)]
counts = [(population.count(0), population.count(1))]
np.random.seed(seed)
while len(set(population)) == 2:
reproduce_index = np.random.randint(N)
eliminate_index = np.random.randint(N)
population[eliminate_index] = population[reproduce_index]
counts.append((population.count(0), population.count(1)))
return counts
N = 6
plt.plot(neutral_moran(N=N, i=3, seed=6));
```
For different seeds we obtain different results. What becomes of interest is not the path but the end result: which strategy overcomes the presence of the other?
```python
def neutral_fixation(N, i=None, repetitions=10):
"""
Repeat the neutral Moran process and calculate the fixation probability
"""
fixation_count = 0
for seed in range(repetitions):
final_counts = neutral_moran(N=N, i=i, seed=seed)
if final_counts[-1][0] > 0:
fixation_count += 1
return fixation_count / repetitions
```
Let us take a look at probability of the first strategy taking over for different starting populations:
```python
probabilities = [neutral_fixation(N, i=i, repetitions=500) for i in range(1, N)]
plt.scatter(range(1, N), probabilities)
plt.xlabel("$i$")
plt.ylabel("$x_i$");
```
We see that as the initial population starts with more of a given type, the chance that that type "takes over" (becomes fixed) grows.
This Moran Process is a specific case of a Markov Process:
- A given state of the system can be described by a single integer $0\leq i\leq N$;
- The state to state transition probabilities are given by:
$$
\begin{aligned}
p_{i, i-1}&=\frac{i(N - i)}{N^2}\\
p_{i, i+1}&=\frac{i(N - i)}{N^2}\\
p_{i, i}&=1 - p_{i, i-1} - p_{i, i+1}
\end{aligned}
$$
We also have two absorbing states (when the Moran process ends):
$$p_{00}=1\qquad p_{0i}=0\text{ for all }i>0$$
$$
p_{NN}=1\qquad p_{Ni}=0\text{ for all } N>i
$$
these transitions can be represented as a matrix. Here for example is the matrix for $N=6$:
```python
N = 6
p = np.zeros((N + 1, N + 1))
p[0, 0] = 1
p[N, N] = 1
for i in range(1, N):
for j in [i - 1, i + 1]:
p[i, j] = i * (N - i) / (N ** 2)
p[i, i] = 1 - sum(p[i, :])
p.round(2)
```
array([[1. , 0. , 0. , 0. , 0. , 0. , 0. ],
[0.14, 0.72, 0.14, 0. , 0. , 0. , 0. ],
[0. , 0.22, 0.56, 0.22, 0. , 0. , 0. ],
[0. , 0. , 0.25, 0.5 , 0.25, 0. , 0. ],
[0. , 0. , 0. , 0.22, 0.56, 0.22, 0. ],
[0. , 0. , 0. , 0. , 0.14, 0.72, 0.14],
[0. , 0. , 0. , 0. , 0. , 0. , 1. ]])
The above corresponds to a particular type of Markov process called a birth-death process.
---
## Birth death process
[Video](https://youtu.be/zJQQF2tq9AA?list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
A birth death process is a Markov process with the following properties:
- $p_{i,i+1}+p_{i,i-1}\leq 1$
- $p_{ii}=1-p_{i,i+1}-p_{i,i-1}$
- $p_{00}=1$ and $p_{NN}=1$
---
Thus we have two absorbing states: $\{0, N\}$. Let us denote by $x_i$ the probability of being in state $i$ and eventually reaching state $N$.
We have the following linear system:
\begin{align}
x_0&=0\\
x_i&=p_{i,i-1}x_{i-1}+p_{ii}x_i+p_{i,i+1}x_{i+1}\text{ for all }0< i< N\\
x_N&=1\\
\end{align}
---
## Theorem: Fixation probabilities for the birth death process
Given a birth death process as defined above, the fixation probability $x_i$ is given by:
$$x_i=\frac{1+\sum_{j=1}^{i-1}\prod_{k=1}^j\gamma_k}{1+\sum_{j=1}^{N-1}\prod_{k=1}^j\gamma_k}$$
where:
$$
\gamma_k = \frac{p_{k,k-1}}{p_{k,k+1}}
$$
### Proof
We have:
$$
\begin{aligned}
p_{i,i+1}x_{i+1} & = -p_{i,i-1}x_{i-1} + x_i(1 - p_{ii}) \\
p_{i,i+1}x_{i+1} & = p_{i,i-1}(x_{i} - x_{i-1}) + x_ip_{i,i+1} \\
x_{i+1} - x_i & = \frac{p_{i, i-1}}{p_{i, i+1}}(x_i-x_{i-1})=\gamma_i(x_i-x_{i-1})
\end{aligned}
$$
We observe that:
$$
\begin{aligned}
x_2 - x_1 &= \gamma_1(x_1-x_{0})=\gamma_1x_1\\
x_3 - x_2 &= \gamma_2(x_2-x_1)=\gamma_2\gamma_1x_1\\
x_4 - x_3 &= \gamma_3(x_3-x_2)=\gamma_3\gamma_2\gamma_1x_1\\
&\; \vdots & \\
x_{i+1} - x_i &= \gamma_i(x_i-x_{i-1})=\prod_{k=1}^i\gamma_kx_1\\
&\; \vdots & \\
x_{N} - x_{N-1} &= \gamma_{N-1}(x_{N-1}-x_{N-2})=\prod_{k=1}^{N-1}\gamma_kx_1\\
\end{aligned}
$$
thus we have:
$$x_i=\sum_{j=0}^{i-1}x_{j+1}-x_j=\left(1+\sum_{j=1}^{i-1}\prod_{k=1}^j\gamma_k\right)x_1$$
we complete the proof by solving the following equation to obtain $x_1$:
$$x_N=1=\left(1+\sum_{j=1}^{N-1}\prod_{k=1}^j\gamma_k\right)x_1$$
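This closed form translates directly into code. The following sketch is not part of the original text; the function and argument names are chosen here for illustration. It computes the full vector of fixation probabilities of a birth-death process from its transition probabilities:
```python
import numpy as np

def birth_death_fixation_probabilities(p_up, p_down):
    """
    Return the vector (x_0, x_1, ..., x_N) for a birth death process.

    `p_up[k]` and `p_down[k]` hold p_{k, k+1} and p_{k, k-1} for the
    transient states k = 1, ..., N - 1 (both lists have length N - 1).
    """
    gammas = np.asarray(p_down, dtype=float) / np.asarray(p_up, dtype=float)
    products = np.cumprod(gammas)        # products[j - 1] = prod_{k=1}^{j} gamma_k
    denominator = 1 + products.sum()     # 1 + sum_{j=1}^{N-1} prod_{k=1}^{j} gamma_k
    numerators = 1 + np.concatenate(([0.0], np.cumsum(products[:-1])))
    return np.concatenate(([0.0], numerators / denominator, [1.0]))
```
For neutral drift all the $\gamma_k$ equal one, and this reduces to $x_i = i / N$, as derived below.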
---
In the case of neutral drift (considered above) we have:
$$p_{i,i-1}=p_{i,i+1}$$
thus:
$$
\gamma_i=1
$$
so:
$$
x_i=\frac{1+\sum_{j=1}^{i-1}\prod_{k=1}^j\gamma_k}{1+\sum_{j=1}^{N-1}\prod_{k=1}^j\gamma_k}=\frac{1+i-1}{1+N-1}=\frac{i}{N}
$$
```python
probabilities = [neutral_fixation(N, i=i, repetitions=500) for i in range(1, N)]
plt.scatter(range(1, N), probabilities, label="Simulated")
plt.plot(range(1, N), [i / N for i in range(1, N)], label="Theoretic: $i/N$", linestyle="dashed")
plt.xlabel("$i$")
plt.ylabel("$x_i$")
plt.legend();
```
---
## Fixation probability
The fixation probability in a Moran process is the probability that a given type, starting with $i=1$ individuals, takes over an entire population. We denote the fixation probabilities of the first/second type as $\rho_1$ and $\rho_2$ respectively and we have:
$$
\rho_1=x_1
$$
$$
\rho_2=1-x_{N-1}
$$
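As a quick illustration (reusing the hypothetical `birth_death_fixation_probabilities` sketch above), both probabilities can be read off the returned vector; for the neutral process with $N=6$ both equal $1/N$:
```python
N = 6
p_up = [i * (N - i) / N ** 2 for i in range(1, N)]
p_down = p_up  # neutral drift: p_{i, i-1} = p_{i, i+1}

x = birth_death_fixation_probabilities(p_up, p_down)
rho_1 = x[1]       # a single individual of the first type fixes
rho_2 = 1 - x[-2]  # a single individual of the second type fixes
```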
---
We will now consider a Moran process on a game:
---
## Moran process on a game
[Video](https://www.youtube.com/watch?v=TpqVoF1fBF8&index=43&list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
Consider a matrix $A\in\mathbb{R}^{m\times n}$ representing a game with two strategies.
$$
A=
\begin{pmatrix}
a & b\\
c & d
\end{pmatrix}
$$
The Moran process is as follows:
- At a given time step: all individuals play all other individuals.
- Obtain their fitness as given by the game.
- Randomly select an individual proportional to their fitness as an individual to be reproduced
- Uniformly select an individual to be replaced
- Proceed to the next time step.
- The process terminates when there is only one type of individual in the population.
Assuming $i$ individuals of the first type, the fitness of both types is given respectively by:
$$f_{1i}=\frac{a(i-1)+b(N-i)}{N-1}$$
$$f_{2i}=\frac{c(i)+d(N-i-1)}{N-1}$$
The transition probabilities are then given by:
$$p_{i,i+1}=\frac{if_{1i}}{if_{1i} + (N-i)f_{2i}}\frac{N-i}{N}$$
$$p_{i,i-1}=\frac{(N-i)f_{2i}}{if_{1i} + (N-i)f_{2i}}\frac{i}{N}$$
which gives:
$$\gamma_i=\frac{f_{2i}}{f_{1i}}$$
thus:
$$
x_i=\frac{1+\sum_{j=1}^{i-1}\prod_{k=1}^j\gamma_k}{1+\sum_{j=1}^{N-1}\prod_{k=1}^j\gamma_k}
$$
---
Here is some code to carry out this calculation:
```python
def theoretic_fixation(N, game, i=1):
"""
Calculate x_i as given by the above formula
"""
f_ones = np.array([(game[0, 0] * (i - 1) + game[0, 1] * (N - i)) / (N - 1) for i in range(1, N)])
f_twos = np.array([(game[1, 0] * i + game[1, 1] * (N - i - 1)) / (N - 1) for i in range(1, N)])
gammas = f_twos / f_ones
return (1 + np.sum(np.cumprod(gammas[:i-1]))) / (1 + np.sum(np.cumprod(gammas)))
```
Here is an example of calculating $x_1$ for the following game for $N=4$:
[Video](https://www.youtube.com/watch?v=3sBVrnQhemE&index=44&list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
$$
A =
\begin{pmatrix}
4 & 1\\
1 & 4
\end{pmatrix}
$$
```python
A = np.array([[4, 1],
[1, 4]])
theoretic_fixation(N=4, i=1, game=A)
```
0.125
Applying the theorem gives:
$$
\begin{aligned}
f_{1i}&=\frac{4(i - 1) + 4 - i}{3} = \frac{4i-4+4-i}{3}=i\\
f_{2i}&=\frac{i + 4(3 - i)}{3} = \frac{12-3i}{3}=4-i
\end{aligned}
$$
$$
\gamma_i = \frac{f_{2i}}{f_{1i}}=\frac{4-i}{i}=\frac{4}{i}-1
$$
Thus:
$$
\begin{aligned}
x_1 & =\frac{1 + \sum_{j=1}^{0}\prod_{k=1}^{j}\gamma_k}{1 + \sum_{j=1}^{4 - 1}\prod_{k=1}^{j}\gamma_k}\\
& =\frac{1}{1 + \sum_{j=1}^{3}\prod_{k=1}^{j}\gamma_k}\\
& =\frac{1}{1 + \gamma_1 + \gamma_1\times \gamma_2 + \gamma_1 \times \gamma_2 \times \gamma_3}\\
& =\frac{1}{1+3+3\times 1 + 3 \times 1\times \frac{1}{3}} = \frac{1}{1 + 3 + 3 + 1}=\frac{1}{8}\\
\end{aligned}
$$
Nashpy has the ability to run a single Moran process:
```python
import nashpy as nash
game = nash.Game(A)
initial_population = np.array((0, 1, 1, 1))
np.random.seed(0)
generations = game.moran_process(
initial_population=initial_population
)
for population in generations:
print(population)
```
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[0 1 1 1]
[1 1 1 1]
We see there that in a population of 4 individuals, a single individual of the first type (`0`) does not become fixed. That is just for a single run; to approximate the fixation probability the process needs to be repeated, which can also be done in Nashpy:
```python
import collections
def approximate_fixation(N, A, i=None, repetitions=10):
"""
Repeat the Moran process and calculate the fixation probability
This is done by carrying out the following steps:
1. Creating a game
2. Building an initial population with i individuals
of the first type
3. Getting the fixation probabilities of both types
4. Returning the probability of the first type
"""
game = nash.Game(A)
initial_population = i * [0] + (N - i) * [1]
probabilities = game.fixation_probabilities(
initial_population=initial_population,
repetitions=repetitions
)
return probabilities[0]
```
Here is how the fixation probabilities vary for different initial populations:
```python
probabilities = [approximate_fixation(N, i=i, A=A, repetitions=500) for i in range(1, N)]
plt.scatter(range(1, N), probabilities, label="Simulated")
plt.plot(range(1, N), [i / N for i in range(1, N)], label="Neutral: $i/N$", linestyle="dashed")
plt.plot(range(1, N), [theoretic_fixation(N=N, i=i, game=A) for i in range(1, N)], label="Theoretic")
plt.xlabel("$i$")
plt.ylabel("$x_i$")
plt.legend();
```
|
State Before: X : Type u_1
Y✝ : Type ?u.306794
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y✝
x y z : X
ι : Type ?u.306809
γ✝ γ : Path x y
Y : Type u_2
inst✝¹ : TopologicalSpace Y
Z : Type u_3
inst✝ : TopologicalSpace Z
f : X → Y
hf : Continuous f
g : Y → Z
hg : Continuous g
⊢ map (map γ hf) hg = map γ (_ : Continuous (g ∘ f)) State After: case a.h
X : Type u_1
Y✝ : Type ?u.306794
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y✝
x y z : X
ι : Type ?u.306809
γ✝ γ : Path x y
Y : Type u_2
inst✝¹ : TopologicalSpace Y
Z : Type u_3
inst✝ : TopologicalSpace Z
f : X → Y
hf : Continuous f
g : Y → Z
hg : Continuous g
x✝ : ↑I
⊢ ↑(map (map γ hf) hg) x✝ = ↑(map γ (_ : Continuous (g ∘ f))) x✝ Tactic: ext State Before: case a.h
X : Type u_1
Y✝ : Type ?u.306794
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y✝
x y z : X
ι : Type ?u.306809
γ✝ γ : Path x y
Y : Type u_2
inst✝¹ : TopologicalSpace Y
Z : Type u_3
inst✝ : TopologicalSpace Z
f : X → Y
hf : Continuous f
g : Y → Z
hg : Continuous g
x✝ : ↑I
⊢ ↑(map (map γ hf) hg) x✝ = ↑(map γ (_ : Continuous (g ∘ f))) x✝ State After: no goals Tactic: rfl |
{-# OPTIONS --cubical --safe #-}
open import Prelude
open import Algebra
module Data.Maybe.Monoid {ℓ} (sgr : Semigroup ℓ) where
open import Data.Maybe
open Semigroup sgr
_«∙»_ : Maybe 𝑆 → Maybe 𝑆 → Maybe 𝑆
nothing «∙» y = y
just x «∙» nothing = just x
just x «∙» just y = just (x ∙ y)
maybeMonoid : Monoid ℓ
maybeMonoid .Monoid.𝑆 = Maybe 𝑆
maybeMonoid .Monoid._∙_ = _«∙»_
maybeMonoid .Monoid.ε = nothing
maybeMonoid .Monoid.assoc nothing y z = refl
maybeMonoid .Monoid.assoc (just x) nothing z = refl
maybeMonoid .Monoid.assoc (just x) (just x₁) nothing = refl
maybeMonoid .Monoid.assoc (just x) (just y) (just z) = cong just (assoc x y z)
maybeMonoid .Monoid.ε∙ _ = refl
maybeMonoid .Monoid.∙ε nothing = refl
maybeMonoid .Monoid.∙ε (just x) = refl
|
/-
The goal of this file is to prove that a linear operator
T : ℂ^n → ℂ^n is self-adjoint if and only if ⟪T v, v⟫_ℂ is
real for all v ∈ ℂ^n.
-- Checked over by:
-- Hans
-/
import .lemmas.ladr_7_lem
variable {n : ℕ}
localized "postfix `†`:1000 := linear_map.adjoint" in src
open_locale big_operators complex_conjugate matrix
variables (T : Lℂ^n) (u w : ℂ^n)
local notation `is_sa` := inner_product_space.is_self_adjoint
lemma is_self_adjoint_iff_eq_adjoint : is_sa T ↔ T = T† :=
by rw [linear_map.eq_adjoint_iff, inner_product_space.is_self_adjoint]
/-
Linearity lemmas
-/
lemma inner_map_add_add : ⟪ T (u + w), u + w ⟫_ℂ = ⟪T u, u⟫_ℂ + ⟪T w, u⟫_ℂ + ⟪T u, w⟫_ℂ + ⟪T w, w⟫_ℂ :=
by {rw [map_add, inner_add_left, inner_add_right, inner_add_right], ring}
lemma inner_map_sub_sub : ⟪ T (u - w), u - w ⟫_ℂ = ⟪T u, u⟫_ℂ - ⟪T w, u⟫_ℂ - ⟪T u, w⟫_ℂ + ⟪T w, w⟫_ℂ :=
by {rw [map_sub,inner_sub_left, inner_sub_right, inner_sub_right], ring}
/-
Polarization identity
-/
lemma inner_map_polarization :
4 * ⟪ T w, u ⟫_ℂ = ⟪T (u + w) , u + w⟫_ℂ - ⟪T (u - w) , u - w⟫_ℂ + I * ⟪T (u + I • w) , u + I • w⟫_ℂ - I * ⟪T (u - I • w), u - I • w ⟫_ℂ :=
begin
symmetry,
iterate {rw inner_map_add_add},
iterate {rw inner_map_sub_sub},
rw [linear_map.map_smul, inner_smul_left, inner_smul_right],
ring_nf,
rw complex.conj_I,
ring_nf,
rw complex.I_sq,
ring_nf,
end
/-
If ⟪T u, u⟫_ℂ = 0 for all u ∈ ℂ^n, then ⟪T u, w⟫_ℂ = 0 for all u, w ∈ ℂ^n.
-/
lemma inner_map_eq_zero (h : ∀ (u : ℂ^n), ⟪T u, u⟫_ℂ = 0) :
∀ (u w : ℂ^n), ⟪T u, w ⟫_ℂ = 0 :=
begin
have : ∀ u w : ℂ^n, 4 * ⟪T u, w ⟫_ℂ = 0 :=
begin
intros u w,
rw inner_map_polarization,
iterate {rw h},
ring,
end,
intros u w,
specialize this u w,
simp only [false_or, bit0_eq_zero, one_ne_zero, mul_eq_zero] at this,
exact this,
end
/-
If ⟪T u, u⟫_ℂ = 0 for all u ∈ ℂ^n, then T = 0.
-/
lemma inner_map_self_eq_zero (h : ∀ u : ℂ^n, ⟪T u, u⟫_ℂ = 0) :
T = 0 :=
begin
have : ∀ u w : ℂ^n, ⟪T u, w ⟫_ℂ = 0 := inner_map_eq_zero T h,
apply linear_map.ext,
intro x,
specialize this x (T x),
rw inner_self_eq_zero at this,
rw linear_map.zero_apply,
exact this,
end
/-
If T is self-adjoint, then ⟪T v, v⟫_ℂ is real for all v ∈ ℂ^n
-/
lemma self_adjoint_real_inner (h : is_sa T) :
∀ v : ℂ^n, conj ⟪T v, v⟫_ℂ = ⟪T v, v⟫_ℂ :=
begin
intro v,
apply inner_product_space.is_self_adjoint.conj_inner_sym,
exact h,
end
/-
If ⟪T v, v⟫_ℂ is real for all v ∈ ℂ^n, then T is self-adjoint.
-/
lemma real_inner_self_adjoint (h : ∀ v : ℂ^n, conj ⟪T v, v⟫_ℂ = ⟪T v, v⟫_ℂ) :
is_sa T :=
begin
rw is_self_adjoint_iff_eq_adjoint,
rw ← sub_eq_zero,
apply inner_map_self_eq_zero,
intro v,
specialize h v,
rw linear_map.sub_apply,
rw inner_sub_left,
rw linear_map.adjoint_inner_left,
rw ← h,
rw inner_conj_sym,
rw sub_self,
end
/-
T is self-adjoint if and only if ⟪T v, v⟫_ℂ is real for all v ∈ ℂ^n
-/
lemma is_self_adjoint_iff_real_inner_map : (is_sa T) ↔ ∀ v : ℂ^n, conj ⟪T v, v⟫_ℂ = ⟪T v, v⟫_ℂ :=
begin
split,
intro h,
apply self_adjoint_real_inner T h,
intro h,
apply real_inner_self_adjoint T h,
end |
import tactic
-- example (n : nat) : n < n + 1 :=
-- begin
-- suggest,
-- sorry
-- end
example (n : nat) : n < n + 1 :=
by exact lt_add_one n
-- Placing the cursor over `suggest` shows the following suggestions
-- Try this: exact lt_add_one n
-- Try this: exact nat.lt.base n
-- Try this: exact nat.lt_succ_self n
-- Try this: refine not_le.mp _
-- Try this: refine gt_iff_lt.mp _
-- Try this: refine nat.lt.step _
-- Try this: refine set.mem_Ioi.mp _
-- Try this: refine set.mem_Iio.mp _
-- Try this: refine lt_of_not_ge _
-- Try this: refine bit1_lt_bit1.mp _
-- Try this: refine bit0_lt_bit0.mp _
-- Try this: refine lt_of_not_ge' _
-- Try this: refine (lt_iff_not_ge n (n + 1)).mpr _
-- Try this: refine list.mem_range.mp _
-- Try this: refine int.coe_nat_lt.mp _
-- Try this: refine lt_iff_not_ge'.mpr _
-- Try this: refine n.lt_add_left 1 n _
-- Try this: refine nat.lt_succ_iff.mpr _
-- Try this: refine enat.coe_lt_coe.mp _
-- Try this: refine nat.succ_le_iff.mp _
-- Try this: refine lt_iff_le_not_le.mpr _
-- Try this: refine set.nonempty_Ico.mp _
-- Try this: refine set.nonempty_Ioc.mp _
-- Try this: refine set.left_mem_Ico.mp _
-- Try this: refine lt_iff_le_and_ne.mpr _
-- Try this: refine finset.mem_range.mp _
-- Try this: refine n.lt_add_right n 1 _
-- Try this: refine (n.psub_eq_none (n + 1)).mp _
-- Try this: refine (nat.fact_lt _).mp _
-- Try this: refine lt_of_le_of_ne _ _
-- Try this: refine lt_of_le_not_le _ _
-- Try this: refine buffer.lt_aux_1 _
-- Try this: refine lt_of_le_of_ne' _ _
-- Try this: refine gt.trans _ _
-- Try this: refine lt.trans _ _
-- Try this: refine lt_trans _ _
-- Try this: refine gt_trans _ _
-- Try this: refine nat.lt_trans _ _
-- Try this: refine (lt_is_glb_iff _).mpr _
-- Try this: refine (is_glb_lt_iff _).mpr _
-- Try this: refine (is_lub_lt_iff _).mpr _
-- Try this: refine (lt_is_lub_iff _).mpr _
-- Try this: refine (pnat.mk_lt_mk n (n + 1) _ _).mp _
-- Try this: refine gt_of_ge_of_gt _ _
-- Try this: refine gt_of_gt_of_ge _ _
-- Try this: refine lt_of_lt_of_le _ _
-- Try this: refine lt_of_le_of_lt _ _
-- Try this: refine (mul_lt_mul_left _).mp _
-- Try this: refine forall_lt_iff_le.mpr _ _
-- Try this: refine (mul_lt_mul_right _).mp _
-- Reference:
-- See https://bit.ly/2Vkvrsu
|
\newsection{Arrays}
\subsection{Motivation}
Arrays are a very powerful tool in a programmer's arsenal. They allow managing many variables at once without much hassle.
\subsection{Arrays}
The typical structure for creating an array is:
\begin{center}
$<type>[] <name> = new <type>[<integer>];$
\end{center}
where $<integer>$ is greater than 0. For example, if we want to create an array of \verb|int| variables with 10 elements, we can construct it like this:
\begin{lstlisting}
int[] array = new int[10]; // 10 elements
\end{lstlisting}
Now how do we access or modify elements of an array? Here are some code examples on how to do so:
\begin{lstlisting}
int[] array = new int[100]; // 100 elements
// By default, each element is 0 (primitives) or null (objects)
// indexes are same as in String, etc. 0 - 99 indexes in this array
array[0] = 10; // set first element to 10
array[1] = 9; // set second element to 9
array[99] = 255; // set last element to 255
// array[100] = 100; // ArrayIndexOutOfBoundsException, because index is larger than the highest addressable one
// array[-1] = -1; // Also an ArrayIndexOutOfBoundsException
// Cannot assign/get different types or non-integer indexes:
// String s = array[10]; // compiler error
// array[10] = new String(); // compiler error
// System.out.println(array[2.5]); // compiler error
// However there is a ".length" attribute we can use (no parentheses):
System.out.println(array.length); // 100
// Therefore, we can set to the last element without a hardcoded number:
array[array.length-1] = 100;
// Getting an element is the same:
int n = array[56]; // get index 56 element
// We can loop over elements:
for(int i = 0; i < array.length; i++) {
System.out.print(array[i] + " ");
}
// Or equivalently, the range-based for loop:
for(int i : array) { // "for each i in array"
System.out.print(i + " ");
}
\end{lstlisting}
\subsection{Written Exercises}
\begin{enumerate}
\item What are the indices for the first and last positions of any array?
\item Immediately after instantiating a new array of primitives (\verb|int|s, \verb|double|s, etc.), what fills the array? What about an array of objects?
\item What happens when you try to access an array element past the end of the array?
\item Use the following array \verb|x| to answer the following questions:
\begin{table}[h]
\begin{tabular}{lllllll}
4 & 8 & 5 & 1 & 6 & 3 & 2
\end{tabular}
\end{table}
\begin{enumerate}
\item[a)]What value is given by x[1]?
\item[b)]What value is given by x[6]?
\item[c)]What value is given by x[7]?
\item[d)]What value is given by x.length?
\end{enumerate}
\end{enumerate}
\subsection{Programming Exercises}
\begin{enumerate}
\item Instantiate three arrays called \verb|x|, \verb|y|, and \verb|z| of type \verb|int|, \verb|String|, and \verb|BankAccount| (respectively), all of size 10.
\item Write a \verb|for|-loop to sum all of the elements of an array \verb|x| of type \verb|int|.
\item Write a \verb|for|-loop to multiply by 2 each element in an array x of type \verb|int|.
\item Write code to store the largest number in an \verb|int| array \verb|x| into a variable called \verb|max|.
\item Write code to count how many numbers in an array \verb|x| are strictly larger than 4, and store that total in a variable called \verb|total|.
\item Write code to print out every other element in an array \verb|x| separated by tabs.
\item Write code to shift each number in an array \verb|x| one place to the right (Note: there will be 2 copies of the 1st element when the code finishes).
\item Write code to print the contents of an array \verb|x| in reverse order, one element for each line.
\item Write a method called \verb|append| that appends the two \verb|int| arrays passed as arguments and returns an array of type \verb|int| as the result. For example, if the first array argument was \{1, 2, 3\}, and the second was \{4, 5, 6, 7\}, append returns \{1, 2, 3, 4, 5, 6, 7\}.
\item Write a method called \verb|findMin| that returns the smallest element in an \verb|int| array that is passed as an argument. For example, if the array was \{4, 7, 9, 12, 8, 1, 5\}, the method would return 1 (Hint: use an earlier exercise for this).
\end{enumerate} |
Description: An unofficial shirt from Australia featuring J.C., weapons, the flags of USA and Australia in front of a horizon of burning buildings under a crossbuster in the sky. The design was used as a concert poster at the Festival Hall, Melbourne, Australia show Nov. 9th 2007. Artist: Daymon Greulich. "Bad Religion" is printed on the back in the same font as on the front. |
// Copyright 2018 Jeremy Mason
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! \file sb_desc.c
//! Contains functions that actually calculate the spherical Bessel descriptors.
#include <cblas.h> // dscal
#include <math.h> // pow
#include <stdint.h> // uint32_t
#include <stdlib.h> // abort
#include "sbessel.h" // _sbessel
#include "sb_desc.h"
#include "sb_matrix.h" // sb_mat_malloc
#include "sb_structs.h" // sb_vec
#include "sb_utility.h" // SB_CHK_ERR
#include "sb_vector.h" // sb_vec_calloc
#include "safety.h"
// Lookup tables used in get_radial_basis. Built using function in `tables.c`.
static const double _u_data[10152] = {
#include "unl.tbl"
};
static const size_t _u_n_max = 140;
static const double _c1_data[10011] = {
#include "c1.tbl"
};
static const size_t _c1_n_max = 140;
static const double _c2_data[10011] = {
#include "c2.tbl"
};
static const size_t _c2_n_max = 140;
/// Calculates the radial basis functions for the spherical Bessel descriptors.
/// Numerical efficiency depends heavily on the lookup tables defined above,
/// allowing the function to be reduced to evaluating the recursion relations.
///
/// # Parameters
/// - `gnl`: pointer to a matrix to hold the result
/// - `r_data`: radial coordinates of the atoms
/// - `n_max`: defines the number of descriptors calculated
/// - `l`: order of the spherical Bessel functions
/// - `n_atom`: number of atoms in the environment
/// - `rc`: cutoff radius for the environment
///
/// # Returns
/// A copy of `gnl`
static sb_mat * get_radial_basis(
sb_mat * gnl,
double * r_data,
uint32_t l,
uint32_t n_atom,
double rc) {
// access lookup tables directly
const double * u_data = _u_data + l * (2 * _u_n_max - l + 5) / 2;
const double * c1_data = _c1_data + l * (2 * _c1_n_max - l + 3) / 2;
const double * c2_data = _c2_data + l * (2 * _c2_n_max - l + 3) / 2;
// gnl->n_cols
const size_t n_cols = gnl->n_cols;
// forward declaration of variables without initializations
size_t n, a;
double u0, u1, u2, d0, d1, e;
double * g_data;
// fnl built in gnl
g_data = gnl->data;
for (n = 0; n < n_cols; ++n) {
for (a = 0; a < n_atom; ++a) {
g_data[a] = c1_data[n] * _sbessel(l, r_data[a] * u_data[n])
- c2_data[n] * _sbessel(l, r_data[a] * u_data[n + 1]);
}
g_data += n_atom;
}
sb_mat_smul(gnl, pow(rc, -1.5));
// initialize quantities used for recursion
u1 = SB_SQR(u_data[0]);
u2 = SB_SQR(u_data[1]);
d1 = 1.;
// convert to gnl
g_data = gnl->data;
for (n = 1; n < n_cols; ++n) {
u0 = u1;
u1 = u2;
u2 = SB_SQR(u_data[n + 1]);
e = (u0 * u2) / ((u0 + u1) * (u1 + u2));
d0 = d1;
d1 = 1. - e / d0;
g_data += n_atom;
cblas_dscal(n_atom, 1. / sqrt(d1), g_data, 1);
cblas_daxpy(n_atom, sqrt(e / (d1 * d0)), g_data - n_atom, 1, g_data, 1);
}
return gnl;
}
/// Calculates spherical Bessel descriptors for the given atomic environment.
/// `desc` should contain space for the descriptors, labelled by (n, l) and
/// ordered lexicographically. `disp` should contain the relative Cartesian
/// coordinates of the surrounding atoms in the format `[x_1, y_1, z_1, ...]`.
/// `weights` should contain the weights used in the construction of the
/// neighbor density function (e.g., `[1., ...]`). The `restrict` qualifier is
/// not used, to help with portability.
///
/// # Parameters
/// - `desc`: pointer to an array to hold the result
/// - `disp`: pointer to an array of the relative displacements
/// - `weights`: pointer to an array of the atomic weights
/// - `rc`: cutoff radius for the environment
/// - `n_atom`: number of atoms in the environment
/// - `n_max`: defines the number of descriptors calculated
///
/// # Returns
/// A copy of `desc`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `desc`, `disp`, and `weights` are not `NULL`
/// - `SAFE_FINITE`: `rc` is nonnegative
///
/// # Warning
/// You are responsible for ensuring that enough memory is allocated for the
/// relevant arrays, and should expect undefined behavior otherwise. The
/// lengths should be:
/// - `desc`: `(n_max + 1) * (n_max + 2) / 2`
/// - `disp`: at least `3 * n_atom`
/// - `weights`: at least `n_atom`
///
/// # Examples
/// ```
/// #include <stddef.h> // size_t
/// #include <stdio.h> // printf
/// #include <stdint.h> // uint32_t
/// #include "sb_desc.h" // sb_descriptors
///
/// /// An example where `sb_descriptors()` is used to calculate the spherical
/// /// Bessel descriptors for an atomic environment containing four atoms; the
/// /// first and second descriptors should be `0.031870` and `0.138078`.
/// int main(void) {
/// // Sets the number of descriptors returned.
/// uint32_t n_max = 4;
///
/// // Allocate memory for the result.
/// double desc[15] = { 0. };
///
/// // Number of atoms in the environment.
/// uint32_t n_atom = 4;
///
/// // Displacements to the surrounding atoms in Angstroms.
/// // [x_1, y_1, z_1, ...]
/// double disp[12] = {
/// 1.3681827, -1.3103517, -1.3131874,
/// -1.5151760, 1.3360077, -1.3477119,
/// -1.3989598, -1.2973683, 1.3679189,
/// 1.2279369, 1.3400378, 1.4797429
/// };
///
/// // Weights for the surrounding atoms.
/// double weights[4] = { 1., 1., 1., 1. };
///
/// // Cutoff radius in Angstroms.
/// double rc = 3.7711;
///
/// sb_descriptors(desc, disp, weights, rc, n_atom, n_max);
///
/// // Output the result
/// // Labelled by (n, l), ordered lexicographically
/// printf("SB descriptors:\n");
/// for (size_t a = 0; a < (n_max + 1) * (n_max + 2) / 2; ++a)
/// printf("%.6f\n", desc[a]);
///
/// printf("Completed successfully!\n");
/// }
/// ```
double * sb_descriptors(
double * desc_arr,
double * disp_arr,
const double * weights_arr,
const double rc,
const uint32_t n_atom,
const uint32_t n_max) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!desc_arr, abort(), "sb_descriptors: desc cannot be NULL");
SB_CHK_ERR(!disp_arr, abort(), "sb_descriptors: disp cannot be NULL");
SB_CHK_ERR(!weights_arr, abort(), "sb_descriptors: weights cannot be NULL");
#endif
#ifdef SAFE_FINITE
SB_CHK_ERR(rc < 0., abort(), "sb_descriptors: rc cannot be negative");
#endif
// Check that n_max is within limit defined by tables
SB_CHK_ERR(n_max > _u_n_max || n_max > _c1_n_max || n_max > _c2_n_max,
abort(), "sb_descriptors: n_max above limit defined by lookup tables");
// Convert raw pointers to sb_vec and sb_mat
sb_mat * disp = malloc(sizeof(sb_mat));
SB_CHK_ERR(!disp, abort(), "sb_descriptors: failed to allocate disp");
disp->n_rows = 3;
disp->n_cols = n_atom;
disp->n_elem = 3 * n_atom;
disp->data = disp_arr;
double * data1, * data2, * data3;
size_t a, b;
// Calculate radial coordinates
sb_vec * radius = sb_vec_calloc(n_atom, 'r');
data1 = disp->data;
data2 = radius->data;
for (a = 0; a < n_atom; ++a) {
for (b = 0; b < 3; ++b) {
*data2 += SB_SQR(data1[b]);
}
data1 += 3;
data2 += 1;
}
sb_vec_sqrt(radius);
// Normalize displacement vectors
sb_mat_vdiv(disp, radius, 'c');
sb_vec_smul(radius, 1. / rc);
// Calculate angle cosines
sb_mat * gamma = sb_mat_malloc(n_atom, n_atom);
sb_mat_mm_mul(gamma, disp, disp, "tn");
// Legendre polynomials
sb_mat * * lp = malloc((n_max + 1) * sizeof(sb_mat *));
SB_CHK_ERR(!lp, abort(), "sb_descriptors: failed to allocate lp");
for (a = 0; a <= n_max; ++a) {
lp[a] = sb_mat_malloc(n_atom, n_atom);
}
sb_mat_set_all(lp[0], 1.);
  if (n_max >= 1) // guard: lp[1] only exists when n_max >= 1
    sb_mat_memcpy(lp[1], gamma);
for (a = 2; a <= n_max; ++a) { // l = a
sb_mat_memcpy(lp[a], gamma);
sb_mat_pmul(lp[a], lp[a - 1]);
sb_mat_smul(lp[a], (2. * a - 1.) / (a - 1.));
sb_mat_psub(lp[a], lp[a - 2]);
sb_mat_smul(lp[a], (a - 1.) / a);
}
// Include multiplier here to simplify calculation below
for (a = 0; a <= n_max; ++a) { // l = a
sb_mat_smul(lp[a], (2. * a + 1.) / 12.566370614359172);
}
// Radial basis functions
sb_mat * * gnl = malloc((n_max + 1) * sizeof(sb_mat *));
SB_CHK_ERR(!gnl, abort(), "sb_descriptors: failed to allocate gnl");
for (a = 0; a <= n_max; ++a) { // l = a
gnl[a] = sb_mat_malloc(n_atom, n_max - a + 1);
get_radial_basis(gnl[a], radius->data, a, n_atom, rc);
// Scale by the weights
for (b = 0; b < n_atom; ++b) {
cblas_dscal(gnl[a]->n_cols, weights_arr[b], gnl[a]->data + b, n_atom);
}
}
// radius can be used for workspace
data1 = radius->data;
for (a = 0; a <= n_max; ++a) { // l = a
data2 = lp[a]->data;
data3 = gnl[a]->data;
for (b = a; b <= n_max; ++b) { // n = b
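      // desc is packed by (n, l) with l <= n: linear index n * (n + 1) / 2 + l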
cblas_dgemv(CblasColMajor, CblasNoTrans, n_atom, n_atom,
1., data2, n_atom, data3 + (b - a) * n_atom, 1, 0., data1, 1);
desc_arr[b * (b + 1) / 2 + a] = cblas_ddot(n_atom, data3 + (b - a) * n_atom, 1, data1, 1);
}
}
// Free memory
for (a = 0; a <= n_max; ++a) {
SB_MAT_FREE_ALL(lp[a], gnl[a]);
}
SB_FREE_ALL(disp, lp, gnl);
SB_VEC_FREE_ALL(radius);
SB_MAT_FREE_ALL(gamma);
return desc_arr;
}
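/* Illustrative sketch (not part of the library itself): allocating the output
 * buffer on the heap, sized per the documented requirement of
 * (n_max + 1) * (n_max + 2) / 2 doubles. The helper name is an assumption made
 * for this example only; enable the runtime checks described above by
 * compiling with -DSAFE_MEMORY and -DSAFE_FINITE. */
#include <stdint.h>  // uint32_t
#include <stdlib.h>  // calloc
static double * sb_alloc_desc(uint32_t n_max) {
  return calloc((size_t)(n_max + 1) * (n_max + 2) / 2, sizeof(double));
}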
|
#include <allegro.h>
#include "loadpng.h"
#include <cmath>
#include <boost/smart_ptr.hpp>
#include <boost/any.hpp>
#include <iostream>
#include <fstream>
#include <map>
#include <string>
#include <algorithm>
#include "Scanliner.hpp"
#include "XYDistanceShader.hpp"
#include "DistanceShader.hpp"
#include "ShaderEngine.hpp"
#define FPS 20
#define WIDTH 640
#define HEIGHT 480
volatile bool close_button_pressed = false;
void close_button_handler(void) // callback fired when the window close button is clicked
{
close_button_pressed = true; // record that it was pressed
}
END_OF_FUNCTION(close_button_handler)
static bool lastkey[KEY_MAX]; // remembers which keys were already held down
bool pressed(int keynum) // true only on the key-down edge (key goes from up to down)
{
bool was_down = lastkey[keynum]; // avoid the unsequenced read/write of the original bitwise '&' version
lastkey[keynum] = key[keynum];
return !was_down && lastkey[keynum];
}
void nop(void *x) // deleter that does nothing
{
}
int main(int argc, char *argv[])
{
if(allegro_init())
{
std::clog << "Nie mozna uruchomic allegro";
return EXIT_FAILURE;
}
try
{
install_keyboard();
loadpng_init();
set_color_depth(32);
if(set_gfx_mode(GFX_AUTODETECT_WINDOWED, WIDTH, HEIGHT, 0, 0))
throw "Problem z ustawieniem trybu graficznego";
LOCK_FUNCTION(close_button_handler);
set_close_button_callback(close_button_handler);
std::map< std::string, boost::shared_ptr<BITMAP> > bitmaps;
bitmaps["screen"] = boost::shared_ptr<BITMAP>(screen,&nop); // wrap the screen bitmap without taking ownership (never freed)
std::istream* in = 0; // stream the script is read from
while(!close_button_pressed && !key[KEY_ESC]) // loop until the user wants to quit
{
if(pressed(KEY_SPACE)) // when space is pressed
{
if(!in) // if the script file has not been opened yet
{
std::cout << "Otwieram plik run.cfg... " << std::endl;
in = new std::fstream("run.cfg");
}
std::string command,op1,op2; // command and operand strings
while(!in->eof()) // until end of file
{
if(!(*in >> command)){ // read a command
continue; // leaves the loop on eof
}
if(command[0] == '#') { in->ignore(256,'\n'); continue;} // skip commented-out lines
if(command == "pause") break; // on "pause", leave the script-execution loop
// set the various drawing modes
if(command == "solid"){ solid_mode();std::cout << "Solid bliting mode enabled." << std::endl;continue;}
if(command == "add"){ set_add_blender(0,0,0,0);std::cout << "Additive bliting mode enabled." << std::endl;continue;}
if(command == "alpha"){ set_alpha_blender();std::cout << "Alpha bliting mode enabled." << std::endl;continue;}
if(command == "writealpha"){ set_write_alpha_blender();std::cout << "Write alpha bliting mode enabled." << std::endl;continue;}
if(!(*in >> op1)){ // read the 1st operand; on error
std::cout << "Brak 1 parametru. " << std::endl;
break; // bail out
}
if(command == "clear") // clear-screen command
{
int color=0;
char line[256];
in->getline(line,256);
std::sscanf(line,"%x",&color); // read the colour in hex
if(bitmaps[op1]) // if the bitmap named by operand 1 exists
clear_to_color(bitmaps[op1].get(),color); // clear it
std::cout << "Wyczyszczono " << op1 << std::endl;
continue;
}
if(!(*in >> op2)){ // read the 2nd operand; on error
std::cout << "Brak 2 parametru. " << std::endl;
break; //wyjdź
}
if(command == "load") // load-bitmap command
{
//set_color_conversion(COLORCONV_24_TO_32);
BITMAP *b = load_bitmap(op1.c_str(),NULL); // read it from disk
if(b) // if it loaded successfully
{
bitmaps[op2] = boost::shared_ptr<BITMAP>(b, &destroy_bitmap); // create a smart pointer that frees the bitmap
std::cout << "Obraz " << op1 << " załadowany jako " << op2 << std::endl;
}
else
{
std::cout << "Bład ładowania obrazu " << op1 << std::endl;
}
}
else
if(command == "save") // save-bitmap command
{
//set_color_conversion(COLORCONV_NONE);
if(bitmaps[op1]) // if the bitmap named by operand 1 exists
{
if( save_bitmap(op2.c_str(),bitmaps[op1].get(),NULL) ) // save it
std::cout << "Błąd zapisu bitmapy " << op1 << " jako " << op2 << "." << std::endl;
else
std::cout << "Obraz " << op1 << " zapisany jako " << op2 << std::endl;
}
else
{
std::cout << "Niema takiej bitmapy " << op1 << std::endl;
}
}
else
if(command == "blit") // blit command
{
if(bitmaps[op1] && bitmaps[op2]) // if both operands name valid bitmaps
{
draw_trans_sprite(bitmaps[op2].get(),bitmaps[op1].get(),0,0); // draw with translucency
std::cout << "Wyrysowano " << op1 << " na " << op2 << "." << std::endl;
}
else
std::cout << "Niepoprawne bitmapy." << std::endl;
}
else
if(command == "draw") // draw command
{
if(bitmaps[op1] && bitmaps[op2]) // if both operands name valid bitmaps
{
draw_sprite(bitmaps[op2].get(),bitmaps[op1].get(),0,0); // draw
std::cout << "Wyrysowano " << op1 << " na " << op2 << "." << std::endl;
}
else
std::cout << "Niepoprawne bitmapy." << std::endl;
}
else
if(command == "XYDistanceShader") // XYDistanceShader command
{
set_write_alpha_blender(); // enable alpha writing
float cd = 1.; // filter strength
int co = 0xFFFFFF; // colour
char line[32];
in->getline(line,32);
std::sscanf(line,"%f %x",&cd,&co); // read the parameters
ShaderEngine filter = XYDistanceShader(co,cd); // initialize the filter
std::cout << "Załadowano filer... ";
filter( bitmaps[op1] , bitmaps[op2] ); // run the filter
std::cout << "done. " << std::endl;
}else
if(command == "DistanceShader")
{
set_write_alpha_blender(); // enable alpha writing
float cd = 1; // filter strength
int co = 0xFFFFFF; // colour
char line[32];
in->getline(line,32);
std::sscanf(line,"%f %x",&cd,&co); // read the parameters
ShaderEngine filter = DistanceShader(co,cd); // initialize the filter
std::cout << "Załadowano filer... ";
filter(bitmaps[op1],bitmaps[op2]); // run the filter
std::cout << "done. " << std::endl;
}else
std::cout << "Nieznana komenda " << command << std::endl;
}
if(in->eof())
{
std::cout << "Skrypt skonczony" << std::endl;
delete in;
in=0;
}
}
vsync();
rest(25);
}
}
catch(char const* msg)
{
allegro_message(msg);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
END_OF_MAIN();
|
/*
* Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the COPYING file.
*/
#include <iostream>
#include <boost/filesystem.hpp>
#include <boost/locale.hpp>
int main(int argc, char *argv[])
{
  // UTF-8 bytes spelling "Ūnĭcōde" plus a combining mark; hex escapes avoid
  // narrowing errors when char is signed
  char utf8[] = "\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD";
std::string utf8xx(utf8);
// Get the default locale
  std::locale loc = boost::locale::generator().generate("");
// Set the global locale to loc
std::locale::global(loc);
// Make boost.filesystem use it by default
boost::filesystem::path::imbue(std::locale());
// Create the path ("foo" should be utf-8)
boost::filesystem::path path("foo");
path /= "bar";
path /= utf8xx;
std::cout << "path:" << path.string() << std::endl;
return 0;
}
|
Formal statement is: theorem Janiszewski: fixes a b :: complex assumes "compact S" "closed T" and conST: "connected (S \<inter> T)" and ccS: "connected_component (- S) a b" and ccT: "connected_component (- T) a b" shows "connected_component (- (S \<union> T)) a b" Informal statement is: If $S$ is compact, $T$ is closed, $S \cap T$ is connected, and the points $a$ and $b$ lie in the same connected component of the complement of $S$ and in the same connected component of the complement of $T$, then $a$ and $b$ lie in the same connected component of the complement of $S \cup T$. |
[STATEMENT]
lemma gstate_rel_sv[relator_props,simp,intro!]: "single_valued gstate_rel"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. single_valued gstate_rel
[PROOF STEP]
by (simp add: gstate_rel_def) |
lemma convergent_imp_Bseq: "convergent f \<Longrightarrow> Bseq f" |
theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p :=
begin
apply and.intro hp,
exact and.intro hq hp
end
theorem test' (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p :=
by exact and.intro hp (and.intro hq hp)
#print test
example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) :=
begin
apply iff.intro,
intro h,
apply or.elim (and.right h),
intro hq,
apply or.inl,
apply and.intro,
exact h.left,
exact hq,
intro hr,
apply or.inr,
apply and.intro,
exact h.left,
exact hr,
intro h,
apply or.elim h,
intro hpq,
apply and.intro,
exact hpq.left,
apply or.inl,
exact hpq.right,
intro hpr,
apply and.intro,
exact hpr.left,
apply or.inr,
exact hpr.right
end
example : ∀ a b c : ℕ, a = b → a = c → c = b :=
begin
intros a b c h1 h2,
exact eq.trans (eq.symm h2) h1,
end
variables x y z w : ℕ
example (h1 : x = y) (h2 : y = z) (h3 : z = w) : x = w :=
begin
apply eq.trans h1,
apply eq.trans h2,
assumption
end
example : ∀ a b c : ℕ, a = b → a = c → c = b :=
begin
intros,
transitivity a,
symmetry,
repeat { assumption},
end
example : ∃ a : ℕ, 5 = a :=
begin
apply exists.intro,
reflexivity,
end
example : ∃ a : ℕ, a = a :=
begin
fapply exists.intro,
exact 0,
reflexivity
end
example (x : ℕ) : x = x :=
begin
revert x,
intro y,
refl,
end
example (p q : Prop) : p ∨ q → q ∨ p :=
begin
intro h,
cases h with hp hq,
right, exact hp,
left, exact hq,
end
example (p q : Prop) : p ∧ q → q ∧ p :=
begin
intro h,
cases h with hp hq,
constructor, exact hq, exact hp,
end
example (p q : ℕ → Prop) : (∃ x, p x) → ∃ x, p x ∨ q x :=
begin
intro h,
cases h with x px,
constructor, left, exact px,
end
universes u v
def swap_pair {α : Type u} {β : Type v} : α × β → β × α :=
begin
intro p,
cases p with ha hb,
constructor, exact hb, exact ha
end
def swap_sum {α : Type u} {β : Type v} : α ⊕ β → β ⊕ α :=
begin
intro p,
cases p with ha hb,
right, exact ha,
left, exact hb
end
open nat
example (P : ℕ → Prop) (h0 : P 0) (h1 : ∀ n, P (succ n)) (m : ℕ) :
P m :=
begin
cases m with m', exact h0,
exact h1 m',
end
example (p q : Prop) : p ∧ ¬ p → q :=
begin
intro h, cases h, contradiction
end
|
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__34.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_on_inv__34 imports n_germanSymIndex_base
begin
section{*All lemmas on causal relation between inv__34 and some rule r*}
lemma n_RecvReqSVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqS N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvReqEVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__0Vsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__34:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__34 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__34:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__34:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvInvAckVsinv__34:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvInvAck i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__34:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__34:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
using HackerNewsSlackApp
using Test
@testset "HackerNewsSlackApp.jl" begin
# Write your own tests here.
end
|
Formal statement is: proposition contractible_imp_simply_connected: fixes S :: "_::real_normed_vector set" assumes "contractible S" shows "simply_connected S" Informal statement is: If $S$ is contractible, then $S$ is simply connected. |
Of the captured ships, several were purchased and enjoyed long careers in the Royal Navy, in particular the two 80-gun ships HMS Sans Pareil which was decommissioned in 1802 but not broken up until 1842, and HMS Juste, which was a popular command until her decommissioning in 1802 at the Peace of Amiens. Of the four 74-gun prizes, Achille and Northumberland (both <unk> built in the late 1770s) were broken up as unserviceable soon after arrival in Britain, while Impétueux was destroyed in a dockyard fire on 24 August 1794 while undergoing repairs. America, the final prize, was taken into the Royal Navy as HMS America but renamed HMS <unk> in July 1795 and remained in service until 1813. The combined prize money for these ships was £201,096 (the equivalent of £21,000,000 as of 2016), divided among the ships under Lord Howe's command.
|
open import lib
open import eq-reas-nouni
equiv = _≡_
Val = nat
data Expn : Set where
val : Val -> Expn
plus : Expn -> Expn -> Expn
eval : Expn -> Val
eval (val v) = v
eval (plus e1 e2) = (eval e1) + (eval e2)
data evalsTo : Expn -> Val -> Set where
e-val : forall {v : Val}
------------------------
-> (evalsTo (val v) v)
e-add : forall {e1 e2 : Expn}{v1 v2 : Val}
-> (evalsTo e1 v1)
-> (evalsTo e2 v2)
-------------------------------------
-> (evalsTo (plus e1 e2) (v1 + v2))
e-thm-fwd : forall {e : Expn}{v : Val}
-> evalsTo e v -> equiv (eval e) v
e-thm-fwd (e-val{v}) =
begin
eval (val v)
equiv[ refl ]
v
qed
e-thm-fwd (e-add{e1}{e2}{v1}{v2} e1-evalsTo-v1 e2-evalsTo-v2) =
let
eval-e1-is-v1 = e-thm-fwd e1-evalsTo-v1
eval-e2-is-v2 = e-thm-fwd e2-evalsTo-v2
in begin
eval (plus e1 e2)
equiv[ refl ]
(eval e1) + (eval e2)
equiv[ cong2 _+_ eval-e1-is-v1 eval-e2-is-v2 ]
v1 + v2
qed
e-thm-alt : forall (e : Expn) -> evalsTo e (eval e)
e-thm-alt (val v) = e-val
e-thm-alt (plus e1 e2) = (e-add (e-thm-alt e1) (e-thm-alt e2))
|
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.natural_isomorphism
import logic.equiv.defs
/-!
# Full and faithful functors
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We define typeclasses `full` and `faithful`, decorating functors.
## Main definitions and results
* Use `F.map_injective` to retrieve the fact that `F.map` is injective when `[faithful F]`.
* Similarly, `F.map_surjective` states that `F.map` is surjective when `[full F]`.
* Use `F.preimage` to obtain preimages of morphisms when `[full F]`.
* We prove some basic "cancellation" lemmas for full and/or faithful functors, as well as a
construction for "dividing" a functor by a faithful functor, see `faithful.div`.
* `full F` carries data, so definitional properties of the preimage can be used when using
`F.preimage`. To obtain an instance of `full F` non-constructively, you can use `full_of_exists`
and `full_of_surjective`.
See `category_theory.equivalence.of_fully_faithful_ess_surj` for the fact that a functor is an
equivalence if and only if it is fully faithful and essentially surjective.
-/
-- declare the `v`'s first; see `category_theory.category` for an explanation
universes v₁ v₂ v₃ u₁ u₂ u₃
namespace category_theory
variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₂} D]
/--
A functor `F : C ⥤ D` is full if for each `X Y : C`, `F.map` is surjective.
In fact, we use a constructive definition, so the `full F` typeclass contains data,
specifying a particular preimage of each `f : F.obj X ⟶ F.obj Y`.
See <https://stacks.math.columbia.edu/tag/001C>.
-/
class full (F : C ⥤ D) :=
(preimage : ∀ {X Y : C} (f : (F.obj X) ⟶ (F.obj Y)), X ⟶ Y)
(witness' : ∀ {X Y : C} (f : (F.obj X) ⟶ (F.obj Y)), F.map (preimage f) = f . obviously)
restate_axiom full.witness'
attribute [simp] full.witness
/--
A functor `F : C ⥤ D` is faithful if for each `X Y : C`, `F.map` is injective.
See <https://stacks.math.columbia.edu/tag/001C>.
-/
class faithful (F : C ⥤ D) : Prop :=
(map_injective' [] : ∀ {X Y : C}, function.injective (@functor.map _ _ _ _ F X Y) . obviously)
restate_axiom faithful.map_injective'
namespace functor
variables {X Y : C}
lemma map_injective (F : C ⥤ D) [faithful F] : function.injective $ @functor.map _ _ _ _ F X Y :=
faithful.map_injective F
lemma map_iso_injective (F : C ⥤ D) [faithful F] :
function.injective $ @functor.map_iso _ _ _ _ F X Y :=
λ i j h, iso.ext (map_injective F (congr_arg iso.hom h : _))
/-- The specified preimage of a morphism under a full functor. -/
def preimage (F : C ⥤ D) [full F] (f : F.obj X ⟶ F.obj Y) : X ⟶ Y :=
full.preimage.{v₁ v₂} f
@[simp] lemma image_preimage (F : C ⥤ D) [full F] {X Y : C} (f : F.obj X ⟶ F.obj Y) :
F.map (preimage F f) = f :=
by unfold preimage; obviously
lemma map_surjective (F : C ⥤ D) [full F] : function.surjective (@functor.map _ _ _ _ F X Y) :=
λ f, ⟨F.preimage f, F.image_preimage f⟩
/-- Deduce that `F` is full from the existence of preimages, using choice. -/
noncomputable def full_of_exists (F : C ⥤ D)
(h : ∀ (X Y : C) (f : F.obj X ⟶ F.obj Y), ∃ p, F.map p = f) : full F :=
by { choose p hp using h, exact ⟨p, hp⟩ }
/-- Deduce that `F` is full from surjectivity of `F.map`, using choice. -/
noncomputable def full_of_surjective (F : C ⥤ D)
(h : ∀ (X Y : C), function.surjective (@functor.map _ _ _ _ F X Y)) : full F :=
full_of_exists _ h
end functor
section
variables {F : C ⥤ D} [full F] [faithful F] {X Y Z : C}
@[simp] lemma preimage_id : F.preimage (𝟙 (F.obj X)) = 𝟙 X :=
F.map_injective (by simp)
@[simp] lemma preimage_comp (f : F.obj X ⟶ F.obj Y) (g : F.obj Y ⟶ F.obj Z) :
F.preimage (f ≫ g) = F.preimage f ≫ F.preimage g :=
F.map_injective (by simp)
@[simp] lemma preimage_map (f : X ⟶ Y) :
F.preimage (F.map f) = f :=
F.map_injective (by simp)
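/- A small illustrative check (not part of mathlib itself): `F.map_injective`
   cancels a faithful functor on equalities of morphisms. -/
example (f g : X ⟶ Y) (h : F.map f = F.map g) : f = g := F.map_injective h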
variables (F)
namespace functor
/-- If `F : C ⥤ D` is fully faithful, every isomorphism `F.obj X ≅ F.obj Y` has a preimage. -/
@[simps]
def preimage_iso (f : (F.obj X) ≅ (F.obj Y)) : X ≅ Y :=
{ hom := F.preimage f.hom,
inv := F.preimage f.inv,
hom_inv_id' := F.map_injective (by simp),
inv_hom_id' := F.map_injective (by simp), }
@[simp] lemma preimage_iso_map_iso (f : X ≅ Y) :
F.preimage_iso (F.map_iso f) = f :=
by { ext, simp, }
end functor
/--
If the image of a morphism under a fully faithful functor is an isomorphism,
then the original morphism is also an isomorphism.
-/
lemma is_iso_of_fully_faithful (f : X ⟶ Y) [is_iso (F.map f)] : is_iso f :=
⟨⟨F.preimage (inv (F.map f)),
⟨F.map_injective (by simp), F.map_injective (by simp)⟩⟩⟩
/-- If `F` is fully faithful, we have an equivalence of hom-sets `X ⟶ Y` and `F X ⟶ F Y`. -/
@[simps]
def equiv_of_fully_faithful {X Y} : (X ⟶ Y) ≃ (F.obj X ⟶ F.obj Y) :=
{ to_fun := λ f, F.map f,
inv_fun := λ f, F.preimage f,
left_inv := λ f, by simp,
right_inv := λ f, by simp }
/-- If `F` is fully faithful, we have an equivalence of iso-sets `X ≅ Y` and `F X ≅ F Y`. -/
@[simps]
def iso_equiv_of_fully_faithful {X Y} : (X ≅ Y) ≃ (F.obj X ≅ F.obj Y) :=
{ to_fun := λ f, F.map_iso f,
inv_fun := λ f, F.preimage_iso f,
left_inv := λ f, by simp,
right_inv := λ f, by { ext, simp, } }
end
section
variables {E : Type*} [category E] {F G : C ⥤ D} (H : D ⥤ E) [full H] [faithful H]
/-- We can construct a natural transformation between functors by constructing a
natural transformation between those functors composed with a fully faithful functor. -/
@[simps]
def nat_trans_of_comp_fully_faithful (α : F ⋙ H ⟶ G ⋙ H) : F ⟶ G :=
{ app := λ X, (equiv_of_fully_faithful H).symm (α.app X),
naturality' := λ X Y f, by { dsimp, apply H.map_injective, simpa using α.naturality f, } }
/-- We can construct a natural isomorphism between functors by constructing a natural isomorphism
between those functors composed with a fully faithful functor. -/
@[simps]
def nat_iso_of_comp_fully_faithful (i : F ⋙ H ≅ G ⋙ H) : F ≅ G :=
nat_iso.of_components
(λ X, (iso_equiv_of_fully_faithful H).symm (i.app X))
(λ X Y f, by { dsimp, apply H.map_injective, simpa using i.hom.naturality f, })
lemma nat_iso_of_comp_fully_faithful_hom (i : F ⋙ H ≅ G ⋙ H) :
(nat_iso_of_comp_fully_faithful H i).hom = nat_trans_of_comp_fully_faithful H i.hom :=
by { ext, simp [nat_iso_of_comp_fully_faithful], }
lemma nat_iso_of_comp_fully_faithful_inv (i : F ⋙ H ≅ G ⋙ H) :
(nat_iso_of_comp_fully_faithful H i).inv = nat_trans_of_comp_fully_faithful H i.inv :=
by { ext, simp [←preimage_comp], dsimp, simp, }
/-- Horizontal composition with a fully faithful functor induces a bijection on
natural transformations. -/
@[simps]
def nat_trans.equiv_of_comp_fully_faithful : (F ⟶ G) ≃ (F ⋙ H ⟶ G ⋙ H) :=
{ to_fun := λ α, α ◫ 𝟙 H,
inv_fun := nat_trans_of_comp_fully_faithful H,
left_inv := by tidy,
right_inv := by tidy, }
/-- Horizontal composition with a fully faithful functor induces a bijection on
natural isomorphisms. -/
@[simps]
def nat_iso.equiv_of_comp_fully_faithful : (F ≅ G) ≃ (F ⋙ H ≅ G ⋙ H) :=
{ to_fun := λ e, nat_iso.hcomp e (iso.refl H),
inv_fun := nat_iso_of_comp_fully_faithful H,
left_inv := by tidy,
right_inv := by tidy, }
end
end category_theory
namespace category_theory
variables {C : Type u₁} [category.{v₁} C]
instance full.id : full (𝟭 C) :=
{ preimage := λ _ _ f, f }
instance faithful.id : faithful (𝟭 C) := by obviously
variables {D : Type u₂} [category.{v₂} D] {E : Type u₃} [category.{v₃} E]
variables (F F' : C ⥤ D) (G : D ⥤ E)
instance faithful.comp [faithful F] [faithful G] : faithful (F ⋙ G) :=
{ map_injective' := λ _ _ _ _ p, F.map_injective (G.map_injective p) }
lemma faithful.of_comp [faithful $ F ⋙ G] : faithful F :=
{ map_injective' := λ X Y, (F ⋙ G).map_injective.of_comp }
section
variables {F F'}
/-- If `F` is full, and naturally isomorphic to some `F'`, then `F'` is also full. -/
def full.of_iso [full F] (α : F ≅ F') : full F' :=
{ preimage := λ X Y f, F.preimage ((α.app X).hom ≫ f ≫ (α.app Y).inv),
witness' := λ X Y f, by simp [←nat_iso.naturality_1 α], }
lemma faithful.of_iso [faithful F] (α : F ≅ F') : faithful F' :=
{ map_injective' := λ X Y f f' h, F.map_injective
(by rw [←nat_iso.naturality_1 α.symm, h, nat_iso.naturality_1 α.symm]) }
end
variables {F G}
lemma faithful.of_comp_iso {H : C ⥤ E} [ℋ : faithful H] (h : F ⋙ G ≅ H) : faithful F :=
@faithful.of_comp _ _ _ _ _ _ F G (faithful.of_iso h.symm)
alias faithful.of_comp_iso ← _root_.category_theory.iso.faithful_of_comp
-- We could prove this from `faithful.of_comp_iso` using `eq_to_iso`,
-- but that would introduce a cyclic import.
lemma faithful.of_comp_eq {H : C ⥤ E} [ℋ : faithful H] (h : F ⋙ G = H) : faithful F :=
@faithful.of_comp _ _ _ _ _ _ F G (h.symm ▸ ℋ)
alias faithful.of_comp_eq ← _root_.eq.faithful_of_comp
variables (F G)
/-- “Divide” a functor by a faithful functor. -/
protected def faithful.div (F : C ⥤ E) (G : D ⥤ E) [faithful G]
(obj : C → D) (h_obj : ∀ X, G.obj (obj X) = F.obj X)
(map : Π {X Y}, (X ⟶ Y) → (obj X ⟶ obj Y))
(h_map : ∀ {X Y} {f : X ⟶ Y}, G.map (map f) == F.map f) :
C ⥤ D :=
{ obj := obj,
map := @map,
map_id' :=
begin
assume X,
apply G.map_injective,
apply eq_of_heq,
transitivity F.map (𝟙 X), from h_map,
rw [F.map_id, G.map_id, h_obj X]
end,
map_comp' :=
begin
assume X Y Z f g,
apply G.map_injective,
apply eq_of_heq,
transitivity F.map (f ≫ g), from h_map,
rw [F.map_comp, G.map_comp],
congr' 1;
try { exact (h_obj _).symm };
exact h_map.symm
end }
-- This follows immediately from `functor.hext` (`functor.hext h_obj @h_map`),
-- but importing `category_theory.eq_to_hom` causes an import loop:
-- category_theory.eq_to_hom → category_theory.opposites →
-- category_theory.equivalence → category_theory.fully_faithful
lemma faithful.div_comp (F : C ⥤ E) [faithful F] (G : D ⥤ E) [faithful G]
(obj : C → D) (h_obj : ∀ X, G.obj (obj X) = F.obj X)
(map : Π {X Y}, (X ⟶ Y) → (obj X ⟶ obj Y))
(h_map : ∀ {X Y} {f : X ⟶ Y}, G.map (map f) == F.map f) :
(faithful.div F G obj @h_obj @map @h_map) ⋙ G = F :=
begin
casesI F with F_obj _ _ _, casesI G with G_obj _ _ _,
unfold faithful.div functor.comp,
unfold_projs at h_obj,
have: F_obj = G_obj ∘ obj := (funext h_obj).symm,
substI this,
congr,
funext,
exact eq_of_heq h_map
end
lemma faithful.div_faithful (F : C ⥤ E) [faithful F] (G : D ⥤ E) [faithful G]
(obj : C → D) (h_obj : ∀ X, G.obj (obj X) = F.obj X)
(map : Π {X Y}, (X ⟶ Y) → (obj X ⟶ obj Y))
(h_map : ∀ {X Y} {f : X ⟶ Y}, G.map (map f) == F.map f) :
faithful (faithful.div F G obj @h_obj @map @h_map) :=
(faithful.div_comp F G _ h_obj _ @h_map).faithful_of_comp
instance full.comp [full F] [full G] : full (F ⋙ G) :=
{ preimage := λ _ _ f, F.preimage (G.preimage f) }
/-- If `F ⋙ G` is full and `G` is faithful, then `F` is full. -/
def full.of_comp_faithful [full $ F ⋙ G] [faithful G] : full F :=
{ preimage := λ X Y f, (F ⋙ G).preimage (G.map f),
witness' := λ X Y f, G.map_injective ((F ⋙ G).image_preimage _) }
/-- If `F ⋙ G` is full and `G` is faithful, then `F` is full. -/
def full.of_comp_faithful_iso {F : C ⥤ D} {G : D ⥤ E} {H : C ⥤ E} [full H] [faithful G]
(h : F ⋙ G ≅ H) : full F :=
@full.of_comp_faithful _ _ _ _ _ _ F G (full.of_iso h.symm) _
/--
Given a natural isomorphism between `F ⋙ H` and `G ⋙ H` for a fully faithful functor `H`, we
can 'cancel' it to give a natural iso between `F` and `G`.
-/
def fully_faithful_cancel_right {F G : C ⥤ D} (H : D ⥤ E)
[full H] [faithful H] (comp_iso: F ⋙ H ≅ G ⋙ H) : F ≅ G :=
nat_iso.of_components
(λ X, H.preimage_iso (comp_iso.app X))
(λ X Y f, H.map_injective (by simpa using comp_iso.hom.naturality f))
@[simp]
lemma fully_faithful_cancel_right_hom_app {F G : C ⥤ D} {H : D ⥤ E}
[full H] [faithful H] (comp_iso: F ⋙ H ≅ G ⋙ H) (X : C) :
(fully_faithful_cancel_right H comp_iso).hom.app X = H.preimage (comp_iso.hom.app X) :=
rfl
@[simp]
lemma fully_faithful_cancel_right_inv_app {F G : C ⥤ D} {H : D ⥤ E}
[full H] [faithful H] (comp_iso: F ⋙ H ≅ G ⋙ H) (X : C) :
(fully_faithful_cancel_right H comp_iso).inv.app X = H.preimage (comp_iso.inv.app X) :=
rfl
end category_theory
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj20synthconj4 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus (mult lv0 lv1) lv2) (plus (mult lv1 lv0) lv2)).
Admitted.
QuickChick conj20synthconj4.
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e450m2e225m1_10limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
ds = read.csv("Position_Salaries.csv")
ds = ds[, 2:3]
# Fitting and predicting
lin_reg = lm(Salary ~.,
data = ds)
poly_reg = lm(Salary ~ poly(Level, 4),
data = ds)
y_pred = predict(poly_reg, data.frame(Level =6.5))
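# For comparison (illustrative addition, not in the original script): the same
# level-6.5 prediction from the plain linear fit
y_pred_lin = predict(lin_reg, data.frame(Level = 6.5))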
# Plotting
X_grid = seq(min(ds$Level), max(ds$Level), 0.1)
library(ggplot2)
ggplot() +
geom_point(aes(x= ds$Level, y = ds$Salary),
colour = 'red') +
  geom_line(aes(x = X_grid, y = predict(lin_reg, newdata = data.frame(Level = X_grid))),
            colour = 'blue') +
geom_line(aes(x = X_grid, y = predict(poly_reg, newdata = data.frame(Level = X_grid))),
colour = 'black') +
ggtitle("Polynomial regression salary progression") +
xlab("Position") +
ylab("Salary")
|
If $f$ is holomorphic on a punctured neighborhood of $z$, then $z$ is an isolated singularity of $f$. |
State Before: α : Type u_1
g : GeneralizedContinuedFraction α
n : ℕ
⊢ TerminatedAt g n ↔ Stream'.Seq.get? (partialDenominators g) n = none State After: no goals Tactic: rw [terminatedAt_iff_s_none, part_denom_none_iff_s_none] |
Require Import VST.floyd.base.
Ltac make_ground_PTree a :=
let a := eval hnf in a in
match a with
| PTree.Leaf => constr:(a)
| PTree.Node ?l ?o ?r =>
let l := make_ground_PTree l in
let r := make_ground_PTree r in
let o := eval hnf in o in
constr:(PTree.Node l o r)
end.
Ltac simpl_PTree_get_old :=
repeat match goal with
| |- context [PTree.get ?i' ?t] =>
let i'' := eval hnf in i' in
change (PTree.get i' t) with
((fix get (A : Type) (i : positive) (m : PTree.t A) {struct i} : option A :=
match m with
| PTree.Leaf => None
| PTree.Node l o r =>
match i with
| (ii~1)%positive => get A ii r
| (ii~0)%positive => get A ii l
| 1%positive => o
end
end) _ i'' t)
end;
cbv iota zeta beta.
Ltac simpl_PTree_get :=
repeat match goal with
| |- context [PTree.get ?i' ?t] =>
let g := constr:(PTree.get i' t) in
let g := eval hnf in g in
change (PTree.get i' t) with g
end;
cbv iota zeta beta.
Ltac simpl_eqb_type :=
repeat
match goal with
| |- context [eqb_type ?t1 ?t2] =>
let b := eval hnf in (eqb_type t1 t2) in
change (eqb_type t1 t2) with b;
cbv beta iota zeta
end.
Ltac simpl_temp_types_get :=
repeat
match goal with
| |- context [(temp_types ?Delta) ! ?i] =>
let ret := eval hnf in ((temp_types Delta) ! i) in
change ((temp_types Delta) ! i) with ret
end.
Ltac pos_eqb_tac :=
let H := fresh "H" in
match goal with
| |- context [Pos.eqb ?i ?j] => destruct (Pos.eqb i j) eqn:H; [apply Pos.eqb_eq in H | apply Pos.eqb_neq in H]
end.
Definition VST_floyd_map {A B : Type} (f: A -> B): list A -> list B :=
fix map (l : list A) : list B := match l with
| nil => nil
| a :: t => f a :: map t
end.
Definition VST_floyd_app {A: Type}: list A -> list A -> list A :=
fix app (l m : list A) {struct l} : list A :=
match l with
| nil => m
| a :: l1 => a :: app l1 m
end.
Definition VST_floyd_concat {A: Type}: list (list A) -> list A :=
fix concat (l : list (list A)) : list A :=
match l with
| nil => nil
| x :: l0 => VST_floyd_app x (concat l0)
end.
|
dest <- fread('c:/perforce/daniel/ex/orig_data/destinations.csv')
train_13[hotel_continent==2 & hotel_market==701 & srch_destination_id==8260][,.N,by=hotel_cluster][order(-N)][1:10]
train_13[hotel_continent==2 & hotel_market==628][,.N,by=srch_destination_id][order(+N)][1:10]
clusters <- kmeans(dest[,2:150,with=F],50000,iter.max=100)
dest$cluster <- clusters$cluster
destClusters <- dest[,c('srch_destination_id','cluster'),with=F]
write.csv(destClusters,'c:/perforce/daniel/ex/statistics/clusterByDest_50k.csv',row.names=F,quote=F) |
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura, Jeremy Avigad
-/
prelude
import init.datatypes init.logic
open decidable
set_option structure.proj_mk_thm true
structure subtype {A : Type} (P : A → Prop) :=
tag :: (elt_of : A) (has_property : P elt_of)
namespace subtype
notation `{` binder ` | ` r:(scoped:1 P, subtype P) `}` := r
definition exists_of_subtype {A : Type} {P : A → Prop} : { x | P x } → ∃ x, P x
| (subtype.tag a h) := exists.intro a h
variables {A : Type} {P : A → Prop}
theorem tag_irrelevant {a : A} (H1 H2 : P a) : tag a H1 = tag a H2 :=
rfl
theorem tag_eq {a1 a2 : A} {H1 : P a1} {H2 : P a2} (H3 : a1 = a2) : tag a1 H1 = tag a2 H2 :=
eq.subst H3 (tag_irrelevant H1) H2
protected theorem eq : ∀ {a1 a2 : {x | P x}} (H : elt_of a1 = elt_of a2), a1 = a2
| (tag x1 H1) (tag x2 H2) := tag_eq
protected definition is_inhabited [instance] {a : A} (H : P a) : inhabited {x | P x} :=
inhabited.mk (tag a H)
protected definition has_decidable_eq [instance] [H : decidable_eq A] : ∀ s₁ s₂ : {x | P x}, decidable (s₁ = s₂)
| (tag v₁ p₁) (tag v₂ p₂) :=
decidable_of_decidable_of_iff (H v₁ v₂)
(iff.intro tag_eq (λh, subtype.no_confusion h (λa b, a)))
end subtype
|
!>  Compute forces and power integrated over boundary patch groups.
!!
!! @author Nathan A. Wukie (AFRL)
!! @date 7/12/2017
!! @note Modified directly from 'chidg airfoil' action
!!
!!
!---------------------------------------------------------------------------------------------
module mod_force
#include <messenger.h>
use mod_kinds, only: rk, ik
use mod_constants, only: ZERO, TWO, NO_ID, NO_DIFF
use mod_chidg_mpi, only: ChiDG_COMM
use type_chidg_data, only: chidg_data_t
use type_element_info, only: element_info_t
use type_chidg_worker, only: chidg_worker_t
use type_chidg_cache, only: chidg_cache_t
use type_cache_handler, only: cache_handler_t
use mpi_f08, only: MPI_AllReduce, MPI_REAL8, MPI_SUM
use ieee_arithmetic, only: ieee_is_nan
use DNAD_D
implicit none
contains
!> Compute force integrated over a specified patch group.
!!
!!
!! F = int[ (tau-p) dot n ] dPatch
!!
!!
!! @author Nathan A. Wukie (AFRL)
!! @date 7/12/2017
!! @note Modified directly from 'chidg airfoil' action
!!
!!
!! @param[in] data chidg_data instance
!! @param[in] patch_group Name of patch group over which the force will be integrated.
!! @result[out] force Integrated force vector: force = [f1, f2, f3]
!!
!-----------------------------------------------------------------------------------
subroutine report_forces(data,patch_group,force,power)
type(chidg_data_t), intent(inout) :: data
character(*), intent(in) :: patch_group
real(rk), intent(inout), optional :: force(3)
real(rk), intent(inout), optional :: power
integer(ik) :: group_ID, patch_ID, face_ID, &
idomain_l, ielement_l, iface, ierr
type(chidg_worker_t) :: worker
type(chidg_cache_t) :: cache
type(cache_handler_t) :: cache_handler
type(element_info_t) :: elem_info
real(rk) :: &
force_local(3), power_local
real(rk), allocatable, dimension(:) :: &
weights, det_jacobian_grid
real(rk), allocatable :: &
jacobian_grid(:,:,:), grid_velocity(:,:)
type(AD_D), allocatable, dimension(:) :: &
tau_11, tau_12, tau_13, &
tau_21, tau_22, tau_23, &
tau_31, tau_32, tau_33, &
stress_x, stress_y, stress_z, &
pressure, &
norm_1, norm_2, norm_3, &
norm_1_phys, norm_2_phys, norm_3_phys
call write_line('Computing Force...', io_proc=GLOBAL_MASTER)
! Initialize Chidg Worker references
call worker%init(data%mesh, data%eqnset(:)%prop, data%sdata, data%time_manager, cache)
! Get patch_group boundary group ID
group_ID = data%mesh%get_bc_patch_group_id(trim(patch_group))
! Make sure q is assembled so it doesn't hang when triggered in get_field
call data%sdata%q%assemble()
! Loop over domains/elements/faces for "patch_group"
force_local = ZERO
power_local = ZERO
if (group_ID /= NO_ID) then
do patch_ID = 1,data%mesh%bc_patch_group(group_ID)%npatches()
do face_ID = 1,data%mesh%bc_patch_group(group_ID)%patch(patch_ID)%nfaces()
idomain_l = data%mesh%bc_patch_group(group_ID)%patch(patch_ID)%idomain_l()
ielement_l = data%mesh%bc_patch_group(group_ID)%patch(patch_ID)%ielement_l(face_ID)
iface = data%mesh%bc_patch_group(group_ID)%patch(patch_ID)%iface(face_ID)
! Initialize element location object
elem_info = data%mesh%get_element_info(idomain_l,ielement_l)
call worker%set_element(elem_info)
worker%itime = 1
! Update the element cache and all models so they are available
call cache_handler%update(worker,data%eqnset,data%bc_state_group, components = 'all', &
face = NO_ID, &
differentiate = NO_DIFF, &
lift = .true.)
call worker%set_face(iface)
! Get pressure
if (worker%check_field_exists('Pressure')) then
pressure = worker%get_field('Pressure', 'value', 'boundary')
else
if (patch_ID == 1) call write_line('NOTE: Pressure not found in equation set, setting to zero.',io_proc=GLOBAL_MASTER)
pressure = ZERO*worker%get_field('Density', 'value', 'boundary')
end if
! Get shear stress tensor
if (worker%check_field_exists('Shear-11')) then
tau_11 = worker%get_field('Shear-11', 'value', 'boundary')
tau_22 = worker%get_field('Shear-22', 'value', 'boundary')
tau_33 = worker%get_field('Shear-33', 'value', 'boundary')
tau_12 = worker%get_field('Shear-12', 'value', 'boundary')
tau_13 = worker%get_field('Shear-13', 'value', 'boundary')
tau_23 = worker%get_field('Shear-23', 'value', 'boundary')
else
if (patch_ID == 1) call write_line('NOTE: Shear-## not found in equation set, setting to zero.',io_proc=GLOBAL_MASTER)
tau_11 = ZERO*worker%get_field('Density', 'value', 'boundary')
tau_22 = ZERO*worker%get_field('Density', 'value', 'boundary')
tau_33 = ZERO*worker%get_field('Density', 'value', 'boundary')
tau_12 = ZERO*worker%get_field('Density', 'value', 'boundary')
tau_13 = ZERO*worker%get_field('Density', 'value', 'boundary')
tau_23 = ZERO*worker%get_field('Density', 'value', 'boundary')
end if
! From symmetry
tau_21 = tau_12
tau_31 = tau_13
tau_32 = tau_23
! Add pressure component
tau_11 = tau_11 - pressure
tau_22 = tau_22 - pressure
tau_33 = tau_33 - pressure
! Get normal vectors and reverse, because we want outward-facing vector from
! the geometry.
norm_1 = -worker%normal(1)
norm_2 = -worker%normal(2)
norm_3 = -worker%normal(3)
! Transform normal vector with g*G^{-T} so our normal and Area correspond to quantities on the deformed grid
det_jacobian_grid = worker%get_det_jacobian_grid_face('value', 'face interior')
jacobian_grid = worker%get_inv_jacobian_grid_face('face interior')
grid_velocity = worker%get_grid_velocity_face('face interior')
norm_1_phys = det_jacobian_grid*(jacobian_grid(1,1,:)*norm_1 + jacobian_grid(2,1,:)*norm_2 + jacobian_grid(3,1,:)*norm_3)
norm_2_phys = det_jacobian_grid*(jacobian_grid(1,2,:)*norm_1 + jacobian_grid(2,2,:)*norm_2 + jacobian_grid(3,2,:)*norm_3)
norm_3_phys = det_jacobian_grid*(jacobian_grid(1,3,:)*norm_1 + jacobian_grid(2,3,:)*norm_2 + jacobian_grid(3,3,:)*norm_3)
!norm_1_phys = -worker%unit_normal_ale(1)
!norm_2_phys = -worker%unit_normal_ale(2)
!norm_3_phys = -worker%unit_normal_ale(3)
! But then need to add area scaling
! Compute \vector{n} dot \tensor{tau}
! : These should produce the same result since the tensor is
! : symmetric. Not sure which is more correct.
!
!stress_x = norm_1_phys*tau_11 + norm_2_phys*tau_21 + norm_3_phys*tau_31
!stress_y = norm_1_phys*tau_12 + norm_2_phys*tau_22 + norm_3_phys*tau_32
!stress_z = norm_1_phys*tau_13 + norm_2_phys*tau_23 + norm_3_phys*tau_33
stress_x = tau_11*norm_1_phys + tau_12*norm_2_phys + tau_13*norm_3_phys
stress_y = tau_21*norm_1_phys + tau_22*norm_2_phys + tau_23*norm_3_phys
stress_z = tau_31*norm_1_phys + tau_32*norm_2_phys + tau_33*norm_3_phys
! Integrate
weights = worker%mesh%domain(idomain_l)%faces(ielement_l,iface)%basis_s%weights_face(iface)
if (present(force)) then
force_local(1) = force_local(1) + sum( stress_x(:)%x_ad_ * weights)
force_local(2) = force_local(2) + sum( stress_y(:)%x_ad_ * weights)
force_local(3) = force_local(3) + sum( stress_z(:)%x_ad_ * weights)
end if
if (present(power)) then
power_local = power_local + sum( (stress_x(:)%x_ad_ * grid_velocity(:,1) * weights) + &
(stress_y(:)%x_ad_ * grid_velocity(:,2) * weights) + &
(stress_z(:)%x_ad_ * grid_velocity(:,3) * weights) )
end if
end do !iface
end do !ipatch
end if ! group_ID /= NO_ID
! Reduce result across processors
if (present(force)) call MPI_AllReduce(force_local,force,3,MPI_REAL8,MPI_SUM,ChiDG_COMM,ierr)
if (present(power)) call MPI_AllReduce(power_local,power,1,MPI_REAL8,MPI_SUM,ChiDG_COMM,ierr)
end subroutine report_forces
!******************************************************************************************
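    ! A minimal usage sketch (illustrative only): 'Airfoil' is an assumed patch
    ! group name, and `data` must be a fully initialized chidg_data_t instance.
    !
    !   real(rk) :: force(3), power
    !   call report_forces(data, 'Airfoil', force=force, power=power)
    !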
end module mod_force
|
echo on
% This script shows how to use the ga with a binary representation.
% You should see the demos for
% more information as well. gademo1, gademo2, gademo3
global bounds
% Setting the seed back to the beginning for comparison sake
rand('seed',0)
% Crossover Operators
xFns = 'simpleXover';
xOpts = [.4];
% Mutation Operators
mFns = 'binaryMutation';
mOpts = [0.005];
% Termination Operators
termFns = 'maxGenTerm';
termOps = [200]; % 200 Generations
% Selection Function
selectFn = 'roulette';
selectOps = [];
% Evaluation Function
evalFn = 'gaMichEval';
evalOps = [];
type gaMichEval
% Bounds on the variables
bounds = [-3 12.1; 4.1 5.8];
% GA Options [epsilon float/binary display]
gaOpts=[1e-6 0 1];
% Generate an initial population of size 20
startPop = initializega(20,bounds,'gaMichEval',[],[1e-6 0]);
% Let's run the GA
% Hit a return to continue
pause
[x endPop bestPop trace]=ga(bounds,evalFn,evalOps,startPop,gaOpts,...
termFns,termOps,selectFn,selectOps,xFns,xOpts,mFns,mOpts);
% x is the best solution found
x
% Hit a return to continue
pause
% endPop is the ending population
endPop
% Hit a return to continue
pause
% trace is a trace of the best value and average value of generations
trace
% Hit a return to continue
pause
% Plot the best over time
clf
plot(trace(:,1),trace(:,2));
% Hit a return to continue
pause
% Add the average to the graph
hold on
plot(trace(:,1),trace(:,3));
% Hit a return to continue
pause
% Let's increase the population size by running the defaults
%
rand('seed',0)
termOps=[100];
[x endPop bestPop trace]=ga(bounds,evalFn,evalOps,[],gaOpts,termFns,termOps,...
selectFn,selectOps);
% x is the best solution found
x
% Hit a return to continue
pause
% endPop is the ending population
endPop
% Hit a return to continue
pause
% trace is a trace of the best value and average value of generations
trace
% Hit a return to continue
pause
% Plot the best over time
clf
plot(trace(:,1),trace(:,2));
% Hit a return to continue
pause
% Add the average to the graph
hold on
plot(trace(:,1),trace(:,3));
echo off |
lemma nullstellensatz_univariate: "(\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longleftrightarrow> p dvd (q ^ (degree p)) \<or> (p = 0 \<and> q = 0)" |
Formal statement is: lemma differentiable_at_polynomial_function: fixes f :: "_ \<Rightarrow> 'a::euclidean_space" shows "polynomial_function f \<Longrightarrow> f differentiable (at a within S)" Informal statement is: If $f$ is a polynomial function, then $f$ is differentiable at $a$ within $S$. |
module Main
main : IO ()
main = do putStrLn $ show $ map (\x => x + 1) $ Just 3
putStrLn $ show $ map (*3) $ reverse [1..10] |
%Copyright (c) October,15 2008 by Varsha Hedau, UIUC. All rights reserved.
function [vp p All_lines]=getVP(imdir,imagename,DO_DISPLAY,savedir)
% getVP Get a triplet of orthogonal vanishing points for an image.
%For details see [1] Varsha Hedau, Derek Hoiem, David Forsyth, Recovering the Spatial
% Layout of Cluttered Rooms, in the Twelfth IEEE International Conference
% on Computer Vision, 2009.
% INPUT:
% image -imagename and image directory
%OUTPUT:
% vp  - three orthogonal vanishing points
% p - vote for each line and each vanishing point
% All_lines - detected line segments in the image [x1 x2 y1 y2 theta r]
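% Example call (illustrative; the directories and image name are placeholders):
%   [vp, p, All_lines] = getVP('./images/', 'room.jpg', 1, './results/');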
vp=[];p=[];lines=[];
img=imread([imdir imagename]);
%img=imresize(img,500/size(img,1));
[h w k]=size(img);
grayIm=rgb2gray(img);
[All_lines] = getLargeConnectedEdges(grayIm,30);
% discard lines near the image boundaries (imaging artifacts)
inds = find(sum(double(All_lines(:,1:2)>10),2) & sum(double(All_lines(:,1:2)<w-10),2) & ...
sum(double(All_lines(:,3:4)>10),2) & sum(double(All_lines(:,3:4)<h-10),2));
All_lines = All_lines(inds,:);
All_lines=[All_lines sqrt(((All_lines(:,1)-All_lines(:,2)).^2+(All_lines(:,3)-All_lines(:,4)).^2))];
maxl=max(All_lines(:,7));
imsize = size(grayIm);
%Computing intersections of all the lines
lines = All_lines;
Xpnts = ComputeIntersectionPoints(lines);
inds = find(~isnan(Xpnts(:,1)) & ~isnan(Xpnts(:,2)) & ...
~isinf(Xpnts(:,1)) & ~isinf(Xpnts(:,2)));
Xpnts = Xpnts(inds,:);
%Computing votes for every point from all lines
VoteArr = ComputeLinePtVote(lines,Xpnts);
Vote=sum(VoteArr,1);
%get the first point & remove the lines of this point
[vv ii]=sort(Vote,'descend');
vp(1:2)=Xpnts(ii(1),1:2);
Vote1 = VoteArr(:,ii(1));
active_lines = find((Vote1*maxl./All_lines(:,7))<0.8);
inactive_lines = find((Vote1*maxl./All_lines(:,7))>=0.8);
Vote1 = [Vote1(active_lines);Vote1(inactive_lines)];
lines = All_lines(active_lines,:);
%work with the remaining lines
Xpnts = ComputeIntersectionPoints(lines);
inds = find(~isnan(Xpnts(:,1)) & ~isnan(Xpnts(:,2)) & ...
~isinf(Xpnts(:,1)) & ~isinf(Xpnts(:,2)));
Xpnts = Xpnts(inds,:);
VoteArr = ComputeLinePtVote([lines;All_lines(inactive_lines,:)],Xpnts);
Vote=sum(VoteArr(1:size(lines,1),:),1);
[vv ii]=sort(Vote,'descend');
Vote = vv(:);
Xpnts=Xpnts(ii,:);
VoteArr = VoteArr(:,ii);
%Remove some of the points
[Xpnts,Vote,VoteArr] = RemoveRedundantPoints2(Xpnts,Vote,VoteArr,w,h);
% Vectorized orthogonality check
[pts2,pts1]=find(~triu(ones(length(Vote))));
npts=length(pts1);
orthochks=[];
for pt=1:100000:npts
tempinds = [pt:min(pt+100000-1,npts)];
temp_orthochks=chckothrogonalityvector(...
ones(length(tempinds),1)*vp(1:2),...
Xpnts(pts1(tempinds),:),...
Xpnts(pts2(tempinds),:),w,h);
orthochks = [orthochks;temp_orthochks(:)];
end
orthos = find(orthochks);
pts1 = pts1(orthos);
pts2 = pts2(orthos);
npts=length(pts1);
% Total vote computation for these points
totVote = zeros(npts,1);
for ln=1:length(Vote1)
Votes = [Vote1(ln)*ones(npts,1) VoteArr(ln,pts1)' VoteArr(ln,pts2)'];
Votes = max(Votes,[],2);
totVote = totVote+Votes;
end
totVote = [pts1(:) pts2(:) totVote(:)];
% lines = All_lines;
if size(totVote,1) > 0
[vv ii]=sort(totVote(:,3),'descend');
vp(3:4) = Xpnts(totVote(ii(1),1),:);
vp(5:6) = Xpnts(totVote(ii(1),2),:);
VoteArrTemp = ComputeLinePtVote(All_lines,[vp(1) vp(2);vp(3) vp(4);vp(5) vp(6)]);
p=[VoteArrTemp.*maxl./repmat(All_lines(:,7),[1 3]) zeros(size(All_lines,1),1)];%4th vp is outliers
ind=find(max(p(:,1:3),[],2)< 0.5);
p(ind,4)=1;
p=p./repmat(sum(p,2),[1 4]);
% [vv linemem] = max(VoteArrTemp,[],2);
[vv linemem] = max(p,[],2);
%Plot three vps
if DO_DISPLAY
figure(1000);plot(vp(1),vp(2),'r*');hold on;
imagesc(img);hold on;
plot(vp(1),vp(2),'r*');
plot(vp(3),vp(4),'g*');
plot(vp(5),vp(6),'b*');
% linemem(vv==0) = 4;
grp1=find(linemem==1);
grp2=find(linemem==2);
grp3=find(linemem==3);
grp4=find(linemem==4);
plot(All_lines(grp1, [1 2])', All_lines(grp1, [3 4])','r');
plot(All_lines(grp2, [1 2])', All_lines(grp2, [3 4])','g');
plot(All_lines(grp3, [1 2])', All_lines(grp3, [3 4])','b');
plot(All_lines(grp4, [1 2])', All_lines(grp4, [3 4])','c');
axis ij;axis equal;
saveas(1000,[savedir imagename(1:end-3) 'fig']);
close all
end
filename=fullfile(savedir,[imagename(1:end-4) '_vp.mat']);
save(filename,'vp','p','VoteArrTemp','All_lines');
end
return
|
Require Import GeoCoq.Tarski_dev.Definitions.
Section Euclid_def.
Context `{Tn:Tarski_neutral_dimensionless}.
(** First some statements needed for equivalence proofs
between different versions of the parallel postulate. *)
Definition decidability_of_parallelism := forall A B C D,
Par A B C D \/ ~ Par A B C D.
Definition decidability_of_not_intersection := forall A B C D,
~ (exists I, Col I A B /\ Col I C D) \/
~ ~ (exists I, Col I A B /\ Col I C D).
Definition decidability_of_intersection := forall A B C D,
(exists I, Col I A B /\ Col I C D) \/
~ (exists I, Col I A B /\ Col I C D).
(*
Definition decidability_of_intersection_in_a_plane :=
forall A B C D,
Coplanar A B C D ->
(exists I, Col I A B /\ Col I C D) \/
~ (exists I, Col I A B /\ Col I C D).
*)
Definition tarski_s_parallel_postulate := forall A B C D T,
Bet A D T -> Bet B D C -> A <> D ->
exists X Y, Bet A B X /\ Bet A C Y /\ Bet X T Y.
(** This is uniqueness of parallel postulate. *)
Definition playfair_s_postulate := forall A1 A2 B1 B2 C1 C2 P,
Par A1 A2 B1 B2 -> Col P B1 B2 ->
Par A1 A2 C1 C2 -> Col P C1 C2 ->
Col C1 B1 B2 /\ Col C2 B1 B2.
(** The sum of the angles of a triangle is the flat angle.
Notice that we do not use pi here,
because defining angle measure requires some continuity axioms. *)
Definition triangle_postulate := forall A B C D E F,
TriSumA A B C D E F -> Bet D E F.
(** A figure with three right angles is closed. *)
Definition bachmann_s_lotschnittaxiom := forall P Q R P1 R1,
P <> Q -> Q <> R -> Per P Q R -> Per Q P P1 -> Per Q R R1 ->
Coplanar P Q R P1 -> Coplanar P Q R R1 ->
exists S, Col P P1 S /\ Col R R1 S.
(** Transitivity of parallelism. *)
Definition postulate_of_transitivity_of_parallelism := forall A1 A2 B1 B2 C1 C2,
Par A1 A2 B1 B2 -> Par B1 B2 C1 C2 ->
Par A1 A2 C1 C2.
(** This is the converse of triangle_mid_par. *)
Definition midpoint_converse_postulate := forall A B C P Q,
~ Col A B C ->
Midpoint P B C -> Par A B Q P -> Col A C Q ->
Midpoint Q A C.
(** This is the converse of l12_21_b.
The alternate interior angles between two parallel lines are congruent. *)
Definition alternate_interior_angles_postulate := forall A B C D,
TS A C B D -> Par A B C D ->
CongA B A C D C A.
(** The consecutive interior angles between two parallel lines are supplementary. *)
Definition consecutive_interior_angles_postulate := forall A B C D,
OS B C A D -> Par A B C D -> SuppA A B C B C D.
(** If two lines are parallel, every perpendicular to one of the lines is perpendicular to the other. *)
Definition perpendicular_transversal_postulate := forall A B C D P Q,
Par A B C D -> Perp A B P Q -> Coplanar C D P Q ->
Perp C D P Q.
(** Two lines, each perpendicular to one of a pair of parallel lines, are parallel. *)
Definition postulate_of_parallelism_of_perpendicular_transversals :=
forall A1 A2 B1 B2 C1 C2 D1 D2,
Par A1 A2 B1 B2 -> Perp A1 A2 C1 C2 -> Perp B1 B2 D1 D2 ->
Coplanar A1 A2 C1 D1 -> Coplanar A1 A2 C1 D2 ->
Coplanar A1 A2 C2 D1 -> Coplanar A1 A2 C2 D2 ->
Par C1 C2 D1 D2.
(** If two lines are parallel then they are everywhere equidistant. *)
Definition universal_posidonius_postulate := forall A1 A2 A3 A4 B1 B2 B3 B4,
Par A1 A2 B1 B2 ->
Col A1 A2 A3 -> Col B1 B2 B3 -> Perp A1 A2 A3 B3 ->
Col A1 A2 A4 -> Col B1 B2 B4 -> Perp A1 A2 A4 B4 ->
Cong A3 B3 A4 B4.
(** A variant of Playfair's postulate useful in the proofs. *)
Definition alternative_playfair_s_postulate := forall A1 A2 B1 B2 C1 C2 P,
Perp2 A1 A2 B1 B2 P -> ~ Col A1 A2 P -> Col P B1 B2 -> Coplanar A1 A2 B1 B2 ->
Par A1 A2 C1 C2 -> Col P C1 C2 ->
Col C1 B1 B2 /\ Col C2 B1 B2.
(** According to wikipedia:
"Proclus (410-485) wrote a commentary on The Elements where he comments on attempted proofs to deduce
the fifth postulate from the other four, in particular he notes that Ptolemy had produced a false 'proof'.
Proclus then goes on to give a false proof of his own.
However he did give a postulate which is equivalent to the fifth postulate." *)
Definition proclus_postulate := forall A B C D P Q,
Par A B C D -> Col A B P -> ~ Col A B Q -> Coplanar C D P Q ->
exists Y, Col P Q Y /\ Col C D Y.
Definition alternative_proclus_postulate := forall A B C D P Q,
Perp2 A B C D P -> ~ Col C D P -> Coplanar A B C D ->
Col A B P -> ~ Col A B Q -> Coplanar C D P Q ->
exists Y, Col P Q Y /\ Col C D Y.
(** Non-degenerate triangles can be circumscribed. *)
Definition triangle_circumscription_principle := forall A B C,
~ Col A B C ->
exists CC, Cong A CC B CC /\ Cong A CC C CC /\ Coplanar A B C CC.
(** For any given acute angle, any point together with
its orthogonal projection on one side of the angle
form a line which intersects the other side. *)
Definition inverse_projection_postulate := forall A B C P Q,
Acute A B C ->
Out B A P -> P <> Q -> Per B P Q -> Coplanar A B C Q ->
exists Y, Out B C Y /\ Col P Q Y.
(** Given a non-degenerate parallelogram PRQS and a point U strictly between Q and R,
the rays PU and SQ intersect beyond U and Q. *)
Definition euclid_5 := forall P Q R S T U,
BetS P T Q -> BetS R T S -> BetS Q U R -> ~ Col P Q S ->
Cong P T Q T -> Cong R T S T ->
exists I, BetS S Q I /\ BetS P U I.
(** Given a non-degenerate parallelogram PRQS and a point U not on line PR,
the lines PU and SQ intersect. *)
Definition strong_parallel_postulate := forall P Q R S T U,
BetS P T Q -> BetS R T S -> ~ Col P R U ->
Coplanar P Q R U ->
Cong P T Q T -> Cong R T S T ->
exists I, Col S Q I /\ Col P U I.
(** If a straight line falling on two straight lines make
the sum of the interior angles on the same side different from two right angles,
the two straight lines meet if produced indefinitely. *)
Definition alternative_strong_parallel_postulate := forall A B C D P Q R,
OS B C A D -> SumA A B C B C D P Q R -> ~ Bet P Q R ->
exists Y, Col B A Y /\ Col C D Y.
(** If a straight line falling on two straight lines
make the interior angles on the same side less than two right angles,
the two straight lines, if produced indefinitely,
meet on that side on which are the angles less than the two right angles. *)
Definition euclid_s_parallel_postulate := forall A B C D P Q R,
OS B C A D -> SAMS A B C B C D -> SumA A B C B C D P Q R -> ~ Bet P Q R ->
exists Y, Out B A Y /\ Out C D Y.
(** There exists a triangle whose sum of angles is equal to the flat angle. *)
Definition postulate_of_existence_of_a_triangle_whose_angles_sum_to_two_rights :=
exists A B C D E F, ~ Col A B C /\ TriSumA A B C D E F /\ Bet D E F.
(** There exist two lines which are everywhere equidistant. *)
Definition posidonius_postulate :=
exists A1 A2 B1 B2,
~ Col A1 A2 B1 /\ B1 <> B2 /\ Coplanar A1 A2 B1 B2 /\
forall A3 A4 B3 B4,
Col A1 A2 A3 -> Col B1 B2 B3 -> Perp A1 A2 A3 B3 ->
Col A1 A2 A4 -> Col B1 B2 B4 -> Perp A1 A2 A4 B4 ->
Cong A3 B3 A4 B4.
(** There exist two non-congruent similar triangles. *)
Definition postulate_of_existence_of_similar_triangles :=
exists A B C D E F,
~ Col A B C /\ ~ Cong A B D E /\
CongA A B C D E F /\ CongA B C A E F D /\ CongA C A B F D E.
(** If A, B and C are points on a circle where the line AB is a diameter of the circle,
then the angle ACB is a right angle. *)
Definition thales_postulate := forall A B C M,
Midpoint M A B -> Cong M A M C -> Per A C B.
(** The circumcenter of a right triangle is the midpoint of the hypotenuse. *)
Definition thales_converse_postulate := forall A B C M,
Midpoint M A B -> Per A C B -> Cong M A M C.
(** There exists a right triangle whose circumcenter is the midpoint of the hypotenuse. *)
Definition existential_thales_postulate :=
exists A B C M, ~ Col A B C /\ Midpoint M A B /\ Cong M A M C /\ Per A C B.
(** The angles of any Saccheri quadrilateral are right. *)
Definition postulate_of_right_saccheri_quadrilaterals := forall A B C D,
Saccheri A B C D -> Per A B C.
(** There exists a Saccheri quadrilateral whose angles are right. *)
Definition postulate_of_existence_of_a_right_saccheri_quadrilateral :=
exists A B C D, Saccheri A B C D /\ Per A B C.
(** The angles of any Lambert quadrilateral are right, i.e.
if in a quadrilateral three angles are right, so is the fourth. *)
Definition postulate_of_right_lambert_quadrilaterals := forall A B C D,
Lambert A B C D -> Per B C D.
(** There exists a Lambert quadrilateral whose angles are right. *)
Definition postulate_of_existence_of_a_right_lambert_quadrilateral :=
exists A B C D, Lambert A B C D /\ Per B C D.
(** For any angle that, together with itself, makes a right angle,
any point together with its orthogonal projection on one side of the angle
form a line which intersects the other side. *)
Definition weak_inverse_projection_postulate := forall A B C D E F P Q,
Acute A B C -> Per D E F -> SumA A B C A B C D E F ->
Out B A P -> P <> Q -> Per B P Q -> Coplanar A B C Q ->
exists Y, Out B C Y /\ Col P Q Y.
Definition weak_tarski_s_parallel_postulate := forall A B C T,
Per A B C -> InAngle T A B C ->
exists X Y, Out B A X /\ Out B C Y /\ Bet X T Y.
(** The perpendicular bisectors of the legs of a right triangle intersect *)
Definition weak_triangle_circumscription_principle := forall A B C A1 A2 B1 B2,
~ Col A B C -> Per A C B ->
Perp_bisect A1 A2 B C -> Perp_bisect B1 B2 A C ->
Coplanar A B C A1 -> Coplanar A B C A2 ->
Coplanar A B C B1 -> Coplanar A B C B2 ->
exists I, Col A1 A2 I /\ Col B1 B2 I.
Definition legendre_s_parallel_postulate :=
exists A B C,
~ Col A B C /\ Acute A B C /\
forall T,
InAngle T A B C ->
exists X Y, Out B A X /\ Out B C Y /\ Bet X T Y.
(** There exists a point and a line such that
there is only one parallel to this line going through this point. *)
Definition existential_playfair_s_postulate :=
exists A1 A2 P, ~ Col A1 A2 P /\
(forall B1 B2 C1 C2,
Par A1 A2 B1 B2 -> Col P B1 B2 ->
Par A1 A2 C1 C2 -> Col P C1 C2 ->
Col C1 B1 B2 /\ Col C2 B1 B2).
End Euclid_def.
|
#' @import igraph
#' @importFrom stats complete.cases
NULL
|
lemma has_contour_integral_bound_part_circlepath_strong: assumes fi: "(f has_contour_integral i) (part_circlepath z r s t)" and "finite k" and le: "0 \<le> B" "0 < r" "s \<le> t" and B: "\<And>x. x \<in> path_image(part_circlepath z r s t) - k \<Longrightarrow> norm(f x) \<le> B" shows "cmod i \<le> B * r * (t - s)" |
lemma LIMSEQ_inverse_real_of_nat_add_minus: "(\<lambda>n. r + -inverse (real (Suc n))) \<longlonglongrightarrow> r" |
#!/usr/bin/env python
"""Provides interface types for the human operator/sensor.
"""
from __future__ import division
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "[email protected]"
__status__ = "Development"
import logging
from matplotlib.widgets import RadioButtons, Button
from PyQt4 import QtGui, QtCore
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class ChatInterface(QtGui.QWidget):
"""docstring for ChatInterface"""
def __init__(self, human_sensor=None, show_corrections=False):
super(ChatInterface, self).__init__()
self.human_sensor = human_sensor
self.show_corrections = show_corrections
self.response_text = ''
self.init_UI()
def init_UI(self):
# Description label
self.lbl = QtGui.QLabel(self)
self.lbl.move(20, 22)
# LineEdit text entry box
self.le = QtGui.QLineEdit(self)
self.le.move(20, 42)
self.le.resize(370,22)
self.le.textChanged[str].connect(self.onChanged)
# Submit button
self.btn = QtGui.QPushButton('Submit', self)
self.btn.move(400, 40)
self.btn.clicked.connect(self.submit)
# Window properties
self.setGeometry(200,700, 500, 80)
self.setWindowTitle('Chatbox')
self.show()
def onChanged(self, text):
self.nl_input = str(text)
def submit(self):
phrase, template = self.human_sensor.chatter.translate_from_natural_input(self.nl_input)
self.response_text = phrase
self.human_sensor.add_new_measurement(phrase)
self.lbl.setText(self.generate_response())
self.lbl.adjustSize()
self.le.clear()
def generate_response(self):
response = "I understood: {}".format(self.response_text)
return response
def keyPressEvent(self, qKeyEvent):
if qKeyEvent.key() == QtCore.Qt.Key_Return:
self.submit()
class CodebookInterface(object):
"""Generate a human interface on a given figure.
.. image:: img/classes_Human_interface.png
Parameters
----------
fig : figure handle
The figure on which to generate the human interface.
human_sensor : human sensor object
The object that handles parsing and use of human sensor updates.
input_type : {'radio_buttons','textbox'}
The type of human interface to generate.
measurement_types : dict
Dictionary of booleans relating to specific measurement types.
"""
input_types = ['radio_buttons', 'textbox']
measurement_types = {'velocity': False,
'area': True,
'object': True
}
def __init__(self, fig, human_sensor=None, input_type='radio_buttons',
measurement_types=None):
if measurement_types is None:
measurement_types = CodebookInterface.measurement_types
# General interface parameters
self.fig = fig
self.input_type = input_type
self.utterance = ''
self.radio = {}
# Use human sensor values if one is provided
if human_sensor:
self.certainties = human_sensor.certainties
self.targets = human_sensor.target_names
self.groundings = {'object': [], 'area': []}
for key, value in human_sensor.groundings['object'].iteritems():
self.groundings['object'].append(key)
for key, value in human_sensor.groundings['area'].iteritems():
self.groundings['area'].append(key)
self.positivities = human_sensor.positivities
self.relations = human_sensor.relations
self.movement_types = human_sensor.movement_types
self.movement_qualities = human_sensor.movement_qualities
logging.debug('using human_sensor values')
else:
self.certainties = ['think', 'know']
self.targets = ['nothing',
'a robber',
'Roy',
'Pris',
'Zhora',
]
self.positivities = ['is', 'is not']
self.relations = {'object': ['behind',
'in front of',
'left of',
'right of',
],
'area': ['inside',
'near',
'outside'
]}
self.groundings = {'area': ['the study',
'the billiard room',
'the hallway',
'the dining room',
'the kitchen',
'the library'
],
'object': ['Deckard',
'the bookshelf',
'the chair',
'the desk',
'the table',
]}
self.movement_types = ['moving', 'stopped']
self.movement_qualities = ['slowly', 'moderately', 'quickly']
self.groundings['object'].sort()
self.groundings['area'].sort()
for i, object_ in enumerate(self.groundings['object']):
if object_ not in ['Zhora', 'Pris', 'Leon', 'Deckard']:
self.groundings['object'][i] = 'the ' + object_.lower()
for i, area_name in enumerate(self.groundings['area']):
self.groundings['area'][i] = 'the ' + area_name.lower()
self.relations['object'].sort()
self.relations['area'].sort()
self.targets[2:] = sorted(self.targets[2:])
# Radio button parameters and default values
self.radio_boxcolor = None
self.set_default_values()
# General button parameters
self.button_color = 'lightgreen'
self.button_color_hover = 'palegreen'
# Link this interface to the human sensor
self.human_sensor = human_sensor
self.fig.subplots_adjust(bottom=0.32)
self.set_helpers()
if self.input_type == 'radio_buttons':
self.current_dialog = 'position (object)'
self.make_dialog()
elif self.input_type == 'textbox':
self.make_textbox()
def make_dialog(self):
"""Make the whole dialog interface.
"""
# Make white bounding box
self.dialog_ax = self.fig.add_axes([0, 0, 1, 1])
self.dialog_ax.patch.set_visible(False)
self.dialog_ax.axis('off')
min_x, min_y, w, h = (0.04, 0.035, 0.92, 0.19)
self.dialog_ax.add_patch(Rectangle((min_x, min_y), w, h, fc='white',
edgecolor='black', zorder=-100))
# Make top tabs
tab_w, tab_h = 0.18, 0.04
bax = plt.axes([min_x, min_y + h, tab_w, tab_h])
self.position_obj_button = Button(bax, 'Position (Object)',
color=self.button_color,
hovercolor=self.button_color_hover)
bax = plt.axes([min_x + tab_w, min_y + h, tab_w, tab_h])
self.position_area_button = Button(bax, 'Position (Area)',
color=self.button_color,
hovercolor=self.button_color_hover)
bax = plt.axes([min_x + 2 * tab_w, min_y + h, tab_w, tab_h])
self.movement_button = Button(bax, 'Movement',
color=self.button_color,
hovercolor=self.button_color_hover)
self.make_position_dialog('object')
self.position_obj_button.on_clicked(self.set_position_obj_dialog)
self.position_area_button.on_clicked(self.set_position_area_dialog)
self.movement_button.on_clicked(self.set_movement_dialog)
# Make submit button
max_x = min_x + w
max_y = min_y + h
w, h = 0.10, 0.06
min_x = max_x - w - 0.04
min_y = (max_y - min_y) / 2
bax = plt.axes([min_x, min_y, w, h])
self.submit_button = Button(bax, 'Submit', color=self.button_color,
hovercolor=self.button_color_hover)
self.submit_button.ax.patch.set_visible(True)
self.submit_button.on_clicked(self.submit_selection)
# Make the input a complete sentence
min_x = 0.05
min_y = 0.18
self.fig.text(min_x, min_y, 'I')
self.make_base_buttons()
def make_base_buttons(self):
# Certainty radio buttons
min_x = 0.05
min_y = 0.18
min_x += 0.01
w, h = (0.09, 0.18)
rax = plt.axes([min_x, min_y + 0.07 - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['certainty'] = RadioButtons(rax, self.certainties)
self.radio['certainty'].on_clicked(self.certain_func)
# Target radio buttons
min_x += w
w, h = (0.09, 0.18)
rax = plt.axes([min_x, min_y + 0.0435 - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['target'] = RadioButtons(rax, self.targets)
self.radio['target'].on_clicked(self.target_func)
# Positivity radio buttons
min_x += w + 0.02
w, h = (0.09, 0.18)
rax = plt.axes([min_x, min_y + 0.07 - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['positivity'] = RadioButtons(rax, self.positivities)
self.radio['positivity'].on_clicked(self.positivity_func)
def make_position_dialog(self, type_='object'):
"""Genrate the position radio button interface.
"""
min_x = 0.35
min_y = 0.18
# Relationship radio buttons
w, h = (0.09, 0.18)
if type_ == 'object':
a = 0.045
else:
a = 0.06
rax = plt.axes([min_x, min_y + a - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['relation'] = RadioButtons(rax, self.relations[type_])
self.radio['relation'].on_clicked(self.relation_func)
# Map object radio buttons
min_x += w + 0.04
w, h = (0.09, 0.18)
rax = plt.axes([min_x, min_y + 0.045 - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['grounding'] = RadioButtons(rax, self.groundings[type_])
self.radio['grounding'].on_clicked(self.grounding_func)
def make_movement_dialog(self):
"""Genrate the movement radio button interface.
"""
min_x = 0.35
min_y = 0.18
# Movement type radio buttons
w, h = (0.09, 0.18)
rax = plt.axes([min_x, min_y + 0.07 - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['movement_type'] = RadioButtons(rax, self.movement_types)
self.radio['movement_type'].on_clicked(self.movement_type_func)
# Movement quality radio buttons
min_x += w + 0.04
w, h = (0.09, 0.18)
rax = plt.axes([min_x, min_y + 0.06 - h, w, h],
axisbg=self.radio_boxcolor)
rax.patch.set_visible(False)
rax.axis('off')
self.radio['movement_quality'] = RadioButtons(rax,
self.movement_qualities)
self.radio['movement_quality'].on_clicked(self.movement_quality_func)
def remove_dialog(self):
for radio_name, radio in self.radio.iteritems():
# if radio_name not in ['certainty', 'target', 'positivity']:
self.fig.delaxes(radio.ax)
remove_names = ['relation', 'grounding', 'movement_type',
'movement_quality']
for remove_name in remove_names:
if remove_name in self.radio:
del self.radio[remove_name]
logging.debug('deleted {}'.format(remove_name))
def set_default_values(self, type_='object'):
self.certainty = self.certainties[0]
self.target = self.targets[0]
self.positivity = self.positivities[0]
self.relation = self.relations[type_][0]
self.grounding = self.groundings[type_][0]
self.movement_type = self.movement_types[0]
self.movement_quality = self.movement_qualities[0]
def set_helpers(self):
"""Set helper functions for buttons and radios.
"""
def certain_func(label):
self.certainty = label
logging.debug(self.certainty)
self.certain_func = certain_func
def target_func(label):
self.target = label
logging.debug(self.target)
self.target_func = target_func
def positivity_func(label):
self.positivity = label
logging.debug(self.positivity)
self.positivity_func = positivity_func
def relation_func(label):
self.relation = label
logging.debug(self.relation)
self.relation_func = relation_func
def grounding_func(label):
self.grounding = label
logging.debug(self.grounding)
self.grounding_func = grounding_func
def movement_type_func(label):
self.movement_type = label
logging.debug(self.movement_type)
self.movement_type_func = movement_type_func
def movement_quality_func(label):
self.movement_quality = label
logging.debug(self.movement_quality)
self.movement_quality_func = movement_quality_func
def set_position_obj_dialog(event):
if self.current_dialog != 'position (object)':
self.current_dialog = 'position (object)'
self.remove_dialog()
self.make_base_buttons()
self.make_position_dialog('object')
self.set_default_values('object')
self.fig.canvas.draw()
logging.info('Swapped dialog to: {}'
.format(self.current_dialog))
else:
logging.debug('Attempted to swap from {} to position (object).'
.format(self.current_dialog))
self.set_position_obj_dialog = set_position_obj_dialog
def set_position_area_dialog(event):
if self.current_dialog != 'position (area)':
self.current_dialog = 'position (area)'
self.remove_dialog()
self.make_base_buttons()
self.make_position_dialog('area')
self.set_default_values('area')
self.fig.canvas.draw()
logging.info('Swapped dialog to: {}'
.format(self.current_dialog))
else:
logging.debug('Attempted to swap from {} to position (area).'
.format(self.current_dialog))
self.set_position_area_dialog = set_position_area_dialog
def set_movement_dialog(event):
if self.current_dialog != 'movement':
self.current_dialog = 'movement'
self.remove_dialog()
self.make_base_buttons()
self.make_movement_dialog()
self.set_default_values()
self.fig.canvas.draw()
logging.info('Swapped dialog to: {}'
.format(self.current_dialog))
else:
logging.debug('Attempted to swap from {} to movement.'
.format(self.current_dialog))
self.set_movement_dialog = set_movement_dialog
def submit_selection(event):
# Create human sensor utterance
if 'position' in self.current_dialog:
custom_content = ' '.join([self.relation,
self.grounding,
])
elif 'movement' in self.current_dialog:
# <>TODO: stopped slowly?
custom_content = ' '.join([self.movement_type,
self.movement_quality,
])
else:
custom_content = ''
self.utterance = ' '.join(['I',
self.certainty,
self.target,
self.positivity,
custom_content
]) + '.'
logging.info('Human says: {}'.format(self.utterance))
# Send result to human sensor
if self.human_sensor:
self.human_sensor.utterance = self.utterance
self.human_sensor.new_update = True # <>TODO: interrupt
self.submit_selection = submit_selection
def make_textbox(self):
"""Generate the textbox interface.
"""
pass
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s: %(message)s',
level=logging.INFO,)
# t = np.arange(0.0, 2.0, 0.01)
# s0 = np.sin(2 * np.pi * t)
# fig, ax = plt.subplots()
# l, = ax.plot(t, s0, lw=2, color='red')
# hi = CodebookInterface(fig)
import sys
from cops_and_robots.human_tools.human import Human
human_sensor = Human()
app = QtGui.QApplication(sys.argv)
chat = ChatInterface(human_sensor=human_sensor)
sys.exit(app.exec_())
plt.show()
|
module Toolkit.Data.List.Subset
import Toolkit.Decidable.Informative
%default total
public export
data Subset : (eq : a -> b -> Type)
-> (this : List a)
-> (that : List b)
-> Type
where
Empty : Subset eq Nil Nil
EmptyThis : Subset eq Nil xs
Keep : {eq : a -> b -> Type}
-> (prf : eq x y)
-> (rest : Subset eq xs ys)
-> Subset eq (x::xs) (y::ys)
Skip : (rest : Subset eq xs ys)
-> Subset eq xs (y::ys)
public export
data Error : Type -> Type where
EmptyThat : Error a
Fail : a -> Error a
FailThere : Error a -> Error a
emptyThat : Subset eq (x :: xs) [] -> Void
emptyThat Empty impossible
yesButNo : {eq : a -> b -> Type}
-> {x : a} -> {xs : List a}
-> {y : b} -> {ys : List b}
-> (h : Subset eq xs ys -> Void)
-> (t : Subset eq (x::xs) ys -> Void)
-> Subset eq (x::xs) (y::ys)
-> Void
yesButNo h t (Keep prf rest) = h rest
yesButNo h t (Skip rest) = t rest
justNot : {eq : a -> b -> Type}
-> {x : a} -> {xs : List a}
-> {y : b} -> {ys : List b}
-> (eq x y -> Void)
-> (Subset eq (x :: xs) ys -> Void )
-> Subset eq (x :: xs) (y :: ys)
-> Void
justNot f g (Keep prf rest) = f prf
justNot f g (Skip rest) = g rest
export
subset : {eq : a -> b -> Type}
-> (test : (x : a) -> (y : b) -> DecInfo err (eq x y))
-> (this : List a)
-> (that : List b)
-> DecInfo (Error err) (Subset eq this that)
subset test [] []
= Yes Empty
subset test [] (x :: xs)
= Yes EmptyThis
subset test (x :: xs) []
= No EmptyThat emptyThat
subset test (x :: xs) (y :: ys) with (test x y)
subset test (x :: xs) (y :: ys) | (Yes prfHere) with (subset test xs ys)
subset test (x :: xs) (y :: ys) | (Yes prfHere) | (Yes prfThere)
= Yes (Keep prfHere prfThere)
subset test (x :: xs) (y :: ys) | (Yes prfHere) | (No msgWhyNot prfWhyNot) with (subset test (x::xs) ys)
subset test (x :: xs) (y :: ys) | (Yes prfHere) | (No msgWhyNot prfWhyNot) | (Yes prfThere)
= Yes (Skip prfThere)
subset test (x :: xs) (y :: ys) | (Yes prfHere) | (No msgWhyNotHere prfWhyNotHere) | (No msgWhyNotThere prfWhyNotThere)
= No (FailThere msgWhyNotThere)
(yesButNo prfWhyNotHere prfWhyNotThere)
subset test (x :: xs) (y :: ys) | (No msgWhyNotHere prfWhyNotHere) with (subset test (x::xs) ys)
subset test (x :: xs) (y :: ys) | (No msgWhyNotHere prfWhyNotHere) | (Yes prfThere)
= Yes (Skip prfThere)
subset test (x :: xs) (y :: ys) | (No msgWhyNotHere prfWhyNotHere) | (No msgWhyNotThere prfWhyNotThere)
= No (FailThere msgWhyNotThere)
(justNot prfWhyNotHere prfWhyNotThere)
|
A friend of mine has recently been posting her understanding of Japanese culture and its influence on Christianity and Japanese ministry. She created a series following many of the traditions in the culture that seem entirely foreign and oftentimes create strongholds against developing a faith in Jesus Christ. For example, relationships in Japan between family, coworkers, and others are each treated differently. In America, we often act as if we should treat the people around us like close friends. This is not the case in Japan and could be seen as impolite by Japanese people. The author, Katie, is a missionary in Japan with her family. To understand more, read Katie’s blog or peek through these compiled posts, the Japanese Culture Series.
One of this Japan blog's highlights is a deep look at Japanese culture and its effects on travelers, missionaries, and families. This page is basically the table of contents for the many aspects we are finding in Japanese culture and how we can understand them better.
And if you'd like to learn, I suggest RJC's class Japan 101. |
##############################################################
## ##
## This file includes common functions used in this project ##
## ##
##############################################################
## Source R scripts ####
source(here::here("src/00a_install_packages.r"))
##
|
(* Title: HOL/Auth/n_mutualExFsm_lemma_on_inv__2.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_mutualExFsm Protocol Case Study*}
theory n_mutualExFsm_lemma_on_inv__2 imports n_mutualExFsm_base
begin
section{*All lemmas on causal relation between inv__2 and some rule r*}
lemma n_fsmVsinv__2:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_fsm i)" and
a2: "(\<exists> p__Inv0. p__Inv0\<le>N\<and>f=inv__2 p__Inv0)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_fsm i" apply fastforce done
from a2 obtain p__Inv0 where a2:"p__Inv0\<le>N\<and>f=inv__2 p__Inv0" apply fastforce done
have "(i=p__Inv0)\<or>(i~=p__Inv0)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv0)"
have "((formEval (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)) s))\<or>((formEval (andForm (andForm (eqn (IVar (Ident ''x'')) (Const true)) (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))\<or>((formEval (andForm (andForm (neg (eqn (IVar (Ident ''x'')) (Const true))) (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))\<or>((formEval (andForm (andForm (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const C)) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))\<or>((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const C))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Ident ''x'')) (Const true)) (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (neg (eqn (IVar (Ident ''x'')) (Const true))) (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const C)) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const C))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const I)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv0)"
have "((formEval (eqn (IVar (Para (Ident ''n'') i)) (Const I)) s))\<or>((formEval (andForm (andForm (eqn (IVar (Ident ''x'')) (Const true)) (eqn (IVar (Para (Ident ''n'') i)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))\<or>((formEval (andForm (andForm (neg (eqn (IVar (Ident ''x'')) (Const true))) (eqn (IVar (Para (Ident ''n'') i)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))\<or>((formEval (andForm (andForm (eqn (IVar (Para (Ident ''n'') i)) (Const C)) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))\<or>((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''n'') i)) (Const C))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Para (Ident ''n'') i)) (Const I)) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Ident ''x'')) (Const true)) (eqn (IVar (Para (Ident ''n'') i)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (neg (eqn (IVar (Ident ''x'')) (Const true))) (eqn (IVar (Para (Ident ''n'') i)) (Const T))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Para (Ident ''n'') i)) (Const C)) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''n'') i)) (Const C))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Para (Ident ''n'') p__Inv0)) (Const C)) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const T)))) (neg (eqn (IVar (Para (Ident ''n'') i)) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
end
|
"""Classes concerned with solving for decoders or full weight matrices.
.. inheritance-diagram:: nengo.solvers
:parts: 1
:top-classes: nengo.solvers.Solver, nengo.solvers.SolverParam
"""
import time
import numpy as np
import nengo.utils.least_squares_solvers as lstsq
from nengo.params import BoolParam, FrozenObject, NdarrayParam, NumberParam, Parameter
from nengo.utils.least_squares_solvers import (
format_system,
rmses,
LeastSquaresSolverParam,
)
class Solver(FrozenObject):
"""Decoder or weight solver.
A solver can have the ``weights`` parameter equal to ``True`` or ``False``.
Weight solvers are used to form neuron-to-neuron weight matrices.
They can be compositional or non-compositional. Non-compositional
solvers must operate on the whole neuron-to-neuron weight matrix
(i.e., each target is a separate postsynaptic current, without the bias
term), while compositional solvers operate in the decoded state-space
(i.e., each target is a dimension in state-space). Compositional solvers
then combine the returned ``X`` with the transform and/or encoders to
generate the full weight matrix.
For a solver to be compositional, the following property must be true::
X = solver(A, Y) if and only if L(X) = solver(A, L(Y))
where ``L`` is some arbitrary linear operator (i.e., the transform and/or
encoders for the postsynaptic population). This property can then be
leveraged by the backend for efficiency. See the solver's
``compositional`` class attribute to determine if it is compositional.
Non-weight solvers always operate in the decoded state-space regardless of
whether they are compositional or non-compositional.
"""
compositional = True
weights = BoolParam("weights")
def __init__(self, weights=False):
super().__init__()
self.weights = weights
def __call__(self, A, Y, rng=np.random):
"""Call the solver.
.. note:: ``n_targets`` is ``dimensions`` if ``solver.weights`` is ``False``
and ``post.n_neurons`` if ``solver.weights`` is ``True``.
Parameters
----------
A : (n_eval_points, n_neurons) array_like
Matrix of the neurons' activities at the evaluation points.
Y : (n_eval_points, n_targets) array_like
Matrix of target values at the evaluation points.
rng : `numpy.random.RandomState`, optional
A random number generator to use as required.
Returns
-------
X : (n_neurons, n_targets) array_like
Matrix of weights used to map activities onto targets.
A typical solver will approximate ``dot(A, X) ~= Y`` subject to
some constraints on ``X``.
info : dict
A dictionary of information about the solver. All dictionaries have
an ``'rmses'`` key that contains RMS errors of the solve (one per
target). Other keys are unique to particular solvers.
"""
raise NotImplementedError("Solvers must implement '__call__'")
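# Illustrative sketch (not part of this module): the compositional property described
# above can be checked numerically with a compositional solver such as LstsqL2 below.
# The names A, Y, L, X1, X2 are placeholders.
#
#     rng = np.random.RandomState(0)
#     A = rng.rand(200, 50)              # activities at evaluation points
#     Y = rng.rand(200, 2)               # decoded targets
#     L = rng.rand(2, 3)                 # arbitrary linear operator on the target space
#     solver = LstsqL2(reg=0.1)
#     X1, _ = solver(A, Y.dot(L))
#     X2, _ = solver(A, Y)
#     # For a compositional solver, X1 equals X2.dot(L) up to numerical error.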
class SolverParam(Parameter):
"""A parameter in which the value is a `.Solver` instance."""
def coerce(self, instance, solver):
self.check_type(instance, solver, Solver)
return super().coerce(instance, solver)
class Lstsq(Solver):
"""Unregularized least-squares solver.
Parameters
----------
weights : bool, optional
If False, solve for decoders. If True, solve for weights.
rcond : float, optional
Cut-off ratio for small singular values (see `numpy.linalg.lstsq`).
Attributes
----------
rcond : float
Cut-off ratio for small singular values (see `numpy.linalg.lstsq`).
weights : bool
If False, solve for decoders. If True, solve for weights.
"""
rcond = NumberParam("rcond", low=0)
def __init__(self, weights=False, rcond=0.01):
super().__init__(weights=weights)
self.rcond = rcond
def __call__(self, A, Y, rng=np.random):
tstart = time.time()
X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond)
t = time.time() - tstart
return (
X,
{
"rmses": rmses(A, X, Y),
"residuals": np.sqrt(residuals2),
"rank": rank,
"singular_values": s,
"time": t,
},
)
def _add_noise_param_docs(cls):
cls.__doc__ += """
Parameters
----------
weights : bool, optional
If False, solve for decoders. If True, solve for weights.
noise : float, optional
Amount of noise, as a fraction of the neuron activity.
solver : `.LeastSquaresSolver`, optional
Subsolver to use for solving the least squares problem.
Attributes
----------
noise : float
Amount of noise, as a fraction of the neuron activity.
solver : `.LeastSquaresSolver`
Subsolver to use for solving the least squares problem.
weights : bool
If False, solve for decoders. If True, solve for weights.
"""
return cls
@_add_noise_param_docs
class LstsqNoise(Solver):
"""Least-squares solver with additive Gaussian white noise."""
noise = NumberParam("noise", low=0)
solver = LeastSquaresSolverParam("solver")
def __init__(self, weights=False, noise=0.1, solver=lstsq.Cholesky()):
super().__init__(weights=weights)
self.noise = noise
self.solver = solver
def __call__(self, A, Y, rng=np.random):
tstart = time.time()
sigma = self.noise * np.amax(np.abs(A))
A = A + rng.normal(scale=sigma, size=A.shape)
X, info = self.solver(A, Y, 0, rng=rng)
info["time"] = time.time() - tstart
return X, info
@_add_noise_param_docs
class LstsqMultNoise(LstsqNoise):
"""Least-squares solver with multiplicative white noise."""
def __call__(self, A, Y, rng=np.random):
tstart = time.time()
A = A + rng.normal(scale=self.noise, size=A.shape) * A
X, info = self.solver(A, Y, 0, rng=rng)
info["time"] = time.time() - tstart
return X, info
def _add_l2_param_docs(cls):
cls.__doc__ += """
Parameters
----------
weights : bool, optional
If False, solve for decoders. If True, solve for weights.
reg : float, optional
Amount of regularization, as a fraction of the neuron activity.
solver : `.LeastSquaresSolver`, optional
Subsolver to use for solving the least squares problem.
Attributes
----------
reg : float
Amount of regularization, as a fraction of the neuron activity.
solver : `.LeastSquaresSolver`
Subsolver to use for solving the least squares problem.
weights : bool
If False, solve for decoders. If True, solve for weights.
"""
return cls
@_add_l2_param_docs
class LstsqL2(Solver):
"""Least-squares solver with L2 regularization."""
reg = NumberParam("reg", low=0)
solver = LeastSquaresSolverParam("solver")
def __init__(self, weights=False, reg=0.1, solver=lstsq.Cholesky()):
super().__init__(weights=weights)
self.reg = reg
self.solver = solver
def __call__(self, A, Y, rng=np.random):
tstart = time.time()
sigma = self.reg * A.max()
X, info = self.solver(A, Y, sigma, rng=rng)
info["time"] = time.time() - tstart
return X, info
@_add_l2_param_docs
class LstsqL2nz(LstsqL2):
"""Least-squares solver with L2 regularization on non-zero components."""
def __call__(self, A, Y, rng=np.random):
tstart = time.time()
# Compute the equivalent noise standard deviation. This equals the
# base amplitude (noise_amp times the overall max activation) times
# the square-root of the fraction of non-zero components.
sigma = (self.reg * np.amax(np.abs(A))) * np.sqrt((np.abs(A) > 0).mean(axis=0))
# sigma == 0 means the neuron is never active, so won't be used, but
# we have to make sigma != 0 for numeric reasons.
sigma[sigma == 0] = sigma.max()
X, info = self.solver(A, Y, sigma, rng=rng)
info["time"] = time.time() - tstart
return X, info
class LstsqL1(Solver):
"""Least-squares solver with L1 and L2 regularization (elastic net).
This method is well suited for creating sparse decoders or weight matrices.
.. note:: Requires `scikit-learn <https://scikit-learn.org/stable/>`_.
Parameters
----------
weights : bool, optional
If False, solve for decoders. If True, solve for weights.
l1 : float, optional
Amount of L1 regularization.
l2 : float, optional
Amount of L2 regularization.
max_iter : int, optional
Maximum number of iterations for the underlying elastic net.
Attributes
----------
l1 : float
Amount of L1 regularization.
l2 : float
Amount of L2 regularization.
weights : bool
If False, solve for decoders. If True, solve for weights.
max_iter : int
Maximum number of iterations for the underlying elastic net.
"""
compositional = False
l1 = NumberParam("l1", low=0)
l2 = NumberParam("l2", low=0)
def __init__(self, weights=False, l1=1e-4, l2=1e-6, max_iter=1000):
# import to check existence
import sklearn.linear_model # pylint: disable=import-outside-toplevel
assert sklearn.linear_model
super().__init__(weights=weights)
self.l1 = l1
self.l2 = l2
self.max_iter = max_iter
def __call__(self, A, Y, rng=np.random):
import sklearn.linear_model # pylint: disable=import-outside-toplevel
tstart = time.time()
Y = np.array(Y) # copy since 'fit' may modify Y
# TODO: play around with regularization constants (I just guessed).
# Do we need to scale regularization by number of neurons, to get
# same level of sparsity? esp. with weights? Currently, setting
# l1=1e-3 works well with weights when connecting 1D populations
# with 100 neurons each.
a = self.l1 * A.max() # L1 regularization
b = self.l2 * A.max() ** 2 # L2 regularization
alpha = a + b
l1_ratio = a / (a + b)
# --- solve least-squares A * X = Y
model = sklearn.linear_model.ElasticNet(
alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=self.max_iter
)
model.fit(A, Y)
X = model.coef_.T
X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],)
t = time.time() - tstart
infos = {"rmses": rmses(A, X, Y), "time": t}
return X, infos
class LstsqDrop(Solver):
"""Find sparser decoders/weights by dropping small values.
This solver first solves for coefficients (decoders/weights) with
L2 regularization, drops those nearest to zero, and retrains remaining.
Parameters
----------
weights : bool, optional
If False, solve for decoders. If True, solve for weights.
drop : float, optional
Fraction of decoders or weights to set to zero.
solver1 : Solver, optional
Solver for finding the initial decoders.
solver2 : Solver, optional
Used for re-solving for the decoders after dropout.
Attributes
----------
drop : float
Fraction of decoders or weights to set to zero.
solver1 : Solver
Solver for finding the initial decoders.
solver2 : Solver
Used for re-solving for the decoders after dropout.
weights : bool
If False, solve for decoders. If True, solve for weights.
"""
compositional = False
drop = NumberParam("drop", low=0, high=1)
solver1 = SolverParam("solver1")
solver2 = SolverParam("solver2")
def __init__(
self,
weights=False,
drop=0.25,
solver1=LstsqL2(reg=0.001),
solver2=LstsqL2(reg=0.1),
):
super().__init__(weights=weights)
self.drop = drop
self.solver1 = solver1
self.solver2 = solver2
def __call__(self, A, Y, rng=np.random):
tstart = time.time()
Y, m, n, _, matrix_in = format_system(A, Y)
# solve for coefficients using standard solver
X, info0 = self.solver1(A, Y, rng=rng)
# drop weights close to zero, based on `drop` ratio
Xabs = np.sort(np.abs(X.flat))
threshold = Xabs[int(np.round(self.drop * Xabs.size))]
X[np.abs(X) < threshold] = 0
# retrain nonzero weights
info1s = []
for i in range(X.shape[1]):
info1 = None
nonzero = X[:, i] != 0
if nonzero.sum() > 0:
X[nonzero, i], info1 = self.solver2(A[:, nonzero], Y[:, i], rng=rng)
info1s.append(info1)
t = time.time() - tstart
info = {"rmses": rmses(A, X, Y), "info0": info0, "info1s": info1s, "time": t}
return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
def _add_nnls_param_docs(l2=False):
reg_attr = """
reg : float
Amount of regularization, as a fraction of the neuron activity.\
"""
reg_param = """
reg : float, optional
Amount of regularization, as a fraction of the neuron activity.\
"""
docstring = """
.. note:: Requires
`SciPy <https://docs.scipy.org/doc/scipy/reference/>`_.
Parameters
----------
weights : bool, optional
If False, solve for decoders. If True, solve for weights.{reg_param}
Attributes
----------{reg_attr}
weights : bool
If False, solve for decoders. If True, solve for weights.
""".format(
reg_param=reg_param if l2 else "", reg_attr=reg_attr if l2 else ""
)
def _actually_add_nnls_param_docs(cls):
cls.__doc__ += docstring
return cls
return _actually_add_nnls_param_docs
@_add_nnls_param_docs(l2=False)
class Nnls(Solver):
"""Non-negative least-squares solver without regularization.
Similar to `.Lstsq`, except the output values are non-negative.
If solving for non-negative **weights**, it is important that the
intercepts of the post-population are also non-negative, since neurons with
negative intercepts will never be silent, affecting output accuracy.
"""
compositional = False
def __init__(self, weights=False):
# import here too to throw error early
import scipy.optimize # pylint: disable=import-outside-toplevel
assert scipy.optimize
super().__init__(weights=weights)
def __call__(self, A, Y, rng=np.random):
import scipy.optimize # pylint: disable=import-outside-toplevel
tstart = time.time()
Y, m, n, _, matrix_in = format_system(A, Y)
d = Y.shape[1]
X = np.zeros((n, d))
residuals = np.zeros(d)
for i in range(d):
X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])
t = time.time() - tstart
info = {"rmses": rmses(A, X, Y), "residuals": residuals, "time": t}
return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
@_add_nnls_param_docs(l2=True)
class NnlsL2(Nnls):
"""Non-negative least-squares solver with L2 regularization.
Similar to `.LstsqL2`, except the output values are non-negative.
If solving for non-negative **weights**, it is important that the
intercepts of the post-population are also non-negative, since neurons with
negative intercepts will never be silent, affecting output accuracy.
"""
reg = NumberParam("reg", low=0)
def __init__(self, weights=False, reg=0.1):
super().__init__(weights=weights)
self.reg = reg
def _solve(self, A, Y, sigma=0.0):
import scipy.optimize # pylint: disable=import-outside-toplevel
tstart = time.time()
Y, m, n, _, matrix_in = format_system(A, Y)
d = Y.shape[1]
# form Gram matrix so we can add regularization
GA = np.dot(A.T, A)
np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma ** 2)
GY = np.dot(A.T, np.maximum(Y, 0))
# ^ TODO: why is it better if we clip Y to be positive here?
X = np.zeros((n, d))
residuals = np.zeros(d)
for i in range(d):
X[:, i], residuals[i] = scipy.optimize.nnls(GA, GY[:, i])
t = time.time() - tstart
info = {"rmses": rmses(A, X, Y), "residuals": residuals, "time": t}
return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
def __call__(self, A, Y, rng=np.random):
return self._solve(A, Y, sigma=self.reg * A.max())
@_add_nnls_param_docs(l2=True)
class NnlsL2nz(NnlsL2):
"""Non-negative least-squares with L2 regularization on nonzero components.
Similar to `.LstsqL2nz`, except the output values are non-negative.
If solving for non-negative **weights**, it is important that the
intercepts of the post-population are also non-negative, since neurons with
negative intercepts will never be silent, affecting output accuracy.
"""
def __call__(self, A, Y, rng=np.random):
sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0))
sigma[sigma == 0] = 1
return self._solve(A, Y, sigma=sigma)
class NoSolver(Solver):
"""Manually pass in weights, bypassing the decoder solver.
Parameters
----------
values : (n_neurons, size_out) array_like, optional
The array of decoders to use.
``size_out`` is the dimensionality of the decoded signal (determined
by the connection function).
If ``None``, which is the default, the solver will return an
appropriately sized array of zeros.
weights : bool, optional
If False, connection will use factored weights (decoders from this
solver, transform, and encoders).
If True, connection will use a full weight matrix (created by
linearly combining decoder, transform, and encoders).
Attributes
----------
values : (n_neurons, size_out) array_like, optional
The array of decoders to use.
``size_out`` is the dimensionality of the decoded signal (determined
by the connection function).
If ``None``, which is the default, the solver will return an
appropriately sized array of zeros.
weights : bool, optional
If False, connection will use factored weights (decoders from this
solver, transform, and encoders).
If True, connection will use a full weight matrix (created by
linearly combining decoder, transform, and encoders).
"""
compositional = True
values = NdarrayParam("values", optional=True, shape=("*", "*"))
def __init__(self, values=None, weights=False):
super().__init__(weights=weights)
self.values = values
def __call__(self, A, Y, rng=None):
if self.values is None:
n_neurons = np.asarray(A).shape[1]
return np.zeros((n_neurons, np.asarray(Y).shape[1])), {}
return self.values, {}
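# Illustrative sketch (placeholder network, not part of this module): passing
# precomputed decoders to a connection via NoSolver.
#
#     import nengo
#     with nengo.Network():
#         a = nengo.Ensemble(100, dimensions=2)
#         b = nengo.Node(size_in=2)
#         nengo.Connection(a, b, solver=NoSolver(np.zeros((100, 2))))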
|
[STATEMENT]
lemma mat_trace_similarity [simp]:
assumes "mat_det A \<noteq> 0"
shows "mat_trace (similarity A M) = mat_trace M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
obtain a b c d where AA: "A = (a, b, c, d)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>a b c d. A = (a, b, c, d) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases A) auto
[PROOF STATE]
proof (state)
this:
A = (a, b, c, d)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
obtain mA mB mC mD where MM: "M = (mA, mB, mC, mD)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>mA mB mC mD. M = (mA, mB, mC, mD) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases M) auto
[PROOF STATE]
proof (state)
this:
M = (mA, mB, mC, mD)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
have "mA * (a * d) / (a * d - b * c) + mD * (a * d) / (a * d - b * c) =
mA + mD + mA * (b * c) / (a * d - b * c) + mD * (b * c) / (a * d - b * c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mA * (a * d) / (a * d - b * c) + mD * (a * d) / (a * d - b * c) = mA + mD + mA * (b * c) / (a * d - b * c) + mD * (b * c) / (a * d - b * c)
[PROOF STEP]
using assms AA
[PROOF STATE]
proof (prove)
using this:
mat_det A \<noteq> 0
A = (a, b, c, d)
goal (1 subgoal):
1. mA * (a * d) / (a * d - b * c) + mD * (a * d) / (a * d - b * c) = mA + mD + mA * (b * c) / (a * d - b * c) + mD * (b * c) / (a * d - b * c)
[PROOF STEP]
by (simp add: field_simps)
[PROOF STATE]
proof (state)
this:
mA * (a * d) / (a * d - b * c) + mD * (a * d) / (a * d - b * c) = mA + mD + mA * (b * c) / (a * d - b * c) + mD * (b * c) / (a * d - b * c)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
mA * (a * d) / (a * d - b * c) + mD * (a * d) / (a * d - b * c) = mA + mD + mA * (b * c) / (a * d - b * c) + mD * (b * c) / (a * d - b * c)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
using AA MM
[PROOF STATE]
proof (prove)
using this:
mA * (a * d) / (a * d - b * c) + mD * (a * d) / (a * d - b * c) = mA + mD + mA * (b * c) / (a * d - b * c) + mD * (b * c) / (a * d - b * c)
A = (a, b, c, d)
M = (mA, mB, mC, mD)
goal (1 subgoal):
1. mat_trace (similarity A M) = mat_trace M
[PROOF STEP]
by (simp add: field_simps similarity_def)
[PROOF STATE]
proof (state)
this:
mat_trace (similarity A M) = mat_trace M
goal:
No subgoals!
[PROOF STEP]
qed |
```
from random import randint
from timeit import default_timer
size = 100
mat_1 = [[randint(0, size) for _ in range(size)] for _ in range(size)]
mat_2 = [[randint(0, size) for _ in range(size)] for _ in range(size)]
result = [[0 for _ in range(size)] for _ in range(size)]
```
### 1. Serial Implementation
```
starttime = default_timer()
for i in range(size):
for j in range(size):
for k in range(size):
result[i][j] += mat_1[i][k] * mat_2[k][j]
t1 = default_timer() - starttime
print("Serial Time Taken :", t1)
result = [[0 for _ in range(size)] for _ in range(size)]
```
Serial Time Taken : 0.4240078129998892
### 2. Data Parallel Implementation
```
!pip install -q pymp-pypi
import pymp
starttime = default_timer()
res_arr = pymp.shared.array((size, size), dtype='uint8')  # note: uint8 wraps around for these sums; a wider dtype (e.g. 'int64') is needed for correct results
with pymp.Parallel(2) as p:
for i in p.range(size):
for j in range(size):
for k in range(size):
res_arr[i][j] += mat_1[i][k] * mat_2[k][j]
t2 = default_timer() - starttime
print("Parallel Time Taken :", t2)
```
Parallel Time Taken : 3.070237331000044
### 3. Numpy
```
import numpy as np
starttime = default_timer()
res = np.dot(mat_1,mat_2)
t3 = default_timer() - starttime
print("Numpy Time Taken :", t3)
```
Numpy Time Taken : 0.004947687999901973
### 4. Scipy
```
from scipy import sparse
m1 = sparse.csr_matrix(mat_1)
m2 = sparse.csr_matrix(mat_2)
starttime = default_timer()
res = m1.multiply(m2)
t4 = default_timer() - starttime
print("Scipy Time Taken :", t4)
```
Scipy Time Taken : 0.0029525400000238733
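Note that `multiply` on SciPy sparse matrices is element-wise (Hadamard) multiplication, so the time above does not measure a matrix product. A minimal sketch of the actual sparse matrix product, reusing `m1` and `m2` from above (`res_mm` is an illustrative name):
```
starttime = default_timer()
res_mm = m1 @ m2  # sparse matrix product; equivalently m1.dot(m2)
print("Scipy matmul Time Taken :", default_timer() - starttime)
```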
### 5. Pandas
```
import numpy as np
import pandas as pd
df_1 = pd.DataFrame(mat_1)
df_2 = pd.DataFrame(mat_2)
starttime = default_timer()
df_1.dot(df_2)
t5 = default_timer() - starttime
print("Pandas Time Taken :", t5)
```
Pandas Time Taken : 0.0017128819999925327
### 6. Sympy
```
from sympy import Matrix
m1 = Matrix(mat_1)
m2 = Matrix(mat_2)
starttime = default_timer()
r = m1*m2
t6 = default_timer() - starttime
print("Sympy Time Taken :", t6)
```
Sympy Time Taken : 9.038939131000006
### 7. Numba
```
import numpy as np
import timeit
from numba import jit, float64, prange
@jit('float64[:,:](float64[:,:],float64[:,:])', parallel=True, nopython=True)
def matmul(A, B):
C = np.zeros((A.shape[0], B.shape[1]))
for i in prange(A.shape[0]):
for j in prange(B.shape[1]):
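            # note: the reduction should run over the shared dimension A.shape[1] (== B.shape[0]);
            # it coincides with A.shape[0] here only because A is square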
for k in range(A.shape[0]):
C[i,j] = C[i,j] + A[i,k]*B[k,j]
return C
A = np.random.rand(size, size)
B = np.random.rand(size, size)
start = default_timer()
matmul(A, B)
t7 = default_timer() - start
print("Numba Time Taken :", t7)
```
/usr/local/lib/python3.7/dist-packages/numba/np/ufunc/parallel.py:363: NumbaWarning: The TBB threading layer requires TBB version 2019.5 or later i.e., TBB_INTERFACE_VERSION >= 11005. Found TBB_INTERFACE_VERSION = 9107. The TBB threading layer is disabled.
warnings.warn(problem)
Numba Time Taken : 0.0013237749999461812
### 8. Linalg
```
from numpy.linalg import multi_dot
start = default_timer()
_ = multi_dot([mat_1, mat_2])
t8 = default_timer() - start
print("linalg Time Taken :", t8)
```
linalg Time Taken : 0.004973874999905092
### 9. Pymatrix
```
!wget https://raw.githubusercontent.com/dthul/pymatrix/master/matrix.py
import sys
sys.path.append('/content/')
from matrix import Matrix
m1 = Matrix(mat_1)
m2 = Matrix(mat_2)
start = default_timer()
res = m1 * m2
t9 = default_timer() - start
print("pymatrix Time Taken :", t9)
```
--2021-04-27 14:43:17-- https://raw.githubusercontent.com/dthul/pymatrix/master/matrix.py
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.109.133, 185.199.108.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 16087 (16K) [text/plain]
Saving to: ‘matrix.py’
matrix.py 100%[===================>] 15.71K --.-KB/s in 0s
2021-04-27 14:43:17 (111 MB/s) - ‘matrix.py’ saved [16087/16087]
pymatrix Time Taken : 0.34896002799996495
### 10. Tensorflow
```
from tensorflow.linalg import matmul
start = default_timer()
_ = matmul(mat_1, mat_2)
t10 = default_timer() - start
print("tensorflow Time Taken :", t10)
```
tensorflow Time Taken : 5.036223599000095
### 12. Pymc
```
!pip install -q pymc3
import pymc3 as pm
start = default_timer()
res = pm.math.dot(mat_1, mat_2)
t12 = default_timer() - start
print("pymc Time Taken :", t12)
```
pymc Time Taken : 0.006076633000020593
```
import numpy as np
from functools import lru_cache, wraps
import numpy as np
def np_cache(*args, **kwargs):
"""LRU cache implementation for functions whose FIRST parameter is a numpy array
>>> array = np.array([[1, 2, 3], [4, 5, 6]])
>>> @np_cache(maxsize=256)
... def multiply(array, factor):
... print("Calculating...")
... return factor*array
>>> multiply(array, 2)
Calculating...
array([[ 2, 4, 6],
[ 8, 10, 12]])
>>> multiply(array, 2)
array([[ 2, 4, 6],
[ 8, 10, 12]])
>>> multiply.cache_info()
CacheInfo(hits=1, misses=1, maxsize=256, currsize=1)
"""
def decorator(function):
@wraps(function)
def wrapper(np_array, *args, **kwargs):
hashable_array = array_to_tuple(np_array)
return cached_wrapper(hashable_array, *args, **kwargs)
@lru_cache(*args, **kwargs)
def cached_wrapper(hashable_array, *args, **kwargs):
array = np.array(hashable_array)
return function(array, *args, **kwargs)
def array_to_tuple(np_array):
"""Iterates recursivelly."""
try:
return tuple(array_to_tuple(_) for _ in np_array)
except TypeError:
return np_array
# copy lru_cache attributes over too
wrapper.cache_info = cached_wrapper.cache_info
wrapper.cache_clear = cached_wrapper.cache_clear
return wrapper
return decorator
@np_cache(maxsize=256)
def sq_cache(array):
return array*array
starttime = default_timer()
l1 = np.array(mat_1)
sq_cache(l1)
t13 = default_timer() - starttime
print("Custom Time Taken :", t13)
```
Custom Time Taken : 0.009742387999722268
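Since `sq_cache` is wrapped by `np_cache`, a repeated call with the same array should be served from the cache, and the decorator exposes `cache_info()` (copied over from `lru_cache` in the wrapper above), so the hit can be verified. A short check, reusing the `l1` array from the timing cell (illustrative only):
```
sq_cache(l1)                  # second call with the same array
print(sq_cache.cache_info())  # hits should now be >= 1
```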
```
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# methods = ['Serial','Parallel','Numpy','Scipy','Pandas','Sympy','Numba','Linalg','Pymatrix','TF','Pymc','Custom']
# times = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t12, t13]
methods = ['Serial','Parallel','Sympy','Pymatrix','TF','Pymc','Custom']
times = [t1, t2, t6, t9, t10, t12, t13]
ax.bar(methods,times)
ax.set_ylabel('Time in Seconds')
ax.set_title(f'Speedup Matrix Multiplication ({size}*{size})')
fig.set_size_inches(10, 3)
plt.yscale('log')
plt.show()
```
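Since the bar chart compares absolute times, the relative speedups over the serial baseline can also be printed directly. A small follow-up sketch, reusing the `methods` and `times` lists defined above (illustrative only):
```
for method, t in zip(methods, times):
    print(f"{method}: {times[0]/t:.1f}x faster than Serial")
```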
|
Cystinosis: lysosomal storage disorders of unknown molecular defect, characterized by widespread deposition of cystine crystals in reticuloendothelial cells.
Cystinosis: A metabolic disease characterized by the defective transport of CYSTINE across the lysosomal membrane due to mutation of a membrane protein cystinosin. This results in cystine accumulation and crystallization in the cells causing widespread tissue damage. In the KIDNEY, nephropathic cystinosis is a common cause of RENAL FANCONI SYNDROME.
Cystinosis is listed as a "rare disease" by the Office of Rare Diseases (ORD) of the National Institutes of Health (NIH). This means that Cystinosis, or a subtype of Cystinosis, affects less than 200,000 people in the US population.
Orphanet, a consortium of European partners, currently defines a condition as rare when it affects fewer than 1 person in 2,000. They list Cystinosis as a "rare disease".
|
The Northeast Georgia Regional Commission Geographic Information Systems (GIS) Department supports the Planning and Government Services Division as well as the public with mapping and data products. GIS is a rapidly evolving technology involving computers, mapping information, and people to help analyze and plan for the future of our twelve-county region.
Please fill out and submit the Request for GIS Services form here. |
# Building and using a 0D emulator
This notebook demonstrates how to use the `cardioemulator` library to:
- build a zero-dimensional emulator ($\mathcal{M}_{\text{0D}}$) of a three-dimensional left ventricular model ($\mathcal{M}_{\text{3D}}$);
- perform numerical simulations through the zero-dimensional emulator;
- use the zero-dimensional emulator to detect a limit cycle.
The files used in this demo are contained in the folder `example` of the [repository](https://github.com/FrancescoRegazzoni/cardioemulator).
## Building the 0D emulator
Let us first import the needed dependencies
```python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
# you don't need this line if you have installed cardioemulator correctly!
sys.path.insert(0, '..')
import cardioemulator
```
The file `example/data/sample_PV_loops.csv` contains five PV loops obtained through the 3D left ventricle (LV) model, coupled with a circulation model ($\mathcal{M}_{\text{3D}}$-$\mathcal{C}$).
The file `example/data/simulated_EDPVR.csv` contains pressure-volume pairs in relaxed conditions, obtained with the same 3D left ventricle model ($\mathcal{M}_{\text{3D}}$).
With the following commands, we build two emulators. The first one is based on the *fitted EDPVR approach* (that is, the EDPV relationship is obtained by fitting Klotz's curve to the sample PV loops).
```python
emulator_fitted_EDPV = cardioemulator.build_emulator(
file_PV_loops = 'data/sample_PV_loops.csv',
period = 0.8, # [s]
num_cycles = 3)
```
Building 0D emulator:
file PV loops: data/sample_PV_loops.csv
EDPV approach: fitted
period: 0.800 s
num cycles: 3
EDPV V_0: -1.19 mL
EDPV V_30: 167.47 mL
ESPV V_0: 11.45 mL
ESPV elastance: 1.645 mmHg/mL
EDPV elastance: 0.132 mmHg/mL
The second one is based on the *simulated EDPVR approach* (that is, the EDPV relationship is obtained through the 3D model).
```python
emulator_simulated_EDPV = cardioemulator.build_emulator(
file_PV_loops = 'data/sample_PV_loops.csv',
file_EDPV = 'data/simulated_EDPVR.csv',
period = 0.8, # [s]
num_cycles = 3)
```
Building 0D emulator:
file PV loops: data/sample_PV_loops.csv
EDPV approach: simulated
file EDPV: data/simulated_EDPVR.csv
period: 0.800 s
num cycles: 3
ESPV V_0: 11.45 mL
ESPV elastance: 1.645 mmHg/mL
EDPV elastance: 0.132 mmHg/mL
<div class="alert alert-info">
**Note**
These commands assume that pressures and volumes are contained in columns named "pressure" and "volume", respectively.
If this is not the case, you can specify the column names through the arguments `label_pressure_PV_loops` and `label_volume_PV_loops` (for `file_PV_loops`), or `label_pressure_EDPV` and `label_volume_PV_EDPV` (for `file_EDPV`).
Similarly, you can change the default measure units (mmHg and mL, respectively) through the arguments `pressure_unit_PV_loops`, `volume_unit_PV_loops`, `pressure_unit_EDPV` and `volume_unit_EDPV`.
</div>
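For instance, if the CSV columns had different names, the call might look like the following sketch (the column names `p_LV` and `V_LV` are purely hypothetical; the argument names are those listed in the note above):
```python
emulator_custom_labels = cardioemulator.build_emulator(
    file_PV_loops = 'data/sample_PV_loops.csv',
    period = 0.8, # [s]
    num_cycles = 3,
    # hypothetical column names, passed through the arguments mentioned above
    label_pressure_PV_loops = 'p_LV',
    label_volume_PV_loops = 'V_LV')
```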
Please notice that the same results can be obtained by running the bash command:
```bash
$ cardioemulator_build data/sample_PV_loops.csv 0.8 --num-cycles 3 \
-o emulator_fitted_EDPV.json
$ cardioemulator_build data/sample_PV_loops.csv 0.8 --num-cycles 3 \
--EDPV data/simulated_EDPVR.csv \
-o emulator_simulated_EDPV.json
```
and then loading the emulators from file within a Python script:
```python
emulator_fitted_EDPV = cardioemulator.Emulator('emulator_fitted_EDPV.json')
emulator_simulated_EDPV = cardioemulator.Emulator('emulator_simulated_EDPV.json')
```
<div class="alert alert-info">
**Hint**
The option ``-d`` of the app ``cardioemulator_build`` provides on-the-fly plots of the built emulator.
Similarly, the argument ``-f`` allows exporting the figure to a file.
</div>
## Visualizing the 0D emulator
Let us plot the functions defining the two emulators $\mathcal{M}_{\text{0D}}$, together with the PV loops we used for their construction.
```python
data_loop = pd.read_csv('data/sample_PV_loops.csv')
VV = np.linspace(20, 200, 1000)
times = np.linspace(0, 0.8, 1000)
_, axs = plt.subplots(1,2,figsize=(8,4))
axs[0].plot(data_loop.volume, data_loop.pressure, 'k', linewidth = 0.8)
axs[0].plot(VV, emulator_fitted_EDPV.EDPV(VV), label = 'EDPVR (fitted)')
axs[0].plot(VV, emulator_simulated_EDPV.EDPV(VV), label = 'EDPVR (simulated)')
axs[0].plot(VV, emulator_fitted_EDPV.ESPV(VV), label = 'ESPVR')
axs[0].set_xlabel('V [mL]')
axs[0].set_ylabel('p [mmHg]')
axs[0].set_xlim([50, 170])
axs[0].set_ylim([-10, 150])
axs[0].legend(bbox_to_anchor=(1, 1), loc=1, frameon=False, fontsize=7, labelspacing = 0.2)
axs[1].plot(times, emulator_fitted_EDPV.activation(times), label = 'fitted EDPVR')
axs[1].plot(times, emulator_simulated_EDPV.activation(times), label = 'simulated EDPVR')
axs[1].set_xlabel('time [s]')
axs[1].set_ylabel('activation function [-]')
axs[1].legend(bbox_to_anchor=(1, 1), loc=1, frameon=False, fontsize=7, labelspacing = 0.2)
```
## Using the 0D emulator
The 0D emulator consists of a time-dependent pressure-volume relationship:
\begin{equation}
p = \mathcal{P}(V, t)
\end{equation}
The pressure-volume function $\mathcal{P}$ is accessible through the member [PV](../docs/_autosummary/_autosummary/cardioemulator.Emulator.html#cardioemulator.Emulator.PV) of the class [Emulator](../_autosummary/_autosummary/cardioemulator.Emulator.html). The following script computes, for example, $\mathcal{P}(100 \text{ mL}, 0.4 \text{ s})$:
```python
volume = 100 # [ml]
time = 0.4 # [s]
pressure = emulator_fitted_EDPV.PV(volume, time)
print('pressure = %f mmHg' % pressure)
```
pressure = 91.256128 mmHg
A useful feature of the 0D emulator is the possibility of being coupled with a blood circulation model ($\mathcal{C}$).
For illustrative purposes, this example contains a Python class implementing the 0D closed-loop circulation model introduced in [this paper](https://arxiv.org/abs/2011.15040). To couple the 0D emulator with this circulation model, all we need to do is replace the LV pressure-volume relationship (therein represented by an elastance-based model) with the function $\mathcal{P}$ and then run a simulation:
```python
from circulation_closed_loop import circulation_closed_loop
# initialize the circulation model
circulation = circulation_closed_loop(options = 'data/params_bsln.json')
# replace the original LV PV relationship with the one defined by the emulator
circulation.p_LV_func = emulator_fitted_EDPV.PV
# run a 5 heartbeats simulation
history = circulation.solve(num_cycles = 5, initial_state = 'data/state_bsln.json')
```
Circulation model - running simulation...
Circulation model - elapsed time 2.0310 s
Let us now visually compare the results obtained with the $\mathcal{M}_{\text{0D}}$-$\mathcal{C}$ model and with the $\mathcal{M}_{\text{3D}}$-$\mathcal{C}$ model:
```python
_, axs = plt.subplots(1,1,figsize=(4,4))
axs.plot(data_loop.volume, data_loop.pressure, label = '3D')
axs.plot(history.VLV, history.pLV, label = '0D')
axs.legend(bbox_to_anchor = (1, 1), loc = 1, frameon = False)
axs.set_xlabel('V [mL]')
axs.set_ylabel('p [mmHg]')
```
## Detecting the limit cycle by means of the 0D emulator
Let us now run 100 heartbeats by means of the 0D emulator coupled with the circulation model ($\mathcal{M}_{\text{0D}}$-$\mathcal{C}$).
Then, we store the final state of this simulation in the file `limit_cycle.json`.
Finally, to check that the simulation actually reached a limit cycle, we perform a second and shorter simulation starting from the stored state.
```python
history_transient = circulation.solve(num_cycles = 100, initial_state = 'data/state_bsln.json')
circulation.save_state('limit_cycle.json')
history_limit_cycle = circulation.solve(num_cycles = 3, initial_state = 'limit_cycle.json')
```
Circulation model - running simulation...
Circulation model - elapsed time 39.8512 s
Circulation model - running simulation...
Circulation model - elapsed time 1.2790 s
Let us plot the full transient and the limit cycle:
```python
_, axs = plt.subplots(1,1,figsize=(4,4))
axs.plot(history_transient.VLV, history_transient.pLV, label = 'transient')
axs.plot(history_limit_cycle.VLV, history_limit_cycle.pLV, label = 'limit cycle')
axs.legend(bbox_to_anchor = (1, 1), loc = 1, frameon = False)
axs.set_xlabel('V [mL]')
axs.set_ylabel('p [mmHg]')
```
In conclusion, the final state of the simulation obtained through the $\mathcal{M}_{\text{0D}}$-$\mathcal{C}$ model (stored in the file `limit_cycle.json`) can be used to provide an initial state for the $\mathcal{M}_{\text{3D}}$-$\mathcal{C}$ model. Specifically, the state variables of the circulation model can be used to initialize the circulation model itself, while the pressure or volume of the LV can be used to initialize the $\mathcal{M}_{\text{3D}}$ model. The results are reported in \[1\].
### References
\[1\] F. Regazzoni, A. Quarteroni "[Accelerating the convergence to a limit cycle in 3D cardiac electromechanical simulations through a data-driven 0D emulator](https://doi.org/10.1016/j.compbiomed.2021.104641)", *Computers in Biology and Medicine* 135 (2021) 104641
|
# Neural Ordinary Differential Equations
A discussion of an interesting new paradigm in Neural Networks, adapted from "Neural Ordinary Differential Equations" by Chen et al. 2019 (https://arxiv.org/pdf/1806.07366.pdf)
## 1 - Background
### 1.1 - Neural Network Setup
Neural networks are a set of algorithms loosely based on the functionality of the human brain. These algorithms are designed to learn patterns in numerical data by propagating input data through a series of hidden layers, each composed of several artificial 'neurons' or nodes. The final result is the output layer, a numerical representation of our desired prediction.
Consider an input dataset composed of a set of $m$ features: $(\vec{x_1}, \vec{x_2}, ..., \vec{x_m})$ . <br>
We add a bias component to this dataset, $\vec{x_0} = 1$, a vector of ones, yielding our input layer, $\vec{x} := \vec{x_0}, ... \vec{x_m}$. <br>
We now need to consider how to pass this input to our first hidden layer. Suppose the first layer is composed of $n$ nodes; we define a set of weights $w=\vec{w_1},\vec{w_2}, ..., \vec{w_n}$ where each $\vec{w_i}$ is a vector of length $m+1$ (matching the bias-augmented input) corresponding to node $i$. We then define $\vec{z_i} := \vec{x}\cdot \vec{w_{i}}$. <br>
We pass $\vec{z_i}$ to node $i$ in the first hidden layer, but what occurs at the node, and what is output to the next layer? The answer is the activation function. For the purpose of this discussion, we will focus solely on the sigmoid activation function $\sigma(z) = \dfrac{1}{1+e^{-z}}$, though it should be noted that many others exist. At each node, we compute $\sigma(z_i)$, and that's it! We have our single-layer representation. <br>
To extrapolate to a full neural network, we simply repeat this process, using the output of the activation functions of the previous layer as the new input layer, taking the dot product with a new set of weights and computing the activation of these new inputs. The final layer will be our output layer, $\vec{y}$, and will have a number of neurons corresponding to our desired predictable (if we are predicting hand-written digits as in the exemplary MNIST dataset, our $\vec{y}$ will be a vector of length 10). <br>
Our problem is thus clear: we must compute $W$, the weights; $L$, the number of layers; and $n_l$, the number of neurons in each layer. And we must do so efficiently, as there will often be very many weights to compute and many layers in our network.
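As a minimal illustration of the forward pass for a single layer, consider the following NumPy sketch (the shapes and values are made up for the example and are not tied to any particular dataset):
```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Hypothetical example: 3 input features plus a bias term, first layer with 4 nodes.
x = np.array([1.0, 0.5, -1.2, 3.0])  # input with bias x_0 = 1 prepended
W = np.random.rand(4, 4)             # one weight vector (length 4) per node
z = W @ x                            # z_i = x . w_i for each node i
a = sigmoid(z)                       # layer activations, passed on to the next layer
print(a)
```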
### 1.2 - Computing parameters and hyperparameters
We will separate the values we wish to compute into two categories: 'parameters' (the weights) and 'hyperparameters' (the number of layers and the number of neurons in each layer). In general, determining the hyperparameters is much more of an art than a science, and requires significant experimentation and intuition. On the other hand, determining the parameters can be done by following one of several well-defined protocols. <br>
For our purposes, we will simply define a cost-function, which will turn out to be an aggregate of the cost function used in typical logistic regression (__NOTE: if you don't know logistic regression, stop here and go learn that first__). For now, I will simply state the cost function of a neural network implementing the sigmoid activation function: <br>
\begin{align}
J(\Theta) = - \dfrac{1}{m}\sum_{i=1}^m \sum_{k=1}^K\left[
y_{k}^{(i)} \log((h_{\Theta}(x^{(i)}))_k) + (1-y_{k}^{(i)})\log(1-(h_{\Theta}(x^{(i)}))_k)
\right]
+ \dfrac{\lambda}{2m}\sum_{l=1}^{L-1}\sum_{i=1}^{s_l}\sum_{j=1}^{s_{l+1}}\left(
\Theta_{j,i}^{l}
\right)^{2}
\end{align}
where $L$ is the total number of layers in the network, $s_l$ is the number of nodes in layer $l$, $K$ is the number of output classes/units and $\lambda$ is a regularization parameter, to prevent overfitting (another hyperparameter to compute).
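As a rough sketch, this cost could be evaluated in NumPy along the following lines. The names `H` (forward-pass predictions of shape `(m, K)`), `Y` (one-hot labels of the same shape) and `Thetas` (a list of weight matrices with the bias column first) are illustrative assumptions, not part of any particular library:
```python
import numpy as np

def nn_cost(H, Y, Thetas, lam):
    """Regularized cross-entropy cost for a sigmoid-output network (sketch)."""
    m = Y.shape[0]
    # cross-entropy term, summed over examples i and output units k
    data_term = -np.sum(Y * np.log(H) + (1 - Y) * np.log(1 - H)) / m
    # regularization term: squared weights, excluding the bias column of each Theta
    reg_term = (lam / (2 * m)) * sum(np.sum(T[:, 1:] ** 2) for T in Thetas)
    return data_term + reg_term
```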
With this cost function, we will choose starting values for the parameters, feed these forward through the network, and calculate the loss, known as the feedforward step. We then take this loss to update the weight and bias values moving backward through the network, known as the backpropagation step.
## 2 - Neural Networks in Python
### 2.1 - Definitions
As per [this](https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6) tutorial, we will define the input layer $\textbf{x}$, an arbitrary amount of hidden layers, an output layer $\hat{\textbf{y}}$, the set of weights $\textbf{W}$ and biases $\textbf{b}$ and the activation function $\sigma$.
We will now create this class in Python, for a neural network with one hidden layer, having 4 nodes:
```python
import numpy as np
class NeuralNetwork:
def __init__(self, x, y):
self.input = x
self.weights1 = np.random.rand(self.input.shape[1],4)
self.weights2 = np.random.rand(4,1)
self.y = y
self.output = np.zeros(y.shape)
```
To this class definition, we will add a feedforward step, where we assume the bias values $\textbf{b}$ are $0$ for simplicity.
### 2.2 - Training
To train this network, we need to find appropriate values for $\textbf{W}$ and $\textbf{b}$. We do so by feeding forward through the network with an initial set of parameters, and subsequently updating these parameters using backpropagation with an appropriate cost function. Adding the feedforward to our class definition is trivial:
```python
def sigmoid(z):
    # logistic activation used in the feedforward pass below
    return 1.0 / (1.0 + np.exp(-z))

class NeuralNetwork:
def __init__(self, x, y):
self.input = x
self.weights1 = np.random.rand(self.input.shape[1],4)
self.weights2 = np.random.rand(4,1)
self.y = y
self.output = np.zeros(self.y.shape)
def feedforward(self):
self.layer1 = sigmoid(np.dot(self.input, self.weights1))
self.output = sigmoid(np.dot(self.layer1, self.weights2))
```
Now, to add the backpropagation step, we must determine the gradient of our cost function.
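A sketch of the corresponding backpropagation step, in the spirit of the tutorial referenced above, is given below. It uses a sum-of-squares loss for simplicity rather than the cross-entropy cost of Section 1.2, assumes a unit learning rate and no biases, and the method would be added to the `NeuralNetwork` class defined above:
```python
def sigmoid_derivative(a):
    # derivative of the sigmoid, expressed in terms of its output a = sigmoid(z)
    return a * (1.0 - a)

# Method sketch, to be added to the NeuralNetwork class above.
def backprop(self):
    # chain rule for the loss sum((y - output)^2)
    d_output   = 2 * (self.y - self.output) * sigmoid_derivative(self.output)
    d_weights2 = np.dot(self.layer1.T, d_output)
    d_hidden   = np.dot(d_output, self.weights2.T) * sigmoid_derivative(self.layer1)
    d_weights1 = np.dot(self.input.T, d_hidden)
    # move the weights in the direction that reduces the loss (unit learning rate)
    self.weights1 += d_weights1
    self.weights2 += d_weights2
```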
### 2.3 - MNIST Dataset
We will now work towards building a standard neural network from scratch, in Python, using the MNIST handwritten number dataset (a toy dataset commonly used in teaching image recognition).
|
-- Prototype of RealizedFunction as described in
-- https://blog.singularitynet.io/ai-dsl-toward-a-general-purpose-description-language-for-ai-agents-21459f691b9e
--
-- This is similar to SimpleDataRealizedFunction but the attributes
-- are wrapped in RealizedAttributes.
module RealizedFunction
import public RealizedAttributes
public export
data RealizedFunction : (t : Type) -> (attrs : RealizedAttributes) -> Type where
MkRealizedFunction : (f : t) -> (attrs : RealizedAttributes)
-> RealizedFunction t attrs
-- Perform the composition between 2 realized functions. The
-- resulting realized function is formed as follows:
--
-- 1. Compose the lifted functions
-- 2. Use add_costs_min_quality for composing attributes
public export
compose : {a : Type} -> {b : Type} -> {c : Type} ->
(RealizedFunction (b -> c) g_attrs) ->
(RealizedFunction (a -> b) f_attrs) ->
(RealizedFunction (a -> c) (add_costs_min_quality f_attrs g_attrs))
compose (MkRealizedFunction f f_attrs) (MkRealizedFunction g g_attrs) =
MkRealizedFunction (f . g) (add_costs_min_quality f_attrs g_attrs)
-- Perform function application over realized functions. Maybe we'd
-- want to use some funded data, as defined in FndType.
public export
apply : (RealizedFunction (a -> b) attrs) -> a -> b
apply (MkRealizedFunction f attrs) = f
|
function f = dejongsfcn(x)
if strcmpi(x,'init')
f.Vectorized = 'on' ;
f.PopInitRange = [-5; 5] ;
f.KnownMin = [0 0] ; % For plotting only
else
f = sum(x.*x,2) ;
end |
program test_strarr
use testing, only:assert, initialize_tests, report_tests
use functional, only:arrstr, strarr, empty
implicit none
logical, dimension(:), allocatable :: tests
logical :: test_failed
integer :: n, ntests
n = 1
ntests = 3
call initialize_tests(tests, ntests)
tests(n) = assert(all(strarr('hello') == ['h', 'e', 'l', 'l', 'o']), &
'strarr converts to array')
n = n + 1
tests(n) = assert(all(strarr('') == empty(' ')), &
'strarr converts empty string to []')
n = n + 1
tests(n) = assert(arrstr(strarr('hello')) == 'hello', &
'arrstr(strarr(string)) == string')
n = n + 1
test_failed = .false.
call report_tests(tests, test_failed)
if(test_failed)stop 1
end program test_strarr
|
State Before: F : Type ?u.128362
α : Type u_1
β : Type ?u.128368
inst✝¹ : LinearOrderedRing α
inst✝ : FloorRing α
z : ℤ
a : α
⊢ ⌊1⌋ = 1
State After: no goals
Tactic: rw [← cast_one, floor_intCast]
################################################################################
# Description: Discrete controller for two assets (risk-free and risky)
# allocation task. The trader can go short (-1), stay neutral (0)
# or go long (+1) on the risky asset.
# Author: Pierpaolo Necchi
# Email: [email protected]
# Date: gio 26 mag 2016 10:53:26 CEST
################################################################################
import numpy as np
class DiscreteController(object):
""" Discrete controller for two assets (risk-free and risky) allocation
task. The trader can go short (-1), stay neutral (0) or go long (+1)
on the risky asset. The weight invested on the risk-free asset is given
by 1 - a_risky, so that the trader invests all of his wealth at each
time step. """
def __init__(self, nIn):
""" Initialize discrete controller.
Params:
nIn (int): input size
"""
# Initialize sizes
self.nIn = nIn
self.nParameters = nIn + 1
# Initialize controller parameters
self.parameters = 0.01 * (np.random.rand(self.nParameters) - 0.5)
def setParameters(self, parameters):
""" Set the controller parameters.
Args:
parameters (np.array): new controller parameters
"""
self.parameters = parameters
def activate(self, input):
""" Activate controller with a certain input.
Args:
input (np.array): controller input
Returns:
output (np.array): controller output
"""
# Add bias to the input
input_bias = np.append(input, 1.0)
# Evaluate risky-asset weight
activation = np.dot(input_bias, self.parameters)
aRisky = np.sign(activation)
aFree = 1 - aRisky
return np.array([aFree, aRisky])
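
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): feed a hypothetical
    # two-feature market state to the controller and inspect the resulting allocation.
    np.random.seed(0)
    controller = DiscreteController(nIn=2)
    state = np.array([0.01, -0.02])          # e.g. recent returns of the risky asset
    allocation = controller.activate(state)  # [weight risk-free, weight risky]
    print("Allocation [a_free, a_risky]:", allocation)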
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$define gga_c_pbe_params
$include "gga_c_pbe.mpl"
pbeloc_b0 := 0.0375:
pbeloc_a := 0.08:
(* we redefine beta here *)
mbeta := (rs, t) -> pbeloc_b0 + pbeloc_a*t^2*(1 - exp(-rs^2)):
|
/*
Copyright (C) 2018 Quaternion Risk Management Ltd
All rights reserved.
This file is part of ORE, a free-software/open-source library
for transparent pricing and risk analysis - http://opensourcerisk.org
ORE is free software: you can redistribute it and/or modify it
under the terms of the Modified BSD License. You should have received a
copy of the license along with this program.
The license is also available online at <http://opensourcerisk.org>
This program is distributed on the basis that it will form a useful
contribution to risk analytics and model standardisation, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.
*/
#include <boost/test/unit_test.hpp>
#include <oret/toplevelfixture.hpp>
#include <ored/configuration/commoditycurveconfig.hpp>
using namespace std;
using namespace boost::unit_test_framework;
using namespace QuantLib;
using namespace ore::data;
BOOST_FIXTURE_TEST_SUITE(OREDataTestSuite, ore::test::TopLevelFixture)
BOOST_AUTO_TEST_SUITE(CommodityCurveConfigTests)
BOOST_AUTO_TEST_CASE(testConstructionQuotes) {
BOOST_TEST_MESSAGE("Testing commodity curve configuration quote vector construction");
// Main thing to check here is that the spot quote gets
// inserted at the beginning of the vector of quotes
string curveId = "GOLD_USD";
string curveDescription = "Value of troy ounce of gold in USD";
string currency = "USD";
string commoditySpotQuote = "COMMODITY/PRICE/GOLD/USD";
vector<string> quotes = {"COMMODITY_FWD/PRICE/GOLD/USD/2016-02-29", "COMMODITY_FWD/PRICE/GOLD/USD/2017-02-28"};
// Create configuration
CommodityCurveConfig config(curveId, curveDescription, currency, commoditySpotQuote, quotes);
// Check quotes vector from config (none of the other members have logic)
quotes.insert(quotes.begin(), commoditySpotQuote);
BOOST_CHECK_EQUAL_COLLECTIONS(quotes.begin(), quotes.end(), config.quotes().begin(), config.quotes().end());
}
BOOST_AUTO_TEST_CASE(testParseFromXml) {
BOOST_TEST_MESSAGE("Testing parsing of commodity curve configuration from XML");
// Create an XML string representation of the commodity curve configuration
string configXml;
configXml.append("<CommodityCurve>");
configXml.append(" <CurveId>GOLD_USD</CurveId>");
configXml.append(" <CurveDescription>Gold USD price curve</CurveDescription>");
configXml.append(" <Currency>USD</Currency>");
configXml.append(" <SpotQuote>COMMODITY/PRICE/GOLD/USD</SpotQuote>");
configXml.append(" <Quotes>");
configXml.append(" <Quote>COMMODITY_FWD/PRICE/GOLD/USD/2016-02-29</Quote>");
configXml.append(" <Quote>COMMODITY_FWD/PRICE/GOLD/USD/2017-02-28</Quote>");
configXml.append(" </Quotes>");
configXml.append(" <DayCounter>A365</DayCounter>");
configXml.append(" <InterpolationMethod>Linear</InterpolationMethod>");
configXml.append(" <Extrapolation>true</Extrapolation>");
configXml.append("</CommodityCurve>");
// Create the XMLNode
XMLDocument doc;
doc.fromXMLString(configXml);
XMLNode* configNode = doc.getFirstNode("CommodityCurve");
// Parse commodity curve configuration from XML
CommodityCurveConfig config;
config.fromXML(configNode);
// Expected vector of quotes
vector<string> quotes = {"COMMODITY/PRICE/GOLD/USD", "COMMODITY_FWD/PRICE/GOLD/USD/2016-02-29",
"COMMODITY_FWD/PRICE/GOLD/USD/2017-02-28"};
// Check fields
BOOST_CHECK_EQUAL(config.curveID(), "GOLD_USD");
BOOST_CHECK_EQUAL(config.curveDescription(), "Gold USD price curve");
BOOST_CHECK_EQUAL(config.currency(), "USD");
BOOST_CHECK_EQUAL(config.commoditySpotQuoteId(), "COMMODITY/PRICE/GOLD/USD");
BOOST_CHECK_EQUAL_COLLECTIONS(quotes.begin(), quotes.end(), config.quotes().begin(), config.quotes().end());
BOOST_CHECK_EQUAL(config.dayCountId(), "A365");
BOOST_CHECK_EQUAL(config.interpolationMethod(), "Linear");
BOOST_CHECK_EQUAL(config.extrapolation(), true);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
|
#!/usr/bin/env Rscript
# This script outputs the REF mtx based on the AD and DP mtx.
#hxj5<[email protected]>
# parse command line args.
args <- commandArgs(trailingOnly = TRUE)
if (0 == length(args)) {
print("Welcome!")
print("use -h or --help for help on argument.")
quit("no", 1)
}
options(warn = -1)
library(argparse)
options(warn = 0)
parser <- ArgumentParser(
description = "",
formatter_class = "argparse.RawTextHelpFormatter"
)
parser$add_argument("--ad", type = "character", help = "The AD mtx file.")
parser$add_argument("--dp", type = "character", help = "The DP mtx file.")
parser$add_argument("-o", "--outfile", type = "character", help = "The REF mtx file.")
parser$add_argument("--utilDir", type = "character", default = ".",
help = "The util dir [.]")
args <- parser$parse_args()
# check args.
if (is.null(args$utilDir) || ! dir.exists(args$utilDir)) {
write("Error: the valid util Dir needed!", file = stderr())
quit("no", 1)
}
old_wd <- getwd()
setwd(args$utilDir)
source("base_utils.r")
source("mtx_utils.r")
setwd(old_wd)
check_path_exists(args$ad, "AD mtx")
check_path_exists(args$dp, "DP mtx")
check_arg_null(args$outfile, "the output REF mtx file")
# core part
# load ad mtx and dp mtx.
ad <- parse_mtx_file(args$ad)
if (is.null(ad)) {
error_exit("Error: the input AD file is invalid!")
}
dp <- parse_mtx_file(args$dp)
if (is.null(dp)) {
error_exit("Error: the input DP file is invalid!")
}
if (ad$nrow != dp$nrow || ad$ncol != dp$ncol || ad$nval > dp$nval) {
error_exit("Error: the headers of AD and DP mtx are not compatible!")
}
# subtract ad from dp to get the values of ref.
mdata <- merge(ad$data, dp$data, by = c("row", "col"), all = T,
suffixes = c("_ad", "_dp"), sort = T)
nna <- sum(is.na(mdata$value_dp)) # Each record in the AD mtx should have a corresponding record in the DP mtx.
if (nna > 0) {
msg <- paste0("Error: AD mtx has ", nna, " records that are not in DP mtx!")
error_exit(msg)
}
mdata$value_ad[is.na(mdata$value_ad)] <- 0
mdata$value_ref <- mdata$value_dp - mdata$value_ad
ref_data <- mdata[mdata$value_ref > 0, c("row", "col", "value_ref")]
# output to file.
mtx_ref <- list(
nrow = ad$nrow,
ncol = ad$ncol,
nval = nrow(ref_data),
data = ref_data
)
write_mtx_file(mtx_ref, args$outfile)
print("All Done!")
|
lemma dist_nz: "x \<noteq> y \<longleftrightarrow> 0 < dist x y" |
import matplotlib.pyplot as plt
from FT.all_subj import all_subj_names,all_subj_folders
import numpy as np
import pandas as pd
from FT.weighted_tracts import nodes_labels_mega
import networkx as nx
import scipy.io as sio
def all_g_prop():
subj = all_subj_folders.copy()
weighted_mat = r'\weighted_mega_wholebrain_plus.npy'
nonweighted_mat = r'\non-weighted_mega_wholebrain_plus.npy'
index_to_text_file = r'C:\Users\Admin\my_scripts\aal\megaatlas\megaatlas2nii.txt'
labels_headers, idx = nodes_labels_mega(index_to_text_file)
id = np.argsort(idx)
return subj, weighted_mat, nonweighted_mat, labels_headers, id
def save_df_as_csv(folder_name, rank_table):
table_file_name = folder_name + r'\clustering_coeff_rank.csv'
rank_table.to_csv(table_file_name)
if __name__ == '__main__':
subj, weighted_mat, nonweighted_mat, labels_headers, id =all_g_prop()
#nodes_nw= []
#nodes_w = []
nodes_nw = np.zeros([len(subj),len(id)])
nodes_w = np.zeros([len(subj),len(id)])
for i,s in enumerate(subj):
folder_name = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V6\after_file_prep' + s
# non-weighted:
mat_file_name = folder_name + nonweighted_mat
mat = np.load(mat_file_name)
mat = mat[id]
mat = mat[:,id]
mat[mat < 0] = 0
mat[mat > 1] = 0
G = nx.from_numpy_array(mat)
clustering_nw_vals = nx.clustering(G, weight='weight')
nw = pd.DataFrame.from_dict(clustering_nw_vals, orient='index')
# weighted:
mat_file_name = folder_name + weighted_mat
mat = np.load(mat_file_name)
mat = mat[id]
mat = mat[:,id]
mat[mat < 0] = 0
mat[mat > 1] = 0
G = nx.from_numpy_array(mat)
clustering_w_vals = nx.clustering(G, weight='weight')
w = pd.DataFrame.from_dict(clustering_w_vals, orient='index')
rank_table = pd.concat([nw, w], axis=1)
rank_table.columns = ['non-weighted_vals', 'weighted_vals']
rank_table['non-weighted_ranks'] = rank_table['non-weighted_vals'].rank().astype('int64')
rank_table['weighted_ranks'] = rank_table['weighted_vals'].rank().astype('int64')
rank_table['cortex_part'] = labels_headers
rank_table['mutual'] = (rank_table['weighted_ranks'] + rank_table['non-weighted_ranks'])
rank_table['mutual_rank'] = rank_table['mutual'].rank().astype('int64')
#save_df_as_csv(folder_name, rank_table)
nodes_nw[i,:] = np.asarray(rank_table['non-weighted_ranks'])
nodes_w[i,:] = np.asarray(rank_table['weighted_ranks'])
#nodes_nw = nodes_nw + list(rank_table['non-weighted_vals'])
#nodes_w = nodes_w + list(rank_table['weighted_vals'])
nw_name = r'C:\Users\Admin\my_scripts\Ax3D_Pack\Testings\clus_nw.mat'
w_name = r'C:\Users\Admin\my_scripts\Ax3D_Pack\Testings\clus_w.mat'
sio.savemat(nw_name, {'nw_clustering_coeff_mat': nodes_nw})
sio.savemat(w_name, {'w_clustering_coeff_mat': nodes_w})
np.save(r'C:\Users\Admin\my_scripts\Ax3D_Pack\Testings\clus_nw',nodes_nw)
np.save(r'C:\Users\Admin\my_scripts\Ax3D_Pack\Testings\clus_w',nodes_w)
|
# GraphHopper Directions API
#
# You use the GraphHopper Directions API to add route planning, navigation and route optimization to your software. E.g. the Routing API has turn instructions and elevation data and the Route Optimization API solves your logistic problems and supports various constraints like time window and capacity restrictions. Also it is possible to get all distances between all locations with our fast Matrix API.
#
# OpenAPI spec version: 1.0.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ModelBreak Class
#'
#' @field earliest
#' @field latest
#' @field duration
#' @field max_driving_time
#' @field initial_driving_time
#' @field possible_split
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ModelBreak <- R6::R6Class(
'ModelBreak',
public = list(
`earliest` = NULL,
`latest` = NULL,
`duration` = NULL,
`max_driving_time` = NULL,
`initial_driving_time` = NULL,
`possible_split` = NULL,
initialize = function(`earliest`, `latest`, `duration`, `max_driving_time`, `initial_driving_time`, `possible_split`){
if (!missing(`earliest`)) {
stopifnot(is.numeric(`earliest`), length(`earliest`) == 1)
self$`earliest` <- `earliest`
}
if (!missing(`latest`)) {
stopifnot(is.numeric(`latest`), length(`latest`) == 1)
self$`latest` <- `latest`
}
if (!missing(`duration`)) {
stopifnot(is.numeric(`duration`), length(`duration`) == 1)
self$`duration` <- `duration`
}
if (!missing(`max_driving_time`)) {
stopifnot(is.numeric(`max_driving_time`), length(`max_driving_time`) == 1)
self$`max_driving_time` <- `max_driving_time`
}
if (!missing(`initial_driving_time`)) {
stopifnot(is.numeric(`initial_driving_time`), length(`initial_driving_time`) == 1)
self$`initial_driving_time` <- `initial_driving_time`
}
if (!missing(`possible_split`)) {
stopifnot(is.list(`possible_split`), length(`possible_split`) != 0)
lapply(`possible_split`, function(x) stopifnot(is.character(x)))
self$`possible_split` <- `possible_split`
}
},
toJSON = function() {
ModelBreakObject <- list()
if (!is.null(self$`earliest`)) {
ModelBreakObject[['earliest']] <- self$`earliest`
}
if (!is.null(self$`latest`)) {
ModelBreakObject[['latest']] <- self$`latest`
}
if (!is.null(self$`duration`)) {
ModelBreakObject[['duration']] <- self$`duration`
}
if (!is.null(self$`max_driving_time`)) {
ModelBreakObject[['max_driving_time']] <- self$`max_driving_time`
}
if (!is.null(self$`initial_driving_time`)) {
ModelBreakObject[['initial_driving_time']] <- self$`initial_driving_time`
}
if (!is.null(self$`possible_split`)) {
ModelBreakObject[['possible_split']] <- self$`possible_split`
}
ModelBreakObject
},
fromJSON = function(ModelBreakJson) {
ModelBreakObject <- jsonlite::fromJSON(ModelBreakJson)
if (!is.null(ModelBreakObject$`earliest`)) {
self$`earliest` <- ModelBreakObject$`earliest`
}
if (!is.null(ModelBreakObject$`latest`)) {
self$`latest` <- ModelBreakObject$`latest`
}
if (!is.null(ModelBreakObject$`duration`)) {
self$`duration` <- ModelBreakObject$`duration`
}
if (!is.null(ModelBreakObject$`max_driving_time`)) {
self$`max_driving_time` <- ModelBreakObject$`max_driving_time`
}
if (!is.null(ModelBreakObject$`initial_driving_time`)) {
self$`initial_driving_time` <- ModelBreakObject$`initial_driving_time`
}
if (!is.null(ModelBreakObject$`possible_split`)) {
self$`possible_split` <- ModelBreakObject$`possible_split`
}
},
toJSONString = function() {
sprintf(
'{
"earliest": %d,
"latest": %d,
"duration": %d,
"max_driving_time": %d,
"initial_driving_time": %d,
"possible_split": [%s]
}',
self$`earliest`,
self$`latest`,
self$`duration`,
self$`max_driving_time`,
self$`initial_driving_time`,
lapply(self$`possible_split`, function(x) paste(paste0('"', x, '"'), sep=","))
)
},
fromJSONString = function(ModelBreakJson) {
ModelBreakObject <- jsonlite::fromJSON(ModelBreakJson)
self$`earliest` <- ModelBreakObject$`earliest`
self$`latest` <- ModelBreakObject$`latest`
self$`duration` <- ModelBreakObject$`duration`
self$`max_driving_time` <- ModelBreakObject$`max_driving_time`
self$`initial_driving_time` <- ModelBreakObject$`initial_driving_time`
self$`possible_split` <- ModelBreakObject$`possible_split`
}
)
)
|
import system.io
def io.buffer_cmd (args : io.process.spawn_args) : io char_buffer :=
do child ← io.proc.spawn { args with stdout := io.process.stdio.piped },
buf ← io.fs.read_to_end child.stdout,
exitv ← io.proc.wait child,
when (exitv ≠ 0) $ io.fail $ "process exited with status " ++ to_string exitv,
return buf
def PYTHON_SCRIPT := "/cvxopt/opt.py"
meta def blah := do
b <- tactic.unsafe_run_io $ io.buffer_cmd { cmd := "python3", args := [PYTHON_SCRIPT] },
trace b.to_string
return b.to_string
example : false :=
begin
-- blah,
end |
Require Import ZArith.
Require Import Basic_type.
Definition zn2z_word_comm : forall w n, zn2z (word w n) = word (zn2z w) n.
fix zn2z_word_comm 2.
intros w n; case n.
reflexivity.
intros n0;simpl.
case (zn2z_word_comm w n0).
reflexivity.
Defined.
Fixpoint extend (n:nat) {struct n} : forall w:Set, zn2z w -> word w (S n) :=
match n return forall w:Set, zn2z w -> word w (S n) with
| O => fun w x => x
| S m =>
let aux := extend m in
fun w x => WW W0 (aux w x)
end.
Section ExtendMax.
Variable w:Set.
Definition Tmax n m :=
( {p:nat| word (word w n) p = word w m}
+ {p:nat| word (word w m) p = word w n})%type.
Definition max : forall n m, Tmax n m.
fix max 1;intros n.
case n.
intros m;left;exists m;exact (refl_equal (word w m)).
intros n0 m;case m.
right;exists (S n0);exact (refl_equal (word w (S n0))).
intros m0;case (max n0 m0);intros H;case H;intros p Heq.
left;exists p;simpl.
case (zn2z_word_comm (word w n0) p).
case Heq.
exact (refl_equal (zn2z (word (word w n0) p))).
right;exists p;simpl.
case (zn2z_word_comm (word w m0) p).
case Heq.
exact (refl_equal (zn2z (word (word w m0) p))).
Defined.
Definition extend_to_max :
forall n m (x:zn2z (word w n)) (y:zn2z (word w m)),
(zn2z (word w m) + zn2z (word w n))%type.
intros n m x y.
case (max n m);intros (p, Heq).
left; case Heq;exact (extend p (word w n) x).
right;case Heq;exact (extend p (word w m) y).
Defined.
End ExtendMax.
Section Reduce.
Variable w : Set.
Variable nT : Set.
Variable N0 : nT.
Variable eq0 : w -> bool.
Variable reduce_n : w -> nT.
Variable zn2z_to_Nt : zn2z w -> nT.
Definition reduce_n1 (x:zn2z w) :=
match x with
| W0 => N0
| WW xh xl =>
if eq0 xh then reduce_n xl
else zn2z_to_Nt x
end.
End Reduce.
Section ReduceRec.
Variable w : Set.
Variable nT : Set.
Variable N0 : nT.
Variable reduce_1n : zn2z w -> nT.
Variable c : forall n, word w (S n) -> nT.
Fixpoint reduce_n (n:nat) : word w (S n) -> nT :=
match n return word w (S n) -> nT with
| O => reduce_1n
| S m => fun x =>
match x with
| W0 => N0
| WW xh xl =>
match xh with
| W0 => @reduce_n m xl
| _ => @c (S m) x
end
end
end.
End ReduceRec.
Definition opp_compare cmp :=
match cmp with
| Lt => Gt
| Eq => Eq
| Gt => Lt
end.
Section CompareRec.
Variable wm w : Set.
Variable w_0 : w.
Variable compare : w -> w -> comparison.
Variable compare0_m : wm -> comparison.
Variable compare_m : wm -> w -> comparison.
Fixpoint compare0_mn (n:nat) : word wm n -> comparison :=
match n return word wm n -> comparison with
| 0 => compare0_m
| S m => fun x =>
match x with
| W0 => Eq
| WW xh xl =>
match compare0_mn m xh with
| Eq => compare0_mn m xl
| r => Lt
end
end
end.
Fixpoint compare_mn_1 (n:nat) : word wm n -> w -> comparison :=
match n return word wm n -> w -> comparison with
| 0 => compare_m
| S m => fun x y =>
match x with
| W0 => compare w_0 y
| WW xh xl =>
match compare0_mn m xh with
| Eq => compare_mn_1 m xl y
| r => Gt
end
end
end.
End CompareRec.
|
import matplotlib.pyplot as plt
import numpy as np
# I ran Richardson extrapolation, using ground truth 1024x1024, for smooth and discontinuous
# shear layer, mean and variance. Each experiment I ran computing the extrapolation using
# resolutions in the triplets (64, 128, 256) and (128, 256, 512) as input.
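# For reference, a sketch of how a Richardson-extrapolated value can be obtained from three
# successive resolutions (this helper is illustrative only and was not used to produce the
# numbers below):
def richardson_extrapolate(u_coarse, u_mid, u_fine):
    # Assuming a single dominant error term ~ C*h^p and a mesh width that halves at each
    # level, estimate the observed order p and extrapolate towards h -> 0.
    p = np.log2(abs(u_coarse - u_mid) / abs(u_mid - u_fine))
    u_extrap = u_fine + (u_fine - u_mid) / (2.0**p - 1.0)
    return u_extrap, p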
smooth_mean_RE = np.array([0.81, 0.36])
smooth_var_RE = np.array([9.17, 8.33])
discont_mean_RE = np.array([1.67, 0.97])
discont_var_RE = np.array([11.4, 7.97])
smooth_mean_RE_w5 = np.array([0.48, 0.25])
smooth_var_RE_w5 = np.array([9.12, 8.32])
discont_mean_RE_w5 = np.array([1.58, 0.95])
discont_var_RE_w5 = np.array([11.36, 7.96])
smooth_mean_data = np.array([4.13, 1.99, 0.9, 0.37])
smooth_var_data = np.array([17.46, 13.65, 9.17, 8.33])
discont_mean_data = np.array([4.33, 2.39, 1.67, 0.97])
discont_var_data = np.array([34.19, 22.29, 11.65, 7.97])
smooth_mean_data_w5 = np.array([4.08, 1.92, 0.82, 0.3])
smooth_var_data_w5 = np.array([17.17, 13.49, 9.12, 8.32])
discont_mean_data_w5 = np.array([4.1, 2.21, 1.58, 0.95])
discont_var_data_w5 = np.array([34.05, 22.21, 11.6, 7.96])
N = [256, 512]
Nplus = [64,128,256,512]
# plt.loglog(Nplus, smooth_mean_data, '.', linestyle='solid', c='red', label="smooth, mean (d)", base=2)
# plt.loglog(Nplus, smooth_mean_data_w5, '.', linestyle='dashed', c='red', label="smooth, mean (d+W5)", base=2)
# plt.loglog(N, smooth_mean_RE, '.', linestyle='dotted', c='red', label="smooth, mean (RE)", base=2)
# plt.loglog(N, smooth_mean_RE_w5, '.', linestyle='dashdot', c='red', label="smooth, mean (RE+W5)", base=2)
# plt.loglog(Nplus, smooth_var_data, '.', linestyle='solid', c='green', label="smooth, var (d)", base=2)
# plt.loglog(Nplus, smooth_var_data_w5, '.', linestyle='dashed', c='green', label="smooth, var (d+W5)", base=2)
# plt.loglog(N, smooth_var_RE, '.', linestyle='dotted', c='green', label="smooth, var (RE)", base=2)
# plt.loglog(N, smooth_var_RE_w5, '.', linestyle='dashdot', c='green', label="smooth, var (RE+W5)", base=2)
plt.loglog(Nplus, discont_mean_data, '.', linestyle='solid', c='blue', label="discont., mean (d)", base=2)
plt.loglog(Nplus, discont_mean_data_w5, '.', linestyle='dashed', c='blue', label="discont., mean (d+W5)", base=2)
plt.loglog(N, discont_mean_RE, '.', linestyle='dotted', c='blue', label="discont., mean (RE)", base=2)
plt.loglog(N, discont_mean_RE_w5, '.', linestyle='dashdot', c='blue', label="discont., mean (RE+W5)", base=2)
plt.loglog(Nplus, discont_var_data, '.', linestyle='solid', c='black', label="discont., var (d)", base=2)
plt.loglog(Nplus, discont_var_data_w5, '.', linestyle='dashed', c='black', label="discont., var (d+W5)", base=2)
plt.loglog(N, discont_var_RE, '.', linestyle='dotted', c='black', label="discont., var (RE)", base=2)
plt.loglog(N, discont_var_RE_w5, '.', linestyle='dashdot', c='black', label="discont., var (RE+W5)", base=2)
plt.legend()
plt.grid(which='major', linestyle='-')
plt.grid(which='minor', linestyle='--', linewidth='0.2')
plt.xlabel("N")
plt.ylabel("L1 error (% of GT)")
plt.title("Rich. extr. based on N/4, N/2, N. Discontinous SL, stats.")
plt.show()
|
lemma LIMSEQ_Suc_n_over_n: "(\<lambda>n. of_nat (Suc n) / of_nat n :: 'a :: real_normed_field) \<longlonglongrightarrow> 1" |
(* Author: Lukas Koller *)
theory KnightsTour
imports Main
begin
section \<open>Introduction and Definitions\<close>
text \<open>A Knight's path is a sequence of moves on a chessboard s.t. every step in the sequence is a
valid move for a Knight and the Knight visits every square on the board exactly once.
A Knight is a chess figure that is only able to move two squares vertically and one square
horizontally or two squares horizontally and one square vertically. Finding a Knight's path is an
instance of the Hamiltonian Path Problem. A Knight's circuit is a Knight's path, where additionally
the Knight can move from the last square to the first square of the path, forming a loop.
@{cite "cull_decurtins_1987"} proves the existence of a Knight's path on a \<open>n\<times>m\<close>-board for
sufficiently large \<open>n\<close> and \<open>m\<close>. The main idea for the proof is to inductively construct a Knight's
path for the \<open>n\<times>m\<close>-board from a few pre-computed Knight's paths for small boards, i.e. \<open>5\<times>5\<close>,
\<open>8\<times>6\<close>, ..., \<open>8\<times>9\<close>. The paths for small boards are transformed (i.e. transpose, mirror, translate)
and concatenated to create paths for larger boards.
While formalizing the proofs I discovered two mistakes in the original proof in
@{cite "cull_decurtins_1987"}: (i) the pre-computed path for the \<open>6\<times>6\<close>-board that ends in
the upper-left (in Figure 2) and (ii) the pre-computed path for the \<open>8\<times>8\<close>-board that ends in
the upper-left (in Figure 5) are incorrect. I.e. on the \<open>6\<times>6\<close>-board the Knight cannot step
from square 26 to square 27; in the \<open>8\<times>8\<close>-board the Knight cannot step from square 27 to
square 28. In this formalization I have replaced the two incorrect paths with correct paths.\<close>
text \<open>A square on a board is identified by its coordinates.\<close>
type_synonym square = "int \<times> int"
text \<open>A board is represented as a set of squares. Note, that this allows boards to have an
arbitrary shape and do not necessarily need to be rectangular.\<close>
type_synonym board = "square set"
text \<open>A (rectangular) \<open>(n\<times>m)\<close>-board is the set of all squares \<open>(i,j)\<close> where \<open>1 \<le> i \<le> n\<close>
and \<open>1 \<le> j \<le> m\<close>. \<open>(1,1)\<close> is the lower-left corner, and \<open>(n,m)\<close> is the upper-right corner.\<close>
definition board :: "nat \<Rightarrow> nat \<Rightarrow> board" where
"board n m = {(i,j) |i j. 1 \<le> i \<and> i \<le> int n \<and> 1 \<le> j \<and> j \<le> int m}"
text \<open>A path is a sequence of steps on a board. A path is represented by the list of visited
squares on the board. Each square on the \<open>(n\<times>m)\<close>-board is identified by its coordinates \<open>(i,j)\<close>.\<close>
type_synonym path = "square list"
text \<open>A Knight can only move two squares vertically and one square horizontally or two squares
horizontally and one square vertically. Thus, a knight at position \<open>(i,j)\<close> can only move
to \<open>(i\<plusminus>1,j\<plusminus>2)\<close> or \<open>(i\<plusminus>2,j\<plusminus>1)\<close>.\<close>
definition valid_step :: "square \<Rightarrow> square \<Rightarrow> bool" where
"valid_step s\<^sub>i s\<^sub>j \<equiv> (case s\<^sub>i of (i,j) \<Rightarrow> s\<^sub>j \<in> {(i+1,j+2),(i-1,j+2),(i+1,j-2),(i-1,j-2),
(i+2,j+1),(i-2,j+1),(i+2,j-1),(i-2,j-1)})"
text \<open>Now we define an inductive predicate that characterizes a Knight's path. A square \<open>s\<^sub>i\<close> can be
pre-pended to a current Knight's path \<open>s\<^sub>j#ps\<close> if (i) there is a valid step from the square \<open>s\<^sub>i\<close> to
the first square \<open>s\<^sub>j\<close> of the current path and (ii) the square \<open>s\<^sub>i\<close> has not been visited yet.\<close>
inductive knights_path :: "board \<Rightarrow> path \<Rightarrow> bool" where
"knights_path {s\<^sub>i} [s\<^sub>i]"
| "s\<^sub>i \<notin> b \<Longrightarrow> valid_step s\<^sub>i s\<^sub>j \<Longrightarrow> knights_path b (s\<^sub>j#ps) \<Longrightarrow> knights_path (b \<union> {s\<^sub>i}) (s\<^sub>i#s\<^sub>j#ps)"
code_pred knights_path .
text \<open>A sequence is a Knight's circuit iff the sequence if a Knight's path and there is a valid
step from the last square to the first square.\<close>
definition "knights_circuit b ps \<equiv> (knights_path b ps \<and> valid_step (last ps) (hd ps))"
section \<open>Executable Checker for a Knight's Path\<close>
text \<open>This section gives the implementation and correctness-proof for an executable checker for a
knights-path wrt. the definition @{const knights_path}.\<close>
subsection \<open>Implementation of an Executable Checker\<close>
fun row_exec :: "nat \<Rightarrow> int set" where
"row_exec 0 = {}"
| "row_exec m = insert (int m) (row_exec (m-1))"
fun board_exec_aux :: "nat \<Rightarrow> int set \<Rightarrow> board" where
"board_exec_aux 0 M = {}"
| "board_exec_aux k M = {(int k,j) |j. j \<in> M} \<union> board_exec_aux (k-1) M"
text \<open>Compute a board.\<close>
fun board_exec :: "nat \<Rightarrow> nat \<Rightarrow> board" where
"board_exec n m = board_exec_aux n (row_exec m)"
fun step_checker :: "square \<Rightarrow> square \<Rightarrow> bool" where
"step_checker (i,j) (i',j') =
((i+1,j+2) = (i',j') \<or> (i-1,j+2) = (i',j') \<or> (i+1,j-2) = (i',j') \<or> (i-1,j-2) = (i',j')
\<or> (i+2,j+1) = (i',j') \<or> (i-2,j+1) = (i',j') \<or> (i+2,j-1) = (i',j') \<or> (i-2,j-1) = (i',j'))"
fun path_checker :: "board \<Rightarrow> path \<Rightarrow> bool" where
"path_checker b [] = False"
| "path_checker b [s\<^sub>i] = ({s\<^sub>i} = b)"
| "path_checker b (s\<^sub>i#s\<^sub>j#ps) = (s\<^sub>i \<in> b \<and> step_checker s\<^sub>i s\<^sub>j \<and> path_checker (b - {s\<^sub>i}) (s\<^sub>j#ps))"
fun circuit_checker :: "board \<Rightarrow> path \<Rightarrow> bool" where
"circuit_checker b ps = (path_checker b ps \<and> step_checker (last ps) (hd ps))"
subsection \<open>Correctness Proof of the Executable Checker\<close>
lemma row_exec_leq: "j \<in> row_exec m \<longleftrightarrow> 1 \<le> j \<and> j \<le> int m"
by (induction m) auto
lemma board_exec_aux_leq_mem: "(i,j) \<in> board_exec_aux k M \<longleftrightarrow> 1 \<le> i \<and> i \<le> int k \<and> j \<in> M"
by (induction k M rule: board_exec_aux.induct) auto
lemma board_exec_leq: "(i,j) \<in> board_exec n m \<longleftrightarrow> 1 \<le> i \<and> i \<le> int n \<and> 1 \<le> j \<and> j \<le> int m"
using board_exec_aux_leq_mem row_exec_leq by auto
lemma board_exec_correct: "board n m = board_exec n m"
unfolding board_def using board_exec_leq by auto
lemma step_checker_correct: "step_checker s\<^sub>i s\<^sub>j \<longleftrightarrow> valid_step s\<^sub>i s\<^sub>j"
proof
assume "step_checker s\<^sub>i s\<^sub>j"
then show "valid_step s\<^sub>i s\<^sub>j"
unfolding valid_step_def
apply (cases s\<^sub>i)
apply (cases s\<^sub>j)
apply auto
done
next
assume assms: "valid_step s\<^sub>i s\<^sub>j"
then show "step_checker s\<^sub>i s\<^sub>j"
unfolding valid_step_def by auto
qed
lemma step_checker_rev: "step_checker (i,j) (i',j') \<Longrightarrow> step_checker (i',j') (i,j)"
apply (simp only: step_checker.simps)
by (elim disjE) auto
lemma knights_path_intro_rev:
assumes "s\<^sub>i \<in> b" "valid_step s\<^sub>i s\<^sub>j" "knights_path (b - {s\<^sub>i}) (s\<^sub>j#ps)"
shows "knights_path b (s\<^sub>i#s\<^sub>j#ps)"
using assms
proof -
assume assms: "s\<^sub>i \<in> b" "valid_step s\<^sub>i s\<^sub>j" "knights_path (b - {s\<^sub>i}) (s\<^sub>j#ps)"
then have "s\<^sub>i \<notin> (b - {s\<^sub>i})" "b - {s\<^sub>i} \<union> {s\<^sub>i} = b"
by auto
then show ?thesis
using assms knights_path.intros(2)[of s\<^sub>i "b - {s\<^sub>i}"] by auto
qed
text \<open>Final correctness corollary for the executable checker @{const path_checker}.\<close>
lemma path_checker_correct: "path_checker b ps \<longleftrightarrow> knights_path b ps"
proof
assume "path_checker b ps"
then show "knights_path b ps"
proof (induction rule: path_checker.induct)
case (3 s\<^sub>i s\<^sub>j xs b)
then show ?case using step_checker_correct knights_path_intro_rev by auto
qed (auto intro: knights_path.intros)
next
assume "knights_path b ps"
then show "path_checker b ps"
using step_checker_correct
by (induction rule: knights_path.induct) (auto elim: knights_path.cases)
qed
corollary knights_path_exec_simp: "knights_path (board n m) ps \<longleftrightarrow> path_checker (board_exec n m) ps"
using board_exec_correct path_checker_correct[symmetric] by simp
lemma circuit_checker_correct: "circuit_checker b ps \<longleftrightarrow> knights_circuit b ps"
unfolding knights_circuit_def using path_checker_correct step_checker_correct by auto
corollary knights_circuit_exec_simp:
"knights_circuit (board n m) ps \<longleftrightarrow> circuit_checker (board_exec n m) ps"
using board_exec_correct circuit_checker_correct[symmetric] by simp
section \<open>Basic Properties of @{const knights_path} and @{const knights_circuit}\<close>
lemma board_leq_subset: "n\<^sub>1 \<le> n\<^sub>2 \<and> m\<^sub>1 \<le> m\<^sub>2 \<Longrightarrow> board n\<^sub>1 m\<^sub>1 \<subseteq> board n\<^sub>2 m\<^sub>2"
unfolding board_def by auto
lemma finite_row_exec: "finite (row_exec m)"
by (induction m) auto
lemma finite_board_exec_aux: "finite M \<Longrightarrow> finite (board_exec_aux n M)"
by (induction n) auto
lemma board_finite: "finite (board n m)"
using finite_board_exec_aux finite_row_exec by (simp only: board_exec_correct) auto
lemma card_row_exec: "card (row_exec m) = m"
proof (induction m)
case (Suc m)
have "int (Suc m) \<notin> row_exec m"
using row_exec_leq by auto
then have "card (insert (int (Suc m)) (row_exec m)) = 1 + card (row_exec m)"
using card_Suc_eq by (metis Suc plus_1_eq_Suc row_exec.simps(1))
then have "card (row_exec (Suc m)) = 1 + card (row_exec m)"
by auto
then show ?case using Suc.IH by auto
qed auto
lemma set_comp_ins:
"{(k,j) |j. j \<in> insert x M} = insert (k,x) {(k,j) |j. j \<in> M}" (is "?Mi = ?iM")
proof
show "?Mi \<subseteq> ?iM"
proof
fix y assume "y \<in> ?Mi"
then obtain j where [simp]: "y = (k,j)" and "j \<in> insert x M" by blast
then have "j = x \<or> j \<in> M" by auto
then show "y \<in> ?iM" by (elim disjE) auto
qed
next
show "?iM \<subseteq> ?Mi"
proof
fix y assume "y \<in> ?iM"
then obtain j where [simp]: "y = (k,j)" and "j \<in> insert x M" by blast
then have "j = x \<or> j \<in> M" by auto
then show "y \<in> ?Mi" by (elim disjE) auto
qed
qed
lemma finite_card_set_comp: "finite M \<Longrightarrow> card {(k,j) |j. j \<in> M} = card M"
proof (induction M rule: finite_induct)
case (insert x M)
then show ?case using set_comp_ins[of k x M] by auto
qed auto
lemma card_board_exec_aux: "finite M \<Longrightarrow> card (board_exec_aux k M) = k * card M"
proof (induction k)
case (Suc k)
let ?M'="{(int (Suc k),j) |j. j \<in> M}"
let ?rec_k="board_exec_aux k M"
have finite: "finite ?M'" "finite ?rec_k"
using Suc finite_board_exec_aux by auto
then have card_Un_simp: "card (?M' \<union> ?rec_k) = card ?M' + card ?rec_k"
using board_exec_aux_leq_mem card_Un_Int[of ?M' ?rec_k] by auto
have card_M: "card ?M' = card M"
using Suc finite_card_set_comp by auto
have "card (board_exec_aux (Suc k) M) = card ?M' + card ?rec_k"
using card_Un_simp by auto
also have "... = card M + k * card M"
using Suc card_M by auto
also have "... = (Suc k) * card M"
by auto
finally show ?case .
qed auto
lemma card_board: "card (board n m) = n * m"
proof -
have "card (board n m) = card (board_exec_aux n (row_exec m))"
using board_exec_correct by auto
also have "... = n * m"
using card_row_exec card_board_exec_aux finite_row_exec by auto
finally show ?thesis .
qed
lemma knights_path_board_non_empty: "knights_path b ps \<Longrightarrow> b \<noteq> {}"
by (induction arbitrary: ps rule: knights_path.induct) auto
lemma knights_path_board_m_n_geq_1: "knights_path (board n m) ps \<Longrightarrow> min n m \<ge> 1"
unfolding board_def using knights_path_board_non_empty by fastforce
lemma knights_path_non_nil: "knights_path b ps \<Longrightarrow> ps \<noteq> []"
by (induction arbitrary: b rule: knights_path.induct) auto
lemma knights_path_set_eq: "knights_path b ps \<Longrightarrow> set ps = b"
by (induction rule: knights_path.induct) auto
lemma knights_path_subset:
"knights_path b\<^sub>1 ps\<^sub>1 \<Longrightarrow> knights_path b\<^sub>2 ps\<^sub>2 \<Longrightarrow> set ps\<^sub>1 \<subseteq> set ps\<^sub>2 \<longleftrightarrow> b\<^sub>1 \<subseteq> b\<^sub>2"
using knights_path_set_eq by auto
lemma knights_path_board_unique: "knights_path b\<^sub>1 ps \<Longrightarrow> knights_path b\<^sub>2 ps \<Longrightarrow> b\<^sub>1 = b\<^sub>2"
using knights_path_set_eq by auto
lemma valid_step_neq: "valid_step s\<^sub>i s\<^sub>j \<Longrightarrow> s\<^sub>i \<noteq> s\<^sub>j"
unfolding valid_step_def by auto
lemma valid_step_non_transitive: "valid_step s\<^sub>i s\<^sub>j \<Longrightarrow> valid_step s\<^sub>j s\<^sub>k \<Longrightarrow> \<not>valid_step s\<^sub>i s\<^sub>k"
proof -
assume assms: "valid_step s\<^sub>i s\<^sub>j" "valid_step s\<^sub>j s\<^sub>k"
obtain i\<^sub>i j\<^sub>i i\<^sub>j j\<^sub>j i\<^sub>k j\<^sub>k where [simp]: "s\<^sub>i = (i\<^sub>i,j\<^sub>i)" "s\<^sub>j = (i\<^sub>j,j\<^sub>j)" "s\<^sub>k = (i\<^sub>k,j\<^sub>k)" by force
then have "step_checker (i\<^sub>i,j\<^sub>i) (i\<^sub>j,j\<^sub>j)" "step_checker (i\<^sub>j,j\<^sub>j) (i\<^sub>k,j\<^sub>k)"
using assms step_checker_correct by auto
then show "\<not>valid_step s\<^sub>i s\<^sub>k"
apply (simp add: step_checker_correct[symmetric])
apply (elim disjE)
apply auto
done
qed
lemma knights_path_distinct: "knights_path b ps \<Longrightarrow> distinct ps"
proof (induction rule: knights_path.induct)
case (2 s\<^sub>i b s\<^sub>j ps)
then have "s\<^sub>i \<notin> set (s\<^sub>j # ps)"
using knights_path_set_eq valid_step_neq by blast
then show ?case using 2 by auto
qed auto
lemma knights_path_length: "knights_path b ps \<Longrightarrow> length ps = card b"
using knights_path_set_eq knights_path_distinct by (metis distinct_card)
lemma knights_path_take:
assumes "knights_path b ps" "0 < k" "k < length ps"
shows "knights_path (set (take k ps)) (take k ps)"
using assms
proof (induction arbitrary: k rule: knights_path.induct)
case (2 s\<^sub>i b s\<^sub>j ps)
then have "k = 1 \<or> k = 2 \<or> 2 < k" by force
then show ?case
using 2
proof (elim disjE)
assume "k = 2"
then have "take k (s\<^sub>i#s\<^sub>j#ps) = [s\<^sub>i,s\<^sub>j]" "s\<^sub>i \<notin> {s\<^sub>j}" using 2 valid_step_neq by auto
then show ?thesis using 2 knights_path.intros by auto
next
assume "2 < k"
then have k_simps: "k-2 = k-1-1" "0 < k-2" "k-2 < length ps" and
take_simp1: "take k (s\<^sub>i#s\<^sub>j#ps) = s\<^sub>i#take (k-1) (s\<^sub>j#ps)" and
take_simp2: "take k (s\<^sub>i#s\<^sub>j#ps) = s\<^sub>i#s\<^sub>j#take (k-1-1) ps"
using assms 2 take_Cons'[of k s\<^sub>i "s\<^sub>j#ps"] take_Cons'[of "k-1" s\<^sub>j ps] by auto
then have "knights_path (set (take (k-1) (s\<^sub>j#ps))) (take (k-1) (s\<^sub>j#ps))"
using 2 k_simps by auto
then have kp: "knights_path (set (take (k-1) (s\<^sub>j#ps))) (s\<^sub>j#take (k-2) ps)"
using take_Cons'[of "k-1" s\<^sub>j ps] by (auto simp: k_simps elim: knights_path.cases)
have no_mem: "s\<^sub>i \<notin> set (take (k-1) (s\<^sub>j#ps))"
using 2 set_take_subset[of "k-1" "s\<^sub>j#ps"] knights_path_set_eq by blast
have "knights_path (set (take (k-1) (s\<^sub>j#ps)) \<union> {s\<^sub>i}) (s\<^sub>i#s\<^sub>j#take (k-2) ps)"
using knights_path.intros(2)[OF no_mem \<open>valid_step s\<^sub>i s\<^sub>j\<close> kp] by auto
then show ?thesis using k_simps take_simp2 knights_path_set_eq by metis
qed (auto intro: knights_path.intros)
qed auto
lemma knights_path_drop:
assumes "knights_path b ps" "0 < k" "k < length ps"
shows "knights_path (set (drop k ps)) (drop k ps)"
using assms
proof (induction arbitrary: k rule: knights_path.induct)
case (2 s\<^sub>i b s\<^sub>j ps)
then have "(k = 1 \<and> ps = []) \<or> (k = 1 \<and> ps \<noteq> []) \<or> 1 < k" by force
then show ?case
using 2
proof (elim disjE)
assume "k = 1 \<and> ps \<noteq> []"
then show ?thesis using 2 knights_path_set_eq by force
next
assume "1 < k"
then have "0 < k-1" "k-1 < length (s\<^sub>j#ps)" "drop k (s\<^sub>i#s\<^sub>j#ps) = drop (k-1) (s\<^sub>j#ps)"
using assms 2 drop_Cons'[of k s\<^sub>i "s\<^sub>j#ps"] by auto
then show ?thesis
using 2 by auto
qed (auto intro: knights_path.intros)
qed auto
text \<open>A Knight's path can be split to form two new disjoint Knight's paths.\<close>
corollary knights_path_split:
assumes "knights_path b ps" "0 < k" "k < length ps"
shows
"\<exists>b\<^sub>1 b\<^sub>2. knights_path b\<^sub>1 (take k ps) \<and> knights_path b\<^sub>2 (drop k ps) \<and> b\<^sub>1 \<union> b\<^sub>2 = b \<and> b\<^sub>1 \<inter> b\<^sub>2 = {}"
using assms
proof -
let ?b\<^sub>1="set (take k ps)"
let ?b\<^sub>2="set (drop k ps)"
have kp1: "knights_path ?b\<^sub>1 (take k ps)" and kp2: "knights_path ?b\<^sub>2 (drop k ps)"
using assms knights_path_take knights_path_drop by auto
have union: "?b\<^sub>1 \<union> ?b\<^sub>2 = b"
using assms knights_path_set_eq by (metis append_take_drop_id set_append)
have inter: "?b\<^sub>1 \<inter> ?b\<^sub>2 = {}"
using assms knights_path_distinct by (metis append_take_drop_id distinct_append)
show ?thesis using kp1 kp2 union inter by auto
qed
text \<open>Append two disjoint Knight's paths.\<close>
corollary knights_path_append:
assumes "knights_path b\<^sub>1 ps\<^sub>1" "knights_path b\<^sub>2 ps\<^sub>2" "b\<^sub>1 \<inter> b\<^sub>2 = {}" "valid_step (last ps\<^sub>1) (hd ps\<^sub>2)"
shows "knights_path (b\<^sub>1 \<union> b\<^sub>2) (ps\<^sub>1 @ ps\<^sub>2)"
using assms
proof (induction arbitrary: ps\<^sub>2 b\<^sub>2 rule: knights_path.induct)
case (1 s\<^sub>i)
then have "s\<^sub>i \<notin> b\<^sub>2" "ps\<^sub>2 \<noteq> []" "valid_step s\<^sub>i (hd ps\<^sub>2)" "knights_path b\<^sub>2 (hd ps\<^sub>2#tl ps\<^sub>2)"
using knights_path_non_nil by auto
then have "knights_path (b\<^sub>2 \<union> {s\<^sub>i}) (s\<^sub>i#hd ps\<^sub>2#tl ps\<^sub>2)"
using knights_path.intros by blast
then show ?case using \<open>ps\<^sub>2 \<noteq> []\<close> by auto
next
case (2 s\<^sub>i b\<^sub>1 s\<^sub>j ps\<^sub>1)
then have "s\<^sub>i \<notin> b\<^sub>1 \<union> b\<^sub>2" "valid_step s\<^sub>i s\<^sub>j" "knights_path (b\<^sub>1 \<union> b\<^sub>2) (s\<^sub>j#ps\<^sub>1@ps\<^sub>2)" by auto
then have "knights_path (b\<^sub>1 \<union> b\<^sub>2 \<union> {s\<^sub>i}) (s\<^sub>i#s\<^sub>j#ps\<^sub>1@ps\<^sub>2)"
using knights_path.intros by auto
then show ?case by auto
qed
lemma valid_step_rev: "valid_step s\<^sub>i s\<^sub>j \<Longrightarrow> valid_step s\<^sub>j s\<^sub>i"
using step_checker_correct step_checker_rev by (metis prod.exhaust_sel)
text \<open>Reverse a Knight's path.\<close>
corollary knights_path_rev:
assumes "knights_path b ps"
shows "knights_path b (rev ps)"
using assms
proof (induction rule: knights_path.induct)
case (2 s\<^sub>i b s\<^sub>j ps)
then have "knights_path {s\<^sub>i} [s\<^sub>i]" "b \<inter> {s\<^sub>i} = {}" "valid_step (last (rev (s\<^sub>j # ps))) (hd [s\<^sub>i])"
using valid_step_rev by (auto intro: knights_path.intros)
then have "knights_path (b \<union> {s\<^sub>i}) ((rev (s\<^sub>j#ps))@[s\<^sub>i])"
using 2 knights_path_append by blast
then show ?case by auto
qed (auto intro: knights_path.intros)
text \<open>Reverse a Knight's circuit.\<close>
corollary knights_circuit_rev:
assumes "knights_circuit b ps"
shows "knights_circuit b (rev ps)"
using assms knights_path_rev valid_step_rev
unfolding knights_circuit_def by (auto simp: hd_rev last_rev)
(* Function to rotate a Knight's circuit to start with (1,1),(3,2),... *)
(* fun rot_c_acc :: "path \<Rightarrow> path \<Rightarrow> path" where
"rot_c_acc (s\<^sub>i#s\<^sub>j#ps) acc =
(if s\<^sub>i = (1,1) then
if s\<^sub>j = (3,2) then s\<^sub>i#rev (s\<^sub>j#ps@acc) else s\<^sub>i#s\<^sub>j#ps@acc
else rot_c_acc (s\<^sub>j#ps) (s\<^sub>i#acc))"
| "rot_c_acc _ acc = []"
definition "rot_c ps \<equiv> rot_c_acc ps []" *)
lemma knights_circuit_rotate1:
assumes "knights_circuit b (s\<^sub>i#ps)"
shows "knights_circuit b (ps@[s\<^sub>i])"
proof (cases "ps = []")
case True
then show ?thesis using assms by auto
next
case False
have kp1: "knights_path b (s\<^sub>i#ps)" "valid_step (last (s\<^sub>i#ps)) (hd (s\<^sub>i#ps))"
using assms unfolding knights_circuit_def by auto
then have kp_elim: "s\<^sub>i \<notin> (b - {s\<^sub>i})" "valid_step s\<^sub>i (hd ps)" "knights_path (b - {s\<^sub>i}) ps"
using \<open>ps \<noteq> []\<close> by (auto elim: knights_path.cases)
then have vs': "valid_step (last (ps@[s\<^sub>i])) (hd (ps@[s\<^sub>i]))"
using \<open>ps \<noteq> []\<close> valid_step_rev by auto
have kp2: "knights_path {s\<^sub>i} [s\<^sub>i]" "(b - {s\<^sub>i}) \<inter> {s\<^sub>i} = {}"
by (auto intro: knights_path.intros)
have vs: "valid_step (last ps) (hd [s\<^sub>i])"
using \<open>ps \<noteq> []\<close> \<open>valid_step (last (s\<^sub>i#ps)) (hd (s\<^sub>i#ps))\<close> by auto
have "(b - {s\<^sub>i}) \<union> {s\<^sub>i} = b"
using kp1 kp_elim knights_path_set_eq by force
then show ?thesis
unfolding knights_circuit_def
using vs knights_path_append[OF \<open>knights_path (b - {s\<^sub>i}) ps\<close> kp2] vs' by auto
qed
text \<open>A Knight's circuit can be rotated to start at any square on the board.\<close>
lemma knights_circuit_rotate_to:
assumes "knights_circuit b ps" "hd (drop k ps) = s\<^sub>i" "k < length ps"
shows "\<exists>ps'. knights_circuit b ps' \<and> hd ps' = s\<^sub>i"
using assms
proof (induction k arbitrary: b ps)
case (Suc k)
let ?s\<^sub>j="hd ps"
let ?ps'="tl ps"
show ?case
proof (cases "s\<^sub>i = ?s\<^sub>j")
case True
then show ?thesis using Suc by auto
next
case False
then have "?ps' \<noteq> []"
using Suc by (metis drop_Nil drop_Suc drop_eq_Nil2 le_antisym nat_less_le)
then have "knights_circuit b (?s\<^sub>j#?ps')"
using Suc by (metis list.exhaust_sel tl_Nil)
then have "knights_circuit b (?ps'@[?s\<^sub>j])" "hd (drop k (?ps'@[?s\<^sub>j])) = s\<^sub>i"
using Suc knights_circuit_rotate1 by (auto simp: drop_Suc)
then show ?thesis using Suc by auto
qed
qed auto
text \<open>For positive boards, (1,1) can only have (2,3) and (3,2) as neighbours.\<close>
lemma valid_step_1_1:
assumes "valid_step (1,1) (i,j)" "i > 0" "j > 0"
shows "(i,j) = (2,3) \<or> (i,j) = (3,2)"
using assms unfolding valid_step_def by auto
lemma list_len_g_1_split: "length xs > 1 \<Longrightarrow> \<exists>x\<^sub>1 x\<^sub>2 xs'. xs = x\<^sub>1#x\<^sub>2#xs'"
proof (induction xs)
case (Cons x xs)
then have "length xs > 0" by auto
then have "length xs \<ge> 1" by presburger
then have "length xs = 1 \<or> length xs > 1" by auto
then show ?case
proof (elim disjE)
assume "length xs = 1"
then obtain x\<^sub>1 where [simp]: "xs = [x\<^sub>1]"
using length_Suc_conv[of xs 0] by auto
then show ?thesis by auto
next
assume "1 < length xs"
then show ?thesis using Cons by auto
qed
qed auto
lemma list_len_g_3_split: "length xs > 3 \<Longrightarrow> \<exists>x\<^sub>1 x\<^sub>2 xs' x\<^sub>3. xs = x\<^sub>1#x\<^sub>2#xs'@[x\<^sub>3]"
proof (induction xs)
case (Cons x xs)
then have "length xs = 3 \<or> length xs > 3" by auto
then show ?case
proof (elim disjE)
assume "length xs = 3"
then obtain x\<^sub>1 xs\<^sub>1 where [simp]: "xs = x\<^sub>1#xs\<^sub>1" "length xs\<^sub>1 = 2"
using length_Suc_conv[of xs 2] by auto
then obtain x\<^sub>2 xs\<^sub>2 where [simp]: "xs\<^sub>1 = x\<^sub>2#xs\<^sub>2" "length xs\<^sub>2 = 1"
using length_Suc_conv[of xs\<^sub>1 1] by auto
then obtain x\<^sub>3 where [simp]: "xs\<^sub>2 = [x\<^sub>3]"
using length_Suc_conv[of xs\<^sub>2 0] by auto
then show ?thesis by auto
next
assume "length xs > 3"
then show ?thesis using Cons by auto
qed
qed auto
text \<open>Any Knight's circuit on a positive board can be rotated to start with (1,1) and
end with (3,2).\<close>
corollary rotate_knights_circuit:
assumes "knights_circuit (board n m) ps" "min n m \<ge> 5"
shows "\<exists>ps. knights_circuit (board n m) ps \<and> hd ps = (1,1) \<and> last ps = (3,2)"
using assms
proof -
let ?b="board n m"
have "knights_path ?b ps"
using assms unfolding knights_circuit_def by auto
then have "(1,1) \<in> set ps"
using assms knights_path_set_eq by (auto simp: board_def)
then obtain k where "hd (drop k ps) = (1,1)" "k < length ps"
by (metis hd_drop_conv_nth in_set_conv_nth)
then obtain ps\<^sub>r where ps\<^sub>r_prems: "knights_circuit ?b ps\<^sub>r" "hd ps\<^sub>r = (1,1)"
using assms knights_circuit_rotate_to by blast
then have kp: "knights_path ?b ps\<^sub>r" and "valid_step (last ps\<^sub>r) (1,1)"
unfolding knights_circuit_def by auto
have "(1,1) \<in> ?b" "(1,2) \<in> ?b" "(1,3) \<in> ?b"
using assms unfolding board_def by auto
then have "(1,1) \<in> set ps\<^sub>r" "(1,2) \<in> set ps\<^sub>r" "(1,3) \<in> set ps\<^sub>r"
using kp knights_path_set_eq by auto
have "3 < card ?b"
using assms board_leq_subset card_board[of 5 5]
card_mono[OF board_finite[of n m], of "board 5 5"] by auto
then have "3 < length ps\<^sub>r"
using knights_path_length kp by auto
then obtain s\<^sub>j ps' s\<^sub>k where [simp]: "ps\<^sub>r = (1,1)#s\<^sub>j#ps'@[s\<^sub>k]"
using \<open>hd ps\<^sub>r = (1,1)\<close> list_len_g_3_split[of ps\<^sub>r] by auto
have "s\<^sub>j \<noteq> s\<^sub>k"
using kp knights_path_distinct by force
have vs_s\<^sub>k: "valid_step s\<^sub>k (1,1)"
using \<open>valid_step (last ps\<^sub>r) (1,1)\<close> by simp
have vs_s\<^sub>j: "valid_step (1,1) s\<^sub>j" and kp': "knights_path (?b - {(1,1)}) (s\<^sub>j#ps'@[s\<^sub>k])"
using kp by (auto elim: knights_path.cases)
have "s\<^sub>j \<in> set ps\<^sub>r" "s\<^sub>k \<in> set ps\<^sub>r" by auto
then have "s\<^sub>j \<in> ?b" "s\<^sub>k \<in> ?b"
using kp knights_path_set_eq by blast+
then have "0 < fst s\<^sub>j \<and> 0 < snd s\<^sub>j" "0 < fst s\<^sub>k \<and> 0 < snd s\<^sub>k"
unfolding board_def by auto
then have "s\<^sub>k = (2,3) \<or> s\<^sub>k = (3,2)" "s\<^sub>j = (2,3) \<or> s\<^sub>j = (3,2)"
using vs_s\<^sub>k vs_s\<^sub>j valid_step_1_1 valid_step_rev by (metis prod.collapse)+
then have "s\<^sub>k = (3,2) \<or> s\<^sub>j = (3,2)"
using \<open>s\<^sub>j \<noteq> s\<^sub>k\<close> by auto
then show ?thesis
proof (elim disjE)
assume "s\<^sub>k = (3,2)"
then have "last ps\<^sub>r = (3,2)" by auto
then show ?thesis using ps\<^sub>r_prems by auto
next
assume "s\<^sub>j = (3,2)"
then have vs: "valid_step (last ((1,1)#rev (s\<^sub>j#ps'@[s\<^sub>k]))) (hd ((1,1)#rev (s\<^sub>j#ps'@[s\<^sub>k])))"
unfolding valid_step_def by auto
have rev_simp: "rev (s\<^sub>j#ps'@[s\<^sub>k]) = s\<^sub>k#(rev ps')@[s\<^sub>j]" by auto
have "knights_path (?b - {(1,1)}) (rev (s\<^sub>j#ps'@[s\<^sub>k]))"
using knights_path_rev[OF kp'] by auto
then have "(1,1) \<notin> (?b - {(1,1)})" "valid_step (1,1) s\<^sub>k"
"knights_path (?b - {(1,1)}) (s\<^sub>k#(rev ps')@[s\<^sub>j])"
using assms vs_s\<^sub>k valid_step_rev by (auto simp: rev_simp)
then have "knights_path (?b - {(1, 1)} \<union> {(1, 1)}) ((1,1)#s\<^sub>k#(rev ps')@[s\<^sub>j])"
using knights_path.intros(2)[of "(1,1)" "?b - {(1,1)}" s\<^sub>k "(rev ps')@[s\<^sub>j]"] by auto
then have "knights_path ?b ((1,1)#rev (s\<^sub>j#ps'@[s\<^sub>k]))"
using assms by (simp add: board_def insert_absorb rev_simp)
then have "knights_circuit ?b ((1,1)#rev (s\<^sub>j#ps'@[s\<^sub>k]))"
unfolding knights_circuit_def using vs by auto
then show ?thesis
using \<open>s\<^sub>j = (3,2)\<close> by auto
qed
qed
section \<open>Transposing Paths and Boards\<close>
subsection \<open>Implementation of Path and Board Transposition\<close>
definition "transpose_square s\<^sub>i = (case s\<^sub>i of (i,j) \<Rightarrow> (j,i))"
fun transpose :: "path \<Rightarrow> path" where
"transpose [] = []"
| "transpose (s\<^sub>i#ps) = (transpose_square s\<^sub>i)#transpose ps"
definition transpose_board :: "board \<Rightarrow> board" where
"transpose_board b \<equiv> {(j,i) |i j. (i,j) \<in> b}"
subsection \<open>Correctness of Path and Board Transposition\<close>
lemma transpose2: "transpose_square (transpose_square s\<^sub>i) = s\<^sub>i"
unfolding transpose_square_def by (auto split: prod.splits)
lemma transpose_nil: "ps = [] \<longleftrightarrow> transpose ps = []"
using transpose.elims by blast
lemma transpose_length: "length ps = length (transpose ps)"
by (induction ps) auto
lemma hd_transpose: "ps \<noteq> [] \<Longrightarrow> hd (transpose ps) = transpose_square (hd ps)"
by (induction ps) (auto simp: transpose_square_def)
lemma last_transpose: "ps \<noteq> [] \<Longrightarrow> last (transpose ps) = transpose_square (last ps)"
proof (induction ps)
case (Cons s\<^sub>i ps)
then show ?case
proof (cases "ps = []")
case True
then show ?thesis using Cons by (auto simp: transpose_square_def)
next
case False
then show ?thesis using Cons transpose_nil by auto
qed
qed auto
lemma take_transpose:
shows "take k (transpose ps) = transpose (take k ps)"
proof (induction ps arbitrary: k)
case Nil
then show ?case by auto
next
case (Cons s\<^sub>i ps)
then obtain i j where "s\<^sub>i = (i,j)" by force
then have "k = 0 \<or> k > 0" by auto
then show ?case
proof (elim disjE)
assume "k > 0"
then show ?thesis using Cons.IH by (auto simp: \<open>s\<^sub>i = (i,j)\<close> take_Cons')
qed auto
qed
lemma drop_transpose:
shows "drop k (transpose ps) = transpose (drop k ps)"
proof (induction ps arbitrary: k)
case Nil
then show ?case by auto
next
case (Cons s\<^sub>i ps)
then obtain i j where "s\<^sub>i = (i,j)" by force
then have "k = 0 \<or> k > 0" by auto
then show ?case
proof (elim disjE)
assume "k > 0"
then show ?thesis using Cons.IH by (auto simp: \<open>s\<^sub>i = (i,j)\<close> drop_Cons')
qed auto
qed
lemma transpose_board_correct: "s\<^sub>i \<in> b \<longleftrightarrow> (transpose_square s\<^sub>i) \<in> transpose_board b"
unfolding transpose_board_def transpose_square_def by (auto split: prod.splits)
lemma transpose_board: "transpose_board (board n m) = board m n"
unfolding board_def using transpose_board_correct by (auto simp: transpose_square_def)
lemma insert_transpose_board:
"insert (transpose_square s\<^sub>i) (transpose_board b) = transpose_board (insert s\<^sub>i b)"
unfolding transpose_board_def transpose_square_def by (auto split: prod.splits)
lemma transpose_board2: "transpose_board (transpose_board b) = b"
unfolding transpose_board_def by auto
lemma transpose_union: "transpose_board (b\<^sub>1 \<union> b\<^sub>2) = transpose_board b\<^sub>1 \<union> transpose_board b\<^sub>2"
unfolding transpose_board_def by auto
lemma transpose_valid_step:
"valid_step s\<^sub>i s\<^sub>j \<longleftrightarrow> valid_step (transpose_square s\<^sub>i) (transpose_square s\<^sub>j)"
unfolding valid_step_def transpose_square_def by (auto split: prod.splits)
lemma transpose_knights_path':
assumes "knights_path b ps"
shows "knights_path (transpose_board b) (transpose ps)"
using assms
proof (induction rule: knights_path.induct)
case (1 s\<^sub>i)
then have "transpose_board {s\<^sub>i} = {transpose_square s\<^sub>i}" "transpose [s\<^sub>i] = [transpose_square s\<^sub>i]"
using transpose_board_correct by (auto simp: transpose_square_def split: prod.splits)
then show ?case by (auto intro: knights_path.intros)
next
case (2 s\<^sub>i b s\<^sub>j ps)
then have prems: "transpose_square s\<^sub>i \<notin> transpose_board b"
"valid_step (transpose_square s\<^sub>i) (transpose_square s\<^sub>j)"
and "transpose (s\<^sub>j#ps) = transpose_square s\<^sub>j#transpose ps"
using 2 transpose_board_correct transpose_valid_step by auto
then show ?case
using 2 knights_path.intros(2)[OF prems] insert_transpose_board by auto
qed
corollary transpose_knights_path:
assumes "knights_path (board n m) ps"
shows "knights_path (board m n) (transpose ps)"
using assms transpose_knights_path'[of "board n m" ps] by (auto simp: transpose_board)
corollary transpose_knights_circuit:
assumes "knights_circuit (board n m) ps"
shows "knights_circuit (board m n) (transpose ps)"
using assms
proof -
have "knights_path (board n m) ps" and vs: "valid_step (last ps) (hd ps)"
using assms unfolding knights_circuit_def by auto
then have kp_t: "knights_path (board m n) (transpose ps)" and "ps \<noteq> []"
using transpose_knights_path knights_path_non_nil by auto
then have "valid_step (last (transpose ps)) (hd (transpose ps))"
using vs hd_transpose last_transpose transpose_valid_step by auto
then show ?thesis using kp_t by (auto simp: knights_circuit_def)
qed
section \<open>Mirroring Paths and Boards\<close>
subsection \<open>Implementation of Path and Board Mirroring\<close>
abbreviation "min1 ps \<equiv> Min ((fst) ` set ps)"
abbreviation "max1 ps \<equiv> Max ((fst) ` set ps)"
abbreviation "min2 ps \<equiv> Min ((snd) ` set ps)"
abbreviation "max2 ps \<equiv> Max ((snd) ` set ps)"
definition mirror1_square :: "int \<Rightarrow> square \<Rightarrow> square" where
"mirror1_square n s\<^sub>i = (case s\<^sub>i of (i,j) \<Rightarrow> (n-i,j))"
fun mirror1_aux :: "int \<Rightarrow> path \<Rightarrow> path" where
"mirror1_aux n [] = []"
| "mirror1_aux n (s\<^sub>i#ps) = (mirror1_square n s\<^sub>i)#mirror1_aux n ps"
definition "mirror1 ps = mirror1_aux (max1 ps + min1 ps) ps"
definition mirror1_board :: "int \<Rightarrow> board \<Rightarrow> board" where
"mirror1_board n b \<equiv> {mirror1_square n s\<^sub>i |s\<^sub>i. s\<^sub>i \<in> b}"
definition mirror2_square :: "int \<Rightarrow> square \<Rightarrow> square" where
"mirror2_square m s\<^sub>i = (case s\<^sub>i of (i,j) \<Rightarrow> (i,m-j))"
fun mirror2_aux :: "int \<Rightarrow> path \<Rightarrow> path" where
"mirror2_aux m [] = []"
| "mirror2_aux m (s\<^sub>i#ps) = (mirror2_square m s\<^sub>i)#mirror2_aux m ps"
definition "mirror2 ps = mirror2_aux (max2 ps + min2 ps) ps"
definition mirror2_board :: "int \<Rightarrow> board \<Rightarrow> board" where
"mirror2_board m b \<equiv> {mirror2_square m s\<^sub>i |s\<^sub>i. s\<^sub>i \<in> b}"
subsection \<open>Correctness of Path and Board Mirroring\<close>
lemma mirror1_board_id: "mirror1_board (int n+1) (board n m) = board n m" (is "_ = ?b")
proof
show "mirror1_board (int n+1) ?b \<subseteq> ?b"
proof
fix s\<^sub>i'
assume assms: "s\<^sub>i' \<in> mirror1_board (int n+1) ?b"
then obtain i' j' where [simp]: "s\<^sub>i' = (i',j')" by force
then have "(i',j') \<in> mirror1_board (int n+1) ?b"
using assms by auto
then obtain i j where "(i,j) \<in> ?b" "mirror1_square (int n+1) (i,j) = (i',j')"
unfolding mirror1_board_def by auto
then have "1 \<le> i \<and> i \<le> int n" "1 \<le> j \<and> j \<le> int m" "i'=(int n+1)-i" "j'=j"
unfolding board_def mirror1_square_def by auto
then have "1 \<le> i' \<and> i' \<le> int n" "1 \<le> j' \<and> j' \<le> int m"
by auto
then show "s\<^sub>i' \<in> ?b"
unfolding board_def by auto
qed
next
show "?b \<subseteq> mirror1_board (int n+1) ?b"
proof
fix s\<^sub>i
assume assms: "s\<^sub>i \<in> ?b"
then obtain i j where [simp]: "s\<^sub>i = (i,j)" by force
then have "(i,j) \<in> ?b"
using assms by auto
then have "1 \<le> i \<and> i \<le> int n" "1 \<le> j \<and> j \<le> int m"
unfolding board_def by auto
then obtain i' j' where "i'=(int n+1)-i" "j'=j" by auto
then have "(i',j') \<in> ?b" "mirror1_square (int n+1) (i',j') = (i,j)"
using \<open>1 \<le> i \<and> i \<le> int n\<close> \<open>1 \<le> j \<and> j \<le> int m\<close>
unfolding mirror1_square_def by (auto simp: board_def)
then show "s\<^sub>i \<in> mirror1_board (int n+1) ?b"
unfolding mirror1_board_def by force
qed
qed
lemma mirror2_board_id: "mirror2_board (int m+1) (board n m) = board n m" (is "_ = ?b")
proof
show "mirror2_board (int m+1) ?b \<subseteq> ?b"
proof
fix s\<^sub>i'
assume assms: "s\<^sub>i' \<in> mirror2_board (int m+1) ?b"
then obtain i' j' where [simp]: "s\<^sub>i' = (i',j')" by force
then have "(i',j') \<in> mirror2_board (int m+1) ?b"
using assms by auto
then obtain i j where "(i,j) \<in> ?b" "mirror2_square (int m+1) (i,j) = (i',j')"
unfolding mirror2_board_def by auto
then have "1 \<le> i \<and> i \<le> int n" "1 \<le> j \<and> j \<le> int m" "i'=i" "j'=(int m+1)-j"
unfolding board_def mirror2_square_def by auto
then have "1 \<le> i' \<and> i' \<le> int n" "1 \<le> j' \<and> j' \<le> int m"
by auto
then show "s\<^sub>i' \<in> ?b"
unfolding board_def by auto
qed
next
show "?b \<subseteq> mirror2_board (int m+1) ?b"
proof
fix s\<^sub>i
assume assms: "s\<^sub>i \<in> ?b"
then obtain i j where [simp]: "s\<^sub>i = (i,j)" by force
then have "(i,j) \<in> ?b"
using assms by auto
then have "1 \<le> i \<and> i \<le> int n" "1 \<le> j \<and> j \<le> int m"
unfolding board_def by auto
then obtain i' j' where "i'=i" "j'=(int m+1)-j" by auto
then have "(i',j') \<in> ?b" "mirror2_square (int m+1) (i',j') = (i,j)"
using \<open>1 \<le> i \<and> i \<le> int n\<close> \<open>1 \<le> j \<and> j \<le> int m\<close>
unfolding mirror2_square_def by (auto simp: board_def)
then show "s\<^sub>i \<in> mirror2_board (int m+1) ?b"
unfolding mirror2_board_def by force
qed
qed
lemma knights_path_min1: "knights_path (board n m) ps \<Longrightarrow> min1 ps = 1"
proof -
assume assms: "knights_path (board n m) ps"
then have "min n m \<ge> 1"
using knights_path_board_m_n_geq_1 by auto
then have "(1,1) \<in> board n m" and ge_1: "\<forall>(i,j) \<in> board n m. i \<ge> 1"
unfolding board_def by auto
then have finite: "finite ((fst) ` board n m)" and
non_empty: "(fst) ` board n m \<noteq> {}" and
mem_1: "1 \<in> (fst) ` board n m"
using board_finite by auto (metis fstI image_eqI)
then have "Min ((fst) ` board n m) = 1"
using ge_1 by (auto simp: Min_eq_iff)
then show ?thesis
using assms knights_path_set_eq by auto
qed
lemma knights_path_min2: "knights_path (board n m) ps \<Longrightarrow> min2 ps = 1"
proof -
assume assms: "knights_path (board n m) ps"
then have "min n m \<ge> 1"
using knights_path_board_m_n_geq_1 by auto
then have "(1,1) \<in> board n m" and ge_1: "\<forall>(i,j) \<in> board n m. j \<ge> 1"
unfolding board_def by auto
then have finite: "finite ((snd) ` board n m)" and
non_empty: "(snd) ` board n m \<noteq> {}" and
mem_1: "1 \<in> (snd) ` board n m"
using board_finite by auto (metis sndI image_eqI)
then have "Min ((snd) ` board n m) = 1"
using ge_1 by (auto simp: Min_eq_iff)
then show ?thesis
using assms knights_path_set_eq by auto
qed
lemma knights_path_max1: "knights_path (board n m) ps \<Longrightarrow> max1 ps = int n"
proof -
assume assms: "knights_path (board n m) ps"
then have "min n m \<ge> 1"
using knights_path_board_m_n_geq_1 by auto
then have "(int n,1) \<in> board n m" and leq_n: "\<forall>(i,j) \<in> board n m. i \<le> int n"
unfolding board_def by auto
then have finite: "finite ((fst) ` board n m)" and
non_empty: "(fst) ` board n m \<noteq> {}" and
mem_1: "int n \<in> (fst) ` board n m"
using board_finite by auto (metis fstI image_eqI)
then have "Max ((fst) ` board n m) = int n"
using leq_n by (auto simp: Max_eq_iff)
then show ?thesis
using assms knights_path_set_eq by auto
qed
lemma knights_path_max2: "knights_path (board n m) ps \<Longrightarrow> max2 ps = int m"
proof -
assume assms: "knights_path (board n m) ps"
then have "min n m \<ge> 1"
using knights_path_board_m_n_geq_1 by auto
then have "(1,int m) \<in> board n m" and leq_m: "\<forall>(i,j) \<in> board n m. j \<le> int m"
unfolding board_def by auto
then have finite: "finite ((snd) ` board n m)" and
non_empty: "(snd) ` board n m \<noteq> {}" and
mem_1: "int m \<in> (snd) ` board n m"
using board_finite by auto (metis sndI image_eqI)
then have "Max ((snd) ` board n m) = int m"
using leq_m by (auto simp: Max_eq_iff)
then show ?thesis
using assms knights_path_set_eq by auto
qed
lemma mirror1_aux_nil: "ps = [] \<longleftrightarrow> mirror1_aux m ps = []"
using mirror1_aux.elims by blast
lemma mirror1_nil: "ps = [] \<longleftrightarrow> mirror1 ps = []"
unfolding mirror1_def using mirror1_aux_nil by blast
lemma mirror2_aux_nil: "ps = [] \<longleftrightarrow> mirror2_aux m ps = []"
using mirror2_aux.elims by blast
lemma mirror2_nil: "ps = [] \<longleftrightarrow> mirror2 ps = []"
unfolding mirror2_def using mirror2_aux_nil by blast
lemma length_mirror1_aux: "length ps = length (mirror1_aux n ps)"
by (induction ps) auto
lemma length_mirror1: "length ps = length (mirror1 ps)"
unfolding mirror1_def using length_mirror1_aux by auto
lemma length_mirror2_aux: "length ps = length (mirror2_aux n ps)"
by (induction ps) auto
lemma length_mirror2: "length ps = length (mirror2 ps)"
unfolding mirror2_def using length_mirror2_aux by auto
lemma mirror1_board_iff:"s\<^sub>i \<notin> b \<longleftrightarrow> mirror1_square n s\<^sub>i \<notin> mirror1_board n b"
unfolding mirror1_board_def mirror1_square_def by (auto split: prod.splits)
lemma mirror2_board_iff:"s\<^sub>i \<notin> b \<longleftrightarrow> mirror2_square n s\<^sub>i \<notin> mirror2_board n b"
unfolding mirror2_board_def mirror2_square_def by (auto split: prod.splits)
lemma insert_mirror1_board:
"insert (mirror1_square n s\<^sub>i) (mirror1_board n b) = mirror1_board n (insert s\<^sub>i b)"
unfolding mirror1_board_def mirror1_square_def by (auto split: prod.splits)
lemma insert_mirror2_board:
"insert (mirror2_square n s\<^sub>i) (mirror2_board n b) = mirror2_board n (insert s\<^sub>i b)"
unfolding mirror2_board_def mirror2_square_def by (auto split: prod.splits)
lemma "(i::int) = i'+1 \<Longrightarrow> n-i=n-(i'+1)"
by auto
lemma valid_step_mirror1:
"valid_step s\<^sub>i s\<^sub>j \<longleftrightarrow> valid_step (mirror1_square n s\<^sub>i) (mirror1_square n s\<^sub>j)"
proof
assume assms: "valid_step s\<^sub>i s\<^sub>j"
obtain i j i' j' where [simp]: "s\<^sub>i = (i,j)" "s\<^sub>j = (i',j')" by force
then have "valid_step (n-i,j) (n-i',j')"
using assms unfolding valid_step_def
apply simp
apply (elim disjE)
apply auto
done
then show "valid_step (mirror1_square n s\<^sub>i) (mirror1_square n s\<^sub>j)"
unfolding mirror1_square_def by auto
next
assume assms: "valid_step (mirror1_square n s\<^sub>i) (mirror1_square n s\<^sub>j)"
obtain i j i' j' where [simp]: "s\<^sub>i = (i,j)" "s\<^sub>j = (i',j')" by force
then have "valid_step (i,j) (i',j')"
using assms unfolding valid_step_def mirror1_square_def
apply simp
apply (elim disjE)
apply auto
done
then show "valid_step s\<^sub>i s\<^sub>j"
unfolding mirror1_square_def by auto
qed
lemma valid_step_mirror2:
"valid_step s\<^sub>i s\<^sub>j \<longleftrightarrow> valid_step (mirror2_square m s\<^sub>i) (mirror2_square m s\<^sub>j)"
proof
assume assms: "valid_step s\<^sub>i s\<^sub>j"
obtain i j i' j' where [simp]: "s\<^sub>i = (i,j)" "s\<^sub>j = (i',j')" by force
then have "valid_step (i,m-j) (i',m-j')"
using assms unfolding valid_step_def
apply simp
apply (elim disjE)
apply auto
done
then show "valid_step (mirror2_square m s\<^sub>i) (mirror2_square m s\<^sub>j)"
unfolding mirror2_square_def by auto
next
assume assms: "valid_step (mirror2_square m s\<^sub>i) (mirror2_square m s\<^sub>j)"
obtain i j i' j' where [simp]: "s\<^sub>i = (i,j)" "s\<^sub>j = (i',j')" by force
then have "valid_step (i,j) (i',j')"
using assms unfolding valid_step_def mirror2_square_def
apply simp
apply (elim disjE)
apply auto
done
then show "valid_step s\<^sub>i s\<^sub>j"
unfolding mirror1_square_def by auto
qed
lemma hd_mirror1:
assumes "knights_path (board n m) ps" "hd ps = (i,j)"
shows "hd (mirror1 ps) = (int n+1-i,j)"
using assms
proof -
have "hd (mirror1 ps) = hd (mirror1_aux (int n+1) ps)"
unfolding mirror1_def using assms knights_path_min1 knights_path_max1 by auto
also have "... = hd (mirror1_aux (int n+1) ((hd ps)#(tl ps)))"
using assms knights_path_non_nil by (metis list.collapse)
also have "... = (int n+1-i,j)"
using assms by (auto simp: mirror1_square_def)
finally show ?thesis .
qed
lemma last_mirror1_aux:
assumes "ps \<noteq> []" "last ps = (i,j)"
shows "last (mirror1_aux n ps) = (n-i,j)"
using assms
proof (induction ps)
case (Cons s\<^sub>i ps)
then show ?case
using mirror1_aux_nil Cons by (cases "ps = []") (auto simp: mirror1_square_def)
qed auto
lemma last_mirror1:
assumes "knights_path (board n m) ps" "last ps = (i,j)"
shows "last (mirror1 ps) = (int n+1-i,j)"
unfolding mirror1_def using assms last_mirror1_aux knights_path_non_nil
by (simp add: knights_path_max1 knights_path_min1)
lemma hd_mirror2:
assumes "knights_path (board n m) ps" "hd ps = (i,j)"
shows "hd (mirror2 ps) = (i,int m+1-j)"
using assms
proof -
have "hd (mirror2 ps) = hd (mirror2_aux (int m+1) ps)"
unfolding mirror2_def using assms knights_path_min2 knights_path_max2 by auto
also have "... = hd (mirror2_aux (int m+1) ((hd ps)#(tl ps)))"
using assms knights_path_non_nil by (metis list.collapse)
also have "... = (i,int m+1-j)"
using assms by (auto simp: mirror2_square_def)
finally show ?thesis .
qed
lemma last_mirror2_aux:
assumes "ps \<noteq> []" "last ps = (i,j)"
shows "last (mirror2_aux m ps) = (i,m-j)"
using assms
proof (induction ps)
case (Cons s\<^sub>i ps)
then show ?case
using mirror2_aux_nil Cons by (cases "ps = []") (auto simp: mirror2_square_def)
qed auto
lemma last_mirror2:
assumes "knights_path (board n m) ps" "last ps = (i,j)"
shows "last (mirror2 ps) = (i,int m+1-j)"
unfolding mirror2_def using assms last_mirror2_aux knights_path_non_nil
by (simp add: knights_path_max2 knights_path_min2)
lemma mirror1_aux_knights_path:
assumes "knights_path b ps"
shows "knights_path (mirror1_board n b) (mirror1_aux n ps)"
using assms
proof (induction rule: knights_path.induct)
case (1 s\<^sub>i)
then have "mirror1_board n {s\<^sub>i} = {mirror1_square n s\<^sub>i}"
unfolding mirror1_board_def by blast
then show ?case by (auto intro: knights_path.intros)
next
case (2 s\<^sub>i b s\<^sub>j ps)
then have prems: "mirror1_square n s\<^sub>i \<notin> mirror1_board n b"
"valid_step (mirror1_square n s\<^sub>i) (mirror1_square n s\<^sub>j)"
and "mirror1_aux n (s\<^sub>j#ps) = mirror1_square n s\<^sub>j#mirror1_aux n ps"
using 2 mirror1_board_iff valid_step_mirror1 by auto
then show ?case
using 2 knights_path.intros(2)[OF prems] insert_mirror1_board by auto
qed
corollary mirror1_knights_path:
assumes "knights_path (board n m) ps"
shows "knights_path (board n m) (mirror1 ps)"
using assms
proof -
have [simp]: "min1 ps = 1" "max1 ps = int n"
using assms knights_path_min1 knights_path_max1 by auto
then have "mirror1_board (int n+1) (board n m) = (board n m)"
using mirror1_board_id by auto
then have "knights_path (board n m) (mirror1_aux (int n+1) ps)"
using assms mirror1_aux_knights_path[of "board n m" ps "int n+1"] by auto
then show ?thesis unfolding mirror1_def by auto
qed
lemma mirror2_aux_knights_path:
assumes "knights_path b ps"
shows "knights_path (mirror2_board n b) (mirror2_aux n ps)"
using assms
proof (induction rule: knights_path.induct)
case (1 s\<^sub>i)
then have "mirror2_board n {s\<^sub>i} = {mirror2_square n s\<^sub>i}"
unfolding mirror2_board_def by blast
then show ?case by (auto intro: knights_path.intros)
next
case (2 s\<^sub>i b s\<^sub>j ps)
then have prems: "mirror2_square n s\<^sub>i \<notin> mirror2_board n b"
"valid_step (mirror2_square n s\<^sub>i) (mirror2_square n s\<^sub>j)"
and "mirror2_aux n (s\<^sub>j#ps) = mirror2_square n s\<^sub>j#mirror2_aux n ps"
using 2 mirror2_board_iff valid_step_mirror2 by auto
then show ?case
using 2 knights_path.intros(2)[OF prems] insert_mirror2_board by auto
qed
corollary mirror2_knights_path:
assumes "knights_path (board n m) ps"
shows "knights_path (board n m) (mirror2 ps)"
proof -
have [simp]: "min2 ps = 1" "max2 ps = int m"
using assms knights_path_min2 knights_path_max2 by auto
then have "mirror2_board (int m+1) (board n m) = (board n m)"
using mirror2_board_id by auto
then have "knights_path (board n m) (mirror2_aux (int m+1) ps)"
using assms mirror2_aux_knights_path[of "board n m" ps "int m+1"] by auto
then show ?thesis unfolding mirror2_def by auto
qed
subsection \<open>Rotate Knight's Paths\<close>
text \<open>Transposing (\<open>transpose\<close>) and mirroring (along the first axis, \<open>mirror1\<close>) a Knight's path
preserves the Knight's path property. Transpose+Mirror1 equals a 90-degree clockwise turn.\<close>
corollary rot90_knights_path:
assumes "knights_path (board n m) ps"
shows "knights_path (board m n) (mirror1 (transpose ps))"
using assms transpose_knights_path mirror1_knights_path by auto
lemma hd_rot90_knights_path:
assumes "knights_path (board n m) ps" "hd ps = (i,j)"
shows "hd (mirror1 (transpose ps)) = (int m+1-j,i)"
using assms
proof -
have "hd (transpose ps) = (j,i)" "knights_path (board m n) (transpose ps)"
using assms knights_path_non_nil hd_transpose transpose_knights_path
by (auto simp: transpose_square_def)
then show ?thesis using hd_mirror1 by auto
qed
lemma last_rot90_knights_path:
assumes "knights_path (board n m) ps" "last ps = (i,j)"
shows "last (mirror1 (transpose ps)) = (int m+1-j,i)"
using assms
proof -
have "last (transpose ps) = (j,i)" "knights_path (board m n) (transpose ps)"
using assms knights_path_non_nil last_transpose transpose_knights_path
by (auto simp: transpose_square_def)
then show ?thesis using last_mirror1 by auto
qed
section \<open>Translating Paths and Boards\<close>
text \<open>When constructing knight's paths for larger boards, multiple knight's paths for smaller boards
are concatenated. To concatenate paths, the coordinates in the paths need to be translated.
Therefore, simple auxiliary functions are provided.\<close>
subsection \<open>Implementation of Path and Board Translation\<close>
text \<open>Translate the coordinates for a path by \<open>(k\<^sub>1,k\<^sub>2)\<close>.\<close>
fun trans_path :: "int \<times> int \<Rightarrow> path \<Rightarrow> path" where
"trans_path (k\<^sub>1,k\<^sub>2) [] = []"
| "trans_path (k\<^sub>1,k\<^sub>2) ((i,j)#xs) = (i+k\<^sub>1,j+k\<^sub>2)#(trans_path (k\<^sub>1,k\<^sub>2) xs)"
text \<open>Translate the coordinates of a board by \<open>(k\<^sub>1,k\<^sub>2)\<close>.\<close>
definition trans_board :: "int \<times> int \<Rightarrow> board \<Rightarrow> board" where
"trans_board t b \<equiv> (case t of (k\<^sub>1,k\<^sub>2) \<Rightarrow> {(i+k\<^sub>1,j+k\<^sub>2)|i j. (i,j) \<in> b})"
subsection \<open>Correctness of Path and Board Translation\<close>
lemma trans_path_length: "length ps = length (trans_path (k\<^sub>1,k\<^sub>2) ps)"
by (induction ps) auto
lemma trans_path_non_nil: "ps \<noteq> [] \<Longrightarrow> trans_path (k\<^sub>1,k\<^sub>2) ps \<noteq> []"
by (induction ps) auto
lemma trans_path_correct: "(i,j) \<in> set ps \<longleftrightarrow> (i+k\<^sub>1,j+k\<^sub>2) \<in> set (trans_path (k\<^sub>1,k\<^sub>2) ps)"
proof (induction ps)
case (Cons s\<^sub>i ps)
then show ?case by (cases s\<^sub>i) auto
qed auto
lemma trans_path_non_nil_last:
"ps \<noteq> [] \<Longrightarrow> last (trans_path (k\<^sub>1,k\<^sub>2) ps) = last (trans_path (k\<^sub>1,k\<^sub>2) ((i,j)#ps))"
using trans_path_non_nil by (induction ps) auto
lemma hd_trans_path:
assumes "ps \<noteq> []" "hd ps = (i,j)"
shows "hd (trans_path (k\<^sub>1,k\<^sub>2) ps) = (i+k\<^sub>1,j+k\<^sub>2)"
using assms by (induction ps) auto
lemma last_trans_path:
assumes "ps \<noteq> []" "last ps = (i,j)"
shows "last (trans_path (k\<^sub>1,k\<^sub>2) ps) = (i+k\<^sub>1,j+k\<^sub>2)"
using assms
proof (induction ps)
case (Cons s\<^sub>i ps)
then show ?case
using trans_path_non_nil_last[symmetric]
apply (cases s\<^sub>i)
apply (cases "ps = []")
apply auto
done
qed (auto)
lemma take_trans:
shows "take k (trans_path (k\<^sub>1,k\<^sub>2) ps) = trans_path (k\<^sub>1,k\<^sub>2) (take k ps)"
proof (induction ps arbitrary: k)
case Nil
then show ?case by auto
next
case (Cons s\<^sub>i ps)
then obtain i j where "s\<^sub>i = (i,j)" by force
then have "k = 0 \<or> k > 0" by auto
then show ?case
proof (elim disjE)
assume "k > 0"
then show ?thesis using Cons.IH by (auto simp: \<open>s\<^sub>i = (i,j)\<close> take_Cons')
qed auto
qed
lemma drop_trans:
shows "drop k (trans_path (k\<^sub>1,k\<^sub>2) ps) = trans_path (k\<^sub>1,k\<^sub>2) (drop k ps)"
proof (induction ps arbitrary: k)
case Nil
then show ?case by auto
next
case (Cons s\<^sub>i ps)
then obtain i j where "s\<^sub>i = (i,j)" by force
then have "k = 0 \<or> k > 0" by auto
then show ?case
proof (elim disjE)
assume "k > 0"
then show ?thesis using Cons.IH by (auto simp: \<open>s\<^sub>i = (i,j)\<close> drop_Cons')
qed auto
qed
lemma trans_board_correct: "(i,j) \<in> b \<longleftrightarrow> (i+k\<^sub>1,j+k\<^sub>2) \<in> trans_board (k\<^sub>1,k\<^sub>2) b"
unfolding trans_board_def by auto
lemma board_subset: "n\<^sub>1 \<le> n\<^sub>2 \<Longrightarrow> m\<^sub>1 \<le> m\<^sub>2 \<Longrightarrow> board n\<^sub>1 m\<^sub>1 \<subseteq> board n\<^sub>2 m\<^sub>2"
unfolding board_def by auto
text \<open>Board concatenation\<close>
corollary board_concat:
shows "board n m\<^sub>1 \<union> trans_board (0,int m\<^sub>1) (board n m\<^sub>2) = board n (m\<^sub>1+m\<^sub>2)" (is "?b1 \<union> ?b2 = ?b")
proof
show "?b1 \<union> ?b2 \<subseteq> ?b" unfolding board_def trans_board_def by auto
next
show "?b \<subseteq> ?b1 \<union> ?b2"
proof
fix x
assume "x \<in> ?b"
then obtain i j where x_split: "x = (i,j)" "1 \<le> i \<and> i \<le> int n" "1 \<le> j \<and> j \<le> int (m\<^sub>1+m\<^sub>2)"
unfolding board_def by auto
then have "j \<le> int m\<^sub>1 \<or> (int m\<^sub>1 < j \<and> j \<le> int (m\<^sub>1+m\<^sub>2))" by auto
then show "x \<in> ?b1 \<union> ?b2"
proof
assume "j \<le> int m\<^sub>1"
then show "x \<in> ?b1 \<union> ?b2" using x_split unfolding board_def by auto
next
assume asm: "int m\<^sub>1 < j \<and> j \<le> int (m\<^sub>1+m\<^sub>2)"
then have "(i,j-int m\<^sub>1) \<in> board n m\<^sub>2" using x_split unfolding board_def by auto
then show "x \<in> ?b1 \<union> ?b2"
using x_split asm trans_board_correct[of i "j-int m\<^sub>1" "board n m\<^sub>2" 0 "int m\<^sub>1"] by auto
qed
qed
qed
lemma transpose_trans_board:
"transpose_board (trans_board (k\<^sub>1,k\<^sub>2) b) = trans_board (k\<^sub>2,k\<^sub>1) (transpose_board b)"
unfolding transpose_board_def trans_board_def by blast
corollary board_concatT:
shows "board n\<^sub>1 m \<union> trans_board (int n\<^sub>1,0) (board n\<^sub>2 m) = board (n\<^sub>1+n\<^sub>2) m" (is "?b\<^sub>1 \<union> ?b\<^sub>2 = ?b")
proof -
let ?b\<^sub>1T="board m n\<^sub>1"
let ?b\<^sub>2T="trans_board (0,int n\<^sub>1) (board m n\<^sub>2)"
have "?b\<^sub>1 \<union> ?b\<^sub>2 = transpose_board (?b\<^sub>1T \<union> ?b\<^sub>2T) "
using transpose_board2 transpose_union transpose_board transpose_trans_board by auto
also have "... = transpose_board (board m (n\<^sub>1+n\<^sub>2))"
using board_concat by auto
also have "... = board (n\<^sub>1+n\<^sub>2) m"
using transpose_board by auto
finally show ?thesis .
qed
lemma trans_valid_step:
"valid_step (i,j) (i',j') \<Longrightarrow> valid_step (i+k\<^sub>1,j+k\<^sub>2) (i'+k\<^sub>1,j'+k\<^sub>2)"
unfolding valid_step_def by auto
text \<open>Translating a path and a board preserves validity.\<close>
lemma trans_knights_path:
assumes "knights_path b ps"
shows "knights_path (trans_board (k\<^sub>1,k\<^sub>2) b) (trans_path (k\<^sub>1,k\<^sub>2) ps)"
using assms
proof (induction rule: knights_path.induct)
case (2 s\<^sub>i b s\<^sub>j xs)
then obtain i j i' j' where split: "s\<^sub>i = (i,j)" "s\<^sub>j = (i',j')" by force
let ?s\<^sub>i="(i+k\<^sub>1,j+k\<^sub>2)"
let ?s\<^sub>j="(i'+k\<^sub>1,j'+k\<^sub>2)"
let ?xs="trans_path (k\<^sub>1,k\<^sub>2) xs"
let ?b="trans_board (k\<^sub>1,k\<^sub>2) b"
have simps: "trans_path (k\<^sub>1,k\<^sub>2) (s\<^sub>i#s\<^sub>j#xs) = ?s\<^sub>i#?s\<^sub>j#?xs"
"?b \<union> {?s\<^sub>i} = trans_board (k\<^sub>1,k\<^sub>2) (b \<union> {s\<^sub>i})"
unfolding trans_board_def using split by auto
have "?s\<^sub>i \<notin> ?b" "valid_step ?s\<^sub>i ?s\<^sub>j" "knights_path ?b (?s\<^sub>j#?xs)"
using 2 split trans_valid_step by (auto simp: trans_board_def)
then have "knights_path (?b \<union> {?s\<^sub>i}) (?s\<^sub>i#?s\<^sub>j#?xs)"
using knights_path.intros by auto
then show ?case using simps by auto
qed (auto simp: trans_board_def intro: knights_path.intros)
text \<open>Predicate that indicates if two squares \<open>s\<^sub>i\<close> and \<open>s\<^sub>j\<close> are adjacent in \<open>ps\<close>.\<close>
definition step_in :: "path \<Rightarrow> square \<Rightarrow> square \<Rightarrow> bool" where
"step_in ps s\<^sub>i s\<^sub>j \<equiv> (\<exists>k. 0 < k \<and> k < length ps \<and> last (take k ps) = s\<^sub>i \<and> hd (drop k ps) = s\<^sub>j)"
lemma step_in_Cons: "step_in ps s\<^sub>i s\<^sub>j \<Longrightarrow> step_in (s\<^sub>k#ps) s\<^sub>i s\<^sub>j"
proof -
assume "step_in ps s\<^sub>i s\<^sub>j"
then obtain k where "0 < k \<and> k < length ps" "last (take k ps) = s\<^sub>i" "hd (drop k ps) = s\<^sub>j"
unfolding step_in_def by auto
then have "0 < k+1 \<and> k+1 < length (s\<^sub>k#ps)"
"last (take (k+1) (s\<^sub>k#ps)) = s\<^sub>i" "hd (drop (k+1) (s\<^sub>k#ps)) = s\<^sub>j"
by auto
then show ?thesis
by (auto simp: step_in_def)
qed
lemma step_in_append: "step_in ps s\<^sub>i s\<^sub>j \<Longrightarrow> step_in (ps@ps') s\<^sub>i s\<^sub>j"
proof -
assume "step_in ps s\<^sub>i s\<^sub>j"
then obtain k where "0 < k \<and> k < length ps" "last (take k ps) = s\<^sub>i" "hd (drop k ps) = s\<^sub>j"
unfolding step_in_def by auto
then have "0 < k \<and> k < length (ps@ps')"
"last (take k (ps@ps')) = s\<^sub>i" "hd (drop k (ps@ps')) = s\<^sub>j"
by auto
then show ?thesis
by (auto simp: step_in_def)
qed
lemma step_in_prepend: "step_in ps s\<^sub>i s\<^sub>j \<Longrightarrow> step_in (ps'@ps) s\<^sub>i s\<^sub>j"
using step_in_Cons by (induction ps' arbitrary: ps) auto
lemma step_in_valid_step: "knights_path b ps \<Longrightarrow> step_in ps s\<^sub>i s\<^sub>j \<Longrightarrow> valid_step s\<^sub>i s\<^sub>j"
proof -
assume assms: "knights_path b ps" "step_in ps s\<^sub>i s\<^sub>j"
then obtain k where k_prems: "0 < k \<and> k < length ps" "last (take k ps) = s\<^sub>i" "hd (drop k ps) = s\<^sub>j"
unfolding step_in_def by auto
then have "k = 1 \<or> k > 1" by auto
then show ?thesis
proof (elim disjE)
assume "k = 1"
then obtain ps' where "ps = s\<^sub>i#s\<^sub>j#ps'"
using k_prems list_len_g_1_split by fastforce
then show ?thesis
using assms by (auto elim: knights_path.cases)
next
assume "k > 1"
then have "0 < k-1 \<and> k-1 < length ps"
using k_prems by auto
then obtain b where "knights_path b (drop (k-1) ps)"
using assms knights_path_split by blast
obtain ps' where "drop (k-1) ps = s\<^sub>i#s\<^sub>j#ps'"
using k_prems \<open>0 < k - 1 \<and> k - 1 < length ps\<close>
by (metis Cons_nth_drop_Suc Suc_diff_1 hd_drop_conv_nth last_snoc take_hd_drop)
then show ?thesis
using \<open>knights_path b (drop (k-1) ps)\<close> by (auto elim: knights_path.cases)
qed
qed
lemma trans_step_in:
"step_in ps (i,j) (i',j') \<Longrightarrow> step_in (trans_path (k\<^sub>1,k\<^sub>2) ps) (i+k\<^sub>1,j+k\<^sub>2) (i'+k\<^sub>1,j'+k\<^sub>2)"
proof -
let ?ps'="trans_path (k\<^sub>1,k\<^sub>2) ps"
assume "step_in ps (i,j) (i',j')"
then obtain k where "0 < k \<and> k < length ps" "last (take k ps) = (i,j)" "hd (drop k ps) = (i',j')"
unfolding step_in_def by auto
then have "take k ps \<noteq> []" "drop k ps \<noteq> []" by fastforce+
then have "0 < k \<and> k < length ?ps'"
"last (take k ?ps') = (i+k\<^sub>1,j+k\<^sub>2)" "hd (drop k ?ps') = (i'+k\<^sub>1,j'+k\<^sub>2)"
using trans_path_length
last_trans_path[OF \<open>take k ps \<noteq> []\<close> \<open>last (take k ps) = (i,j)\<close>] take_trans
hd_trans_path[OF \<open>drop k ps \<noteq> []\<close> \<open>hd (drop k ps) = (i',j')\<close>] drop_trans
by auto
then show ?thesis
by (auto simp: step_in_def)
qed
lemma transpose_step_in:
"step_in ps s\<^sub>i s\<^sub>j \<Longrightarrow> step_in (transpose ps) (transpose_square s\<^sub>i) (transpose_square s\<^sub>j)"
(is "_ \<Longrightarrow> step_in ?psT ?s\<^sub>iT ?s\<^sub>jT")
proof -
assume "step_in ps s\<^sub>i s\<^sub>j"
then obtain k where
k_prems: "0 < k" "k < length ps" "last (take k ps) = s\<^sub>i" "hd (drop k ps) = s\<^sub>j"
unfolding step_in_def by auto
then have non_nil: "take k ps \<noteq> []" "drop k ps \<noteq> []" by fastforce+
have "take k ?psT = transpose (take k ps)" "drop k ?psT = transpose (drop k ps)"
using take_transpose drop_transpose by auto
then have "last (take k ?psT) = ?s\<^sub>iT" "hd (drop k ?psT) = ?s\<^sub>jT"
using non_nil k_prems hd_transpose last_transpose by auto
then show "step_in ?psT ?s\<^sub>iT ?s\<^sub>jT"
unfolding step_in_def using k_prems transpose_length by auto
qed
lemma hd_take: "0 < k \<Longrightarrow> hd xs = hd (take k xs)"
by (induction xs) auto
lemma last_drop: "k < length xs \<Longrightarrow> last xs = last (drop k xs)"
by (induction xs) auto
subsection \<open>Concatenate Knight's Paths and Circuits\<close>
text \<open>Concatenate two knight's paths on an \<open>n\<times>m\<close>-board along the 2nd axis if the first path contains
the step \<open>s\<^sub>i \<rightarrow> s\<^sub>j\<close> and there are valid steps \<open>s\<^sub>i \<rightarrow> hd ps\<^sub>2'\<close> and \<open>last ps\<^sub>2' \<rightarrow> s\<^sub>j\<close>, where
\<open>ps\<^sub>2'\<close> is \<open>ps\<^sub>2\<close> translated by \<open>m\<^sub>1\<close> along the 2nd axis. An arbitrary step in \<open>ps\<^sub>2\<close> is preserved.\<close>
corollary knights_path_split_concat_si_prev:
assumes "knights_path (board n m\<^sub>1) ps\<^sub>1" "knights_path (board n m\<^sub>2) ps\<^sub>2"
"step_in ps\<^sub>1 s\<^sub>i s\<^sub>j" "hd ps\<^sub>2 = (i\<^sub>h,j\<^sub>h)" "last ps\<^sub>2 = (i\<^sub>l,j\<^sub>l)" "step_in ps\<^sub>2 (i,j) (i',j')"
"valid_step s\<^sub>i (i\<^sub>h,int m\<^sub>1+j\<^sub>h)" "valid_step (i\<^sub>l,int m\<^sub>1+j\<^sub>l) s\<^sub>j"
shows "\<exists>ps. knights_path (board n (m\<^sub>1+m\<^sub>2)) ps \<and> hd ps = hd ps\<^sub>1
\<and> last ps = last ps\<^sub>1 \<and> step_in ps (i,int m\<^sub>1+j) (i',int m\<^sub>1+j')"
using assms
proof -
let ?b\<^sub>1="board n m\<^sub>1"
let ?b\<^sub>2="board n m\<^sub>2"
let ?ps\<^sub>2'="trans_path (0,int m\<^sub>1) ps\<^sub>2"
let ?b'="trans_board (0,int m\<^sub>1) ?b\<^sub>2"
have kp2': "knights_path ?b' ?ps\<^sub>2'" using assms trans_knights_path by auto
then have "?ps\<^sub>2' \<noteq> []" using knights_path_non_nil by auto
obtain k where k_prems:
"0 < k" "k < length ps\<^sub>1" "last (take k ps\<^sub>1) = s\<^sub>i" "hd (drop k ps\<^sub>1) = s\<^sub>j"
using assms unfolding step_in_def by auto
let ?ps="(take k ps\<^sub>1) @ ?ps\<^sub>2' @ (drop k ps\<^sub>1)"
obtain b\<^sub>1 b\<^sub>2 where b_prems: "knights_path b\<^sub>1 (take k ps\<^sub>1)" "knights_path b\<^sub>2 (drop k ps\<^sub>1)"
"b\<^sub>1 \<union> b\<^sub>2 = ?b\<^sub>1" "b\<^sub>1 \<inter> b\<^sub>2 = {}"
using assms \<open>0 < k\<close> \<open>k < length ps\<^sub>1\<close> knights_path_split by blast
have "hd ?ps\<^sub>2' = (i\<^sub>h,int m\<^sub>1+j\<^sub>h)" "last ?ps\<^sub>2' = (i\<^sub>l,int m\<^sub>1+j\<^sub>l)"
using assms knights_path_non_nil hd_trans_path last_trans_path by auto
then have "hd ?ps\<^sub>2' = (i\<^sub>h,int m\<^sub>1+j\<^sub>h)" "last ((take k ps\<^sub>1) @ ?ps\<^sub>2') = (i\<^sub>l,int m\<^sub>1+j\<^sub>l)"
using \<open>?ps\<^sub>2' \<noteq> []\<close> by auto
then have vs: "valid_step (last (take k ps\<^sub>1)) (hd ?ps\<^sub>2')"
"valid_step (last ((take k ps\<^sub>1) @ ?ps\<^sub>2')) (hd (drop k ps\<^sub>1))"
using assms k_prems by auto
have "?b\<^sub>1 \<inter> ?b' = {}" unfolding board_def trans_board_def by auto
then have "b\<^sub>1 \<inter> ?b' = {} \<and> (b\<^sub>1 \<union> ?b') \<inter> b\<^sub>2 = {}" using b_prems by blast
then have inter_empty: "b\<^sub>1 \<inter> ?b' = {}" "(b\<^sub>1 \<union> ?b') \<inter> b\<^sub>2 = {}" by auto
have "knights_path (b\<^sub>1 \<union> ?b') ((take k ps\<^sub>1) @ ?ps\<^sub>2')"
using kp2' b_prems inter_empty vs knights_path_append by auto
then have "knights_path (b\<^sub>1 \<union> ?b' \<union> b\<^sub>2) ?ps"
using b_prems inter_empty vs knights_path_append[where ps\<^sub>1="(take k ps\<^sub>1) @ ?ps\<^sub>2'"] by auto
then have "knights_path (?b\<^sub>1 \<union> ?b') ?ps"
using b_prems Un_commute Un_assoc by metis
then have kp: "knights_path (board n (m\<^sub>1+m\<^sub>2)) ?ps"
using board_concat[of n m\<^sub>1 m\<^sub>2] by auto
have hd: "hd ?ps = hd ps\<^sub>1"
using assms \<open>0 < k\<close> knights_path_non_nil hd_take by auto
have last: "last ?ps = last ps\<^sub>1"
using assms \<open>k < length ps\<^sub>1\<close> knights_path_non_nil last_drop by auto
have m_simps: "j+int m\<^sub>1 = int m\<^sub>1+j" "j'+int m\<^sub>1 = int m\<^sub>1+j'" by auto
have si: "step_in ?ps (i,int m\<^sub>1+j) (i',int m\<^sub>1+j')"
using assms step_in_append[OF step_in_prepend[OF trans_step_in],
of ps\<^sub>2 i j i' j' "take k ps\<^sub>1" 0 "int m\<^sub>1" "drop k ps\<^sub>1"]
by (auto simp: m_simps)
show ?thesis using kp hd last si by auto
qed
lemma len1_hd_last: "length xs = 1 \<Longrightarrow> hd xs = last xs"
by (induction xs) auto
text \<open>Weaker version of @{thm knights_path_split_concat_si_prev}.\<close>
corollary knights_path_split_concat:
assumes "knights_path (board n m\<^sub>1) ps\<^sub>1" "knights_path (board n m\<^sub>2) ps\<^sub>2"
"step_in ps\<^sub>1 s\<^sub>i s\<^sub>j" "hd ps\<^sub>2 = (i\<^sub>h,j\<^sub>h)" "last ps\<^sub>2 = (i\<^sub>l,j\<^sub>l)"
"valid_step s\<^sub>i (i\<^sub>h,int m\<^sub>1+j\<^sub>h)" "valid_step (i\<^sub>l,int m\<^sub>1+j\<^sub>l) s\<^sub>j"
shows "\<exists>ps. knights_path (board n (m\<^sub>1+m\<^sub>2)) ps \<and> hd ps = hd ps\<^sub>1 \<and> last ps = last ps\<^sub>1"
proof -
have "length ps\<^sub>2 = 1 \<or> length ps\<^sub>2 > 1"
using assms knights_path_non_nil by (meson length_0_conv less_one linorder_neqE_nat)
then show ?thesis
proof (elim disjE)
let ?s\<^sub>k="(i\<^sub>h,int m\<^sub>1+j\<^sub>h)"
assume "length ps\<^sub>2 = 1"
(* contradiction *)
then have "(i\<^sub>h,j\<^sub>h) = (i\<^sub>l,j\<^sub>l)"
using assms len1_hd_last by metis
then have "valid_step s\<^sub>i ?s\<^sub>k" "valid_step ?s\<^sub>k s\<^sub>j" "valid_step s\<^sub>i s\<^sub>j"
using assms step_in_valid_step by auto
then show ?thesis
using valid_step_non_transitive by blast
next
assume "length ps\<^sub>2 > 1"
then obtain i\<^sub>1 j\<^sub>1 i\<^sub>2 j\<^sub>2 ps\<^sub>2' where "ps\<^sub>2 = (i\<^sub>1,j\<^sub>1)#(i\<^sub>2,j\<^sub>2)#ps\<^sub>2'"
using list_len_g_1_split by fastforce
then have "last (take 1 ps\<^sub>2) = (i\<^sub>1,j\<^sub>1)" "hd (drop 1 ps\<^sub>2) = (i\<^sub>2,j\<^sub>2)" by auto
then have "step_in ps\<^sub>2 (i\<^sub>1,j\<^sub>1) (i\<^sub>2,j\<^sub>2)" using \<open>length ps\<^sub>2 > 1\<close> by (auto simp: step_in_def)
then show ?thesis
using assms knights_path_split_concat_si_prev by blast
qed
qed
text \<open>Concatenate two knight's paths on an \<open>n\<times>m\<close>-board along the 1st axis.\<close>
corollary knights_path_split_concatT:
assumes "knights_path (board n\<^sub>1 m) ps\<^sub>1" "knights_path (board n\<^sub>2 m) ps\<^sub>2"
"step_in ps\<^sub>1 s\<^sub>i s\<^sub>j" "hd ps\<^sub>2 = (i\<^sub>h,j\<^sub>h)" "last ps\<^sub>2 = (i\<^sub>l,j\<^sub>l)"
"valid_step s\<^sub>i (int n\<^sub>1+i\<^sub>h,j\<^sub>h)" "valid_step (int n\<^sub>1+i\<^sub>l,j\<^sub>l) s\<^sub>j"
shows "\<exists>ps. knights_path (board (n\<^sub>1+n\<^sub>2) m) ps \<and> hd ps = hd ps\<^sub>1 \<and> last ps = last ps\<^sub>1"
using assms
proof -
let ?ps\<^sub>1T="transpose ps\<^sub>1"
let ?ps\<^sub>2T="transpose ps\<^sub>2"
have kps: "knights_path (board m n\<^sub>1) ?ps\<^sub>1T" "knights_path (board m n\<^sub>2) ?ps\<^sub>2T"
using assms transpose_knights_path by auto
let ?s\<^sub>iT="transpose_square s\<^sub>i"
let ?s\<^sub>jT="transpose_square s\<^sub>j"
have si: "step_in ?ps\<^sub>1T ?s\<^sub>iT ?s\<^sub>jT"
using assms transpose_step_in by auto
have "ps\<^sub>1 \<noteq> []" "ps\<^sub>2 \<noteq> []"
using assms knights_path_non_nil by auto
then have hd_last2: "hd ?ps\<^sub>2T = (j\<^sub>h,i\<^sub>h)" "last ?ps\<^sub>2T = (j\<^sub>l,i\<^sub>l)"
using assms hd_transpose last_transpose by (auto simp: transpose_square_def)
have vs: "valid_step ?s\<^sub>iT (j\<^sub>h,int n\<^sub>1+i\<^sub>h)" "valid_step (j\<^sub>l,int n\<^sub>1+i\<^sub>l) ?s\<^sub>jT"
using assms transpose_valid_step by (auto simp: transpose_square_def split: prod.splits)
then obtain ps where
ps_prems: "knights_path (board m (n\<^sub>1+n\<^sub>2)) ps" "hd ps = hd ?ps\<^sub>1T" "last ps = last ?ps\<^sub>1T"
using knights_path_split_concat[OF kps si hd_last2 vs] by auto
then have "ps \<noteq> []" using knights_path_non_nil by auto
let ?psT="transpose ps"
have "knights_path (board (n\<^sub>1+n\<^sub>2) m) ?psT" "hd ?psT = hd ps\<^sub>1" "last ?psT = last ps\<^sub>1"
using \<open>ps\<^sub>1 \<noteq> []\<close> \<open>ps \<noteq> []\<close> ps_prems transpose_knights_path hd_transpose last_transpose
by (auto simp: transpose2)
then show ?thesis by auto
qed
text \<open>Concatenate two Knight's paths along the 2nd axis. There is a valid step from the last square
in the first Knight's path \<open>ps\<^sub>1\<close> to the first square of the translated second Knight's path \<open>ps\<^sub>2\<close>.\<close>
corollary knights_path_concat:
assumes "knights_path (board n m\<^sub>1) ps\<^sub>1" "knights_path (board n m\<^sub>2) ps\<^sub>2"
"hd ps\<^sub>2 = (i\<^sub>h,j\<^sub>h)" "valid_step (last ps\<^sub>1) (i\<^sub>h,int m\<^sub>1+j\<^sub>h)"
shows "knights_path (board n (m\<^sub>1+m\<^sub>2)) (ps\<^sub>1 @ (trans_path (0,int m\<^sub>1) ps\<^sub>2))"
proof -
let ?ps\<^sub>2'="trans_path (0,int m\<^sub>1) ps\<^sub>2"
let ?b="trans_board (0,int m\<^sub>1) (board n m\<^sub>2)"
have inter_empty: "board n m\<^sub>1 \<inter> ?b = {}"
unfolding board_def trans_board_def by auto
have "hd ?ps\<^sub>2' = (i\<^sub>h,int m\<^sub>1+j\<^sub>h)"
using assms knights_path_non_nil hd_trans_path by auto
then have kp: "knights_path (board n m\<^sub>1) ps\<^sub>1" "knights_path ?b ?ps\<^sub>2'" and
vs: "valid_step (last ps\<^sub>1) (hd ?ps\<^sub>2')"
using assms trans_knights_path by auto
then show "knights_path (board n (m\<^sub>1+m\<^sub>2)) (ps\<^sub>1 @ ?ps\<^sub>2')"
using knights_path_append[OF kp inter_empty vs] board_concat by auto
qed
text \<open>Concatenate two Knight's paths along the 2nd axis. The first Knight's path ends in
\<open>(2,m\<^sub>1-1)\<close> (lower-right) and the second Knight's path starts in \<open>(1,1)\<close> (lower-left).\<close>
corollary knights_path_lr_concat:
assumes "knights_path (board n m\<^sub>1) ps\<^sub>1" "knights_path (board n m\<^sub>2) ps\<^sub>2"
"last ps\<^sub>1 = (2,int m\<^sub>1-1)" "hd ps\<^sub>2 = (1,1)"
shows "knights_path (board n (m\<^sub>1+m\<^sub>2)) (ps\<^sub>1 @ (trans_path (0,int m\<^sub>1) ps\<^sub>2))"
proof -
have "valid_step (last ps\<^sub>1) (1,int m\<^sub>1+1)"
using assms unfolding valid_step_def by auto
then show ?thesis
using assms knights_path_concat by auto
qed
text \<open>Concatenate two Knight's circuits along the 2nd axis. In the first Knight's circuit the
squares \<open>(2,m\<^sub>1-1)\<close> and \<open>(4,m\<^sub>1)\<close> are adjacent, and the second Knight's circuit starts in \<open>(1,1)\<close>
(lower-left) and ends in \<open>(3,2)\<close>.\<close>
corollary knights_circuit_lr_concat:
assumes "knights_circuit (board n m\<^sub>1) ps\<^sub>1" "knights_circuit (board n m\<^sub>2) ps\<^sub>2"
"step_in ps\<^sub>1 (2,int m\<^sub>1-1) (4,int m\<^sub>1)"
"hd ps\<^sub>2 = (1,1)" "last ps\<^sub>2 = (3,2)" "step_in ps\<^sub>2 (2,int m\<^sub>2-1) (4,int m\<^sub>2)"
shows "\<exists>ps. knights_circuit (board n (m\<^sub>1+m\<^sub>2)) ps \<and> step_in ps (2,int (m\<^sub>1+m\<^sub>2)-1) (4,int (m\<^sub>1+m\<^sub>2))"
proof -
have kp1: "knights_path (board n m\<^sub>1) ps\<^sub>1" and kp2: "knights_path (board n m\<^sub>2) ps\<^sub>2"
and vs: "valid_step (last ps\<^sub>1) (hd ps\<^sub>1)"
using assms unfolding knights_circuit_def by auto
have m_simps: "int m\<^sub>1 + (int m\<^sub>2-1) = int (m\<^sub>1+m\<^sub>2)-1" "int m\<^sub>1 + int m\<^sub>2 = int (m\<^sub>1+m\<^sub>2)" by auto
have "valid_step (2,int m\<^sub>1-1) (1,int m\<^sub>1+1)" "valid_step (3,int m\<^sub>1+2) (4,int m\<^sub>1)"
unfolding valid_step_def by auto
then obtain ps where "knights_path (board n (m\<^sub>1+m\<^sub>2)) ps" "hd ps = hd ps\<^sub>1" "last ps = last ps\<^sub>1" and
si: "step_in ps (2,int (m\<^sub>1+m\<^sub>2)-1) (4,int (m\<^sub>1+m\<^sub>2))"
using assms kp1 kp2
knights_path_split_concat_si_prev[of n m\<^sub>1 ps\<^sub>1 m\<^sub>2 ps\<^sub>2 "(2,int m\<^sub>1-1)"
"(4,int m\<^sub>1)" 1 1 3 2 2 "int m\<^sub>2-1" 4 "int m\<^sub>2"]
by (auto simp only: m_simps)
then have "knights_circuit (board n (m\<^sub>1+m\<^sub>2)) ps"
using vs by (auto simp: knights_circuit_def)
then show ?thesis
using si by auto
qed
section \<open>Parsing Paths\<close>
text \<open>In this section functions are implemented to parse and construct paths. The parser converts
the matrix representation (\<open>(nat list) list\<close>) used in @{cite "cull_decurtins_1987"} to a path
(\<open>path\<close>).\<close>
text \<open>Predicate for debugging: it checks that all consecutive squares in a path are connected by
valid knight's steps (\<open>step_checker\<close>).\<close>
fun test_path :: "path \<Rightarrow> bool" where
"test_path (s\<^sub>i#s\<^sub>j#xs) = (step_checker s\<^sub>i s\<^sub>j \<and> test_path (s\<^sub>j#xs))"
| "test_path _ = True"
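text \<open>A small usage example (mine, for illustration): a list of squares that are pairwise
connected by knight's moves should evaluate to \<open>True\<close>.\<close>
value "test_path [(1,1),(3,2),(5,1)]" (* both steps have distance (2,1), so this should be True *)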
fun f_opt :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a option" where
"f_opt _ None = None"
| "f_opt f (Some a) = Some (f a)"
fun add_opt_fst_sq :: "int \<Rightarrow> square option \<Rightarrow> square option" where
"add_opt_fst_sq _ None = None"
| "add_opt_fst_sq k (Some (i,j)) = Some (k+i,j)"
fun find_k_in_col :: "nat \<Rightarrow> nat list \<Rightarrow> int option" where
"find_k_in_col k [] = None"
| "find_k_in_col k (c#cs) = (if c = k then Some 1 else f_opt ((+) 1) (find_k_in_col k cs))"
fun find_k_sqr :: "nat \<Rightarrow> (nat list) list \<Rightarrow> square option" where
"find_k_sqr k [] = None"
| "find_k_sqr k (r#rs) = (case find_k_in_col k r of
None \<Rightarrow> f_opt (\<lambda>(i,j). (i+1,j)) (find_k_sqr k rs)
| Some j \<Rightarrow> Some (1,j))"
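text \<open>Two small examples (mine, for illustration): \<open>find_k_in_col\<close> returns the 1-based position of
a value within a row, and \<open>find_k_sqr\<close> the 1-based (row,column) of a value within a matrix, where
rows are counted from the head of the list.\<close>
value "find_k_in_col 4 [12,17,4,21,14]" (* should evaluate to Some 3 *)
value "find_k_sqr 4 [[3,22,13,16,5],[12,17,4,21,14]]" (* should evaluate to Some (2,3) *)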
text \<open>Auxiliary function to easily parse the pre-computed boards from the paper.\<close>
fun to_sqrs :: "nat \<Rightarrow> (nat list) list \<Rightarrow> path option" where
"to_sqrs 0 rs = Some []"
| "to_sqrs k rs = (case find_k_sqr k rs of
None \<Rightarrow> None
| Some s\<^sub>i \<Rightarrow> f_opt (\<lambda>ps. ps@[s\<^sub>i]) (to_sqrs (k-1) rs))"
fun num_elems :: "(nat list) list \<Rightarrow> nat" where
"num_elems (r#rs) = length r * length (r#rs)"
text \<open>Convert a matrix (\<open>nat list list\<close>) to a path (\<open>path\<close>). With this function we implicitly
define the lower-left corner to be \<open>(1,1)\<close> and the upper-right corner to be \<open>(n,m)\<close>.\<close>
definition "to_path rs \<equiv> to_sqrs (num_elems rs) (rev rs)"
text \<open>Example\<close>
value "to_path
[[3,22,13,16,5],
[12,17,4,21,14],
[23,2,15,6,9],
[18,11,8,25,20],
[1,24,19,10,7::nat]]"
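text \<open>This example evaluates to \<open>Some ps\<close> where \<open>ps\<close> is exactly the path abbreviated as
\<open>kp5x5lr\<close> in the next section; in particular \<open>hd ps = (1,1)\<close> and \<open>last ps = (2,4)\<close>.\<close>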
section \<open>Knight's Paths for \<open>5\<times>m\<close>-Boards\<close>
text \<open>Given here are knight's paths, \<open>kp5xmlr\<close> and \<open>kp5xmur\<close>, for the \<open>(5\<times>m)\<close>-board that start
in the lower-left corner for \<open>m\<in>{5,6,7,8,9}\<close>. The path \<open>kp5xmlr\<close> ends in the lower-right corner,
whereas the path \<open>kp5xmur\<close> ends in the upper-right corner.
The tables show the visited squares numbered in ascending order.\<close>
abbreviation "b5x5 \<equiv> board 5 5"
text \<open>A Knight's path for the \<open>(5\<times>5)\<close>-board that starts in the lower-left and ends in the
lower-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
3 & 22 & 13 & 16 & 5 \\ \hline
12 & 17 & 4 & 21 & 14 \\ \hline
23 & 2 & 15 & 6 & 9 \\ \hline
18 & 11 & 8 & 25 & 20 \\ \hline
1 & 24 & 19 & 10 & 7 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x5lr \<equiv> the (to_path
[[3,22,13,16,5],
[12,17,4,21,14],
[23,2,15,6,9],
[18,11,8,25,20],
[1,24,19,10,7]])"
lemma kp_5x5_lr: "knights_path b5x5 kp5x5lr"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x5_lr_hd: "hd kp5x5lr = (1,1)" by eval
lemma kp_5x5_lr_last: "last kp5x5lr = (2,4)" by eval
lemma kp_5x5_lr_non_nil: "kp5x5lr \<noteq> []" by eval
text \<open>A Knight's path for the \<open>(5\<times>5)\<close>-board that starts in the lower-left and ends in the
upper-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
7 & 12 & 15 & 20 & 5 \\ \hline
16 & 21 & 6 & 25 & 14 \\ \hline
11 & 8 & 13 & 4 & 19 \\ \hline
22 & 17 & 2 & 9 & 24 \\ \hline
1 & 10 & 23 & 18 & 3 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x5ur \<equiv> the (to_path
[[7,12,15,20,5],
[16,21,6,25,14],
[11,8,13,4,19],
[22,17,2,9,24],
[1,10,23,18,3]])"
lemma kp_5x5_ur: "knights_path b5x5 kp5x5ur"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x5_ur_hd: "hd kp5x5ur = (1,1)" by eval
lemma kp_5x5_ur_last: "last kp5x5ur = (4,4)" by eval
lemma kp_5x5_ur_non_nil: "kp5x5ur \<noteq> []" by eval
abbreviation "b5x6 \<equiv> board 5 6"
text \<open>A Knight's path for the \<open>(5\<times>6)\<close>-board that starts in the lower-left and ends in the
lower-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
7 & 14 & 21 & 28 & 5 & 12 \\ \hline
22 & 27 & 6 & 13 & 20 & 29 \\ \hline
15 & 8 & 17 & 24 & 11 & 4 \\ \hline
26 & 23 & 2 & 9 & 30 & 19 \\ \hline
1 & 16 & 25 & 18 & 3 & 10 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x6lr \<equiv> the (to_path
[[7,14,21,28,5,12],
[22,27,6,13,20,29],
[15,8,17,24,11,4],
[26,23,2,9,30,19],
[1,16,25,18,3,10]])"
lemma kp_5x6_lr: "knights_path b5x6 kp5x6lr"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x6_lr_hd: "hd kp5x6lr = (1,1)" by eval
lemma kp_5x6_lr_last: "last kp5x6lr = (2,5)" by eval
lemma kp_5x6_lr_non_nil: "kp5x6lr \<noteq> []" by eval
text \<open>A Knight's path for the \<open>(5\<times>6)\<close>-board that starts in the lower-left and ends in the
upper-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
3 & 10 & 29 & 20 & 5 & 12 \\ \hline
28 & 19 & 4 & 11 & 30 & 21 \\ \hline
9 & 2 & 17 & 24 & 13 & 6 \\ \hline
18 & 27 & 8 & 15 & 22 & 25 \\ \hline
1 & 16 & 23 & 26 & 7 & 14 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x6ur \<equiv> the (to_path
[[3,10,29,20,5,12],
[28,19,4,11,30,21],
[9,2,17,24,13,6],
[18,27,8,15,22,25],
[1,16,23,26,7,14]])"
lemma kp_5x6_ur: "knights_path b5x6 kp5x6ur"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x6_ur_hd: "hd kp5x6ur = (1,1)" by eval
lemma kp_5x6_ur_last: "last kp5x6ur = (4,5)" by eval
lemma kp_5x6_ur_non_nil: "kp5x6ur \<noteq> []" by eval
abbreviation "b5x7 \<equiv> board 5 7"
text \<open>A Knight's path for the \<open>(5\<times>7)\<close>-board that starts in the lower-left and ends in the
lower-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
3 & 12 & 21 & 30 & 5 & 14 & 23 \\ \hline
20 & 29 & 4 & 13 & 22 & 31 & 6 \\ \hline
11 & 2 & 19 & 32 & 7 & 24 & 15 \\ \hline
28 & 33 & 10 & 17 & 26 & 35 & 8 \\ \hline
1 & 18 & 27 & 34 & 9 & 16 & 25 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x7lr \<equiv> the (to_path
[[3,12,21,30,5,14,23],
[20,29,4,13,22,31,6],
[11,2,19,32,7,24,15],
[28,33,10,17,26,35,8],
[1,18,27,34,9,16,25]])"
lemma kp_5x7_lr: "knights_path b5x7 kp5x7lr"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x7_lr_hd: "hd kp5x7lr = (1,1)" by eval
lemma kp_5x7_lr_last: "last kp5x7lr = (2,6)" by eval
lemma kp_5x7_lr_non_nil: "kp5x7lr \<noteq> []" by eval
text \<open>A Knight's path for the \<open>(5\<times>7)\<close>-board that starts in the lower-left and ends in the
upper-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
3 & 32 & 11 & 34 & 5 & 26 & 13 \\ \hline
10 & 19 & 4 & 25 & 12 & 35 & 6 \\ \hline
31 & 2 & 33 & 20 & 23 & 14 & 27 \\ \hline
18 & 9 & 24 & 29 & 16 & 7 & 22 \\ \hline
1 & 30 & 17 & 8 & 21 & 28 & 15 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x7ur \<equiv> the (to_path
[[3,32,11,34,5,26,13],
[10,19,4,25,12,35,6],
[31,2,33,20,23,14,27],
[18,9,24,29,16,7,22],
[1,30,17,8,21,28,15]])"
lemma kp_5x7_ur: "knights_path b5x7 kp5x7ur"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x7_ur_hd: "hd kp5x7ur = (1,1)" by eval
lemma kp_5x7_ur_last: "last kp5x7ur = (4,6)" by eval
lemma kp_5x7_ur_non_nil: "kp5x7ur \<noteq> []" by eval
abbreviation "b5x8 \<equiv> board 5 8"
text \<open>A Knight's path for the \<open>(5\<times>8)\<close>-board that starts in the lower-left and ends in the
lower-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
3 & 12 & 37 & 26 & 5 & 14 & 17 & 28 \\ \hline
34 & 23 & 4 & 13 & 36 & 27 & 6 & 15 \\ \hline
11 & 2 & 35 & 38 & 25 & 16 & 29 & 18 \\ \hline
22 & 33 & 24 & 9 & 20 & 31 & 40 & 7 \\ \hline
1 & 10 & 21 & 32 & 39 & 8 & 19 & 30 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x8lr \<equiv> the (to_path
[[3,12,37,26,5,14,17,28],
[34,23,4,13,36,27,6,15],
[11,2,35,38,25,16,29,18],
[22,33,24,9,20,31,40,7],
[1,10,21,32,39,8,19,30]])"
lemma kp_5x8_lr: "knights_path b5x8 kp5x8lr"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x8_lr_hd: "hd kp5x8lr = (1,1)" by eval
lemma kp_5x8_lr_last: "last kp5x8lr = (2,7)" by eval
lemma kp_5x8_lr_non_nil: "kp5x8lr \<noteq> []" by eval
text \<open>A Knight's path for the \<open>(5\<times>8)\<close>-board that starts in the lower-left and ends in the
upper-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
33 & 8 & 17 & 38 & 35 & 6 & 15 & 24 \\ \hline
18 & 37 & 34 & 7 & 16 & 25 & 40 & 5 \\ \hline
9 & 32 & 29 & 36 & 39 & 14 & 23 & 26 \\ \hline
30 & 19 & 2 & 11 & 28 & 21 & 4 & 13 \\ \hline
1 & 10 & 31 & 20 & 3 & 12 & 27 & 22 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x8ur \<equiv> the (to_path
[[33,8,17,38,35,6,15,24],
[18,37,34,7,16,25,40,5],
[9,32,29,36,39,14,23,26],
[30,19,2,11,28,21,4,13],
[1,10,31,20,3,12,27,22]])"
lemma kp_5x8_ur: "knights_path b5x8 kp5x8ur"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x8_ur_hd: "hd kp5x8ur = (1,1)" by eval
lemma kp_5x8_ur_last: "last kp5x8ur = (4,7)" by eval
lemma kp_5x8_ur_non_nil: "kp5x8ur \<noteq> []" by eval
abbreviation "b5x9 \<equiv> board 5 9"
text \<open>
A Knight's path for the \<open>(5\<times>9)\<close>-board that starts in the lower-left and ends in the lower-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
9 & 4 & 11 & 16 & 23 & 42 & 33 & 36 & 25 \\ \hline
12 & 17 & 8 & 3 & 32 & 37 & 24 & 41 & 34 \\ \hline
5 & 10 & 15 & 20 & 43 & 22 & 35 & 26 & 29 \\ \hline
18 & 13 & 2 & 7 & 38 & 31 & 28 & 45 & 40 \\ \hline
1 & 6 & 19 & 14 & 21 & 44 & 39 & 30 & 27 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x9lr \<equiv> the (to_path
[[9,4,11,16,23,42,33,36,25],
[12,17,8,3,32,37,24,41,34],
[5,10,15,20,43,22,35,26,29],
[18,13,2,7,38,31,28,45,40],
[1,6,19,14,21,44,39,30,27]])"
lemma kp_5x9_lr: "knights_path b5x9 kp5x9lr"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x9_lr_hd: "hd kp5x9lr = (1,1)" by eval
lemma kp_5x9_lr_last: "last kp5x9lr = (2,8)" by eval
lemma kp_5x9_lr_non_nil: "kp5x9lr \<noteq> []" by eval
text \<open>
A Knight's path for the \<open>(5\<times>9)\<close>-board that starts in the lower-left and ends in the upper-right.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
9 & 4 & 11 & 16 & 27 & 32 & 35 & 40 & 25 \\ \hline
12 & 17 & 8 & 3 & 36 & 41 & 26 & 45 & 34 \\ \hline
5 & 10 & 15 & 20 & 31 & 28 & 33 & 24 & 39 \\ \hline
18 & 13 & 2 & 7 & 42 & 37 & 22 & 29 & 44 \\ \hline
1 & 6 & 19 & 14 & 21 & 30 & 43 & 38 & 23 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x9ur \<equiv> the (to_path
[[9,4,11,16,27,32,35,40,25],
[12,17,8,3,36,41,26,45,34],
[5,10,15,20,31,28,33,24,39],
[18,13,2,7,42,37,22,29,44],
[1,6,19,14,21,30,43,38,23]])"
lemma kp_5x9_ur: "knights_path b5x9 kp5x9ur"
by (simp only: knights_path_exec_simp) eval
lemma kp_5x9_ur_hd: "hd kp5x9ur = (1,1)" by eval
lemma kp_5x9_ur_last: "last kp5x9ur = (4,8)" by eval
lemma kp_5x9_ur_non_nil: "kp5x9ur \<noteq> []" by eval
lemmas kp_5xm_lr =
kp_5x5_lr kp_5x5_lr_hd kp_5x5_lr_last kp_5x5_lr_non_nil
kp_5x6_lr kp_5x6_lr_hd kp_5x6_lr_last kp_5x6_lr_non_nil
kp_5x7_lr kp_5x7_lr_hd kp_5x7_lr_last kp_5x7_lr_non_nil
kp_5x8_lr kp_5x8_lr_hd kp_5x8_lr_last kp_5x8_lr_non_nil
kp_5x9_lr kp_5x9_lr_hd kp_5x9_lr_last kp_5x9_lr_non_nil
lemmas kp_5xm_ur =
kp_5x5_ur kp_5x5_ur_hd kp_5x5_ur_last kp_5x5_ur_non_nil
kp_5x6_ur kp_5x6_ur_hd kp_5x6_ur_last kp_5x6_ur_non_nil
kp_5x7_ur kp_5x7_ur_hd kp_5x7_ur_last kp_5x7_ur_non_nil
kp_5x8_ur kp_5x8_ur_hd kp_5x8_ur_last kp_5x8_ur_non_nil
kp_5x9_ur kp_5x9_ur_hd kp_5x9_ur_last kp_5x9_ur_non_nil
text \<open>For every \<open>5\<times>m\<close>-board with \<open>m \<ge> 5\<close> there exists a knight's path that starts in
\<open>(1,1)\<close> (bottom-left) and ends in \<open>(2,m-1)\<close> (bottom-right).\<close>
lemma knights_path_5xm_lr_exists:
assumes "m \<ge> 5"
shows "\<exists>ps. knights_path (board 5 m) ps \<and> hd ps = (1,1) \<and> last ps = (2,int m-1)"
using assms
proof (induction m rule: less_induct)
case (less m)
then have "m \<in> {5,6,7,8,9} \<or> 5 \<le> m-5" by auto
then show ?case
proof (elim disjE)
assume "m \<in> {5,6,7,8,9}"
then show ?thesis using kp_5xm_lr by fastforce
next
assume m_ge: "5 \<le> m-5" (* \<longleftrightarrow> 10 \<le> m *)
then obtain ps\<^sub>1 where ps\<^sub>1_IH: "knights_path (board 5 (m-5)) ps\<^sub>1" "hd ps\<^sub>1 = (1,1)"
"last ps\<^sub>1 = (2,int (m-5)-1)" "ps\<^sub>1 \<noteq> []"
using less.IH[of "m-5"] knights_path_non_nil by auto
let ?ps\<^sub>2="kp5x5lr"
let ?ps\<^sub>2'="ps\<^sub>1 @ trans_path (0,int (m-5)) ?ps\<^sub>2"
have "knights_path b5x5 ?ps\<^sub>2" "hd ?ps\<^sub>2 = (1, 1)" "?ps\<^sub>2 \<noteq> []" "last ?ps\<^sub>2 = (2,4)"
using kp_5xm_lr by auto
then have 1: "knights_path (board 5 m) ?ps\<^sub>2'"
using m_ge ps\<^sub>1_IH knights_path_lr_concat[of 5 "m-5" ps\<^sub>1 5 ?ps\<^sub>2] by auto
have 2: "hd ?ps\<^sub>2' = (1,1)" using ps\<^sub>1_IH by auto
have "last (trans_path (0,int (m-5)) ?ps\<^sub>2) = (2,int m-1)"
using m_ge last_trans_path[OF \<open>?ps\<^sub>2 \<noteq> []\<close> \<open>last ?ps\<^sub>2 = (2,4)\<close>] by auto
then have 3: "last ?ps\<^sub>2' = (2,int m-1)"
using last_appendR[OF trans_path_non_nil[OF \<open>?ps\<^sub>2 \<noteq> []\<close>],symmetric] by metis
show ?thesis using 1 2 3 by auto
qed
qed
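text \<open>Informal summary of the induction step (my wording): the path on the \<open>5\<times>(m-5)\<close>-board ends
in \<open>(2,int (m-5)-1)\<close>, and the copy of \<open>kp5x5lr\<close> translated by \<open>(0,int (m-5))\<close> starts in
\<open>(1,int (m-5)+1)\<close>; the connecting step has row distance 1 and column distance 2 and is therefore
a valid knight's move. The translated copy ends in \<open>(2,int m-1)\<close>, as required.\<close>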
text \<open>For every \<open>5\<times>m\<close>-board with \<open>m \<ge> 5\<close> there exists a knight's path that starts in
\<open>(1,1)\<close> (bottom-left) and ends in \<open>(4,m-1)\<close> (top-right).\<close>
lemma knights_path_5xm_ur_exists:
assumes "m \<ge> 5"
shows "\<exists>ps. knights_path (board 5 m) ps \<and> hd ps = (1,1) \<and> last ps = (4,int m-1)"
using assms
proof -
have "m \<in> {5,6,7,8,9} \<or> 5 \<le> m-5" using assms by auto
then show ?thesis
proof (elim disjE)
assume "m \<in> {5,6,7,8,9}"
then show ?thesis using kp_5xm_ur by fastforce
next
assume m_ge: "5 \<le> m-5" (* \<longleftrightarrow> 10 \<le> m *)
then obtain ps\<^sub>1 where ps_prems: "knights_path (board 5 (m-5)) ps\<^sub>1" "hd ps\<^sub>1 = (1,1)"
"last ps\<^sub>1 = (2,int (m-5)-1)" "ps\<^sub>1 \<noteq> []"
using knights_path_5xm_lr_exists[of "(m-5)"] knights_path_non_nil by auto
let ?ps\<^sub>2="kp5x5ur"
let ?ps'="ps\<^sub>1 @ trans_path (0,int (m-5)) ?ps\<^sub>2"
have "knights_path b5x5 ?ps\<^sub>2" "hd ?ps\<^sub>2 = (1, 1)" "?ps\<^sub>2 \<noteq> []"
"last ?ps\<^sub>2 = (4,4)"
using kp_5xm_ur by auto
then have 1: "knights_path (board 5 m) ?ps'"
using m_ge ps_prems knights_path_lr_concat[of 5 "m-5" ps\<^sub>1 5 ?ps\<^sub>2] by auto
have 2: "hd ?ps' = (1,1)" using ps_prems by auto
have "last (trans_path (0,int (m-5)) ?ps\<^sub>2) = (4,int m-1)"
using m_ge last_trans_path[OF \<open>?ps\<^sub>2 \<noteq> []\<close> \<open>last ?ps\<^sub>2 = (4,4)\<close>] by auto
then have 3: "last ?ps' = (4,int m-1)"
using last_appendR[OF trans_path_non_nil[OF \<open>?ps\<^sub>2 \<noteq> []\<close>],symmetric] by metis
show ?thesis using 1 2 3 by auto
qed
qed
text \<open>@{thm knights_path_5xm_lr_exists} and @{thm knights_path_5xm_ur_exists} formalize Lemma 1
from @{cite "cull_decurtins_1987"}.\<close>
lemmas knights_path_5xm_exists = knights_path_5xm_lr_exists knights_path_5xm_ur_exists
section \<open>Knight's Paths and Circuits for \<open>6\<times>m\<close>-Boards\<close>
abbreviation "b6x5 \<equiv> board 6 5"
text \<open>
A Knight's path for the \<open>(6\<times>5)\<close>-board that starts in the lower-left and ends in the upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
10 & 19 & 4 & 29 & 12 \\ \hline
3 & 30 & 11 & 20 & 5 \\ \hline
18 & 9 & 24 & 13 & 28 \\ \hline
25 & 2 & 17 & 6 & 21 \\ \hline
16 & 23 & 8 & 27 & 14 \\ \hline
1 & 26 & 15 & 22 & 7 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp6x5ul \<equiv> the (to_path
[[10,19,4,29,12],
[3,30,11,20,5],
[18,9,24,13,28],
[25,2,17,6,21],
[16,23,8,27,14],
[1,26,15,22,7]])"
lemma kp_6x5_ul: "knights_path b6x5 kp6x5ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_6x5_ul_hd: "hd kp6x5ul = (1,1)" by eval
lemma kp_6x5_ul_last: "last kp6x5ul = (5,2)" by eval
lemma kp_6x5_ul_non_nil: "kp6x5ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(6\<times>5)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
16 & 9 & 6 & 27 & 18 \\ \hline
7 & 26 & 17 & 14 & 5 \\ \hline
10 & 15 & 8 & 19 & 28 \\ \hline
25 & 30 & 23 & 4 & 13 \\ \hline
22 & 11 & 2 & 29 & 20 \\ \hline
1 & 24 & 21 & 12 & 3 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc6x5 \<equiv> the (to_path
[[16,9,6,27,18],
[7,26,17,14,5],
[10,15,8,19,28],
[25,30,23,4,13],
[22,11,2,29,20],
[1,24,21,12,3]])"
lemma kc_6x5: "knights_circuit b6x5 kc6x5"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_6x5_hd: "hd kc6x5 = (1,1)" by eval
lemma kc_6x5_non_nil: "kc6x5 \<noteq> []" by eval
abbreviation "b6x6 \<equiv> board 6 6"
text \<open>The path given in @{cite "cull_decurtins_1987"} for the \<open>6\<times>6\<close>-board that ends in the
upper-left is wrong: the Knight cannot move from square 26 to square 27.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
14 & 23 & 6 & 28 & 12 & 21 \\ \hline
7 & 36 & 13 & 22 & 5 & \color{red}{27} \\ \hline
24 & 15 & 29 & 35 & 20 & 11 \\ \hline
30 & 8 & 17 & \color{red}{26} & 34 & 4 \\ \hline
16 & 25 & 2 & 32 & 10 & 19 \\ \hline
1 & 31 & 9 & 18 & 3 & 33 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp6x6ul_false \<equiv> the (to_path
[[14,23,6,28,12,21],
[7,36,13,22,5,27],
[24,15,29,35,20,11],
[30,8,17,26,34,4],
[16,25,2,32,10,19],
[1,31,9,18,3,33]])"
lemma "\<not>knights_path b6x6 kp6x6ul_false"
by (simp only: knights_path_exec_simp) eval
text \<open>I have computed a correct replacement: a Knight's path for the \<open>(6\<times>6)\<close>-board that starts in
the lower-left and ends in the upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
8 & 25 & 10 & 21 & 6 & 23 \\ \hline
11 & 36 & 7 & 24 & 33 & 20 \\ \hline
26 & 9 & 34 & 3 & 22 & 5 \\ \hline
35 & 12 & 15 & 30 & 19 & 32 \\ \hline
14 & 27 & 2 & 17 & 4 & 29 \\ \hline
1 & 16 & 13 & 28 & 31 & 18 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp6x6ul \<equiv> the (to_path
[[8,25,10,21,6,23],
[11,36,7,24,33,20],
[26,9,34,3,22,5],
[35,12,15,30,19,32],
[14,27,2,17,4,29],
[1,16,13,28,31,18]])"
lemma kp_6x6_ul: "knights_path b6x6 kp6x6ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_6x6_ul_hd: "hd kp6x6ul = (1,1)" by eval
lemma kp_6x6_ul_last: "last kp6x6ul = (5,2)" by eval
lemma kp_6x6_ul_non_nil: "kp6x6ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(6\<times>6)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
4 & 25 & 34 & 15 & 18 & 7 \\ \hline
35 & 14 & 5 & 8 & 33 & 16 \\ \hline
24 & 3 & 26 & 17 & 6 & 19 \\ \hline
13 & 36 & 23 & 30 & 9 & 32 \\ \hline
22 & 27 & 2 & 11 & 20 & 29 \\ \hline
1 & 12 & 21 & 28 & 31 & 10 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc6x6 \<equiv> the (to_path
[[4,25,34,15,18,7],
[35,14,5,8,33,16],
[24,3,26,17,6,19],
[13,36,23,30,9,32],
[22,27,2,11,20,29],
[1,12,21,28,31,10]])"
lemma kc_6x6: "knights_circuit b6x6 kc6x6"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_6x6_hd: "hd kc6x6 = (1,1)" by eval
lemma kc_6x6_non_nil: "kc6x6 \<noteq> []" by eval
abbreviation "b6x7 \<equiv> board 6 7"
text \<open>A Knight's path for the \<open>(6\<times>7)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
18 & 23 & 8 & 39 & 16 & 25 & 6 \\ \hline
9 & 42 & 17 & 24 & 7 & 40 & 15 \\ \hline
22 & 19 & 32 & 41 & 38 & 5 & 26 \\ \hline
33 & 10 & 21 & 28 & 31 & 14 & 37 \\ \hline
20 & 29 & 2 & 35 & 12 & 27 & 4 \\ \hline
1 & 34 & 11 & 30 & 3 & 36 & 13 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp6x7ul \<equiv> the (to_path
[[18,23,8,39,16,25,6],
[9,42,17,24,7,40,15],
[22,19,32,41,38,5,26],
[33,10,21,28,31,14,37],
[20,29,2,35,12,27,4],
[1,34,11,30,3,36,13]])"
lemma kp_6x7_ul: "knights_path b6x7 kp6x7ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_6x7_ul_hd: "hd kp6x7ul = (1,1)" by eval
lemma kp_6x7_ul_last: "last kp6x7ul = (5,2)" by eval
lemma kp_6x7_ul_non_nil: "kp6x7ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(6\<times>7)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
26 & 37 & 8 & 17 & 28 & 31 & 6 \\ \hline
9 & 18 & 27 & 36 & 7 & 16 & 29 \\ \hline
38 & 25 & 10 & 19 & 30 & 5 & 32 \\ \hline
11 & 42 & 23 & 40 & 35 & 20 & 15 \\ \hline
24 & 39 & 2 & 13 & 22 & 33 & 4 \\ \hline
1 & 12 & 41 & 34 & 3 & 14 & 21 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc6x7 \<equiv> the (to_path
[[26,37,8,17,28,31,6],
[9,18,27,36,7,16,29],
[38,25,10,19,30,5,32],
[11,42,23,40,35,20,15],
[24,39,2,13,22,33,4],
[1,12,41,34,3,14,21]])"
lemma kc_6x7: "knights_circuit b6x7 kc6x7"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_6x7_hd: "hd kc6x7 = (1,1)" by eval
lemma kc_6x7_non_nil: "kc6x7 \<noteq> []" by eval
abbreviation "b6x8 \<equiv> board 6 8"
text \<open>A Knight's path for the \<open>(6\<times>8)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
18 & 31 & 8 & 35 & 16 & 33 & 6 & 45 \\ \hline
9 & 48 & 17 & 32 & 7 & 46 & 15 & 26 \\ \hline
30 & 19 & 36 & 47 & 34 & 27 & 44 & 5 \\ \hline
37 & 10 & 21 & 28 & 43 & 40 & 25 & 14 \\ \hline
20 & 29 & 2 & 39 & 12 & 23 & 4 & 41 \\ \hline
1 & 38 & 11 & 22 & 3 & 42 & 13 & 24 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp6x8ul \<equiv> the (to_path
[[18,31,8,35,16,33,6,45],
[9,48,17,32,7,46,15,26],
[30,19,36,47,34,27,44,5],
[37,10,21,28,43,40,25,14],
[20,29,2,39,12,23,4,41],
[1,38,11,22,3,42,13,24]])"
lemma kp_6x8_ul: "knights_path b6x8 kp6x8ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_6x8_ul_hd: "hd kp6x8ul = (1,1)" by eval
lemma kp_6x8_ul_last: "last kp6x8ul = (5,2)" by eval
lemma kp_6x8_ul_non_nil: "kp6x8ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(6\<times>8)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
30 & 35 & 8 & 15 & 28 & 39 & 6 & 13 \\ \hline
9 & 16 & 29 & 36 & 7 & 14 & 27 & 38 \\ \hline
34 & 31 & 10 & 23 & 40 & 37 & 12 & 5 \\ \hline
17 & 48 & 33 & 46 & 11 & 22 & 41 & 26 \\ \hline
32 & 45 & 2 & 19 & 24 & 43 & 4 & 21 \\ \hline
1 & 18 & 47 & 44 & 3 & 20 & 25 & 42 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc6x8 \<equiv> the (to_path
[[30,35,8,15,28,39,6,13],
[9,16,29,36,7,14,27,38],
[34,31,10,23,40,37,12,5],
[17,48,33,46,11,22,41,26],
[32,45,2,19,24,43,4,21],
[1,18,47,44,3,20,25,42]])"
lemma kc_6x8: "knights_circuit b6x8 kc6x8"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_6x8_hd: "hd kc6x8 = (1,1)" by eval
lemma kc_6x8_non_nil: "kc6x8 \<noteq> []" by eval
abbreviation "b6x9 \<equiv> board 6 9"
text \<open>A Knight's path for the \<open>(6\<times>9)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
22 & 45 & 10 & 53 & 20 & 47 & 8 & 35 & 18 \\ \hline
11 & 54 & 21 & 46 & 9 & 36 & 19 & 48 & 7 \\ \hline
44 & 23 & 42 & 37 & 52 & 49 & 32 & 17 & 34 \\ \hline
41 & 12 & 25 & 50 & 27 & 38 & 29 & 6 & 31 \\ \hline
24 & 43 & 2 & 39 & 14 & 51 & 4 & 33 & 16 \\ \hline
1 & 40 & 13 & 26 & 3 & 28 & 15 & 30 & 5 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp6x9ul \<equiv> the (to_path
[[22,45,10,53,20,47,8,35,18],
[11,54,21,46,9,36,19,48,7],
[44,23,42,37,52,49,32,17,34],
[41,12,25,50,27,38,29,6,31],
[24,43,2,39,14,51,4,33,16],
[1,40,13,26,3,28,15,30,5]])"
lemma kp_6x9_ul: "knights_path b6x9 kp6x9ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_6x9_ul_hd: "hd kp6x9ul = (1,1)" by eval
lemma kp_6x9_ul_last: "last kp6x9ul = (5,2)" by eval
lemma kp_6x9_ul_non_nil: "kp6x9ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(6\<times>9)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
14 & 49 & 4 & 51 & 24 & 39 & 6 & 29 & 22 \\ \hline
3 & 52 & 13 & 40 & 5 & 32 & 23 & 42 & 7 \\ \hline
48 & 15 & 50 & 25 & 38 & 41 & 28 & 21 & 30 \\ \hline
53 & 2 & 37 & 12 & 33 & 26 & 31 & 8 & 43 \\ \hline
16 & 47 & 54 & 35 & 18 & 45 & 10 & 27 & 20 \\ \hline
1 & 36 & 17 & 46 & 11 & 34 & 19 & 44 & 9 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc6x9 \<equiv> the (to_path
[[14,49,4,51,24,39,6,29,22],
[3,52,13,40,5,32,23,42,7],
[48,15,50,25,38,41,28,21,30],
[53,2,37,12,33,26,31,8,43],
[16,47,54,35,18,45,10,27,20],
[1,36,17,46,11,34,19,44,9]])"
lemma kc_6x9: "knights_circuit b6x9 kc6x9"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_6x9_hd: "hd kc6x9 = (1,1)" by eval
lemma kc_6x9_non_nil: "kc6x9 \<noteq> []" by eval
lemmas kp_6xm_ul =
kp_6x5_ul kp_6x5_ul_hd kp_6x5_ul_last kp_6x5_ul_non_nil
kp_6x6_ul kp_6x6_ul_hd kp_6x6_ul_last kp_6x6_ul_non_nil
kp_6x7_ul kp_6x7_ul_hd kp_6x7_ul_last kp_6x7_ul_non_nil
kp_6x8_ul kp_6x8_ul_hd kp_6x8_ul_last kp_6x8_ul_non_nil
kp_6x9_ul kp_6x9_ul_hd kp_6x9_ul_last kp_6x9_ul_non_nil
lemmas kc_6xm =
kc_6x5 kc_6x5_hd kc_6x5_non_nil
kc_6x6 kc_6x6_hd kc_6x6_non_nil
kc_6x7 kc_6x7_hd kc_6x7_non_nil
kc_6x8 kc_6x8_hd kc_6x8_non_nil
kc_6x9 kc_6x9_hd kc_6x9_non_nil
text \<open>For every \<open>6\<times>m\<close>-board with \<open>m \<ge> 5\<close> there exists a knight's path that starts in
\<open>(1,1)\<close> (bottom-left) and ends in \<open>(5,2)\<close> (top-left).\<close>
lemma knights_path_6xm_ul_exists:
assumes "m \<ge> 5"
shows "\<exists>ps. knights_path (board 6 m) ps \<and> hd ps = (1,1) \<and> last ps = (5,2)"
using assms
proof (induction m rule: less_induct)
case (less m)
then have "m \<in> {5,6,7,8,9} \<or> 5 \<le> m-5" by auto
then show ?case
proof (elim disjE)
assume "m \<in> {5,6,7,8,9}"
then show ?thesis using kp_6xm_ul by fastforce
next
let ?ps\<^sub>1="kp6x5ul"
let ?b\<^sub>1="board 6 5"
have ps\<^sub>1_prems: "knights_path ?b\<^sub>1 ?ps\<^sub>1" "hd ?ps\<^sub>1 = (1,1)" "last ?ps\<^sub>1 = (5,2)"
using kp_6xm_ul by auto
assume m_ge: "5 \<le> m-5" (* \<longleftrightarrow> 10 \<le> m *)
then obtain ps\<^sub>2 where ps\<^sub>2_IH: "knights_path (board 6 (m-5)) ps\<^sub>2" "hd ps\<^sub>2 = (1,1)"
"last ps\<^sub>2 = (5,2)"
using less.IH[of "m-5"] knights_path_non_nil by auto
have "27 < length ?ps\<^sub>1" "last (take 27 ?ps\<^sub>1) = (2,4)" "hd (drop 27 ?ps\<^sub>1) = (4,5)" by eval+
then have "step_in ?ps\<^sub>1 (2,4) (4,5)"
unfolding step_in_def using zero_less_numeral by blast
then have "step_in ?ps\<^sub>1 (2,4) (4,5)"
"valid_step (2,4) (1,int 5+1)"
"valid_step (5,int 5+2) (4,5)"
unfolding valid_step_def by auto
then show ?thesis
using \<open>5 \<le> m-5\<close> ps\<^sub>1_prems ps\<^sub>2_IH knights_path_split_concat[of 6 5 ?ps\<^sub>1 "m-5" ps\<^sub>2] by auto
qed
qed
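text \<open>Informal summary of the construction above (my wording): the path \<open>kp6x5ul\<close> on the
\<open>6\<times>5\<close>-board is split at its step from \<open>(2,4)\<close> to \<open>(4,5)\<close>, and the path on the \<open>6\<times>(m-5)\<close>-board,
translated by \<open>(0,5)\<close>, is spliced in between via the steps from \<open>(2,4)\<close> to \<open>(1,6)\<close> and from
\<open>(5,7)\<close> to \<open>(4,5)\<close>, both valid knight's moves. The head and the last square of the resulting path
are those of \<open>kp6x5ul\<close>, i.e. \<open>(1,1)\<close> and \<open>(5,2)\<close>.\<close>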
text \<open>For every \<open>6\<times>m\<close>-board with \<open>m \<ge> 5\<close> there exists a knight's circuit.\<close>
lemma knights_circuit_6xm_exists:
assumes "m \<ge> 5"
shows "\<exists>ps. knights_circuit (board 6 m) ps"
using assms
proof -
have "m \<in> {5,6,7,8,9} \<or> 5 \<le> m-5" using assms by auto
then show ?thesis
proof (elim disjE)
assume "m \<in> {5,6,7,8,9}"
then show ?thesis using kc_6xm by fastforce
next
let ?ps\<^sub>1="rev kc6x5"
have "knights_circuit b6x5 ?ps\<^sub>1" "last ?ps\<^sub>1 = (1,1)"
using kc_6xm knights_circuit_rev by (auto simp: last_rev)
then have ps\<^sub>1_prems: "knights_path b6x5 ?ps\<^sub>1" "valid_step (last ?ps\<^sub>1) (hd ?ps\<^sub>1)"
unfolding knights_circuit_def using valid_step_rev by auto
assume m_ge: "5 \<le> m-5" (* \<longleftrightarrow> 10 \<le> m *)
then obtain ps\<^sub>2 where ps2_prems: "knights_path (board 6 (m-5)) ps\<^sub>2" "hd ps\<^sub>2 = (1,1)"
"last ps\<^sub>2 = (5,2)"
using knights_path_6xm_ul_exists[of "(m-5)"] knights_path_non_nil by auto
have "2 < length ?ps\<^sub>1" "last (take 2 ?ps\<^sub>1) = (2,4)" "hd (drop 2 ?ps\<^sub>1) = (4,5)" by eval+
then have "step_in ?ps\<^sub>1 (2,4) (4,5)"
unfolding step_in_def using zero_less_numeral by blast
then have "step_in ?ps\<^sub>1 (2,4) (4,5)"
"valid_step (2,4) (1,int 5+1)"
"valid_step (5,int 5+2) (4,5)"
unfolding valid_step_def by auto
then have "\<exists>ps. knights_path (board 6 m) ps \<and> hd ps = hd ?ps\<^sub>1 \<and> last ps = last ?ps\<^sub>1"
using m_ge ps\<^sub>1_prems ps2_prems knights_path_split_concat[of 6 5 ?ps\<^sub>1 "m-5" ps\<^sub>2] by auto
then show ?thesis using ps\<^sub>1_prems by (auto simp: knights_circuit_def)
qed
qed
text \<open>@{thm knights_path_6xm_ul_exists} and @{thm knights_circuit_6xm_exists} formalize Lemma 2
from @{cite "cull_decurtins_1987"}.\<close>
lemmas knights_path_6xm_exists = knights_path_6xm_ul_exists knights_circuit_6xm_exists
section \<open>Knight's Paths and Circuits for \<open>8\<times>m\<close>-Boards\<close>
abbreviation "b8x5 \<equiv> board 8 5"
text \<open>A Knight's path for the \<open>(8\<times>5)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
28 & 7 & 22 & 39 & 26 \\ \hline
23 & 40 & 27 & 6 & 21 \\ \hline
8 & 29 & 38 & 25 & 14 \\ \hline
37 & 24 & 15 & 20 & 5 \\ \hline
16 & 9 & 30 & 13 & 34 \\ \hline
31 & 36 & 33 & 4 & 19 \\ \hline
10 & 17 & 2 & 35 & 12 \\ \hline
1 & 32 & 11 & 18 & 3 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp8x5ul \<equiv> the (to_path
[[28,7,22,39,26],
[23,40,27,6,21],
[8,29,38,25,14],
[37,24,15,20,5],
[16,9,30,13,34],
[31,36,33,4,19],
[10,17,2,35,12],
[1,32,11,18,3]])"
lemma kp_8x5_ul: "knights_path b8x5 kp8x5ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_8x5_ul_hd: "hd kp8x5ul = (1,1)" by eval
lemma kp_8x5_ul_last: "last kp8x5ul = (7,2)" by eval
lemma kp_8x5_ul_non_nil: "kp8x5ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(8\<times>5)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
26 & 7 & 28 & 15 & 24 \\ \hline
31 & 16 & 25 & 6 & 29 \\ \hline
8 & 27 & 30 & 23 & 14 \\ \hline
17 & 32 & 39 & 34 & 5 \\ \hline
38 & 9 & 18 & 13 & 22 \\ \hline
19 & 40 & 33 & 4 & 35 \\ \hline
10 & 37 & 2 & 21 & 12 \\ \hline
1 & 20 & 11 & 36 & 3 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc8x5 \<equiv> the (to_path
[[26,7,28,15,24],
[31,16,25,6,29],
[8,27,30,23,14],
[17,32,39,34,5],
[38,9,18,13,22],
[19,40,33,4,35],
[10,37,2,21,12],
[1,20,11,36,3]])"
lemma kc_8x5: "knights_circuit b8x5 kc8x5"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_8x5_hd: "hd kc8x5 = (1,1)" by eval
lemma kc_8x5_last: "last kc8x5 = (3,2)" by eval
lemma kc_8x5_non_nil: "kc8x5 \<noteq> []" by eval
lemma kc_8x5_si: "step_in kc8x5 (2,4) (4,5)" (is "step_in ?ps _ _")
proof -
have "0 < (21::nat)" "21 < length ?ps" "last (take 21 ?ps) = (2,4)" "hd (drop 21 ?ps) = (4,5)"
by eval+
then show ?thesis unfolding step_in_def by blast
qed
abbreviation "b8x6 \<equiv> board 8 6"
text \<open>A Knight's path for the \<open>(8\<times>6)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
42 & 11 & 26 & 9 & 34 & 13 \\ \hline
25 & 48 & 43 & 12 & 27 & 8 \\ \hline
44 & 41 & 10 & 33 & 14 & 35 \\ \hline
47 & 24 & 45 & 20 & 7 & 28 \\ \hline
40 & 19 & 32 & 3 & 36 & 15 \\ \hline
23 & 46 & 21 & 6 & 29 & 4 \\ \hline
18 & 39 & 2 & 31 & 16 & 37 \\ \hline
1 & 22 & 17 & 38 & 5 & 30 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp8x6ul \<equiv> the (to_path
[[42,11,26,9,34,13],
[25,48,43,12,27,8],
[44,41,10,33,14,35],
[47,24,45,20,7,28],
[40,19,32,3,36,15],
[23,46,21,6,29,4],
[18,39,2,31,16,37],
[1,22,17,38,5,30]])"
lemma kp_8x6_ul: "knights_path b8x6 kp8x6ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_8x6_ul_hd: "hd kp8x6ul = (1,1)" by eval
lemma kp_8x6_ul_last: "last kp8x6ul = (7,2)" by eval
lemma kp_8x6_ul_non_nil: "kp8x6ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(8\<times>6)\<close>-board. I have reversed the circuit so that it steps
from \<open>(2,5)\<close> to \<open>(4,6)\<close> and not the other way around; this makes the proofs easier.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
8 & 29 & 24 & 45 & 12 & 37 \\ \hline
25 & 46 & 9 & 38 & 23 & 44 \\ \hline
30 & 7 & 28 & 13 & 36 & 11 \\ \hline
47 & 26 & 39 & 10 & 43 & 22 \\ \hline
6 & 31 & 4 & 27 & 14 & 35 \\ \hline
3 & 48 & 17 & 40 & 21 & 42 \\ \hline
32 & 5 & 2 & 19 & 34 & 15 \\ \hline
1 & 18 & 33 & 16 & 41 & 20 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc8x6 \<equiv> the (to_path
[[8,29,24,45,12,37],
[25,46,9,38,23,44],
[30,7,28,13,36,11],
[47,26,39,10,43,22],
[6,31,4,27,14,35],
[3,48,17,40,21,42],
[32,5,2,19,34,15],
[1,18,33,16,41,20]])"
lemma kc_8x6: "knights_circuit b8x6 kc8x6"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_8x6_hd: "hd kc8x6 = (1,1)" by eval
lemma kc_8x6_non_nil: "kc8x6 \<noteq> []" by eval
lemma kc_8x6_si: "step_in kc8x6 (2,5) (4,6)" (is "step_in ?ps _ _")
proof -
have "0 < (34::nat)" "34 < length ?ps"
"last (take 34 ?ps) = (2,5)" "hd (drop 34 ?ps) = (4,6)" by eval+
then show ?thesis unfolding step_in_def by blast
qed
abbreviation "b8x7 \<equiv> board 8 7"
text \<open>A Knight's path for the \<open>(8\<times>7)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
38 & 19 & 6 & 55 & 46 & 21 & 8 \\ \hline
5 & 56 & 39 & 20 & 7 & 54 & 45 \\ \hline
18 & 37 & 4 & 47 & 34 & 9 & 22 \\ \hline
3 & 48 & 35 & 40 & 53 & 44 & 33 \\ \hline
36 & 17 & 52 & 49 & 32 & 23 & 10 \\ \hline
51 & 2 & 29 & 14 & 41 & 26 & 43 \\ \hline
16 & 13 & 50 & 31 & 28 & 11 & 24 \\ \hline
1 & 30 & 15 & 12 & 25 & 42 & 27 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp8x7ul \<equiv> the (to_path
[[38,19,6,55,46,21,8],
[5,56,39,20,7,54,45],
[18,37,4,47,34,9,22],
[3,48,35,40,53,44,33],
[36,17,52,49,32,23,10],
[51,2,29,14,41,26,43],
[16,13,50,31,28,11,24],
[1,30,15,12,25,42,27]])"
lemma kp_8x7_ul: "knights_path b8x7 kp8x7ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_8x7_ul_hd: "hd kp8x7ul = (1,1)" by eval
lemma kp_8x7_ul_last: "last kp8x7ul = (7,2)" by eval
lemma kp_8x7_ul_non_nil: "kp8x7ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(8\<times>7)\<close>-board. I have reversed the circuit so that it steps
from \<open>(2,6)\<close> to \<open>(4,7)\<close> and not the other way around; this makes the proofs easier.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
36 & 31 & 18 & 53 & 20 & 29 & 44 \\ \hline
17 & 54 & 35 & 30 & 45 & 52 & 21 \\ \hline
32 & 37 & 46 & 19 & 8 & 43 & 28 \\ \hline
55 & 16 & 7 & 34 & 27 & 22 & 51 \\ \hline
38 & 33 & 26 & 47 & 6 & 9 & 42 \\ \hline
3 & 56 & 15 & 12 & 25 & 50 & 23 \\ \hline
14 & 39 & 2 & 5 & 48 & 41 & 10 \\ \hline
1 & 4 & 13 & 40 & 11 & 24 & 49 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc8x7 \<equiv> the (to_path
[[36,31,18,53,20,29,44],
[17,54,35,30,45,52,21],
[32,37,46,19,8,43,28],
[55,16,7,34,27,22,51],
[38,33,26,47,6,9,42],
[3,56,15,12,25,50,23],
[14,39,2,5,48,41,10],
[1,4,13,40,11,24,49]])"
lemma kc_8x7: "knights_circuit b8x7 kc8x7"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_8x7_hd: "hd kc8x7 = (1,1)" by eval
lemma kc_8x7_non_nil: "kc8x7 \<noteq> []" by eval
lemma kc_8x7_si: "step_in kc8x7 (2,6) (4,7)" (is "step_in ?ps _ _")
proof -
have "0 < (41::nat)" "41 < length ?ps"
"last (take 41 ?ps) = (2,6)" "hd (drop 41 ?ps) = (4,7)" by eval+
then show ?thesis unfolding step_in_def by blast
qed
abbreviation "b8x8 \<equiv> board 8 8"
text \<open>The path given in @{cite "cull_decurtins_1987"} for the \<open>8\<times>8\<close>-board that ends in the
upper-left is wrong: the Knight cannot move from square 27 to square 28.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
24 & 11 & 37 & 9 & 26 & 21 & 39 & 7 \\ \hline
36 & 64 & 25 & 22 & 38 & 8 & \color{red}{27} & 20 \\ \hline
12 & 23 & 10 & 53 & 58 & 49 & 6 & \color{red}{28} \\ \hline
63 & 35 & 61 & 50 & 55 & 52 & 19 & 40 \\ \hline
46 & 13 & 54 & 57 & 48 & 59 & 29 & 5 \\ \hline
34 & 62 & 47 & 60 & 51 & 56 & 41 & 18 \\ \hline
14 & 45 & 2 & 32 & 16 & 43 & 4 & 30 \\ \hline
1 & 33 & 15 & 44 & 3 & 31 & 17 & 42 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp8x8ul_false \<equiv> the (to_path
[[24,11,37,9,26,21,39,7],
[36,64,25,22,38,8,27,20],
[12,23,10,53,58,49,6,28],
[63,35,61,50,55,52,19,40],
[46,13,54,57,48,59,29,5],
[34,62,47,60,51,56,41,18],
[14,45,2,32,16,43,4,30],
[1,33,15,44,3,31,17,42]])"
lemma "\<not>knights_path b8x8 kp8x8ul_false"
by (simp only: knights_path_exec_simp) eval
text \<open>I have computed a correct Knight's path for the \<open>8\<times>8\<close>-board that ends in the upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
38 & 41 & 36 & 27 & 32 & 43 & 20 & 25 \\ \hline
35 & 64 & 39 & 42 & 21 & 26 & 29 & 44 \\ \hline
40 & 37 & 6 & 33 & 28 & 31 & 24 & 19 \\ \hline
5 & 34 & 63 & 14 & 7 & 22 & 45 & 30 \\ \hline
62 & 13 & 4 & 9 & 58 & 49 & 18 & 23 \\ \hline
3 & 10 & 61 & 52 & 15 & 8 & 57 & 46 \\ \hline
12 & 53 & 2 & 59 & 48 & 55 & 50 & 17 \\ \hline
1 & 60 & 11 & 54 & 51 & 16 & 47 & 56 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp8x8ul \<equiv> the (to_path
[[38,41,36,27,32,43,20,25],
[35,64,39,42,21,26,29,44],
[40,37,6,33,28,31,24,19],
[5,34,63,14,7,22,45,30],
[62,13,4,9,58,49,18,23],
[3,10,61,52,15,8,57,46],
[12,53,2,59,48,55,50,17],
[1,60,11,54,51,16,47,56]])"
lemma kp_8x8_ul: "knights_path b8x8 kp8x8ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_8x8_ul_hd: "hd kp8x8ul = (1,1)" by eval
lemma kp_8x8_ul_last: "last kp8x8ul = (7,2)" by eval
lemma kp_8x8_ul_non_nil: "kp8x8ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(8\<times>8)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|}
\hline
48 & 13 & 30 & 9 & 56 & 45 & 28 & 7 \\ \hline
31 & 10 & 47 & 50 & 29 & 8 & 57 & 44 \\ \hline
14 & 49 & 12 & 55 & 46 & 59 & 6 & 27 \\ \hline
11 & 32 & 37 & 60 & 51 & 54 & 43 & 58 \\ \hline
36 & 15 & 52 & 63 & 38 & 61 & 26 & 5 \\ \hline
33 & 64 & 35 & 18 & 53 & 40 & 23 & 42 \\ \hline
16 & 19 & 2 & 39 & 62 & 21 & 4 & 25 \\ \hline
1 & 34 & 17 & 20 & 3 & 24 & 41 & 22 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc8x8 \<equiv> the (to_path
[[48,13,30,9,56,45,28,7],
[31,10,47,50,29,8,57,44],
[14,49,12,55,46,59,6,27],
[11,32,37,60,51,54,43,58],
[36,15,52,63,38,61,26,5],
[33,64,35,18,53,40,23,42],
[16,19,2,39,62,21,4,25],
[1,34,17,20,3,24,41,22]])"
lemma kc_8x8: "knights_circuit b8x8 kc8x8"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_8x8_hd: "hd kc8x8 = (1,1)" by eval
lemma kc_8x8_non_nil: "kc8x8 \<noteq> []" by eval
lemma kc_8x8_si: "step_in kc8x8 (2,7) (4,8)" (is "step_in ?ps _ _")
proof -
have "0 < (4::nat)" "4 < length ?ps"
"last (take 4 ?ps) = (2,7)" "hd (drop 4 ?ps) = (4,8)" by eval+
then show ?thesis unfolding step_in_def by blast
qed
abbreviation "b8x9 \<equiv> board 8 9"
text \<open>A Knight's path for the \<open>(8\<times>9)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
32 & 47 & 6 & 71 & 30 & 45 & 8 & 43 & 26 \\ \hline
5 & 72 & 31 & 46 & 7 & 70 & 27 & 22 & 9 \\ \hline
48 & 33 & 4 & 29 & 64 & 23 & 44 & 25 & 42 \\ \hline
3 & 60 & 35 & 62 & 69 & 28 & 41 & 10 & 21 \\ \hline
34 & 49 & 68 & 65 & 36 & 63 & 24 & 55 & 40 \\ \hline
59 & 2 & 61 & 16 & 67 & 56 & 37 & 20 & 11 \\ \hline
50 & 15 & 66 & 57 & 52 & 13 & 18 & 39 & 54 \\ \hline
1 & 58 & 51 & 14 & 17 & 38 & 53 & 12 & 19 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp8x9ul \<equiv> the (to_path
[[32,47,6,71,30,45,8,43,26],
[5,72,31,46,7,70,27,22,9],
[48,33,4,29,64,23,44,25,42],
[3,60,35,62,69,28,41,10,21],
[34,49,68,65,36,63,24,55,40],
[59,2,61,16,67,56,37,20,11],
[50,15,66,57,52,13,18,39,54],
[1,58,51,14,17,38,53,12,19]])"
lemma kp_8x9_ul: "knights_path b8x9 kp8x9ul"
by (simp only: knights_path_exec_simp) eval
lemma kp_8x9_ul_hd: "hd kp8x9ul = (1,1)" by eval
lemma kp_8x9_ul_last: "last kp8x9ul = (7,2)" by eval
lemma kp_8x9_ul_non_nil: "kp8x9ul \<noteq> []" by eval
text \<open>A Knight's circuit for the \<open>(8\<times>9)\<close>-board.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
42 & 19 & 38 & 5 & 36 & 21 & 34 & 7 & 60 \\ \hline
39 & 4 & 41 & 20 & 63 & 6 & 59 & 22 & 33 \\ \hline
18 & 43 & 70 & 37 & 58 & 35 & 68 & 61 & 8 \\ \hline
3 & 40 & 49 & 64 & 69 & 62 & 57 & 32 & 23 \\ \hline
50 & 17 & 44 & 71 & 48 & 67 & 54 & 9 & 56 \\ \hline
45 & 2 & 65 & 14 & 27 & 12 & 29 & 24 & 31 \\ \hline
16 & 51 & 72 & 47 & 66 & 53 & 26 & 55 & 10 \\ \hline
1 & 46 & 15 & 52 & 13 & 28 & 11 & 30 & 25 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kc8x9 \<equiv> the (to_path
[[42,19,38,5,36,21,34,7,60],
[39,4,41,20,63,6,59,22,33],
[18,43,70,37,58,35,68,61,8],
[3,40,49,64,69,62,57,32,23],
[50,17,44,71,48,67,54,9,56],
[45,2,65,14,27,12,29,24,31],
[16,51,72,47,66,53,26,55,10],
[1,46,15,52,13,28,11,30,25]])"
lemma kc_8x9: "knights_circuit b8x9 kc8x9"
by (simp only: knights_circuit_exec_simp) eval
lemma kc_8x9_hd: "hd kc8x9 = (1,1)" by eval
lemma kc_8x9_non_nil: "kc8x9 \<noteq> []" by eval
lemma kc_8x9_si: "step_in kc8x9 (2,8) (4,9)" (is "step_in ?ps _ _")
proof -
have "0 < (55::nat)" "55 < length ?ps"
"last (take 55 ?ps) = (2,8)" "hd (drop 55 ?ps) = (4,9)" by eval+
then show ?thesis unfolding step_in_def by blast
qed
lemmas kp_8xm_ul =
kp_8x5_ul kp_8x5_ul_hd kp_8x5_ul_last kp_8x5_ul_non_nil
kp_8x6_ul kp_8x6_ul_hd kp_8x6_ul_last kp_8x6_ul_non_nil
kp_8x7_ul kp_8x7_ul_hd kp_8x7_ul_last kp_8x7_ul_non_nil
kp_8x8_ul kp_8x8_ul_hd kp_8x8_ul_last kp_8x8_ul_non_nil
kp_8x9_ul kp_8x9_ul_hd kp_8x9_ul_last kp_8x9_ul_non_nil
lemmas kc_8xm =
kc_8x5 kc_8x5_hd kc_8x5_last kc_8x5_non_nil kc_8x5_si
kc_8x6 kc_8x6_hd kc_8x6_non_nil kc_8x6_si
kc_8x7 kc_8x7_hd kc_8x7_non_nil kc_8x7_si
kc_8x8 kc_8x8_hd kc_8x8_non_nil kc_8x8_si
kc_8x9 kc_8x9_hd kc_8x9_non_nil kc_8x9_si
text \<open>For every \<open>8\<times>m\<close>-board with \<open>m \<ge> 5\<close> there exists a knight's circuit that contains a step
from \<open>(2,m-1)\<close> to \<open>(4,m)\<close>.\<close>
lemma knights_circuit_8xm_exists:
assumes "m \<ge> 5"
shows "\<exists>ps. knights_circuit (board 8 m) ps \<and> step_in ps (2,int m-1) (4,int m)"
using assms
proof (induction m rule: less_induct)
case (less m)
then have "m \<in> {5,6,7,8,9} \<or> 5 \<le> m-5" by auto
then show ?case
proof (elim disjE)
assume "m \<in> {5,6,7,8,9}"
then show ?thesis using kc_8xm by fastforce
next
let ?ps\<^sub>2="kc8x5"
let ?b\<^sub>2="board 8 5"
have ps\<^sub>2_prems: "knights_circuit ?b\<^sub>2 ?ps\<^sub>2" "hd ?ps\<^sub>2 = (1,1)" "last ?ps\<^sub>2 = (3,2)"
using kc_8xm by auto
have "21 < length ?ps\<^sub>2" "last (take 21 ?ps\<^sub>2) = (2,int 5-1)" "hd (drop 21 ?ps\<^sub>2) = (4,int 5)"
by eval+
then have si: "step_in ?ps\<^sub>2 (2,int 5-1) (4,int 5)"
unfolding step_in_def using zero_less_numeral by blast
assume m_ge: "5 \<le> m-5" (* \<longleftrightarrow> 10 \<le> m *)
then obtain ps\<^sub>1 where ps\<^sub>1_IH: "knights_circuit (board 8 (m-5)) ps\<^sub>1"
"step_in ps\<^sub>1 (2,int (m-5)-1) (4,int (m-5))"
using less.IH[of "m-5"] knights_path_non_nil by auto
then show ?thesis
using m_ge ps\<^sub>2_prems si knights_circuit_lr_concat[of 8 "m-5" ps\<^sub>1 5 ?ps\<^sub>2] by auto
qed
qed
text \<open>For every \<open>8\<times>m\<close>-board with \<open>m \<ge> 5\<close> there exists a knight's path that starts in
\<open>(1,1)\<close> (bottom-left) and ends in \<open>(7,2)\<close> (top-left).\<close>
lemma knights_path_8xm_ul_exists:
assumes "m \<ge> 5"
shows "\<exists>ps. knights_path (board 8 m) ps \<and> hd ps = (1,1) \<and> last ps = (7,2)"
using assms
proof -
have "m \<in> {5,6,7,8,9} \<or> 5 \<le> m-5" using assms by auto
then show ?thesis
proof (elim disjE)
assume "m \<in> {5,6,7,8,9}"
then show ?thesis using kp_8xm_ul by fastforce
next
let ?ps\<^sub>1="kp8x5ul"
have ps\<^sub>1_prems: "knights_path b8x5 ?ps\<^sub>1" "hd ?ps\<^sub>1 = (1,1)" "last ?ps\<^sub>1 = (7,2)"
using kp_8xm_ul by auto
assume m_ge: "5 \<le> m-5" (* \<longleftrightarrow> 10 \<le> m *)
then have b_prems: "5 \<le> min 8 (m-5)"
unfolding board_def by auto
obtain ps\<^sub>2 where "knights_circuit (board 8 (m-5)) ps\<^sub>2"
using m_ge knights_circuit_8xm_exists[of "(m-5)"] knights_path_non_nil by auto
then obtain ps\<^sub>2' where ps\<^sub>2'_prems': "knights_circuit (board 8 (m-5)) ps\<^sub>2'"
"hd ps\<^sub>2' = (1,1)" "last ps\<^sub>2' = (3,2)"
using b_prems \<open>5 \<le> min 8 (m-5)\<close> rotate_knights_circuit by blast
then have ps\<^sub>2'_path: "knights_path (board 8 (m-5)) (rev ps\<^sub>2')"
"valid_step (last ps\<^sub>2') (hd ps\<^sub>2')" "hd (rev ps\<^sub>2') = (3,2)" "last (rev ps\<^sub>2') = (1,1)"
unfolding knights_circuit_def using knights_path_rev by (auto simp: hd_rev last_rev)
have "34 < length ?ps\<^sub>1" "last (take 34 ?ps\<^sub>1) = (4,5)" "hd (drop 34 ?ps\<^sub>1) = (2,4)" by eval+
then have "step_in ?ps\<^sub>1 (4,5) (2,4)"
unfolding step_in_def using zero_less_numeral by blast
then have "step_in ?ps\<^sub>1 (4,5) (2,4)"
"valid_step (4,5) (3,int 5+2)"
"valid_step (1,int 5+1) (2,4)"
unfolding valid_step_def by auto
then have "\<exists>ps. knights_path (board 8 m) ps \<and> hd ps = hd ?ps\<^sub>1 \<and> last ps = last ?ps\<^sub>1"
using m_ge ps\<^sub>1_prems ps\<^sub>2'_prems' ps\<^sub>2'_path
knights_path_split_concat[of 8 5 ?ps\<^sub>1 "m-5" "rev ps\<^sub>2'"] by auto
then show ?thesis using ps\<^sub>1_prems by auto
qed
qed
text \<open>@{thm knights_circuit_8xm_exists} and @{thm knights_path_8xm_ul_exists} formalize Lemma 3
from @{cite "cull_decurtins_1987"}.\<close>
lemmas knights_path_8xm_exists = knights_circuit_8xm_exists knights_path_8xm_ul_exists
section \<open>Knight's Paths and Circuits for \<open>n\<times>m\<close>-Boards\<close>
text \<open>In this section the desired theorems are proved. The proofs use the previous lemmas to
construct paths and circuits for arbitrary \<open>n\<times>m\<close>-boards.\<close>
text \<open>A Knight's path for the \<open>(5\<times>5)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|}
\hline
7 & 20 & 9 & 14 & 5 \\ \hline
10 & 25 & 6 & 21 & 16 \\ \hline
19 & 8 & 15 & 4 & 13 \\ \hline
24 & 11 & 2 & 17 & 22 \\ \hline
1 & 18 & 23 & 12 & 3 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x5ul \<equiv> the (to_path
[[7,20,9,14,5],
[10,25,6,21,16],
[19,8,15,4,13],
[24,11,2,17,22],
[1,18,23,12,3]])"
lemma kp_5x5_ul: "knights_path b5x5 kp5x5ul"
by (simp only: knights_path_exec_simp) eval
text \<open>A Knight's path for the \<open>(5\<times>7)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
17 & 14 & 25 & 6 & 19 & 8 & 29 \\ \hline
26 & 35 & 18 & 15 & 28 & 5 & 20 \\ \hline
13 & 16 & 27 & 24 & 7 & 30 & 9 \\ \hline
34 & 23 & 2 & 11 & 32 & 21 & 4 \\ \hline
1 & 12 & 33 & 22 & 3 & 10 & 31 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x7ul \<equiv> the (to_path
[[17,14,25,6,19,8,29],
[26,35,18,15,28,5,20],
[13,16,27,24,7,30,9],
[34,23,2,11,32,21,4],
[1,12,33,22,3,10,31]])"
lemma kp_5x7_ul: "knights_path b5x7 kp5x7ul"
by (simp only: knights_path_exec_simp) eval
text \<open>A Knight's path for the \<open>(5\<times>9)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
7 & 12 & 37 & 42 & 5 & 18 & 23 & 32 & 27 \\ \hline
38 & 45 & 6 & 11 & 36 & 31 & 26 & 19 & 24 \\ \hline
13 & 8 & 43 & 4 & 41 & 22 & 17 & 28 & 33 \\ \hline
44 & 39 & 2 & 15 & 10 & 35 & 30 & 25 & 20 \\ \hline
1 & 14 & 9 & 40 & 3 & 16 & 21 & 34 & 29 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp5x9ul \<equiv> the (to_path
[[7,12,37,42,5,18,23,32,27],
[38,45,6,11,36,31,26,19,24],
[13,8,43,4,41,22,17,28,33],
[44,39,2,15,10,35,30,25,20],
[1,14,9,40,3,16,21,34,29]])"
lemma kp_5x9_ul: "knights_path b5x9 kp5x9ul"
by (simp only: knights_path_exec_simp) eval
abbreviation "b7x7 \<equiv> board 7 7"
text \<open>A Knight's path for the \<open>(7\<times>7)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
9 & 30 & 19 & 42 & 7 & 32 & 17 \\ \hline
20 & 49 & 8 & 31 & 18 & 43 & 6 \\ \hline
29 & 10 & 41 & 36 & 39 & 16 & 33 \\ \hline
48 & 21 & 38 & 27 & 34 & 5 & 44 \\ \hline
11 & 28 & 35 & 40 & 37 & 26 & 15 \\ \hline
22 & 47 & 2 & 13 & 24 & 45 & 4 \\ \hline
1 & 12 & 23 & 46 & 3 & 14 & 25 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp7x7ul \<equiv> the (to_path
[[9,30,19,42,7,32,17],
[20,49,8,31,18,43,6],
[29,10,41,36,39,16,33],
[48,21,38,27,34,5,44],
[11,28,35,40,37,26,15],
[22,47,2,13,24,45,4],
[1,12,23,46,3,14,25]])"
lemma kp_7x7_ul: "knights_path b7x7 kp7x7ul"
by (simp only: knights_path_exec_simp) eval
abbreviation "b7x9 \<equiv> board 7 9"
text \<open>A Knight's path for the \<open>(7\<times>9)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
59 & 4 & 17 & 50 & 37 & 6 & 19 & 30 & 39 \\ \hline
16 & 63 & 58 & 5 & 18 & 51 & 38 & 7 & 20 \\ \hline
3 & 60 & 49 & 36 & 57 & 42 & 29 & 40 & 31 \\ \hline
48 & 15 & 62 & 43 & 52 & 35 & 56 & 21 & 8 \\ \hline
61 & 2 & 13 & 26 & 45 & 28 & 41 & 32 & 55 \\ \hline
14 & 47 & 44 & 11 & 24 & 53 & 34 & 9 & 22 \\ \hline
1 & 12 & 25 & 46 & 27 & 10 & 23 & 54 & 33 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp7x9ul \<equiv> the (to_path
[[59,4,17,50,37,6,19,30,39],
[16,63,58,5,18,51,38,7,20],
[3,60,49,36,57,42,29,40,31],
[48,15,62,43,52,35,56,21,8],
[61,2,13,26,45,28,41,32,55],
[14,47,44,11,24,53,34,9,22],
[1,12,25,46,27,10,23,54,33]])"
lemma kp_7x9_ul: "knights_path b7x9 kp7x9ul"
by (simp only: knights_path_exec_simp) eval
abbreviation "b9x7 \<equiv> board 9 7"
text \<open>A Knight's path for the \<open>(9\<times>7)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
5 & 20 & 53 & 48 & 7 & 22 & 31 \\ \hline
52 & 63 & 6 & 21 & 32 & 55 & 8 \\ \hline
19 & 4 & 49 & 54 & 47 & 30 & 23 \\ \hline
62 & 51 & 46 & 33 & 56 & 9 & 58 \\ \hline
3 & 18 & 61 & 50 & 59 & 24 & 29 \\ \hline
14 & 43 & 34 & 45 & 28 & 57 & 10 \\ \hline
17 & 2 & 15 & 60 & 35 & 38 & 25 \\ \hline
42 & 13 & 44 & 27 & 40 & 11 & 36 \\ \hline
1 & 16 & 41 & 12 & 37 & 26 & 39 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp9x7ul \<equiv> the (to_path
[[5,20,53,48,7,22,31],
[52,63,6,21,32,55,8],
[19,4,49,54,47,30,23],
[62,51,46,33,56,9,58],
[3,18,61,50,59,24,29],
[14,43,34,45,28,57,10],
[17,2,15,60,35,38,25],
[42,13,44,27,40,11,36],
[1,16,41,12,37,26,39]])"
lemma kp_9x7_ul: "knights_path b9x7 kp9x7ul"
by (simp only: knights_path_exec_simp) eval
abbreviation "b9x9 \<equiv> board 9 9"
text \<open>A Knight's path for the \<open>(9\<times>9)\<close>-board that starts in the lower-left and ends in the
upper-left.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|l|l|l|l|l|}
\hline
13 & 26 & 39 & 52 & 11 & 24 & 37 & 50 & 9 \\ \hline
40 & 81 & 12 & 25 & 38 & 51 & 10 & 23 & 36 \\ \hline
27 & 14 & 53 & 58 & 63 & 68 & 73 & 8 & 49 \\ \hline
80 & 41 & 64 & 67 & 72 & 57 & 62 & 35 & 22 \\ \hline
15 & 28 & 59 & 54 & 65 & 74 & 69 & 48 & 7 \\ \hline
42 & 79 & 66 & 71 & 76 & 61 & 56 & 21 & 34 \\ \hline
29 & 16 & 77 & 60 & 55 & 70 & 75 & 6 & 47 \\ \hline
78 & 43 & 2 & 31 & 18 & 45 & 4 & 33 & 20 \\ \hline
1 & 30 & 17 & 44 & 3 & 32 & 19 & 46 & 5 \\ \hline
\end{tabular}
\end{table}\<close>
abbreviation "kp9x9ul \<equiv> the (to_path
[[13,26,39,52,11,24,37,50,9],
[40,81,12,25,38,51,10,23,36],
[27,14,53,58,63,68,73,8,49],
[80,41,64,67,72,57,62,35,22],
[15,28,59,54,65,74,69,48,7],
[42,79,66,71,76,61,56,21,34],
[29,16,77,60,55,70,75,6,47],
[78,43,2,31,18,45,4,33,20],
[1,30,17,44,3,32,19,46,5]])"
lemma kp_9x9_ul: "knights_path b9x9 kp9x9ul"
by (simp only: knights_path_exec_simp) eval
text \<open>The following lemma corresponds to a sub-proof of Lemma 4 in @{cite "cull_decurtins_1987"};
I have factored this sub-proof out into a separate lemma.\<close>
lemma knights_circuit_exists_even_n_gr10:
assumes "even n" "n \<ge> 10" "m \<ge> 5"
"\<exists>ps. knights_path (board (n-5) m) ps \<and> hd ps = (int (n-5),1)
\<and> last ps = (int (n-5)-1,int m-1)"
shows "\<exists>ps. knights_circuit (board m n) ps"
using assms
proof -
let ?b\<^sub>2="board (n-5) m"
assume "n \<ge> 10"
then obtain ps\<^sub>2 where ps\<^sub>2_prems: "knights_path ?b\<^sub>2 ps\<^sub>2" "hd ps\<^sub>2 = (int (n-5),1)"
"last ps\<^sub>2 = (int (n-5)-1,int m-1)"
using assms by auto
let ?ps\<^sub>2_m2="mirror2 ps\<^sub>2"
have ps\<^sub>2_m2_prems: "knights_path ?b\<^sub>2 ?ps\<^sub>2_m2" "hd ?ps\<^sub>2_m2 = (int (n-5),int m)"
"last ?ps\<^sub>2_m2 = (int (n-5)-1,2)"
using ps\<^sub>2_prems mirror2_knights_path hd_mirror2 last_mirror2 by auto
obtain ps\<^sub>1 where ps\<^sub>1_prems: "knights_path (board 5 m) ps\<^sub>1" "hd ps\<^sub>1 = (1,1)""last ps\<^sub>1 = (2,int m-1)"
using assms knights_path_5xm_exists by auto
let ?ps\<^sub>1'="trans_path (int (n-5),0) ps\<^sub>1"
let ?b\<^sub>1'="trans_board (int (n-5),0) (board 5 m)"
have ps\<^sub>1'_prems: "knights_path ?b\<^sub>1' ?ps\<^sub>1'" "hd ?ps\<^sub>1' = (int (n-5)+1,1)"
"last ?ps\<^sub>1' = (int (n-5)+2,int m-1)"
using ps\<^sub>1_prems trans_knights_path knights_path_non_nil hd_trans_path last_trans_path by auto
let ?ps="?ps\<^sub>1'@?ps\<^sub>2_m2"
let ?psT="transpose ?ps"
have "n-5 \<ge> 5" using \<open>n \<ge> 10\<close> by auto
have inter: "?b\<^sub>1' \<inter> ?b\<^sub>2 = {}"
unfolding trans_board_def board_def using \<open>n-5 \<ge> 5\<close> by auto
have union: "?b\<^sub>1' \<union> ?b\<^sub>2 = board n m"
using \<open>n-5 \<ge> 5\<close> board_concatT[of "n-5" m 5] by auto
have vs: "valid_step (last ?ps\<^sub>1') (hd ?ps\<^sub>2_m2)" and "valid_step (last ?ps\<^sub>2_m2) (hd ?ps\<^sub>1')"
unfolding valid_step_def using ps\<^sub>1'_prems ps\<^sub>2_m2_prems by auto
then have vs_c: "valid_step (last ?ps) (hd ?ps)"
using ps\<^sub>1'_prems ps\<^sub>2_m2_prems knights_path_non_nil by auto
have "knights_path (board n m) ?ps"
using ps\<^sub>1'_prems ps\<^sub>2_m2_prems inter vs union knights_path_append[of ?b\<^sub>1' ?ps\<^sub>1' ?b\<^sub>2 ?ps\<^sub>2_m2]
by auto
then have "knights_circuit (board n m) ?ps"
unfolding knights_circuit_def using vs_c by auto
then show ?thesis using transpose_knights_circuit by auto
qed
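text \<open>Check of the two gluing steps in the proof above (my arithmetic): the translated
\<open>5\<times>m\<close>-path ends in \<open>(int (n-5)+2,int m-1)\<close> and the mirrored \<open>(n-5)\<times>m\<close>-path starts in
\<open>(int (n-5),int m)\<close>, giving row distance 2 and column distance 1; the mirrored path ends in
\<open>(int (n-5)-1,2)\<close> and the translated path starts in \<open>(int (n-5)+1,1)\<close>, again row distance 2 and
column distance 1. Both are valid knight's moves, hence the concatenation closes to a circuit on
the \<open>n\<times>m\<close>-board, which is then transposed to obtain a circuit on the \<open>m\<times>n\<close>-board.\<close>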
text \<open>For every \<open>n\<times>m\<close>-board with \<open>min n m \<ge> 5\<close> and odd \<open>n\<close> there exists a Knight's path that
starts in \<open>(n,1)\<close> (top-left) and ends in \<open>(n-1,m-1)\<close> (top-right).\<close>
text \<open>This lemma formalizes Lemma 4 from @{cite "cull_decurtins_1987"}. Formalizing its proof was
quite challenging, since many details of how exactly to combine the boards are left out of the
original proof in @{cite "cull_decurtins_1987"}.\<close>
lemma knights_path_odd_n_exists:
assumes "odd n" "min n m \<ge> 5"
shows "\<exists>ps. knights_path (board n m) ps \<and> hd ps = (int n,1) \<and> last ps = (int n-1,int m-1)"
using assms
proof -
obtain x where "x = n + m" by auto
then show ?thesis
using assms
proof (induction x arbitrary: n m rule: less_induct)
case (less x)
then have "m = 5 \<or> m = 6 \<or> m = 7 \<or> m = 8 \<or> m = 9 \<or> m \<ge> 10" by auto
then show ?case
proof (elim disjE)
assume [simp]: "m = 5"
have "odd n" "n \<ge> 5" using less by auto
then have "n = 5 \<or> n = 7 \<or> n = 9 \<or> n-5 \<ge> 5" by presburger
then show ?thesis
proof (elim disjE)
assume [simp]: "n = 5"
let ?ps="mirror1 (transpose kp5x5ul)"
have kp: "knights_path (board n m) ?ps"
using kp_5x5_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 5\<close> \<open>n = 5\<close> | eval)+
then show ?thesis using kp by auto
next
assume [simp]: "n = 7"
let ?ps="mirror1 (transpose kp5x7ul)"
have kp: "knights_path (board n m) ?ps"
using kp_5x7_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 5\<close> \<open>n = 7\<close> | eval)+
then show ?thesis using kp by auto
next
assume [simp]: "n = 9"
let ?ps="mirror1 (transpose kp5x9ul)"
have kp: "knights_path (board n m) ?ps"
using kp_5x9_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 5\<close> \<open>n = 9\<close> | eval)+
then show ?thesis using kp by auto
next
let ?b\<^sub>2="board m (n-5)"
assume "n-5 \<ge> 5"
then have "\<exists>ps. knights_circuit ?b\<^sub>2 ps"
proof -
have "n-5 = 6 \<or> n-5 = 8 \<or> n-5 \<ge> 10"
using \<open>n-5 \<ge> 5\<close> less by presburger
then show ?thesis
proof (elim disjE)
assume "n-5 = 6"
then obtain ps where "knights_circuit (board (n-5) m) ps"
using knights_path_6xm_exists[of m] by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n-5 = 8"
then obtain ps where "knights_circuit (board (n-5) m) ps"
using knights_path_8xm_exists[of m] by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n-5 \<ge> 10"
then show ?thesis
using less less.IH[of "n-10+m" "n-10" m]
knights_circuit_exists_even_n_gr10[of "n-5" m] by auto
qed
qed
then obtain ps\<^sub>2 where "knights_circuit ?b\<^sub>2 ps\<^sub>2" "hd ps\<^sub>2 = (1,1)" "last ps\<^sub>2 = (3,2)"
using \<open>n-5 \<ge> 5\<close> rotate_knights_circuit[of m "n-5"] by auto
then have rev_ps\<^sub>2_prems: "knights_path ?b\<^sub>2 (rev ps\<^sub>2)" "valid_step (last ps\<^sub>2) (hd ps\<^sub>2)"
"hd (rev ps\<^sub>2) = (3,2)" "last (rev ps\<^sub>2) = (1,1)"
unfolding knights_circuit_def using knights_path_rev by (auto simp: hd_rev last_rev)
let ?ps\<^sub>1="kp5x5ul"
have ps\<^sub>1_prems: "knights_path (board 5 5) ?ps\<^sub>1" "hd ?ps\<^sub>1 = (1,1)" "last ?ps\<^sub>1 = (4,2)"
using kp_5x5_ul by simp eval+
have "16 < length ?ps\<^sub>1" "last (take 16 ?ps\<^sub>1) = (4,5)" "hd (drop 16 ?ps\<^sub>1) = (2,4)" by eval+
then have si: "step_in ?ps\<^sub>1 (4,5) (2,4)"
unfolding step_in_def using zero_less_numeral by blast
have vs: "valid_step (4,5) (3,int 5+2)" "valid_step (1,int 5+1) (2,4)"
unfolding valid_step_def by auto
obtain ps where "knights_path (board m n) ps" "hd ps = (1,1)" "last ps = (4,2)"
using \<open>n-5 \<ge> 5\<close> ps\<^sub>1_prems rev_ps\<^sub>2_prems si vs
knights_path_split_concat[of 5 5 ?ps\<^sub>1 "n-5" "rev ps\<^sub>2" "(4,5)" "(2,4)"] by auto
then show ?thesis
using rot90_knights_path hd_rot90_knights_path last_rot90_knights_path by fastforce
qed
next
assume [simp]: "m = 6"
then obtain ps where
ps_prems: "knights_path (board m n) ps" "hd ps = (1,1)" "last ps = (int m-1,2)"
using less knights_path_6xm_exists[of n] by auto
let ?ps'="mirror1 (transpose ps)"
have "knights_path (board n m) ?ps'" "hd ?ps' = (int n,1)" "last ?ps' = (int n-1,int m-1)"
using ps_prems rot90_knights_path hd_rot90_knights_path last_rot90_knights_path by auto
then show ?thesis by auto
next
assume [simp]: "m = 7"
have "odd n" "n \<ge> 5" using less by auto
then have "n = 5 \<or> n = 7 \<or> n = 9 \<or> n-5 \<ge> 5" by presburger
then show ?thesis
proof (elim disjE)
assume [simp]: "n = 5"
let ?ps="mirror1 kp5x7lr"
have kp: "knights_path (board n m) ?ps"
using kp_5x7_lr mirror1_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 7\<close> \<open>n = 5\<close> | eval)+
then show ?thesis using kp by auto
next
assume [simp]: "n = 7"
let ?ps="mirror1 (transpose kp7x7ul)"
have kp: "knights_path (board n m) ?ps"
using kp_7x7_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 7\<close> \<open>n = 7\<close> | eval)+
then show ?thesis using kp by auto
next
assume [simp]: "n = 9"
let ?ps="mirror1 (transpose kp7x9ul)"
have kp: "knights_path (board n m) ?ps"
using kp_7x9_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 7\<close> \<open>n = 9\<close> | eval)+
then show ?thesis using kp by auto
next
let ?b\<^sub>2="board m (n-5)"
let ?b\<^sub>2T="board (n-5) m"
assume "n-5 \<ge> 5"
then have "\<exists>ps. knights_circuit ?b\<^sub>2 ps"
proof -
have "n-5 = 6 \<or> n-5 = 8 \<or> n-5 \<ge> 10"
using \<open>n-5 \<ge> 5\<close> less by presburger
then show ?thesis
proof (elim disjE)
assume "n-5 = 6"
then obtain ps where "knights_circuit (board (n-5) m) ps"
using knights_path_6xm_exists[of m] by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n-5 = 8"
then obtain ps where "knights_circuit (board (n-5) m) ps"
using knights_path_8xm_exists[of m] by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n-5 \<ge> 10"
then show ?thesis
using less less.IH[of "n-10+m" "n-10" m]
knights_circuit_exists_even_n_gr10[of "n-5" m] by auto
qed
qed
then obtain ps\<^sub>2 where ps\<^sub>2_prems: "knights_circuit ?b\<^sub>2 ps\<^sub>2" "hd ps\<^sub>2 = (1,1)"
"last ps\<^sub>2 = (3,2)"
using \<open>n-5 \<ge> 5\<close> rotate_knights_circuit[of m "n-5"] by auto
let ?ps\<^sub>2T="transpose ps\<^sub>2"
have ps\<^sub>2T_prems: "knights_path ?b\<^sub>2T ?ps\<^sub>2T" "hd ?ps\<^sub>2T = (1,1)" "last ?ps\<^sub>2T = (2,3)"
using ps\<^sub>2_prems transpose_knights_path knights_path_non_nil hd_transpose last_transpose
unfolding knights_circuit_def transpose_square_def by auto
let ?ps\<^sub>1="kp5x7lr"
have ps\<^sub>1_prems: "knights_path b5x7 ?ps\<^sub>1" "hd ?ps\<^sub>1 = (1,1)" "last ?ps\<^sub>1 = (2,6)"
using kp_5x7_lr by simp eval+
have "29 < length ?ps\<^sub>1" "last (take 29 ?ps\<^sub>1) = (4,2)" "hd (drop 29 ?ps\<^sub>1) = (5,4)" by eval+
then have si: "step_in ?ps\<^sub>1 (4,2) (5,4)"
unfolding step_in_def using zero_less_numeral by blast
have vs: "valid_step (4,2) (int 5+1,1)" "valid_step (int 5+2,3) (5,4)"
unfolding valid_step_def by auto
obtain ps where "knights_path (board n m) ps" "hd ps = (1,1)" "last ps = (2,6)"
using \<open>n-5 \<ge> 5\<close> ps\<^sub>1_prems ps\<^sub>2T_prems si vs
knights_path_split_concatT[of 5 m ?ps\<^sub>1 "n-5" ?ps\<^sub>2T "(4,2)" "(5,4)"] by auto
then show ?thesis
using mirror1_knights_path hd_mirror1 last_mirror1 by fastforce
qed
next
assume [simp]: "m = 8"
then obtain ps where ps_prems: "knights_path (board m n) ps" "hd ps = (1,1)"
"last ps = (int m-1,2)"
using less knights_path_8xm_exists[of n] by auto
let ?ps'="mirror1 (transpose ps)"
have "knights_path (board n m) ?ps'" "hd ?ps' = (int n,1)" "last ?ps' = (int n-1,int m-1)"
using ps_prems rot90_knights_path hd_rot90_knights_path last_rot90_knights_path by auto
then show ?thesis by auto
next
assume [simp]: "m = 9"
have "odd n" "n \<ge> 5" using less by auto
then have "n = 5 \<or> n = 7 \<or> n = 9 \<or> n-5 \<ge> 5" by presburger
then show ?thesis
proof (elim disjE)
assume [simp]: "n = 5"
let ?ps="mirror1 kp5x9lr"
have kp: "knights_path (board n m) ?ps"
using kp_5x9_lr mirror1_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 9\<close> \<open>n = 5\<close> | eval)+
then show ?thesis using kp by auto
next
assume [simp]: "n = 7"
let ?ps="mirror1 (transpose kp9x7ul)"
have kp: "knights_path (board n m) ?ps"
using kp_9x7_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 9\<close> \<open>n = 7\<close> | eval)+
then show ?thesis using kp by auto
next
assume [simp]: "n = 9"
let ?ps="mirror1 (transpose kp9x9ul)"
have kp: "knights_path (board n m) ?ps"
using kp_9x9_ul rot90_knights_path by auto
have "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
by (simp only: \<open>m = 9\<close> \<open>n = 9\<close> | eval)+
then show ?thesis using kp by auto
next
let ?b\<^sub>2="board m (n-5)"
let ?b\<^sub>2T="board (n-5) m"
assume "n-5 \<ge> 5"
then have "\<exists>ps. knights_circuit ?b\<^sub>2 ps"
proof -
have "n-5 = 6 \<or> n-5 = 8 \<or> n-5 \<ge> 10"
using \<open>n-5 \<ge> 5\<close> less by presburger
then show ?thesis
proof (elim disjE)
assume "n-5 = 6"
then obtain ps where "knights_circuit (board (n-5) m) ps"
using knights_path_6xm_exists[of m] by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n-5 = 8"
then obtain ps where "knights_circuit (board (n-5) m) ps"
using knights_path_8xm_exists[of m] by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n-5 \<ge> 10"
then show ?thesis
using less less.IH[of "n-10+m" "n-10" m]
knights_circuit_exists_even_n_gr10[of "n-5" m] by auto
qed
qed
then obtain ps\<^sub>2 where ps\<^sub>2_prems: "knights_circuit ?b\<^sub>2 ps\<^sub>2" "hd ps\<^sub>2 = (1,1)"
"last ps\<^sub>2 = (3,2)"
using \<open>n-5 \<ge> 5\<close> rotate_knights_circuit[of m "n-5"] by auto
let ?ps\<^sub>2T="transpose (rev ps\<^sub>2)"
have ps\<^sub>2T_prems: "knights_path ?b\<^sub>2T ?ps\<^sub>2T" "hd ?ps\<^sub>2T = (2,3)" "last ?ps\<^sub>2T = (1,1)"
using ps\<^sub>2_prems knights_path_rev transpose_knights_path knights_path_non_nil
hd_transpose last_transpose
unfolding knights_circuit_def transpose_square_def by (auto simp: hd_rev last_rev)
let ?ps\<^sub>1="kp5x9lr"
have ps\<^sub>1_prems: "knights_path b5x9 ?ps\<^sub>1" "hd ?ps\<^sub>1 = (1,1)" "last ?ps\<^sub>1 = (2,8)"
using kp_5x9_lr by simp eval+
have "16 < length ?ps\<^sub>1" "last (take 16 ?ps\<^sub>1) = (5,4)" "hd (drop 16 ?ps\<^sub>1) = (4,2)" by eval+
then have si: "step_in ?ps\<^sub>1 (5,4) (4,2)"
unfolding step_in_def using zero_less_numeral by blast
have vs: "valid_step (5,4) (int 5+2,3)" "valid_step (int 5+1,1) (4,2)"
unfolding valid_step_def by auto
obtain ps where "knights_path (board n m) ps" "hd ps = (1,1)" "last ps = (2,8)"
using \<open>n-5 \<ge> 5\<close> ps\<^sub>1_prems ps\<^sub>2T_prems si vs
knights_path_split_concatT[of 5 m ?ps\<^sub>1 "n-5" ?ps\<^sub>2T "(5,4)" "(4,2)"] by auto
then show ?thesis
using mirror1_knights_path hd_mirror1 last_mirror1 by fastforce
qed
next
let ?b\<^sub>1="board n 5"
let ?b\<^sub>2="board n (m-5)"
assume "m \<ge> 10"
then have "n+5 < x" "5 \<le> min n 5" "n+(m-5) < x" "5 \<le> min n (m-5)"
using less by auto
then obtain ps\<^sub>1 ps\<^sub>2 where kp_prems:
"knights_path ?b\<^sub>1 ps\<^sub>1" "hd ps\<^sub>1 = (int n,1)" "last ps\<^sub>1 = (int n-1,4)"
"knights_path (board n (m-5)) ps\<^sub>2" "hd ps\<^sub>2 = (int n,1)" "last ps\<^sub>2 = (int n-1,int (m-5)-1)"
using less.prems less.IH[of "n+5" n "5"] less.IH[of "n+(m-5)" n "m-5"] by auto
let ?ps="ps\<^sub>1@trans_path (0,int 5) ps\<^sub>2"
have "valid_step (last ps\<^sub>1) (int n,int 5+1)"
unfolding valid_step_def using kp_prems by auto
then have "knights_path (board n m) ?ps" "hd ?ps = (int n,1)" "last ?ps = (int n-1,int m-1)"
using \<open>m \<ge> 10\<close> kp_prems knights_path_concat[of n 5 ps\<^sub>1 "m-5" ps\<^sub>2]
knights_path_non_nil trans_path_non_nil last_trans_path by auto
then show ?thesis by auto
qed
qed
qed
text \<open>Auxiliary lemma that constructs a Knight's circuit if \<open>m \<ge> 5\<close> and \<open>n \<ge> 10 \<and> even n\<close>.\<close>
lemma knights_circuit_exists_n_even_gr_10:
assumes "n \<ge> 10 \<and> even n" "m \<ge> 5"
shows "\<exists>ps. knights_circuit (board n m) ps"
using assms
proof -
obtain ps\<^sub>1 where ps\<^sub>1_prems: "knights_path (board 5 m) ps\<^sub>1" "hd ps\<^sub>1 = (1,1)"
"last ps\<^sub>1 = (2,int m-1)"
using assms knights_path_5xm_exists by auto
let ?ps\<^sub>1'="trans_path (int (n-5),0) ps\<^sub>1"
let ?b5xm'="trans_board (int (n-5),0) (board 5 m)"
have ps\<^sub>1'_prems: "knights_path ?b5xm' ?ps\<^sub>1'" "hd ?ps\<^sub>1' = (int (n-5)+1,1)"
"last ?ps\<^sub>1' = (int (n-5)+2,int m-1)"
using ps\<^sub>1_prems trans_knights_path knights_path_non_nil hd_trans_path last_trans_path by auto
assume "n \<ge> 10 \<and> even n"
then have "odd (n-5)" "min (n-5) m \<ge> 5" using assms by auto
then obtain ps\<^sub>2 where ps\<^sub>2_prems: "knights_path (board (n-5) m) ps\<^sub>2" "hd ps\<^sub>2 = (int (n-5),1)"
"last ps\<^sub>2 = (int (n-5)-1,int m-1)"
using knights_path_odd_n_exists[of "n-5" m] by auto
let ?ps\<^sub>2'="mirror2 ps\<^sub>2"
have ps\<^sub>2'_prems: "knights_path (board (n-5) m) ?ps\<^sub>2'" "hd ?ps\<^sub>2' = (int (n-5),int m)"
"last ?ps\<^sub>2' = (int (n-5)-1,2)"
using ps\<^sub>2_prems mirror2_knights_path hd_mirror2 last_mirror2 by auto
have inter: "?b5xm' \<inter> board (n-5) m = {}"
unfolding trans_board_def board_def by auto
have union: "board n m = ?b5xm' \<union> board (n-5) m"
using \<open>n \<ge> 10 \<and> even n\<close> board_concatT[of "n-5" m 5] by auto
have vs: "valid_step (last ?ps\<^sub>1') (hd ?ps\<^sub>2')" "valid_step (last ?ps\<^sub>2') (hd ?ps\<^sub>1')"
using ps\<^sub>1'_prems ps\<^sub>2'_prems unfolding valid_step_def by auto
let ?ps="?ps\<^sub>1' @ ?ps\<^sub>2'"
have "last ?ps = last ?ps\<^sub>2'" "hd ?ps = hd ?ps\<^sub>1'"
using ps\<^sub>1'_prems ps\<^sub>2'_prems knights_path_non_nil by auto
then have vs_c: "valid_step (last ?ps) (hd ?ps)"
using vs by auto
have "knights_path (board n m) ?ps"
using ps\<^sub>1'_prems ps\<^sub>2'_prems inter union vs knights_path_append by auto
then show ?thesis
using vs_c unfolding knights_circuit_def by blast
qed
text \<open>Final Theorem 1: For every \<open>n\<times>m\<close>-board with \<open>min n m \<ge> 5\<close> and \<open>n*m\<close> even there exists a
Knight's circuit.\<close>
theorem knights_circuit_exists:
assumes "min n m \<ge> 5" "even (n*m)"
shows "\<exists>ps. knights_circuit (board n m) ps"
using assms
proof -
have "n = 6 \<or> m = 6 \<or> n = 8 \<or> m = 8 \<or> (n \<ge> 10 \<and> even n) \<or> (m \<ge> 10 \<and> even m)"
using assms by auto
then show ?thesis
proof (elim disjE)
assume "n = 6"
then show ?thesis
using assms knights_path_6xm_exists by auto
next
assume "m = 6"
then obtain ps where "knights_circuit (board m n) ps"
using assms knights_path_6xm_exists by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n = 8"
then show ?thesis
using assms knights_path_8xm_exists by auto
next
assume "m = 8"
then obtain ps where "knights_circuit (board m n) ps"
using assms knights_path_8xm_exists by auto
then show ?thesis
using transpose_knights_circuit by auto
next
assume "n \<ge> 10 \<and> even n"
then show ?thesis
using assms knights_circuit_exists_n_even_gr_10 by auto
next
assume "m \<ge> 10 \<and> even m"
then obtain ps where "knights_circuit (board m n) ps"
using assms knights_circuit_exists_n_even_gr_10 by auto
then show ?thesis
using transpose_knights_circuit by auto
qed
qed
text \<open>Final Theorem 2: for every \<open>n\<times>m\<close>-board with \<open>min n m \<ge> 5\<close> there exists a Knight's path.\<close>
theorem knights_path_exists:
assumes "min n m \<ge> 5"
shows "\<exists>ps. knights_path (board n m) ps"
using assms
proof -
have "odd n \<or> odd m \<or> even (n*m)" by simp
then show ?thesis
proof (elim disjE)
assume "odd n"
then show ?thesis
using assms knights_path_odd_n_exists by auto
next
assume "odd m"
then obtain ps where "knights_path (board m n) ps"
using assms knights_path_odd_n_exists by auto
then show ?thesis
using transpose_knights_path by auto
next
assume "even (n*m)"
then show ?thesis
using assms knights_circuit_exists by (auto simp: knights_circuit_def)
qed
qed
text \<open>THE END\<close>
end |
theory Scratch
imports Main Observation
begin
text \<open>So, you can't have a typeclass return a type parameter. Let's see if we can define a type with
no properties, then later, assert some properties *of* that type and use them in a proof. So... some
`foo` exists. We don't know what it is.\<close>
typedecl foo
text \<open>We can make a container for foo\<close>
datatype box = Box "foo"
text \<open>We can use foo for equality...\<close>
lemma "(x :: foo) \<noteq> (y :: foo) \<longrightarrow> (Box x) \<noteq> (Box y)"
apply auto
done
lemma "(x :: foo) = (y :: foo) \<longrightarrow> (Box x) = (Box y)"
apply auto
done
text \<open>We can define a typeclass which returns foos...\<close>
class openable =
fixes open1 :: "'a \<Rightarrow> foo"
instantiation box :: openable
begin
primrec open1 :: "box \<Rightarrow> foo" where
"open1 (Box f) = f"
instance ..
end
text \<open>Now it'd be nice if we could show something trivial, like... if foo is, say, a nat, then open1
returns a nat. If we were using type parameters, I'd make a "box nat", but since we're NOT using
type parameters... I want to write, like, "(foo = nat) \<Rightarrow> is_nat (openable x)" but I have no idea how\<close>
text \<open>I think what I want here is maybe... a locale? Let's try that...\<close>
locale partial_order =
fixes le :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
assumes refl [intro, simp]: "le x x"
and anti_sym [intro]: "\<lbrakk>le x y; le y x\<rbrakk> \<Longrightarrow> x = y"
and trans [trans]: "\<lbrakk>le x y; le y z\<rbrakk> \<Longrightarrow> le x z"
text \<open>So... what we've done here is assert a le function exists, and it obeys these laws, but we
haven't actually said what le is. le could operate over cats or cargo ships. It could be < or >. Can
I define a locale that returns a type parameter?\<close>
locale openable2 =
fixes open2 :: "'a \<Rightarrow> 'b"
text \<open>Huh. So... all I've said here is that a binary function exists. What about... making a box for
a type parameter? That's something we can't do with typeclasses...\<close>
datatype 'a abstract_box = AbstractBox "'a"
locale boxable =
fixes box :: "'a \<Rightarrow> ('b abstract_box)"
text \<open>Okay, so I can define a boxable function which returns something with type parameters. Note
that our box is over a different type, so we could, I dunno, take strings and constantly return
AbstractBox 2 or something.\<close>
|
module SystemF.Everything where
open import SystemF.Syntax public
open import SystemF.WellTyped public
open import SystemF.Substitutions.Lemmas public
open import SystemF.Substitutions public
open import SystemF.NormalForm public
open TypeLemmas public hiding (var)
|
lemma plus_absorb2: "g \<in> o[F](f) \<Longrightarrow> L F (\<lambda>x. f x + g x) = L F (f)" |
# Function for calculate phytosociological table
# Version: 1.2.0
# https://github.com/MarioJose/r-functions/tree/master/phyto
phyto <- function(x, filter = NULL, area = NULL, criteria = NULL, measure = NULL, incDead = TRUE, nmDead = "Dead", diversity = TRUE, evenness = TRUE){
# x must be data frame with: plot, family, specie, diameter, (height).
# It must be in this order, but not necessarily with these names. Height is
# optional
# Checking input
# ++++++++++++++
if(!is.data.frame(x)){
stop("'x' must be a data frame")
} else {
if(dim(x)[2] < 4 | dim(x)[2] > 5){
stop("Your data frame must have at least 4 columns with the follow data at the same order: 'plot', 'family', 'specie', 'diameter'. The column 'height' is optional")
}
}
if(is.null(filter)){
stop("You must specify the filter for summarization: 'plot', 'family', 'genus', 'specie'")
} else {
if(!(filter %in% c("plot", "family", "genus", "specie"))){
stop("The filter must be one of the following options: 'plot', 'family', 'genus', 'specie'")
}
}
if(is.null(area)){
stop("You must specify the sampled area")
}
if(is.null(criteria)){
stop("You must specify the diameter criteria for individual inclusion")
}
if(is.null(measure)){
stop("You must specify the measure: \"d\" = diameter; \"c\" = circumference")
} else {
if(!(measure %in% c("d","c"))){
stop("The measure must be one of the following options: \"d\" = diameter; \"c\" = circumference")
}
}
# Convert family and species column to character
x[ ,2] <- as.character(x[ ,2])
x[ ,3] <- as.character(x[ ,3])
# Remove dead individuals (column 'specie')
if(!incDead){
didx <- grep(nmDead , x[ ,3])
x <- x[-didx, ]
print(paste("Removed", length(didx), "individual named as", nmDead))
}
# Functions
# +++++++++
# Split multiple diameter or height and return diameter of total basal area of
# each individual or mean height of each individual
splitMultiple <- function(x, m){
if(!is.numeric(x)) tmp <- as.numeric(unlist(strsplit(x , "+", TRUE)))
else tmp <- x
# Expression used in conversion
# area = (pi*diameter^2)/4
# area = (circumference^2)/(4*pi)
# diameter = sqrt((4*area)/pi)
# Return diameter of total basal area
if(m == "d") out <- sqrt(4 * sum((pi * (tmp ^ 2)) / 4) / pi)
# Convert circumference and return diameter of total basal area
if(m == "c") out <- sqrt(4 * sum((tmp ^ 2) / (4 * pi)) / pi)
# Return mean of height
if(m == "h") out <- mean(tmp)
return(out)
}
# Shannon and Simpson diversity index
shannon_fn <- function(x){
p <- x / sum(x)
H <- -sum(p * log(p))
# Variance as Hutcheson (1970)
varH <- ((sum(p * (log(p)^2)) - sum(p * log(p))^2) / sum(x)) + ((length(x) - 1) / (2 * (length(x)^2)))
return(c(H = H, varH = varH))
}
simpson_fn <- function(x, var = FALSE){
# As Simpson (1949)
N <- sum(x)
p <- x / N
D <- sum(x * (x - 1)) / (N * (N - 1))
varD <- ( 4*N*(N - 1)*(N - 2)*sum(p^3) + 2*N*(N - 1)*sum(p^2) - 2*N*(N - 1)*(2*N - 3)*(sum(p^2)^2) ) / (N*(N - 1))^2
# if N be very large, approximately
#varD <- (4/N) * ( (sum(p^3)) - (sum(p^2))^2 );
return(c(D = D, varD = varD))
}
# Evar and E1/D evenness index
evenness_fn <- function(x){
# As Smith & Wilson (1996)
mulog <- sum(log(x)) / length(x)
# 0 = minimum evenness
Evar <- 1 - (2 / pi) * atan( sum((log(x) - mulog)^2) / length(x) )
E1D <- (1 / simpson_fn(x)[["D"]]) / length(x)
return(c(Evar = Evar, E1D = E1D))
}
# Checking data
# +++++++++++++
# Columns names
colnames(x) <- c("plot", "family", "specie", "diameter", "height")[1:dim(x)[2]]
# Create column with genus
x$genus <- sapply(x$specie, function(x) strsplit(x, " ")[[1]][1])
# Order data frame
x <- x[, c(1:2, dim(x)[2], 3:(dim(x)[2] - 1))]
# Standardize measure to diameter. Split multiples measures and return
# diameter of total basal area to individual
if(is.character(x$diameter) | is.factor(x$diameter))
x$diameter <- sapply(as.character(x$diameter), splitMultiple, m = measure)
else if(measure == "c") x$diameter <- x$diameter / pi
# Standardize height. Split multiples measures and return mean of height to
# individual
if(dim(x)[2] == 6)
if(is.character(x$height) | is.factor(x$height))
x$height <- sapply(as.character(x$height), splitMultiple, m = "h")
# Filter by inclusion criteria
cidx <- x$diameter >= criteria
x <- x[cidx, ]
if(dim(x)[1] < 1) stop("Your criteria removed all individuals")
else print(paste("Removed", length(cidx) - sum(cidx), "individual(s) by criteria", criteria))
# Calculate parameters
# ++++++++++++++++++++
# Create output data frame
out <- aggregate(list(nInd = x$specie), by = list(c1 = x[ ,filter]), FUN = length)
colnames(out) <- c(filter, "nInd")
# Add families
if(filter %in% c("genus", "specie")){
tf <- aggregate(list(nInd = x[ ,filter]), by = list(family = x$family, filter = x[ ,filter]), FUN = length)
out$family <- tf$family[match(out[ ,filter], tf$filter)]
out <- out[ ,c("family", filter, "nInd")]
}
if(filter == "plot"){
# Number of families per plot
out[ ,"nFamilies"] <- aggregate(x$family, by = list(x$plot), FUN = function(x){length(unique(x))})$x
# Number of genera per plot
out[ ,"nGenera"] <- aggregate(x$genus, by = list(x$plot), FUN = function(x){length(unique(x))})$x
# Number of species per plot
out[ ,"nSpecies"] <- aggregate(x$specie, by = list(x$plot), FUN = function(x){length(unique(x))})$x
if(diversity){
out[ ,c("H", "varH")] <- aggregate(x$specie, by = list(x$plot), FUN = function(x){shannon_fn(table(as.character(x)))})[[2]]
out[ ,c("D", "varD")] <- aggregate(x$specie, by = list(x$plot), FUN = function(x){simpson_fn(table(as.character(x)))})[[2]]
}
if(evenness)
out[ ,c("Evar","E1D")] <- aggregate(x$specie, by = list(x$plot), FUN = function(x){evenness_fn(table(as.character(x)))})[[2]]
}
if(filter == "family"){
# Number of genera per family
out[ ,"nGenera"] <- aggregate(x$genus, by = list(x$family), FUN = function(x){length(unique(x))})$x
# Number of species per family
out[ ,"nSpecies"] <- aggregate(x$specie, by = list(x$family), FUN = function(x){length(unique(x))})$x
}
if(filter == "genus"){
# Number of species per genus
out[ ,"nSpecies"] <- aggregate(x$specie, by = list(x$genus), FUN = function(x){length(unique(x))})$x
}
# Convert diameter measure from centimetres to meters
x$diameter <- x$diameter / 100
out[ ,"tBasalArea"] <- aggregate(x$diameter, by = list(x[ ,filter]), FUN = function(x){sum((pi * (x^2)) / 4)})$x
if(filter != "plot"){
out[ ,"AbsDens"] <- out$nInd / area
out[ ,"RelDens"] <- (out$nInd / sum(out$nInd)) * 100
# Species or genera by plot
tmp <- aggregate(list(nInd = x[ ,filter]), by = list(filter = x[ ,filter], plot=x$plot), FUN = length)
out[ ,"nPlot"] <- aggregate(tmp$filter, by = list(tmp$filter), FUN = length)$x
out[ ,"AbsFreq"] <- (out[ ,"nPlot"] / length(unique(x$plot))) * 100
out[ ,"RelFreq"] <- (out$AbsFreq / sum(out$AbsFreq)) * 100
out[ ,"AbsDom"] <- out[ ,"tBasalArea"] / area
out[ ,"RelDom"] <- (out[ ,"tBasalArea"] / sum(out[ ,"tBasalArea"])) * 100
out[ ,"IVI"] <- out$RelDens + out$RelFreq + out$RelDom
out[ ,"CVI"] <- out$RelDens + out$RelDom
}
if(length(x[1, ]) == 6){
out[ ,"meanHeight"] <- aggregate(x[ ,6], by = list(x[ ,filter]), FUN = mean)$x
out[ ,"sdHeight"] <- aggregate(x[ ,6], by = list(x[ ,filter]), FUN = sd)$x
}
return(out)
}
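# Example usage (not run). The data frame below is hypothetical, illustrative
# data (not from any real survey): summarize by species for a total sampled
# area of 0.3 (e.g. hectares), keep individuals with diameter >= 5 cm and
# drop dead trees. Multiple stems may be given as "d1+d2".
# df <- data.frame(plot = c(1, 1, 2, 2, 3),
#                  family = c("Fabaceae", "Fabaceae", "Myrtaceae", "Fabaceae", "Myrtaceae"),
#                  specie = c("Inga edulis", "Inga edulis", "Eugenia sp.", "Dead", "Eugenia sp."),
#                  diameter = c("12.4", "8.2+5.1", "15.0", "9.9", "22.3"),
#                  stringsAsFactors = FALSE)
# tab <- phyto(df, filter = "specie", area = 0.3, criteria = 5, measure = "d",
#              incDead = FALSE, nmDead = "Dead")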
|
\section{Concurrent process calculi and spatial logics }\label{sec:concurrent_process_calculi_and_spatial_logics_} % (fold)
In the last thirty years the process calculi have matured into
remarkably powerful analytic tools for reasoning about concurrent and
distributed systems. Process-calculus-based algebraic specification of
processes began with Milner's Calculus of Communicating Systems (CCS)
\cite{MilnerCCS80} and Hoare's Communicating Sequential Processes
(CSP) \cite{CSP} \cite{CSP1} \cite{CSP2} \cite{CSP3}, and continues
through the development of the so-called mobile process calculi,
e.g. Milner, Parrow and Walker's $\pi$-calculus \cite{ParrowWalker},
Cardelli and Caires's spatial logic \cite{CairesC04} \cite{CairesC03}
\cite{Caires04}, or Meredith and Radestock's reflective calculi
\cite{MeredithR05} \cite{meredith2005rho}. The process-calculus-based
algebraic specification of processes has expanded its scope of
applicability to include the specification, analysis, simulation and
execution of processes in domains such as:
\begin{itemize}
\item telecommunications, networking, security and application level protocols
\cite{AbadiB02}
\cite{AbadiB03}
\cite{BrownLM05}
\cite{LaneveZ05};
\item programming language semantics and design
\cite{BrownLM05}
\cite{djoin}
\cite{JoCaml}
\cite{WojciechowskiS99};
\item webservices
\cite{BrownLM05}
\cite{LaneveZ05}
\cite{MeredithB03};
\item and biological systems
\cite{Cardelli04}
\cite{DanosL03}
\cite{RegevS03}
\cite{PriamiRSS01}.
\end{itemize}
Among the many reasons for the continued success of this approach are
two central points. First, the process algebras provide a
compositional approach to the specification, analysis and execution of
concurrent and distributed systems. Owing to Milner's original
insights into computation as interaction \cite{Milner93}, the process
calculi are so organized that the behavior ---the semantics--- of a
system may be composed from the behavior of its components
\cite{Fokkink}. This means that specifications can be constructed in
terms of components ---without a global view of the system--- and
assembled into increasingly complete descriptions.
The second central point is that process algebras have a potent proof
principle, yielding a wide range of effective and novel proof
techniques \cite{MilnerS92} \cite{SangiorgiWalker} \cite{Sangiorgi95}
\cite{hop}. In particular, \emph{bisimulation} encapsulates an effective
notion of process equivalence that has been used in applications as
far-ranging as algorithmic games semantics
\cite{Abramsky2005Algorithmic-Gam} and the construction of
model-checkers \cite{Caires04}. The essential notion can be stated in
an intuitively recursive formulation: a \emph{bisimulation} between two
processes $P$ and $Q$ is an equivalence relation $E$ relating $P$
and $Q$ such that whenever an action of $P$ can be observed, taking it
to a new state $P'$, the same action can be observed of $Q$, taking it
to a new state $Q'$ with $P'$ related to $Q'$ by $E$, and vice versa. $P$ and
$Q$ are \emph{bisimilar} if there is some bisimulation relating
them. Part of what makes this notion so robust and widely applicable
is that it is parameterized in the actions observable of processes
$P$ and $Q$, thus providing a framework for a broad range of
equivalences and up-to techniques \cite{milner92techniques} all governed by the same core
principle \cite{SangiorgiWalker} \cite{Sangiorgi95} \cite{hop}.
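
For concreteness, the recursive clause above can be rendered
schematically as follows; the labelled transition relation
$\stackrel{a}{\longrightarrow}$ and the set of observable actions are
left generic here, rather than fixed to any particular calculus. A
relation $E$ is a bisimulation when, for all $(P, Q) \in E$ and all
actions $a$,
\[
\begin{array}{l}
P \stackrel{a}{\longrightarrow} P' \;\Rightarrow\; \exists Q'.\;
Q \stackrel{a}{\longrightarrow} Q' \,\wedge\, (P', Q') \in E,
\quad \mbox{and} \\[2pt]
Q \stackrel{a}{\longrightarrow} Q' \;\Rightarrow\; \exists P'.\;
P \stackrel{a}{\longrightarrow} P' \,\wedge\, (P', Q') \in E;
\end{array}
\]
two processes are then bisimilar exactly when some bisimulation
relates them.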
% section concurrent_process_calculi_and_spatial_logics_ (end) |
State Before: M : Type u_1
inst✝¹ : CancelCommMonoidWithZero M
inst✝ : Unique Mˣ
p : M
hp : Prime p
L : List M
hL : ∀ (q : M), q ∈ L → Prime q
hpL : p ∣ prod L
⊢ p ∈ L State After: case intro.intro
M : Type u_1
inst✝¹ : CancelCommMonoidWithZero M
inst✝ : Unique Mˣ
p : M
hp : Prime p
L : List M
hL : ∀ (q : M), q ∈ L → Prime q
hpL : p ∣ prod L
x : M
hx1 : x ∈ L
hx2 : p ∣ x
⊢ p ∈ L Tactic: obtain ⟨x, hx1, hx2⟩ := hp.dvd_prod_iff.mp hpL State Before: case intro.intro
M : Type u_1
inst✝¹ : CancelCommMonoidWithZero M
inst✝ : Unique Mˣ
p : M
hp : Prime p
L : List M
hL : ∀ (q : M), q ∈ L → Prime q
hpL : p ∣ prod L
x : M
hx1 : x ∈ L
hx2 : p ∣ x
⊢ p ∈ L State After: no goals Tactic: rwa [(prime_dvd_prime_iff_eq hp (hL x hx1)).mp hx2] |
A topological space is locally path-connected if and only if for every point $x$ and every open neighborhood $V$ of $x$, there exists an open neighborhood $U$ of $x$ such that for every point $y$ in $U$, there exists a path from $x$ to $y$ whose image is contained in $V$. |
% StackExchange Signal Processing Q81493
% https://dsp.stackexchange.com/questions/81493
% Applying 2D Sinc Interpolation for Upsampling in the Fourier Domain (DFT / FFT)
% References:
% 1.
% Remarks:
% 1. B
% TODO:
% 1. C
% Release Notes
% - 1.0.000 29/12/2021
% * First release.
%% General Parameters
subStreamNumberDefault = 79;
run('InitScript.m');
figureIdx = 0;
figureCounterSpec = '%04d';
generateFigures = ON;
%% Simulation Parameters
numRowsI = 5000;
numColsI = 5200;
numRowsO = 10000;
numColsO = 10400;
sincRadius = 5;
%% Generate / Load Data
mX = GenTest([numRowsI, numColsI], sincRadius);
mYRef = GenTest([numRowsO, numColsO], sincRadius);
mY = DftReSample2D(mX, [numRowsO, numColsO]);
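% DftReSample2D (not included in this file) is assumed to upsample mX to the
% target grid by zero padding its 2D DFT, i.e. periodic (Dirichlet) sinc
% interpolation; mYRef is the same test function sampled directly on the fine grid.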
%% Analysis
disp(['The interpolation error is given by: ', num2str(max(abs(mYRef - mY), [], 'all'))]);
%% Auxiliary Function
function [ mX ] = GenTest( vSize, sincRadius )
vX = linspace(-sincRadius, sincRadius, vSize(2) + 1);
vX(end) = [];
vY = linspace(-sincRadius, sincRadius, vSize(1) + 1);
vY = vY(:);
vY(end) = [];
% mX = abs(vX) + abs(vY) + sinc(sqrt(vX .^2 + vY .^2));
mX = sinc(sqrt(vX .^2 + vY .^2));
end
%% Restore Defaults
% set(0, 'DefaultFigureWindowStyle', 'normal');
% set(0, 'DefaultAxesLooseInset', defaultLoosInset);
|
State Before: α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
⊢ (∫⁻ (a : α), f a ∂μ) + ε * ↑↑μ {x | f x + ε ≤ g x} ≤ ∫⁻ (a : α), g a ∂μ State After: case intro.intro.intro
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ (∫⁻ (a : α), f a ∂μ) + ε * ↑↑μ {x | f x + ε ≤ g x} ≤ ∫⁻ (a : α), g a ∂μ Tactic: rcases exists_measurable_le_lintegral_eq μ f with ⟨φ, hφm, hφ_le, hφ_eq⟩ State Before: case intro.intro.intro
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ (∫⁻ (a : α), f a ∂μ) + ε * ↑↑μ {x | f x + ε ≤ g x} ≤ ∫⁻ (a : α), g a ∂μ State After: case intro.intro.intro
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
⊢ φ x + indicator {x | φ x + ε ≤ g x} (fun x => ε) x ≤ g x Tactic: calc
(∫⁻ x, f x ∂μ) + ε * μ { x | f x + ε ≤ g x } = (∫⁻ x, φ x ∂μ) + ε * μ { x | f x + ε ≤ g x } :=
by rw [hφ_eq]
_ ≤ (∫⁻ x, φ x ∂μ) + ε * μ { x | φ x + ε ≤ g x } := by
gcongr
exact measure_mono fun x => (add_le_add_right (hφ_le _) _).trans
_ = ∫⁻ x, φ x + indicator { x | φ x + ε ≤ g x } (fun _ => ε) x ∂μ := by
rw [lintegral_add_left hφm, lintegral_indicator₀, set_lintegral_const]
exact measurableSet_le (hφm.nullMeasurable.measurable'.add_const _) hg.nullMeasurable
_ ≤ ∫⁻ x, g x ∂μ := lintegral_mono_ae (hle.mono fun x hx₁ => ?_) State Before: case intro.intro.intro
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
⊢ φ x + indicator {x | φ x + ε ≤ g x} (fun x => ε) x ≤ g x State After: case intro.intro.intro
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
⊢ (φ x + if x ∈ {x | φ x + ε ≤ g x} then ε else 0) ≤ g x Tactic: simp only [indicator_apply] State Before: case intro.intro.intro
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
⊢ (φ x + if x ∈ {x | φ x + ε ≤ g x} then ε else 0) ≤ g x State After: case intro.intro.intro.inl
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
hx₂ : x ∈ {x | φ x + ε ≤ g x}
⊢ φ x + ε ≤ g x
case intro.intro.intro.inr
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
hx₂ : ¬x ∈ {x | φ x + ε ≤ g x}
⊢ φ x + 0 ≤ g x Tactic: split_ifs with hx₂ State Before: case intro.intro.intro.inl
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
hx₂ : x ∈ {x | φ x + ε ≤ g x}
⊢ φ x + ε ≤ g x
case intro.intro.intro.inr
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
x : α
hx₁ : f x ≤ g x
hx₂ : ¬x ∈ {x | φ x + ε ≤ g x}
⊢ φ x + 0 ≤ g x State After: no goals Tactic: exacts [hx₂, (add_zero _).trans_le <| (hφ_le x).trans hx₁] State Before: α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ (∫⁻ (x : α), f x ∂μ) + ε * ↑↑μ {x | f x + ε ≤ g x} = (∫⁻ (x : α), φ x ∂μ) + ε * ↑↑μ {x | f x + ε ≤ g x} State After: no goals Tactic: rw [hφ_eq] State Before: α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ (∫⁻ (x : α), φ x ∂μ) + ε * ↑↑μ {x | f x + ε ≤ g x} ≤ (∫⁻ (x : α), φ x ∂μ) + ε * ↑↑μ {x | φ x + ε ≤ g x} State After: case bc.bc
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ ↑↑μ {x | f x + ε ≤ g x} ≤ ↑↑μ {x | φ x + ε ≤ g x} Tactic: gcongr State Before: case bc.bc
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ ↑↑μ {x | f x + ε ≤ g x} ≤ ↑↑μ {x | φ x + ε ≤ g x} State After: no goals Tactic: exact measure_mono fun x => (add_le_add_right (hφ_le _) _).trans State Before: α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ (∫⁻ (x : α), φ x ∂μ) + ε * ↑↑μ {x | φ x + ε ≤ g x} = ∫⁻ (x : α), φ x + indicator {x | φ x + ε ≤ g x} (fun x => ε) x ∂μ State After: case hs
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ NullMeasurableSet {x | φ x + ε ≤ g x} Tactic: rw [lintegral_add_left hφm, lintegral_indicator₀, set_lintegral_const] State Before: case hs
α : Type u_1
β : Type ?u.953853
γ : Type ?u.953856
δ : Type ?u.953859
m : MeasurableSpace α
μ ν : Measure α
f g : α → ℝ≥0∞
hle : f ≤ᵐ[μ] g
hg : AEMeasurable g
ε : ℝ≥0∞
φ : α → ℝ≥0∞
hφm : Measurable φ
hφ_le : φ ≤ f
hφ_eq : (∫⁻ (a : α), f a ∂μ) = ∫⁻ (a : α), φ a ∂μ
⊢ NullMeasurableSet {x | φ x + ε ≤ g x} State After: no goals Tactic: exact measurableSet_le (hφm.nullMeasurable.measurable'.add_const _) hg.nullMeasurable |
function [u,v]=pow2cep(m,c,mode)
%POW2CEP convert power domain means and variances to the cepstral domain
% Inputs:
% m: vector giving means in the power domain
% c: covariance matrix in the power domain
% mode: 'c' pow=exp(irdct(cep)) [default]
% 'f' pow=exp(rsfft(cep)/n) [fft length even]
% 'fo' pow=exp(rsfft(cep)/n) [fft length odd]
% 'i' pow=exp(cep) [ no transformation ]
%
% Outputs:
% u: row vector giving the cepstral means with u(1) the 0'th cepstral coefficient
% v: cepstral covariance matrix
% Copyright (C) Mike Brookes 1998
% Version: $Id: pow2cep.m 713 2011-10-16 14:45:43Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nargin<3 mode='c'; end
if min(size(c))==1
c=diag(c); % expand a variance vector into a diagonal covariance matrix
end
m=m(:)'; % force to be a row vector
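% The next two lines are the standard log-normal moment relations: if the power
% domain variables are exp() of jointly Gaussian (log domain) variables with
% mean p and covariance q, then q(i,j)=log(1+c(i,j)/(m(i)*m(j))) and
% p(i)=log(m(i))-q(i,i)/2.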
q=log(1+c./(m'*m));
p=log(m)-0.5*diag(q)';
if any(mode=='f')
n=2*length(m)-2;
if any(mode=='o')
n=n+1;
end
u=rsfft(p,n);
v=rsfft(rsfft(q,n)',n);
elseif any(mode=='i')
u=p;
v=q;
else
u=rdct(p);
v=rdct(rdct(q)');
end
|
If $f$ is holomorphic on an open set $A$ and $z_0 \in A$, then the residue of $f(z)/(z-z_0)^{n+1}$ at $z_0$ is equal to $f^{(n)}(z_0)/n!$. |
module Starfield
-- Background starfield effect
import Effects
import Effect.SDL
import Effect.StdIO
import Effect.State
import Rnd
%access public export
data Starfield : Type where -- for labelling state
StarEff : Type -> Type
StarEff t = { [Starfield ::: STATE (List (Int, Int)), RND] } Eff t
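-- Create n stars at random positions on the 640x480 screen.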
initStarfield : List (Int, Int) -> Nat -> StarEff ()
initStarfield acc Z = Starfield :- put acc
initStarfield acc n
= do x <- rndInt 0 639
y <- rndInt 0 479
initStarfield ((x, y) :: acc) (minus n 1)
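-- Move every star down one pixel; stars that leave the bottom of the screen
-- re-enter at the top in a random column.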
updateStarfield : StarEff ()
updateStarfield = do xs <- Starfield :- get
xs' <- upd [] xs
Starfield :- put xs'
where
upd : List (Int, Int) -> List (Int, Int) -> { [RND] } Eff (List (Int, Int))
upd acc [] = pure acc
upd acc ((x, y) :: xs)
= if (y > 479) then do
x <- rndInt 0 639
upd ((x, 0) :: acc) xs
else
upd ((x, y+1) :: acc) xs
drawStarfield : List (Int, Int) -> { [SDL_ON] } Eff ()
drawStarfield [] = pure ()
drawStarfield ((x, y) :: xs) = do line white x y x y
drawStarfield xs
|
(* Author: Joshua Schneider, ETH Zurich *)
section \<open>Formalisation of idiomatic terms and lifting\<close>
subsection \<open>Immediate joinability under a relation\<close>
theory Joinable
imports Main
begin
subsubsection \<open>Definition and basic properties\<close>
definition joinable :: "('a \<times> 'b) set \<Rightarrow> ('a \<times> 'a) set"
where "joinable R = {(x, y). \<exists>z. (x, z) \<in> R \<and> (y, z) \<in> R}"
lemma joinable_simp: "(x, y) \<in> joinable R \<longleftrightarrow> (\<exists>z. (x, z) \<in> R \<and> (y, z) \<in> R)"
unfolding joinable_def by simp
lemma joinableI: "(x, z) \<in> R \<Longrightarrow> (y, z) \<in> R \<Longrightarrow> (x, y) \<in> joinable R"
unfolding joinable_simp by blast
lemma joinableD: "(x, y) \<in> joinable R \<Longrightarrow> \<exists>z. (x, z) \<in> R \<and> (y, z) \<in> R"
unfolding joinable_simp .
lemma joinableE:
assumes "(x, y) \<in> joinable R"
obtains z where "(x, z) \<in> R" and "(y, z) \<in> R"
using assms unfolding joinable_simp by blast
lemma refl_on_joinable: "refl_on {x. \<exists>y. (x, y) \<in> R} (joinable R)"
by (auto intro!: refl_onI simp only: joinable_simp)
lemma refl_joinable_iff: "(\<forall>x. \<exists>y. (x, y) \<in> R) = refl (joinable R)"
by (auto intro!: refl_onI dest: refl_onD simp add: joinable_simp)
lemma refl_joinable: "refl R \<Longrightarrow> refl (joinable R)"
using refl_joinable_iff by (blast dest: refl_onD)
lemma joinable_refl: "refl R \<Longrightarrow> (x, x) \<in> joinable R"
using refl_joinable by (blast dest: refl_onD)
lemma sym_joinable: "sym (joinable R)"
by (auto intro!: symI simp only: joinable_simp)
lemma joinable_sym: "(x, y) \<in> joinable R \<Longrightarrow> (y, x) \<in> joinable R"
using sym_joinable by (rule symD)
lemma joinable_mono: "R \<subseteq> S \<Longrightarrow> joinable R \<subseteq> joinable S"
by (rule subrelI) (auto simp only: joinable_simp)
lemma refl_le_joinable:
assumes "refl R"
shows "R \<subseteq> joinable R"
proof (rule subrelI)
fix x y
assume "(x, y) \<in> R"
moreover from \<open>refl R\<close> have "(y, y) \<in> R" by (blast dest: refl_onD)
ultimately show "(x, y) \<in> joinable R" by (rule joinableI)
qed
lemma joinable_subst:
assumes R_subst: "\<And>x y. (x, y) \<in> R \<Longrightarrow> (P x, P y) \<in> R"
assumes joinable: "(x, y) \<in> joinable R"
shows "(P x, P y) \<in> joinable R"
proof -
from joinable obtain z where xz: "(x, z) \<in> R" and yz: "(y, z) \<in> R" by (rule joinableE)
from R_subst xz have "(P x, P z) \<in> R" .
moreover from R_subst yz have "(P y, P z) \<in> R" .
ultimately show ?thesis by (rule joinableI)
qed
subsubsection \<open>Confluence\<close>
definition confluent :: "'a rel \<Rightarrow> bool"
where "confluent R \<longleftrightarrow> (\<forall>x y y'. (x, y) \<in> R \<and> (x, y') \<in> R \<longrightarrow> (y, y') \<in> joinable R)"
lemma confluentI:
"(\<And>x y y'. (x, y) \<in> R \<Longrightarrow> (x, y') \<in> R \<Longrightarrow> \<exists>z. (y, z) \<in> R \<and> (y', z) \<in> R) \<Longrightarrow> confluent R"
unfolding confluent_def by (blast intro: joinableI)
lemma confluentD:
"confluent R \<Longrightarrow> (x, y) \<in> R \<Longrightarrow> (x,y') \<in> R \<Longrightarrow> (y, y') \<in> joinable R"
unfolding confluent_def by blast
lemma confluentE:
assumes "confluent R" and "(x, y) \<in> R" and "(x, y') \<in> R"
obtains z where "(y, z) \<in> R" and "(y', z) \<in> R"
using assms unfolding confluent_def by (blast elim: joinableE)
lemma trans_joinable:
assumes "trans R" and "confluent R"
shows "trans (joinable R)"
proof (rule transI)
fix x y z
assume "(x, y) \<in> joinable R"
then obtain u where xu: "(x, u) \<in> R" and yu: "(y, u) \<in> R" by (rule joinableE)
assume "(y, z) \<in> joinable R"
then obtain v where yv: "(y, v) \<in> R" and zv: "(z, v) \<in> R" by (rule joinableE)
from yu yv \<open>confluent R\<close> obtain w where uw: "(u, w) \<in> R" and vw: "(v, w) \<in> R"
by (blast elim: confluentE)
from xu uw \<open>trans R\<close> have "(x, w) \<in> R" by (blast elim: transE)
moreover from zv vw \<open>trans R\<close> have "(z, w) \<in> R" by (blast elim: transE)
ultimately show "(x, z) \<in> joinable R" by (rule joinableI)
qed
subsubsection \<open>Relation to reflexive transitive symmetric closure\<close>
theorem joinable_eq_rtscl:
assumes "confluent (R\<^sup>*)"
shows "joinable (R\<^sup>*) = (R \<union> R\<inverse>)\<^sup>*"
proof
show "joinable (R\<^sup>*) \<subseteq> (R \<union> R\<inverse>)\<^sup>*" using joinable_le_rtscl .
next
show "joinable (R\<^sup>*) \<supseteq> (R \<union> R\<inverse>)\<^sup>*" proof (rule subrelI)
fix x y
assume "(x, y) \<in> (R \<union> R\<inverse>)\<^sup>*"
thus "(x, y) \<in> joinable (R\<^sup>*)" proof (induction set: rtrancl)
case base
show "(x, x) \<in> joinable (R\<^sup>*)" using joinable_refl refl_rtrancl .
next
case (step y z)
have "R \<subseteq> joinable (R\<^sup>*)" using refl_le_joinable refl_rtrancl by fast
with \<open>(y, z) \<in> R \<union> R\<inverse>\<close> have "(y, z) \<in> joinable (R\<^sup>*)" using joinable_sym by fast
with \<open>(x, y) \<in> joinable (R\<^sup>*)\<close> show "(x, z) \<in> joinable (R\<^sup>*)"
using trans_joinable trans_rtrancl \<open>confluent (R\<^sup>*)\<close> by (blast dest: transD)
qed
qed
qed
subsubsection \<open>Predicate version\<close>
definition joinablep :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
where "joinablep P x y \<longleftrightarrow> (\<exists>z. P x z \<and> P y z)"
lemma joinablep_joinable[pred_set_conv]:
"joinablep (\<lambda>x y. (x, y) \<in> R) = (\<lambda>x y. (x, y) \<in> joinable R)"
by (fastforce simp only: joinablep_def joinable_simp)
lemma reflp_joinablep: "reflp P \<Longrightarrow> reflp (joinablep P)"
by (blast intro: reflpI joinable_refl[to_pred] refl_onI[to_pred] dest: reflpD)
lemma joinablep_refl: "reflp P \<Longrightarrow> joinablep P x x"
using reflp_joinablep by (rule reflpD)
lemma reflp_le_joinablep: "reflp P \<Longrightarrow> P \<le> joinablep P"
by (blast intro!: refl_le_joinable[to_pred] refl_onI[to_pred] dest: reflpD)
end
|
/-
Copyright (c) 2020 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/
import data.polynomial.degree.trailing_degree
import data.polynomial.erase_lead
import data.polynomial.eval
/-!
# Reverse of a univariate polynomial
The main definition is `reverse`. Applying `reverse` to a polynomial `f : R[X]` produces
the polynomial with a reversed list of coefficients, equivalent to `X^f.nat_degree * f(1/X)`.
The main result is that `reverse (f * g) = reverse f * reverse g`, provided the leading
coefficients of `f` and `g` do not multiply to zero.
-/
namespace polynomial
open polynomial finsupp finset
open_locale classical polynomial
section semiring
variables {R : Type*} [semiring R] {f : R[X]}
/-- If `i ≤ N`, then `rev_at_fun N i` returns `N - i`, otherwise it returns `i`.
This is the map used by the embedding `rev_at`.
-/
def rev_at_fun (N i : ℕ) : ℕ := ite (i ≤ N) (N-i) i
lemma rev_at_fun_invol {N i : ℕ} : rev_at_fun N (rev_at_fun N i) = i :=
begin
unfold rev_at_fun,
split_ifs with h j,
{ exact tsub_tsub_cancel_of_le h, },
{ exfalso,
apply j,
exact nat.sub_le N i, },
{ refl, },
end
lemma rev_at_fun_inj {N : ℕ} : function.injective (rev_at_fun N) :=
begin
intros a b hab,
rw [← @rev_at_fun_invol N a, hab, rev_at_fun_invol],
end
/-- If `i ≤ N`, then `rev_at N i` returns `N - i`, otherwise it returns `i`.
Essentially, this embedding is only used for `i ≤ N`.
The advantage of `rev_at N i` over `N - i` is that `rev_at` is an involution.
-/
def rev_at (N : ℕ) : function.embedding ℕ ℕ :=
{ to_fun := λ i , (ite (i ≤ N) (N-i) i),
inj' := rev_at_fun_inj }
/-- We prefer to use the bundled `rev_at` over unbundled `rev_at_fun`. -/
@[simp] lemma rev_at_fun_eq (N i : ℕ) : rev_at_fun N i = rev_at N i := rfl
@[simp] lemma rev_at_invol {N i : ℕ} : (rev_at N) (rev_at N i) = i :=
rev_at_fun_invol
@[simp] lemma rev_at_le {N i : ℕ} (H : i ≤ N) : rev_at N i = N - i :=
if_pos H
lemma rev_at_add {N O n o : ℕ} (hn : n ≤ N) (ho : o ≤ O) :
rev_at (N + O) (n + o) = rev_at N n + rev_at O o :=
begin
rcases nat.le.dest hn with ⟨n', rfl⟩,
rcases nat.le.dest ho with ⟨o', rfl⟩,
repeat { rw rev_at_le (le_add_right rfl.le) },
rw [add_assoc, add_left_comm n' o, ← add_assoc, rev_at_le (le_add_right rfl.le)],
repeat {rw add_tsub_cancel_left},
end
@[simp] lemma rev_at_zero (N : ℕ) : rev_at N 0 = N :=
by simp [rev_at]
/-- `reflect N f` is the polynomial such that `(reflect N f).coeff i = f.coeff (rev_at N i)`.
In other words, the terms with exponent `[0, ..., N]` now have exponent `[N, ..., 0]`.
In practice, `reflect` is only used when `N` is at least as large as the degree of `f`.
Eventually, it will be used with `N` exactly equal to the degree of `f`. -/
noncomputable def reflect (N : ℕ) : R[X] → R[X]
| ⟨f⟩ := ⟨finsupp.emb_domain (rev_at N) f⟩
lemma reflect_support (N : ℕ) (f : R[X]) :
(reflect N f).support = image (rev_at N) f.support :=
begin
rcases f,
ext1,
rw [reflect, mem_image, support, support, support_emb_domain, mem_map],
end
@[simp] lemma coeff_reflect (N : ℕ) (f : R[X]) (i : ℕ) :
coeff (reflect N f) i = f.coeff (rev_at N i) :=
begin
rcases f,
simp only [reflect, coeff],
calc finsupp.emb_domain (rev_at N) f i
= finsupp.emb_domain (rev_at N) f (rev_at N (rev_at N i)) : by rw rev_at_invol
... = f (rev_at N i) : finsupp.emb_domain_apply _ _ _
end
@[simp] lemma reflect_zero {N : ℕ} : reflect N (0 : R[X]) = 0 := rfl
@[simp] lemma reflect_eq_zero_iff {N : ℕ} {f : R[X]} :
reflect N (f : R[X]) = 0 ↔ f = 0 :=
by { rcases f, simp [reflect] }
@[simp] lemma reflect_add (f g : R[X]) (N : ℕ) :
reflect N (f + g) = reflect N f + reflect N g :=
by { ext, simp only [coeff_add, coeff_reflect], }
@[simp] lemma reflect_C_mul (f : R[X]) (r : R) (N : ℕ) :
reflect N (C r * f) = C r * (reflect N f) :=
by { ext, simp only [coeff_reflect, coeff_C_mul], }
@[simp] lemma reflect_C_mul_X_pow (N n : ℕ) {c : R} :
reflect N (C c * X ^ n) = C c * X ^ (rev_at N n) :=
begin
ext,
rw [reflect_C_mul, coeff_C_mul, coeff_C_mul, coeff_X_pow, coeff_reflect],
split_ifs with h j,
{ rw [h, rev_at_invol, coeff_X_pow_self], },
{ rw [not_mem_support_iff.mp],
intro a,
rw [← one_mul (X ^ n), ← C_1] at a,
apply h,
rw [← (mem_support_C_mul_X_pow a), rev_at_invol], },
end
@[simp] lemma reflect_C (r : R) (N : ℕ) : reflect N (C r) = C r * X ^ N :=
by conv_lhs { rw [← mul_one (C r), ← pow_zero X, reflect_C_mul_X_pow, rev_at_zero] }
@[simp] lemma reflect_monomial (N n : ℕ) : reflect N ((X : R[X]) ^ n) = X ^ (rev_at N n) :=
by rw [← one_mul (X ^ n), ← one_mul (X ^ (rev_at N n)), ← C_1, reflect_C_mul_X_pow]
lemma reflect_mul_induction (cf cg : ℕ) :
∀ N O : ℕ, ∀ f g : R[X],
f.support.card ≤ cf.succ → g.support.card ≤ cg.succ → f.nat_degree ≤ N → g.nat_degree ≤ O →
(reflect (N + O) (f * g)) = (reflect N f) * (reflect O g) :=
begin
induction cf with cf hcf,
--first induction (left): base case
{ induction cg with cg hcg,
-- second induction (right): base case
{ intros N O f g Cf Cg Nf Og,
rw [← C_mul_X_pow_eq_self Cf, ← C_mul_X_pow_eq_self Cg],
simp_rw [mul_assoc, X_pow_mul, mul_assoc, ← pow_add (X : R[X]), reflect_C_mul,
reflect_monomial, add_comm, rev_at_add Nf Og, mul_assoc, X_pow_mul, mul_assoc,
← pow_add (X : R[X]), add_comm], },
-- second induction (right): induction step
{ intros N O f g Cf Cg Nf Og,
by_cases g0 : g = 0,
{ rw [g0, reflect_zero, mul_zero, mul_zero, reflect_zero], },
rw [← erase_lead_add_C_mul_X_pow g, mul_add, reflect_add, reflect_add, mul_add, hcg, hcg];
try { assumption },
{ exact le_add_left card_support_C_mul_X_pow_le_one },
{ exact (le_trans (nat_degree_C_mul_X_pow_le g.leading_coeff g.nat_degree) Og) },
{ exact nat.lt_succ_iff.mp (gt_of_ge_of_gt Cg (erase_lead_support_card_lt g0)) },
{ exact le_trans erase_lead_nat_degree_le_aux Og } } },
--first induction (left): induction step
{ intros N O f g Cf Cg Nf Og,
by_cases f0 : f = 0,
{ rw [f0, reflect_zero, zero_mul, zero_mul, reflect_zero], },
rw [← erase_lead_add_C_mul_X_pow f, add_mul, reflect_add, reflect_add, add_mul, hcf, hcf];
try { assumption },
{ exact le_add_left card_support_C_mul_X_pow_le_one },
{ exact (le_trans (nat_degree_C_mul_X_pow_le f.leading_coeff f.nat_degree) Nf) },
{ exact nat.lt_succ_iff.mp (gt_of_ge_of_gt Cf (erase_lead_support_card_lt f0)) },
{ exact (le_trans erase_lead_nat_degree_le_aux Nf) } }
end
@[simp] theorem reflect_mul
(f g : R[X]) {F G : ℕ} (Ff : f.nat_degree ≤ F) (Gg : g.nat_degree ≤ G) :
reflect (F + G) (f * g) = reflect F f * reflect G g :=
reflect_mul_induction _ _ F G f g f.support.card.le_succ g.support.card.le_succ Ff Gg
section eval₂
variables {S : Type*} [comm_semiring S]
lemma eval₂_reflect_mul_pow (i : R →+* S) (x : S) [invertible x] (N : ℕ) (f : R[X])
(hf : f.nat_degree ≤ N) : eval₂ i (⅟x) (reflect N f) * x ^ N = eval₂ i x f :=
begin
refine induction_with_nat_degree_le (λ f, eval₂ i (⅟x) (reflect N f) * x ^ N = eval₂ i x f)
_ _ _ _ f hf,
{ simp },
{ intros n r hr0 hnN,
simp only [rev_at_le hnN, reflect_C_mul_X_pow, eval₂_X_pow, eval₂_C, eval₂_mul],
conv in (x ^ N) { rw [← nat.sub_add_cancel hnN] },
rw [pow_add, ← mul_assoc, mul_assoc (i r), ← mul_pow, inv_of_mul_self, one_pow, mul_one] },
{ intros,
simp [*, add_mul] }
end
lemma eval₂_reflect_eq_zero_iff (i : R →+* S) (x : S) [invertible x] (N : ℕ) (f : R[X])
(hf : f.nat_degree ≤ N) : eval₂ i (⅟x) (reflect N f) = 0 ↔ eval₂ i x f = 0 :=
begin
conv_rhs { rw [← eval₂_reflect_mul_pow i x N f hf] },
split,
{ intro h, rw [h, zero_mul] },
{ intro h, rw [← mul_one (eval₂ i (⅟x) _), ← one_pow N, ← mul_inv_of_self x,
mul_pow, ← mul_assoc, h, zero_mul] }
end
end eval₂
/-- The reverse of a polynomial f is the polynomial obtained by "reading f backwards".
Even though this is not the actual definition, reverse f = f (1/X) * X ^ f.nat_degree. -/
noncomputable def reverse (f : R[X]) : R[X] := reflect f.nat_degree f
lemma coeff_reverse (f : R[X]) (n : ℕ) :
f.reverse.coeff n = f.coeff (rev_at f.nat_degree n) :=
by rw [reverse, coeff_reflect]
@[simp] lemma coeff_zero_reverse (f : R[X]) : coeff (reverse f) 0 = leading_coeff f :=
by rw [coeff_reverse, rev_at_le (zero_le f.nat_degree), tsub_zero, leading_coeff]
@[simp] lemma reverse_zero : reverse (0 : R[X]) = 0 := rfl
@[simp] lemma reverse_eq_zero : f.reverse = 0 ↔ f = 0 :=
by simp [reverse]
lemma reverse_nat_degree_le (f : R[X]) : f.reverse.nat_degree ≤ f.nat_degree :=
begin
rw [nat_degree_le_iff_degree_le, degree_le_iff_coeff_zero],
intros n hn,
rw with_bot.coe_lt_coe at hn,
rw [coeff_reverse, rev_at, function.embedding.coe_fn_mk,
if_neg (not_le_of_gt hn), coeff_eq_zero_of_nat_degree_lt hn],
end
lemma nat_degree_eq_reverse_nat_degree_add_nat_trailing_degree (f : R[X]) :
f.nat_degree = f.reverse.nat_degree + f.nat_trailing_degree :=
begin
by_cases hf : f = 0,
{ rw [hf, reverse_zero, nat_degree_zero, nat_trailing_degree_zero] },
apply le_antisymm,
{ refine tsub_le_iff_right.mp _,
apply le_nat_degree_of_ne_zero,
rw [reverse, coeff_reflect, ←rev_at_le f.nat_trailing_degree_le_nat_degree, rev_at_invol],
exact trailing_coeff_nonzero_iff_nonzero.mpr hf },
{ rw ← le_tsub_iff_left f.reverse_nat_degree_le,
apply nat_trailing_degree_le_of_ne_zero,
have key := mt leading_coeff_eq_zero.mp (mt reverse_eq_zero.mp hf),
rwa [leading_coeff, coeff_reverse, rev_at_le f.reverse_nat_degree_le] at key },
end
lemma reverse_nat_degree (f : R[X]) :
f.reverse.nat_degree = f.nat_degree - f.nat_trailing_degree :=
by rw [f.nat_degree_eq_reverse_nat_degree_add_nat_trailing_degree, add_tsub_cancel_right]
lemma reverse_leading_coeff (f : R[X]) : f.reverse.leading_coeff = f.trailing_coeff :=
by rw [leading_coeff, reverse_nat_degree, ←rev_at_le f.nat_trailing_degree_le_nat_degree,
coeff_reverse, rev_at_invol, trailing_coeff]
lemma reverse_nat_trailing_degree (f : R[X]) : f.reverse.nat_trailing_degree = 0 :=
begin
by_cases hf : f = 0,
{ rw [hf, reverse_zero, nat_trailing_degree_zero] },
{ rw ← nat.le_zero_iff,
apply nat_trailing_degree_le_of_ne_zero,
rw [coeff_zero_reverse],
exact mt leading_coeff_eq_zero.mp hf },
end
theorem reverse_mul {f g : R[X]} (fg : f.leading_coeff * g.leading_coeff ≠ 0) :
reverse (f * g) = reverse f * reverse g :=
begin
unfold reverse,
rw [nat_degree_mul' fg, reflect_mul f g rfl.le rfl.le],
end
@[simp] lemma reverse_mul_of_domain {R : Type*} [ring R] [is_domain R] (f g : R[X]) :
reverse (f * g) = reverse f * reverse g :=
begin
by_cases f0 : f=0,
{ simp only [f0, zero_mul, reverse_zero], },
by_cases g0 : g=0,
{ rw [g0, mul_zero, reverse_zero, mul_zero], },
simp [reverse_mul, *],
end
lemma trailing_coeff_mul {R : Type*} [ring R] [is_domain R] (p q : R[X]) :
(p * q).trailing_coeff = p.trailing_coeff * q.trailing_coeff :=
by rw [←reverse_leading_coeff, reverse_mul_of_domain, leading_coeff_mul,
reverse_leading_coeff, reverse_leading_coeff]
@[simp] lemma coeff_one_reverse (f : R[X]) : coeff (reverse f) 1 = next_coeff f :=
begin
rw [coeff_reverse, next_coeff],
split_ifs with hf,
{ have : coeff f 1 = 0 := coeff_eq_zero_of_nat_degree_lt (by simp only [hf, zero_lt_one]),
simp [*, rev_at] },
{ rw rev_at_le,
exact nat.succ_le_iff.2 (pos_iff_ne_zero.2 hf) }
end
section eval₂
variables {S : Type*} [comm_semiring S]
lemma eval₂_reverse_mul_pow (i : R →+* S) (x : S) [invertible x] (f : R[X]) :
eval₂ i (⅟x) (reverse f) * x ^ f.nat_degree = eval₂ i x f :=
eval₂_reflect_mul_pow i _ _ f le_rfl
@[simp] lemma eval₂_reverse_eq_zero_iff (i : R →+* S) (x : S) [invertible x] (f : R[X]) :
eval₂ i (⅟x) (reverse f) = 0 ↔ eval₂ i x f = 0 :=
eval₂_reflect_eq_zero_iff i x _ _ le_rfl
end eval₂
end semiring
section ring
variables {R : Type*} [ring R]
@[simp] lemma reflect_neg (f : R[X]) (N : ℕ) :
reflect N (- f) = - reflect N f :=
by rw [neg_eq_neg_one_mul, ←C_1, ←C_neg, reflect_C_mul, C_neg, C_1, ←neg_eq_neg_one_mul]
@[simp] lemma reflect_sub (f g : R[X]) (N : ℕ) :
reflect N (f - g) = reflect N f - reflect N g :=
by rw [sub_eq_add_neg, sub_eq_add_neg, reflect_add, reflect_neg]
@[simp] lemma reverse_neg (f : R[X]) :
reverse (- f) = - reverse f :=
by rw [reverse, reverse, reflect_neg, nat_degree_neg]
end ring
end polynomial
|
import Data.Nat
p1_equals_s : (1+) = S
p1_equals_s = Refl
funext : {p, q : a -> b} -> ((x : a) -> p x = q x) -> p = q
funext _ = believe_me (Z = Z)
onep_equals_s : (+1) = S
onep_equals_s = funext \x => plusCommutative x 1
|
"""Locate maximum value."""
import numpy as np
def get_max_index(a):
"""Return the index of the maximum value in given 1D array."""
return a.argmax()
def test_run():
a = np.array([9, 6, 2, 3, 12, 14, 7, 10], dtype=np.int32) # 32-bit integer array
print "Array:", a
# Find the maximum and its index in array
print "Maximum value:", a.max()
print "Index of max.:", get_max_index(a)
if __name__ == "__main__":
test_run()
|
[STATEMENT]
lemma reachable_from_root: "v \<in> verts T \<Longrightarrow> root \<rightarrow>\<^sup>*\<^bsub>T\<^esub> v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<in> verts T \<Longrightarrow> root \<rightarrow>\<^sup>*\<^bsub>T\<^esub> v
[PROOF STEP]
using unique_awalk reachable_awalkI
[PROOF STATE]
proof (prove)
using this:
?v \<in> verts T \<Longrightarrow> \<exists>!p. awalk root p ?v
awalk ?u ?p ?v \<Longrightarrow> ?u \<rightarrow>\<^sup>*\<^bsub>T\<^esub> ?v
goal (1 subgoal):
1. v \<in> verts T \<Longrightarrow> root \<rightarrow>\<^sup>*\<^bsub>T\<^esub> v
[PROOF STEP]
by blast |
function f_x = ParFor2(in1)
%PARFOR2
% F_X = PARFOR2(IN1)
% This function was generated by the Symbolic Math Toolbox version 8.2.
% 20-Sep-2019 09:35:34
u = in1(:,1);
ux = in1(:,4);
uxx = in1(:,5);
uxxx = in1(:,6);
f_x = (u.*1.034234549167718e16+ux.*6.60584588804096e15+uxx.*1.215161942676603e17-uxxx.*4.609471812108288e15-u.*ux.*5.122148099342336e15+u.*uxx.*5.033451929390285e16+u.*uxxx.*5.000975400599552e15-2.603066969792512e15)./(u.*3.126164642771763e16+4.756339916996608e16);
|