= = = Grammar guides = = =
|
SUBROUTINE OUTPUT( STEP, CONC )
C*************************************************************************
C
C FUNCTION: Writes one time step of gridded concentration data to the
C           output file in SAS or generic ASCII format
C
C PRECONDITIONS: None
C
C KEY SUBROUTINES/FUNCTIONS CALLED: JUNIT, GETDTTIME, NEXTIME, DT2STR,
C                                   NAMEVAL, GET_TIMOUT
C
C REVISION HISTORY:
C Created by Jerry Gipson, August, 2000
C Modified by C. Hogrefe and C. Nolte to use M3UTILIO and
C Fortran intrinsic TRIM(), June 2017
C
C*************************************************************************
USE M3UTILIO
USE M3FILES
USE ENV_VARS
USE GRID_DATA
USE TIME_STEP
IMPLICIT NONE
C..ARGUMENTS:
INTEGER STEP ! Step number
REAL CONC( M3GRID % NCOLS, M3GRID % NROWS, M3GRID % NLAYS, NVAR )
C..PARAMETERS: NONE
C..SAVED LOCAL VARIABLES:
INTEGER IOUT ! Output file unit number
SAVE IOUT
LOGICAL LFIRST ! Flag for first call
SAVE LFIRST
INTEGER, SAVE :: NCELLS ! No. of cells
CHARACTER*11, ALLOCATABLE, SAVE :: DATEOUT( : )
CHARACTER*10, ALLOCATABLE, SAVE :: TIMEOUT( : )
C..SCRATCH LOCAL VARIABLES:
CHARACTER*24 CRDATE ! Create date
CHARACTER*16 PNAME ! Program Name
CHARACTER*80 MSG ! Error message
CHARACTER*256 RET_VAL ! Returned value of environment variable
INTEGER IND ! Index
INTEGER JDATE ! Create date YYYYDDD
INTEGER JTIME ! Create time HHMMSS
INTEGER C, R, L, V ! Loop indices
INTEGER NFL
REAL MAXPEAK
C**********************************************************************
DATA PNAME / 'OUTPUT' /
DATA LFIRST / .TRUE. /
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
c Write header info on first call
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
IF( LFIRST ) THEN
IOUT = JUNIT()
OPEN( UNIT = IOUT, FILE = OUT_FNAME )
CALL GETDTTIME( JDATE, JTIME )
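C..Shift the creation time from GMT to EST (-5 hours); format 92120 below
C..tags the printed date with 'EST'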
CALL NEXTIME( JDATE, JTIME, -050000 )
CRDATE = DT2STR( JDATE, JTIME )
DO NFL = 1, N_M3FILES
CALL NAMEVAL( M3_FLNAME( NFL ), RET_VAL )
! WRITE( IOUT, 92000) NFL, RET_VAL ( 1 : TRIMLEN( RET_VAL ) )
WRITE( IOUT, 92000) NFL, TRIM( RET_VAL )
ENDDO
WRITE( IOUT, 92060) '1'
! WRITE( IOUT, 92080) TIME_CONV ( 1 : TRIMLEN( TIME_CONV) )
WRITE( IOUT, 92080) TRIM( TIME_CONV )
WRITE( IOUT, 92120) CRDATE( 1 : 8 ), CRDATE( 9 : 24 )
IF( OUTFORMAT .EQ. 'SAS' ) THEN
WRITE( IOUT, 92140) ( VNAME( V )( 1 : 12) , V = 1, NVAR )
ELSE
WRITE( IOUT, 92160) ( VNAME( V )( 1 : 12) , V = 1, NVAR )
ENDIF
ALLOCATE ( DATEOUT( NFLSTEPS ) )
ALLOCATE ( TIMEOUT( NFLSTEPS ) )
CALL GET_TIMOUT( DATEOUT, TIMEOUT )
LFIRST = .FALSE.
ENDIF
DO L = LOLEV, HILEV
DO C = LOCOL, HICOL
DO R = LOROW, HIROW
IF( OUTFORMAT .EQ. 'SAS' ) THEN
WRITE( IOUT, 92200 ) DATEOUT( STEP ), TIMEOUT( STEP ),
& C, R, L,
& ( CONC( C, R, L, V ), V = 1, NVAR )
ELSE
WRITE( IOUT, 92300 ) DATEOUT( STEP )( 1 : 7 ),
& TIMEOUT( STEP )( 1 : 6 ),
& C, R, L, ( CONC( C, R, L, V ), V = 1, NVAR )
ENDIF
ENDDO
ENDDO
ENDDO
RETURN
92000 FORMAT( '# INPUT M3 FILE ', I2, ': ', A )
92060 FORMAT( '# AVERAGING TIME: ', A2 )
92080 FORMAT( '# OUTPUT TIME CONVENTION: ', A )
92120 FORMAT( '# CREATION DATE: ', A , ' EST', A )
92140 FORMAT( ' DATE TIME COL ROW LV', 3X,120( A12) )
92160 FORMAT( ' DATE TIME COL ROW LV', 3X,120( A14) )
92200 FORMAT( A11, 1X, A10, 1X, I3, 1X, I3, 1X, I2, 1X, 120( 1PE14.4 ) )
92300 FORMAT( A7, 1X, A6, 1X, I3, 1X, I3, 1X, I2, 1X, 120( 1PE14.4 ) )
END
|
// Do NOT add anything to this file
// This header from boost takes ages to compile, so we make sure it is compiled
// only once (here)
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MAIN
#include <boost/test/unit_test.hpp>
|
\section{Script Constructors and Evaluation}
\label{sec:mps-lang}
\begin{figure}[htb]
\begin{align*}
\fun{validateScript} & \in\Script\to\Tx\to\Bool \\
\fun{validateScript} & ~s~\var{tx}~=
\begin{cases}
\fun{evalMultiSigScript}~s~\var{vhks} & \text{if}~s \in\ScriptNI \\
\fun{evalFPS}~s~\var{vhks}~\var{txb} & \text{if}~s \in\ScriptMPS \\
\mathsf{False} & \text{otherwise}
\end{cases} \\
& ~~~~\where \var{vhks}\leteq \{\fun{hashKey}~vk \vert
vk \in \dom(\fun{txwitsVKey}~\var{tx})\}
\end{align*}
\caption{Script Validation}
\label{fig:functions-validate}
\end{figure}
We have updated the
$\fun{validateScript}$ function to allow for the validation of both
multi-signature scripts and minting policy scripts, calling the appropriate
evaluator for each type of script; see Figure~\ref{fig:functions-validate}.
\begin{note}
This appendix will change if we change anything about the language; it will be
moved into the main part once the language is more solidified.
\end{note}
The arguments passed to the $\fun{validateScript}$ function include all those
needed for FPS and MSig script evaluation:
\begin{itemize}
\item The script getting evaluated
\item The set of key hashes (needed to use MSig scripts as FPS scripts)
\item The transaction body
\end{itemize}
Because of the extra argument (the $\TxBody$), we must also modify the call to
this function within the UTXOW rule, passing it the body of the signal
transaction.
The semantics of the FPS language are specified in Figure~\ref{fig:defs:tx-mc-eval}.
\begin{figure*}[htb]
\emph{FPS Script Constructor Types}
\begin{align*}
& \type{evalFPS} & \in\ScriptMPS\to \powerset{\KeyHash}\to\TxBody\to\Bool & \\
& \text{The type of the FPS script evaluator} \\~\\
%
& \type{JustMSig} & \in \ScriptMSig \to \ScriptMPS & \\
%
& \type{Interval} & \in (\Slot^? \times \Slot^?) \to \ScriptMPS &\\
%
& \type{AND} & \in \ScriptMPS \to \ScriptMPS \to \ScriptMPS & \\
%
& \type{OR} & \in \ScriptMPS \to \ScriptMPS \to \ScriptMPS & \\
\end{align*}
%
\emph{FPS Script Evaluation}
\begin{align*}
& \fun{evalFPS} ~(\type{JustMSig}~s)~\var{vhks}~\var{txb} \\
&~~~~ =~ \fun{evalMultiSigScript}~s~\var{vhks} \\
& \text {checks the msig script}\\~\\
%
& \fun{evalFPS} ~(\type{Interval}~\var{(i_s, i_f)})~\var{vhks}~\var{txb} \\
&~~~~ =~
\begin{cases}
\True & (i_s = \Nothing)~\wedge~(i_f = \Nothing) \\
(i_f' \neq \Nothing) \wedge (i_f'~\leq~i_f) & (i_s = \Nothing)~\wedge~(i_f \neq \Nothing) \\
(i_s' \neq \Nothing) \wedge (i_s~\leq~i_s') & (i_s \neq \Nothing)~\wedge~(i_f = \Nothing)\\
(i_s~\leq~i_s') \wedge (i_f'~\leq~i_f) & (i_s \neq \Nothing)~\wedge~(i_f \neq \Nothing)
\end{cases} \\
&~~~~~~~ \where \\
&~~~~~~~~~~~ (i_s', i_f') = \fun{txvld}~\var{txb} \\
& \text {checks that the tx validity interval is within the interval specified by the script}\\~\\
%
& \fun{evalFPS} ~(\type{AND}~s1~s2)~\var{vhks}~\var{txb}\\
&~~~~ =~ (\fun{evalFPS}~s1~\var{vhks}~\var{txb}) \wedge (\fun{evalFPS}~s2~\var{vhks}~\var{txb}) \\
& \text {checks that both scripts validate on the given inputs} \\~\\
%
& \fun{evalFPS} ~(\type{OR}~s1~s2)~\var{vhks}~\var{txb}\\
&~~~~ =~ (\fun{evalFPS}~s1~\var{vhks}~\var{txb}) \vee (\fun{evalFPS}~s2~\var{vhks}~\var{txb}) \\
& \text{checks that one of the scripts validates on the given inputs}
\end{align*}
\caption{FPS Script Constructor Types and Evaluation}
\label{fig:defs:tx-mc-eval}
\end{figure*}
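As a concrete illustration (ours, not part of the formal definition), consider
the script $s = \type{Interval}~(2, 10)$ applied to a transaction body
$\var{txb}$ whose validity interval is $\fun{txvld}~\var{txb} = (3, 8)$. Both
endpoints are set, so the fourth case of the evaluator applies:
\begin{align*}
\fun{evalFPS}~(\type{Interval}~(2, 10))~\var{vhks}~\var{txb}
~=~ (2 \leq 3) \wedge (8 \leq 10) ~=~ \True
\end{align*}
Had the transaction's validity interval extended past slot $10$ (or started
before slot $2$), the script would have evaluated to $\mathsf{False}$.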
% The Figures~\ref{fig:defs:tx-mc-eval},~\ref{fig:defs:tx-mc-eval-2},
% and~\ref{fig:whitelist-example} give
% possible constructors of the FPS language.
%
% %% \begin{note}
% %% sort out the constructors
% %% \end{note}
%
% \begin{figure*}[htb]
% \begin{align*}
% & \fun{evalFPS} \in\ScriptMPS\to\PolicyID\to\Slot\to\powerset\KeyHash \\
% &~~~~\to\TxBody\to\UTxO \to\Bool \\
% & \text{UTxO is only for the outputs THIS tx is spending, not global UTxO, i.e.} \\
% & \text{when called,}~\var{spentouts}~=~(\fun{txins}~\var{txb}) ~\restrictdom~\var{utxo} \\~\\
% %
% & \fun{evalFPS} ~(\type{JustMSig}~s)~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts} \\
% &~~~~ =~ \fun{evalMultiSigScript}~s~\var{vhks} \\
% & \text {checks the msig script}\\~\\
% %
% & \fun{evalFPS}
% ~\type{DoMint}~\var{pid}~ \var{slot}~\var{vhks} ~\var{txb}~\var{spentouts} \\
% &~~~~ =~ \var{pid} \notin \dom~(\fun{mint}~\var{txb}) \\
% & \text {checks that script hash of this script is not an asset ID being minted by tx} \\~\\
% %
% & \fun{evalFPS}
% ~\type{SignedByPIDToken}~\var{pid}~ \var{slot}~\var{vhks} ~\var{txb}~\var{spentouts} \\
% &~~~~ =~ \exists~t\mapsto ~\_~\in~ \fun{range}~(\var{pid}~ \restrictdom~(\fun{ubalance}~\var{spentouts})) ~:~ t~\in~\var{vhks} \\
% & \text{checks that tx is signed by a key whose hash is the name of a token in this asset}
% \\~\\
% & \fun{evalFPS}
% ~(\type{SpendsCur}~\var{pid'})~\var{pid}~ \var{slot}~\var{vhks} ~\var{txb}~\var{spentouts} \\
% &~~~~ =~ (\var{pid'}~\neq~\Nothing ~\wedge ~\var{pid'}~\in~ \dom~(\fun{ubalance}~\var{spentouts}))\\
% &~~~~~~ \vee (\var{pid'}~=~\Nothing ~\wedge ~\var{pid}~\in~ \dom~(\fun{ubalance}~\var{spentouts})) \\
% & \text{checks that this transaction spends asset pid' OR itself if}~\var{pid'}~=~\Nothing
% \\~\\
% &\fun{evalFPS}~(\type{Not}~s)~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}
% \\
% &~~~~ = \neg ~\fun{evalFPS}~s~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}\\~\\
% %
% &\fun{evalFPS}~(\type{RequireAll}~ls)~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}
% \\
% &~~~~ = \forall ~s'~ \in~ ls~:~\fun{evalFPS}~s'~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}\\~\\
% %
% &\fun{evalFPS}~(\type{RequireOr}~ls)~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}
% \\
% &~~~~ = \exists ~s'~ \in~ ls~:~\fun{evalFPS}~s'~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}\\
% \end{align*}
% \caption{Multi-asset Script Evaluation}
% \label{fig:defs:tx-mc-eval}
% \end{figure*}
%
% \begin{figure*}[htb]
% \begin{align*}
% & \fun{evalFPS}
% ~(\type{AssetToAddress}~\var{pid'}~\var{addr})~\var{pid}~ \var{slot}~\var{vhks} ~\var{txb}~\var{spentouts} \\
% &~~~~ =~ \forall~(a, v)~\in~\fun{range}~(\fun{outs}~txb),~\\
% &~~~~~~ \var{c}~\in~\dom~v~\Rightarrow~(a~=~ \var{a'} ~\wedge~
% v~=~\var{c}~ \restrictdom~(\fun{ubalance}~(\fun{outs}~txb)) \\
% & \where \\
% & ~~~~~~~ \var{a'}~=~\fun{if}~ \var{addr}~\neq~\Nothing~\fun{then}~\var{addr}~\fun{else}~\var{(pid',pid')} \\
% & ~~~~~~~ \var{c}~=~\fun{if}~ \var{pid'}~\neq~\Nothing~\fun{then}~\var{pid'}~\fun{else}~\var{pid} \\
% & \text{checks that tx outputs any pid tokens by themselves to the specified address} \\
% & \text {the script address of the given asset when addr unspecified} \\~\\
% & \fun{evalFPS}
% ~(\type{TrancheTokens}~\var{tts}~\var{txin})~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts} \\
% &~~~~ =~(\var{pid}\mapsto\var{tts}~\in~\var{val})~ \wedge~(\var{txin}~\in~\fun{txins}~{txb}) \\
% & \text{tranche tokens is incomplete} \\~\\
% %
% & \fun{evalFPS}
% ~(\type{FreshTokens})~\var{pid}~\var{slot}~\var{vhks}
% ~\var{txb}~\var{spentouts}
% \\
% &~~~~ =~\forall~\var{pid}~ \mapsto ~tkns ~\in~ \var{val}~:~ \\
% &~~~~ \forall~t~\in~\var{tkns},~
% \fun{nameToken}~(\fun{indexof}~\var{t}~\var{tkns},~\fun{txins}~{txb})~=~t
% \end{align*}
% \caption{Multi-asset Script Evaluation, cont.}
% \label{fig:defs:tx-mc-eval-2}
% \end{figure*}
%
% \begin{figure*}[htb]
% \begin{align*}
% & \fun{whitelist} \in\ScriptMSig\to\Script \\~\\
% %
% & \type{whitelist} ~\var{msig}~ =~ \type{RequireOr}~
% (\type{RequireAll}~(\type{DoMint};~\type{JustMSig}~\var{msig});~\\
% &~~~~~~ \type{RequireAll}~(\type{AssetToAddress}~\Nothing~\Nothing ;\\
% &~~~~~~ (\type{Not}~\type{DoMint});~\type{SignedByPIDToken})) \\
% %
% & \text{msig is some MSig script containing signatures of some accreditation authority} \\
% & \text{i.e. this authority can do any minting or spending of this token} \\~\\
% %
% & (\fun{hashScript}~(\type{SpendsCur}~(\fun{hashScript}~(\type{whitelist}~\var{msig}))),~ \var{tkns}) \\
% & \text{an example of an output spending which requires to be on a whitelist made by msig authority}
% \end{align*}
% \caption{Whitelist Script Example}
% \label{fig:whitelist-example}
% \end{figure*}
|
abstract type Hensel end
mutable struct HenselCtxQadic <: Hensel
f::PolyElem{qadic}
lf::Array{PolyElem{qadic}, 1}
la::Array{PolyElem{qadic}, 1}
p::qadic
n::Int
#TODO: lift over subfields first iff poly is defined over subfield
#TODO: use flint if qadic = padic!!
function HenselCtxQadic(f::PolyElem{qadic}, lfp::Array{fq_nmod_poly, 1})
@assert sum(map(degree, lfp)) == degree(f)
Q = base_ring(f)
Qx = parent(f)
K, mK = ResidueField(Q)
i = 1
la = Array{PolyElem{qadic}, 1}()
n = length(lfp)
while i < length(lfp)
f1 = lfp[i]
f2 = lfp[i+1]
g, a, b = gcdx(f1, f2)
@assert isone(g)
push!(la, setprecision(map_coefficients(x->preimage(mK, x), a, cached = false, parent = Qx), 1))
push!(la, setprecision(map_coefficients(x->preimage(mK, x), b, cached = false, parent = Qx), 1))
push!(lfp, f1*f2)
i += 2
end
return new(f, map(x->setprecision(map_coefficients(y->preimage(mK, y), x, cached = false, parent = Qx), 1), lfp), la, uniformizer(Q), n)
end
function HenselCtxQadic(f::PolyElem{qadic})
Q = base_ring(f)
K, mK = ResidueField(Q)
fp = map_coefficients(mK, f, cached = false)
lfp = collect(keys(factor(fp).fac))
return HenselCtxQadic(f, lfp)
end
end
function Base.show(io::IO, C::HenselCtxQadic)
println(io, "Lifting tree for $(C.f), with $(C.n) factors, currently up to precision $(valuation(C.p))")
end
function lift(C::HenselCtxQadic, mx::Int = minimum(precision, coefficients(C.f)))
p = C.p
Q = parent(p)
N = valuation(p)
# @show map(precision, coefficients(C.f)), N, precision(parent(p))
#have: N need mx
if length(C.lf) == 1
C.lf[1] = C.f
return
end
ch = [mx]
while ch[end] > N
push!(ch, div(ch[end]+1, 2))
end
@vprint :PolyFactor 1 "using lifting chain $ch\n"
for k=length(ch)-1:-1:1
N2 = ch[k]
setprecision!(Q, N2+1)
p = Q(prime(Q))^ch[k+1]
i = length(C.lf)
j = i-1
p = setprecision(p, N2)
while j > 0
if i==length(C.lf)
f = setprecision(C.f, N2)
else
f = setprecision(C.lf[i], N2)
end
#formulae and names from the Flint doc
h = C.lf[j]
g = C.lf[j-1]
b = C.la[j]
a = C.la[j-1]
setprecision!(h, N2)
setprecision!(g, N2)
setprecision!(a, N2)
setprecision!(b, N2)
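# one quadratic Hensel step (names as in the Flint doc): given
#   f = g*h (mod p^N)  and  a*g + b*h = 1 (mod p^N),
# compute G, H, A, B satisfying the same relations at doubled precision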
fgh = (f-g*h)*inv(p)
G = rem(fgh*b, g)*p+g
H = rem(fgh*a, h)*p+h
t = (1-a*G-b*H)*inv(p)
B = rem(t*b, g)*p+b
A = rem(t*a, h)*p+a
if i < length(C.lf)
C.lf[i] = G*H
end
C.lf[j-1] = G
C.lf[j] = H
C.la[j-1] = A
C.la[j] = B
i -= 1
j -= 2
end
end
C.p = Q(prime(Q))^ch[1]
end
function factor(C::HenselCtxQadic)
return C.lf[1:C.n]
end
function precision(C::HenselCtxQadic)
return valuation(C.p)
end
# interface to use Bill's Z/p^k lifting code. same algo as above, but
# tighter implementation
mutable struct HenselCtxPadic <: Hensel
X::HenselCtx
f::PolyElem{padic}
function HenselCtxPadic(f::PolyElem{padic})
r = new()
r.f = f
Zx = PolynomialRing(FlintZZ, cached = false)[1]
ff = Zx()
for i=0:degree(f)
setcoeff!(ff, i, lift(coeff(f, i)))
end
r.X = HenselCtx(ff, prime(base_ring(f)))
start_lift(r.X, 1)
return r
end
end
function lift(C::HenselCtxPadic, mx::Int)
for i=0:degree(C.f)
setcoeff!(C.X.f, i, lift(coeff(C.f, i)))
end
continue_lift(C.X, mx)
end
function factor(C::HenselCtxPadic)
res = typeof(C.f)[]
Zx = PolynomialRing(FlintZZ, cached = false)[1]
h = Zx()
Qp = base_ring(C.f)
for i = 1:C.X.LF._num #from factor_to_dict
#cannot use factor_to_dict as the order will be random (hashing!)
g = parent(C.f)()
ccall((:fmpz_poly_set, libflint), Nothing, (Ref{fmpz_poly}, Ref{fmpz_poly_raw}), h, C.X.LF.poly+(i-1)*sizeof(fmpz_poly_raw))
for j=0:degree(h)
setcoeff!(g, j, Qp(coeff(h, j)))
end
push!(res, g)
end
return res
end
function precision(C::HenselCtxPadic)
return Int(C.X.N)
end
function precision(H::HenselCtx)
return Int(H.N)
end
function prime(H::HenselCtx)
return Int(H.p)
end
function div_preinv(a::fmpz, b::fmpz, bi::fmpz_preinvn_struct)
q = fmpz()
r = fmpz()
fdiv_qr_with_preinvn!(q, r, a, b, bi)
return q
end
@doc Markdown.doc"""
round(::Type{fmpz}, a::fmpz, b::fmpz, bi::fmpz_preinvn_struct) -> fmpz
Computes `round(a//b)` using `bi`, the precomputed pre-inverse of `2b`.
"""
function Base.round(::Type{fmpz}, a::fmpz, b::fmpz, bi::fmpz_preinvn_struct)
s = sign(a)
as = abs(a)
r = s*div_preinv(2*as+b, 2*b, bi)
@hassert :PolyFactor 1 abs(r - a//b) <= 1//2
# @assert r == round(fmpz, a//b)
return r
end
@doc Markdown.doc"""
round(::Type{fmpz}, a::fmpz, b::fmpz) -> fmpz
Computes `round(a//b)`.
"""
function Base.round(::Type{fmpz}, a::fmpz, b::fmpz)
s = sign(a)*sign(b)
bs = abs(b)
as = abs(a)
r = s*div(2*as+bs, 2*bs)
# @assert r == round(fmpz, a//b)
return r
end
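# Worked example (sanity check): round(7//3) == 2 and round(-7//3) == -2, since
#   round(fmpz, fmpz(7), fmpz(3))  computes div(2*7 + 3, 2*3) = div(17, 6) = 2
#   round(fmpz, fmpz(-7), fmpz(3)) computes -div(17, 6) = -2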
#TODO: think about computing pM[1][1,:]//pM[2] as a "float" approximation
# to save on multiplications
function reco(a::fmpz, M, pM::Tuple{fmpz_mat, fmpz, fmpz_preinvn_struct}, O)
m = map(x -> round(fmpz, a*x, pM[2], pM[3]), pM[1][1, :])*M
return a - O(m)
end
function reco(a::fmpz, M, pM::Tuple{fmpz_mat, fmpz}, O)
m = map(x -> round(fmpz, a*x, pM[2]), pM[1][1, :])*M
return a - O(m)
end
function reco(a::NfAbsOrdElem, M, pM)
m = matrix(FlintZZ, 1, degree(parent(a)), coordinates(a))
m = m - map(x -> round(fmpz, x, pM[2]), m*pM[1])*M
return parent(a)(m)
end
function isprime_nice(O::NfOrd, p::Int)
f = isprime_nice(nf(O), p)
f || return f
if discriminant(O) %p == 0
return false
end
return true
end
function isprime_nice(K::AnticNumberField, p::Int)
d = lcm(map(denominator, coefficients(K.pol)))
if d % p == 0
return false
end
F = GF(p)
f = map_coefficients(F, d*K.pol)
if degree(f) < degree(K)
return false
end
if iszero(discriminant(f))
return false
end
return true
end
@doc Markdown.doc"""
factor_new(f::PolyElem{nf_elem}) -> Array{PolyElem{nf_elem}, 1}
Direct factorisation over a number field, using either Zassenhaus' approach
with the potentially exponential recombination or a van Hoeij like approach using LLL.
The decision is based on the number of local factors.
"""
function factor_new(f::PolyElem{nf_elem})
k = base_ring(f)
local zk::NfOrd
if ismaximal_order_known(k)
zk = maximal_order(k)
if isdefined(zk, :lllO)
zk = zk.lllO::NfOrd
end
else
zk = any_order(k)
end
zk = lll(zk) # always a good option!
p = degree(f)
f *= lcm(map(denominator, coefficients(f)))
np = 0
bp = 1*zk
br = 0
s = Set{Int}()
while true
@vprint :PolyFactor 3 "Trying with $p\n "
p = next_prime(p)
if !isprime_nice(zk, p)
continue
end
P = prime_decomposition(zk, p, 1)
if length(P) == 0
continue
end
F, mF1 = ResidueFieldSmallDegree1(zk::NfOrd, P[1][1])
mF = extend(mF1, k)
fp = map_coefficients(mF, f, cached = false)
if degree(fp) < degree(f) || iszero(constant_coefficient(fp))
continue
end
if !issquarefree(fp)
continue
end
lf = factor_shape(fp)
ns = degree_set(lf)
if length(s) == 0
s = ns
else
s = Base.intersect(s, ns)
end
@vprint :PolyFactor 3 "$s\n"
if length(s) == 1
return typeof(f)[f]
end
if br == 0 || br > sum(values(lf))
br = sum(values(lf))
bp = P[1][1]
end
np += 1
if np > 2 && br > 10
break
end
if np > min(100, 2*degree(f))
break
end
end
@vprint :PolyFactor 1 "possible degrees: $s\n"
if br < 5
return zassenhaus(f, bp, degset = s)
else
return van_hoeij(f, bp)
end
end
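# Usage sketch (our example, not part of the library; assumes Hecke/Nemo are
# loaded, and uses a hypothetical quadratic field):
#   Qx, x = PolynomialRing(FlintQQ, "x")
#   K, a = number_field(x^2 - 2, "a")
#   Kt, t = PolynomialRing(K, "t")
#   factor_new(t^4 - 2)   # two quadratic factors over K = Q(sqrt(2))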
function degree_set(fa::Dict{Int, Int})
T = Vector{Int}(undef, sum(values(fa)))
ind = 0
for (k, v) in fa
for j = 1:v
T[j+ind] = k
end
ind += v
end
M = MSet(T)
return Set(sum(s) for s = subsets(M) if length(s) > 0)
end
@doc Markdown.doc"""
zassenhaus(f::PolyElem{nf_elem}, P::NfOrdIdl; degset::Set{Int} = Set{Int}(collect(1:degree(f)))) -> Array{PolyElem{nf_elem}, 1}
Zassenhaus' factoring algorithm over an absolute simple field. Given a prime ideal $P$ which
has to be an unramified non-index divisor, a factorisation of $f$ in the $P$-adic completion
is computed. In the last step, all combinations of the local factors are tried to find the
correct factorisation.
$f$ needs to be square-free and square-free modulo $P$ as well.
"""
function zassenhaus(f::PolyElem{nf_elem}, P::NfOrdIdl; degset::Set{Int} = Set{Int}(collect(1:degree(f))))
@vprint :PolyFactor 1 "Using (relative) Zassenhaus\n"
K = base_ring(parent(f))
C, mC = completion(K, P)
b = landau_mignotte_bound(f)*upper_bound(sqrt(t2(leading_coefficient(f))), fmpz)
den = K(1)
if !ismaximal_known_and_maximal(order(P))
if !isdefining_polynomial_nice(K)
den = K(discriminant(order(P))*det(basis_matrix(order(P), copy = false)))
else
den = derivative(K.pol)(gen(K))
end
b *= upper_bound(sqrt(t2(den)), fmpz)
end
c1, c2 = norm_change_const(order(P))
N = ceil(Int, degree(K)/2/log(norm(P))*(log2(c1*c2) + 2*nbits(b)))
@vprint :PolyFactor 1 "using a precision of $N\n"
setprecision!(C, N)
vH = vanHoeijCtx()
if degree(P) == 1
vH.H = HenselCtxPadic(map_coefficients(x->coeff(mC(x), 0), f, cached = false))
else
vH.H = HenselCtxQadic(map_coefficients(mC, f, cached = false))
end
vH.C = C
vH.P = P
@vtime :PolyFactor 1 grow_prec!(vH, N)
av_bits = sum(nbits, vH.Ml)/degree(K)^2
H = vH.H
M = vH.Ml
pM = vH.pMr
lf = factor(H)
zk = order(P)
if degree(P) == 1
S = Set(map(x -> map_coefficients(y -> lift(y), x, parent = parent(f)), lf))
else
S = Set(map(x -> map_coefficients(y -> preimage(mC, y), x, parent = parent(f)), lf))
end
#TODO: test reco result for being small, do early abort
#TODO: test selected coefficients first without computing the product
#TODO: once a factor is found (need to enumerate by size!!!), remove stuff...
# : if f is the norm of a poly over a larger field, then every
# combination has to respect the prime splitting in the extension
# the norm(poly) is the prod of the local norm(poly)s
#TODO: add/use degree sets and search restrictions. Users might want restricted degrees
#TODO: add a call to jump from van Hoeij to Zassenhaus once a partitioning
# is there.
used = empty(S)
res = typeof(f)[]
for d = 1:length(S)
for s = subsets(S, d)
if length(Base.intersect(used, s)) > 0
# println("re-using data")
continue
end
#TODO: test constant term first, possibly also trace + size
g = prod(s)
g = map_coefficients(x -> K(reco(zk(leading_coefficient(f)*x*den), M, pM)), g, parent = parent(f))*(1//leading_coefficient(f)//den)
if iszero(rem(f, g))
push!(res, g)
used = union(used, s)
if length(used) == length(S)
return res
end
else
# println("reco failed")
end
end
end
error("no factor found - should not happen")
return res
end
###############################################
Base.log2(a::fmpz) = log2(BigInt(a)) # stupid: there has to be a faster way
global last_lf = Ref{Any}()
#given the local factorisation in H, find the cld, the Coefficients of the Logarithmic
#Derivative: a factor g of f is mapped to g'*f/g
#Only the coefficients 0:up_to and from:degree(f)-1 are computed
function cld_data(H::Hensel, up_to::Int, from::Int, mC, Mi, sc::nf_elem)
lf = factor(H)
a = preimage(mC, zero(codomain(mC)))
k = parent(a)
N = degree(H.f)
@assert 0<= up_to <= N #up_to: modulo x^up_to
@assert 0<= from <= N #from : div by x^from
# @assert up_to <= from
M = zero_matrix(FlintZZ, length(lf), (1+up_to + N - from) * degree(k))
last_lf[] = (lf, H.f, up_to)
lf = [divexact_low(mullow(derivative(x), H.f, up_to+1), x, up_to+1) for x = lf]
# lf = [divexact(derivative(x)*H.f, x) for x = lf]
# @show llf .- lf
NN = zero_matrix(FlintZZ, 1, degree(k))
d = FlintZZ()
for i=0:up_to
for j=1:length(lf)
c = sc * preimage(mC, coeff(lf[j], i)) # should be an nf_elem
elem_to_mat_row!(NN, 1, d, c)
mul!(NN, NN, Mi) #base_change, Mi should be the inv-lll-basis-mat wrt field
@assert isone(d)
for h=1:degree(k)
M[j, i*degree(k) + h] = NN[1, h]
end
end
end
lf = factor(H)
lf = [divhigh(mulhigh(derivative(x), H.f, from), x, from) for x = lf]
for i=from:N-1
for j=1:length(lf)
c = sc * preimage(mC, coeff(lf[j], i)) # should be an nf_elem
elem_to_mat_row!(NN, 1, d, c)
mul!(NN, NN, Mi) #base_change, Mi should be the inv-lll-basis-mat wrt field
@assert isone(d)
for h=1:degree(k)
M[j, (i-from+up_to)*degree(k) + h] = NN[1, h]
end
end
end
return M
end
mutable struct vanHoeijCtx
H::Hensel
pr::Int
Ml::fmpz_mat
pMr::Tuple{fmpz_mat, fmpz, fmpz_preinvn_struct}
pM::Tuple{fmpz_mat, fmpz}
C::Union{FlintQadicField, FlintPadicField}
P::NfOrdIdl
function vanHoeijCtx()
return new()
end
end
#increase the precision of the local data, i.e lift the factorisation and
#the LLL_basis of the ideal
function grow_prec!(vH::vanHoeijCtx, pr::Int)
lift(vH.H, pr)
@vtime :PolyFactor 2 X1 = vH.P^pr
@vtime :PolyFactor 2 X2 = basis_matrix(X1)
@vtime :PolyFactor 2 vH.Ml = lll(X2)
@vtime :PolyFactor 2 pMr = pseudo_inv(vH.Ml)
F = FakeFmpqMat(pMr)
#M * basis_matrix(zk) is the basis wrt to the field
#(M*B)^-1 = B^-1 * M^-1, so I need basis_mat_inv(zk) * pM
vH.pMr = (F.num, F.den, fmpz_preinvn_struct(2*F.den))
F = basis_mat_inv(order(vH.P)) * F
vH.pM = (F.num, F.den)
end
global last_f = Ref{Any}()
function lll_with_removal_knapsack(x::fmpz_mat, b::fmpz, ctx::lll_ctx = lll_ctx(0.99, 0.51))
z = deepcopy(x)
d = Int(ccall((:fmpz_lll_wrapper_with_removal_knapsack, libflint), Cint,
(Ref{fmpz_mat}, Ptr{nothing}, Ref{fmpz}, Ref{lll_ctx}), z, C_NULL, b, ctx))
return d, z
end
function tdivpow2!(B::fmpz_mat, t::Int)
ccall((:fmpz_mat_scalar_tdiv_q_2exp, libflint), Nothing, (Ref{fmpz_mat}, Ref{fmpz_mat}, Cint), B, B, t)
end
function Nemo.tdivpow2(B::fmpz_mat, t::Int)
C = similar(B)
ccall((:fmpz_mat_scalar_tdiv_q_2exp, libflint), Nothing, (Ref{fmpz_mat}, Ref{fmpz_mat}, Cint), C, B, t)
return C
end
function gradual_feed_lll(M::fmpz_mat, sm::fmpz, B::fmpz_mat, d::fmpz, bnd::fmpz)
b = maximum(nbits, B)
sc = max(0, b-55)
while false && sc > 0
BB = tdivpow2(B, sc)
dd = tdivpow2(d, sc)
MM = [M BB; zero_matrix(FlintZZ, ncols(B), ncols(M)) dd*identity_matrix(FlintZZ, ncols(B))]
@show maximum(nbits, MM)
@time MM, T = lll_with_transform(MM, lll_ctx(0.75, 0.51))
@time l, _ = lll_with_removal(MM, bnd, lll_ctx(0.75, 0.51))
@show l
M = T[1:nrows(M), 1:nrows(M)]*M
B = T[1:nrows(M), 1:nrows(M)]*B
mod_sym!(B, d)
@show maximum(nbits, B)
@show sc = max(0, sc-55)
end
M = [M B; zero_matrix(FlintZZ, ncols(B), ncols(M)) d*identity_matrix(FlintZZ, ncols(B))]
return lll_with_removal(M, bnd)
end
@doc Markdown.doc"""
van_hoeij(f::PolyElem{nf_elem}, P::NfOrdIdl; prec_scale = 1) -> Array{PolyElem{nf_elem}, 1}
A van Hoeij-like factorisation over an absolute simple number field, using the factorisation in the
$P$-adic completion where $P$ has to be an unramified non-index divisor and the square-free $f$ has
to be square-free mod $P$ as well.
The approach is taken from Hart, Novocin, and van Hoeij (ISSAC 2011).
"""
function van_hoeij(f::PolyElem{nf_elem}, P::NfOrdIdl; prec_scale = 1)
@vprint :PolyFactor 1 "Using (relative) van Hoeij\n"
@vprint :PolyFactor 2 "with p = $P\n"
@assert all(x->denominator(x) == 1, coefficients(f))
K = base_ring(parent(f))
C, mC = completion(K, P)
zk = order(P)
if ismaximal_known_and_maximal(zk)
den = K(1)
elseif isdefining_polynomial_nice(K)
den = derivative(K.pol)(gen(K))
else
den = K(discriminant(order(P))) * det(basis_matrix(order(P), copy= false))
end
_, mK = ResidueField(order(P), P)
mK = extend(mK, K)
r = length(factor(map_coefficients(mK, f, cached = false)))
N = degree(f)
@vprint :PolyFactor 1 "Having $r local factors for degree $N \n"
setprecision!(C, 5)
vH = vanHoeijCtx()
if degree(P) == 1
vH.H = HenselCtxPadic(map_coefficients(x->coeff(mC(x), 0), f))
else
vH.H = HenselCtxQadic(map_coefficients(mC, f))
end
vH.C = C
vH.P = P
up_to = min(5, ceil(Int, N/20))
up_to_start = up_to
from = N-up_to #use 5 coeffs on either end
up_to = min(up_to, N)
from = min(from, N)
from = max(up_to, from)
b = cld_bound(f, vcat(0:up_to-1, from:N-1)) .* upper_bound(sqrt(t2(den*leading_coefficient(f))), fmpz)
# from Fieker/Friedrichs, still wrong here
# needs to be larger than anticipated...
c1, c2 = norm_change_const(order(P))
b = Int[ceil(Int, degree(K)/2/log(norm(P))*(log2(c1*c2) + 2*nbits(x)+ degree(K)*r+prec_scale)) for x = b]
bb = landau_mignotte_bound(f)*upper_bound(sqrt(t2(den*leading_coefficient(f))), fmpz)
kk = ceil(Int, degree(K)/2/log(norm(P))*(log2(c1*c2) + 2*nbits(bb)))
@vprint :PolyFactor 2 "using CLD precision bounds $b \n"
used = []
really_used = []
M = identity_matrix(FlintZZ, r)*fmpz(2)^prec_scale
while true #the main loop
#find some prec
#to start with, I want at least half of the CLDs to be useful
if length(b) == degree(f)
i = maximum(b) + 100
else
i= sort(b)[div(length(b)+1, 2)]
end
i = max(i, kk)
@vprint :PolyFactor 1 "setting prec to $i, and lifting the info ...\n"
setprecision!(codomain(mC), i)
if degree(P) == 1
vH.H.f = map_coefficients(x->coeff(mC(x), 0), f)
else
vH.H.f = map_coefficients(mC, f)
end
global last_vH = vH
@vtime :PolyFactor 1 grow_prec!(vH, i)
av_bits = sum(nbits, vH.Ml)/degree(K)^2
@vprint :PolyFactor 1 "obtaining CLDs...\n"
#prune: in Swinnerton-Dyer: either top or bottom are too large.
while from < N && b[N - from + up_to] > i
from += 1
end
while up_to > 0 && b[up_to] > i
up_to -= 1
end
b = b[vcat(1:up_to, length(b)-(N-from-1):length(b))]
have = vcat(0:up_to-1, from:N-2) #N-1 is always 1
if degree(P) == 1
mD = MapFromFunc(x->coeff(mC(x),0), y->K(lift(y)), K, base_ring(vH.H.f))
@vtime :PolyFactor 1 C = cld_data(vH.H, up_to, from, mD, vH.pM[1], den*leading_coefficient(f))
else
@vtime :PolyFactor 1 C = cld_data(vH.H, up_to, from, mC, vH.pM[1], den*leading_coefficient(f))
end
# In the end, p-adic precision needs to be large enough to
# cover some CLDs. If you want the factors, it also has to
# cover those. The norm change constants also come in ...
# and the degree of P...
# starting precision:
# - large enough to recover factors (maybe)
# - large enough to recover some CLD (definitely)
# - + eps to give algo a chance.
# Then take 10% of the CLD, small enough for the current precision
# possibly figure out which CLD's are available at all
# we want
# I | C/p^n
# 0 | I
# true factors, in this lattice, are small (the lower I is the rounding)
# the left part is to keep track of operations
# by cld_bound, we know the expected upper size of the rounded legal entries
# so we scale it by the bound. If all would be exact, the true factors would be zero...
# WHY???Zero??? small I see, but not zero..., smaller than 1 I can see.
# 1st make integral:
# I | C
# 0 | p^n
# scale:
# I | C/lambda
# 0 | p^n/lambda lambda depends on the column
# now, to limit damages re-write the rationals with den | 2^k (rounding)
# I | D/2^k
# | X/2^k
#make integral
# 2^k | D
# 0 | X where X is still diagonal
# if all goes as planned: lll with reduction will magically work...
# needs (I think): fix a in Z_k, P and ideal. Then write a wrt. a LLL basis of P^k
# a = sum a^k_i alpha^k_i, a^k_i in Q, then for k -> infty, a^k_i -> 0
# (ineffective: write coeffs with Cramer's rule via determinants. The
# numerator has n-1 LLL-basis vectors and one small vector (a), thus the
# determinant is s.th. ^(n-1) and the coeff then ()^(n-1)/()^n should go to zero
# lambda should be chosen, so that the true factors become < 1 by it
# for the gradual feeding, we can also add the individual coefficients (of the nf_elems) individually
# - apply transformations already done (by checking the left part of the matrix)
# - scale, round
# - call lll_with_removal
# until done (whatever that means)
# if unlucky: re-do Hensel and start over again, hopefully retaining some info
# can happen if the CLD coeffs are too large for the current Hensel level
while length(have) > length(used)
local m
m_empty = true
for i=1:length(have)
if have[i] in used
continue
end
if m_empty || b[i] < m[1]
m_empty = false
m = (b[i], i)
end
end
n = have[m[2]]
@assert !(n in used)
push!(used, n)
i = findfirst(x->x == n, have) #new data will be in block i of C
@vprint :PolyFactor 2 "trying to use coeff $n which is $i\n"
if b[i] > precision(codomain(mC))
@vprint :PolyFactor 2 "not enough precision for CLD $i, $b, $(precision(codomain(mC))), skipping\n"
# error()
continue
end
sz = floor(Int, degree(K)*av_bits/log(norm(P)) - b[i])
B = sub(C, 1:r, (i-1)*degree(K)+1:i*degree(K))
# @show i, maximum(nbits, B)
T = sub(M, 1:nrows(M), 1:r)
B = T*B # T contains the prec_scale
mod_sym!(B, vH.pM[2]*fmpz(2)^prec_scale)
# @show maximum(nbits, B), nbits(vH.pM[2]), b[i]
if sz + prec_scale >= nbits(vH.pM[2]) || sz < 0
println("Loss of precision for this col: ", sz, " ", nbits(vH.pM[2]))
@show f, base_ring(f), P
error()
continue
else
sz = nbits(vH.pM[2]) - div(r, 1) - prec_scale
end
push!(really_used, n)
tdivpow2!(B, sz+prec_scale)
d = tdivpow2(vH.pM[2], sz)
bnd = r*fmpz(2)^(2*prec_scale) + degree(K)*(ncols(M)-r)*div(r, 2)^2
rt = time_ns()
@vtime :PolyFactor 1 l, Mi = gradual_feed_lll(M, fmpz(2)^prec_scale, B, d, bnd)
# @vtime :PolyFactor 1 l, Mi = lll_with_removal(M, bnd)
M = Mi
# @show hnf(sub(M, 1:l, 1:r))
if iszero(M[1:l, 1:r])
# println(f)
# println(base_ring(f))
error("must never be zero")
end
@hassert :PolyFactor 1 !iszero(sub(M, 1:l, 1:r))
M = sub(M, 1:l, 1:ncols(M))
d = Dict{fmpz_mat, Array{Int, 1}}()
for l=1:r
k = M[:, l]
if haskey(d, k)
push!(d[k], l)
else
d[k] = [l]
end
end
@vprint :PolyFactor 1 "partitioning of local factors: $(values(d))\n"
if length(keys(d)) <= nrows(M)
@vprint :PolyFactor 1 "BINGO: potentially $(length(keys(d))) factors\n"
res = typeof(f)[]
fail = []
if length(keys(d)) == 1
return [f]
end
# display(d)
for v = values(d)
#trivial test:
if isone(den) && ismonic(f) #don't know what to do for non-monics
a = prod(map(constant_coefficient, factor(vH.H)[v]))
if degree(P) == 1
A = K(reco(order(P)(lift(a)), vH.Ml, vH.pMr))
else
A = K(reco(order(P)(preimage(mC, a)), vH.Ml, vH.pMr))
end
if denominator(divexact(constant_coefficient(f), A), order(P)) != 1
@vprint :PolyFactor 2 "Fail: const coeffs do not divide\n"
push!(fail, v)
if length(fail) > 1
break
end
continue
end
end
@vtime :PolyFactor 2 g = prod(factor(vH.H)[v])
if degree(P) == 1
@vtime :PolyFactor 2 G = parent(f)([K(reco(lift(coeff(mC(den*leading_coefficient(f)), 0)*coeff(g, l)), vH.Ml, vH.pMr, order(P))) for l=0:degree(g)])
else
@vtime :PolyFactor 2 G = parent(f)([K(reco(order(P)(preimage(mC, mC(den*leading_coefficient(f))*coeff(g, l))), vH.Ml, vH.pMr)) for l=0:degree(g)])
end
G *= 1//(den*leading_coefficient(f))
if !iszero(rem(f, G))
@vprint :PolyFactor 2 "Fail: poly does not divide\n"
push!(fail, v)
if length(fail) > 1
break
end
continue
end
push!(res, G)
end
if length(fail) == 1
@vprint :PolyFactor 1 "only one reco failed, total success\n"
push!(res, divexact(f, prod(res)))
return res
end
if length(res) < length(d)
@vprint :PolyFactor 1 "reco failed\n... here we go again ...\n"
else
return res
end
end
end
up_to = up_to_start = min(2*up_to_start, N)
up_to = min(N, up_to)
from = N-up_to
from = min(from, N)
from = max(up_to, from)
have = vcat(0:up_to-1, from:N-2) #N-1 is always 1
if length(have) <= length(really_used)
@show have, really_used, used
@show f
@show base_ring(f)
last_f[] = (f, P, vH)
error("too bad")
end
used = deepcopy(really_used)
b = cld_bound(f, vcat(0:up_to-1, from:N-1)) .* upper_bound(sqrt(t2(den*leading_coefficient(f))), fmpz)
# from Fieker/Friedrichs, still wrong here
# needs to be larger than anticipated...
b = [ceil(Int, degree(K)/2/log(norm(P))*(log2(c1*c2) + 2*nbits(x)+ 2*prec_scale)) for x = b]
end #the big while
end
function Base.map!(f, M::fmpz_mat)
for i=1:nrows(M)
for j=1:ncols(M)
M[i,j] = f(M[i,j])
end
end
end
#does not seem to be faster than the direct approach (not modular).
#Magma is faster, which seems to suggest the direct resultant is
#even better (modular resultant).
#Power series over finite fields are sub-par... or at least this usage was;
#fixed "most" of it...
#Update: for f, K large enough, this wins. Need bounds...
function norm_mod(f::PolyElem{nf_elem}, p::Int, Zx::FmpzPolyRing = Globals.Zx)
K = base_ring(f)
k = GF(p)
me = modular_init(K, p)
t = modular_proj(f, me)
n = degree(f)*degree(K)
v = Vector{gfp_elem}(undef, n)
first = true
for i = 1:length(t)
t1 = polynomial_to_power_sums(t[i], n)
for j = 1:length(t1)
el = k(coeff(trace(t1[j]), 0))
if first
v[j] = el
else
v[j] += el
end
end
first = false
end
pol = power_sums_to_polynomial(v)
return lift(Zx, pol)
end
global _debug = []
function norm_mod(f::PolyElem{nf_elem}, Zx::FmpzPolyRing = Globals.Zx)
#assumes, implicitly, the coeffs of f are algebraic integers.
# equivalently: the norm is integral...
p = p_start
K = base_ring(f)
g = Zx(0)
d = fmpz(1)
stable = 0
while true
p = next_prime(p)
tt = norm_mod(f, p, Zx)
prev = g
if isone(d)
g = tt
d = fmpz(p)
else
g, d = induce_crt(g, d, tt, fmpz(p), true)
end
if prev == g
stable += 1
if stable > 4
return g
end
else
stable = 0
end
if nbits(d) > 20000
push!(_debug, f)
error("too bad")
end
end
end
#=
Daniel:
let a_i be a linear recurrence sequence, or better:
sum_{i=1}^infty a_i x^-i = -f/g is rational, with deg f < deg g < n/2;
run rational reconstruction on h := sum_{i=0}^n a_i x^(n-i) and x^n,
finding b*h = a mod x^n (i.e. h = a/b mod x^n);
then b = g and f = div(a - b*h, x^n),
establishing the link between rational reconstruction and Berlekamp-Massey
=#
|
module parkind
implicit none
save
!------------------------------------------------------------------
! rrtmg kinds
! Define integer and real kinds for various types.
!
! Initial version: MJIacono, AER, jun2006
! Revised: MJIacono, AER, aug2008
!------------------------------------------------------------------
!
! integer kinds
! -------------
!
integer, parameter :: kind_ib = selected_int_kind(13) ! 8 byte integer
integer, parameter :: kind_im = selected_int_kind(6) ! 4 byte integer
integer, parameter :: kind_in = kind(1) ! native integer
!
! real kinds
! ----------
!
integer, parameter :: kind_rb = selected_real_kind(12) ! 8 byte real
integer, parameter :: kind_rm = selected_real_kind(6) ! 4 byte real
integer, parameter :: kind_rn = kind(1.0) ! native real
end module parkind
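! A minimal usage sketch (ours, not part of RRTMG): declare variables with the
! named kinds instead of hard-coded kind numbers.
!   module parkind_example
!     use parkind, only: kind_im, kind_rb
!     implicit none
!     integer(kind=kind_im) :: ncol = 4
!     real(kind=kind_rb) :: tau = 1.0e-3_kind_rb
!   end module parkind_example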
|
If $a$ and $b$ are in the same path component of $S$, then the constant loops at $a$ and $b$ are homotopic. |
If $S$ is a closed set and $c$ is a component of the complement of $S$, then $S \cup c$ is closed. |
module Idris.Doc.String
import Core.Context
import Core.Context.Log
import Core.Core
import Core.Env
import Core.Metadata
import Core.TT
import Idris.Pretty
import Idris.Pretty.Render
import Idris.REPL.Opts
import Idris.Resugar
import Idris.Syntax
import TTImp.TTImp
import TTImp.TTImp.Functor
import TTImp.Elab.Prim
import Data.List
import Data.List1
import Data.Maybe
import Data.String
import Libraries.Data.ANameMap
import Libraries.Data.NameMap
import Libraries.Data.SortedMap
import Libraries.Data.StringMap as S
import Libraries.Data.String.Extra
import Libraries.Control.ANSI.SGR
import public Libraries.Text.PrettyPrint.Prettyprinter
import public Libraries.Text.PrettyPrint.Prettyprinter.Util
import Parser.Lexer.Source
%default covering
public export
data IdrisDocAnn
= Header
| Declarations
| Decl Name
| DocStringBody
| UserDocString
| Syntax IdrisSyntax
export
-- TODO: how can we deal with bold & so on?
docToDecoration : IdrisDocAnn -> Maybe Decoration
docToDecoration (Syntax syn) = syntaxToDecoration syn
docToDecoration _ = Nothing
export
styleAnn : IdrisDocAnn -> AnsiStyle
styleAnn Header = underline
styleAnn Declarations = []
styleAnn (Decl{}) = []
styleAnn DocStringBody = []
styleAnn UserDocString = []
styleAnn (Syntax syn) = syntaxAnn syn
export
tCon : Name -> Doc IdrisDocAnn -> Doc IdrisDocAnn
tCon n = annotate (Syntax $ TCon (Just n))
export
dCon : Name -> Doc IdrisDocAnn -> Doc IdrisDocAnn
dCon n = annotate (Syntax $ DCon (Just n))
export
fun : Name -> Doc IdrisDocAnn -> Doc IdrisDocAnn
fun n = annotate (Syntax $ Fun n)
export
header : Doc IdrisDocAnn -> Doc IdrisDocAnn
header d = annotate Header d <+> colon
-- Add a doc string for a module name
export
addModDocString : {auto s : Ref Syn SyntaxInfo} ->
ModuleIdent -> String ->
Core ()
addModDocString mi doc
= do syn <- get Syn
put Syn (record { saveMod $= (mi ::)
, modDocstrings $= insert mi doc } syn)
-- Add a doc string for a name in the current namespace
export
addDocString : {auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
Name -> String ->
Core ()
addDocString n_in doc
= do n <- inCurrentNS n_in
log "doc.record" 50 $
"Adding doc for " ++ show n_in ++ " (aka " ++ show n ++ " in current NS)"
syn <- get Syn
put Syn (record { defDocstrings $= addName n doc,
saveDocstrings $= insert n () } syn)
-- Add a doc string for a name, in an extended namespace (e.g. for
-- record getters)
export
addDocStringNS : {auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
Namespace -> Name -> String ->
Core ()
addDocStringNS ns n_in doc
= do n <- inCurrentNS n_in
let n' = case n of
NS old root => NS (old <.> ns) root
root => NS ns root
syn <- get Syn
put Syn (record { defDocstrings $= addName n' doc,
saveDocstrings $= insert n' () } syn)
prettyTerm : IPTerm -> Doc IdrisDocAnn
prettyTerm = reAnnotate Syntax . Idris.Pretty.prettyTerm
prettyName : Name -> Doc IdrisDocAnn
prettyName n =
case userNameRoot n of
-- shouldn't happen: we only show UN anyways...
Nothing => pretty (nameRoot n)
Just un => if isOpUserName un then parens (pretty un) else pretty un
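-- Prefix a name with its kind keyword (e.g. "data", "record", "interface") when one is known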
prettyKindedName : Maybe String -> Doc IdrisDocAnn -> Doc IdrisDocAnn
prettyKindedName Nothing nm = nm
prettyKindedName (Just kw) nm
= annotate (Syntax Keyword) (pretty kw) <++> nm
export
getDocsForPrimitive : {auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
Constant -> Core (Doc IdrisDocAnn)
getDocsForPrimitive constant = do
let (_, type) = checkPrim EmptyFC constant
let typeString = pretty (show constant)
<++> colon <++> prettyTerm !(resugar [] type)
pure (typeString <+> Line <+> indent 2 "Primitive")
public export
data Config : Type where
||| Configuration of the printer for a name
||| @ longNames Do we print qualified names?
||| @ dropFirst Do we drop the first argument in the type?
||| @ getTotality Do we print the totality status of the function?
MkConfig : {default True longNames : Bool} ->
{default False dropFirst : Bool} ->
{default True getTotality : Bool} ->
Config
||| Printer configuration for interface methods
||| * longNames turned off for interface methods because the namespace is
||| already spelt out for the interface itself
||| * dropFirst turned on for interface methods because the first argument
||| is always the interface constraint
||| * totality turned off for interface methods because the methods themselves
||| are just projections out of a record and so are total
export
methodsConfig : Config
methodsConfig
= MkConfig {longNames = False}
{dropFirst = True}
{getTotality = False}
export
shortNamesConfig : Config
shortNamesConfig
= MkConfig {longNames = False}
{dropFirst = False}
{getTotality = True}
export
getDocsForName : {auto o : Ref ROpts REPLOpts} ->
{auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
FC -> Name -> Config -> Core (Doc IdrisDocAnn)
getDocsForName fc n config
= do syn <- get Syn
defs <- get Ctxt
let extra = case nameRoot n of
"-" => [NS numNS (UN $ Basic "negate")]
_ => []
resolved <- lookupCtxtName n (gamma defs)
let all@(_ :: _) = extra ++ map fst resolved
| _ => undefinedName fc n
let ns@(_ :: _) = concatMap (\n => lookupName n (defDocstrings syn)) all
| [] => pure emptyDoc
docs <- traverse (showDoc config) ns
pure $ vcat docs
where
showDoc : Config -> (Name, String) -> Core (Doc IdrisDocAnn)
-- Avoid generating too much whitespace by not returning a single empty line
reflowDoc : String -> List (Doc IdrisDocAnn)
reflowDoc "" = []
reflowDoc str = map (indent 2 . reflow) (forget $ Extra.lines str)
showTotal : Name -> Totality -> Doc IdrisDocAnn
showTotal n tot
= case isTerminating tot of
Unchecked => ""
_ => header "Totality" <++> pretty tot
getDConDoc : Name -> Core (Doc IdrisDocAnn)
getDConDoc con
= do defs <- get Ctxt
Just def <- lookupCtxtExact con (gamma defs)
-- should never happen, since we know that the DCon exists:
| Nothing => pure Empty
syn <- get Syn
ty <- resugar [] =<< normaliseHoles defs [] (type def)
let conWithTypeDoc = annotate (Decl con) (hsep [dCon con (prettyName con), colon, prettyTerm ty])
case lookupName con (defDocstrings syn) of
[(n, "")] => pure conWithTypeDoc
[(n, str)] => pure $ vcat
[ conWithTypeDoc
, annotate DocStringBody
$ annotate UserDocString
$ vcat $ reflowDoc str
]
_ => pure conWithTypeDoc
getImplDoc : Name -> Core (List (Doc IdrisDocAnn))
getImplDoc n
= do defs <- get Ctxt
Just def <- lookupCtxtExact n (gamma defs)
| Nothing => pure []
ty <- resugar [] =<< normaliseHoles defs [] (type def)
pure [annotate (Decl n) $ prettyTerm ty]
getMethDoc : Method -> Core (List (Doc IdrisDocAnn))
getMethDoc meth
= do syn <- get Syn
let [nstr] = lookupName meth.name (defDocstrings syn)
| _ => pure []
pure <$> showDoc methodsConfig nstr
getInfixDoc : Name -> Core (List (Doc IdrisDocAnn))
getInfixDoc n
= do let Just (Basic n) = userNameRoot n
| _ => pure []
let Just (fixity, assoc) = S.lookup n (infixes !(get Syn))
| Nothing => pure []
pure $ pure $ hsep
[ pretty (show fixity)
, "operator,"
, "level"
, pretty (show assoc)
]
getPrefixDoc : Name -> Core (List (Doc IdrisDocAnn))
getPrefixDoc n
= do let Just (Basic n) = userNameRoot n
| _ => pure []
let Just assoc = S.lookup n (prefixes !(get Syn))
| Nothing => pure []
pure $ ["prefix operator, level" <++> pretty (show assoc)]
getFixityDoc : Name -> Core (List (Doc IdrisDocAnn))
getFixityDoc n =
pure $ case toList !(getInfixDoc n) ++ toList !(getPrefixDoc n) of
[] => []
[f] => [header "Fixity Declaration" <++> f]
fs => [header "Fixity Declarations" <+> Line <+>
indent 2 (vcat fs)]
getIFaceDoc : (Name, IFaceInfo) -> Core (Doc IdrisDocAnn)
getIFaceDoc (n, iface)
= do let params =
case params iface of
[] => []
ps => [hsep (header "Parameters" :: punctuate comma (map (pretty . show) ps))]
let constraints =
case !(traverse (pterm . map defaultKindedName) (parents iface)) of
[] => []
ps => [hsep (header "Constraints" :: punctuate comma (map (pretty . show) ps))]
let icon = case dropNS (iconstructor iface) of
DN _ _ => [] -- machine inserted
nm => [hsep [header "Constructor", dCon nm (prettyName nm)]]
mdocs <- traverse getMethDoc (methods iface)
let meths = case concat mdocs of
[] => []
docs => [vcat [header "Methods", annotate Declarations $ vcat $ map (indent 2) docs]]
sd <- getSearchData fc False n
idocs <- case hintGroups sd of
[] => pure (the (List (List (Doc IdrisDocAnn))) [])
((_, tophs) :: _) => traverse getImplDoc tophs
let insts = case concat idocs of
[] => []
[doc] => [header "Implementation" <++> annotate Declarations doc]
docs => [vcat [header "Implementations"
, annotate Declarations $ vcat $ map (indent 2) docs]]
pure (vcat (params ++ constraints ++ icon ++ meths ++ insts))
getFieldDoc : Name -> Core (Doc IdrisDocAnn)
getFieldDoc nm
= do syn <- get Syn
defs <- get Ctxt
Just def <- lookupCtxtExact nm (gamma defs)
-- should never happen, since we know that the DCon exists:
| Nothing => pure Empty
ty <- resugar [] =<< normaliseHoles defs [] (type def)
let prettyName = prettyName nm
let projDecl = annotate (Decl nm) $ hsep [ fun nm prettyName, colon, prettyTerm ty ]
case lookupName nm (defDocstrings syn) of
[(_, "")] => pure projDecl
[(_, str)] =>
pure $ vcat [ projDecl
, annotate DocStringBody
$ annotate UserDocString
$ vcat $ reflowDoc str
]
_ => pure projDecl
getFieldsDoc : Name -> Core (Maybe (Doc IdrisDocAnn))
getFieldsDoc recName
= do let (Just ns, n) = displayName recName
| _ => pure Nothing
let recNS = ns <.> mkNamespace n
defs <- get Ctxt
let fields = getFieldNames (gamma defs) recNS
syn <- get Syn
case fields of
[] => pure Nothing
[proj] => pure $ Just $ header "Projection" <++> annotate Declarations !(getFieldDoc proj)
projs => pure $ Just $ vcat
[ header "Projections"
, annotate Declarations $ vcat $
map (indent 2) $ !(traverse getFieldDoc projs)
]
getExtra : Name -> GlobalDef -> Core (Maybe String, List (Doc IdrisDocAnn))
getExtra n d = do
do syn <- get Syn
let [] = lookupName n (ifaces syn)
| [ifacedata] => (Just "interface",) . pure <$> getIFaceDoc ifacedata
| _ => pure (Nothing, []) -- shouldn't happen, we've resolved ambiguity by now
case definition d of
PMDef _ _ _ _ _ => pure (Nothing, [showTotal n (totality d)])
TCon _ _ _ _ _ _ cons _ =>
do let tot = [showTotal n (totality d)]
cdocs <- traverse (getDConDoc <=< toFullNames) cons
cdoc <- case cdocs of
[] => pure (Just "data", [])
[doc] =>
let cdoc = header "Constructor" <++> annotate Declarations doc in
case !(getFieldsDoc n) of
Nothing => pure (Just "data", [cdoc])
Just fs => pure (Just "record", cdoc :: [fs])
docs => pure (Just "data"
, [vcat [header "Constructors"
, annotate Declarations $
vcat $ map (indent 2) docs]])
pure (map (tot ++) cdoc)
_ => pure (Nothing, [])
showDoc (MkConfig {longNames, dropFirst, getTotality}) (n, str)
= do defs <- get Ctxt
Just def <- lookupCtxtExact n (gamma defs)
| Nothing => undefinedName fc n
-- First get the extra stuff because this also tells us whether a
-- definition is `data`, `record`, or `interface`.
(typ, extra) <- ifThenElse getTotality
(getExtra n def)
(pure (Nothing, []))
-- Then form the type declaration
ty <- resugar [] =<< normaliseHoles defs [] (type def)
-- when printing e.g. interface methods there is no point in
-- repeating the interface's name
let ty = ifThenElse (not dropFirst) ty $ case ty of
PPi _ _ AutoImplicit _ _ sc => sc
_ => ty
nm <- aliasName n
-- when printing e.g. interface methods there is no point in
-- repeating the namespace the interface lives in
let cat = showCategory Syntax def
let nm = prettyKindedName typ $ cat
$ ifThenElse longNames (pretty (show nm)) (prettyName nm)
let docDecl = annotate (Decl n) (hsep [nm, colon, prettyTerm ty])
-- Finally add the user-provided docstring
let docText = let docs = reflowDoc str in
annotate UserDocString (vcat docs)
<$ guard (not $ null docs)
fixes <- getFixityDoc n
let docBody =
let docs = maybe id (::) docText
$ map (indent 2) (extra ++ fixes)
in annotate DocStringBody
(concatWith (\l, r => l <+> hardline <+> r) docs)
<$ guard (not (null docs))
pure (vcat (docDecl :: docBody))
export
getDocsForPTerm : {auto o : Ref ROpts REPLOpts} ->
{auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
PTerm -> Core (Doc IdrisDocAnn)
getDocsForPTerm (PRef fc name) = getDocsForName fc name MkConfig
getDocsForPTerm (PPrimVal _ c) = getDocsForPrimitive c
getDocsForPTerm (PType _) = pure $ vcat
[ "Type : Type"
, indent 2 "The type of all types is Type. The type of Type is Type."
]
getDocsForPTerm (PString _ _) = pure $ vcat
[ "String Literal"
, indent 2 "Desugars to a fromString call"
]
getDocsForPTerm (PList _ _ _) = pure $ vcat
[ "List Literal"
, indent 2 "Desugars to (::) and Nil"
]
getDocsForPTerm (PSnocList _ _ _) = pure $ vcat
[ "SnocList Literal"
, indent 2 "Desugars to (:<) and Empty"
]
getDocsForPTerm (PPair _ _ _) = pure $ vcat
[ "Pair Literal"
, indent 2 "Desugars to MkPair or Pair"
]
getDocsForPTerm (PDPair _ _ _ _ _) = pure $ vcat
[ "Dependant Pair Literal"
, indent 2 "Desugars to MkDPair or DPair"
]
getDocsForPTerm (PUnit _) = pure $ vcat
[ "Unit Literal"
, indent 2 "Desugars to MkUnit or Unit"
]
getDocsForPTerm pterm = pure $
"Docs not implemented for" <++> pretty (show pterm) <++> "yet"
summarise : {auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
Name -> Core (Doc IdrisDocAnn)
summarise n -- n is fully qualified
= do syn <- get Syn
defs <- get Ctxt
Just def <- lookupCtxtExact n (gamma defs)
| _ => pure ""
ty <- normaliseHoles defs [] (type def)
pure $ showCategory Syntax def (prettyName n)
<++> colon <++> hang 0 (prettyTerm !(resugar [] ty))
-- Display all the exported names in the given namespace
export
getContents : {auto o : Ref ROpts REPLOpts} ->
{auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
Namespace -> Core (Doc IdrisDocAnn)
getContents ns
= -- Get all the names, filter by any that match the given namespace
-- and are visible, then display with their type
do defs <- get Ctxt
ns <- allNames (gamma defs)
let allNs = filter inNS ns
allNs <- filterM (visible defs) allNs
vsep <$> traverse summarise (sort allNs)
where
visible : Defs -> Name -> Core Bool
visible defs n
= do Just def <- lookupCtxtExact n (gamma defs)
| Nothing => pure False
pure (visibility def /= Private)
inNS : Name -> Bool
inNS (NS xns (UN _)) = ns `isParentOf` xns
inNS _ = False
|
Welcome to Reel, North Yorkshire's premier recording & production studio.
Just minutes from the centre of the picturesque city of York, lies this purpose built, spacious, comfortable and fantastic sounding recording studio.
Generously equipped with the latest in digital recording technology and classic analogue gear, twinned with a selection of the most technically and creatively capable engineers the industry has to offer.
Pass the hours in our spacious, well lit, and comfortable control room, carefully designed to give even sound wherever you are, so feel free to sink into the sofa knowing you’re hearing exactly what you need to.
In the age of the bedroom producer, the right live space is often overlooked. Here at Reel we spent months trying, testing and designing our live spaces. At 350 square feet, our main live room offers the space needed to achieve huge rock drum sounds, whilst still retaining a tight, focussed character. We can also offer a full PA and Monitor rig for live performance and/or pre-production.
We also offer a fully redundant 64-channel live recording system with splits, powerful enough to capture everything you need, with a tiny footprint.
|
[STATEMENT]
lemma complement_rep_number:
assumes "\<And> bl . bl \<in># \<B> \<Longrightarrow> incomplete_block bl"
shows "constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)
[PROOF STEP]
interpret d: proper_design \<V> "(\<B>\<^sup>C)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. proper_design \<V> \<B>\<^sup>C
[PROOF STEP]
using complement_proper_design
[PROOF STATE]
proof (prove)
using this:
(\<And>bl. bl \<in># \<B> \<Longrightarrow> card bl < \<v> \<and> bl \<in># \<B>) \<Longrightarrow> proper_design \<V> \<B>\<^sup>C
goal (1 subgoal):
1. proper_design \<V> \<B>\<^sup>C
[PROOF STEP]
by (simp add: assms)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)
[PROOF STEP]
using complement_rep_number rep_number
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> \<V>; \<B> rep ?x = ?r\<rbrakk> \<Longrightarrow> \<B>\<^sup>C rep ?x = \<b> - ?r
?x \<in> \<V> \<Longrightarrow> \<B> rep ?x = \<r>
goal (1 subgoal):
1. constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)
[PROOF STEP]
by (unfold_locales) simp
[PROOF STATE]
proof (state)
this:
constant_rep_design \<V> \<B>\<^sup>C (\<b> - \<r>)
goal:
No subgoals!
[PROOF STEP]
qed |
open import Function using (case_of_; _∘_)
open import Data.List using (List; _++_; map) renaming (_∷_ to _,_; _∷ʳ_ to _,′_; [] to ∅)
open import Data.List.Properties using (map-++-commute)
open import Data.Product using () renaming (_×_ to _x'_)
open import Relation.Binary.PropositionalEquality as PropEq using (_≡_; refl; sym; cong)
module LinearLogic (U : Set) (R : U) (⟦_⟧ᵁ : U → Set) where
infixr 40 ¬_
infix 30 _⊗_
infixr 20 _⊸_
infix 4 _⊢_
data Type : Set where
el : (A : U) → Type
⊥ : Type
_⊗_ : Type → Type → Type
_⊸_ : Type → Type → Type
data _⊢_ : ∀ (X : List Type) (A : Type) → Set where
var : ∀ {A} → A , ∅ ⊢ A
abs : ∀ {X A B} → A , X ⊢ B → X ⊢ A ⊸ B
app : ∀ {X Y A B} → X ⊢ A ⊸ B → Y ⊢ A → X ++ Y ⊢ B
pair : ∀ {X Y A B} → X ⊢ A → Y ⊢ B → X ++ Y ⊢ A ⊗ B
case : ∀ {X Y A B C } → X ⊢ A ⊗ B → A , B , Y ⊢ C → X ++ Y ⊢ C
exch : ∀ {X Y Z W A} → (X ++ Z) ++ (Y ++ W) ⊢ A
→ (X ++ Y) ++ (Z ++ W) ⊢ A
¬_ : Type → Type
¬ A = A ⊸ ⊥
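-- Special case of exch: swap the two front hypotheses of the context.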
exch₀ : ∀ {A B C X} → A , B , X ⊢ C → B , A , X ⊢ C
exch₀ {A} {B} {X = X} t = exch {∅} {B , ∅} {A , ∅} {X} t
swap : ∀ {A B} → ∅ ⊢ A ⊗ B ⊸ B ⊗ A
swap {A} {B} = abs (case var (exch₀ (pair var var)))
raise : ∀ {A B X} → X ⊢ A → X ⊢ (A ⊸ B) ⊸ B
raise t = abs (app var t)
++-assoc : ∀ {a} {A : Set a} (X Y Z : List A) → X ++ (Y ++ Z) ≡ (X ++ Y) ++ Z
++-assoc ∅ Y Z = refl
++-assoc (x , X) Y Z = cong (_,_ x) (++-assoc X Y Z)
xs++[]=xs : ∀ {a} {A : Set a} (xs : List A) → xs ++ ∅ ≡ xs
xs++[]=xs ∅ = refl
xs++[]=xs (x , xs) = cong (_,_ x) (xs++[]=xs xs)
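-- Move the front hypothesis to the end of the context (to-back is the inverse).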
to-front : ∀ {X A B} → A , X ⊢ B → X ,′ A ⊢ B
to-front {X} {A} {B} t = lem1 lem2
where
lem1 : A , (X ++ ∅) ⊢ B → X ,′ A ⊢ B
lem1 = exch {∅} {X} {A , ∅} {∅}
lem2 : A , (X ++ ∅) ⊢ B
lem2 rewrite xs++[]=xs X = t
to-back : ∀ {X A B} → X ,′ A ⊢ B → A , X ⊢ B
to-back {X} {A} {B} t = lem2
where
lem1 : A , X ++ ∅ ⊢ B
lem1 = exch {∅} {A , ∅} {X} {∅} t
lem2 : A , X ⊢ B
lem2 rewrite sym (xs++[]=xs (A , X)) = lem1
YX↝XY : ∀ {A} X Y → Y ++ X ⊢ A → X ++ Y ⊢ A
YX↝XY {A} X Y t = lem₃
where
lem₁ : Y ++ X ++ ∅ ⊢ A
lem₁ rewrite xs++[]=xs X = t
lem₂ : X ++ Y ++ ∅ ⊢ A
lem₂ = exch {∅} {X} {Y} {∅} lem₁
lem₃ : X ++ Y ⊢ A
lem₃ = PropEq.subst (λ Y → X ++ Y ⊢ A) (xs++[]=xs Y) lem₂
Y[XZ]↝X[YZ] : ∀ {A} X Y Z → Y ++ (X ++ Z) ⊢ A → X ++ (Y ++ Z) ⊢ A
Y[XZ]↝X[YZ] {A} X Y Z t = exch {∅} {X} {Y} {Z} t
[YX]Z↝[XY]Z : ∀ {A} X Y Z → (Y ++ X) ++ Z ⊢ A → (X ++ Y) ++ Z ⊢ A
[YX]Z↝[XY]Z {A} X Y Z t = lem₃
where
lem₁ : Y ++ (X ++ Z) ⊢ A
lem₁ rewrite ++-assoc Y X Z = t
lem₂ : X ++ (Y ++ Z) ⊢ A
lem₂ = Y[XZ]↝X[YZ] X Y Z lem₁
lem₃ : (X ++ Y) ++ Z ⊢ A
lem₃ rewrite sym (++-assoc X Y Z) = lem₂
[XZ]Y↝[XY]Z : ∀ {A} X Y Z → (X ++ Z) ++ Y ⊢ A → (X ++ Y) ++ Z ⊢ A
[XZ]Y↝[XY]Z {A} X Y Z t = lem₃
where
lem₁ : (X ++ Z) ++ Y ++ ∅ ⊢ A
lem₁ rewrite xs++[]=xs Y = t
lem₂ : (X ++ Y) ++ Z ++ ∅ ⊢ A
lem₂ = exch {X} {Y} {Z} {∅} lem₁
lem₃ : (X ++ Y) ++ Z ⊢ A
lem₃ = PropEq.subst (λ Z → (X ++ Y) ++ Z ⊢ A) (xs++[]=xs Z) lem₂
X[ZY]↝X[YZ] : ∀ {A} X Y Z → X ++ (Z ++ Y) ⊢ A → X ++ (Y ++ Z) ⊢ A
X[ZY]↝X[YZ] {A} X Y Z t = lem₃
where
lem₁ : (X ++ Z) ++ Y ⊢ A
lem₁ rewrite sym (++-assoc X Z Y) = t
lem₂ : (X ++ Y) ++ Z ⊢ A
lem₂ = [XZ]Y↝[XY]Z X Y Z lem₁
lem₃ : X ++ Y ++ Z ⊢ A
lem₃ rewrite ++-assoc X Y Z = lem₂
XYZW↝XWZY : ∀ {A} X Y Z W → (X ++ Y) ++ (Z ++ W) ⊢ A → (X ++ W) ++ (Z ++ Y) ⊢ A
XYZW↝XWZY {A} X Y Z W t = lem₃
where
lem₁ : (X ++ Y) ++ (W ++ Z) ⊢ A
lem₁ = X[ZY]↝X[YZ] (X ++ Y) W Z t
lem₂ : (X ++ W) ++ (Y ++ Z) ⊢ A
lem₂ = exch {X} {W} {Y} {Z} lem₁
lem₃ : (X ++ W) ++ (Z ++ Y) ⊢ A
lem₃ = X[ZY]↝X[YZ] (X ++ W) Z Y lem₂
XYZW↝YWXZ : ∀ {A} X Y Z W → (X ++ Y) ++ (Z ++ W) ⊢ A → (Y ++ W) ++ (X ++ Z) ⊢ A
XYZW↝YWXZ {A} X Y Z W t = lem₃
where
lem₁ : (Y ++ X) ++ (Z ++ W) ⊢ A
lem₁ = [YX]Z↝[XY]Z Y X (Z ++ W) t
lem₂ : (Y ++ X) ++ (W ++ Z) ⊢ A
lem₂ = X[ZY]↝X[YZ] (Y ++ X) W Z lem₁
lem₃ : (Y ++ W) ++ (X ++ Z) ⊢ A
lem₃ = exch {Y} {W} {X} {Z} lem₂
XYZW↝ZXWY : ∀ {A} X Y Z W → (X ++ Y) ++ (Z ++ W) ⊢ A → (Z ++ X) ++ (W ++ Y) ⊢ A
XYZW↝ZXWY {A} X Y Z W t = lem₃
where
lem₁ : (X ++ Z) ++ (Y ++ W) ⊢ A
lem₁ = exch {X} {Z} {Y} {W} t
lem₂ : (Z ++ X) ++ (Y ++ W) ⊢ A
lem₂ = [YX]Z↝[XY]Z Z X (Y ++ W) lem₁
lem₃ : (Z ++ X) ++ (W ++ Y) ⊢ A
lem₃ = X[ZY]↝X[YZ] (Z ++ X) W Y lem₂
XYZW↝ZYXW : ∀ {A} X Y Z W → (X ++ Y) ++ (Z ++ W) ⊢ A → (Z ++ Y) ++ (X ++ W) ⊢ A
XYZW↝ZYXW {A} X Y Z W t = lem₃
where
lem₁ : (Y ++ X) ++ (Z ++ W) ⊢ A
lem₁ = [YX]Z↝[XY]Z Y X (Z ++ W) t
lem₂ : (Y ++ Z) ++ (X ++ W) ⊢ A
lem₂ = exch {Y} {Z} {X} {W} lem₁
lem₃ : (Z ++ Y) ++ (X ++ W) ⊢ A
lem₃ = [YX]Z↝[XY]Z Z Y (X ++ W) lem₂
pair-left : ∀ {X A B C} → A , B , X ⊢ C → A ⊗ B , X ⊢ C
pair-left t = case var t
pair-left′ : ∀ {X A B C} → X ++ (A , B , ∅) ⊢ C → X ,′ A ⊗ B ⊢ C
pair-left′ {X} {A} {B} {C} = lem₃
where
lem₁ : X ,′ A ,′ B ⊢ C → X ,′ A ⊗ B ⊢ C
lem₁ t = to-front (pair-left (to-back {B , X} {A} (to-back {X ,′ A} {B} t)))
lem₂ : ∀ {a} {A : Set a} xs (y z : A) → xs ,′ y ,′ z ≡ xs ++ (y , z , ∅)
lem₂ ∅ y z = refl
lem₂ (x , xs) y z = cong (_,_ x) (lem₂ xs y z)
lem₃ : X ++ (A , B , ∅) ⊢ C → X ,′ A ⊗ B ⊢ C
lem₃ rewrite sym (lem₂ X A B) = lem₁
open import IntuitionisticLogic U ⟦_⟧ᵁ as IL renaming (Type to TypeIL; _⊗_ to _×_)
open IL.Explicit
hiding (swap; swap′)
renaming (_⊢_ to _⊢IL_; ReifyType to ReifyTypeIL; ReifyCtxt to ReiftCtxtIL; [_] to reifyIL)
instance
ReifyType : Reify Type TypeIL
ReifyType = record { ⟦_⟧ = ⟦_⟧ }
where
⟦_⟧ : Type → TypeIL
⟦ ⊥ ⟧ = el R
⟦ el A ⟧ = el A
⟦ A ⊗ B ⟧ = ⟦ A ⟧ × ⟦ B ⟧
⟦ A ⊸ B ⟧ = ⟦ A ⟧ ⇒ ⟦ B ⟧
open Reify {{...}} using (⟦_⟧)
instance
ReifyCtxt : Reify (List Type) (List TypeIL)
ReifyCtxt = record { ⟦_⟧ = map ⟦_⟧ }
⟦X++Y⟧=⟦X⟧++⟦Y⟧ : (X Y : List Type) → ⟦ X ++ Y ⟧ ≡ ⟦ X ⟧ ++ ⟦ Y ⟧
⟦X++Y⟧=⟦X⟧++⟦Y⟧ X Y = map-++-commute ⟦_⟧ X Y
toIL : ∀ {X A} → X ⊢ A → ⟦ X ⟧ ⊢IL ⟦ A ⟧
toIL var = var
toIL (abs t) = abs (toIL t)
toIL (app {X} {Y} s t) rewrite ⟦X++Y⟧=⟦X⟧++⟦Y⟧ X Y = app (toIL s) (toIL t)
toIL (pair {X} {Y} s t) rewrite ⟦X++Y⟧=⟦X⟧++⟦Y⟧ X Y = pair (toIL s) (toIL t)
toIL (case {X} {Y} s t) rewrite ⟦X++Y⟧=⟦X⟧++⟦Y⟧ X Y = case (toIL s) (toIL t)
toIL (exch {X} {Y} {Z} {W} {A} t) = lem4
where
lem1 : ⟦ (X ++ Z) ++ (Y ++ W) ⟧ ⊢IL ⟦ A ⟧
lem1 = toIL t
lem2 : (⟦ X ⟧ ++ ⟦ Z ⟧) ++ (⟦ Y ⟧ ++ ⟦ W ⟧) ⊢IL ⟦ A ⟧
lem2 rewrite sym (⟦X++Y⟧=⟦X⟧++⟦Y⟧ X Z)
| sym (⟦X++Y⟧=⟦X⟧++⟦Y⟧ Y W)
| sym (⟦X++Y⟧=⟦X⟧++⟦Y⟧ (X ++ Z) (Y ++ W)) = lem1
lem3 : (⟦ X ⟧ ++ ⟦ Y ⟧) ++ (⟦ Z ⟧ ++ ⟦ W ⟧) ⊢IL ⟦ A ⟧
lem3 = exch {⟦ X ⟧} {⟦ Y ⟧} {⟦ Z ⟧} {⟦ W ⟧} lem2
lem4 : ⟦ (X ++ Y) ++ (Z ++ W) ⟧ ⊢IL ⟦ A ⟧
lem4 rewrite ⟦X++Y⟧=⟦X⟧++⟦Y⟧ (X ++ Y) (Z ++ W)
| ⟦X++Y⟧=⟦X⟧++⟦Y⟧ X Y
| ⟦X++Y⟧=⟦X⟧++⟦Y⟧ Z W = lem3
[_] : {A : Type} {X : List Type} → X ⊢ A → (Ctxt ⟦ ⟦ X ⟧ ⟧ → ⟦ ⟦ A ⟧ ⟧)
[_] = reifyIL ∘ toIL
swap′ : {A B : Type} → ⟦ ⟦ A ⟧ ⟧ x' ⟦ ⟦ B ⟧ ⟧ → ⟦ ⟦ B ⟧ ⟧ x' ⟦ ⟦ A ⟧ ⟧
swap′ {A} {B} = [ swap {A} {B} ] ∅
|
-- Example of a recursive type.
data MyNat = Z | S MyNat
-- Example of a union type.
||| Represents shapes.
data Shape = ||| A triangle, with its base length and height
Triangle Double Double
| ||| A rectangle, with its length and height
Rectangle Double Double
| ||| A circle, with its radius
Circle Double
area : Shape -> Double
area (Triangle base height) = 0.5 * base * height
area (Rectangle length height) = length * height
area (Circle radius) = pi * radius * radius
data Picture = ||| Uses the Shape type defined as the primitive.
Primitive Shape
| ||| Builds a picture by combining two smaller pictures.
Combine Picture Picture
| ||| Builds a picture by rotating another picture through an angle.
Rotate Double Picture
| ||| Builds a picture by moving to another location.
Translate Double Double Picture
rectangle : Picture
rectangle = Primitive (Rectangle 20 10)
circle : Picture
circle = Primitive (Circle 5)
triangle : Picture
triangle = Primitive (Triangle 10 10)
testPicture : Picture
testPicture = Combine (Translate 5 5 rectangle)
(Combine (Translate 35 5 circle)
(Translate 15 25 triangle))
%name Shape shape, shape1, shape2
%name Picture pic, pic1, pic2
pictureArea : Picture -> Double
pictureArea (Primitive shape) = area shape
pictureArea (Combine pic1 pic2) = pictureArea pic1 + pictureArea pic2
pictureArea (Rotate x pic) = pictureArea pic
pictureArea (Translate x y pic) = pictureArea pic
maybeMax : Ord a => Maybe a -> Maybe a -> Maybe a
maybeMax Nothing Nothing = Nothing
maybeMax Nothing (Just x) = Just x
maybeMax (Just x) Nothing = Just x
maybeMax (Just x) (Just y) = Just $ max x y
biggestTriangle : Picture -> Maybe Double
biggestTriangle (Primitive tri@(Triangle x y)) = Just $ area tri
biggestTriangle (Primitive _) = Nothing
biggestTriangle (Combine pic1 pic2) = maybeMax (biggestTriangle pic1) (biggestTriangle pic2)
biggestTriangle (Rotate x pic) = biggestTriangle pic
biggestTriangle (Translate x y pic) = biggestTriangle pic
|
Formal statement is: lemma degree_add_le_max: "degree (p + q) \<le> max (degree p) (degree q)" Informal statement is: The degree of a sum of two polynomials is less than or equal to the maximum of the degrees of the two polynomials. |
[STATEMENT]
lemma unital_quantale_homset_iff: "f \<in> unital_quantale_homset = (comp_pres f \<and> Sup_pres f \<and> un_pres f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f \<in> unital_quantale_homset) = (comp_pres f \<and> Sup_pres f \<and> un_pres f)
[PROOF STEP]
unfolding unital_quantale_homset_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f \<in> {f. comp_pres f \<and> Sup_pres f \<and> un_pres f}) = (comp_pres f \<and> Sup_pres f \<and> un_pres f)
[PROOF STEP]
by clarsimp |
include("setup.jl")
@testset "Downloads.jl" begin
@testset "API coverage" begin
value = "Julia is great!"
base64 = "SnVsaWEgaXMgZ3JlYXQh"
url = "$server/base64/$base64"
headers = ["Foo" => "Bar"]
# test with one argument
path = Downloads.download(url)
@test isfile(path)
@test value == read(path, String)
rm(path)
# with headers
path = Downloads.download(url, headers=headers)
@test isfile(path)
@test value == read(path, String)
rm(path)
# test with two arguments
arg_writers() do path, output
@arg_test output begin
@test output == Downloads.download(url, output)
end
@test isfile(path)
@test value == read(path, String)
rm(path)
# with headers
@arg_test output begin
@test output == Downloads.download(url, output, headers=headers)
end
@test isfile(path)
@test value == read(path, String)
rm(path)
end
end
@testset "get request" begin
url = "$server/get"
data = download_json(url)
@test "url" in keys(data)
@test data["url"] == url
end
@testset "headers" begin
url = "$server/headers"
@testset "set headers" begin
headers = ["Foo" => "123", "Header" => "VaLuE", "Empty" => ""]
data = download_json(url, headers)
@test "headers" in keys(data)
headers′ = data["headers"]
for (key, value) in headers
@test header(headers′, key) == value
end
@test header(headers′, "Accept") == "*/*"
end
@testset "override default header" begin
headers = ["Accept" => "application/tar"]
data = download_json(url, headers)
@test "headers" in keys(data)
headers′ = data["headers"]
@test header(headers′, "Accept") == "application/tar"
end
@testset "override default header with empty value" begin
headers = ["Accept" => ""]
data = download_json(url, headers)
@test "headers" in keys(data)
headers′ = data["headers"]
@test header(headers′, "Accept") == ""
end
@testset "delete default header" begin
headers = ["Accept" => nothing]
data = download_json(url, headers)
@test "headers" in keys(data)
headers′ = data["headers"]
@test !("Accept" in keys(headers′))
end
end
@testset "errors" begin
@test_throws ArgumentError Downloads.download("ba\0d")
@test_throws ArgumentError Downloads.download("good", "ba\0d")
err = @exception Downloads.download("xyz://invalid")
@test err isa ErrorException
@test startswith(err.msg, "Protocol \"xyz\" not supported")
err = @exception Downloads.download("https://invalid")
@test err isa ErrorException
@test startswith(err.msg, "Could not resolve host")
err = @exception Downloads.download("$server/status/404")
@test err isa ErrorException
@test contains(err.msg, r"^HTTP/\d+(?:\.\d+)?\s+404\b")
path = tempname()
@test_throws ErrorException Downloads.download("$server/status/404", path)
@test !ispath(path)
end
@testset "concurrent requests" begin
delay = 2
count = 100
url = "$server/delay/$delay"
t = @elapsed @sync for id = 1:count
@async begin
data = download_json("$url?id=$id")
@test "args" in keys(data)
@test get(data["args"], "id", nothing) == ["$id"]
end
end
@test t < 0.9*count*delay
end
@testset "referer" begin
dest = "$server/headers"
url = "$server/redirect-to?url=$(url_escape(dest))"
data = download_json(url)
@test "headers" in keys(data)
headers′ = data["headers"]
@test header(headers′, "Referer") == url
end
@testset "request API" begin
@testset "basic request usage" begin
for status in (200, 300, 400)
url = "$server/status/$status"
resp = request_body(multi, url)[1]
@test resp.url == url
@test resp.status == status
test_response_string(resp.response, status)
@test all(hdr isa Pair{String,String} for hdr in resp.headers)
headers = Dict(resp.headers)
@test "content-length" in keys(headers)
end
end
@testset "custom headers" begin
url = "$server/response-headers?FooBar=VaLuE"
resp, data = request_body(multi, url)
@test resp.url == url
@test resp.status == 200
test_response_string(resp.response, 200)
headers = Dict(resp.headers)
@test "foobar" in keys(headers)
@test headers["foobar"] == "VaLuE"
end
@testset "url for redirect" begin
dest = "$server/headers"
url = "$server/redirect-to?url=$(url_escape(dest))"
resp, data = request_json(multi, url)
@test resp.url == dest
@test resp.status == 200
test_response_string(resp.response, 200)
@test "headers" in keys(data)
headers′ = data["headers"]
@test header(headers′, "Referer") == url
end
@testset "progress" begin
# https://httpbingo.org/drip doesn't work
# see https://github.com/mccutchen/go-httpbin/issues/40
# fixed, but their deployed setup is still broken
url = "https://httpbin.org/drip"
progress = Downloads.Curl.Progress[]
req = Request(devnull, url, String[])
Downloads.request(req, multi, p -> push!(progress, p))
unique!(progress)
@test 11 ≤ length(progress) ≤ 12
shift = length(progress) - 10
@test all(p.dl_total == (i==1 ? 0 : 10) for (i, p) in enumerate(progress))
@test all(p.dl_now == max(0, i-shift) for (i, p) in enumerate(progress))
end
end
end
|
module Oscar.Category.Morphism where
open import Oscar.Category.Setoid
open import Oscar.Level
open import Oscar.Property
open import Oscar.Data.Nat
record Morphism
{𝔬} (⋆ : Set 𝔬) 𝔪 𝔮
: Set (𝔬 ⊔ lsuc (𝔪 ⊔ 𝔮))
where
constructor #_
field
_⇒_ : ⋆ → ⋆ → Setoid 𝔪 𝔮
_↦_ : ⋆ → ⋆ → Set 𝔪
_↦_ x y = Setoid.⋆ (x ⇒ y)
infix 4 _≞_
_≞_ : ∀ {x y} → x ↦ y → x ↦ y → Set 𝔮
_≞_ {x} {y} = Setoid._≋_ (x ⇒ y)
instance IsSetoid↦ : ∀ {x y} → IsSetoid (_≞_ {x} {y})
IsSetoid↦ {x} {y} = Setoid.isSetoid (x ⇒ y)
-- IsSetoid↦ : ∀ {x y} → IsSetoid (x ↦ y) 𝔮
-- IsSetoid↦ {x} {y} = Setoid.isSetoid (x ⇒ y)
-- ⦃ isMorphism ⦄ : IsMorphism (λ {x} {y} → _≞_ {x} {y})
-- record Morphism 𝔬 𝔪 𝔮 : Set (lsuc (𝔬 ⊔ 𝔪 ⊔ 𝔮)) where
-- constructor ↑_
-- infix 4 _≞_
-- field
-- {⋆} : Set 𝔬
-- {_↦_} : ⋆ → ⋆ → Set 𝔪
-- _≞_ : ∀ {x y} → x ↦ y → x ↦ y → Set 𝔮
-- ⦃ isSetoid ⦄ : ∀ {x} {y} → IsSetoid (_≞_ {x} {y})
-- instance IsSetoid↦ : ∀ {x y} → IsSetoid (_≞_ {x} {y})
-- IsSetoid↦ {x} {y} = Setoid.isSetoid (x ⇒ y)
-- setoid : ∀ {x y} → Setoid 𝔪 𝔮
-- setoid {x} {y} = ↑ _≞_ {x} {y}
|
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
⊢ lowerInv hS = (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))ᵀ
[PROOFSTEP]
letI := NormedAddCommGroup.ofMatrix hS.transpose
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
this : NormedAddCommGroup (n → 𝕜) := NormedAddCommGroup.ofMatrix (_ : PosDef Sᵀ)
⊢ lowerInv hS = (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))ᵀ
[PROOFSTEP]
letI := InnerProductSpace.ofMatrix hS.transpose
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
this✝ : NormedAddCommGroup (n → 𝕜) := NormedAddCommGroup.ofMatrix (_ : PosDef Sᵀ)
this : InnerProductSpace 𝕜 (n → 𝕜) := InnerProductSpace.ofMatrix (_ : PosDef Sᵀ)
⊢ lowerInv hS = (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))ᵀ
[PROOFSTEP]
ext i j
[GOAL]
case a.h
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
this✝ : NormedAddCommGroup (n → 𝕜) := NormedAddCommGroup.ofMatrix (_ : PosDef Sᵀ)
this : InnerProductSpace 𝕜 (n → 𝕜) := InnerProductSpace.ofMatrix (_ : PosDef Sᵀ)
i j : n
⊢ lowerInv hS i j = (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))ᵀ i j
[PROOFSTEP]
rw [LDL.lowerInv, Basis.coePiBasisFun.toMatrix_eq_transpose, coe_gramSchmidtBasis]
[GOAL]
case a.h
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
this✝ : NormedAddCommGroup (n → 𝕜) := NormedAddCommGroup.ofMatrix (_ : PosDef Sᵀ)
this : InnerProductSpace 𝕜 (n → 𝕜) := InnerProductSpace.ofMatrix (_ : PosDef Sᵀ)
i j : n
⊢ gramSchmidt 𝕜 (↑(Pi.basisFun 𝕜 n)) i j = (gramSchmidt 𝕜 ↑(Pi.basisFun 𝕜 n))ᵀᵀ i j
[PROOFSTEP]
rfl
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
⊢ Invertible (lowerInv hS)
[PROOFSTEP]
rw [LDL.lowerInv_eq_gramSchmidtBasis]
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
⊢ Invertible (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))ᵀ
[PROOFSTEP]
haveI :=
Basis.invertibleToMatrix (Pi.basisFun 𝕜 n)
(@gramSchmidtBasis 𝕜 (n → 𝕜) _ (_ : _) (InnerProductSpace.ofMatrix hS.transpose) n _ _ _ (Pi.basisFun 𝕜 n))
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
this : Invertible (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))
⊢ Invertible (Basis.toMatrix (Pi.basisFun 𝕜 n) ↑(gramSchmidtBasis (Pi.basisFun 𝕜 n)))ᵀ
[PROOFSTEP]
infer_instance
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
hij : i < j
⊢ lowerInv hS i j = 0
[PROOFSTEP]
rw [←
@gramSchmidt_triangular 𝕜 (n → 𝕜) _ (_ : _) (InnerProductSpace.ofMatrix hS.transpose) n _ _ _ i j hij
(Pi.basisFun 𝕜 n),
Pi.basisFun_repr, LDL.lowerInv]
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
⊢ diag hS = lowerInv hS * S * (lowerInv hS)ᴴ
[PROOFSTEP]
ext i j
[GOAL]
case a.h
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
⊢ diag hS i j = (lowerInv hS * S * (lowerInv hS)ᴴ) i j
[PROOFSTEP]
by_cases hij : i = j
[GOAL]
case pos
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
hij : i = j
⊢ diag hS i j = (lowerInv hS * S * (lowerInv hS)ᴴ) i j
[PROOFSTEP]
simp only [diag, diagEntries, EuclideanSpace.inner_piLp_equiv_symm, star_star, hij, diagonal_apply_eq, Matrix.mul_assoc]
[GOAL]
case pos
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
hij : i = j
⊢ lowerInv hS j ⬝ᵥ mulVec S (star (lowerInv hS j)) = (lowerInv hS * (S * (lowerInv hS)ᴴ)) j j
[PROOFSTEP]
rfl
[GOAL]
case neg
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
hij : ¬i = j
⊢ diag hS i j = (lowerInv hS * S * (lowerInv hS)ᴴ) i j
[PROOFSTEP]
simp only [LDL.diag, hij, diagonal_apply_ne, Ne.def, not_false_iff, mul_mul_apply]
[GOAL]
case neg
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
hij : ¬i = j
⊢ 0 = lowerInv hS i ⬝ᵥ mulVec S ((lowerInv hS)ᴴᵀ j)
[PROOFSTEP]
rw [conjTranspose, transpose_map, transpose_transpose, dotProduct_mulVec,
(LDL.lowerInv_orthogonal hS fun h : j = i => hij h.symm).symm, ← inner_conj_symm, mulVec_transpose,
EuclideanSpace.inner_piLp_equiv_symm, ← IsROrC.star_def, ← star_dotProduct_star, dotProduct_comm, star_star]
[GOAL]
case neg
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
i j : n
hij : ¬i = j
⊢ vecMul (lowerInv hS i) S ⬝ᵥ star (lowerInv hS j) = vecMul (lowerInv hS i) S ⬝ᵥ map (lowerInv hS) star j
[PROOFSTEP]
rfl
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
⊢ lower hS * diag hS * (lower hS)ᴴ = S
[PROOFSTEP]
rw [LDL.lower, conjTranspose_nonsing_inv, Matrix.mul_assoc,
Matrix.inv_mul_eq_iff_eq_mul_of_invertible (LDL.lowerInv hS), Matrix.mul_inv_eq_iff_eq_mul_of_invertible]
[GOAL]
𝕜 : Type u_1
inst✝⁴ : IsROrC 𝕜
n : Type u_2
inst✝³ : LinearOrder n
inst✝² : IsWellOrder n fun x x_1 => x < x_1
inst✝¹ : LocallyFiniteOrderBot n
S : Matrix n n 𝕜
inst✝ : Fintype n
hS : PosDef S
⊢ diag hS = lowerInv hS * S * (lowerInv hS)ᴴ
[PROOFSTEP]
exact LDL.diag_eq_lowerInv_conj hS
|
The Lakers' Lamar Odom and teammate Ron Artest guard Rockets guard Trevor Ariza during the first half.
Rockets guard Trevor Ariza gets a hug from the Lakers' Kobe Bryant before the start of the game.
The Lakers' Ron Artest guards Rockets guard Trevor Ariza during the first half.
The Lakers' Ron Artest dives for a loose ball as Rockets forward Luis Scola looks on during the first half.
The Lakers' Ron Artest tries to get a hand on the ball as Rockets guard Trevor Ariza makes his way around during the first half.
The Lakers' Ron Artest points at Rockets guard Trevor Ariza during the first half.
The Lakers' Ron Artest looks onto the court from the bench during the first half.
Lakers guard Kobe Bryant guards Rockets forward Trevor Ariza during the first half.
Rockets forward Trevor Ariza goes up for a lay up in traffic during the first half.
Lakers forward Ron Artest defends against Rockets guard Aaron Brooks during the first half.
Lakers guard Kobe Bryant flies to the basket with Rockets forward Shane Battier trailing during the second half.
Rockets forward Luis Scola drives around the Lakers' Luke Walton during the second half.
The Rockets' Aaron Brooks jumps up as the crowd goes wild after one of his three-point shots during the second half.
The Lakers' Ron Artest battles with the Rockets' Chuck Hayes for a loose ball during the second half.
Rockets guard Aaron Brooks drives around the Lakers' Derek Fisher and Lamar Odom during the second half.
Rockets forward Trevor Ariza drives up the court against the Lakers' Lamar Odom during the second half.
The Rockets' Kyle Lowry drives up the court against the Lakers' Derek Fisher during the second half.
The Lakers' Ron Artest with his haircut during the second half.
Lakers guard Kobe Bryant flies to the basket with Rockets forward Luis Scola trailing during the second half.
Rockets forward Trevor Ariza goes up for a basket during the second half.
The Lakers' Kobe Bryant gets a shot off as the Rockets' Shane Battier tries to defend during the second half.
The Lakers' Kobe Bryant bumps into the Rockets' Shane Battier during the second half.
The Lakers' Kobe Bryant booed by fans during a free throw during the second half.
Lakers forward Ron Artest fights Rockets forward Chuck Hayes for the ball in the fourth quarter.
Lakers forward Ron Artest puts a hand in the face of Rockets forward Carl Landry during the second half.
Rockets guard Trevor Ariza celebrates with Pops Mensah-Bonsu after his three-point shot during the last seconds of the second half.
The Rockets' Trevor Ariza tries to get his hands on a ball held by the Lakers' Ron Artest during the second half. |
% ProgGuide.tex: Sedna Programmer's Guide
% Copyright (C) 2010 ISP RAS
% The Institute for System Programming of the Russian Academy of Sciences
\documentclass[a4paper,12pt]{article}
\usepackage{alltt} % Like verbatim but supports commands inside
\usepackage{theorem}
\newtheorem{note}{Note} % To insert notes
\usepackage{multirow} % Allows inserting tables
\usepackage{ifpdf} % Package for conditionals in TeX
\newcommand{\TocAt}[6]{} % To avoid processing \TocAt by LaTeX
\usepackage[T1]{fontenc}
\title{Sedna Programmer's Guide}
\date{}
% Switch for between PDF and other formats to generate bookmarks,
% pretty table of contents and set document's information in PDF
\ifpdf
\usepackage[colorlinks=true, linkcolor=blue,
citecolor=blue, urlcolor=blue,
pdftex, %%% hyper-references for pdflatex
bookmarks=true, %%% generate bookmarks ...
bookmarksnumbered=true %%% ... with numbers
]{hyperref}
\pdfadjustspacing=1
\hypersetup{
pdfauthor = {Sedna Team},
pdftitle = {Sedna Programmer's Guide}
}
\else
\usepackage[colorlinks=true, linkcolor=blue,
citecolor=blue, urlcolor=blue]{hyperref}
\fi
% Use citemize environment to produce tightly packed lists
\newenvironment{citemize}
{\begin{itemize}
\setlength{\itemsep}{0pt}
\setlength{\parskip}{0pt}
\setlength{\parsep}{0pt}}
{\end{itemize}}
%===============================================================================
% Sedna Programmer's Guide: Client APIs: Java API
%===============================================================================
\begin{document}
\sloppy
\maketitle
\TocAt*{section,subsection,subsubsection}
\TocAt*{subsection,subsubsection}
\tableofcontents
\newpage
\section{Client Application Programming Interfaces}
The Sedna client application programming interfaces (APIs) provide programmatic
access to Sedna from client applications developed in host programming
languages. This section describes the client APIs distributed with Sedna.
\subsection{Java API}
The Java API provides programmatic access to XML data from the Java programming
language. Using the Java API, applications written in Java can access one or
more databases of the Sedna DBMS and manipulate database data using the database
language described in Section \ref{sec:DBLang}.
An application working with the Sedna DBMS through the Java API operates with
the following Java API notions: sessions, transactions, statements and results.
\subsubsection{Sessions}
To start working with Sedna application has to open a session via establishing
an authenticated connection with the server. The Java API defines the
\verb!SednaConnection! interface to represent a session.
To open a session application uses static method \verb!getConnection! of the
\verb!DatabaseManager! class.
\small{
\begin{verbatim}
SednaConnection getConnection(String url,
String DBName,
String user,
String password)
throws DriverException
\end{verbatim}
}
Parameters:
\verb!url! - the name of the computer where the Sedna DBMS is running. This
parameter may contain a port number. If the port number is not specified, the
default port number (5050) is used.
\verb!DBName! - the name of the database to connect.
\verb!user! - user name.
\verb!password! - user password.
If the connection is established and authentication succeeds, the method returns
an object that implements the \verb!SednaConnection! interface. Otherwise,
\verb!DriverException! is thrown.
An application can close a session using the \verb!close! method of the
\verb!SednaConnection! interface.
\begin{verbatim}
public void close() throws DriverException
\end{verbatim}
If the server does not manage to close the connection properly, the \verb!close!
method throws \verb!DriverException!.
The \verb!isClosed! method retrieves whether this connection has been closed or
not. A connection is closed if the method \verb!close! has been called on it or
if certain fatal errors have occurred.
\begin{verbatim}
public boolean isClosed()
\end{verbatim}
Setting the connection into debug mode allows getting debug information when an
XQuery query fails (see \ref{debug-mode} for details). To set the
connection into debug mode, use the \verb!setDebugMode! method of the
\verb!SednaConnection! interface:
\begin{verbatim}
public void setDebugMode(boolean debug)
\end{verbatim}
Parameters:
\verb!debug! - set to \verb!true! to turn debug mode on; set to \verb!false! to
turn debug mode off. Debug mode is off by default. To get debug information,
use the \verb!getDebugInfo! method of the \verb!DriverException! class. See
\ref{java-example} for an example of usage.
Sedna supports the \verb!fn:trace! function for debugging purposes (see
\ref{trace} for details). By default trace output is included in the XQuery
query result. You can turn trace output on/off using the \verb!setTraceOutput!
method of the \verb!SednaConnection! interface:
\begin{verbatim}
public void setTraceOutput(boolean doTrace)
\end{verbatim}
Parameters:
\verb!doTrace! - set to \verb!true! to turn trace output on; set to \verb!false!
to turn trace output off. Trace output is on by default.
To limit transaction execution time use:
\begin{verbatim}
public void setQueryTimeout(int seconds)
\end{verbatim}
If set, for each subsequent query in the session, the transaction will be
rolled back if it runs longer than the timeout set. By default (value 0) there
is no timeout for transaction execution, that is, a transaction can be
executed as long as needed.
%===============================================================================
% Java API: Transactions Management
%===============================================================================
\subsubsection{Transactions Management}
An application can execute queries and updates against the specified database
only in the scope of a transaction. That is, once a session has been opened,
the application can begin a transaction, execute statements and commit (or
roll back) this transaction.
In a session transactions are processed sequentially. That is, the application
must commit a started transaction before beginning a new one.
To specify transaction boundaries, an application uses methods of the
\verb!SednaConnection! interface: \verb!begin!, \verb!commit!.
The \verb!begin! method begins a new transaction.
\begin{verbatim}
public void begin() throws DriverException
\end{verbatim}
If the transaction has not begun successfully, the \verb!begin! method throws
\verb!DriverException!.
The \verb!commit! method commits a transaction.
\begin{verbatim}
public void commit() throws DriverException
\end{verbatim}
If the transaction has not been committed successfully, the \verb!commit! method
throws \verb!DriverException!.
To roll back a transaction, the \verb!rollback! method is used.
\begin{verbatim}
public void rollback() throws DriverException
\end{verbatim}
If the transaction has not been rolled back successfully, the \verb!rollback! method
throws \verb!DriverException!.
The Java API does not provide an auto-commit mode for transactions. That is,
every transaction must be explicitly started (by means of \verb!begin!) and
committed (by means of \verb!commit!). If a session is closed (by means of
\verb!close!) before a transaction is committed, the server rolls back that
transaction, and \verb!close! throws \verb!DriverException!.
%===============================================================================
% Java API: Statements
%===============================================================================
\subsubsection{Statements}
The \verb!SednaStatement! interface provides methods for loading documents into
the database, executing statements of the database language defined in Section
\ref{sec:DBLang} and retrieving results that statements produce.
A \verb!SednaStatement! object is created using the \verb!createStatement! method
of the \verb!SednaConnection! interface:
\begin{verbatim}
public SednaStatement createStatement()
throws DriverException
\end{verbatim}
A \verb!SednaStatement! object may only be created on an open connection.
Otherwise, the \verb!createStatement! method throws \verb!DriverException!.
To load the document into the database use:
\begin{verbatim}
public void loadDocument(InputStream in,
String doc_name)
throws DriverException, IOException
\end{verbatim}
The \verb!in! parameter is an input stream to get the document from. The
\verb!doc_name! parameter is the name for this document in the database.
To load the document into the specified collection of the database use:
\begin{verbatim}
public void loadDocument(InputStream in,
String doc_name,
String col_name)
throws DriverException, IOException
\end{verbatim}
The \verb!in! parameter is an input stream to get the document from. The
\verb!doc_name! parameter is the name for this document in the database. The
\verb!col_name! parameter is the name of the collection to load the document
into.
To execute a statement the \verb!execute! methods of the \verb!SednaStatement!
are used.
\begin{verbatim}
public boolean execute(String queryText)
throws DriverException
\end{verbatim}
The \verb!queryText! parameter contains the text of the statement.
\begin{verbatim}
public boolean execute(InputStream in)
throws DriverException
\end{verbatim}
The \verb!in! parameter is some input stream to read an XQuery statement from.
Some statements (such as XQuery statements or the retrieve metadata command)
produce the result. In case of such statements, the \verb!execute! methods
return true and the result can be obtained as described in Section
\ref{sec:results}. In case of statements that do not produce the result (such as
updates or bulk load), the \verb!execute! methods return false.
The results of XQuery queries to the Sedna DBMS can be represented either in XML
or SXML~\cite{paper:sxml}. To specify the type of the result use the following
extended versions of the \verb!execute! method:
\begin{verbatim}
boolean execute(InputStream in,
ResultType resultType)
throws DriverException, IOException;
\end{verbatim}
\begin{verbatim}
boolean execute(String queryText,
ResultType resultType)
throws DriverException, IOException;
\end{verbatim}
The \verb!resultType! parameter is either \verb!ResultType.XML! or
\verb!ResultType.SXML!.
%===============================================================================
% Java API: Results
%===============================================================================
\subsubsection{Results}
\label{sec:results}
The \verb!SednaSerializedResult! interface represents a query execution result.
An application can obtain the result using the \verb!getSerializedResult!
method of the
\verb!SednaStatement! interface.
\begin{verbatim}
public SednaSerializedResult getSerializedResult()
\end{verbatim}
The result of the non-XQuery statement evaluation is retrieved as a sequence
with only one item, where the item is a string. For example, in case of the
retrieve descriptive schema command the result is a sequence with an item that
is a descriptive schema represented as a string. The result of the XQuery
statement evaluation is retrieved as a sequence of items, where every item is
represented as a string.
\begin{note}
The current Sedna version supports only sequential statement execution within
one connection. If an application executes a statement, it can no longer
iterate over the result of the previous statement.
\end{note}
An application can use the \verb!next! methods of the \verb!SednaSerializedResult!
interface to iterate over the result sequence:
\begin{verbatim}
public String next() throws DriverException
\end{verbatim}
The method returns an item of the result sequence as a string. If the sequence
has ended it returns null. It throws \verb!DriverException! in the case of
error.
\begin{verbatim}
public int next(Writer writer) throws DriverException
\end{verbatim}
The method writes an item of the result sequence to some output stream using
\verb!writer!. It returns 0 if an item was retrieved and written successfully, and
1 if the result sequence has ended. It throws \verb!DriverException! in the case
of error.
Both \verb!next()! methods prefetch each result item and cache it in memory
(as a \verb!String! object). In some cases results cannot be processed in this
way (e.g. a query that returns a document of hundreds of gigabytes). The
\verb!SednaStatement! interface provides a method to set a custom result
handler:
\begin{verbatim}
void setResultInterceptor(ResultInterceptor interceptor)
\end{verbatim}
and a method to remove the result interceptor:
\begin{verbatim}
public void resetResultInterceptor()
\end{verbatim}
With a custom result handler an application has direct access to the data
being returned by the Sedna server. The application therefore may decide to
redirect it completely to a file, transform it, filter it, etc. For details
and an example of the \verb!ResultInterceptor! see the API javadocs.
%===============================================================================
% Java API: Exceptions
%===============================================================================
\subsubsection{Exceptions}
The \verb!DriverException! class provides information about errors that occur
while an application works with the Sedna DBMS through the Java API.
\verb!DriverException! is also thrown when the application loses its connection
with
the server.
%===============================================================================
% Java API: Code Example
%===============================================================================
\subsubsection{Code Example}
\label{java-example}
In this section we provide an example program that uses the Java API to work
with the Sedna DBMS. The application connects to the Sedna DBMS and opens a
session. The session consists of one transaction. The application loads data
from the file \verb!region.xml! and executes an XQuery statement. When the
statement has been executed, the application drops the document, commits the
transaction and closes the session.
\small{
\begin{verbatim}
import ru.ispras.sedna.driver.*;
class Client {
public static void main(String args[]) {
SednaConnection con = null;
try {
/* Get a connection */
con = DatabaseManager.getConnection("localhost",
"testdb",
"SYSTEM",
"MANAGER");
/* Set session options before the begin() call. To make
* sure that options affect first transaction as well. */
con.setDebugMode(true);
/* Begin a new transaction */
con.begin();
/* Create statement */
SednaStatement st = con.createStatement();
/* Load XML into the database */
System.out.println("Loading data ...");
boolean res;
res = st.execute("LOAD 'C:/region.xml' 'region'");
System.out.println("Document 'region.xml' "+
"has been loaded successfully");
/* Execute query */
System.out.println("Executing query");
res = st.execute("doc('region')/*/*");
/* Print query results */
printQueryResults(st);
/* Remove document */
System.out.println("Removing document ...");
res = st.execute("DROP DOCUMENT 'region'");
System.out.println("Document 'region' " +
"has been dropped successfully");
/* Commit current transaction */
con.commit();
}
catch(DriverException e) {
e.printStackTrace();
System.out.println("Got debug info: " + e.getDebugInfo());
}
finally {
/* Properly close connection */
try { if(con != null) con.close(); }
catch(DriverException e) {
e.printStackTrace();
}
}
}
/* Pretty printing for query results */
private static void printQueryResults(SednaStatement st)
throws DriverException {
int count = 1;
String item;
SednaSerializedResult pr = st.getSerializedResult();
while ((item = pr.next()) != null) {
System.out.println(count + " item: " + item);
count++;
}
}
}
\end{verbatim}}
The full-version source code of this example program can be found at:
\begin{verbatim}
[win:] INSTALL_DIR\examples\api\java\Client.java
[nix:] INSTALL_DIR/examples/api/java/Client.java
\end{verbatim}
where \verb!INSTALL_DIR! refers to the directory where Sedna is installed.
Before running the example make sure that the Sedna DBMS is installed and do the
following steps:
\begin{enumerate}
\item Start Sedna by running the following command:
\begin{verbatim}
se_gov
\end{verbatim}
If Sedna is started successfully it prints "GOVERNOR has been started in
the background mode".
\item Create a new database \verb!testdb! by running the following command:
\begin{verbatim}
se_cdb testdb
\end{verbatim}
If the database is created successfully it prints "The database 'testdb' has
been created successfully".
\item Start the testdb database by running the following command:
\begin{verbatim}
se_sm testdb
\end{verbatim}
If the database is started successfully it prints "SM has been started in
the background mode".
\end{enumerate}
You can compile and run the example following the steps listed below:
\begin{enumerate}
\item To compile the example, run the script:
\begin{verbatim}
[win:] Clientbuild.bat
[nix:] ./Clientbuild.sh
\end{verbatim}
located in the same folder as \verb!Client.java!.
\item To run the compiled example, use the script:
\begin{verbatim}
[win:] Client.bat
[nix:] ./Client.sh
\end{verbatim}
located in the same folder as \verb!Client.java!.
\end{enumerate}
%===============================================================================
% Sedna Programmer's Guide: C API
%===============================================================================
\subsection{C API}
\label{c-api}
libsedna is the C application programmer's interface to the Sedna XML DBMS.
libsedna is a set of library functions that allow client programs to access
one or more databases of the Sedna XML DBMS and manipulate database data using
the database language (XQuery and XUpdate) described in Section 2.
The libsedna library is supplied with two header files: \verb!"libsedna.h"! and
\verb!"sp_defs.h"!. Client programs that use libsedna must include the header
file \verb!"libsedna.h"!, link with the libsedna library and provide the
compiler with the path to the directory where the \verb!"libsedna.h"! and
\verb!"sp_defs.h"! files are stored.
For convenience three versions of \verb!libsedna! are provided on the Windows
operating system:
\begin{enumerate}
\item\verb!libsednamt.lib! - static multi-threaded version built with /MT
option. Use it if you compile your project with /MT[d] option.
\item\verb!libsednamd.lib! - static multi-threaded version built with /MD
option. Use it if you compile your project with /MD[d] option.
\item\verb!sednamt.dll! - dynamic version. \verb!sednamt.lib! is import library.
\end{enumerate}
On Unix-like operating systems the following versions of \verb!libsedna! are
provided:
\begin{enumerate}
\item\verb!libsedna.so! - dynamic shared library.
\item\verb!libsedna.a! - static version of the library.
\item\verb!libsedna_pic.a! - static version of the library with PIC enabled. You
may need it to build drivers for Sedna which are based on \verb!libsedna!.
\end{enumerate}
%===============================================================================
% C API: Error Handling
%===============================================================================
\subsubsection{Error Handling}
The C API provides a set of functions for session and transaction management,
query and update statement execution, etc. If a function fails, it returns a
negative value. In this case the application can obtain the error message and
code, which help to understand the reason for the error.
To get the last error message, use the \verb!SEgetLastErrorMsg! function:
\begin{verbatim}
char* SEgetLastErrorMsg(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type
(see \ref{c-api-connecting}, Connecting to a Database section for details on
how to obtain a connection instance).
\end{citemize}
The function \verb!SEgetLastErrorCode! returns the last error code that occurred in
the session:
\begin{verbatim}
int SEgetLastErrorCode(struct SednaConnection *conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type
(see \ref{c-api-connecting}, Connecting to a Database section for details on
how to obtain a connection instance).
\end{citemize}
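The following sketch shows the typical error-handling pattern (\verb!SEconnect!
is described in the next section; the host, database name and credentials are
placeholders to be adjusted to your setup):
\begin{verbatim}
#include <stdio.h>
#include "libsedna.h"

int main(void)
{
    struct SednaConnection conn = SEDNA_CONNECTION_INITIALIZER;

    /* Any libsedna call that fails returns a negative value; the
       error message and code can then be obtained for the session. */
    if (SEconnect(&conn, "localhost", "testdb",
                  "SYSTEM", "MANAGER") < 0) {
        printf("error %d: %s\n",
               SEgetLastErrorCode(&conn),
               SEgetLastErrorMsg(&conn));
        return 1;
    }
    SEclose(&conn);
    return 0;
}
\end{verbatim}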
%===============================================================================
% C API: Connecting to a Database
%===============================================================================
\subsubsection{Connecting to a Database}
\label{c-api-connecting}
Before working with Sedna an application has to declare a variable of the
\verb!SednaConnection! type and initialize it in the following manner:
\begin{verbatim}
struct SednaConnection conn = SEDNA_CONNECTION_INITIALIZER;
\end{verbatim}
\begin{note}
The initialization with \verb!SEDNA_CONNECTION_INITIALIZER! is mandatory for
Sedna version 0.5 and earlier.
\end{note}
To start working with Sedna, an application has to open a session by
establishing an authenticated connection with the server using \verb!SEconnect!:
\begin{verbatim}
int SEconnect(SednaConnection* conn,
const char* url,
const char* db_name,
const char* login,
const char* password)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the SednaConnection type that
is associated with a session. The instance of the SednaConnection type is
initialized by SEconnect if the session is opened successfully.
\item\verb!url! - the name of the computer where the Sedna DBMS is running. This
parameter may contain a port number. If the port number is not specified, the
default port number (5050) is used.
\item\verb!db_name! - the name of the database to connect to.
\item\verb!login! - user login.
\item\verb!password! - user password.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_SESSION_OPEN! - connection to the database is established,
authentication passed successfully.
\end{citemize}
\noindent
If the function fails, the return value is negative and the session is not opened:
\begin{citemize}
\item\verb!SEDNA_AUTHENTICATION_FAILED! - authentication failed.
\item\verb!SEDNA_OPEN_SESSION_FAILED! - failed to open session.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
To access multiple databases or to better organize its logic, an application
can have several sessions open at one time.
When an application has finished its work with the database, it must close the
session. \verb!SEclose! finishes the session and closes the connection to the
server. \verb!SEclose! also frees resources that were acquired by the call to
the \verb!SEconnect! function; that is, for every successful call to
\verb!SEconnect! there must be a call to \verb!SEclose! in the client program.
You must call \verb!SEclose! both when the application finishes its work with
the database and when the application cannot work with the database anymore
due to some error.
\begin{verbatim}
int SEclose(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the SednaConnection type,
associated with a session to be closed.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_SESSION_CLOSED! - session closed successfully.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_CLOSE_SESSION_FAILED! - session closed with errors.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
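A minimal session life cycle might look as follows (a sketch, assuming the
code runs inside a function, \verb!<stdio.h>! and \verb!"libsedna.h"! are
included, and the connection parameters match your setup):
\begin{verbatim}
struct SednaConnection conn = SEDNA_CONNECTION_INITIALIZER;

if (SEconnect(&conn, "localhost:5050", "testdb",
              "SYSTEM", "MANAGER") != SEDNA_SESSION_OPEN) {
    printf("failed to open session: %s\n",
           SEgetLastErrorMsg(&conn));
    return 1;
}

/* ... work with the database here ... */

if (SEclose(&conn) != SEDNA_SESSION_CLOSED)
    printf("session closed with errors\n");
\end{verbatim}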
%===============================================================================
% C API: Setting Session Options
%===============================================================================
\subsubsection{Setting Session Options}
\label{session-option-capi}
An application can set attributes that govern aspects of a session using
\verb!SEsetConnectionAttr!:
\begin{verbatim}
int SEsetConnectionAttr(struct SednaConnection *conn,
enum SEattr attr,
const void* attrValue,
int attrValueLength)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the SednaConnection type,
associated with a session.
\item\verb!attr! - an attribute to set (one of the predefined Sedna connection
attributes listed below).
\item\verb!attrValue! - a pointer to the value to be associated with the
attribute.
\item\verb!attrValueLength! - a length of the value in bytes.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_SET_ATTRIBUTE_SUCCEEDED! - the attribute was set successfully.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
\medskip
\noindent
\textbf{Supported attributes:}
\begin{itemize}
\item\verb!SEDNA_ATTR_AUTOCOMMIT! Autocommit mode is the default transaction
management mode of the Sedna server (\verb!SEDNA_AUTOCOMMIT_ON! is the default
value of this attribute). Every XQuery or update statement is committed or
rolled back when it completes. If a statement completes successfully, it is
committed; if it encounters any error, it is rolled back. A connection to an
instance of the Sedna database operates in autocommit mode whenever this
default mode has not been overridden by setting this attribute to the
\verb!SEDNA_AUTOCOMMIT_OFF! value.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\verb!SEDNA_AUTOCOMMIT_ON!, & \\
\verb!SEDNA_AUTOCOMMIT_OFF! & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_SESSION_DIRECTORY! connection attribute defines the
\verb!session directory!. If this attribute is set, paths in the \verb!LOAD!
statement (see \ref{bulk-load}) or \verb!LOAD MODULE! are evaluated relative to the
session directory.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\emph{path to directory} & \emph{length of path} \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_DEBUG! connection attribute turns on/off query debug
mode. Query debug mode is off by default. \textbf{Note:} \verb!SEDNA_ATTR_DEBUG!
connection attribute must be set only after \verb!SEconnect! has been called on
the \verb!conn!.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\verb!SEDNA_ATTR_DEBUG_ON!, & \\
\verb!SEDNA_ATTR_DEBUG_OFF! & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_CONCURRENCY_TYPE! connection attribute changes the
mode of subsequent transactions. A transaction can be set to run as
\verb!READ-ONLY! (\verb!SEDNA_READONLY_TRANSACTION!) or as an
\verb!UPDATE!-transaction (\verb!SEDNA_UPDATE_TRANSACTION!). \verb!READ-ONLY!
transactions have one major benefit: they never wait for other transactions
(they do not have to acquire any document/collection locks). However, they
might access a slightly obsolete state of
updates). You should use \verb!READ-ONLY! transactions in a highly concurrent
environment. Notice that the current transaction, if any, will be forcefully
committed.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\verb!SEDNA_READONLY_TRANSACTION!, & \\
\verb!SEDNA_UPDATE_TRANSACTION! & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_QUERY_EXEC_TIMEOUT! connection attribute allows setting
a limit on query execution time (transaction execution time if autocommit is
turned off). If set, for each subsequent query (transaction) in this session,
query execution will be stopped if it lasts longer than the timeout set. In
this case the transaction within which the query runs is rolled back. By
default (value 0) there is no timeout for query (transaction) execution, that
is, a query (transaction) can be executed as long as needed.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\emph{time in seconds} & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_MAX_RESULT_SIZE! connection attribute allows setting a
limit on the query result size. If this attribute is set, the server will cut
the result data if its size exceeds the specified limit. By default, result
data that is passed from the server in response to a user query can be of
unlimited size.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\emph{size in bytes} & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_LOG_AMOUNT! connection attribute changes the mode of logical
logging for the following transactions in the same session. A transaction can
be set to run in full log mode (\verb!SEDNA_LOG_FULL!) or reduced log mode
(\verb!SEDNA_LOG_LESS!). The latter means the transaction writes much less log
info during bulk loads. Also, when such a transaction commits, a checkpoint is
made, which might greatly reduce recovery time. There is a caveat, however:
such a transaction always runs in exclusive mode, which means there can be no
concurrent transactions. Before it starts it waits for other concurrent
transactions to finish. In turn, all other transactions will not start until
the exclusive transaction finishes. You should use this option with care,
since it effectively stalls any concurrent activity. The main purpose of such
transactions is to bulk-load data. The other possible use case includes
transactions performing heavy update operations. Since a checkpoint will be
made when such a transaction commits, it might reduce recovery time in the
case of a database crash. Otherwise, you should not use this option since you
will not gain anything. Notice also that the current transaction in the same
session, if any, will be forcefully committed. The default value for this
attribute is \verb!SEDNA_LOG_FULL!.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\verb!SEDNA_LOG_LESS!, & \\
\verb!SEDNA_LOG_FULL! & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_BOUNDARY_SPACE_PRESERVE_WHILE_LOAD! controls whether
boundary whitespace is preserved in a document during bulk load. By default
Sedna discards boundary whitespace (\verb!SEDNA_BOUNDARY_SPACE_PRESERVE_OFF!).
The value of this option influences the behaviour of the \verb!SEloadData!
function only.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\verb!SEDNA_BOUNDARY_SPACE_PRESERVE_ON!, & \\
\verb!SEDNA_BOUNDARY_SPACE_PRESERVE_OFF! & sizeof(int) \\
\hline
\end{tabular}
\item\verb!SEDNA_ATTR_CDATA_PRESERVE_WHILE_LOAD! controls whether CDATA
sections are preserved in a document during bulk load. By default Sedna
discards CDATA sections (\verb!SEDNA_CDATA_PRESERVE_OFF!). The value of this
option influences the behaviour of the \verb!SEloadData! function only.
\begin{tabular}{|l|l|}
\hline
\emph{Attribute values} & \emph{Value size} \\
\hline
\verb!SEDNA_CDATA_PRESERVE_ON!, & \\
\verb!SEDNA_CDATA_PRESERVE_OFF! & sizeof(int) \\
\hline
\end{tabular}
\end{itemize}
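As an illustration, the following sketch limits query execution time to 10
seconds (\verb!conn! is assumed to be an open session):
\begin{verbatim}
int timeout = 10; /* seconds; 0 would mean no timeout */
if (SEsetConnectionAttr(&conn, SEDNA_ATTR_QUERY_EXEC_TIMEOUT,
                        (const void*) &timeout, sizeof(int))
        != SEDNA_SET_ATTRIBUTE_SUCCEEDED) {
    printf("failed to set timeout: %s\n",
           SEgetLastErrorMsg(&conn));
}
\end{verbatim}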
An application can retrieve the current value of a connection attribute using
\verb!SEgetConnectionAttr!:
\begin{verbatim}
int SEgetConnectionAttr(struct SednaConnection *conn,
enum SEattr attr,
void* attrValue,
int* attrValueLength);
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type,
associated with a session.
\item\verb!attr! - an attribute to retrieve.
\item\verb!attrValue! - a pointer to memory in which to return the current value
of the attribute specified by \verb!attr!.
\item\verb!attrValueLength! - a length of the retrieved value in bytes.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_GET_ATTRIBUTE_SUCCEEDED! - the attribute was retrieved
successfully.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
To reset all connection attributes to default values use:
\begin{verbatim}
int SEresetAllConnectionAttr(struct SednaConnection *conn);
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type,
associated with a session.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_RESET_ATTRIBUTES_SUCCEEDED! - attributes have been reset
successfully.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
%===============================================================================
% C API: Transactions Management
%===============================================================================
\subsubsection{Transactions Management}
An application can execute queries and updates against the specified database
only in the scope of a transaction. That is, once a session has been opened, an
application can begin a transaction, execute statements and commit this
transaction. In a session transactions are processed sequentially. That is,
the application must commit an ongoing transaction before beginning a new one.
There are two ways to manage transactions in Sedna sessions: \emph{autocommit
mode} and \emph{manual-commit mode}:
\begin{itemize}
\item\textbf{Autocommit mode}. Each individual statement is committed when it
completes successfully. When running in autocommit mode no other transaction
management functions are needed. By default, Sedna sessions are run in
autocommit mode.
\item\textbf{Manual-commit mode}. Transaction boundaries are specified
explicitly by means of \verb!SEbegin!, \verb!SEcommit! and \verb!SErollback!
functions. All statements between the call to \verb!SEbegin! and
\verb!SEcommit!/\verb!SErollback! are included in the same transaction.
\end{itemize}
An application can switch between the two modes using the
\verb!SEsetConnectionAttr! and \verb!SEgetConnectionAttr! functions (see
\ref{session-option-capi}) with the \verb!SEDNA_ATTR_AUTOCOMMIT! attribute, as
shown in the sketch below.
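For example, the following sketch switches an open session \verb!conn! into
manual-commit mode:
\begin{verbatim}
/* Turn autocommit off so that transaction boundaries can be
   set explicitly with SEbegin/SEcommit/SErollback. */
int value = SEDNA_AUTOCOMMIT_OFF;
SEsetConnectionAttr(&conn, SEDNA_ATTR_AUTOCOMMIT,
                    (const void*) &value, sizeof(int));
\end{verbatim}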
To specify transaction boundaries, an application uses the \verb!SEbegin!,
\verb!SEcommit! and \verb!SErollback! functions. The \verb!SEbegin! function
starts a new transaction in the given session:
\begin{verbatim}
int SEbegin(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_BEGIN_TRANSACTION_SUCCEEDED! - transaction has been
successfully started.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_BEGIN_TRANSACTION_FAILED! - failed to start a transaction.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
The \verb!SEcommit! function commits the current transaction:
\begin{verbatim}
int SEcommit(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_COMMIT_TRANSACTION_SUCCEEDED! - transaction has been committed.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_COMMIT_TRANSACTION_FAILED! - failed to commit transaction.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
The \verb!SErollback! function rolls back the current transaction:
\begin{verbatim}
int SErollback(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_ROLLBACK_TRANSACTION_SUCCEEDED! - the transaction has been
rolled back.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_ROLLBACK_TRANSACTION_FAILED! - failed to roll back the transaction.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
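Taken together, a typical manual-commit pattern looks as follows (a sketch
assuming \verb!conn! is an open connection with autocommit turned off; the
update statement is illustrative):
\begin{verbatim}
if (SEbegin(&conn) != SEDNA_BEGIN_TRANSACTION_SUCCEEDED)
    return -1;
int res = SEexecute(&conn, "UPDATE delete doc('region')//africa");
if (res < 0) {
    /* the statement failed: undo all changes */
    SErollback(&conn);
} else if (SEcommit(&conn) != SEDNA_COMMIT_TRANSACTION_SUCCEEDED) {
    return -1;
}
\end{verbatim}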
%===============================================================================
% C API: Getting connection and transaction status
%===============================================================================
\subsubsection{Getting Connection and Transaction Status}
An application can obtain the connection status using the
\verb!SEconnectionStatus! function:
\begin{verbatim}
int SEconnectionStatus(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\begin{citemize}
\item\verb!SEDNA_CONNECTION_OK! - the specified connection is open and
functioning properly.
\item\verb!SEDNA_CONNECTION_CLOSED! - the specified connection is closed. This
is the state either after the call to the \verb!SEclose! function, or before
the call to the \verb!SEconnect! function.
\item\verb!SEDNA_CONNECTION_FAILED! - the specified connection has failed.
(\textbf{Note:} in this case you should call the \verb!SEclose! function to
release resources).
\end{citemize}
An application may obtain the transaction status using the
\verb!SEtransactionStatus! function:
\begin{verbatim}
int SEtransactionStatus(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\begin{citemize}
\item\verb!SEDNA_TRANSACTION_ACTIVE! - a transaction is running in the
specified connection.
\item\verb!SEDNA_NO_TRANSACTION! - no transaction is running in the specified
connection; for example, the previous transaction has been committed and a new
one has not yet begun.
\end{citemize}
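A short sketch of using both status functions before starting new work in a
session:
\begin{verbatim}
if (SEconnectionStatus(&conn) == SEDNA_CONNECTION_FAILED) {
    /* a failed connection still holds resources: close it */
    SEclose(&conn);
} else if (SEtransactionStatus(&conn) == SEDNA_NO_TRANSACTION) {
    /* no transaction is running: it is safe to begin one */
    SEbegin(&conn);
}
\end{verbatim}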
%===============================================================================
% C API: Executing Queries and Updates
%===============================================================================
\subsubsection{Executing Queries and Updates}
\label{exec-capi}
There are two functions to execute a statement (query or update):
\verb!SEexecute! and \verb!SEexecuteLong!. The first reads the statement from a
C string; the second reads a long statement from a file. To get trace (the
\verb!fn:trace! XQuery function) and debug information, an application may
implement a custom debug handler and register it using the
\verb!SEsetDebugHandler! function.
\begin{verbatim}
int SEexecute(SednaConnection* conn, const char* query)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\item\verb!query! - a null-terminated string with an XQuery or XUpdate
statement.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_QUERY_SUCCEEDED! - specified query succeeded and result data
can be retrieved.
\item\verb!SEDNA_UPDATE_SUCCEEDED! - specified update succeeded.
\item\verb!SEDNA_BULK_LOAD_SUCCEEDED! - specified update (bulk load
\ref{bulk-load}) succeeded.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_QUERY_FAILED! - specified query failed.
\item\verb!SEDNA_UPDATE_FAILED! - specified update failed.
\item\verb!SEDNA_BULK_LOAD_FAILED! - bulk load failed.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
If the statement is long and you prefer to pass it to Sedna directly from a
file, use the \verb!SEexecuteLong! function.
\begin{verbatim}
int SEexecuteLong(SednaConnection* conn,
const char* query_file_path)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\item\verb!query_file_path! - a path to the file with a statement to execute.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_QUERY_SUCCEEDED! - specified query succeeded and result data
can be retrieved.
\item\verb!SEDNA_UPDATE_SUCCEEDED! - specified update succeeded.
\item\verb!SEDNA_BULK_LOAD_SUCCEEDED! - specified update (bulk load
\ref{bulk-load}) succeeded.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_QUERY_FAILED! - specified query failed.
\item\verb!SEDNA_UPDATE_FAILED! - specified update failed.
\item\verb!SEDNA_BULK_LOAD_FAILED! - bulk load failed.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
If the \verb!SEexecute! or \verb!SEexecuteLong! function returns
\verb!SEDNA_QUERY_SUCCEEDED!, the result data can be retrieved. The result of
an XQuery query evaluation is a sequence of items, where every item is
represented as a string. Use the \verb!SEnext! function to iterate over the
sequence and the \verb!SEgetData! function to retrieve the current item of the
sequence.
\begin{verbatim}
int SEnext(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_NEXT_ITEM_SUCCEEDED! - moving to the next item succeeded, and
the item can be retrieved.
\end{citemize}
\noindent
If the function fails or there are no result items available to retrieve, the
return value is negative:
\begin{citemize}
\item\verb!SEDNA_NEXT_ITEM_FAILED! - failed to get next item.
\item\verb!SEDNA_RESULT_END! - the result sequence is ended, no result data to
retrieve.
\item\verb!SEDNA_NO_ITEM! - there was no successful query that produced result
data; there is no result data to retrieve.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
The \verb!SEgetData! function retrieves the current item from the result sequence:
\begin{verbatim}
int SEgetData(SednaConnection* conn,
char* buf,
int bytes_to_read)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\item\verb!buf! - a pointer to the buffer that receives the data retrieved from
the server.
\item\verb!bytes_to_read! - number of bytes to be read from the server into the
buffer.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is non-negative:
\begin{citemize}
\item the \verb!number of bytes! actually read from the server and put into the
buffer;
\item \verb!zero! - no data was read because the end of the current item has
been reached (use \verb!SEnext! to move to the next item of the result).
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_GET_DATA_FAILED! - failed to get data.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
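The following sketch shows the typical retrieval loop (a condensed variant of
the example program below; \verb!buf! is a character buffer):
\begin{verbatim}
while (SEnext(&conn) == SEDNA_NEXT_ITEM_SUCCEEDED) {
    int bytes_read;
    /* stream the current item until SEgetData returns zero */
    while ((bytes_read = SEgetData(&conn, buf,
                                   sizeof(buf) - 1)) > 0) {
        buf[bytes_read] = '\0'; /* SEgetData does not null-terminate */
        printf("%s", buf);
    }
    if (bytes_read < 0)
        break; /* SEDNA_GET_DATA_FAILED or SEDNA_ERROR */
}
\end{verbatim}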
Since version 1.5 Sedna supports reporting tracing information (the
\verb!fn:trace! XQuery function). To handle tracing information while retrieving
result data, use a debug handler of type \verb!debug_handler_t! registered with
the \verb!SEsetDebugHandler! function:
\begin{verbatim}
void SEsetDebugHandler(struct SednaConnection *conn,
debug_handler_t debug_handler)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\item\verb!debug_handler! - a pointer to a user-defined function of the
following type: \verb!void (*debug_handler_t)(int subtype, const char *msg)!,
where \verb!subtype! is the type of debug information (currently only
\verb!SEDNA_QUERY_TRACE_INFO! is supported) and \verb!msg! is a buffer with the
debug information.
\end{citemize}
For example, the following debug handler prints debug information to stdout:
\small{
\begin{verbatim}
void my_debug_handler(enum SEdebugType subtype,
const char *msg) {
printf("TRACE: ");
printf("subtype(%d), msg: %s\n", subtype, msg);
}
\end{verbatim}}
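To register the handler above for a connection, a single call suffices (a
sketch; the handler is assumed to match the \verb!debug_handler_t! signature):
\begin{verbatim}
SEsetDebugHandler(&conn, my_debug_handler);
\end{verbatim}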
If the debug handler is not defined by the application, trace information is
ignored.
%===============================================================================
% C API: Loading Data
%===============================================================================
\subsubsection{Loading Data}
XML data can be loaded into a database using the \verb!"LOAD"! statement of the
Data Manipulation Language (see \ref{bulk-load}). In addition, the libsedna
library provides the \verb!SEloadData! and \verb!SEendLoadData! functions to
load well-formed XML documents divided into parts of any convenient size.
The \verb!SEloadData! function loads a chunk of an XML document:
\begin{verbatim}
int SEloadData(SednaConnection* conn,
const char* buf,
int bytes_to_load,
const char* doc_name,
const char* col_name)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\item\verb!buf! - a buffer with a chunk of the XML document to load.
\item\verb!bytes_to_load! - the number of bytes to load.
\item\verb!doc_name! - the name of the document in the database into which the
data is loaded.
\item\verb!col_name! - the name of the collection if the document is loaded
into a collection; \verb!NULL! if the document is loaded as a standalone one.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_DATA_CHUNK_LOADED! - chunk of an XML document loaded
successfully.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_ERROR! - some error occurred. Data is not loaded.
\end{citemize}
When the whole document has been loaded using \verb!SEloadData!, the
application must call \verb!SEendLoadData! to notify the server that the
transfer of the XML document is finished:
\begin{verbatim}
int SEendLoadData(SednaConnection* conn)
\end{verbatim}
\noindent
\textbf{Parameters:}
\begin{citemize}
\item\verb!conn! - a pointer to an instance of the \verb!SednaConnection! type.
\end{citemize}
\noindent
\textbf{Return values:}
\medskip
\noindent
If the function succeeds, the return value is positive:
\begin{citemize}
\item\verb!SEDNA_BULK_LOAD_SUCCEEDED! - XML document was successfully loaded
into the database.
\end{citemize}
\noindent
If the function fails, the return value is negative:
\begin{citemize}
\item\verb!SEDNA_BULK_LOAD_FAILED! - failed to load XML document into the
database.
\item\verb!SEDNA_ERROR! - some error occurred.
\end{citemize}
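A sketch of chunked loading of a standalone document from a local file (the
file name, buffer size and document name are illustrative):
\begin{verbatim}
FILE *f = fopen("region.xml", "r");
char chunk[4096];
size_t n;
int res = SEDNA_DATA_CHUNK_LOADED;
if (f == NULL)
    return -1;
while ((n = fread(chunk, 1, sizeof(chunk), f)) > 0) {
    /* NULL collection name: load as a standalone document */
    res = SEloadData(&conn, chunk, (int)n, "region", NULL);
    if (res != SEDNA_DATA_CHUNK_LOADED)
        break;
}
fclose(f);
if (res == SEDNA_DATA_CHUNK_LOADED &&
    SEendLoadData(&conn) != SEDNA_BULK_LOAD_SUCCEEDED)
    printf("bulk load failed: %s\n", SEgetLastErrorMsg(&conn));
\end{verbatim}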
%===============================================================================
% C API: Example Code
%===============================================================================
\subsubsection{Example Code}
\small{
\begin{verbatim}
#include "libsedna.h"
#include "stdio.h"
int handle_error(SednaConnection* conn,
const char* op,
int close_connection) {
printf("%s failed: \n%s\n", op, SEgetLastErrorMsg(conn));
if(close_connection == 1) SEclose(conn);
return -1;
}
int main() {
struct SednaConnection conn = SEDNA_CONNECTION_INITIALIZER;
int bytes_read, res, value;
char buf[1024];
/* Turn off autocommit mode */
value = SEDNA_AUTOCOMMIT_OFF;
res = SEsetConnectionAttr(&conn, SEDNA_ATTR_AUTOCOMMIT,
(void*)&value, sizeof(int));
/* Connect to the database */
    res = SEconnect(&conn, "localhost", "testdb",
                    "SYSTEM", "MANAGER");
if(res != SEDNA_SESSION_OPEN)
return handle_error(&conn, "Connection", 0);
/* Begin a new transaction */
res = SEbegin(&conn);
if(res != SEDNA_BEGIN_TRANSACTION_SUCCEEDED)
return handle_error(&conn, "Transaction begin", 1);
/* Load file "region.xml" into the document "region" */
res = SEexecute(&conn, "LOAD 'region.xml' 'region'");
if(res != SEDNA_BULK_LOAD_SUCCEEDED)
return handle_error(&conn, "Bulk load", 1);
/* Execute XQuery statement */
res = SEexecute(&conn, "doc('region')/*/*");
if(res != SEDNA_QUERY_SUCCEEDED)
return handle_error(&conn, "Query", 1);
/* Iterate and print the result sequence */
while((res = SEnext(&conn)) != SEDNA_RESULT_END) {
if (res == SEDNA_ERROR)
return handle_error(&conn, "Getting item", 1);
do {
bytes_read = SEgetData(&conn, buf, sizeof(buf) - 1);
if(bytes_read == SEDNA_ERROR)
return handle_error(&conn, "Getting item", 1);
buf[bytes_read] = '\0';
printf("%s\n", buf);
} while(bytes_read > 0);
}
/* Drop document "region" */
res = SEexecute(&conn, "DROP DOCUMENT 'region'");
if(res != SEDNA_UPDATE_SUCCEEDED)
return handle_error(&conn, "Drop document", 1);
/* Commit transaction */
res = SEcommit(&conn);
if(res != SEDNA_COMMIT_TRANSACTION_SUCCEEDED)
return handle_error(&conn, "Commit", 1);
/* Close connection */
res = SEclose(&conn);
if(res != SEDNA_SESSION_CLOSED)
return handle_error(&conn, "Close", 0);
return 0;
}
\end{verbatim}}
The full version of this example program can be found in:
\begin{verbatim}
[win:] INSTALL_DIR\examples\api\c\Client.c
[nix:] INSTALL_DIR/examples/api/c/Client.c
\end{verbatim}
Here \verb!INSTALL_DIR! refers to the directory where Sedna is installed.
Before running the example make sure that the Sedna DBMS is installed and do the
following steps:
\begin{enumerate}
\item Start Sedna by running the following command:
\begin{verbatim}
se_gov
\end{verbatim}
If Sedna is started successfully it prints "GOVERNOR has been started in
the background mode".
\item Create a new database \verb!testdb! by running the following command:
\begin{verbatim}
se_cdb testdb
\end{verbatim}
If the database is created successfully it prints "The database 'testdb' has
been created successfully".
\item Start the testdb database by running the following command:
\begin{verbatim}
se_sm testdb
\end{verbatim}
If the database is started successfully it prints "SM has been started in
the background mode".
\end{enumerate}
You can compile and run the example by following the steps listed below:
\begin{enumerate}
\item To compile the example use:
\begin{verbatim}
[win:] Clientbuild.bat
[nix:] ./Clientbuild.sh
\end{verbatim}
located in the same folder as \verb!Client.c!.
\item To run the compiled example use:
\begin{verbatim}
[win:] Client.exe
[nix:] ./Client
\end{verbatim}
located in the same folder as \verb!Client.c!.
\end{enumerate}
%===============================================================================
% Scheme API: Sessions
%===============================================================================
\subsection{Scheme API}
Scheme API allows querying and managing XML data from an application written in
Scheme. Scheme API follows the spirit of SchemeQL~\cite{paper:scheme-ql}, an
implementation of SQL 1992 for Scheme. The results of XQuery statements issued
to the Sedna DBMS via the Scheme API can be represented either in XML or
SXML~\cite{paper:sxml}.
\subsubsection{Sessions}
For working with the Sedna DBMS from Scheme, you should first establish a
connection between the Scheme API driver and the Sedna DBMS. Here are two
functions to manage connections:
\begin{verbatim}
> (sedna:connect-to-database host db-name user password)
: String String String String -> connection-object
\end{verbatim}
Establishes a connection between the client application and the Sedna DBMS.
Returns a 'connection' object which encapsulates information about the
connection.
The arguments are strings that denote connection parameters:
\begin{citemize}
\item \texttt{host} is the host where the Sedna DBMS is running. If it is on the
same machine as your Scheme application, you can use \texttt{"localhost"} for
the value of this parameter.
\item \texttt{db-name} is the name of the database you want to work with. You
are to establish a separate connection for each database you would like to work
with.
\item \texttt{user} is your user name for the session. You can use
\texttt{"SYSTEM"} for the value of this parameter.
\item \texttt{password} is your password for the session. You can use
\texttt{"MANAGER"} for the value of this parameter.
\end{citemize}
To disconnect from the database, you can use the following function:
\begin{verbatim}
> (sedna:disconnect-from-database connection)
: connection-object -> void
\end{verbatim}
Closes the connection represented by the \texttt{connection} object. If the
server fails to close the connection, the function closes the connection
forcibly from the client side and raises an exception, as discussed in
subsubsection \ref{scheme-errors}.
%===============================================================================
% Scheme API: Manage Transactions
%===============================================================================
\subsubsection{Manage Transactions}
After the connection with a database is established and the session is begun,
you can run zero or more transactions in this session. Transactions are to be
run sequentially, with no more than a single transaction at a time, so you
should commit your running transaction before starting a new one.
To begin a new transaction, the following function is provided:
\begin{verbatim}
> (sedna:begin-transaction connection)
: connection-object -> void
\end{verbatim}
It accepts the \texttt{connection} object (earlier created by the
\texttt{sedna:connect-to-database} function) and starts a new transaction. If
the transaction cannot be created, an exception is raised, as discussed in
subsubsection \ref{scheme-errors}.
To end your running transaction, you are provided with the following function:
\begin{verbatim}
> (sedna:end-transaction connection action)
: connection-object, symbol -> void
\end{verbatim}
If \texttt{action} is \texttt{'COMMIT}, the transaction in the given connection
will be committed; if \texttt{'ROLLBACK} is given, the transaction will be
rolled back.
%===============================================================================
% Scheme API: Executing Queries and Updates
%===============================================================================
\subsubsection{Executing Queries and Updates}
Within a transaction, you can execute zero or more queries to the database.
\begin{verbatim}
> (sedna:execute-query connection query)
: connection-object, string -> result
\end{verbatim}
The first argument is the \texttt{connection} object, earlier created by
\texttt{sedna:connect-to-database} function. The \texttt{query} is represented
as a string and can express one of the following kinds of statements:
\begin{citemize}
\item \textbf{XQuery statement} -- for querying data from the database, without
modifying it;
\item \textbf{Update statement} -- for making modifications to the database you
work with;
\item \textbf{Bulk load command} -- for loading a new XML document from the
local file to the database;
\item \textbf{Database management statement} -- to create index, trigger,
retrieve metadata, etc.
\end{citemize}
If an error occurs at the server side during query execution (e.g.\ the
requested document is not found), the function raises an exception that
contains a message describing the error.
In the case of successful query execution, \texttt{sedna:execute-query} returns
\texttt{\#t} for the last three kinds of statements, to denote a successful
update made to the database. An XQuery \texttt{query} results in a sequence of
items, which are evaluated lazily and are represented as a pair:
\begin{verbatim}
xquery-result ::= (cons current-item promise)
\end{verbatim}
\noindent
This way of result representation is very close to the notion of a SchemeQL
{\em cursor}~\cite{paper:scheme-ql} (the only difference is that the Scheme API
driver returns XQuery items instead of the table rows returned by SchemeQL).
The first member of the pair is the \texttt{current-item} represented in SXML,
and the second member of the pair holds a promise (which can be forced) to
evaluate and return the next item in the result sequence.
To iterate over the result sequence, you can use the function:
\begin{verbatim}
> (sedna:next xquery-result)
: xquery-result -> xquery-result or '()
\end{verbatim}
\noindent
which forces the evaluation of the next item in the result sequence; it
returns \texttt{'()} when the end of the sequence is reached.
This design allows you to process a query result in a lazy stream-wise fashion
and makes it possible to process large query results that would not otherwise
fit in main memory.
However, for query results that are not very large, you may find it convenient
to evaluate them all at once and represent the result sequence as a Scheme list.
Scheme API provides a function that converts the \texttt{xquery-result} into the
list that contains all items of the result sequence:
\begin{verbatim}
> (sedna:result->list xquery-result)
: xquery-result -> (listof item)
\end{verbatim}
\noindent
To obtain the result sequence in the form of a list, you can execute your
queries as a composition of the functions considered above:
\begin{verbatim}
(sedna:result->list
(sedna:execute-query connection query))
\end{verbatim}
\noindent
It should be noted that the XQuery statement result in that case exactly
corresponds to the notion of a {\em node-set} in SXPath~\cite{paper:sxpath},
the XPath implementation in Scheme.
If you want to obtain your query results in XML instead of SXML, you can use
the function:
\begin{verbatim}
> (sedna:execute-query-xml connection query)
: connection-object, string -> result
\end{verbatim}
\noindent
It is the counterpart of the earlier discussed \texttt{sedna:execute-query} and
has the same signature, but represents query results in XML.
The function returns a sequence of items, in the form of the
\texttt{xquery-result} discussed above, but \texttt{current-item} is now a
string containing the representation of the current item in the form of XML.
%===============================================================================
% Scheme API: Bulk Load from Stream
%===============================================================================
\subsubsection{Bulk Load from Stream}
The following wrapper function provides a natural way to load an input stream
containing an XML document into your database:
\begin{verbatim}
> (sedna:bulk-load-from-xml-stream
connection port document-name . collection-name)
: connection-object, input-port,
string [, collection-name] -> boolean
\end{verbatim}
As for \texttt{sedna:execute-query}, the first argument here is the
\texttt{connection} object, earlier created by the
\texttt{sedna:connect-to-database} function. Argument \texttt{port} is a Scheme
input port and is supposed to contain a well-formed XML document. Argument
\texttt{document-name} specifies the name that will be given to the XML document
within the database. If the $4$-th argument \texttt{collection-name} is
supplied, the XML document is loaded into the collection whose name is
specified by that argument; otherwise, the XML document is loaded into the
database as a standalone document.
By allowing you to specify the input port you would like to use, this function
provides a higher-level shortcut for \texttt{sedna:execute-query} when bulk
load from a stream is concerned. For more details on bulk load, see section
\ref{bulk-load}.
%===============================================================================
% Scheme API: Higher-level Function for a Transaction
%===============================================================================
\subsubsection{Higher-level Function for a Transaction}
This higher-level function provides a convenient way for executing a transaction
consisting of several queries, within a single function call:
\begin{verbatim}
> (sedna:transaction connection . queries)
: connection-object, string* -> result
\end{verbatim}
This function starts a new transaction on the \texttt{connection} object and
executes all the \texttt{queries} given, in order. If no exception occurs, the
function commits the transaction and returns the result of the last query. If
an exception occurs during query execution, the function sends a rollback to
the Sedna DBMS and passes the exception along to the application.
%===============================================================================
% Scheme API: Executing Queries and Updates
%===============================================================================
\subsubsection{Error handling}
\label{scheme-errors}
Error handling in the Scheme API driver is based on the exception mechanism
suggested in the (currently withdrawn) SRFI-12~\cite{paper:srfi-12}. The SRFI-12
exception mechanism is natively supported in the Chicken Scheme
compiler~\cite{paper:chicken}. In the Scheme API driver, we also provide the
SRFI-12 implementation for PLT and Gambit.
%===============================================================================
% Scheme API: Code Example
%===============================================================================
\subsubsection{Code Example}
This section presents an example that illustrates the application of the Scheme
API driver.
\small{
\begin{verbatim}
; Load the necessary Scheme API driver files
(load "collect-sedna-plt.scm")
; Create a connection
(define conn
(sedna:connect-to-database "localhost" "testdb"
"SYSTEM" "MANAGER"))
; Begin a transaction
(sedna:begin-transaction conn)
; Bulk load
(call/cc
(lambda (k)
(with-exception-handler ; Exception handler
(lambda (x)
(display "File already loaded to the database")
(newline)
(k #f))
(lambda ()
(sedna:execute-query conn
"LOAD 'region.xml' 'regions'")))))
; Execute a statement and represent it as an SXML nodeset
(pp
(sedna:result->list
(sedna:execute-query conn "doc('region')/*/*")))
; Update statement
(pp
(sedna:execute-query conn
"UPDATE delete doc('region')//africa"))
; Querying all regions once again
(pp
(sedna:result->list
(sedna:execute-query conn "doc('region')/*/*")))
; Commit transaction
(sedna:end-transaction conn 'COMMIT)
; Close the connection
(sedna:disconnect-from-database conn)
\end{verbatim}}
You can find the full version of this example and the Scheme API driver code in:
\begin{verbatim}
[win:] INSTALL_DIR\examples\api\scheme
[nix:] INSTALL_DIR/examples/api/scheme
\end{verbatim}
where \verb!INSTALL_DIR! refers to the directory where Sedna is installed.
Before running the example make sure that the Sedna DBMS is installed and do the
following steps:
\begin{enumerate}
\item Start Sedna by running the following command in a command line:
\begin{verbatim}
se_gov
\end{verbatim}
If Sedna is started successfully it prints "GOVERNOR has been started in the
background mode".
\item Create a new database \verb!testdb! by running the following command:
\begin{verbatim}
se_cdb testdb
\end{verbatim}
If the database is created successfully it prints "The database 'testdb' has
been created successfully".
\item Start the testdb database by running the following command:
\begin{verbatim}
se_sm testdb
\end{verbatim}
If the database is started successfully it prints "SM has been started in the
background mode".
\end{enumerate}
If the Sedna DBMS is running on the same computer as your Scheme application,
you don't need to change anything in the example code. If the Sedna DBMS is
running on a remote machine, you should use the name of this machine when
connecting to the database with \texttt{sedna:connect-to-database} function.
For running the example supplied, you should copy all files from the folder:
\begin{verbatim}
[win:] INSTALL_DIR\examples\api\scheme
[nix:] INSTALL_DIR/examples/api/scheme
\end{verbatim}
into the folder where the Scheme API driver code is located
\begin{verbatim}
[win:] INSTALL_DIR\driver\scheme
[nix:] INSTALL_DIR/driver/scheme
\end{verbatim}
where \verb!INSTALL_DIR! refers to the directory where Sedna is installed.
You can use PLT DrScheme GUI to open and run \texttt{"client.scm"} in the
graphical mode. You can also run the example from your command line, by typing:
\begin{verbatim}
mzscheme -gr client.scm
\end{verbatim}
For running the example with a different Scheme implementation (Chicken or
Gambit), uncomment the corresponding line at the beginning of the example code
in \texttt{"client.scm"} and follow the instructions of the chosen Scheme
implementation in running the program.
%===============================================================================
% Database Language: XQuery Support
%===============================================================================
\section{Database Language}
\label{sec:DBLang}
\subsection{XQuery Support}
\label{sec:xquery-support}
The Sedna query language is XQuery \cite{paper:query-language} developed by W3C.
Sedna conforms to the January 2007 specification of XQuery, except for the
following features:
\begin{itemize}
\item \verb!copy-namespaces! declaration works only in \verb!preserve!,
\verb!inherit! mode regardless of the actual prolog values;
\item the \verb!fn:normalize-unicode! function is not conformant (it always
returns the same string or raises an exception);
\item external variables are not supported. However, they are supported by the
XQJ driver by Charles Foster (see the download page);
\item regular expressions (used, e.g., by \verb!fn:matches()!) are based on
PCRE~\cite{lib:pcre-lib} syntax, which differs from the one defined in the W3C
specifications.
\end{itemize}
Sedna also has full support for two optional XQuery features:
\begin{itemize}
\item\textbf{Full Axis}. The following optional axes are supported:
\verb!ancestor!, \verb!ancestor-or-self!, \verb!following!,
\verb!following-sibling!, \verb!preceding!, and \verb!preceding-sibling!;
\item\textbf{Module Feature} allows a query Prolog to contain a Module Import
and allows library modules to be created.
\end{itemize}
Sedna passes the XML Query Test Suite (XQTS) and has the official ``almost
passed'' status. The detailed report can be found at
\url{http://www.w3.org/XML/Query/test-suite/XQTSReport.html}.
%===============================================================================
% XQuery Options and Extensions: Serialization
%===============================================================================
\subsection{XQuery Options and Extensions}
\label{sec:xquery-extensions}
\subsubsection{Controlling Serialization}
\label{indents}
Serialization is the process of converting XML nodes evaluated by XQuery into a
stream of characters. In Sedna serialization is carried out when the result of a
query is returned to the user. You can control the serialization by setting the
\emph{serialization parameters} specified in \cite{paper:query-serialization}.
Currently, Sedna supports the following serialization parameters:
\medskip
\begin{tabular}{|l|l|l|}
\hline
Parameter name & Values & Description \\
\hline
\hline
indent & "yes" or "no" & Output indented \\
& (default yes) & \\
\hline
cdata-section-elements & Element list & Text within elements is \\
& e.g. 'name;data' & output as a CDATA section \\
\hline
character-map & String-to-string map & Character and its string \\
 & e.g. '>=>' & substitution \\
\hline
\end{tabular}
\medskip
To set a serialization parameter, use the \verb!output! option in the query
prolog. The \verb!output! option is in the Sedna namespace
(http://www.modis.ispras.ru/sedna), which is a predefined namespace in Sedna,
so you can omit its declaration. The value of the \verb!output! option must
have the following structure: "\verb!parameter-name=value; parameter-name=value!".
Consider the following examples:
\begin{verbatim}
declare namespace se = "http://www.modis.ispras.ru/sedna";
declare option se:output "indent=yes; cdata-section-elements=script";
<x><script>K&R C</script><b>element</b></x>
\end{verbatim}
The \verb!character-map! parameter defines a list of characters (strings) and
corresponding string substitutions. Mapping is applied to the characters
(strings) that actually appear in a text or attribute node after any other
serialization operations, such as escaping or Unicode normalization, are
applied. If a character (or string) is mapped, then it is not subjected to XML
escaping.
\begin{note}
Using a character map can result in non-well-formed documents if the string
contains XML-significant characters. For example, it is possible to create
documents containing unmatched start and end tags, references to entities that
are not declared, or attributes that contain tags or unescaped quotation marks.
\end{note}
The following example generates an XML tag:
\begin{verbatim}
declare option se:character-map ">=>";
declare option se:character-map "<=<";
concat("<" ,"text", "/>")
\end{verbatim}
As mentioned above, you may omit the Sedna namespace declaration:
\begin{verbatim}
declare option se:output "indent=yes; cdata-section-elements=script";
<x><script>K&R C</script><b>element</b></x>
\end{verbatim}
This query is evaluated as follows:
\begin{verbatim}
<x>
<script><![CDATA[K&R C]]></script>
<b>element</b>
</x>
\end{verbatim}
%===============================================================================
% XQuery Options and Extensions: Values Indices Functions
%===============================================================================
\subsubsection{Value Indices Functions}
\label{sec:value-index-scan-fun}
In the current version of Sedna, the query executor does not use indices
automatically. Use the following functions to force the executor to employ
indices.
\begin{verbatim}
index-scan ($title as xs:string,
$value as xdt:anyAtomicType,
$mode as xs:string) as node()*
\end{verbatim}
The \verb!index-scan! function scans the index with the \verb!$title! name and
returns the sequence of nodes whose keys are equal to (less than, greater than,
greater than or equal to, less than or equal to) the search value
\verb!$value!. A Sedna error is raised if the search value cannot be cast to
the atomic type of the index. The \verb!$mode! parameter of the
\verb!xs:string! type sets the type of the scan. The value of the parameter
must be one of the following: 'EQ' (equal), 'LT' (less than), 'GT' (greater
than), 'GE' (greater or equal), 'LE' (less or equal).
\begin{verbatim}
index-scan-between ($title as xs:string,
$value1 as xdt:anyAtomicType,
$value2 as xdt:anyAtomicType,
$range as xs:string) as node()*
\end{verbatim}
The \verb!index-scan-between! function scans the index with the \verb!$title!
name and returns the sequence of nodes whose keys belong to the interval
(segment, left half-interval, right half-interval) between the first
(\verb!$value1!) and second (\verb!$value2!) search values. A Sedna error is
raised if the search values cannot be cast to the atomic type of the index.
The \verb!$range! parameter of the \verb!xs:string! type sets the type of the
scan. The value of the string must be one of the following: 'INT' (interval),
'SEG' (segment), 'HINTR' (right half-interval), 'HINTL' (left half-interval).
For example, to select the names of people who live in the city of London,
employing the "people" index defined in section
\ref{sec:managing-value-indices}, use the following expression:
\begin{verbatim}
index-scan("people", "London", "EQ")/name
\end{verbatim}
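Similarly, a sketch using \verb!index-scan-between! on the same index to select
the names of people from cities whose names fall in the segment from "Berlin"
to "London" (the key values are illustrative):
\begin{verbatim}
index-scan-between("people", "Berlin", "London", "SEG")/name
\end{verbatim}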
To get all distinct keys sorted in natural order, use the \verb!index-keys!
function:
\begin{verbatim}
index-keys ($title as xs:string) as xs:anyAtomic*
\end{verbatim}
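For example, a sketch listing all distinct keys of the "people" index:
\begin{verbatim}
index-keys("people")
\end{verbatim}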
%===============================================================================
% XQuery Options and Extensions: Full-Text Search Functions
%===============================================================================
\subsubsection{Full-Text Search Functions}
\label{sec:ft-fun}
Please read section \ref{sec:managing-ft-indices} before reading this section.
In the current version of Sedna, the query executor does not use full-text
indices automatically. Use the following functions to force the executor to
employ indices.
\begin{verbatim}
ftindex-scan($title as xs:string,
$query as xs:string,
$options as xs:string) as node()*
\end{verbatim}
The \verb!ftindex-scan! function scans the full-text index with the
\verb!$title! name and returns the sequence of items that satisfy the
\verb!$query!. If dtSearch \cite{link:dtsearch-engine} is used as the full-text
search backend, use the dtSearch request language \cite{doc:dtsearch} to
specify the query. The dtSearch options \verb!dtsSearchAnyWords! or
\verb!dtsSearchAllWords! may be specified in \verb!$options!.
For example, you can employ the "articles" index defined in section
\ref{sec:managing-ft-indices} to select the titles of articles that contain the
word "apple" but not "pear":
\begin{verbatim}
ftindex-scan("articles", "apple and not pear")/title
\end{verbatim}
If native full-text indices are used, the following constructs can be used as
parts of query:
\begin{itemize}
\item phrases: several words in single or double quotes will be searched as a
      phrase;
\item binary operators: parts of the query separated by whitespace are treated
      as a conjunction; disjunction is specified by \verb!'OR'!. For example,
      the query \verb!'apple juice'! will return only nodes containing both
      words, while the query \verb!'apple OR juice'! will return nodes
      containing either of these words;
\item stemming: if the index was created with the stemtype=both option, a tilde
      must be appended to a word in order to use stemming; for example, in the
      query \verb!'apple juice~'! stemming will be used only for the second
      word;
\item contains: to search for words inside some tag, use CONTAINS; the query
      \verb!'title CONTAINS word'! will return nodes in which the word 'word'
      occurs as part of the tag 'title'.
\end{itemize}
All keywords (like CONTAINS and OR) must be upper-cased.
The \verb!ftscan! function returns those items of the input sequence \verb!$seq!
that satisfy the query \verb!$query!. The function does not use indices and can
be applied to any sequence of nodes, even those that are not indexed. The query
\verb!$query! is evaluated over the text representation constructed according to
the \verb!$type! and \verb!$customization_rules! parameters. The values of the
parameters are the same as those used when a full-text index is created (see
section \ref{sec:managing-ft-indices} for details).
\begin{verbatim}
ftscan($seq as node()*,
$query as xs:string,
$type as xs:string,
$customization_rules as xs:string) as node()*
\end{verbatim}
For example, you can select the titles of articles that contain the word
"apple" but not "pear" \emph{without} using indices and using special
customization rules as follows:
\begin{verbatim}
ftscan(doc("foo")/library//article,
"apple and not pear",
"customized-value",
(("b","string-value"),("a","delimited-value")))/title
\end{verbatim}
%===============================================================================
% XQuery Options and Extensions: SQL Connection
%===============================================================================
\subsubsection{SQL Connection}
SQL Connection allows access to relational databases from XQuery using SQL. The
resulting relations are represented on-the-fly as sequences of XML elements
representing rows. These elements have sub-elements corresponding to the
columns returned by the SQL query and thus can be easily processed in XQuery.
All functions dealing with access to SQL data are located in the namespace
\verb!http://modis.ispras.ru/Sedna/SQL!, which is referred to as \verb!sql! in
the following function declarations and examples.
\subsubsection*{Connections} In order to execute SQL queries on an RDBMS, you
should first establish a connection to it using one of the \verb!sql:connect!
functions:
\begin{verbatim}
function sql:connect($db-url as xs:string) as xs:integer
function sql:connect($db-url as xs:string,
$user as xs:string) as xs:integer
function sql:connect($db-url as xs:string,
$user as xs:string,
$password as xs:string) as xs:integer
function sql:connect($db-url as xs:string,
$user as xs:string,
$password as xs:string,
$options as element()*) as xs:integer
\end{verbatim}
These functions attempt to establish a database connection to the given URL
using a user name and password, if specified. They return a connection handle
which can then be passed to the \verb!sql:execute!, \verb!sql:prepare!,
\verb!sql:close!, \verb!sql:rollback!, and \verb!sql:commit! functions. If the
connection cannot be established, a Sedna error is raised.
All arguments of the \verb!sql:connect! functions except for \verb!$db-url! are
optional:
\begin{itemize}
\item\verb!$db-url! is the URL of the database to which a connection is
established. It should be of the following form:
\small{\begin{verbatim}
odbc:<driver name>:[//<server>[/<database>][;]][<options>]
\end{verbatim}}
``;'' after \verb!<database>! or \verb!<server>! is required if driver options
follow it. Driver options must have the following form:
\begin{verbatim}
<option>=<value>{;<option>=<value>}
\end{verbatim}
The list of available options depends on the ODBC driver used. One of the
common options is ``Port'', which is used to specify the port on which the
database server is configured to listen. For example:
\small{\begin{verbatim}
odbc:MySQL ODBC 3.51 Driver://localhost/somedb;Port=1234
\end{verbatim}}
\item\verb!$user! is your user name for the session.
\item\verb!$password! is your password for the session.
\item\verb!$options! is an optional sequence of connection options. Connection
options are elements of the form:
\begin{verbatim}
<sql:option name="<option-name>" value="<option-value>"/>
\end{verbatim}
The only connection option available at the moment is \verb!manual-commit!,
which enables manual-commit mode if its value is \verb!on!.
\end{itemize}
To disconnect from the database, you can use the following function:
\begin{verbatim}
function sql:close($connection as xs:integer) as element()?
\end{verbatim}
It closes the database connection associated with the connection handle
\verb!$connection!. A Sedna error is raised if the operation cannot be
completed.
\subsubsection*{Executing Queries}
When a database connection is established you can start executing queries. Two
types of query execution are supported: \emph{direct query execution} and
\emph{prepared query execution}.
\subsubsection*{Direct Queries}
Simple SQL queries are executed as the following XQuery example shows:
\small{
\begin{verbatim}
declare namespace sql="http://modis.ispras.ru/Sedna/SQL";
let $connection :=
sql:connect("odbc:MySQL ODBC 3.51 Driver://localhost/somedb",
"user", "pass")
return
sql:execute($connection,
"SELECT * FROM people WHERE first = 'Peter'");
\end{verbatim}}
The result will be something like this:
\small{
\begin{verbatim}
<tuple first="Peter" last="Jackson" city="Wellington"/>
\end{verbatim}}
There are two functions for direct query execution:
\begin{verbatim}
function sql:execute($connection as xs:integer,
$statement as xs:string) as element()*
function sql:execute($connection as xs:integer,
$statement as xs:string,
$query-options as element()*) as element()*
\end{verbatim}
These functions execute an SQL query and return a sequence of elements
representing the query result. The SQL query can be either a query statement or
an update statement. In the case of a query statement, the result sequence
contains an element named 'row' for each row of the query result. Each element
contains as many attributes as there are non-NULL fields in the corresponding
result row. Each attribute has the name of a row field. Fields with NULL values
are not included. In the case of an update statement, an empty sequence is
returned. A Sedna error is raised on an erroneous statement.
The \verb!sql:execute! functions have the following arguments:
\begin{citemize}
\item\verb!$connection! is a connection handle, returned by \verb!sql:connect!
function;
\item\verb!$statement! is a string containing SQL statement to be executed;
\item\verb!$query-options! is a sequence of optional query parameters.
\end{citemize}
Update queries can be executed using the \verb!sql:exec-update! function:
\begin{verbatim}
function sql:exec-update($connection as xs:integer,
$statement as xs:string) as xs:integer
function sql:exec-update($connection as xs:integer,
$statement as xs:string,
$query-options as element()*) as xs:integer
\end{verbatim}
These functions are similar to \verb!sql:execute!, but return the number of
rows affected by an update query (instead of the empty sequence returned by
\verb!sql:execute! for update queries). Function arguments are the same as for
\verb!sql:execute!. The behavior of these functions is undefined for non-update
queries.
\subsubsection*{Prepared Statements}
Sometimes it is more convenient or more efficient to use prepared SQL statements
instead of direct query execution. In most cases, when a SQL statement is
prepared it will be sent to the DBMS right away, where it will be compiled. This
means that the DBMS does not have to compile a prepared statement each time it
is executed.
Prepared statements can take parameters. This allows using the same statement,
supplying it with different values each time you execute it, as in the
following XQuery example:
\small{
\begin{verbatim}
declare namespace sql="http://modis.ispras.ru/Sedna/SQL";
let $connection :=
sql:connect("odbc:MySQL ODBC 3.51 Driver://localhost/somedb",
"user", "pass")
let $statement :=
sql:prepare($connection,
"INSERT INTO people(first, last) VALUES (?, ?)")
return (sql:execute($statement, "John", "Smith"),
sql:execute($statement, "Matthew", "Barney"))
\end{verbatim}}
\noindent
This XQuery code inserts two rows into the table \verb!people! and returns an
empty sequence.
To use prepared statements, first you need to create a prepared statement handle
using the \verb!sql:prepare! function:
\small{
\begin{verbatim}
function sql:prepare($connection as xs:integer,
$statement as xs:string) as xs:integer
function sql:prepare($connection as xs:integer,
$statement as xs:string,
$query-options as element()*) as xs:integer
\end{verbatim}}
\noindent
These functions prepare an SQL statement for later execution and return a
prepared statement handle which can be used in the \verb!sql:execute! and
\verb!sql:exec-update! functions. A Sedna error is raised on an erroneous
statement.
The \verb!sql:prepare! functions have the following arguments:
\begin{citemize}
\item \verb!$connection! is a connection handle, created by \verb!sql:connect!
function;
\item \verb!$statement! is a string containing a SQL statement that may
contain one or more '?' - \verb!IN! parameter placeholders;
\item \verb!$query-options! is a sequence of optional query parameters.
\end{citemize}
There are two prepared statement execution functions, similar to direct
query execution:
\begin{verbatim}
function sql:execute($prepared-statement as xs:integer,
$param1 as item()?,
...) as element()*
\end{verbatim}
This function is similar to \verb!sql:execute! for direct queries and returns a
sequence of elements representing the query result.
The \verb!sql:execute! function has the following arguments:
\begin{citemize}
\item\verb!$prepared-statement! is a prepared statement handle created by
\verb!$sql:prepare!;
\item\verb!$param1, ...! are parameters for parametrized
statements. The number of parameters specified must exactly match the number
of parameters of the prepared statement. NULL values are represented as empty
sequences \verb!()!.
\end{citemize}
To execute a prepared update statement you may use the \verb!sql:exec-update!
function:
\begin{verbatim}
function sql:exec-update($prepared-statement as xs:integer,
$param1 as item()?,
...) as xs:integer
\end{verbatim}
This function is similar to \verb!sql:execute!, but returns the number of rows
affected by an update query (instead of the empty sequence returned by
\verb!sql:execute! for update queries). Function arguments are the same as for
\verb!sql:execute!. The behavior of this function is undefined for non-update
queries.
\subsubsection*{Transactions}
The default commit mode of a connection is auto-commit, meaning that all
updates are committed automatically. If this is not the desired behavior, you
can pass the manual-commit option to \verb!sql:connect! when you create a
connection handle. In manual-commit mode you can specify when updates are
committed or rolled back:
\small{
\begin{verbatim}
declare namespace sql="http://modis.ispras.ru/Sedna/SQL";
let $connection :=
sql:connect("odbc:MySQL ODBC 3.51 Driver://localhost/testdb",
"user-name",
"user-password",
<sql:option name="manual-commit" value="1"/>)
return
for $person in doc("auction")/person
return (
sql:execute($connection, "<do something with person>"),
if (fn:all-is-ok($connection, $person)) then
(
sql:execute($connection, "<do something with person>"),
sql:commit($connection)
)
else
sql:rollback($connection))
\end{verbatim}}
There are two functions for specifying transaction boundaries,
\verb!sql:commit! and \verb!sql:rollback! (transactions are started
automatically by queries; these functions only close them):
\begin{verbatim}
function sql:commit($connection as xs:integer) as element()?
\end{verbatim}
The \verb!sql:commit! function commits all changes made during the last
transaction in the database connection specified by the connection handle
\verb!$connection! and closes the transaction. A Sedna error is raised if the
operation cannot be completed.
The \verb!sql:rollback! function rolls back all changes made during the last
transaction in the database connection specified by the connection handle
\verb!$connection! and closes the transaction. A Sedna error is raised if the
operation cannot be completed.
\begin{verbatim}
function sql:rollback($connection as xs:integer) as element()?
\end{verbatim}
%===============================================================================
% XQuery Options and Extensions: External Functions
%===============================================================================
\subsubsection{External Functions}
\emph{External function} is a notion defined in the XQuery specification
\cite{paper:query-language} as follows: "External functions are functions that
are implemented outside the query environment". Support for external functions
allows you to extend XQuery by implementing functions in other languages.
Sedna provides a server programming API to write external functions in the C/C++
language. External functions in Sedna are limited to dealing with sequences of
atomic values. External functions are compiled and linked in the form of shared
libraries (i.e. \verb!.dll! files on Windows, \verb!.so! files on Linux/FreeBSD
and \verb!.dylib! on Mac OS) and loaded by the server on demand.
Although the Sedna XQuery executor evaluates queries in a lazy manner, all
external function calls are evaluated in an eager manner.
\subsubsection*{Using External Functions}
To use an external function you need to declare this function in prologue with
\verb!external! keyword instead of function body. Then it may be used normally:
\small{
\begin{verbatim}
declare function se:f($a as xs:integer) as xs:integer external;
se:f(10)
\end{verbatim}}
\subsubsection*{Creating External Functions}
External functions must be written in C/C++. To implement a new XQuery function
\verb!func! you should write the following C (or C++) functions: \verb!func!,
\verb!func_init! and \verb!func_deinit!. When the executor decides that it
needs to use an external function, it first initializes the function by calling
\verb!func_init!; after that it calls \verb!func! to compute results as many
times as needed. When the external function is no longer needed, the executor
calls \verb!func_deinit! (which typically frees any memory allocated by
\verb!func_init!). Each of the three functions receives an
\verb!SEDNA_EF_INIT!\footnote{All needed types and constants are defined in the
sedna\_ef.h file, located in the include directory of the Sedna distribution.
See Section ``Sedna Directory Structure'' in \cite{doc:admin} to learn where the
include directory is located.} structure as a parameter. This structure has
several fields that are initialized by the executor before any \verb!func_init!
or \verb!func_deinit! calls:
\begin{verbatim}
typedef struct sedna_ef_init
{
void *(*sedna_malloc)(size_t);
void (*sedna_free)(void *);
SEDNA_SEQUENCE_ITEM *item_buf;
} SEDNA_EF_INIT;
\end{verbatim}
The fields of this structure may be used in your implementation:
\begin{itemize}
\item \verb!sedna_malloc! is a pointer to a malloc function which must be used
to allocate memory for function results; this memory will be automatically
freed by the query executor. It may also be used to allocate memory for
internal use; such memory must be freed manually using the \verb!sedna_free!
function.
\item \verb!sedna_free! is a pointer to a free function that releases memory
allocated using the \verb!sedna_malloc! function.
\item \verb!item_buf! is a pointer to a preallocated \verb!SEDNA_SEQUENCE_ITEM!
which may be used to store the result (this makes it possible to avoid calling
the \verb!sedna_malloc! function when the result is a single atomic non-string
value).
\end{itemize}
\verb!func!, \verb!func_init! and \verb!func_deinit! must have specific
signatures:
\begin{itemize}
\item \verb!func()! (required) -- computes external function results. This
function has the following signature:
\begin{verbatim}
SEDNA_SEQUENCE_ITEM *func(SEDNA_EF_INIT *init,
SEDNA_EF_ARGS *args,
char * error_msg_buf);
\end{verbatim}
\begin{citemize}
\item\verb!init! is a pointer to the \verb!SEDNA_EF_INIT! structure which was
passed to \verb!func_init! function (if written);
\item\verb!args! is a pointer to the \verb!SEDNA_EF_ARGS! structure which
contains all function arguments;
\item\verb!error_msg_buf! is a pointer to the string buffer used for specifying
error message if function invocation fails. Maximum message length is
\verb!SEDNA_ERROR_MSG_BUF_SIZE! bytes, including the null character \verb!'\0'!.
\end{citemize}
\item \verb!func_init()! (optional) -- the initialization function. It can be
used to allocate any memory required by the main function. This function has the
following signature:
\begin{verbatim}
void func_init(SEDNA_EF_INIT *init, char * error_msg_buf);
\end{verbatim}
\begin{citemize}
\item\verb!init! is a pointer to the \verb!SEDNA_EF_INIT! structure (the pointer
to this structure will be passed then to \verb!func! and \verb!func_deinit!
functions);
\item\verb!error_msg_buf! is a pointer to the string buffer used for specifying
error message if function invocation fails. Maximum message length is
\verb!SEDNA_ERROR_MSG_BUF_SIZE!, including the null character \verb!'\0'!.
\end{citemize}
\item \verb!func_deinit()! (optional) -- the deinitialization function. It
should deallocate any memory allocated by the initialization function. This
function has the following signature:
\begin{verbatim}
void func_deinit(SEDNA_EF_INIT *init, char * error_msg_buf);
\end{verbatim}
\begin{citemize}
\item\verb!init! is a pointer to the \verb!SEDNA_EF_INIT! structure which was
passed to \verb!func_init! function (if written);
\item\verb!error_msg_buf! is a pointer to the string buffer used for specifying
error message if function invocation fails. Maximum message length is
\verb!SEDNA_ERROR_MSG_BUF_SIZE!, including the null character \verb!'\0'!.
\end{citemize}
\end{itemize}
When \verb!func!, \verb!func_init! or \verb!func_deinit! is being executed,
\verb!error_msg_buf! contains an empty string. If the function succeeds, it
should leave this value empty. In case of error, a non-empty string (an error
description) must be placed in \verb!error_msg_buf! (if you place an empty
string in \verb!error_msg_buf!, the executor assumes that function execution
was successful).
Each shared library must also export a null-terminated array with the names
of the XQuery functions defined by this library:
\begin{verbatim}
char const *ef_names[] = {"func", NULL};
\end{verbatim}
The file \verb!sedna_ef.h! defines several types for representing function
arguments and results:
\begin{itemize}
\item \verb!SEDNA_ATOMIC_TYPE! -- represents an atomic type, defined as:
\begin{verbatim}
typedef enum sedna_atomic_type {
SEDNATYPE_integer,
SEDNATYPE_float,
SEDNATYPE_double,
SEDNATYPE_string
} SEDNA_ATOMIC_TYPE;
\end{verbatim}
\item \verb!SEDNA_ATOMIC_VALUE! -- represents an atomic value, defined as:
\begin{verbatim}
typedef int SEDNA_integer;
typedef float SEDNA_float;
typedef double SEDNA_double;
typedef char *SEDNA_string;
typedef struct sedna_atomic_value {
SEDNA_ATOMIC_TYPE type;
union {
SEDNA_integer val_integer;
SEDNA_float val_float;
SEDNA_double val_double;
SEDNA_string val_string;
};
} SEDNA_ATOMIC_VALUE;
\end{verbatim}
Memory for values that are pointers (i.e. \verb!SEDNA_string!) MUST be allocated
using the malloc function passed in the \verb!SEDNA_EF_INIT! structure.
\item \verb!SEDNA_SEQUENCE_ITEM! -- represents a node in a linked list of atomic
values, defined as:
\begin{verbatim}
typedef struct sedna_sequence_item {
SEDNA_ATOMIC_VALUE data;
struct sedna_sequence_item *next;
} SEDNA_SEQUENCE_ITEM;
\end{verbatim}
Linked lists are used to represent sequences of atomic values. An empty sequence
is represented by a \verb!NULL! pointer. If \verb!func! needs to return a
sequence of values, memory for the nodes MUST be allocated using the malloc
function passed in the \verb!SEDNA_EF_INIT! structure.
\item \verb!SEDNA_EF_ARGS! -- represents an array of arguments passed to a
function, defined as:
\begin{verbatim}
typedef struct sedna_ef_args {
int length;
SEDNA_SEQUENCE_ITEM **args;
} SEDNA_EF_ARGS;
\end{verbatim}
\end{itemize}
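To illustrate how these pieces fit together, below is a minimal sketch of an
external function that doubles its single integer argument. The function name
\verb!twice! is chosen purely for illustration; the sketch assumes only the
declarations from \verb!sedna_ef.h! shown above.
\begin{verbatim}
#include <string.h>
#include "sedna_ef.h"

/* names of the XQuery functions exported by this library */
char const *ef_names[] = {"twice", NULL};

SEDNA_SEQUENCE_ITEM *twice(SEDNA_EF_INIT *init,
                           SEDNA_EF_ARGS *args,
                           char *error_msg_buf)
{
    /* expect exactly one singleton integer argument */
    if (args->length != 1 || args->args[0] == NULL ||
        args->args[0]->next != NULL ||
        args->args[0]->data.type != SEDNATYPE_integer)
    {
        strncpy(error_msg_buf, "twice: expected a single integer",
                SEDNA_ERROR_MSG_BUF_SIZE - 1);
        return NULL;
    }

    /* the result is a single atomic non-string value, so the
       preallocated item_buf can be used instead of sedna_malloc */
    init->item_buf->data.type = SEDNATYPE_integer;
    init->item_buf->data.val_integer =
        2 * args->args[0]->data.val_integer;
    init->item_buf->next = NULL;
    return init->item_buf;
}
\end{verbatim}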
\subsubsection*{Location of External Function Libraries}
Compiled libraries must be placed in the directory \verb!lib! that is (1) in the
same directory where the directory \verb!data! with database data is located or
(2) in the directory \verb!<db_name>_files! where database data are
stored\footnote{See Section ``Sedna Directory Structure'' in \cite{doc:admin} to
learn where database data are located}. Libraries that are database-independent
should be placed in (1). Libraries that are database-specific should be placed
in (2). Overloaded functions are not allowed. If two libraries located in (1)
and (2) contain functions with the same name, a function from the library in (2)
is called. If libraries in the same directory (1 or 2) contain functions with
the same name, it is not specified which one is called.
There is a sample external function code available in the folder:
\begin{verbatim}
[win:] INSTALL_DIR\examples\api\external-functions\c\
[nix:] INSTALL_DIR/examples/api/external-functions/c/
\end{verbatim}
where \verb!INSTALL_DIR! refers to the directory where Sedna is installed.
%===============================================================================
% XQuery Options and Extensions: Runtime Properties
%===============================================================================
\subsubsection{Runtime Properties}
The \verb!se:get-property! function provides a method for applications to
determine at runtime the current values of system parameters, configurable
limits, and environment information. The \verb!$name! argument specifies the
system variable to be queried. The function is defined within the predefined
Sedna namespace (\verb!se! prefix) as follows:
\begin{verbatim}
se:get-property($name as xs:string) as item()
\end{verbatim}
The available names are as follows:
\begin{itemize}
\item\verb!$user! -- retrieves a string which contains the current user name
\end{itemize}
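For example, since the \verb!se! prefix is predefined, the current user name
can be retrieved as follows:
\begin{verbatim}
se:get-property("$user")
\end{verbatim}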
%===============================================================================
% XQuery Options and Extensions: General Hashing Functions
%===============================================================================
\subsubsection{General Hashing Functions}
Sedna provides built-in functions that perform standard hashing algorithms:
\verb!md5!, \verb!sha-1!, \verb!sha-224!, \verb!sha-256!, \verb!sha-384!,
\verb!sha-512!. The functions accept string (xs:string or derived/promotable) or
binary (xs:base64Binary) data and return the hash in hex binary representation.
All hashing functions are assigned to the \url{http://sedna.org/crypto}
namespace.
\begin{verbatim}
declare namespace hash = 'http://sedna.org/crypto';
hash:digest($value as xs:string or xs:base64Binary,
$algorithm as xs:string) as xs:hexBinary
\end{verbatim}
Computes the digest of the given \verb!$value!, using the specified
\verb!$algorithm!. The value may be of type xs:string or xs:base64Binary. The
following algorithms are supported: \verb!md5!, \verb!sha-1!, \verb!sha-224!,
\verb!sha-256!, \verb!sha-384!, \verb!sha-512!.
For example:
\begin{verbatim}
declare namespace hash = 'http://sedna.org/crypto';
hash:digest("", "md5")
\end{verbatim}
returns \verb!D41D8CD98F00B204E9800998ECF8427E!.
\begin{verbatim}
declare namespace hash = 'http://sedna.org/crypto';
hash:md5($value as xs:string or xs:base64Binary) as xs:hexBinary
hash:sha1($value as xs:string or xs:base64Binary) as xs:hexBinary
hash:sha224($value as xs:string or xs:base64Binary) as xs:hexBinary
hash:sha256($value as xs:string or xs:base64Binary) as xs:hexBinary
hash:sha384($value as xs:string or xs:base64Binary) as xs:hexBinary
hash:sha512($value as xs:string or xs:base64Binary) as xs:hexBinary
\end{verbatim}
Algorithm-specific digest functions. For example:
\begin{verbatim}
declare namespace hash = 'http://sedna.org/crypto';
hash:sha1("SXZhbiBTaGNoZWtsZWlu" cast as xs:base64Binary)
\end{verbatim}
returns \verb!C777B3E1AEB0D6D3FF07E8ADEFE841F933C09648! - \verb!SHA-1! hash for
the base64-encoded data.
%===============================================================================
% Update Language
%===============================================================================
\subsection{Update Language}
\label{upd-lang}
The update language is based on the XQuery update proposal by Patrick Lehti
\cite{paper:query-update} with a number of improvements.
\begin{note}
The result of each update statement must not break the well-formedness and
validity of the XML entities stored in the database. Otherwise, an error is
raised.
\end{note}
Sedna provides several kinds of update statements:
\begin{citemize}
\item\verb!INSERT! statement inserts zero or more nodes into a designated
position with respect to the target nodes;
\item\verb!DELETE! statement removes target nodes from the database together
with their descendants;
\item\verb!DELETE_UNDEEP! statement removes target nodes from the database
preserving their content;
\item\verb!REPLACE! statement replaces target nodes with a new sequence of zero
or more nodes;
\item\verb!RENAME! statement changes the name of the target nodes.
\end{citemize}
The syntax and semantics of these expressions are described in the following
sections.
\subsubsection*{Insert Statement}
The insert statement inserts result of the given expression at the position
identified by the \verb!into!, \verb!preceding! or \verb!following! clauses:
\begin{verbatim}
UPDATE
insert SourceExpr (into|preceding|following) TargetExpr
\end{verbatim}
\verb!SourceExpr! identifies the ordered sequence of the nodes to be inserted.
The \verb!into!, \verb!preceding! or \verb!following! clause identifies the
position. For each node in the result sequence of \verb!TargetExpr!, the result
sequence of \verb!SourceExpr! is inserted at the position identified by the
\verb!into!, \verb!preceding! or \verb!following! clause. If the \verb!into!
clause is specified, the sequence is inserted at an arbitrary position in the
child sequence of each node in the result of \verb!TargetExpr!. If the
\verb!preceding! clause is specified, the sequence is inserted before each node
in the result of \verb!TargetExpr!. If the \verb!following! clause is specified,
the sequence is inserted after each node in the result of \verb!TargetExpr!.
An error is raised if one of the following conditions is met:
\begin{itemize}
\item There are non-element nodes in the result of the \verb!TargetExpr!
expression evaluation in case of the \verb!into! clause;
\item There are temporary nodes in the result of the \verb!TargetExpr!
expression evaluation (a node is considered \emph{temporary}, if it is created
as the result of the XQuery constructor evaluation).
\end{itemize}
For example, the following update statement inserts a new \emph{warning} element
before each \emph{blood\_pressure} element which has a \emph{systolic} value
greater than 180:
\begin{verbatim}
UPDATE
insert <warning>High Blood Pressure!</warning>
preceding doc("hospital")//blood_pressure[systolic>180]
\end{verbatim}
\subsubsection*{Delete Statement}
The \verb!DELETE! statement removes persistent nodes from the database. It
contains a subexpression that returns the nodes to be deleted.
\begin{verbatim}
UPDATE
delete Expr
\end{verbatim}
\verb!Expr! identifies the nodes to be removed from the database. Note that
nodes are removed from the database with all their descendants.
An error is raised if one of the following conditions is met:
\begin{itemize}
\item There are atomic values in the result of the \verb!Expr! expression;
\item There are temporary nodes in the result of the \verb!Expr! expression
evaluation (a node is considered \emph{temporary}, if it is created as the
result of the XQuery constructor evaluation).
\end{itemize}
The following update statement deletes all \emph{blood\_pressure} nodes which
contain a \emph{systolic} value greater than 180:
\begin{verbatim}
UPDATE
delete doc("hospital")//blood_pressure[systolic>180]
\end{verbatim}
\subsubsection*{Delete Undeep Statement}
The \verb!DELETE_UNDEEP! statement removes nodes identified by \verb!Expr!, but
in contrast to the \verb!DELETE! statement it leaves the descendants of the
nodes in the database.
\begin{verbatim}
UPDATE
delete_undeep Expr
\end{verbatim}
\verb!Expr! identifies the nodes to be removed from the database.
An error is raised if one of the following conditions is met:
\begin{itemize}
\item There are atomic values in the result of the \verb!Expr! expression;
\item There are temporary nodes in the result of the \verb!Expr! expression
evaluation (a node is considered \emph{temporary}, if it is created as the
result of the XQuery constructor evaluation).
\end{itemize}
Consider the following example. The document named \verb!a.xml! before update:
\begin{verbatim}
<A>
<B>
<C/>
<D/>
</B>
</A>
\end{verbatim}
The following delete undeep statement removes \emph{B} nodes and makes \emph{C}
and \emph{D} nodes children of the \emph{A} element:
\begin{verbatim}
UPDATE
delete_undeep doc("a.xml")//B
\end{verbatim}
This is how the \verb!a.xml! document will look after the update:
\begin{verbatim}
<A>
<C/>
<D/>
</A>
\end{verbatim}
\subsubsection*{Replace Statement}
The \verb!REPLACE! statement is used to replace nodes in an XML document in the
following manner:
\begin{verbatim}
UPDATE
replace $var [as type] in SourceExpr
with TargetExpr($var)
\end{verbatim}
The replace statement iterates over all the nodes returned by the \verb!SourceExpr!,
binding the variable \verb!$var! to each node. For each binding the result of
the \verb!TargetExpr($var)! expression is evaluated. Each node returned by the
\verb!SourceExpr! is replaced with the returned sequence of nodes. Note that
\verb!TargetExpr! is executed over the original document without taking into
account intermediate updates performed during execution of this statement.
An error is raised if one of the following conditions is met:
\begin{itemize}
\item There are atomic values in the result of the \verb!SourceExpr! or
\verb!TargetExpr! expressions;
\item There are temporary nodes in the result of the \verb!SourceExpr!
expression evaluation (a node is considered \emph{temporary}, if it is created
as the result of the XQuery constructor evaluation).
\end{itemize}
The \verb!$var! variable bound in the \verb!replace! clause may have an optional
type declaration. If the type of a value bound to the variable does not match
the declared type, an error is raised.
In the following example, the salary of persons named "John" is doubled.
\begin{verbatim}
UPDATE
replace $p in doc("foo.xml")/db/person[name="John"]
with
<person>
{($p/@*,
$p/node()[not(self::salary)],
for $s in $p/salary
return <salary>{$s*2}</salary>)}
</person>
\end{verbatim}
\subsubsection*{Rename Statement}
The \verb!RENAME! statement is used to change the qualified name of an element
or attribute:
\begin{verbatim}
UPDATE
rename TargetExpr on QName
\end{verbatim}
The rename statement changes the name property of all nodes returned by the
\verb!TargetExpr! expression to the given QName.
An error is raised if one of the following conditions is met:
\begin{itemize}
\item There are items which are not element or attribute nodes in the result of
the \verb!TargetExpr! expression evaluation;
\item There are temporary nodes in the result of the \verb!TargetExpr!
expression evaluation (a node is considered \emph{temporary}, if it is created
as the result of the XQuery constructor evaluation).
\end{itemize}
The following expression changes the name of all the \verb!job! elements without
changing their contents:
\begin{verbatim}
UPDATE
rename doc("foo.xml")//job on profession
\end{verbatim}
%===============================================================================
% Bulk Load
%===============================================================================
\subsection{Bulk Load}
\label{bulk-load}
To bulk load a stand-alone document, use the following statement:
\begin{verbatim}
LOAD "path_to_file" "document_name"
\end{verbatim}
The first parameter is a path to the file which contains a document to be
loaded. The second parameter is the name for this document in the database.
For example,
\begin{verbatim}
LOAD "/opt/test.xml" "test"
\end{verbatim}
loads file \verb!/opt/test.xml! into database as a stand-alone document with
name \verb!test!.
To load document into a collection, use the following statement:
\begin{verbatim}
LOAD "path_to_file" "document_name" "collection_name"
\end{verbatim}
The first parameter is a path to the file which contains a document to be
loaded. The second parameter is the name for this document in the database. The
third parameter is the collection name to load the document into.
For example,
\begin{verbatim}
LOAD "/opt/mail-01.xml" "mail-01" "mails"
\end{verbatim}
loads file \verb!/opt/mail-01.xml! into collection \verb!mails!.
To perform bulk load not from a source file but from an input stream, use the
following statements (the first loads a stand-alone document, the second loads
a document into a collection):
\begin{verbatim}
LOAD STDIN "document_name"
\end{verbatim}
\begin{verbatim}
LOAD STDIN "document_name" "collection_name"
\end{verbatim}
Compared to the above bulk load statements, here the \verb!"path_to_file"! is
replaced by the keyword \verb!STDIN! to denote that the document to be loaded is
taken from the input stream. Characters in the input stream must form a
well-formed XML document, which is loaded into the database and named as
specified by \verb!"document_name"!. If \verb!collection_name! is set, the
document is loaded into the specified collection of the database.
By default, the standard input stream is used. You can redirect a different
input stream to be used as the input for bulk load. For example, an XML document
produced by some program as its output can be loaded into a Sedna database in a
stream-wise fashion. To redirect the input when working from a command line, you
can use the functionality provided by your operating system. The Java and Scheme
APIs provide additional wrappers for bulk load from a stream, such that the
input stream can be specified by an additional argument of a function call.
By default, Sedna removes boundary whitespace according to the boundary-space
policy defined in \cite{paper:query-language}. To control boundary whitespace
processing, use \verb!boundary-space declaration! \cite{paper:query-language} in
the prolog of the \verb!LOAD! statement. The following example illustrates a
boundary-space declaration that instructs Sedna to preserve whitespace while
loading \verb!auction.xml! document:
\begin{verbatim}
declare boundary-space preserve;
LOAD "auction.xml" "auction"
\end{verbatim}
Note that heavy bulk loads can be greatly optimized by setting the
\verb!SEDNA_LOG_AMOUNT! connection attribute to \verb!SEDNA_LOG_LESS! (see
Section \ref{session-option-capi} for more information).
\subsubsection{Preserving CDATA Sections}
It is possible to preserve the formatting of continuous CDATA sections with the
\verb!cdata-section-preserve! option.
\begin{verbatim}
declare option se:bulk-load "cdata-section-preserve=yes";
LOAD "auction.xml" "auction"
\end{verbatim}
The \verb/cdata-section-preserve=yes/ option causes text nodes that appear
within CDATA sections to be serialized within CDATA sections. CDATA section
formatting is preserved only for whole text nodes, and this property of a text
node is inherited when the text node is appended. For example, in the following
document fragment the CDATA section will be serialized as it appears in the
document:
\begin{verbatim}
<a><![CDATA[<example data>]]></a>
\end{verbatim}
But the next fragment will not be saved with mixed formatting.
\begin{verbatim}
<a><![CDATA[<example]]> data<![CDATA[>]]></a>
\end{verbatim}
Instead, it will be serialized in the same way as the previous one, i.e. the
whole text will be in a CDATA section.
%===============================================================================
% Data Definition Language
%===============================================================================
\subsection{Data Definition Language}
This section describes Sedna Data Definition Language (DDL) that is used to
create and manage the database structures that will hold data.
Most parameters of Sedna DDL statements are computable and are specified as
XQuery expressions. The expected type of all the parameters is \verb!xs:string!.
All parameters are evaluated and atomized. If the atomized value is not of type
\verb!xs:string!, a dynamic error is raised.
%===============================================================================
% Data Definition Language: Managing Standalone Documents
%===============================================================================
\subsubsection{Managing Standalone Documents}
\begin{verbatim}
CREATE DOCUMENT doc-name-expr
\end{verbatim}
The \verb!CREATE! \verb!DOCUMENT! statement creates a new standalone document
with the name that is the result of \verb!doc-name-expr!.
\begin{verbatim}
DROP DOCUMENT doc-name-expr
\end{verbatim}
The \verb!DROP! \verb!DOCUMENT! statement drops the standalone document with the
name that is the result of \verb!doc-name-expr!.
For example, the following statement:
\begin{verbatim}
CREATE DOCUMENT "report"
\end{verbatim}
creates a document named "report", while:
\begin{verbatim}
DROP DOCUMENT "report"
\end{verbatim}
drops it.
There is a system document \verb!$documents! which lists all available documents
and collections. For details on retrieving metadata see \ref{managing-metadata}
section.
%===============================================================================
% Data Definition Language: Managing Collections
%===============================================================================
\subsubsection{Managing Collections}
Sedna provides a mechanism of organizing multiple documents into a collection.
Collection provides a uniform way of writing XQuery and XML update statements
addressed to multiple documents at once. Collections are preferable for
situations when a group of documents is to be queried/updated by means of not
referring to their individual document names, but according to some conditions
over their content.
In a Sedna database, a document can either be standalone (when it doesn't
belong to any collection) or belong to some collection. Compared to
standalone documents, all documents within a given collection have a common
descriptive schema. The common descriptive schema (which can be considered as a
union of individual descriptive schemas for all documents that belong to a
collection) allows addressing XQuery and XML update queries to all members of a
collection.
Below is the specification of syntax and semantics of statements that manage
collections.
\begin{verbatim}
CREATE COLLECTION coll-name-expr
\end{verbatim}
The \verb!CREATE! \verb!COLLECTION! statement creates a new collection with the
name that is the result of \verb!coll-name-expr!.
For example, \verb!CREATE COLLECTION! \verb!"mails"! creates a collection named
"mails".
To check whether a collection is available, the \verb!fn:col-available! function
can be used. It accepts a collection name as its argument:
\begin{verbatim}
fn:col-available($col-name as xs:string) as xs:boolean
\end{verbatim}
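For example, the following query (a sketch, using the \verb!mails! collection
from the examples above and the \verb!collection! function described below)
returns the documents of the collection only if it is available:
\begin{verbatim}
if (col-available("mails"))
then collection("mails")
else ()
\end{verbatim}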
XML documents can be loaded into the collection, as previously described in
section \ref{bulk-load}.
To access a single document from a collection in an XQuery or XML update query,
the \verb!fn:doc! function accepts a collection name as its second optional
argument:
\begin{verbatim}
fn:doc($doc as xs:string,
$col as xs:string) as document-node()
\end{verbatim}
The function returns the document with the \verb!$doc! name that belongs to the
collection named \verb!$col!.
To check whether a document is available in a collection, the
\verb!fn:doc-available! function accepts a collection name as its second
optional argument:
\begin{verbatim}
fn:doc-available($doc as xs:string,
$col as xs:string) as xs:boolean
\end{verbatim}
The function call checks whether the user has rights to query the specified
collection and puts a read lock on it.
For example, the query \verb!doc('mail-01', 'mails')! returns the document named
\verb!mail-01! from the collection \verb!mails!, and
\verb!doc-available('mail-01', 'mails')! returns \verb!true! only if
\verb!mail-01! is available in the collection \verb!mails!.
\begin{verbatim}
fn:collection($col as xs:string?) as document-node()*
\end{verbatim}
The function \verb!collection! can be called from any place within an XQuery or
XML update query where the function call is allowed. The \verb!collection!
function returns the sequence of all documents that belong to the collection
named \verb!$col!. The relative order of documents in a sequence returned by
\verb!collection! function is currently undefined in Sedna.
Conventional XQuery predicates can be used for filtering the sequence returned
by the \verb!collection! function call, for selecting certain documents that
satisfy the desired condition.
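For example, assuming each document in the \verb!mails! collection has a
top-level \verb!message! element with a \verb!subject! child (a hypothetical
structure used only for illustration), the following query selects the mails
whose subject mentions Sedna:
\begin{verbatim}
collection("mails")/message[contains(subject, "Sedna")]
\end{verbatim}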
\begin{verbatim}
CREATE DOCUMENT doc-name-expr IN COLLECTION coll-name-expr
\end{verbatim}
This statement creates a new document named \verb!doc-name-expr! in a collection
named \verb!coll-name-expr!.
For example, the following statement:
\begin{verbatim}
CREATE DOCUMENT 'mail' IN COLLECTION 'mails'
\end{verbatim}
creates a document named "mail" in the collection named "mails".
\begin{verbatim}
DROP DOCUMENT doc-name-expr IN COLLECTION coll-name-expr
\end{verbatim}
The \verb!DROP DOCUMENT IN COLLECTION! statement drops the document named
\verb!doc-name-expr! located in the collection named \verb!coll-name-expr!.
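For example, the following statement drops the document created above:
\begin{verbatim}
DROP DOCUMENT "mail" IN COLLECTION "mails"
\end{verbatim}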
\begin{verbatim}
DROP COLLECTION coll-name-expr
\end{verbatim}
The \verb!DROP COLLECTION! statement drops the collection with the
\verb!coll-name-expr! name from the database. If the collection contains any
documents, these documents are dropped as well.
\begin{verbatim}
RENAME COLLECTION old-name-expr INTO new-name-expr
\end{verbatim}
The \verb!RENAME COLLECTION! statement renames the collection whose name is the
result of \verb!old-name-expr!; the new name is the result of
\verb!new-name-expr!. After atomization, the results of both
\verb!old-name-expr! and \verb!new-name-expr! must be either of type
\verb!xs:string! (or derived from it) or promotable to \verb!xs:string!.
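For example, the following statement renames the \verb!mails! collection:
\begin{verbatim}
RENAME COLLECTION "mails" INTO "old-mails"
\end{verbatim}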
There is a system document \verb!$collections! which lists all available
collections. For details on retrieving metadata see \ref{managing-metadata}
section.
%===============================================================================
% Data Definition Language: Managing Value Indices
%===============================================================================
\subsubsection{Managing Value Indices}
\label{sec:managing-value-indices}
Sedna supports value indices to index XML element content and attribute values.
An index can be based on one of two structure types: B+ tree or the experimental
BST (\emph{B}lock \emph{S}tring \emph{T}rie). Below is the
description of statements to manage indices.
\begin{note} In the current version of Sedna, the query executor does not use
indices automatically. You can force the executor to employ indices by using
the XQuery index-scan functions specified in section
\ref{sec:value-index-scan-fun}.
\end{note}
\begin{verbatim}
CREATE INDEX title-expr
ON Path1 BY Path2
AS type
[USING tree-type]
\end{verbatim}
The \verb!CREATE INDEX! creates an index on nodes (specified by \verb!Path1!) by
keys (specified by \verb!Path2!).
\verb!Path1! is an XPath expression without any filter expressions that
identifies the nodes of a document or a collection that are to be indexed.
\verb!Path2! is an XPath expression without any filter expressions that
specifies the relative path to the nodes whose string-values are used as keys to
identify the nodes returned by the \verb!Path1! expression. The \verb!Path2!
expression should not start with '/' or '//'. The full path from the root of
documents (that may be in a collection) to the key nodes is \verb!Path1/Path2!.
For instance, let \verb!Path1! be \verb!doc("a")/b/c! and \verb!Path2! be
\verb!d!. Let \verb!X! be the node returned by the \verb!Path1! expression, and
\verb!Y! be one of the nodes returned by the \verb!doc("a")/b/c/d! expression.
If \verb!Y! is the descendant of \verb!X!, then the value of \verb!Y! is used as
the key for searching the node \verb!X!.
\verb!title-expr! is the title of the index created. It should be unique for
each index in the database.
\verb!type! is an atomic type which the value of the keys should be cast to. The
following types are supported for B+ tree: \verb!xs:string!, \verb!xs:integer!,
\verb!xs:float!, \verb!xs:double!, \verb!xs:date!, \verb!xs:time!,
\verb!xs:dateTime!, \verb!xs:yearMonthDuration!, \verb!xs:dayTimeDuration!.
Note that BST supports \verb!xs:string! only.
\verb!tree-type! defines the structure used for index storage. This argument is
optional. By default the index is stored in a B+ tree, which is a good choice in
the general case. The alternative structure, BST, is based on the prefix tree
(or \emph{trie}) concept and can give considerable disk-space savings in certain
situations. Its main distinguishing features are:
\begin{enumerate}
\item {BST can handle strings of any length.}
\item {If you index data by fields that contain strings with repeating
prefixes (e.g. URLs, URIs), BST is a good choice.}
\end{enumerate}
When used appropriately, BST can store indexes up to 4 times more compactly
than a B+ tree with the same search speed.
The following \verb!tree-type! values are supported: \verb!"btree"! for B+ tree
(default), \verb!"bstrie"! for BST.
\begin{note} BST feature is experimental at this moment; \textbf{do not use it
in production} or any critical applications.
\end{note}
In the following example, people are indexed by the names of their cities. To
generate the index keys, city names are cast to the \verb!xs:string! type. B+
tree is used for index storage.
\begin{verbatim}
CREATE INDEX "people"
ON doc("auction")/site//person BY address/city
AS xs:string
\end{verbatim}
The following example is exactly the same as the previous but uses BST for index
storage:
\begin{verbatim}
CREATE INDEX "people"
ON doc("auction")/site//person BY address/city
AS xs:string
USING "bstrie"
\end{verbatim}
To remove an index, use the following statement:
\begin{verbatim}
DROP INDEX title-expr
\end{verbatim}
The \verb!DROP INDEX! statement removes the index named \verb!title-expr! from
the database.
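For example, the index created above can be removed as follows:
\begin{verbatim}
DROP INDEX "people"
\end{verbatim}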
%===============================================================================
% Data Definition Language: Managing Full-Text Indices
%===============================================================================
\subsubsection{Managing Full-Text Indices}
\label{sec:managing-ft-indices}
Sedna allows building full-text indices in order to combine XQuery with
full-text search facilities. The resulting indices must be used explicitly via
the full-text search functions (see \ref{sec:ft-fun}); the XQuery Full Text
extensions are not supported.
Sedna can be integrated with dtSearch \cite{link:dtsearch-engine}, a commercial
text retrieval engine, which provides full-text indices. As dtSearch is a third
party commercial product, Sedna does not include dtSearch. If you are interested
in using Sedna with dtSearch, please contact us. Below is the description of
statements to manage full-text indices in Sedna.
\begin{note}
In the current version of Sedna, the query executor does not use full-text
indices automatically. You can force the executor to employ indices by using the
XQuery full-text search functions specified in section \ref{sec:ft-fun}.
\end{note}
\begin{verbatim}
CREATE FULL-TEXT INDEX title-expr
ON path
TYPE type
[
WITH OPTIONS options
]
\end{verbatim}
The \verb!CREATE FULL-TEXT INDEX! statement indexes nodes (specified by
\verb!path!) by a text representation of the nodes. The text representations of
the nodes are constructed according to the \verb!type! parameter value.
\verb!title-expr! is the title of the index created. It should be unique for
each full-text index in the database.
\verb!path! is an XPath expression without any filter expressions that
identifies the nodes of a document or a collection that are to be indexed. An
example of the \verb!path! expression is as follows
\verb!doc("foo")/library//article!.
\verb!type! specifies how the text representations of nodes are constructed when
the nodes are indexed. \verb!type! can have one of the following values:
\begin{itemize}
\item \verb!"xml"! -- the XML representations of the nodes are used;
\item \verb!"string-value"! -- the string-values of the nodes are used as
obtained using standard XQuery \verb!fn:string! function. The string-value of a
node is the concatenated contents of all its descendant text nodes, in document
order;
\item \verb!"delimited-value"! -- the same as \verb!"string-value"! but blank
characters are inserted between text nodes;
\item \verb!"customized-value"! \verb!((element-qname, type)! \verb!, ...!
\verb!(element-qname, type))! -- this option allows specifying types for
particular element nodes. Here \verb!element-qname! is a QName of an element,
\verb!type! can have one of the values listed above (i.e. \verb!"xml"!,
\verb!"string-value"!, \verb!"delimited-value"!). For those elements that are
not specified in the list, the \verb!"xml"! type is used by default.
\end{itemize}
\verb!options! is a string of the following form:
\verb!"option=value{,option=value}"!. It denotes options used for index
construction. The following options are available:
\begin{itemize}
\item \verb!backend! -- specifies which implementation of full-text indexes to
use. Allowed values are \verb!native! and \verb!dtsearch!; the latter is only
available in dtSearch-enabled builds. For dtSearch-enabled builds the default
backend is \verb!dtsearch!; for other builds it is \verb!native!.
\end{itemize}
Options for \verb!native! backend:
\begin{itemize}
\item \verb!stemming! -- specifies the stemming language to use.
\item \verb!stemtype! -- to be able to search for both stemmed and original
words, add the \verb!stemtype=both! option; otherwise stemming, if enabled, is
always used.
\end{itemize}
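For example, the following statement is a sketch of an index built with the
native backend and stemming; it assumes that \verb!en! is a valid
stemming-language value, so check the accepted values for your build:
\begin{verbatim}
CREATE FULL-TEXT INDEX "articles-en"
  ON doc("foo")/library//article
  TYPE "string-value"
  WITH OPTIONS "backend=native,stemming=en,stemtype=both"
\end{verbatim}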
In the following example, articles are indexed by their contents represented as
XML.
\begin{verbatim}
CREATE FULL-TEXT INDEX "articles"
ON doc("foo")/library//article
TYPE "xml"
\end{verbatim}
The example below illustrates the use of \verb!"customized-value"! type.
\begin{verbatim}
CREATE FULL-TEXT INDEX "messages"
ON doc("foo")//message
TYPE "customized-value"
(("b", "string-value"),
("a", "delimited-value"))
\end{verbatim}
To remove a full-text index, use the following statement:
\begin{verbatim}
DROP FULL-TEXT INDEX title-expr
\end{verbatim}
The \verb!DROP! \verb!FULL-TEXT! \verb!INDEX! statement removes the full-text
index named \verb!title-expr! from the database.
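For example, the full-text index created above can be removed as follows:
\begin{verbatim}
DROP FULL-TEXT INDEX "articles"
\end{verbatim}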
%===============================================================================
% Data Definition Language: Managing Modules
%===============================================================================
\subsubsection{Managing Modules}
XQuery allows putting functions in library modules, so that they can be shared
and imported by any query. A library module contains a module declaration
followed by variable and/or function declarations. The module declaration
specifies its target namespace URI which is used to identify the module in the
database. For more information on modules see the XQuery specification
\cite{paper:query-language}.
Before a library module can be imported from a query, it must be loaded into
the database. To load a module, use the following statement.
\begin{verbatim}
LOAD MODULE "path_to_file", ..., "path_to_file"
\end{verbatim}
Each \verb!path_to_file! specifies a path to a file. If only one parameter is
supplied, it refers to the file which contains the module definition. The module
definition can also be divided into several files. In this case all files must
have a module declaration with the same target namespace URI (otherwise an error
is raised).
For example, suppose that you have the following module stored in
\verb!math.xqlib!.
\begin{verbatim}
module namespace math = "http://example.org/math";
declare variable $math:pi as xs:decimal := 3.1415926;
declare function math:increment($num as xs:decimal) as xs:decimal {
$num + 1
};
declare function math:square($num as xs:decimal) as xs:decimal {
$num * $num
};
\end{verbatim}
You can load this module as follows.
\begin{verbatim}
LOAD MODULE "math.xqlib"
\end{verbatim}
Once a library module is loaded into the database, it can be imported into a
query using conventional XQuery module import \cite{paper:query-language}. For
example, you can import the above module as follows.
\begin{verbatim}
import module namespace math = "http://example.org/math";
math:increment(math:square($math:pi))
\end{verbatim}
To replace an already loaded module with a new one, use the following statement.
\begin{verbatim}
LOAD OR REPLACE MODULE "path_to_file", ..., "path_to_file"
\end{verbatim}
To remove a library module from the database, use the following statement.
\begin{verbatim}
DROP MODULE "target_namespace_URI"
\end{verbatim}
It results in removing the library module with the given target namespace URI
from the database.
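For example, the math module loaded above can be removed as follows.
\begin{verbatim}
DROP MODULE "http://example.org/math"
\end{verbatim}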
You can obtain information about modules loaded into the database by querying
the system collection named \verb!$modules! as follows
\verb!collection("$modules")!.
%===============================================================================
% Data Definition Language: Retrieving Metadata
%===============================================================================
\subsubsection{Retrieving Metadata}
\label{managing-metadata}
You can retrieve various metadata about database objects (such as documents,
collections, indexes, etc.) by querying system documents and collections listed
below.
Names of the system documents and collections start with the \verb!$! symbol.
The system documents and collections (except the ones marked with the * symbol)
are not persistent but generated on the fly. You can query these documents as
usual but you cannot update them. Also, these documents are not listed in the
\verb!$documents! system document.
\begin{citemize}
\item\verb!$documents! document -- list of all stand-alone documents,
collections and in-collection documents (except system meta-documents and
collections, like the \verb!$documents! document itself);
\item\verb!$collections! document -- list of all collections;
\item\verb!$modules! document -- contains the list of loaded modules with
their names;
\item\verb!$modules! (*) collection -- contains documents with precompiled
definitions of XQuery modules;
\item\verb!$indexes! document -- list of indexes with information about
them;
\item\verb!$ftindexes! document -- list of full-text indexes with
information about them (this document is available if Sedna is built with
\verb!SE_ENABLE_FTSEARCH! enabled);
\item\verb!$triggers! document -- list of triggers with information about
them (this document is available if Sedna is built with
\verb!SE_ENABLE_TRIGGERS! enabled);
\item\verb!$db_security_data! (*) document -- list of users and privileges on
database objects;
\item\verb!$schema! document -- descriptive schema of all documents and
collections with some schema-related information;
\item\verb!$errors! document -- list of all errors with descriptions;
\item\verb!$version! document -- version and build numbers;
\item\verb!$schema_<name>! document -- the descriptive schema of the
document or collection named \verb!<name>!;
\item\verb!$document_<name>! document -- statistical information about the
document named \verb!<name>!;
\item\verb!$collection_<name>! document -- statistical information about the
collection named \verb!<name>!.
\end{citemize}
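For example, the full list of documents and collections in the database can be
obtained by querying the \verb!$documents! system document:
\begin{verbatim}
doc("$documents")
\end{verbatim}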
The statistical information in \verb!$document_<name>! and
\verb!$collection_<name>! documents contains the following elements:
\begin{citemize}
\item\verb!total_schema_nodes! -- the number of the nodes of the descriptive
schema;
\item\verb!total_schema_text_nodes! -- the number of the attribute and text
nodes of the descriptive schema;
\item\verb!total_nodes! -- the number of the nodes of the document (or
collection);
\item\verb!schema_depth! -- the maximal depth of the document (or collection);
\item\verb!total_desc_blk! -- the number of the descriptor blocks occupied by
document (or collection);
\item\verb!total_str_blk! -- the number of the text blocks occupied by document
(or collection);
\item\verb!saturation! -- fill factor of the blocks (in percents);
\item\verb!total_innr_blk! -- the number of the descriptor blocks occupied by
document (or collection) except first and last blocks in each chain of blocks;
\item\verb!total_innr_size! -- the size of the inner blocks;
\item\verb!innr_blk_saturation! -- fill factor of the inner blocks (in
percents);
\item\verb!strings! -- the share of the string blocks (in percents);
\item\verb!descriptors! -- the share of the descriptor blocks (in percents);
\item\verb!nid! -- the share of the long labeling numbers' ($>11$) total size
(in percents);
\item\verb!indirection! -- the share of the indirection records' total size (in
percents);
\item\verb!total_size! -- the total size of the document (or collection), in
MBs;
\item\verb!string_size! -- the total size of the string blocks, in MBs;
\item\verb!descriptor_size! -- the total size of the descriptor blocks, in MBs;
\item\verb!nids_size! -- the total size of the long labeling numbers, in MBs;
\item\verb!free_space_in_str_blocks! -- the total size of the free space in the
string blocks;
\item\verb!indirection_size! -- the total size of the indirection records, in
MBs;
\item\verb!nids_size! -- the share of the indirection records' total size (in
percents);
\item\verb!STRINGS! -- the histogram of the XML data by its size;
\item\verb!NID! -- the histogram of the labeling numbers by their length.
\end{citemize}
%===============================================================================
% XQuery Triggers
%===============================================================================
\subsection{XQuery Triggers}
XQuery trigger support in Sedna is provided as an XQuery extension. To create a
trigger in a Sedna database, issue the following \verb!CREATE!
\verb!TRIGGER! statement:
\begin{verbatim}
CREATE TRIGGER trigger-name
( BEFORE | AFTER ) (INSERT | DELETE | REPLACE)
ON path
( FOR EACH NODE | FOR EACH STATEMENT )
DO {
Update-statement ($NEW, $OLD, $WHERE);
. . .
Update-statement ($NEW, $OLD, $WHERE);
XQuery-statement ($NEW, $OLD, $WHERE);
}
\end{verbatim}
The \verb!DROP! \verb!TRIGGER! statement drops the trigger with the
name which is the result of the \verb!trigger-name-expression!:
\begin{verbatim}
DROP TRIGGER trigger-name-expression
\end{verbatim}
Triggers can be defined to execute either \emph{before} or \emph{after} any
\verb!INSERT!, \verb!DELETE! or \verb!REPLACE! operation, either once per
modified node (\emph{node-level} triggers), or once per XQuery statement
(\emph{statement-level} triggers). If a trigger event occurs, the trigger's
action is called at the appropriate time to handle the event.
\medskip
\noindent
\textbf{Create Trigger Parameters:}
\begin{itemize}
\item\verb!trigger-name! is the trigger name, unique per database.
\item\verb!ON path! is an XPath expression without filter expressions
(predicates) that identifies the nodes on which the trigger is set. That means
that the trigger fires when a corresponding modification (insertion, deletion or
replacement) of those nodes occurs. This XPath expression must not contain
predicates or parent axes.
\item\verb!FOR EACH NODE!/\verb!FOR EACH STATEMENT!: these keywords specify
whether the trigger created is a \verb!node-level! or \verb!statement-level!
trigger. With a
node-level trigger, the trigger action is invoked once for each node that is
affected by the update statement that fired the trigger. In contrast, a
statement-level trigger is invoked only once when an appropriate statement is
executed, regardless of the number of nodes affected by that statement.
\item\verb!BEFORE!/\verb!AFTER!: triggers are also classified as
\emph{before}-triggers and \emph{after}-triggers. \verb!BEFORE! keyword in
\verb!CREATE TRIGGER! statement means the trigger created is
\emph{before}-trigger; \verb!AFTER! keyword means the trigger created is
\emph{after}-trigger. Statement-level-before triggers fire before the statement
starts to do anything, while statement-level-after triggers fire at the very end
of the statement. Node-level-before triggers fire immediately before a
particular node is operated on, while node-level-after triggers fire immediately
after the node is operated on (but before any statement-level-after trigger).
\item\verb!DO!: the trigger action is specified in braces \verb!{}! after the
\verb!DO! keyword. The action contains zero or more update statements and an
XQuery query. It is a mandatory requirement that a node-level trigger action
ends with an XQuery query, while this is optional for actions of
statement-level triggers. It is prohibited to use a prolog in the statements of
the trigger action.
\item Transition variables \verb!$NEW!, \verb!$OLD! and \verb!$WHERE! are
defined for each node-level trigger firing and can be used in each statement of
the trigger action. These three variables are defined as follows:
\begin{citemize}
\item For \verb!INSERT!: \verb!$NEW! is the node being inserted; \verb!$OLD! is
undefined; \verb!$WHERE! is the parent node in case of an \emph{insert-into}
statement and the sibling node in case of \emph{insert-preceding} and
\emph{insert-following} statements;
\item For \verb!DELETE!: \verb!$NEW! is undefined; \verb!$OLD! is the node being
deleted; \verb!$WHERE! is the parent of the deleted node;
\item For \verb!REPLACE!: \verb!$NEW! is the node being inserted during the
replacement; \verb!$OLD! is the node being replaced; \verb!$WHERE! is the parent
of the replaced node.
\end{citemize}
You cannot use transition variables in statement-level triggers.
\end{itemize}
The XQuery statement in the trigger action of a node-level trigger can
\emph{return a node} to the calling executor. A node-level trigger fired
before an operation has the following choices:
\begin{itemize}
\item It can return an \verb!empty sequence! to skip the operation for the current
node. This instructs the executor to not perform the node-level operation that
invoked the trigger (the insertion or replacement of a particular node).
\item For node-level \verb!INSERT! triggers only, the returned node becomes the
node that \emph{will be inserted}. This allows the trigger to modify the node
being inserted.
\item A node-level before trigger that does not intend to cause either of these
behaviors must be careful to return as its result the same node that was passed
in (that is, the \verb!$NEW! node for \verb!INSERT! and \verb!REPLACE!
triggers). For \verb!DELETE! triggers the returned value is ignored in all
cases unless it is an empty sequence.
\end{itemize}
The trigger action return value is ignored for node-level triggers fired after
an operation, and for all statement-level triggers, so they may as well
return an empty sequence.
If more than one trigger is defined for the same event on the same document, the
triggers will be fired in alphabetical order by trigger name. In the case of
before triggers, the possibly-modified node returned by each trigger becomes the
input to the next trigger. If any before trigger returns empty sequence, the
operation is abandoned for that node and subsequent triggers are not fired.
Typically, node-level-before triggers are used for checking or modifying the
data that will be inserted or updated. For example, a before trigger might be
used to insert the current time node as a child of the inserting node, or to
check that two descendants of the inserting node are consistent.
Node-level-after triggers are most sensibly used to propagate the updates to
other documents, or make consistency checks against other documents. The reason
for this division of labor is that an after-trigger can be certain it is seeing
the final value of the node, while a before-trigger cannot; there might be other
before triggers firing after it. When designing your trigger application, note
that node-level triggers are typically cheaper than statement-level ones.
If a trigger action executes update statements, these statements may fire
other triggers (cascading triggers). Currently the \emph{trigger cascading
level} in Sedna is limited to 10.
\begin{note}
Currently it is prohibited in a trigger action to update the same document or
collection that is being updated by the outer update statement that has fired
this trigger.
\end{note}
\begin{figure}[h]
\begin{center}
{\footnotesize\begin{tabular}{|c||c|c|c|}
\hline
& \emph{update}: & \emph{update}: & \emph{update}: \\
& INSERT & DELETE & REPLACE \\
\hline
\hline
\emph{trigger event}: & trigger path & & trigger path \\
INSERT & $>$= & & $>$= \\
& update path & & update path \\
\hline
\emph{trigger event}: & & trigger path & trigger path \\
DELETE & & $>$= & $>$= \\
& & update path & update path \\
\hline
\emph{trigger event}: & & & trigger path \\
REPLACE & & & $>$= \\
& & & update path \\
\hline
\end{tabular}}
\caption{\label{triggertab} Update and trigger path lengths needed for trigger
firing}
\end{center}
\end{figure}
Note also that the hierarchy of the XML data can sometimes affect trigger firing
in a complicated way. For example, if a node is deleted with all its descendant
subtree, then a \verb!DELETE!-trigger set on the descendants of the deleted
node is fired. In this situation \emph{length of trigger path} $>$= \emph{length
of update path}. In general, triggers fire according to the table in figure
\ref{triggertab}.
%===============================================================================
% XQuery Triggers: Trigger Examples
%===============================================================================
\subsubsection{Trigger Examples}
The following trigger is set on insertion of \verb!person! nodes. When some
\verb!person! node is inserted, the trigger analyzes its content and modifies it
in the following way. If the person is under 14 years old, the trigger inserts
an additional child node \verb!age-group! with the text value 'infant';
otherwise the trigger inserts an \verb!age-group! node
with the value 'adult':
\small{
\begin{verbatim}
CREATE TRIGGER "tr1"
BEFORE INSERT
ON doc("auction")/site//person
FOR EACH NODE
DO {
if($NEW/age < 14)
then
<person>{attribute id {$NEW/@id}}
{$NEW/*}
<age-group>infant</age-group>
</person>
else
<person>{attribute id {$NEW/@id}}
{$NEW/*}
<age-group>adult</age-group>
</person>;
}
\end{verbatim}}
The following trigger \emph{tr2} prohibits (throws an exception) a stake
increase if the person already has more than 3 open auctions:
\small{
\begin{verbatim}
CREATE TRIGGER "tr2"
BEFORE INSERT
ON doc("auction")/site/open_auctions/open_auction/bidder
FOR EACH NODE
DO {
if(($NEW/increase > 10.5) and
(count($WHERE/../open_auction
[bidder/personref/@person=$NEW/personref/@person]) > 3))
then error(xs:QName("tr2"),"The increase is prohibited")
else ($NEW);
}
\end{verbatim}}
The following trigger \emph{tr3} cancels \verb!person! node deletion if there
are any open auctions referenced by this person:
\small{
\begin{verbatim}
CREATE TRIGGER "tr3"
BEFORE DELETE
ON doc("auction")/site//person
FOR EACH NODE
DO {
if(exists(
$WHERE//open_auction/bidder/personref/@person=$OLD/@id))
then ()
else $OLD;
}
\end{verbatim}}
The next statement-level trigger \emph{tr4} maintains statistics in the document
named \emph{stat}. When this trigger is fired, the update operation has already
completed, which makes it possible to perform aggregate checks on the updated
data. After deletion of any node in the \emph{auction} document, the trigger
refreshes the statistics in \emph{stat} and inserts a warning if fewer than 10
persons are left in the auction:
\small{
\begin{verbatim}
CREATE TRIGGER "tr4"
AFTER DELETE
ON doc("auction")//*
FOR EACH STATEMENT
DO {
UPDATE replace $b in doc("stat")/stat with
<stat>
<open_auctions>
{count(doc("auction")//open_auction)}
</open_auctions>
<closed_auctions>
{count(doc("auction")//closed_auction)}
</closed_auctions>
<persons>
{count(doc("auction")//person)}
</persons>
</stat>;
UPDATE insert
if(count(doc("auction")//person) < 10)
then <warning>
"Critical number of person left in the auction"
</warning>
else ()
into doc("stat")/stat;
}
\end{verbatim}}
%===============================================================================
% Debug and Profile Facilities
%===============================================================================
\subsection{Debug and Profile Facilities}
Sedna provides several tracing, debugging and performance profiling tools that can
help to monitor and analyze queries and update statements:
\begin{itemize}
\item\textbf{Trace} -- provides a query execution trace intended to be used in
debugging queries (section \ref{trace});
\item\textbf{Debug Mode} -- provides stack of physical operations when error is
raised (section \ref{debug-mode});
\item\textbf{Explain Query} -- shows complete physical execution plan of the
query or update statement (section \ref{explain});
\item\textbf{Profile Query} -- creates complete physical execution plan of the
query or update statement with profile information (execution time, number of
calls, etc) for each physical operation (section \ref{profile}).
\end{itemize}
%===============================================================================
% Debug and Profile Facilities: Trace
%===============================================================================
\subsubsection{Trace}
\label{trace}
Sedna supports the standard XQuery function \verb!fn:trace! \cite{paper:query-fo},
which provides a helper facility for tracing queries. While an XQuery query
using the \verb!fn:trace! function is executed, intermediate results are shown
to the user.
For example, in the Sedna Terminal the following query with the \verb!fn:trace!
function produces the output below. Trace information is marked with the
\verb!##! string:
\medskip
\noindent
\textbf{Query:}
\begin{verbatim}
let $r:= trace(doc("region")/regions/*, "## ")
return $r[id_region="afr"]
\end{verbatim}
\medskip
\noindent
\textbf{Output:}
\small{
\begin{verbatim}
## <africa><id_region>afr</id_region></africa>
<africa>
<id_region>afr</id_region>
</africa>
## <asia><id_region>asi</id_region></asia>
## <australia><id_region>aus</id_region></australia>
## <europe><id_region>eur</id_region></europe>
## <namerica><id_region>nam</id_region></namerica>
## <samerica><id_region>sam</id_region></samerica>
\end{verbatim}}
If you want to use the trace facility in your application working through the
Sedna API, set your own debug handler as shown in section \ref{exec-capi}.
%===============================================================================
% Debug and Profile Facilities: Debug Mode
%===============================================================================
\subsubsection{Debug Mode}
\label{debug-mode}
In addition to the trace facility provided by the standard XQuery \verb!fn:trace!
function (see the previous section, \ref{trace}), debug mode can be turned on in
the Sedna Terminal (for details see the "Sedna Terminal" section of the Sedna
Administration Guide) or in your application using the corresponding Sedna API
functions (see section \ref{session-option-capi}).
Each query in Sedna is represented and executed internally as a tree of
physical operations. Debug mode is a mechanism that provides the stack of
physical operations after a dynamic error is raised. It serves two goals:
\begin{citemize}
\item localize the error in the query's source code;
\item obtain information about the query execution process.
\end{citemize}
Let us consider the following query to illustrate execution in the debug mode:
\medskip
\noindent
\textbf{Query:}
\begin{verbatim}
(: In this query dynamic error will be raised :)
(: due to "aaaa" is not castable to xs:integer. :)
declare function local:f($i as item()) as xs:integer
{
$i cast as xs:integer
};
for $i in (1,2,3,"aaaa")
return local:f($i)
\end{verbatim}
\medskip
\noindent
\textbf{Output:}
\small{
\begin{verbatim}
1
2
3
<stack xmlns='http://www.modis.ispras.ru/sedna'>
<operation name='PPCast' line='3' column='4' calls='7'/>
<operation name='PPFunCall' line='7' column='8' calls='7'/>
<operation name='PPReturn' line='6' column='5' calls='4'/>
<operation name='PPQueryRoot' calls='4'/>
</stack>
SEDNA Message: ERROR FORG0001
Invalid value for cast/constructor.
Details: Cannot convert to xs:integer type
Query line: 3, column:4
\end{verbatim}}
As you can see in the output above, each item of the operations stack
consists of the following parts:
\begin{itemize}
\item operation name (\verb!PPCast! and \verb!PPFunCall! in the example);
\item calls counter -- the number of calls of the operation;
\item corresponding query line and column numbers;
\item optional additional information (the qualified name of the function in the
example).
\end{itemize}
%===============================================================================
% Debug and Profile Facilities: Explain Query
%===============================================================================
\subsubsection{Explain Query}
\label{explain}
The explain statement has the following syntax:
\begin{verbatim}
EXPLAIN
{XQuery or Update statement to explain}
\end{verbatim}
Each query in Sedna is represented and executed internally as a tree of
physical operations. With the help of the \verb!EXPLAIN! statement you can
obtain a detailed query execution plan which shows how Sedna executes the query.
\medskip
\noindent
The following query illustrates \verb!EXPLAIN! statement execution:
\medskip
\noindent
\textbf{Query:}
\begin{verbatim}
explain
declare function local:fact($i as xs:integer) as xs:integer {
if ($i <= 1)
then 1
else $i * local:fact($i - 1)
};
local:fact(10)
\end{verbatim}
\medskip
\noindent
\textbf{Output:}
\small{
\begin{verbatim}
<prolog xmlns="http://www.modis.ispras.ru/sedna">
<function id="0" function-name="local:fact" type="xs:integer">
<arguments>
<argument descriptor="0" type="xs:integer"/>
</arguments>
<operation name="PPIf" position="2:5">
<operation name="PPLMGeneralComparison"
comparison="le" position="2:12">
<operation name="PPVariable" descriptor="0"
variable-name="i" position="2:9"/>
<operation name="PPConst" type="xs:integer"
value="1" position="2:15"/>
</operation>
<operation name="PPConst" type="xs:integer"
value="1" position="3:10"/>
<operation name="PPCalculate" position="4:10">
<operation name="BinaryOp" operation="*">
<operation name="LeafAtomOp">
<operation name="PPVariable" descriptor="0"
variable-name="i" position="4:10"/>
</operation>
<operation name="LeafAtomOp">
<operation name="PPFunCall" id="0"
function-name="local:fact" position="4:15">
<operation name="PPCalculate" position="4:26">
<operation name="BinaryOp" operation="-">
<operation name="LeafAtomOp">
<operation name="PPVariable" descriptor="0"
variable-name="i" position="4:26"/>
</operation>
<operation name="LeafAtomOp">
<operation name="PPConst" type="xs:integer"
value="1" position="4:31"/>
</operation>
</operation>
</operation>
</operation>
</operation>
</operation>
</operation>
</operation>
</function>
</prolog>
<query xmlns="http://www.modis.ispras.ru/sedna">
<operation name="PPQueryRoot">
<operation name="PPFunCall" id="0"
function-name="local:fact" position="7:1">
<operation name="PPConst" type="xs:integer"
value="10" position="7:12"/>
</operation>
</operation>
</query>
\end{verbatim}}
The explain output consists of two parts (just like any XQuery query): the
prolog and the query body explanations. The prolog part includes complete
information about all declarations (namespaces, functions, global variables)
together with the complete physical plan for each user-defined function and
global variable. The query body part describes the physical tree of the
query.

For each physical operation \verb!EXPLAIN! returns the name of the operation
(\verb!PPConst!, \verb!PPFunCall!, etc.) and the corresponding position in
the source query (e.g. 4:31 means that the operation \verb!PPConst!
corresponds to the atomic value '1' at line 4, column 31). The output may
also contain additional information depending on the operation type (for
example, the variable name for some \verb!PPVariable! operations).
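For instance, in the plan above the fragment
\begin{verbatim}
<operation name="PPConst" type="xs:integer"
           value="1" position="4:31"/>
\end{verbatim}
corresponds to the literal \verb!1! in the subexpression \verb!$i - 1! at
line 4, column 31 of the source query.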
%===============================================================================
% Debug and Profile Facilities: Profile Query
%===============================================================================
\subsubsection{Profile Query}
\label{profile}
The profile statement has the following syntax:
\begin{verbatim}
PROFILE
{XQuery or Update statement to profile}
\end{verbatim}
Each query in Sedna is represented and executed internally as a tree of
physical operations. With the help of the \verb!PROFILE! statement you can
obtain the detailed tree of physical operations and the execution time of
each of them.
\medskip
\noindent
The following query illustrates \verb!PROFILE! statement execution:
\medskip
\noindent
\textbf{Query:}
\begin{verbatim}
profile fn:doc('TestSources/XMarkAuction.xml')//
person[@id = "person0"]/name
\end{verbatim}
\medskip
\noindent
\textbf{Output:}
\small{
\begin{alltt}
<operation name="PPQueryRoot" time="13.426" calls="1">
<operation name="PPAxisChild" step="child::element(name)"
time="13.426" calls="2">
<operation name="PPReturn" time="13.426" calls="2">
\emph{<operation name="PPAbsPath" root="document(auction)"
path="descendant-or-self::node()"
time="12.772" calls="85405"/>}
<operation name="PPPred1" time="0.530" calls="85405">
<operation name="PPAxisChild" step="child::element(person)"
time="0.461" calls="86168">
<operation name="PPVariable" descr="0"
time="0.380" calls="170808"/>
</operation>
<operation name="PPEQLGeneralComparison"
comparison="eq" time="0.013" calls="1528">
<operation name="PPAxisAttribute"
step="attribute::attribute(id)"
time="0.001" calls="1527">
<operation name="PPVariable" descr="1"
time="0.001" calls="1527"/>
</operation>
<operation name="PPConst"
type="xs:string" value="person0"
time="0.001" calls="1527"/>
</operation>
</operation>
</operation>
</operation>
</operation>
\end{alltt}}
The profiling output consists of two parts (just like any XQuery query): the
prolog and the query body profiles. The prolog part includes complete
profile information for global variables and user-defined functions. The
query body part describes the physical tree of the query and provides the
execution time and the number of calls for each physical operation.

For each physical operation \verb!PROFILE! returns the name of the operation
(\verb!PPConst!, \verb!PPFunCall!, etc.), the corresponding position in the
source query (e.g. 4:31 means that the operation \verb!PPConst! corresponds
to the atomic value '1' at line 4, column 31), the execution time of this
operation and the number of calls. The output may also contain additional
information depending on the operation type (for example, the variable name
for some \verb!PPVariable! operations).
In the example above you can see that the \verb!PPAbsPath! operation takes
almost all of the time (12.772 of the 13.426 seconds in total) and was
called 85405 times. Profiling in this case shows that \verb!//! may be very
expensive to execute, and it is much better to use "single" XPath steps
wherever possible:
\medskip
\noindent
\textbf{Query:}
\begin{verbatim}
profile fn:doc('TestSources/XMarkAuction.xml')/
site/people/person[@id = "person0"]/name
\end{verbatim}
\medskip
\noindent
\textbf{Output:}
\small{
\begin{alltt}
<operation name="PPQueryRoot" time="0.018" calls="1">
<operation name="PPAxisChild" step="child::element(name)"
time="0.018" calls="2">
<operation name="PPReturn" time="0.018" calls="2">
<produces>
<variable descriptor="0"/>
</produces>
\emph{<operation name="PPAbsPath"
root="document(auction)"
path="child::element(site)/child::element(people)"
time="0.001" calls="2"/>}
<operation name="PPPred1" time="0.017" calls="2">
<produces>
<variable descriptor="1"/>
</produces>
<operation name="PPAxisChild" step="child::element(person)"
time="0.001" calls="765">
<operation name="PPVariable" descr="0"
time="0.000" calls="2"/>
</operation>
<operation name="PPEQLGeneralComparison"
comparison="eq" time="0.013" calls="1528">
<operation name="PPAxisAttribute"
step="attribute::attribute(id)"
time="0.005" calls="1527">
<operation name="PPVariable" descr="1"
time="0.003" calls="1527"/>
</operation>
<operation name="PPConst"
type="xs:string" value="person0"
time="0.000" calls="1527"/>
</operation>
</operation>
</operation>
</operation>
</operation>
\end{alltt}}
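Note that after this rewriting the total execution time drops from 13.426
to 0.018 seconds: \verb!PPAbsPath! now resolves a short explicit path and
is called only 2 times instead of 85405.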
\end{document}
|
(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
* License: BSD
*)
theory Continuations
imports
HOLCF
Maybe
Nats
WorkerWrapperNew
begin
(*>*)
section\<open>Tagless interpreter via double-barreled continuations\<close>
text\<open>\label{sec:continuations}\<close>
type_synonym 'a Cont = "('a \<rightarrow> 'a) \<rightarrow> 'a"
definition
val2cont :: "'a \<rightarrow> 'a Cont" where
"val2cont \<equiv> (\<Lambda> a c. c\<cdot>a)"
definition
cont2val :: "'a Cont \<rightarrow> 'a" where
"cont2val \<equiv> (\<Lambda> f. f\<cdot>ID)"
lemma cont2val_val2cont_id: "cont2val oo val2cont = ID"
by (rule cfun_eqI, simp add: val2cont_def cont2val_def)
domain Expr =
Val (lazy val::"Nat")
| Add (lazy addl::"Expr") (lazy addr::"Expr")
| Throw
| Catch (lazy cbody::"Expr") (lazy chandler::"Expr")
fixrec eval :: "Expr \<rightarrow> Nat Maybe"
where
"eval\<cdot>(Val\<cdot>n) = Just\<cdot>n"
| "eval\<cdot>(Add\<cdot>x\<cdot>y) = mliftM2 (\<Lambda> a b. a + b)\<cdot>(eval\<cdot>x)\<cdot>(eval\<cdot>y)"
| "eval\<cdot>Throw = mfail"
| "eval\<cdot>(Catch\<cdot>x\<cdot>y) = mcatch\<cdot>(eval\<cdot>x)\<cdot>(eval\<cdot>y)"
fixrec eval_body :: "(Expr \<rightarrow> Nat Maybe) \<rightarrow> Expr \<rightarrow> Nat Maybe"
where
"eval_body\<cdot>r\<cdot>(Val\<cdot>n) = Just\<cdot>n"
| "eval_body\<cdot>r\<cdot>(Add\<cdot>x\<cdot>y) = mliftM2 (\<Lambda> a b. a + b)\<cdot>(r\<cdot>x)\<cdot>(r\<cdot>y)"
| "eval_body\<cdot>r\<cdot>Throw = mfail"
| "eval_body\<cdot>r\<cdot>(Catch\<cdot>x\<cdot>y) = mcatch\<cdot>(r\<cdot>x)\<cdot>(r\<cdot>y)"
lemma eval_body_strictExpr[simp]: "eval_body\<cdot>r\<cdot>\<bottom> = \<bottom>"
by (subst eval_body.unfold, simp)
lemma eval_eval_body_eq: "eval = fix\<cdot>eval_body"
by (rule cfun_eqI, subst eval_def, subst eval_body.unfold, simp)
subsection\<open>Worker/wrapper\<close>
definition
unwrapC :: "(Expr \<rightarrow> Nat Maybe) \<rightarrow> (Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe)" where
"unwrapC \<equiv> \<Lambda> g e s f. case g\<cdot>e of Nothing \<Rightarrow> f | Just\<cdot>n \<Rightarrow> s\<cdot>n"
lemma unwrapC_strict[simp]: "unwrapC\<cdot>\<bottom> = \<bottom>"
unfolding unwrapC_def by (rule cfun_eqI)+ simp
definition
wrapC :: "(Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe) \<rightarrow> (Expr \<rightarrow> Nat Maybe)" where
"wrapC \<equiv> \<Lambda> g e. g\<cdot>e\<cdot>Just\<cdot>Nothing"
lemma wrapC_unwrapC_id: "wrapC oo unwrapC = ID"
proof(intro cfun_eqI)
fix g e
show "(wrapC oo unwrapC)\<cdot>g\<cdot>e = ID\<cdot>g\<cdot>e"
by (cases "g\<cdot>e", simp_all add: wrapC_def unwrapC_def)
qed
definition
eval_work :: "Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe" where
"eval_work \<equiv> fix\<cdot>(unwrapC oo eval_body oo wrapC)"
definition
eval_wrap :: "Expr \<rightarrow> Nat Maybe" where
"eval_wrap \<equiv> wrapC\<cdot>eval_work"
fixrec eval_body' :: "(Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe)
\<rightarrow> Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe"
where
"eval_body'\<cdot>r\<cdot>(Val\<cdot>n)\<cdot>s\<cdot>f = s\<cdot>n"
| "eval_body'\<cdot>r\<cdot>(Add\<cdot>x\<cdot>y)\<cdot>s\<cdot>f = (case wrapC\<cdot>r\<cdot>x of
Nothing \<Rightarrow> f
| Just\<cdot>n \<Rightarrow> (case wrapC\<cdot>r\<cdot>y of
Nothing \<Rightarrow> f
| Just\<cdot>m \<Rightarrow> s\<cdot>(n + m)))"
| "eval_body'\<cdot>r\<cdot>Throw\<cdot>s\<cdot>f = f"
| "eval_body'\<cdot>r\<cdot>(Catch\<cdot>x\<cdot>y)\<cdot>s\<cdot>f = (case wrapC\<cdot>r\<cdot>x of
Nothing \<Rightarrow> (case wrapC\<cdot>r\<cdot>y of
Nothing \<Rightarrow> f
| Just\<cdot>n \<Rightarrow> s\<cdot>n)
| Just\<cdot>n \<Rightarrow> s\<cdot>n)"
lemma eval_body'_strictExpr[simp]: "eval_body'\<cdot>r\<cdot>\<bottom>\<cdot>s\<cdot>f = \<bottom>"
by (subst eval_body'.unfold, simp)
definition
eval_work' :: "Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe" where
"eval_work' \<equiv> fix\<cdot>eval_body'"
text\<open>This proof is unfortunately quite messy, due to the
simplifier's inability to cope with HOLCF's case distinctions.\<close>
lemma eval_body'_eval_body_eq: "eval_body' = unwrapC oo eval_body oo wrapC"
apply (intro cfun_eqI)
apply (unfold unwrapC_def wrapC_def)
apply (case_tac xa)
apply simp_all
apply (simp add: wrapC_def)
apply (case_tac "x\<cdot>Expr1\<cdot>Just\<cdot>Nothing")
apply simp_all
apply (case_tac "x\<cdot>Expr2\<cdot>Just\<cdot>Nothing")
apply simp_all
apply (simp add: mfail_def)
apply (simp add: mcatch_def wrapC_def)
apply (case_tac "x\<cdot>Expr1\<cdot>Just\<cdot>Nothing")
apply simp_all
done
fixrec eval_body_final :: "(Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe)
\<rightarrow> Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe"
where
"eval_body_final\<cdot>r\<cdot>(Val\<cdot>n)\<cdot>s\<cdot>f = s\<cdot>n"
| "eval_body_final\<cdot>r\<cdot>(Add\<cdot>x\<cdot>y)\<cdot>s\<cdot>f = r\<cdot>x\<cdot>(\<Lambda> n. r\<cdot>y\<cdot>(\<Lambda> m. s\<cdot>(n + m))\<cdot>f)\<cdot>f"
| "eval_body_final\<cdot>r\<cdot>Throw\<cdot>s\<cdot>f = f"
| "eval_body_final\<cdot>r\<cdot>(Catch\<cdot>x\<cdot>y)\<cdot>s\<cdot>f = r\<cdot>x\<cdot>s\<cdot>(r\<cdot>y\<cdot>s\<cdot>f)"
lemma eval_body_final_strictExpr[simp]: "eval_body_final\<cdot>r\<cdot>\<bottom>\<cdot>s\<cdot>f = \<bottom>"
by (subst eval_body_final.unfold, simp)
lemma eval_body'_eval_body_final_eq: "eval_body_final oo unwrapC oo wrapC = eval_body'"
apply (rule cfun_eqI)+
apply (case_tac xa)
apply (simp_all add: unwrapC_def)
done
definition
eval_work_final :: "Expr \<rightarrow> (Nat \<rightarrow> Nat Maybe) \<rightarrow> Nat Maybe \<rightarrow> Nat Maybe" where
"eval_work_final \<equiv> fix\<cdot>eval_body_final"
definition
eval_final :: "Expr \<rightarrow> Nat Maybe" where
"eval_final \<equiv> (\<Lambda> e. eval_work_final\<cdot>e\<cdot>Just\<cdot>Nothing)"
lemma "eval = eval_final"
proof -
have "eval = fix\<cdot>eval_body" by (rule eval_eval_body_eq)
also from wrapC_unwrapC_id unwrapC_strict have "\<dots> = wrapC\<cdot>(fix\<cdot>eval_body_final)"
apply (rule worker_wrapper_fusion_new)
using eval_body'_eval_body_final_eq eval_body'_eval_body_eq by simp
also have "\<dots> = eval_final"
unfolding eval_final_def eval_work_final_def wrapC_def
by simp
finally show ?thesis .
qed
(*<*)
end
(*>*)
|
= = = Structure and genre = = =
|
[STATEMENT]
lemma coord_cring_cring:
"cring (R[\<X>\<^bsub>n\<^esub>])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cring (R [\<X>\<^bsub>n\<^esub>])
[PROOF STEP]
unfolding coord_ring_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cring (Pring R {..<n})
[PROOF STEP]
by (simp add: R.Pring_is_cring R_cring) |
[STATEMENT]
lemma in_theFT_theNFT[simp]:
assumes "n1 \<in> theFT cl" and "n2 \<in> theNFT cl"
shows "n1 \<noteq> n2" and "n2 \<noteq> n1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n1 \<noteq> n2 &&& n2 \<noteq> n1
[PROOF STEP]
using assms theFT_Int_theNFT
[PROOF STATE]
proof (prove)
using this:
n1 \<in> theFT cl
n2 \<in> theNFT cl
theFT ?cl \<inter> theNFT ?cl = {}
goal (1 subgoal):
1. n1 \<noteq> n2 &&& n2 \<noteq> n1
[PROOF STEP]
by blast+ |
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
g : β → γ
f : α → β
hg : Inducing g
hf : Inducing f
⊢ inst✝³ = TopologicalSpace.induced (g ∘ f) inst✝¹
[PROOFSTEP]
rw [hf.induced, hg.induced, induced_compose]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hf : Continuous f
hg : Continuous g
hgf : Inducing (g ∘ f)
⊢ inst✝³ ≤ induced f inst✝²
[PROOFSTEP]
rwa [← continuous_iff_le_induced]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hf : Continuous f
hg : Continuous g
hgf : Inducing (g ∘ f)
⊢ induced f inst✝² ≤ inst✝³
[PROOFSTEP]
rw [hgf.induced, ← induced_compose]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hf : Continuous f
hg : Continuous g
hgf : Inducing (g ∘ f)
⊢ induced f inst✝² ≤ induced f (induced g inst✝¹)
[PROOFSTEP]
exact induced_mono hg.le_induced
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
⊢ 𝓝ˢ s = comap f (𝓝ˢ (f '' s))
[PROOFSTEP]
simp only [nhdsSet, sSup_image, comap_iSup, hf.nhds_eq_comap, iSup_image]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
a : α
l : Filter α
⊢ MapClusterPt (f a) l f ↔ ClusterPt a l
[PROOFSTEP]
delta MapClusterPt ClusterPt
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
a : α
l : Filter α
⊢ NeBot (𝓝 (f a) ⊓ map f l) ↔ NeBot (𝓝 a ⊓ l)
[PROOFSTEP]
rw [← Filter.push_pull', ← hf.nhds_eq_comap, map_neBot_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
ι : Type u_5
f : ι → β
g : β → γ
a : Filter ι
b : β
hg : Inducing g
⊢ Tendsto f a (𝓝 b) ↔ Tendsto (g ∘ f) a (𝓝 (g b))
[PROOFSTEP]
rw [hg.nhds_eq_comap, tendsto_comap_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hg : Inducing g
⊢ Continuous f ↔ Continuous (g ∘ f)
[PROOFSTEP]
simp_rw [continuous_iff_continuousAt, hg.continuousAt_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hf : Inducing f
x : α
h : range f ∈ 𝓝 (f x)
⊢ ContinuousAt (g ∘ f) x ↔ ContinuousAt g (f x)
[PROOFSTEP]
simp_rw [ContinuousAt, Filter.Tendsto, ← hf.map_nhds_of_mem _ h, Filter.map_map, comp]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hg : Inducing g
⊢ Inducing f ↔ Inducing (g ∘ f)
[PROOFSTEP]
refine' ⟨fun h => hg.comp h, fun hgf => inducing_of_inducing_compose _ hg.continuous hgf⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hg : Inducing g
hgf : Inducing (g ∘ f)
⊢ Continuous f
[PROOFSTEP]
rw [hg.continuous_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
g : β → γ
hg : Inducing g
hgf : Inducing (g ∘ f)
⊢ Continuous (g ∘ f)
[PROOFSTEP]
exact hgf.continuous
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
⊢ closure s = f ⁻¹' closure (f '' s)
[PROOFSTEP]
ext x
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
x : α
⊢ x ∈ closure s ↔ x ∈ f ⁻¹' closure (f '' s)
[PROOFSTEP]
rw [Set.mem_preimage, ← closure_induced, hf.induced]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
⊢ IsClosed s ↔ ∃ t, IsClosed t ∧ f ⁻¹' t = s
[PROOFSTEP]
rw [hf.induced, isClosed_induced_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
⊢ IsClosed s ↔ ∀ (x : α), f x ∈ closure (f '' s) → x ∈ s
[PROOFSTEP]
rw [hf.induced, isClosed_induced_iff']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
⊢ IsOpen s ↔ ∃ t, IsOpen t ∧ f ⁻¹' t = s
[PROOFSTEP]
rw [hf.induced, isOpen_induced_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
f : α → β
hf : Inducing f
s : Set α
⊢ Dense s ↔ ∀ (x : α), f x ∈ closure (f '' s)
[PROOFSTEP]
simp only [Dense, hf.closure_eq_preimage_closure_image, mem_preimage]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
g : β → γ
hf : Continuous f
hg : Continuous g
hgf : _root_.Embedding (g ∘ f)
a₁ a₂ : α
h : f a₁ = f a₂
⊢ (g ∘ f) a₁ = (g ∘ f) a₂
[PROOFSTEP]
simp [h, (· ∘ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
inst✝² : TopologicalSpace γ
X : Type u_5
Y : Type u_6
inst✝¹ : TopologicalSpace X
tY : TopologicalSpace Y
inst✝ : DiscreteTopology Y
f : X → Y
hf : _root_.Embedding f
x : X
⊢ 𝓝 x = pure x
[PROOFSTEP]
rw [hf.nhds_eq_comap, nhds_discrete, comap_pure, ← image_singleton, hf.inj.preimage_image, principal_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
⊢ (∀ (x : Set β), IsOpen xᶜ ↔ IsOpen (f ⁻¹' xᶜ)) ↔ ∀ (s : Set β), IsClosed s ↔ IsClosed (f ⁻¹' s)
[PROOFSTEP]
simp only [isOpen_compl_iff, preimage_compl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
g : β → γ
f : α → β
hg : QuotientMap g
hf : QuotientMap f
⊢ inst✝¹ = coinduced (g ∘ f) inst✝³
[PROOFSTEP]
rw [hg.right, hf.right, coinduced_compose]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
g : β → γ
f : α → β
hf : Continuous f
hg : Continuous g
hgf : QuotientMap (g ∘ f)
⊢ inst✝¹ ≤ coinduced g inst✝²
[PROOFSTEP]
rw [hgf.right, ← coinduced_compose]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
g : β → γ
f : α → β
hf : Continuous f
hg : Continuous g
hgf : QuotientMap (g ∘ f)
⊢ coinduced g (coinduced f inst✝³) ≤ coinduced g inst✝²
[PROOFSTEP]
exact coinduced_mono hf.coinduced_le
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
g : β → γ
f : α → β
hf : QuotientMap f
⊢ Continuous g ↔ Continuous (g ∘ f)
[PROOFSTEP]
rw [continuous_iff_coinduced_le, continuous_iff_coinduced_le, hf.right, coinduced_compose]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
s : Set α
hs : IsOpen s
⊢ IsOpen (id '' s)
[PROOFSTEP]
rwa [image_id]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f✝ : α → β
g : β → γ
f : α → β
hg : IsOpenMap g
hf : IsOpenMap f
s : Set α
hs : IsOpen s
⊢ IsOpen (g ∘ f '' s)
[PROOFSTEP]
rw [image_comp]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f✝ : α → β
g : β → γ
f : α → β
hg : IsOpenMap g
hf : IsOpenMap f
s : Set α
hs : IsOpen s
⊢ IsOpen (g '' (f '' s))
[PROOFSTEP]
exact hg _ (hf _ hs)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : IsOpenMap f
⊢ IsOpen (range f)
[PROOFSTEP]
rw [← image_univ]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : IsOpenMap f
⊢ IsOpen (f '' univ)
[PROOFSTEP]
exact hf _ isOpen_univ
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f✝ f : α → β
h : ∀ (x : α), ∃ g, ContinuousAt g (f x) ∧ g (f x) = x ∧ Function.RightInverse g f
x : α
g : β → α
hgc : ContinuousAt g (f x)
hgx : g (f x) = x
hgf : Function.RightInverse g f
⊢ 𝓝 (f x) = map f (map g (𝓝 (f x)))
[PROOFSTEP]
rw [map_map, hgf.comp_eq_id, map_id]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f✝ f : α → β
h : ∀ (x : α), ∃ g, ContinuousAt g (f x) ∧ g (f x) = x ∧ Function.RightInverse g f
x : α
g : β → α
hgc : ContinuousAt g (f x)
hgx : g (f x) = x
hgf : Function.RightInverse g f
⊢ map f (𝓝 (g (f x))) = map f (𝓝 x)
[PROOFSTEP]
rw [hgx]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : IsOpenMap f
s : Set β
⊢ f ⁻¹' closure s ⊆ closure (f ⁻¹' s)
[PROOFSTEP]
rw [← compl_subset_compl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : IsOpenMap f
s : Set β
⊢ (closure (f ⁻¹' s))ᶜ ⊆ (f ⁻¹' closure s)ᶜ
[PROOFSTEP]
simp only [← interior_compl, ← preimage_compl, hf.interior_preimage_subset_preimage_interior]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : IsOpenMap f
s : Set β
⊢ f ⁻¹' frontier s ⊆ frontier (f ⁻¹' s)
[PROOFSTEP]
simpa only [frontier_eq_closure_inter_closure, preimage_inter] using
inter_subset_inter hf.preimage_closure_subset_closure_preimage hf.preimage_closure_subset_closure_preimage
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : IsOpenMap f
hfc : Continuous f
s : Set β
⊢ f ⁻¹' frontier s = frontier (f ⁻¹' s)
[PROOFSTEP]
simp only [frontier_eq_closure_inter_closure, preimage_inter, preimage_compl,
hf.preimage_closure_eq_closure_preimage hfc]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
hs : ∀ (s : Set α), f '' interior s ⊆ interior (f '' s)
u : Set α
hu : IsOpen u
⊢ f '' u = f '' interior u
[PROOFSTEP]
rw [hu.interior_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
s : Set α
hs : IsClosed s
⊢ IsClosed (id '' s)
[PROOFSTEP]
rwa [image_id]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
g : β → γ
f : α → β
hg : IsClosedMap g
hf : IsClosedMap f
⊢ IsClosedMap (g ∘ f)
[PROOFSTEP]
intro s hs
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
g : β → γ
f : α → β
hg : IsClosedMap g
hf : IsClosedMap f
s : Set α
hs : IsClosed s
⊢ IsClosed (g ∘ f '' s)
[PROOFSTEP]
rw [image_comp]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
g : β → γ
f : α → β
hg : IsClosedMap g
hf : IsClosedMap f
s : Set α
hs : IsClosed s
⊢ IsClosed (g '' (f '' s))
[PROOFSTEP]
exact hg _ (hf _ hs)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
f' : β → α
h : Continuous f'
l_inv : LeftInverse f f'
r_inv : Function.RightInverse f f'
s : Set α
hs : IsClosed s
⊢ IsClosed (f '' s)
[PROOFSTEP]
rw [image_eq_preimage_of_inverse r_inv l_inv]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
f' : β → α
h : Continuous f'
l_inv : LeftInverse f f'
r_inv : Function.RightInverse f f'
s : Set α
hs : IsClosed s
⊢ IsClosed (f' ⁻¹' s)
[PROOFSTEP]
exact hs.preimage h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h : ∀ (s : Set α), IsClosed s → Set.Nonempty s → IsClosed (f '' s)
⊢ IsClosedMap f
[PROOFSTEP]
intro s hs
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h : ∀ (s : Set α), IsClosed s → Set.Nonempty s → IsClosed (f '' s)
s : Set α
hs : IsClosed s
⊢ IsClosed (f '' s)
[PROOFSTEP]
cases' eq_empty_or_nonempty s with h2s h2s
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h : ∀ (s : Set α), IsClosed s → Set.Nonempty s → IsClosed (f '' s)
s : Set α
hs : IsClosed s
h2s : s = ∅
⊢ IsClosed (f '' s)
[PROOFSTEP]
simp_rw [h2s, image_empty, isClosed_empty]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h : ∀ (s : Set α), IsClosed s → Set.Nonempty s → IsClosed (f '' s)
s : Set α
hs : IsClosed s
h2s : Set.Nonempty s
⊢ IsClosed (f '' s)
[PROOFSTEP]
exact h s hs h2s
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
hf : Inducing f
h : IsClosed (range f)
⊢ IsClosedMap f
[PROOFSTEP]
intro s hs
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
hf : Inducing f
h : IsClosed (range f)
s : Set α
hs : IsClosed s
⊢ IsClosed (f '' s)
[PROOFSTEP]
rcases hf.isClosed_iff.1 hs with ⟨t, ht, rfl⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
hf : Inducing f
h : IsClosed (range f)
t : Set β
ht : IsClosed t
hs : IsClosed (f ⁻¹' t)
⊢ IsClosed (f '' (f ⁻¹' t))
[PROOFSTEP]
rw [image_preimage_eq_inter_range]
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
hf : Inducing f
h : IsClosed (range f)
t : Set β
ht : IsClosed t
hs : IsClosed (f ⁻¹' t)
⊢ IsClosed (t ∩ range f)
[PROOFSTEP]
exact ht.inter h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
hs : ∀ (s : Set α), closure (f '' s) ⊆ f '' closure s
c : Set α
hc : IsClosed c
⊢ f '' closure c = f '' c
[PROOFSTEP]
rw [hc.closure_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
⊢ IsClosedMap f ↔ ∀ (s : Set α) (y : β), MapClusterPt y (𝓟 s) f → ∃ x, f x = y ∧ ClusterPt x (𝓟 s)
[PROOFSTEP]
simp [MapClusterPt, isClosedMap_iff_closure_image, subset_def, mem_closure_iff_clusterPt, and_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
f_closed : IsClosedMap f
f_cont : Continuous f
F : Filter α
⊢ Filter.lift' (map f F) closure = map f (Filter.lift' F closure)
[PROOFSTEP]
rw [map_lift'_eq2 (monotone_closure β), map_lift'_eq (monotone_closure α)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
f_closed : IsClosedMap f
f_cont : Continuous f
F : Filter α
⊢ Filter.lift' F (closure ∘ image f) = Filter.lift' F (image f ∘ closure)
[PROOFSTEP]
congr
[GOAL]
case e_h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
f_closed : IsClosedMap f
f_cont : Continuous f
F : Filter α
⊢ closure ∘ image f = image f ∘ closure
[PROOFSTEP]
ext s : 1
[GOAL]
case e_h.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
f : α → β
f_closed : IsClosedMap f
f_cont : Continuous f
F : Filter α
s : Set α
⊢ (closure ∘ image f) s = (image f ∘ closure) s
[PROOFSTEP]
exact f_closed.closure_image_eq_of_continuous f_cont s
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
F : Filter α
f : α → β
f_closed : IsClosedMap f
f_cont : Continuous f
y : β
⊢ MapClusterPt y F f ↔ NeBot (Filter.lift' F closure ⊓ 𝓟 (f ⁻¹' {y}))
[PROOFSTEP]
rw [MapClusterPt, clusterPt_iff_lift'_closure', f_closed.lift'_closure_map_eq f_cont, ← comap_principal, ←
map_neBot_iff f, Filter.push_pull, principal_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : OpenEmbedding f
s : Set α
h : IsOpen (f '' s)
⊢ IsOpen s
[PROOFSTEP]
convert ← h.preimage hf.toEmbedding.continuous
[GOAL]
case h.e'_3
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : OpenEmbedding f
s : Set α
h : IsOpen (f '' s)
⊢ f ⁻¹' (f '' s) = s
[PROOFSTEP]
apply preimage_image_eq _ hf.inj
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : OpenEmbedding f
g : β → γ
l : Filter γ
a : α
⊢ Tendsto (g ∘ f) (𝓝 a) l ↔ Tendsto g (𝓝 (f a)) l
[PROOFSTEP]
rw [Tendsto, ← map_map, hf.map_nhds_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : OpenEmbedding f
g : β → γ
l : Filter γ
a : α
⊢ map g (𝓝 (f a)) ≤ l ↔ Tendsto g (𝓝 (f a)) l
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : OpenEmbedding f
s : Set β
hs : s ⊆ range f
⊢ IsOpen s ↔ IsOpen (f ⁻¹' s)
[PROOFSTEP]
rw [hf.open_iff_image_open, image_preimage_eq_inter_range, inter_eq_self_of_subset_left hs]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h₁ : Continuous f
h₂ : Injective f
h₃ : IsOpenMap f
⊢ OpenEmbedding f
[PROOFSTEP]
simp only [openEmbedding_iff_embedding_open, embedding_iff, inducing_iff_nhds, *, and_true_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h₁ : Continuous f
h₂ : Injective f
h₃ : IsOpenMap f
⊢ ∀ (a : α), 𝓝 a = comap f (𝓝 (f a))
[PROOFSTEP]
exact fun a => le_antisymm (h₁.tendsto _).le_comap (@comap_map _ _ (𝓝 a) _ h₂ ▸ comap_mono (h₃.nhds_le _))
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
g : β → γ
f : α → β
hg : OpenEmbedding g
⊢ IsOpenMap f ↔ IsOpenMap (g ∘ f)
[PROOFSTEP]
simp_rw [isOpenMap_iff_nhds_le, ← map_map, comp, ← hg.map_nhds_eq, Filter.map_le_map_iff hg.inj]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
g : β → γ
hg : OpenEmbedding g
⊢ OpenEmbedding (g ∘ f) ↔ OpenEmbedding f
[PROOFSTEP]
simp only [openEmbedding_iff_continuous_injective_open, ← hg.isOpenMap_iff, ← hg.1.continuous_iff, hg.inj.of_comp_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : ClosedEmbedding f
s : Set α
h : IsClosed (f '' s)
⊢ IsClosed s
[PROOFSTEP]
rw [← preimage_image_eq s hf.inj]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : ClosedEmbedding f
s : Set α
h : IsClosed (f '' s)
⊢ IsClosed (f ⁻¹' (f '' s))
[PROOFSTEP]
exact h.preimage hf.continuous
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
hf : ClosedEmbedding f
s : Set β
hs : s ⊆ range f
⊢ IsClosed s ↔ IsClosed (f ⁻¹' s)
[PROOFSTEP]
rw [hf.closed_iff_image_closed, image_preimage_eq_of_subset hs]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h₁ : Continuous f
h₂ : Injective f
h₃ : IsClosedMap f
⊢ ClosedEmbedding f
[PROOFSTEP]
refine closedEmbedding_of_embedding_closed ⟨⟨?_⟩, h₂⟩ h₃
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h₁ : Continuous f
h₂ : Injective f
h₃ : IsClosedMap f
⊢ inst✝² = induced f inst✝¹
[PROOFSTEP]
refine h₁.le_induced.antisymm fun s hs => ?_
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h₁ : Continuous f
h₂ : Injective f
h₃ : IsClosedMap f
s : Set α
hs : IsOpen s
⊢ IsOpen s
[PROOFSTEP]
refine ⟨(f '' sᶜ)ᶜ, (h₃ _ hs.isClosed_compl).isOpen_compl, ?_⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : TopologicalSpace γ
f : α → β
h₁ : Continuous f
h₂ : Injective f
h₃ : IsClosedMap f
s : Set α
hs : IsOpen s
⊢ f ⁻¹' (f '' sᶜ)ᶜ = s
[PROOFSTEP]
rw [preimage_compl, preimage_image_eq _ h₂, compl_compl]
|
Formal statement is: proposition Liouville_weak: assumes "f holomorphic_on UNIV" and "(f \<longlongrightarrow> l) at_infinity" shows "f z = l" Informal statement is: If $f$ is holomorphic on all of $\mathbb{C}$ and $f$ tends to $l$ at infinity, then $f z = l$ for every $z$; that is, $f$ is the constant function $l$. |
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
Lemmas on arch get/set object etc
*)
theory ArchAcc_AI
imports "../SubMonad_AI"
"Lib.Crunch_Instances_NonDet"
begin
context Arch begin global_naming ARM
bundle unfold_objects =
obj_at_def[simp]
kernel_object.splits[split]
arch_kernel_obj.splits[split]
get_object_wp [wp]
bundle unfold_objects_asm =
obj_at_def[simp]
kernel_object.split_asm[split]
arch_kernel_obj.split_asm[split]
definition
"valid_asid asid s \<equiv> arm_asid_map (arch_state s) asid \<noteq> None"
lemma get_asid_pool_wp [wp]:
"\<lbrace>\<lambda>s. \<forall>pool. ko_at (ArchObj (ASIDPool pool)) p s \<longrightarrow> Q pool s\<rbrace>
get_asid_pool p
\<lbrace>Q\<rbrace>"
apply (simp add: get_asid_pool_def get_object_def)
apply (wp|wpc)+
apply (clarsimp simp: obj_at_def)
done
lemma set_asid_pool_typ_at [wp]:
"set_asid_pool ptr pool \<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace>"
apply (simp add: set_asid_pool_def set_object_def get_object_def)
apply wp
including unfold_objects
by clarsimp
lemmas set_asid_pool_typ_ats [wp] = abs_typ_at_lifts [OF set_asid_pool_typ_at]
lemma get_pd_wp [wp]:
"\<lbrace>\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) p s \<longrightarrow> Q pd s\<rbrace> get_pd p \<lbrace>Q\<rbrace>"
unfolding get_pd_def including unfold_objects by wpsimp
lemma get_pde_wp:
"\<lbrace>\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s \<longrightarrow>
Q (pd (ucast (p && mask pd_bits >> 2))) s\<rbrace>
get_pde p
\<lbrace>Q\<rbrace>"
by (simp add: get_pde_def) wp
lemma get_pde_inv [wp]: "get_pde p \<lbrace>P\<rbrace>"
by (wpsimp wp: get_pde_wp)
bundle pagebits =
pd_bits_def[simp] pt_bits_def[simp]
pageBits_def[simp] mask_lower_twice[simp]
word_bool_alg.conj_assoc[symmetric,simp] obj_at_def[simp]
pde.splits[split]
pte.splits[split]
lemma get_master_pde_wp:
"\<lbrace>\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s
\<longrightarrow> Q (case (pd (ucast (p && ~~ mask 6 && mask pd_bits >> 2))) of
SuperSectionPDE x xa xb \<Rightarrow> pd (ucast (p && ~~ mask 6 && mask pd_bits >> 2))
| _ \<Rightarrow> pd (ucast (p && mask pd_bits >> 2))) s\<rbrace>
get_master_pde p
\<lbrace>Q\<rbrace>"
apply (simp add: get_master_pde_def)
apply (wp get_pde_wp | wpc)+
including pagebits
by auto
lemma store_pde_typ_at [wp]:
"store_pde ptr pde \<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def get_object_def)
apply (wpsimp simp: obj_at_def a_type_def)
done
lemmas store_pde_typ_ats [wp] = abs_typ_at_lifts [OF store_pde_typ_at]
lemma get_pt_wp [wp]:
"\<lbrace>\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) p s \<longrightarrow> Q pt s\<rbrace> get_pt p \<lbrace>Q\<rbrace>"
apply (simp add: get_pt_def get_object_def)
apply (wpsimp simp: obj_at_def)
done
lemma get_pte_wp:
"\<lbrace>\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) (p && ~~mask pt_bits) s \<longrightarrow>
Q (pt (ucast (p && mask pt_bits >> 2))) s\<rbrace>
get_pte p
\<lbrace>Q\<rbrace>"
by (simp add: get_pte_def) wp
lemma get_pte_inv [wp]:
"\<lbrace>P\<rbrace> get_pte p \<lbrace>\<lambda>_. P\<rbrace>"
by (wpsimp wp: get_pte_wp)
lemma get_master_pte_wp:
"\<lbrace>\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) (p && ~~ mask pt_bits) s \<longrightarrow>
Q (case pt (ucast (p && ~~ mask 6 && mask pt_bits >> 2)) of
LargePagePTE x xa xb \<Rightarrow>
pt (ucast (p && ~~ mask 6 && mask pt_bits >> 2))
| _ \<Rightarrow> pt (ucast (p && mask pt_bits >> 2)))
s\<rbrace>
get_master_pte p \<lbrace>Q\<rbrace>"
apply (simp add: get_master_pte_def)
apply (wp get_pte_wp | wpc)+
including pagebits
by auto
lemma store_pte_typ_at:
"store_pte ptr pte \<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace>"
apply (simp add: store_pte_def set_pt_def set_object_def get_object_def)
apply (wpsimp simp: obj_at_def a_type_def)
done
lemmas store_pte_typ_ats [wp] = abs_typ_at_lifts [OF store_pte_typ_at]
lemma lookup_pt_slot_inv:
"lookup_pt_slot pd vptr \<lbrace>P\<rbrace>"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply clarsimp
done
lemma lookup_pt_slot_inv_any:
"\<lbrace>\<lambda>s. \<forall>x. Q x s\<rbrace> lookup_pt_slot pd vptr \<lbrace>Q\<rbrace>,-"
"\<lbrace>E\<rbrace> lookup_pt_slot pd vptr -, \<lbrace>\<lambda>ft. E\<rbrace>"
apply (simp_all add: lookup_pt_slot_def)
apply (wpsimp wp: get_pde_wp)+
done
crunch cte_wp_at[wp]: set_irq_state "\<lambda>s. P (cte_wp_at P' p s)"
lemma set_pt_cte_wp_at:
"set_pt ptr val \<lbrace>\<lambda>s. P (cte_wp_at P' p s)\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong)
apply (subst cte_wp_at_after_update')
apply (clarsimp simp: a_type_def obj_at_def split: if_splits kernel_object.splits)+
done
lemma set_pd_cte_wp_at:
"set_pd ptr val \<lbrace>\<lambda>s. P (cte_wp_at P' p s)\<rbrace>"
apply (simp add: set_pd_def)
apply (wpsimp wp: set_object_wp_strong)
apply (subst cte_wp_at_after_update')
including unfold_objects
apply (clarsimp simp: a_type_def split: if_splits)+
done
lemma set_asid_pool_cte_wp_at:
"set_asid_pool ptr val \<lbrace>\<lambda>s. P (cte_wp_at P' p s)\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects_asm
by (wpsimp wp: set_object_wp_strong
simp: a_type_def cte_wp_at_after_update'
split: if_splits)
lemma set_pt_pred_tcb_at[wp]:
"set_pt ptr val \<lbrace>pred_tcb_at proj P t\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
lemma set_pd_pred_tcb_at[wp]:
"set_pd ptr val \<lbrace>pred_tcb_at proj P t\<rbrace>"
apply (simp add: set_pd_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
lemma set_asid_pool_pred_tcb_at[wp]:
"set_asid_pool ptr val \<lbrace>pred_tcb_at proj P t\<rbrace>"
apply (subst set_asid_pool_def)
by (wpsimp wp: set_object_wp_strong
simp: pred_tcb_at_def obj_at_def)
lemma mask_pd_bits_inner_beauty:
"is_aligned p 2 \<Longrightarrow>
(p && ~~ mask pd_bits) + (ucast ((ucast (p && mask pd_bits >> 2))::12 word) << 2) = (p::word32)"
by (rule mask_split_aligned; simp add: pd_bits_def pageBits_def)
lemma more_pd_inner_beauty:
fixes x :: "12 word"
fixes p :: word32
assumes x: "x \<noteq> ucast (p && mask pd_bits >> 2)"
shows "(p && ~~ mask pd_bits) + (ucast x << 2) = p \<Longrightarrow> False"
by (rule mask_split_aligned_neg[OF _ _ x]; simp add: pd_bits_def pageBits_def)
lemma mask_pt_bits_inner_beauty:
"is_aligned p 2 \<Longrightarrow>
(p && ~~ mask pt_bits) + (ucast ((ucast (p && mask pt_bits >> 2))::word8) << 2) = (p::word32)"
by (rule mask_split_aligned; simp add: pt_bits_def pageBits_def)
lemma more_pt_inner_beauty:
fixes x :: "word8"
fixes p :: word32
assumes x: "x \<noteq> ucast (p && mask pt_bits >> 2)"
shows "(p && ~~ mask pt_bits) + (ucast x << 2) = p \<Longrightarrow> False"
by (rule mask_split_aligned_neg[OF _ _ x]; simp add: pt_bits_def pageBits_def)
lemma set_pd_aligned [wp]:
"set_pd base pd \<lbrace>pspace_aligned\<rbrace>"
by (wpsimp simp: set_pd_def)
crunch aligned [wp]: store_pde pspace_aligned
(wp: hoare_drop_imps)
lemmas undefined_validE_R = hoare_FalseE_R[where f=undefined]
lemma arch_derive_cap_valid_cap:
"\<lbrace>valid_cap (cap.ArchObjectCap arch_cap)\<rbrace>
arch_derive_cap arch_cap
\<lbrace>valid_cap\<rbrace>, -"
apply(simp add: arch_derive_cap_def)
apply(cases arch_cap, simp_all add: arch_derive_cap_def o_def)
apply(rule hoare_pre, wpc?, wp+;
clarsimp simp add: cap_aligned_def valid_cap_def split: option.splits)+
done
lemma arch_derive_cap_inv:
"arch_derive_cap arch_cap \<lbrace>P\<rbrace>"
apply(simp add: arch_derive_cap_def, cases arch_cap, simp_all)
apply(rule hoare_pre, wpc?, wp+; simp)+
done
definition
"valid_mapping_entries m \<equiv> case m of
Inl (InvalidPTE, _) \<Rightarrow> \<top>
| Inl (LargePagePTE _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pte_at p s
| Inl (SmallPagePTE _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pte_at p s
| Inr (InvalidPDE, _) \<Rightarrow> \<top>
| Inr (PageTablePDE _ _ _, _) \<Rightarrow> \<bottom>
| Inr (SectionPDE _ _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pde_at p s
| Inr (SuperSectionPDE _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pde_at p s"
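(* valid_mapping_entries requires each slot targeted by a page-mapping
   update to contain an entry of the matching kind: pte_at for small and
   large pages, pde_at for sections and supersections. The PageTablePDE
   case is \<bottom>, so such an update is never a valid mapping. *)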
definition "invalid_pte_at p \<equiv> obj_at (\<lambda>ko. \<exists>pt. ko = (ArchObj (PageTable pt))
\<and> pt (ucast (p && mask pt_bits) >> 2) = pte.InvalidPTE) (p && ~~ mask pt_bits)"
definition "invalid_pde_at p \<equiv> obj_at (\<lambda>ko. \<exists>pd. ko = (ArchObj (PageDirectory pd))
\<and> pd (ucast (p && mask pd_bits) >> 2) = pde.InvalidPDE) (p && ~~ mask pd_bits)"
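(* invalid_pte_at/invalid_pde_at state that the enclosing page table or
   page directory object exists and that the entry indexed by p is the
   invalid entry, i.e. the slot is currently unused. *)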
definition
"valid_slots m \<equiv> case m of
Inl (pte, xs) \<Rightarrow>
\<lambda>s. xs \<noteq> [] \<and>
(\<forall>p \<in> set xs. (\<exists>\<rhd> (p && ~~ mask pt_bits) and pte_at p) s) \<and>
wellformed_pte pte \<and> valid_pte pte s
| Inr (pde, xs) \<Rightarrow>
\<lambda>s. xs \<noteq> [] \<and>
(\<forall>p \<in> set xs. (\<exists>\<rhd> (p && ~~ mask pd_bits) and pde_at p) s \<and>
ucast (p && mask pd_bits >> 2) \<notin> kernel_mapping_slots) \<and>
wellformed_pde pde \<and> valid_pde pde s"
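(* valid_slots collects the preconditions for writing an entry into a
   nonempty list of slots: every slot lies in a table reachable from some
   vspace root, the entry itself is wellformed and valid, and for
   page-directory entries the kernel mapping slots must not be touched. *)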
crunch inv[wp]: get_master_pte P
crunch inv[wp]: get_master_pde P
lemma ucast_mask_asid_low_bits [simp]:
"ucast ((asid::word32) && mask asid_low_bits) = (ucast asid :: 10 word)"
by (word_eqI_solve simp: asid_low_bits_def)
lemma ucast_ucast_asid_high_bits [simp]:
"ucast (ucast (asid_high_bits_of asid)::word32) = asid_high_bits_of asid"
by word_eqI_solve
lemma mask_asid_low_bits_ucast_ucast:
"((asid::word32) && mask asid_low_bits) = ucast (ucast asid :: 10 word)"
by (word_eqI_solve simp: asid_low_bits_def)
lemma set_asid_pool_cur [wp]:
"set_asid_pool p a \<lbrace>\<lambda>s. P (cur_thread s)\<rbrace>"
unfolding set_asid_pool_def by (wpsimp wp: get_object_wp)
lemma set_asid_pool_cur_tcb [wp]:
"set_asid_pool p a \<lbrace>\<lambda>s. cur_tcb s\<rbrace>"
unfolding cur_tcb_def
by (rule hoare_lift_Pf [where f=cur_thread]; wp)
crunch arch [wp]: set_asid_pool "\<lambda>s. P (arch_state s)"
(wp: get_object_wp)
lemma set_asid_pool_valid_arch [wp]:
"set_asid_pool p a \<lbrace>valid_arch_state\<rbrace>"
by (rule valid_arch_state_lift) (wp set_asid_pool_typ_at)+
lemma set_asid_pool_valid_objs [wp]:
"set_asid_pool p a \<lbrace>valid_objs\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_valid_objs get_object_wp)
including unfold_objects
by (clarsimp simp: a_type_def valid_obj_def arch_valid_obj_def)
lemma pde_at_aligned_vptr:
"\<lbrakk>x \<in> set [0 , 4 .e. 0x3C]; page_directory_at pd s;
pspace_aligned s; is_aligned vptr 24 \<rbrakk>
\<Longrightarrow> pde_at (x + lookup_pd_slot pd vptr) s"
apply (clarsimp simp: lookup_pd_slot_def Let_def
obj_at_def pde_at_def)
apply (drule(1) pspace_alignedD[rotated])
apply (clarsimp simp: a_type_def
split: kernel_object.split_asm
arch_kernel_obj.split_asm if_split_asm
cong: kernel_object.case_cong)
apply (prove "is_aligned x 2")
subgoal
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
by (rule is_aligned_shiftl_self)
apply (simp add: aligned_add_aligned word_bits_conv
is_aligned_shiftl_self)+
apply (prove "pd = (x + (pd + (vptr >> 20 << 2)) && ~~ mask pd_bits)")
subgoal
apply (subst mask_lower_twice[symmetric, where n=6])
apply (simp add: pd_bits_def pageBits_def)
apply (subst add.commute, subst add_mask_lower_bits)
apply (erule aligned_add_aligned)
apply (intro is_aligned_shiftl is_aligned_shiftr)
apply simp
apply (simp add: word_bits_conv)
apply simp
apply (subst upper_bits_unset_is_l2p_32[unfolded word_bits_conv])
apply simp
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (rule shiftl_less_t2n[where m=6, simplified])
apply (rule word_leq_minus_one_le)
apply simp+
apply (rule sym, rule add_mask_lower_bits)
apply (simp add: pd_bits_def pageBits_def)
apply simp
apply (subst upper_bits_unset_is_l2p_32[unfolded word_bits_conv])
apply (simp add: pd_bits_def pageBits_def)
apply (rule shiftl_less_t2n)
apply (rule shiftr_less_t2n')
apply (simp add: pd_bits_def pageBits_def)
by (simp add: pd_bits_def pageBits_def)+
apply simp
done
lemma pde_shifting:
"\<lbrakk>is_aligned (vptr::word32) 24; x \<le> 0xF\<rbrakk> \<Longrightarrow> x + (vptr >> 20) < 0x1000"
apply (rule order_less_le_trans)
apply (subst upper_bits_unset_is_l2p_32 [where n=12, symmetric])
apply (clarsimp simp: word_bits_def)
prefer 2
apply simp
apply (clarsimp simp: word_bits_def)
subgoal premises prems for n'
proof -
have H: "(0xF::word32) < 2 ^ 4" by simp
from prems show ?thesis
apply (subst (asm) word_plus_and_or_coroll)
apply (rule word_eqI)
subgoal for n
apply (clarsimp simp: word_size nth_shiftr is_aligned_nth)
apply (spec "n + 20")
apply (frule test_bit_size[where n="n + 20"])
apply (simp add: word_size)
apply (insert H)
apply (drule (1) order_le_less_trans)
apply (drule bang_is_le)
apply (drule_tac z="2 ^ 4" in order_le_less_trans, assumption)
apply (drule word_power_increasing)
by simp+
apply (clarsimp simp: word_size nth_shiftl nth_shiftr is_aligned_nth)
apply (erule disjE)
apply (insert H)[1]
apply (drule (1) order_le_less_trans)
apply (drule bang_is_le)
apply (drule order_le_less_trans[where z="2 ^ 4"], assumption)
apply (drule word_power_increasing; simp)
apply (spec "n' + 20")
apply (frule test_bit_size[where n = "n' + 20"])
by (simp add: word_size)
qed
done
lemma p_le_0xF_helper:
"((p::word32) \<le> 0xF) = (\<forall>n'\<ge>4. n'< word_bits \<longrightarrow> \<not> p !! n')"
apply (subst upper_bits_unset_is_l2p_32)
apply (simp add: word_bits_def)
apply (auto intro: plus_one_helper dest: plus_one_helper2)
done
lemma pd_shifting:
"is_aligned (pd::word32) 14 \<Longrightarrow> pd + (vptr >> 20 << 2) && ~~ mask pd_bits = pd"
apply (rule word_eqI[rule_format])
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
subgoal for \<dots> na
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth)
apply (spec na)
apply (simp add: linorder_not_less)
apply (drule test_bit_size)+
by (simp add: word_size)
subgoal for n
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_ops_nth_size
pd_bits_def pageBits_def linorder_not_less)
apply (rule iffI)
apply clarsimp
apply (drule test_bit_size)+
apply (simp add: word_size)
apply clarsimp
apply (spec n)
by simp
done
lemma pd_shifting_dual:
"is_aligned (pd::word32) 14 \<Longrightarrow> pd + (vptr >> 20 << 2) && mask pd_bits = vptr >> 20 << 2"
apply (simp add: pd_bits_def pageBits_def)
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
subgoal for n
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth)
apply (spec n)
apply (simp add: linorder_not_less)
apply (drule test_bit_size)+
by (simp add: word_size)
apply (rule word_eqI)
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_ops_nth_size
pd_bits_def pageBits_def linorder_not_less)
apply (rule iffI)
apply clarsimp
apply clarsimp
apply (drule test_bit_size)+
apply (simp add: word_size)
done
lemma pd_shifting_at:
"\<lbrakk> page_directory_at pd s; pspace_aligned s \<rbrakk> \<Longrightarrow>
pd + (vptr >> 20 << 2) && ~~ mask pd_bits = pd"
apply (rule pd_shifting)
apply (clarsimp simp: pspace_aligned_def obj_at_def)
apply (drule bspec, blast)
including unfold_objects
by (clarsimp simp: a_type_def)
lemma kernel_mapping_slots_empty_pdeI:
"\<lbrakk>equal_kernel_mappings s; valid_global_objs s; valid_arch_state s;
kheap s p = Some (ArchObj (PageDirectory pd)); x \<in> kernel_mapping_slots\<rbrakk> \<Longrightarrow>
(\<forall>r. pde_ref (pd x) = Some r \<longrightarrow> r \<in> set (second_level_tables (arch_state s))) \<and> valid_pde_mappings (pd x)"
apply (clarsimp simp: invs_def valid_state_def equal_kernel_mappings_def valid_global_objs_def)
apply (erule_tac x=p in allE, erule_tac x="arm_global_pd (arch_state s)" in allE)
including unfold_objects
apply clarsimp
by (simp add: empty_table_def valid_arch_state_def a_type_def)
lemma invs_valid_global_pts:
"invs s \<Longrightarrow> valid_global_pts s"
by (clarsimp simp: invs_def valid_state_def valid_arch_state_def)
lemma is_aligned_pt:
"page_table_at pt s \<Longrightarrow> pspace_aligned s
\<Longrightarrow> is_aligned pt pt_bits"
apply (clarsimp simp: obj_at_def)
apply (drule(1) pspace_alignedD)
apply (simp add: pt_bits_def pageBits_def)
done
lemma is_aligned_global_pt:
"\<lbrakk>x \<in> set (arm_global_pts (arch_state s)); pspace_aligned s; valid_arch_state s\<rbrakk>
\<Longrightarrow> is_aligned x pt_bits"
by (metis valid_arch_state_def valid_global_pts_def
is_aligned_pt)
lemma data_at_aligned:
"\<lbrakk> data_at sz p s; pspace_aligned s \<rbrakk> \<Longrightarrow> is_aligned p (pageBitsForSize sz)"
by (erule pspace_alignedE[where x=p]; fastforce simp: data_at_def obj_at_def)
lemma page_table_pte_at_diffE:
"\<lbrakk> page_table_at p s; q - p = x << 2;
x < 2^(pt_bits - 2); pspace_aligned s \<rbrakk> \<Longrightarrow> pte_at q s"
apply (clarsimp simp: diff_eq_eq add.commute)
apply (erule(2) page_table_pte_atI)
done
lemma pte_at_aligned_vptr:
"\<lbrakk>x \<in> set [0 , 4 .e. 0x3C]; page_table_at pt s;
pspace_aligned s; is_aligned vptr 16 \<rbrakk>
\<Longrightarrow> pte_at (x + (pt + (((vptr >> 12) && 0xFF) << 2))) s"
apply (erule page_table_pte_at_diffE[where x="(x >> 2) + ((vptr >> 12) && 0xFF)"];simp?)
apply (simp add: word_shiftl_add_distrib upto_enum_step_def)
apply (clarsimp simp: word_shift_by_2 shiftr_shiftl1
is_aligned_neg_mask_eq is_aligned_shift)
apply (subst add.commute, rule is_aligned_add_less_t2n)
apply (rule is_aligned_andI1[where n=4], rule is_aligned_shiftr, simp)
apply (rule shiftr_less_t2n)
apply (clarsimp dest!: upto_enum_step_subset[THEN subsetD])
apply (erule order_le_less_trans, simp)
apply (simp add: pt_bits_def pageBits_def)
apply (simp add: pt_bits_def pageBits_def)
apply (rule order_le_less_trans, rule word_and_le1, simp)
done
lemma lookup_pt_slot_ptes_aligned_valid:
"\<lbrace>valid_vspace_objs and valid_arch_state
and equal_kernel_mappings and pspace_aligned
and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd
and K (is_aligned vptr 16)\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>r s. is_aligned r 6 \<and> (\<forall>x\<in>set [0 , 4 .e. 0x3C]. pte_at (x + r) s)\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply (clarsimp simp: lookup_pd_slot_def Let_def)
apply (simp add: pd_shifting_at)
apply (frule (2) valid_vspace_objsD)
apply clarsimp
subgoal for s _ _ x
apply (prove "page_table_at (ptrFromPAddr x) s")
subgoal
apply (bspec "(ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2))";clarsimp)
apply (frule kernel_mapping_slots_empty_pdeI)
apply ((simp add: obj_at_def pte_at_def;fail)+)[4]
by (clarsimp simp: pde_ref_def valid_global_pts_def valid_arch_state_def second_level_tables_def)
apply (rule conjI)
apply (rule is_aligned_add)
apply (rule is_aligned_weaken, erule(1) is_aligned_pt)
apply (simp add: pt_bits_def pageBits_def)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_andI1)
apply (rule is_aligned_shiftr, simp)
apply clarsimp
by (erule(1) pte_at_aligned_vptr, simp+)
done
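(* Reindex the 16-slot quantifier: for a 64-byte aligned base p, ranging over
   the slots [p, p + 4 .e. p + 0x3C] is the same as ranging over the offsets
   [0, 4 .e. 0x3C] added to p. *)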
lemma p_0x3C_shift:
"is_aligned (p :: word32) 6 \<Longrightarrow>
(\<forall>p\<in>set [p , p + 4 .e. p + 0x3C]. f p) = (\<forall>x\<in>set [0, 4 .e. 0x3C]. f (x + p))"
apply (clarsimp simp: upto_enum_step_def add.commute)
apply (frule is_aligned_no_overflow, simp add: word_bits_def)
apply (simp add: linorder_not_le [symmetric])
apply (erule notE)
apply (simp add: add.commute)
apply (erule word_random)
apply simp
done
lemma lookup_pt_slot_pte [wp]:
"\<lbrace>pspace_aligned and valid_vspace_objs and valid_arch_state
and equal_kernel_mappings and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd\<rbrace>
lookup_pt_slot pd vptr \<lbrace>pte_at\<rbrace>,-"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply (clarsimp simp: lookup_pd_slot_def Let_def)
apply (simp add: pd_shifting_at)
apply (drule (2) valid_vspace_objsD)
apply clarsimp
apply (bspec "ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2)")
apply clarsimp
apply (erule page_table_pte_atI, simp_all)
apply (simp add: pt_bits_def pageBits_def)
apply (rule order_le_less_trans, rule word_and_le1, simp)
apply (frule kernel_mapping_slots_empty_pdeI)
apply (simp add: obj_at_def)+
apply (clarsimp simp: pde_ref_def)
apply (rule page_table_pte_atI, simp_all)
apply (simp add: valid_arch_state_def valid_global_pts_def second_level_tables_def)
apply (simp add: pt_bits_def pageBits_def)
apply (rule order_le_less_trans, rule word_and_le1, simp)
done
lemma shiftr_w2p:
"x < len_of TYPE('a) \<Longrightarrow>
2 ^ x = (2^(len_of TYPE('a) - 1) >> (len_of TYPE('a) - 1 - x) :: 'a :: len word)"
apply simp
apply (rule word_eqI)
apply (auto simp: word_size nth_shiftr nth_w2p)
done
lemma vptr_shiftr_le_2p:
"(vptr :: word32) >> 20 < 2 ^ pageBits"
apply (rule le_less_trans[rotated])
apply (rule and_mask_less' [where w=max_word])
apply (simp add: pageBits_def)
apply (rule word_leI)
apply (simp add: word_size nth_shiftr)
apply (drule test_bit_size)
apply (simp add: pageBits_def word_size)
done
lemma page_directory_pde_at_lookupI:
"\<lbrakk>page_directory_at pd s; pspace_aligned s\<rbrakk> \<Longrightarrow> pde_at (lookup_pd_slot pd vptr) s"
apply (simp add: lookup_pd_slot_def Let_def)
apply (erule (1) page_directory_pde_atI[rotated 2])
apply (rule vptr_shiftr_le_2p)
done
lemma vptr_shiftr_le_2pt:
"((vptr :: word32) >> 12) && 0xFF < 2 ^ (pt_bits - 2)"
apply (clarsimp simp: word_FF_is_mask pt_bits_def pageBits_def)
apply (rule and_mask_less_size[where n=8, simplified])
apply (clarsimp simp: word_size)
done
lemma page_table_pte_at_lookupI:
"\<lbrakk>page_table_at pt s; pspace_aligned s\<rbrakk> \<Longrightarrow> pte_at (lookup_pt_slot_no_fail pt vptr) s"
apply (simp add: lookup_pt_slot_no_fail_def)
apply (erule (1) page_table_pte_atI[rotated 2])
apply (rule vptr_shiftr_le_2pt)
done
lemmas lookup_pt_slot_ptes[wp] =
lookup_pt_slot_ptes_aligned_valid
[@ \<open>post_asm \<open>thin_tac "is_aligned x y" for x y\<close>\<close>]
lemmas lookup_pt_slot_ptes2[wp] =
lookup_pt_slot_ptes_aligned_valid
[@ \<open>post_asm \<open>drule (1) p_0x3C_shift[THEN iffD2], thin_tac _\<close>\<close>]
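(* Mapping-entry creation: the alignment side conditions reflect the page
   sizes (16 bits for a 64KiB large page, 24 bits for a 16MiB supersection),
   so that all constituent slots fall inside a single table or directory. *)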
lemma create_mapping_entries_valid [wp]:
"\<lbrace>pspace_aligned and valid_arch_state and valid_vspace_objs
and equal_kernel_mappings and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd and
K ((sz = ARMLargePage \<longrightarrow> is_aligned vptr 16) \<and>
(sz = ARMSuperSection \<longrightarrow> is_aligned vptr 24)) \<rbrace>
create_mapping_entries base vptr sz vm_rights attrib pd
\<lbrace>\<lambda>m. valid_mapping_entries m\<rbrace>, -"
apply (cases sz)
apply (rule hoare_pre)
apply (wp|simp add: valid_mapping_entries_def largePagePTE_offsets_def)+
apply clarsimp
apply (erule (1) page_directory_pde_at_lookupI)
apply (rule hoare_pre)
apply (clarsimp simp add: valid_mapping_entries_def)
apply wp
apply (simp add: lookup_pd_slot_def Let_def)
apply (prove "is_aligned pd 14")
apply (clarsimp simp: obj_at_def add.commute invs_def valid_state_def valid_pspace_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (clarsimp simp: superSectionPDE_offsets_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (clarsimp simp: pde_at_def)
apply (simp add: add.commute add.left_commute)
apply (subst add_mask_lower_bits)
apply (simp add: pd_bits_def pageBits_def)
apply (clarsimp simp: pd_bits_def pageBits_def)
apply (subst (asm) word_plus_and_or_coroll)
prefer 2
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth p_le_0xF_helper word_bits_def)
apply (drule test_bit_size)+
apply (simp add: word_size)
apply (rule word_eqI)
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth p_le_0xF_helper word_bits_def)
apply (frule_tac w=vptr in test_bit_size)
apply (simp add: word_size)
apply (thin_tac "All _")
subgoal for \<dots> n
apply (spec "18+n")
by simp
apply (clarsimp simp: a_type_simps)
apply (rule aligned_add_aligned is_aligned_shiftl_self
| simp add: word_bits_conv)+
done
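(* What follows is a long block of preservation lemmas: set_pt, set_pd and
   set_asid_pool replace a single arch object in the kernel heap without
   changing its type, so most invariants carry over unchanged. *)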
lemma set_pt_distinct [wp]:
"set_pt p pt \<lbrace>pspace_distinct\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp: obj_at_def a_type_def pspace_distinct_same_type
split: kernel_object.splits arch_kernel_obj.splits if_splits)
done
lemma set_pd_distinct [wp]:
"set_pd p pd \<lbrace>pspace_distinct\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_distinct[THEN hoare_set_object_weaken_pre] get_object_wp)
apply (clarsimp simp: obj_at_def a_type_def
split: kernel_object.splits arch_kernel_obj.splits)
done
lemma store_pte_valid_objs [wp]:
"\<lbrace>(%s. wellformed_pte pte) and valid_objs\<rbrace> store_pte p pte \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: store_pte_def set_pt_def get_pt_def bind_assoc set_object_def get_object_def)
apply (rule hoare_pre)
apply (wp|wpc)+
apply (clarsimp simp: valid_objs_def dom_def simp del: fun_upd_apply)
subgoal for \<dots> ptr _
apply (rule valid_obj_same_type)
apply (cases "ptr = p && ~~ mask pt_bits")
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply clarsimp
apply fastforce
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply assumption
by (simp add: a_type_def)
done
lemma set_pt_caps_of_state [wp]:
"set_pt p pt \<lbrace>\<lambda>s. P (caps_of_state s)\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong simp: obj_at_def a_type_simps)
apply (subst cte_wp_caps_of_lift)
prefer 2
apply assumption
apply (auto simp: cte_wp_at_cases a_type_def)
done
lemma set_pd_caps_of_state [wp]:
"set_pd p pd \<lbrace>\<lambda>s. P (caps_of_state s)\<rbrace>"
apply (simp add: set_pd_def bind_assoc)
apply (wpsimp wp: set_object_wp_strong simp: obj_at_def)
apply (subst cte_wp_caps_of_lift)
prefer 2
apply assumption
by (case_tac ko; simp add: cte_wp_at_cases a_type_simps split: if_splits)
lemma store_pte_aligned [wp]:
"store_pte pt p \<lbrace>pspace_aligned\<rbrace>"
apply (simp add: store_pte_def set_pt_def)
apply (wp set_object_aligned)
including unfold_objects
by (clarsimp simp: a_type_def)
lemma store_pde_valid_objs [wp]:
"\<lbrace>(%s. wellformed_pde pde) and valid_objs\<rbrace> store_pde p pde \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: store_pde_def set_pd_def get_pd_def bind_assoc set_object_def get_object_def)
apply (rule hoare_pre)
apply (wp|wpc)+
apply (clarsimp simp: valid_objs_def dom_def simp del: fun_upd_apply)
subgoal for \<dots> ptr _
apply (rule valid_obj_same_type)
apply (cases "ptr = p && ~~ mask pd_bits")
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply clarsimp
apply fastforce
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply assumption
by (simp add: a_type_def)
done
lemma set_asid_pool_aligned [wp]:
"set_asid_pool p ptr \<lbrace>pspace_aligned\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong pspace_aligned_obj_update[rotated])
done
lemma set_asid_pool_distinct [wp]:
"set_asid_pool p ptr \<lbrace>pspace_distinct\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects
by (wpsimp wp: set_object_wp_strong pspace_distinct_same_type)
lemma store_pde_arch [wp]:
"\<lbrace>\<lambda>s. P (arch_state s)\<rbrace> store_pde p pde \<lbrace>\<lambda>_ s. P (arch_state s)\<rbrace>"
by (simp add: store_pde_def set_pd_def get_object_def) wpsimp
lemma store_pte_valid_pte [wp]:
"\<lbrace>valid_pte pt\<rbrace> store_pte p pte \<lbrace>\<lambda>_. valid_pte pt\<rbrace>"
by (wp valid_pte_lift store_pte_typ_at)
lemma store_pde_valid_pde [wp]:
"\<lbrace>valid_pde pde\<rbrace> store_pde slot pde' \<lbrace>\<lambda>rv. valid_pde pde\<rbrace>"
by (wp valid_pde_lift store_pde_typ_at)
lemma set_pd_typ_at [wp]:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> set_pd ptr pd \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
apply (simp add: set_pd_def)
by (wpsimp wp: set_object_wp_strong simp: obj_at_def)
lemma set_pd_valid_objs:
"\<lbrace>(%s. \<forall>i. wellformed_pde (pd i)) and valid_objs\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: set_pd_def)
by (wpsimp wp: set_object_valid_objs simp: valid_obj_def)
lemma set_pd_iflive:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. if_live_then_nonz_cap s\<rbrace>"
apply (subst set_pd_def)
including unfold_objects
by (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre]
simp: live_def hyp_live_def a_type_def)
lemma set_pd_zombies:
"\<lbrace>\<lambda>s. zombies_final s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. zombies_final s\<rbrace>"
apply (subst set_pd_def)
apply (wp set_object_zombies[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (clarsimp simp: a_type_def)
lemma set_pd_zombies_state_refs:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. P (state_refs_of s)\<rbrace>"
apply (subst set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (simp add: state_refs_of_def)
done
lemma set_pd_zombies_state_hyp_refs:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. P (state_hyp_refs_of s)\<rbrace>"
apply (subst set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (simp add: state_hyp_refs_of_def)
done
lemma set_pd_cdt:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace> set_pd p pd \<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
unfolding set_pd_def by (wpsimp wp: get_object_wp)
lemma set_pd_valid_mdb:
"\<lbrace>\<lambda>s. valid_mdb s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_mdb s\<rbrace>"
apply (rule valid_mdb_lift)
by (wpsimp wp: set_pd_cdt set_object_wp simp: set_pd_def)+
lemma set_pd_valid_idle:
"\<lbrace>\<lambda>s. valid_idle s\<rbrace> set_pd p pd \<lbrace>\<lambda>_ s. valid_idle s\<rbrace>"
by (wpsimp wp: valid_idle_lift set_object_wp simp: set_pd_def)
lemma set_pd_ifunsafe:
"\<lbrace>\<lambda>s. if_unsafe_then_cap s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. if_unsafe_then_cap s\<rbrace>"
unfolding set_pd_def including unfold_objects
by (wpsimp wp: set_object_ifunsafe[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_pd_reply_caps:
"\<lbrace>\<lambda>s. valid_reply_caps s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_reply_caps s\<rbrace>"
by (wp valid_reply_caps_st_cte_lift)
lemma set_pd_reply_masters:
"\<lbrace>valid_reply_masters\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
by (wp valid_reply_masters_cte_lift)
lemma global_refs_kheap [simp]:
"global_refs (kheap_update f s) = global_refs s"
by (simp add: global_refs_def)
crunch global_ref [wp]: set_pd "\<lambda>s. P (global_refs s)"
(wp: crunch_wps)
crunch arch [wp]: set_pd "\<lambda>s. P (arch_state s)"
(wp: crunch_wps)
crunch idle [wp]: set_pd "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
crunch irq [wp]: set_pd "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps)
lemma set_pd_valid_global:
"\<lbrace>\<lambda>s. valid_global_refs s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_global_refs s\<rbrace>"
by (wp valid_global_refs_cte_lift)
lemma set_pd_valid_arch:
"\<lbrace>\<lambda>s. valid_arch_state s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_arch_state s\<rbrace>"
by (wp valid_arch_state_lift)
lemma set_pd_cur:
"\<lbrace>\<lambda>s. cur_tcb s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. cur_tcb s\<rbrace>"
apply (simp add: cur_tcb_def set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_simps)
apply (simp add: is_tcb_def)
done
crunch interrupt_states[wp]: set_pd "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps)
lemma set_pd_vspace_objs_unmap:
"\<lbrace>valid_vspace_objs and (\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageDirectory pd') s) and
obj_at (\<lambda>ko. vs_refs (ArchObj (PageDirectory pd')) \<subseteq> vs_refs ko) p\<rbrace>
set_pd p pd' \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
apply (simp add: set_pd_def)
apply (wpsimp wp: set_object_vspace_objs[THEN hoare_set_object_weaken_pre])
including unfold_objects
apply (clarsimp simp: a_type_def)
done
declare graph_of_None_update[simp]
declare graph_of_Some_update[simp]
lemma set_pt_typ_at [wp]:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> set_pt ptr pt \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
apply (simp add: set_pt_def)
by (wpsimp wp: set_object_wp_strong simp: obj_at_def)
lemma set_pt_valid_objs:
"\<lbrace>(%s. \<forall>i. wellformed_pte (pt i)) and valid_objs\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: set_pt_def)
apply (wp set_object_valid_objs)
apply (clarsimp split: kernel_object.splits
arch_kernel_obj.splits)
apply (clarsimp simp: valid_obj_def obj_at_def a_type_def
arch_valid_obj_def)
done
lemma set_pt_iflive:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. if_live_then_nonz_cap s\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre]
simp: live_def hyp_live_def a_type_def)
done
lemma set_pt_zombies:
"\<lbrace>\<lambda>s. zombies_final s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. zombies_final s\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_zombies[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
done
lemma set_pt_zombies_state_refs:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. P (state_refs_of s)\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule rsubst [where P=P])
apply (rule ext)
apply (clarsimp simp: state_refs_of_def)
done
lemma set_pt_zombies_state_hyp_refs:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. P (state_hyp_refs_of s)\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule rsubst [where P=P])
apply (rule ext)
apply (clarsimp simp: state_hyp_refs_of_def)
done
lemma set_pt_cdt:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
unfolding set_pt_def including unfold_objects by wpsimp
lemma set_pt_valid_mdb:
"\<lbrace>\<lambda>s. valid_mdb s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. valid_mdb s\<rbrace>"
including unfold_objects
by (wpsimp wp: set_pt_cdt valid_mdb_lift simp: set_pt_def set_object_def)
lemma set_pt_valid_idle:
"\<lbrace>\<lambda>s. valid_idle s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. valid_idle s\<rbrace>"
including unfold_objects
by (wpsimp wp: valid_idle_lift simp: set_pt_def)
lemma set_pt_ifunsafe:
"\<lbrace>\<lambda>s. if_unsafe_then_cap s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. if_unsafe_then_cap s\<rbrace>"
including unfold_objects by (wpsimp wp: set_object_ifunsafe[THEN hoare_set_object_weaken_pre]
simp: set_pt_def a_type_def)
lemma set_pt_reply_caps:
"\<lbrace>\<lambda>s. valid_reply_caps s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. valid_reply_caps s\<rbrace>"
by (wp valid_reply_caps_st_cte_lift)
lemma set_pt_reply_masters:
"\<lbrace>valid_reply_masters\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
by (wp valid_reply_masters_cte_lift)
crunch global_ref [wp]: set_pt "\<lambda>s. P (global_refs s)"
(wp: crunch_wps)
crunch arch [wp]: set_pt "\<lambda>s. P (arch_state s)"
(wp: crunch_wps)
crunch idle [wp]: set_pt "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
crunch irq [wp]: set_pt "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps)
lemma set_pt_valid_global:
"\<lbrace>\<lambda>s. valid_global_refs s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. valid_global_refs s\<rbrace>"
by (wp valid_global_refs_cte_lift)
lemma set_pt_valid_arch_state[wp]:
"\<lbrace>\<lambda>s. valid_arch_state s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. valid_arch_state s\<rbrace>"
by (wp valid_arch_state_lift)
lemma set_pt_cur:
"\<lbrace>\<lambda>s. cur_tcb s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. cur_tcb s\<rbrace>"
unfolding set_pt_def cur_tcb_def including unfold_objects
by (wpsimp wp: set_object_wp_strong simp: a_type_def is_tcb)
lemma set_pt_aligned [wp]:
"\<lbrace>pspace_aligned\<rbrace> set_pt p pt \<lbrace>\<lambda>_. pspace_aligned\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_aligned[THEN hoare_set_object_weaken_pre])
crunch interrupt_states[wp]: set_pt "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps)
lemma set_pt_vspace_objs [wp]:
"\<lbrace>valid_vspace_objs and (\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageTable pt) s)\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_vspace_objs[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
by (simp add: vs_refs_def)
lemma set_pt_vs_lookup [wp]:
"\<lbrace>\<lambda>s. P (vs_lookup s)\<rbrace> set_pt p pt \<lbrace>\<lambda>x s. P (vs_lookup s)\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule rsubst [where P=P])
apply (rule order_antisym)
apply (rule vs_lookup_sub)
apply (clarsimp simp: vs_refs_def)
prefer 3
apply (rule vs_lookup_sub)
apply (clarsimp simp: vs_refs_def split: if_split_asm)
apply blast+
apply auto
done
lemma store_pte_vs_lookup [wp]:
"\<lbrace>\<lambda>s. P (vs_lookup s)\<rbrace> store_pte x pte \<lbrace>\<lambda>_ s. P (vs_lookup s)\<rbrace>"
unfolding store_pte_def by wpsimp
lemma unique_table_caps_ptD:
"\<lbrakk> cs p = Some cap; cap_asid cap = None;
cs p' = Some cap'; is_pt_cap cap; is_pt_cap cap';
obj_refs cap' = obj_refs cap;
unique_table_caps cs\<rbrakk>
\<Longrightarrow> p = p'"
by (fastforce simp add: unique_table_caps_def)
lemma unique_table_caps_pdD:
"\<lbrakk> cs p = Some cap; cap_asid cap = None;
cs p' = Some cap'; is_pd_cap cap; is_pd_cap cap';
obj_refs cap' = obj_refs cap;
unique_table_caps cs\<rbrakk>
\<Longrightarrow> p = p'"
by (fastforce simp add: unique_table_caps_def)
lemma valid_objs_caps:
"valid_objs s \<Longrightarrow> valid_caps (caps_of_state s) s"
apply (clarsimp simp: valid_caps_def)
apply (erule (1) caps_of_state_valid_cap)
done
lemma simpler_set_pt_def:
"set_pt p pt =
(\<lambda>s. if \<exists>pt. kheap s p = Some (ArchObj (PageTable pt)) then
({((), s\<lparr>kheap := kheap s(p \<mapsto> ArchObj (PageTable pt))\<rparr>)}, False)
else ({}, True))"
apply (rule ext)
apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def
put_def get_def simpler_gets_def bind_def return_def fail_def)
apply (rule conjI)
apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def
put_def get_def simpler_gets_def bind_def
return_def fail_def a_type_def
split: kernel_object.splits
arch_kernel_obj.splits)
using a_type_def aa_type_APageTableE apply fastforce
done
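(* The relational unfolding above yields a convenient introduction rule for
   Hoare triples about set_pt: it suffices to show the postcondition for the
   explicitly updated heap. *)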
lemma valid_set_ptI:
"(!!s opt. \<lbrakk>P s; kheap s p = Some (ArchObj (PageTable opt))\<rbrakk>
\<Longrightarrow> Q () (s\<lparr>kheap := kheap s(p \<mapsto> ArchObj (PageTable pt))\<rparr>))
\<Longrightarrow> \<lbrace>P\<rbrace> set_pt p pt \<lbrace>Q\<rbrace>"
by (rule validI) (clarsimp simp: simpler_set_pt_def split: if_split_asm)
lemma set_pt_table_caps [wp]:
"\<lbrace>valid_table_caps and (\<lambda>s. valid_caps (caps_of_state s) s) and
(\<lambda>s. ((\<exists>slot. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p None))) \<longrightarrow>
pt = (\<lambda>x. InvalidPTE)) \<or>
(\<forall>slot. \<exists>asid. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p (Some asid)))))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_table_caps\<rbrace>"
unfolding valid_table_caps_def
apply (rule valid_set_ptI)
apply (intro allI impI, simp add: obj_at_def del: HOL.imp_disjL)
apply (cut_tac s=s and val= "ArchObj (PageTable pt)" and p=p
in caps_of_state_after_update[folded fun_upd_def])
apply (simp add: obj_at_def)
apply (clarsimp simp del: HOL.imp_disjL)
apply (thin_tac "ALL x. P x" for P)
apply (case_tac cap, simp_all add: is_pd_cap_def is_pt_cap_def)
apply (erule disjE)
apply (simp add: valid_caps_def)
apply ((drule spec)+, erule impE, assumption)
apply (rename_tac arch_cap)
apply (case_tac arch_cap,
simp_all add: valid_cap_def obj_at_def aa_type_simps)
apply clarsimp
apply (erule impE, fastforce simp: cap_asid_def split: option.splits)
apply (erule disjE, simp add: empty_table_def)
apply (drule_tac x=a in spec, drule_tac x=b in spec)
apply (clarsimp simp add: cap_asid_def split: option.splits)
done
lemma set_object_caps_of_state:
"\<lbrace>(\<lambda>s. \<not>(tcb_at p s) \<and> \<not>(\<exists>n. cap_table_at n p s)) and
K ((\<forall>x y. obj \<noteq> CNode x y) \<and> (\<forall>x. obj \<noteq> TCB x)) and
(\<lambda>s. P (caps_of_state s))\<rbrace>
set_object p obj
\<lbrace>\<lambda>_ s. P (caps_of_state s)\<rbrace>"
apply (wpsimp wp: set_object_wp_strong)
apply (erule rsubst[where P=P])
apply (rule ext)
apply (simp add: caps_of_state_cte_wp_at obj_at_def is_cap_table_def
is_tcb_def)
apply (auto simp: cte_wp_at_cases)
done
(* FIXME: Move to Invariants_A *)
lemma pte_ref_pagesD:
"pte_ref_pages (pt y) = Some x \<Longrightarrow>
(VSRef (ucast y) (Some APageTable), x)
\<in> vs_refs_pages (ArchObj (PageTable pt))"
by (auto simp: pte_ref_pages_def vs_refs_pages_def graph_of_def)
lemma set_pt_valid_vspace_objs[wp]:
"valid (\<lambda>s. valid_vspace_objs s \<and> ((\<exists>\<rhd> p) s \<longrightarrow> (\<forall>x. valid_pte (pt x) s)))
(set_pt p pt) (\<lambda>_. valid_vspace_objs)"
apply (rule valid_set_ptI)
apply (clarsimp simp: valid_vspace_objs_def)
subgoal for s opt pa rs ao
apply (spec pa)
apply (prove "(\<exists>\<rhd> pa) s")
apply (rule exI[where x=rs])
apply (erule vs_lookupE)
apply clarsimp
apply (erule vs_lookupI)
apply (erule rtrancl.induct, simp)
subgoal for \<dots> b c
apply (prove "(b \<rhd>1 c) s")
apply (thin_tac "_ : rtrancl _")+
apply (clarsimp simp add: vs_lookup1_def obj_at_def vs_refs_def
split: if_split_asm)
by simp
apply simp
apply (spec ao)
apply (cases "pa = p")
apply (clarsimp simp: obj_at_def)
subgoal for _ x
apply (drule_tac x=x in spec)
by (cases "pt x"; clarsimp simp: data_at_def obj_at_def a_type_simps)
apply (cases ao; simp add: obj_at_def a_type_simps)
apply clarsimp
apply (drule bspec, assumption, clarsimp)
apply clarsimp
subgoal for "fun" _ x
apply (spec x)
by (cases "fun x"; clarsimp simp: obj_at_def data_at_def a_type_simps)
apply clarsimp
apply (drule bspec,fastforce)
subgoal for "fun" x
by (cases "fun x"; clarsimp simp: data_at_def obj_at_def a_type_simps)
done
done
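(* For valid_vs_lookup we additionally need that every page referenced by the
   new table is covered by a matching mapping capability; the precondition
   below states exactly this for each pte_ref_pages target. *)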
lemma set_pt_valid_vs_lookup [wp]:
"\<lbrace>\<lambda>s. valid_vs_lookup s \<and> valid_arch_state s \<and>
valid_vspace_objs s \<and> ((\<exists>\<rhd> p) s \<longrightarrow> (\<forall>x. valid_pte (pt x) s)) \<and>
(\<forall>ref. (ref \<unrhd> p) s \<longrightarrow>
(\<forall>x p. pte_ref_pages (pt x) = Some p \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
p \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (ucast x) (Some APageTable) # ref))))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_vs_lookup\<rbrace>"
using set_pt_valid_vspace_objs[of p pt] set_pt_valid_arch_state[of p pt]
apply (clarsimp simp: valid_def simpler_set_pt_def)
apply (drule_tac x=s in spec)+
apply (clarsimp simp: valid_vs_lookup_def split: if_split_asm)
apply (erule (1) vs_lookup_pagesE_alt)
apply (clarsimp simp: valid_arch_state_def valid_asid_table_def
fun_upd_def)
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast a) None]" in spec)+
apply simp
apply (drule vs_lookup_pages_atI)
apply simp
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply simp
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)+
apply simp
apply (drule vs_lookup_pages_apI)
apply (simp split: if_split_asm)
apply simp+
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply simp
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast c) (Some APageDirectory),
VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)+
apply simp
apply (drule vs_lookup_pages_pdI)
apply (simp split: if_split_asm)+
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply fastforce
apply (clarsimp simp: fun_upd_def split: if_split_asm)
apply (thin_tac "valid_vspace_objs s" for s, thin_tac "valid_arch_state s" for s)
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply (thin_tac "\<forall>p ref. P p ref" for P)
apply (drule_tac x="[VSRef (ucast c) (Some APageDirectory),
VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)
apply (thin_tac "valid_pte pte s" for pte s)
apply (erule impE, fastforce intro: vs_lookup_pdI)
apply (drule_tac x=d in spec)
apply (erule impE)
apply (erule (5) vs_lookup_pdI[THEN vs_lookup_pages_vs_lookupI])
apply (drule spec, drule spec, erule impE, assumption)
apply assumption
apply (thin_tac "valid_vspace_objs s" for s, thin_tac "valid_arch_state s" for s)
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply (thin_tac "\<forall>ref. (ref \<unrhd> p) s \<longrightarrow> P ref" for P)
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast d) (Some APageTable),
VSRef (ucast c) (Some APageDirectory),
VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)
apply (thin_tac "(\<exists>\<rhd> p) s \<longrightarrow> P" for P)
apply (erule impE, fastforce intro: vs_lookup_pages_ptI)
apply simp
done
lemma set_pt_arch_caps [wp]:
"\<lbrace>valid_arch_caps and valid_arch_state and valid_vspace_objs and
(\<lambda>s. valid_caps (caps_of_state s) s) and
(\<lambda>s. ((\<exists>slot. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p None))) \<longrightarrow>
pt = (\<lambda>x. InvalidPTE)) \<or>
(\<forall>slot. \<exists>asid. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p (Some asid))))) and
(\<lambda>s. ((\<exists>\<rhd> p) s \<longrightarrow> (\<forall>x. valid_pte (pt x) s)) \<and>
(\<forall>ref. (ref \<unrhd> p) s \<longrightarrow>
(\<forall>x p. pte_ref_pages (pt x) = Some p \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
p \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (ucast x) (Some APageTable) # ref)))))\<rbrace>
set_pt p pt \<lbrace>\<lambda>_. valid_arch_caps\<rbrace>"
unfolding valid_arch_caps_def
apply (rule hoare_pre)
apply (wp set_pt_valid_vs_lookup)
apply clarsimp
done
lemma valid_global_refsD2:
"\<lbrakk> caps_of_state s ptr = Some cap; valid_global_refs s \<rbrakk>
\<Longrightarrow> global_refs s \<inter> cap_range cap = {}"
by (cases ptr,
simp add: valid_global_refs_def valid_refs_def
cte_wp_at_caps_of_state)
lemma valid_global_refsD:
"\<lbrakk> valid_global_refs s; cte_wp_at ((=) cap) ptr s;
r \<in> global_refs s \<rbrakk>
\<Longrightarrow> r \<notin> cap_range cap"
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (drule(1) valid_global_refsD2)
apply fastforce
done
lemma set_pt_global_objs [wp]:
"\<lbrace>valid_global_objs and valid_arch_state and
(\<lambda>s. p \<in> set (arm_global_pts (arch_state s)) \<longrightarrow>
(\<forall>x. aligned_pte (pt x)))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_global_objs\<rbrace>"
apply (rule valid_set_ptI)
apply (clarsimp simp: valid_global_objs_def valid_arch_state_def valid_vspace_obj_def
valid_vso_at_def obj_at_def empty_table_def)
done
crunch v_ker_map[wp]: set_pt "valid_kernel_mappings"
(ignore: set_object wp: set_object_v_ker_map crunch_wps)
lemma set_pt_asid_map [wp]:
"\<lbrace>valid_asid_map\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
apply (simp add: valid_asid_map_def vspace_at_asid_def)
apply (rule hoare_lift_Pf2 [where f="arch_state"])
apply wp+
done
lemma set_pt_only_idle [wp]:
"\<lbrace>only_idle\<rbrace> set_pt p pt \<lbrace>\<lambda>_. only_idle\<rbrace>"
by (wp only_idle_lift)
lemma set_pt_equal_mappings [wp]:
"\<lbrace>equal_kernel_mappings\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. equal_kernel_mappings\<rbrace>"
by (simp add: set_pt_def | wp set_object_equal_mappings get_object_wp)+
lemma set_pt_valid_global_vspace_mappings:
"\<lbrace>\<lambda>s. valid_global_vspace_mappings s \<and> valid_global_objs s \<and> p \<notin> global_refs s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_global_vspace_mappings\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_global_vspace_mappings)
lemma set_pt_kernel_window[wp]:
"\<lbrace>pspace_in_kernel_window\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. pspace_in_kernel_window\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_pspace_in_kernel_window[THEN hoare_set_object_weaken_pre])
lemma set_pt_respects_device_region[wp]:
"\<lbrace>pspace_respects_device_region\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. pspace_respects_device_region\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_pspace_respects_device_region[THEN hoare_set_object_weaken_pre])
lemma set_pt_caps_in_kernel_window[wp]:
"\<lbrace>cap_refs_in_kernel_window\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. cap_refs_in_kernel_window\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_cap_refs_in_kernel_window[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_pt_caps_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_cap_refs_respects_device_region[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_pt_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_valid_ioc_no_caps[THEN hoare_set_object_weaken_pre]
simp: is_tcb is_cap_table)
lemma valid_machine_stateE:
assumes vm: "valid_machine_state s"
assumes e: "\<lbrakk>in_user_frame p s
\<or> underlying_memory (machine_state s) p = 0 \<rbrakk> \<Longrightarrow> E "
shows E
using vm
apply (clarsimp simp: valid_machine_state_def)
apply (drule_tac x = p in spec)
apply (rule e)
apply auto
done
lemma in_user_frame_same_type_upd:
"\<lbrakk>typ_at type p s; type = a_type obj; in_user_frame q s\<rbrakk>
\<Longrightarrow> in_user_frame q (s\<lparr>kheap := kheap s(p \<mapsto> obj)\<rparr>)"
apply (clarsimp simp: in_user_frame_def obj_at_def)
apply (rule_tac x=sz in exI)
apply (auto simp: a_type_simps)
done
lemma in_device_frame_same_type_upd:
"\<lbrakk>typ_at type p s; type = a_type obj ; in_device_frame q s\<rbrakk>
\<Longrightarrow> in_device_frame q (s\<lparr>kheap := kheap s(p \<mapsto> obj)\<rparr>)"
apply (clarsimp simp: in_device_frame_def obj_at_def)
apply (rule_tac x=sz in exI)
apply (auto simp: a_type_simps)
done
lemma store_word_offs_in_user_frame[wp]:
"\<lbrace>\<lambda>s. in_user_frame p s\<rbrace> store_word_offs a x w \<lbrace>\<lambda>_ s. in_user_frame p s\<rbrace>"
unfolding in_user_frame_def
by (wp hoare_vcg_ex_lift)
lemma store_word_offs_in_device_frame[wp]:
"\<lbrace>\<lambda>s. in_device_frame p s\<rbrace> store_word_offs a x w \<lbrace>\<lambda>_ s. in_device_frame p s\<rbrace>"
unfolding in_device_frame_def
by (wp hoare_vcg_ex_lift)
lemma as_user_in_user_frame[wp]:
"\<lbrace>\<lambda>s. in_user_frame p s\<rbrace> as_user t m \<lbrace>\<lambda>_ s. in_user_frame p s\<rbrace>"
unfolding in_user_frame_def
by (wp hoare_vcg_ex_lift)
lemma as_user_in_device_frame[wp]:
"\<lbrace>\<lambda>s. in_device_frame p s\<rbrace> as_user t m \<lbrace>\<lambda>_ s. in_device_frame p s\<rbrace>"
unfolding in_device_frame_def
by (wp hoare_vcg_ex_lift)
crunch obj_at[wp]: load_word_offs "\<lambda>s. P (obj_at Q p s)"
lemma load_word_offs_in_user_frame[wp]:
"\<lbrace>\<lambda>s. in_user_frame p s\<rbrace> load_word_offs a x \<lbrace>\<lambda>_ s. in_user_frame p s\<rbrace>"
unfolding in_user_frame_def
by (wp hoare_vcg_ex_lift)
lemma valid_machine_state_heap_updI:
assumes vm : "valid_machine_state s"
assumes tyat : "typ_at type p s"
shows
" a_type obj = type \<Longrightarrow> valid_machine_state (s\<lparr>kheap := kheap s(p \<mapsto> obj)\<rparr>)"
apply (clarsimp simp: valid_machine_state_def)
subgoal for p
apply (rule valid_machine_stateE[OF vm,where p = p])
apply (elim disjE,simp_all)
apply (drule(1) in_user_frame_same_type_upd[OF tyat])
apply simp+
done
done
lemma set_pt_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule valid_machine_state_heap_updI)
apply (fastforce simp: a_type_simps)+
done
crunch valid_irq_states[wp]: set_pt "valid_irq_states"
(wp: crunch_wps)
crunch valid_irq_states[wp]: set_pd "valid_irq_states"
(wp: crunch_wps)
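(* Combine the individual preservation results: set_pt maintains the full
   invariant bundle provided the new table is wellformed, reachability
   implies validity, a matching PageTableCap exists, and all mapped pages
   are covered by mapping capabilities. *)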
lemma set_pt_invs:
"\<lbrace>invs and (\<lambda>s. \<forall>i. wellformed_pte (pt i)) and
(\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageTable pt) s) and
(\<lambda>s. \<exists>slot asid. caps_of_state s slot =
Some (cap.ArchObjectCap (arch_cap.PageTableCap p asid)) \<and>
(pt = (\<lambda>x. InvalidPTE) \<or> asid \<noteq> None)) and
(\<lambda>s. \<forall>ref. (ref \<unrhd> p) s \<longrightarrow>
(\<forall>x p. pte_ref_pages (pt x) = Some p \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
p \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (ucast x) (Some APageTable) # ref))))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (rule hoare_pre)
apply (wp set_pt_valid_objs set_pt_iflive set_pt_zombies
set_pt_zombies_state_refs set_pt_zombies_state_hyp_refs set_pt_valid_mdb
set_pt_valid_idle set_pt_ifunsafe set_pt_reply_caps
set_pt_valid_arch_state set_pt_valid_global set_pt_cur
set_pt_reply_masters valid_irq_node_typ
valid_irq_handlers_lift
set_pt_valid_global_vspace_mappings)
apply (clarsimp dest!: valid_objs_caps)
apply (rule conjI[rotated])
apply (subgoal_tac "p \<notin> global_refs s", simp add: global_refs_def)
apply (frule (1) valid_global_refsD2)
apply (clarsimp simp add: cap_range_def is_pt_cap_def)
apply (thin_tac "ALL x. P x" for P)+
apply (clarsimp simp: valid_arch_caps_def unique_table_caps_def)
apply (drule_tac x=aa in spec, drule_tac x=ba in spec)
apply (drule_tac x=a in spec, drule_tac x=b in spec)
apply (clarsimp simp: is_pt_cap_def cap_asid_def)
done
(* FIXME: move to Invariants_A *)
lemma invs_valid_asid_table [elim!]:
"invs s \<Longrightarrow> valid_asid_table (arm_asid_table (arch_state s)) s"
by (simp add: invs_def valid_state_def valid_arch_state_def)
(* FIXME: move to Invariants_A *)
lemma valid_asid_table_ran:
"valid_asid_table asid_tbl s \<Longrightarrow> \<forall>p\<in>ran asid_tbl. asid_pool_at p s"
by (simp add: invs_def valid_state_def valid_arch_state_def
valid_asid_table_def)
lemma vs_lookup_pages_pt_eq:
"\<lbrakk>valid_vspace_objs s;
\<forall>p\<in>ran (arm_asid_table (arch_state s)). asid_pool_at p s;
page_table_at p s\<rbrakk>
\<Longrightarrow> (ref \<unrhd> p) s = (ref \<rhd> p) s"
apply (rule iffI[rotated])
apply (erule vs_lookup_pages_vs_lookupI)
apply (erule (2) vs_lookup_pagesE_alt)
apply (clarsimp simp: obj_at_def)+
apply (clarsimp simp: obj_at_def pde_ref_pages_def
split: pde.splits)
apply (erule (5) vs_lookup_pdI)
apply (auto simp: obj_at_def pte_ref_pages_def data_at_def
split: pte.splits)
done
lemmas invs_ran_asid_table = invs_valid_asid_table[THEN valid_asid_table_ran]
(* NOTE: we use vs_lookup in the precondition because in this case
the two relations coincide, but vs_lookup is generally preserved
by store_pte while vs_lookup_pages might not be. *)
lemma store_pte_invs [wp]:
"\<lbrace>invs and (\<lambda>s. (\<exists>\<rhd>(p && ~~ mask pt_bits)) s \<longrightarrow> valid_pte pte s) and
(\<lambda>s. wellformed_pte pte) and
(\<lambda>s. \<exists>slot asid. caps_of_state s slot =
Some (ArchObjectCap
(PageTableCap (p && ~~ mask pt_bits) asid)) \<and>
(pte = InvalidPTE \<or> asid \<noteq> None)) and
(\<lambda>s. \<forall>ref. (ref \<rhd> (p && ~~ mask pt_bits)) s \<longrightarrow>
(\<forall>q. pte_ref_pages pte = Some q \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
q \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (p && mask pt_bits >> 2)
(Some APageTable) # ref))))\<rbrace>
store_pte p pte \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: store_pte_def)
apply (wp dmo_invs set_pt_invs)
apply clarsimp
apply (intro conjI)
apply (drule invs_valid_objs)
apply (fastforce simp: valid_objs_def dom_def obj_at_def valid_obj_def arch_valid_obj_def)
apply clarsimp
apply (drule (1) valid_vspace_objsD, fastforce)
apply simp
apply (thin_tac "All _")
apply (rule exI)+
apply (rule conjI, assumption)
subgoal premises prems for \<dots> asid
proof (cases asid)
case (Some a) from this show ?thesis
by fastforce
next
case None from this prems show ?thesis
apply clarsimp
apply (rule ext)
apply clarsimp
apply (frule invs_pd_caps)
apply (clarsimp simp add: valid_table_caps_def simp del: HOL.imp_disjL)
apply (spec "p && ~~ mask pt_bits")
apply (drule spec)+
apply (erule impE, assumption)
by (simp add: is_pt_cap_def cap_asid_def empty_table_def obj_at_def)
qed
apply (clarsimp simp: obj_at_def)
apply (intro impI conjI allI)
apply (drule (2) vs_lookup_pages_pt_eq[OF invs_vspace_objs invs_ran_asid_table,
THEN iffD1, rotated -1])
apply (clarsimp simp: obj_at_def a_type_simps)
apply (drule spec, erule impE, assumption)+
apply (erule exEI)+
apply clarsimp
apply (rule sym)
apply (rule ucast_ucast_len)
apply (rule shiftr_less_t2n)
using and_mask_less'[of 10 p]
apply (simp add: pt_bits_def pageBits_def)
subgoal for \<dots> pa
apply (thin_tac "All _", thin_tac "_ \<longrightarrow> _", thin_tac "_ \<or> _")
apply (frule invs_valid_vs_lookup)
apply (simp add: valid_vs_lookup_def)
apply (spec pa)
apply (drule spec, erule impE)
apply (erule vs_lookup_pages_step)
by (fastforce simp: vs_lookup_pages1_def obj_at_def
vs_refs_pages_def graph_of_def image_def) simp
done
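(* Preservation lemmas for set_asid_pool, following the same pattern as
   those for set_pt and set_pd above. *)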
lemma set_asid_pool_iflive [wp]:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. if_live_then_nonz_cap s\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects
by (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre]
simp: a_type_def live_def hyp_live_def)
lemma set_asid_pool_zombies [wp]:
"\<lbrace>\<lambda>s. zombies_final s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. zombies_final s\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_zombies[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_asid_pool_zombies_state_refs [wp]:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. P (state_refs_of s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (clarsimp simp: state_refs_of_def)
done
lemma set_asid_pool_zombies_state_hyp_refs [wp]:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. P (state_hyp_refs_of s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (simp add: state_hyp_refs_of_def)
done
lemma set_asid_pool_cdt [wp]:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by wpsimp
lemma set_asid_pool_caps_of_state [wp]:
"\<lbrace>\<lambda>s. P (caps_of_state s)\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_ s. P (caps_of_state s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (subst cte_wp_caps_of_lift)
prefer 2
apply assumption
apply (clarsimp simp: cte_wp_at_cases)
done
lemma set_asid_pool_valid_mdb [wp]:
"\<lbrace>\<lambda>s. valid_mdb s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_mdb s\<rbrace>"
including unfold_objects
by (wpsimp wp: valid_mdb_lift simp: set_asid_pool_def set_object_def)
lemma set_asid_pool_valid_idle [wp]:
"\<lbrace>\<lambda>s. valid_idle s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_idle s\<rbrace>"
including unfold_objects
by (wpsimp wp: valid_idle_lift simp: set_asid_pool_def)
lemma set_asid_pool_ifunsafe [wp]:
"\<lbrace>\<lambda>s. if_unsafe_then_cap s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. if_unsafe_then_cap s\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_ifunsafe[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_asid_pool_reply_caps [wp]:
"\<lbrace>\<lambda>s. valid_reply_caps s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_reply_caps s\<rbrace>"
by (wp valid_reply_caps_st_cte_lift)
lemma set_asid_pool_reply_masters [wp]:
"\<lbrace>valid_reply_masters\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
by (wp valid_reply_masters_cte_lift)
crunch global_ref [wp]: set_asid_pool "\<lambda>s. P (global_refs s)"
(wp: crunch_wps)
crunch arch [wp]: set_asid_pool "\<lambda>s. P (arch_state s)"
(wp: crunch_wps)
crunch idle [wp]: set_asid_pool "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
crunch irq [wp]: set_asid_pool "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps)
crunch valid_irq_states[wp]: set_asid_pool "valid_irq_states"
(wp: crunch_wps)
lemma set_asid_pool_valid_global [wp]:
"\<lbrace>\<lambda>s. valid_global_refs s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_global_refs s\<rbrace>"
by (wp valid_global_refs_cte_lift)
crunch interrupt_states[wp]: set_asid_pool "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps)
lemma set_asid_pool_vspace_objs_unmap':
"\<lbrace>valid_vspace_objs and (\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (ASIDPool ap) s) and
obj_at (\<lambda>ko. \<exists>ap'. ko = ArchObj (ASIDPool ap') \<and> graph_of ap \<subseteq> graph_of ap') p\<rbrace>
set_asid_pool p ap \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_vspace_objs simp: a_type_simps)
apply (fastforce simp: vs_refs_def)
done
lemma set_asid_pool_vspace_objs_unmap:
"\<lbrace>valid_vspace_objs and ko_at (ArchObj (ASIDPool ap)) p\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
apply (wp set_asid_pool_vspace_objs_unmap')
apply (clarsimp simp: obj_at_def graph_of_restrict_map)
apply (drule valid_vspace_objsD, simp add: obj_at_def, assumption)
apply simp
by (auto simp: obj_at_def dest!: ran_restrictD)
lemma set_asid_pool_table_caps [wp]:
"\<lbrace>valid_table_caps\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_. valid_table_caps\<rbrace>"
apply (rule valid_table_caps_lift)
apply (rule set_asid_pool_caps_of_state)
apply wpsimp
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def empty_table_def)
apply (metis kernel_object_exhaust)
done
(* FIXME: Move to Invariants_A *)
lemma vs_lookup_pages_stateI:
assumes 1: "(ref \<unrhd> p) s"
assumes ko: "\<And>ko p. ko_at ko p s \<Longrightarrow> obj_at (\<lambda>ko'. vs_refs_pages ko \<subseteq> vs_refs_pages ko') p s'"
assumes table: "graph_of (arm_asid_table (arch_state s)) \<subseteq> graph_of (arm_asid_table (arch_state s'))"
shows "(ref \<unrhd> p) s'"
using 1 vs_lookup_pages_sub [OF ko table] by blast
lemma set_asid_pool_vs_lookup_unmap':
"\<lbrace>valid_vs_lookup and
obj_at (\<lambda>ko. \<exists>ap'. ko = ArchObj (ASIDPool ap') \<and> graph_of ap \<subseteq> graph_of ap') p\<rbrace>
set_asid_pool p ap \<lbrace>\<lambda>_. valid_vs_lookup\<rbrace>"
apply (simp add: valid_vs_lookup_def pred_conj_def)
apply (rule hoare_lift_Pf2 [where f=caps_of_state];wp?)
apply (simp add: set_asid_pool_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp: obj_at_def simp del: fun_upd_apply del: disjCI
split: kernel_object.splits arch_kernel_obj.splits)
subgoal for \<dots> pa ref
apply (spec pa)
apply (spec ref)
apply (erule impE)
apply (erule vs_lookup_pages_stateI)
by (clarsimp simp: obj_at_def vs_refs_pages_def split: if_split_asm)
fastforce+
done
lemma set_asid_pool_vs_lookup_unmap:
"\<lbrace>valid_vs_lookup and ko_at (ArchObj (ASIDPool ap)) p\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. valid_vs_lookup\<rbrace>"
apply (wp set_asid_pool_vs_lookup_unmap')
by (clarsimp simp: obj_at_def
elim!: subsetD [OF graph_of_restrict_map])
lemma valid_pte_typ_at:
"(\<And>T p. typ_at (AArch T) p s = typ_at (AArch T) p s') \<Longrightarrow>
valid_pte pte s = valid_pte pte s'"
by (case_tac pte, auto simp add: data_at_def)
lemma valid_pde_typ_at:
"(\<And>T p. typ_at (AArch T) p s = typ_at (AArch T) p s') \<Longrightarrow>
valid_pde pde s = valid_pde pde s'"
by (case_tac pde, auto simp add: data_at_def)
lemma valid_vspace_obj_same_type:
"\<lbrakk>valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\<rbrakk>
\<Longrightarrow> valid_vspace_obj ao (s\<lparr>kheap := kheap s(p \<mapsto> ko')\<rparr>)"
apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ])
by (auto simp: obj_at_def)
lemma set_asid_pool_global_objs [wp]:
"\<lbrace>valid_global_objs and valid_arch_state\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_. valid_global_objs\<rbrace>"
apply (simp add: set_asid_pool_def set_object_def a_type_def)
apply (wp get_object_wp)
apply (clarsimp simp del: fun_upd_apply
split: kernel_object.splits arch_kernel_obj.splits)
apply (clarsimp simp: valid_global_objs_def valid_vso_at_def)
apply (rule conjI)
apply (clarsimp simp: obj_at_def)
apply (rule conjI)
subgoal by (clarsimp simp: valid_arch_state_def obj_at_def a_type_def)
apply clarsimp
apply (erule (1) valid_vspace_obj_same_type)
subgoal by (simp add: a_type_def)
apply (rule conjI)
subgoal by (clarsimp simp: obj_at_def valid_arch_state_def a_type_def)
apply (clarsimp simp: obj_at_def)
apply (drule (1) bspec)
by clarsimp
crunch v_ker_map[wp]: set_asid_pool "valid_kernel_mappings"
(ignore: set_object wp: set_object_v_ker_map crunch_wps)
lemma set_asid_pool_restrict_asid_map:
"\<lbrace>valid_asid_map and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow> ucast asid \<notin> S \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
apply (simp add: set_asid_pool_def valid_asid_map_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp split: kernel_object.splits arch_kernel_obj.splits
simp del: fun_upd_apply)
apply (drule(1) bspec)
apply (clarsimp simp: vspace_at_asid_def obj_at_def graph_of_def)
apply (drule subsetD, erule domI)
apply simp
apply (drule spec, drule(1) mp)
apply simp
apply (erule vs_lookupE)
apply (rule vs_lookupI, simp)
apply (clarsimp simp: vs_asid_refs_def graph_of_def)
apply (drule rtranclD)
apply (erule disjE, clarsimp)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (rule r_into_rtrancl)
apply (drule vs_lookup1D)
apply clarsimp
apply (subst vs_lookup1_def)
apply (clarsimp simp: obj_at_def)
apply (erule rtranclE)
apply (clarsimp simp: vs_refs_def graph_of_def)
apply (rule image_eqI[where x="(_, _)"])
apply (simp add: split_def)
apply (clarsimp simp: restrict_map_def)
apply (drule ucast_up_inj, simp)
apply (simp add: mask_asid_low_bits_ucast_ucast)
apply (drule ucast_up_inj, simp)
apply clarsimp
apply clarsimp
apply (drule vs_lookup1_trans_is_append)
apply clarsimp
apply (drule vs_lookup1D)
by clarsimp
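(* Unmapping a single ASID is the special case S = -{x} of restricting the
   pool, so the next lemmas are immediate instances of the restriction
   versions above. *)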
lemma set_asid_pool_asid_map_unmap:
"\<lbrace>valid_asid_map and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow>
ucast asid = x \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap(x := None)) \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
using set_asid_pool_restrict_asid_map[where S="- {x}"]
by (simp add: restrict_map_def fun_upd_def if_flip)
lemma set_asid_pool_vspace_objs_unmap_single:
"\<lbrace>valid_vspace_objs and ko_at (ArchObj (ASIDPool ap)) p\<rbrace>
set_asid_pool p (ap(x := None)) \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
using set_asid_pool_vspace_objs_unmap[where S="- {x}"]
by (simp add: restrict_map_def fun_upd_def if_flip)
lemma set_asid_pool_only_idle [wp]:
"\<lbrace>only_idle\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_. only_idle\<rbrace>"
by (wp only_idle_lift set_asid_pool_typ_at)
lemma set_asid_pool_equal_mappings [wp]:
"\<lbrace>equal_kernel_mappings\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. equal_kernel_mappings\<rbrace>"
by (simp add: set_asid_pool_def | wp set_object_equal_mappings get_object_wp)+
lemma set_asid_pool_valid_global_vspace_mappings[wp]:
"\<lbrace>valid_global_vspace_mappings\<rbrace>
set_asid_pool p ap \<lbrace>\<lambda>rv. valid_global_vspace_mappings\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_global_vspace_mappings[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (clarsimp simp: a_type_def)
lemma set_asid_pool_kernel_window[wp]:
"\<lbrace>pspace_in_kernel_window\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. pspace_in_kernel_window\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_pspace_in_kernel_window[THEN hoare_set_object_weaken_pre])
lemma set_asid_pool_pspace_respects_device_region[wp]:
"\<lbrace>pspace_respects_device_region\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. pspace_respects_device_region\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_pspace_respects_device_region[THEN hoare_set_object_weaken_pre])
lemma set_asid_pool_caps_kernel_window[wp]:
"\<lbrace>cap_refs_in_kernel_window\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. cap_refs_in_kernel_window\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_cap_refs_in_kernel_window[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_asid_pool_caps_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_cap_refs_respects_device_region[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (simp add: a_type_def)
lemma set_asid_pool_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_valid_ioc_no_caps[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (clarsimp simp: valid_def get_object_def simpler_gets_def assert_def
return_def fail_def bind_def
a_type_simps is_tcb is_cap_table)
lemma set_asid_pool_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_asid_pool p S \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
apply (simp add: set_asid_pool_def set_object_def)
apply (wp get_object_wp)
apply clarify
apply (erule valid_machine_state_heap_updI)
apply (fastforce simp: a_type_def obj_at_def
split: kernel_object.splits arch_kernel_obj.splits)+
done
lemma set_asid_pool_invs_restrict:
"\<lbrace>invs and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow> ucast asid \<notin> S \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def
valid_arch_caps_def)
apply (wp valid_irq_node_typ set_asid_pool_typ_at
set_asid_pool_vspace_objs_unmap valid_irq_handlers_lift
set_asid_pool_vs_lookup_unmap set_asid_pool_restrict_asid_map)
apply simp
done
lemmas set_asid_pool_cte_wp_at1[wp]
= hoare_cte_wp_caps_of_state_lift [OF set_asid_pool_caps_of_state]
lemma mdb_cte_at_set_asid_pool[wp]:
"\<lbrace>\<lambda>s. mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrace>
set_asid_pool y pool
\<lbrace>\<lambda>r s. mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrace>"
apply (clarsimp simp:mdb_cte_at_def)
apply (simp only: imp_conv_disj)
apply (wp hoare_vcg_disj_lift hoare_vcg_all_lift)
done
lemma set_asid_pool_invs_unmap:
"\<lbrace>invs and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow> ucast asid = x \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap(x := None)) \<lbrace>\<lambda>_. invs\<rbrace>"
using set_asid_pool_invs_restrict[where S="- {x}"]
by (simp add: restrict_map_def fun_upd_def if_flip)
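(* valid_slots only mentions arch typ_at facts and reachability, so it can be
   lifted across any function that preserves those two properties. *)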
lemma valid_slots_typ_at:
assumes x: "\<And>T p. \<lbrace>typ_at (AArch T) p\<rbrace> f \<lbrace>\<lambda>rv. typ_at (AArch T) p\<rbrace>"
assumes y: "\<And>p. \<lbrace>\<exists>\<rhd> p\<rbrace> f \<lbrace>\<lambda>rv. \<exists>\<rhd> p\<rbrace>"
shows "\<lbrace>valid_slots m\<rbrace> f \<lbrace>\<lambda>rv. valid_slots m\<rbrace>"
unfolding valid_slots_def
by (cases m; clarsimp; wp x y hoare_vcg_const_Ball_lift valid_pte_lift
valid_pde_lift pte_at_atyp pde_at_atyp)
lemma ucast_ucast_id:
"(len_of TYPE('a)) < (len_of TYPE('b)) \<Longrightarrow> ucast ((ucast (x::('a::len) word))::('b::len) word) = x"
by (auto intro: ucast_up_ucast_id simp: is_up_def source_size_def target_size_def word_size)
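(* User virtual addresses below kernel_base decode to page-directory slots
   outside the kernel mapping slots; this is pure word arithmetic on the top
   12 bits of the address. *)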
lemma kernel_base_kernel_mapping_slots:
"x < kernel_base \<Longrightarrow> ucast (x >> 20) \<notin> kernel_mapping_slots"
apply (simp add: kernel_mapping_slots_def kernel_base_def)
apply (subst ucast_le_ucast[symmetric, where 'a=12 and 'b=32])
apply simp
apply (subst ucast_ucast_mask)
apply (simp add: ucast_def)
apply (subst less_mask_eq)
apply (rule vptr_shiftr_le_2p[unfolded pageBits_def])
apply (subst word_not_le)
apply word_bitwise
done
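(* lookup_pt_slot extends a vs_lookup path for the page directory with the
   reference for the selected PD slot, landing on the page table that
   contains the returned slot. *)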
lemma lookup_pt_slot_looks_up [wp]:
"\<lbrace>ref \<rhd> pd and K (is_aligned pd 14 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>pt_slot. (VSRef (vptr >> 20 << 2 >> 2) (Some APageDirectory) # ref) \<rhd> (pt_slot && ~~ mask pt_bits)\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply clarsimp
apply (rule vs_lookup_step, assumption)
apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual)
apply (rule exI, rule conjI, assumption)
subgoal for s _ x
apply (prove "ptrFromPAddr x + ((vptr >> 12) && 0xFF << 2) && ~~ mask pt_bits = ptrFromPAddr x")
apply (prove "is_aligned (ptrFromPAddr x) 10")
apply (drule (2) valid_vspace_objsD)
apply clarsimp
apply (erule_tac x="ucast (vptr >> 20 << 2 >> 2)" in ballE)
apply (thin_tac "obj_at P x s" for P x)+
apply (clarsimp simp: obj_at_def invs_def valid_state_def valid_pspace_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def
split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (frule kernel_mapping_slots_empty_pdeI)
apply ((simp add: obj_at_def)+)[4]
apply (clarsimp simp: pde_ref_def second_level_tables_def)
apply (erule is_aligned_global_pt[unfolded pt_bits_def pageBits_def, simplified])
apply simp+
apply (subgoal_tac "(vptr >> 12) && 0xFF << 2 < 2 ^ 10")
apply (subst is_aligned_add_or, (simp add: pt_bits_def pageBits_def)+)
apply (subst word_ao_dist)
apply (subst mask_out_sub_mask [where x="(vptr >> 12) && 0xFF << 2"])
apply (subst less_mask_eq, simp)
apply (subst is_aligned_neg_mask_eq, simp)
apply (clarsimp simp: valid_arch_state_def valid_global_pts_def)
apply (rule shiftl_less_t2n, simp)
apply (rule and_mask_less'[where n=8, unfolded mask_def, simplified], (simp )+)
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (subst (asm) shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (erule vs_refs_pdI)
apply (erule kernel_base_kernel_mapping_slots)
apply (intro allI impI)
apply (simp add: nth_shiftr)
apply (rule bang_big[simplified])
by (simp add: word_size)
done
lemma lookup_pt_slot_reachable [wp]:
"\<lbrace>\<exists>\<rhd> pd and K (is_aligned pd 14 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>pt_slot. \<exists>\<rhd> (pt_slot && ~~ mask pt_bits)\<rbrace>, -"
apply (simp add: pred_conj_def ex_simps [symmetric] del: ex_simps)
apply (rule hoare_vcg_ex_lift_R1)
apply (rule hoare_pre)
apply (rule hoare_post_imp_R)
apply (rule lookup_pt_slot_looks_up)
prefer 2
apply clarsimp
apply assumption
apply fastforce
done
lemma lookup_pt_slot_reachable2 [wp]:
"\<lbrace>\<exists>\<rhd> pd and K (is_aligned pd 14 \<and> is_aligned vptr 16 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>rv s. \<forall>x\<in>set [0 , 4 .e. 0x3C]. (\<exists>\<rhd> (x + rv && ~~ mask pt_bits)) s\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply clarsimp
apply (rule exI)
apply (rule vs_lookup_step, assumption)
apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual
add.commute add.left_commute)
apply (rule exI, rule conjI, assumption)
apply (rule_tac x="VSRef (vptr >> 20 << 2 >> 2) (Some APageDirectory)" in exI)
apply (subgoal_tac "ptrFromPAddr x + (xa + ((vptr >> 12) && 0xFF << 2)) && ~~ mask pt_bits = ptrFromPAddr x")
prefer 2
apply (subgoal_tac "is_aligned (ptrFromPAddr x) 10")
prefer 2
apply (drule (2) valid_vspace_objsD)
apply clarsimp
apply (erule_tac x="ucast (vptr >> 20 << 2 >> 2)" in ballE)
apply (thin_tac "obj_at P x s" for P x)+
apply (clarsimp simp: obj_at_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def
split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (frule kernel_mapping_slots_empty_pdeI)
apply (simp add: obj_at_def)+
apply clarsimp
apply (erule_tac x="ptrFromPAddr x" in allE)
apply (clarsimp simp: pde_ref_def second_level_tables_def)
apply (rule is_aligned_global_pt[unfolded pt_bits_def pageBits_def, simplified])
apply simp+
apply (subst add_mask_lower_bits)
apply (simp add: pt_bits_def pageBits_def)
prefer 2
apply simp
apply (clarsimp simp: pt_bits_def pageBits_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2 p_le_0xF_helper)
apply (thin_tac "pda x = t" for x t)
apply (subst (asm) word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask)
apply (erule_tac x="n - 2" in allE)
apply simp
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask word_bits_def)
apply (rule conjI, rule refl)
apply (simp add: add.commute add.left_commute)
apply (rule vs_refs_pdI)
prefer 3
apply (clarsimp simp: word_ops_nth_size word_size nth_shiftr nth_shiftl)
apply (drule test_bit_size)
apply (simp add: word_size)
apply fastforce
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (erule kernel_base_kernel_mapping_slots)
done
lemma lookup_pt_slot_reachable3 [wp]:
"\<lbrace>\<exists>\<rhd> pd and K (is_aligned pd 14 \<and> is_aligned vptr 16 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>p s. \<forall>x\<in>set [p, p + 4 .e. p + 0x3C]. (\<exists>\<rhd> (x && ~~ mask pt_bits)) s\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply (clarsimp del: ballI)
apply (clarsimp simp: lookup_pd_slot_def Let_def del: ballI)
apply (simp add: pd_shifting)
apply (frule (2) valid_vspace_objsD)
apply (clarsimp del: ballI)
apply (erule_tac x="(ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2))" in ballE)
apply (clarsimp del: ballI)
apply (subgoal_tac "is_aligned (ptrFromPAddr x) 10")
prefer 2
apply (thin_tac "ko_at P p s" for P p)+
apply (clarsimp simp: obj_at_def add.commute add.left_commute pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (subst p_0x3C_shift)
apply (rule aligned_add_aligned, assumption)
apply (clarsimp intro!: is_aligned_andI1 is_aligned_shiftl is_aligned_shiftr)
apply simp
apply clarsimp
apply (rule exI)
apply (rule vs_lookup_step, assumption)
apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual add.commute add.left_commute)
apply (rule exI, rule conjI, assumption)
apply (rule_tac x="VSRef (vptr >> 20 << 2 >> 2) (Some APageDirectory)" in exI)
apply (rule conjI, rule refl)
apply (subgoal_tac "ptrFromPAddr x + (xc + ((vptr >> 12) && 0xFF << 2)) && ~~ mask pt_bits = ptrFromPAddr x")
prefer 2
apply (subst add_mask_lower_bits)
apply (simp add: pt_bits_def pageBits_def)
prefer 2
apply simp
apply (clarsimp simp: pt_bits_def pageBits_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2 p_le_0xF_helper)
apply (thin_tac "pda x = t" for x t)
apply (subst (asm) word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask)
apply (erule_tac x="n - 2" in allE)
apply simp
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask word_bits_def)
apply (simp add: add.commute add.left_commute)
apply (rule vs_refs_pdI)
prefer 3
apply (clarsimp simp: word_ops_nth_size word_size nth_shiftr nth_shiftl)
apply (drule test_bit_size)
apply (simp add: word_size)
apply fastforce
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (erule kernel_base_kernel_mapping_slots)
apply clarsimp
apply (subst (asm) mask_add_aligned, simp add: pd_bits_def pageBits_def)+
apply (simp add: shiftr_over_and_dist)
apply (subst (asm) shiftl_shiftr_id, (simp add: word_bits_conv)+, word_bitwise)+
apply (subst (asm) shiftr_mask2, (simp add: pd_bits_def pageBits_def)+)+
apply (simp add: shiftr_mask_eq[where x=vptr and n=20, unfolded word_size, simplified])
apply (drule kernel_base_kernel_mapping_slots, simp)
done
lemma pd_aligned:
"\<lbrakk>pspace_aligned s; page_directory_at pd s\<rbrakk> \<Longrightarrow> is_aligned pd 14"
apply (clarsimp simp: pspace_aligned_def obj_at_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
done
lemma shiftr_shiftl_mask_pd_bits:
"(((vptr :: word32) >> 20) << 2) && mask pd_bits = (vptr >> 20) << 2"
apply (rule iffD2 [OF mask_eq_iff_w2p])
apply (simp add: pd_bits_def pageBits_def word_size)
apply (rule shiftl_less_t2n)
apply (rule shiftr_less_t2n3,
simp_all add: pd_bits_def word_bits_def pageBits_def)
done
lemma triple_shift_fun:
"x >> 20 << 2 >> 2 = (x :: ('a :: len) word) >> 20"
apply (rule word_eqI)
apply (simp add: word_size nth_shiftr nth_shiftl)
apply safe
apply (drule test_bit_size)
apply (simp add: word_size)
done
lemma shiftr_20_unat_ucast:
"unat (ucast (x >> 20 :: word32) :: 12 word) = unat (x >> 20)"
using vptr_shiftr_le_2p[where vptr=x]
apply (simp only: unat_ucast)
apply (rule mod_less)
apply (rule unat_less_power)
apply (simp add: word_bits_def)
apply (simp add: pageBits_def)
done
lemma shiftr_20_less:
"((ucast (x >> 20) :: 12 word) < ucast (y >> 20)) = ((x >> 20 :: word32) < y >> 20)"
"((ucast (x >> 20) :: 12 word) \<le> ucast (y >> 20)) = ((x >> 20 :: word32) \<le> y >> 20)"
by (simp add: word_less_nat_alt word_le_nat_alt shiftr_20_unat_ucast)+
lemma kernel_base_ge_observation:
"(kernel_base \<le> x) = (x && ~~ mask 29 = kernel_base)"
apply (subst mask_in_range)
apply (simp add: kernel_base_def is_aligned_def)
apply (simp add: kernel_base_def)
done
lemma kernel_base_less_observation:
"(x < kernel_base) = (x && ~~ mask 29 \<noteq> kernel_base)"
apply (simp add: linorder_not_le[symmetric] kernel_base_ge_observation)
done
lemma vptr_shifting_helper_magic:
"(x = 0) \<or> (x < 2 ^ 4 \<and> vmsz_aligned (vptr::word32) ARMSuperSection)
\<Longrightarrow> (x << 2) + (vptr >> 20 << 2) = ((vptr + (x << 20)) >> 20 << 2)"
apply (erule disjE, simp_all)
apply (clarsimp simp: vmsz_aligned_def)
apply (subst is_aligned_add_or, assumption)
apply (rule shiftl_less_t2n)
apply simp
apply simp
apply (simp add: shiftl_over_or_dist shiftr_over_or_dist)
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)
apply (simp add: word_bits_def)
apply unat_arith
apply (subst field_simps, rule is_aligned_add_or[where n=6])
apply (intro is_aligned_shiftl is_aligned_shiftr)
apply simp
apply (rule shiftl_less_t2n, simp_all)
done
lemma less_kernel_base_mapping_slots_both:
"\<lbrakk> vptr < kernel_base; is_aligned pd pd_bits;
(x = 0)
\<or> (x < 2 ^ 4 \<and> vmsz_aligned vptr ARMSuperSection) \<rbrakk>
\<Longrightarrow> ucast ((x << 2) + lookup_pd_slot pd vptr && mask pd_bits >> 2)
\<notin> kernel_mapping_slots"
apply (simp add: lookup_pd_slot_def Let_def)
apply (subst field_simps, subst mask_add_aligned, assumption)
apply (subst vptr_shifting_helper_magic)
apply simp
apply (simp add: shiftr_shiftl_mask_pd_bits triple_shift_fun)
apply (simp add: kernel_mapping_slots_def linorder_not_le
shiftr_20_less)
apply (rule le_m1_iff_lt[THEN iffD1,THEN iffD1])
apply (simp add:kernel_base_def)
apply (erule disjE)
apply (drule word_less_sub_1)
apply simp
apply (drule le_shiftr[where n=20])
apply (clarsimp simp :kernel_base_def vmsz_aligned_def)+
apply (drule(1) gap_between_aligned)
apply (simp add:is_aligned_def)
apply simp
apply (rule order.trans[OF le_shiftr])
apply (rule word_plus_mono_right[OF _ is_aligned_no_wrap'[where off = "2^24-1"]])
apply (rule word_less_sub_1)
apply (rule shiftl_less_t2n)
apply simp+
apply (clarsimp dest!:word_less_sub_1)
apply (erule order.trans[OF le_shiftr])
apply simp
done
lemmas less_kernel_base_mapping_slots
= less_kernel_base_mapping_slots_both[where x=0, simplified]
lemma is_aligned_lookup_pd_slot:
"\<lbrakk>is_aligned vptr 24; is_aligned pd 6\<rbrakk>
\<Longrightarrow> is_aligned (lookup_pd_slot pd vptr) 6"
apply (clarsimp simp: lookup_pd_slot_def)
apply (erule aligned_add_aligned)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_shiftr)
apply simp
apply (simp add: word_bits_conv)
done
lemma lookup_pd_slot_eq:
"is_aligned pd pd_bits \<Longrightarrow>
(lookup_pd_slot pd vptr && ~~ mask pd_bits) = pd"
apply (clarsimp simp: lookup_pd_slot_def)
apply (erule conjunct2[OF is_aligned_add_helper])
apply (rule shiftl_less_t2n)
apply (rule shiftr_less_t2n3)
apply (simp_all add: pd_bits_def pageBits_def)
done
lemma is_aligned_lookup_pt_slot_no_fail:
"\<lbrakk>is_aligned vptr 16; is_aligned pt 6\<rbrakk>
\<Longrightarrow> is_aligned (lookup_pt_slot_no_fail pt vptr) 6"
apply (clarsimp simp: lookup_pt_slot_no_fail_def)
apply (erule aligned_add_aligned)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_andI1)
apply (rule is_aligned_shiftr)
apply simp
apply simp
done
lemma lookup_pt_slot_non_empty:
"\<lbrace>valid_vspace_objs and \<exists>\<rhd> pd and page_directory_at pd and pspace_aligned
and K (is_aligned vptr 16 \<and> vptr < kernel_base)\<rbrace>
lookup_pt_slot pd vptr \<lbrace>\<lambda>rv s. [rv , rv + 4 .e. rv + 0x3C] \<noteq> []\<rbrace>, -"
apply (simp add:lookup_pt_slot_def)
apply (wp get_pde_wp| wpc | clarsimp)+
apply (simp add:valid_vspace_objs_def)
apply (drule_tac x = "(lookup_pd_slot pd vptr && ~~ mask pd_bits)" in spec)
apply (erule impE)
apply (subst lookup_pd_slot_eq)
apply (clarsimp simp: obj_at_def)
apply (drule_tac p = pd in pspace_alignedD)
apply simp
apply (simp add:obj_bits_def pageBits_def pd_bits_def)
apply fastforce
apply (drule spec)
apply (erule(1) impE)
apply (clarsimp simp:)
apply (drule_tac x = "(ucast (lookup_pd_slot pd vptr && mask pd_bits >> 2))" in bspec)
apply (drule less_kernel_base_mapping_slots)
apply (clarsimp simp: obj_at_def)
apply (drule_tac p = pd in pspace_alignedD)
apply simp
apply (simp add:obj_bits_def pageBits_def pd_bits_def)
apply simp
apply (clarsimp simp: obj_at_def)
apply (drule_tac p = "(ptrFromPAddr x)" in pspace_alignedD)
apply simp
apply (drule arg_cong[where f = length])
apply (subst (asm) length_upto_enum_step)
apply (rule_tac sz = 6 in is_aligned_no_wrap'[rotated])
apply simp
apply (erule aligned_add_aligned)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_andI1[OF is_aligned_shiftr])
apply simp
apply (simp add:word_bits_conv)
apply (simp add:word_bits_conv)
done
(* FIXME: move *)
lemma pd_bits: "pd_bits = 14"
by (simp add: pd_bits_def pageBits_def)
lemma word_shift_by_n:
"x * (2^n) = (x::'a::len word) << n"
by (simp add: shiftl_t2n)
lemma create_mapping_entries_valid_slots [wp]:
"\<lbrace>valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd and data_at sz (ptrFromPAddr base) and
K (vmsz_aligned base sz \<and> vmsz_aligned vptr sz \<and> vptr < kernel_base
\<and> vm_rights \<in> valid_vm_rights)\<rbrace>
create_mapping_entries base vptr sz vm_rights attrib pd
\<lbrace>\<lambda>m. valid_slots m\<rbrace>, -"
apply (cases sz)
apply (rule hoare_pre)
apply (wp lookup_pt_slot_inv | simp add: valid_slots_def)+
apply (clarsimp simp: vmsz_aligned_def pd_aligned pageBits_def)
apply (rule hoare_pre)
apply (simp add: valid_slots_def largePagePTE_offsets_def pd_bits_def)
apply (wpsimp wp: lookup_pt_slot_inv lookup_pt_slot_non_empty
| simp add: valid_slots_def ball_conj_distrib largePagePTE_offsets_def)+
apply (clarsimp simp: pd_aligned vmsz_aligned_def upto_enum_def upto_enum_step_def
is_aligned_weaken pageBits_def)
apply (clarsimp simp add: valid_slots_def)
apply (rule hoare_pre)
apply wp
apply (clarsimp simp: valid_slots_def)
apply (rule conjI)
apply (simp add: lookup_pd_slot_def Let_def)
apply (fastforce simp: pd_shifting pd_aligned)
apply (simp add: page_directory_pde_at_lookupI)
apply (erule less_kernel_base_mapping_slots)
apply (simp add: pd_aligned pd_bits)
apply simp
apply (clarsimp simp: superSectionPDE_offsets_def)
apply (rule hoare_pre)
apply (clarsimp simp add: valid_slots_def)
apply wp
apply simp
apply (elim conjE)
apply (thin_tac "vmsz_aligned base b" for b)
apply (subgoal_tac "is_aligned pd 14")
prefer 2
apply (clarsimp simp: pd_aligned)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (clarsimp simp: obj_at_def pde_at_def)
apply (subgoal_tac "is_aligned pd pd_bits")
prefer 2
apply (simp add: pd_bits)
apply (rule conjI, simp add: upto_enum_def)
apply (intro allI impI)
apply (subst less_kernel_base_mapping_slots_both,assumption+)
apply (simp add: word_leq_minus_one_le)
apply (simp add: pd_bits vmsz_aligned_def)
apply (frule (1) is_aligned_lookup_pd_slot
[OF _ is_aligned_weaken[of _ 14 6, simplified]])
apply (subgoal_tac "(p<<2) + lookup_pd_slot pd vptr && ~~ mask 14 = pd")
prefer 2
apply (subst add.commute add.left_commute)
apply (subst and_not_mask_twice[where n=6 and m=14, simplified, symmetric])
apply (subst is_aligned_add_helper[THEN conjunct2], simp)
apply (rule shiftl_less_t2n)
apply (rule word_less_sub_le[THEN iffD1], simp+)
apply (erule lookup_pd_slot_eq[simplified pd_bits])
apply (simp add: a_type_simps)
apply (subst add.commute)
apply (fastforce intro!: aligned_add_aligned is_aligned_shiftl_self)
done
lemma is_aligned_addrFromPPtr_n:
"\<lbrakk> is_aligned p n; n \<le> 28 \<rbrakk> \<Longrightarrow> is_aligned (Platform.ARM.addrFromPPtr p) n"
apply (simp add: Platform.ARM.addrFromPPtr_def)
apply (erule aligned_sub_aligned, simp_all)
apply (simp add: physMappingOffset_def physBase_def
kernelBase_addr_def pageBits_def)
apply (erule is_aligned_weaken[rotated])
apply (simp add: is_aligned_def)
done
lemma is_aligned_addrFromPPtr:
"is_aligned p pageBits \<Longrightarrow> is_aligned (Platform.ARM.addrFromPPtr p) pageBits"
by (simp add: is_aligned_addrFromPPtr_n pageBits_def)
lemma is_aligned_ptrFromPAddr_n:
"\<lbrakk>is_aligned x sz; sz\<le> 28\<rbrakk>
\<Longrightarrow> is_aligned (ptrFromPAddr x) sz"
apply (simp add:ptrFromPAddr_def physMappingOffset_def
kernelBase_addr_def physBase_def)
apply (erule aligned_add_aligned)
apply (erule is_aligned_weaken[rotated])
apply (simp add:is_aligned_def)
apply (simp add:word_bits_def)
done
lemma is_aligned_ptrFromPAddr:
"is_aligned p pageBits \<Longrightarrow> is_aligned (ptrFromPAddr p) pageBits"
by (simp add: is_aligned_ptrFromPAddr_n pageBits_def)
lemma pbfs_le_28[simp]:
"pageBitsForSize sz \<le> 28"
by (cases sz; simp)
lemma store_pde_lookup_pd:
"\<lbrace>\<exists>\<rhd> pd and page_directory_at pd and valid_vspace_objs
and (\<lambda>s. valid_asid_table (arm_asid_table (arch_state s)) s)\<rbrace>
store_pde p pde \<lbrace>\<lambda>_. \<exists>\<rhd> pd\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_object_wp)
apply clarsimp
apply (clarsimp simp: obj_at_def)
apply (erule vs_lookupE)
apply (clarsimp simp: vs_asid_refs_def graph_of_def)
apply (drule rtranclD)
apply (erule disjE)
apply clarsimp
apply (rule exI)
apply (rule vs_lookup_atI)
apply simp
apply clarsimp
apply (frule (1) valid_asid_tableD)
apply (frule vs_lookup_atI)
apply (frule (2) stronger_vspace_objsD)
apply (clarsimp simp: obj_at_def a_type_def)
apply (case_tac ao, simp_all, clarsimp)
apply (drule tranclD)
apply clarsimp
apply (drule rtranclD)
apply (erule disjE)
apply clarsimp
apply (rule_tac x=ref in exI)
apply (rule vs_lookup_step)
apply (rule vs_lookup_atI)
apply simp
apply (clarsimp simp: vs_lookup1_def)
apply (clarsimp simp: obj_at_def vs_refs_def graph_of_def)
apply clarsimp
apply (drule (1) vs_lookup_step)
apply (frule (2) stronger_vspace_objsD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (erule obj_atE)+
apply (clarsimp simp: vs_refs_def graph_of_def)
apply (drule bspec, blast)
apply (erule obj_atE)+
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule rtranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (erule obj_atE)+
apply (clarsimp simp: vs_refs_def graph_of_def)
apply (erule_tac x=ab in ballE)
apply (case_tac "pdb ab", simp_all add: pde_ref_def split: if_split_asm)
apply (erule obj_atE)
apply clarsimp
apply (erule disjE)
apply (clarsimp simp: a_type_def)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (erule obj_atE)+
apply (clarsimp simp: vs_refs_def graph_of_def)
done
lemma store_pde_vspace_objs_unmap:
"\<lbrace>valid_vspace_objs
and valid_pde pde
and K (pde_ref pde = None)\<rbrace>
store_pde p pde \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
apply (simp add: store_pde_def)
apply (wp set_pd_vspace_objs_unmap)
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (drule (1) valid_vspace_objsD, fastforce)
apply (simp add:)
apply (clarsimp simp add: obj_at_def vs_refs_def)
apply (rule pair_imageI)
apply (simp add: graph_of_def split: if_split_asm)
done
(* FIXME: remove magic numbers in other lemmas, use in pde_at_aligned_vptr et al *)
lemma lookup_pd_slot_add_eq:
"\<lbrakk> is_aligned pd pd_bits; is_aligned vptr 24; x \<in> set [0 , 4 .e. 0x3C] \<rbrakk>
\<Longrightarrow> (x + lookup_pd_slot pd vptr && ~~ mask pd_bits) = pd"
apply (simp add: pd_bits_def pageBits_def add.commute add.left_commute lookup_pd_slot_def Let_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (subst add_mask_lower_bits, assumption)
prefer 2
apply simp
apply clarsimp
subgoal premises prems for _ n'
proof -
have H: "(0xF::word32) < 2 ^ 4" by simp
from prems show ?thesis
apply (subst (asm) word_plus_and_or_coroll)
apply (rule word_eqI)
apply (thin_tac "is_aligned pd _")
apply (clarsimp simp: word_size nth_shiftl nth_shiftr is_aligned_nth)
subgoal for n
apply (spec "18 + n")
apply (frule test_bit_size[where n="18 + n"])
apply (simp add: word_size)
apply (insert H)[1]
apply (drule (1) order_le_less_trans)
apply (drule bang_is_le)
apply (drule_tac z="2 ^ 4" in order_le_less_trans, assumption)
by (drule word_power_increasing; simp?)
apply simp
apply (clarsimp simp: word_size nth_shiftl nth_shiftr is_aligned_nth)
apply (erule disjE)
apply (insert H)[1]
apply (drule (1) order_le_less_trans)
apply (drule bang_is_le)
apply (drule_tac z="2 ^ 4" in order_le_less_trans, assumption)
apply (drule word_power_increasing; simp?)
apply (spec "18 + n'")
apply (frule test_bit_size[where n="18 + n'"])
by (simp add: word_size)
qed
done
lemma lookup_pd_slot_add:
"\<lbrakk> page_directory_at pd s; pspace_aligned s; is_aligned vptr 24; x \<in> set [0 , 4 .e. 0x3C] \<rbrakk>
\<Longrightarrow> (x + lookup_pd_slot pd vptr && ~~ mask pd_bits) = pd"
apply (clarsimp simp: obj_at_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: pd_bits_def pageBits_def a_type_def
split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (drule (1) lookup_pd_slot_add_eq [rotated])
apply (simp add: pd_bits_def pageBits_def)
apply (simp add: pd_bits_def pageBits_def)
done
lemma vs_lookup_arch_update:
"arm_asid_table (f (arch_state s)) = arm_asid_table (arch_state s) \<Longrightarrow>
vs_lookup (arch_state_update f s) = vs_lookup s"
apply (rule order_antisym)
apply (rule vs_lookup_sub)
apply (clarsimp simp: obj_at_def)
apply simp
apply (rule vs_lookup_sub)
apply (clarsimp simp: obj_at_def)
apply simp
done
lemma vs_lookup_pages_arch_update:
"arm_asid_table (f (arch_state s)) = arm_asid_table (arch_state s) \<Longrightarrow>
vs_lookup_pages (arch_state_update f s) = vs_lookup_pages s"
apply (rule order_antisym)
apply (rule vs_lookup_pages_sub)
apply (clarsimp simp: obj_at_def)
apply simp
apply (rule vs_lookup_pages_sub)
apply (clarsimp simp: obj_at_def)
apply simp
done
lemma vs_lookup_asid_map [iff]:
"vs_lookup (s\<lparr>arch_state := arm_asid_map_update f (arch_state s)\<rparr>) = vs_lookup s"
by (simp add: vs_lookup_arch_update)
lemma vs_lookup_hwasid_table [iff]:
"vs_lookup (s\<lparr>arch_state := arm_hwasid_table_update f (arch_state s)\<rparr>) = vs_lookup s"
by (simp add: vs_lookup_arch_update)
lemma vs_lookup_next_asid [iff]:
"vs_lookup (s\<lparr>arch_state := arm_next_asid_update f (arch_state s)\<rparr>) = vs_lookup s"
by (simp add: vs_lookup_arch_update)
lemma vs_lookup_pages_asid_map[iff]:
"vs_lookup_pages (s\<lparr>arch_state := arm_asid_map_update f (arch_state s)\<rparr>) =
vs_lookup_pages s"
by (simp add: vs_lookup_pages_arch_update)
lemma vs_lookup_pages_hwasid_table[iff]:
"vs_lookup_pages (s\<lparr>arch_state := arm_hwasid_table_update f (arch_state s)\<rparr>) =
vs_lookup_pages s"
by (simp add: vs_lookup_pages_arch_update)
lemma vs_lookup_pages_next_asid[iff]:
"vs_lookup_pages (s\<lparr>arch_state := arm_next_asid_update f (arch_state s)\<rparr>) =
vs_lookup_pages s"
by (simp add: vs_lookup_pages_arch_update)
lemma valid_vspace_objs_arch_update:
"arm_asid_table (f (arch_state s)) = arm_asid_table (arch_state s) \<Longrightarrow>
valid_vspace_objs (arch_state_update f s) = valid_vspace_objs s"
apply (rule iffI)
apply (erule valid_vspace_objs_stateI)
apply (clarsimp simp: obj_at_def)
apply simp
apply simp
apply (erule valid_vspace_objs_stateI)
apply (clarsimp simp: obj_at_def)
apply simp
apply simp
done
lemma store_pte_valid_vspace_objs[wp]:
"\<lbrace>valid_vspace_objs and valid_pte pte\<rbrace>
store_pte p pte
\<lbrace>\<lambda>_. (valid_vspace_objs)\<rbrace>"
unfolding store_pte_def
apply wp
apply clarsimp
apply (unfold valid_vspace_objs_def)
apply (erule_tac x="p && ~~ mask pt_bits" in allE)
apply auto
done
crunch valid_arch [wp]: store_pte valid_arch_state
lemma set_pd_vs_lookup_unmap:
"\<lbrace>valid_vs_lookup and
obj_at (\<lambda>ko. vs_refs_pages (ArchObj (PageDirectory pd)) \<subseteq> vs_refs_pages ko) p\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_vs_lookup\<rbrace>"
apply (simp add: valid_vs_lookup_def pred_conj_def)
apply (rule hoare_lift_Pf2 [where f=caps_of_state])
prefer 2
apply wp
apply (simp add: set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp del: fun_upd_apply del: disjCI
split: kernel_object.splits arch_kernel_obj.splits)
apply (erule allE)+
apply (erule impE)
apply (erule vs_lookup_pages_stateI)
apply (clarsimp simp: obj_at_def split: if_split_asm)
apply simp
apply simp
done
lemma unique_table_caps_pdE:
"\<lbrakk> unique_table_caps cs; cs p = Some cap; cap_asid cap = None;
cs p' = Some cap'; cap_asid cap' = Some v; is_pd_cap cap;
is_pd_cap cap'; obj_refs cap' = obj_refs cap \<rbrakk>
\<Longrightarrow> P"
apply (frule(6) unique_table_caps_pdD[where cs=cs])
apply simp
done
lemma set_pd_table_caps [wp]:
"\<lbrace>valid_table_caps and (\<lambda>s.
(obj_at (empty_table (set (second_level_tables (arch_state s)))) p s \<longrightarrow>
empty_table (set (second_level_tables (arch_state s))) (ArchObj (PageDirectory pd))) \<or>
(\<exists>slot cap. caps_of_state s slot = Some cap \<and> is_pd_cap cap \<and> p \<in> obj_refs cap \<and> cap_asid cap \<noteq> None) \<and>
valid_caps (caps_of_state s) s \<and>
unique_table_caps (caps_of_state s))\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_table_caps\<rbrace>"
unfolding valid_table_caps_def
apply (simp add: pred_conj_def
del: split_paired_All split_paired_Ex imp_disjL)
apply (rule hoare_lift_Pf2 [where f=caps_of_state])
prefer 2
apply wp
apply (unfold set_pd_def set_object_def)
apply (wp get_object_wp)
apply (rule allI, intro impI)
apply (elim exE conjE)
apply (elim allEI)
apply (intro impI, simp)
apply (clarsimp simp: obj_at_def)
apply (erule disjE)
apply (erule(6) unique_table_caps_pdE)
apply (clarsimp simp: is_arch_cap_simps)
apply (simp add: valid_caps_def)
apply (erule_tac x=a in allE, erule allE, erule allE, erule (1) impE)
apply (clarsimp simp: is_arch_cap_simps valid_cap_def)
apply (clarsimp simp: obj_at_def)
done
lemma set_pd_global_objs[wp]:
"\<lbrace>valid_global_objs and valid_global_refs and
valid_arch_state and
(\<lambda>s. (obj_at (empty_table (set (second_level_tables (arch_state s)))) p s
\<longrightarrow> empty_table (set (second_level_tables (arch_state s)))
(ArchObj (PageDirectory pd)))
\<or> (\<exists>slot. cte_wp_at (\<lambda>cap. p \<in> obj_refs cap) slot s))\<rbrace>
set_pd p pd \<lbrace>\<lambda>rv. valid_global_objs\<rbrace>"
apply (simp add: set_pd_def second_level_tables_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp add: valid_global_objs_def valid_vso_at_def
cte_wp_at_caps_of_state second_level_tables_def)
apply (intro conjI)
apply (clarsimp simp: obj_at_def
simp del: valid_vspace_obj.simps)
apply (intro conjI impI)
apply (clarsimp simp del: valid_vspace_obj.simps)
apply (erule disjE)
apply (drule(1) empty_table_is_valid)+
apply (rule valid_vspace_obj_same_type, (simp add: valid_vspace_obj_def)+)
apply (clarsimp simp: a_type_def)
apply (drule (1) valid_global_refsD2)
apply (simp add: cap_range_def global_refs_def)
apply (rule valid_vspace_obj_same_type, simp+)
apply (simp add: a_type_def)
apply (clarsimp simp: obj_at_def)
apply (drule (1) valid_global_refsD2)
apply (simp add: cap_range_def global_refs_def)
apply clarsimp
apply (clarsimp simp: obj_at_def
simp del: valid_vspace_obj.simps)
apply (drule(1) bspec, clarsimp simp: a_type_def)
done
lemma eq_ucast_word12[simp]:
"((ucast (x :: 12 word) :: word32) = ucast y) = (x = y)"
apply safe
apply (drule_tac f="ucast :: (word32 \<Rightarrow> 12 word)" in arg_cong)
apply (simp add: ucast_up_ucast_id is_up_def
source_size_def target_size_def word_size)
done
lemma set_pd_unmap_mappings:
"\<lbrace>valid_kernel_mappings and
obj_at (\<lambda>ko. vs_refs (ArchObj (PageDirectory pd)) \<subseteq> vs_refs ko) p
and obj_at (\<lambda>ko. \<exists>pd'. ko = ArchObj (PageDirectory pd')
\<and> (\<forall>x \<in> kernel_mapping_slots. pd x = pd' x)) p\<rbrace>
set_pd p pd
\<lbrace>\<lambda>rv. valid_kernel_mappings\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_v_ker_map get_object_wp)
apply (clarsimp simp: obj_at_def
split: kernel_object.split_asm
arch_kernel_obj.split_asm)
apply (simp add: vs_refs_def)
subgoal premises prems for s x r x3
apply (cases "x \<in> kernel_mapping_slots")
proof goal_cases
case False
with prems show ?thesis
apply -
apply (drule subsetD)
apply (rule image_eqI[rotated])
apply (rule pde_graph_ofI[rotated, rotated])
apply ((simp;fail)+)[4]
apply (clarsimp simp: valid_kernel_mappings_def
dest!: graph_ofD)
apply (drule bspec, erule ranI)
by (simp add: valid_kernel_mappings_if_pd_def)
next
case True
with prems show ?thesis
apply clarsimp
apply (bspec x)
apply (clarsimp simp: valid_kernel_mappings_def ran_def valid_kernel_mappings_if_pd_def)
apply (erule allE[where x="ArchObj (PageDirectory x3)"])
apply clarsimp
apply (erule impE)
apply (erule exI[where x=p])
apply (erule allE[where x=x], erule allE[where x=r])
by clarsimp+
qed
done
lemma set_pd_asid_map [wp]:
"\<lbrace>valid_asid_map\<rbrace> set_pd p pd \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
apply (simp add: set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp: a_type_def simp del: fun_upd_apply
split: kernel_object.splits
arch_kernel_obj.splits)
apply (clarsimp simp: valid_asid_map_def)
apply (drule bspec, blast)
apply (clarsimp simp: vspace_at_asid_def obj_at_def)
apply (erule vs_lookupE)
apply (rule vs_lookupI, simp)
apply (clarsimp simp: vs_asid_refs_def dest!: graph_ofD)
apply (frule vs_lookup1_trans_is_append)
apply clarsimp
apply (drule rtranclD)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (rule rtrancl_trans)
apply (rule r_into_rtrancl)
apply (rule vs_lookup1I)
apply (clarsimp simp: obj_at_def)
apply (rule conjI, clarsimp)
prefer 2
apply clarsimp
apply (rule refl)
apply clarsimp
apply (clarsimp simp: vs_refs_def)
apply (drule vs_lookup1_trans_is_append)
apply clarsimp
apply assumption
apply (rule refl)
apply (frule vs_lookup1_trans_is_append, clarsimp)
apply (drule rtranclD)
apply (erule disjE, clarsimp)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (drule vs_lookup1_trans_is_append, clarsimp)
done
lemma set_pd_only_idle [wp]:
"\<lbrace>only_idle\<rbrace> set_pd p pd \<lbrace>\<lambda>_. only_idle\<rbrace>"
by (wp only_idle_lift)
lemma set_pd_equal_kernel_mappings_triv:
"\<lbrace>obj_at (\<lambda>ko. \<exists>pd'. ko = (ArchObj (PageDirectory pd'))
\<and> (\<forall>x \<in> kernel_mapping_slots. pd x = pd' x)) p
and equal_kernel_mappings\<rbrace>
set_pd p pd
\<lbrace>\<lambda>rv. equal_kernel_mappings\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_equal_mappings get_object_wp)
apply (clarsimp simp: obj_at_def)
apply (simp add: equal_kernel_mappings_def obj_at_def)
done
lemma set_pd_global_mappings[wp]:
"\<lbrace>\<lambda>s. valid_global_vspace_mappings s \<and> valid_global_objs s
\<and> p \<notin> global_refs s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>rv. valid_global_vspace_mappings\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_global_vspace_mappings get_object_wp)
apply simp
done
lemma set_pd_kernel_window[wp]:
"\<lbrace>pspace_in_kernel_window\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. pspace_in_kernel_window\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_pspace_in_kernel_window[THEN hoare_set_object_weaken_pre])
done
lemma set_pd_device_region[wp]:
"\<lbrace>pspace_respects_device_region\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. pspace_respects_device_region\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_pspace_respects_device_region[THEN hoare_set_object_weaken_pre])
done
lemma set_pd_caps_kernel_window[wp]:
"\<lbrace>cap_refs_in_kernel_window\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. cap_refs_in_kernel_window\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_cap_refs_in_kernel_window[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
done
lemma set_pd_caps_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_cap_refs_respects_device_region[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
done
lemma set_pd_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_pd p pt \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_valid_ioc_no_caps[THEN hoare_set_object_weaken_pre]
simp: a_type_def is_tcb is_cap_table)
done
lemma set_pd_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_pd p pt \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_wp_strong)
apply clarify
apply (erule valid_machine_state_heap_updI)
apply (fastforce simp: a_type_def obj_at_def
split: Structures_A.kernel_object.splits arch_kernel_obj.splits)+
done
(* FIXME: Move to Invariants_A *)
lemma vs_refs_pages_subset: "vs_refs ko \<subseteq> vs_refs_pages ko"
apply (clarsimp simp: vs_refs_pages_def vs_refs_def graph_of_def pde_ref_def pde_ref_pages_def
split: kernel_object.splits arch_kernel_obj.splits pde.splits)
subgoal for "fun" a b
using
imageI[where A="{(x, y). (if x \<in> kernel_mapping_slots then None else pde_ref_pages (fun x)) = Some y}"
and f="(\<lambda>(r, y). (VSRef (ucast r) (Some APageDirectory), y))" and x="(a,b)"]
by (clarsimp simp: pde_ref_def pde_ref_pages_def split: if_splits pde.splits)+
done
lemma vs_refs_pages_subset2:
"\<lbrakk>vs_refs_pages ko \<subseteq> vs_refs_pages ko';
(\<forall>ao. (ko = ArchObj ao) \<longrightarrow> valid_vspace_obj ao s);
(\<forall>ao'. (ko' = ArchObj ao') \<longrightarrow> valid_vspace_obj ao' s)\<rbrakk>
\<Longrightarrow> vs_refs ko \<subseteq> vs_refs ko'"
apply clarsimp
apply (drule (1) subsetD[OF _ subsetD[OF vs_refs_pages_subset]])
apply (case_tac ko; simp add: vs_refs_def)
subgoal for fstref b arch_kernel_obj
apply (cases arch_kernel_obj; simp add: vs_refs_def)
apply (cases ko'; simp add: vs_refs_pages_def)
subgoal for \<dots> arch_kernel_obja
by (cases arch_kernel_obja;clarsimp)
apply (cases ko'; simp add: vs_refs_pages_def)
subgoal for \<dots> arch_kernel_obja
apply (cases arch_kernel_obja; clarsimp)
apply (clarsimp simp: graph_of_def split: if_splits)
subgoal for "fun" a
apply (cut_tac
imageI[where
A="{(x, y). (if x \<in> kernel_mapping_slots then None else pde_ref (fun x)) = Some y}"
and f="(\<lambda>(r, y). (VSRef (ucast r) (Some APageDirectory), y))" and x="(a,b)"])
apply simp
apply (clarsimp simp: pde_ref_def pde_ref_pages_def
split: pde.splits)
apply (drule bspec,simp)+
apply (simp add: valid_pde_def)
apply (clarsimp simp: data_at_def obj_at_def a_type_def)
apply (drule bspec, simp split: if_splits)+
by (clarsimp simp: obj_at_def a_type_def data_at_def)
done
done
done
lemma set_pd_invs_unmap:
"\<lbrace>invs and (\<lambda>s. \<forall>i. wellformed_pde (pd i)) and
(\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageDirectory pd) s) and
obj_at (\<lambda>ko. vs_refs_pages (ArchObj (PageDirectory pd)) \<subseteq> vs_refs_pages ko) p and
obj_at (\<lambda>ko. vs_refs (ArchObj (PageDirectory pd)) \<subseteq> vs_refs ko) p and
obj_at (\<lambda>ko. \<exists>pd'. ko = ArchObj (PageDirectory pd')
\<and> (\<forall>x \<in> kernel_mapping_slots. pd x = pd' x)) p and
(\<lambda>s. p \<notin> global_refs s) and
(\<lambda>s. (obj_at (empty_table (set (second_level_tables (arch_state s)))) p s \<longrightarrow>
empty_table (set (second_level_tables (arch_state s))) (ArchObj (PageDirectory pd))))\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def)
apply (rule hoare_pre)
apply (wp set_pd_valid_objs set_pd_iflive set_pd_zombies
set_pd_zombies_state_refs set_pd_valid_mdb
set_pd_valid_idle set_pd_ifunsafe set_pd_reply_caps
set_pd_valid_arch set_pd_valid_global set_pd_cur
set_pd_reply_masters valid_irq_node_typ set_pd_zombies_state_hyp_refs
set_pd_vspace_objs_unmap set_pd_vs_lookup_unmap
valid_irq_handlers_lift
set_pd_unmap_mappings set_pd_equal_kernel_mappings_triv)
apply (clarsimp simp: cte_wp_at_caps_of_state valid_arch_caps_def
del: disjCI)
done
lemma store_pde_invs_unmap:
"\<lbrace>invs and valid_pde pde and (\<lambda>s. wellformed_pde pde)
and K (ucast (p && mask pd_bits >> 2) \<notin> kernel_mapping_slots)
and (\<lambda>s. p && ~~ mask pd_bits \<notin> global_refs s)
and K (pde = InvalidPDE)\<rbrace>
store_pde p pde \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: store_pde_def del: split_paired_Ex)
apply (wp set_pd_invs_unmap)
apply (clarsimp simp del: split_paired_Ex del: exE)
apply (rule conjI)
apply (drule invs_valid_objs)
apply (fastforce simp: valid_objs_def dom_def obj_at_def valid_obj_def)
apply (rule conjI)
apply clarsimp
apply (drule (1) valid_vspace_objsD, fastforce)
apply simp
apply (rule conjI)
apply (clarsimp intro!: pair_imageI
simp: obj_at_def vs_refs_def vs_refs_pages_def map_conv_upd graph_of_def pde_ref_def pde_ref_pages_def
split: if_split_asm)+
apply (clarsimp simp: empty_table_def)
apply (cases pde, (auto simp: pde_ref_def valid_pde_mappings_def split:if_split_asm))
done
lemma store_pde_state_refs_of:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace> store_pde ptr val \<lbrace>\<lambda>rv s. P (state_refs_of s)\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp elim!: rsubst[where P=P] intro!: ext)
apply (clarsimp simp: state_refs_of_def obj_at_def)
done
lemma store_pde_state_hyp_refs_of:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace> store_pde ptr val \<lbrace>\<lambda>rv s. P (state_hyp_refs_of s)\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp elim!: rsubst[where P=P] intro!: ext)
apply (clarsimp simp: state_hyp_refs_of_def obj_at_def)
done
lemma valid_asid_map_next_asid [iff]:
"valid_asid_map (s\<lparr>arch_state := arm_next_asid_update f (arch_state s)\<rparr>) =
valid_asid_map s"
by (simp add: valid_asid_map_def vspace_at_asid_def)
lemma pspace_respects_device_region_dmo:
assumes valid_f: "\<And>P. \<lbrace>\<lambda>ms. P (device_state ms)\<rbrace> f \<lbrace>\<lambda>r ms. P (device_state ms)\<rbrace>"
shows "\<lbrace>pspace_respects_device_region\<rbrace>do_machine_op f\<lbrace>\<lambda>r. pspace_respects_device_region\<rbrace>"
apply (clarsimp simp: do_machine_op_def gets_def select_f_def simpler_modify_def bind_def valid_def
get_def return_def)
apply (drule_tac P1 = "(=) (device_state (machine_state s))" in use_valid[OF _ valid_f])
apply auto
done
lemma cap_refs_respects_device_region_dmo:
assumes valid_f: "\<And>P. \<lbrace>\<lambda>ms. P (device_state ms)\<rbrace> f \<lbrace>\<lambda>r ms. P (device_state ms)\<rbrace>"
shows "\<lbrace>cap_refs_respects_device_region\<rbrace>do_machine_op f\<lbrace>\<lambda>r. cap_refs_respects_device_region\<rbrace>"
apply (clarsimp simp: do_machine_op_def gets_def select_f_def simpler_modify_def bind_def valid_def
get_def return_def)
apply (drule_tac P1 = "(=) (device_state (machine_state s))" in use_valid[OF _ valid_f])
apply auto
done
lemma machine_op_lift_device_state[wp]:
"\<lbrace>\<lambda>ms. P (device_state ms)\<rbrace> machine_op_lift f \<lbrace>\<lambda>_ ms. P (device_state ms)\<rbrace>"
by (clarsimp simp: machine_op_lift_def NonDetMonad.valid_def bind_def
machine_rest_lift_def gets_def simpler_modify_def get_def return_def
select_def ignore_failure_def select_f_def
split: if_splits)
crunches invalidateLocalTLB_ASID, invalidateLocalTLB_VAASID, setHardwareASID, isb, dsb,
set_current_pd, storeWord, cleanByVA_PoU, cleanL2Range
for device_state_inv[wp]: "\<lambda>ms. P (device_state ms)"
(simp: writeTTBR0_def
ignore_del: invalidateLocalTLB_ASID invalidateLocalTLB_VAASID setHardwareASID isb
dsb storeWord cleanByVA_PoU cleanL2Range)
lemma as_user_inv:
assumes x: "\<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>x. P\<rbrace>"
shows "\<lbrace>P\<rbrace> as_user t f \<lbrace>\<lambda>x. P\<rbrace>"
proof -
have P: "\<And>a b input. (a, b) \<in> fst (f input) \<Longrightarrow> b = input"
by (rule use_valid [OF _ x], assumption, rule refl)
have Q: "\<And>s ps. ps (kheap s) = kheap s \<Longrightarrow> kheap_update ps s = s"
by simp
show ?thesis
apply (simp add: as_user_def gets_the_def assert_opt_def set_object_def get_object_def split_def)
apply wp
apply (clarsimp dest!: P)
apply (subst Q)
prefer 2
apply assumption
apply (rule ext)
apply (simp add: get_tcb_def)
apply (case_tac "kheap s t"; simp)
apply (case_tac a; simp)
apply (clarsimp simp: arch_tcb_context_set_def arch_tcb_context_get_def)
done
qed
lemma user_getreg_inv[wp]:
"\<lbrace>P\<rbrace> as_user t (getRegister r) \<lbrace>\<lambda>x. P\<rbrace>"
apply (rule as_user_inv)
apply (simp add: getRegister_def)
done
end
end
|
```python
from sympy import symbols, init_printing, Function, Sum, Eq, Matrix,cos, sin, pi, I, exp
#Paper T.Lubin 2010b
#Machine with no load rotor, rotor slots, current sheet on stator side
init_printing()
R_1, R_2, R_3, beta, mu_0 = symbols('R_1, R_2, R_3, beta, mu_0', real = 'true', positive = 'true', nonzero ='true')
theta_i = symbols('theta_i')
#Declaration of the motor geometry
Z_r = symbols('Z_r', integer = 'true', positive = 'true', nonzero ='true') #Number of rotor slots
#Declaration of the space variables
r, t = symbols('r t', real = 'true', positive = 'true')
theta = symbols('theta', real ='true')
#Declaration of the discretizing integers for stator and rotor slots
i = symbols('i', integer='true', positive = 'true', nonzero='true')
#Declaration of the magnetic potentials in the 5 areas
P = Function("P")
E = Function("E")
```
```python
##AREA I : AIR GAP
#Dummy variable(s) of summation
n, N, k, K = symbols('n N k K', integer = 'true', positive = 'true', nonzero ='true')
#Integration constants
A_I0, A_In, B_In, C_In, D_In = symbols('A_I0, A_In, B_In, C_In, D_In', commutative=False)
#Expression of the potential
AzI_cst = A_I0
AzI_exp = A_In*R_2/n*P(n, r, R_3)/E(n, R_2, R_3) - B_In*R_3/n*P(n, r, R_2)/E(n, R_2, R_3)
expn = exp(I*(n*theta + k*t))
AzI = AzI_cst + Sum(Sum(AzI_exp*expn,(n,1,N)), (k,1,K))
#Expression of the field
#BrI_cst, BrI_cos, BrI_sin = compute_Br(AzI_cst, AzI_cos, AzI_sin, n, r, theta)
#BrI = BrI_cst + Sum(BrI_cos*cosn+BrI_sin*sinn,(n,1,N))
#BthetaI_cst, BthetaI_cos, BthetaI_sin = compute_Btheta(AzI_cst, AzI_cos, AzI_sin, r)
#BthetaI = BthetaI_cst + Sum(BthetaI_cos*cosn+BthetaI_sin*sinn,(n,1,N))
fAzI = Function('Az_I')(r,theta,t)
fBrI = Function('Br_I')(r,theta,t)
fBthetaI = Function('Btheta_I')(r,theta)
Eq(fAzI, AzI) #, Eq(fBrI, BrI), Eq(fBthetaI, BthetaI)
```
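The next cell calls `compute_Br` and `compute_Btheta`, which are not defined in this excerpt. A minimal sketch of what they might compute, assuming the standard polar-coordinate relations B_r = (1/r) dA_z/dtheta and B_theta = -dA_z/dr, applied term by term to a potential split into constant, cosine, and sine parts as in the cells above:

```python
from sympy import diff

#Hypothetical sketch of the missing notebook helpers (not from the original).
def compute_Br(Az_cst, Az_cos, Az_sin, w, r, theta):
    #B_r = (1/r)*dAz/dtheta: differentiating the angular factors swaps the
    #cos/sin parts and multiplies by w; theta is kept for signature compatibility
    Br_cst = 0            #the constant term has no theta dependence
    Br_cos = w/r*Az_sin   #from d/dtheta sin(w*theta) = w*cos(w*theta)
    Br_sin = -w/r*Az_cos  #from d/dtheta cos(w*theta) = -w*sin(w*theta)
    return Br_cst, Br_cos, Br_sin

def compute_Btheta(Az_cst, Az_cos, Az_sin, r):
    #B_theta = -dAz/dr, applied term by term
    return -diff(Az_cst, r), -diff(Az_cos, r), -diff(Az_sin, r)
```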
```python
##AREA i : ROTOR SLOT
#Dummy variable(s) of summation
k, K = symbols('k, K', integer = 'true', nonzero = 'true')
#Integration constants
A_i0, A_ik = symbols('A_i0, A_ik', commutative=False)
#Expression of the potential
Azi_cst = A_i0
Azi_cos = A_ik*P(k*pi/beta, R_1, r)/P(k*pi/beta, R_1, R_2)
Azi_sin = 0
coski = cos(k*pi/beta*(theta-theta_i+beta/2))
sinki = sin(k*pi/beta*(theta-theta_i+beta/2))
Azi = Azi_cst + Sum(Azi_cos*coski,(k,1,K))
#Expression of the field
Bri_cst, Bri_cos, Bri_sin = compute_Br(Azi_cst, Azi_cos, Azi_sin, k*pi/beta, r, theta)
Bri = Bri_cst + Sum(Bri_cos*coski+Bri_sin*sinki,(k,1,K))
Bthetai_cst, Bthetai_cos, Bthetai_sin = compute_Btheta(Azi_cst, Azi_cos, Azi_sin, r)
Bthetai = Bthetai_cst + Sum(Bthetai_cos*coski+Bthetai_sin*sinki,(k,1,K))
fAzi = Function('Az_i')(r,theta)
fBri = Function('Br_i')(r,theta)
fBthetai = Function('Btheta_i')(r,theta)
```
```python
Potentials = Matrix([Eq(fAzI, AzI), Eq(fAzi, Azi)])
#NB: BrI/BthetaI (and the AzI_cos/AzI_sin, BthetaI_cos/BthetaI_sin terms used
#below) come from the cos/sin formulation whose computation is commented out
#in the air-gap cell above; they must be defined before this cell will run.
Fields = Matrix([Eq(fBrI, BrI), Eq(fBthetaI, BthetaI), Eq(fBri, Bri), Eq(fBthetai, Bthetai)])
#Current sheet
p, m, M = symbols('p, m, M', integer = 'true', nonzero = 'true')
fK = Function('K')(theta)
K_m, alpha = symbols('K_m, alpha')
K_cos = K_m
cosm = cos(m*p*(theta-alpha))
K = Sum(K_cos*cosm, (m,1,M)) #NB: rebinds K from the summation bound to the current sheet series
## RESULTING EQUATIONS
Csts = Matrix([A_In, B_In, C_In, D_In, A_ik])
var = [n, n, n, n, (k, i)]
##General integrals to compute
fI_cosni, fI_sinni = symbols('I_cosni, I_sinni', commutative = False)
fI_cosksinni, fI_coskcosni = symbols('I_cosksinni, I_coskcosni', commutative = False)
##CONDITION A.11 = A.9
A_11 = Eq(BthetaI_cos.subs(r, R_2), 1/pi*(Bthetai_cst.subs(r, R_2)*fI_cosni +Bthetai_cos.subs(r, R_2)*fI_coskcosni))
##CONDITION A.7
A_7 = Eq(B_In, mu_0*K_m*cos(m*p*alpha))
##CONDITION A.12 = A.10
A_12 = Eq(BthetaI_sin.subs(r, R_2), 1/pi*(Bthetai_cst.subs(r, R_2)*fI_sinni +Bthetai_cos.subs(r, R_2)*fI_cosksinni))
##CONDITION A.8
A_8 = Eq(D_In, mu_0*K_m*sin(m*p*alpha))
##CONDITION A.13
A_13 = Eq(A_ik, 2/beta*((A_In*R_2/n*P(n, R_2, R_3)/E(n, R_2, R_3) + B_In*R_3/n*2/E(n, R_3, R_2))*fI_coskcosni + (C_In*R_2/n*P(n, R_2, R_3)/E(n, R_2, R_3) + D_In*R_3/n*2/E(n, R_3, R_2))*fI_cosksinni))
A_13bis = Eq(Azi_cos.subs(r, R_2), 2/beta*(AzI_cos.subs(r, R_2)*fI_coskcosni + AzI_sin.subs(r, R_2)*fI_cosksinni))
SetEqs = Matrix([A_11, A_7, A_12, A_8, A_13])
Mat, Vect, Index = get_System(var, var, Csts, SetEqs) #get_System: helper defined elsewhere in the notebook, presumably assembling the linear system for the integration constants
#I_coskcosni = computeInt_coscos(k*pi/beta, -theta_i + beta/2, n, 0, theta_i - beta/2, theta_i + beta/2)
#I_cosksinni = computeInt_cossin(k*pi/beta, -theta_i + beta/2, n, 0, theta_i - beta/2, theta_i + beta/2)
#I_coskcosni = computeInt_coscos(k*pi/beta, -theta_i, n, 0, theta_i, theta_i + beta)
#I_cosksinni = computeInt_cossin(k*pi/beta, -theta_i, n, 0, theta_i, theta_i + beta)
#def P(n,x,y) :
#
# return (x/y)**n + (y/x)**n
#
#def E(n,x,y) :
#
# return (x/y)**n - (y/x)**n
#
#P_n_R2_R3 = P(n, R_2, R_3)
#E_n_R2_R3 = E(n, R_2, R_3)
#E_n_R3_R2 = E(n, R_3, R_2)
#E_k_R1_R2 = E(k*pi/beta, R_1, R_2)
#P_k_R1_R2 = P(k*pi/beta, R_1, R_2)
#Current sheet Fourier series expansion
#I1 = computeInt_coscos(m*p, -alpha, n, 0, 0,2*pi)
#I2 = computeInt_coscos(m*p, -alpha, m*p, 0, 0,2*pi)
```
|
Trujillo was officially dictator only from 1930 to 1938, and from 1942 to 1952, but remained in effective power throughout the entire period. Though his regime was broadly nationalist, Daniel <unk> comments that he had "no particular ideology" and that his economic and social policies were basically progressive.
|
What Impact Will a 9% Drop in Profits Have On Your Organization?
Unless your Supply Management organization takes it to the next level, your company is facing a 9% drop in corporate profits this year due to rising prices and other inflationary pressure, according to a recent Hackett Group study. For a typical Global 1000 company with $27.8 billion in revenue, Hackett’s study estimated that commodity and offshore labor inflation will drive a $150 million per year hit to the bottom line. Ouch!
Why? While most companies are now able to effectively anticipate commodity price increases, more than 60% of companies surveyed by Hackett in the recent study have not been successful at mitigating these cost increases. The reality is that few executives have experienced significant inflation, which is now at levels not seen in 30 years (when inflation rates hovered around 13% back in 1981).
And while inflation may not yet be at 13%, it is bad. Not only do respondents to the Hackett study expect the rate of inflation for commodities overall to rise by more than 30%, to 6.3% a year, but commodity price volatility has increased nearly 60% since before the recession. Making matters worse, at the same time, due to the talent crunch, the rate for internal labor is expected to more than triple from 0.7% to 2.2% and the rate of inflation for external labor is expected to more than double from 1.2% to 3%.
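A quick sanity check shows how a $150 million hit squares with the 9% headline figure, assuming a net margin of roughly 6% (my assumption, not a number from the study):

```python
revenue = 27.8e9   # typical Global 1000 revenue per the Hackett study
hit = 150e6        # estimated annual inflation hit to the bottom line
margin = 0.06      # assumed net margin (hypothetical, not from the study)

profit = revenue * margin        # ~$1.67B baseline profit
print(f"{hit / profit:.1%}")     # 9.0% -- consistent with the headline figure
```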
This entry was posted in Analyst, Economics on June 30, 2011 by thedoctor.
Regular readers of SI will know the importance of good should-cost modeling (which is also great for negotiations) as well as good market intelligence (which has dimensions and is valuable in a down economy) in cost reduction and avoidance. And while both should-cost modeling and market intelligence have a number of critical requirements that must be met for success, they both have one key requirement in common — good data. But where do you get good data? Certainly not from supplier bids! A new supplier is going to bid what it thinks it can get, not what the actual price is. Market indices from governments and professional associations? Better, but they will typically be at least a month or so behind. Trade associations that track and monitor prices on a daily basis or stock markets that trade the commodity? Great — but do you have the IT skills to integrate the feeds? And are you going to do it for the dozens of raw materials and commodities you need to build your should-cost models?
To help their clients understand the data, Mintec provides a data analysis package, called Datagain, which can be used to import, graph, analyze, and compare different line items (such as the petrol price in the UK and the petrol price in Australia, normalized to US dollars). A user can graph any set of series, against any frequency, using any (currency and unit) conversions, for any date range she chooses. She can also normalize or index this data using a custom formula, factor in seasonality, and plot trends. She can also break the series down across two or four graphs and/or plot specific subseries, against different projections, to see how the price might trend over time under different assumptions. The normalization / indexing equations can use all of the standard algebraic operators and be defined over any set of variables, including user-defined variables, that the user chooses.
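To make the kind of normalization and indexing described above concrete, here is an illustrative sketch in pandas. This is not Mintec’s actual Datagain API, and the prices and FX rates are invented; it simply shows the currency conversion and base-period indexing a user would set up:

```python
import pandas as pd

# Hypothetical monthly petrol prices (local currency per litre) and FX rates;
# in practice these would come from the subscribed data feeds.
months = pd.period_range("2011-01", periods=3, freq="M")
uk_gbp = pd.Series([1.30, 1.32, 1.35], index=months)
au_aud = pd.Series([1.40, 1.38, 1.45], index=months)
gbp_usd = pd.Series([1.58, 1.60, 1.62], index=months)  # USD per GBP
aud_usd = pd.Series([0.99, 1.01, 1.06], index=months)  # USD per AUD

# Normalize both series to US dollars, then index to 100 at the base period
indexed = pd.DataFrame({"UK": uk_gbp * gbp_usd, "AU": au_aud * aud_usd})
indexed = indexed / indexed.iloc[0] * 100   # base period = 100
print(indexed.round(1))
```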
If the user is not sophisticated at trend analysis, or does not want to do it, Mintec also offers Benchmarking and Market (Intelligence) Report services that do a deep dive into a particular raw material, commodity, or service and discuss recent, current, and projected pricing, subject to the state of the market and the dominant factors at play. These by-request reports complement the monthly market reports and commodity fact sheets that track the major commodities and markets and their relative month-over-month changes for buyers who want to look at the bigger picture. If the user wants to learn more about Datagain, analysis, and should-cost modeling, Mintec also provides on-demand out-of-the-box and customized training sessions as well as quarterly newsletters and occasional articles.
It’s a huge amount of data that comes at a very low price point. Most customers pay less than £100K for access to the data they need, when they need it, updated as often as they like. Moreover, medium-sized businesses can get basic access (to the datafeeds) and access to the desktop Datagain tool for as little as £10K a year. Large enterprises will probably want the on-line hosted applet version (at the higher price-point) that runs through the browser on a hosted database that is accessible anywhere, anytime, and always up to date. While it is more expensive, it’s still cheap compared to what the organization will be paying for their ERP solution (and much more valuable from a cost avoidance perspective).
This entry was posted in Best Practices, Market Intelligence on June 30, 2011 by thedoctor.
TMS Requires $100 Million, Does ERP Require $1 Billion?
Ben Pivar, Vice President and North American Supply Chain Lead for Capgemini, says that the economics of installing a Transportation Management Systems (TMS) package on a client server, for example, doesn’t really work until you have nearly $100 million in freight spend, and that’s why on-demand is so popular in that space.
SI has to agree. Unless a firm has tens of millions in freight spend, the costs of installation, maintenance, and usage tend to dwarf the benefits of using a TMS system. However, what’s even more important to note is that enterprise ERP (from a top vendor) is, on average, at least five, if not (usually) ten times, more expensive to install, integrate, maintain, and use than TMS. This would seem to indicate that the economics of traditional ERP don’t make sense unless your company has $1 Billion in spend, or at least $1 Billion in revenue. In other words, unless you’re a member of the Fortune 2000 or Global 3000, traditional end-to-end on-premise enterprise ERP is probably not for you. And it would appear that Oracle, one of the largest players, tends to agree. Why do you think it has advertisements stating it has 98% of the Fortune 500? It’s not just because the Fortune X, its target market, provides it with its biggest deals. It’s because Oracle also understands that unless a company has reached a critical mass, given the cost of the system, the company won’t get the advertised return (which is a key to keeping the company as a high-paying customer year after year).
However, every organization needs a good transaction store and data repository, as analysis is key to supply management success. So what does this mean if you’re not one of the lucky ones? Don’t look at a traditional on-premise end-to-end ERP from one of the big boys. Look at either a newer, smaller, slimmed down offering from a smaller player, possibly based on an open-source solution (like Compiere), a suite from a provider that maintains its own transaction store, or a newer, slimmed down, SaaS offering from a traditional provider that can integrate with some BoB solutions in the cloud and offer an effective hybrid solution. Just don’t go for the billion-dollar solution, because your organization likely won’t get a return from the millions it will cost.
This entry was posted in Inventory, Logistics, Technology on June 29, 2011 by thedoctor.
Check out the live debate in two hours (@ 2pm EDT) between Paul Martyn, VP of Supply Strategy at BravoSolution, and Howard Coleman, principal of MCA Associates, to find out.
This entry was posted in Uncategorized on June 29, 2011 by thedoctor.
As per yesterday’s post, it’s been less than five months since we last checked in with Trade Extensions, who had traded up to a Fact Sheet User Interface and added a slew of new features, including improved RFI support, multi-dimensional rankings in e-Negotiation, Google Earth integration, new incumbent rules, and an OLAP foundation to reporting, including the implementation of a new n-way comparison report. Since then, Trade Extensions has been on a tear to add new functionality as fast as it can to make the platform not only one of the most powerful expressive bidding optimization platforms on the planet, but also one of the easiest to use — listening to its users (which include the Fortune 1000) and adding features and functions that make an average buyer’s life easier, taking usability to a whole new level yet again. And while earth-shattering technology improvements are cool, it is usability that is the ultimate key to adoption, use, and, ultimately, cost avoidance and reduction in your sourcing organization.
Not only are there new rules that allow partial awards to be fixed based upon existing scenarios, but the number of constraint categories has doubled. While there were only general and incumbent constraints in the past, there are now entire categories of scenario reference rules and post-processing rules. With respect to scenario reference rules, not only can allocations be kept, but bids can be favoured or penalized as well. The post-processing rules are also quite useful. Allocations can automatically be rounded, and allocations that don’t meet a minimum number of units can be removed (or re-assigned to the supplier who meets a minimum allocation with the lowest total cost).
The buyer now has fine-grained control over what the supplier sees, and can even mix feedback types. For example, if the buyer only wants the top three suppliers to know they are top three, but suppliers four to six to know their exact rank, they can specify that specific rank starts at bidder four, and the top bidders default to “top 3”. In addition, if the supplier does not meet a minimum bid increment, which can be defined in a number of ways (including, minimum dollar or % decrease over last bid), the supplier gets a nice red error that the bid is not acceptable AND a message indicating the minimum increment required. Finally, and this is really cool, the user can define custom color-coded bid feedback fields based on dynamic formulas that now only let the user know where they rank, but how competitive their bid is (against the current bids from the competition) in English using a buyer defined scale such as “Competitive”, “Slightly Competitive”, “Not Competitive”, and “Not Acceptable”.
Plus, the buyer can now chat with users online in an integrated IM client, and immediately see who is online when they log in as it is a widget on their project management dashboard.
The “dashboards” for RFX and auction phases have also improved. The summary, bidder summary, and lot summary are now completely customizeable by the user, support custom fields, and user-defined colour codings in the rankings. In addition, there is integrated show/hide, drill-down functionality, and customizeable pop-up (bid, trend, and bidder activity) charts where a user can select one, some, or all of the rows in each report.
All and all, it’s a lot of new functionality in a short time frame that makes the tool extremely useable by an average buyer.
This entry was posted in Decision Optimization, Spend Analysis, Technology on June 29, 2011 by thedoctor. |
[STATEMENT]
lemma bin_last_last: "odd w \<longleftrightarrow> last (bin_to_bl (Suc n) w)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. odd w = last (bin_to_bl (Suc n) w)
[PROOF STEP]
by (simp add: bin_to_bl_def) (auto simp: bin_to_bl_aux_alt) |
using Beancount
using Dates
using Test
using Decimals
"Extract type of transaction."
typ(::Transaction{T}) where T = T
@testset "Loading Files" begin
# Load test file
entries, errors, options = Beancount.load_file("testfile.beancount")
# Check loaded data and options
@test length(entries) == 8
@test length(errors) == 0
@test options["title"] == "Beancount Example Ledger"
@test options["operating_currency"] == ["EUR"]
# Parse what can be parsed
parsed = from_python.(entries)
# Check parsed transactions and postings
@test parsed[5].date == Date(2007,12,31)
@test parsed[5] |> typ == pad
@test parsed[5].description == "(Padding inserted for Balance of 1412.24 EUR for difference 1412.24 EUR)"
@test parsed[5].payee == nothing
@test parsed[5].postings[1].account == "Assets:TestAccount"
@test parsed[5].postings[1].value == 1412.24
@test parsed[5].postings[1].currency == "EUR"
@test parsed[5].postings[2].account == "Equity:Opening-Balances"
@test parsed[5].postings[2].value == -1412.24
@test parsed[5].postings[2].currency == "EUR"
@test parsed[7].date == Date(2008,01,01)
@test parsed[7] |> typ == txn
@test parsed[7].description == "Else"
@test parsed[7].payee == "Something"
@test parsed[7].postings[1].account == "Assets:TestAccount"
@test parsed[7].postings[1].value == -30
@test parsed[7].postings[1].currency == "EUR"
@test parsed[7].postings[2].account == "Expenses:Stuff"
@test parsed[7].postings[2].value == 5562
@test parsed[7].postings[2].currency == "USD"
@test parsed[7].tags == Set(["tagA", "tagB"])
@test parsed[7].links == Set(["linkA"])
end
@testset "Writing Files" begin
# Write Transactions
tx = Transaction{txn}(date=today(),description="This is the description",tags=Set(["foo", "bar"]))
push!(tx,Posting(account="Equity:Opening-Balances"))
push!(tx,Posting(account="Assets:TestAccount",value=decimal("100.3")))
open("test.beancount","w") do f
println(f,"1970-01-01 open Equity:Opening-Balances")
println(f,"1970-01-01 open Assets:TestAccount")
println(f,tx)
end
@test isfile("test.beancount")
entries, errors, options = Beancount.load_file("test.beancount")
isfile("test.beancount") && rm("test.beancount")
@test length(errors) == 0
parsed = from_python.(entries)
@test parsed[end] == tx
end
|
theory "Yahalom2_cert_auto"
imports
"../ESPLogic"
begin
role I
where "I =
[ Send ''1'' {| sLAV ''I'', sLN ''ni'' |}
, Recv ''3'' {| sLMV ''nr'',
Enc {| sLC ''TT31'', sLAV ''R'', sLMV ''kir'', sLN ''ni'' |}
( sK ''I'' ''S'' ),
sLMV ''Ticket''
|}
, Send ''4'' {| sLMV ''Ticket'',
Enc {| sLC ''TT4'', sLMV ''nr'' |} ( sLMV ''kir'' )
|}
]"
role R
where "R =
[ Recv ''1'' {| sLAV ''I'', sLMV ''ni'' |}
, Send ''2'' {| sLAV ''R'', sLN ''nr'',
Enc {| sLC ''TT1'', sLAV ''I'', sLMV ''ni'' |} ( sK ''R'' ''S'' )
|}
, Recv ''4'' {| Enc {| sLC ''TT32'', sLAV ''I'', sLMV ''kir'',
sLN ''nr''
|}
( sK ''R'' ''S'' ),
Enc {| sLC ''TT4'', sLN ''nr'' |} ( sLMV ''kir'' )
|}
]"
role S
where "S =
[ Recv ''2'' {| sLAV ''R'', sLMV ''nr'',
Enc {| sLC ''TT1'', sLAV ''I'', sLMV ''ni'' |} ( sK ''R'' ''S'' )
|}
, Send ''3'' {| sLMV ''nr'',
Enc {| sLC ''TT31'', sLAV ''R'', sLN ''kir'', sLMV ''ni'' |}
( sK ''I'' ''S'' ),
Enc {| sLC ''TT32'', sLAV ''I'', sLN ''kir'', sLMV ''nr'' |}
( sK ''R'' ''S'' )
|}
]"
protocol yahalom_paulson
where "yahalom_paulson = { I, R, S }"
locale atomic_yahalom_paulson_state = atomic_state yahalom_paulson
locale yahalom_paulson_state = reachable_state yahalom_paulson
lemma (in atomic_yahalom_paulson_state) auto_ni:
assumes facts:
"roleMap r tid0 = Some I"
"LN ''ni'' tid0 : knows t"
shows "predOrd t (St(tid0, I_1)) (Ln(LN ''ni'' tid0))"
using facts proof(sources! " LN ''ni'' tid0 ")
case I_1_ni note facts = facts this[simplified]
thus ?thesis by force
next
case (S_3_ni tid1) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT1'', s(|AV ''I'' tid1|), LN ''ni'' tid0 |}
( K ( s(|AV ''R'' tid1|) ) ( s(|AV ''S'' tid1|) ) ) ")
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_nr:
assumes facts:
"roleMap r tid0 = Some R"
"LN ''nr'' tid0 : knows t"
shows "predOrd t (St(tid0, R_2)) (Ln(LN ''nr'' tid0))"
using facts proof(sources! " LN ''nr'' tid0 ")
case R_2_nr note facts = facts this[simplified]
thus ?thesis by force
next
case (S_3_ni tid1) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT1'', s(|AV ''I'' tid1|), LN ''nr'' tid0 |}
( K ( s(|AV ''R'' tid1|) ) ( s(|AV ''S'' tid1|) ) ) ")
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_I_k_I_S:
assumes facts:
"roleMap r tid0 = Some I"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"K ( s(|AV ''I'' tid0|) ) ( s(|AV ''S'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! "
K ( s(|AV ''I'' tid0|) ) ( s(|AV ''S'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_I_k_R_S:
assumes facts:
"roleMap r tid0 = Some I"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! "
K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_R_k_R_S:
assumes facts:
"roleMap r tid0 = Some R"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! "
K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_S_k_I_S:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"K ( s(|AV ''I'' tid0|) ) ( s(|AV ''S'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! "
K ( s(|AV ''I'' tid0|) ) ( s(|AV ''S'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_S_k_R_S:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! "
K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_yahalom_paulson_state) auto_S_sec_kir:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, S_3) : steps t"
"LN ''kir'' tid0 : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! " LN ''kir'' tid0 ")
case S_3_kir note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_S_k_I_S intro: event_predOrdI)
next
case (S_3_ni tid1) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT1'', s(|AV ''I'' tid1|), LN ''kir'' tid0 |}
( K ( s(|AV ''R'' tid1|) ) ( s(|AV ''S'' tid1|) ) ) ")
qed (insert facts, ((clarsimp, order?))+)?
next
case S_3_kir_1 note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_S_k_R_S intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_yahalom_paulson_state) auto_I_sec_kir:
assumes facts:
"roleMap r tid0 = Some I"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, I_3) : steps t"
"s(|MV ''kir'' tid0|) : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT31'', s(|AV ''R'' tid0|), s(|MV ''kir'' tid0|),
LN ''ni'' tid0
|}
( K ( s(|AV ''I'' tid0|) ) ( s(|AV ''S'' tid0|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_I_k_I_S intro: event_predOrdI)
next
case (S_3_enc tid1) note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_S_sec_kir intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_yahalom_paulson_state) auto_R_sec_kir:
assumes facts:
"roleMap r tid0 = Some R"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, R_4) : steps t"
"s(|MV ''kir'' tid0|) : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT32'', s(|AV ''I'' tid0|), s(|MV ''kir'' tid0|),
LN ''nr'' tid0
|}
( K ( s(|AV ''R'' tid0|) ) ( s(|AV ''S'' tid0|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_R_k_R_S intro: event_predOrdI)
next
case (S_3_enc tid1) note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_S_sec_kir intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in yahalom_paulson_state) weak_atomicity:
"complete (t,r,s) atomicAnn"
proof (cases rule: complete_atomicAnnI[completeness_cases_rule])
case (I_3_Ticket t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (I_3_kir t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
proof(sources! "
Enc {| LC ''TT31'', ?s'(|AV ''R'' tid0|), ?s'(|MV ''kir'' tid0|),
LN ''ni'' tid0
|}
( K ( ?s'(|AV ''I'' tid0|) ) ( ?s'(|AV ''S'' tid0|) ) ) ")
qed (insert facts, ((clarsimp, order?) | (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps))+)?
next
case (I_3_nr t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (R_1_ni t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (R_4_kir t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
proof(sources! "
Enc {| LC ''TT32'', ?s'(|AV ''I'' tid0|), ?s'(|MV ''kir'' tid0|),
LN ''nr'' tid0
|}
( K ( ?s'(|AV ''R'' tid0|) ) ( ?s'(|AV ''S'' tid0|) ) ) ")
qed (insert facts, ((clarsimp, order?) | (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps))+)?
next
case (S_2_ni t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
proof(sources! "
Enc {| LC ''TT1'', ?s'(|AV ''I'' tid0|), ?s'(|MV ''ni'' tid0|) |}
( K ( ?s'(|AV ''R'' tid0|) ) ( ?s'(|AV ''S'' tid0|) ) ) ")
qed (insert facts, ((clarsimp, order?) | (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps))+)?
next
case (S_2_nr t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state yahalom_paulson t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
qed
lemma (in atomic_yahalom_paulson_state) I_kir_sec:
assumes facts:
"roleMap r tid0 = Some I"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, I_3) : steps t"
"s(|MV ''kir'' tid0|) : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis by (fastsimp dest: auto_I_sec_kir intro: event_predOrdI)
qed
lemma (in atomic_yahalom_paulson_state) R_kir_sec:
assumes facts:
"roleMap r tid0 = Some R"
"s(|AV ''I'' tid0|) ~: Compromised"
"s(|AV ''R'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, R_4) : steps t"
"s(|MV ''kir'' tid0|) : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis by (fastsimp dest: auto_R_sec_kir intro: event_predOrdI)
qed
lemma (in atomic_yahalom_paulson_state) I_ni_synch:
assumes facts:
"roleMap r tid1 = Some I"
"s(|AV ''I'' tid1|) ~: Compromised"
"s(|AV ''R'' tid1|) ~: Compromised"
"s(|AV ''S'' tid1|) ~: Compromised"
"(tid1, I_3) : steps t"
shows
"? tid2 tid3.
roleMap r tid2 = Some R &
roleMap r tid3 = Some S &
s(|AV ''I'' tid2|) = s(|AV ''I'' tid1|) &
s(|AV ''I'' tid3|) = s(|AV ''I'' tid1|) &
s(|AV ''R'' tid2|) = s(|AV ''R'' tid1|) &
s(|AV ''R'' tid3|) = s(|AV ''R'' tid1|) &
s(|MV ''kir'' tid1|) = LN ''kir'' tid3 &
s(|MV ''ni'' tid2|) = LN ''ni'' tid1 &
s(|MV ''ni'' tid3|) = LN ''ni'' tid1 &
predOrd t (St(tid1, I_1)) (St(tid2, R_1)) &
predOrd t (St(tid2, R_2)) (St(tid3, S_2)) &
predOrd t (St(tid2, R_1)) (St(tid2, R_2)) &
predOrd t (St(tid3, S_3)) (St(tid1, I_3)) &
predOrd t (St(tid3, S_2)) (St(tid3, S_3))"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT31'', s(|AV ''R'' tid1|), s(|MV ''kir'' tid1|),
LN ''ni'' tid1
|}
( K ( s(|AV ''I'' tid1|) ) ( s(|AV ''S'' tid1|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_I_k_I_S intro: event_predOrdI)
next
case (S_3_enc tid2) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT1'', s(|AV ''I'' tid1|), LN ''ni'' tid1 |}
( K ( s(|AV ''R'' tid1|) ) ( s(|AV ''S'' tid1|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_I_k_R_S intro: event_predOrdI)
next
case (R_2_enc tid3) note facts = facts this[simplified]
have f1: "roleMap r tid1 = Some I" using facts by (auto intro: event_predOrdI)
have f2: "LN ''ni'' tid1 : knows t" using facts by (auto intro: event_predOrdI)
note facts = facts auto_ni[OF f1 f2, simplified]
thus ?thesis by force
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_yahalom_paulson_state) R_ni_synch:
assumes facts:
"roleMap r tid2 = Some R"
"s(|AV ''I'' tid2|) ~: Compromised"
"s(|AV ''R'' tid2|) ~: Compromised"
"s(|AV ''S'' tid2|) ~: Compromised"
"(tid2, R_4) : steps t"
shows
"? tid1 tid3.
roleMap r tid1 = Some I &
roleMap r tid3 = Some S &
s(|AV ''I'' tid2|) = s(|AV ''I'' tid1|) &
s(|AV ''I'' tid3|) = s(|AV ''I'' tid1|) &
s(|AV ''R'' tid2|) = s(|AV ''R'' tid1|) &
s(|AV ''R'' tid3|) = s(|AV ''R'' tid1|) &
s(|MV ''kir'' tid1|) = LN ''kir'' tid3 &
s(|MV ''kir'' tid2|) = LN ''kir'' tid3 &
s(|MV ''nr'' tid1|) = LN ''nr'' tid2 &
s(|MV ''nr'' tid3|) = LN ''nr'' tid2 &
predOrd t (St(tid1, I_4)) (St(tid2, R_4)) &
predOrd t (St(tid1, I_3)) (St(tid1, I_4)) &
predOrd t (St(tid2, R_2)) (St(tid3, S_2)) &
predOrd t (St(tid2, R_1)) (St(tid2, R_2)) &
predOrd t (St(tid3, S_3)) (St(tid1, I_3)) &
predOrd t (St(tid3, S_2)) (St(tid3, S_3))"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT32'', s(|AV ''I'' tid2|), s(|MV ''kir'' tid2|),
LN ''nr'' tid2
|}
( K ( s(|AV ''R'' tid2|) ) ( s(|AV ''S'' tid2|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_R_k_R_S intro: event_predOrdI)
next
case (S_3_enc tid3) note facts = facts this[simplified]
have f1: "roleMap r tid2 = Some R" using facts by (auto intro: event_predOrdI)
have f2: "LN ''nr'' tid2 : knows t" using facts by (auto intro: event_predOrdI)
note facts = facts auto_nr[OF f1 f2, simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT4'', LN ''nr'' tid2 |} ( LN ''kir'' tid3 ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_S_sec_kir intro: event_predOrdI)
next
case (I_4_enc tid4) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT31'', s(|AV ''R'' tid4|), LN ''kir'' tid3,
LN ''ni'' tid4
|}
( K ( s(|AV ''I'' tid4|) ) ( s(|AV ''S'' tid4|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: auto_S_sec_kir intro: event_predOrdI)
next
case S_3_enc note facts = facts this[simplified]
thus ?thesis by force
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
qed
end |
lemma lowdim_subset_hyperplane: fixes S :: "'a::euclidean_space set" assumes d: "dim S < DIM('a)" shows "\<exists>a::'a. a \<noteq> 0 \<and> span S \<subseteq> {x. a \<bullet> x = 0}" |
[STATEMENT]
lemma coprime_normalize:
assumes "coprime a b"
shows "coprime (normalize a) b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comm_monoid_mult_class.coprime (normalize a) b
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
comm_monoid_mult_class.coprime a b
goal (1 subgoal):
1. comm_monoid_mult_class.coprime (normalize a) b
[PROOF STEP]
by auto |
> module Decidable.Properties
> import Decidable.Predicates
> %default total
> %access public export
> ||| If |P| is decidable, |Not P| is decidable
> decNot : {P : Type} -> Dec P -> Dec (Not P)
> decNot {P} (Yes prf) = No contra where
> contra : Not P -> Void
> contra np = np prf
> decNot {P} (No contra) = Yes contra
> %freeze decNot -- frozen
> ||| If |P| and |Q| are decidable, |(P , Q)| is decidable
> decPair : {P, Q : Type} -> Dec P -> Dec Q -> Dec (P , Q)
> decPair (Yes p) (Yes q) = Yes (p , q)
> decPair (Yes p) (No nq) = No (\ pq => nq (snd pq))
> decPair (No np) (Yes q) = No (\ pq => np (fst pq))
> decPair (No np) (No nq) = No (\ pq => np (fst pq))
> %freeze decPair -- frozen
> ||| If |P| and |Q| are decidable, |Either P Q| is decidable
> decEither : {P, Q : Type} -> Dec P -> Dec Q -> Dec (Either P Q)
> decEither (Yes p) _ = Yes (Left p)
> decEither (No np) (Yes q) = Yes (Right q)
> decEither {P} {Q} (No np) (No nq) = No contra where
> contra : Either P Q -> Void
> contra (Left p) = np p
> contra (Right q) = nq q
> %freeze decEither -- frozen
|
theory Concrete_Semantics_2_2_ex3
imports Main
begin
fun count :: "'a => 'a list => nat" where
"count a Nil = 0" |
"count a (x # xs) = (if a = x then 1 else 0) + count a xs"
lemma "count x xs \<le> length xs"
apply(induction xs)
apply(auto)
done
end |
#pragma once
#include <boost/circular_buffer.hpp>
#include <generic_logger/generic_logger.hpp>
#include "../data_types.hpp"
namespace sensor_ublox {
namespace internal {
class UbxDataStreamReader {
public:
UbxDataStreamReader() : numObjectsRead_(0), end_(DataBuffer(), -1) {
buffer_ = std::make_unique<boost::circular_buffer<uint8_t>>(BufferSize);
}
void addData(const uint8_t* data, const size_t numBytes) {
DEBUG_STREAM("Adding data to stream reader: " << numBytes);
buffer_->insert(buffer_->end(), data, data + numBytes);
DEBUG_STREAM("Data in buffer: " << buffer_->size());
}
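    // Sentinel returned by end() when no further complete UBX object can be
    // decoded; per the initializer list above it is built from an empty buffer
    // and id -1 (the exact meaning of -1 is an assumption from this header).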
const UbxDataObject& end() const {
return end_;
}
UbxDataObject get();
private:
static constexpr size_t BufferSize = 8192;
std::unique_ptr<boost::circular_buffer<uint8_t>> buffer_;
uint64_t numObjectsRead_;
UbxDataObject end_;
};
} // namespace internal
} // namespace sensor_ublox
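// A minimal, hypothetical usage sketch (caller-side names such as `handle`,
// `buf`, and `n` are assumptions, and the loop condition assumes UbxDataObject
// can be compared against the end() sentinel, which this header does not show):
//
//   sensor_ublox::internal::UbxDataStreamReader reader;
//   reader.addData(buf, n);  // feed raw bytes as they arrive from the receiver
//   for (auto obj = reader.get(); !(obj == reader.end()); obj = reader.get()) {
//       handle(obj);         // process each decoded UBX object
//   }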
|
State Before: M : Type u_1
N : Type u_2
P : Type ?u.35290
inst✝² : Mul M
inst✝¹ : Mul N
inst✝ : Mul P
c✝ c : Con M
f : M → N
H : ∀ (x y : M), f (x * y) = f x * f y
h : mulKer f H ≤ c
hf : Surjective f
⊢ mapGen c f = mapOfSurjective c f H h hf State After: M : Type u_1
N : Type u_2
P : Type ?u.35290
inst✝² : Mul M
inst✝¹ : Mul N
inst✝ : Mul P
c✝ c : Con M
f : M → N
H : ∀ (x y : M), f (x * y) = f x * f y
h : mulKer f H ≤ c
hf : Surjective f
⊢ mapGen c f = conGen ↑(mapOfSurjective c f H h hf) Tactic: rw [← conGen_of_con (c.mapOfSurjective f H h hf)] State Before: M : Type u_1
N : Type u_2
P : Type ?u.35290
inst✝² : Mul M
inst✝¹ : Mul N
inst✝ : Mul P
c✝ c : Con M
f : M → N
H : ∀ (x y : M), f (x * y) = f x * f y
h : mulKer f H ≤ c
hf : Surjective f
⊢ mapGen c f = conGen ↑(mapOfSurjective c f H h hf) State After: no goals Tactic: rfl |
[STATEMENT]
lemma poly_power_n_eq:
fixes x::"'a :: idom"
assumes "n\<noteq>0"
shows "poly ([:-a,1:]^n) x=0 \<longleftrightarrow> (x=a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (poly ([:- a, 1::'a:] ^ n) x = (0::'a)) = (x = a)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
n \<noteq> 0
goal (1 subgoal):
1. (poly ([:- a, 1::'a:] ^ n) x = (0::'a)) = (x = a)
[PROOF STEP]
by (induct n,auto) |
data MyVect : (len: Nat) -> (elem: Type) -> Type where
Nil : MyVect 0 elem
(::) : (e: elem) -> (tail: MyVect len elem) -> MyVect (S len) elem
Eq elem => Eq (MyVect n elem) where
(==) Nil Nil = True
(==) (head :: tail) (head' :: tail') = head == head' && tail == tail'
Foldable (MyVect n) where
foldr func acc Nil = acc
foldr func acc (e :: tail) =
let
foldedTail = foldr func acc tail
in
func e foldedTail
data EqNat: (num1: Nat) -> (num2: Nat) -> Type where
Same: (num: Nat) -> EqNat num num
sameS : (k: Nat) -> (j: Nat) -> (eq : EqNat k j) -> EqNat (S k) (S j)
sameS j j (Same j) = Same (S j)
-- checkEqNat: (num1: Nat) -> (num2: Nat) -> Maybe (num1 = num2)
-- checkEqNat Z Z = Just Refl
-- checkEqNat Z (S k) = Nothing
-- checkEqNat (S k) Z = Nothing
-- checkEqNat (S k) (S j) =
-- case checkEqNat k j of
-- Nothing => Nothing
-- Just prf => Just (cong prf)
total zeroNotSuc : (0 = S k) -> Void
zeroNotSuc Refl impossible
total sucNotZero : (S k = 0) -> Void
sucNotZero Refl impossible
noRec : (contra : (k = j) -> Void) -> (S k = S j) -> Void
noRec contra Refl = contra Refl
checkEqNat: (num1: Nat) -> (num2: Nat) -> Dec (num1 = num2)
checkEqNat Z Z = Yes Refl
checkEqNat Z (S k) = No zeroNotSuc
checkEqNat (S k) Z = No sucNotZero
checkEqNat (S k) (S j) = case checkEqNat k j of
Yes prf => Yes (cong prf)
No contra => No (noRec contra)
exactLength: (len: Nat) -> (input: MyVect m a) -> Maybe (MyVect len a)
exactLength {m} len input =
case decEq m len of
Yes Refl => Just input
No contra => Nothing
total headUnequal: {xs: MyVect n a} -> {ys: MyVect n a} -> (contra: (x = y) -> Void) -> ((x :: xs) = (y :: ys)) -> Void
headUnequal contra Refl = contra Refl
total tailUnequal: {xs: MyVect n a} -> {ys: MyVect n a} -> (contra: (xs = ys) -> Void) -> ((x :: xs) = (y :: ys)) -> Void
tailUnequal contra Refl = contra Refl
DecEq a => DecEq (MyVect n a) where
decEq [] [] = Yes Refl
decEq (x :: xs) (y :: ys) = case decEq x y of
Yes Refl => case decEq xs ys of
Yes Refl => Yes Refl
No contra => No (tailUnequal contra)
No contra => No (headUnequal contra)
example1: MyVect 4 Integer
example1 = 1 :: 2 :: 3 :: 4 :: Nil
example1': MyVect 4 Integer
example1' = 1 :: 2 :: 3 :: 4 :: Nil
example2: MyVect 3 Integer
example2 = 1 :: 2 :: 3 :: Nil
example3: MyVect 4 Integer
example3 = 4 :: 3 :: 2 :: 1 :: Nil
natEqual: (x: Nat) -> (y: Nat) -> Maybe(x = y)
natEqual Z Z = Just Refl
natEqual Z (S k) = Nothing
natEqual (S k) Z = Nothing
natEqual (S k) (S j) = case natEqual k j of
Nothing => Nothing
(Just prf) => Just (cong prf)
|
In the silence, things are revealed.
Things that cannot be heard in the noise of modern life.
For the most part, our culture fears silence because of its revelatory power, both good and bad. We do have so many secrets.
Usually, the first thing us humans encounter in the silence is our thoughts. So we fear the silence because we fear our own thoughts. And wouldn’t you know it? The pesky ones we try so hard to push away are usually the first to surface.
That’s a drag, so instead we block out the silence. We come home, we go to the fridge and we turn on the tv. We push our consciousness through a kind of predictable tunnel. And the tunnel has a weight, a momentum that is hard to break.
It takes a big experience like falling in love or the loss of a loved one to crack open the tunnel. But the thing is, no matter how life altering the experience, we have an uncanny way of eventually finding our way back to the tunnel where things are safe, warm and pretty doggone mediocre. Well, lately, mediocrity is even giving the tunnel too much credit. We’ve sunk somewhere below mediocrity into absurdity.
Then every once-in-awhile, in the silence you recognize an exciting thought that leads you down the rabbit hole. It stirs something within you, like the memory of a distant voice. Maybe it is hardly a voice at all and more like a feeling. It stirs you to action. Maybe you write it down. Maybe you make a collage. Maybe you fix something. Maybe you share your idea with another. Maybe you make the bed.
Or maybe you just shut up and listen. And then respond. Listen. Respond. You begin to have a conversation with what is. In the silence, our senses unfurl, like tendrils. We hear the wind. Bird. We smell the air, and perhaps notice that when the sun comes out for a moment from behind the clouds on a grayish day, the grass responds to the sun by releasing a stronger scent. Smell the greeting of the grass and the sun.
It is all so alive.
In the silence, one can slip into the role of the third person who watches. From that perspective, when someone, let’s say, cuts you off in traffic, you become frustrated for a moment, sure. You are yanked back down to a lower consciousness. But if you’ve been practicing as the watcher, you are more easily able to slip back into higher consciousness.
I don’t mean to imply that the higher consciousness is a way to avoid emotion. You feel the anger but in the silence, you more easily and quickly move to another place, like finding shade on a hot day. Why stay hot when the shade is right next to you?
Maybe to be enlightened means that you live in that higher consciousness all the time, or, I don’t know, 90% of the time. Those people, from what I’ve heard, have had some kind of major divine intervention. For most of us poor schmucks it’s just something we have to practice. It’s hard for a beginner like me and very time consuming, but also cleansing, like a good poop.
“And one should never underestimate the satisfaction of a good poop, I always say,” says Mr. Octopus, who is, pretty much, enlightened.
It’s called higher consciousness because it lifts you higher. Duh! Sort of like being high, but with better side effects.
And I suspect there’s more to the silence than even all of that. It’s exciting. But for now, this is as far as I’ve gotten.
So, what the hell? Turn off that tv once-in-awhile. Turn off that radio. Let the emptiness emerge and let the quiet part begin! Whoop whoop! The secrets are not as bad as we think. The truth is always better, even though its emergence is sometimes painful.
“Like a poop,” says Mr. Octopus.
“Yes, just like a hard poop,” I agree.
In silence we can hear the truth of ourselves, and, well, it’s not all bad! Geesh!
This entry was posted on Monday, December 13th, 2010 at 3:16 pm and posted in Favorites, Uncategorized.
If $f$ and $g$ are homotopic and $g$ and $h$ are homotopic, then $f$ and $h$ are homotopic. |
[STATEMENT]
lemma concat_eq_append_conv:
"concat xss = ys @ zs \<longleftrightarrow>
(if xss = [] then ys = [] \<and> zs = []
else \<exists>xss1 xs xs' xss2. xss = xss1 @ (xs @ xs') # xss2 \<and> ys = concat xss1 @ xs \<and> zs = xs' @ concat xss2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (concat xss = ys @ zs) = (if xss = [] then ys = [] \<and> zs = [] else \<exists>xss1 xs xs' xss2. xss = xss1 @ (xs @ xs') # xss2 \<and> ys = concat xss1 @ xs \<and> zs = xs' @ concat xss2)
[PROOF STEP]
by(auto dest: concat_eq_appendD) |
------------------------------------------------------------------------------
-- Totality properties with respect to OrdList (flatten-OrdList-helper)
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOTC.Program.SortList.Properties.Totality.OrdList.FlattenATP where
open import FOTC.Base
open import FOTC.Data.Bool.PropertiesATP
open import FOTC.Data.Nat.Inequalities
open import FOTC.Data.Nat.Inequalities.PropertiesATP
open import FOTC.Data.Nat.Type
open import FOTC.Program.SortList.Properties.Totality.BoolATP
open import FOTC.Program.SortList.Properties.Totality.ListN-ATP
open import FOTC.Program.SortList.Properties.Totality.OrdTreeATP
open import FOTC.Program.SortList.Properties.MiscellaneousATP
open import FOTC.Program.SortList.SortList
------------------------------------------------------------------------------
flatten-OrdList-helper : ∀ {t₁ i t₂} → Tree t₁ → N i → Tree t₂ →
OrdTree (node t₁ i t₂) →
≤-Lists (flatten t₁) (flatten t₂)
flatten-OrdList-helper {t₂ = t₂} tnil Ni Tt₂ OTt =
subst (λ t → ≤-Lists t (flatten t₂))
(sym (flatten-nil))
(le-Lists-[] (flatten t₂))
flatten-OrdList-helper (ttip {i₁} Ni₁) Tt₁ tnil OTt = prf
where postulate prf : ≤-Lists (flatten (tip i₁)) (flatten nil)
{-# ATP prove prf #-}
flatten-OrdList-helper {i = i} (ttip {i₁} Ni₁) Ni (ttip {i₂} Ni₂) OTt = prf
where
postulate lemma : i₁ ≤ i₂
{-# ATP prove lemma ≤-trans &&-list₄-t le-ItemTree-Bool le-TreeItem-Bool ordTree-Bool #-}
postulate prf : ≤-Lists (flatten (tip i₁)) (flatten (tip i₂))
{-# ATP prove prf lemma #-}
flatten-OrdList-helper {i = i} (ttip {i₁} Ni₁) Ni
(tnode {t₂₁} {i₂} {t₂₂} Tt₂₁ Ni₂ Tt₂₂) OTt = prf
where
-- Helper terms to get the conjuncts from OTt.
helper₁ = ordTree-Bool (ttip Ni₁)
helper₂ = ordTree-Bool (tnode Tt₂₁ Ni₂ Tt₂₂)
helper₃ = le-TreeItem-Bool (ttip Ni₁) Ni
helper₄ = le-ItemTree-Bool Ni (tnode Tt₂₁ Ni₂ Tt₂₂)
helper₅ = trans (sym (ordTree-node (tip i₁) i (node t₂₁ i₂ t₂₂))) OTt
-- Helper terms to get the conjuncts from the fourth conjunct of OTt
helper₆ = le-ItemTree-Bool Ni Tt₂₁
helper₇ = le-ItemTree-Bool Ni Tt₂₂
helper₈ = trans (sym (le-ItemTree-node i t₂₁ i₂ t₂₂))
(&&-list₄-t₄ helper₁ helper₂ helper₃ helper₄ helper₅)
-- Common terms for the lemma₁ and lemma₂.
-- The ATPs could not figure them out.
OrdTree-tip-i₁ : OrdTree (tip i₁)
OrdTree-tip-i₁ = &&-list₄-t₁ helper₁ helper₂ helper₃ helper₄ helper₅
≤-TreeItem-tip-i₁-i : ≤-TreeItem (tip i₁) i
≤-TreeItem-tip-i₁-i = &&-list₄-t₃ helper₁ helper₂ helper₃ helper₄ helper₅
lemma₁ : ≤-Lists (flatten (tip i₁)) (flatten t₂₁)
lemma₁ = flatten-OrdList-helper (ttip Ni₁) Ni Tt₂₁ OT
where
-- The ATPs could not figure out these terms.
OrdTree-t₂₁ : OrdTree t₂₁
OrdTree-t₂₁ =
leftSubTree-OrdTree Tt₂₁ Ni₂ Tt₂₂
(&&-list₄-t₂ helper₁ helper₂ helper₃ helper₄ helper₅)
≤-ItemTree-i-t₂₁ : ≤-ItemTree i t₂₁
≤-ItemTree-i-t₂₁ = &&-list₂-t₁ helper₆ helper₇ helper₈
postulate OT : OrdTree (node (tip i₁) i t₂₁)
{-# ATP prove OT ≤-TreeItem-tip-i₁-i ≤-ItemTree-i-t₂₁ OrdTree-tip-i₁ OrdTree-t₂₁ #-}
lemma₂ : ≤-Lists (flatten (tip i₁)) (flatten t₂₂)
lemma₂ = flatten-OrdList-helper (ttip Ni₁) Ni Tt₂₂ OT
where
-- The ATPs could not figure out these terms.
OrdTree-t₂₂ : OrdTree t₂₂
OrdTree-t₂₂ =
rightSubTree-OrdTree Tt₂₁ Ni₂ Tt₂₂
(&&-list₄-t₂ helper₁ helper₂ helper₃ helper₄ helper₅)
≤-ItemTree-i-t₂₂ : ≤-ItemTree i t₂₂
≤-ItemTree-i-t₂₂ = &&-list₂-t₂ helper₆ helper₇ helper₈
postulate OT : OrdTree (node (tip i₁) i t₂₂)
{-# ATP prove OT ≤-TreeItem-tip-i₁-i ≤-ItemTree-i-t₂₂ OrdTree-tip-i₁ OrdTree-t₂₂ #-}
postulate prf : ≤-Lists (flatten (tip i₁)) (flatten (node t₂₁ i₂ t₂₂))
{-# ATP prove prf xs≤ys→xs≤zs→xs≤ys++zs flatten-ListN lemma₁ lemma₂ #-}
flatten-OrdList-helper {i = i} (tnode {t₁₁} {i₁} {t₁₂} Tt₁₁ Ni₁ Tt₁₂)
Ni tnil OTt = prf
where
-- Helper terms to get the conjuncts from OTt.
helper₁ = ordTree-Bool (tnode Tt₁₁ Ni₁ Tt₁₂)
helper₂ = ordTree-Bool tnil
helper₃ = le-TreeItem-Bool (tnode Tt₁₁ Ni₁ Tt₁₂) Ni
helper₄ = le-ItemTree-Bool Ni tnil
helper₅ = trans (sym (ordTree-node (node t₁₁ i₁ t₁₂) i nil)) OTt
-- Helper terms to get the conjuncts from the third conjunct of OTt.
helper₆ = le-TreeItem-Bool Tt₁₁ Ni
helper₇ = le-TreeItem-Bool Tt₁₂ Ni
helper₈ = trans (sym (le-TreeItem-node t₁₁ i₁ t₁₂ i))
(&&-list₄-t₃ helper₁ helper₂ helper₃ helper₄ helper₅)
lemma₁ : ≤-Lists (flatten t₁₁) (flatten nil)
lemma₁ = flatten-OrdList-helper Tt₁₁ Ni tnil OT
where
postulate OT : OrdTree (node t₁₁ i nil)
{-# ATP prove OT leftSubTree-OrdTree &&-list₂-t &&-list₄-t helper₁ helper₂ helper₃ helper₄ helper₅ helper₆ helper₇ helper₈ #-}
lemma₂ : ≤-Lists (flatten t₁₂) (flatten nil)
lemma₂ = flatten-OrdList-helper Tt₁₂ Ni tnil OT
where
postulate OT : OrdTree (node t₁₂ i nil)
{-# ATP prove OT rightSubTree-OrdTree &&-list₄-t helper₁ helper₂ helper₃ helper₄ helper₅ helper₆ helper₇ helper₈ #-}
postulate prf : ≤-Lists (flatten (node t₁₁ i₁ t₁₂)) (flatten nil)
{-# ATP prove prf xs≤zs→ys≤zs→xs++ys≤zs flatten-ListN lemma₁ lemma₂ #-}
flatten-OrdList-helper {i = i} (tnode {t₁₁} {i₁} {t₁₂} Tt₁₁ Ni₁ Tt₁₂) Ni
(ttip {i₂} Ni₂) OTt = prf
where
-- Helper terms to get the conjuncts from OTt.
helper₁ = ordTree-Bool (tnode Tt₁₁ Ni₁ Tt₁₂)
helper₂ = ordTree-Bool (ttip Ni₂)
helper₃ = le-TreeItem-Bool (tnode Tt₁₁ Ni₁ Tt₁₂) Ni
helper₄ = le-ItemTree-Bool Ni (ttip Ni₂)
helper₅ = trans (sym (ordTree-node (node t₁₁ i₁ t₁₂) i (tip i₂))) OTt
-- Helper terms to get the conjuncts from the third conjunct of OTt.
helper₆ = le-TreeItem-Bool Tt₁₁ Ni
helper₇ = le-TreeItem-Bool Tt₁₂ Ni
helper₈ = trans (sym (le-TreeItem-node t₁₁ i₁ t₁₂ i))
(&&-list₄-t₃ helper₁ helper₂ helper₃ helper₄ helper₅)
lemma₁ : ≤-Lists (flatten t₁₁) (flatten (tip i₂))
lemma₁ = flatten-OrdList-helper Tt₁₁ Ni (ttip Ni₂) OT
where
postulate OT : OrdTree (node t₁₁ i (tip i₂))
{-# ATP prove OT leftSubTree-OrdTree &&-list₂-t &&-list₄-t helper₁ helper₂ helper₃ helper₄ helper₅ helper₆ helper₇ helper₈ #-}
lemma₂ : ≤-Lists (flatten t₁₂) (flatten (tip i₂))
lemma₂ = flatten-OrdList-helper Tt₁₂ Ni (ttip Ni₂) OT
where
postulate OT : OrdTree (node t₁₂ i (tip i₂))
{-# ATP prove OT rightSubTree-OrdTree &&-list₂-t &&-list₄-t helper₁ helper₂ helper₃ helper₄ helper₅ helper₆ helper₇ helper₈ #-}
postulate prf : ≤-Lists (flatten (node t₁₁ i₁ t₁₂)) (flatten (tip i₂))
{-# ATP prove prf xs≤zs→ys≤zs→xs++ys≤zs flatten-ListN lemma₁ lemma₂ #-}
flatten-OrdList-helper {i = i} (tnode {t₁₁} {i₁} {t₁₂} Tt₁₁ Ni₁ Tt₁₂) Ni
(tnode {t₂₁} {i₂} {t₂₂} Tt₂₁ Ni₂ Tt₂₂) OTt = prf
where
-- Helper terms to get the conjuncts from OTt.
helper₁ = ordTree-Bool (tnode Tt₁₁ Ni₁ Tt₁₂)
helper₂ = ordTree-Bool (tnode Tt₂₁ Ni₂ Tt₂₂)
helper₃ = le-TreeItem-Bool (tnode Tt₁₁ Ni₁ Tt₁₂) Ni
helper₄ = le-ItemTree-Bool Ni (tnode Tt₂₁ Ni₂ Tt₂₂)
helper₅ = trans (sym (ordTree-node (node t₁₁ i₁ t₁₂) i (node t₂₁ i₂ t₂₂)))
OTt
-- Helper terms to get the conjuncts from the third conjunct of OTt.
helper₆ = le-TreeItem-Bool Tt₁₁ Ni
helper₇ = le-TreeItem-Bool Tt₁₂ Ni
helper₈ = trans (sym (le-TreeItem-node t₁₁ i₁ t₁₂ i))
(&&-list₄-t₃ helper₁ helper₂ helper₃ helper₄ helper₅)
lemma₁ : ≤-Lists (flatten t₁₁) (flatten (node t₂₁ i₂ t₂₂))
lemma₁ = flatten-OrdList-helper Tt₁₁ Ni (tnode Tt₂₁ Ni₂ Tt₂₂) OT
where
postulate OT : OrdTree (node t₁₁ i (node t₂₁ i₂ t₂₂))
{-# ATP prove OT leftSubTree-OrdTree &&-list₂-t &&-list₄-t helper₁ helper₂ helper₃ helper₄ helper₅ helper₆ helper₇ helper₈ #-}
lemma₂ : ≤-Lists (flatten t₁₂) (flatten (node t₂₁ i₂ t₂₂))
lemma₂ = flatten-OrdList-helper Tt₁₂ Ni (tnode Tt₂₁ Ni₂ Tt₂₂) OT
where
postulate OT : OrdTree (node t₁₂ i (node t₂₁ i₂ t₂₂))
{-# ATP prove OT rightSubTree-OrdTree &&-list₂-t &&-list₄-t helper₁ helper₂ helper₃ helper₄ helper₅ helper₆ helper₇ helper₈ #-}
postulate prf : ≤-Lists (flatten (node t₁₁ i₁ t₁₂))
(flatten (node t₂₁ i₂ t₂₂))
{-# ATP prove prf xs≤zs→ys≤zs→xs++ys≤zs flatten-ListN lemma₁ lemma₂ #-}
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
open import Cubical.Core.Everything
open import Cubical.Foundations.HLevels
module Cubical.Algebra.Semigroup.Construct.Left {ℓ} (Aˢ : hSet ℓ) where
open import Cubical.Foundations.Prelude
open import Cubical.Algebra.Semigroup
import Cubical.Algebra.Magma.Construct.Left Aˢ as LMagma
open LMagma public hiding (Left-isMagma; LeftMagma)
private
A = ⟨ Aˢ ⟩
isSetA = Aˢ .snd
◂-assoc : Associative _◂_
◂-assoc _ _ _ = refl
Left-isSemigroup : IsSemigroup A _◂_
Left-isSemigroup = record
{ isMagma = LMagma.Left-isMagma
; assoc = ◂-assoc
}
LeftSemigroup : Semigroup ℓ
LeftSemigroup = record { isSemigroup = Left-isSemigroup }
|
/-
Copyright (c) 2021 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import combinatorics.simplicial_complex.basic
namespace affine
open set
variables {m n : ℕ} {E : Type*} [normed_group E] [normed_space ℝ E] {S : simplicial_complex E}
{X Y : finset E}
/--
A simplicial complex is finite iff it has finitely many faces.
-/
def simplicial_complex.finite (S : simplicial_complex E) : Prop := S.faces.finite
noncomputable def simplicial_complex.faces_finset (S : simplicial_complex E) (hS : S.finite) :
finset (finset E) :=
hS.to_finset
@[simp]
lemma mem_faces_finset (hS : S.finite) :
X ∈ S.faces_finset hS ↔ X ∈ S.faces :=
set.finite.mem_to_finset _
/--
A simplicial complex `S` is locally finite at the face `X` iff `X` is a subface of finitely many
faces in `S`.
-/
def simplicial_complex.locally_finite_at (S : simplicial_complex E) (X : finset E) : Prop :=
set.finite {Y ∈ S.faces | X ⊆ Y}
/--
A simplicial complex `S` is locally infinite at the face `X` iff `X` is a subface of infinitely many
faces in `S`.
-/
def simplicial_complex.locally_infinite_at (S : simplicial_complex E) (X : finset E) : Prop :=
set.infinite {Y ∈ S.faces | X ⊆ Y}
lemma simplicial_complex.locally_finite_at_iff_not_locally_infinite_at :
¬S.locally_infinite_at X ↔ S.locally_finite_at X :=
not_not
/--
A simplicial complex is locally finite iff each of its nonempty faces is a subface of only finitely many faces.
-/
def simplicial_complex.locally_finite (S : simplicial_complex E) : Prop :=
∀ {X : finset _}, X ∈ S.faces → X.nonempty → S.locally_finite_at X
example {α : Type*} {s : set α} {p q : α → Prop} (h : ∀ x, p x → q x) :
{x ∈ s | p x} ⊆ {x ∈ s | q x} :=
begin
refine inter_subset_inter_right s h,
end
lemma locally_finite_at_up_closed (hX : S.locally_finite_at X) (hXY : X ⊆ Y) :
S.locally_finite_at Y :=
begin
apply hX.subset,
rintro Z ⟨_, _⟩,
exact ⟨‹Z ∈ S.faces›, finset.subset.trans hXY ‹Y ⊆ Z›⟩,
end
lemma locally_infinite_at_down_closed (hY : S.locally_infinite_at Y) (hXY : X ⊆ Y) :
S.locally_infinite_at X :=
λ t, hY (locally_finite_at_up_closed t hXY)
lemma locally_finite_of_finite (hS : S.finite) :
S.locally_finite :=
λ X hX _, hS.subset (λ Y hY, hY.1)
/--
A simplicial complex is locally finite iff each point belongs to finitely many faces.
-/
lemma locally_finite_iff_mem_finitely_many_faces [decidable_eq E] :
S.locally_finite ↔ ∀ (x : E), finite {X | X ∈ S.faces ∧ x ∈ convex_hull (X : set E)} :=
begin
split,
{ unfold simplicial_complex.locally_finite,
contrapose!,
rintro ⟨x, hx⟩,
by_cases hxspace : x ∈ S.space,
{ obtain ⟨X, ⟨hX, hXhull, hXbound⟩, hXunique⟩ := combi_interiors_partition hxspace,
simp at hXunique,
use [X, hX],
split,
{ apply finset.nonempty_of_ne_empty,
rintro rfl,
simpa using hXhull },
rintro hXlocallyfinite,
apply hx,
suffices h : {X : finset E | X ∈ S.faces ∧ x ∈ convex_hull ↑X} ⊆
{Y : finset E | Y ∈ S.faces ∧ X ⊆ Y},
{ exact finite.subset hXlocallyfinite h },
rintro Y ⟨hY, hYhull⟩,
use hY,
have hXYhull := S.disjoint hX hY ⟨hXhull, hYhull⟩,
rw ←finset.coe_inter at hXYhull,
by_contra hXY,
apply hXbound,
have hYX : X ∩ Y ⊂ X,
{ use finset.inter_subset_left X Y,
rintro hXXY,
exact hXY (finset.subset_inter_iff.1 hXXY).2 },
exact mem_combi_frontier_iff.2 ⟨X ∩ Y, hYX, hXYhull⟩ },
{ exfalso,
apply hx,
suffices h : {X : finset E | X ∈ S.faces ∧ x ∈ convex_hull ↑X} = ∅,
{ rw h,
exact finite_empty },
apply eq_empty_of_subset_empty,
rintro X ⟨hX, h⟩,
exact hxspace (mem_bUnion hX h) }},
{ rintro hS X hX h,
obtain ⟨x, hx⟩ := h,
suffices h : {Y : finset E | Y ∈ S.faces ∧ X ⊆ Y} ⊆
{Y : finset E | Y ∈ S.faces ∧ x ∈ convex_hull ↑Y},
{ exact (hS x).subset h },
rintro Y ⟨hY, hXY⟩,
exact ⟨hY, subset_convex_hull Y (hXY hx)⟩ }
end
end affine
|
\chapter{Introductory Calculus}
\section{The Newton Quotient}
Consider an arbitrary curve. At any point on that curve, there is a line
tangent to the curve. How might we go about finding the slope of such a line?
Well, consider two points, one some distance to the left of the point we are
targeting and the other some distance to the right; between those two points
lies a secant line. If we take the limit as the distance between the target
point and its two neighbors shrinks to zero, the slope of the secant line
intuitively approaches the slope of the line tangent to the curve at that
point.
This process is summarized in the Newton quotient, which is drawn out below:
\begin{equation}
m_{tan}=\lim_{\Delta x\to 0} \frac{f(x+\Delta x)-f(x)}{\Delta x}
\end{equation}
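As a quick check of this definition, here is a standard worked example (our
own illustration) applying the Newton quotient to $f(x)=x^2$:
\begin{equation}
m_{tan}=\lim_{\Delta x\to 0} \frac{(x+\Delta x)^2-x^2}{\Delta x}
=\lim_{\Delta x\to 0} \frac{2x\Delta x+(\Delta x)^2}{\Delta x}
=\lim_{\Delta x\to 0} \left(2x+\Delta x\right)=2x
\end{equation}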
\section{Delta-Epsilon}
Let's say that for some function $f(x)$ that as $x\to c$, $f(x)\to L$. That is
to say:
\begin{equation}
\lim_{x \to c} f(x) = L
\end{equation}
We can describe $\epsilon$ as the maximum distance the value $f(x)$ is allowed
to be from the limit $L$ (for a continuous function, $L=f(c)$). Furthermore,
$\delta$ is the maximum distance $x$ may be from $c$ as we take the limit of
$f$.
Therefore, we can summarize these two statements and say that for every
$\epsilon > 0$, there is a $\delta > 0$ such that for all $x$:
\begin{equation}
0 < |x-c| < \delta
\end{equation}
implies that
\begin{equation}
|f(x)-L| < \epsilon
\end{equation}
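As a concrete illustration (our own example), take $f(x)=2x$ and $c=3$, so
$L=6$. For any $\epsilon>0$, the choice $\delta=\epsilon/2$ satisfies the
definition, since $0<|x-3|<\delta$ gives
\begin{equation}
|f(x)-6|=|2x-6|=2|x-3|<2\delta=\epsilon
\end{equation}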
\section{The Derivative}
The derivative of $f(x)$ is written $\frac{d}{dx}f(x)$ and is itself a
function: for any $x$, it produces a value equal to the slope of the line
tangent to $f$ at $x$.
One way to find the derivative of a function is to take the Newton Quotient of
the function. This is true because:
\begin{equation}
\frac{d}{dx}f(x)=\lim_{\Delta x\to 0} \frac{f(x+\Delta x)-f(x)}{\Delta x}
\end{equation}
\textit{Note}: since the derivative of a function is itself a function, you can
take the derivative of a function an arbitrary number of times. Physics is the
only real-world application that requires you to take the derivative of a
function more than two times, but this class frequently requires you to take
the second derivative of a function. This is often expressed by the following
notation:
\begin{equation}
\frac{d^2}{dx^2}f(x)
\end{equation}
However, there are rules that we can apply to simplify the process of finding a
derivative.
\subsection{Derivation Rules}
\subsubsection{The Constant Rule}
\begin{equation}
\frac{d}{dx}c=0
\end{equation}
\subsubsection{The Constant Multiple Rule}
\begin{equation}
\frac{d}{dx}cf(x)=c\frac{d}{dx}f(x)
\end{equation}
\subsubsection{The Power Rule}
\begin{equation}
\frac{d}{dx}x^n=nx^{n-1}
\end{equation}
\subsubsection{The Sum and Difference Rule}
\begin{equation}
\frac{d}{dx}\big(f(x)\pm g(x)\big)=\frac{d}{dx}f(x) \pm \frac{d}{dx}g(x)
\end{equation}
\subsubsection{The Product Rule}
\begin{equation}
\frac{d}{dx}\big(f(x)g(x)\big)=f(x)\frac{d}{dx}g(x)+g(x)\frac{d}{dx}f(x)
\end{equation}
\subsubsection{The Quotient Rule}
\begin{equation}
\frac{d}{dx}\Bigg(\frac{f(x)}{g(x)}\Bigg)=\frac{f'(x)g(x)-g'(x)f(x)}{g^2(x)}
\end{equation}
\subsubsection{The Chain Rule}
\begin{equation}
\frac{d}{dx}f(g(x))=f'(g(x))g'(x)
\end{equation}
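To see how these rules combine, here is a short worked example (our own)
differentiating $f(x)=(3x^2+5)^4$ with the power rule and the chain rule:
\begin{equation}
\frac{d}{dx}\big(3x^2+5\big)^4=4\big(3x^2+5\big)^3\cdot\frac{d}{dx}\big(3x^2+5\big)=24x\big(3x^2+5\big)^3
\end{equation}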
\section{Implicit Differentiation}
Consider the equation that defines the unit circle: $x^2+y^2=1$. How would you
go about differentiating it? Sure, we know that $\frac{d}{dx}x^2=2x$, but what
does $\frac{d}{dx}y^2$ evaluate to? Differentiation is easy when the variable we
are differentiating agrees with the variable we are taking the derivative
with respect to, but what happens when they don't?
To evaluate $\frac{d}{dx}y^2$, we perform the normal power-rule differentiation,
so we're left with $2y$, but we still haven't solved the problem of the
mismatched variables. The solution is simple: we multiply the quantity that we
have partially differentiated by the derivative of the free-hanging variable:
$2y\frac{dy}{dx}$.
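Putting the pieces together for the unit circle, differentiating both sides of
$x^2+y^2=1$ with respect to $x$ gives
\begin{equation}
2x+2y\frac{dy}{dx}=0
\end{equation}
and solving for the derivative yields
\begin{equation}
\frac{dy}{dx}=-\frac{x}{y}
\end{equation}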
|
-- @@stderr --
/dev/stdin:17: error: #else without #if
dtrace: failed to compile script test/unittest/preprocessor/err.incompelse.d: Preprocessor failed to process input program
|
State Before: ι : Type ?u.66703
α : Type u_1
β : Type ?u.66709
π : ι → Type ?u.66714
inst✝ : GeneralizedBooleanAlgebra α
a b c d : α
⊢ a ≤ a ∆ b ↔ Disjoint a b State After: ι : Type ?u.66703
α : Type u_1
β : Type ?u.66709
π : ι → Type ?u.66714
inst✝ : GeneralizedBooleanAlgebra α
a b c d : α
h : a ≤ a ∆ b
⊢ Disjoint a b Tactic: refine' ⟨fun h => _, fun h => h.symmDiff_eq_sup.symm ▸ le_sup_left⟩ State Before: ι : Type ?u.66703
α : Type u_1
β : Type ?u.66709
π : ι → Type ?u.66714
inst✝ : GeneralizedBooleanAlgebra α
a b c d : α
h : a ≤ a ∆ b
⊢ Disjoint a b State After: ι : Type ?u.66703
α : Type u_1
β : Type ?u.66709
π : ι → Type ?u.66714
inst✝ : GeneralizedBooleanAlgebra α
a b c d : α
h : a ≤ (a ⊔ b) \ (a ⊓ b)
⊢ Disjoint a b Tactic: rw [symmDiff_eq_sup_sdiff_inf] at h State Before: ι : Type ?u.66703
α : Type u_1
β : Type ?u.66709
π : ι → Type ?u.66714
inst✝ : GeneralizedBooleanAlgebra α
a b c d : α
h : a ≤ (a ⊔ b) \ (a ⊓ b)
⊢ Disjoint a b State After: no goals Tactic: exact disjoint_iff_inf_le.mpr (le_sdiff_iff.1 <| inf_le_of_left_le h).le |
Formal statement is: lemma holomorphic_on_compose_gen: "f holomorphic_on s \<Longrightarrow> g holomorphic_on t \<Longrightarrow> f ` s \<subseteq> t \<Longrightarrow> (g o f) holomorphic_on s" Informal statement is: If $f$ is holomorphic on $s$ and $g$ is holomorphic on $t$, and $f(s) \subseteq t$, then $g \circ f$ is holomorphic on $s$. |
[STATEMENT]
lemma connectedin_eq_not_separated:
"connectedin X S \<longleftrightarrow>
S \<subseteq> topspace X \<and>
(\<nexists>C1 C2. C1 \<union> C2 = S \<and> C1 \<noteq> {} \<and> C2 \<noteq> {} \<and> separatedin X C1 C2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connectedin X S = (S \<subseteq> topspace X \<and> (\<nexists>C1 C2. C1 \<union> C2 = S \<and> C1 \<noteq> {} \<and> C2 \<noteq> {} \<and> separatedin X C1 C2))
[PROOF STEP]
unfolding separatedin_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connectedin X S = (S \<subseteq> topspace X \<and> (\<nexists>C1 C2. C1 \<union> C2 = S \<and> C1 \<noteq> {} \<and> C2 \<noteq> {} \<and> C1 \<subseteq> topspace X \<and> C2 \<subseteq> topspace X \<and> C1 \<inter> X closure_of C2 = {} \<and> C2 \<inter> X closure_of C1 = {}))
[PROOF STEP]
by (metis connectedin_separation sup.boundedE) |
State Before: ι : Type u
γ : Type w
β : ι → Type v
β₁ : ι → Type v₁
β₂ : ι → Type v₂
dec : DecidableEq ι
inst✝² : (i : ι) → Zero (β i)
inst✝¹ : (i : ι) → (x : β i) → Decidable (x ≠ 0)
p : ι → Prop
inst✝ : DecidablePred p
f : Π₀ (i : ι), β i
⊢ filter p f = mk (Finset.filter p (support f)) fun i => ↑f ↑i State After: case h
ι : Type u
γ : Type w
β : ι → Type v
β₁ : ι → Type v₁
β₂ : ι → Type v₂
dec : DecidableEq ι
inst✝² : (i : ι) → Zero (β i)
inst✝¹ : (i : ι) → (x : β i) → Decidable (x ≠ 0)
p : ι → Prop
inst✝ : DecidablePred p
f : Π₀ (i : ι), β i
i : ι
⊢ ↑(filter p f) i = ↑(mk (Finset.filter p (support f)) fun i => ↑f ↑i) i Tactic: ext i State Before: case h
ι : Type u
γ : Type w
β : ι → Type v
β₁ : ι → Type v₁
β₂ : ι → Type v₂
dec : DecidableEq ι
inst✝² : (i : ι) → Zero (β i)
inst✝¹ : (i : ι) → (x : β i) → Decidable (x ≠ 0)
p : ι → Prop
inst✝ : DecidablePred p
f : Π₀ (i : ι), β i
i : ι
⊢ ↑(filter p f) i = ↑(mk (Finset.filter p (support f)) fun i => ↑f ↑i) i State After: no goals Tactic: by_cases h1 : p i <;> by_cases h2 : f i ≠ 0 <;> simp at h2 <;> simp [h1, h2] |
function noise = ncnmNoiseExpandParam(noise, params)
% NCNMNOISEEXPANDPARAM Expand null category noise model's structure from param vector.
% FORMAT
% DESC returns a null category noise model structure filled with the
% parameters in the given vector. This is used as a helper function to
% enable parameters to be optimised in, for example, the NETLAB
% optimisation functions.
% ARG noise : the noise structure in which the parameters are to be
% placed.
% ARG param : vector of parameters which are to be placed in the
% noise structure.
% RETURN noise : noise structure with the given parameters in the
% relevant locations.
%
% SEEALSO : ncnmNoiseParamInit, ncnmNoiseExtractParam, noiseExpandParam
%
% COPYRIGHT : Neil D. Lawrence, 2004, 2005, 2006
% NOISE
noise.bias = params(1:noise.numProcess);
noise.gamman = params(noise.numProcess+1);
if noise.gammaSplit
noise.gammap = params(noise.numProcess+2);
else
noise.gammap = noise.gamman;
end
|
[STATEMENT]
lemma min_dist_is_dist: "connected v v' \<Longrightarrow> dist v (min_dist v v') v'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connected v v' \<Longrightarrow> dist v (min_dist v v') v'
[PROOF STEP]
by (auto intro: min_distI2) |
-- Andreas, 2014-10-05, code by Jesper Cockx
{-# OPTIONS --cubical-compatible --guardedness #-}
open import Common.Coinduction
open import Common.Equality
data False : Set where
data Pandora : Set where
C : ∞ False → Pandora
postulate
ext : (False → Pandora) → (Pandora → False) → False ≡ Pandora
f : False → Pandora
f ()
g : Pandora → False
g (C x) = ♭ x
foo : False ≡ Pandora
foo = ext f g
-- should be rejected
loop : (A : Set) → A ≡ Pandora → A
loop .Pandora refl = C (♯ (loop False foo))
absurd : False
absurd = loop False foo
|
# Weekly Corrections
The aim of this notebook is to improve the weekly correction. It seems gamma changes a lot over time; first, however, I will try to make the correction work with a constant gamma and see if the weekly corrections change significantly over time. The analysis is related to the observation that the fraction of positives usually increases on Mondays (i.e. results from the weekend). However, one should first determine 𝛾, and if any weekly variations remain, these should be accounted for subsequently. To do this we will divide the data up into three periods:
- September 1st - December 15th
- December 15th - January 3rd (we isolate Christmas, as we expect this period to be different)
- January 4th - now
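For reference, the scaling convention used throughout the code below (a sketch; the symbol names $P_t$, $T_t$, $\bar{T}$, and $SP_t$ are ours, chosen to match the P/SP naming mentioned later) is
$$SP_t = P_t \left( \frac{T_t}{\bar{T}} \right)^{-\gamma},$$
where $P_t$ is the number of daily positives, $T_t$ the number of daily tests, $\bar{T}$ the mean daily test count over the period, and $SP_t$ the scaled positives.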
```python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import scipy
import datetime as dt
import scipy.stats
from scipy import stats
import iminuit
from iminuit import Minuit
from scipy.optimize import curve_fit
mpl.rcParams['font.size'] = 18 # Set the general plotting font size
plt.rc("font", family="serif")
```
```python
save_plots = True
day_0 = '2020-09-01'
fit_to_today = False
gamma_period = ['2020-09-01', '2021-02-15']
date_start = "2020-09-01"
date_end = "2021-03-09"
filename = 'Data-Epidemiologiske-Rapport-12032021-uh7y' # name of the epidemiological report file
```
# Code from "MeasuringTestScalingExponent" to determine Gamma
## Read data
Note that I have not changed variable names yet; first I will see if we can get some results we can use. Later, the variables should be changed to P, SP, and SCP.
```python
data_posi = pd.read_csv(filename+"/Municipality_cases_time_series.csv", sep=';', thousands='.', index_col=0)
data_test = pd.read_csv(filename+"/Municipality_tested_persons_time_series.csv", sep=';', thousands='.', index_col=0)
# All of Denmark
if fit_to_today:
date_end = data_test.index[-1]
Nposi_all = data_posi.loc[date_start:date_end].sum(axis=1)
eNposi_all = np.sqrt(Nposi_all)
Ntest_all = data_test.loc[date_start:date_end].sum(axis=1)
eNtest_all = np.sqrt(Ntest_all)
day = np.arange(1,len(Nposi_all)+1)
# Number of positive tests and uncertainty, assuming fixed number of daily tests and power law correction:
Power = 0.56 # Power in power law for the increase in positives with increase in tests.
SystError = 0.000 # To account for larger uncertainties on fraction positives than simply the statistical.
# TP 6. March: Are the above necessary? And the below scaling (yet)?
nAveDailyTests_all = Ntest_all.mean()
print("Average number of PCR tests: ", nAveDailyTests_all)
fPos_all = Nposi_all / Ntest_all
nPos_all = Nposi_all * (Ntest_all / nAveDailyTests_all)**(-Power)
enPos_all = np.sqrt(Ntest_all*fPos_all*(1-fPos_all) ) * (Ntest_all / nAveDailyTests_all)**(-Power)
```
Average number of PCR tests: 80992.6105263158
## Define ranges, fitting function, and model:
```python
Plot_StartDay = 1
Plot_EndDay = 63
Fit_StartDay = 0
Fit_EndDay = 33
day1jan = np.arange(Plot_StartDay, Plot_EndDay)
day1jan_fit = np.arange(Fit_StartDay, Fit_EndDay)
Npoints = len(day1jan_fit)
day_extrapol = 20
```
### Comparison of geometric mean with scaled positives
First we define a likelihood for comparing the geometric mean of the scaled positives with the scaled positives themselves, for a variable scaling exponent $\gamma$. Then the best-fit gamma and its uncertainty within the time period are determined.
```python
def model_bllh_og(N_obs, N_exp):
p_cdf = np.zeros_like(N_exp)
p_cdf[N_obs < N_exp] = scipy.stats.poisson.logcdf( N_obs[N_obs < N_exp], N_exp[N_obs < N_exp] )
p_cdf[N_obs >= N_exp] = scipy.stats.poisson.logsf( N_obs[N_obs >= N_exp], N_exp[N_obs >= N_exp] )
return - 2 * sum( (p_cdf) )
def model_bllh(N_obs, N_exp):
p_cdf = np.zeros_like(N_exp)
p_cdf = scipy.stats.poisson.logcdf( N_obs, N_exp )
p_cdf[p_cdf >= np.log(0.5)] = scipy.stats.poisson.logsf( N_obs[p_cdf >= np.log(0.5)], N_exp[p_cdf >= np.log(0.5)] )
return - 2 * sum( (p_cdf) )
#Find likelihood of N_obs given N_exp with poisson statistics
def gamma_index(): #finds best fit gamma and uncertainty for period
llh_list = []
gamma_list = np.linspace(0,1.,101) #probe gamma in discrete values.
for gamma in gamma_list:
nPos_all = Nposi_all * (Ntest_all / nAveDailyTests_all)**(-gamma)
enPos_all = np.sqrt(Nposi_all * (Ntest_all / nAveDailyTests_all)**(-gamma) )
y = nPos_all[Plot_StartDay:Plot_EndDay]
y_exp = []
for i in range(3,len(y)-3):
y_exp.append( (np.prod(y[i-3:i+4]))**(1/7) )
#Geometric running average, predicted scaled positive test number
llh = model_bllh( np.array(nPos_all[Plot_StartDay+4:Plot_EndDay-2].values), np.array(y_exp) )
llh_list.append(llh)
min_llh = min(llh_list)
    p = np.polyfit(gamma_list[np.isclose(llh_list, min_llh, atol=10.)],
                   np.array(llh_list)[np.isclose(llh_list, min_llh, atol=10.)], 2) # parabola fit near the minimum
    gamma = -p[1]/(2*p[0]) # vertex of the parabola is the best-fit gamma
    dgamma = 1/np.sqrt(p[0]) # -2 log L rises by 1 at one sigma, so p[0]*dgamma**2 = 1
return gamma, dgamma, llh_list, gamma_list, nPos_all, enPos_all, y_exp
#gamma, dgamma, llh_list, gamma_list, nPos_all, enPos_all, y_exp = gamma_index()
```
### Compare $\gamma (t)$
Compute $\gamma (t)$ as a linear function using locally computed geometric mean
```python
gamma_t = []
dgamma_t = []
days_between_points = 7
for i in np.arange(0,len(data_posi.loc[date_start:date_end].sum(axis=1)) - 28,days_between_points):
#loop over every days_between_points days from september to february
Nposi_all = data_posi.loc[date_start:date_end].sum(axis=1)
eNposi_all = np.sqrt(Nposi_all)
Ntest_all = data_test.loc[date_start:date_end].sum(axis=1)
eNtest_all = np.sqrt(Ntest_all)
day = np.arange(1,len(Nposi_all)+1)
Plot_StartDay = i
Plot_EndDay = 28+i
nAveDailyTests_all = Ntest_all.mean()
fPos_all = Nposi_all / Ntest_all
gamma, dgamma, llh_list, gamma_list, nPos_all, enPos_all, y_exp = gamma_index()
gamma_t.append(gamma), dgamma_t.append(dgamma)
```
Here we plot $\gamma(t)$. There is a problem with the week lines: they don't align, since gamma is only determined every seven days. For now the week lines have simply been removed.
In this section we furthermore compute the average gamma for each period, called ``g1, g2, g3``. Notice also that when I compute the average for each period, I manually plug in the numbers of how many of the gamma points should be included. This should be automatic, but I can't figure it out right now and don't want to waste too much time on it if it doesn't work; a sketch of how it could be automated is given below.
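A minimal sketch of how those index ranges could be derived automatically from the period boundary dates (assuming ``days_between_points = 7`` as above; ``b1`` and ``b2`` are hypothetical helper names, and the result reproduces the manual slices up to rounding at the boundaries):
```python
import datetime as dt   # already imported earlier in the notebook
import numpy as np      # already imported earlier in the notebook

# Gamma point i covers a window starting i*days_between_points days after date_start,
# so a boundary D days after date_start corresponds to index D // days_between_points.
b1 = (dt.date(2020, 12, 15) - dt.date(2020, 9, 1)).days // days_between_points      # 105 // 7 = 15
b2 = b1 + (dt.date(2021, 1, 4) - dt.date(2020, 12, 15)).days // days_between_points  # 15 + 2 = 17
g1 = np.average(a=gamma_t[:b1],   weights=dgamma_t[:b1])
g2 = np.average(a=gamma_t[b1:b2], weights=dgamma_t[b1:b2])
g3 = np.average(a=gamma_t[b2:],   weights=dgamma_t[b2:])
```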
```python
#plot data
plt.figure(figsize=(12,8))
plt.errorbar(days_between_points*np.arange(len(gamma_t)), gamma_t, yerr=dgamma_t, fmt='.', capsize=5, color='k', label=f'Gamma for every {days_between_points} days')
days = days_between_points*np.arange(len(gamma_t))
plt.ylabel(r'Scaling Index [$\gamma$(t)]')#, plt.xlabel('Days from '+date_start)
plt.ylim(0,1)#, plt.legend()
#fit linear function
def lin(x,a,b):
return a*x+b
p, cov = curve_fit(lin, days, gamma_t, sigma=dgamma_t)
power = lin(np.arange(3,len(Nposi_all[date_start:date_end])-4), *p)
#plot fit
plt.plot(days, lin(days, *p), color='k', linestyle='dashed')
#This creates weekly lines
#now = dt.date(year=int(date_end[:4]), month = int(date_end[5:7]), day = int( date_end[8:] ))
#then = now + dt.timedelta(days=-(len(data_posi.loc[date_start:date_end].sum(axis=1)))+14 )
#for x in np.arange(0,165,7): #weeks
# plt.axvline( x, ls='--', color='grey', alpha=0.2)
# Plot monthly lines on first date
#d_now = dt.date(year=int(date_start[:4]), month = int(date_start[5:7]), day = int( date_start[8:] ))
#for i in range(len(data_posi.loc[date_start:date_end].sum(axis=1))):
# d_now = d_now + dt.timedelta(days=1)
# if d_now.day == 1:
# plt.axvline( i-12, ls='-', color='k', alpha=0.5)
#times = []; N_weeks = int(len(data_posi.loc[date_start:date_end].sum(axis=1))/14)
#for i in range(N_weeks):
# times.append(then + dt.timedelta(days=14*i))
#plt.xticks(ticks=[14*i for i in range(N_weeks)],
# labels=[str(time.year)+'-'+str(time.month)+'-'+str(time.day) for time in times], rotation=30 );
#Mark the three periods ------------------------------------------------------------------
period1 = (0, (dt.date(2020, 12, 15)-dt.date(2020, 9, 1)).days )
plt.axvspan(*period1, facecolor='b', alpha=0.2, label='Sep 1st to Dec 15th')
period2 = ( period1[1], period1[1] + (dt.date(2021, 1, 4)-dt.date(2020, 12, 15)).days )
plt.axvspan(*period2, facecolor='r', alpha=0.2, label='Dec 15th - Jan 4th')
period3 = ( period2[1], period2[1] + (dt.date(2021, 2, 15)-dt.date(2021, 1, 4)).days )
plt.axvspan(*period3, facecolor='g', alpha=0.2, label='Jan 4th - Feb 15th')
plt.legend()
plt.xlabel('Days from September 1st')
# Find a constant gamma for each period -------------------------------------------------
g1 = np.average(a=gamma_t[0:15], weights=dgamma_t[0:15])
g1_std = np.std(a=gamma_t[0:15])
plt.hlines(g1, *period1, color='b')
g2 = np.average(a=gamma_t[15:18], weights=dgamma_t[15:18])
g2_std = np.std(a=gamma_t[15:18])
plt.hlines(g2, *period2, color='r')
g3 = np.average(a=gamma_t[18:24], weights=dgamma_t[18:24])
g3_std = np.std(a=gamma_t[18:24])
plt.hlines(g3, *period3, color='g')
plt.show()
#plt.savefig('gamma_t_2.png', dpi=300)
```
Now we will plot the entire period, and mark the three periods.
```python
plt.figure(figsize=(20,10))
plt.errorbar(Nposi_all.index, Nposi_all[date_start:date_end],
yerr = eNposi_all[date_start:date_end], fmt='.', capsize=3, label='Positive Tests')
#includes date on x-label
ax = plt.gca()
temp = ax.xaxis.get_ticklabels()
temp = list(set(temp) - set(temp[::14]))
for label in temp:
label.set_visible(False)
for x in np.arange(0,len(Nposi_all[date_start:date_end]),14): #weeks
plt.axvline( x, ls='--', color='grey', alpha=0.5)
mean_arr = []
for i in np.arange(3,len(Nposi_all[date_start:date_end])-4):
mean_arr.append( np.mean(Ntest_all[date_start:date_end][i-3:i+4]) )
SP = Nposi_all[3:-4] * (Ntest_all[date_start:date_end][3:-4]/mean_arr)**(-np.asarray(power) )
plt.errorbar(SP.index, SP[date_start:date_end],
yerr = np.sqrt(SP[date_start:date_end]), fmt='.', capsize=3, label='Scaled Positives')
plt.ylabel(r'$N_{Pos} $')
plt.xticks(rotation = 40); plt.legend();
#Mark the three periods ------------------------------------------------------------------
period1 = (0, (dt.date(2020, 12, 15)-dt.date(2020, 9, 1)).days )
plt.axvspan(*period1, facecolor='b', alpha=0.2, label='Sep 1st to Dec 15th')
period2 = ( period1[1], period1[1] + (dt.date(2021, 1, 4)-dt.date(2020, 12, 15)).days )
plt.axvspan(*period2, facecolor='r', alpha=0.2, label='Dec 15th - Jan 4th')
period3 = ( period2[1], period2[1] + (dt.date(2021, 1, 31)-dt.date(2021, 1, 4)).days )
plt.axvspan(*period3, facecolor='g', alpha=0.2, label='Jan 4th - Jan 31st')
plt.show()
#plt.savefig('Scaled_pos.png', dpi=300)
```
# Weekly corrections with weighted chi2 mean
Now that we have determined gamma, we can look to determine the weekly corrections. We have gamma as a linear function, where the varying values of gamma are saved in the variable ``power``. Furthermore we have (from another script) that if we choose gamma to be constant, it is assigned the value 0.56 (it enters the scaling with a negative sign in the exponent, as in the ``Power`` definition above). I have also determined an average gamma for each period. We will start by looking into the last two options for the use of gamma.
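For reference, the inverse-variance (chi2) weighted average used below combines corrections $c_i$ with uncertainties $\sigma_i$ as
$$ \bar{c} = \frac{\sum_i c_i/\sigma_i^2}{\sum_i 1/\sigma_i^2}, \qquad \sigma_{\bar{c}} = \left( \sum_i 1/\sigma_i^2 \right)^{-1/2}, \qquad \chi^2 = \sum_i \frac{(c_i - \bar{c})^2}{\sigma_i^2} $$
with $N_{dof} = N - 1$ and the p-value taken as the upper tail of the $\chi^2$ distribution, as implemented in ``week_correction`` below.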
## With a value of gamma for each period.
First we need to define a function that can find the weekly corrections. The idea behind the weekly correction is that no single day is special. Say we want to correct a given day: we use the three days before and the three days after, find a weighted average of how many scaled positives we would expect on that day, ``N_exp``, and compare it to how many scaled positives were observed on that day, ``N_obs``. We can then find the correction as:
$$ corr = \frac{N_{obs}-N_{exp}}{N_{exp}} $$
When we implement it we should therefore multiply by $1-corr$.
If we do this for all days in each period (minus 3 on each side), we will get a table we can divide into the 7 weekdays. For all Mondays in the period we will have a list of corrections along with their uncertainties; we can then compute a weighted average of those to obtain a correction factor for each day. This correction can be applied to later data by multiplying with ``1-correction``, as in the sketch below.
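A minimal sketch of the application step (the arrays here are hypothetical illustrations, not outputs of this analysis):
```python
import numpy as np

corr = np.array([-0.02, 0.01, 0.05, 0.00, -0.01, 0.02, -0.03])  # weekday corrections, Mon..Sun
raw = np.array([1500, 1620, 1710, 1580, 1490, 1200, 1100])      # one week of raw scaled positives
weekday = np.arange(7)                                          # 0 = Monday, ..., 6 = Sunday
corrected = raw * (1 - corr[weekday])                           # multiply by 1 - correction
```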
As a sanity check we will plot the correction for each week on top of each other to see if they change significantly from one period to another.
```python
g3_std # we will use this for now as the error on g3
```
0.07657499206230552
```python
# Perform error propagation on SP_obs we need to use later (just functions I copied from my work in AppStat)
from sympy import*
def value_error_contribution_func_gen(expr, variables):
"""
expr = takes in a math expression in a string of type 'a+b'
var = takes in a tuple of variables strings, fx ('a', 'b')
"""
# Convert expression into a sympy expression
expr = parse_expr(expr)
# Define sympy symbols for the parameters (the tuple variables) and the standard deviations
var_symbols = symbols(variables)
err_symbols = symbols( tuple("sigma_" + k for k in variables) )
# Find expressions for each contributions
contributions = [expr.diff(var) ** 2 * err**2 for var, err in zip(var_symbols, err_symbols)]
# Convert contributions to numerical functions
f_contributions = [ lambdify(var_symbols + err_symbols, expression) for expression in contributions ]
# Find the error propagation expression to be evaluated, and display
expr_sig = sqrt( sum(contributions) )
display(expr_sig)
# Convert the expression for the value and the error into numerical functions
f_val = lambdify(var_symbols, expr)
f_err = lambdify(var_symbols + err_symbols, expr_sig)
def func(**kwargs):
"""
        Define a function that takes in keyword arguments **kwargs, which is a dictionary of type:
        {'a':(1,0.1), 'b':(2,0.3)}. kwargs.values() returns the tuples as one list [(1,0.1),(2,0.3)].
From there an array of variables and an array of errors can be extracted and the numerical
functions found above can be used.
"""
# Create tuple of values of variables
v = tuple(v[0] for v in kwargs.values())
# Create tuple of errors of variables
s = tuple(v[1] for v in kwargs.values())
# Calculate value and error
value, error = f_val(*v), f_err(*v, *s)
# Calculate contribution from each variable
contr_list = [ function(*v,*s) for function in f_contributions ]
#Return value and analytical error
return value, error, contr_list
# Return the main function that we set out to generate
return func
# Define function that gets variables from **kwargs and uses the function above to return value and error
def val_err_contr(expr, **kwargs):
"""
INPUT:
expr = takes in a math expression in a string of type 'a+b'
**kwargs = variable names = (value, error) of type a=(3, 0.3)
Note that if the relation depends on constant, type those in as variables with sigma = zero.
OUTPUT:
value = integer
error = integer
contributions = array_like with contributions from each variable in the same order as in the input
"""
return value_error_contribution_func_gen(expr, tuple(kwargs))(**kwargs)
# ERROR ON SP_OBS: P = P_all, T = T_all, m = T_mean, g = gamma
print('SP_obs error')
_, _, _ = val_err_contr('P * (T / m)**(-g)', P=(1,1), T=(1,1), m=(1,1), g=(1,1))
print('Frac corr error')
_, _, _ = val_err_contr('(SP_obs - SP_exp)/SP_exp', SP_obs=(1,1), SP_exp=(1,1) )
```
SP_obs error
$\displaystyle \sqrt{\frac{P^{2} g^{2} \sigma_{m}^{2} \left(\frac{T}{m}\right)^{- 2 g}}{m^{2}} + P^{2} \sigma_{g}^{2} \left(\frac{T}{m}\right)^{- 2 g} \log{\left(\frac{T}{m} \right)}^{2} + \frac{P^{2} g^{2} \sigma_{T}^{2} \left(\frac{T}{m}\right)^{- 2 g}}{T^{2}} + \sigma_{P}^{2} \left(\frac{T}{m}\right)^{- 2 g}}$
Frac corr error
$\displaystyle \sqrt{\sigma_{SP exp}^{2} \left(- \frac{1}{SP_{exp}} - \frac{- SP_{exp} + SP_{obs}}{SP_{exp}^{2}}\right)^{2} + \frac{\sigma_{SP obs}^{2}}{SP_{exp}^{2}}}$
These are the expressions, shown above, for the error on SP and for the error on the fractional correction for each day.
```python
def week_correction(dato_start, dato_end, gamma, egamma, region=None):
"""
    dato_start, dato_end = start day and end day of the period we will analyse. On the form 'year-month-date'.
Notice the corrections we will calculate will remove three days from each side
of this period to be able to compute the geometric mean
gamma = the power for the given period. Constant.
egamma = error on gamma
    region = list of regions to include in the data; if None, all are included.
"""
# Define a mask that includes all regions, if regions is not passed
if not region:
region = np.ones_like(data_posi.loc[dato_start:dato_end].sum(axis=1), dtype=bool)
# Extract data from the time period
P_all = data_posi.loc[dato_start:dato_end][region].sum(axis=1)
eP_all = np.sqrt(P_all)
T_all = data_test.loc[dato_start:dato_end][region].sum(axis=1)
eT_all = np.sqrt(T_all)
T_mean = T_all.mean()
    eT_mean = np.std(T_all) / np.sqrt(len(T_all)) # standard error on the mean: sigma/sqrt(n)
# Find observed scaled positives
SP_obs = P_all * (T_all/T_mean)**(-gamma)
eSP_obs = np.sqrt( (P_all**2 * gamma**2 * eT_mean**2 * (T_all/T_mean)**(-2*gamma))/T_mean**2 \
+ P_all**2 * egamma**2 * (T_all/T_mean)**(-2*gamma) * np.log(T_all/T_mean)**2 \
+ (P_all**2 * gamma**2 * eT_all**2 * (T_all/T_mean)**(-2*gamma))/T_all**2 \
+ eP_all**2 * (T_all/T_mean)**(-2*gamma) )
# Find expected scaled positives as the geometric mean of three days on each side
    SP_exp = np.zeros_like(SP_obs[6:]) # drop the six days we don't use (three on each side)
eSP_exp = np.zeros_like(SP_obs[6:])
n = 7 #number of days we compute the mean from = 2*3+1
for i in np.arange(3,len(SP_obs)-3):
# Geometric mean
SP_exp[i-3] = (np.prod(SP_obs[i-3:i+4]))**(1/n)
        # Error on geometric mean - from https://en.wikipedia.org/wiki/Geometric_standard_deviation,
        # using the log deviations from the geometric mean itself
        eSP_exp[i-3] = np.exp( np.sqrt( np.sum( np.log(SP_obs[i-3:i+4]/SP_exp[i-3])**2/n ) ) )
# Fractional corrections --------------------------------------------------------------
# Find number of weeks (rows in our matrix of size (weeks,7) ): +1 to make sure we have enough
nrows = len(SP_exp) // 7 + 1
# Create arrays with nan, so elements not filled in will not contribute to the weighted average
SP_obs_arr = np.empty(nrows * 7)
SP_obs_arr[:] = np.nan
eSP_obs_arr = np.empty(nrows * 7)
eSP_obs_arr[:] = np.nan
SP_exp_arr = np.empty(nrows * 7)
SP_exp_arr[:] = np.nan
eSP_exp_arr = np.empty(nrows * 7)
eSP_exp_arr[:] = np.nan
    # Find out which day of the week the first element is (should be dato_start + 3 days)
first_weekday = dt.date(int(dato_start[:4]), int(dato_start[5:7]), int(dato_start[8:10])) + dt.timedelta(days=3)
day_index = first_weekday.weekday() #this would be the index where to insert the first element
    # Fill SP_obs and SP_exp into the empty arrays so they have the right shape, i.e. if we start on a
    # Thursday the first 3 values will be np.nan
SP_obs_arr[day_index:day_index+len(SP_exp)] = SP_obs[3:-3]
eSP_obs_arr[day_index:day_index+len(SP_exp)] = eSP_obs[3:-3]
SP_exp_arr[day_index:day_index+len(SP_exp)] = SP_exp
eSP_exp_arr[day_index:day_index+len(SP_exp)] = eSP_exp
    # Let's reshape the array into matrices of the form week x day
SP_obs_mat = SP_obs_arr.reshape(nrows, 7)
eSP_obs_mat = eSP_obs_arr.reshape(nrows, 7)
SP_exp_mat = SP_exp_arr.reshape(nrows, 7)
eSP_exp_mat = eSP_exp_arr.reshape(nrows, 7)
# Find fractional correction for each weekday with error propagation obtain error too
corr = (SP_obs_mat - SP_exp_mat) / SP_exp_mat
ecorr = np.sqrt( eSP_exp_mat**2 * ( -1/SP_exp_mat-(-SP_exp_mat+SP_obs_mat)/SP_exp_mat**2)**2 \
+ eSP_obs_mat**2/SP_exp_mat**2 )
    # Let's display what we have got so far in an imshow plot; each column should be roughly the
    # same colour - if not, there are big deviations. We also show the errors on the corrections.
fig, ax = plt.subplots(ncols=2, figsize=(12,5))
im = ax[0].imshow( corr )
fig.colorbar(im, ax=ax[0])
ax[0].set_title('Corrections')
im2 = ax[1].imshow( ecorr )
fig.colorbar(im2, ax=ax[1])
ax[1].set_title('Errors on corr')
# Perform chi2 weighted average for each column ----------------------------------------
avg = np.nansum( (corr / ecorr**2) / np.nansum( 1 / ecorr**2, axis = 0 ), axis=0 )
eavg = np.sqrt( 1 / np.nansum(1 / ecorr**2, axis=0) )
# Find degrees of freedom (-1 )
N_dof = np.count_nonzero((~np.isnan(corr)), axis=0) - 1 #counts number of non nan elements in each column
# Calculate chi_square
chi2 = np.nansum( (corr - avg)**2 / ecorr**2, axis=0 )
# Calculate p-value (the integral of the chi2-distribution from chi2 to infinity)
p = stats.chi2.sf(chi2, N_dof)
# Find mean of the fractional correction for each weekday, ie along the axis 0 = rows
mean = np.nanmean( (SP_obs_mat - SP_exp_mat) / SP_exp_mat , axis=0) #monday, tuesday, wednesday etc.
n_days = np.count_nonzero((~np.isnan(SP_obs_mat)), axis=0) #counts number of non nan elements in each column
std = np.nanstd( (SP_obs_mat - SP_exp_mat) / SP_exp_mat , axis=0) / np.sqrt(n_days-1)
    print('Chi2:', chi2)
    print('P value:', p)
    print('Weighted chi2 avg:', avg)
    print('Plain mean we previously used as correction:', mean)
    return mean, std # should eventually return whichever method we deem best
```
```python
# Use on the first period
f1, e1 = week_correction(dato_start='2020-09-01', dato_end='2020-12-15', gamma=g1, egamma=g1_std)
# Use on the second period
f2, e2 = week_correction(dato_start='2020-12-15', dato_end='2021-01-04', gamma=g2, egamma=g2_std)
# Use on the third period
f3, e3 = week_correction(dato_start='2021-01-04', dato_end='2021-02-15', gamma=g3, egamma=g3_std)
```
```python
# print the corrections
f1, f2, f3 # the old corrections
```
(array([-0.01914086, 0.01214777, 0.04827257, 0.00350983, -0.0073752 ,
0.01953519, -0.03416093]),
array([ 0.00347335, -0.13051513, 0.06252077, -0.04405409, -0.02281831,
0.12672008, -0.05817421]),
array([ 0.06977242, 0.09790363, 0.03645711, -0.07185984, -0.11346795,
0.00791217, -0.00158532]))
```python
def week_corr_plotter(mean, std, ax, lbl=None, col='blue'):
ax.errorbar(np.arange(len(mean)), mean, yerr=std, fmt='.', capsize=10, label=lbl, color=col)
ax.legend()
ax.set_ylabel('Fractional Deviation')
ax.hlines(0, 0, 6, ls='--', color='k')
plt.xticks([0,1,2,3,4,5,6], ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'], rotation=25);
```
```python
fig, ax = plt.subplots(figsize=(12,8))
week_corr_plotter(f1, e1, ax,'Period 1', col='blue')
week_corr_plotter(f2, e2, ax,'Period 2', col='red')
week_corr_plotter(f3, e3, ax,'Period 3', col='green')
```
```python
# Use on the third period
f3, e3 = week_correction(dato_start='2021-01-04', dato_end='2021-02-15', gamma=g3, egamma=g3_std)
fig, ax = plt.subplots(figsize=(12,8))
week_corr_plotter(f3, e3, ax,'Weekly Corrections found from January 4th to February 15th', col='blue')
```
The quantities shown above (the weekly corrections that are printed and plotted) are the ones obtained with the plain mean. It looks like there is still something about the weighted average that needs to be looked at before we can use it.
# Old week correction function with plain mean (without chi2)
```python
def week_correction_old(dato_start, dato_end, gamma):
"""
    dato_start, dato_end = start day and end day of the period we will analyse. On the form 'year-month-date'.
Notice the corrections we will calculate will remove three days from each side
of this period to be able to compute the geometric mean
gamma = the power for the given period. Constant.
"""
# Extract data from the time period
P_all = data_posi.loc[dato_start:dato_end].sum(axis=1)
eP_all = np.sqrt(P_all)
T_all = data_test.loc[dato_start:dato_end].sum(axis=1)
eT_all = np.sqrt(T_all)
T_mean = T_all.mean()
# Find mean of tests performed, to use as T in the formula to find scaled positives SP
# The mean is found by looking at three days on each side
#mean_arr = []
#for i in np.arange(3, len(P_all)-4):
#mean_arr.append( np.mean(T_all[i-3:i+4]) )
# Find observed scaled positives
SP_obs = P_all * (T_all/T_mean)**(-gamma)
# Find expected scaled positives as the geometric mean of three days on each side
    SP_exp = np.zeros_like(SP_obs[6:]) # drop the six days we don't use (three on each side)
eSP_exp = np.zeros_like(SP_obs[6:])
n = 7 #number of days we compute the mean from = 2*3+1
for i in np.arange(3,len(SP_obs)-3):
# Geometric mean
SP_exp[i-3] = (np.prod(SP_obs[i-3:i+4]))**(1/n)
        # Error on geometric mean - from https://en.wikipedia.org/wiki/Geometric_standard_deviation,
        # using the log deviations from the geometric mean itself
        eSP_exp[i-3] = np.exp( np.sqrt( np.sum( np.log(SP_obs[i-3:i+4]/SP_exp[i-3])**2/n ) ) )
# Fractional corrections --------------------------------------------------------------
# Find number of weeks (rows in our matrix of size (weeks,7) ): +1 to make sure we have enough
nrows = len(SP_exp) // 7 + 1
# Create two arrays with nan, so elements not filled in will not contribute to the weighted average
SP_obs_arr = np.empty(nrows * 7)
SP_obs_arr[:] = np.nan
SP_exp_arr = np.empty(nrows * 7)
SP_exp_arr[:] = np.nan
    # Find out which day of the week the first element is (should be dato_start + 3 days)
first_weekday = dt.date(int(dato_start[:4]), int(dato_start[5:7]), int(dato_start[8:10])) + dt.timedelta(days=3)
day_index = first_weekday.weekday() #this would be the index where to insert the first element
    # Fill SP_obs and SP_exp into the empty arrays so they have the right shape, i.e. if we start on a
    # Thursday the first 3 values will be np.nan
SP_obs_arr[day_index:day_index+len(SP_exp)] = SP_obs[3:-3]
SP_exp_arr[day_index:day_index+len(SP_exp)] = SP_exp
    # Let's reshape the array into matrices of the form week x day
SP_obs_mat = SP_obs_arr.reshape(nrows, 7)
SP_exp_mat = SP_exp_arr.reshape(nrows, 7)
    # Let's display what we have got so far in an imshow plot; each column should be roughly the
    # same colour - if not, there are big deviations.
fig, ax = plt.subplots(figsize=(5,5))
im = ax.imshow( (SP_obs_mat - SP_exp_mat) / SP_exp_mat )
fig.colorbar(im)
# Find mean of the fractional correction for each weekday, ie along the axis 0 = rows
mean = np.nanmean( (SP_obs_mat - SP_exp_mat) / SP_exp_mat , axis=0) #monday, tuesday, wednesday etc.
n_days = np.count_nonzero((~np.isnan(SP_obs_mat)), axis=0) #counts number of non nan elements in each column
std = np.nanstd( (SP_obs_mat - SP_exp_mat) / SP_exp_mat , axis=0) / np.sqrt(n_days-1)
return mean, std
```
# Weekly corrections: countryside versus cities versus the capital
We have an idea that the weekly corrections may depend on which region one is in, since behavioural patterns may differ from region to region. We therefore first try to split the data, so that we can apply our weekly_corrections function to each category separately.
```python
#Split data into Capital region, City regions and country regions
# Capital region defined as Storkøbenhavn, seen in:
# http://smilingcopenhagen.dk/turist?City=All&area=Greater_Copenhagen&Go=Capital_Region_of_Denmark&Art=0&Visit=Denmark&Re=Europe&Book=area
capital = ["Copenhagen", "Frederiksberg", "Gentofte", "Dragør", "Ballerup", "Gladsaxe", "Herlev", "Hvidovre" \
, "Brøndby", "Tårnby", "Vallensbæk", "Rødovre", "Glostrup","Høje-Taastrup", "Ishøj","Albertslund"]
city = ["Aarhus", "Esbjerg", "Odense", "Aalborg"]
country = ["Kalundborg", "Middelfart", "Allerød", "Helsingør", "Hillerød", "Rudersdal", "Næstved" \
, "Favrskov", "Frederikssund", "Furesø", "Holbæk", "Kolding","Silkeborg","Skanderborg","Vejle" \
,"Aabenraa", "Faxe","Fredericia","Faaborg-Midtfyn", "Guldborgsund","Haderslev","Halsnæs" \
, "Herning","Horsens", "Hørsholm","Lolland","Lyngby-Taarbæk","Nordfyns","Nyborg","Randers" \
,"Skive","Slagelse","Sorø", "Svendborg","Syddjurs","Sønderborg","Vejen","Viborg","Vordingborg" \
,"Billund", "Gribskov","Holstebro","Kerteminde","Mariagerfjord","Norddjurs","Odder" \
, "Odsherred","Ringkøbing-Skjern","Morsø","Thisted","Tønder", "Assens","Fredensborg" \
,"Lemvig","Bornholm","Ikast-Brande","Langeland", "Ringsted","Struer","Hedensted","Varde" \
,"Ærø","Fanø","Samsø", "Roskilde","Solrød", "Greve","Hjørring","Køge","Lejre", "Stevns" \
,"Frederikshavn", "Rebild","Vesthimmerlands", "Jammerbugt", "Brønderslev", "Læsø", "Nordfyns"\
,"Egedal"]
# Check we have used all regions
antal_kommuner = len(pd.read_csv(filename+'/Municipality_test_pos.csv', sep=';', thousands='.', index_col=0)['Kommune_(navn)'])
print('Missing', len(capital)+len(city)+len(country) - antal_kommuner, 'municipalities')
```
Missing 0 municipalities
```python
# Use on the third period (for all three regions)
f_cap, e_cap = week_correction(dato_start='2021-01-04', dato_end='2021-02-15', gamma=g3, egamma=g3_std, region=capital)
f_city, e_city = week_correction(dato_start='2021-01-04', dato_end='2021-02-15', gamma=g3, egamma=g3_std, region=city)
f_con, e_con = week_correction(dato_start='2021-01-04', dato_end='2021-02-15', gamma=g3, egamma=g3_std, region=country)
# Lets visualize it
fig, ax = plt.subplots(figsize=(12,8))
week_corr_plotter(f_cap, e_cap, ax,'Weekly Corrections from Jan 4th to Feb 15th (capital)', col='blue')
week_corr_plotter(f_city, e_city, ax,'Weekly Corrections from Jan 4th to Feb 15th (city)', col='green')
week_corr_plotter(f_con, e_con, ax,'Weekly Corrections from Jan 4th to Feb 15th (country)', col='red')
```
Here we have applied the correction function to the last period for the cities, Greater Copenhagen and the countryside respectively. Note that the corrections plotted are the result of a plain mean (and thus not the weighted average, since we have not yet decided which method is best). It looks like the city category is not homogeneous: looking at the columns there is a larger spread in colours, and the chi2 is generally a factor of 10 higher than for Greater Copenhagen and the countryside respectively. This is probably because only four municipalities are included, which are spread geographically across Denmark and therefore do not necessarily have much to do with each other. A municipality like Esbjerg, for example, also covers much more than the city of Esbjerg and may therefore capture several different behaviours. Offhand, I think the best split is Copenhagen versus the rest of Denmark.
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- The basic code for equational reasoning with a single relation
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
open import Relation.Binary
module Relation.Binary.Reasoning.Base.Single
{a ℓ} {A : Set a} (_∼_ : Rel A ℓ)
(refl : Reflexive _∼_) (trans : Transitive _∼_)
where
open import Level using (_⊔_)
open import Relation.Binary.PropositionalEquality as P using (_≡_)
infix 4 _IsRelatedTo_
infix 3 _∎
infixr 2 _∼⟨_⟩_ _≡⟨_⟩_ _≡˘⟨_⟩_ _≡⟨⟩_
infix 1 begin_
-- This seemingly unnecessary type is used to make it possible to
-- infer arguments even if the underlying equality evaluates.
data _IsRelatedTo_ (x y : A) : Set (a ⊔ ℓ) where
relTo : (x∼y : x ∼ y) → x IsRelatedTo y
begin_ : ∀ {x y} → x IsRelatedTo y → x ∼ y
begin relTo x∼y = x∼y
_∼⟨_⟩_ : ∀ x {y z} → x ∼ y → y IsRelatedTo z → x IsRelatedTo z
_ ∼⟨ x∼y ⟩ relTo y∼z = relTo (trans x∼y y∼z)
_≡⟨_⟩_ : ∀ x {y z} → x ≡ y → y IsRelatedTo z → x IsRelatedTo z
_ ≡⟨ P.refl ⟩ x∼z = x∼z
_≡˘⟨_⟩_ : ∀ x {y z} → y ≡ x → y IsRelatedTo z → x IsRelatedTo z
_ ≡˘⟨ P.refl ⟩ x∼z = x∼z
_≡⟨⟩_ : ∀ x {y} → x IsRelatedTo y → x IsRelatedTo y
_ ≡⟨⟩ x∼y = _ ≡⟨ P.refl ⟩ x∼y
_∎ : ∀ x → x IsRelatedTo x
_∎ _ = relTo refl
|
[STATEMENT]
lemma nth_swap: "i < length xs \<Longrightarrow> j < length xs \<Longrightarrow>
swap xs i j ! k = (if k = i then xs ! j else if k = j then xs ! i else xs ! k)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>i < length xs; j < length xs\<rbrakk> \<Longrightarrow> swap xs i j ! k = (if k = i then xs ! j else if k = j then xs ! i else xs ! k)
[PROOF STEP]
by (auto simp: swap_def nth_list_update) |
{-# OPTIONS --safe #-}
module Cubical.Algebra.GradedRing.DirectSumHIT where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Data.Sigma
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.AbGroup
open import Cubical.Algebra.AbGroup.Instances.DirectSumHIT
open import Cubical.Algebra.DirectSum.DirectSumHIT.Base
open import Cubical.Algebra.Ring
open import Cubical.Algebra.CommRing
private variable
ℓ ℓ' : Level
-----------------------------------------------------------------------------
-- Def, notation, lemma
module GradedRing-⊕HIT-index
(IdM@(Idx , IdxStr) : Monoid ℓ)
(G : (n : Idx) → Type ℓ')
(Gstr : (n : Idx) → AbGroupStr (G n))
where
⊕G-AbGroup = ⊕HIT-AbGr Idx G Gstr
⊕G = ⊕HIT Idx G Gstr
open MonoidStr IdxStr renaming (is-set to isSetIdx)
open AbGroupStr (snd ⊕G-AbGroup)
renaming
( 0g to 0⊕
; _+_ to _+⊕_
; -_ to -⊕_
; +Assoc to +⊕Assoc
; +IdR to +⊕IdR
; +IdL to +⊕IdL
; +InvR to +⊕InvR
; +InvL to +⊕InvL
; +Comm to +⊕Comm
; is-set to isSet⊕G )
open AbGroupTheory ⊕G-AbGroup
open AbGroupStr
renaming
( +Assoc to +Assoc
; +IdR to +IdR
; +IdL to +IdL
; +InvR to +InvR
; +InvL to +InvL
; +Comm to +Comm
; is-set to isSetG )
module GradedRing-⊕HIT-⋆
(1⋆ : G ε)
(_⋆_ : {k l : Idx} → G k → G l → G (k · l))
(0-⋆ : {k l : Idx} → (b : G l) → (0g (Gstr k)) ⋆ b ≡ 0g (Gstr (k · l)))
(⋆-0 : {k l : Idx} → (a : G k) → a ⋆ (0g (Gstr l)) ≡ 0g (Gstr (k · l)))
(⋆Assoc : {k l m : Idx} → (a : G k) → (b : G l) → (c : G m) →
_≡_ {A = Σ[ k ∈ Idx ] G k} ((k · (l · m)) , (a ⋆ (b ⋆ c))) (((k · l) · m) , ((a ⋆ b) ⋆ c)))
(⋆IdR : {k : Idx} → (a : G k) → _≡_ {A = Σ[ k ∈ Idx ] G k} ( k · ε , a ⋆ 1⋆ ) (k , a))
(⋆IdL : {l : Idx} → (b : G l) → _≡_ {A = Σ[ k ∈ Idx ] G k} ( ε · l , 1⋆ ⋆ b ) (l , b))
(⋆DistR+ : {k l : Idx} → (a : G k) → (b c : G l) →
a ⋆ ((Gstr l) ._+_ b c) ≡ Gstr (k · l) ._+_ (a ⋆ b) (a ⋆ c))
(⋆DistL+ : {k l : Idx} → (a b : G k) → (c : G l) →
((Gstr k) ._+_ a b) ⋆ c ≡ Gstr (k · l) ._+_ (a ⋆ c) (b ⋆ c))
where
-----------------------------------------------------------------------------
-- Ring Properties
_prod_ : ⊕G → ⊕G → ⊕G
_prod_ = DS-Rec-Set.f _ _ _ _ (isSetΠ λ _ → isSet⊕G)
(λ _ → 0⊕)
(λ k a → DS-Rec-Set.f _ _ _ _ isSet⊕G
0⊕
(λ l b → base (k · l) (a ⋆ b))
_+⊕_
+⊕Assoc
+⊕IdR
+⊕Comm
(λ l → cong (base (k · l)) (⋆-0 a) ∙ base-neutral _)
λ l b c → base-add _ _ _ ∙ cong (base (k · l)) (sym (⋆DistR+ _ _ _)))
(λ xs ys y → (xs y) +⊕ (ys y))
(λ xs ys zs i y → +⊕Assoc (xs y) (ys y) (zs y) i)
(λ xs i y → +⊕IdR (xs y) i)
(λ xs ys i y → +⊕Comm (xs y) (ys y) i)
(λ k → funExt (DS-Ind-Prop.f _ _ _ _ (λ _ → isSet⊕G _ _)
refl
(λ l b → cong (base (k · l)) (0-⋆ _) ∙ base-neutral _)
λ {U V} ind-U ind-V → cong₂ _+⊕_ ind-U ind-V ∙ +⊕IdR _))
λ k a b → funExt (DS-Ind-Prop.f _ _ _ _ (λ _ → isSet⊕G _ _)
(+⊕IdR _)
(λ l c → base-add _ _ _ ∙ cong (base (k · l)) (sym (⋆DistL+ _ _ _)))
(λ {U V} ind-U ind-V → comm-4 _ _ _ _ ∙ cong₂ _+⊕_ ind-U ind-V))
1⊕ : ⊕G
1⊕ = base ε 1⋆
prodAssoc : (x y z : ⊕G) → x prod (y prod z) ≡ (x prod y) prod z
prodAssoc = DS-Ind-Prop.f _ _ _ _ (λ _ → isPropΠ2 λ _ _ → isSet⊕G _ _)
(λ _ _ → refl)
(λ k a → DS-Ind-Prop.f _ _ _ _ (λ _ → isPropΠ (λ _ → isSet⊕G _ _))
(λ z → refl)
(λ l b → DS-Ind-Prop.f _ _ _ _ (λ _ → isSet⊕G _ _)
refl
(λ m c → cong₂ base (cong fst (⋆Assoc _ _ _)) (cong snd (⋆Assoc _ _ _)))
λ {U V} ind-U ind-V → cong₂ _+⊕_ ind-U ind-V)
λ {U V} ind-U ind-V z → cong₂ _+⊕_ (ind-U z) (ind-V z))
λ {U V} ind-U ind-V y z → cong₂ _+⊕_ (ind-U y z) (ind-V y z)
prodIdR : (x : ⊕G) → x prod 1⊕ ≡ x
prodIdR = DS-Ind-Prop.f _ _ _ _ (λ _ → isSet⊕G _ _)
refl
(λ k a → cong₂ base (cong fst (⋆IdR _)) (cong snd (⋆IdR _)) )
λ {U V} ind-U ind-V → (cong₂ _+⊕_ ind-U ind-V)
prodIdL : (y : ⊕G) → 1⊕ prod y ≡ y
prodIdL = DS-Ind-Prop.f _ _ _ _ (λ _ → isSet⊕G _ _)
refl
(λ l b → cong₂ base (cong fst (⋆IdL _)) (cong snd (⋆IdL _)) )
λ {U V} ind-U ind-V → (cong₂ _+⊕_ ind-U ind-V)
prodDistR+ : (x y z : ⊕G) → x prod (y +⊕ z) ≡ (x prod y) +⊕ (x prod z)
prodDistR+ = DS-Ind-Prop.f _ _ _ _ (λ _ → isPropΠ2 (λ _ _ → isSet⊕G _ _))
(λ _ _ → sym (+⊕IdR _))
(λ k a y z → refl)
λ {U V} ind-U ind-V y z → cong₂ _+⊕_ (ind-U y z) (ind-V y z) ∙ comm-4 _ _ _ _
prodDistL+ : (x y z : ⊕G) → (x +⊕ y) prod z ≡ (x prod z) +⊕ (y prod z)
prodDistL+ = λ x y z → refl
-----------------------------------------------------------------------------
-- Ring Instances
⊕HITgradedRing-Ring : Ring (ℓ-max ℓ ℓ')
fst ⊕HITgradedRing-Ring = ⊕G
RingStr.0r (snd ⊕HITgradedRing-Ring) = 0⊕
RingStr.1r (snd ⊕HITgradedRing-Ring) = 1⊕
RingStr._+_ (snd ⊕HITgradedRing-Ring) = _+⊕_
RingStr._·_ (snd ⊕HITgradedRing-Ring) = _prod_
RingStr.- snd ⊕HITgradedRing-Ring = -⊕_
RingStr.isRing (snd ⊕HITgradedRing-Ring) = makeIsRing isSet⊕G
+⊕Assoc +⊕IdR +⊕InvR +⊕Comm
prodAssoc prodIdR prodIdL prodDistR+ prodDistL+
-----------------------------------------------------------------------------
-- CommRing extension
module ExtensionCommRing
(⋆Comm : {k l : Idx} → (a : G k) → (b : G l) →
_≡_ {A = Σ[ k ∈ Idx ] G k} ((k · l) , (a ⋆ b)) ((l · k) , (b ⋆ a)))
where
open RingTheory ⊕HITgradedRing-Ring
prodComm : (x y : ⊕G) → x prod y ≡ y prod x
prodComm = DS-Ind-Prop.f _ _ _ _ (λ _ → isPropΠ (λ _ → isSet⊕G _ _))
(λ y → sym (0RightAnnihilates y))
(λ k a → DS-Ind-Prop.f _ _ _ _ (λ _ → isSet⊕G _ _)
refl
(λ l b → cong₂ base (cong fst (⋆Comm _ _)) (cong snd (⋆Comm _ _)))
λ {U V} ind-U ind-V → cong₂ _+⊕_ ind-U ind-V)
λ {U V} ind-U ind-V Q → ((cong₂ _+⊕_ (ind-U Q) (ind-V Q)) ∙ sym (prodDistR+ Q U V))
⊕HITgradedRing-CommRing : CommRing (ℓ-max ℓ ℓ')
⊕HITgradedRing-CommRing = Ring→CommRing ⊕HITgradedRing-Ring prodComm
|
/- 30 Aug 2019 -/
-- degree
-- incidence matrix
-- adjacency matrix
/-
## Definitions:
* A sequence of nonnegative integers is called `graphic` if it is the degree
sequence of a simple graph.
how does one write dn where n is a subscript?
Havel-Hakimi Theorem: Let d_1 ≥ d_2 ≥ ... ≥ d_n ≥ 0 be a (finite) sequence of
nonnegative integers. The sequence is graphic iff the sequence
d_2 - 1, ... , d_(t + 1) - 1, d_(t + 2), ... , d_n, where t = d_1, is graphic.
Let 0 ≤ d_1 ≤ d_2 ≤ ... ≤ d_n be a (finite) sequence of
nonnegative integers. The sequence is graphic iff the sequence
d_2 - 1, ... , d_(t + 1) - 1, d_(t + 2), ... , d_n, where t = d_1 is graphic.
-/
import data.list.sort
import combinatorics.simple_graph.basic
import data.multiset.sort
universe u
variables (V : Type u) [fintype V]
-- what type should i use?
-- `list.sorted` or `list.pairwise`
-- i think i can just use nat since that includes zero
-- oh god i need some kind of counter? or index
-- copy over the sequence except erase largest element and
-- subtract one from the n next largest elements
def sub_one_n_times' (n : ℕ) (l : list ℕ) : list ℕ :=
(l.take n).map (nat.pred) ++ l.drop n
-- this one works i think, but ordering does matter
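-- For illustration: one Havel-Hakimi step on the nonincreasing degree sequence
-- [3, 3, 2, 2] takes t = d_1 = 3 and decrements the next t entries of the tail:
#eval sub_one_n_times' 3 [3, 2, 2] -- [2, 1, 1]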
/-def list.pos_filter (l : list ℕ) : list ℕ := l.filter (λ n, 0 < n)
-- this probably already exists, just don't feel like looking it up
def n_pos_list_check (n : ℕ) (l : list ℕ) : Prop := n ≤ l.pos_filter.length-/
-- def nth_is_pos (n : ℕ) (l : list ℕ) [l.sorted (≤)] : Prop := 0 < (l.nth n)
-- bad
def sub_one_n_times (n : ℕ) (l : list ℕ) (h : l.sorted (≥)) : option (list ℕ) :=
if n ≤ (l.filter (λ n, 0 < n)).length then some (sub_one_n_times' n l) else none
def havel_hakimi' (l : list ℕ) (h : l.sorted (≥)) : option (list ℕ) :=
if (l.filter (λ n, 0 < n)) = [] then some [] else sub_one_n_times l.head l.tail h.tail
-- you can't get the empty list out of applying sub_one_n_times and removing the largest degree repeatedly, so when
-- you get the empty list, you're done
-- is there another way of doing it? is there something else i can return
-- also need to re-sort
def havel_hakimi_step (l : list ℕ) (h : l.sorted (≥)) : multiset ℕ := sub_one_n_times' l.head l.tail
-- ideas for degree sequence
-- multiset of vertices, take the image
-- `multiset.sort` to get sorted list
variables {V}
def simple_graph.degree_multiset (G : simple_graph V) [decidable_rel G.adj] : multiset ℕ := finset.univ.val.map (λ v, G.degree v)
def simple_graph.degree_sequence (G : simple_graph V) [decidable_rel G.adj] : list ℕ := G.degree_multiset.sort (≥)
-- test out definition - good for algebraic graph theory? - look through lecture notes
--variables (l : list ℕ) [l.sorted (≥)]
-- in pseudocode,
-- a multiset ℕ is graphic if it is the degree sequence of some graph `G`
def graphic' (s : multiset ℕ) : Prop := ∃ (G : simple_graph V) [decidable_rel G.adj], by exactI s = G.degree_multiset
-- a sorted list is graphic if blah blah
def graphic (l : list ℕ) : Prop := ∃ (n : ℕ) (G : simple_graph $ fin n) [decidable_rel G.adj], by exactI l = G.degree_sequence
-- theorem statement from wikipedia:
/-
Let `S = (d_{1},\dots ,d_{n})` be a finite list of nonnegative integers that is nonincreasing.
List `S` is graphic if and only if the finite list `S' = (d_{2}-1,d_{3}-1,\dots ,d_{{d_{1}+1}}-1,d_{{d_{1}+2}},\dots ,d_{n})`
has nonnegative integers and is graphic.
-/
variables (S : list ℕ) (h : S.sorted (≥))
def simple_graph.degree' (G : simple_graph V) [decidable_rel G.adj] : V → ℕ := λ v, G.degree v
theorem havel_hakimi_A : graphic S → (S.head ≤ (S.filter (λ n, 0 < n)).length) ∧ graphic ((havel_hakimi_step S h).sort (≥)) :=
begin
intros h2,
split,
{ -- this is just the fact that S.head is largest degree, so the vertex with that degree is adjacent
-- to S.head many vertices, which then means that they have degree at least 1
rcases h2 with ⟨n, G, hdec, hds⟩,
have h3 : S.head = (@simple_graph.degree_sequence (fin n) _ G hdec).head,
exact congr_arg list.head hds,
let d1 := (@simple_graph.degree_sequence (fin n) _ G hdec).head,
-- let v1 := simple_graph.degree_multiset⁻¹ G d1, -- how to get to the preimage of the map in degree_multiset
sorry },
{ -- the proof here is that performing the algorithm step is allowed because you can do the edge swap
sorry },
end
lemma havel_hakimi_B : (S.head ≤ (S.filter (λ n, 0 < n)).length) ∧ graphic ((havel_hakimi_step S h).sort (≥)) → graphic S :=
begin
intros h2,
rcases h2 with ⟨hnneg, n, G, hdec, hds⟩,
sorry,
end
theorem havel_hakimi : graphic S ↔ (S.head ≤ (S.filter (λ n, 0 < n)).length) ∧ graphic ((havel_hakimi_step S h).sort (≥)) :=
⟨havel_hakimi_A S h, havel_hakimi_B S h⟩
variables (G : simple_graph V) [decidable_eq V] (v w x y : V)
variables (h1 : G.adj v w) (h2 : G.adj x y) (hn1 : ¬ G.adj v x) (hn2 : ¬ G.adj w y)
def new_graph : simple_graph V :=
{ adj := λ a b, if (((a = v) ∧ (b = w)) ∨ ((a = v) ∧ (b = x)) ∨ (((a = w) ∧ (b = y)) ∨ ((a = x) ∧ (b = y)))) then ¬ G.adj a b
else G.adj a b,
-- there's gotta be a better way of doing this
sym := λ a b,
begin
simp,
intros h,
sorry,
end,
loopless := sorry, }
/-def new_graph : simple_graph V :=
{ adj := λ a b, if ((a ≠ v) ∧ (a ≠ w)) ∨ ((b ≠ x) ∧ (b ≠ y)) then G.adj a b
else ¬ G.adj a b,
-- there's gotta be a better way of doing this
sym := λ a b,
begin
simp,
intros h,
end,
loopless := _ }-/
-- okay shit this is gonna be annoying
-- going to need to show that the max degree is le the number of remaining vertices
-- sequence D is graphic if ∃ (G : simple_graph V), D is deg seq for G
-- for proof, need to define swapping edge algo
-- BUT FIRST we need to define edge deletion lmao |
The upgrade proposal proved to be very unpopular with North Kingstown residents who lived on the affected local roads . Additionally , RIDOT laid the highway out so that Route 4 would cross through wetlands in the area . This sparked environmental concerns , as one of the large wetlands that would be affected , Froberg 's Marsh , was deemed to be of high value by Rhode Island environmentalists . Despite local and environmental concerns , RIDOT still considers the Route 4 upgrade to be the safest way to improve traffic flow in the region . While the Department of Transportation considered upgrading nearby Route 2 to freeway standards as a potential alternative , this plan was ultimately rejected because of its effects on wells in the area . Although the project was originally scheduled to be completed by 2007 , the $ 55 million project has been postponed indefinitely .
|
Formal statement is: lemma contour_integrable_continuous_circlepath: "continuous_on (path_image (circlepath z r)) f \<Longrightarrow> f contour_integrable_on (circlepath z r)" Informal statement is: If $f$ is continuous on the image of the circle path $z$ with radius $r$, then $f$ is contour integrable on the circle path $z$ with radius $r$. |
/-
Copyright (c) 2019 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.category_theory.endomorphism
import Mathlib.category_theory.category.Cat
import Mathlib.algebra.category.Mon.basic
import Mathlib.PostPort
universes u v w u_1
namespace Mathlib
/-!
# Single-object category
Single object category with a given monoid of endomorphisms. It is defined to facilitate transferring
some definitions and lemmas (e.g., conjugacy etc.) from category theory to monoids and groups.
## Main definitions
Given a type `α` with a monoid structure, `single_obj α` is `unit` type with `category` structure
such that `End (single_obj α).star` is the monoid `α`. This can be extended to a functor `Mon ⥤
Cat`.
If `α` is a group, then `single_obj α` is a groupoid.
An element `x : α` can be reinterpreted as an element of `End (single_obj.star α)` using
`single_obj.to_End`.
## Implementation notes
- `category_struct.comp` on `End (single_obj.star α)` is `flip (*)`, not `(*)`. This way
multiplication on `End` agrees with the multiplication on `α`.
- By default, Lean puts instances into `category_theory` namespace instead of
`category_theory.single_obj`, so we give all names explicitly.
-/
namespace category_theory
/-- Type tag on `unit` used to define single-object categories and groupoids. -/
def single_obj (α : Type u) :=
Unit
namespace single_obj
/-- One and `flip (*)` become `id` and `comp` for morphisms of the single object category. -/
protected instance category_struct (α : Type u) [HasOne α] [Mul α] : category_struct (single_obj α) :=
category_struct.mk (fun (_x : single_obj α) => 1)
fun (_x _x_1 _x_2 : single_obj α) (x : _x ⟶ _x_1) (y : _x_1 ⟶ _x_2) => y * x
/-- Monoid laws become category laws for the single object category. -/
protected instance category (α : Type u) [monoid α] : category (single_obj α) :=
category.mk
/--
Groupoid structure on `single_obj α`.
See https://stacks.math.columbia.edu/tag/0019.
-/
protected instance groupoid (α : Type u) [group α] : groupoid (single_obj α) :=
groupoid.mk fun (_x _x_1 : single_obj α) (x : _x ⟶ _x_1) => x⁻¹
/-- The single object in `single_obj α`. -/
protected def star (α : Type u) : single_obj α :=
Unit.unit
/-- The endomorphisms monoid of the only object in `single_obj α` is equivalent to the original
monoid α. -/
def to_End (α : Type u) [monoid α] : α ≃* End (single_obj.star α) :=
mul_equiv.mk (equiv.to_fun (equiv.refl α)) (equiv.inv_fun (equiv.refl α)) sorry sorry sorry
theorem to_End_def (α : Type u) [monoid α] (x : α) : coe_fn (to_End α) x = x :=
rfl
/-- There is a 1-1 correspondence between monoid homomorphisms `α → β` and functors between the
corresponding single-object categories. It means that `single_obj` is a fully faithful
functor.
See https://stacks.math.columbia.edu/tag/001F --
although we do not characterize when the functor is full or faithful.
-/
def map_hom (α : Type u) (β : Type v) [monoid α] [monoid β] : (α →* β) ≃ single_obj α ⥤ single_obj β :=
equiv.mk (fun (f : α →* β) => functor.mk id fun (_x _x : single_obj α) => ⇑f)
(fun (f : single_obj α ⥤ single_obj β) => monoid_hom.mk (functor.map f) sorry sorry) sorry sorry
theorem map_hom_id (α : Type u) [monoid α] : coe_fn (map_hom α α) (monoid_hom.id α) = 𝟭 :=
rfl
theorem map_hom_comp {α : Type u} {β : Type v} [monoid α] [monoid β] (f : α →* β) {γ : Type w} [monoid γ] (g : β →* γ) : coe_fn (map_hom α γ) (monoid_hom.comp g f) = coe_fn (map_hom α β) f ⋙ coe_fn (map_hom β γ) g :=
rfl
end single_obj
end category_theory
namespace monoid_hom
/-- Reinterpret a monoid homomorphism `f : α → β` as a functor `(single_obj α) ⥤ (single_obj β)`.
See also `category_theory.single_obj.map_hom` for an equivalence between these types. -/
def to_functor {α : Type u} {β : Type v} [monoid α] [monoid β] (f : α →* β) : category_theory.single_obj α ⥤ category_theory.single_obj β :=
coe_fn (category_theory.single_obj.map_hom α β) f
@[simp] theorem id_to_functor (α : Type u) [monoid α] : to_functor (id α) = 𝟭 :=
rfl
@[simp] theorem comp_to_functor {α : Type u} {β : Type v} [monoid α] [monoid β] (f : α →* β) {γ : Type w} [monoid γ] (g : β →* γ) : to_functor (comp g f) = to_functor f ⋙ to_functor g :=
rfl
end monoid_hom
namespace units
/--
The units in a monoid are (multiplicatively) equivalent to
the automorphisms of `star` when we think of the monoid as a single-object category. -/
def to_Aut (α : Type u) [monoid α] : units α ≃* category_theory.Aut (category_theory.single_obj.star α) :=
mul_equiv.trans (map_equiv (category_theory.single_obj.to_End α))
(category_theory.Aut.units_End_equiv_Aut (category_theory.single_obj.star α))
@[simp] theorem to_Aut_hom (α : Type u) [monoid α] (x : units α) : category_theory.iso.hom (coe_fn (to_Aut α) x) = coe_fn (category_theory.single_obj.to_End α) ↑x :=
rfl
@[simp] theorem to_Aut_inv (α : Type u) [monoid α] (x : units α) : category_theory.iso.inv (coe_fn (to_Aut α) x) = coe_fn (category_theory.single_obj.to_End α) ↑(x⁻¹) :=
rfl
end units
namespace Mon
/-- The fully faithful functor from `Mon` to `Cat`. -/
def to_Cat : Mon ⥤ category_theory.Cat :=
category_theory.functor.mk (fun (x : Mon) => category_theory.Cat.of (category_theory.single_obj ↥x))
fun (x y : Mon) (f : x ⟶ y) => coe_fn (category_theory.single_obj.map_hom ↥x ↥y) f
protected instance to_Cat_full : category_theory.full to_Cat :=
category_theory.full.mk fun (x y : Mon) => equiv.inv_fun (category_theory.single_obj.map_hom ↥x ↥y)
protected instance to_Cat_faithful : category_theory.faithful to_Cat :=
category_theory.faithful.mk
|
library(dslabs)
data('murders')
# 1. We made a plot of total murders versus population and noted a strong relationship. Not surprisingly, states with larger populations had more murders. Transform the variables using the log10 transformation and then plot them.
pop_in_millions <- murders$population / 10^6
total <- murders$total
plot(log10(pop_in_millions), log10(total))
# 2. Create a histogram of the state populations.
x <- with(murders, total / population * 10^6)
hist(x)
# 3. Generate boxplots of the state populations by region.
murders$rate <- with(murders, total / population * 100000)
boxplot(rate~region, data = murders)
|
lemma [trans]: "X = Y \<Longrightarrow> Y \<longlonglongrightarrow> z \<Longrightarrow> X \<longlonglongrightarrow> z" |
(*
Author: Wenda Li <[email protected] / [email protected]>
*)
section \<open>Some examples of applying the method winding\_eval\<close>
theory Winding_Number_Eval_Examples imports Winding_Number_Eval
begin
lemma example1:
assumes "R>1"
shows "winding_number (part_circlepath 0 R 0 pi +++ linepath (-R) R) \<i> = 1"
proof (eval_winding,simp_all)
define CR where "CR \<equiv>part_circlepath 0 R 0 pi"
define L where "L\<equiv> linepath (- (complex_of_real R)) R"
show "\<i> \<notin> path_image CR" unfolding CR_def using \<open>R>1\<close>
by (intro not_on_circlepathI,auto)
show *:"\<i> \<notin> closed_segment (- (of_real R)) R" using \<open>R>1\<close> complex_eq_iff
by (intro not_on_closed_segmentI,auto)
from cindex_pathE_linepath[OF this] have "cindex_pathE L \<i> = -1"
unfolding L_def using \<open>R>1\<close> by auto
moreover have "cindex_pathE CR \<i> = -1"
unfolding CR_def using \<open>R>1\<close>
apply (subst cindex_pathE_part_circlepath)
by (simp_all add:jumpF_pathstart_part_circlepath jumpF_pathfinish_part_circlepath)
ultimately show "- complex_of_real (cindex_pathE CR \<i>) - cindex_pathE L \<i> = 2"
unfolding L_def CR_def by auto
qed
lemma example2:
assumes "R>1"
shows "winding_number (part_circlepath 0 R 0 pi +++ linepath (-R) R) (-\<i>) = 0"
proof (eval_winding,simp_all)
define CR where "CR \<equiv>part_circlepath 0 R 0 pi"
define L where "L\<equiv> linepath (- (complex_of_real R)) R"
show "-\<i> \<notin> path_image CR" unfolding CR_def using \<open>R>1\<close>
by (intro not_on_circlepathI,auto)
show *:"-\<i> \<notin> closed_segment (- (of_real R)) R" using \<open>R>1\<close> complex_eq_iff
by (intro not_on_closed_segmentI,auto)
from cindex_pathE_linepath[OF this] have "cindex_pathE L (-\<i>) = 1"
unfolding L_def using \<open>R>1\<close> by auto
moreover have "cindex_pathE CR (-\<i>) = -1"
unfolding CR_def using \<open>R>1\<close>
apply (subst cindex_pathE_part_circlepath)
by (simp_all add:jumpF_pathstart_part_circlepath jumpF_pathfinish_part_circlepath)
ultimately show "-cindex_pathE CR (-\<i>) = cindex_pathE L (-\<i>)"
unfolding L_def CR_def by auto
qed
lemma example3:
fixes lb ub z :: complex
defines "rec \<equiv> linepath lb (Complex (Re ub) (Im lb)) +++ linepath (Complex (Re ub) (Im lb)) ub
+++ linepath ub (Complex (Re lb) (Im ub)) +++ linepath (Complex (Re lb) (Im ub)) lb"
assumes order_asms:"Re lb < Re z" "Re z < Re ub" "Im lb < Im z" "Im z < Im ub"
shows "winding_number rec z = 1"
unfolding rec_def
proof (eval_winding)
let ?l1 = "linepath lb (Complex (Re ub) (Im lb))"
and ?l2 = "linepath (Complex (Re ub) (Im lb)) ub"
and ?l3 = "linepath ub (Complex (Re lb) (Im ub))"
and ?l4 = "linepath (Complex (Re lb) (Im ub)) lb"
show l1: "z \<notin> path_image ?l1"
apply (auto intro!: not_on_closed_segmentI_complex)
using order_asms by (simp add: algebra_simps crossproduct_eq)
show l2:"z \<notin> path_image ?l2"
apply (auto intro!: not_on_closed_segmentI_complex)
using order_asms by (simp add: algebra_simps crossproduct_eq)
show l3:"z \<notin> path_image ?l3"
apply (auto intro!: not_on_closed_segmentI_complex)
using order_asms by (simp add: algebra_simps crossproduct_eq)
show l4:"z \<notin> path_image ?l4"
apply (auto intro!: not_on_closed_segmentI_complex)
using order_asms by (simp add: algebra_simps crossproduct_eq)
show "- complex_of_real (cindex_pathE ?l1 z + (cindex_pathE ?l2 z + (cindex_pathE ?l3 z +
cindex_pathE ?l4 z))) = 2 * 1"
proof -
have "(Im z - Im ub) * (Re ub - Re lb) < 0"
using mult_less_0_iff order_asms(1) order_asms(2) order_asms(4) by fastforce
then have "cindex_pathE ?l3 z = -1"
apply (subst cindex_pathE_linepath)
using l3 order_asms by (auto simp add:algebra_simps)
moreover have "(Im lb - Im z) * (Re ub - Re lb) <0"
using mult_less_0_iff order_asms(1) order_asms(2) order_asms(3) by fastforce
then have "cindex_pathE ?l1 z = -1"
apply (subst cindex_pathE_linepath)
using l1 order_asms by (auto simp add:algebra_simps)
moreover have "cindex_pathE ?l2 z = 0"
apply (subst cindex_pathE_linepath)
using l2 order_asms by (auto simp add:algebra_simps)
moreover have "cindex_pathE ?l4 z = 0"
apply (subst cindex_pathE_linepath)
using l4 order_asms by (auto simp add:algebra_simps)
ultimately show ?thesis by auto
qed
qed
end
|
State Before: J : Type v
inst✝ : SmallCategory J
ι : Type v
α : ι → TopCatMax
i : ι
⊢ (piIsoPi α).inv ≫ Pi.π α i = piπ α i State After: no goals Tactic: simp [piIsoPi] |
-- {-# OPTIONS -v tc.meta:40 #-}
-- {-# OPTIONS --verbose tc.conv.term:40 #-}
-- {-# OPTIONS --verbose tc.conv.level:40 #-}
-- {-# OPTIONS --verbose tc.conv.atom:50 #-}
-- {-# OPTIONS --verbose tc.conv.elim:50 #-}
module Issue680-NeutralLevels where
open import Common.Level
postulate
N : Set
A : N → Set
level : N → Level
lac : ∀ {n} → A n → N
I : Level → Level → Set
refl : ∀ {l : Level} → I l l
data Test : Set where
mkTest : (n : N) → (tel : A n) → Test
test : Test → N
test (mkTest n tel) = n
where
test′ : I (lsuc (level (lac tel)))
(lsuc (level (lac tel)))
test′ = refl
|
/- Introduces matrices and horrifying linear algebra. -/
import qtt.context
open context
open debrujin_idx
/- This matrix defines the context in which every variable (given by a debrujin idx)
exists. In other words, it defines a substitution. Variables all exist in the same
precontext, but the type multiplicities might differ. -/
def matrix (τ mult: Type) (γ δ: precontext τ)
: Type := Π (T: τ), γ ∋ T → @context τ mult δ
namespace matrix
variable {τ: Type}
variables {mult: Type} [semiring mult]
def identity: Π γ, @matrix τ mult γ γ
| (T::δ) _ ZVar := ⟦1⬝T⟧::0
| (T::δ) U (SVar x) := ⟦0⬝T⟧::(identity δ U x)
@[unfold_] lemma identity_zvar {T: τ} {γ: precontext τ}
: identity (T::γ) T ZVar = ⟦(1: mult)⬝T⟧::(0: context γ) := by refl
@[unfold_] lemma identity_svar {T U: τ} {δ: precontext τ} {x}
: identity (T::δ) U (SVar x) = ⟦(0: mult)⬝T⟧::(identity δ U x) := by refl
def vmul: ∀ {γ δ}, @context τ mult γ → @matrix τ mult γ δ → @context τ mult δ
| _ δ context.nil _ := 0
| _ _ (⟦π⬝T⟧::Γ) Ξ := (π • Ξ T ZVar) + (vmul Γ (λ U x, Ξ U (SVar x)))
infix ` ⊛ `:70 := vmul
namespace vmul
@[simp] lemma vmul_nil {δ} {Ξ: @matrix τ mult [] δ}
: context.nil ⊛ Ξ = 0 := by refl
@[unfold_] lemma vmul_cons {γ δ} {T: τ} {Γ: context γ} {π: mult} {Ξ: @matrix τ mult (T::γ) δ}
: ⟦π⬝T⟧::Γ ⊛ Ξ = (π • Ξ T ZVar) + (Γ ⊛ (λ U x, Ξ U (SVar x))) :=
by refl
@[simp] lemma zero_vmul
: ∀ {γ δ: precontext τ} (M: @matrix τ mult γ δ),
0 ⊛ M = 0 :=
begin
intros,
induction γ with T γ ih,
{ refl },
{ unfold has_zero.zero zeros,
simp * with unfold_,
show (0: mult) • M T ZVar + 0 ⊛ (λ (U: τ) (x: γ ∋ U), M U (SVar x)) = 0,
simp * },
end
@[simp] lemma vmul_ext_zero
: ∀ {γ δ} {Γ: context γ} {Ξ: @matrix τ mult γ δ} {T: τ},
Γ ⊛ (λ U x, ⟦0⬝T⟧::(Ξ U x)) = ⟦0⬝T⟧::(Γ ⊛ Ξ) :=
begin
intros,
induction Γ with γ' π T' Γ ih,
{ refl },
{ simp * with unfold_ },
end
@[simp] lemma one_vmul
: ∀ {γ δ} {Ξ: @matrix τ mult γ δ} {T: τ}
(x: γ ∋ T),
-------------------------------
(identity γ T x) ⊛ Ξ = Ξ T x :=
begin
intros,
induction x with γ' T' δ' T'' U x' ih;
{ simp * with unfold_ },
end
@[simp] lemma vmul_one
: ∀ {γ} (Γ: @context τ mult γ),
Γ ⊛ (identity γ) = Γ :=
begin
intros,
induction Γ with γ π T Γ ih,
{ refl },
{ simp [*, vmul_ext_zero] with unfold_ },
end
lemma smul_vmul
: ∀ {γ δ} {Γ: @context τ mult γ} {Ξ: @matrix τ mult γ δ} {π: mult},
(π • Γ) ⊛ Ξ = π • (Γ ⊛ Ξ) :=
begin
intros,
induction Γ with γ' π' T Γ ih;
{ simp [*, context.mul_smul]
with unfold_ sop_form },
end
@[sop_form] lemma vmul_right_distrib
: ∀ {γ δ} {Γ₁ Γ₂: @context τ mult γ} {Ξ: @matrix τ mult γ δ},
(Γ₁ + Γ₂) ⊛ Ξ = (Γ₁ ⊛ Ξ) + (Γ₂ ⊛ Ξ) :=
begin
intros,
induction Γ₁ with γ₁ π₁ T₁ Γ₁ ih₁;
{ cases Γ₂,
simp * with unfold_ sop_form },
end
end vmul
end matrix
|
-- @@stderr --
/dev/stdin:17: error: operator "defined" requires an identifier
/dev/stdin:17: error: unterminated #if
dtrace: failed to compile script test/unittest/preprocessor/err.ifdefincomp.d: Preprocessor failed to process input program
|
Redirect "/var/folders/lm/cpf87_lx21n9bgnl4kr72rjm0000gn/T/coqBYq1cI"
Test Search Output Name Only.
Timeout 1 Print Grammar tactic.
Add Search Blacklist "Private_" "_subproof".
Set Printing Depth 50.
Remove Search Blacklist "Private_" "_subproof".
Add Search Blacklist "Private_" "_subproof".
Timeout 1 Print LoadPath.
Redirect "/var/folders/lm/cpf87_lx21n9bgnl4kr72rjm0000gn/T/coq5lrxpQ" Print Ltac Signatures.
Timeout 1 Print Grammar tactic.
From Coq Require Import NArith List Streams.
From ReductionEffect Require Import PrintingEffect.
CoFixpoint fib (x y : N) : Stream N := Cons y (fib y (x + y)).
Eval compute in Str_nth 10 (map print_id (fib 0 1)).
Redirect "/var/folders/lm/cpf87_lx21n9bgnl4kr72rjm0000gn/T/coqcRXKe3" Print Ltac Signatures.
Timeout 1 Print Grammar tactic.
Timeout 1 Print LoadPath.
Import ListNotations.
Redirect "/var/folders/lm/cpf87_lx21n9bgnl4kr72rjm0000gn/T/coqinpWed" Print Ltac Signatures.
Timeout 1 Print Grammar tactic.
Timeout 1 Print LoadPath.
Anomaly ""Assert_failure printing/ppconstr.ml:399:14"." Please report at http://coq.inria.fr/bugs/.
Redirect "/var/folders/lm/cpf87_lx21n9bgnl4kr72rjm0000gn/T/coqk3FY68" Print Ltac Signatures.
Timeout 1 Print Grammar tactic.
Eval compute in List.map print_id (fib' 10 0 1).
Eval compute in (fun f x => f (f (f x))) (fun x => S (print_id x)) 0.
Eval cbn in (fun f x => f (f (f x))) print_id 0.
Eval hnf in (fun f x => f (f (f x))) print_id 0.
Eval simpl in (fun f x => f (f (f x))) (fun x => print_id (1 + x) + 1) 0.
Anomaly ""Assert_failure printing/ppconstr.ml:399:14"." Please report at http://coq.inria.fr/bugs/.
(* Auto-generated comment: Succeeded. *)
|
\chapter{Like White on Rice}
\label{ch:rice}
\lipsum[1-2]
\autoref{tab:table} shows an example table.
% From https://tex.stackexchange.com/questions/238503/tip-on-how-to-make-a-visually-good-table
% Also handles text overflow
\begin{table}[htbp]
\centering
\begin{tabularx}{0.9\textwidth}{L C C C} % left-align 1st column, centre-align others. See cls file for definitions of L and C
\toprule
& \multicolumn{3}{c}{methods tested} \\
\cmidrule(lr){2-4}
Method & Heat wheel & Exhaust air & Outdoor air \\
\midrule
First method & & Y & \\
Amazingly done & & & Y \\
Superficial pass & & Y & Y \\
Chemical bath & Y & & \\
Yang et al. & Y & Y & \\
Brown & Y & & Y \\
Industry standard & Y & Y & Y \\
\bottomrule
\end{tabularx}
\caption{Example of a table}
\label{tab:table}
\end{table}
\section{Preliminaries}
\lipsum[5]
\section{Methodology}
\lipsum[4-5]
\section{Summary}
\lipsum[6]
|
\subsection{The Elk River problem}
The Elk River is situated in central West Virginia, USA. The Elk River watershed is a relatively large but sparsely populated area, with Charleston being the only big city. It is characterized by vast natural resources and less developed industry and infrastructure. In 2014, one of the biggest employers in the area was involved in a major ecological incident, namely a chemical spill, which led to the closure of the plant, leaving many people jobless and affecting the local flora and fauna. Since then, the area has not been able to resume economic development, but it still attracts many tourists, especially fishers who enjoy the many fishing spots that the river offers. After analyzing the current situation in the area and all groups of people living in and visiting the area, it becomes evident that one of the measures most likely to succeed in improving the well-being of all groups is improving the tourist offering. The goal of this document is to further explain the platform using a user-centered approach.
\subsection{Stakeholders}
The stakeholders are the locals, whose economy has been struggling since the 2014 spill. Many of them will be able to secure a job with a stable income thanks to our service, possibly working part time as a second job. Our platform will also boost the economy of the area by attracting tourists, so many activities already in place will indirectly benefit from the service, and new ones will emerge.
\subsection{Definitions and acronyms}
What follows is the list of all the main definitions and acronyms used in the document.
\subsubsection{Definitions}
\begin{itemize}
    \item \textbf{Reservation}: data recording a user's request to have the specified service reserved for them at the specified time.
    \item \textbf{System}: all the software components needed to deliver the required functionality.
\end{itemize}
\subsubsection{Acronyms}
\begin{itemize}
\item \textbf{BPMN}: Business Process Model and Notation
\item \textbf{SDK}: Software Development Kit
\item \textbf{API}: Application Programming Interface
\item \textbf{DB}: Database
\item \textbf{DBMS}: Database Management System
\item \textbf{UID}: Unique Identifier
\item \textbf{URL}: Uniform Resource Locator
\item \textbf{UI}: User Interface
\end{itemize} |
`is_element/Xi/generic` := (gen_name,el_test) -> (A::set) -> (TT) -> proc(m)
global reason;
local pn,TT1,C,T;
pn := cat("is_element/Phi/",gen_name);
TT1 := `big_sets/trees`(A)(TT);
if not is_table_on(TT1,m) then
reason := [pn,"m is not a table on TT1",m,TT1];
return false;
fi;
C := children_map(A)(TT);
for T in TT1 do
if not(el_test(C[T])(m[T])) then
reason := [pn,"m[T] is not in M(C[T])",m[T],T,C[T]];
return false;
fi;
od;
return true;
end:
# If E is a strongly reduced operad, then there are natural maps
# \theta : (\Xi E)(TT) -> (\Xi E)(UU) whenever TT and UU are full trees
# on the same set A with UU contained in TT.
`theta/Xi/generic` := (gen_name,eta,gamma) -> (A::set) -> (TT,UU) -> proc(x)
local VV,CV,CT,X,Y,Z,T,P,Q,p,y,w;
if TT = UU then return x; fi;
T := (TT minus UU)[1];
VV := TT minus {T};
P := parent_map(A)(TT)[T];
CT := children_map(A)(TT);
CV := children_map(A)(VV);
Y := CT[T];
X := CV[P];
Z := CT[P];
p := table();
for Q in X do
p[Q] := `if`(member(Q,Y),T,Q);
od;
y := table();
y[T] := x[T];
for Q in X minus Y do
y[Q] := eta({Q});
od;
w := gamma(X,Z)(p)(x[P],y);
return `theta/Xi/generic`(gen_name,eta,gamma)(A)(VV,UU)(w);
end: |
State Before: α : Type u_1
N : α → Type u_2
inst✝² : DecidableEq α
inst✝¹ : (a : α) → DecidableEq (N a)
inst✝ : (a : α) → Zero (N a)
f g : Π₀ (a : α), N a
⊢ neLocus f g = neLocus g f State After: no goals Tactic: simp_rw [neLocus, Finset.union_comm, ne_comm] |
# In this file, we setup the `gen_download_cmd()`, `gen_unpack_cmd()` and
# `gen_package_cmd()` functions by providing methods to probe the environment
# and determine the most appropriate platform binaries to call.
export gen_download_cmd, gen_unpack_cmd, gen_package_cmd, gen_list_tarball_cmd,
parse_tarball_listing, gen_sh_cmd, parse_7z_list, parse_tar_list,
download_verify_unpack, download_verify, unpack
"""
gen_download_cmd(url::AbstractString, out_path::AbstractString)
Return a `Cmd` that will download resource located at `url` and store it at
the location given by `out_path`.
This method is initialized by `probe_platform_engines()`, which should be
automatically called upon first import of `BinaryProvider`.
"""
gen_download_cmd = (url::AbstractString, out_path::AbstractString) ->
error("Call `probe_platform_engines()` before `gen_download_cmd()`")
"""
gen_unpack_cmd(tarball_path::AbstractString, out_path::AbstractString; excludelist::Union{AbstractString, Nothing} = nothing)
Return a `Cmd` that will unpack the given `tarball_path` into the given
`out_path`. If `out_path` is not already a directory, it will be created.
excludelist is an optional file which contains a list of files that are not unpacked.
This option is mainly used to exclude symlinks from extraction (see: `copyderef`)
This method is initialized by `probe_platform_engines()`, which should be
automatically called upon first import of `BinaryProvider`.
"""
gen_unpack_cmd = (tarball_path::AbstractString, out_path::AbstractString; excludelist::Union{AbstractString, Nothing} = nothing) ->
error("Call `probe_platform_engines()` before `gen_unpack_cmd()`")
"""
gen_package_cmd(in_path::AbstractString, tarball_path::AbstractString)
Return a `Cmd` that will package up the given `in_path` directory into a
tarball located at `tarball_path`.
This method is initialized by `probe_platform_engines()`, which should be
automatically called upon first import of `BinaryProvider`.
"""
gen_package_cmd = (in_path::AbstractString, tarball_path::AbstractString) ->
error("Call `probe_platform_engines()` before `gen_package_cmd()`")
"""
gen_list_tarball_cmd(tarball_path::AbstractString)
Return a `Cmd` that will list the files contained within the tarball located at
`tarball_path`. The list will not include directories contained within the
tarball.
This method is initialized by `probe_platform_engines()`, which should be
automatically called upon first import of `BinaryProvider`.
"""
gen_list_tarball_cmd = (tarball_path::AbstractString) ->
error("Call `probe_platform_engines()` before `gen_list_tarball_cmd()`")
"""
parse_tarball_listing(output::AbstractString)
Parses the result of `gen_list_tarball_cmd()` into something useful.
This method is initialized by `probe_platform_engines()`, which should be
automatically called upon first import of `BinaryProvider`.
"""
parse_tarball_listing = (output::AbstractString) ->
error("Call `probe_platform_engines()` before `parse_tarball_listing()`")
"""
gen_sh_cmd(cmd::Cmd)
Runs a command using `sh`. On Unices, this will default to the first `sh`
found on the `PATH`, however on Windows if that is not found it will fall back
to the `sh` provided by the `busybox.exe` shipped with Julia.
This method is initialized by `probe_platform_engines()`, which should be
automatically called upon first import of `BinaryProvider`.
"""
gen_sh_cmd = (cmd::Cmd) ->
error("Call `probe_platform_engines()` before `gen_sh_cmd()`")
"""
probe_cmd(cmd::Cmd; verbose::Bool = false)
Returns `true` if the given command executes successfully, `false` otherwise.
"""
function probe_cmd(cmd::Cmd; verbose::Bool = false)
if verbose
@info("Probing $(cmd.exec[1]) as a possibility...")
end
try
success(cmd)
if verbose
@info(" Probe successful for $(cmd.exec[1])")
end
return true
catch
return false
end
end
"""
probe_symlink_creation(dest::AbstractString)
Probes whether we can create a symlink within the given destination directory,
to determine whether a particular filesystem is "symlink-unfriendly".
"""
function probe_symlink_creation(dest::AbstractString)
while !isdir(dest)
dest = dirname(dest)
end
# Build arbitrary (non-existent) file path name
link_path = joinpath(dest, "binaryprovider_symlink_test")
while ispath(link_path)
link_path *= "1"
end
loglevel = Logging.min_enabled_level(current_logger())
try
disable_logging(Logging.Warn)
symlink("foo", link_path)
return true
catch e
if isa(e, Base.IOError)
return false
end
rethrow(e)
finally
disable_logging(loglevel-1)
rm(link_path; force=true)
end
end
"""
probe_platform_engines!(;verbose::Bool = false)
Searches the environment for various tools needed to download, unpack, and
package up binaries. Searches for a download engine to be used by
`gen_download_cmd()` and a compression engine to be used by `gen_unpack_cmd()`,
`gen_package_cmd()`, `gen_list_tarball_cmd()` and `parse_tarball_listing()`, as
well as a `sh` execution engine for `gen_sh_cmd()`. Running this function
will set the global functions to their appropriate implementations given the
environment this package is running on.
This probing function will automatically search for download engines using a
particular ordering; if you wish to override this ordering and use one over all
others, set the `BINARYPROVIDER_DOWNLOAD_ENGINE` environment variable to its
name, and it will be the only engine searched for. For example, put:
ENV["BINARYPROVIDER_DOWNLOAD_ENGINE"] = "fetch"
within your `~/.juliarc.jl` file to force `fetch` to be used over `curl`. If
the given override does not match any of the download engines known to this
function, a warning will be printed and the typical ordering will be performed.
Similarly, if you wish to override the compression engine used, set the
`BINARYPROVIDER_COMPRESSION_ENGINE` environment variable to its name (e.g. `7z`
or `tar`) and it will be the only engine searched for. If the given override
does not match any of the compression engines known to this function, a warning
will be printed and the typical searching will be performed.
If `verbose` is `true`, print out the various engines as they are searched.
"""
probe_platform_engines!(;verbose::Bool = false) = nothing
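# A minimal usage sketch (hypothetical engine choices): the docstring above
# describes overriding the engine search via environment variables before
# probing, along these lines:
#
#     ENV["BINARYPROVIDER_DOWNLOAD_ENGINE"] = "curl"
#     ENV["BINARYPROVIDER_COMPRESSION_ENGINE"] = "tar"
#     probe_platform_engines!(verbose=true)
#
# After probing, `gen_download_cmd`, `gen_unpack_cmd`, etc. are rebound from
# their error-throwing stubs to working implementations for this platform.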
"""
parse_7z_list(output::AbstractString)
Given the output of `7z l`, parse out the listed filenames. This function is
used by `list_tarball_files`.
"""
function parse_7z_list(output::AbstractString)
lines = [chomp(l) for l in split(output, "\n")]
# If we didn't get anything, complain immediately
if isempty(lines)
return []
end
# Remove extraneous "\r" for windows platforms
for idx in 1:length(lines)
if endswith(lines[idx], '\r')
lines[idx] = lines[idx][1:end-1]
end
end
# Find index of " Name". Have to `collect()` as `findfirst()` doesn't work with
# generators: https://github.com/JuliaLang/julia/issues/16884
header_row = findfirst(collect(occursin(" Name", l) && occursin(" Attr", l) for l in lines))
name_idx = findfirst("Name", lines[header_row])[1]
attr_idx = findfirst("Attr", lines[header_row])[1] - 1
# Filter out only the names of files, ignoring directories
lines = [l[name_idx:end] for l in lines if length(l) > name_idx && l[attr_idx] != 'D']
if isempty(lines)
return []
end
# Extract within the bounding lines of ------------
bounds = [i for i in 1:length(lines) if all([c for c in lines[i]] .== Ref('-'))]
lines = lines[bounds[1]+1:bounds[2]-1]
# Eliminate `./` prefix, if it exists
for idx in 1:length(lines)
if startswith(lines[idx], "./") || startswith(lines[idx], ".\\")
lines[idx] = lines[idx][3:end]
end
end
return lines
end
"""
parse_tar_list(output::AbstractString)
Given the output of `tar -t`, parse out the listed filenames. This function is
used by `list_tarball_files`.
"""
function parse_tar_list(output::AbstractString)
lines = [chomp(l) for l in split(output, "\n")]
for idx in 1:length(lines)
if endswith(lines[idx], '\r')
lines[idx] = lines[idx][1:end-1]
end
end
# Drop empty lines and directories
lines = [l for l in lines if !isempty(l) && !endswith(l, '/')]
# Eliminate `./` prefix, if it exists
for idx in 1:length(lines)
if startswith(lines[idx], "./") || startswith(lines[idx], ".\\")
lines[idx] = lines[idx][3:end]
end
end
# make sure paths are always returned in the system's default way
return Sys.iswindows() ? replace.(lines, ['/' => '\\']) : lines
end
"""
download(url::AbstractString, dest::AbstractString;
verbose::Bool = false)
Download file located at `url`, store it at `dest`, continuing if `dest`
already exists and the server and download engine support it.
"""
function download(url::AbstractString, dest::AbstractString;
verbose::Bool = false)
download_cmd = gen_download_cmd(url, dest)
if verbose
@info("Downloading $(url) to $(dest)...")
end
oc = OutputCollector(download_cmd; verbose=verbose)
try
if !wait(oc)
error()
end
catch e
if isa(e, InterruptException)
rethrow()
end
error("Could not download $(url) to $(dest):\n$(e)")
end
end
"""
download_verify(url::AbstractString, hash::AbstractString,
dest::AbstractString; verbose::Bool = false,
force::Bool = false, quiet_download::Bool = false)
Download file located at `url`, verify it matches the given `hash`, and throw
an error if anything goes wrong. If `dest` already exists, just verify it. If
`force` is set to `true`, overwrite the given file if it exists but does not
match the given `hash`.
This method returns `true` if the file was downloaded successfully, `false`
if an existing file was removed due to the use of `force`, and throws an error
if `force` is not set and the already-existent file fails verification, or if
`force` is set, verification fails, and then verification fails again after
redownloading the file.
If `quiet_download` is set to `false` (the default), this method will print to
stdout when downloading a new file. If it is set to `true` (and `verbose` is
set to `false`) the downloading process will be completely silent. If
`verbose` is set to `true`, messages about integrity verification will be
printed in addition to messages regarding downloading.
"""
function download_verify(url::AbstractString, hash::AbstractString,
dest::AbstractString; verbose::Bool = false,
force::Bool = false, quiet_download::Bool = false)
# Whether the file existed in the first place
file_existed = false
if isfile(dest)
file_existed = true
if verbose
info_onchange(
"Destination file $(dest) already exists, verifying...",
"download_verify_$(dest)",
@__LINE__,
)
end
# verify download, if it passes, return happy. If it fails, (and
# `force` is `true`, re-download!)
try
verify(dest, hash; verbose=verbose)
return true
catch e
if isa(e, InterruptException)
rethrow()
end
if !force
rethrow()
end
if verbose
info_onchange(
"Verification failed, re-downloading...",
"download_verify_$(dest)",
@__LINE__,
)
end
end
end
# Make sure the containing folder exists
mkpath(dirname(dest))
try
# Download the file, optionally continuing
download(url, dest; verbose=verbose || !quiet_download)
verify(dest, hash; verbose=verbose)
catch e
if isa(e, InterruptException)
rethrow()
end
# If the file already existed, it's possible the initially downloaded chunk
# was bad. If verification fails after downloading, auto-delete the file
# and start over from scratch.
if file_existed
if verbose
@info("Continued download didn't work, restarting from scratch")
end
rm(dest; force=true)
# Download and verify from scratch
download(url, dest; verbose=verbose || !quiet_download)
verify(dest, hash; verbose=verbose)
else
# If it didn't verify properly and we didn't resume, something is
# very wrong and we must complain mightily.
rethrow()
end
end
# If the file previously existed, this means we removed it (due to `force`)
# and redownloaded, so return `false`. If it didn't exist, then this means
# that we successfully downloaded it, so return `true`.
return !file_existed
end
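# A minimal usage sketch (hypothetical URL and hash): download a tarball and
# verify it against a known SHA256 digest, replacing a stale local copy.
#
#     url = "https://example.com/libfoo.v1.0.0.tar.gz"  # hypothetical
#     hash = "0123..."  # full 64-character SHA256 hex digest
#     fresh = download_verify(url, hash, "/tmp/libfoo.tar.gz";
#                             force=true, verbose=true)
#     # `fresh` is false if an existing, mismatching file had to be replaced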
"""
package(src_dir::AbstractString, tarball_path::AbstractString;
verbose::Bool = false)
Compress `src_dir` into a tarball located at `tarball_path`.
"""
function package(src_dir::AbstractString, tarball_path::AbstractString;
verbose::Bool = false)
# For now, use environment variables to set the gzip compression factor to
# level 9, eventually there will be new enough versions of tar everywhere
# to use -I 'gzip -9', or even to switch over to .xz files.
withenv("GZIP" => "-9") do
oc = OutputCollector(gen_package_cmd(src_dir, tarball_path); verbose=verbose)
try
if !wait(oc)
error()
end
catch e
if isa(e, InterruptException)
rethrow()
end
error("Could not package $(src_dir) into $(tarball_path)")
end
end
end
"""
unpack(tarball_path::AbstractString, dest::AbstractString;
verbose::Bool = false)
Unpack tarball located at file `tarball_path` into directory `dest`.
"""
function unpack(tarball_path::AbstractString, dest::AbstractString;
verbose::Bool = false)
# unpack into dest
mkpath(dest)
# The user can force usage of our dereferencing workarounds for filesystems
# that don't support symlinks, but it is also autodetected.
copyderef = (get(ENV, "BINARYPROVIDER_COPYDEREF", "") == "true") || !probe_symlink_creation(dest)
# If we should "copyderef" what we do is to unpack everything except symlinks
# then copy the sources of the symlinks to the destination of the symlink instead.
# This is to work around filesystems that are mounted (such as SMBFS filesystems)
# that do not support symlinks.
excludelist = nothing
if copyderef
symlinks = list_tarball_symlinks(tarball_path)
if length(symlinks) > 0
(excludelist, io) = mktemp()
write(io, join([s[1] for s in symlinks], "\n"))
close(io)
end
end
oc = OutputCollector(gen_unpack_cmd(tarball_path, dest, excludelist); verbose=verbose)
try
if !wait(oc)
error()
end
catch e
if isa(e, InterruptException)
rethrow()
end
error("Could not unpack $(tarball_path) into $(dest)")
end
if copyderef && length(symlinks) > 0
@info("Replacing symlinks in tarball by their source files ...\n" * join(string.(symlinks),"\n"))
for s in symlinks
sourcefile = normpath(joinpath(dest, s[2]))
destfile = normpath(joinpath(dest, s[1]))
if isfile(sourcefile)
cp(sourcefile, destfile, force = true)
else
@warn("Symlink source '$sourcefile' does not exist!")
end
end
rm(excludelist; force = true)
end
end
"""
download_verify_unpack(url::AbstractString, hash::AbstractString,
dest::AbstractString; tarball_path = nothing,
verbose::Bool = false, ignore_existence::Bool = false,
force::Bool = false)
Helper method to download tarball located at `url`, verify it matches the
given `hash`, then unpack it into folder `dest`. In general, the method
`install()` should be used to download and install tarballs into a `Prefix`;
this method should only be used if the extra functionality of `install()` is
undesired.
If `tarball_path` is specified, the given `url` will be downloaded to
`tarball_path`, and it will not be removed after downloading and verification
is complete. If it is not specified, the tarball will be downloaded to a
temporary location, and removed after verification is complete.
If `force` is specified, a verification failure will cause `tarball_path` to be
deleted (if it exists), the `dest` folder to be removed (if it exists) and the
tarball to be redownloaded and reverified. If the verification check is failed
a second time, an exception is raised. If `force` is not specified, a
verification failure will result in an immediate raised exception.
If `ignore_existence` is set, the tarball is unpacked even if the destination
directory already exists.
Returns `true` if a tarball was actually unpacked, `false` if nothing was
changed in the destination prefix.
"""
function download_verify_unpack(url::AbstractString,
hash::AbstractString,
dest::AbstractString;
tarball_path = nothing,
ignore_existence::Bool = false,
force::Bool = false,
verbose::Bool = false)
# First, determine whether we should keep this tarball around
remove_tarball = false
if tarball_path === nothing
remove_tarball = true
function url_ext(url)
url = basename(url)
# Chop off urlparams
qidx = findfirst(isequal('?'), url)
if qidx !== nothing
url = url[1:qidx-1]
end
# Try to detect extension
dot_idx = findlast(isequal('.'), url)
if dot_idx === nothing
return nothing
end
return url[dot_idx+1:end]
end
# If extension of url contains a recognized extension, use it, otherwise use ".gz"
ext = url_ext(url)
if !(ext in ["tar", "gz", "tgz", "bz2", "xz"])
ext = "gz"
end
tarball_path = "$(tempname())-download.$(ext)"
end
# Download the tarball; if it already existed and we needed to remove it
# then we should remove the unpacked path as well
should_delete = !download_verify(url, hash, tarball_path;
force=force, verbose=verbose)
if should_delete
if verbose
@info("Removing dest directory $(dest) as source tarball changed")
end
rm(dest; recursive=true, force=true)
end
# If the destination path already exists, don't bother to unpack
if !ignore_existence && isdir(dest)
if verbose
@info("Destination directory $(dest) already exists, returning")
end
# Signify that we didn't do any unpacking
return false
end
try
if verbose
@info("Unpacking $(tarball_path) into $(dest)...")
end
unpack(tarball_path, dest; verbose=verbose)
finally
if remove_tarball
rm(tarball_path)
end
end
# Signify that we did some unpacking!
return true
end
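# A minimal usage sketch (hypothetical values): fetch, verify and unpack a
# tarball into a destination directory in one call.
#
#     unpacked = download_verify_unpack("https://example.com/libfoo.tar.gz",
#                                       hash, joinpath(prefix, "libfoo");
#                                       force=true, verbose=true)
#     # `unpacked` is false if the destination already existed and was kept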
|
\documentclass[11pt]{article}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{verbatim}
\usepackage{epsf}
\usepackage{graphicx}
\usepackage{hyperref}
\def\colfigsize{\epsfxsize=5in}
\pdfpagewidth 8.5in
\pdfpageheight 11.0in
\setlength\topmargin{0in}
\setlength\evensidemargin{0in}
\setlength\oddsidemargin{0in}
\setlength\textheight{8.0in}
\setlength\textwidth{6.5in}
\title{Building a bootable guest image for Palacios and Kitten}
\begin{document}
\maketitle
\section{Getting the guest image build tools}
In order to build the bootable guest ISO image, we need to build a Linux kernel
from source and an initial ramdisk file system containing a set of useful
tools. We will use a new directory for demonstration; the root directory for the
following examples is ``\verb+test/+":
\begin{verbatim}
[jdoe@newskysaw ~]$ mkdir test/
\end{verbatim}
\noindent
There are a set of tools and sources that are useful for the guest image
building procedure. You can obtain these resources from our git repositories.
Change to the ``\verb+test/+" directory and clone the resources:
\begin{verbatim}
[jdoe@newskysaw test]$ git clone http://hornet.cs.northwestern.edu:9005/busybox
[jdoe@newskysaw test]$ git clone http://hornet.cs.northwestern.edu:9005/initrd
[jdoe@newskysaw test]$ git clone http://hornet.cs.northwestern.edu:9005/linux-2.6.30.y
\end{verbatim}
\section{Building the ramdisk filesystem}
The guest requires an initial ramdisk filesystem. Jack has made one that you can
leverage; it is temporarily located in his home directory. You will need sudo
or root access to create the device files when you unpack the archive:
\begin{verbatim}
[jdoe@newskysaw test]$ cp /home/jarusl/initrd/disks/v3vee_initramfs.tar.gz .
[jdoe@newskysaw test]$ sudo tar -C initrd -xzf v3vee_initramfs.tar.gz
\end{verbatim}
\noindent
If you require a custom initial ramdisk filesystem, change to the
``\verb|initrd/initramfs/|" directory and perform the following steps:
\begin{verbatim}
[jdoe@newskysaw initramfs]$ mkdir -p proc sys var/log
\end{verbatim}
\noindent
Edit the ``\verb|init_task|" script and uncomment these lines:
\begin{verbatim}
#mknod /dev/tty0 c 4 0
#mknod /dev/tty1 c 4 1
#mknod /dev/tty2 c 4 2
\end{verbatim}
\pagebreak
\noindent
Create the ``\verb|console|" device. If you have sudo or root access it is
possible to create this device manually:
\begin{verbatim}
[jdoe@newskysaw initramfs]$ sudo mknod dev/console c 5 1
[jdoe@newskysaw initramfs]$ sudo chmod 0600 dev/console
\end{verbatim}
\noindent
If you do not have sudo or root access it is still possible to create the
``\verb|console|" device indirectly through the kernel build. Change to the
``\verb|initrd/|" directory and create a file called ``\verb|root_files|". Add
the following line:
\begin{verbatim}
nod /dev/console 0600 0 0 c 5 1
\end{verbatim}
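\noindent
For reference, this line uses the device-node syntax of the kernel's
\verb|usr/gen_init_cpio| tool:
\begin{verbatim}
nod <name> <mode> <uid> <gid> <dev_type c|b> <major> <minor>
\end{verbatim}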
\noindent
The ``\verb|root_files|" file is used when building the Linux kernel in the
section Configuring and building the Linux kernel. Finally, create any
additional directories and copy any additional files that you need. Your initial
ramdisk filesystem is prepped and ready for installation of the BusyBox tools as
described in the section Configuring and installing BusyBox tools.
\pagebreak
\begin{figure}[ht]
\begin{center}
\colfigsize\epsffile{busyboxConf1.eps}
\caption{BusyBox configuration}
\label{fig:busyboxcf1}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\colfigsize\epsffile{busyboxConf2.eps}
\end{center}
\caption{BusyBox configuration}
\label{fig:busyboxcf2}
\end{figure}
\section{Configuring and installing BusyBox tools}
BusyBox is a software application released as Free software under the GNU GPL
that provides many standard Unix tools. BusyBox combines tiny versions of many
common UNIX utilities into a single, small executable. For more details on
BusyBox visit \url{http://busybox.net}. To configure BusyBox, in the
``\verb+busybox/+" directory, type the following:
\begin{verbatim}
[jdoe@newskysaw busybox]$ make menuconfig
\end{verbatim}
\noindent
or
\begin{verbatim}
[jdoe@newskysaw busybox]$ make xconfig
\end{verbatim}
\noindent
The BusyBox tools will be installed in the guest's initial ramdisk filesystem;
you can add any tools that you need. There are two required configuration
options. In the
``\verb|BusyBox settings->Build Options|" menu check the
``\verb|Build BusyBox as a static binary (no shared libs)|" option, as shown in
figure \ref{fig:busyboxcf1}, and in the
``\verb|BusyBox settings->Installation Options|" menu set the
``\verb|Busybox installation prefix|" to the path of the
``\verb|initrd/initramfs|" directory, as shown in figure \ref{fig:busyboxcf2}.
After you finish configuring BusyBox, save your configuration and quit the
window. Then, to make the BusyBox tools, type the following:
\begin{verbatim}
[jdoe@newskysaw busybox]$ make
\end{verbatim}
Install the tools to the guest's initial ramdisk filesystem directory:
\begin{verbatim}
[jdoe@newskysaw busybox]$ make install
\end{verbatim}
\begin{figure}[ht]
\begin{center}
\colfigsize\epsffile{linuxConf.eps}
\end{center}
\caption{Linux Kernel configuration}
\label{fig:linuxcf}
\end{figure}
\section{Configuring and building the Linux kernel}
The following procedure demonstrates how to configure and build a 32-bit Linux
kernel. Change to the ``\verb|linux-2.6.30.y/|" directory. There is a custom
configuration file ``\verb|jrl-default-config|" which is configured with minimal
kernel options (all unnecessary options are removed to keep the guest booting
process fast). If you are using the custom configuration file type the
following:
\begin{verbatim}
[jdoe@newskysaw linux-2.6.30.y]$ cp jrl-default-config .config
\end{verbatim}
\noindent
Configure the kernel to meet your requirements. For more on configuring and
building Linux kernels, check online. Type the following:
\begin{verbatim}
[jdoe@newskysaw linux-2.6.30.y]$ make ARCH=i386 menuconfig
\end{verbatim}
\noindent
or
\begin{verbatim}
[jdoe@newskysaw linux-2.6.30.y]$ make ARCH=i386 xconfig
\end{verbatim}
\noindent
The kernel must be configured with the initial ramdisk file system directory
(e.g. ``\verb|initrd/initramfs/|"): in the ``\verb|General setup|" menu under
option
``\verb|Initial RAM filesystem and RAM disk support|" set the
``\verb|Initramfs source file(s)|" option to the path of the
``\verb|initrd/initramfs/|" directory, as shown in figure \ref{fig:linuxcf}.
Additionally, if you are using the ``\verb|root_files|" file to create devices
files, add the ``\verb|root_files|" file path, separated by a space, after the
initial ramdisk filesystem directory. When you are finished configuring the
kernel, save your configuration, and build a bootable ISO image:
\begin{verbatim}
[jdoe@newskysaw linux-2.6.30.y]$ make ARCH=i386 isoimage
\end{verbatim}
\noindent
The ISO image can be found here: ``\verb|arch/x86/boot/image.iso|", and will be
used in the section Configuring and building the guest image.
\section{Configuring and building the guest image}
Checkout the updated Palacios repository to the ``\verb|palacios/|" directory.
(You can find instructions for checking out the Palacios repository at
\url{http://www.v3vee.org/palacios/}). The guest creator utility is required for
building the guest image. Change to the ``\verb|palacios/utils/guest_creator|"
directory and build the guest creator utility:
\begin{verbatim}
[jdoe@newskysaw guest_creator]$ make
\end{verbatim}
\noindent
You will get the ``\verb|build_vm|" utility:
\begin{verbatim}
[jdoe@newskysaw guest_creator]$ file build_vm
build_vm: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked
(uses shared libs), for GNU/Linux 2.6.9, not stripped
\end{verbatim}
\noindent
The guest configuration file is written in XML. A sample configuration file is
provided: ``\verb|default.xml|". Make a copy of the default configuration file
named ``\verb|myconfig.xml|" and edit the configuration elements that you are
interested in (if a device is included in the guest configuration file, it
must be configured in the section Configuring and building Palacios or the guest
will not boot). Of particular importance is the ``\verb|files|" element. Comment
out this attribute:
\begin{verbatim}
<file id="boot-cd" filename="/home/jarusl/image.iso" />
\end{verbatim}
\noindent
Add an attribute that specifies the location of the Linux ISO image:
\begin{verbatim}
<file id="boot-cd" filename="../../../linux-2.6.30.y/arch/x86/boot/image.iso" />
\end{verbatim}
\noindent
When you are finished editing the guest configuration save the configuration
file. The guest image consists of the guest configuration file and the Linux
ISO image. Build the guest image with the guest creator utility:
\begin{verbatim}
[jdoe@newskysaw guest_creator]$ ./build_vm myconfig.xml -o guest.iso
\end{verbatim}
\noindent
The guest image, ``\verb+guest.iso+", is embedded in Kitten's
``\verb|init_task|" in the section Configuring and building Kitten.
\pagebreak
\begin{figure}[h]
\begin{center}
\colfigsize\epsffile{kittenConf1.eps}
\end{center}
\caption{Kitten configuration}
\label{fig:kittencf}
\end{figure}
\begin{figure}[ht]
\begin{center}
\colfigsize\epsffile{kittenConf2.eps}
\end{center}
\caption{Kitten configuration}
\label{fig:kittencf2}
\end{figure}
\section{Configuring and building Palacios and Kitten}
\subsection*{Configuring and building Palacios}
You can find the detailed manual of getting and building Palacios and Kitten
from scratch in the Palacios website (\url{http://www.v3vee.org/palacios}). Here
we only give the specific requirements related to the procedure of booting the
guest. To configure Palacios, change to the ``\verb|test/palacios/|" directory
and type the following:
\begin{verbatim}
[jdoe@newskysaw palacios]$ make menuconfig
\end{verbatim}
\noindent
or
\begin{verbatim}
[jdoe@newskysaw palacios]$ make xconfig
\end{verbatim}
\noindent
Don't forget to include the devices that your guest image requires. When you
have configured the components you want to build into Palacios, save the
configuration and close the window. To build Palacios type the following:
\begin{verbatim}
[jdoe@newskysaw palacios]$ make
\end{verbatim}
or
\begin{verbatim}
[jdoe@newskysaw palacios]$ make all
\end{verbatim}
\noindent
Once the Palacios static library has been built you can find the library file,
``\verb+libv3vee.a+", in the Palacios root directory.
\subsection*{Configuring and building Kitten}
Configure Kitten. Change to the ``\verb+test/kitten/+" directory and type the
following:
\begin{verbatim}
[jdoe@newskysaw kitten]$ make menuconfig
\end{verbatim}
\noindent
or
\begin{verbatim}
[jdoe@newskysaw kitten]$ make xconfig
\end{verbatim}
\noindent
Under the ``\verb|Virtualization|" menu select the
``\verb|Include Palacios virtual machine monitor|" option. Set the
``\verb|Path to pre-built Palacios tree|" option to the Palacios build tree
path, ``\verb|../palacios|", as shown in figure \ref{fig:kittencf}. Set the
``\verb|Path to guest OS ISO image|" option to the guest image path,\\
``\verb|../palacios/utils/guest_creator/guest.iso|'', as shown in figure
\ref{fig:kittencf2}. When you have finished configuring Kitten, save the
configuration and close the window. To build Kitten type the following:
\begin{verbatim}
[jdoe@newskysaw kitten]$ make isoimage
\end{verbatim}
\noindent
This builds the bootable ISO image file with guest image, Palacios, and Kitten.
The ISO file is located in ``\verb+kitten/arch/x86_64/boot/image.iso+".
\pagebreak
\noindent
You have successfully created an ISO image file that can be booted on a machine.
You can boot the file on Qemu using the following sample command:
\begin{verbatim}
[jdoe@newskysaw test]$ /opt/vmm-tools/qemu/bin/qemu-system-x86_64 \
-smp 1 \
-m 2047 \
-serial file:./serial.out \
-cdrom kitten/arch/x86_64/boot/image.iso \
< /dev/null
\end{verbatim}
\noindent
We have finished the entire procedure for building a guest image and booting it
on the Palacios VMM. For more updated details, check the Palacios website
\url{http://www.v3vee.org/palacios} and Kitten website
\url{https://software.sandia.gov/trac/kitten} regularly.
\end{document} |
function [fix_up,fix_down] = presolve_fixvariables(A,b,c,lb,ub,monotonicity)
% These are optimally (or w.l.o.g) set to upper bound
not_in_obj = find(c<=0);
% Setting to 1 makes Ax<b increase feasible set for these variables
constrained_blow = all(-A(:,not_in_obj)>=0,1);
% and they enter via a psd matrix in all sdp constraints
sdp_positive = monotonicity(not_in_obj) == -1;
% these variables satisfy all constraints
can_fix = not_in_obj(find(constrained_blow & sdp_positive));
% these variables are still not fixed
still_on = find(lb==0 & ub==1);
% so we can fix these
fix_up = intersect(can_fix,still_on);
% These are optimally (or w.l.o.g) set to lower bound
not_in_obj = find(c>=0);
% Setting to 0 makes Ax<b increase feasible set for these variables
constrained_blow = all(A(:,not_in_obj)>=0,1);
% and they enter via a psd matrix in all sdp constraints
sdp_positive = monotonicity(not_in_obj) == 1;
% these variables satisfy all constraints
can_fix = not_in_obj(find(constrained_blow & sdp_positive));
% these variables are still not fixed
still_on = find(lb==0 & ub==1);
% so we can fix these
fix_down = intersect(can_fix,still_on);
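% A small worked sketch (hypothetical data): with c = [-1;2], A = [-1 0;0 1],
% monotonicity = [-1;1], lb = [0;0] and ub = [1;1], variable 1 has c(1) <= 0,
% satisfies all(-A(:,1) >= 0) and enters the SDP constraints positively, so
% fix_up = 1; symmetrically, fix_down = 2.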
|
[STATEMENT]
lemma is_word_terminals_drop: "wellformed_tokens p \<Longrightarrow> is_word(terminals (drop n p))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_tokens p \<Longrightarrow> is_word (terminals (drop n p))
[PROOF STEP]
by (metis append_take_drop_id is_word_terminals list_all_append wellformed_tokens_def) |
State Before: α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h : List.Pairwise Disjoint l
⊢ support (List.prod l) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support l) State After: case nil
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
h : List.Pairwise Disjoint []
⊢ support (List.prod []) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support [])
case cons
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
hd : Perm α
tl : List (Perm α)
hl : List.Pairwise Disjoint tl → support (List.prod tl) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support tl)
h : List.Pairwise Disjoint (hd :: tl)
⊢ support (List.prod (hd :: tl)) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support (hd :: tl)) Tactic: induction' l with hd tl hl State Before: case nil
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
h : List.Pairwise Disjoint []
⊢ support (List.prod []) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support []) State After: no goals Tactic: simp State Before: case cons
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
hd : Perm α
tl : List (Perm α)
hl : List.Pairwise Disjoint tl → support (List.prod tl) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support tl)
h : List.Pairwise Disjoint (hd :: tl)
⊢ support (List.prod (hd :: tl)) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support (hd :: tl)) State After: case cons
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
hd : Perm α
tl : List (Perm α)
hl : List.Pairwise Disjoint tl → support (List.prod tl) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support tl)
h : (∀ (a' : Perm α), a' ∈ tl → Disjoint hd a') ∧ List.Pairwise Disjoint tl
⊢ support (List.prod (hd :: tl)) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support (hd :: tl)) Tactic: rw [List.pairwise_cons] at h State Before: case cons
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
hd : Perm α
tl : List (Perm α)
hl : List.Pairwise Disjoint tl → support (List.prod tl) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support tl)
h : (∀ (a' : Perm α), a' ∈ tl → Disjoint hd a') ∧ List.Pairwise Disjoint tl
⊢ support (List.prod (hd :: tl)) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support (hd :: tl)) State After: case cons
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
hd : Perm α
tl : List (Perm α)
hl : List.Pairwise Disjoint tl → support (List.prod tl) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support tl)
h : (∀ (a' : Perm α), a' ∈ tl → Disjoint hd a') ∧ List.Pairwise Disjoint tl
this : Disjoint hd (List.prod tl)
⊢ support (List.prod (hd :: tl)) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support (hd :: tl)) Tactic: have : Disjoint hd tl.prod := disjoint_prod_right _ h.left State Before: case cons
α : Type u_1
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f g : Perm α
l : List (Perm α)
h✝ : List.Pairwise Disjoint l
hd : Perm α
tl : List (Perm α)
hl : List.Pairwise Disjoint tl → support (List.prod tl) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support tl)
h : (∀ (a' : Perm α), a' ∈ tl → Disjoint hd a') ∧ List.Pairwise Disjoint tl
this : Disjoint hd (List.prod tl)
⊢ support (List.prod (hd :: tl)) = List.foldr (fun x x_1 => x ⊔ x_1) ⊥ (List.map support (hd :: tl)) State After: no goals Tactic: simp [this.support_mul, hl h.right] |
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_ARCH_COMMON_SCALAR_FUNCTION_MAXMAG_HPP_INCLUDED
#define BOOST_SIMD_ARCH_COMMON_SCALAR_FUNCTION_MAXMAG_HPP_INCLUDED
#include <boost/simd/function/abs.hpp>
#include <boost/simd/function/max.hpp>
#include <boost/simd/detail/dispatch/function/overload.hpp>
#include <boost/config.hpp>
namespace boost { namespace simd { namespace ext
{
namespace bd = boost::dispatch;
BOOST_DISPATCH_OVERLOAD ( maxmag_
, (typename A0)
, bd::cpu_
, bd::scalar_< bd::arithmetic_<A0> >
, bd::scalar_< bd::arithmetic_<A0> >
)
{
BOOST_FORCEINLINE A0 operator() ( A0 a0, A0 a1) const BOOST_NOEXCEPT
{
A0 aa0 = simd::abs(a0);
A0 aa1 = simd::abs(a1);
return aa0 < aa1 ? a1 : aa1 < aa0 ? a0 : simd::max(a0, a1);
}
};
} } }
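// A minimal usage sketch (hypothetical values):
//   boost::simd::maxmag(-3.0f, 2.0f) == -3.0f (strictly larger magnitude wins)
//   boost::simd::maxmag(-2.0f, 2.0f) ==  2.0f (equal magnitudes fall back to max)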
#endif
|
theory WellTypedExp
imports WTMiscEnv
begin
(*
####################################
P1. expression syntax
####################################
*)
(* expressions *)
datatype p_const =
UnitConst
| IConst int
| BConst bool
| FixConst
(* arrays *)
(*| EmptyArrayConst
| ExtArrayConst*)
| NullConst
| NewArrayConst
| ReadConst
| WriteConst
(* pairs *)
| UnpackConst
(* channels *)
| NewChanConst
| SendConst
| RecvConst
| ForkConst
datatype p_op =
I2Op "int \<Rightarrow> int \<Rightarrow> int"
| I1Op "int \<Rightarrow> int"
| C2Op "int \<Rightarrow> int \<Rightarrow> bool"
| C1Op "int \<Rightarrow> bool"
| R2Op "bool \<Rightarrow> bool \<Rightarrow> bool"
| R1Op "bool \<Rightarrow> bool"
datatype var_type =
VarType string
| LocType string
datatype p_exp =
ConstExp p_const
| OpExp p_op
| VarExp var_type
| PairExp p_exp p_exp
| IfExp p_exp p_exp p_exp
| LamExp string p_exp
| AppExp p_exp p_exp
fun bin_const where
"bin_const ReadConst = True"
| "bin_const WriteConst = True"
| "bin_const SendConst = True"
| "bin_const c = False"
fun is_value :: "p_exp \<Rightarrow> bool" where
"is_value (ConstExp c) = True"
| "is_value (OpExp xop) = True"
| "is_value (VarExp (LocType x)) = True"
| "is_value (PairExp v1 v2) = (is_value v1 \<and> is_value v2)"
| "is_value (LamExp x e) = True"
| "is_value (AppExp (ConstExp c) v) = (bin_const c \<and> is_value v)"
| "is_value other = False"
fun is_sexp :: "p_exp \<Rightarrow> bool" where
"is_sexp (ConstExp c) = True"
| "is_sexp (OpExp xop) = True"
| "is_sexp (VarExp (VarType x)) = False"
| "is_sexp (VarExp (LocType x)) = True"
| "is_sexp (PairExp v1 v2) = (is_value v1 \<and> is_value v2)"
| "is_sexp (LamExp x e) = True"
| "is_sexp (AppExp (ConstExp FixConst) (LamExp x e)) = True"
| "is_sexp (AppExp (ConstExp c) v) = (bin_const c \<and> is_value v)"
| "is_sexp other = False"
fun free_vars :: "p_exp \<Rightarrow> string set" where
"free_vars (ConstExp c) = {}"
| "free_vars (OpExp xop) = {}"
| "free_vars (VarExp v) = (case v of
VarType x \<Rightarrow> {x}
| other \<Rightarrow> {}
)"
| "free_vars (PairExp e1 e2) = free_vars e1 \<union> free_vars e2"
| "free_vars (IfExp e1 e2 e3) = free_vars e1 \<union> free_vars e2 \<union> free_vars e3"
| "free_vars (LamExp x e) = free_vars e - {x}"
| "free_vars (AppExp e1 e2) = free_vars e1 \<union> free_vars e2"
fun ref_vars :: "p_exp \<Rightarrow> string set" where
"ref_vars (ConstExp c) = {}"
| "ref_vars (OpExp xop) = {}"
| "ref_vars (VarExp v) = (case v of
VarType x \<Rightarrow> {}
| LocType x \<Rightarrow> {x}
)"
| "ref_vars (PairExp e1 e2) = ref_vars e1 \<union> ref_vars e2"
| "ref_vars (IfExp e1 e2 e3) = ref_vars e1 \<union> ref_vars e2 \<union> ref_vars e3"
| "ref_vars (LamExp x e) = ref_vars e"
| "ref_vars (AppExp e1 e2) = ref_vars e1 \<union> ref_vars e2"
type_synonym owner_env = "string \<Rightarrow> string"
fun res_name where
"res_name (VarType x) = Var x"
| "res_name (LocType x) = Loc x"
fun owner_name where
"owner_name delta (VarType x) = Var x"
| "owner_name delta (LocType x) = Loc (delta x)"
fun res_vars :: "owner_env \<Rightarrow> p_exp \<Rightarrow> res_id set" where
"res_vars delta (ConstExp c) = {}"
| "res_vars delta (OpExp xop) = {}"
| "res_vars delta (VarExp v) = {owner_name delta v}"
| "res_vars delta (PairExp e1 e2) = res_vars delta e1 \<union> res_vars delta e2"
| "res_vars delta (IfExp e1 e2 e3) = res_vars delta e1 \<union> res_vars delta e2 \<union> res_vars delta e3"
| "res_vars delta (LamExp x e) = res_vars delta e - {Var x}"
| "res_vars delta (AppExp e1 e2) = res_vars delta e1 \<union> res_vars delta e2"
definition non_prim_vars where
"non_prim_vars env delta e = { x | x. non_prim_entry env x \<and> x \<in> res_vars delta e }"
definition env_vars :: "(string \<Rightarrow> 'a option) \<Rightarrow> string set" where
"env_vars env = { x | x. env x \<noteq> None }"
definition use_env_vars where
"use_env_vars r_s = { x | x. r_s x \<noteq> NoPerm }"
definition own_env_vars where
"own_env_vars r_s = { x | x. r_s x = OwnPerm }"
(*
####################################
P4. type system for expressions
####################################
*)
definition pure_fun where
"pure_fun t1 t2 r = FunTy t1 t2 UsePerm r"
fun aff_leq where
"aff_leq Prim r = True"
| "aff_leq Ref NoPerm = False"
| "aff_leq Ref r = True"
| "aff_leq Aff OwnPerm = True"
| "aff_leq Aff r = False"
fun is_nullable where
"is_nullable (FunTy t1 t2 p a) = True"
| "is_nullable (ChanTy tau c_end) = True"
| "is_nullable (ArrayTy tau) = True"
| "is_nullable tau = False"
fun const_type :: "p_const \<Rightarrow> p_type set" where
"const_type UnitConst = {UnitTy}"
| "const_type (IConst i) = {IntTy}"
| "const_type (BConst b) = {BoolTy}"
| "const_type FixConst = {pure_fun (pure_fun t t (req_type t)) t Prim | t. fun_ty t \<and> unlim t}"
(* arrays: since t is unlim and arrays are unlim, no functions require ownership.
pairs are affine because they are only needed once. *)
(*| "const_type EmptyArrayConst = {pure_fun UnitTy (ArrayTy t) Prim | t. unlim t}"
| "const_type ExtArrayConst = {pure_fun (ArrayTy t) (FunTy t (ArrayTy t) OwnPerm Ref) Prim | t. unlim t}"*)
| "const_type NewArrayConst = {pure_fun IntTy (ArrayTy tau) Prim | tau. unlim tau}"
| "const_type NullConst = { tau | tau. True }"
| "const_type ReadConst = {pure_fun (ArrayTy t) (pure_fun IntTy t Ref) Prim | t. unlim t}"
| "const_type WriteConst = {pure_fun (ArrayTy t) (FunTy (PairTy IntTy t OwnPerm) UnitTy OwnPerm Ref) Prim | t. unlim t}"
(* pairs: a reusable pair must be constructed from unlimited values + requires ownership of both its elements,
however permissions-wise it is treated like a var.
- an affine pair can be constructed from anything, and also requires ownership of both its elements
- for unpacking, the pair is assumed to be owned, whatever is put in *)
| "const_type UnpackConst = {FunTy (PairTy t1 t2 r) (FunTy (FunTy t1 (FunTy t2 tx r (as_aff r')) r (as_aff r')) tx r' (as_aff r)) r Prim | t1 t2 tx r r'. leq_perm r r'}"
(* channels: all uses of a channel use up ownership *)
| "const_type NewChanConst = {pure_fun UnitTy (PairTy (ChanTy tau SEnd) (ChanTy tau REnd) OwnPerm) Prim | tau. True}"
| "const_type SendConst = {pure_fun (ChanTy t SEnd) (FunTy t UnitTy r Ref) Prim | t r. is_own r}"
| "const_type RecvConst = {pure_fun (ChanTy t REnd) t Prim | t. True }"
| "const_type ForkConst = {FunTy (FunTy UnitTy UnitTy UsePerm a) UnitTy r Prim | a r. is_own r}"
fun op_type :: "p_op \<Rightarrow> p_type" where
"op_type (I2Op xop) = pure_fun IntTy (pure_fun IntTy IntTy Prim) Prim"
| "op_type (I1Op xop) = pure_fun IntTy IntTy Prim"
| "op_type (C2Op xop) = pure_fun IntTy (pure_fun IntTy BoolTy Prim) Prim"
| "op_type (C1Op xop) = pure_fun IntTy BoolTy Prim"
| "op_type (R2Op xop) = pure_fun BoolTy (pure_fun BoolTy BoolTy Prim) Prim"
| "op_type (R1Op xop) = pure_fun BoolTy BoolTy Prim"
definition app_req where
"app_req rx1 rx2 r tau r_ex = (if req_type tau = Prim then empty_use_env else
diff_use_env (comp_use_env rx1 rx2) (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex))"
definition pair_req where
"pair_req rx r_ex tau = (if req_type tau = Prim then empty_use_env else diff_use_env rx r_ex)"
fun safe_type where
"safe_type tau OwnPerm = True"
| "safe_type tau UsePerm = unlim tau"
| "safe_type tau NoPerm = False"
fun safe_type_x where
"safe_type_x tau OwnPerm = True"
| "safe_type_x tau UsePerm = unlim tau"
| "safe_type_x tau NoPerm = (req_type tau = Prim)"
fun safe_pair_aff where
"safe_pair_aff a NoPerm = False"
| "safe_pair_aff a UsePerm = (a \<noteq> Aff)"
| "safe_pair_aff a OwnPerm = True"
(*
fun value_req where
"value_req (VarType x) tau tau_x = True"
| "value_req (LocType x) tau tau_x = (req_type tau = Ref \<and> req_type tau_x = Ref)"
*)
fun well_typed :: "pt_env \<Rightarrow> owner_env \<Rightarrow> perm_use_env \<Rightarrow> p_exp \<Rightarrow> p_type \<Rightarrow> perm_use_env \<Rightarrow> perm_use_env \<Rightarrow> bool" where
"well_typed env delta r_s1 (ConstExp c) tau r_s2 rx = (tau \<in> const_type c \<and> leq_use_env r_s2 r_s1 \<and> leq_use_env rx r_s2)"
| "well_typed env delta r_s1 (OpExp xop) tau r_s2 rx = (tau = op_type xop \<and> leq_use_env r_s2 r_s1 \<and> leq_use_env rx r_s2)"
| "well_typed env delta r_s1 (VarExp v) tau r_s2 rx = (\<exists> r_ex tau_x. env (res_name v) = Some tau \<and> env (owner_name delta v) = Some tau_x \<and> (*value_req v tau tau_x \<and>*)
leq_use_env (ereq_use_env (owner_name delta v) tau_x) r_s1 \<and> leq_use_env r_s2 (diff_use_env r_s1 (comp_use_env (ereq_use_env (owner_name delta v) tau_x) r_ex)) \<and>
leq_use_env rx r_s2 \<and> leq_use_env r_ex r_s1 \<and> leq_use_env (diff_use_env (ereq_use_env (owner_name delta v) tau_x) (comp_use_env (ereq_use_env (owner_name delta v) tau_x) r_ex)) rx)"
| "well_typed env delta r_s1 (PairExp e1 e2) tau r_sf rf = (\<exists> t1 t2 r r_s2 r_s3 rx1 rx2 r_ex. tau = PairTy t1 t2 r \<and>
well_typed env delta r_s1 e1 t1 r_s2 rx1 \<and> well_typed env delta r_s2 e2 t2 r_s3 rx2 \<and>
leq_use_env (lift_use_env rx1 r) r_s3 \<and> leq_use_env (lift_use_env rx2 r) r_s3 \<and> aff_leq (max_aff (req_type t1) (req_type t2)) r \<and>
disj_use_env (lift_use_env rx1 r) (lift_use_env rx2 r) \<and>
leq_use_env r_sf (diff_use_env r_s3 r_ex) \<and> leq_use_env rf r_sf \<and> leq_use_env r_ex r_s1 \<and>
leq_use_env (pair_req (comp_use_env (lift_use_env rx1 r) (lift_use_env rx2 r)) r_ex tau) rf
)"
| "well_typed env delta r_s1 (IfExp e1 e2 e3) tau r_s3 rx = (\<exists> rx' r_s2 rx1 rx2.
well_typed env delta r_s1 e1 BoolTy r_s2 rx' \<and> well_typed env delta r_s2 e2 tau r_s3 rx1 \<and> well_typed env delta r_s2 e3 tau r_s3 rx2 \<and>
rx = comp_use_env rx1 rx2)"
| "well_typed env delta r_s1 (LamExp x e) tau r_s2 rf = (\<exists> t1 t2 r a rx r_end r_s' r_ex. tau = FunTy t1 t2 r a \<and>
well_typed (add_env env (Var x) t1) delta (add_use_env rx (Var x) r) e t2 r_s' r_end \<and> aff_use_env rx a \<and>
leq_use_env rx r_s1 \<and> leq_use_env r_s2 (diff_use_env r_s1 r_ex) \<and> leq_use_env rf r_s2 \<and>
leq_use_env r_ex r_s1 \<and> leq_use_env (diff_use_env rx r_ex) rf)"
| "well_typed env delta r_s1 (AppExp e1 e2) tau r_sf rx = (\<exists> t1 r a r_s2 rx1 rx2 r_s3 r_ex.
well_typed env delta r_s1 e1 (FunTy t1 tau r a) r_s2 rx1 \<and> well_typed env delta r_s2 e2 t1 r_s3 rx2 \<and>
leq_use_env r_sf (diff_use_env r_s3 (comp_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_ex)) \<and>
leq_use_env (comp_use_env rx1 (lift_use_env rx2 r)) r_s3 \<and>
disj_use_env rx1 (lift_use_env rx2 r) \<and> leq_use_env rx r_sf \<and>
leq_use_env r_ex r_s1 \<and> leq_use_env (app_req rx1 rx2 r tau r_ex) rx
)"
(* - expression lemmas *)
lemma value_is_sexp: "is_value e \<Longrightarrow> is_sexp e"
apply (case_tac e)
apply (auto)
apply (case_tac x3)
apply (auto)
apply (case_tac x71)
apply (auto)
apply (case_tac x1)
apply (auto)
done
lemma e2_sexp: "\<lbrakk> is_sexp (AppExp e1 e2) \<rbrakk> \<Longrightarrow> is_sexp e2"
apply (case_tac e1)
apply (auto)
apply (case_tac e2)
apply (auto)
apply (case_tac x3)
apply (auto)
apply (case_tac x71)
apply (auto)
apply (case_tac x1a)
apply (auto)
done
end |
lemma connected_contains_Icc: fixes A :: "'a::linorder_topology set" assumes "connected A" "a \<in> A" "b \<in> A" shows "{a..b} \<subseteq> A" |
function [no,el]=removeisolatednode(node,elem)
%
% [no,el]=removeisolatednode(node,elem)
%
% remove isolated nodes: nodes that are not included in any element
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
%
% input:
% node: list of node coordinates
% elem: list of elements of the mesh
%
% output:
% no: node coordinates after removing the isolated nodes
% el: element list of the resulting mesh
%
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%
oid=1:size(node,1); % old node index
idx=setdiff(oid,elem(:)); % indices to the isolated nodes
idx=sort(idx);
delta=zeros(size(oid));
delta(idx)=1;
delta=-cumsum(delta); % calculate the new node index after removing the isolated nodes
oid=oid+delta; % map to new index
el=oid(elem); % element list in the new index
no=node;
no(idx,:)=[]; % remove the isolated nodes
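% Example (hypothetical mesh): node 4 is referenced by no element, so it is
% removed and the element list is renumbered accordingly.
%
% node=[0 0;1 0;0 1;5 5];
% elem=[1 2 3];
% [no,el]=removeisolatednode(node,elem);
% % no contains the first three rows of node; el stays [1 2 3]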
|
module Text.PrettyPrint.Bernardy
import Data.String
import Data.List
import Data.List1
import public Control.Monad.Reader
import public Control.Monad.Identity
%default total
public export
record LayoutOpts where
constructor Opts
lineLength : Int
export
record Layout where
constructor MkLayout
content : List1 String
maxLine : Int
lastLine : Int
height : Int
export
record Doc (opts : LayoutOpts) where
constructor MkDoc
layouts : List Layout
replicateTR : Nat -> Char -> List Char -> String
replicateTR Z _ acc = pack acc
replicateTR (S k) c acc = replicateTR k c (c :: acc)
indentTR : Nat -> String -> String
indentTR k str = replicateTR k ' ' [] ++ str
namespace Layout
||| Render the given layout
export
render : Layout -> String
render (MkLayout (l ::: ls) _ _ _) = unlines (l :: ls)
||| Convert a single line of text to a layout.
|||
||| @ str this must be single line of text.
export
line : (str : String) -> Layout
line str =
let len = prim__strLength str
in MkLayout
{ content = str ::: []
, maxLine = len
, lastLine = len
, height = 0
}
allLines : (inp : List Char) -> (acc : List Char) -> List String
allLines [] acc = [pack $ reverse acc]
allLines ('\r' :: '\n' :: xs) acc = pack (reverse acc) :: allLines xs []
allLines ('\n' :: xs) acc = pack (reverse acc) :: allLines xs []
allLines ('\r' :: xs) acc = pack (reverse acc) :: allLines xs []
allLines (x :: xs) acc = allLines xs (x :: acc)
||| Convert a string to a layout.
||| This preserves any manual formatting
|||
||| @ str the String to pretty print
export
text : (str : String) -> Layout
text str =
let ls@(h :: t) = allLines (unpack str) []
| [] => line ""
(MkStats maxLine lastLine height) =
foldl
(\(MkStats maxLine lastLine height), line =>
let len = prim__strLength line
in MkStats (max maxLine len) len (height + 1))
(MkStats 0 0 (-1))
ls
in MkLayout
{ content = h ::: t
, maxLine
, lastLine
, height
}
where
data Stats : Type where
MkStats : (maxLine, lastLine, height : Int) -> Stats
concatContent' : String -> List String -> List1 String -> Nat -> List String
concatContent' x [] (y ::: ys) k = (x ++ y) :: map (indentTR k) ys
concatContent' x (x' :: xs) ys k = x :: concatContent' x' xs ys k
concatContent : List1 String -> List1 String -> Nat -> List1 String
concatContent (x ::: []) (y ::: ys) k = (x ++ y) ::: map (indentTR k) ys
concatContent (x ::: (x' :: xs)) ys k = x ::: concatContent' x' xs ys k
||| Concatenate to Layouts horizontally
export
Semigroup Layout where
left <+> right = MkLayout
{ content =
concatContent
left.content
right.content
(cast left.lastLine)
, maxLine = max left.maxLine $ left.lastLine + right.maxLine
, lastLine = left.lastLine + right.lastLine
, height = left.height + right.height
}
export
Monoid Layout where
neutral = MkLayout
{ content = "" ::: []
, maxLine = 0
, lastLine = 0
, height = 0
}
export %inline
FromString Layout where
fromString = text
export
flush : Layout -> Layout
flush x = MkLayout
{ content = addNL x.content.head x.content.tail
, maxLine = x.maxLine
, lastLine = 0
, height = x.height + 1
}
where
addNL : String -> List String -> List1 String
addNL x [] = x ::: [""]
addNL x (y :: xs) = x ::: forget (addNL y xs)
export
indent : Nat -> Layout -> Layout
indent k x = fromString (replicateTR k ' ' []) <+> x
visible : LayoutOpts -> Layout -> Bool
visible opts x = x.maxLine <= opts.lineLength
shortest : List Layout -> Maybe Layout
shortest [] = Nothing
shortest (x :: xs) = Just $ foldl (\x, y => if x.height <= y.height then x else y) x xs
namespace Doc
||| Render the best candidate from the given set of layouts
export
render : (opts : _) -> Doc opts -> Maybe String
render opts (MkDoc xs) = map render $ shortest $ filter (visible opts) xs
insert : Layout -> List Layout -> List Layout -> List Layout
insert x [] acc = x :: acc
insert x (y :: ys) acc = case keep x y of
KLeft => insert x ys acc
KBoth => insert x ys (y :: acc)
KRight => reverseOnto (y :: acc) ys
where
data Keep = KLeft | KBoth | KRight
keep : Layout -> Layout -> Keep
keep x y =
if x.maxLine == y.maxLine && x.lastLine == y.lastLine && x.height == y.height
then KBoth
else if x.maxLine <= y.maxLine && x.lastLine <= y.lastLine && x.height <= y.height
then KLeft
else if x.maxLine >= y.maxLine && x.lastLine >= y.lastLine && x.height >= y.height
then KRight
else KBoth
combine : List Layout -> List Layout -> List Layout
combine [] ys = ys
combine (x :: xs) ys = combine xs (insert x ys [])
export %inline
(<|>) : Doc opts -> Doc opts -> Doc opts
MkDoc xs <|> MkDoc ys = MkDoc $ combine xs ys
export %inline
(<+>) : {opts : _} -> Doc opts -> Doc opts -> Doc opts
MkDoc xs <+> MkDoc ys =
MkDoc $ combine
[ z
| x <- xs
, y <- ys
, let z = x <+> y
, visible opts z
]
[]
export
FromString (Doc opts) where
fromString str = MkDoc [fromString str]
export
empty : Doc opts
empty = MkDoc [neutral]
export
hcat : {opts : _} -> List (Doc opts) -> Doc opts
hcat xs = foldl (<+>) empty xs
export
hsep : {opts : _} -> Doc opts -> Doc opts -> Doc opts
hsep x y = hcat [x, " ", y]
export
flush : {opts : _} -> Doc opts -> Doc opts
flush (MkDoc xs) = MkDoc $ map flush xs
export
vcat : {opts : _} -> Doc opts -> Doc opts -> Doc opts
vcat x y = flush x <+> y
export
indent : {opts : _} -> Nat -> Doc opts -> Doc opts
indent k (MkDoc xs) =
MkDoc
[ y
| x <- xs
, let y = indent k x
, visible opts y
]
export
hang : {opts : _} -> Nat -> Doc opts -> Doc opts -> Doc opts
hang k x y = (x <+> y) <|> vcat x (indent k y)
export
text : String -> (Doc opts)
text str = MkDoc [text str]
export
sep : {opts : _} -> List (Doc opts) -> Doc opts
sep [] = empty
sep (x :: xs) = foldl1 hsep (x ::: xs) <|> foldl1 vcat (x ::: xs)
infixl 7 <++>
export
(<++>) : {opts : _} -> Doc opts -> Doc opts -> Doc opts
l <++> r = l <+> " " <+> r
public export
interface Pretty a where
pretty : {opts : _} -> a -> Doc opts
public export
interface PrettyPrec prec a | a where
prettyPrec : {opts : _} -> prec -> a -> Doc opts
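-- A minimal usage sketch (hypothetical): pick the best layout within an
-- 80-column budget; `render` yields Nothing when no candidate fits.
--
-- greeting : Maybe String
-- greeting = render (Opts 80) (hang 2 (text "hello") (text "world"))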
|
module Issue561 where
open import Common.Prelude hiding (primIsDigit)
primitive
primIsDigit : Char → Bool
main : IO Bool
main = return true
|
Formal statement is: lemma homeomorphic_local_compactness: fixes S:: "'a::metric_space set" and T:: "'b::metric_space set" shows "S homeomorphic T \<Longrightarrow> locally compact S \<longleftrightarrow> locally compact T" Informal statement is: If two topological spaces are homeomorphic, then one is locally compact if and only if the other is.
The closed ball of radius $0$ around $x$ is the singleton set $\{x\}$. |
%!TEX root = ../../thesis.tex
\section{Rich-text editing}
Major browsers, i.e. any browser with a market share above 0.5\%\footnote{\url{http://gs.statcounter.com/\#all-browser-ww-monthly-201406-201506-bar}, last checked on 07/25/2015}, do not offer native input fields that allow rich-text editing. Neither the W3C's HTML5 and HTML5.1 specifications nor the WHATWG's ``HTML Living Standard''\footnote{The Web Hypertext Application Technology Working Group (WHATWG) is a working group that mainly developed the HTML5 standard, which later resulted in the widely acknowledged ``HTML Living Standard'', see \refsection{sec:standardization-of-html-editing-apis}} recommends such elements. As discussed in \refsection{sec:html_sgml_def}, browsers are rich-text viewers by virtue of being able to display HTML. By the early 2000s, the first JavaScript libraries emerged that allowed users to interactively change (parts of) a website to enable rich-text editing in the browser. The techniques used will be discussed in section~\ref{sec:html-editing-apis} through section~\ref{sec:useage-of-html-editing-apis}.
{-# OPTIONS --without-K --safe #-}
module Categories.Functor.Cocontinuous where
open import Level
open import Data.Product using (Σ)
open import Categories.Category
open import Categories.Functor
import Categories.Category.Construction.Cocones as Coc
import Categories.Diagram.Cocone.Properties as Cocₚ
import Categories.Diagram.Colimit as Col
import Categories.Morphism as Mor
private
variable
o ℓ e : Level
C D E J : Category o ℓ e
-- G preserves the colimit of F.
ColimitPreserving : (G : Functor C D) {F : Functor J C} (L : Col.Colimit F) → Set _
ColimitPreserving {C = C} {D = D} G {F} L = Σ (Col.Colimit (G ∘F F)) λ L′ → G.F₀ (Col.Colimit.coapex L) ≅ Col.Colimit.coapex L′
where module F = Functor F
module G = Functor G
open Mor D
-- Cocontinuous functors preserve all colimits.
Cocontinuous : ∀ (o ℓ e : Level) (G : Functor C D) → Set _
Cocontinuous {C = C} o ℓ e G = ∀ {J : Category o ℓ e} {F : Functor J C} (L : Col.Colimit F) → ColimitPreserving G L
|
Formal statement is: lemma cdiv_in_iff' [simp]: "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. f x / c) \<in> L F (g) \<longleftrightarrow> f \<in> L F (g)" Informal statement is: If $c \neq 0$, then the function $x \mapsto f(x)/c$ belongs to the Landau set $L\,F(g)$ if and only if $f$ does. |
## Reconstructing Horndeski theories via the Gaussian Process: $H(z)$ reconstruction
This is part 1 of a two-part notebook on using the Gaussian process (GP) to reconstruct Horndeski theories ([2105.12970](https://arxiv.org/abs/2105.12970)). For this notebook, we reconstruct the Hubble function $H(z)$ given the combined Pantheon/MCT, cosmic chronometers (CC), and baryon acoustic oscillations (BAO) data. The output will be directly used in part 2 on model building.
References to the data and python packages used in this work can be found at end of this notebook.
### 0. Datasets: Pantheon/MCT, CC, BAO
To start, we import the datasets (Pantheon/MCT, CC, BAO) which will be used for the reconstruction.
```python
%matplotlib inline
import numpy as np
from numpy import loadtxt, savetxt
from scipy.constants import c
from matplotlib import pyplot as plt
c_kms = c/1000 # speed of light in km/s
# load pantheon + mct H(z) data
loc_sn = 'pantheon_mct.txt'
loc_sn_corr = 'pantheon_mct_corr.txt'
sn_data = loadtxt(loc_sn)
sn_corr = loadtxt(loc_sn_corr)
# setup snia observations
z_sn = sn_data[:, 0]
Ez_sn = sn_data[:, 1]
sigEz_sn_stat = sn_data[:, 2]
# construct snia cov matrix
covEz_sn_corr = np.diag(sigEz_sn_stat) @ \
                sn_corr @ np.diag(sigEz_sn_stat)
# load pantheon compressed m(z) data
loc_lcparam = 'lcparam_DS17f.txt'
loc_lcparam_sys = 'sys_DS17f.txt'
lcparam = loadtxt(loc_lcparam, usecols = (1, 4, 5))
lcparam_sys = loadtxt(loc_lcparam_sys, skiprows = 1)
# setup pantheon samples
z_ps = lcparam[:, 0]
mz_ps = lcparam[:, 1]
sigmz_ps = lcparam[:, 2]
# pantheon samples systematics
covmz_ps_sys = lcparam_sys.reshape(40, 40)
covmz_ps_tot = covmz_ps_sys + np.diag(sigmz_ps**2)
# load cc dataset
loc_cc = 'cc_data.txt'
cc_data = loadtxt(loc_cc)
# setup cc observations
z_cc = cc_data[:, 0]
Hz_cc = cc_data[:, 1]
sigHz_cc = cc_data[:, 2]
# load bao's
loc_bao = 'bao_data.txt'
bao_data = loadtxt(loc_bao)
z_bao = bao_data[:, 0]
dmrs_bao = bao_data[:, 1] # dM/rs, rs = sound horizon
sigdmrs_bao = bao_data[:, 2]
dhrs_bao = bao_data[:, 3] # dH/rs
sigdhrs_bao = bao_data[:, 4]
```
The different datasets are visualized below.
```python
# pantheon/mct
plt.errorbar(z_sn, Ez_sn,
yerr = np.sqrt(np.diag(covEz_sn_corr)),
fmt = 'k^', markersize = 4,
ecolor = 'red', elinewidth = 2, capsize = 2)
plt.xlabel('$z$')
plt.ylabel('$E(z)$')
plt.show()
# pantheon samples
plt.errorbar(np.log(z_ps), mz_ps,
yerr = np.sqrt(np.diag(covmz_ps_tot)),
fmt = 'k^', markersize = 4,
ecolor = 'red', elinewidth = 2, capsize = 2)
plt.xlabel('$\ln(z)$')
plt.ylabel('$m(z)$')
plt.show()
# cosmic chronometers
plt.errorbar(z_cc, Hz_cc, yerr = sigHz_cc,
fmt = 'bo', markersize = 2,
ecolor = 'red', elinewidth = 2, capsize = 2)
plt.xlabel('$z$')
plt.ylabel('$H(z)$')
plt.show()
# bao
fig, ax = plt.subplots(1,2, figsize = (15,4))
ax[0].errorbar(z_bao, dmrs_bao, yerr = sigdmrs_bao,
fmt = 'k*', markersize = 4,
ecolor = 'red', elinewidth = 2, capsize = 2)
ax[0].set_xlabel('$z$')
ax[0].set_ylabel('$dM(z)/r_s$')
ax[1].errorbar(z_bao, dhrs_bao, yerr = sigdhrs_bao,
fmt = 'k*', markersize = 2,
ecolor = 'red', elinewidth = 2, capsize = 2)
ax[1].set_xlabel('$z$')
ax[1].set_ylabel('$dH(z)/r_s$')
plt.show()
```
### 1. Reconstructing $H(z)$
In this section, we use the combined datasets above, further complemented with an $H_0$ prior, to reconstruct the Hubble function $H(z)$. But first, let us import the GP regressor and the RBF kernel.
```python
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, ConstantKernel)
```
In what follows, we will use the RBF kernel, also commonly known as the squared exponential kernel.
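For reference, the squared exponential kernel between two inputs $z$ and $z'$ has the form
\begin{equation}
k(z, z') = \sigma_f^2 \exp\left[ -\frac{(z - z')^2}{2 \ell^2} \right],
\end{equation}
where the amplitude $\sigma_f^2$ (the `ConstantKernel` factor) and the length scale $\ell$ are hyperparameters tuned by maximizing the log marginal likelihood during the fit.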
```python
kernels = {"SquaredExponential": ConstantKernel()*RBF()}
```
Select any one of the kernels...
```python
kern_name = "SquaredExponential"
kernel = kernels[kern_name]
```
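Other stationary kernels could be registered alongside it with one extra dictionary entry. As an illustrative sketch (the Matérn kernel and the choice `nu = 2.5` below are assumptions for demonstration, not part of the original analysis):

```python
# optional: register an alternative kernel (illustrative only)
from sklearn.gaussian_process.kernels import Matern

kernels["Matern52"] = ConstantKernel()*Matern(nu = 2.5)
# kern_name = "Matern52" # then re-run the selection above
```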
The $H_0$ priors we shall consider for the reconstruction are given below.
```python
H0_priors = {'R19': {'ave': 74.03, 'std': 1.42},
'TRGB': {'ave': 69.8, 'std': 1.9},
'P18': {'ave': 67.4, 'std': 0.5}}
```
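Each prior enters the analysis in two ways: as an extra data point at $z \approx 0$ for the GP fits, and, in the $M$ calibration below, as a Gaussian log-prior
\begin{equation}
\ln p(H_0) = -\frac{1}{2} \left( \frac{H_0 - \bar{H}_0}{\sigma_{H_0}} \right)^2 + \text{const.},
\end{equation}
matching the `H0_prior_loglike` function defined later.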
The Gaussian process is now performed in the next few lines. We begin with the CC dataset appended with the prior $H_0$.
```python
# set GP rec parameters
z_min = 1e-5
z_max = 2
n_div = 50
for H0_prior in H0_priors:
H0_ave = H0_priors[H0_prior]['ave']
H0_std = H0_priors[H0_prior]['std']
z_cc_prior = np.append(np.array([z_min]), z_cc)
Hz_cc_prior = np.append(np.array([H0_ave]), Hz_cc)
sigHz_cc_prior = np.append(np.array([H0_std]), sigHz_cc)
gp = GaussianProcessRegressor(kernel = kernel,
alpha = sigHz_cc_prior**2,
n_restarts_optimizer = 10)
gp.fit(z_cc_prior.reshape(-1, 1), Hz_cc_prior)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
z_cc_rec = np.linspace(z_min, z_max, n_div)
Hz_cc_rec, sigHz_cc_rec = gp.predict(z_cc_rec.reshape(-1, 1),
return_std=True)
fig = plt.figure()
plt.errorbar(z_cc_prior, Hz_cc_prior, yerr = sigHz_cc_prior,
label = 'CC', fmt = 'kx', markersize = 4,
ecolor = 'red', elinewidth = 2, capsize = 2)
plt.plot(z_cc_rec, Hz_cc_rec, 'b-', label = 'mean')
plt.fill_between(z_cc_rec, Hz_cc_rec - sigHz_cc_rec,
Hz_cc_rec + sigHz_cc_rec,
alpha = .5, facecolor = 'b', edgecolor='None',
label= r'$1\sigma$')
plt.fill_between(z_cc_rec, Hz_cc_rec - 2*sigHz_cc_rec,
Hz_cc_rec + 2*sigHz_cc_rec,
alpha = .2, facecolor = 'b', edgecolor='None',
label= r'$2\sigma$')
plt.title(H0_prior)
plt.xlabel('$z$')
plt.ylabel('$H(z)$')
plt.legend(loc = 'lower right', prop = {'size': 9.5})
plt.xlim(min(z_cc_rec),
max(z_cc_rec))
plt.show()
```
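For reference, `gp.predict` returns the standard GP posterior conditioned on the training pairs $(z_i, H_i)$, with the noise variances `alpha` $= \sigma_{H,i}^2$ added to the kernel diagonal:
\begin{equation}
\bar{H}(z_*) = \mathbf{k}_*^{T} \left( K + \mathrm{diag}\,\sigma_{H}^{2} \right)^{-1} \mathbf{H}, \qquad \mathrm{Var}\left[ H(z_*) \right] = k(z_*, z_*) - \mathbf{k}_*^{T} \left( K + \mathrm{diag}\,\sigma_{H}^{2} \right)^{-1} \mathbf{k}_* ,
\end{equation}
where $K_{ij} = k(z_i, z_j)$ and $(\mathbf{k}_*)_i = k(z_i, z_*)$.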
The Pantheon/MCT dataset comes with a correlation matrix, so a fully consistent use of this dataset on its own would require drawing many samples from the corresponding mean and covariance matrix of $E(z)$. However, since these points make up only a small fraction of the combined CC + Pantheon/MCT + BAO dataset, we simply use their mean values for simplicity.
Moving on, the BAO $H(z)$ function can be obtained without relying on the sound horizon $r_s$ by using the Pantheon $m(z)$ samples. In particular, we take the ratio $\left[ dM(z)/r_s \right] / \left[ dH(z)/r_s \right] = dM(z)/dH(z)$ and use the distance duality relation (DDR), $d_L(z) = d_A(z)(1 + z)^2$, where $d_L(z)$ and $d_A(z)$ are the luminosity distance and angular diameter distance, respectively, to obtain
\begin{equation}
H(z) = \dfrac{ dM(z) }{ dH(z) } \, (1 + z) \, 10^{(25 + M - m(z))/5} \ [ c / \text{Mpc} ],
\end{equation}
where the factor $(1 + z)$ converts the comoving distance $dM(z) = d_L(z)/(1 + z)$ into the luminosity distance probed by the supernovae; this matches the implementation of `H_bao` below.
This requires a calibrated absolute magnitude $M$, which we determine by sampling over the $z < 0.1$ data points in the compressed Pantheon samples. The assumption is that at such very low redshifts the cosmological model-dependence drops out, so the $\Lambda$CDM model is a reasonably fair choice. Furthermore, in using the DDR, we restrict our attention to spatially flat and isotropic cosmologies.
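The apparent magnitude enters through the distance modulus relation,
\begin{equation}
m(z) - M = 5 \log_{10}\left[ \frac{d_L(z)}{\text{Mpc}} \right] + 25,
\end{equation}
so that $d_L(z) = 10^{(m(z) - M - 25)/5}\ \text{Mpc}$; combining this with the DDR and $dH(z) = c/H(z)$ yields the expression for $H(z)$ above.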
The $\Lambda$CDM model is prepared below for the sampling. We import ``cobaya`` and ``getdist`` for the sampling and its statistical analysis.
```python
from scipy.integrate import quad
from cobaya.run import run
def E_lcdm(a, om0):
    '''returns the rescaled Hubble function E = H/H0 of LCDM
    input:
    a = scale factor
    om0 = matter density parameter
    (the dark energy density ol0 = 1 - om0 is derived)
    '''
    ol0 = 1 - om0
    return np.sqrt((om0/(a**3)) + ol0)
def E_inv_lcdm(z, om0):
a = 1/(z + 1)
return 1/E_lcdm(a, om0)
def dl_lcdm(z, om0):
'''returns dL = luminosity distance*H0/c'''
rz = quad(E_inv_lcdm, 0, z, args = (om0))[0]
return (1 + z)*rz
def m_lcdm(z, H0, om0, M):
    '''returns the apparent magnitude m = 5 log10(d_L/10 pc) + M'''
    return 5*np.log10(100000*(c_kms/H0)*dl_lcdm(z, om0)) + M
# prepare the log-likelihood, consider only z < 0.1 points
nr = 29 # drop the last 29 of the 40 compressed points, keeping the 11 with z < 0.1
valid = [r for r in range(covmz_ps_tot.shape[0]) if r not in
np.arange(len(z_ps) - nr, len(z_ps))]
covmz_ps_red = covmz_ps_tot[valid][:, valid] # covariance matrix
invcovmz_ps_red = np.linalg.inv(covmz_ps_red) # inverse C
def loglike_lcdm(H0, om0, M):
'''returns the log-likelihood given (H0, om0, M)'''
m_th = np.array([m_lcdm(z, H0, om0, M)
for z in z_ps[:11]])
if om0 > 0:
noise = mz_ps[:11] - m_th
return -0.5*noise.T@invcovmz_ps_red@noise
    else: # rule out unphysical om0 <= 0
return -np.inf
```
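As a quick sanity check, the log-likelihood can be evaluated at a hypothetical fiducial point (the values below are illustrative, not fitted):

```python
# illustrative fiducial values, not fitted
print(loglike_lcdm(H0 = 70., om0 = 0.3, M = -19.3))
```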
Now, we sample over the parameter space $(H_0, \Omega_{m0}, M)$ using ``cobaya``.
*The next cell will run for about five minutes.* Optionally, skip to the next cell if the output has already been generated in the folder *chains*.
```python
M_calib = {}
for H0_prior in H0_priors:
info_lcdm = {"likelihood": {"loglikeSNIa": loglike_lcdm}}
info_lcdm["params"] = {"H0": {"prior": {"min": 0, "max": 100},
"ref": {"min": 60, "max": 80},
"proposal": 0.5,
"latex": r"H_0"},
"om0": {"prior": {"min": -0.2, "max": 1},
"ref": {"min": 0.2, "max":0.4},
"proposal": 0.01,
"latex": r"\Omega_{m0}"},
"M": {"prior": {"min": -22, "max": -16},
"ref": {"min": -20, "max": -19},
"proposal": 0.01,
"latex": r"M"}}
def ol0_lcdm(om0):
'''returns the dark energy density parameter'''
return 1 - om0
info_lcdm["params"]["ol0"] = {"derived": ol0_lcdm,
"latex": r"\Omega_{\Lambda}"}
def H0_prior_loglike(H0):
'''H0_prior: assume Gaussian'''
H0_ave = H0_priors[H0_prior]['ave']
H0_std = H0_priors[H0_prior]['std']
return -0.5*((H0 - H0_ave)/H0_std)**2
info_lcdm["prior"] = {"H0_prior": H0_prior_loglike}
info_lcdm["sampler"] = {"mcmc": {"Rminus1_stop": 0.001,
"max_tries": 1000}}
# save mcmc chain
info_lcdm["output"] = 'chains/M_calib_' + H0_prior
# overwrite chain, if it exists
info_lcdm["force"] = True
# run MCMC
updated_info_lcdm, sampler_lcdm = run(info_lcdm)
```
*Note*: Since the computation only included the $z < 0.1$ data points (the first 11 of the 40 points in the compressed Pantheon samples), it is to be expected that there is much less information available about the geometry of the expansion, i.e., the confidence intervals for the $\Omega$'s are large. Nonetheless, the methodology is justified because the absolute magnitude $M$ (a nuisance parameter) is constrained to subpercent precision, since it depends only weakly on the dynamics of the cosmic expansion.
The calibrated $M$ for each of the $H_0$ priors is shown below.
```python
from getdist.mcsamples import loadMCSamples
import getdist.plots as gdplt
import os # requires *full path*
M_calib = {}
gdsamples_per_H0_prior = {}
for H0_prior in H0_priors:
folder_file = 'chains/M_calib_' + H0_prior
gdsamples = loadMCSamples(os.path.abspath(folder_file))
# get statistics
stats = gdsamples.getMargeStats()
M_ave = stats.parWithName("M").mean
M_std = stats.parWithName("M").err
M_calib[H0_prior] = {'ave': M_ave, 'std': M_std}
gdsamples_per_H0_prior[H0_prior] = gdsamples
# print calibrated M
for each in M_calib:
print(each, 'M = ', M_calib[each]['ave'], '+/-', M_calib[each]['std'])
```
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - minuslogprior
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - minuslogprior__H0_prior
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - chi2
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - chi2__loglikeSNIa
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - chi2
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - chi2__loglikeSNIa
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - minuslogprior
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - minuslogprior__H0_prior
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - chi2
[root] *WARNING* fine_bins not large enough to well sample smoothing scale - chi2__loglikeSNIa
R19 M = -19.24672146997805 +/- 0.047619802661079305
TRGB M = -19.374133105389465 +/- 0.06345405899637714
P18 M = -19.44973655096901 +/- 0.028209150641493812
The posteriors for each of the $H_0$ priors are superposed in the next plot.
```python
def plot_calib():
'''plot posterior of cosmological parameters'''
    gdsamples_R19 = gdsamples_per_H0_prior['R19']
gdsamples_TRGB = gdsamples_per_H0_prior['TRGB']
gdsamples_P18 = gdsamples_per_H0_prior['P18']
gdplot = gdplt.get_subplot_plotter()
    gdplot.triangle_plot([gdsamples_R19, gdsamples_TRGB, gdsamples_P18],
["H0", "M"],
contour_ls = ['-', '--', '-.'],
contour_lws = [1.5, 1.5, 1.5],
contour_colors = [('blue'),
('red'),
('green')],
filled = True,
legend_loc = 'upper right',
legend_labels = ['R19', 'TRGB', 'P18'])
gdplot = gdplt.get_subplot_plotter()
    gdplot.triangle_plot([gdsamples_R19, gdsamples_TRGB, gdsamples_P18],
["H0", "om0"],
contour_ls = ['-', '--', '-.'],
contour_lws = [1.5, 1.5, 1.5],
contour_colors = [('blue'),
('red'),
('green')],
filled = True,
legend_loc = 'upper right',
legend_labels = ['R19', 'TRGB', 'P18'])
plot_calib()
```
We now prepare the Hubble function from the BAO data and the reconstructed $m(z)$ of the Pantheon samples.
```python
def H_bao(z, dMrs, dHrs, m, M):
    '''Hubble function at redshift z, in km/s/Mpc
    dMrs = dM(z)/rs
    dHrs = dH(z)/rs
    rs = sound horizon
    (m, M) come from dA(z) -> DDR -> dL (pantheon samples)'''
    # (10**5)*(10**((M - m)/5)) = 10**((25 + M - m)/5) = Mpc/dL(z)
    return c_kms*(dMrs/dHrs)*(1 + z)*(10**5)*(10**((M - m)/5))

def sigH_bao(z, dMrs, sigdMrs,
             dHrs, sigdHrs, m, sigm, M, sigM):
    '''uncertainty in H_bao, by first-order error propagation'''
    # partial derivatives (jacobians) of H_bao w.r.t. each input
    jac_dM = (10**((25 + M - m)/5))*c_kms*(1 + z)/dHrs
    jac_dH = (10**((25 + M - m)/5))*c_kms*dMrs*(1 + z)/(dHrs**2)
    jac_m = (20000*(10**((M - m)/5))*c_kms*dMrs*(1 + z)* \
             np.log(10))/dHrs
    jac_M = jac_m
    # add the independent contributions in quadrature
    var_dM = (jac_dM*sigdMrs)**2
    var_dH = (jac_dH*sigdHrs)**2
    var_m = (jac_m*sigm)**2
    var_M = (jac_M*sigM)**2
    return np.sqrt(var_dM + var_dH + var_m + var_M)
```
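`sigH_bao` implements first-order (Gaussian) error propagation, adding the four independent contributions in quadrature:
\begin{equation}
\sigma_H^2 = \left( \frac{\partial H}{\partial (dM/r_s)} \sigma_{dM/r_s} \right)^2 + \left( \frac{\partial H}{\partial (dH/r_s)} \sigma_{dH/r_s} \right)^2 + \left( \frac{\partial H}{\partial m} \sigma_m \right)^2 + \left( \frac{\partial H}{\partial M} \sigma_M \right)^2 .
\end{equation}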
The compressed Pantheon samples' $m(z)$ is reconstructed and plotted below.
```python
gp = GaussianProcessRegressor(kernel = kernel,
alpha = np.diag(covmz_ps_tot),
n_restarts_optimizer = 10)
gp.fit(np.log(z_ps).reshape(-1, 1), mz_ps)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
logz_ps_rec = np.log(np.linspace(z_min, z_max, n_div*4))
z_ps_rec = np.exp(logz_ps_rec)
mz_ps_rec, sigmz_ps_rec = \
gp.predict(logz_ps_rec.reshape(-1, 1),
return_std = True)
def mz_approx(z):
    '''approximates the reconstruction mz_rec piecewise
    (nearest-neighbor lookup in ln z)'''
    delta_logz = list(abs(np.log(z) - logz_ps_rec))
    min_index = delta_logz.index(min(delta_logz))
    return mz_ps_rec[min_index], sigmz_ps_rec[min_index]
# plot in log(z)
fig = plt.figure()
plt.errorbar(np.log(z_ps), mz_ps,
yerr = np.sqrt(np.diag(covmz_ps_tot)),
fmt = 'ko', markersize = 5,
ecolor = 'red', elinewidth = 2,
capsize = 2, label = 'Pantheon')
plt.plot(logz_ps_rec, mz_ps_rec, 'b--', label = 'mean')
plt.fill_between(logz_ps_rec,
mz_ps_rec - sigmz_ps_rec,
mz_ps_rec + sigmz_ps_rec,
alpha = .4, facecolor = 'b',
edgecolor = 'None', label = r'1$\sigma$')
plt.fill_between(logz_ps_rec,
mz_ps_rec - 2*sigmz_ps_rec,
mz_ps_rec + 2*sigmz_ps_rec,
alpha = .2, facecolor = 'b',
edgecolor = 'None', label = r'2$\sigma$')
plt.xlabel('$\ln(z)$')
plt.ylabel('$m(z)$')
plt.legend(loc = 'upper left', prop = {'size': 9.5})
plt.xlim(np.log(z_min), np.log(z_max)) # the axis is in ln(z), not log10(z)
plt.ylim(10, 30)
plt.show()
# plot in z
fig = plt.figure()
plt.errorbar(z_ps, mz_ps,
yerr = np.sqrt(np.diag(covmz_ps_tot)),
fmt = 'ko', markersize = 5,
ecolor = 'red', elinewidth = 2,
capsize = 2, label = 'Pantheon')
plt.plot(z_ps_rec, mz_ps_rec, 'b--', label = 'mean')
plt.fill_between(z_ps_rec,
mz_ps_rec - sigmz_ps_rec,
mz_ps_rec + sigmz_ps_rec,
alpha = .4, facecolor = 'b',
edgecolor='None', label = r'1$\sigma$')
plt.fill_between(z_ps_rec,
mz_ps_rec - 2*sigmz_ps_rec,
mz_ps_rec + 2*sigmz_ps_rec,
alpha = .2, facecolor = 'b',
edgecolor='None', label = r'2$\sigma$')
plt.xlabel('$z$')
plt.ylabel('$m(z)$')
plt.legend(loc = 'lower right', prop = {'size': 9.5})
plt.xlim(z_min, z_max)
plt.ylim(10, 30)
plt.show()
```
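As a quick check of the piecewise lookup, the reconstruction can be queried at one of the BAO redshifts:

```python
# nearest-neighbor m(z) at the first BAO redshift
m_chk, sigm_chk = mz_approx(z_bao[0])
print(m_chk, sigm_chk)
```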
To simplify the BAO reconstructions, we set up the function below to obtain the BAO $H(z)$ corresponding to a given $H_0$ prior.
```python
def setup_BAO(H0_prior):
'''prepares H(z) from BAO for a given H0 prior and calibrated M'''
M_ave = M_calib[H0_prior]['ave']
M_std = M_calib[H0_prior]['std']
# setup H(z) BAO data
Hz_bao = []
sigHz_bao = []
for i in np.arange(0, len(z_bao)):
m, sigm = mz_approx(z_bao[i])
Hz_i = H_bao(z = z_bao[i],
dMrs = dmrs_bao[i],
dHrs = dhrs_bao[i],
m = m,
M = M_ave)
sigHz_i = sigH_bao(z = z_bao[i],
dMrs = dmrs_bao[i],
sigdMrs = sigdmrs_bao[i],
dHrs = dhrs_bao[i],
sigdHrs = sigdhrs_bao[i],
m = m,
sigm = sigm,
M = M_ave,
sigM = M_std)
Hz_bao.append(Hz_i)
sigHz_bao.append(sigHz_i)
Hz_bao = np.array(Hz_bao)
sigHz_bao = np.array(sigHz_bao)
return Hz_bao, sigHz_bao
```
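For example, once the calibrated $M$ values are available, the BAO Hubble points for a single prior can be obtained directly:

```python
# BAO H(z) points under the P18 prior
Hz_bao_p18, sigHz_bao_p18 = setup_BAO('P18')
print(Hz_bao_p18[:3], sigHz_bao_p18[:3])
```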
The CC + BAO GP-reconstructed $H(z)$ is obtained in the next cell.
```python
for H0_prior in H0_priors:
H0_ave = H0_priors[H0_prior]['ave']
H0_std = H0_priors[H0_prior]['std']
z_cc_prior = np.append(np.array([z_min]), z_cc)
Hz_cc_prior = np.append(np.array([H0_ave]), Hz_cc)
sigHz_cc_prior = np.append(np.array([H0_std]), sigHz_cc)
Hz_bao, sigHz_bao = setup_BAO(H0_prior)
z_bao_cc_prior = np.append(z_bao, z_cc_prior)
Hz_bao_cc_prior = np.append(Hz_bao, Hz_cc_prior)
sigHz_bao_cc_prior = np.append(sigHz_bao, sigHz_cc_prior)
gp_bao_cc = GaussianProcessRegressor(kernel = kernel,
alpha = sigHz_bao_cc_prior**2,
n_restarts_optimizer = 10)
gp_bao_cc.fit(z_bao_cc_prior.reshape(-1, 1), Hz_bao_cc_prior)
print("\nLearned kernel: %s" % gp_bao_cc.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp_bao_cc.log_marginal_likelihood(gp_bao_cc.kernel_.theta))
z_bao_cc_rec = np.linspace(z_min, z_max, n_div)
Hz_bao_cc_rec, sigHz_bao_cc_rec = \
gp_bao_cc.predict(z_bao_cc_rec.reshape(-1, 1),
return_std=True)
fig = plt.figure()
plt.errorbar(z_bao, Hz_bao,
yerr = sigHz_bao,
fmt = 'k^', markersize = 4,
ecolor = 'red', elinewidth = 2,
capsize = 2, label = 'BAO')
plt.errorbar(z_cc_prior, Hz_cc_prior, yerr = sigHz_cc_prior,
label = 'CC', fmt = 'ko', markersize = 5,
ecolor = 'red', elinewidth = 2, capsize = 2)
plt.plot(z_bao_cc_rec,
Hz_bao_cc_rec, 'b--', label = 'mean')
plt.fill_between(z_bao_cc_rec,
Hz_bao_cc_rec - sigHz_bao_cc_rec,
Hz_bao_cc_rec + sigHz_bao_cc_rec,
alpha = .4, facecolor = 'b',
edgecolor='None', label=r'$1\sigma$')
plt.fill_between(z_bao_cc_rec,
Hz_bao_cc_rec - 2*sigHz_bao_cc_rec,
Hz_bao_cc_rec + 2*sigHz_bao_cc_rec,
alpha = .2, facecolor = 'b',
edgecolor='None', label=r'$2\sigma$')
plt.title(H0_prior)
plt.xlabel('$z$')
plt.ylabel('$H(z)$')
plt.legend(loc = 'upper left', prop = {'size': 9.5})
plt.xlim(z_min, z_max)
plt.show()
```
The GP reconstruction given the combined CC + Pantheon/MCT + BAO dataset is obtained in the next cell. For comparison, it is shown together with the reconstructions obtained from the CC-only and CC + BAO datasets.
```python
for H0_prior in H0_priors:
H0_ave = H0_priors[H0_prior]['ave']
H0_std = H0_priors[H0_prior]['std']
# reconstruct H(z): CC + prior
z_cc_prior = np.append(np.array([z_min]), z_cc)
Hz_cc_prior = np.append(np.array([H0_ave]), Hz_cc)
sigHz_cc_prior = np.append(np.array([H0_std]), sigHz_cc)
gp = GaussianProcessRegressor(kernel = kernel,
alpha = sigHz_cc_prior**2,
n_restarts_optimizer = 10)
gp.fit(z_cc_prior.reshape(-1, 1), Hz_cc_prior)
z_cc_rec = np.linspace(z_min, z_max, n_div)
Hz_cc_rec, sigHz_cc_rec = gp.predict(z_cc_rec.reshape(-1, 1),
return_std = True)
# reconstruct H(z): CC + BAO + Prior
Hz_bao, sigHz_bao = setup_BAO(H0_prior)
z_bao_cc_prior = np.append(z_bao, z_cc_prior)
Hz_bao_cc_prior = np.append(Hz_bao, Hz_cc_prior)
sigHz_bao_cc_prior = np.append(sigHz_bao, sigHz_cc_prior)
gp_bao_cc = GaussianProcessRegressor(kernel = kernel,
alpha = sigHz_bao_cc_prior**2,
n_restarts_optimizer = 10)
gp_bao_cc.fit(z_bao_cc_prior.reshape(-1, 1), Hz_bao_cc_prior)
z_bao_cc_rec = np.linspace(z_min, z_max, n_div)
Hz_bao_cc_rec, sigHz_bao_cc_rec = \
gp_bao_cc.predict(z_bao_cc_rec.reshape(-1, 1),
return_std = True)
# Pantheon/MCT + CC + BAO + Prior, subscripted sbc for 'SN', 'BAO', 'CC'
Hz_pnmct = H0_ave*Ez_sn
var_pnmct = (np.diag(covEz_sn_corr)* \
(H0_ave**2)) + (H0_std*Ez_sn)**2
z_sbc = np.append(z_sn, z_bao_cc_prior)
Hz_sbc = np.append(Hz_pnmct, Hz_bao_cc_prior)
sigHz_sbc = np.append(np.sqrt(var_pnmct), sigHz_bao_cc_prior)
# save full data for part 2
sbc_data = np.stack((z_sbc, Hz_sbc, sigHz_sbc), axis = 1)
savetxt('sbc_data_' + H0_prior + '.txt', sbc_data)
gp_sbc = GaussianProcessRegressor(kernel = kernel,
alpha = sigHz_sbc**2,
n_restarts_optimizer = 10)
gp_sbc.fit(z_sbc.reshape(-1, 1), Hz_sbc)
z_sbc_rec = np.linspace(1e-5, 2, 100)
Hz_sbc_rec, sigHz_sbc_rec = \
gp_sbc.predict(z_sbc_rec.reshape(-1, 1), return_std = True)
print("\nLearned kernel: %s" % gp_sbc.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp_sbc.log_marginal_likelihood(gp_sbc.kernel_.theta))
fig = plt.figure()
# plot Pantheon/MCT
plt.errorbar(z_sn, Hz_pnmct, yerr = np.sqrt(var_pnmct),
label = 'Pantheon/MCT', fmt = 'ko', markersize = 5,
ecolor = 'red', elinewidth = 2, capsize = 2)
# plot BAO
plt.errorbar(z_bao, Hz_bao, yerr = sigHz_bao,
fmt = 'k^', markersize = 5, ecolor = 'red',
elinewidth = 2, capsize = 2, label = 'BAO')
# plot CC
plt.errorbar(z_cc, Hz_cc, yerr = sigHz_cc, label = 'CC',
fmt = 'kx', markersize = 5,
ecolor = 'red', elinewidth = 2, capsize = 2)
# GP reconstruction from SN, BAO, CC
plt.plot(z_sbc_rec, Hz_sbc_rec, 'b--', label = 'mean')
plt.fill_between(z_sbc_rec,
Hz_sbc_rec - sigHz_sbc_rec,
Hz_sbc_rec + sigHz_sbc_rec,
alpha = .4, facecolor = 'b',
edgecolor = 'None', label = r'1$\sigma$')
plt.fill_between(z_sbc_rec,
Hz_sbc_rec - 2*sigHz_sbc_rec,
Hz_sbc_rec + 2*sigHz_sbc_rec,
alpha = .2, facecolor = 'b',
                     edgecolor = 'None', label = r'2$\sigma$')
plt.title(H0_prior)
plt.xlabel('$z$')
plt.ylabel('$H(z)$')
plt.legend(loc = 'upper left', prop = {'size': 9.5})
plt.xlim(min(z_sbc_rec), max(z_sbc_rec))
plt.show()
fig = plt.figure()
# CC only
plt.plot(z_cc_rec, Hz_cc_rec, 'r--', label = 'CC')
plt.fill_between(z_cc_rec,
Hz_cc_rec - 2*sigHz_cc_rec,
Hz_cc_rec + 2*sigHz_cc_rec,
alpha = .2, facecolor = 'r',
edgecolor = 'r', hatch = '-')
# CC + BAO
plt.plot(z_bao_cc_rec, Hz_bao_cc_rec,
'g-.', label = 'BAO + CC')
plt.fill_between(z_bao_cc_rec,
Hz_bao_cc_rec - 2*sigHz_bao_cc_rec,
Hz_bao_cc_rec + 2*sigHz_bao_cc_rec,
alpha = .2, facecolor = 'g',
edgecolor ='g', hatch = '|')
# CC + SN + BAO
plt.plot(z_sbc_rec, Hz_sbc_rec, 'b-',
label = 'Pantheon/MCT + BAO + CC')
plt.fill_between(z_sbc_rec,
Hz_sbc_rec - 2*sigHz_sbc_rec,
Hz_sbc_rec + 2*sigHz_sbc_rec,
alpha = .2, facecolor = 'b',
edgecolor = 'b', hatch = 'x')
plt.title(H0_prior)
plt.xlabel('$z$')
plt.ylabel('$H(z)$')
plt.legend(loc = 'upper left', prop = {'size': 9.5})
plt.xlim(min(z_sbc_rec), max(z_sbc_rec))
plt.show()
```
The reconstructed Hubble function will be used in part 2 to draw Horndeski theories from the Hubble data.
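The saved files can be loaded there with, e.g. (assuming the same working directory):

```python
# columns: z, H(z), sigma_H(z)
z_sbc, Hz_sbc, sigHz_sbc = loadtxt('sbc_data_P18.txt', unpack = True)
```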
### References
**Data sets** used in this work:
***Pantheon/MCT***: A. G. Riess et al., Type Ia Supernova Distances at Redshift > 1.5 from the Hubble Space
Telescope Multi-cycle Treasury Programs: The Early Expansion Rate, Astrophys. J. 853 (2018)
126 [[1710.00844](https://arxiv.org/abs/1710.00844)].
***Pantheon samples***: D. M. Scolnic et al., The Complete Light-curve Sample of Spectroscopically Confirmed SNe Ia
from Pan-STARRS1 and Cosmological Constraints from the Combined Pantheon Sample,
Astrophys. J. 859 (2018) 101 [[1710.00845](https://arxiv.org/abs/1710.00845)].
***Baryon Acoustic Oscillations***, from *various sources*:
(1) BOSS collaboration, The clustering of galaxies in the completed SDSS-III Baryon Oscillation
Spectroscopic Survey: cosmological analysis of the DR12 galaxy sample, Mon. Not. Roy.
Astron. Soc. 470 (2017) 2617 [[1607.03155](https://arxiv.org/abs/1607.03155)].
(2) J. E. Bautista et al., The Completed SDSS-IV extended Baryon Oscillation Spectroscopic
Survey: measurement of the BAO and growth rate of structure of the luminous red galaxy
sample from the anisotropic correlation function between redshifts 0.6 and 1, Mon. Not. Roy.
Astron. Soc. 500 (2020) 736 [[2007.08993](https://arxiv.org/abs/2007.08993)].
(3) H. Gil-Marin et al., The Completed SDSS-IV extended Baryon Oscillation Spectroscopic
Survey: measurement of the BAO and growth rate of structure of the luminous red galaxy
sample from the anisotropic power spectrum between redshifts 0.6 and 1.0, Mon. Not. Roy.
Astron. Soc. 498 (2020) 2492 [[2007.08994](https://arxiv.org/abs/2007.08994)].
(4) A. Tamone et al., The Completed SDSS-IV extended Baryon Oscillation Spectroscopic Survey:
Growth rate of structure measurement from anisotropic clustering analysis in configuration
space between redshift 0.6 and 1.1 for the Emission Line Galaxy sample, Mon. Not. Roy.
Astron. Soc. 499 (2020) 5527 [[2007.09009](https://arxiv.org/abs/2007.09009)].
(5) A. de Mattia et al., The Completed SDSS-IV extended Baryon Oscillation Spectroscopic
Survey: measurement of the BAO and growth rate of structure of the emission line galaxy
sample from the anisotropic power spectrum between redshift 0.6 and 1.1, Mon. Not. Roy.
Astron. Soc. 501 (2021) 5616 [[2007.09008](https://arxiv.org/abs/2007.09008)].
(6) R. Neveux et al., The completed SDSS-IV extended Baryon Oscillation Spectroscopic Survey:
BAO and RSD measurements from the anisotropic power spectrum of the quasar sample
between redshift 0.8 and 2.2, Mon. Not. Roy. Astron. Soc. 499 (2020) 210 [[2007.08999](https://arxiv.org/abs/2007.08999)].
(7) J. Hou et al., The Completed SDSS-IV extended Baryon Oscillation Spectroscopic Survey:
BAO and RSD measurements from anisotropic clustering analysis of the Quasar Sample in
configuration space between redshift 0.8 and 2.2, Mon. Not. Roy. Astron. Soc. 500 (2020) 1201
[[2007.08998](https://arxiv.org/abs/2007.08998)].
(8) V. de Sainte Agathe et al., Baryon acoustic oscillations at z = 2.34 from the correlations of
Lyα absorption in eBOSS DR14, Astron. Astrophys. 629 (2019) A85 [[1904.03400](https://arxiv.org/abs/1904.03400)].
(9) M. Blomqvist et al., Baryon acoustic oscillations from the cross-correlation of Lyα absorption
and quasars in eBOSS DR14, Astron. Astrophys. 629 (2019) A86 [[1904.03430](https://arxiv.org/abs/1904.03430)].
***Cosmic Chronometers***, from *various sources*:
(1) M. Moresco, L. Pozzetti, A. Cimatti, R. Jimenez, C. Maraston, L. Verde et al., A 6%
measurement of the Hubble parameter at z ∼ 0.45: direct evidence of the epoch of cosmic
re-acceleration, JCAP 05 (2016) 014 [[1601.01701](https://arxiv.org/abs/1601.01701)].
(2) M. Moresco, Raising the bar: new constraints on the Hubble parameter with cosmic
chronometers at z ∼ 2, Mon. Not. Roy. Astron. Soc. 450 (2015) L16 [[1503.01116](https://arxiv.org/abs/1503.01116)].
(3) C. Zhang, H. Zhang, S. Yuan, S. Liu, T.-J. Zhang and Y.-C. Sun, Four new observational H(z)
data from luminous red galaxies in the Sloan Digital Sky Survey data release seven, Research in
Astronomy and Astrophysics 14 (2014) 1221 [[1207.4541](https://arxiv.org/abs/1207.4541)].
(4) D. Stern, R. Jimenez, L. Verde, M. Kamionkowski and S. A. Stanford, Cosmic chronometers:
constraining the equation of state of dark energy. I: H(z) measurements, JCAP 2010 (2010)
008 [[0907.3149](https://arxiv.org/abs/0907.3149)].
(5) M. Moresco et al., Improved constraints on the expansion rate of the Universe up to z ∼ 1.1 from
the spectroscopic evolution of cosmic chronometers, JCAP 2012 (2012) 006 [[1201.3609](https://arxiv.org/abs/1201.3609)].
(6) Ratsimbazafy et al., Age-dating Luminous Red Galaxies observed with the Southern African
Large Telescope, Mon. Not. Roy. Astron. Soc. 467 (2017) 3239 [[1702.00418](https://arxiv.org/abs/1702.00418)].
***R19 $H_0$ prior***: A. G. Riess, S. Casertano, W. Yuan, L. M. Macri and D. Scolnic, Large Magellanic Cloud
Cepheid Standards Provide a 1% Foundation for the Determination of the Hubble Constant and
Stronger Evidence for Physics beyond ΛCDM, Astrophys. J. 876 (2019) 85 [[1903.07603](https://arxiv.org/abs/1903.07603)].
***TRGB $H_0$ prior***: W. L. Freedman et al., The Carnegie-Chicago Hubble Program. VIII. An Independent
Determination of the Hubble Constant Based on the Tip of the Red Giant Branch, Astrophys.
J. 882 (2019) 34 [[1907.05922](https://arxiv.org/abs/1907.05922)].
***P18 $H_0$ prior***: Planck collaboration, Planck 2018 results. VI. Cosmological parameters, Astron. Astrophys. 641 (2020) A6 [[1807.06209](https://arxiv.org/abs/1807.06209)].
**Python packages** used in this work:
``scikit-learn``: F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel et al., Scikit-learn:
Machine learning in Python, [Journal of Machine Learning Research 12 (2011) 2825](https://www.jmlr.org/papers/volume12/pedregosa11a/pedregosa11a.pdf).
``cobaya``: J. Torrado and A. Lewis, Cobaya: Code for Bayesian Analysis of hierarchical physical models (2020) [[2005.05290](https://arxiv.org/abs/2005.05290)].
``getdist``: A. Lewis, GetDist: a Python package for analysing Monte Carlo samples (2019) [[1910.13970](https://arxiv.org/abs/1910.13970)].
``numpy``: C. R. Harris et al., Array programming with NumPy, [Nature 585 (2020) 357–362](https://www.nature.com/articles/s41586-020-2649-2).
``scipy``: P. Virtanen et al., SciPy 1.0: Fundamental Algorithms for Scientific Computing in Python,
[Nature Methods 17 (2020) 261](https://www.nature.com/articles/s41592-019-0686-2).
``seaborn``: M. L. Waskom, seaborn: statistical data visualization, [Journal of Open Source Software 6
(2021) 3021](https://joss.theoj.org/papers/10.21105/joss.03021).
``matplotlib``: J. D. Hunter, Matplotlib: A 2d graphics environment, [Computing in Science Engineering 9
(2007) 90](https://ieeexplore.ieee.org/document/4160265).
|
[STATEMENT]
lemma has_path_extend [forward]:
"has_path n S i j \<Longrightarrow> S \<subseteq> T \<Longrightarrow> has_path n T i j"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>has_path n S i j; S \<subseteq> T\<rbrakk> \<Longrightarrow> has_path n T i j
[PROOF STEP]
by auto2 |
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module category_theory.category.Bipointed
! leanprover-community/mathlib commit c8ab806ef73c20cab1d87b5157e43a82c205f28e
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.CategoryTheory.Category.Pointed
/-!
# The category of bipointed types
This defines `Bipointed`, the category of bipointed types.
## TODO
Monoidal structure
-/
open CategoryTheory
universe u
variable {α β : Type _}
set_option linter.uppercaseLean3 false
/-- The category of bipointed types. -/
structure Bipointed : Type (u + 1) where
X : Type u
toProd : X × X
#align Bipointed Bipointed
namespace Bipointed
instance : CoeSort Bipointed (Type _) :=
⟨X⟩
-- porting note: protected attribute does not work
-- attribute [protected] Bipointed.X
/-- Turns a bipointing into a bipointed type. -/
def of {X : Type _} (to_prod : X × X) : Bipointed :=
⟨X, to_prod⟩
#align Bipointed.of Bipointed.of
@[simp]
theorem coe_of {X : Type _} (to_prod : X × X) : ↥(of to_prod) = X :=
rfl
#align Bipointed.coe_of Bipointed.coe_of
alias of ← _root_.Prod.Bipointed
#align prod.Bipointed Prod.Bipointed
instance : Inhabited Bipointed :=
⟨of ((), ())⟩
/-- Morphisms in `Bipointed`. -/
@[ext]
protected structure Hom (X Y : Bipointed.{u}) : Type u where
toFun : X → Y
map_fst : toFun X.toProd.1 = Y.toProd.1
map_snd : toFun X.toProd.2 = Y.toProd.2
#align Bipointed.hom Bipointed.Hom
namespace Hom
/-- The identity morphism of `X : Bipointed`. -/
@[simps]
nonrec def id (X : Bipointed) : Bipointed.Hom X X :=
⟨id, rfl, rfl⟩
#align Bipointed.hom.id Bipointed.Hom.id
instance (X : Bipointed) : Inhabited (Bipointed.Hom X X) :=
⟨id X⟩
/-- Composition of morphisms of `Bipointed`. -/
@[simps]
def comp {X Y Z : Bipointed.{u}} (f : Bipointed.Hom X Y) (g : Bipointed.Hom Y Z) :
Bipointed.Hom X Z :=
⟨g.toFun ∘ f.toFun, by rw [Function.comp_apply, f.map_fst, g.map_fst], by
rw [Function.comp_apply, f.map_snd, g.map_snd]⟩
#align Bipointed.hom.comp Bipointed.Hom.comp
end Hom
instance largeCategory : LargeCategory Bipointed where
Hom := Bipointed.Hom
id := Hom.id
comp := @Hom.comp
#align Bipointed.large_category Bipointed.largeCategory
instance concreteCategory : ConcreteCategory Bipointed where
Forget :=
{ obj := Bipointed.X
map := @Hom.toFun }
forget_faithful := ⟨@Hom.ext⟩
#align Bipointed.concrete_category Bipointed.concreteCategory
/-- Swaps the pointed elements of a bipointed type. `Prod.swap` as a functor. -/
@[simps]
def swap : Bipointed ⥤ Bipointed where
obj X := ⟨X, X.toProd.swap⟩
map f := ⟨f.toFun, f.map_snd, f.map_fst⟩
#align Bipointed.swap Bipointed.swap
/-- The equivalence between `Bipointed` and itself induced by `Prod.swap` both ways. -/
@[simps!]
def swapEquiv : Bipointed ≌ Bipointed :=
CategoryTheory.Equivalence.mk swap swap
(NatIso.ofComponents
(fun X =>
{ hom := ⟨id, rfl, rfl⟩
inv := ⟨id, rfl, rfl⟩ })
fun f => rfl)
(NatIso.ofComponents
(fun X =>
{ hom := ⟨id, rfl, rfl⟩
inv := ⟨id, rfl, rfl⟩ })
fun f => rfl)
#align Bipointed.swap_equiv Bipointed.swapEquiv
@[simp]
theorem swapEquiv_symm : swapEquiv.symm = swapEquiv :=
rfl
#align Bipointed.swap_equiv_symm Bipointed.swapEquiv_symm
end Bipointed
/-- The forgetful functor from `Bipointed` to `Pointed` which forgets about the second point. -/
def bipointedToPointedFst : Bipointed ⥤ Pointed where
obj X := ⟨X, X.toProd.1⟩
map f := ⟨f.toFun, f.map_fst⟩
#align Bipointed_to_Pointed_fst bipointedToPointedFst
/-- The forgetful functor from `Bipointed` to `Pointed` which forgets about the first point. -/
def bipointedToPointedSnd : Bipointed ⥤ Pointed where
obj X := ⟨X, X.toProd.2⟩
map f := ⟨f.toFun, f.map_snd⟩
#align Bipointed_to_Pointed_snd bipointedToPointedSnd
@[simp]
theorem bipointedToPointedFst_comp_forget :
bipointedToPointedFst ⋙ forget Pointed = forget Bipointed :=
rfl
#align Bipointed_to_Pointed_fst_comp_forget bipointedToPointedFst_comp_forget
@[simp]
theorem bipointedToPointedSnd_comp_forget :
bipointedToPointedSnd ⋙ forget Pointed = forget Bipointed :=
rfl
#align Bipointed_to_Pointed_snd_comp_forget bipointedToPointedSnd_comp_forget
@[simp]
theorem swap_comp_bipointedToPointedFst :
Bipointed.swap ⋙ bipointedToPointedFst = bipointedToPointedSnd :=
rfl
#align swap_comp_Bipointed_to_Pointed_fst swap_comp_bipointedToPointedFst
@[simp]
theorem swap_comp_bipointedToPointedSnd :
Bipointed.swap ⋙ bipointedToPointedSnd = bipointedToPointedFst :=
rfl
#align swap_comp_Bipointed_to_Pointed_snd swap_comp_bipointedToPointedSnd
/-- The functor from `Pointed` to `Bipointed` which bipoints the point. -/
def pointedToBipointed : Pointed.{u} ⥤ Bipointed where
obj X := ⟨X, X.point, X.point⟩
map f := ⟨f.toFun, f.map_point, f.map_point⟩
#align Pointed_to_Bipointed pointedToBipointed
/-- The functor from `Pointed` to `Bipointed` which adds a second point. -/
def pointedToBipointedFst : Pointed.{u} ⥤ Bipointed where
obj X := ⟨Option X, X.point, none⟩
map f := ⟨Option.map f.toFun, congr_arg _ f.map_point, rfl⟩
map_id _ := Bipointed.Hom.ext _ _ Option.map_id
map_comp f g := Bipointed.Hom.ext _ _ (Option.map_comp_map f.1 g.1).symm
#align Pointed_to_Bipointed_fst pointedToBipointedFst
/-- The functor from `Pointed` to `Bipointed` which adds a first point. -/
def pointedToBipointedSnd : Pointed.{u} ⥤ Bipointed where
obj X := ⟨Option X, none, X.point⟩
map f := ⟨Option.map f.toFun, rfl, congr_arg _ f.map_point⟩
map_id _ := Bipointed.Hom.ext _ _ Option.map_id
map_comp f g := Bipointed.Hom.ext _ _ (Option.map_comp_map f.1 g.1).symm
#align Pointed_to_Bipointed_snd pointedToBipointedSnd
@[simp]
theorem pointedToBipointedFst_comp_swap :
pointedToBipointedFst ⋙ Bipointed.swap = pointedToBipointedSnd :=
rfl
#align Pointed_to_Bipointed_fst_comp_swap pointedToBipointedFst_comp_swap
@[simp]
theorem pointedToBipointedSnd_comp_swap :
pointedToBipointedSnd ⋙ Bipointed.swap = pointedToBipointedFst :=
rfl
#align Pointed_to_Bipointed_snd_comp_swap pointedToBipointedSnd_comp_swap
/-- `BipointedToPointed_fst` is inverse to `PointedToBipointed`. -/
@[simps!]
def pointedToBipointedCompBipointedToPointedFst :
pointedToBipointed ⋙ bipointedToPointedFst ≅ 𝟭 _ :=
NatIso.ofComponents
(fun X =>
{ hom := ⟨id, rfl⟩
inv := ⟨id, rfl⟩ })
fun f => rfl
#align Pointed_to_Bipointed_comp_Bipointed_to_Pointed_fst pointedToBipointedCompBipointedToPointedFst
/-- `BipointedToPointed_snd` is inverse to `PointedToBipointed`. -/
@[simps!]
def pointedToBipointedCompBipointedToPointedSnd :
pointedToBipointed ⋙ bipointedToPointedSnd ≅ 𝟭 _ :=
NatIso.ofComponents
(fun X =>
{ hom := ⟨id, rfl⟩
inv := ⟨id, rfl⟩ })
fun f => rfl
#align Pointed_to_Bipointed_comp_Bipointed_to_Pointed_snd pointedToBipointedCompBipointedToPointedSnd
/-- The free/forgetful adjunction between `PointedToBipointed_fst` and `BipointedToPointed_fst`.
-/
def pointedToBipointedFstBipointedToPointedFstAdjunction :
pointedToBipointedFst ⊣ bipointedToPointedFst :=
Adjunction.mkOfHomEquiv
{ homEquiv := fun X Y =>
{ toFun := fun f => ⟨f.toFun ∘ Option.some, f.map_fst⟩
invFun := fun f => ⟨fun o => o.elim Y.toProd.2 f.toFun, f.map_point, rfl⟩
left_inv := fun f => by
apply Bipointed.Hom.ext
funext x
cases x
· exact f.map_snd.symm
· rfl
right_inv := fun f => Pointed.Hom.ext _ _ rfl }
homEquiv_naturality_left_symm := fun f g => by
apply Bipointed.Hom.ext
funext x
cases x <;> rfl }
#align Pointed_to_Bipointed_fst_Bipointed_to_Pointed_fst_adjunction pointedToBipointedFstBipointedToPointedFstAdjunction
/-- The free/forgetful adjunction between `PointedToBipointed_snd` and `BipointedToPointed_snd`.
-/
def pointedToBipointedSndBipointedToPointedSndAdjunction :
pointedToBipointedSnd ⊣ bipointedToPointedSnd :=
Adjunction.mkOfHomEquiv
{ homEquiv := fun X Y =>
{ toFun := fun f => ⟨f.toFun ∘ Option.some, f.map_snd⟩
invFun := fun f => ⟨fun o => o.elim Y.toProd.1 f.toFun, rfl, f.map_point⟩
left_inv := fun f => by
apply Bipointed.Hom.ext
funext x
cases x
· exact f.map_fst.symm
· rfl
right_inv := fun f => Pointed.Hom.ext _ _ rfl }
homEquiv_naturality_left_symm := fun f g => by
apply Bipointed.Hom.ext
funext x
cases x <;> rfl }
#align Pointed_to_Bipointed_snd_Bipointed_to_Pointed_snd_adjunction pointedToBipointedSndBipointedToPointedSndAdjunction
|
Require Import VerdiRaft.Raft.
Require Import VerdiRaft.RaftRefinementInterface.
Section VotedForTermSanity.
Context {orig_base_params : BaseParams}.
Context {one_node_params : OneNodeParams orig_base_params}.
Context {raft_params : RaftParams orig_base_params}.
Definition votedFor_term_sanity (net : network) : Prop :=
forall t h h',
currentTerm (snd (nwState net h')) = t ->
votedFor (snd (nwState net h')) = Some h ->
t <= currentTerm (snd (nwState net h)).
Class votedFor_term_sanity_interface : Prop :=
{
votedFor_term_sanity_invariant :
forall net,
refined_raft_intermediate_reachable net ->
votedFor_term_sanity net
}.
End VotedForTermSanity.
|
In 1955 and 1956 , Creutz spent a year at Los Alamos evaluating its thermonuclear fusion program for the Atomic Energy Commission . While there he was approached by Frederic de Hoffmann , who recruited him to join the General Atomics division of General Dynamics . He moved to La Jolla , California , as its Vice President for Research and Development , and was concurrently the Director of its John Jay Hopkins Laboratory for Pure and Applied Science from 1955 to 1967 . He was also a member of the Advisory Panel on General Science at the Department of Defense from 1959 to 1963 .
|
Formal statement is: lemma LIM_imp_LIM: fixes f :: "'a::topological_space \<Rightarrow> 'b::real_normed_vector" fixes g :: "'a::topological_space \<Rightarrow> 'c::real_normed_vector" assumes f: "f \<midarrow>a\<rightarrow> l" and le: "\<And>x. x \<noteq> a \<Longrightarrow> norm (g x - m) \<le> norm (f x - l)" shows "g \<midarrow>a\<rightarrow> m" Informal statement is: If $f$ tends to $l$ at $a$ and $\|g(x) - m\| \le \|f(x) - l\|$ for all $x \neq a$, then $g$ tends to $m$ at $a$. |
DIR , Rapid Intervention Squad of the Romanian Ministry of Defense is an elite special operations unit currently belonging to the Romanian Military Police . It is a special unit inside the military , formed of highly skilled individuals , a very large percentage of its members being champions in martial arts , kickboxing , athletic disciplines and so on . DIR was , until December 2003 , top secret .
|
import h5py
import pyvista as pv

# placeholder: set basepath to the directory containing the .vox files
basepath, epoch, realfake, AorB = './', 1, 'fake', 'B'

# load the voxel grid; h5py >= 3 removed Dataset.value, so index with [()]
with h5py.File(basepath + str(epoch) + '_' + realfake + '_' + AorB + '.vox', 'r') as h5f:
    f = h5f['data'][()]
f = f[0, 0, :, :, :] # drop the batch and channel axes

# binary thresholding of intensity range [-1, 1]
threshold = 0.
f[f <= threshold] = -1.
f[f > threshold] = 1.

# plot 3d segmentation mask with pyvista
mesh = pv.wrap(f)
plotter = pv.Plotter()
plotter.add_mesh_threshold(mesh, cmap='PuBuGn', smooth_shading=True, lighting=True)
cpos = plotter.show() |