library(psych)
library(arsenal)
library(ggplot2)
library(data.table)
### GLOBAL VARIABLES
num_fac = 9
# this.dir<- dirname(parent.frame(2)$ofile)
# parent.dir <- strsplit(this.dir,'scripts')[[1]]
# setwd(parent.dir)
orig_var_order <- read.csv('efa/variable_order_mplus.csv')[,2]
city_names <- read.csv('data/urbandata2019-64var-citynames-countries.csv')[,1:2]
## LOADINGS || DATA || FACTOR CORRELATION
loading <- read.csv(paste0('efa/',num_fac,'f-loadings-missing-sig.csv'),header=FALSE)
data_scaled <-read.table('data/urbandata2019-64var-id-sorted-updated.dat')
# NOTE: Full-information maximum likelihood was used to estimate the factor model and obtain the loadings,
# but the mean-imputed dataset is used to compute factor scores from those loadings as the starting point for the iteration.
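# A minimal sketch of how such a mean-imputed copy could be produced from the
# raw data with missing values (an assumption; the imputation happened outside
# this script):
# raw <- as.data.frame(sapply(dm, as.numeric))   # dm is read below
# imputed <- as.data.frame(lapply(raw, function(x) replace(x, is.na(x), mean(x, na.rm = TRUE))))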
## DATA
## Missingness
dm <-read.table('data/urbandata2019-64var-id-sorted-updated-missing.dat')
dm[dm=="."]<-NA
missrate <- mean(is.na(dm)) * 100
print(paste0("Percent missingness in data: ", round(missrate,2)))
## Support for factor choice (Figure 1 in paper)
datacor = cor(as.matrix(sapply(dm, as.numeric)),use="pairwise.complete.obs")
eigval = eigen(datacor)$values
ablinecolor <- rgb(31, 120, 180, alpha=255,maxColorValue = 255)
plot(1:64, eigval[1:64], xlab="Index", ylab="Eigenvalue", col="black", pch=19, xlim=c(0,64))
#axis(1, at = 0:64)
abline(v=9, col=ablinecolor)
rectcolor <- rgb(166, 206, 227, alpha=90,maxColorValue = 255)
rect(8, -2, 12, 17,border=NA,col=rectcolor)
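# The vertical line at 9 and the shaded band over indices 8-12 mark the
# elbow region of the scree plot that motivates num_fac = 9 above.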
getScore <- function(df, load, factor_cor=NULL){
colnames(load) = c("Variables", paste0("F", seq_len(num_fac)))
print(head(load))
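# Sort the loading rows by the variable order recorded in
# variable_order_mplus.csv so they line up with the data's column order.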
load <- as.matrix(load[,-1])
load <- cbind(load, as.matrix(orig_var_order))
load_sorted <- load[order(load[,num_fac+1]),]
load_sorted <- load_sorted[,1:num_fac]
print("Loading:")
print(head(load_sorted))
if (is.null(factor_cor)){
tf = factor.scores(data.matrix(df),load_sorted,method='components')
}else{
tf = factor.scores(data.matrix(df),load_sorted, factor_cor,method='components')
}
print("Scores")
xform = data.frame(tf$scores)
xform$city = city_names$City
xform$cityID = city_names$cityID
print(head(xform))
write.csv(xform,'output/fscores-mean-imputed.csv',row.names=FALSE)
return(xform)
}
scores <- getScore(data_scaled,loading)
viewLoadings <- function(df, n) {
df2 <- melt(df, id="Variables", variable.name="Factors", value.name="Loading", measure = colnames(df)[2:(n+1)])
print(head(df2))
xd <- ggplot(df2, aes(Variables, abs(Loading), fill=Loading)) + facet_wrap(~ Factors, nrow=1) + geom_bar(stat="identity") + coord_flip() +
scale_fill_gradient2(name="Loading", high="blue", mid="white", low="red", midpoint=0, guide="none") + ylab("Loading") + theme_bw(base_size=10)
ggsave(paste0("output/", "loadings-", nrow(df),"-variables-", n,"-factors.pdf"), xd, width=18, height=15)
}
colnames(loading) = c("Variables", paste0("F", seq_len(num_fac)))
loading$Variables <- factor(loading$Variables, levels=loading$Variables[c(1:64)])
viewLoadings(loading,num_fac)
## Loading stats
colMeans(loading[,-1])
apply(loading[,-1], 2, function(x) max(x, na.rm = TRUE))
apply(loading[,-1], 2, function(x) sd(x, na.rm = TRUE))
open import Function.Equivalence as FE using ()
open import Relation.Nullary using (yes; no)
open import Relation.Nullary.Negation using (contradiction)
open import Relation.Nullary.Product using (_×-dec_)
open import Relation.Nullary.Decidable using (False; map)
open import Relation.Binary using (Decidable)
open import Relation.Binary.PropositionalEquality using (_≡_; _≢_; refl; sym; cong; module ≡-Reasoning)
open import Relation.Binary.PropositionalEquality.WithK using (≡-irrelevant)
open ≡-Reasoning
open import Data.Product using (_×_; _,_; uncurry)
open import Data.Unit using (⊤; tt)
open import Agda.Builtin.FromNat using (Number)
module AKS.Rational.Base where
open import Data.Integer using (ℤ; +_; +0; +[1+_]; -[1+_]; ∣_∣) renaming (_+_ to _+ℤ_; _*_ to _*ℤ_; -_ to -ℤ_; _≟_ to _≟ℤ_)
open import Data.Integer.DivMod using () renaming (_divℕ_ to _/ℤ_)
open import Data.Integer.Properties using (∣-n∣≡∣n∣)
open import AKS.Nat using (ℕ; suc; ≢⇒¬≟; ¬≟⇒≢) renaming (_+_ to _+ℕ_; _*_ to _*ℕ_; _≟_ to _≟ℕ_)
open import AKS.Nat using (n≢0∧m≢0⇒n*m≢0)
open import AKS.Nat.Divisibility using () renaming (_/_ to _/ℕ_)
open import AKS.Nat.GCD using (gcd; _⊥_; gcd[a,1]≡1; b≢0⇒gcd[a,b]≢0; gcd[0,a]≡1⇒a≡1; ⊥-respˡ; ⊥-sym)
record ℚ : Set where
constructor ℚ✓
field
numerator : ℤ
denominator : ℕ
den≢0 : False (denominator ≟ℕ 0)
coprime : ∣ numerator ∣ ⊥ denominator
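-- Invariant: a ℚ is always stored in lowest terms with a nonzero natural
-- denominator, so two equal rationals are propositionally equal records.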
fromℤ : ℤ → ℚ
fromℤ n = record
{ numerator = n
; denominator = 1
; den≢0 = tt
; coprime = gcd[a,1]≡1 (∣ n ∣)
}
fromℕ : ℕ → ℚ
fromℕ n = fromℤ (+ n)
instance
ℚ-number : Number ℚ
ℚ-number = record
{ Constraint = λ _ → ⊤
; fromNat = λ n → fromℕ n
}
open import AKS.Unsafe using (trustMe; TODO)
open import Data.Fin using (Fin)
open import Data.Nat.DivMod using (_divMod_; result)
∣a/ℤb∣≡∣a∣/ℕb : ∀ a b {b≢0} → ∣ (a /ℤ b) {b≢0} ∣ ≡ (∣ a ∣ /ℕ b) {b≢0}
∣a/ℤb∣≡∣a∣/ℕb (+ n) b {b≢0} = refl
∣a/ℤb∣≡∣a∣/ℕb (-[1+ x ]) b {b≢0} with (suc x divMod b) {b≢0}
... | result q Fin.zero eq rewrite ∣-n∣≡∣n∣ (+ q) = trustMe
... | result q (Fin.suc r) eq = trustMe
a/d⊥b/d : ∀ a b d {d≢0} → gcd a b ≡ d → (a /ℕ d) {d≢0} ⊥ (b /ℕ d) {d≢0}
a/d⊥b/d a b d gcd[a,b]≡d = begin
gcd (a /ℕ d) (b /ℕ d) ≡⟨ trustMe ⟩
1 ∎
a≢0⇒a/b≢0 : ∀ a b (a≢0 : False (a ≟ℕ 0)) {b≢0} → (a /ℕ b) {b≢0} ≢ 0
a≢0⇒a/b≢0 (suc a) (suc b) a≢0 = TODO
canonical : ∀ (num : ℤ) (den : ℕ) {den≢0 : False (den ≟ℕ 0)} → ℚ
canonical num den {den≢0} = construct num den {den≢0} (gcd ∣ num ∣ den) {≢⇒¬≟ (b≢0⇒gcd[a,b]≢0 (∣ num ∣) den (¬≟⇒≢ den≢0))} refl
where
construct : ∀ num den {den≢0 : False (den ≟ℕ 0)} d {d≢0 : False (d ≟ℕ 0)} → gcd ∣ num ∣ den ≡ d → ℚ
construct num den {den≢0} d {d≢0} gcd[num,den]≡d = record
{ numerator = (num /ℤ d) {d≢0}
; denominator = (den /ℕ d) {d≢0}
; den≢0 = ≢⇒¬≟ (a≢0⇒a/b≢0 den d den≢0)
; coprime = ⊥-respˡ {den /ℕ d} (sym (∣a/ℤb∣≡∣a∣/ℕb num d)) (a/d⊥b/d ∣ num ∣ den d gcd[num,den]≡d)
}
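-- For example (a sketch, not checked here): canonical (+ 6) 4 divides both
-- sides by gcd 6 4 ≡ 2, yielding numerator + 3 and denominator 2.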
infixl 6 _+_
_+_ : ℚ → ℚ → ℚ
(ℚ✓ num₁ den₁ den₁≢0 _) + (ℚ✓ num₂ den₂ den₂≢0 _) = canonical (num₁ *ℤ (+ den₂) +ℤ num₂ *ℤ (+ den₁)) (den₁ *ℕ den₂) {≢⇒¬≟ (n≢0∧m≢0⇒n*m≢0 (¬≟⇒≢ den₁≢0) (¬≟⇒≢ den₂≢0))}
infix 8 -_
-_ : ℚ → ℚ
- (ℚ✓ num den den≢0 num⊥den) = ℚ✓ (-ℤ num) den den≢0 (⊥-respˡ {den} (sym (∣-n∣≡∣n∣ num)) num⊥den)
infixl 7 _*_
_*_ : ℚ → ℚ → ℚ
(ℚ✓ num₁ den₁ den₁≢0 _) * (ℚ✓ num₂ den₂ den₂≢0 _) = canonical (num₁ *ℤ num₂) (den₁ *ℕ den₂) {≢⇒¬≟ (n≢0∧m≢0⇒n*m≢0 (¬≟⇒≢ den₁≢0) (¬≟⇒≢ den₂≢0))}
infix 8 _⁻¹
_⁻¹ : ∀ (q : ℚ) {q≢0 : q ≢ 0} → ℚ
(ℚ✓ +0 den den≢0 num⊥den ⁻¹) {q≢0} with gcd[0,a]≡1⇒a≡1 den num⊥den
(ℚ✓ +0 .1 tt refl ⁻¹) {q≢0} | refl = contradiction refl q≢0
(ℚ✓ +[1+ num ] (suc den) den≢0 num⊥den) ⁻¹ = ℚ✓ +[1+ den ] (suc num) tt (⊥-sym {suc num} {suc den} num⊥den)
(ℚ✓ -[1+ num ] (suc den) den≢0 num⊥den) ⁻¹ = ℚ✓ -[1+ den ] (suc num) tt (⊥-sym {suc num} {suc den} num⊥den)
infixl 7 _/_
_/_ : ∀ (p q : ℚ) {q≢0 : q ≢ 0} → ℚ
_/_ p q {q≢0} = p * (q ⁻¹) {q≢0}
_≟_ : Decidable {A = ℚ} _≡_
(ℚ✓ num₁ (suc den₁) tt num₁⊥den₁) ≟ (ℚ✓ num₂ (suc den₂) tt num₂⊥den₂)
= map (FE.equivalence forward backward) (num₁ ≟ℤ num₂ ×-dec den₁ ≟ℕ den₂)
where
forward : ∀ {num₁ num₂} {den₁ den₂} {num₁⊥den₁ num₂⊥den₂} → num₁ ≡ num₂ × den₁ ≡ den₂ → ℚ✓ num₁ (suc den₁) tt num₁⊥den₁ ≡ ℚ✓ num₂ (suc den₂) tt num₂⊥den₂
forward {num₁} {num₂} {den₁} {den₂} {num₁⊥den₁} {num₂⊥den₂} (refl , refl) = cong (λ pf → ℚ✓ num₁ (suc den₁) tt pf) (≡-irrelevant num₁⊥den₁ num₂⊥den₂)
backward : ∀ {num₁ num₂} {den₁ den₂} {num₁⊥den₁ num₂⊥den₂} → ℚ✓ num₁ (suc den₁) tt num₁⊥den₁ ≡ ℚ✓ num₂ (suc den₂) tt num₂⊥den₂ → num₁ ≡ num₂ × den₁ ≡ den₂
backward refl = (refl , refl)
≢0 : ∀ {p : ℚ} {p≢0 : False (p ≟ 0)} → p ≢ 0
≢0 {p} with p ≟ 0
≢0 {p} | no p≢0 = p≢0
test = ((1 / 2) {≢0} + (3 / 4) {≢0}) ⁻¹
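-- test reduces to the canonical 4/5: 1/2 + 3/4 normalizes to 5/4, and the
-- inverse swaps numerator and denominator.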
open import Data.String using (String; _++_)
open import Data.Nat.Show using () renaming (show to show-ℕ)
show-ℤ : ℤ → String
show-ℤ (+ n) = show-ℕ n
show-ℤ (-[1+ n ]) = "-" ++ show-ℕ (suc n)
show-ℚ : ℚ → String
show-ℚ (ℚ✓ num 1 _ _) = show-ℤ num
show-ℚ (ℚ✓ num den _ _) = show-ℤ num ++ "/" ++ show-ℕ den
function POMDPs.requirements_info(solver::AbstractPOMCPSolver, problem::POMDP)
if @implemented initial_state_distribution(::typeof(problem))
return requirements_info(solver, problem, initial_state_distribution(problem))
else
println("""
Since POMCP is an online solver, most of the computation occurs in `action(planner, state)`. In order to view the requirements for this function, please supply a state as the third argument to `requirements_info`, e.g.
@requirements_info $(typeof(solver))() $(typeof(problem))() $(state_type(typeof(problem)))()
""")
end
end
function POMDPs.requirements_info(solver::AbstractPOMCPSolver, problem::POMDP, b)
policy = solve(solver, problem)
requirements_info(policy, b)
end
function POMDPs.requirements_info(policy::POMCPPlanner, b)
@show_requirements action(policy, b)
problem = policy.problem
rng = MersenneTwister(1)
if @implemented(rand(::typeof(rng), ::typeof(b))) &&
@implemented(actions(::typeof(problem))) &&
@implemented(iterator(::typeof(actions(problem))))
s = rand(rng, b)
a = first(iterator(actions(problem)))
if @implemented generate_sor(::typeof(policy.problem), ::typeof(s), ::typeof(a), ::typeof(rng))
sp, o, r = generate_sor(policy.problem, s, a, rng)
if !isequal(deepcopy(o), o)
warn("""
isequal(deepcopy(o), o) returned false. Is isequal() defined correctly?
For POMCP to work correctly, you must define isequal(::$(typeof(o)), ::$(typeof(o))) (see https://docs.julialang.org/en/stable/stdlib/collections/#Associative-Collections-1, https://github.com/andrewcooke/AutoHashEquals.jl#background, also consider using StaticArrays). This warning was thrown because isequal($(deepcopy(o)), $o) returned false.
Note: isequal() should also be defined correctly for actions, but no warning will be issued.
""")
end
if hash(deepcopy(o)) != hash(o)
warn("""
hash(deepcopy(o)) was not equal to hash(o). Is hash() defined correctly?
For POMCP to work correctly, you must define hash(::$(typeof(o)), ::UInt) (see https://docs.julialang.org/en/stable/stdlib/collections/#Associative-Collections-1, https://github.com/andrewcooke/AutoHashEquals.jl#background, also consider using StaticArrays). This warning was thrown because hash($(deepcopy(o))) != hash($o).
Note: hash() should also be defined correctly for actions, but no warning will be issued.
""")
end
end
end
end
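# A minimal sketch (not part of this package) of how a user might satisfy the
# isequal/hash requirements warned about above, for a hypothetical observation
# type `GridObs`; the field name is illustrative only:
#
#     struct GridObs
#         cell::Tuple{Int,Int}
#     end
#     Base.isequal(a::GridObs, b::GridObs) = isequal(a.cell, b.cell)
#     Base.hash(o::GridObs, h::UInt) = hash(o.cell, h)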
@POMDP_require action(p::POMCPPlanner, b) begin
tree = POMCPTree(p.problem, p.solver.tree_queries)
@subreq search(p, b, tree)
end
@POMDP_require search(p::POMCPPlanner, b, t::POMCPTree) begin
P = typeof(p.problem)
@req rand(::typeof(p.rng), ::typeof(b))
s = rand(p.rng, b)
@req isterminal(::P, ::state_type(P))
@subreq simulate(p, s, POMCPObsNode(t, 1), p.solver.max_depth)
end
@POMDP_require simulate(p::POMCPPlanner, s, hnode::POMCPObsNode, steps::Int) begin
P = typeof(p.problem)
S = state_type(P)
A = action_type(P)
O = obs_type(P)
@req generate_sor(::P, ::S, ::A, ::typeof(p.rng))
@req isequal(::O, ::O)
@req hash(::O)
# from insert_obs_node!
@req n_actions(::P)
@req actions(::P)
AS = typeof(actions(p.problem))
@req iterator(::AS)
@subreq estimate_value(p.solved_estimator, p.problem, s, hnode, steps)
@req discount(::P)
end
@POMDP_require estimate_value(f::Function, pomdp::POMDPs.POMDP, start_state, h::BeliefNode, steps::Int) begin
@req f(::typeof(pomdp), ::typeof(start_state), ::typeof(h), ::typeof(steps))
end
\chapter{Literature Survey}
\lipsum[1-4]
\dummyfig
\begin{tcolorbox}[title=\textbf{To get a list of contributions as a shaded box}]
\blinditemize
\end{tcolorbox}
\input{4_lit_survey/first_topic.tex}
\input{4_lit_survey/second_topic.tex}
\input{4_lit_survey/third_topic.tex}
% instantiate the library
disp('Loading the library...');
lib = lsl_loadlib();
% resolve a stream...
disp('Resolving an EEG stream...');
result = {};
while isempty(result)
    result = lsl_resolve_byprop(lib,'type','EEG');
end
% create a new inlet
disp('Opening an inlet...');
inlet = lsl_inlet(result{1});
disp('Now receiving chunked data...');
while true
% get chunk from the inlet
[chunk,stamps] = inlet.pull_chunk();
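% (pull_chunk returns a [#channels x #samples] matrix plus one timestamp
% per sample, so column s below is one multi-channel sample)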
for s=1:length(stamps)
% and display it
fprintf('%.2f\t',chunk(:,s));
fprintf('%.5f\n',stamps(s));
end
pause(0.05);
end
PROGRAM ZBLAT3
*
* Test program for the COMPLEX*16 Level 3 Blas.
*
* The program must be driven by a short data file. The first 13 records
* of the file are read using list-directed input; the last 9 records
* are read using the format ( A12,L2 ). An annotated example of a data
* file can be obtained by deleting the first 3 characters from the
* following 22 lines:
* 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE
* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0)
* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD.
* F LOGICAL FLAG, T TO STOP ON FAILURES.
* T LOGICAL FLAG, T TO TEST ERROR EXITS.
* 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH
* 16.0 THRESHOLD VALUE OF TEST RATIO
* 6 NUMBER OF VALUES OF N
* 0 1 2 3 5 9 VALUES OF N
* 3 NUMBER OF VALUES OF ALPHA
* (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA
* 3 NUMBER OF VALUES OF BETA
* (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA
* ZGEMM T PUT F FOR NO TEST. SAME COLUMNS.
* ZHEMM T PUT F FOR NO TEST. SAME COLUMNS.
* ZSYMM T PUT F FOR NO TEST. SAME COLUMNS.
* ZTRMM T PUT F FOR NO TEST. SAME COLUMNS.
* ZTRSM T PUT F FOR NO TEST. SAME COLUMNS.
* ZHERK T PUT F FOR NO TEST. SAME COLUMNS.
* ZSYRK T PUT F FOR NO TEST. SAME COLUMNS.
* ZHER2K T PUT F FOR NO TEST. SAME COLUMNS.
* ZSYR2K T PUT F FOR NO TEST. SAME COLUMNS.
*
* See:
*
* Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S.
* A Set of Level 3 Basic Linear Algebra Subprograms.
*
* Technical Memorandum No.88 (Revision 1), Mathematics and
* Computer Science Division, Argonne National Laboratory, 9700
* South Cass Avenue, Argonne, Illinois 60439, US.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
INTEGER NIN, NOUT
PARAMETER ( NIN = 5, NOUT = 6 )
INTEGER NSUBS
PARAMETER ( NSUBS = 9 )
COMPLEX*16 ZERO, ONE
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ),
$ ONE = ( 1.0D0, 0.0D0 ) )
DOUBLE PRECISION RZERO, RHALF, RONE
PARAMETER ( RZERO = 0.0D0, RHALF = 0.5D0, RONE = 1.0D0 )
INTEGER NMAX
PARAMETER ( NMAX = 65 )
INTEGER NIDMAX, NALMAX, NBEMAX
PARAMETER ( NIDMAX = 9, NALMAX = 7, NBEMAX = 7 )
* .. Local Scalars ..
DOUBLE PRECISION EPS, ERR, THRESH
INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NTRA,
$ LAYOUT
LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE,
$ TSTERR, CORDER, RORDER
CHARACTER*1 TRANSA, TRANSB
CHARACTER*12 SNAMET
CHARACTER*32 SNAPS
* .. Local Arrays ..
COMPLEX*16 AA( NMAX*NMAX ), AB( NMAX, 2*NMAX ),
$ ALF( NALMAX ), AS( NMAX*NMAX ),
$ BB( NMAX*NMAX ), BET( NBEMAX ),
$ BS( NMAX*NMAX ), C( NMAX, NMAX ),
$ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ),
$ W( 2*NMAX )
DOUBLE PRECISION G( NMAX )
INTEGER IDIM( NIDMAX )
LOGICAL LTEST( NSUBS )
CHARACTER*12 SNAMES( NSUBS )
* .. External Functions ..
DOUBLE PRECISION DDIFF, dlamch
LOGICAL LZE
EXTERNAL DDIFF, LZE, dlamch
* .. External Subroutines ..
EXTERNAL ZCHK1, ZCHK2, ZCHK3, ZCHK4, ZCHK5,ZMMCH
* .. Intrinsic Functions ..
INTRINSIC MAX, MIN
* .. Scalars in Common ..
INTEGER INFOT, NOUTC
LOGICAL LERR, OK
CHARACTER*12 SRNAMT
* .. Common blocks ..
COMMON /INFOC/INFOT, NOUTC, OK, LERR
COMMON /SRNAMC/SRNAMT
* .. Data statements ..
DATA SNAMES/'cblas_zgemm ', 'cblas_zhemm ',
$ 'cblas_zsymm ', 'cblas_ztrmm ', 'cblas_ztrsm ',
$ 'cblas_zherk ', 'cblas_zsyrk ', 'cblas_zher2k',
$ 'cblas_zsyr2k'/
* .. Executable Statements ..
*
NOUTC = NOUT
*
* Read name and unit number for snapshot output file and open file.
*
READ( NIN, FMT = * )SNAPS
READ( NIN, FMT = * )NTRA
TRACE = NTRA.GE.0
IF( TRACE )THEN
OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' )
END IF
* Read the flag that directs rewinding of the snapshot file.
READ( NIN, FMT = * )REWI
REWI = REWI.AND.TRACE
* Read the flag that directs stopping on any failure.
READ( NIN, FMT = * )SFATAL
* Read the flag that indicates whether error exits are to be tested.
READ( NIN, FMT = * )TSTERR
* Read the flag that indicates whether the row-major data layout is to be tested.
READ( NIN, FMT = * )LAYOUT
* Read the threshold value of the test ratio
READ( NIN, FMT = * )THRESH
*
* Read and check the parameter values for the tests.
*
* Values of N
READ( NIN, FMT = * )NIDIM
IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN
WRITE( NOUT, FMT = 9997 )'N', NIDMAX
GO TO 220
END IF
READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM )
DO 10 I = 1, NIDIM
IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN
WRITE( NOUT, FMT = 9996 )NMAX
GO TO 220
END IF
10 CONTINUE
* Values of ALPHA
READ( NIN, FMT = * )NALF
IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN
WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX
GO TO 220
END IF
READ( NIN, FMT = * )( ALF( I ), I = 1, NALF )
* Values of BETA
READ( NIN, FMT = * )NBET
IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN
WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX
GO TO 220
END IF
READ( NIN, FMT = * )( BET( I ), I = 1, NBET )
*
* Report values of parameters.
*
WRITE( NOUT, FMT = 9995 )
WRITE( NOUT, FMT = 9994 )( IDIM( I ), I = 1, NIDIM )
WRITE( NOUT, FMT = 9993 )( ALF( I ), I = 1, NALF )
WRITE( NOUT, FMT = 9992 )( BET( I ), I = 1, NBET )
IF( .NOT.TSTERR )THEN
WRITE( NOUT, FMT = * )
WRITE( NOUT, FMT = 9984 )
END IF
WRITE( NOUT, FMT = * )
WRITE( NOUT, FMT = 9999 )THRESH
WRITE( NOUT, FMT = * )
RORDER = .FALSE.
CORDER = .FALSE.
IF (LAYOUT.EQ.2) THEN
RORDER = .TRUE.
CORDER = .TRUE.
WRITE( *, FMT = 10002 )
ELSE IF (LAYOUT.EQ.1) THEN
RORDER = .TRUE.
WRITE( *, FMT = 10001 )
ELSE IF (LAYOUT.EQ.0) THEN
CORDER = .TRUE.
WRITE( *, FMT = 10000 )
END IF
WRITE( *, FMT = * )
*
* Read names of subroutines and flags which indicate
* whether they are to be tested.
*
DO 20 I = 1, NSUBS
LTEST( I ) = .FALSE.
20 CONTINUE
30 READ( NIN, FMT = 9988, END = 60 )SNAMET, LTESTT
DO 40 I = 1, NSUBS
IF( SNAMET.EQ.SNAMES( I ) )
$ GO TO 50
40 CONTINUE
WRITE( NOUT, FMT = 9990 )SNAMET
STOP
50 LTEST( I ) = LTESTT
GO TO 30
*
60 CONTINUE
CLOSE ( NIN )
*
* Compute EPS (the machine precision).
*
* EPS = RONE
* 70 CONTINUE
* IF( DDIFF( RONE + EPS, RONE ).EQ.RZERO )
* $ GO TO 80
* EPS = RHALF*EPS
* GO TO 70
* 80 CONTINUE
* EPS = EPS + EPS
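* dlamch( 'e' ) from LAPACK returns the relative machine precision
* directly, replacing the bisection loop commented out above.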
eps = dlamch('e')
WRITE( NOUT, FMT = 9998 )EPS
*
* Check the reliability of ZMMCH using exact data.
*
N = MIN( 32, NMAX )
DO 100 J = 1, N
DO 90 I = 1, N
AB( I, J ) = MAX( I - J + 1, 0 )
90 CONTINUE
AB( J, NMAX + 1 ) = J
AB( 1, NMAX + J ) = J
C( J, 1 ) = ZERO
100 CONTINUE
DO 110 J = 1, N
CC( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3
110 CONTINUE
* CC holds the exact result. On exit from ZMMCH CT holds
* the result computed by ZMMCH.
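* ( CC( J ) above is the closed form of the sum of ( J - I + 1 )*I
* for I = 1 to J, i.e. row J of AB times the vector ( 1, ..., N ). )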
TRANSA = 'N'
TRANSB = 'N'
CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX,
$ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC,
$ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. )
SAME = LZE( CC, CT, N )
IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN
WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR
STOP
END IF
TRANSB = 'C'
CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX,
$ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC,
$ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. )
SAME = LZE( CC, CT, N )
IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN
WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR
STOP
END IF
DO 120 J = 1, N
AB( J, NMAX + 1 ) = N - J + 1
AB( 1, NMAX + J ) = N - J + 1
120 CONTINUE
DO 130 J = 1, N
CC( N - J + 1 ) = J*( ( J + 1 )*J )/2 -
$ ( ( J + 1 )*J*( J - 1 ) )/3
130 CONTINUE
TRANSA = 'C'
TRANSB = 'N'
CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX,
$ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC,
$ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. )
SAME = LZE( CC, CT, N )
IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN
WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR
STOP
END IF
TRANSB = 'C'
CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX,
$ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC,
$ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. )
SAME = LZE( CC, CT, N )
IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN
WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR
STOP
END IF
*
* Test each subroutine in turn.
*
DO 200 ISNUM = 1, NSUBS
WRITE( NOUT, FMT = * )
IF( .NOT.LTEST( ISNUM ) )THEN
* Subprogram is not to be tested.
WRITE( NOUT, FMT = 9987 )SNAMES( ISNUM )
ELSE
SRNAMT = SNAMES( ISNUM )
* Test error exits.
IF( TSTERR )THEN
CALL CZ3CHKE( SNAMES( ISNUM ) )
WRITE( NOUT, FMT = * )
END IF
* Test computations.
INFOT = 0
OK = .TRUE.
FATAL = .FALSE.
GO TO ( 140, 150, 150, 160, 160, 170, 170,
$ 180, 180 )ISNUM
* Test ZGEMM, 01.
140 IF (CORDER) THEN
CALL ZCHK1(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C,
$ CC, CS, CT, G, 0 )
END IF
IF (RORDER) THEN
CALL ZCHK1(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C,
$ CC, CS, CT, G, 1 )
END IF
GO TO 190
* Test ZHEMM, 02, ZSYMM, 03.
150 IF (CORDER) THEN
CALL ZCHK2(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C,
$ CC, CS, CT, G, 0 )
END IF
IF (RORDER) THEN
CALL ZCHK2(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C,
$ CC, CS, CT, G, 1 )
END IF
GO TO 190
* Test ZTRMM, 04, ZTRSM, 05.
160 IF (CORDER) THEN
CALL ZCHK3(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB,
$ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C,
$ 0 )
END IF
IF (RORDER) THEN
CALL ZCHK3(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB,
$ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C,
$ 1 )
END IF
GO TO 190
* Test ZHERK, 06, ZSYRK, 07.
170 IF (CORDER) THEN
CALL ZCHK4(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C,
$ CC, CS, CT, G, 0 )
END IF
IF (RORDER) THEN
CALL ZCHK4(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C,
$ CC, CS, CT, G, 1 )
END IF
GO TO 190
* Test ZHER2K, 08, ZSYR2K, 09.
180 IF (CORDER) THEN
CALL ZCHK5(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W,
$ 0 )
END IF
IF (RORDER) THEN
CALL ZCHK5(SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE,
$ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET,
$ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W,
$ 1 )
END IF
GO TO 190
*
190 IF( FATAL.AND.SFATAL )
$ GO TO 210
END IF
200 CONTINUE
WRITE( NOUT, FMT = 9986 )
GO TO 230
*
210 CONTINUE
WRITE( NOUT, FMT = 9985 )
GO TO 230
*
220 CONTINUE
WRITE( NOUT, FMT = 9991 )
*
230 CONTINUE
IF( TRACE )
$ CLOSE ( NTRA )
CLOSE ( NOUT )
STOP
*
10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' )
10001 FORMAT(' ROW-MAJOR DATA LAYOUT IS TESTED' )
10000 FORMAT(' COLUMN-MAJOR DATA LAYOUT IS TESTED' )
9999 FORMAT(' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES',
$ 'S THAN', F8.2 )
9998 FORMAT(' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, E9.1 )
9997 FORMAT(' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ',
$ 'THAN ', I2 )
9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 )
9995 FORMAT( ' TESTS OF THE COMPLEX*16 LEVEL 3 BLAS', //' THE F',
$ 'OLLOWING PARAMETER VALUES WILL BE USED:' )
9994 FORMAT( ' FOR N ', 9I6 )
9993 FORMAT( ' FOR ALPHA ',
$ 7( '(', F4.1, ',', F4.1, ') ', : ) )
9992 FORMAT( ' FOR BETA ',
$ 7( '(', F4.1, ',', F4.1, ') ', : ) )
9991 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM',
$ /' ******* TESTS ABANDONED *******' )
9990 FORMAT(' SUBPROGRAM NAME ', A12,' NOT RECOGNIZED', /' ******* T',
$ 'ESTS ABANDONED *******' )
9989 FORMAT(' ERROR IN ZMMCH - IN-LINE DOT PRODUCTS ARE BEING EVALU',
$ 'ATED WRONGLY.', /' ZMMCH WAS CALLED WITH TRANSA = ', A1,
$ ' AND TRANSB = ', A1, /' AND RETURNED SAME = ', L1, ' AND ',
$ ' ERR = ', F12.3, '.', /' THIS MAY BE DUE TO FAULTS IN THE ',
$ 'ARITHMETIC OR THE COMPILER.', /' ******* TESTS ABANDONED ',
$ '*******' )
9988 FORMAT( A12,L2 )
9987 FORMAT( 1X, A12,' WAS NOT TESTED' )
9986 FORMAT( /' END OF TESTS' )
9985 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' )
9984 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' )
*
* End of ZBLAT3.
*
END
SUBROUTINE ZCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI,
$ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX,
$ A, AA, AS, B, BB, BS, C, CC, CS, CT, G,
$ IORDER )
*
* Tests ZGEMM.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) )
DOUBLE PRECISION RZERO
PARAMETER ( RZERO = 0.0D0 )
* .. Scalar Arguments ..
DOUBLE PRECISION EPS, THRESH
INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA, IORDER
LOGICAL FATAL, REWI, TRACE
CHARACTER*12 SNAME
* .. Array Arguments ..
COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ),
$ AS( NMAX*NMAX ), B( NMAX, NMAX ),
$ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ),
$ C( NMAX, NMAX ), CC( NMAX*NMAX ),
$ CS( NMAX*NMAX ), CT( NMAX )
DOUBLE PRECISION G( NMAX )
INTEGER IDIM( NIDIM )
* .. Local Scalars ..
COMPLEX*16 ALPHA, ALS, BETA, BLS
DOUBLE PRECISION ERR, ERRMAX
INTEGER I, IA, IB, ICA, ICB, IK, IM, IN, K, KS, LAA,
$ LBB, LCC, LDA, LDAS, LDB, LDBS, LDC, LDCS, M,
$ MA, MB, MS, N, NA, NARGS, NB, NC, NS
LOGICAL NULL, RESET, SAME, TRANA, TRANB
CHARACTER*1 TRANAS, TRANBS, TRANSA, TRANSB
CHARACTER*3 ICH
* .. Local Arrays ..
LOGICAL ISAME( 13 )
* .. External Functions ..
LOGICAL LZE, LZERES
EXTERNAL LZE, LZERES
* .. External Subroutines ..
EXTERNAL CZGEMM, ZMAKE, ZMMCH
* .. Intrinsic Functions ..
INTRINSIC MAX
* .. Scalars in Common ..
INTEGER INFOT, NOUTC
LOGICAL LERR, OK
* .. Common blocks ..
COMMON /INFOC/INFOT, NOUTC, OK, LERR
* .. Data statements ..
DATA ICH/'NTC'/
* .. Executable Statements ..
*
NARGS = 13
NC = 0
RESET = .TRUE.
ERRMAX = RZERO
*
DO 110 IM = 1, NIDIM
M = IDIM( IM )
*
DO 100 IN = 1, NIDIM
N = IDIM( IN )
* Set LDC to 1 more than minimum value if room.
LDC = M
IF( LDC.LT.NMAX )
$ LDC = LDC + 1
* Skip tests if not enough room.
IF( LDC.GT.NMAX )
$ GO TO 100
LCC = LDC*N
NULL = N.LE.0.OR.M.LE.0
*
DO 90 IK = 1, NIDIM
K = IDIM( IK )
*
DO 80 ICA = 1, 3
TRANSA = ICH( ICA: ICA )
TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C'
*
IF( TRANA )THEN
MA = K
NA = M
ELSE
MA = M
NA = K
END IF
* Set LDA to 1 more than minimum value if room.
LDA = MA
IF( LDA.LT.NMAX )
$ LDA = LDA + 1
* Skip tests if not enough room.
IF( LDA.GT.NMAX )
$ GO TO 80
LAA = LDA*NA
*
* Generate the matrix A.
*
CALL ZMAKE( 'ge', ' ', ' ', MA, NA, A, NMAX, AA, LDA,
$ RESET, ZERO )
*
DO 70 ICB = 1, 3
TRANSB = ICH( ICB: ICB )
TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C'
*
IF( TRANB )THEN
MB = N
NB = K
ELSE
MB = K
NB = N
END IF
* Set LDB to 1 more than minimum value if room.
LDB = MB
IF( LDB.LT.NMAX )
$ LDB = LDB + 1
* Skip tests if not enough room.
IF( LDB.GT.NMAX )
$ GO TO 70
LBB = LDB*NB
*
* Generate the matrix B.
*
CALL ZMAKE( 'ge', ' ', ' ', MB, NB, B, NMAX, BB,
$ LDB, RESET, ZERO )
*
DO 60 IA = 1, NALF
ALPHA = ALF( IA )
*
DO 50 IB = 1, NBET
BETA = BET( IB )
*
* Generate the matrix C.
*
CALL ZMAKE( 'ge', ' ', ' ', M, N, C, NMAX,
$ CC, LDC, RESET, ZERO )
*
NC = NC + 1
*
* Save every datum before calling the
* subroutine.
*
TRANAS = TRANSA
TRANBS = TRANSB
MS = M
NS = N
KS = K
ALS = ALPHA
DO 10 I = 1, LAA
AS( I ) = AA( I )
10 CONTINUE
LDAS = LDA
DO 20 I = 1, LBB
BS( I ) = BB( I )
20 CONTINUE
LDBS = LDB
BLS = BETA
DO 30 I = 1, LCC
CS( I ) = CC( I )
30 CONTINUE
LDCS = LDC
*
* Call the subroutine.
*
IF( TRACE )
$ CALL ZPRCN1(NTRA, NC, SNAME, IORDER,
$ TRANSA, TRANSB, M, N, K, ALPHA, LDA,
$ LDB, BETA, LDC)
IF( REWI )
$ REWIND NTRA
CALL CZGEMM( IORDER, TRANSA, TRANSB, M, N,
$ K, ALPHA, AA, LDA, BB, LDB,
$ BETA, CC, LDC )
*
* Check if error-exit was taken incorrectly.
*
IF( .NOT.OK )THEN
WRITE( NOUT, FMT = 9994 )
FATAL = .TRUE.
GO TO 120
END IF
*
* See what data changed inside subroutines.
*
ISAME( 1 ) = TRANSA.EQ.TRANAS
ISAME( 2 ) = TRANSB.EQ.TRANBS
ISAME( 3 ) = MS.EQ.M
ISAME( 4 ) = NS.EQ.N
ISAME( 5 ) = KS.EQ.K
ISAME( 6 ) = ALS.EQ.ALPHA
ISAME( 7 ) = LZE( AS, AA, LAA )
ISAME( 8 ) = LDAS.EQ.LDA
ISAME( 9 ) = LZE( BS, BB, LBB )
ISAME( 10 ) = LDBS.EQ.LDB
ISAME( 11 ) = BLS.EQ.BETA
IF( NULL )THEN
ISAME( 12 ) = LZE( CS, CC, LCC )
ELSE
ISAME( 12 ) = LZERES( 'ge', ' ', M, N, CS,
$ CC, LDC )
END IF
ISAME( 13 ) = LDCS.EQ.LDC
*
* If data was incorrectly changed, report
* and return.
*
SAME = .TRUE.
DO 40 I = 1, NARGS
SAME = SAME.AND.ISAME( I )
IF( .NOT.ISAME( I ) )
$ WRITE( NOUT, FMT = 9998 )I
40 CONTINUE
IF( .NOT.SAME )THEN
FATAL = .TRUE.
GO TO 120
END IF
*
IF( .NOT.NULL )THEN
*
* Check the result.
*
CALL ZMMCH( TRANSA, TRANSB, M, N, K,
$ ALPHA, A, NMAX, B, NMAX, BETA,
$ C, NMAX, CT, G, CC, LDC, EPS,
$ ERR, FATAL, NOUT, .TRUE. )
ERRMAX = MAX( ERRMAX, ERR )
* If got really bad answer, report and
* return.
IF( FATAL )
$ GO TO 120
END IF
*
50 CONTINUE
*
60 CONTINUE
*
70 CONTINUE
*
80 CONTINUE
*
90 CONTINUE
*
100 CONTINUE
*
110 CONTINUE
*
* Report result.
*
IF( ERRMAX.LT.THRESH )THEN
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10000 )SNAME, NC
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10001 )SNAME, NC
ELSE
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10002 )SNAME, NC, ERRMAX
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10003 )SNAME, NC, ERRMAX
END IF
GO TO 130
*
120 CONTINUE
WRITE( NOUT, FMT = 9996 )SNAME
CALL ZPRCN1(NOUT, NC, SNAME, IORDER, TRANSA, TRANSB,
$ M, N, K, ALPHA, LDA, LDB, BETA, LDC)
*
130 CONTINUE
RETURN
*
10003 FORMAT( ' ', A12,' COMPLETED THE ROW-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10002 FORMAT( ' ', A12,' COMPLETED THE COLUMN-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10001 FORMAT( ' ', A12,' PASSED THE ROW-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
10000 FORMAT( ' ', A12,' PASSED THE COLUMN-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
9998 FORMAT(' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH',
$ 'ANGED INCORRECTLY *******' )
9996 FORMAT( ' ******* ', A12,' FAILED ON CALL NUMBER:' )
9995 FORMAT( 1X, I6, ': ', A12,'(''', A1, ''',''', A1, ''',',
$ 3( I3, ',' ), '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3,
$ ',(', F4.1, ',', F4.1, '), C,', I3, ').' )
9994 FORMAT(' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *',
$ '******' )
*
* End of ZCHK1.
*
END
*
SUBROUTINE ZPRCN1(NOUT, NC, SNAME, IORDER, TRANSA, TRANSB, M, N,
$ K, ALPHA, LDA, LDB, BETA, LDC)
INTEGER NOUT, NC, IORDER, M, N, K, LDA, LDB, LDC
DOUBLE COMPLEX ALPHA, BETA
CHARACTER*1 TRANSA, TRANSB
CHARACTER*12 SNAME
CHARACTER*14 CRC, CTA,CTB
IF (TRANSA.EQ.'N')THEN
CTA = ' CblasNoTrans'
ELSE IF (TRANSA.EQ.'T')THEN
CTA = ' CblasTrans'
ELSE
CTA = 'CblasConjTrans'
END IF
IF (TRANSB.EQ.'N')THEN
CTB = ' CblasNoTrans'
ELSE IF (TRANSB.EQ.'T')THEN
CTB = ' CblasTrans'
ELSE
CTB = 'CblasConjTrans'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC,SNAME,CRC, CTA,CTB
WRITE(NOUT, FMT = 9994)M, N, K, ALPHA, LDA, LDB, BETA, LDC
9995 FORMAT( 1X, I6, ': ', A12,'(', A14, ',', A14, ',', A14, ',')
9994 FORMAT( 10X, 3( I3, ',' ) ,' (', F4.1,',',F4.1,') , A,',
$ I3, ', B,', I3, ', (', F4.1,',',F4.1,') , C,', I3, ').' )
END
*
SUBROUTINE ZCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI,
$ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX,
$ A, AA, AS, B, BB, BS, C, CC, CS, CT, G,
$ IORDER )
*
* Tests ZHEMM and ZSYMM.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) )
DOUBLE PRECISION RZERO
PARAMETER ( RZERO = 0.0D0 )
* .. Scalar Arguments ..
DOUBLE PRECISION EPS, THRESH
INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA, IORDER
LOGICAL FATAL, REWI, TRACE
CHARACTER*12 SNAME
* .. Array Arguments ..
COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ),
$ AS( NMAX*NMAX ), B( NMAX, NMAX ),
$ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ),
$ C( NMAX, NMAX ), CC( NMAX*NMAX ),
$ CS( NMAX*NMAX ), CT( NMAX )
DOUBLE PRECISION G( NMAX )
INTEGER IDIM( NIDIM )
* .. Local Scalars ..
COMPLEX*16 ALPHA, ALS, BETA, BLS
DOUBLE PRECISION ERR, ERRMAX
INTEGER I, IA, IB, ICS, ICU, IM, IN, LAA, LBB, LCC,
$ LDA, LDAS, LDB, LDBS, LDC, LDCS, M, MS, N, NA,
$ NARGS, NC, NS
LOGICAL CONJ, LEFT, NULL, RESET, SAME
CHARACTER*1 SIDE, SIDES, UPLO, UPLOS
CHARACTER*2 ICHS, ICHU
* .. Local Arrays ..
LOGICAL ISAME( 13 )
* .. External Functions ..
LOGICAL LZE, LZERES
EXTERNAL LZE, LZERES
* .. External Subroutines ..
EXTERNAL CZHEMM, ZMAKE, ZMMCH, CZSYMM
* .. Intrinsic Functions ..
INTRINSIC MAX
* .. Scalars in Common ..
INTEGER INFOT, NOUTC
LOGICAL LERR, OK
* .. Common blocks ..
COMMON /INFOC/INFOT, NOUTC, OK, LERR
* .. Data statements ..
DATA ICHS/'LR'/, ICHU/'UL'/
* .. Executable Statements ..
CONJ = SNAME( 8: 9 ).EQ.'he'
*
NARGS = 12
NC = 0
RESET = .TRUE.
ERRMAX = RZERO
*
DO 100 IM = 1, NIDIM
M = IDIM( IM )
*
DO 90 IN = 1, NIDIM
N = IDIM( IN )
* Set LDC to 1 more than minimum value if room.
LDC = M
IF( LDC.LT.NMAX )
$ LDC = LDC + 1
* Skip tests if not enough room.
IF( LDC.GT.NMAX )
$ GO TO 90
LCC = LDC*N
NULL = N.LE.0.OR.M.LE.0
* Set LDB to 1 more than minimum value if room.
LDB = M
IF( LDB.LT.NMAX )
$ LDB = LDB + 1
* Skip tests if not enough room.
IF( LDB.GT.NMAX )
$ GO TO 90
LBB = LDB*N
*
* Generate the matrix B.
*
CALL ZMAKE( 'ge', ' ', ' ', M, N, B, NMAX, BB, LDB, RESET,
$ ZERO )
*
DO 80 ICS = 1, 2
SIDE = ICHS( ICS: ICS )
LEFT = SIDE.EQ.'L'
*
IF( LEFT )THEN
NA = M
ELSE
NA = N
END IF
* Set LDA to 1 more than minimum value if room.
LDA = NA
IF( LDA.LT.NMAX )
$ LDA = LDA + 1
* Skip tests if not enough room.
IF( LDA.GT.NMAX )
$ GO TO 80
LAA = LDA*NA
*
DO 70 ICU = 1, 2
UPLO = ICHU( ICU: ICU )
*
* Generate the hermitian or symmetric matrix A.
*
CALL ZMAKE(SNAME( 8: 9 ), UPLO, ' ', NA, NA, A, NMAX,
$ AA, LDA, RESET, ZERO )
*
DO 60 IA = 1, NALF
ALPHA = ALF( IA )
*
DO 50 IB = 1, NBET
BETA = BET( IB )
*
* Generate the matrix C.
*
CALL ZMAKE( 'ge', ' ', ' ', M, N, C, NMAX, CC,
$ LDC, RESET, ZERO )
*
NC = NC + 1
*
* Save every datum before calling the
* subroutine.
*
SIDES = SIDE
UPLOS = UPLO
MS = M
NS = N
ALS = ALPHA
DO 10 I = 1, LAA
AS( I ) = AA( I )
10 CONTINUE
LDAS = LDA
DO 20 I = 1, LBB
BS( I ) = BB( I )
20 CONTINUE
LDBS = LDB
BLS = BETA
DO 30 I = 1, LCC
CS( I ) = CC( I )
30 CONTINUE
LDCS = LDC
*
* Call the subroutine.
*
IF( TRACE )
$ CALL ZPRCN2(NTRA, NC, SNAME, IORDER,
$ SIDE, UPLO, M, N, ALPHA, LDA, LDB,
$ BETA, LDC)
IF( REWI )
$ REWIND NTRA
IF( CONJ )THEN
CALL CZHEMM( IORDER, SIDE, UPLO, M, N,
$ ALPHA, AA, LDA, BB, LDB, BETA,
$ CC, LDC )
ELSE
CALL CZSYMM( IORDER, SIDE, UPLO, M, N,
$ ALPHA, AA, LDA, BB, LDB, BETA,
$ CC, LDC )
END IF
*
* Check if error-exit was taken incorrectly.
*
IF( .NOT.OK )THEN
WRITE( NOUT, FMT = 9994 )
FATAL = .TRUE.
GO TO 110
END IF
*
* See what data changed inside subroutines.
*
ISAME( 1 ) = SIDES.EQ.SIDE
ISAME( 2 ) = UPLOS.EQ.UPLO
ISAME( 3 ) = MS.EQ.M
ISAME( 4 ) = NS.EQ.N
ISAME( 5 ) = ALS.EQ.ALPHA
ISAME( 6 ) = LZE( AS, AA, LAA )
ISAME( 7 ) = LDAS.EQ.LDA
ISAME( 8 ) = LZE( BS, BB, LBB )
ISAME( 9 ) = LDBS.EQ.LDB
ISAME( 10 ) = BLS.EQ.BETA
IF( NULL )THEN
ISAME( 11 ) = LZE( CS, CC, LCC )
ELSE
ISAME( 11 ) = LZERES( 'ge', ' ', M, N, CS,
$ CC, LDC )
END IF
ISAME( 12 ) = LDCS.EQ.LDC
*
* If data was incorrectly changed, report and
* return.
*
SAME = .TRUE.
DO 40 I = 1, NARGS
SAME = SAME.AND.ISAME( I )
IF( .NOT.ISAME( I ) )
$ WRITE( NOUT, FMT = 9998 )I
40 CONTINUE
IF( .NOT.SAME )THEN
FATAL = .TRUE.
GO TO 110
END IF
*
IF( .NOT.NULL )THEN
*
* Check the result.
*
IF( LEFT )THEN
CALL ZMMCH( 'N', 'N', M, N, M, ALPHA, A,
$ NMAX, B, NMAX, BETA, C, NMAX,
$ CT, G, CC, LDC, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
ELSE
CALL ZMMCH( 'N', 'N', M, N, N, ALPHA, B,
$ NMAX, A, NMAX, BETA, C, NMAX,
$ CT, G, CC, LDC, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
END IF
ERRMAX = MAX( ERRMAX, ERR )
* If got really bad answer, report and
* return.
IF( FATAL )
$ GO TO 110
END IF
*
50 CONTINUE
*
60 CONTINUE
*
70 CONTINUE
*
80 CONTINUE
*
90 CONTINUE
*
100 CONTINUE
*
* Report result.
*
IF( ERRMAX.LT.THRESH )THEN
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10000 )SNAME, NC
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10001 )SNAME, NC
ELSE
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10002 )SNAME, NC, ERRMAX
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10003 )SNAME, NC, ERRMAX
END IF
GO TO 120
*
110 CONTINUE
WRITE( NOUT, FMT = 9996 )SNAME
CALL ZPRCN2(NOUT, NC, SNAME, IORDER, SIDE, UPLO, M, N, ALPHA, LDA,
$ LDB, BETA, LDC)
*
120 CONTINUE
RETURN
*
10003 FORMAT( ' ', A12,' COMPLETED THE ROW-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10002 FORMAT( ' ', A12,' COMPLETED THE COLUMN-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10001 FORMAT( ' ', A12,' PASSED THE ROW-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
10000 FORMAT( ' ', A12,' PASSED THE COLUMN-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
9998 FORMAT(' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH',
$ 'ANGED INCORRECTLY *******' )
9996 FORMAT( ' ******* ', A12,' FAILED ON CALL NUMBER:' )
9995 FORMAT(1X, I6, ': ', A12,'(', 2( '''', A1, ''',' ), 2( I3, ',' ),
$ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1,
$ ',', F4.1, '), C,', I3, ') .' )
9994 FORMAT(' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *',
$ '******' )
*
* End of ZCHK2.
*
END
*
SUBROUTINE ZPRCN2(NOUT, NC, SNAME, IORDER, SIDE, UPLO, M, N,
$ ALPHA, LDA, LDB, BETA, LDC)
INTEGER NOUT, NC, IORDER, M, N, LDA, LDB, LDC
DOUBLE COMPLEX ALPHA, BETA
CHARACTER*1 SIDE, UPLO
CHARACTER*12 SNAME
CHARACTER*14 CRC, CS,CU
IF (SIDE.EQ.'L')THEN
CS = ' CblasLeft'
ELSE
CS = ' CblasRight'
END IF
IF (UPLO.EQ.'U')THEN
CU = ' CblasUpper'
ELSE
CU = ' CblasLower'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC,SNAME,CRC, CS,CU
WRITE(NOUT, FMT = 9994)M, N, ALPHA, LDA, LDB, BETA, LDC
9995 FORMAT( 1X, I6, ': ', A12,'(', A14, ',', A14, ',', A14, ',')
9994 FORMAT( 10X, 2( I3, ',' ),' (',F4.1,',',F4.1, '), A,', I3,
$ ', B,', I3, ', (',F4.1,',',F4.1, '), ', 'C,', I3, ').' )
END
*
SUBROUTINE ZCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI,
$ FATAL, NIDIM, IDIM, NALF, ALF, NMAX, A, AA, AS,
$ B, BB, BS, CT, G, C, IORDER )
*
* Tests ZTRMM and ZTRSM.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO, ONE
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), ONE = ( 1.0D0, 0.0D0 ) )
DOUBLE PRECISION RZERO
PARAMETER ( RZERO = 0.0D0 )
* .. Scalar Arguments ..
DOUBLE PRECISION EPS, THRESH
INTEGER NALF, NIDIM, NMAX, NOUT, NTRA, IORDER
LOGICAL FATAL, REWI, TRACE
CHARACTER*12 SNAME
* .. Array Arguments ..
COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ),
$ AS( NMAX*NMAX ), B( NMAX, NMAX ),
$ BB( NMAX*NMAX ), BS( NMAX*NMAX ),
$ C( NMAX, NMAX ), CT( NMAX )
DOUBLE PRECISION G( NMAX )
INTEGER IDIM( NIDIM )
* .. Local Scalars ..
COMPLEX*16 ALPHA, ALS
DOUBLE PRECISION ERR, ERRMAX
INTEGER I, IA, ICD, ICS, ICT, ICU, IM, IN, J, LAA, LBB,
$ LDA, LDAS, LDB, LDBS, M, MS, N, NA, NARGS, NC,
$ NS
LOGICAL LEFT, NULL, RESET, SAME
CHARACTER*1 DIAG, DIAGS, SIDE, SIDES, TRANAS, TRANSA, UPLO,
$ UPLOS
CHARACTER*2 ICHD, ICHS, ICHU
CHARACTER*3 ICHT
* .. Local Arrays ..
LOGICAL ISAME( 13 )
* .. External Functions ..
LOGICAL LZE, LZERES
EXTERNAL LZE, LZERES
* .. External Subroutines ..
EXTERNAL ZMAKE, ZMMCH, CZTRMM, CZTRSM
* .. Intrinsic Functions ..
INTRINSIC MAX
* .. Scalars in Common ..
INTEGER INFOT, NOUTC
LOGICAL LERR, OK
* .. Common blocks ..
COMMON /INFOC/INFOT, NOUTC, OK, LERR
* .. Data statements ..
DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/, ICHS/'LR'/
* .. Executable Statements ..
*
NARGS = 11
NC = 0
RESET = .TRUE.
ERRMAX = RZERO
* Set up zero matrix for ZMMCH.
DO 20 J = 1, NMAX
DO 10 I = 1, NMAX
C( I, J ) = ZERO
10 CONTINUE
20 CONTINUE
*
DO 140 IM = 1, NIDIM
M = IDIM( IM )
*
DO 130 IN = 1, NIDIM
N = IDIM( IN )
* Set LDB to 1 more than minimum value if room.
LDB = M
IF( LDB.LT.NMAX )
$ LDB = LDB + 1
* Skip tests if not enough room.
IF( LDB.GT.NMAX )
$ GO TO 130
LBB = LDB*N
NULL = M.LE.0.OR.N.LE.0
*
DO 120 ICS = 1, 2
SIDE = ICHS( ICS: ICS )
LEFT = SIDE.EQ.'L'
IF( LEFT )THEN
NA = M
ELSE
NA = N
END IF
* Set LDA to 1 more than minimum value if room.
LDA = NA
IF( LDA.LT.NMAX )
$ LDA = LDA + 1
* Skip tests if not enough room.
IF( LDA.GT.NMAX )
$ GO TO 130
LAA = LDA*NA
*
DO 110 ICU = 1, 2
UPLO = ICHU( ICU: ICU )
*
DO 100 ICT = 1, 3
TRANSA = ICHT( ICT: ICT )
*
DO 90 ICD = 1, 2
DIAG = ICHD( ICD: ICD )
*
DO 80 IA = 1, NALF
ALPHA = ALF( IA )
*
* Generate the matrix A.
*
CALL ZMAKE( 'tr', UPLO, DIAG, NA, NA, A,
$ NMAX, AA, LDA, RESET, ZERO )
*
* Generate the matrix B.
*
CALL ZMAKE( 'ge', ' ', ' ', M, N, B, NMAX,
$ BB, LDB, RESET, ZERO )
*
NC = NC + 1
*
* Save every datum before calling the
* subroutine.
*
SIDES = SIDE
UPLOS = UPLO
TRANAS = TRANSA
DIAGS = DIAG
MS = M
NS = N
ALS = ALPHA
DO 30 I = 1, LAA
AS( I ) = AA( I )
30 CONTINUE
LDAS = LDA
DO 40 I = 1, LBB
BS( I ) = BB( I )
40 CONTINUE
LDBS = LDB
*
* Call the subroutine.
*
IF( SNAME( 10: 11 ).EQ.'mm' )THEN
IF( TRACE )
$ CALL ZPRCN3( NOUT, NC, SNAME, IORDER,
$ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA,
$ LDA, LDB)
IF( REWI )
$ REWIND NTRA
CALL CZTRMM(IORDER, SIDE, UPLO, TRANSA,
$ DIAG, M, N, ALPHA, AA, LDA,
$ BB, LDB )
ELSE IF( SNAME( 10: 11 ).EQ.'sm' )THEN
IF( TRACE )
$ CALL ZPRCN3( NOUT, NC, SNAME, IORDER,
$ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA,
$ LDA, LDB)
IF( REWI )
$ REWIND NTRA
CALL CZTRSM(IORDER, SIDE, UPLO, TRANSA,
$ DIAG, M, N, ALPHA, AA, LDA,
$ BB, LDB )
END IF
*
* Check if error-exit was taken incorrectly.
*
IF( .NOT.OK )THEN
WRITE( NOUT, FMT = 9994 )
FATAL = .TRUE.
GO TO 150
END IF
*
* See what data changed inside subroutines.
*
ISAME( 1 ) = SIDES.EQ.SIDE
ISAME( 2 ) = UPLOS.EQ.UPLO
ISAME( 3 ) = TRANAS.EQ.TRANSA
ISAME( 4 ) = DIAGS.EQ.DIAG
ISAME( 5 ) = MS.EQ.M
ISAME( 6 ) = NS.EQ.N
ISAME( 7 ) = ALS.EQ.ALPHA
ISAME( 8 ) = LZE( AS, AA, LAA )
ISAME( 9 ) = LDAS.EQ.LDA
IF( NULL )THEN
ISAME( 10 ) = LZE( BS, BB, LBB )
ELSE
ISAME( 10 ) = LZERES( 'ge', ' ', M, N, BS,
$ BB, LDB )
END IF
ISAME( 11 ) = LDBS.EQ.LDB
*
* If data was incorrectly changed, report and
* return.
*
SAME = .TRUE.
DO 50 I = 1, NARGS
SAME = SAME.AND.ISAME( I )
IF( .NOT.ISAME( I ) )
$ WRITE( NOUT, FMT = 9998 )I
50 CONTINUE
IF( .NOT.SAME )THEN
FATAL = .TRUE.
GO TO 150
END IF
*
IF( .NOT.NULL )THEN
IF( SNAME( 10: 11 ).EQ.'mm' )THEN
*
* Check the result.
*
IF( LEFT )THEN
CALL ZMMCH( TRANSA, 'N', M, N, M,
$ ALPHA, A, NMAX, B, NMAX,
$ ZERO, C, NMAX, CT, G,
$ BB, LDB, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
ELSE
CALL ZMMCH( 'N', TRANSA, M, N, N,
$ ALPHA, B, NMAX, A, NMAX,
$ ZERO, C, NMAX, CT, G,
$ BB, LDB, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
END IF
ELSE IF( SNAME( 10: 11 ).EQ.'sm' )THEN
*
* Compute approximation to original
* matrix.
*
DO 70 J = 1, N
DO 60 I = 1, M
C( I, J ) = BB( I + ( J - 1 )*
$ LDB )
BB( I + ( J - 1 )*LDB ) = ALPHA*
$ B( I, J )
60 CONTINUE
70 CONTINUE
*
IF( LEFT )THEN
CALL ZMMCH( TRANSA, 'N', M, N, M,
$ ONE, A, NMAX, C, NMAX,
$ ZERO, B, NMAX, CT, G,
$ BB, LDB, EPS, ERR,
$ FATAL, NOUT, .FALSE. )
ELSE
CALL ZMMCH( 'N', TRANSA, M, N, N,
$ ONE, C, NMAX, A, NMAX,
$ ZERO, B, NMAX, CT, G,
$ BB, LDB, EPS, ERR,
$ FATAL, NOUT, .FALSE. )
END IF
END IF
ERRMAX = MAX( ERRMAX, ERR )
* If got really bad answer, report and
* return.
IF( FATAL )
$ GO TO 150
END IF
*
80 CONTINUE
*
90 CONTINUE
*
100 CONTINUE
*
110 CONTINUE
*
120 CONTINUE
*
130 CONTINUE
*
140 CONTINUE
*
* Report result.
*
IF( ERRMAX.LT.THRESH )THEN
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10000 )SNAME, NC
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10001 )SNAME, NC
ELSE
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10002 )SNAME, NC, ERRMAX
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10003 )SNAME, NC, ERRMAX
END IF
GO TO 160
*
150 CONTINUE
WRITE( NOUT, FMT = 9996 )SNAME
CALL ZPRCN3( NOUT, NC, SNAME, IORDER, SIDE, UPLO, TRANSA, DIAG,
$ M, N, ALPHA, LDA, LDB)
*
160 CONTINUE
RETURN
*
10003 FORMAT( ' ', A12,' COMPLETED THE ROW-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10002 FORMAT( ' ', A12,' COMPLETED THE COLUMN-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10001 FORMAT( ' ', A12,' PASSED THE ROW-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
10000 FORMAT( ' ', A12,' PASSED THE COLUMN-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
9998 FORMAT(' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH',
$ 'ANGED INCORRECTLY *******' )
9996 FORMAT(' ******* ', A12,' FAILED ON CALL NUMBER:' )
9995 FORMAT(1X, I6, ': ', A12,'(', 4( '''', A1, ''',' ), 2( I3, ',' ),
$ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ') ',
$ ' .' )
9994 FORMAT(' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *',
$ '******' )
*
* End of ZCHK3.
*
END
*
SUBROUTINE ZPRCN3(NOUT, NC, SNAME, IORDER, SIDE, UPLO, TRANSA,
$ DIAG, M, N, ALPHA, LDA, LDB)
INTEGER NOUT, NC, IORDER, M, N, LDA, LDB
DOUBLE COMPLEX ALPHA
CHARACTER*1 SIDE, UPLO, TRANSA, DIAG
CHARACTER*12 SNAME
CHARACTER*14 CRC, CS, CU, CA, CD
IF (SIDE.EQ.'L')THEN
CS = ' CblasLeft'
ELSE
CS = ' CblasRight'
END IF
IF (UPLO.EQ.'U')THEN
CU = ' CblasUpper'
ELSE
CU = ' CblasLower'
END IF
IF (TRANSA.EQ.'N')THEN
CA = ' CblasNoTrans'
ELSE IF (TRANSA.EQ.'T')THEN
CA = ' CblasTrans'
ELSE
CA = 'CblasConjTrans'
END IF
IF (DIAG.EQ.'N')THEN
CD = ' CblasNonUnit'
ELSE
CD = ' CblasUnit'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC,SNAME,CRC, CS,CU
WRITE(NOUT, FMT = 9994)CA, CD, M, N, ALPHA, LDA, LDB
9995 FORMAT( 1X, I6, ': ', A12,'(', A14, ',', A14, ',', A14, ',')
9994 FORMAT( 10X, 2( A14, ',') , 2( I3, ',' ), ' (', F4.1, ',',
$ F4.1, '), A,', I3, ', B,', I3, ').' )
END
*
SUBROUTINE ZCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI,
$ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX,
$ A, AA, AS, B, BB, BS, C, CC, CS, CT, G,
$ IORDER )
*
* Tests ZHERK and ZSYRK.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) )
DOUBLE PRECISION RONE, RZERO
PARAMETER ( RONE = 1.0D0, RZERO = 0.0D0 )
* .. Scalar Arguments ..
DOUBLE PRECISION EPS, THRESH
INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA, IORDER
LOGICAL FATAL, REWI, TRACE
CHARACTER*12 SNAME
* .. Array Arguments ..
COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ),
$ AS( NMAX*NMAX ), B( NMAX, NMAX ),
$ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ),
$ C( NMAX, NMAX ), CC( NMAX*NMAX ),
$ CS( NMAX*NMAX ), CT( NMAX )
DOUBLE PRECISION G( NMAX )
INTEGER IDIM( NIDIM )
* .. Local Scalars ..
COMPLEX*16 ALPHA, ALS, BETA, BETS
DOUBLE PRECISION ERR, ERRMAX, RALPHA, RALS, RBETA, RBETS
INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, K, KS,
$ LAA, LCC, LDA, LDAS, LDC, LDCS, LJ, MA, N, NA,
$ NARGS, NC, NS
LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER
CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS
CHARACTER*2 ICHT, ICHU
* .. Local Arrays ..
LOGICAL ISAME( 13 )
* .. External Functions ..
LOGICAL LZE, LZERES
EXTERNAL LZE, LZERES
* .. External Subroutines ..
EXTERNAL CZHERK, ZMAKE, ZMMCH, CZSYRK
* .. Intrinsic Functions ..
INTRINSIC DCMPLX, MAX, DBLE
* .. Scalars in Common ..
INTEGER INFOT, NOUTC
LOGICAL LERR, OK
* .. Common blocks ..
COMMON /INFOC/INFOT, NOUTC, OK, LERR
* .. Data statements ..
DATA ICHT/'NC'/, ICHU/'UL'/
* .. Executable Statements ..
CONJ = SNAME( 8: 9 ).EQ.'he'
*
NARGS = 10
NC = 0
RESET = .TRUE.
ERRMAX = RZERO
*
DO 100 IN = 1, NIDIM
N = IDIM( IN )
* Set LDC to 1 more than minimum value if room.
LDC = N
IF( LDC.LT.NMAX )
$ LDC = LDC + 1
* Skip tests if not enough room.
IF( LDC.GT.NMAX )
$ GO TO 100
LCC = LDC*N
*
DO 90 IK = 1, NIDIM
K = IDIM( IK )
*
DO 80 ICT = 1, 2
TRANS = ICHT( ICT: ICT )
TRAN = TRANS.EQ.'C'
IF( TRAN.AND..NOT.CONJ )
$ TRANS = 'T'
IF( TRAN )THEN
MA = K
NA = N
ELSE
MA = N
NA = K
END IF
* Set LDA to 1 more than minimum value if room.
LDA = MA
IF( LDA.LT.NMAX )
$ LDA = LDA + 1
* Skip tests if not enough room.
IF( LDA.GT.NMAX )
$ GO TO 80
LAA = LDA*NA
*
* Generate the matrix A.
*
CALL ZMAKE( 'ge', ' ', ' ', MA, NA, A, NMAX, AA, LDA,
$ RESET, ZERO )
*
DO 70 ICU = 1, 2
UPLO = ICHU( ICU: ICU )
UPPER = UPLO.EQ.'U'
*
DO 60 IA = 1, NALF
ALPHA = ALF( IA )
IF( CONJ )THEN
RALPHA = DBLE( ALPHA )
ALPHA = DCMPLX( RALPHA, RZERO )
END IF
*
DO 50 IB = 1, NBET
BETA = BET( IB )
IF( CONJ )THEN
RBETA = DBLE( BETA )
BETA = DCMPLX( RBETA, RZERO )
END IF
NULL = N.LE.0
IF( CONJ )
$ NULL = NULL.OR.( ( K.LE.0.OR.RALPHA.EQ.
$ RZERO ).AND.RBETA.EQ.RONE )
*
* Generate the matrix C.
*
CALL ZMAKE( SNAME( 8: 9 ), UPLO, ' ', N, N, C,
$ NMAX, CC, LDC, RESET, ZERO )
*
NC = NC + 1
*
* Save every datum before calling the subroutine.
*
UPLOS = UPLO
TRANSS = TRANS
NS = N
KS = K
IF( CONJ )THEN
RALS = RALPHA
ELSE
ALS = ALPHA
END IF
DO 10 I = 1, LAA
AS( I ) = AA( I )
10 CONTINUE
LDAS = LDA
IF( CONJ )THEN
RBETS = RBETA
ELSE
BETS = BETA
END IF
DO 20 I = 1, LCC
CS( I ) = CC( I )
20 CONTINUE
LDCS = LDC
*
* Call the subroutine.
*
IF( CONJ )THEN
IF( TRACE )
$ CALL ZPRCN6( NTRA, NC, SNAME, IORDER,
$ UPLO, TRANS, N, K, RALPHA, LDA, RBETA,
$ LDC)
IF( REWI )
$ REWIND NTRA
CALL CZHERK( IORDER, UPLO, TRANS, N, K,
$ RALPHA, AA, LDA, RBETA, CC,
$ LDC )
ELSE
IF( TRACE )
$ CALL ZPRCN4( NTRA, NC, SNAME, IORDER,
$ UPLO, TRANS, N, K, ALPHA, LDA, BETA, LDC)
IF( REWI )
$ REWIND NTRA
CALL CZSYRK( IORDER, UPLO, TRANS, N, K,
$ ALPHA, AA, LDA, BETA, CC, LDC )
END IF
*
* Check if error-exit was taken incorrectly.
*
IF( .NOT.OK )THEN
WRITE( NOUT, FMT = 9992 )
FATAL = .TRUE.
GO TO 120
END IF
*
* See what data changed inside subroutines.
*
ISAME( 1 ) = UPLOS.EQ.UPLO
ISAME( 2 ) = TRANSS.EQ.TRANS
ISAME( 3 ) = NS.EQ.N
ISAME( 4 ) = KS.EQ.K
IF( CONJ )THEN
ISAME( 5 ) = RALS.EQ.RALPHA
ELSE
ISAME( 5 ) = ALS.EQ.ALPHA
END IF
ISAME( 6 ) = LZE( AS, AA, LAA )
ISAME( 7 ) = LDAS.EQ.LDA
IF( CONJ )THEN
ISAME( 8 ) = RBETS.EQ.RBETA
ELSE
ISAME( 8 ) = BETS.EQ.BETA
END IF
IF( NULL )THEN
ISAME( 9 ) = LZE( CS, CC, LCC )
ELSE
ISAME( 9 ) = LZERES( SNAME( 8: 9 ), UPLO, N,
$ N, CS, CC, LDC )
END IF
ISAME( 10 ) = LDCS.EQ.LDC
*
* If data was incorrectly changed, report and
* return.
*
SAME = .TRUE.
DO 30 I = 1, NARGS
SAME = SAME.AND.ISAME( I )
IF( .NOT.ISAME( I ) )
$ WRITE( NOUT, FMT = 9998 )I
30 CONTINUE
IF( .NOT.SAME )THEN
FATAL = .TRUE.
GO TO 120
END IF
*
IF( .NOT.NULL )THEN
*
* Check the result column by column.
*
IF( CONJ )THEN
TRANST = 'C'
ELSE
TRANST = 'T'
END IF
JC = 1
DO 40 J = 1, N
IF( UPPER )THEN
JJ = 1
LJ = J
ELSE
JJ = J
LJ = N - J + 1
END IF
IF( TRAN )THEN
CALL ZMMCH( TRANST, 'N', LJ, 1, K,
$ ALPHA, A( 1, JJ ), NMAX,
$ A( 1, J ), NMAX, BETA,
$ C( JJ, J ), NMAX, CT, G,
$ CC( JC ), LDC, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
ELSE
CALL ZMMCH( 'N', TRANST, LJ, 1, K,
$ ALPHA, A( JJ, 1 ), NMAX,
$ A( J, 1 ), NMAX, BETA,
$ C( JJ, J ), NMAX, CT, G,
$ CC( JC ), LDC, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
END IF
IF( UPPER )THEN
JC = JC + LDC
ELSE
JC = JC + LDC + 1
END IF
ERRMAX = MAX( ERRMAX, ERR )
* If got really bad answer, report and
* return.
IF( FATAL )
$ GO TO 110
40 CONTINUE
END IF
*
50 CONTINUE
*
60 CONTINUE
*
70 CONTINUE
*
80 CONTINUE
*
90 CONTINUE
*
100 CONTINUE
*
* Report result.
*
IF( ERRMAX.LT.THRESH )THEN
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10000 )SNAME, NC
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10001 )SNAME, NC
ELSE
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10002 )SNAME, NC, ERRMAX
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10003 )SNAME, NC, ERRMAX
END IF
GO TO 130
*
110 CONTINUE
IF( N.GT.1 )
$ WRITE( NOUT, FMT = 9995 )J
*
120 CONTINUE
WRITE( NOUT, FMT = 9996 )SNAME
IF( CONJ )THEN
CALL ZPRCN6( NOUT, NC, SNAME, IORDER, UPLO, TRANS, N, K, RALPHA,
$ LDA, RBETA, LDC)
ELSE
CALL ZPRCN4( NOUT, NC, SNAME, IORDER, UPLO, TRANS, N, K, ALPHA,
$ LDA, BETA, LDC)
END IF
*
130 CONTINUE
RETURN
*
10003 FORMAT( ' ', A12,' COMPLETED THE ROW-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10002 FORMAT( ' ', A12,' COMPLETED THE COLUMN-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10001 FORMAT( ' ', A12,' PASSED THE ROW-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
10000 FORMAT( ' ', A12,' PASSED THE COLUMN-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
9998 FORMAT(' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH',
$ 'ANGED INCORRECTLY *******' )
9996 FORMAT( ' ******* ', A12,' FAILED ON CALL NUMBER:' )
9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 )
9994 FORMAT(1X, I6, ': ', A12,'(', 2( '''', A1, ''',' ), 2( I3, ',' ),
$ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ') ',
$ ' .' )
9993 FORMAT(1X, I6, ': ', A12,'(', 2( '''', A1, ''',' ), 2( I3, ',' ),
$ '(', F4.1, ',', F4.1, ') , A,', I3, ',(', F4.1, ',', F4.1,
$ '), C,', I3, ') .' )
9992 FORMAT(' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *',
$ '******' )
*
* End of ZCHK4.
*
END
*
SUBROUTINE ZPRCN4(NOUT, NC, SNAME, IORDER, UPLO, TRANSA,
$ N, K, ALPHA, LDA, BETA, LDC)
INTEGER NOUT, NC, IORDER, N, K, LDA, LDC
DOUBLE COMPLEX ALPHA, BETA
CHARACTER*1 UPLO, TRANSA
CHARACTER*12 SNAME
CHARACTER*14 CRC, CU, CA
IF (UPLO.EQ.'U')THEN
CU = ' CblasUpper'
ELSE
CU = ' CblasLower'
END IF
IF (TRANSA.EQ.'N')THEN
CA = ' CblasNoTrans'
ELSE IF (TRANSA.EQ.'T')THEN
CA = ' CblasTrans'
ELSE
CA = 'CblasConjTrans'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC, SNAME, CRC, CU, CA
WRITE(NOUT, FMT = 9994)N, K, ALPHA, LDA, BETA, LDC
9995 FORMAT( 1X, I6, ': ', A12,'(', 3( A14, ',') )
9994 FORMAT( 10X, 2( I3, ',' ), ' (', F4.1, ',', F4.1 ,'), A,',
$ I3, ', (', F4.1,',', F4.1, '), C,', I3, ').' )
END
*
*
SUBROUTINE ZPRCN6(NOUT, NC, SNAME, IORDER, UPLO, TRANSA,
$ N, K, ALPHA, LDA, BETA, LDC)
INTEGER NOUT, NC, IORDER, N, K, LDA, LDC
DOUBLE PRECISION ALPHA, BETA
CHARACTER*1 UPLO, TRANSA
CHARACTER*12 SNAME
CHARACTER*14 CRC, CU, CA
IF (UPLO.EQ.'U')THEN
CU = ' CblasUpper'
ELSE
CU = ' CblasLower'
END IF
IF (TRANSA.EQ.'N')THEN
CA = ' CblasNoTrans'
ELSE IF (TRANSA.EQ.'T')THEN
CA = ' CblasTrans'
ELSE
CA = 'CblasConjTrans'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC, SNAME, CRC, CU, CA
WRITE(NOUT, FMT = 9994)N, K, ALPHA, LDA, BETA, LDC
9995 FORMAT( 1X, I6, ': ', A12,'(', 3( A14, ',') )
9994 FORMAT( 10X, 2( I3, ',' ),
$ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ').' )
END
*
SUBROUTINE ZCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI,
$ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX,
$ AB, AA, AS, BB, BS, C, CC, CS, CT, G, W,
$ IORDER )
*
* Tests ZHER2K and ZSYR2K.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO, ONE
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), ONE = ( 1.0D0, 0.0D0 ) )
DOUBLE PRECISION RONE, RZERO
PARAMETER ( RONE = 1.0D0, RZERO = 0.0D0 )
* .. Scalar Arguments ..
DOUBLE PRECISION EPS, THRESH
INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA, IORDER
LOGICAL FATAL, REWI, TRACE
CHARACTER*12 SNAME
* .. Array Arguments ..
COMPLEX*16 AA( NMAX*NMAX ), AB( 2*NMAX*NMAX ),
$ ALF( NALF ), AS( NMAX*NMAX ), BB( NMAX*NMAX ),
$ BET( NBET ), BS( NMAX*NMAX ), C( NMAX, NMAX ),
$ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ),
$ W( 2*NMAX )
DOUBLE PRECISION G( NMAX )
INTEGER IDIM( NIDIM )
* .. Local Scalars ..
COMPLEX*16 ALPHA, ALS, BETA, BETS
DOUBLE PRECISION ERR, ERRMAX, RBETA, RBETS
INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, JJAB,
$ K, KS, LAA, LBB, LCC, LDA, LDAS, LDB, LDBS,
$ LDC, LDCS, LJ, MA, N, NA, NARGS, NC, NS
LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER
CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS
CHARACTER*2 ICHT, ICHU
* .. Local Arrays ..
LOGICAL ISAME( 13 )
* .. External Functions ..
LOGICAL LZE, LZERES
EXTERNAL LZE, LZERES
* .. External Subroutines ..
EXTERNAL CZHER2K, ZMAKE, ZMMCH, CZSYR2K
* .. Intrinsic Functions ..
INTRINSIC DCMPLX, DCONJG, MAX, DBLE
* .. Scalars in Common ..
INTEGER INFOT, NOUTC
LOGICAL LERR, OK
* .. Common blocks ..
COMMON /INFOC/INFOT, NOUTC, OK, LERR
* .. Data statements ..
DATA ICHT/'NC'/, ICHU/'UL'/
* .. Executable Statements ..
CONJ = SNAME( 8: 9 ).EQ.'he'
*
NARGS = 12
NC = 0
RESET = .TRUE.
ERRMAX = RZERO
*
DO 130 IN = 1, NIDIM
N = IDIM( IN )
* Set LDC to 1 more than minimum value if room.
LDC = N
IF( LDC.LT.NMAX )
$ LDC = LDC + 1
* Skip tests if not enough room.
IF( LDC.GT.NMAX )
$ GO TO 130
LCC = LDC*N
*
DO 120 IK = 1, NIDIM
K = IDIM( IK )
*
DO 110 ICT = 1, 2
TRANS = ICHT( ICT: ICT )
TRAN = TRANS.EQ.'C'
IF( TRAN.AND..NOT.CONJ )
$ TRANS = 'T'
IF( TRAN )THEN
MA = K
NA = N
ELSE
MA = N
NA = K
END IF
* Set LDA to 1 more than minimum value if room.
LDA = MA
IF( LDA.LT.NMAX )
$ LDA = LDA + 1
* Skip tests if not enough room.
IF( LDA.GT.NMAX )
$ GO TO 110
LAA = LDA*NA
*
* Generate the matrix A.
*
IF( TRAN )THEN
CALL ZMAKE( 'ge', ' ', ' ', MA, NA, AB, 2*NMAX, AA,
$ LDA, RESET, ZERO )
ELSE
CALL ZMAKE( 'ge', ' ', ' ', MA, NA, AB, NMAX, AA, LDA,
$ RESET, ZERO )
END IF
*
* Generate the matrix B.
*
LDB = LDA
LBB = LAA
IF( TRAN )THEN
CALL ZMAKE( 'ge', ' ', ' ', MA, NA, AB( K + 1 ),
$ 2*NMAX, BB, LDB, RESET, ZERO )
ELSE
CALL ZMAKE( 'ge', ' ', ' ', MA, NA, AB( K*NMAX + 1 ),
$ NMAX, BB, LDB, RESET, ZERO )
END IF
*
DO 100 ICU = 1, 2
UPLO = ICHU( ICU: ICU )
UPPER = UPLO.EQ.'U'
*
DO 90 IA = 1, NALF
ALPHA = ALF( IA )
*
DO 80 IB = 1, NBET
BETA = BET( IB )
IF( CONJ )THEN
RBETA = DBLE( BETA )
BETA = DCMPLX( RBETA, RZERO )
END IF
NULL = N.LE.0
IF( CONJ )
$ NULL = NULL.OR.( ( K.LE.0.OR.ALPHA.EQ.
$ ZERO ).AND.RBETA.EQ.RONE )
*
* Generate the matrix C.
*
CALL ZMAKE( SNAME( 8: 9 ), UPLO, ' ', N, N, C,
$ NMAX, CC, LDC, RESET, ZERO )
*
NC = NC + 1
*
* Save every datum before calling the subroutine.
*
UPLOS = UPLO
TRANSS = TRANS
NS = N
KS = K
ALS = ALPHA
DO 10 I = 1, LAA
AS( I ) = AA( I )
10 CONTINUE
LDAS = LDA
DO 20 I = 1, LBB
BS( I ) = BB( I )
20 CONTINUE
LDBS = LDB
IF( CONJ )THEN
RBETS = RBETA
ELSE
BETS = BETA
END IF
DO 30 I = 1, LCC
CS( I ) = CC( I )
30 CONTINUE
LDCS = LDC
*
* Call the subroutine.
*
IF( CONJ )THEN
IF( TRACE )
$ CALL ZPRCN7( NTRA, NC, SNAME, IORDER,
$ UPLO, TRANS, N, K, ALPHA, LDA, LDB,
$ RBETA, LDC)
IF( REWI )
$ REWIND NTRA
CALL CZHER2K( IORDER, UPLO, TRANS, N, K,
$ ALPHA, AA, LDA, BB, LDB, RBETA,
$ CC, LDC )
ELSE
IF( TRACE )
$ CALL ZPRCN5( NTRA, NC, SNAME, IORDER,
$ UPLO, TRANS, N, K, ALPHA, LDA, LDB,
$ BETA, LDC)
IF( REWI )
$ REWIND NTRA
CALL CZSYR2K( IORDER, UPLO, TRANS, N, K,
$ ALPHA, AA, LDA, BB, LDB, BETA,
$ CC, LDC )
END IF
*
* Check if error-exit was taken incorrectly.
*
IF( .NOT.OK )THEN
WRITE( NOUT, FMT = 9992 )
FATAL = .TRUE.
GO TO 150
END IF
*
* See what data changed inside subroutines.
*
ISAME( 1 ) = UPLOS.EQ.UPLO
ISAME( 2 ) = TRANSS.EQ.TRANS
ISAME( 3 ) = NS.EQ.N
ISAME( 4 ) = KS.EQ.K
ISAME( 5 ) = ALS.EQ.ALPHA
ISAME( 6 ) = LZE( AS, AA, LAA )
ISAME( 7 ) = LDAS.EQ.LDA
ISAME( 8 ) = LZE( BS, BB, LBB )
ISAME( 9 ) = LDBS.EQ.LDB
IF( CONJ )THEN
ISAME( 10 ) = RBETS.EQ.RBETA
ELSE
ISAME( 10 ) = BETS.EQ.BETA
END IF
IF( NULL )THEN
ISAME( 11 ) = LZE( CS, CC, LCC )
ELSE
ISAME( 11 ) = LZERES( 'he', UPLO, N, N, CS,
$ CC, LDC )
END IF
ISAME( 12 ) = LDCS.EQ.LDC
*
* If data was incorrectly changed, report and
* return.
*
SAME = .TRUE.
DO 40 I = 1, NARGS
SAME = SAME.AND.ISAME( I )
IF( .NOT.ISAME( I ) )
$ WRITE( NOUT, FMT = 9998 )I
40 CONTINUE
IF( .NOT.SAME )THEN
FATAL = .TRUE.
GO TO 150
END IF
*
IF( .NOT.NULL )THEN
*
* Check the result column by column.
*
IF( CONJ )THEN
TRANST = 'C'
ELSE
TRANST = 'T'
END IF
JJAB = 1
JC = 1
DO 70 J = 1, N
IF( UPPER )THEN
JJ = 1
LJ = J
ELSE
JJ = J
LJ = N - J + 1
END IF
IF( TRAN )THEN
DO 50 I = 1, K
W( I ) = ALPHA*AB( ( J - 1 )*2*
$ NMAX + K + I )
IF( CONJ )THEN
W( K + I ) = DCONJG( ALPHA )*
$ AB( ( J - 1 )*2*
$ NMAX + I )
ELSE
W( K + I ) = ALPHA*
$ AB( ( J - 1 )*2*
$ NMAX + I )
END IF
50 CONTINUE
CALL ZMMCH( TRANST, 'N', LJ, 1, 2*K,
$ ONE, AB( JJAB ), 2*NMAX, W,
$ 2*NMAX, BETA, C( JJ, J ),
$ NMAX, CT, G, CC( JC ), LDC,
$ EPS, ERR, FATAL, NOUT,
$ .TRUE. )
ELSE
DO 60 I = 1, K
IF( CONJ )THEN
W( I ) = ALPHA*DCONJG( AB( ( K +
$ I - 1 )*NMAX + J ) )
W( K + I ) = DCONJG( ALPHA*
$ AB( ( I - 1 )*NMAX +
$ J ) )
ELSE
W( I ) = ALPHA*AB( ( K + I - 1 )*
$ NMAX + J )
W( K + I ) = ALPHA*
$ AB( ( I - 1 )*NMAX +
$ J )
END IF
60 CONTINUE
CALL ZMMCH( 'N', 'N', LJ, 1, 2*K, ONE,
$ AB( JJ ), NMAX, W, 2*NMAX,
$ BETA, C( JJ, J ), NMAX, CT,
$ G, CC( JC ), LDC, EPS, ERR,
$ FATAL, NOUT, .TRUE. )
END IF
IF( UPPER )THEN
JC = JC + LDC
ELSE
JC = JC + LDC + 1
IF( TRAN )
$ JJAB = JJAB + 2*NMAX
END IF
ERRMAX = MAX( ERRMAX, ERR )
*                             If we got a really bad answer, report and
*                             return.
IF( FATAL )
$ GO TO 140
70 CONTINUE
END IF
*
80 CONTINUE
*
90 CONTINUE
*
100 CONTINUE
*
110 CONTINUE
*
120 CONTINUE
*
130 CONTINUE
*
* Report result.
*
IF( ERRMAX.LT.THRESH )THEN
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10000 )SNAME, NC
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10001 )SNAME, NC
ELSE
IF ( IORDER.EQ.0) WRITE( NOUT, FMT = 10002 )SNAME, NC, ERRMAX
IF ( IORDER.EQ.1) WRITE( NOUT, FMT = 10003 )SNAME, NC, ERRMAX
END IF
GO TO 160
*
140 CONTINUE
IF( N.GT.1 )
$ WRITE( NOUT, FMT = 9995 )J
*
150 CONTINUE
WRITE( NOUT, FMT = 9996 )SNAME
IF( CONJ )THEN
CALL ZPRCN7( NOUT, NC, SNAME, IORDER, UPLO, TRANS, N, K,
$ ALPHA, LDA, LDB, RBETA, LDC)
ELSE
CALL ZPRCN5( NOUT, NC, SNAME, IORDER, UPLO, TRANS, N, K,
$ ALPHA, LDA, LDB, BETA, LDC)
END IF
*
160 CONTINUE
RETURN
*
10003 FORMAT( ' ', A12,' COMPLETED THE ROW-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10002 FORMAT( ' ', A12,' COMPLETED THE COLUMN-MAJOR COMPUTATIONAL ',
$ 'TESTS (', I6, ' CALLS)', /' ******* BUT WITH MAXIMUM TEST ',
$ 'RATIO ', F8.2, ' - SUSPECT *******' )
10001 FORMAT( ' ', A12,' PASSED THE ROW-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
10000 FORMAT( ' ', A12,' PASSED THE COLUMN-MAJOR COMPUTATIONAL TESTS',
$ ' (', I6, ' CALL', 'S)' )
9998 FORMAT(' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH',
$ 'ANGED INCORRECTLY *******' )
9996 FORMAT( ' ******* ', A12,' FAILED ON CALL NUMBER:' )
9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 )
9994 FORMAT(1X, I6, ': ', A12,'(', 2( '''', A1, ''',' ), 2( I3, ',' ),
$ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',', F4.1,
$ ', C,', I3, ') .' )
9993 FORMAT(1X, I6, ': ', A12,'(', 2( '''', A1, ''',' ), 2( I3, ',' ),
$ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1,
$ ',', F4.1, '), C,', I3, ') .' )
9992 FORMAT(' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *',
$ '******' )
*
* End of ZCHK5.
*
END
*
SUBROUTINE ZPRCN5(NOUT, NC, SNAME, IORDER, UPLO, TRANSA,
$ N, K, ALPHA, LDA, LDB, BETA, LDC)
INTEGER NOUT, NC, IORDER, N, K, LDA, LDB, LDC
DOUBLE COMPLEX ALPHA, BETA
CHARACTER*1 UPLO, TRANSA
CHARACTER*12 SNAME
CHARACTER*14 CRC, CU, CA
IF (UPLO.EQ.'U')THEN
CU = ' CblasUpper'
ELSE
CU = ' CblasLower'
END IF
IF (TRANSA.EQ.'N')THEN
CA = ' CblasNoTrans'
ELSE IF (TRANSA.EQ.'T')THEN
CA = ' CblasTrans'
ELSE
CA = 'CblasConjTrans'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC, SNAME, CRC, CU, CA
WRITE(NOUT, FMT = 9994)N, K, ALPHA, LDA, LDB, BETA, LDC
9995 FORMAT( 1X, I6, ': ', A12,'(', 3( A14, ',') )
9994 FORMAT( 10X, 2( I3, ',' ), ' (', F4.1, ',', F4.1, '), A,',
$ I3, ', B,', I3, ', (', F4.1, ',', F4.1, '), C,', I3, ').' )
END
*
*
SUBROUTINE ZPRCN7(NOUT, NC, SNAME, IORDER, UPLO, TRANSA,
$ N, K, ALPHA, LDA, LDB, BETA, LDC)
INTEGER NOUT, NC, IORDER, N, K, LDA, LDB, LDC
DOUBLE COMPLEX ALPHA
DOUBLE PRECISION BETA
CHARACTER*1 UPLO, TRANSA
CHARACTER*12 SNAME
CHARACTER*14 CRC, CU, CA
IF (UPLO.EQ.'U')THEN
CU = ' CblasUpper'
ELSE
CU = ' CblasLower'
END IF
IF (TRANSA.EQ.'N')THEN
CA = ' CblasNoTrans'
ELSE IF (TRANSA.EQ.'T')THEN
CA = ' CblasTrans'
ELSE
CA = 'CblasConjTrans'
END IF
IF (IORDER.EQ.1)THEN
CRC = ' CblasRowMajor'
ELSE
CRC = ' CblasColMajor'
END IF
WRITE(NOUT, FMT = 9995)NC, SNAME, CRC, CU, CA
WRITE(NOUT, FMT = 9994)N, K, ALPHA, LDA, LDB, BETA, LDC
9995 FORMAT( 1X, I6, ': ', A12,'(', 3( A14, ',') )
9994 FORMAT( 10X, 2( I3, ',' ), ' (', F4.1, ',', F4.1, '), A,',
$ I3, ', B,', I3, ',', F4.1, ', C,', I3, ').' )
END
*
SUBROUTINE ZMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, RESET,
$ TRANSL )
*
* Generates values for an M by N matrix A.
* Stores the values in the array AA in the data structure required
* by the routine, with unwanted elements set to rogue value.
*
* TYPE is 'ge', 'he', 'sy' or 'tr'.
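*
*  Entries of AA outside the wanted part of the matrix are set to the
*  rogue value, so that LZE and LZERES can detect any writes made there.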
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO, ONE
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ),
$ ONE = ( 1.0D0, 0.0D0 ) )
COMPLEX*16 ROGUE
PARAMETER ( ROGUE = ( -1.0D10, 1.0D10 ) )
DOUBLE PRECISION RZERO
PARAMETER ( RZERO = 0.0D0 )
DOUBLE PRECISION RROGUE
PARAMETER ( RROGUE = -1.0D10 )
* .. Scalar Arguments ..
COMPLEX*16 TRANSL
INTEGER LDA, M, N, NMAX
LOGICAL RESET
CHARACTER*1 DIAG, UPLO
CHARACTER*2 TYPE
* .. Array Arguments ..
COMPLEX*16 A( NMAX, * ), AA( * )
* .. Local Scalars ..
INTEGER I, IBEG, IEND, J, JJ
LOGICAL GEN, HER, LOWER, SYM, TRI, UNIT, UPPER
* .. External Functions ..
COMPLEX*16 ZBEG
EXTERNAL ZBEG
* .. Intrinsic Functions ..
INTRINSIC DCMPLX, DCONJG, DBLE
* .. Executable Statements ..
GEN = TYPE.EQ.'ge'
HER = TYPE.EQ.'he'
SYM = TYPE.EQ.'sy'
TRI = TYPE.EQ.'tr'
UPPER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'U'
LOWER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'L'
UNIT = TRI.AND.DIAG.EQ.'U'
*
* Generate data in array A.
*
DO 20 J = 1, N
DO 10 I = 1, M
IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) )
$ THEN
A( I, J ) = ZBEG( RESET ) + TRANSL
IF( I.NE.J )THEN
* Set some elements to zero
IF( N.GT.3.AND.J.EQ.N/2 )
$ A( I, J ) = ZERO
IF( HER )THEN
A( J, I ) = DCONJG( A( I, J ) )
ELSE IF( SYM )THEN
A( J, I ) = A( I, J )
ELSE IF( TRI )THEN
A( J, I ) = ZERO
END IF
END IF
END IF
10 CONTINUE
IF( HER )
$ A( J, J ) = DCMPLX( DBLE( A( J, J ) ), RZERO )
IF( TRI )
$ A( J, J ) = A( J, J ) + ONE
IF( UNIT )
$ A( J, J ) = ONE
20 CONTINUE
*
* Store elements in array AS in data structure required by routine.
*
IF( TYPE.EQ.'ge' )THEN
DO 50 J = 1, N
DO 30 I = 1, M
AA( I + ( J - 1 )*LDA ) = A( I, J )
30 CONTINUE
DO 40 I = M + 1, LDA
AA( I + ( J - 1 )*LDA ) = ROGUE
40 CONTINUE
50 CONTINUE
ELSE IF( TYPE.EQ.'he'.OR.TYPE.EQ.'sy'.OR.TYPE.EQ.'tr' )THEN
DO 90 J = 1, N
IF( UPPER )THEN
IBEG = 1
IF( UNIT )THEN
IEND = J - 1
ELSE
IEND = J
END IF
ELSE
IF( UNIT )THEN
IBEG = J + 1
ELSE
IBEG = J
END IF
IEND = N
END IF
DO 60 I = 1, IBEG - 1
AA( I + ( J - 1 )*LDA ) = ROGUE
60 CONTINUE
DO 70 I = IBEG, IEND
AA( I + ( J - 1 )*LDA ) = A( I, J )
70 CONTINUE
DO 80 I = IEND + 1, LDA
AA( I + ( J - 1 )*LDA ) = ROGUE
80 CONTINUE
IF( HER )THEN
JJ = J + ( J - 1 )*LDA
AA( JJ ) = DCMPLX( DBLE( AA( JJ ) ), RROGUE )
END IF
90 CONTINUE
END IF
RETURN
*
* End of ZMAKE.
*
END
SUBROUTINE ZMMCH( TRANSA, TRANSB, M, N, KK, ALPHA, A, LDA, B, LDB,
$ BETA, C, LDC, CT, G, CC, LDCC, EPS, ERR, FATAL,
$ NOUT, MV )
*
* Checks the results of the computational tests.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Parameters ..
COMPLEX*16 ZERO
PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) )
DOUBLE PRECISION RZERO, RONE
PARAMETER ( RZERO = 0.0D0, RONE = 1.0D0 )
* .. Scalar Arguments ..
COMPLEX*16 ALPHA, BETA
DOUBLE PRECISION EPS, ERR
INTEGER KK, LDA, LDB, LDC, LDCC, M, N, NOUT
LOGICAL FATAL, MV
CHARACTER*1 TRANSA, TRANSB
* .. Array Arguments ..
COMPLEX*16 A( LDA, * ), B( LDB, * ), C( LDC, * ),
$ CC( LDCC, * ), CT( * )
DOUBLE PRECISION G( * )
* .. Local Scalars ..
COMPLEX*16 CL
DOUBLE PRECISION ERRI
INTEGER I, J, K
LOGICAL CTRANA, CTRANB, TRANA, TRANB
* .. Intrinsic Functions ..
INTRINSIC ABS, DIMAG, DCONJG, MAX, DBLE, SQRT
* .. Statement Functions ..
DOUBLE PRECISION ABS1
* .. Statement Function definitions ..
ABS1( CL ) = ABS( DBLE( CL ) ) + ABS( DIMAG( CL ) )
* .. Executable Statements ..
TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C'
TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C'
CTRANA = TRANSA.EQ.'C'
CTRANB = TRANSB.EQ.'C'
*
* Compute expected result, one column at a time, in CT using data
* in A, B and C.
* Compute gauges in G.
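*  The gauge G( I ) accumulates a bound on the rounding of the exact
*  result, so the test ratio reported for each element is
*     ABS1( CT( I ) - CC( I, J ) )/( EPS*G( I ) )
*  (or divided by EPS alone when G( I ) is zero).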
*
DO 220 J = 1, N
*
DO 10 I = 1, M
CT( I ) = ZERO
G( I ) = RZERO
10 CONTINUE
IF( .NOT.TRANA.AND..NOT.TRANB )THEN
DO 30 K = 1, KK
DO 20 I = 1, M
CT( I ) = CT( I ) + A( I, K )*B( K, J )
G( I ) = G( I ) + ABS1( A( I, K ) )*ABS1( B( K, J ) )
20 CONTINUE
30 CONTINUE
ELSE IF( TRANA.AND..NOT.TRANB )THEN
IF( CTRANA )THEN
DO 50 K = 1, KK
DO 40 I = 1, M
CT( I ) = CT( I ) + DCONJG( A( K, I ) )*B( K, J )
G( I ) = G( I ) + ABS1( A( K, I ) )*
$ ABS1( B( K, J ) )
40 CONTINUE
50 CONTINUE
ELSE
DO 70 K = 1, KK
DO 60 I = 1, M
CT( I ) = CT( I ) + A( K, I )*B( K, J )
G( I ) = G( I ) + ABS1( A( K, I ) )*
$ ABS1( B( K, J ) )
60 CONTINUE
70 CONTINUE
END IF
ELSE IF( .NOT.TRANA.AND.TRANB )THEN
IF( CTRANB )THEN
DO 90 K = 1, KK
DO 80 I = 1, M
CT( I ) = CT( I ) + A( I, K )*DCONJG( B( J, K ) )
G( I ) = G( I ) + ABS1( A( I, K ) )*
$ ABS1( B( J, K ) )
80 CONTINUE
90 CONTINUE
ELSE
DO 110 K = 1, KK
DO 100 I = 1, M
CT( I ) = CT( I ) + A( I, K )*B( J, K )
G( I ) = G( I ) + ABS1( A( I, K ) )*
$ ABS1( B( J, K ) )
100 CONTINUE
110 CONTINUE
END IF
ELSE IF( TRANA.AND.TRANB )THEN
IF( CTRANA )THEN
IF( CTRANB )THEN
DO 130 K = 1, KK
DO 120 I = 1, M
CT( I ) = CT( I ) + DCONJG( A( K, I ) )*
$ DCONJG( B( J, K ) )
G( I ) = G( I ) + ABS1( A( K, I ) )*
$ ABS1( B( J, K ) )
120 CONTINUE
130 CONTINUE
ELSE
DO 150 K = 1, KK
DO 140 I = 1, M
CT( I ) = CT( I ) + DCONJG( A( K, I ) )*
$ B( J, K )
G( I ) = G( I ) + ABS1( A( K, I ) )*
$ ABS1( B( J, K ) )
140 CONTINUE
150 CONTINUE
END IF
ELSE
IF( CTRANB )THEN
DO 170 K = 1, KK
DO 160 I = 1, M
CT( I ) = CT( I ) + A( K, I )*
$ DCONJG( B( J, K ) )
G( I ) = G( I ) + ABS1( A( K, I ) )*
$ ABS1( B( J, K ) )
160 CONTINUE
170 CONTINUE
ELSE
DO 190 K = 1, KK
DO 180 I = 1, M
CT( I ) = CT( I ) + A( K, I )*B( J, K )
G( I ) = G( I ) + ABS1( A( K, I ) )*
$ ABS1( B( J, K ) )
180 CONTINUE
190 CONTINUE
END IF
END IF
END IF
DO 200 I = 1, M
CT( I ) = ALPHA*CT( I ) + BETA*C( I, J )
G( I ) = ABS1( ALPHA )*G( I ) +
$ ABS1( BETA )*ABS1( C( I, J ) )
200 CONTINUE
*
* Compute the error ratio for this result.
*
ERR = RZERO
DO 210 I = 1, M
ERRI = ABS1( CT( I ) - CC( I, J ) )/EPS
IF( G( I ).NE.RZERO )
$ ERRI = ERRI/G( I )
ERR = MAX( ERR, ERRI )
IF( ERR*SQRT( EPS ).GE.RONE )
$ GO TO 230
210 CONTINUE
*
220 CONTINUE
*
* If the loop completes, all results are at least half accurate.
GO TO 250
*
* Report fatal error.
*
230 FATAL = .TRUE.
WRITE( NOUT, FMT = 9999 )
DO 240 I = 1, M
IF( MV )THEN
WRITE( NOUT, FMT = 9998 )I, CT( I ), CC( I, J )
ELSE
WRITE( NOUT, FMT = 9998 )I, CC( I, J ), CT( I )
END IF
240 CONTINUE
IF( N.GT.1 )
$ WRITE( NOUT, FMT = 9997 )J
*
250 CONTINUE
RETURN
*
9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL',
$ 'F ACCURATE *******', /' EXPECTED RE',
$ 'SULT COMPUTED RESULT' )
9998 FORMAT( 1X, I7, 2( ' (', G15.6, ',', G15.6, ')' ) )
9997 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 )
*
* End of ZMMCH.
*
END
LOGICAL FUNCTION LZE( RI, RJ, LR )
*
* Tests if two arrays are identical.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Scalar Arguments ..
INTEGER LR
* .. Array Arguments ..
COMPLEX*16 RI( * ), RJ( * )
* .. Local Scalars ..
INTEGER I
* .. Executable Statements ..
DO 10 I = 1, LR
IF( RI( I ).NE.RJ( I ) )
$ GO TO 20
10 CONTINUE
LZE = .TRUE.
GO TO 30
20 CONTINUE
LZE = .FALSE.
30 RETURN
*
* End of LZE.
*
END
LOGICAL FUNCTION LZERES( TYPE, UPLO, M, N, AA, AS, LDA )
*
* Tests if selected elements in two arrays are equal.
*
* TYPE is 'ge' or 'he' or 'sy'.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Scalar Arguments ..
INTEGER LDA, M, N
CHARACTER*1 UPLO
CHARACTER*2 TYPE
* .. Array Arguments ..
COMPLEX*16 AA( LDA, * ), AS( LDA, * )
* .. Local Scalars ..
INTEGER I, IBEG, IEND, J
LOGICAL UPPER
* .. Executable Statements ..
UPPER = UPLO.EQ.'U'
IF( TYPE.EQ.'ge' )THEN
DO 20 J = 1, N
DO 10 I = M + 1, LDA
IF( AA( I, J ).NE.AS( I, J ) )
$ GO TO 70
10 CONTINUE
20 CONTINUE
ELSE IF( TYPE.EQ.'he'.OR.TYPE.EQ.'sy' )THEN
DO 50 J = 1, N
IF( UPPER )THEN
IBEG = 1
IEND = J
ELSE
IBEG = J
IEND = N
END IF
DO 30 I = 1, IBEG - 1
IF( AA( I, J ).NE.AS( I, J ) )
$ GO TO 70
30 CONTINUE
DO 40 I = IEND + 1, LDA
IF( AA( I, J ).NE.AS( I, J ) )
$ GO TO 70
40 CONTINUE
50 CONTINUE
END IF
*
60 CONTINUE
LZERES = .TRUE.
GO TO 80
70 CONTINUE
LZERES = .FALSE.
80 RETURN
*
* End of LZERES.
*
END
COMPLEX*16 FUNCTION ZBEG( RESET )
*
* Generates complex numbers as pairs of random numbers uniformly
* distributed between -0.5 and 0.5.
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Scalar Arguments ..
LOGICAL RESET
* .. Local Scalars ..
INTEGER I, IC, J, MI, MJ
* .. Save statement ..
SAVE I, IC, J, MI, MJ
* .. Intrinsic Functions ..
INTRINSIC DCMPLX
* .. Executable Statements ..
IF( RESET )THEN
* Initialize local variables.
MI = 891
MJ = 457
I = 7
J = 7
IC = 0
RESET = .FALSE.
END IF
*
* The sequence of values of I or J is bounded between 1 and 999.
* If initial I or J = 1,2,3,6,7 or 9, the period will be 50.
* If initial I or J = 4 or 8, the period will be 25.
* If initial I or J = 5, the period will be 10.
* IC is used to break up the period by skipping 1 value of I or J
* in 6.
*
IC = IC + 1
10 I = I*MI
J = J*MJ
I = I - 1000*( I/1000 )
J = J - 1000*( J/1000 )
IF( IC.GE.5 )THEN
IC = 0
GO TO 10
END IF
ZBEG = DCMPLX( ( I - 500 )/1001.0D0, ( J - 500 )/1001.0D0 )
RETURN
*
* End of ZBEG.
*
END
DOUBLE PRECISION FUNCTION DDIFF( X, Y )
*
* Auxiliary routine for test program for Level 3 Blas.
*
* -- Written on 8-February-1989.
* Jack Dongarra, Argonne National Laboratory.
* Iain Duff, AERE Harwell.
* Jeremy Du Croz, Numerical Algorithms Group Ltd.
* Sven Hammarling, Numerical Algorithms Group Ltd.
*
* .. Scalar Arguments ..
DOUBLE PRECISION X, Y
* .. Executable Statements ..
DDIFF = X - Y
RETURN
*
* End of DDIFF.
*
END
|
State Before: ι : Type ?u.77884
α : Type u
β : Type v
γ : Type w
δ : Type x
l₁ l₂ : List α
inst✝ : DecidableEq α
a b : α
l : List α
x✝ : a = b
e : a = b := x✝
⊢ indexOf a (b :: l) = 0
State After: ι : Type ?u.77884
α : Type u
β : Type v
γ : Type w
δ : Type x
l₁ l₂ : List α
inst✝ : DecidableEq α
a b : α
l : List α
x✝ : a = b
e : a = b := x✝
⊢ indexOf b (b :: l) = 0
Tactic: rw [e]
State Before: ι : Type ?u.77884
α : Type u
β : Type v
γ : Type w
δ : Type x
l₁ l₂ : List α
inst✝ : DecidableEq α
a b : α
l : List α
x✝ : a = b
e : a = b := x✝
⊢ indexOf b (b :: l) = 0
State After: no goals
Tactic: exact indexOf_cons_self b l |
# SPACE incorporating information of potential hub nodes
espace <- function(X,hub_indx, alpha, lambda,maxit_in=1000,maxit_out=5,tol=1e-6)
{
n = nrow(X)
p = ncol(X)
nh = length(hub_indx)
rho = matrix(0,p,p)
rsd = matrix(0,n,p)
sigma = rep(0,p)
out <- .C('espace',n = as.integer(n), p = as.integer(p), nh = as.integer(nh),
X = as.double(X), hub_indx = as.integer(hub_indx), alpha = as.double(alpha),
lam = as.double(lambda), niter_in = as.integer(maxit_in), niter_out=as.integer(maxit_out),
tol = as.double(tol), rho = as.double(rho), residual = as.double(rsd), sigma=as.double(sigma),PACKAGE='espace')
out$rho <- matrix(out$rho,p,p)
output <- list(rho=out$rho, alpha=alpha, lambda=lambda, residual=matrix(out$residual,n,p), w_d=out$sigma)
return(output)
}
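
# Usage sketch (illustrative): assumes the compiled 'espace' C routine has
# been loaded (the .C call above uses PACKAGE='espace').
#
# X <- matrix(rnorm(100 * 20), 100, 20) # hypothetical n x p data matrix
# fit <- espace(X, hub_indx = c(1, 5), alpha = 0.5, lambda = 50)
# fit$rho # estimated partial-correlation (rho) matrix
|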
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import numpy as np
import torch
from ignite.metrics import Metric
from data.datasets.eval_reid import eval_func
class R1_mAP(Metric):
def __init__(self, num_query, max_rank=50):
super(R1_mAP, self).__init__()
self.num_query = num_query
self.max_rank = max_rank
def reset(self):
self.feats = []
self.pids = []
self.camids = []
def update(self, output):
feat, pid, camid = output
self.feats.append(feat)
self.pids.extend(np.asarray(pid))
self.camids.extend(np.asarray(camid))
def compute(self):
feats = torch.cat(self.feats, dim=0)
# query
qf = feats[:self.num_query]
q_pids = np.asarray(self.pids[:self.num_query])
q_camids = np.asarray(self.camids[:self.num_query])
# gallery
gf = feats[self.num_query:]
g_pids = np.asarray(self.pids[self.num_query:])
g_camids = np.asarray(self.camids[self.num_query:])
m, n = qf.shape[0], gf.shape[0]
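# squared Euclidean distances via ||q||^2 + ||g||^2 - 2 q.g^T,
# computed without materializing pairwise differences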
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
distmat = distmat.cpu().numpy()
cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
return cmc, mAP
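
# Usage sketch (illustrative; assumes an ignite Engine `evaluator` whose
# step returns (feat, pid, camid) per batch):
# metric = R1_mAP(num_query, max_rank=50)
# metric.attach(evaluator, 'r1_mAP')
# evaluator.run(val_loader)
# cmc, mAP = evaluator.state.metrics['r1_mAP']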
|
Mount Sterling Advocate, "Fact and Observations at Morehead" (1900). Morehead State College Histories. 27. |
Formal statement is: lemma mem_cball_leI: "x \<in> cball y e \<Longrightarrow> e \<le> f \<Longrightarrow> x \<in> cball y f" Informal statement is: If $x$ is in the closed ball of radius $e$ centered at $y$, and $e \leq f$, then $x$ is in the closed ball of radius $f$ centered at $y$. |
#include ":include:acap.h"
resource 'EuAC' (128,purgeable)
{{
3,0,0,"vendor.Eudora.PopAccount",
4,0,0,"vendor.Eudora.SMTPServer",
5,0,0,"vendor.Eudora.ReturnAddress",
6,0,0,"vendor.Eudora.CheckForMailEvery",
18,ACAPBoolean,0,"vendor.Eudora.LeaveMailOnServer",
23,0,0,"vendor.Eudora.PhServer",
26,ACAPBoolean,0,"vendor.Eudora.IncludeResource",
34,ACAPBoolean,0,"vendor.Eudora.OverlapPOP3",
69,0,0,"vendor.Eudora.FirstUnread",
74,0,0,"vendor.Eudora.DSDefault",
77,0,0,"vendor.Eudora.RealName",
87,ACAPBoolean,0,"vendor.Eudora.UsePOPSend",
91,ACAPRevBoolean,0,"vendor.Eudora.UseQuotedPrintable",
101,0,0,"vendor.Eudora.LeaveOnServerDays",
103,0,0,"vendor.Eudora.FingerServer",
106,ACAPBoolean,0,"vendor.Eudora.DNSLoadBalancing",
116,0,0,"vendor.Eudora.DomainQualifier",
190,ACAPBoolean,0,"vendor.Eudora.UseMX",
196,ACAPBoolean,0,"vendor.Eudora.AvoidSendMailDelays",
217,ACAPBoolean,0,"vendor.Eudora.UseAcapServer",
218,0,0,"vendor.Eudora.ACAPServer",
219,0,0,"vendor.Eudora.AcapUserID",
221,0,0,"vendor.Eudora.MailName",
222,0,0,"vendor.Eudora.MailServer",
312,0,0,"vendor.Eudora.HTTPProxyHost",
313,0,0,"vendor.Eudora.UseHTTPProxy",
6814,0,0,"vendor.Eudora.PhPort",
6820,0,0,"vendor.Eudora.POPPort",
7211,0,0,"vendor.Eudora.SMTPPort",
7616,ACAPWeKeepK,0,"vendor.Eudora.BigMessageThreshold",
8216,0,0,"vendor.Eudora.Passwordchangeport",
8403,0,0,"vendor.Eudora.FingerPort",
8613,0,0,"vendor.Eudora.PhReturn",
10108,0,0,"vendor.Eudora.LDAPPort",
10109,0,0,"vendor.Eudora.ACAPPort",
10110,0,0,"vendor.Eudora.POPAuthenticate",
10111,0,0,"vendor.Eudora.SendFormat",
11319,0,0,"vendor.Eudora.IMAPPort",
}};
resource 'EuAl' (128,purgeable)
{{
3,
4,
5,
6,
18,
23,
26,
34,
69,
74,
77,
87,
91,
101,
103,
106,
116,
190,
196,
217,
218,
219,
221,
222,
312,
313,
6814,
6820,
7211,
7616,
8216,
8403,
8613,
10108,
10109,
10110,
10111,
11319,
}};
|
// [===========================================================================]
// [ M o n e t a ]
// [---------------------------------------------------------------------------]
// [ ]
// [ Copyright (C) 2005-2015 ]
// [ Rodrigo Madera <[email protected]> ]
// [ ]
// [---------------------------------------------------------------------------]
// [ Distributed under the Boost Software License, Version 1.0 ]
// [ Read accompanying LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt ]
// [===========================================================================]
#pragma once
#include "detail/soci_includes.hxx"
#include "../../make_entity.hxx"
#include "../../traits/is_entity.hxx"
#include "../../traits/extract_pk.hxx"
#include "../../traits/rtuple.hxx"
#include "../../sql/generators/insert.hxx"
#include <boost/fusion/view/zip_view.hpp>
#include <boost/fusion/tuple.hpp>
namespace moneta { namespace serialization { namespace soci {
// TODO: Move this somewhere else.
template <class Entity>
class incremental_integer_generator {
::soci::session& _session;
public:
incremental_integer_generator(::soci::session& session)
: _session(session) {
}
typename traits::pk<Entity>::type operator()() const {
// Only single-field primary keys are supported.
//BOOST_MPL_ASSERT((boost::mpl::and_<
// boost::mpl::not_<
// traits::is_fusion_vector<typename traits::pk<Entity>::type>
// >,
// boost::mpl::equal_to<
// boost::mpl::size<typename traits::pk<Entity>::type>,
// boost::mpl::int_<1>::type
// >
//>));
const std::string field = traits::sql::get_pk_field_names<Entity>()[0];
const std::string table = traits::sql::table_name<typename traits::pure_type<Entity>::type>::get(); // FIXME: Make this nice.
//int id = 0;
boost::optional<int> id;
try {
_session << boost::format("select max(%s) from %s") % field % table,
::soci::into(id);
} catch (...) {
// FIXME: Make this better.
id = 0;
}
return typename traits::pk<Entity>::type(
id? ++id.get() : 1
);
}
};
class recursive_soci_serializer {
::soci::session& _session;
public:
recursive_soci_serializer(::soci::session& session)
: _session(session) {
}
template <typename T>
typename boost::enable_if<
traits::is_entity<
typename boost::mpl::at_c<T, 0>::type
>,
void
>::type
operator()(T& pair) const {
BOOST_MPL_ASSERT((
boost::mpl::equal_to<
boost::mpl::size<T>::type,
boost::mpl::int_<2>
>
));
boost::fusion::get<1>(pair) = soci_create<
typename traits::pure_type<typename boost::mpl::at_c<T, 0>::type>::type
>(_session, boost::fusion::get<0>(pair));
}
template <typename T>
typename boost::enable_if<
boost::mpl::not_<
traits::is_entity<
typename boost::mpl::at_c<T, 0>::type
>
>,
void
>::type
operator()(T& pair) const {
BOOST_MPL_ASSERT((
boost::mpl::equal_to<
boost::mpl::size<T>::type,
boost::mpl::int_<2>
>
));
boost::fusion::get<1>(pair) = boost::fusion::get<0>(pair);
}
};
template <class Entity>//, class SociIdGenerator = boost::function<int()> >
typename traits::pk<Entity>::type
soci_create(::soci::session& session, Entity& entity) {
//, SociIdGenerator id_generator = incremental_integer_generator<Entity>(session)) {
//// TODO: Allow specializations.
//SociIdGenerator id_generator = incremental_integer_generator<Entity>(session);
//if (traits::has_empty_pk(entity)) {
// traits::extract_pk(entity) = id_generator();
//}
typename traits::tie<Entity>::type entity_tuple = traits::to_tie<Entity>(entity);
typename traits::rtuple<Entity>::type rtuple;
typedef boost::fusion::vector<
typename traits::tie<Entity>::type&,
typename traits::rtuple<Entity>::type&
> zip_vector_type;
boost::fusion::zip_view<zip_vector_type> zip(zip_vector_type(entity_tuple, rtuple));
boost::fusion::for_each(zip, recursive_soci_serializer(session));
session << sql::generators::insert_into_table<Entity>(), ::soci::use(rtuple);
return traits::extract_pk(entity);
}
}}}
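
// Usage sketch (illustrative; `person` stands for a hypothetical entity type
// already described to moneta's traits):
//
// soci::session sql(/* backend, connection string */);
// person p; // fill in the entity's fields
// auto pk = moneta::serialization::soci::soci_create(sql, p);
// // pk now holds the primary key assigned on insertion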
|
Formal statement is: lemma homotopic_into_retract: "\<lbrakk>f ` S \<subseteq> T; g ` S \<subseteq> T; T retract_of U; homotopic_with_canon (\<lambda>x. True) S U f g\<rbrakk> \<Longrightarrow> homotopic_with_canon (\<lambda>x. True) S T f g" Informal statement is: If $f$ and $g$ map $S$ into $T$, $T$ is a retract of $U$, and $f$ and $g$ are homotopic as maps from $S$ to $U$, then $f$ and $g$ are homotopic as maps from $S$ to $T$. |
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f✝ : β → γ
α✝ β✝ : Type u
f : Id (α✝ → β✝)
x : Id α✝
⊢ pure (Seq.seq f fun x_1 => x) = Seq.seq (pure f) fun x_1 => pure x
[PROOFSTEP]
simp only [map_pure, seq_pure]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f✝ : β → γ
α✝ β✝ : Type u
f : Id (α✝ → β✝)
x : Id α✝
⊢ pure (Seq.seq f fun x_1 => x) = pure (f x)
[PROOFSTEP]
rfl
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
⊢ map f <$> traverse g x = traverse (map f ∘ g) x
[PROOFSTEP]
rw [map_eq_traverse_id f]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
⊢ traverse (pure ∘ f) <$> traverse g x = traverse (map f ∘ g) x
[PROOFSTEP]
refine' (comp_traverse (pure ∘ f) g x).symm.trans _
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
⊢ traverse (Comp.mk ∘ map (pure ∘ f) ∘ g) x = traverse (map f ∘ g) x
[PROOFSTEP]
congr
[GOAL]
case h.e_4.h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
⊢ Comp.instApplicativeComp = inst✝³
[PROOFSTEP]
apply Comp.applicative_comp_id
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h : β → G γ
f✝ : β → γ
x✝ : t β
f : β → F γ
g : α → β
x : t α
⊢ traverse f (g <$> x) = traverse (f ∘ g) x
[PROOFSTEP]
rw [@map_eq_traverse_id t _ _ _ _ g]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h : β → G γ
f✝ : β → γ
x✝ : t β
f : β → F γ
g : α → β
x : t α
⊢ traverse f (traverse (pure ∘ g) x) = traverse (f ∘ g) x
[PROOFSTEP]
refine' (comp_traverse (G := Id) f (pure ∘ g) x).symm.trans _
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h : β → G γ
f✝ : β → γ
x✝ : t β
f : β → F γ
g : α → β
x : t α
⊢ traverse (Comp.mk ∘ map f ∘ pure ∘ g) x = traverse (f ∘ g) x
[PROOFSTEP]
congr
[GOAL]
case h.e_4.h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h : β → G γ
f✝ : β → γ
x✝ : t β
f : β → F γ
g : α → β
x : t α
⊢ Comp.instApplicativeComp = inst✝³
[PROOFSTEP]
apply Comp.applicative_id_comp
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
⊢ traverse pure x = pure x
[PROOFSTEP]
have : traverse pure x = pure (traverse (m := Id) pure x) := (naturality (PureTransformation F) pure x).symm
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
this : traverse pure x = pure (traverse pure x)
⊢ traverse pure x = pure x
[PROOFSTEP]
rwa [id_traverse] at this
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t α
⊢ sequence (pure <$> x) = pure x
[PROOFSTEP]
simp [sequence, traverse_map, id_traverse]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t (F (G α))
⊢ sequence (Comp.mk <$> x) = Comp.mk (sequence <$> sequence x)
[PROOFSTEP]
simp [sequence, traverse_map]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t (F (G α))
⊢ traverse Comp.mk x = Comp.mk (traverse id <$> traverse id x)
[PROOFSTEP]
rw [← comp_traverse]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
x : t (F (G α))
⊢ traverse Comp.mk x = traverse (Comp.mk ∘ map id ∘ id) x
[PROOFSTEP]
simp [map_id]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x✝ : t β
η : ApplicativeTransformation F G
x : t (F α)
⊢ (fun {α} => ApplicativeTransformation.app η α) (sequence x) =
sequence ((fun {α} => ApplicativeTransformation.app η α) <$> x)
[PROOFSTEP]
simp [sequence, naturality, traverse_map]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x : t β
⊢ traverse pure = pure
[PROOFSTEP]
ext
[GOAL]
case h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f : β → γ
x : t β
x✝ : t α
⊢ traverse pure x✝ = pure x✝
[PROOFSTEP]
exact id_traverse _
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h✝ : β → G γ
f : β → γ
x : t β
g : α → F β
h : β → G γ
⊢ traverse (Comp.mk ∘ map h ∘ g) = Comp.mk ∘ map (traverse h) ∘ traverse g
[PROOFSTEP]
ext
[GOAL]
case h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h✝ : β → G γ
f : β → γ
x : t β
g : α → F β
h : β → G γ
x✝ : t α
⊢ traverse (Comp.mk ∘ map h ∘ g) x✝ = (Comp.mk ∘ map (traverse h) ∘ traverse g) x✝
[PROOFSTEP]
exact comp_traverse _ _ _
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f✝ : β → γ
x : t β
f : β → γ
⊢ traverse (pure ∘ f) = pure ∘ map f
[PROOFSTEP]
ext
[GOAL]
case h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f✝ : β → γ
x : t β
f : β → γ
x✝ : t β
⊢ traverse (pure ∘ f) x✝ = (pure ∘ map f) x✝
[PROOFSTEP]
exact traverse_eq_map_id _ _
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h✝ : β → G γ
f : β → γ
x : t β
g : α → β
h : β → G γ
⊢ traverse (h ∘ g) = traverse h ∘ map g
[PROOFSTEP]
ext
[GOAL]
case h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h✝ : β → G γ
f : β → γ
x : t β
g : α → β
h : β → G γ
x✝ : t α
⊢ traverse (h ∘ g) x✝ = (traverse h ∘ map g) x✝
[PROOFSTEP]
rw [comp_apply, traverse_map]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h✝ : β → G γ
f : β → γ
x : t β
g : α → G β
h : β → γ
⊢ traverse (map h ∘ g) = map (map h) ∘ traverse g
[PROOFSTEP]
ext
[GOAL]
case h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g✝ : α → F β
h✝ : β → G γ
f : β → γ
x : t β
g : α → G β
h : β → γ
x✝ : t α
⊢ traverse (map h ∘ g) x✝ = (map (map h) ∘ traverse g) x✝
[PROOFSTEP]
rw [comp_apply, map_traverse]
[GOAL]
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f✝ : β → γ
x : t β
η : ApplicativeTransformation F G
f : α → F β
⊢ traverse ((fun {α} => ApplicativeTransformation.app η α) ∘ f) =
(fun {α} => ApplicativeTransformation.app η α) ∘ traverse f
[PROOFSTEP]
ext
[GOAL]
case h
t : Type u → Type u
inst✝⁵ : Traversable t
inst✝⁴ : LawfulTraversable t
F G : Type u → Type u
inst✝³ : Applicative F
inst✝² : LawfulApplicative F
inst✝¹ : Applicative G
inst✝ : LawfulApplicative G
α β γ : Type u
g : α → F β
h : β → G γ
f✝ : β → γ
x : t β
η : ApplicativeTransformation F G
f : α → F β
x✝ : t α
⊢ traverse ((fun {α} => ApplicativeTransformation.app η α) ∘ f) x✝ =
((fun {α} => ApplicativeTransformation.app η α) ∘ traverse f) x✝
[PROOFSTEP]
rw [comp_apply, naturality]
|
The arrival of a new owner in Lerner and of manager Martin O'Neill marked the start of a new period of optimism at Villa Park, and sweeping changes occurred throughout the club, including a new badge, a new kit sponsor and team changes in the summer of 2007. The first Cup final of the Lerner era came in 2010, when Villa were beaten 2–1 in the League Cup Final. Villa made a second trip to Wembley in that season, losing 3–0 to Chelsea in the FA Cup semifinal. Just five days before the opening day of the 2010–11 season, O'Neill resigned as manager with immediate effect. The club appointed Gérard Houllier as a replacement in September 2010, but he stepped down on 1 June 2011 due to ill-health. Houllier was replaced by Birmingham City manager Alex McLeish, despite numerous protests from fans against his appointment; this was the first time that a manager had moved directly from Birmingham to Villa. McLeish's contract was terminated at the end of the 2011–12 season after Villa finished in 16th place, only just above the relegation zone. On 2 July 2012, Aston Villa confirmed the appointment of former Norwich City manager Paul Lambert as the replacement for McLeish. On 28 February 2012, the club announced a financial loss of £53.9 million. Lerner put the club up for sale on 12 May 2014, with an estimated value of £200 million. With Lerner still on board, in the 2014–15 season Aston Villa scored just 12 goals in 25 league games, the lowest in Premier League history, and Lambert was sacked on 11 February 2015. Tim Sherwood succeeded him, and saved Aston Villa from relegation while also leading them to the 2015 FA Cup Final, but he was sacked in the 2015–16 season, as was his successor Rémi Garde. Eric Black took temporary charge of the team, but was not able to prevent Villa from being relegated for the first time since 1987. On 2 June 2016, Roberto Di Matteo was announced as the club's new manager.
|
Require Import Bool Arith List Cpdt.CpdtTactics.
Set Implicit Arguments.
Set Asymmetric Patterns.
Definition var := nat.
Inductive prop : Set :=
| Var : var -> prop
| Neg : prop -> prop
| Conj : prop -> prop -> prop
| Disj : prop -> prop -> prop.
Fixpoint propDenote (truth : var -> bool) (p : prop) : Prop :=
match p with
| Var v => if truth v then True else False
| Neg p' => ~ propDenote truth p'
| Conj p1 p2 => propDenote truth p1 /\ propDenote truth p2
| Disj p1 p2 => propDenote truth p1 \/ propDenote truth p2
end.
Notation "'Yes'" := (left _ _).
Notation "'No'" := (right _ _).
Definition bool_true_dec : forall b, {b = true} + {b = true -> False}.
refine (fun b =>
match b with
| true => Yes
| false => No
end). reflexivity. discriminate.
Defined.
Definition decide : forall (truth : var -> bool) (p : prop),
{propDenote truth p} + {~ propDenote truth p}.
intros. induction p; crush. destruct (truth v); crush.
Defined.
Notation "[ e ]" := (exist _ e _).
Notation "x <- e1 ; e2" :=
(match e1 with exist x _ => e2 end)
(right associativity, at level 60).
Definition negate : forall p : prop,
{p' : prop | forall truth, propDenote truth p <-> ~ propDenote truth p'}.
refine (fix F (p : prop) : {p' : prop | forall truth, propDenote truth p <-> ~ propDenote truth p'} :=
match p with
| Var v => [Neg (Var v)]
| Neg p => [p]
| Conj p1 p2 =>
p1' <- F p1;
p2' <- F p2;
[Disj p1' p2']
| Disj p1 p2 =>
p1' <- F p1;
p2' <- F p2;
[Conj p1' p2']
end); crush;
repeat (match goal with
| [i : forall truth : var -> bool, _ <-> _ |- _] =>
destruct (i truth); clear i
| [|- context[if ?E then _ else _]] =>
destruct E
end); crush.
destruct (decide truth p1'); crush.
Defined.
|
(* (c) Copyright Microsoft Corporation and Inria. All rights reserved. *)
Require Import ssreflect.
Require Import ssrbool.
Require Import funs.
Require Import dataset.
Require Import ssrnat.
Require Import seq.
Require Import paths.
Require Import znat.
Require Import grid.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
(* Mattes are finite sets of grid squares that are delimited by a simple grid *)
(* ring; we explicitly include the enumeration of the region and the ring. *)
(* Mattes will be used to define conservative approximations of arbitrary  *)
(* connected open sets of points (regions). We therefore need to provide *)
(* operations for extending a matte in order to improve the approximation. *)
(* This involves three different operations : *)
(* - adding pixels within a specified rectangle that the matte meets, so *)
(* that a specific pixel is covered. *)
(* - adding the pixels surrounding a grid point of the matte boundary. *)
(* - refining the grid to ensure that the two previous operations are safe. *)
(* Note that we can't blindly add a large rectangle to a matte if we want to *)
(* preserve its geometrical properties (we might end up with a disconnected *)
(* contour). We reduce the first two operations above to two primitives, which *)
(* add a pixel that has exactly one or two consecutive sides in common with *)
(* the matte, respectively (more precisely, 2 or 3 consecutive vertices in *)
(* common with the matte contour vertices). We don't actually provide the *)
(* second operation here, because it requires multiple grid refinement. *)
(* Instead we provide a basic step that needs to be iterated to accomplish the *)
(* operation, along with the metric ("matte_order") that decreases with that *)
(* step. *)
Definition mrlink p1 p2 := end1g p1 =d end0g p2.
Record matte : Set := Matte {
mdisk :> gpointseq;
mring : gpointseq;
matte_ne : 0 < size mdisk;
cycle_mring : cycle mrlink mring;
simple_mring : uniq (maps end0g mring);
mring_def : mring =1 (fun x => mdisk (halfg x) && negb (mdisk (halfg (gedge x))))
}.
Lemma mring_edge : forall m p, mring m p -> negb (mring m (gedge p)).
Proof.
by case=> /= [c d _ _ _ Dc] p; rewrite !Dc; move/andP=> [_ H]; rewrite (negbE H).
Qed.
(* Initial single_square matte. *)
Section PointMatte.
Variable p : gpoint.
Let pmdisk : gpointseq := Adds p seq0.
Let pmring : gpointseq := maps (fun d => consg d p) (Seq Gb00 Gb10 Gb11 Gb01).
Lemma pmatte_ne : 0 < size pmdisk. Proof. done. Qed.
Lemma cycle_pmring : cycle mrlink pmring.
Proof. by rewrite /= /mrlink /end0g /end1g !halfg_cons !oddg_cons /= !set11. Qed.
Lemma simple_pmring : uniq (maps end0g pmring).
Proof.
by rewrite /= /setU1 /end0g !halfg_cons !oddg_cons !(monic_eqd (addg_inv p)).
Qed.
Lemma pmring_def :
pmring =1 (fun x => pmdisk (halfg x) && negb (pmdisk (halfg (gedge x)))).
Proof.
move=> x; rewrite -{1}[x]consg_odd_half halfg_edge.
case Hx: (pmdisk (halfg x)).
rewrite /= /setU1 orbF in Hx; rewrite -(eqP Hx) /= /setU1 !orbF.
rewrite -{9}[p]addg0 (monic_eqd (addg_inv p)).
by case (oddg x); rewrite set11 ?orbT.
apply/mapsP => [[d _ Dx]]; move: (congr1 halfg Dx); rewrite !halfg_cons.
by move=> Dp; rewrite /pmdisk Dp /= setU11 in Hx.
Qed.
Definition point_matte := Matte pmatte_ne cycle_pmring simple_pmring pmring_def.
End PointMatte.
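
(* Usage sketch: [point_matte p] is the matte whose disk is the single
   square [p] and whose ring lists that square's four edges. *)
Check point_matte. (* point_matte : gpoint -> matte *)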
(* Grid refinement for mattes. *)
Section RefineMatte.
Fixpoint refine_mring (c : gpointseq) : gpointseq :=
if c is Adds p c' then
Seq (consg (oddg p) p) (consg (oddg p) (gface p)) & (refine_mring c')
else seq0.
Fixpoint refine_mdisk (md : gpointseq) : gpointseq :=
if md is Adds p md' then
Seq (consg Gb00 p) (consg Gb10 p) (consg Gb11 p) (consg Gb01 p)
& (refine_mdisk md')
else seq0.
Lemma mem_refine_mdisk : forall md p, refine_mdisk md p = md (halfg p).
Proof.
move=> md p; elim: md => //= [q md Hrec]; rewrite /setU1 Hrec !orbA; congr orb.
rewrite -!orbA; apply/idP/eqP; first by case/or4P=> H; rewrite -(eqP H) halfg_cons.
by rewrite -{-1}[p]consg_odd_half=> <-; case (oddg p); rewrite set11 /= ?orbT.
Qed.
Variable m : matte.
Lemma refine_matte_ne : 0 < size (refine_mdisk m).
Proof. by case: (mdisk m) (matte_ne m). Qed.
Lemma cycle_refine_mring : cycle mrlink (refine_mring (mring m)).
Proof.
case: (mring m) (cycle_mring m) => // [p0 c]; rewrite !(cycle_path p0).
rewrite {1}/last -/(last p0); set q := last p0 c.
have <-: consg (oddg q) (gface q) = last p0 (refine_mring (Adds p0 c)).
by rewrite {}/q /=; elim: c p0 => /=.
elim: {p0 c}(Adds p0 c) q => //= [p c Hrec] q; move/andP=> [Hqp Hc]; move: Hqp.
rewrite {}Hrec {c Hc}// andbT /mrlink /end0g /end1g !halfg_cons !oddg_cons.
move/eqP=> Dp; rewrite /gface -{1 3}[p]consg_odd_half /consg.
rewrite addgA -addgA -(addgC (halfg q)) {}Dp addgA (addgC (halfg p)) !addgA set11.
by rewrite addgC -!addgA addgCA addgC -!addgA set11.
Qed.
Lemma mem_refine_mring : forall (c : gpointseq) p,
reflect (exists2 q, c q & p = consg (oddg q) q \/ p = consg (oddg q) (gface q))
(refine_mring c p).
Proof.
move=> c p; elim: c => [|q c Hrec] /=; first by right; case.
apply: (iffP setU1P).
case; first by exists q; [ exact: setU11 | left ].
case/setU1P; first by exists q; [ exact: setU11 | right ].
by case/Hrec=> [q' Hq' Dp]; exists q'; first exact: setU1r.
case=> q'; case/setU1P=> [<-|Hq'].
by case=> <-; [ left | right; exact: setU11 ].
by right; apply setU1r; apply/Hrec; exists q'.
Qed.
Lemma simple_refine_mring : uniq (maps end0g (refine_mring (mring m))).
Proof.
elim: (mring m) (simple_mring m) (@mring_edge m) => //= [p c Hrec].
move/andP=> [Hp Uc] HpcE.
have HcE: forall q, c q -> negb (c (gedge q)).
by move=> q Hq; case/norP: (HpcE _ (setU1r _ Hq)).
rewrite {}Hrec {Hc HcE}// andbT; apply/andP; split.
apply/setU1P => [[Dp|Hp']].
rewrite /end0g !halfg_cons !oddg_cons -!(addgC (oddg p)) in Dp.
move: (congr1 oddg (monic_inj (addg_inv _) Dp)).
by rewrite oddg_face; case (oddg p).
case/mapsP: Hp' => [q Hq Dq].
case/mem_refine_mring: Hq => [q' Hq' [Dq'|Dq']].
case/mapsP: Hp; exists q'; auto; rewrite -halfg_add_odd addgC.
move: Dq; rewrite -!halfg_add_odd Dq' !oddg_cons addgC /consg addgA.
rewrite (addgC (oddg p)) -(addgA (p +g p)) !halfg_add_double !halfg_double.
by move <-.
move: Dq; rewrite -!halfg_add_odd Dq' !oddg_cons addgC /consg addgA.
rewrite (addgC (oddg p)) -(addgA (p +g p)) !halfg_add_double !halfg_double.
move/(congr1 oddg); rewrite /gface /consg addgA oddg_add_double.
rewrite (addgC p) -{2}[p]consg_odd_half /consg addgA oddg_add_double oddg_double.
by case (oddg q').
apply/mapsP => [[q Hq]]; move/esym; rewrite -!halfg_add_odd oddg_cons.
rewrite addgC /consg addgA halfg_add_double halfg_double (addgC q).
case/mem_refine_mring: Hq => [q' Hq' [Dq|Dq]];
rewrite {}Dq oddg_cons /consg addgA halfg_add_double halfg_double.
- move/(congr1 oddg); rewrite /gface /consg addgA oddg_add_double.
rewrite -{2}[q']consg_odd_half /consg addgA oddg_add_double oddg_double.
by case (oddg p).
case/norP: {HpcE}(HpcE _ (setU1r _ Hq')) => [Epq' _].
rewrite /gface /consg addgCA -/(consg (oddg p) (halfg p)) consg_odd_half.
rewrite addgCA consgI consg_odd_half=> Dq'.
rewrite /gedge addgA (addgC q') -Dq' -addgA addgCA -{1}[p]addg0 in Epq'.
rewrite (monic_eqd (addg_inv p)) in Epq'.
have Dq1: oddg q' = oddg p.
move: (congr1 oddg Dq'); rewrite oddg_add {1}[oddg]lock oddg_add -lock.
by case: (oddg p) Epq'; case: (oddg q').
rewrite Dq1 in Dq'; case/mapsP: Hp; exists q'; auto.
by rewrite (monic_inj (addg_inv _) Dq').
Qed.
Lemma refine_mring_def : let md' := refine_mdisk (mdisk m) in
refine_mring (mring m)
=1 (fun p => md' (halfg p) && negb (md' (halfg (gedge p)))).
Proof.
move: (mdisk m) (mring_def m) (mem_refine_mdisk (mdisk m)) => md Dmd Dmd' /= p.
rewrite /= !{}Dmd' halfg_edge.
apply/(mem_refine_mring _ _)/andP => [[q Hq Dp]|[Hp Hep]].
rewrite Dmd halfg_edge addgC in Hq; case/andP: Hq => [Hq HqE].
by case: Dp => ->; rewrite !halfg_cons oddg_cons ?halfg_face; split;
rewrite // addgC halfg_add ?halfg_face ?oddg_face; case: (oddg q) HqE.
set q := halfg p; set d := oddg p; rewrite addgC halfg_add addgC -/q -/d in Hep.
have Hqd: set2 d (ccw d) (oddg q).
move: Hp Hep; rewrite -/q -{1}[halfg q]addg0.
by case: d; case: (oddg q) => //= H; case/idP.
exists (consg (oddg p) (halfg (halfg p))).
rewrite Dmd halfg_edge halfg_cons oddg_cons Hp -/q -/d /=.
by case: (oddg q) Hqd Hep; case d.
rewrite oddg_cons /gface oddg_cons halfg_cons -/d -/q.
case/orP: Hqd; move/eqP=> Dq; rewrite {2}[d]Dq /q /d !consg_odd_half; auto.
Qed.
Definition refine_matte :=
Matte refine_matte_ne cycle_refine_mring simple_refine_mring refine_mring_def.
Lemma mem_refine_matte : forall p, refine_matte p = m (halfg p).
Proof. exact: mem_refine_mdisk. Qed.
Lemma refine_matte_refined : forall r, refined_in (refine_matte : set _) r.
Proof. by move=> r p q _ Dq; rewrite !mem_refine_matte Dq. Qed.
End RefineMatte.
Section ExtendMatte.
Variable m : matte.
Definition ext_mdisk p : gpointseq := Adds (halfg p) m.
Lemma ext_mdisk_ne : forall p, 0 < size (ext_mdisk p).
Proof. done. Qed.
Definition ehex p := gchop_rect (gtouch (halfg p)) p.
Definition equad p := gchop_rect (ehex p) (gface p).
Lemma ehexF : forall p, sub_set (equad p) (ehex (gface p)).
Proof.
move=> p q; rewrite /equad /ehex halfg_face !mem_gchop_rect /setI.
by rewrite mem_gchop_rect /setI -andbA andbCA; case/andP.
Qed.
Lemma end0g_equad : forall p, negb (has (equad (gface p)) m) ->
m (end0g p) = false.
Proof.
move=> p Hp; apply/idP => [Hmp]; case/idP: {Hp Hmp}(hasPn Hp _ Hmp).
rewrite /equad /ehex /gchop_rect !halfg_face !oddg_face /= /end0g.
case: (halfg p) => [x y] /=; rewrite !leq_decz_z !leq_z_incz.
case (oddg p); rewrite /= ?addz0 -?incz_def -?decz_def;
do 2 rewrite ?leqzz ?leq_decz_z ?leq_z_incz //.
Qed.
Lemma mring_equad : forall p, negb (has (equad (gface p)) m) ->
maps end0g (mring m) (end0g p) = false.
Proof.
move=> p Hp; apply/mapsP => [[q Hq Eqp]].
rewrite mring_def in Hq; move/andP: Hq => [Hq _]; case/idP: (hasPn Hp _ Hq).
rewrite /equad /ehex /gchop_rect !halfg_face !oddg_face /=.
rewrite /end0g addgC in Eqp; rewrite (monic_move (addg_inv _) Eqp) addgC -addgA.
case: (halfg p) => [x y] /=; rewrite !leq_decz_z !leq_z_incz.
case (oddg p); case (oddg q); rewrite /= ?addz0 -?incz_def -?decz_def;
do 2 rewrite ?leqzz ?leq_decz_z ?leq_z_incz //.
Qed.
Section Extend1.
Variable p : gpoint.
Definition ext1_hex := m (halfg (gedge p)) && negb (has (ehex p) m).
Hypothesis HpE : ext1_hex.
Remark Extend1_HpF : mdisk m (halfg p) = false.
Proof.
apply/idP => [Hdp]; case/andP: HpE => _; case/hasP; exists (halfg p); auto.
by rewrite /ehex mem_gchop_rect /setI gtouch_refl gchop_halfg.
Qed.
Let HpF := Extend1_HpF.
Remark Extend1_Hp : mring m (gedge p).
Proof. by rewrite mring_def gedge2 HpF andbT; case/andP: HpE. Qed.
Let Hp := Extend1_Hp.
Remark Extend1_Hp1 : negb (has (equad (iter 3 gface p)) m).
Proof.
apply/hasP => [[q Hmq Hq]]; case/andP: HpE => _; case/hasP; exists q; first done.
by rewrite -[p]gface4; apply ehexF.
Qed.
Let Hp1 := Extend1_Hp1.
Remark Extend1_Hp2 : negb (has (equad (iter 4 gface p)) m).
Proof.
apply/hasP => [[q Hmq Hq]]; case/andP: HpE => _; case/hasP; exists q; first done.
by rewrite /iter gface4 /equad mem_gchop_rect in Hq; case/andP: Hq.
Qed.
Let Hp2 := Extend1_Hp2.
Definition ext1_mring : gpointseq :=
let: RotToSpec _ c _ := rot_to Hp in cat (traject gface (gface p) 3) c.
Lemma cycle_ext1_mring : cycle mrlink ext1_mring.
Proof.
rewrite /ext1_mring; case: (rot_to Hp) (cycle_mring m) => [i c Dc].
rewrite -(cycle_rot i) {i}Dc !(cycle_path p) /=.
rewrite {1 3 4 5}/mrlink !end0g_face !set11 /= end0g_edge.
case: c => [|q c] /=; first by rewrite end1g_edge -{1}[p]gface4 end0g_face.
by rewrite {1 3}/mrlink end1g_edge -{2}[p]gface4 end0g_face.
Qed.
Lemma simple_ext1_mring : uniq (maps end0g ext1_mring).
Proof.
rewrite /ext1_mring; move: (mring_equad Hp2) (simple_mring m).
case: (rot_to Hp) => [i c Dc]; rewrite -(uniq_rot i) -(mem_rot i).
move: (mring_equad Hp1); rewrite -(mem_rot i) -maps_rot.
rewrite {i}Dc [uniq]lock /= !end0g_edge -!end0g_face -lock.
move: {c}(maps end0g c) (gface p) => c q /=; rewrite /setU1.
move/norP=> [Uqq1 Ucq1]; move/norP=> [Uqq2 Ucq2]; move/andP=> [Ucq Uc].
rewrite {}Uc (negbE Ucq) (negbE Ucq1) (negbE Ucq2) eqd_sym (negbE Uqq1).
rewrite eqd_sym (negbE Uqq2) /= orbF andbT end0g_face; apply/eqP.
by move/(monic_inj (addg_inv _)); case (oddg (gface q)).
Qed.
Remark Extend1_HpEF :
all (fun q => negb (m (halfg (gedge q)))) (traject gface (gface p) 3).
Proof.
apply/allP => [q Hq]; apply/idP => [Hmq]; case/andP: HpE => _; case/hasP.
exists (halfg (gedge q)); first done; case/trajectP: Hq => [i Hi <-] {q Hmq}.
rewrite halfg_edge iter_f halfg_iter_face oddg_iter_face /ehex /gchop_rect.
case: (halfg p) => [x y]; rewrite /= ?leq_decz_z ?leq_z_incz.
case (oddg p); case: i Hi => [|[|[|i]]] //= _; rewrite ?addz0 -?incz_def;
by rewrite -?decz_def ?leqzz ?leq_decz_z ?leq_z_incz ?leq_decz_incz.
Qed.
Let HpEF := Extend1_HpEF.
Lemma ext1_mring_def :
ext1_mring
=1 (fun q => ext_mdisk p (halfg q) && negb (ext_mdisk p (halfg (gedge q)))).
Proof.
move=> q; rewrite /ext1_mring; case: (rot_to Hp) => [i c Dc] /=.
case/and4P: HpEF => [Hep1 Hep2 Hep3 _].
rewrite /setU1; case Hq1: (gface p =d q).
move/eqP: Hq1 => <- {q}/=; rewrite halfg_face set11 /=.
by rewrite -halfg_face neq_halfg_edge /=.
case Hq2: (gface (gface p) =d q).
move/eqP: Hq2 => <- {q Hq1}/=; rewrite !halfg_face set11 /=; symmetry.
by rewrite -2!halfg_face neq_halfg_edge /=.
case Hq3: (gface (gface (gface p)) =d q).
move/eqP: Hq3 => <- {q Hq1 Hq2}/=; rewrite !halfg_face set11 /=; symmetry.
by rewrite -3!halfg_face neq_halfg_edge /=.
case Hcq: (mring m q).
move: (Hcq) (maps_uniq (simple_mring m)) (Hcq) => Hdq.
rewrite -(mem_rot i) -(uniq_rot i) Dc /= /setU1; case/andP.
case Hpq: (gedge p =d q).
by move/eqP: Hpq => <-; rewrite gedge2 set11 andbF; move/negPf.
rewrite mring_def in Hdq; case/andP: Hdq => [Hdq Hdeq] _ _ /= Hcq'.
rewrite {}Hcq' {}Hdq {Hdeq}(negbE Hdeq) orbT orbF; symmetry; apply/eqP => Dp.
move: (Hcq); rewrite mring_def; case/nandP; left.
case Heq1: (gedge (gface p) =d q); first by move/eqP: Heq1 => <-.
case Heq2: (gedge (gface (gface p)) =d q); first by move/eqP: Heq2 => <-.
case Heq3: (gedge (gface (gface (gface p))) =d q); first by move/eqP: Heq3 => <-.
elimtype False; move: Hpq Heq1 Heq2 Heq3; rewrite !(monic2_eqd gedge2 gedge2).
rewrite -[gedge q]consg_odd_half -{1}[p]consg_odd_half -Dp.
rewrite {1 2 4}/gface !halfg_face !oddg_face /consg.
set p2 := halfg p +g halfg p; rewrite -!(addgC p2) !(monic_eqd (addg_inv p2)).
by case (oddg p); case (oddg (gedge q)).
move: (Hcq); rewrite -(mem_rot i) Dc /=; move/norP=> [Hpq Hcq'].
rewrite (negbE Hcq') /=; symmetry.
case Hpq': (halfg p =d halfg q).
case Hq0: (p =d q).
move: Hp; rewrite (eqP Hq0) mring_def; move/andP=> [Dp _].
by rewrite Dp orbT andbF.
move: Hq0 Hq1 Hq2 Hq3; rewrite -[p]consg_odd_half -[q]consg_odd_half (eqP Hpq').
rewrite /gface !oddg_cons !halfg_cons /consg.
set q2 := halfg q +g halfg q; rewrite -!(addgC q2) !(monic_eqd (addg_inv q2)).
by case (oddg p); case (oddg q).
case: (halfg p =d halfg (gedge q)); first by rewrite andbF.
by rewrite /= -mring_def.
Qed.
Definition ext1_matte :=
Matte (ext_mdisk_ne _) cycle_ext1_mring simple_ext1_mring ext1_mring_def.
End Extend1.
Section Extend2.
Variable p : gpoint.
Definition ext2_quad :=
and3b (mdisk m (halfg (gedge p))) (mdisk m (halfg (gedge (gface p))))
(negb (has (equad p) m)).
Hypothesis HpE : ext2_quad.
Remark Extend2_HpF : m (halfg p) = false.
Proof.
apply/idP => Hp; case/and3P: HpE => _ _; case/hasP; exists (halfg p); auto.
rewrite /equad /ehex /gchop_rect halfg_face oddg_face.
case: (halfg p) => [x y]; rewrite /= ?leq_decz_z ?leq_z_incz.
by case (oddg p); rewrite /= ?leq_decz_z ?leq_z_incz ?leqzz.
Qed.
Let HpF := Extend2_HpF.
Remark Extend2_Hp1 : negb (has (equad (iter 4 gface p)) m).
Proof. by rewrite /iter gface4; case/and3P: HpE. Qed.
Let Hp1 := Extend2_Hp1.
Remark Extend2_Hefp : mring m (gedge (gface p)).
Proof. by rewrite mring_def gedge2 halfg_face HpF andbT; case/and3P: HpE. Qed.
Let Hefp := Extend2_Hefp.
Remark Extend2_Hep : mring m (gedge p).
Proof. by rewrite mring_def gedge2 HpF andbT; case/andP: HpE. Qed.
Let Hep := Extend2_Hep.
Remark Extend2_Hp : {c : gpointseq & {i : nat |
rot i (mring m) = Seq (gedge (gface p)) (gedge p) & c}}.
Proof.
case/rot_to: Hefp => [i [|p' c] Dp].
move: (cycle_mring m); rewrite -(cycle_rot i) Dp /= /mrlink.
rewrite /end0g /end1g (monic_eqd (addg_inv _)) oddg_edge oddg_face.
by case (oddg p).
exists c; exists i; rewrite Dp; do 2 congr Adds.
move: (cycle_mring m) Hep (simple_mring m).
rewrite -(cycle_rot i) -(mem_rot i) -(uniq_rot i) -maps_rot Dp /mrlink.
move/andP=> [Dp' _]; rewrite end1g_edge end0g_face in Dp'.
rewrite -(mem_rot 1) -(uniq_rot 1) -maps_rot rot1_adds /=.
case/setU1P=> // [Hp']; case/andP; case/mapsP; exists (gedge p); auto.
by rewrite end0g_edge (eqP Dp').
Qed.
Let Hp := Extend2_Hp.
Definition ext2_mring : gpointseq :=
let: existS c _ := Hp in cat (traject gface (gface (gface p)) 2) c.
Lemma cycle_ext2_mring : cycle mrlink ext2_mring.
Proof.
rewrite /ext2_mring; case: Hp (cycle_mring m) => [c [i Dc]].
rewrite -(cycle_rot i) {i}Dc !(cycle_path p) /=.
rewrite {1 2 4 5}/mrlink end1g_edge !end0g_face !end0g_edge !set11 /=.
case: c => [|q c] /=; first by rewrite end1g_edge -{1}[p]gface4 end0g_face.
by rewrite {1 3}/mrlink end1g_edge -{2}[p]gface4 end0g_face.
Qed.
Lemma simple_ext2_mring : uniq (maps end0g ext2_mring).
Proof.
rewrite /ext2_mring -(uniq_rot 1).
case: Hp (mring_equad Hp1) (simple_mring m) => [c [i Dc]].
rewrite -(uniq_rot i) -(mem_rot i) -(uniq_rot 1) -(mem_rot 1) -!maps_rot.
rewrite {i}Dc [maps]lock /= -!lock !rot1_adds /= !maps_add_last.
by rewrite !end0g_edge !end0g_face; move/norP=> [_ ->]; case/andP.
Qed.
Remark Extend2_HpEF :
all (fun q => negb (m (halfg (gedge q)))) (traject gface (gface (gface p)) 2).
Proof.
apply/allP => q Hq; apply/idP => Hmq; case/and3P: HpE => _ _; case/hasP.
exists (halfg (gedge q)); first done; case/trajectP: Hq => [i Hi <-] {q Hmq}.
rewrite halfg_edge !iter_f halfg_iter_face oddg_iter_face.
rewrite /equad /ehex /gchop_rect halfg_face oddg_face.
case: (halfg p) => [x y]; rewrite /= ?leq_decz_z ?leq_z_incz.
case (oddg p); rewrite /= ?leq_decz_z ?leq_z_incz;
case: i Hi => [|[|i]] //= _; rewrite ?addz0 -?incz_def -?decz_def;
by rewrite ?leqzz ?leq_decz_z ?leq_z_incz.
Qed.
Let HpEF := Extend2_HpEF.
Lemma ext2_mring_def :
ext2_mring
=1 (fun q => ext_mdisk p (halfg q) && negb (ext_mdisk p (halfg (gedge q)))).
Proof.
move=> q; rewrite /ext2_mring; case: Hp => [c [i Dc]] /=; rewrite /setU1.
case/and3P: HpEF => Hep2 Hep3 _.
case Hq2: (gface (gface p) =d q).
move/eqP: Hq2 => <- {q}/=; rewrite !halfg_face set11 /=; symmetry.
by rewrite -2!halfg_face neq_halfg_edge /=.
case Hq3: (gface (gface (gface p)) =d q).
move/eqP: Hq3 => <- {q Hq2}/=; rewrite !halfg_face set11 /=; symmetry.
by rewrite -3!halfg_face neq_halfg_edge /=.
case Hcq: (mring m q).
move: (Hcq) (maps_uniq (simple_mring m)) (Hcq) => Hdq.
rewrite -(mem_rot i) -(uniq_rot i) Dc /= /setU1; case/and3P; case/norP=> _.
case Hepq: (gedge (gface p) =d q).
by rewrite -(eqP Hepq) gedge2 halfg_face set11 andbF; move/negPf.
case Hpq: (gedge p =d q).
by rewrite -(eqP Hpq) gedge2 set11 andbF; clear; move/negPf.
rewrite mring_def in Hdq; case/andP: Hdq => Hdq Hdeq _ _ _ /= ->.
rewrite {}Hdq {Hdeq}(negbE Hdeq) orbT orbF; symmetry; apply/eqP => Dp.
move: (Hcq); rewrite mring_def; case/nandP; left.
case Heq2: (gedge (gface (gface p)) =d q); first by rewrite -(eqP Heq2).
case Heq3: (gedge (gface (gface (gface p))) =d q); first by rewrite -(eqP Heq3).
elimtype False; move: Hpq Hepq Heq2 Heq3; rewrite !(monic2_eqd gedge2 gedge2).
rewrite -[gedge q]consg_odd_half -{1}[p]consg_odd_half -Dp.
rewrite {1 2 4}/gface !halfg_face !oddg_face /consg.
set p2 := halfg p +g halfg p; rewrite -!(addgC p2) !(monic_eqd (addg_inv p2)).
by case (oddg p); case (oddg (gedge q)).
move: (Hcq); rewrite -(mem_rot i) Dc /=.
case/norP=> Hpq; case/norP=> Hpeq Hcq'; rewrite (negbE Hcq') /=; symmetry.
case Hpq': (halfg p =d halfg q).
case Hq0: (p =d q).
by move: Hep; rewrite (eqP Hq0) mring_def; case/andP=> ->; rewrite orbT andbF.
case Hq1: (gface p =d q).
by move: Hefp; rewrite (eqP Hq1) mring_def; case/andP=> ->; rewrite orbT andbF.
move: Hq0 Hq1 Hq2 Hq3; rewrite -[p]consg_odd_half -[q]consg_odd_half (eqP Hpq').
rewrite /gface !oddg_cons !halfg_cons /consg.
set q2 := halfg q +g halfg q; rewrite -!(addgC q2) !(monic_eqd (addg_inv q2)).
by case (oddg p); case (oddg q).
case: (halfg p =d halfg (gedge q)); first by rewrite andbF.
by rewrite /= -mring_def.
Qed.
Definition ext2_matte :=
Matte (ext_mdisk_ne _) cycle_ext2_mring simple_ext2_mring ext2_mring_def.
End Extend2.
End ExtendMatte.
Section MatteExtension.
Variable m : matte.
Inductive matte_extension : matte -> Set :=
| Mext_nil : matte_extension m
| Mext_add : forall (p : gpoint) (xm' xm : matte),
matte_extension xm' -> mring xm' (gedge p) -> xm =1 ext_mdisk xm' p ->
matte_extension xm.
Implicit Arguments Mext_add [].
Lemma mem_extension : forall xm, matte_extension xm -> sub_set m xm.
Proof.
move=> xm; elim: xm / => [|p xm' xm _ Hrec _ Dxm] q Hq //.
by rewrite Dxm /= setU1r ?Hrec.
Qed.
Inductive extends_in (r : grect) (p : gpoint) : Set :=
ExtendIn (xm : matte)
(_ : matte_extension xm) (_ : sub_set xm (setU r m)) (_ : xm p).
Lemma extends_in_sub : forall r1 r2 : grect, sub_set r1 r2 ->
forall p, extends_in r1 p -> extends_in r2 p.
Proof.
move=> r1 r2 Hr12 p [xm Hxm Hxmr Hp]; exists xm; auto.
by move=> q Hq; apply/orP; case/orP: (Hxmr _ Hq); auto.
Qed.
Definition inset r p := sub_grect (gtouch p) r.
Lemma refined_extends_in : forall r : grect,
refined_in (m : set _) r -> has r m ->
forall p, inset r p -> extends_in r p.
Proof.
move=> r Hr0m Hrm p Hirp; have Hrr0: sub_set r r by move.
have Hr0r: sub_set (setI r m) r by move=> q; case/andP.
have Hr0p: sub_set (gtouch p) r by apply mem_sub_grect.
have Hrp: r p by apply Hr0p; exact: gtouch_refl.
move: {-1}r {1 3 4 5 8 11}r p (ltnSn (garea r)) Hrm Hrp Hrr0 Hr0r Hr0p Hr0m {Hirp}.
elim: {r}(S (garea r)) => // [n Hrec] r0 r p Hn Hrm Hrp Hrr0 Hr0r Hr0p Hr0m.
set G := extends_in r p; rewrite ltnS in Hn.
case Hmp: (mdisk m p).
by exists m; try first [ left | move=> q Hq; rewrite /setU Hq orbT ].
have Hxm1: forall p', halfg p' = p -> negb (has (ehex p') m) ->
extends_in (gchop_rect r (gedge p')) (halfg (gedge p')) -> G.
move=> p' Dp' Hep' [xm Hxm Hxmr' Hxmp'].
have Hp': ext1_hex xm p'.
rewrite /ext1_hex Hxmp'; apply/hasPn => q Hq.
case/orP: {Hq}(Hxmr' _ Hq); last by move=> *; apply (hasPn Hep').
rewrite /ehex !mem_gchop_rect /setI gchop_edge.
by case/andP=> *; apply/nandP; right.
exists (ext1_matte Hp'); rewrite /= ?Dp' ?setU11 //.
right with p' xm; rewrite // mring_def gedge2 Dp' Hxmp'.
apply/idP; move/Hxmr'; rewrite /setU Hmp mem_gchop_rect /setI gchop_edge.
by rewrite -Dp' /setC gchop_halfg andbF.
move=> q; case/setU1P=> [<-|Hq]; first by rewrite /setU Hrp.
apply/orP; case/orP: (Hxmr' _ Hq); try (rewrite mem_gchop_rect; case/andP); auto.
have Hcut: forall p', halfg p' = p -> has (setD r0 (gchop1 p')) m -> G.
move=> p' Dp' Hr0p'; set r0' := gchop1_rect r0 p'.
case Hr0': (has r0' m).
set r' := gchop1_rect r p'.
have Hr'r: sub_set r' r by exact: gchop_rect_sub.
apply: (extends_in_sub Hr'r); apply (Hrec r0').
- case/hasP: Hr0p' => [q Hmq]; move/andP=> [Hp'q Hr0q].
apply: leq_trans Hn; apply: ((ltn_garea_sub_rect Hr'r) q).
by rewrite /setD /r' mem_gchop1_rect /setI (negbE Hp'q) Hr0r //= /setI Hr0q.
- case/hasP: Hr0' => [q Hmq]; rewrite /r0' mem_gchop1_rect.
move/andP=> [Hr0q Hp'q]; apply/hasP; exists q; first done.
by rewrite /r' mem_gchop1_rect /setI Hp'q Hr0r //= /setI Hr0q.
- by rewrite /r' mem_gchop1_rect /setI Hrp gchop_chop1 // -Dp' gchop_halfg.
- move=> q; rewrite /r' /r0' !mem_gchop1_rect /setI.
by case/andP=> *; apply/andP; auto.
- move=> q; rewrite /r' /r0' /setI !mem_gchop1_rect /setI -andbA andbCA.
by case/andP=> *; apply/andP; auto.
- move=> q Hq; rewrite /r0' mem_gchop1_rect; apply/andP; split; auto.
by move: Hq; rewrite -Dp' gtouch_chop1; case/andP.
move=> q q' Hq; exact (Hr0m q q' (gchop_rect_sub Hq)).
apply (Hxm1 _ Dp').
apply/hasP => [[q Hmq]]; rewrite /ehex mem_gchop_rect Dp'.
move/andP=> [Hpq _]; case/hasP: Hr0'; exists q; first done.
rewrite /r0' mem_gchop1_rect /setI Hr0p //=.
by move: Hpq; rewrite -Dp' gtouch_chop1; case/andP.
set r' := gchop_rect r (gedge p').
have Hr'r: sub_set r' r by exact: gchop_rect_sub.
have Hr'm: has r' m.
case/hasP: Hr0p' => [q Hmq]; move/andP=> [Hp'q1 Hr0q]; apply/hasP.
exists q; rewrite // /r' mem_gchop_rect /setI Hr0r /setI ?Hr0q // gchop_edge.
by apply/idP => Hp'q; case/idP: Hp'q1; apply gchop_chop1.
apply (Hrec r0); auto.
- apply: leq_trans Hn; apply ((ltn_garea_sub_rect Hr'r) p); rewrite /setD /r'.
by rewrite mem_gchop_rect /setI Hrp gchop_edge /setC -Dp' gchop_halfg.
- by case/hasP: Hr'm => q _ Hq; apply: gchop_rect_edge Hq; rewrite gedge2 Dp'.
- by move; auto.
- move=> q Hq; rewrite /r' mem_gchop_rect /setI Hr0r //= gchop_edge.
apply/idP => [Hp'q]; case/andP: Hq => [Hr0q Hmq]; case/hasP: Hr0'.
by exists q; rewrite // /r0' mem_gchop1_rect /setI Hr0q; apply: gchop_chop1.
set p2 := gedge (gface (gface (gedge p'))).
have Hp2: gchop_rect r0 p2 (halfg p2).
case/hasP: Hr0p' => [q Hmq Hq0]; apply gchop_rect_edge with q.
by apply Hr0p; rewrite /p2 gedge2 !halfg_face -Dp' gtouch_edge.
by rewrite mem_gchop_rect /p2 /setI gchop_edge andbC.
rewrite mem_gchop_rect /setI gchop_halfg andbT in Hp2.
have Hp1: r0 (halfg (gedge (gface p'))).
by apply Hr0p; rewrite -Dp' -halfg_face gtouch_edge.
have Hp3: r0 (halfg (gedge (gface (gface (gface p'))))).
by apply Hr0p; rewrite -Dp' -3!halfg_face gtouch_edge.
apply mem_sub_grect; move: Hp1 Hp2 Hp3.
rewrite /p2 !halfg_edge !oddg_face !halfg_face !oddg_edge halfg_edge ccw4 Dp'.
rewrite -!addgA; case: (r0) (p) => [x0 x1 y0 y1] [x y].
case (oddg p'); case/and4P=> [Hx01 Hx11 Hy01 Hy11];
case/and4P=> [Hx02 Hx12 Hy02 Hy12]; case/and4P=> [Hx03 Hx13 Hy03 Hy13];
by rewrite /= ?incz_def ?decz_def -!addzA; apply/and4P; split.
pose np q := consg (ccw (ccw (ccw (oddg q)))) q.
have Enph: forall q, halfg (np q) = q by move=> *; rewrite /np halfg_cons.
have Enpo: forall q, oddg (np q) = ccw (ccw (ccw (oddg q))).
by move=> *; rewrite /np oddg_cons.
have EnpE: forall q, halfg (gedge (np q)) = gnode q.
by move=> q; rewrite halfg_edge /gnode /np halfg_cons oddg_cons; case (oddg q).
have DnpN: forall q, np (gnode q) = gedge (gnode (gedge (np q))).
move=> q; apply (monic_move gmonicF).
by rewrite /gface Enph Enpo oddg_node -Enpo -oddg_edge -EnpE consg_odd_half.
have Hnp4: forall q, r0 q -> has (equad (np q)) m -> m q.
move=> q0 Hr0q0; move/hasP=> [q Hmq Hq]; rewrite -(Hr0m _ q Hr0q0) //.
apply: eqP; move: Hq {Hmq}; rewrite /equad /ehex /gchop_rect halfg_face.
rewrite oddg_face Enph Enpo ccw4 -{1 2 4}[q0]consg_odd_half /consg addgC.
case: (halfg q0) q => [x y] [x' y']; rewrite eqd_sym /eqd /=.
rewrite !eqz_leq -!andbA !leqz_halfl !leqz_halfr.
case (oddg q0);
by rewrite /= ?addz0 ?addn0 ?leq_decz_z ?leq_z_incz -?incz_def ?decz_inc.
have Eq2x2: forall q q', ehex q q' \/ ehex (gface (gedge (gface q))) q' ->
equad q q' \/ equad (gedge (gface q)) q'.
have Ec1c: forall q q' b, gchop1 q q' && b && gchop q q' = gchop q q' && b.
move=> q q' b; case Hqq': (gchop q q'); last by rewrite andbF.
by rewrite andbT (gchop_chop1 Hqq').
move=> q q'; repeat rewrite /equad /ehex mem_gchop_rect /setI.
rewrite !gtouch_chop1 /all /traject !andbT gface4 !Ec1c.
rewrite {-12 13}[andb]lock andbCA -!lock Ec1c gchopFEF.
case (gchop q q'); rewrite // !andTb Ec1c.
set q2q' := gchop1 (gface (gface q)) q'.
have <-: q2q' = gchop1 (gface (gface (gface (gedge (gface q))))) q'.
rewrite /gchop1 -gchopFEF -!gnode_face !gface4; congr gchop; congr gface.
by symmetry; apply: (monic_move gmonicN); rewrite gnode4.
case: q2q'; rewrite ?andbF // !andTb !andbT gchop_edge /setC.
case Hqq': (gchop (gface q) q').
by case; case/andP; left; rewrite // -[gface q]gedge2; apply: gchop_chop1.
by case; case/andP; right; first exact: gchop_chop1.
have Hr0np: r0 (gnode p) by apply Hr0p; rewrite -EnpE -{1}[p]Enph gtouch_edge.
have EnpFE: forall q, np (gface (gedge q)) = gedge (gface (np q)).
by move=> q; rewrite -{2}[q]gmonicE DnpN gmonicN gedge2.
have Hr0fep: r0 (gface (gedge p)).
apply Hr0p; rewrite -[gface (gedge p)]Enph -[np (gface (gedge p))]gedge2.
by rewrite -{1}[p]gmonicE -EnpE gtouch_edge.
case Hqpm: (has (equad (np p)) m); first by rewrite Hnp4 ?Hrr0 in Hmp.
case Hhpm: (has (ehex (np p)) m).
have Hmefp: m (halfg (gedge (gface (np p)))).
rewrite -EnpFE Enph Hnp4 //.
case/hasP: Hhpm => [q Hmq Hq]; apply/hasP; exists q; first done.
case: (Eq2x2 (np p) q) => //; rewrite -?EnpFE //; first by left.
by move=> *; case/hasP: Hqpm; exists q.
case Hhfpm: (has (ehex (gface (np p))) m).
have Hmep: m (halfg (gedge (np p))).
rewrite EnpE Hnp4 //; case/hasP: Hhfpm => [q Hmq Hq].
case: (Eq2x2 (np (gnode p)) q); try rewrite -EnpFE gmonicN; auto.
by move=> *; apply/hasP; exists q.
by move=> *; case/hasP: Hqpm; exists q.
have Hext: ext2_quad m (np p) by rewrite /ext2_quad Hmep Hmefp Hqpm.
exists (ext2_matte Hext); rewrite /= ?Enph ?setU11 //.
right with (np p) m; [ left | idtac | split ].
by rewrite mring_def gedge2 Enph Hmp Hmep.
by move=> q; case/setU1P=> [<-|Hq]; apply/orP; auto.
apply: (Hxm1 (gface (np p))); rewrite ?Hhfpm ?halfg_face //.
exists m; [ left | move=> q Hq; apply/orP; auto | done ].
case Hpm: (negb (has (gtouch p) m)).
pose hrp i := has (setD r0 (gchop1 (iter i gface (np p)))) m.
suffice: {i : nat | hrp i} by case=> i; apply: Hcut; rewrite halfg_iter_face.
case Hh0: (hrp 0); first by exists 0.
case Hh1: (hrp 1); first by exists 1.
case Hh2: (hrp 2); first by exists 2.
case Hh3: (hrp 3); first by exists 3.
case/hasP: Hpm; case/hasP: Hrm => [q Hmq Hq]; exists q; first done.
rewrite -[p]Enph gtouch_chop1; apply/allP => p'; move/trajectP=> [i Hi <-].
case Hhi: (hrp i); first by case/negPf: Hhi; case: i Hi => [|[|[|[|i]]]] //.
by move: (elimFn hasPn Hhi _ Hmq); rewrite /setD Hrr0 //= andbT negb_elim.
apply: (Hxm1 (np p)); rewrite ?Hhpm // EnpE.
case Hmnp: (m (gnode p)).
by exists m; try first [ left | move=> q Hq; rewrite /setU Hq orbT ].
case Hqnpm: (has (equad (np (gnode p))) m); first by rewrite Hnp4 in Hmnp.
have Hr0n2p: r0 (gnode (gnode p)).
rewrite {1}/gnode oddg_node /gnode -!addgA; apply Hr0p; case: {1 2}p => [x y].
by case (oddg p); rewrite /= -?incz_def -?decz_def !leqzz !leq_decz_incz.
have Hmn2p: m (gnode (gnode p)).
apply: Hnp4 => //; apply/hasP; move/hasP: Hpm => [q Hmq Hq]; exists q; auto.
have Hnpq: gchop (gface (np (gnode p))) q.
rewrite DnpN gmonicN gchop_edge; apply/idP => [Hpq]; case/hasP: Hhpm.
by exists q; last by rewrite /ehex mem_gchop_rect Enph /setI Hq.
case: (Eq2x2 (np (gnode (gnode p))) q) => //.
right; rewrite /ehex -EnpFE gmonicN mem_gchop_rect /setI Hnpq halfg_face Enph.
move: Hq Hnpq; rewrite /gchop halfg_face oddg_face Enph Enpo oddg_node ccw4.
rewrite /gnode; case: (p) (q) (oddg p) => [x y] [x' y'] /=.
case; rewrite /= -?incz_def -?decz_def ?addz0 ?decz_inc ?incz_dec andbT;
rewrite !leqz_dec_inc ?leqz_inc_dec; case/and4P=> *; apply/and4P; split;
by rewrite // leqz_dec ?decz_inc; apply/orP; right.
by rewrite -EnpFE gmonicN; move=> *; case/hasP: Hqnpm; exists q.
have Hrnp: r (gnode p).
have Hrn2p: r (gnode (gnode p)) by apply Hr0r; rewrite /setI Hr0n2p.
move: Hrn2p; do 3 rewrite {1}/gnode ?oddg_node; rewrite -!addgA.
case: (r) (p) (oddg p) Hrp => [x0 x1 y0 y1] [x y].
by case; rewrite /= -?incz_def -?decz_def ?decz_inc ?incz_dec ?addz0;
move/and4P=> [Hx0 Hx1 Hy0 Hy1]; case/and4P=> *; apply/and4P; split.
have Hext: ext1_hex m (np (gnode p)).
rewrite /ext1_hex EnpE Hmn2p; apply/hasP => [[q Hmq Hq]].
case: (Eq2x2 (np (gnode p)) q); rewrite -?EnpFE ?gmonicN; auto.
by move=> *; case/hasP: Hqnpm; exists q.
by move=> *; case/hasP: Hqpm; exists q.
exists (ext1_matte Hext); rewrite /= ?Enph ?setU11 //.
right with (np (gnode p)) m; try by try left.
by rewrite mring_def EnpE Hmn2p gedge2 Enph Hmnp.
move=> q /=; case/setU1P=> [<-|Hq]; apply/orP; [ left | by right ].
rewrite mem_gchop_rect /setI Hrnp /gchop EnpE oddg_edge Enpo ccw4.
by case: (gnode p) (oddg p) => [x y]; case; apply: leqzz.
Qed.
(* The refined_extends_in lemma is used directly to show that the union of a *)
(* set of extension mattes included in a region is closed (relative to the   *)
(* region). To show that this union is open we will need the MatteOrder      *)
(* lemmas below. We use the extend_meet lemma to extend the mattes of        *)
(* adjacent regions so that their contours have a common edge.               *)
Lemma extend_meet : forall (m2 : matte) (r : grect),
refined_in (m : set _) r -> has r m -> has (inset r) m2 -> negb (has m m2) ->
{xm : matte | sub_set m xm /\ sub_set xm (setD (setU r m) m2) &
has (fun q => mring m2 (gedge q)) (mring xm)}.
Proof.
move=> m2 r Hmr Hrm Hrm2 Hmm2; rewrite has_sym in Hmm2.
have [p Hm2p Hp]: {p : gpoint | m2 p & inset r p}.
exists (sub neg1g m2 (find (inset r) m2)); last by apply: sub_find.
by apply: mem_sub; rewrite -has_find.
case: (refined_extends_in Hmr Hrm Hp) => [xm Hxm Hxmr Hxmp].
have Hxm2: has m2 xm by apply/hasP; exists p.
elim: {xm}Hxm Hxmr Hxm2 {p Hm2p Hrm2 Hp Hxmp}; first by rewrite (negbE Hmm2).
move=> p xm' xm Hxm' Hrec Hxm'p Dxm Hxmr Hxm2.
have Hxm'r: sub_set xm' (setU r m).
by move=> q Hq; apply Hxmr; rewrite Dxm /= /setU1 Hq orbT.
case Hxm'2: (has m2 xm'); [ eauto | exists xm' ].
split; [ by apply mem_extension | move=> q Hq; rewrite /setD andbC Hxm'r //= ].
by apply (elimFn hasPn Hxm'2).
apply/hasP; exists (gedge p); rewrite // gedge2 mring_def; apply/andP; split.
apply/idPn => Hm2p; case/hasPn: Hxm2; move=> q; rewrite Dxm /=.
by case/setU1P=> [<-|Hq] //; apply (elimFn hasPn Hxm'2).
by apply (elimFn hasPn Hxm'2); rewrite mring_def in Hxm'p; case/andP: Hxm'p.
Qed.
End MatteExtension.
Section MatteOrder.
Definition matte_order (m : matte) p : nat :=
let m' := setC m in
double (m' (p +g oppg Gb01) + m' (p +g oppg Gb10))
+ (m' (p +g oppg Gb11) + m' (p +g oppg Gb00)).
Lemma zspan_decz_z : forall x, zspan (decz x) x = Seq (decz x) x.
Proof.
move=> x; rewrite /zspan /zwidth {2}(decz_def x) -subz_sub subzz /=.
by rewrite incz_dec.
Qed.
Definition ltouch p := let: Gpoint mx my := p in Grect (decz mx) mx (decz my) my.
Lemma matte_order0 : forall m p, matte_order m p = 0 -> sub_set (ltouch p) m.
Proof.
move=> m p; move/(introT eqP); rewrite /matte_order double_addnn !eqn_add0 andbb.
have Em': (fun q => (setC m q : nat) =d 0) =1 m by rewrite /setC /eqfun; case/m.
case: p => [x y] Hp q; rewrite -mem_enum_grect /= !zspan_decz_z /=.
rewrite !Em' /= !addz0 -!decz_def -!andbA in Hp.
by case/and4P: Hp => [Hp01 Hp10 Hp11 Hp00]; do 4 case/setU1P=> [<-|] //.
Qed.
Definition inset2 r p := inset r p && inset r (p +g neg1g).
Lemma inset2_refine : forall r p, inset r (halfg p) -> inset2 (refine_rect r) p.
Proof.
move=> [x0 x1 y0 y1] [x y]; rewrite /inset2 /inset /= -?decz_def.
rewrite !leqz_inc2 -2!leqz_inc_dec !leqz_halfr 4!incz_def -!addzA.
rewrite -!(addzCA x0) -!(addzCA y0) !addzA -!incz_def 6!leqz_inc_dec !leqz_halfl.
rewrite 2!incz_def (decz_def x1) (decz_def y1) -!addzA !addz0.
rewrite -!(addzC x1) -!(addzC y1) !addzA -?decz_def.
move/and4P=> [Hx0 Hx1 Hy0 Hy1]; rewrite Hx0 Hy0 leqz_dec Hx0 leqz_dec Hx1.
rewrite leqz_dec Hy0 leqz_dec Hy1 2!leqz_dec leqz_dec2 Hx1.
by rewrite 2!leqz_dec leqz_dec2 Hy1 !orbT.
Qed.
Lemma refine_matte_order : forall (m : matte) r p,
m (halfg p) -> inset r (halfg p) -> 0 < matte_order m (halfg p) ->
{xm : matte | sub_set (fun q => m (halfg q)) xm /\
sub_set xm (fun q => setU r m (halfg q)) &
matte_order xm p < matte_order m (halfg p)}.
Proof.
move=> m r p Hmp Hrp; pose dp (d : gbits) := p +g oppg d.
have Hdp: forall d,
{xm : matte | sub_set (fun q => m (halfg q)) xm /\
sub_set xm (fun q => setU r m (halfg q)) &
forall d' : gbits,
xm (p +g oppg d') = or3b (d =d d') (m (halfg (dp d'))) (xm (dp d'))}.
pose r' := refine_rect r; pose m' := refine_matte m.
have Hr'p: forall d, inset r' (dp d).
move=> d; case/andP: (inset2_refine Hrp); rewrite -/r' {}/dp {Hmp Hrp}.
case: p r' => [x y] [x0 x1 y0 y1].
rewrite /inset /= -?decz_def ?incz_dec.
move/and4P=> [Hx0 Hx1 Hy0 Hy1]; move/and4P=> [Hx0' Hx1' Hy0' Hy1'].
case: d;
by rewrite /= -?decz_def -?incz_def ?incz_dec ?addz0; apply/and4P; split.
have Hm'r': refined_in (m' : set _) r' by exact: refine_matte_refined.
have Hr'm': has r' m'.
apply/hasP; exists (dp Gb00); first by rewrite /m' mem_refine_matte /dp addg0.
apply (mem_sub_grect (Hr'p Gb00)); exact: gtouch_refl.
move=> d; case: (refined_extends_in Hm'r' Hr'm' (Hr'p d)) => [xm Hxm Hxmr' Hxmd].
exists xm.
split; move=> q Hq.
by apply (mem_extension Hxm); rewrite /m' mem_refine_matte.
apply/orP; case/orP: (Hxmr' _ Hq).
by rewrite /r' mem_refine_rect; left.
by rewrite /m' mem_refine_matte; right.
move=> d'; rewrite -/(dp d') -mem_refine_matte -/m'.
case: (d =P d') => [<-|_]; first by rewrite Hxmd orbT.
by case Hd': (m' (dp d')); first by rewrite (mem_extension Hxm).
have leq_norb: forall b b', negb (b || b') <= negb b by do 2 case.
have Edp: forall d, halfg (dp d) = halfg p +g halfg (oddg p +g oppg d).
by move=> d; rewrite /dp addgC halfg_add addgC; congr addg; rewrite addgC.
case Dp: (oddg p) Edp => [|||] Edp.
- move=> Hmp0; pose dhp d := negb (m (halfg p +g oppg d)).
have [d Dd Hd]: {d : gbits | setC1 d Gb00 & dhp d}.
move: Hmp0; rewrite /matte_order /setC addg0 Hmp.
rewrite -/(dhp Gb01) -/(dhp Gb10) -/(dhp Gb11).
case H01: (dhp Gb01); first by exists Gb01.
case H10: (dhp Gb10); first by exists Gb10.
by case H11: (dhp Gb11); first by exists Gb11.
rewrite {}/dhp in Hd; case: (Hdp d) => [xm Hxm Dxm]; exists xm; first done.
rewrite /matte_order /setC !Dxm !Edp !addg0 Hmp !addn0.
by case: (d) Dd Hd => [|||] //= _ Hd;
rewrite (negbE Hd) /= ?addn1 ?add1n ?doubleS ?addSn ltnS ?addn0 ?add0n;
try apply leqW; try apply leq_add2; rewrite ?leq_double; try apply leq_add2.
- case: (Hdp Gb01) => [xm Hxm Dxm] Hmp0; exists xm; first done.
rewrite {1}/matte_order /setC !Dxm !Edp !addg0 Hmp /= add0n addn0.
case Hm01: (m _) => //.
by rewrite /matte_order /= /setC Hm01 /= add1n doubleS; case (xm (dp Gb11)).
- case: (Hdp Gb00) => [xm Hxm Dxm] Hmp0; exists xm; first done.
by rewrite {1}/matte_order /setC !Dxm !Edp !addg0 Hmp.
case: (Hdp Gb10) => [xm Hxm Dxm] Hmp0; exists xm; first done.
rewrite {1}/matte_order /setC !Dxm !Edp !addg0 Hmp /= add0n addn0.
case Hm01: (m _) => //.
by rewrite /matte_order /= /setC Hm01 /= addn1 doubleS; case (xm (dp Gb11)).
Qed.
End MatteOrder.
Set Strict Implicit.
Unset Implicit Arguments.
|
module Core.CaseTree
import Core.TT
import Data.Bool
import Data.List
import Libraries.Data.NameMap
import Libraries.Text.PrettyPrint.Prettyprinter
%default covering
mutual
||| Case trees in A-normal forms
||| i.e. we may only dispatch on variables, not expressions
public export
data CaseTree : List Name -> Type where
||| case x return scTy of { p1 => e1 ; ... }
Case : {name : _} ->
(idx : Nat) ->
(0 p : IsVar name idx vars) ->
(scTy : Term vars) -> List (CaseAlt vars) ->
CaseTree vars
||| RHS: no need for further inspection
||| The Int is a clause id that allows us to see which of the
||| initial clauses are reached in the tree
STerm : Int -> Term vars -> CaseTree vars
||| error from a partial match
Unmatched : (msg : String) -> CaseTree vars
||| Absurd context
Impossible : CaseTree vars
||| Case alternatives. Unlike arbitrary patterns, they can be at most
||| one constructor deep.
public export
data CaseAlt : List Name -> Type where
||| Constructor for a data type; bind the arguments and subterms.
ConCase : Name -> (tag : Int) -> (args : List Name) ->
CaseTree (args ++ vars) -> CaseAlt vars
||| Lazy match for the Delay type used for codata types
DelayCase : (ty : Name) -> (arg : Name) ->
CaseTree (ty :: arg :: vars) -> CaseAlt vars
||| Match against a literal
ConstCase : Constant -> CaseTree vars -> CaseAlt vars
||| Catch-all case
DefaultCase : CaseTree vars -> CaseAlt vars
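-- As an illustrative sketch (hypothetical, not actual compiler output), a
-- case tree for `isNothing : Maybe a -> Bool` dispatches on the scrutinee
-- variable and has one ConCase per constructor; rendered with the Show
-- instance defined below, it would print roughly as:
--
--   case x[0] : Maybe a of
--    { Nothing => [0] True
--    | Just y => [1] False }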
mutual
public export
measure : CaseTree vars -> Nat
measure (Case idx p scTy xs) = sum $ measureAlts <$> xs
measure (STerm x y) = 0
measure (Unmatched msg) = 0
measure Impossible = 0
measureAlts : CaseAlt vars -> Nat
measureAlts (ConCase x tag args y) = 1 + (measure y)
measureAlts (DelayCase ty arg x) = 1 + (measure x)
measureAlts (ConstCase x y) = 1 + (measure y)
measureAlts (DefaultCase x) = 1 + (measure x)
export
isDefault : CaseAlt vars -> Bool
isDefault (DefaultCase _) = True
isDefault _ = False
mutual
export
StripNamespace (CaseTree vars) where
trimNS ns (Case idx p scTy xs)
= Case idx p (trimNS ns scTy) (map (trimNS ns) xs)
trimNS ns (STerm x t) = STerm x (trimNS ns t)
trimNS ns c = c
restoreNS ns (Case idx p scTy xs)
= Case idx p (restoreNS ns scTy) (map (restoreNS ns) xs)
restoreNS ns (STerm x t) = STerm x (restoreNS ns t)
restoreNS ns c = c
export
StripNamespace (CaseAlt vars) where
trimNS ns (ConCase x tag args t) = ConCase x tag args (trimNS ns t)
trimNS ns (DelayCase ty arg t) = DelayCase ty arg (trimNS ns t)
trimNS ns (ConstCase x t) = ConstCase x (trimNS ns t)
trimNS ns (DefaultCase t) = DefaultCase (trimNS ns t)
restoreNS ns (ConCase x tag args t) = ConCase x tag args (restoreNS ns t)
restoreNS ns (DelayCase ty arg t) = DelayCase ty arg (restoreNS ns t)
restoreNS ns (ConstCase x t) = ConstCase x (restoreNS ns t)
restoreNS ns (DefaultCase t) = DefaultCase (restoreNS ns t)
public export
data Pat : Type where
PAs : FC -> Name -> Pat -> Pat
PCon : FC -> Name -> (tag : Int) -> (arity : Nat) ->
List Pat -> Pat
PTyCon : FC -> Name -> (arity : Nat) -> List Pat -> Pat
PConst : FC -> (c : Constant) -> Pat
PArrow : FC -> (x : Name) -> Pat -> Pat -> Pat
PDelay : FC -> LazyReason -> Pat -> Pat -> Pat
-- TODO: Matching on lazy types
PLoc : FC -> Name -> Pat
PUnmatchable : FC -> Term [] -> Pat
export
isPConst : Pat -> Maybe Constant
isPConst (PConst _ c) = Just c
isPConst _ = Nothing
showCT : {vars : _} -> (indent : String) -> CaseTree vars -> String
showCA : {vars : _} -> (indent : String) -> CaseAlt vars -> String
showCT indent (Case {name} idx prf ty alts)
= "case " ++ show name ++ "[" ++ show idx ++ "] : " ++ show ty ++ " of"
++ "\n" ++ indent ++ " { "
++ showSep ("\n" ++ indent ++ " | ")
(assert_total (map (showCA (" " ++ indent)) alts))
++ "\n" ++ indent ++ " }"
showCT indent (STerm i tm) = "[" ++ show i ++ "] " ++ show tm
showCT indent (Unmatched msg) = "Error: " ++ show msg
showCT indent Impossible = "Impossible"
showCA indent (ConCase n tag args sc)
= showSep " " (map show (n :: args)) ++ " => " ++
showCT indent sc
showCA indent (DelayCase _ arg sc)
= "Delay " ++ show arg ++ " => " ++ showCT indent sc
showCA indent (ConstCase c sc)
= "Constant " ++ show c ++ " => " ++ showCT indent sc
showCA indent (DefaultCase sc)
= "_ => " ++ showCT indent sc
export
{vars : _} -> Show (CaseTree vars) where
show = showCT ""
export
{vars : _} -> Show (CaseAlt vars) where
show = showCA ""
mutual
export
{vars : _} -> Pretty (CaseTree vars) where
pretty (Case {name} idx prf ty alts)
= "case" <++> pretty name <++> ":" <++> pretty ty <++> "of"
<+> nest 2 (hardline
<+> vsep (assert_total (map pretty alts)))
pretty (STerm i tm) = pretty tm
pretty (Unmatched msg) = pretty "Error:" <++> pretty msg
pretty Impossible = pretty "Impossible"
export
{vars : _} -> Pretty (CaseAlt vars) where
pretty (ConCase n tag args sc)
= hsep (map pretty (n :: args)) <++> pretty "=>"
<+> Union (spaces 1 <+> pretty sc) (nest 2 (hardline <+> pretty sc))
pretty (DelayCase _ arg sc) =
pretty "Delay" <++> pretty arg <++> pretty "=>"
<+> Union (spaces 1 <+> pretty sc) (nest 2 (hardline <+> pretty sc))
pretty (ConstCase c sc) =
pretty c <++> pretty "=>"
<+> Union (spaces 1 <+> pretty sc) (nest 2 (hardline <+> pretty sc))
pretty (DefaultCase sc) =
pretty "_ =>"
<+> Union (spaces 1 <+> pretty sc) (nest 2 (hardline <+> pretty sc))
mutual
export
eqTree : CaseTree vs -> CaseTree vs' -> Bool
eqTree (Case i _ _ alts) (Case i' _ _ alts')
= i == i'
&& length alts == length alts'
&& all (uncurry eqAlt) (zip alts alts')
eqTree (STerm _ t) (STerm _ t') = eqTerm t t'
eqTree (Unmatched _) (Unmatched _) = True
eqTree Impossible Impossible = True
eqTree _ _ = False
eqAlt : CaseAlt vs -> CaseAlt vs' -> Bool
eqAlt (ConCase n t args tree) (ConCase n' t' args' tree')
= n == n' && eqTree tree tree' -- assume arities match, since name does
eqAlt (DelayCase _ _ tree) (DelayCase _ _ tree')
= eqTree tree tree'
eqAlt (ConstCase c tree) (ConstCase c' tree')
= c == c' && eqTree tree tree'
eqAlt (DefaultCase tree) (DefaultCase tree')
= eqTree tree tree'
eqAlt _ _ = False
export
Show Pat where
show (PAs _ n p) = show n ++ "@(" ++ show p ++ ")"
show (PCon _ n i _ args) = show n ++ " " ++ show i ++ " " ++ assert_total (show args)
show (PTyCon _ n _ args) = "<TyCon>" ++ show n ++ " " ++ assert_total (show args)
show (PConst _ c) = show c
show (PArrow _ x s t) = "(" ++ show s ++ " -> " ++ show t ++ ")"
show (PDelay _ _ _ p) = "(Delay " ++ show p ++ ")"
show (PLoc _ n) = show n
show (PUnmatchable _ tm) = ".(" ++ show tm ++ ")"
export
Pretty Pat where
prettyPrec d (PAs _ n p) = pretty n <++> pretty "@" <+> parens (pretty p)
prettyPrec d (PCon _ n _ _ args) =
parenthesise (d > Open) $ hsep (pretty n :: map (prettyPrec App) args)
prettyPrec d (PTyCon _ n _ args) =
parenthesise (d > Open) $ hsep (pretty n :: map (prettyPrec App) args)
prettyPrec d (PConst _ c) = pretty c
prettyPrec d (PArrow _ _ p q) =
parenthesise (d > Open) $ pretty p <++> pretty "->" <++> pretty q
prettyPrec d (PDelay _ _ _ p) = parens (pretty "Delay" <++> pretty p)
prettyPrec d (PLoc _ n) = pretty n
prettyPrec d (PUnmatchable _ tm) = pretty "." <+> parens (pretty tm)
mutual
insertCaseNames : SizeOf outer ->
SizeOf ns ->
CaseTree (outer ++ inner) ->
CaseTree (outer ++ (ns ++ inner))
insertCaseNames outer ns (Case idx prf scTy alts)
= let MkNVar prf' = insertNVarNames outer ns (MkNVar prf) in
Case _ prf' (insertNames outer ns scTy)
(map (insertCaseAltNames outer ns) alts)
insertCaseNames outer ns (STerm i x) = STerm i (insertNames outer ns x)
insertCaseNames _ _ (Unmatched msg) = Unmatched msg
insertCaseNames _ _ Impossible = Impossible
insertCaseAltNames : SizeOf outer ->
SizeOf ns ->
CaseAlt (outer ++ inner) ->
CaseAlt (outer ++ (ns ++ inner))
insertCaseAltNames p q (ConCase x tag args ct)
= ConCase x tag args
(rewrite appendAssociative args outer (ns ++ inner) in
insertCaseNames (mkSizeOf args + p) q {inner}
(rewrite sym (appendAssociative args outer inner) in
ct))
insertCaseAltNames outer ns (DelayCase tyn valn ct)
= DelayCase tyn valn
(insertCaseNames (suc (suc outer)) ns ct)
insertCaseAltNames outer ns (ConstCase x ct)
= ConstCase x (insertCaseNames outer ns ct)
insertCaseAltNames outer ns (DefaultCase ct)
= DefaultCase (insertCaseNames outer ns ct)
export
Weaken CaseTree where
weakenNs ns t = insertCaseNames zero ns t
total
getNames : (forall vs . NameMap Bool -> Term vs -> NameMap Bool) ->
NameMap Bool -> CaseTree vars -> NameMap Bool
getNames add ns sc = getSet ns sc
where
mutual
getAltSet : NameMap Bool -> CaseAlt vs -> NameMap Bool
getAltSet ns (ConCase n t args sc) = getSet ns sc
getAltSet ns (DelayCase t a sc) = getSet ns sc
getAltSet ns (ConstCase i sc) = getSet ns sc
getAltSet ns (DefaultCase sc) = getSet ns sc
getAltSets : NameMap Bool -> List (CaseAlt vs) -> NameMap Bool
getAltSets ns [] = ns
getAltSets ns (a :: as) = getAltSets (getAltSet ns a) as
getSet : NameMap Bool -> CaseTree vs -> NameMap Bool
getSet ns (Case _ x ty xs) = getAltSets ns xs
getSet ns (STerm i tm) = add ns tm
getSet ns (Unmatched msg) = ns
getSet ns Impossible = ns
export
getRefs : (aTotal : Name) -> CaseTree vars -> NameMap Bool
getRefs at = getNames (addRefs False at) empty
export
addRefs : (aTotal : Name) -> NameMap Bool -> CaseTree vars -> NameMap Bool
addRefs at ns = getNames (addRefs False at) ns
export
getMetas : CaseTree vars -> NameMap Bool
getMetas = getNames addMetas empty
export
mkTerm : (vars : List Name) -> Pat -> Term vars
mkTerm vars (PAs fc x y) = mkTerm vars y
mkTerm vars (PCon fc x tag arity xs)
= apply fc (Ref fc (DataCon tag arity) x)
(map (mkTerm vars) xs)
mkTerm vars (PTyCon fc x arity xs)
= apply fc (Ref fc (TyCon 0 arity) x)
(map (mkTerm vars) xs)
mkTerm vars (PConst fc c) = PrimVal fc c
mkTerm vars (PArrow fc x s t)
= Bind fc x (Pi fc top Explicit (mkTerm vars s)) (mkTerm (x :: vars) t)
mkTerm vars (PDelay fc r ty p)
= TDelay fc r (mkTerm vars ty) (mkTerm vars p)
mkTerm vars (PLoc fc n)
= case isVar n vars of
Just (MkVar prf) => Local fc Nothing _ prf
_ => Ref fc Bound n
mkTerm vars (PUnmatchable fc tm) = embed tm
|
"""
Author: colorsky
Date: 2020/01/15
"""
from multiprocessing import Pool, cpu_count
from shapely.geometry import Polygon, Point
import numpy as np
import random
def polygon_simplify(points, tolerance):
coords = Polygon(points).simplify(tolerance).exterior.coords
if not coords:
return
x, y = coords.xy
p = list(zip(x, y))
if len(p) < 2:
return
return p
def polygon_interpolate(polygon: list,
displacement_f: float = None,
displacement: float = 10,
min_area: float = 10,
max_iter: int = 100):
"""Recursively interpolate polygon segments to generate a whirl like output.
:param polygon: Vertices of the polygon
:param displacement_f: Factor of how much distance the new point moves along the edge relative to the edge's length
:param displacement: Displacement of each of the new points relative to the previous points
:param min_area: Minimum area to terminate the interpolation
:param max_iter: Maximum iterations to terminate the interpolation
:return: {iter_0:polygon, iter_1:polygon, ..., max_iter:polygon}
"""
output = {0: polygon}
if displacement == 0:
return output
if polygon[0] != polygon[-1]: # Ensure the polygon is closed
polygon.append(polygon[0])
# Begin interpolating
parent_polygon = polygon
min_area_diff = 10
for iteration in range(1, max_iter + 1):
if Polygon(parent_polygon).area < min_area:
break
child_polygon = []
for i in range(len(parent_polygon) - 1):
displacement_ = float(displacement)
pi = np.array(parent_polygon[i])
pj = np.array(parent_polygon[i + 1])
if all(pi == pj):
continue
v = pj - pi
dist = np.sqrt(np.sum(v ** 2))
normalized_v = v / dist
if displacement_f:
displacement_ = dist * displacement_f
qi = pi + normalized_v * displacement_
if dist <= displacement_:
qi = pi
child_polygon += [qi.tolist()]
if child_polygon[0] != child_polygon[-1]: # Ensure the polygon is closed
child_polygon.append(child_polygon[0])
simplify_depth = 1
while len(child_polygon) > 12:
# new_polygon = polygon_simplify_curve_edge(new_polygon)
child_polygon = polygon_simplify(child_polygon, simplify_depth)
simplify_depth += 1
if Polygon(output[len(output) - 1]).area - Polygon(child_polygon).area <= min_area_diff:
# Remove polygons with few area difference
break
output[len(output)] = child_polygon
parent_polygon = child_polygon
return output
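# A minimal usage sketch of polygon_interpolate (illustrative values only):
def _example_interpolate():
    """Spiral a square inward; each dict value is one closed ring."""
    square = [[0.0, 0.0], [100.0, 0.0], [100.0, 100.0], [0.0, 100.0]]
    # Successive rings rotate and shrink toward the centre until the
    # min_area / max_iter stopping criteria fire.
    whirl = polygon_interpolate(square, displacement_f=0.1, min_area=50)
    for iteration, ring in whirl.items():
        print(iteration, len(ring))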
def polygons_interpolate_wrapper(*args):
points, kwargs = args[0]
return polygon_interpolate(polygon=points, **kwargs)
def polygons_interpolate(polygons, **kwargs):
if len(polygons) < 10:
return [polygons_interpolate_wrapper((polygon, kwargs)) for polygon in polygons]
else:
# multiprocessing
with Pool(cpu_count() - 1) as pool:
args = [[polygon, kwargs] for polygon in polygons]
output = pool.map(polygons_interpolate_wrapper, args)
return output
def random_points_inside_polygon(polygon: list, n: int, seed: int = -1):
"""
Generate n random points inside the polygon with random seed set to seed(if provided).
:param polygon: Vertices of the polygon.
:param n: Number of random points to generate.
:param seed: Random seed.
:return: A list of points
"""
if seed >= 0:
random.seed(seed)
polygon = Polygon(polygon)
minx, miny, maxx, maxy = polygon.bounds
points = []
for i in range(n):
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
while not polygon.contains(Point(x, y)):
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
points.append([x, y])
return points
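# A minimal sketch of random_points_inside_polygon (illustrative values only):
def _example_random_points():
    """Rejection-sample points from a triangle."""
    triangle = [[0.0, 0.0], [10.0, 0.0], [5.0, 8.0]]
    # Candidates are drawn uniformly from the bounding box and kept only
    # when they fall inside the polygon, so every returned point is interior.
    print(random_points_inside_polygon(triangle, n=5, seed=42))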
|
State Before:
α✝ : Type u
β✝ : Type v
α : Type u_1
β : Type u_2
l : List α
f : ℕ → α → β
⊢ length (mapIdx f l) = length l

State After:
case nil
α✝ : Type u
β✝ : Type v
α : Type u_1
β : Type u_2
f✝ f : ℕ → α → β
⊢ length (mapIdx f []) = length []

case cons
α✝ : Type u
β✝ : Type v
α : Type u_1
β : Type u_2
f✝ : ℕ → α → β
hd : α
tl : List α
IH : ∀ (f : ℕ → α → β), length (mapIdx f tl) = length tl
f : ℕ → α → β
⊢ length (mapIdx f (hd :: tl)) = length (hd :: tl)

Tactic: induction' l with hd tl IH generalizing f

State Before:
case nil
α✝ : Type u
β✝ : Type v
α : Type u_1
β : Type u_2
f✝ f : ℕ → α → β
⊢ length (mapIdx f []) = length []

State After: no goals

Tactic: rfl

State Before:
case cons
α✝ : Type u
β✝ : Type v
α : Type u_1
β : Type u_2
f✝ : ℕ → α → β
hd : α
tl : List α
IH : ∀ (f : ℕ → α → β), length (mapIdx f tl) = length tl
f : ℕ → α → β
⊢ length (mapIdx f (hd :: tl)) = length (hd :: tl)

State After: no goals

Tactic: simp [IH]
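Assembled, the three tactic steps above form the following proof script (a
sketch; the `mapIdx` definition and surrounding Mathlib context are assumed):

theorem length_mapIdx (l : List α) (f : ℕ → α → β) :
    length (mapIdx f l) = length l := by
  induction' l with hd tl IH generalizing f
  · rfl
  · simp [IH]
|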
In March 2011, James confirmed that she had left Home and Away. She had already filmed her final scenes and Nicole will leave on-screen later in the year. Of her departure, James said "I was at Home and Away for three-and-a-half years, so it's good to be finished and get to be who I am, and do what I've wanted to do for so long."
|
\section{Background}
\label{sec:background}
|
[STATEMENT]
lemma pow_empty_iff:
shows "[] \<in> (lang r) ^^ n \<longleftrightarrow> (if n = 0 then True else [] \<in> (lang r))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ([] \<in> lang r ^^ n) = (if n = 0 then True else [] \<in> lang r)
[PROOF STEP]
by (induct n)(auto) |
"""Black Hole.
"""
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
from opytimizer.utils import constant
logger = l.get_logger(__name__)
class BH(Optimizer):
"""A BH class, inherited from Optimizer.
This is the designed class to define BH-related
variables and methods.
References:
A. Hatamlou. Black hole: A new heuristic optimization approach for data clustering.
Information Sciences (2013).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> BH.')
# Overrides its parent class with the receiving params
super(BH, self).__init__()
# Builds the class
self.build(params)
logger.info('Class overrided.')
def _update_position(self, agents, best_agent, function):
"""It updates every star position and calculates their event's horizon cost (eq. 3).
Args:
agents (list): List of agents.
best_agent (Agent): Global best agent.
function (Function): A function object.
Returns:
The cost of the event horizon.
"""
# Event's horizon cost
cost = 0
# Iterates through all agents
for agent in agents:
            # Generates a uniform random number
r1 = r.generate_uniform_random_number()
# Updates agent's position
agent.position += r1 * (best_agent.position - agent.position)
# Checks agents limits
agent.clip_by_bound()
# Evaluates agent
agent.fit = function(agent.position)
# If new agent's fitness is better than best
if agent.fit < best_agent.fit:
# Swap their positions and their fitness
agent.position, best_agent.position = best_agent.position, agent.position
agent.fit, best_agent.fit = best_agent.fit, agent.fit
# Increment the cost with current agent's fitness
cost += agent.fit
return cost
def _event_horizon(self, agents, best_agent, cost):
"""It calculates the stars' crossing an event horizon (eq. 4).
Args:
agents (list): List of agents.
best_agent (Agent): Global best agent.
cost (float): The event's horizon cost.
"""
# Calculates the radius of the event horizon
radius = best_agent.fit / max(cost, constant.EPSILON)
# Iterate through every agent
for agent in agents:
# Calculates distance between star and black hole
distance = (np.linalg.norm(best_agent.position - agent.position))
# If distance is smaller than horizon's radius
if distance < radius:
# Fills agent with new random positions
agent.fill_with_uniform()
def update(self, space, function):
"""Wraps Black Hole over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
"""
# Updates stars position and calculate their cost (eq. 3)
cost = self._update_position(space.agents, space.best_agent, function)
# Performs the Event Horizon (eq. 4)
self._event_horizon(space.agents, space.best_agent, cost)
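# A minimal standalone sketch of one BH iteration (illustrative only, not
# part of the Opytimizer API): stars move toward the black hole via
# x_i <- x_i + r1 * (x_bh - x_i) (eq. 3), and any star closer than the
# event horizon radius f_bh / sum(f_i) (eq. 4) is re-seeded at random.
def _bh_step_sketch(positions, fitness, lower, upper, rng=np.random):
    best = int(np.argmin(fitness))
    radius = fitness[best] / max(fitness.sum(), constant.EPSILON)
    for i in range(len(positions)):
        if i == best:
            continue
        positions[i] += rng.uniform() * (positions[best] - positions[i])
        if np.linalg.norm(positions[best] - positions[i]) < radius:
            positions[i] = rng.uniform(lower, upper, positions[i].shape)
    return positions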
|
Yikal Kuyum is one of the branches of the Student Recruitment and Retention Center. This student organization aims to recruit and retain the Chicana/o and Latina/o student population, focusing especially on youth not targeted by existing, conventional outreach programs. Yikal Kuyum's outreach efforts center on the holistic development of students, fostering critical consciousness and academic competitiveness.
Outreach efforts include: high school workshops/visits, K12 empowerment conferences, shadowing projects, university visits for youth, and tutoring. Target schools include Douglass Junior HS and Woodland HS.
Retention efforts include: holistic peer counseling, speakers/forums, gender/sexuality dialogue circles, study halls, tutoring referrals, resource development, and an annual student retreat.
Yikal Kuyum is located in the Student Recruitment and Retention Center in the new Student Community Center.
|
/*
* C version of Diffusive Nested Sampling (DNest4) by Brendon J. Brewer
*
* Yan-Rong Li, [email protected]
* Jun 30, 2016
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <gsl/gsl_rng.h>
#include "dnestvars.h"
/*! \file dnestpostprocess.c
 * \brief post-process the samples generated by dnest.
*/
int cmp_sample(const void *pa, const void *pb);
typedef struct
{
double logl, tiebreaker;
int id;
}SampleType;
/*
* This function calculates log(exp(x1) - exp(x2)).
*/
double logdiffexp(double x1, double x2) // x1 is larger
{
double biggest = x1;
double xx1 = x1 - biggest, xx2 = x2 - biggest;
return log(exp(xx1) - exp(xx2)) + biggest;
}
/*
 * This function calculates log(exp(x1)+...+exp(xn)).
*/
double logsumexp(double *x, int n)
{
int j;
double sum, max;
max = x[0];
for(j = 0; j < n; j++)
{
max = fmax(max, x[j]);
}
sum = 0.0;
for(j=0; j< n; j++)
{
sum += exp( x[j] - max);
}
return log(sum) + max;
}
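/*
 * Numerical note: both helpers shift by the largest argument before
 * exponentiating, so exp() never overflows. For example,
 * logdiffexp(1000.0, 999.0) returns 1000 + log(1 - exp(-1)) ~= 999.54,
 * even though exp(1000) itself overflows a double.
 */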
void postprocess(double temperature)
{
printf("# Starts postprocess.\n");
FILE *fp, *fp_sample;
double **levels_orig, **sample_info, *logl;
int *sandwhich;
double *psample;
int i, j;
int num_levels, num_samples;
char buf[BUF_MAX_LENGTH];
int moreSample = 1;
// read number of levels and samples
fp = fopen(options.sampler_state_file, "r");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.sampler_state_file);
exit(0);
}
fscanf(fp, "%d %d\n", &num_levels, &num_samples);
fclose(fp);
// allocate memory for levels
levels_orig = malloc(num_levels * sizeof(double *));
for(i=0; i< num_levels; i++)
{
levels_orig[i] = malloc(3 * sizeof(double));
}
// allocate memory for sample_info
sample_info = malloc(num_samples * sizeof(double *));
for(i=0; i< num_samples; i++)
{
sample_info[i] = malloc(3 * sizeof(double));
}
// allocate memory for samples
logl = (void *)malloc(num_samples * sizeof(double));
sandwhich = malloc(num_samples * sizeof(int));
psample = (double *)malloc(dnest_size_of_modeltype);
// read levels
fp = fopen(options.levels_file, "r");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.levels_file);
exit(0);
}
fgets(buf, BUF_MAX_LENGTH, fp);
for(i=0; i < num_levels; i++)
{
if(feof(fp) != 0)
{
fprintf(stderr, "# Error: file %s ends at %d.\n", options.levels_file, i);
exit(0);
}
fgets(buf, BUF_MAX_LENGTH, fp);
if(sscanf(buf, "%lf %lf %lf", &levels_orig[i][0], &levels_orig[i][1], &levels_orig[i][2]) < 3)
{
fprintf(stderr, "# Error: Cannot read file %s.\n", options.levels_file);
exit(0);
}
buf[0]='\0'; // clear up buf
}
fclose(fp);
// read sample_info
if(dnest_flag_sample_info == 0) //no need to recalculate
{
fp = fopen(options.sample_info_file, "r");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.sample_info_file);
exit(0);
}
fgets(buf, BUF_MAX_LENGTH, fp);
for(i=0; i < num_samples; i++)
{
if(feof(fp) != 0)
{
fprintf(stderr, "# Error: file %s ends at %d.\n", options.sample_info_file, i);
exit(0);
}
fgets(buf, BUF_MAX_LENGTH, fp);
if(sscanf(buf, "%lf %lf %lf", &sample_info[i][0], &sample_info[i][1], &sample_info[i][2]) < 3)
{
fprintf(stderr, "# Error: Cannot read file %s.\n", options.sample_info_file);
exit(0);
}
buf[0]='\0'; // clear buf
/* reset level assignment for levels larger than the maximum level numbers */
if(sample_info[i][0] > num_levels -1)
sample_info[i][0] = num_levels - 1;
}
fclose(fp);
}
  else //sample_info file does not exist, need to recalculate.
{
fp = fopen(options.sample_info_file, "w");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.sample_info_file);
exit(0);
}
printf("# Dnest starts to recalculate the sample info.\n");
fprintf(fp, "# level assignment, log likelihood, tiebreaker, ID.\n");
//read sample
fp_sample = fopen(options.sample_file, "r");
if(fp_sample == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.sample_file);
exit(0);
}
fgets(buf, BUF_MAX_LENGTH, fp_sample);
for(i=0; i < num_samples; i++)
{
read_particle(fp_sample, (void *)psample);
sample_info[i][1] = log_likelihoods_cal_initial((void *)psample, dnest_arg);
sample_info[i][2] = dnest_rand();
for(j=0; j<num_levels; j++)
{
if(sample_info[i][1] < levels_orig[j][1])
break;
}
/*j=num_levels-1; // find out the highest allowed level
while( (sample_info[i][1] < levels_orig[j][1]) && (j>=0) )
{
j--;
}*/
sample_info[i][0] = (double)dnest_rand_int(j); // randomly assign a level [0, j-1]
fprintf(fp, "%d %e %f %d\n", (int)sample_info[i][0], sample_info[i][1], sample_info[i][2], 1);
}
fclose(fp);
fclose(fp_sample);
}
//tempering with a temperature
for(i=0; i<num_samples; i++)
logl[i] = sample_info[i][1] / temperature;
  // find the sandwiching level for each sample
for(i=0; i<num_samples; i++)
{
sandwhich[i] = (int)sample_info[i][0];
for(j=sandwhich[i]; j < num_levels; j++)
{
if( sample_info[i][1] > levels_orig[j][1] )
sandwhich[i] = j;
}
//printf("%f %d\n", logl[i], sandwhich[i]);
}
double *logx_samples, *logp_samples, *logP_samples;
double logx_min, logx_max, Umin, U;
int num_samples_thisLevel;
double *logx_samples_thisLevel;
SampleType *logl_samples_thisLevel;
double left, right;
logx_samples = malloc(num_samples * sizeof(double));
logp_samples = malloc(num_samples * sizeof(double));
logP_samples = malloc(num_samples * sizeof(double));
logx_samples_thisLevel = malloc(num_samples * sizeof(double));
logl_samples_thisLevel = malloc(num_samples * sizeof(SampleType));
for(i=0; i<num_levels; i++)
{
logx_max = levels_orig[i][0];
if(i == num_levels - 1)
logx_min = -1.0E300;
else
logx_min = levels_orig[i+1][0];
Umin = exp( logx_min - logx_max);
    // find the samples sandwiched by this level
num_samples_thisLevel = 0;
for(j=0; j<num_samples; j++)
if( sandwhich[j] == i )
{
logl_samples_thisLevel[num_samples_thisLevel].logl = sample_info[j][1]; // logl
logl_samples_thisLevel[num_samples_thisLevel].tiebreaker = sample_info[j][2]; // tiebreaker
logl_samples_thisLevel[num_samples_thisLevel].id = j; // id
num_samples_thisLevel++;
}
//printf("%d\n", num_samples_thisLevel);
for(j=0; j<num_samples_thisLevel; j++)
{
U = Umin + (1.0 - Umin) * ( 1.0/(1.0 + num_samples_thisLevel)
+ ( 1.0 - 2.0/(1.0 + num_samples_thisLevel) ) * (num_samples_thisLevel-1 - j)/(num_samples_thisLevel - 1.0) );
logx_samples_thisLevel[j] = logx_max + log(U);
}
qsort(logl_samples_thisLevel, num_samples_thisLevel, sizeof(SampleType), cmp_sample);
//printf("%f %f %d %f\n", logl_samples_thisLevel[0].logl, logl_samples_thisLevel[0].tiebreaker, logl_samples_thisLevel[0].id, logx_samples_thisLevel[0]);
//printf("%f %f %d %f\n", logl_samples_thisLevel[1].logl, logl_samples_thisLevel[1].tiebreaker, logl_samples_thisLevel[1].id, logx_samples_thisLevel[1]);
for(j = 0; j<num_samples_thisLevel; j++)
{
if(j != num_samples_thisLevel - 1)
left = logx_samples_thisLevel[j+1];
else if (i == num_levels - 1)
left = -1.0E300;
else
left = levels_orig[i+1][0];
if( j!= 0)
right = logx_samples_thisLevel[j-1];
else
right = levels_orig[i][0];
//printf("%e %e %e\n", right, left, logdiffexp(right, left));
logx_samples[logl_samples_thisLevel[j].id] = logx_samples_thisLevel[j];
logp_samples[logl_samples_thisLevel[j].id] = log(0.5) + logdiffexp(right, left);
}
}
double sum, max, logz_estimates, H_estimates, ESS;
sum = logsumexp(logp_samples, num_samples);
for(j = 0; j < num_samples; j++)
{
logp_samples[j] -= sum;
//logP_samples[j] = logp_samples[j] + sample_info[j][1];
logP_samples[j] = logp_samples[j] + logl[j];
}
logz_estimates = logsumexp(logP_samples, num_samples);
H_estimates = -logz_estimates;
ESS = 0.0;
for(j=0; j<num_samples; j++)
{
logP_samples[j] -= logz_estimates;
//H_estimates += exp(logP_samples[j]) * sample_info[j][1];
H_estimates += exp(logP_samples[j]) * logl[j];
ESS += -logP_samples[j]*exp(logP_samples[j]);
}
ESS = exp(ESS);
printf("log(Z) = %f\n", logz_estimates);
printf("H = %f\n", H_estimates);
printf("Effective sample size = %f\n", ESS);
post_logz = logz_estimates;
// resample to uniform weight
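  // (This is rejection sampling: a particle index is proposed uniformly and
  // accepted with probability exp(logP_samples[which]), i.e. in proportion
  // to its posterior weight, so the saved posterior sample is unweighted.)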
int num_ps = moreSample*ESS;
void *posterior_sample;
double *posterior_sample_info;
int *posterior_sample_idx;
int which;
const gsl_rng_type * dnest_post_gsl_T;
gsl_rng * dnest_post_gsl_r;
dnest_post_gsl_T = (gsl_rng_type *) gsl_rng_default;
dnest_post_gsl_r = gsl_rng_alloc (dnest_post_gsl_T);
#ifndef Debug
gsl_rng_set(dnest_post_gsl_r, time(NULL));
#else
gsl_rng_set(dnest_post_gsl_r, 8888);
printf("# debugging, random seed %d\n", 8888);
#endif
posterior_sample = malloc(num_ps * dnest_size_of_modeltype);
posterior_sample_info = malloc(num_ps * sizeof(double));
posterior_sample_idx = malloc(num_ps * sizeof(int)); // flag for which particle to save
max = logP_samples[0];
for(j=0; j<num_samples; j++)
max = fmax(logP_samples[j], max);
for(j=0; j<num_samples; j++)
logP_samples[j] -= max;
for(j=0; j<num_ps; j++)
{
while(true)
{
which = gsl_rng_uniform_int(dnest_post_gsl_r, num_samples);
if(log(gsl_rng_uniform(dnest_post_gsl_r)) < logP_samples[which])
{
posterior_sample_info[j] = logl[which];
posterior_sample_idx[j] = which; // sample this particle
break;
}
}
}
// read sample and pick out selected particles
fp_sample = fopen(options.sample_file, "r");
if(fp_sample == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.sample_file);
exit(0);
}
fgets(buf, BUF_MAX_LENGTH, fp_sample);
for(i=0; i < num_samples; i++)
{
read_particle(fp_sample, (void *)psample);
for(j=0; j < num_ps; j++)
{
if(posterior_sample_idx[j] == i)
{
memcpy(posterior_sample+j*dnest_size_of_modeltype, (void *)psample, dnest_size_of_modeltype);
}
}
//printf("%f %f %f\n", sample[i].params[0], sample[i].params[1], sample[i].params[2]);
}
fclose(fp_sample);
//save posterior sample
fp = fopen(options.posterior_sample_file, "w");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.posterior_sample_file);
exit(0);
}
fprintf(fp, "# %d\n", num_ps);
for(i=0; i<num_ps; i++)
{
print_particle(fp, posterior_sample + i*dnest_size_of_modeltype, dnest_arg);
}
fclose(fp);
//save posterior sample information
fp = fopen(options.posterior_sample_info_file, "w");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", options.posterior_sample_info_file);
exit(0);
}
fprintf(fp, "# %d\n", num_ps);
for(i=0; i<num_ps; i++)
{
fprintf(fp, "%e\n", posterior_sample_info[i]);
}
fclose(fp);
for(i=0; i<num_levels; i++)
free(levels_orig[i]);
free(levels_orig);
for(i=0; i<num_samples; i++)
free(sample_info[i]);
free(sample_info);
free(logl);
free(logx_samples);
free(logx_samples_thisLevel);
free(logp_samples);
free(logP_samples);
free(logl_samples_thisLevel);
free(sandwhich);
free(psample);
free(posterior_sample);
free(posterior_sample_info);
free(posterior_sample_idx);
gsl_rng_free(dnest_post_gsl_r);
printf("# Ends dnest postprocess.\n");
}
int cmp_sample(const void *pa, const void *pb)
{
SampleType *a = (SampleType *)pa;
SampleType *b = (SampleType *)pb;
// in acesending order
if(a->logl > b->logl)
return true;
if( a->logl == b->logl && a->tiebreaker > b->tiebreaker)
return true;
return false;
}
|
function plotbernstein(B,a,b)
%PLOTBERNSTEIN Plots Bernstein points with connecting lines
%
%For univariate Bernstein points,
%
% plotbernstein(B)
%
%plots Bernstein points in the default interval [0,1]. Correspondingly,
%
% plotbernstein(B,a,b)
%
%plots Bernstein points in the interval [a,b].
%
%For a given polynomial P and an interval [a,b], a typical call for plotting P together with
%its Bernstein points is
%
% B = bernsteincoeff(ptrans(P,a,b,0,1));
% plotpoly(P,a,b), hold on, plotbernstein(B,a,b), hold off
%
% written 12/25/02 S.M. Rump
% modified 04/04/04 S.M. Rump set round to nearest for safety
% modified 04/06/05 S.M. Rump rounding unchanged
% modified 11/20/05 S.M. Rump fast check for rounding to nearest
%
e = 1e-30;
if 1+e==1-e % fast check for rounding to nearest
rndold = 0;
else
rndold = getround;
setround(0)
end
if ~isreal(B.c)
error('polynomial must be real (point or interval)')
end
if size(B.e,2)==1 % univariate polynomial
if nargin==1
a = 0;
b = 1;
end
n = length(B.c);
X = linspace(a,b,n);
if isa(B.c,'intval')
Bc = fliplr([B.c.inf B.c.sup]);
XX = [X X];
if n>1
index = convhull(XX,Bc);
else
index = 1:2;
end
plot(XX,Bc,'o',XX(index),Bc(index),'-o')
else
Bc = fliplr(B.c);
if n>2
index = convhull(X,Bc);
else
index = 1:length(Bc);
end
plot(X,Bc,'o',X(index),Bc(index),'-o')
end
elseif size(B.e,2)==2 % multivariate polynomial in two unknowns
error('not yet implemented')
else
error('Bernstein plot only for polynomials in one or two unknowns')
end
setround(rndold)
|
/**
* @file bblas_ztesting.c
*
* @brief BBLAS testing for double _Complex routines.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
*/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @precisions normal z -> c d s
**/
#endif
#include "bblas_common.h"
#if defined(BBLAS_WITH_MKL)
#include <mkl_lapacke.h>
#else
#include <lapacke.h>
#endif
#include <cblas.h>
#define COMPLEX
/**
* Initialize test parameters to their default values.
**/
void bblas_zinit_config (bblas_ztest_t *test)
{
test->gen_uplo =1;
test->gen_transA =1;
test->gen_transB =1;
test->gen_trans =1;
test->gen_side =1;
test->gen_diag =1;
test->minM = 0;
test->minN = 0;
test->minK = 0;
test->maxM = 0;
test->maxN = 0;
test->maxK = 0;
test->minbatch_count = 1;
test->maxbatch_count = 1;
test->batch_opts = 0;
test->routine = 1;
test->nb_test = 1;
test->set_error = 0;
test->global_error = 0;
test->faulty_iter = 0;
test->mkl_sequential = 0;
test->new_accuracy = 1;
}
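/* Typical driver flow (sketch; the field values below are illustrative):
 *   bblas_ztest_t test;
 *   bblas_zinit_config(&test);
 *   test.routine = BBLAS_GEMM;
 *   test.target  = BBLAS_MKL;
 *   test.minM = test.minN = test.minK = 32;
 *   test.maxM = test.maxN = test.maxK = 64;
 *   bblas_zsettest(&test);   // allocates and fills every parameter array
 *   ... run the batch routine and check the results ...
 *   bblas_zfreetest(&test);
 */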
/**
* Set the values of all the BBLAS parameters inside the test structure.
**/
void bblas_zsettest(bblas_ztest_t *test)
{
enum BBLAS_ROUTINE routine = test->routine;
/*
* Set the value of batch count
*/
bblas_zset_batch_count(test);
/*
* Allocate memory and set values for uplo
*/
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K)||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
bblas_zsetuplo(test);
}
/*
* Allocate memory and set values for transA
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
bblas_zsettransA(test);
}
/*
* Allocate memory and set values for transB
*/
if (routine == BBLAS_GEMM)
{
bblas_zsettransB(test);
}
/*
* Allocate memory and set values for trans
*/
if ((routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
bblas_zsettrans(test);
}
/*
* Allocate memory and set values for side
*/
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
bblas_zsetside(test);
}
/*
* Allocate memory and set values for diag
*/
if ((routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
bblas_zsetdiag(test);
}
/*
* Allocate memory and set values for M
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
bblas_zsetM(test);
}
/*
* Allocate memory and set values for N, all routines
*/
bblas_zsetN(test);
/*
* Allocate memory and set values for K
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K))
{
bblas_zsetK(test);
}
/*
* Allocate memory and set values for lda, all routines
*/
bblas_zsetlda(test);
/*
* Allocate memory and set values for ldb, all routines
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
bblas_zsetldb(test);
}
/*
* Allocate memory and set values for ldc, all routines
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K))
{
bblas_zsetldc(test);
}
/*
* Allocate memory and set values for alpha, all routines
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K)||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
bblas_zsetalpha(test);
}
/*
* Allocate memory and set values for alpha
*/
if (routine == BBLAS_HERK)
{
bblas_zsetalpha_herk(test);
}
/*
* Allocate memory and set values for beta
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_SYR2K))
{
bblas_zsetbeta(test);
}
/*
* Allocate memory and set values for beta_herk
*/
if ((routine == BBLAS_HERK) || (routine == BBLAS_HER2K))
{
bblas_zsetbeta_herk(test);
}
/*
* Allocate memory and set values for arrayA
*/
bblas_zsetarrayA(test);
/*
* Allocate memory and set values for arrayB
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
bblas_zsetarrayB(test);
}
/*
* Allocate memory and set values for arrayC
*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K))
{
bblas_zsetarrayC(test);
}
/* Memory allocation for result and error variables */
bblas_zmalloc_result_error(test);
}
/**
* Allocate memory and set the values of uplo
**/
void bblas_zsetuplo(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "uplo";
int random_number;
/*Initialize random number generation */
srand ( time(NULL) ) ;
/*Memory allocation */
test->uplo = (enum BBLAS_UPLO*) malloc(nb_data*sizeof(enum BBLAS_UPLO));
/*Malloc checking */
bblas_malloc_check(test->uplo, ptr_name);
/*set UPLO values */
switch (test->gen_uplo)
{
case UPLO_LOWER:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->uplo[batch_iter] = BblasLower;
}
break;
case UPLO_UPPER:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->uplo[batch_iter] = BblasUpper;
}
break;
default:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
/*Generate a random number */
random_number = rand() % 100;
if (random_number < 50 )
{
test->uplo[batch_iter] = BblasLower;
}else
{
test->uplo[batch_iter] = BblasUpper;
}
}
break;
}
}
/**
* Allocate memory and set the values of transA
**/
void bblas_zsettransA(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "transA";
int random_number;
/*Initialize random number generation */
srand ( time(NULL) ) ;
/*Memory allocation */
test->transA = (enum BBLAS_TRANS*) malloc(nb_data*sizeof(enum BBLAS_TRANS));
/*Malloc checking */
bblas_malloc_check(test->transA, ptr_name);
/*set transA */
switch (test->gen_transA)
{
case NO_TRANS:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->transA[batch_iter] = BblasNoTrans;
}
break;
case TRANS:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->transA[batch_iter] = BblasTrans;
}
break;
case CONJ:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->transA[batch_iter] = BblasConjTrans;
}
break;
default:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
/*Generate a random number */
random_number = rand() % 100;
if (random_number < 50)
{
test->transA[batch_iter] = BblasNoTrans;
}else if (random_number < 80)
{
test->transA[batch_iter] = BblasTrans;
}else
{
test->transA[batch_iter] = BblasConjTrans;
}
}
break;
}
}
/**
* Allocate memory and set the values of transB
**/
void bblas_zsettransB(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "transB";
int random_number;
/*Initialize random number generation */
srand ( time(NULL) ) ;
/*Memory allocation for transB */
test->transB = (enum BBLAS_TRANS*) malloc(nb_data*sizeof(enum BBLAS_TRANS));
/*checking memory allocation */
bblas_malloc_check(test->transB, ptr_name);
/*set transB */
switch (test->gen_transB)
{
case NO_TRANS:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->transB[batch_iter] = BblasNoTrans;
}
break;
case TRANS:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->transB[batch_iter] = BblasTrans;
}
break;
case CONJ:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->transB[batch_iter] = BblasConjTrans;
}
break;
default:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
/*Generate a random number */
random_number = rand() % 100;
if (random_number < 50)
{
test->transB[batch_iter] = BblasNoTrans;
}else if (random_number < 80)
{
test->transB[batch_iter] = BblasTrans;
}else
{
test->transB[batch_iter] = BblasConjTrans;
}
}
break;
}
}
/**
* Allocate memory and set the values of trans
**/
void bblas_zsettrans(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "trans";
int random_number;
/*Initialize random number generation */
srand ( time(NULL) ) ;
/*Memory allocation for trans */
test->trans = (enum BBLAS_TRANS*) malloc(nb_data*sizeof(enum BBLAS_TRANS));
/*checking memory allocation */
bblas_malloc_check(test->trans, ptr_name);
/*Set the values of trans */
switch (test->gen_trans)
{
case NO_TRANS:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->trans[batch_iter] = BblasNoTrans;
}
break;
case TRANS:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->trans[batch_iter] = BblasTrans;
}
break;
case CONJ:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->trans[batch_iter] = BblasConjTrans;
}
break;
default:
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
/*Generate a random number */
random_number = rand() % 100;
if (random_number < 50)
{
test->trans[batch_iter] = BblasNoTrans;
}else if (random_number < 80)
{
test->trans[batch_iter] = BblasTrans;
}else
{
test->trans[batch_iter] = BblasConjTrans;
}
}
break;
}
}
/**
* Allocate memory and set the values of side
**/
void bblas_zsetside(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "side";
int random_number;
/*Initialize random number generation */
srand ( time(NULL) ) ;
/*Memory allocation for side */
test->side = (enum BBLAS_SIDE*) malloc(nb_data*sizeof(enum BBLAS_SIDE));
/*checking memory allocation */
bblas_malloc_check(test->side, ptr_name);
/*Set the values of side */
switch (test->gen_side)
{
case SIDE_LEFT:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->side[batch_iter] = BblasLeft;
}
break;
case SIDE_RIGHT:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->side[batch_iter] = BblasRight;
}
break;
default:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
/*Generate a random number */
random_number = rand() % 100;
if (random_number < 50 )
{
test->side[batch_iter] = BblasLeft;
}else
{
test->side[batch_iter] = BblasRight;
}
}
break;
}
}
/**
* Allocate memory and set the values of diag
**/
void bblas_zsetdiag(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "diag";
int random_number;
/*Initialize random number generation */
srand ( time(NULL) ) ;
/*Memory allocation for diag */
test->diag = (enum BBLAS_DIAG*) malloc(nb_data*sizeof(enum BBLAS_DIAG));
/*checking memory allocation */
bblas_malloc_check(test->diag, ptr_name);
/*Set the values of diag */
switch (test->gen_diag)
{
case DIAG_NO_U:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->diag[batch_iter] = BblasNonUnit;
}
break;
case DIAG_U:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->diag[batch_iter] = BblasUnit;
}
break;
default:
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
/*Generate a random number */
random_number = rand() % 100;
if (random_number < 50 )
{
test->diag[batch_iter] = BblasNonUnit;
}else
{
test->diag[batch_iter] = BblasUnit;
}
}
break;
}
}
/**
* Allocate memory and set the values of M
**/
void bblas_zsetM(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "M";
/*Memory allocation for M */
test->M = (int*) malloc(nb_data*sizeof(int));
/*checking memory allocation */
bblas_malloc_check(test->M, ptr_name);
/*Set the values of M */
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->M[batch_iter] = irandRange(test->minM, test->maxM);
}
}
/**
* Allocate memory and set the values of N
**/
void bblas_zsetN(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "N";
/*Memory allocation for N */
test->N = (int*) malloc(nb_data*sizeof(int));
/*checking memory allocation */
bblas_malloc_check(test->N, ptr_name);
/*Set the values of N */
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->N[batch_iter] = irandRange(test->minN, test->maxN);
}
}
/**
* Allocate memory and set the values of K
**/
void bblas_zsetK(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "K";
/*Memory allocation for K */
test->K = (int*) malloc(nb_data*sizeof(int));
/*checking memory allocation */
bblas_malloc_check(test->K, ptr_name);
/*Set the values of K */
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->K[batch_iter] = irandRange(test->minK, test->maxK);
}
}
/**
* Allocate memory and set the values of lda
**/
void bblas_zsetlda(bblas_ztest_t *test)
{
int batch_iter;
int nb_data = bblas_znbdata(test);
int routine = test->routine;
char ptr_name[NAME_LENGTH] = "lda";
/*Memory allocation for lda */
test->lda = (int*) malloc(nb_data*sizeof(int));
/*checking memory allocation */
bblas_malloc_check(test->lda, ptr_name);
/*LDA for GEMM */
if (routine == BBLAS_GEMM)
{
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
if (test->transA[batch_iter] == BblasNoTrans)
{
test->lda[batch_iter] = test->M[batch_iter];
}else
{
test->lda[batch_iter] =test->K[batch_iter] ;
}
}
}
/*LDA for SYMM, HEMM, TRMM AND TRSM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
if (test->side[batch_iter] == BblasLeft)
{
test->lda[batch_iter] = test->M[batch_iter];
}else
{
test->lda[batch_iter] =test->N[batch_iter] ;
}
}
}
/*LDA for SYRK, HERK, SYR2K, HER2K */
if ((routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
if (test->trans[batch_iter] == BblasNoTrans)
{
test->lda[batch_iter] = test->N[batch_iter];
}else
{
test->lda[batch_iter] =test->K[batch_iter] ;
}
}
}
}
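/* Note: storage is column-major, so lda is the row count of the stored A
 * block: M or K for GEMM (depending on transA), M or N for the side operand
 * of SYMM/HEMM/TRMM/TRSM, and N or K for the rank-k updates (depending on
 * trans). The ldb/ldc setters below follow the same convention. */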
/**
* Allocate memory and set the values of ldb
**/
void bblas_zsetldb(bblas_ztest_t *test)
{
int batch_iter;
int routine = test->routine;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "ldb";
/*Memory allocation for ldb */
test->ldb = (int*) malloc(nb_data*sizeof(int));
/*checking memory allocation */
bblas_malloc_check(test->ldb, ptr_name);
/*LDB for GEMM */
if ((routine == BBLAS_GEMM) )
{
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
if (test->transB[batch_iter] == BblasNoTrans)
{
test->ldb[batch_iter] = test->K[batch_iter];
}else
{
test->ldb[batch_iter] =test->N[batch_iter] ;
}
}
}
/*LDB SYMM, HEMM, TRMM AND TRSM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
test->ldb[batch_iter] = test->M[batch_iter];
}
}
/*LDB for SYR2K, HER2K */
if ((routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
for( batch_iter =0; batch_iter < nb_data; batch_iter++)
{
if (test->trans[batch_iter] == BblasNoTrans)
{
test->ldb[batch_iter] = test->N[batch_iter];
}else
{
test->ldb[batch_iter] =test->K[batch_iter] ;
}
}
}
}
/**
* Allocate memory and set the values of ldc
**/
void bblas_zsetldc(bblas_ztest_t *test)
{
int batch_iter;
int routine = test->routine;
int nb_data = bblas_znbdata(test);
char ptr_name[NAME_LENGTH] = "ldc";
/*Memory allocation for ldc */
test->ldc = (int*) malloc(nb_data*sizeof(int));
/*checking memory allocation */
bblas_malloc_check(test->ldc, ptr_name);
/*LDC for GEMM */
if (routine == BBLAS_GEMM)
{
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->ldc[batch_iter] = test->M[batch_iter];
}
}
/*LDC for SYMM, HEMM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM))
{
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->ldc[batch_iter] = test->M[batch_iter];
}
}
/*LDC for SYRK, HERK, SYR2K, HER2K */
if ((routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
for( batch_iter =0; batch_iter < nb_data ; batch_iter++)
{
test->ldc[batch_iter] = test->N[batch_iter];
}
}
}
/**
* Allocate memory and set the values of alpha
**/
void bblas_zsetalpha(bblas_ztest_t *test)
{
int batch_iter;
int batch_count = test->batch_count;
char ptr_name[NAME_LENGTH] = "alpha";
/*Memory allocation for alpha */
test->alpha = (BBLAS_Complex64_t*) malloc(batch_count*sizeof(BBLAS_Complex64_t));
/*checking memory allocation */
bblas_malloc_check(test->alpha, ptr_name);
/* Set value of alpha */
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
test->alpha[batch_iter] = ((BBLAS_Complex64_t)rand()/(BBLAS_Complex64_t)RAND_MAX);
}
}
/**
* Allocate memory and set the values of alpha specifically for herk.
**/
void bblas_zsetalpha_herk(bblas_ztest_t *test)
{
int batch_iter;
int batch_count = test->batch_count;
char ptr_name[NAME_LENGTH] = "alpha_herk";
/*Memory allocation for alpha_herk */
test->alpha_herk = (double*) malloc(batch_count*sizeof(double));
/*checking memory allocation */
bblas_malloc_check(test->alpha_herk, ptr_name);
/* Set value of alpha_herk */
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
test->alpha_herk[batch_iter] = ((double)rand()/(double)RAND_MAX);
}
}
/**
* Allocate memory and set the values of beta.
**/
void bblas_zsetbeta(bblas_ztest_t *test)
{
int batch_iter;
int batch_count = test->batch_count;
char ptr_name[NAME_LENGTH] = "beta";
/*Memory allocation for beta */
test->beta = (BBLAS_Complex64_t*) malloc(batch_count*sizeof(BBLAS_Complex64_t));
/*checking memory allocation */
bblas_malloc_check(test->beta, ptr_name);
/* Set value of beta */
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
test->beta[batch_iter] = ((BBLAS_Complex64_t)rand()/(BBLAS_Complex64_t)RAND_MAX);
}
}
/**
* Allocate memory and set the values of beta specifically for herk.
**/
void bblas_zsetbeta_herk(bblas_ztest_t *test)
{
int batch_iter;
int batch_count = test->batch_count;
char ptr_name[NAME_LENGTH] = "beta_herk";
/*Memory allocation for beta_herk */
test->beta_herk = (double*) malloc(batch_count*sizeof(double));
/*checking memory allocation */
bblas_malloc_check(test->beta_herk, ptr_name);
/* Set value of beta_herk */
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
test->beta_herk[batch_iter] = ((double)rand()/(double)RAND_MAX);
}
}
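/* HERK takes a real alpha, and HERK/HER2K take a real beta (the updated C
 * must stay Hermitian), hence the separate double-valued alpha_herk and
 * beta_herk arrays above; HER2K's complex alpha still comes from
 * bblas_zsetalpha. */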
/**
* Allocate memory and set the values of arrayA.
**/
void bblas_zsetarrayA(bblas_ztest_t *test)
{
int batch_iter, nb_row, nb_col;
int first_index = 0;
int routine = test->routine;
int batch_count = test->batch_count;
char ptr_name[NAME_LENGTH] = "arrayA";
int IONE = 1;
int ISEED[4] ={0,0,0,1};
/*Memory allocation for **arrayA */
test->arrayA = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->arrayA, ptr_name);
if( test->batch_opts == BBLAS_VARIABLE )
{
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
nb_row = test->lda[batch_iter];
/* nb_col for GEMM */
if (routine == BBLAS_GEMM)
{
if (test->transA[batch_iter] == BblasNoTrans)
{
nb_col =test->K[batch_iter];
}else
{
nb_col =test->M[batch_iter];
}
}
/* nb_col SYMM, HEMM, TRMM AND TRSM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
if(test->side[batch_iter] == BblasLeft )
{
nb_col = test->M[batch_iter];
}else
{
nb_col = test->N[batch_iter];
}
}
/* nb_col for SYRK, HERK, SYR2K, HER2K */
if ((routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
if (test->trans[batch_iter] == BblasNoTrans)
{
nb_col = test->K[batch_iter];
}else
{
nb_col = test->N[batch_iter];
}
}
/*Matrix filling */
test->arrayA[batch_iter] = (BBLAS_Complex64_t *) malloc(nb_row*nb_col* sizeof(BBLAS_Complex64_t ));
bblas_malloc_check(test->arrayA[batch_iter], ptr_name);
#if defined(BBLAS_WITH_MKL)
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, (MKL_Complex16*) test->arrayA[batch_iter]);
#else
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, test->arrayA[batch_iter]);
#endif
if( (routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
/* Shift the diagonal: in column-major storage entry (i,i) sits at
   i*nb_row+i, and i must stay below both dimensions to remain in bounds. */
for(int i=0; i<nb_row && i<nb_col; i++)
{
test->arrayA[batch_iter][nb_row*i+i] = test->arrayA[batch_iter][nb_row*i+i] + 1.0;
}
}
if(routine == BBLAS_HEMM )
{
/* A Hermitian input needs a real diagonal; entry (i,i) is at i*nb_row+i. */
for(int i=0; i<nb_row && i<nb_col; i++)
{
test->arrayA[batch_iter][nb_row*i+i] = creal(test->arrayA[batch_iter][nb_row*i+i]);
}
}
}
}else if( test->batch_opts == BBLAS_FIXED )
{
nb_row = test->lda[first_index];
/* nb_col for GEMM */
if (routine == BBLAS_GEMM)
{
if (test->transA[first_index] == BblasNoTrans)
{
nb_col =test->K[first_index];
}else
{
nb_col =test->M[first_index];
}
}
/* nb_col SYMM, HEMM, TRMM AND TRSM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
if(test->side[first_index] == BblasLeft )
{
nb_col = test->M[first_index];
}else
{
nb_col = test->N[first_index];
}
}
/* nb_col for SYRK, HERK, SYR2K, HER2K */
if ((routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
if (test->trans[first_index] == BblasNoTrans)
{
nb_col = test->K[first_index];
}else
{
nb_col = test->N[first_index];
}
}
/*Matrix filling */
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
test->arrayA[batch_iter] = (BBLAS_Complex64_t *) malloc(nb_row*nb_col* sizeof(BBLAS_Complex64_t ));
bblas_malloc_check(test->arrayA[batch_iter], ptr_name);
#if defined(BBLAS_WITH_MKL)
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, (MKL_Complex16*) test->arrayA[batch_iter]);
#else
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, test->arrayA[batch_iter]);
#endif
if( (routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
/* Shift the diagonal: column-major entry (i,i) sits at i*nb_row+i. */
for(int i=0; i<nb_row && i<nb_col; i++)
{
test->arrayA[batch_iter][nb_row*i+i] = test->arrayA[batch_iter][nb_row*i+i] + 1.0;
}
}
if(routine == BBLAS_HEMM )
{
/* A Hermitian input needs a real diagonal; entry (i,i) is at i*nb_row+i. */
for(int i=0; i<nb_row && i<nb_col; i++)
{
test->arrayA[batch_iter][nb_row*i+i] = creal(test->arrayA[batch_iter][nb_row*i+i]);
}
}
}
}else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value");
}
}
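/* The +1.0 diagonal shift above nudges the random triangular factors used by
 * TRMM/TRSM away from singularity, so the subsequent solves stay well
 * conditioned. */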
/**
* Allocate memory and set the values of arrayB.
**/
void bblas_zsetarrayB(bblas_ztest_t *test)
{
int batch_iter, nb_row, nb_col;
int first_index = 0;
int routine = test->routine;
int batch_count = test->batch_count;
char ptr_name[NAME_LENGTH] = "arrayB";
int max_work_size = max(test->maxK, max(test->maxM, test->maxN));
int IONE = 1;
int ISEED[4] ={0,0,0,1};
/*Memory allocation for arrayB */
test->arrayB = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->arrayB, ptr_name);
test->Binitnorm = (double *)malloc(batch_count*sizeof(double));
bblas_malloc_check(test->Binitnorm, "Binitnorm");
double *work = (double *)malloc(max_work_size*sizeof(double));
bblas_malloc_check(work, "work");
if( test->batch_opts == BBLAS_VARIABLE )
{
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
nb_row =test->ldb[batch_iter];
/* nb_col for GEMM */
if (routine == BBLAS_GEMM)
{
if (test->transB[batch_iter] == BblasNoTrans)
{
nb_col = test->N[batch_iter];
}else
{
nb_col = test->K[batch_iter];
}
}
/* nb_col for SYMM, HEMM, TRMM AND TRSM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
nb_col =test->N[batch_iter];
}
/* nb_col for SYR2K, HER2K */
if ((routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
if (test->trans[batch_iter] == BblasNoTrans)
{
nb_col = test->K[batch_iter];
}else
{
nb_col = test->N[batch_iter];
}
}
test->arrayB[batch_iter] = (BBLAS_Complex64_t *) malloc(nb_row*nb_col* sizeof(BBLAS_Complex64_t ));
bblas_malloc_check(test->arrayB[batch_iter], ptr_name);
#if defined(BBLAS_WITH_MKL)
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, (MKL_Complex16*) test->arrayB[batch_iter]);
#else
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, test->arrayB[batch_iter]);
#endif
/*Compute the infinity norm of B */
if( (routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
#if defined(BBLAS_WITH_MKL)
test->Binitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col,
(MKL_Complex16*) test->arrayB[batch_iter],
test->ldb[batch_iter], work);
#else
test->Binitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col,
test->arrayB[batch_iter],
test->ldb[batch_iter], work);
#endif
}
}
}else if( test->batch_opts == BBLAS_FIXED )
{
nb_row =test->ldb[first_index];
/* nb_col for GEMM */
if (routine == BBLAS_GEMM)
{
if (test->transB[first_index] == BblasNoTrans)
{
nb_col = test->N[first_index];
}else
{
nb_col = test->K[first_index];
}
}
/* nb_col for SYMM, HEMM, TRMM AND TRSM */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
nb_col =test->N[first_index];
}
/* nb_col for SYR2K, HER2K */
if ((routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
if (test->trans[first_index] == BblasNoTrans)
{
nb_col = test->K[first_index];
}else
{
nb_col = test->N[first_index];
}
}
/*Matrix filling */
for( batch_iter =0; batch_iter < batch_count; batch_iter++)
{
test->arrayB[batch_iter] = (BBLAS_Complex64_t *) malloc(nb_row*nb_col* sizeof(BBLAS_Complex64_t ));
bblas_malloc_check(test->arrayB[batch_iter], ptr_name);
#if defined(BBLAS_WITH_MKL)
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, (MKL_Complex16*) test->arrayB[batch_iter]);
#else
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, test->arrayB[batch_iter]);
#endif
/*Compute the infinity norm of B */
if( (routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
#if defined(BBLAS_WITH_MKL)
test->Binitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col,
(MKL_Complex16*) test->arrayB[batch_iter],
test->ldb[first_index], work);
#else
test->Binitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col,
test->arrayB[batch_iter],
test->ldb[first_index], work);
#endif
}
}
}else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value");
}
/*Free work */
free(work);
}
/**
* Allocate memory and set the values of arrayC.
**/
void bblas_zsetarrayC(bblas_ztest_t *test)
{
int batch_iter, nb_row, nb_col;
int first_index = 0;
int batch_count = test->batch_count;
int max_work_size = max(test->maxK, max(test->maxM, test->maxN));
char ptr_name[NAME_LENGTH] = "arrayC";
int IONE = 1;
int ISEED[4] ={0,0,0,1};
test->arrayC = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->arrayC, ptr_name);
test->Cinitnorm = (double *)malloc(batch_count*sizeof(double));
bblas_malloc_check(test->Cinitnorm, "Cinitnorm");
double *work = (double *)malloc(max_work_size*sizeof(double));
bblas_malloc_check(work, "work");
if( test->batch_opts == BBLAS_VARIABLE )
{
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
nb_row =test->ldc[batch_iter];
nb_col =test->N[batch_iter];
test->arrayC[batch_iter] = (BBLAS_Complex64_t *) malloc(nb_row*nb_col* sizeof(BBLAS_Complex64_t ));
bblas_malloc_check(test->arrayC[batch_iter], ptr_name);
#if defined(BBLAS_WITH_MKL)
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, (MKL_Complex16*) test->arrayC[batch_iter]);
#else
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, test->arrayC[batch_iter]);
#endif
if( (test->routine == BBLAS_HERK) || (test->routine == BBLAS_HER2K))
{
/* A Hermitian C needs a real diagonal; column-major entry (i,i) is at i*nb_row+i. */
for(int i=0; i<nb_row && i<nb_col; i++)
{
test->arrayC[batch_iter][nb_row*i+i] = creal(test->arrayC[batch_iter][nb_row*i+i]);
}
}
/*Computation of the norm of C */
#if defined(BBLAS_WITH_MKL)
test->Cinitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col, (MKL_Complex16*) test->arrayC[batch_iter],
test->ldc[batch_iter], work);
#else
test->Cinitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col, test->arrayC[batch_iter],
test->ldc[batch_iter], work);
#endif
}
}else if( test->batch_opts == BBLAS_FIXED )
{
nb_row =test->ldc[first_index];
nb_col =test->N[first_index];
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
test->arrayC[batch_iter] = (BBLAS_Complex64_t *) malloc(nb_row*nb_col* sizeof(BBLAS_Complex64_t ));
bblas_malloc_check(test->arrayC[batch_iter], ptr_name);
#if defined(BBLAS_WITH_MKL)
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, (MKL_Complex16*) test->arrayC[batch_iter]);
#else
LAPACKE_zlarnv_work(IONE, ISEED, nb_row*nb_col, test->arrayC[batch_iter]);
#endif
if( (test->routine == BBLAS_HERK) || (test->routine == BBLAS_HER2K))
{
/* A Hermitian C needs a real diagonal; column-major entry (i,i) is at i*nb_row+i. */
for(int i=0; i<nb_row && i<nb_col; i++)
{
test->arrayC[batch_iter][nb_row*i+i] = creal(test->arrayC[batch_iter][nb_row*i+i]);
}
}
/*Computation of the norm of C */
#if defined(BBLAS_WITH_MKL)
test->Cinitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I', nb_row, nb_col,
(MKL_Complex16*) test->arrayC[batch_iter],
test->ldc[first_index], work);
#else
test->Cinitnorm[batch_iter] = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
'I',nb_row, nb_col, test->arrayC[batch_iter],
test->ldc[first_index], work);
#endif
}
} else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value");
}
/*Free work */
free(work);
}
/**
* Allocate memory for error checking.
**/
void bblas_zmalloc_result_error(bblas_ztest_t *test)
{
int batch_count = test->batch_count;
/*Memory for error computation */
switch(test->target)
{
case BBLAS_MKL:
test->mkl_result = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->mkl_result, "mkl_result");
test->mkl_error = (double*) malloc(batch_count*sizeof(double));
bblas_malloc_check(test->mkl_error, "mkl_error");
break;
case BBLAS_CUBLAS:
case BBLAS_MAGMA:
test->device_result = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->device_result, "device_result");
test->device_error = (double*) malloc(batch_count*sizeof(double));
bblas_malloc_check(test->device_error, "device_error");
break;
case BBLAS_OTHER:
test->other_result = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->other_result, "other_result");
test->other_error = (double*) malloc(batch_count*sizeof(double));
bblas_malloc_check(test->other_error, "other_error");
break;
case BBLAS_CUMKL:
test->mkl_result = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->mkl_result, "mkl_result");
test->device_result = (BBLAS_Complex64_t**) malloc(batch_count*sizeof(BBLAS_Complex64_t*));
bblas_malloc_check(test->device_result, "device_result");
test->mkl_error = (double*) malloc(batch_count*sizeof(double));
bblas_malloc_check(test->mkl_error, "mkl_error");
test->device_error = (double*) malloc(batch_count*sizeof(double));
bblas_malloc_check(test->device_error, "device_error");
break;
default:
printf("Memory alloation for error: Target no defined\n");
exit(EXIT_FAILURE);
}
/*Memory for info */
test->info = (int*) malloc(batch_count*sizeof(int));
}
/**
* Allocate memory and copy the initial arrayC values.
* This is required to take norms after the computation is complete.
**/
void bblas_zcopy_Cinit(bblas_ztest_t *test, BBLAS_Complex64_t **C_copy)
{
/*Local variables */
enum BBLAS_ROUTINE routine = test->routine;
int batch_count = test->batch_count;
int batch_iter, first_index = 0;
int ldc, N;
if (!((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K)))
{
printf("BBLAS FATAL ERROR: bblas_ztesting.c():\n");
printf("\t bblas_zcopy_Cinit not defined for %s \n", bblas_getroutine(test->routine));
exit(EXIT_FAILURE);
}
if( test->batch_opts == BBLAS_VARIABLE ) // Variable size
{
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
ldc = test->ldc[batch_iter];
N = test->N[batch_iter];
C_copy[batch_iter] = (BBLAS_Complex64_t *) malloc(ldc*N* sizeof(BBLAS_Complex64_t ));
/*Copy the matrix {C}_i */
cblas_zcopy (ldc*N, test->arrayC[batch_iter], 1, C_copy[batch_iter], 1);
}
}else if( test->batch_opts == BBLAS_FIXED ) //fixed size
{
ldc = test->ldc[first_index];
N = test->N[first_index];
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
C_copy[batch_iter] = (BBLAS_Complex64_t *) malloc(ldc*N* sizeof(BBLAS_Complex64_t ));
/*Copy the matrix {C}_i */
cblas_zcopy (ldc*N, test->arrayC[batch_iter], 1, C_copy[batch_iter], 1);
}
}else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value\n");
}
}
/**
* Allocate memory and copy the initial arrayB values.
* This is required to take norms after the computation is complete.
**/
void bblas_zcopy_Binit(bblas_ztest_t *test, BBLAS_Complex64_t **B_copy)
{
/*Local variables */
enum BBLAS_ROUTINE routine = test->routine;
int batch_count = test->batch_count;
int batch_iter, first_index = 0;
int ldb, N;
if (!((routine == BBLAS_TRMM) || (routine == BBLAS_TRSM)))
{
printf("BBLAS FATAL ERROR: bblas_ztesting.c():\n");
printf("\t bblas_zcopy_Binit not defined for %s \n", bblas_getroutine(test->routine));
exit(EXIT_FAILURE);
}
if( test->batch_opts == BBLAS_VARIABLE )
{
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
ldb = test->ldb[batch_iter];
N = test->N[batch_iter];
B_copy[batch_iter] = (BBLAS_Complex64_t *) malloc(ldb*N* sizeof(BBLAS_Complex64_t ));
/*Copy the matrix {B}_i */
cblas_zcopy (ldb*N, test->arrayB[batch_iter], 1, B_copy[batch_iter], 1);
}
}else if( test->batch_opts == BBLAS_FIXED ) //fixed size
{
ldb = test->ldb[first_index];
N = test->N[first_index];
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
B_copy[batch_iter] = (BBLAS_Complex64_t *) malloc(ldb*N* sizeof(BBLAS_Complex64_t ));
/*Copy the matrix {B}_i */
cblas_zcopy (ldb*N, test->arrayB[batch_iter], 1, B_copy[batch_iter], 1);
}
}else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value\n");
}
}
/**
* Compute the relative error of each batch operation.
**/
void bblas_zcheck_Cfinal(bblas_ztest_t *test, BBLAS_Complex64_t **C_final)
{
/*Local variables */
enum BBLAS_ROUTINE routine = test->routine;
int batch_count = test->batch_count;
int batch_iter, first_index = 0;
int ldc, N;
/* Error calculation variables */
double Cnorm, Error_norm;
BBLAS_Complex64_t alpha =-1;
/*Check if the call has been made by the correct routine */
if (!((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K)))
{
printf("BBLAS FATAL ERROR: bblas_ztesting.c():\n");
printf("\t bblas_zcheck_Cfinal not defined for %s \n", bblas_getroutine(test->routine));
exit(EXIT_FAILURE);
}
/*Temporary buffer to save (test->arrayC - C_final) */
BBLAS_Complex64_t **C_diff;
C_diff = (BBLAS_Complex64_t **) malloc(batch_count*sizeof(BBLAS_Complex64_t *));
/*Make a copy of test->arrayC in C_diff */
bblas_zcopy_Cinit(test, C_diff);
/*Variable size */
if( test->batch_opts == BBLAS_VARIABLE )
{
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
ldc = test->ldc[batch_iter];
N = test->N[batch_iter];
/*Computation of the Frobenius norm of {C}_batch_iter */
Cnorm = cblas_dznrm2(ldc*N, test->arrayC[batch_iter], 1);
/*Compute the error */
cblas_zaxpy (ldc*N, CBLAS_SADDR(alpha), C_final[batch_iter], 1, C_diff[batch_iter], 1);
/*Compute the norm associated with the error */
Error_norm = cblas_dznrm2(ldc*N, C_diff[batch_iter], 1);
/*Compute the relative error */
switch(test->target)
{
case BBLAS_MKL:
test->mkl_error[batch_iter] = Error_norm/Cnorm;
break;
case BBLAS_CUBLAS:
case BBLAS_MAGMA:
test->device_error[batch_iter] = Error_norm/Cnorm;
break;
case BBLAS_OTHER:
test->other_error[batch_iter] = Error_norm/Cnorm;
break;
default:
printf("In bblas_zcheck_Cfinal, Variable: Target no defined\n");
exit(EXIT_FAILURE);
}
}
}else if( test->batch_opts == BBLAS_FIXED ) //fixed size
{
ldc = test->ldc[first_index];
N = test->N[first_index];
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
/*Computation of the Frobenius norm of {C}_batch_iter */
Cnorm = cblas_dznrm2(ldc*N, test->arrayC[batch_iter], 1);
/*Compute the error */
cblas_zaxpy (ldc*N, CBLAS_SADDR(alpha), C_final[batch_iter], 1, C_diff[batch_iter], 1);
/*Compute the norm associated with the error */
Error_norm = cblas_dznrm2(ldc*N, C_diff[batch_iter], 1);
/*Compute the relative error */
switch(test->target)
{
case BBLAS_MKL:
test->mkl_error[batch_iter] = Error_norm/Cnorm;
break;
case BBLAS_CUBLAS:
case BBLAS_MAGMA:
test->device_error[batch_iter] = Error_norm/Cnorm;
break;
case BBLAS_OTHER:
test->other_error[batch_iter] = Error_norm/Cnorm;
break;
default:
printf("In bblas_zcheck_Cfinal: Fixed, Target no defined\n");
exit(EXIT_FAILURE);
}
}
}else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value");
}
/*Free C_diff */
for(batch_iter=0; batch_iter < batch_count ; batch_iter++)
{
free(C_diff[batch_iter]);
}
free(C_diff);
}
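/* Usage sketch (illustrative): run a reference and a target batch routine on
 * identical inputs, pass the reference C matrices here as C_final, and read
 * the per-matrix relative errors from mkl_error / device_error / other_error
 * according to test->target. bblas_zcheck_Bfinal below plays the same role
 * for the B-updating routines TRMM/TRSM. */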
/**
* Compute the relative error of each batch operation.
**/
void bblas_zcheck_Bfinal(bblas_ztest_t *test, BBLAS_Complex64_t **B_final)
{
/*Local variables */
enum BBLAS_ROUTINE routine = test->routine;
int batch_count = test->batch_count;
int batch_iter, first_index = 0;
int ldb, N;
/* Error calculation variables */
double Bnorm, Error_norm;
BBLAS_Complex64_t alpha =-1;
/*Check if the call has been made by the correct routine */
if (!((routine == BBLAS_TRSM) || (routine == BBLAS_TRMM)))
{
printf("BBLAS FATAL ERROR: bblas_ztesting.c():\n");
printf("\t bblas_zcheck_Bfinal not defined for %s \n", bblas_getroutine(test->routine));
exit(EXIT_FAILURE);
}
/*Temporary buffer to save (test->arrayB - B_final) */
BBLAS_Complex64_t **B_diff;
B_diff = (BBLAS_Complex64_t **) malloc(batch_count*sizeof(BBLAS_Complex64_t *));
/*Make a copy of test->arrayB in B_diff */
bblas_zcopy_Binit(test, B_diff);
/*Variable size */
if( test->batch_opts == BBLAS_VARIABLE )
{
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
ldb = test->ldb[batch_iter];
N = test->N[batch_iter];
/*Computation of the Frobenius norm of {B}_batch_iter */
Bnorm = cblas_dznrm2(ldb*N, test->arrayB[batch_iter], 1);
/*Compute the error */
cblas_zaxpy (ldb*N, CBLAS_SADDR(alpha), B_final[batch_iter], 1, B_diff[batch_iter], 1);
/*Compute the norm associated with the error */
Error_norm = cblas_dznrm2(ldb*N, B_diff[batch_iter], 1);
/*Compute the relative error */
switch(test->target)
{
case BBLAS_MKL:
test->mkl_error[batch_iter] = Error_norm/Bnorm;
break;
case BBLAS_CUBLAS:
case BBLAS_MAGMA:
test->device_error[batch_iter] = Error_norm/(Bnorm);
break;
case BBLAS_OTHER:
test->other_error[batch_iter] = Error_norm/(Bnorm);
break;
default:
printf("In bblas_zcheck_Bfinal(): Target no defined\n");
exit(EXIT_FAILURE);
}
}
}else if( test->batch_opts == BBLAS_FIXED )
{
ldb = test->ldb[first_index];
N = test->N[first_index];
for( batch_iter =0; batch_iter < batch_count ; batch_iter++)
{
/*Computation of the Frobenius norm of {B}_batch_iter */
Bnorm = cblas_dznrm2(ldb*N, test->arrayB[batch_iter], 1);
/*Compute the error */
cblas_zaxpy (ldb*N, CBLAS_SADDR(alpha), B_final[batch_iter], 1, B_diff[batch_iter], 1);
/*Compute the norm associated with the error */
Error_norm = cblas_dznrm2(ldb*N, B_diff[batch_iter], 1);
/*Compute the relative error */
switch(test->target)
{
case BBLAS_MKL:
test->mkl_error[batch_iter] = Error_norm/(Bnorm);
break;
case BBLAS_CUBLAS:
case BBLAS_MAGMA:
test->device_error[batch_iter] = Error_norm/(Bnorm);
break;
case BBLAS_OTHER:
test->other_error[batch_iter] = Error_norm/(Bnorm);
break;
default:
printf("In bblas_zcheck_Bfinal(): Target no defined\n");
exit(EXIT_FAILURE);
}
}
}else
{
bblas_error("bblas_ztesting.c", "wrong batch_opts value");
}
/*Free B_diff */
for(batch_iter=0; batch_iter < batch_count ; batch_iter++)
{
free(B_diff[batch_iter]);
}
free(B_diff);
}
/**
* Free the memory associated with the test structure.
**/
void bblas_zfreetest( bblas_ztest_t *test )
{
enum BBLAS_ROUTINE routine = test->routine;
/*Free uplo */
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K)||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
free(test->uplo);
}
/*Free TRANSA */
if ((routine == BBLAS_GEMM) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
free( test->transA );
}
/*Free TRANSB */
if (routine == BBLAS_GEMM)
{
free( test->transB );
}
/*Free TRANS */
if ((routine == BBLAS_SYRK) || (routine == BBLAS_HERK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K))
{
free( test->trans );
}
/*Free SIDE*/
if ((routine == BBLAS_SYMM) || (routine == BBLAS_HEMM) ||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
free( test->side );
}
/*Free DIAG*/
if ((routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
free(test->diag);
}
/* Free M memory*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
free( test->M );
}
/* Free N memory*/
free( test->N );
/* Free K memory*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K))
{
free( test->K );
}
/*Free LDA memory */
free( test->lda );
/*Free LDB memory */
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
free( test->ldb );
}
/*Free LDC memory */
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K))
{
free( test->ldc );
}
/*Free ALPHA memory */
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) ||(routine == BBLAS_SYRK) ||
(routine == BBLAS_SYR2K)|| (routine == BBLAS_HER2K)||
(routine == BBLAS_TRMM) || (routine == BBLAS_TRSM))
{
free(test->alpha);
}else if (routine == BBLAS_HERK)
{
free(test->alpha_herk);
}
/*Free ALPHA BETA memory */
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_SYR2K))
{
free( test->beta );
}else if ((routine == BBLAS_HERK) || (routine == BBLAS_HER2K))
{
free(test->beta_herk);
}
/*Free matrices {A}_i */
for ( int batch_iter = 0; batch_iter < test->batch_count; batch_iter++ )
{
free( test->arrayA[batch_iter] );
}
free( test->arrayA );
/*Free matrices {B}_i*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
for ( int batch_iter = 0; batch_iter < test->batch_count; batch_iter++ )
{
free( test->arrayB[batch_iter] );
}
free( test->arrayB );
}
/*Free matrices {C}_i*/
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K))
{
for ( int batch_iter = 0; batch_iter < test->batch_count; batch_iter++ )
{
free( test->arrayC[batch_iter] );
}
free( test->arrayC );
}
/*Free memory allocated for error computation */
switch(test->target)
{
case BBLAS_MKL:
free(test->mkl_error);
//free(test->group_size);
for ( int batch_iter = 0; batch_iter < test->batch_count; batch_iter++ )
{
free( test->mkl_result[batch_iter] );
}
free( test->mkl_result );
break;
case BBLAS_CUBLAS:
case BBLAS_MAGMA:
free(test->device_error);
for (int batch_iter =0; batch_iter < test->batch_count; batch_iter++)
{
free(test->device_result[batch_iter]);
}
free(test->device_result);
break;
case BBLAS_OTHER:
free(test->other_error);
for (int batch_iter =0; batch_iter < test->batch_count; batch_iter++)
{
free(test->other_result[batch_iter]);
}
free(test->other_result);
break;
case BBLAS_CUMKL:
free(test->mkl_error);
free(test->device_error);
free(test->group_size);
for (int batch_iter =0; batch_iter < test->batch_count; batch_iter++)
{
free(test->device_result[batch_iter]);
free(test->mkl_result[batch_iter]);
}
free(test->device_result);
free(test->mkl_result);
break;
default:
printf("In bblas_zfreetest(): Target no defined\n");
exit(EXIT_FAILURE);
}
/*Free INFO */
free( test->info );
/*Free cuda memory */
if( (test->target == BBLAS_CUBLAS) || (test->target == BBLAS_MAGMA))
{
bblas_zcudaFree(test);
}
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYR2K) ||
(routine == BBLAS_HER2K) || (routine == BBLAS_TRMM) ||
(routine == BBLAS_TRSM))
{
free(test->Binitnorm);
}
/*Free memory allocated for norm computing */
if ((routine == BBLAS_GEMM) || (routine == BBLAS_SYMM) ||
(routine == BBLAS_HEMM) || (routine == BBLAS_SYRK) ||
(routine == BBLAS_HERK) || (routine == BBLAS_SYR2K)||
(routine == BBLAS_HER2K))
{
free(test->Cinitnorm);
}
}
#undef COMPLEX
|
-- exercises in "Type-Driven Development with Idris"
-- chapter 4
-- check that all functions are total
%default total
--
-- Binary Search Trees
--
||| A binary search tree.
data BST : Type -> Type where
||| An empty tree.
Empty : Ord elem =>
BST elem
||| A node with a value, a left subtree, and a right subtree.
||| Elements in the left subtree must be strictly lower than the value.
||| Elements in the right subtree must be strictly greater than the value.
Node : Ord elem =>
(left : BST elem) ->
(val : elem) ->
(right : BST elem) ->
BST elem
||| Insert a value into a binary search tree.
insert : elem -> BST elem -> BST elem
insert x Empty = Node Empty x Empty
insert x orig@(Node left val right)
= case compare x val of
LT => Node (insert x left) val right
EQ => orig
GT => Node left val (insert x right)
listToTree : Ord a => List a -> BST a
listToTree [] = Empty
listToTree (x :: xs) = insert x $ listToTree xs
treeToList : BST a -> List a
treeToList Empty = []
treeToList (Node left val right) = treeToList left ++ [val] ++ treeToList right
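-- Usage sketch (illustrative): round-tripping through the tree yields a
-- sorted, de-duplicated list, since `insert` drops equal elements:
--   treeToList (listToTree [5,1,4,1,2])  -- => [1, 2, 4, 5]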
|
[STATEMENT]
lemma quality_increases_rt_fresherD [dest]:
fixes ip
assumes "quality_increases \<xi> \<xi>'"
and "ip\<in>kD(rt \<xi>)"
shows "rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> rt \<xi>'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> rt \<xi>'
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
quality_increases \<xi> \<xi>'
ip \<in> kD (rt \<xi>)
goal (1 subgoal):
1. rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> rt \<xi>'
[PROOF STEP]
by auto
|
On an old map this area used to be Wood Farm, so I wonder if Sparhawk Avenue, Peregrine Close, etc. have something to do with birds in the woods? Please help if you know.
The odd-shaped building where the toilets and storage are housed was built in 1975.
/- This module defines the type of values that is used in the dynamic
semantics of all our intermediate languages. -/
import .lib .integers .floats .ast
namespace values
open integers word floats ast ast.typ ast.memory_chunk
def block : Type := pos_num
instance eq_block : decidable_eq block := by tactic.mk_dec_eq_instance
instance coe_block : has_coe block ℕ := ⟨λp, num.pos p⟩
instance dlo_block : decidable_linear_order block := pos_num.decidable_linear_order
instance block_one : has_one block := ⟨(1 : pos_num)⟩
/- A value is either:
- a machine integer;
- a floating-point number;
- a pointer: a pair of a memory address and an integer offset with respect
to this address;
- the [Vundef] value denoting an arbitrary bit pattern, such as the
value of an uninitialized variable.
-/
inductive val : Type
| Vundef : val
| Vint : int32 → val
| Vlong : int64 → val
| Vfloat : float → val
| Vsingle : float32 → val
| Vptr : block → ptrofs → val
export val
instance coe_int32_val : has_coe int32 val := ⟨Vint⟩
instance coe_int64_val : has_coe int64 val := ⟨Vlong⟩
instance coe_Int32_val : has_coe (@word W32) val := ⟨Vint⟩
instance coe_Int64_val : has_coe (@word W64) val := ⟨Vlong⟩
instance coe_float_val : has_coe float val := ⟨Vfloat⟩
instance coe_single_val : has_coe float32 val := ⟨Vsingle⟩
instance inhabited_val : inhabited val := ⟨Vundef⟩
def Vzero : val := Vint 0
def Vone : val := Vint 1
def Vmone : val := Vint (-1)
instance val_zero : has_zero val := ⟨Vzero⟩
instance val_one : has_one val := ⟨Vone⟩
def Vtrue : val := 1
def Vfalse : val := 0
def Vnullptr := if archi.ptr64 then Vlong 0 else Vint 0
def Vptrofs (n : ptrofs) : val :=
if archi.ptr64 then ptrofs.to_int64 n else ptrofs.to_int n
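/- Illustrative: on a 64-bit target `Vptrofs n` is a `Vlong`, on a 32-bit
   target a `Vint`, matching the representation chosen by `Vnullptr` above
   and the `Tptr` typing rules below. -/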
/- * Operations over values -/
/- The module [val] defines a number of arithmetic and logical operations
over type [val]. Most of these operations are straightforward extensions
of the corresponding integer or floating-point operations. -/
namespace val
instance val_eq : decidable_eq val := by tactic.mk_dec_eq_instance
def has_type : val → typ → Prop
| Vundef _ := true
| (Vint _) Tint := true
| (Vlong _) Tlong := true
| (Vfloat _) Tfloat := true
| (Vsingle _) Tsingle := true
| (Vptr _ _) Tint := ¬ archi.ptr64
| (Vptr _ _) Tlong := archi.ptr64
| (Vint _) Tany32 := true
| (Vsingle _) Tany32 := true
| (Vptr _ _) Tany32 := ¬ archi.ptr64
| _ Tany64 := true
| _ _ := false
def has_type_list : list val → list typ → Prop
| [] [] := true
| (v1 :: vs) (t1 :: ts) := has_type v1 t1 ∧ has_type_list vs ts
| _ _ := false
def has_opttype (v : val) : option typ → Prop
| none := v = Vundef
| (some t) := has_type v t
lemma Vptr_has_type (b ofs) : has_type (Vptr b ofs) Tptr :=
begin
delta Tptr,
ginduction archi.ptr64 with h,
{ intro h2, note := h.symm.trans h2, contradiction },
{ exact h }
end
lemma Vnullptr_has_type : has_type Vnullptr Tptr :=
by delta Tptr Vnullptr; cases archi.ptr64; trivial
lemma has_subtype (ty1 ty2 v) : subtype ty1 ty2 → has_type v ty1 → has_type v ty2 := sorry'
lemma has_subtype_list (tyl1 tyl2 vl) :
subtype_list tyl1 tyl2 → has_type_list vl tyl1 → has_type_list vl tyl2 := sorry'
/- Truth values. Non-zero integers are treated as [True].
The integer 0 (also used to represent the null pointer) is [False].
Other values are neither true nor false. -/
def to_bool : val → option bool
| (Vint n) := some (n ≠ 0)
| _ := none
/- Arithmetic operations -/
protected def neg : val → val
| (Vint n) := word.neg n
| _ := Vundef
instance : has_neg val := ⟨val.neg⟩
def negf : val → val
| (Vfloat f) := float.neg f
| _ := Vundef
def absf : val → val
| (Vfloat f) := float.abs f
| _ := Vundef
def negfs : val → val
| (Vsingle f) := float32.neg f
| _ := Vundef
def absfs : val → val
| (Vsingle f) := float32.abs f
| _ := Vundef
def make_total (ov : option val) : val :=
ov.get_or_else Vundef
def int_of_float : val → option val
| (Vfloat f) := Vint <$> float.to_int f
| _ := none
def intu_of_float : val → option val
| (Vfloat f) := Vint <$> float.to_intu f
| _ := none
def float_of_int : val → option val
| (Vint n) := float.of_int n
| _ := none
def float_of_intu : val → option val
| (Vint n) := float.of_intu n
| _ := none
def int_of_single : val → option val
| (Vsingle f) := Vint <$> float32.to_int f
| _ := none
def intu_of_single : val → option val
| (Vsingle f) := Vint <$> float32.to_intu f
| _ := none
def single_of_int : val → option val
| (Vint n) := float32.of_int n
| _ := none
def single_of_intu : val → option val
| (Vint n) := float32.of_intu n
| _ := none
def negint : val → val
| (Vint n) := word.neg n
| _ := Vundef
def notint : val → val
| (Vint n) := word.not n
| _ := Vundef
def of_bool (b : bool) : val := if b then Vtrue else Vfalse
instance : has_coe bool val := ⟨of_bool⟩
def boolval : val → val
| (Vint n) := of_bool (n ≠ 0)
| (Vptr b ofs) := Vtrue
| _ := Vundef
def notbool : val → val
| (Vint n) := of_bool (n = 0)
| (Vptr b ofs) := Vfalse
| _ := Vundef
def zero_ext (nbits : ℕ) : val → val
| (Vint n) := word.zero_ext nbits n
| _ := Vundef
def sign_ext (nbits : ℕ+) : val → val
| (Vint n) := word.sign_ext nbits n
| _ := Vundef
def single_of_float : val → val
| (Vfloat f) := float.to_single f
| _ := Vundef
def float_of_single : val → val
| (Vsingle f) := float.of_single f
| _ := Vundef
protected def add : val → val → val
| (Vint m) (Vint n) := Vint $ m + n
| (Vptr b1 ofs1) (Vint n) := if archi.ptr64 then Vundef else Vptr b1 (ofs1 + ptrofs.of_int n)
| (Vint m) (Vptr b2 ofs2) := if archi.ptr64 then Vundef else Vptr b2 (ofs2 + ptrofs.of_int m)
| _ _ := Vundef
instance : has_add val := ⟨val.add⟩
protected def sub : val → val → val
| (Vint m) (Vint n) := Vint $ m - n
| (Vptr b1 ofs1) (Vint n) := if archi.ptr64 then Vundef else Vptr b1 (ofs1 - ptrofs.of_int n)
| (Vptr b1 ofs1) (Vptr b2 ofs2) := if archi.ptr64 ∨ b1 ≠ b2 then Vundef else ptrofs.to_int (ofs1 - ofs2)
| _ _ := Vundef
instance : has_sub val := ⟨val.sub⟩
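/- Illustrative: on a 32-bit target, `Vptr b o1 - Vptr b o2` reduces to the
   integer difference of the offsets, but subtracting pointers into different
   blocks gives `Vundef`, mirroring C's undefined cross-object pointer
   arithmetic (`subl` below is the 64-bit analogue). -/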
protected def mul : val → val → val
| (Vint m) (Vint n) := Vint (m * n)
| _ _ := Vundef
instance : has_mul val := ⟨val.mul⟩
def mulhs : val → val → val
| (Vint m) (Vint n) := word.mulhs m n
| _ _ := Vundef
def mulhu : val → val → val
| (Vint m) (Vint n) := word.mulhu m n
| _ _ := Vundef
def divs : val → val → option val
| (Vint m) (Vint n) := if n = 0 ∨ m = repr (word.min_signed W32) ∧ n = -1
then none else some (m / n : word _)
| _ _ := none
def mods : val → val → option val
| (Vint m) (Vint n) := if n = 0 ∨ m = repr (@min_signed W32) ∧ n = -1
then none else some (m % n : word _)
| _ _ := none
def divu : val → val → option val
| (Vint m) (Vint n) := if n = 0 then none else word.divu m n
| _ _ := none
def modu : val → val → option val
| (Vint m) (Vint n) := if n = 0 then none else word.modu m n
| _ _ := none
def add_carry : val → val → val → val
| (Vint m) (Vint n) (Vint c) := word.add_carry m n c
| _ _ _ := Vundef
def sub_overflow : val → val → val
| (Vint m) (Vint n) := word.sub_overflow m n 0
| _ _ := Vundef
def negative : val → val
| (Vint n) := word.negative n
| _ := Vundef
protected def and : val → val → val
| (Vint m) (Vint n) := word.and m n
| _ _ := Vundef
protected def or : val → val → val
| (Vint m) (Vint n) := word.or m n
| _ _ := Vundef
protected def xor : val → val → val
| (Vint m) (Vint n) := word.xor m n
| _ _ := Vundef
def shl : val → val → val
| (Vint m) (Vint n) := if word.ltu n iwordsize
then word.shl m n else Vundef
| _ _ := Vundef
def shr : val → val → val
| (Vint m) (Vint n) := if word.ltu n iwordsize
then word.shr m n else Vundef
| _ _ := Vundef
def shr_carry : val → val → val
| (Vint m) (Vint n) := if word.ltu n iwordsize
then word.shr_carry m n else Vundef
| _ _ := Vundef
def shrx : val → val → option val
| (Vint m) (Vint n) := if word.ltu n (repr 31)
then word.shrx m n else none
| _ _ := none
def shru : val → val → val
| (Vint m) (Vint n) := if word.ltu n iwordsize
then word.shru m n else Vundef
| _ _ := Vundef
def rol : val → val → val
| (Vint m) (Vint n) := word.rol m n
| _ _ := Vundef
def rolm : val → int32 → int32 → val
| (Vint n) amount mask := word.rolm n amount mask
| _ amount mask := Vundef
def ror : val → val → val
| (Vint m) (Vint n) := word.ror m n
| _ _ := Vundef
def addf : val → val → val
| (Vfloat x) (Vfloat y) := Vfloat $ x + y
| _ _ := Vundef
def subf : val → val → val
| (Vfloat x) (Vfloat y) := Vfloat $ x - y
| _ _ := Vundef
def mulf : val → val → val
| (Vfloat x) (Vfloat y) := Vfloat $ x * y
| _ _ := Vundef
def divf : val → val → val
| (Vfloat x) (Vfloat y) := Vfloat $ x / y
| _ _ := Vundef
def float_of_words : val → val → val
| (Vint m) (Vint n) := float.from_words m n
| _ _ := Vundef
def addfs : val → val → val
| (Vsingle x) (Vsingle y) := float32.add x y
| _ _ := Vundef
def subfs : val → val → val
| (Vsingle x) (Vsingle y) := float32.sub x y
| _ _ := Vundef
def mulfs : val → val → val
| (Vsingle x) (Vsingle y) := float32.mul x y
| _ _ := Vundef
def divfs : val → val → val
| (Vsingle x) (Vsingle y) := float32.div x y
| _ _ := Vundef
/- Operations on 64-bit integers -/
def long_of_words : val → val → val
| (Vint m) (Vint n) := int64.ofwords m n
| _ _ := Vundef
def loword : val → val
| (Vlong n) := int64.loword n
| _ := Vundef
def hiword : val → val
| (Vlong n) := int64.hiword n
| _ := Vundef
def negl : val → val
| (Vlong n) := Vlong $ -n
| _ := Vundef
def notl : val → val
| (Vlong n) := word.not n
| _ := Vundef
def long_of_int : val → val
| (Vint n) := Vlong $ scoe n
| _ := Vundef
def long_of_intu : val → val
| (Vint n) := Vlong $ ucoe n
| _ := Vundef
def long_of_float : val → option val
| (Vfloat f) := Vlong <$> float.to_long f
| _ := none
def longu_of_float : val → option val
| (Vfloat f) := Vlong <$> float.to_longu f
| _ := none
def long_of_single : val → option val
| (Vsingle f) := Vlong <$> float32.to_long f
| _ := none
def longu_of_single : val → option val
| (Vsingle f) := Vlong <$> float32.to_longu f
| _ := none
def float_of_long : val → option val
| (Vlong n) := float.of_long n
| _ := none
def float_of_longu : val → option val
| (Vlong n) := float.of_longu n
| _ := none
def single_of_long : val → option val
| (Vlong n) := float32.of_long n
| _ := none
def single_of_longu : val → option val
| (Vlong n) := float32.of_longu n
| _ := none
def addl : val → val → val
| (Vlong n1) (Vlong n2) := Vlong (n1 + n2)
| (Vptr b1 ofs1) (Vlong n2) := if archi.ptr64 then Vptr b1 (ofs1 + ptrofs.of_int64 n2) else Vundef
| (Vlong n1) (Vptr b2 ofs2) := if archi.ptr64 then Vptr b2 (ofs2 + ptrofs.of_int64 n1) else Vundef
| _ _ := Vundef
def subl : val → val → val
| (Vlong n1) (Vlong n2) := Vlong (n1 - n2)
| (Vptr b1 ofs1) (Vlong n2) := if archi.ptr64 then Vptr b1 (ofs1 - ptrofs.of_int64 n2) else Vundef
| (Vptr b1 ofs1) (Vptr b2 ofs2) := if archi.ptr64 ∨ b1 ≠ b2 then Vundef else ptrofs.to_int64 (ofs1 - ofs2)
| _ _ := Vundef
def mull : val → val → val
| (Vlong m) (Vlong n) := Vlong (m * n)
| _ _ := Vundef
def mull' : val → val → val
| (Vint m) (Vint n) := Vlong (ucoe m * ucoe n)
| _ _ := Vundef
def mullhs : val → val → val
| (Vlong m) (Vlong n) := word.mulhs m n
| _ _ := Vundef
def mullhu : val → val → val
| (Vlong m) (Vlong n) := word.mulhu m n
| _ _ := Vundef
def divls : val → val → option val
| (Vlong m) (Vlong n) := if n = 0 ∨ m = repr (min_signed 64) ∧ n = -1
then none else some (m / n : word _)
| _ _ := none
def modls : val → val → option val
| (Vlong m) (Vlong n) := if n = 0 ∨ m = repr (min_signed 64) ∧ n = -1
then none else some (m % n : word _)
| _ _ := none
def divlu : val → val → option val
| (Vlong m) (Vlong n) := if n = 0 then none else word.divu m n
| _ _ := none
def modlu : val → val → option val
| (Vlong m) (Vlong n) := if n = 0 then none else word.modu m n
| _ _ := none
def subl_overflow : val → val → val
| (Vlong m) (Vlong n) := Vint $ ucoe $ word.sub_overflow m n 0
| _ _ := Vundef
def negativel : val → val
| (Vlong n) := Vint $ ucoe $ word.negative n
| _ := Vundef
def andl : val → val → val
| (Vlong m) (Vlong n) := word.and m n
| _ _ := Vundef
def orl : val → val → val
| (Vlong m) (Vlong n) := word.or m n
| _ _ := Vundef
def xorl : val → val → val
| (Vlong m) (Vlong n) := word.xor m n
| _ _ := Vundef
def shll : val → val → val
| (Vlong m) (Vint n) := if word.ltu n 64 then word.shl m (ucoe n) else Vundef
| _ _ := Vundef
def shrl : val → val → val
| (Vlong m) (Vint n) := if word.ltu n 64 then word.shr m (ucoe n) else Vundef
| _ _ := Vundef
def shrlu : val → val → val
| (Vlong m) (Vint n) := if word.ltu n 64 then word.shru m (ucoe n) else Vundef
| _ _ := Vundef
def shrxl : val → val → option val
| (Vlong m) (Vint n) := if word.ltu n 63 then word.shrx m (ucoe n) else none
| _ _ := none
def roll : val → val → val
| (Vlong m) (Vint n) := word.rol m (ucoe n)
| _ _ := Vundef
def rorl : val → val → val
| (Vlong m) (Vint n) := word.ror m (ucoe n)
| _ _ := Vundef
def rolml : val → int64 → int64 → val
| (Vlong n) amount mask := word.rolm n amount mask
| _ amount mask := Vundef
/- Comparisons -/
section comparisons
parameter valid_ptr : block → ℕ → bool
def weak_valid_ptr (b : block) (ofs : ℕ) := valid_ptr b ofs || valid_ptr b (ofs - 1)
def cmp_bool (c : comparison) : val → val → option bool
| (Vint m) (Vint n) := cmp c m n
| _ _ := none
def cmp_different_blocks : comparison → option bool
| Ceq := some ff
| Cne := some tt
| _ := none
def cmpu_bool (c : comparison) : val → val → option bool
| (Vint m) (Vint n) := some $ cmpu c m n
| (Vint m) (Vptr b2 ofs2) :=
if archi.ptr64 then none else
if m = 0 ∧ weak_valid_ptr b2 (unsigned ofs2)
then cmp_different_blocks c
else none
| (Vptr b1 ofs1) (Vptr b2 ofs2) :=
if archi.ptr64 then none else
if b1 = b2 then
if weak_valid_ptr b1 (unsigned ofs1) ∧ weak_valid_ptr b2 (unsigned ofs2)
then cmpu c ofs1 ofs2
else none
else
if valid_ptr b1 (unsigned ofs1) ∧ valid_ptr b2 (unsigned ofs2)
then cmp_different_blocks c
else none
| (Vptr b1 ofs1) (Vint n) :=
if archi.ptr64 then none else
if n = 0 ∧ weak_valid_ptr b1 (unsigned ofs1)
then cmp_different_blocks c
else none
| _ _ := none
def cmpf_bool (c : comparison) : val → val → option bool
| (Vfloat x) (Vfloat y) := float.cmp c x y
| _ _ := none
def cmpfs_bool (c : comparison) : val → val → option bool
| (Vsingle x) (Vsingle y) := float32.cmp c x y
| _ _ := none
def cmpl_bool (c : comparison) : val → val → option bool
| (Vlong m) (Vlong n) := cmp c m n
| _ _ := none
def cmplu_bool (c : comparison) : val → val → option bool
| (Vlong n1) (Vlong n2) := some $ cmpu c n1 n2
| (Vlong n1) (Vptr b2 ofs2) :=
if archi.ptr64 ∧ n1 = 0 ∧ weak_valid_ptr b2 (unsigned ofs2)
then cmp_different_blocks c
else none
| (Vptr b1 ofs1) (Vptr b2 ofs2) :=
if ¬ archi.ptr64 then none else
if b1 = b2 then
if weak_valid_ptr b1 (unsigned ofs1) && weak_valid_ptr b2 (unsigned ofs2)
then some (cmpu c ofs1 ofs2)
else none
else
if valid_ptr b1 (unsigned ofs1) && valid_ptr b2 (unsigned ofs2)
then cmp_different_blocks c
else none
| (Vptr b1 ofs1) (Vlong n2) :=
if archi.ptr64 ∧ n2 = 0 ∧ weak_valid_ptr b1 (unsigned ofs1)
then cmp_different_blocks c
else none
| _ _ := none
def of_optbool : option bool → val
| (some tt) := Vtrue
| (some ff) := Vfalse
| none := Vundef
def cmp (c : comparison) (v1 v2 : val) : val :=
of_optbool $ cmp_bool c v1 v2
def cmpu (c : comparison) (v1 v2 : val) : val :=
of_optbool $ cmpu_bool c v1 v2
def cmpf (c : comparison) (v1 v2 : val) : val :=
of_optbool $ cmpf_bool c v1 v2
def cmpfs (c : comparison) (v1 v2 : val) : val :=
of_optbool $ cmpfs_bool c v1 v2
def cmpl (c : comparison) (v1 v2 : val) : option val :=
of_bool <$> cmpl_bool c v1 v2
def cmplu (c : comparison) (v1 v2 : val) : option val :=
of_bool <$> cmplu_bool c v1 v2
def maskzero_bool : val → int32 → option bool
| (Vint n) mask := some $ word.and n mask = 0
| _ mask := none
end comparisons
/- Add the given offset to the given pointer. -/
def offset_ptr : val → ptrofs → val
| (Vptr b ofs) delta := Vptr b (ofs + delta)
| _ delta := Vundef
/- [load_result] reflects the effect of storing a value with a given
memory chunk, then reading it back with the same chunk. Depending
on the chunk and the type of the value, some normalization occurs.
For instance, consider storing the integer value [0xFFF] on 1 byte
at a given address, and reading it back. If it is read back with
chunk [Mint8unsigned], zero-extension must be performed, resulting
in [0xFF]. If it is read back as a [Mint8signed], sign-extension is
performed and [0xFFFFFFFF] is returned. -/
def load_result : memory_chunk → val → val
| Mint8signed (Vint n) := word.sign_ext W8 n
| Mint8unsigned (Vint n) := word.zero_ext 8 n
| Mint16signed (Vint n) := word.sign_ext W16 n
| Mint16unsigned (Vint n) := word.zero_ext 16 n
| Mint32 (Vint n) := Vint n
| Mint32 (Vptr b ofs) := if archi.ptr64 then Vundef else Vptr b ofs
| Mint64 (Vlong n) := Vlong n
| Mint64 (Vptr b ofs) := if archi.ptr64 then Vptr b ofs else Vundef
| Mfloat32 (Vsingle f) := Vsingle f
| Mfloat64 (Vfloat f) := Vfloat f
| Many32 (Vint n) := Vint n
| Many32 (Vsingle f) := Vsingle f
| Many32 (Vptr b ofs) := if archi.ptr64 then Vundef else Vptr b ofs
| Many64 v := v
| _ _ := Vundef
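/- Worked example of the normalization above: [load_result Mint8unsigned
   (Vint 0xFFF)] zero-extends the low byte, giving [Vint 0xFF], while
   [load_result Mint8signed (Vint 0xFFF)] sign-extends it, giving
   [Vint 0xFFFFFFFF] (i.e. [-1] on 32 bits). -/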
lemma load_result_type (chunk v) : has_type (load_result chunk v) chunk.type := sorry'
lemma load_result_same {v ty} : has_type v ty → load_result (chunk_of_type ty) v = v := sorry'
/- Theorems on arithmetic operations. -/
theorem cast8unsigned_and (x) : zero_ext 8 x = val.and x (0xFF : int32) := sorry'
theorem cast16unsigned_and (x) : zero_ext 16 x = val.and x (0xFFFF : int32) := sorry'
theorem to_bool_of_bool (b1 b2) : (of_bool b1).to_bool = some b2 → b1 = b2 := sorry'
theorem to_bool_of_optbool (ob) : (of_optbool ob).to_bool = ob := sorry'
theorem notbool_negb_1 (b) : of_bool (bnot b) = notbool (of_bool b) := sorry'
theorem notbool_negb_2 (b) : of_bool b = notbool (of_bool (bnot b)) := sorry'
theorem notbool_negb_3 (ob) : of_optbool (bnot <$> ob) = notbool (of_optbool ob) := sorry'
set_option type_context.unfold_lemmas true
theorem notbool_idem2 (b) : notbool (notbool (of_bool b)) = of_bool b :=
by cases b; refl
theorem notbool_idem3 (x) : notbool (notbool (notbool x)) = notbool x := sorry'
theorem notbool_idem4 (ob) : notbool (notbool (of_optbool ob)) = of_optbool ob := sorry'
theorem add_comm (x y : val) : x + y = y + x := sorry'
theorem add_assoc (x y z : val) : (x + y) + z = x + (y + z) := sorry'
theorem neg_zero : (-0 : val) = 0 := sorry'
theorem neg_add (x y : val) : -(x + y) = -x + -y := sorry'
theorem zero_sub (x : val) : 0 - x = -x := sorry'
theorem sub_eq_add_neg (x y) : x - Vint y = x + Vint (-y) := sorry'
theorem sub_neg_eq_add (x y) : x - Vint (-y) = x + Vint y := sorry'
theorem sub_add_eq_add_sub (v1 v2 i) : v1 + Vint i - v2 = v1 - v2 + Vint i := sorry'
theorem sub_add_eq_sub_add_neg (v1 v2 i) : v1 - (v2 + Vint i) = v1 - v2 + Vint (-i) := sorry'
theorem mul_comm (x y : val) : x * y = y * x := sorry'
theorem mul_assoc (x y z : val) : (x * y) * z = x * (y * z) := sorry'
theorem right_distrib (x y z : val) : (x + y) * z = x * z + y * z := sorry'
theorem left_distrib (x y z : val) : x * (y + z) = x * y + x * z := sorry'
theorem mul_pow2 (x n logn) : is_power2 n = some logn →
x * Vint n = shl x (Vint logn) := sorry'
theorem mods_divs (x y z) : mods x y = some z → ∃ v, divs x y = some v ∧ z = x - v * y := sorry'
theorem modu_divu (x y z) : modu x y = some z → ∃ v, divu x y = some v ∧ z = x - v * y := sorry'
theorem divs_pow2 (x n logn y) : is_power2 n = some logn → word.ltu logn 31 →
divs x (Vint n) = some y → shrx x (Vint logn) = some y := sorry'
theorem divu_pow2 (x n logn y) : is_power2 n = some logn →
divu x (Vint n) = some y → shru x (Vint logn) = y := sorry'
theorem modu_pow2 (x n logn y) : is_power2 n = some logn →
modu x (Vint n) = some y → val.and x (Vint (n - 1)) = y := sorry'
theorem and_comm (x y) : val.and x y = val.and y x := sorry'
theorem and_assoc (x y z) : val.and (val.and x y) z = val.and x (val.and y z) := sorry'
theorem or_comm (x y) : val.or x y = val.or y x := sorry'
theorem or_assoc (x y z) : val.or (val.or x y) z = val.or x (val.or y z) := sorry'
theorem xor_commut (x y) : val.xor x y = val.xor y x := sorry'
theorem xor_assoc (x y z) : val.xor (val.xor x y) z = val.xor x (val.xor y z) := sorry'
theorem not_xor (x) : notint x = val.xor x (Vint (-1)) := sorry'
theorem shl_rolm (x n) : word.ltu n 32 → shl x (Vint n) = rolm x n (word.shl (-1) n) := sorry'
theorem shru_rolm (x n) : word.ltu n 32 →
shru x (Vint n) = rolm x (32 - n) (word.shru (-1) n) := sorry'
theorem shrx_carry (x y z) : shrx x y = some z →
shr x y + shr_carry x y = z := sorry'
theorem shrx_shr (x y z) : shrx x y = some z →
∃ p, ∃ q, x = Vint p ∧ y = Vint q ∧
z = shr (if p < 0 then x + Vint (word.shl 1 q - 1) else x) (Vint q) := sorry'
theorem shrx_shr_2 (n x z) : shrx x (Vint n) = some z →
z = if n = 0 then x else shr (x + shru (shr x 31) (Vint (32 - n))) (Vint n) := sorry'
theorem or_rolm (x n m1 m2) : val.or (rolm x n m1) (rolm x n m2) = rolm x n (word.or m1 m2) := sorry'
theorem rolm_rolm (x n1 m1 n2 m2) : rolm (rolm x n1 m1) n2 m2 =
rolm x (word.modu (n1 + n2) 32) (word.and (word.rol m1 n2) m2) := sorry'
theorem rolm_zero (x m) : rolm x 0 m = val.and x (Vint m) := sorry'
theorem addl_comm (x y) : addl x y = addl y x := sorry'
theorem addl_assoc (x y z) : addl (addl x y) z = addl x (addl y z) := sorry'
theorem negl_addl_distr (x y) : negl (addl x y) = addl (negl x) (negl y) := sorry'
theorem subl_addl_opp (x y) : subl x (Vlong y) = addl x (Vlong (-y)) := sorry'
theorem subl_opp_addl : ∀ x y, subl x (Vlong (-y)) = addl x (Vlong y) := sorry'
theorem subl_addl_l (v1 v2 i) : subl (addl v1 (Vlong i)) v2 = addl (subl v1 v2) (Vlong i) := sorry'
theorem subl_addl_r (v1 v2 i) : subl v1 (addl v2 (Vlong i)) = addl (subl v1 v2) (Vlong (-i)) := sorry'
theorem mull_comm (x y) : mull x y = mull y x := sorry'
theorem mull_assoc (x y z) : mull (mull x y) z = mull x (mull y z) := sorry'
theorem mull_addl_distr_l (x y z) : mull (addl x y) z = addl (mull x z) (mull y z) := sorry'
theorem mull_addl_distr_r (x y z) : mull x (addl y z) = addl (mull x y) (mull x z) := sorry'
theorem andl_comm (x y) : andl x y = andl y x := sorry'
theorem andl_assoc (x y z) : andl (andl x y) z = andl x (andl y z) := sorry'
theorem orl_comm (x y) : orl x y = orl y x := sorry'
theorem orl_assoc (x y z) : orl (orl x y) z = orl x (orl y z) := sorry'
theorem xorl_commut (x y) : xorl x y = xorl y x := sorry'
theorem xorl_assoc (x y z) : xorl (xorl x y) z = xorl x (xorl y z) := sorry'
theorem notl_xorl (x) : notl x = xorl x (Vlong (-1)) := sorry'
theorem divls_pow2 (x n logn y) : int64.is_power2 n = some logn → word.ltu logn 63 →
divls x (Vlong n) = some y →
shrxl x (Vint logn) = some y := sorry'
theorem divlu_pow2 (x n logn y) : int64.is_power2 n = some logn →
divlu x (Vlong n) = some y →
shrlu x (Vint logn) = y := sorry'
theorem modlu_pow2 (x n logn y) : int64.is_power2 n = some logn →
modlu x (Vlong n) = some y →
andl x (Vlong (n - 1)) = y := sorry'
theorem shrxl_shrl_2 (n x z) : shrxl x (Vint n) = some z →
z = if n = 0 then x else
shrl (addl x (shrlu (shrl x (Vint 63)) (Vint (64 - n)))) (Vint n) := sorry'
theorem negate_cmp_bool (c x y) : cmp_bool (negate_comparison c) x y = bnot <$> cmp_bool c x y := sorry'
theorem negate_cmpu_bool (valid_ptr c x y) :
cmpu_bool valid_ptr (negate_comparison c) x y = bnot <$> cmpu_bool valid_ptr c x y := sorry'
theorem negate_cmpl_bool (c x y) : cmpl_bool (negate_comparison c) x y = bnot <$> cmpl_bool c x y := sorry'
theorem negate_cmplu_bool (valid_ptr c x y) :
cmplu_bool valid_ptr (negate_comparison c) x y = bnot <$> cmplu_bool valid_ptr c x y := sorry'
lemma not_of_optbool (ob) : of_optbool (bnot <$> ob) = notbool (of_optbool ob) := sorry'
theorem negate_cmp (c x y) : cmp (negate_comparison c) x y = notbool (cmp c x y) := sorry'
theorem negate_cmpu (valid_ptr c x y) :
cmpu valid_ptr (negate_comparison c) x y =
notbool (cmpu valid_ptr c x y) := sorry'
theorem swap_cmp_bool (c x y) : cmp_bool (swap_comparison c) x y = cmp_bool c y x := sorry'
theorem swap_cmpu_bool (valid_ptr c x y) :
cmpu_bool valid_ptr (swap_comparison c) x y = cmpu_bool valid_ptr c y x := sorry'
theorem swap_cmpl_bool (c x y) : cmpl_bool (swap_comparison c) x y = cmpl_bool c y x := sorry'
theorem swap_cmplu_bool (valid_ptr c x y) :
cmplu_bool valid_ptr (swap_comparison c) x y = cmplu_bool valid_ptr c y x := sorry'
theorem negate_cmpf_eq (v1 v2) : notbool (cmpf Cne v1 v2) = cmpf Ceq v1 v2 := sorry'
theorem negate_cmpf_ne (v1 v2) : notbool (cmpf Ceq v1 v2) = cmpf Cne v1 v2 := sorry'
theorem cmpf_le (v1 v2) : cmpf Cle v1 v2 = val.or (cmpf Clt v1 v2) (cmpf Ceq v1 v2) := sorry'
theorem cmpf_ge (v1 v2) : cmpf Cge v1 v2 = val.or (cmpf Cgt v1 v2) (cmpf Ceq v1 v2) := sorry'
theorem cmp_ne_0_optbool (ob) : cmp Cne (of_optbool ob) 0 = of_optbool ob := sorry'
theorem cmp_eq_1_optbool (ob) : cmp Ceq (of_optbool ob) 1 = of_optbool ob := sorry'
theorem cmp_eq_0_optbool (ob) : cmp Ceq (of_optbool ob) 0 = of_optbool (bnot <$> ob) := sorry'
theorem cmp_ne_1_optbool (ob) : cmp Cne (of_optbool ob) 1 = of_optbool (bnot <$> ob) := sorry'
theorem cmpu_ne_0_optbool (valid_ptr ob) :
cmpu valid_ptr Cne (of_optbool ob) 0 = of_optbool ob := sorry'
theorem cmpu_eq_1_optbool (valid_ptr ob) :
cmpu valid_ptr Ceq (of_optbool ob) 1 = of_optbool ob := sorry'
theorem cmpu_eq_0_optbool (valid_ptr ob) :
cmpu valid_ptr Ceq (of_optbool ob) 0 = of_optbool (bnot <$> ob) := sorry'
theorem cmpu_ne_1_optbool (valid_ptr ob) :
cmpu valid_ptr Cne (of_optbool ob) 1 = of_optbool (bnot <$> ob) := sorry'
lemma zero_ext_and (n v) : 0 < n → n < 32 →
val.zero_ext n v = val.and v (Vint (repr (2^n - 1))) := sorry'
lemma rolm_lt_zero (v) : rolm v 1 1 = cmp Clt v 0 := sorry'
lemma rolm_ge_zero (v) : val.xor (rolm v 1 1) 1 = cmp Cge v 0 := sorry'
/- The ``is less defined'' relation between values.
A value is less defined than itself, and [Vundef] is
less defined than any value. -/
inductive lessdef : val → val → Prop
| refl (v) : lessdef v v
| undef (v) : lessdef Vundef v
lemma lessdef_of_eq : Π {v1 v2}, v1 = v2 → lessdef v1 v2
| v ._ rfl := lessdef.refl v
lemma lessdef_trans {v1 v2 v3} : lessdef v1 v2 → lessdef v2 v3 → lessdef v1 v3 :=
by intros h1 h2; cases h1; try {assumption}; cases h2; assumption
lemma lessdef_list_inv {vl1 vl2} : list.forall2 lessdef vl1 vl2 → vl1 = vl2 ∨ Vundef ∈ vl1 := sorry'
lemma lessdef_list_trans {vl1 vl2 vl3} :
list.forall2 lessdef vl1 vl2 → list.forall2 lessdef vl2 vl3 → list.forall2 lessdef vl1 vl3 :=
@list.forall2.trans _ _ @lessdef_trans _ _ _
/- Compatibility of operations with the [lessdef] relation. -/
lemma load_result_lessdef (chunk v1 v2) :
lessdef v1 v2 → lessdef (load_result chunk v1) (load_result chunk v2) := sorry'
lemma zero_ext_lessdef (n v1 v2) :
lessdef v1 v2 → lessdef (zero_ext n v1) (zero_ext n v2) := sorry'
lemma sign_ext_lessdef (n v1 v2) :
lessdef v1 v2 → lessdef (sign_ext n v1) (sign_ext n v2) := sorry'
lemma singleoffloat_lessdef (v1 v2) :
lessdef v1 v2 → lessdef (single_of_float v1) (single_of_float v2) := sorry'
lemma add_lessdef (v1 v1' v2 v2') :
lessdef v1 v1' → lessdef v2 v2' → lessdef (v1 + v2) (v1' + v2') := sorry'
lemma addl_lessdef (v1 v1' v2 v2') :
lessdef v1 v1' → lessdef v2 v2' → lessdef (addl v1 v2) (addl v1' v2') := sorry'
lemma cmpu_bool_lessdef {valid_ptr valid_ptr' : block → ℕ → bool} {c v1 v1' v2 v2' b} :
(∀ b ofs, valid_ptr b ofs → valid_ptr' b ofs) →
lessdef v1 v1' → lessdef v2 v2' →
cmpu_bool valid_ptr c v1 v2 = some b →
cmpu_bool valid_ptr' c v1' v2' = some b := sorry'
lemma cmplu_bool_lessdef {valid_ptr valid_ptr' : block → ℕ → bool} {c v1 v1' v2 v2' b} :
(∀ b ofs, valid_ptr b ofs → valid_ptr' b ofs) →
lessdef v1 v1' → lessdef v2 v2' →
cmplu_bool valid_ptr c v1 v2 = some b →
cmplu_bool valid_ptr' c v1' v2' = some b := sorry'
lemma of_optbool_lessdef {ob ob'} :
(∀ b, ob = some b → ob' = some b) →
lessdef (of_optbool ob) (of_optbool ob') := sorry'
lemma long_of_words_lessdef {v1 v2 v1' v2'} :
lessdef v1 v1' → lessdef v2 v2' → lessdef (long_of_words v1 v2) (long_of_words v1' v2') := sorry'
lemma loword_lessdef {v v'} : lessdef v v' → lessdef (loword v) (loword v') := sorry'
lemma hiword_lessdef {v v'} : lessdef v v' → lessdef (hiword v) (hiword v') := sorry'
lemma offset_ptr_zero (v) : lessdef (offset_ptr v 0) v := sorry'
lemma offset_ptr_assoc (v d1 d2) : offset_ptr (offset_ptr v d1) d2 = offset_ptr v (d1 + d2) := sorry'
/- * Values and memory injections -/
/- A memory injection [f] is a function from addresses to either [none]
or [some] of an address and an offset. It defines a correspondence
between the blocks of two memory states [m1] and [m2]:
- if [f b = none], the block [b] of [m1] has no equivalent in [m2];
- if [f b = some (b', ofs)], the block [b] of [m1] corresponds to
a sub-block at offset [ofs] of the block [b'] in [m2].
-/
def meminj : Type := block → option (block × ℤ)
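/- For instance, the injection sending block [b₁] to [some (b₂, 16)] and every
   other block to [none] embeds [b₁] at offset 16 inside [b₂] and discards all
   remaining blocks of [m1]. -/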
/- A memory injection defines a relation between values that is the
identity relation, except for pointer values which are shifted
as prescribed by the memory injection. Moreover, [Vundef] values
inject into any other value. -/
inductive inject (mi : meminj) : val → val → Prop
| int (i) : inject (Vint i) (Vint i)
| long (i) : inject (Vlong i) (Vlong i)
| float (f) : inject (Vfloat f) (Vfloat f)
| single (f) : inject (Vsingle f) (Vsingle f)
| ptr (b1 ofs1 b2 ofs2 delta) :
mi b1 = some (b2, delta) →
(ofs2 : ptrofs) = ofs1 + repr delta →
inject (Vptr b1 ofs1) (Vptr b2 ofs2)
| undef (v) : inject Vundef v
lemma inject_ptrofs (mi i) : inject mi (Vptrofs i) (Vptrofs i) := sorry'
section val_inj_ops
parameter f : meminj
lemma load_result_inject (chunk v1 v2) :
inject f v1 v2 →
inject f (load_result chunk v1) (load_result chunk v2) := sorry'
theorem add_inject {v1 v1' v2 v2'} :
inject f v1 v1' → inject f v2 v2' →
inject f (v1 + v2) (v1' + v2') := sorry'
theorem sub_inject {v1 v1' v2 v2'} :
inject f v1 v1' → inject f v2 v2' →
inject f (v1 - v2) (v1' - v2') := sorry'
theorem addl_inject {v1 v1' v2 v2'} :
inject f v1 v1' → inject f v2 v2' →
inject f (addl v1 v2) (addl v1' v2') := sorry'
theorem subl_inject {v1 v1' v2 v2'} :
inject f v1 v1' → inject f v2 v2' →
inject f (subl v1 v2) (subl v1' v2') := sorry'
lemma offset_ptr_inject {v v'} (ofs) : inject f v v' →
inject f (offset_ptr v ofs) (offset_ptr v' ofs) := sorry'
lemma cmp_bool_inject {c v1 v2 v1' v2' b} :
inject f v1 v1' → inject f v2 v2' →
cmp_bool c v1 v2 = some b → cmp_bool c v1' v2' = some b := sorry'
parameters (valid_ptr1 valid_ptr2 : block → ℕ → bool)
def weak_valid_ptr1 := weak_valid_ptr valid_ptr1
def weak_valid_ptr2 := weak_valid_ptr valid_ptr2
parameter valid_ptr_inj : ∀ b1 ofs b2 delta, f b1 = some (b2, delta) →
valid_ptr1 b1 (unsigned (ofs : ptrofs)) →
valid_ptr2 b2 (unsigned (ofs + repr delta))
parameter weak_valid_ptr_inj : ∀ b1 ofs b2 delta, f b1 = some (b2, delta) →
weak_valid_ptr1 b1 (unsigned (ofs : ptrofs)) →
weak_valid_ptr2 b2 (unsigned (ofs + repr delta))
parameter weak_valid_ptr_no_overflow : ∀ b1 ofs b2 delta, f b1 = some (b2, delta) →
weak_valid_ptr1 b1 (unsigned (ofs : ptrofs)) →
unsigned ofs + unsigned (repr delta : ptrofs) ≤ @max_unsigned ptrofs.wordsize
parameter valid_different_ptrs_inj : ∀ b1 ofs1 b2 ofs2 b1' delta1 b2' delta2,
b1 ≠ b2 →
valid_ptr1 b1 (unsigned (ofs1 : ptrofs)) →
valid_ptr1 b2 (unsigned (ofs2 : ptrofs)) →
f b1 = some (b1', delta1) →
f b2 = some (b2', delta2) →
b1' = b2' → unsigned (ofs1 + repr delta1) ≠ unsigned (ofs2 + repr delta2)
lemma cmpu_bool_inject {c v1 v2 v1' v2' b} :
inject f v1 v1' → inject f v2 v2' →
cmpu_bool valid_ptr1 c v1 v2 = some b →
cmpu_bool valid_ptr2 c v1' v2' = some b := sorry'
lemma cmplu_bool_inject {c v1 v2 v1' v2' b} :
inject f v1 v1' → inject f v2 v2' →
cmplu_bool valid_ptr1 c v1 v2 = some b →
cmplu_bool valid_ptr2 c v1' v2' = some b := sorry'
lemma long_of_words_inject {v1 v2 v1' v2'} :
inject f v1 v1' → inject f v2 v2' →
inject f (long_of_words v1 v2) (long_of_words v1' v2') := sorry'
lemma loword_inject {v v'} : inject f v v' → inject f (loword v) (loword v') := sorry'
lemma hiword_inject {v v'} : inject f v v' → inject f (hiword v) (hiword v') := sorry'
end val_inj_ops
end val
export val (meminj)
/- Monotone evolution of a memory injection. -/
def inject_incr (f1 f2 : meminj) : Prop :=
∀ ⦃b b' delta⦄, f1 b = some (b', delta) → f2 b = some (b', delta)
lemma inject_incr.refl (f) : inject_incr f f := λ _ _ _, id
lemma inject_incr.trans {f1 f2 f3}
(i1 : inject_incr f1 f2) (i2 : inject_incr f2 f3) : inject_incr f1 f3 :=
λ b b' d h, i2 (i1 h)
lemma val_inject_incr {f1 f2 v v'} :
inject_incr f1 f2 → inject f1 v v' → inject f2 v v' := sorry'
lemma val_inject_list_incr {f1 f2 vl vl'}
(i : inject_incr f1 f2) (il : list.forall2 (inject f1) vl vl') :
list.forall2 (inject f2) vl vl' :=
il.imp (λ x y, val_inject_incr i)
/- The identity injection gives rise to the "less defined than" relation. -/
def inject_id : meminj := λ b, some (b, 0)
lemma val_inject_id {v1 v2} :
inject inject_id v1 v2 ↔ val.lessdef v1 v2 := sorry'
lemma val_inject_id_list {vl1 vl2} :
list.forall2 (inject inject_id) vl1 vl2 ↔ list.forall2 val.lessdef vl1 vl2 :=
list.forall2.iff @val_inject_id
/- Composing two memory injections -/
def val.meminj.comp (f f' : meminj) : meminj := λ b,
do ⟨b', delta⟩ ← f b,
⟨b'', delta'⟩ ← f' b',
return (b'', delta + delta')
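/- The composite follows the option monad: it is defined only when both lookups
   succeed, and the offsets add. E.g. if [f b = some (b', 4)] and
   [f' b' = some (b'', 8)] then [(f.comp f') b = some (b'', 12)]. -/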
lemma inject.comp {f f' v1 v2 v3} :
inject f v1 v2 → inject f' v2 v3 →
inject (f.comp f') v1 v3 := sorry'
end values |
This study aimed to evaluate the value of dysregulated miRNAs in RA diagnosis and monitoring. Candidate miRNA levels were measured by quantitative polymerase chain reaction (qPCR) in 30 RA patients and 30 control patients, and were positively correlated with disease activity and inflammation level of RA; in particular, miR-5571-3p and miR-135b-5p correlate with increased RA risk and activity.
Wenzhou Medical University, Wenzhou, China. |
# James Rekow
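
# Computes, for each (M, lambda) pair on the supplied grids, the mean of the
# first component returned by DOCProcedure (the mean overlap) for abundance
# lists generated by abdListCreator, and returns the results as a data frame
# with columns M, lambda and meanOver.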
meanOverDOCCreator = function(MVec = NULL, lambdaVec = NULL, N = 20, iStrength = 1, univ = 1,
sigmaMax = 0.1, thresholdMult = 10 ^ (-4), maxSteps = 10 ^ 4,
tStep = 10 ^ (-2), intTime = 1, interSmplMult = 0.01){
source("abdListCreator.r")
source("DOCProcedure.r")
if(is.null(MVec)){
MVec = seq(40, 200, 40)
} # end if
if(is.null(lambdaVec)){
lambdaVec = seq(0, 200, 50)
} # end if
meanOverProducer = function(inputVec){
M = inputVec[1]
lambda = inputVec[2]
abdList = abdListCreator(M = M, N = N, iStrength = iStrength, univ = univ, sigmaMax = sigmaMax,
thresholdMult = thresholdMult, maxSteps = maxSteps, tStep = tStep,
intTime = intTime, interSmplMult = interSmplMult, lambda = lambda,
returnParams = FALSE, conGraph = NULL)
doc = DOCProcedure(abdList)
meanOver = mean(doc[[1]])
return(meanOver)
} # end meanOverProducer function
inputMat = expand.grid(MVec, lambdaVec)
meanOver = apply(inputMat, 1, meanOverProducer)
meanOverDOC = data.frame(M = inputMat[ , 1], lambda = inputMat[ , 2], meanOver)
return(meanOverDOC)
} # end meanOverDOCCreator function
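
# Example usage (a sketch; assumes abdListCreator.r and DOCProcedure.r are in
# the working directory, with small grids to keep the run time manageable):
# meanOverDOC = meanOverDOCCreator(MVec = c(40, 80), lambdaVec = c(0, 50), N = 10)
# head(meanOverDOC)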
|
import System.File
main : IO ()
main = do
  -- Open the file for reading; on failure report the error and stop.
  Right f <- openFile "sampleFile.txt" Read
    | Left err => printLn err
  -- Read the whole contents into a String.
  Right contents <- fRead f
    | Left err => printLn err
  closeFile f
  printLn contents
|
module Prob
import Marginalization
import Semiring
import HetVect
%default total
%access public export
-- TODO: convert to haskell, parallel, prove
mutual
||| Probability monad which keeps a list of the variables it contains
||| Prob s xs a = probability of `a`, given `xs`. `s` is the unit
data Prob : Type -> Type -> Type where
||| change to (forall s. Semiring s => (a -> s) -> s) in order to avoid enum,
||| bounded constraints (maybe).
Dst : (a -> s) -> Prob s a
Bnd
: {xs : List Type}
-> {auto finite : AllFinite xs}
-> MultiProb s xs
-> (Vect xs -> Prob s b)
-> Prob s b
data MultiProb : Type -> List Type -> Type where
Nil : MultiProb s []
(::) : {x : Type} -> Prob s x -> MultiProb s xs -> MultiProb s (x :: xs)
getProb : Semiring s => Prob s a -> a -> s
getProb (Dst f) = f
getProb (Bnd {xs} ps f) = \y => margVec (\ws => allMessages ws * getProb (f ws) y) where
funcs : Semiring s => {ys : List Type} -> MultiProb s ys -> s -> Vect ys -> s
funcs [] = \n, _ => n
funcs (p::ps) =
let m = getProb p
ms = funcs ps
in \n, (v::vs) => ms (n * m v) vs -- change to not tail-recursive to make parallel
allMessages : Vect xs -> s
allMessages = funcs ps one
getProbPar : Semiring s => Prob s a -> a -> s
getProbPar (Dst f) = f
getProbPar (Bnd {xs} ps f) = \y => margVec (\ws => allMessages ws * getProb (f ws) y) where
funcs : Semiring s => {ys : List Type} -> MultiProb s ys -> Vect ys -> s
funcs [] = \_ => one
funcs (p::ps) = \(v::vs) =>
let m = getProbPar p v -- Evaluate these in parallel (only currently possible in Haskell)
ms = funcs ps vs
in ms * m
allMessages : Vect xs -> s
allMessages = funcs ps
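
-- Example (sketch): a fair coin as a distribution over Bool, assuming a
-- Semiring instance for Double is in scope:
--   coin : Prob Double Bool
--   coin = Dst (\_ => 0.5)
-- so that `getProb coin True` evaluates to 0.5.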
|
module Lib.Eq where
open import Lib.Prelude as P hiding (String)
open import Lib.Nat renaming (_==_ to _=Nat=_)
open import Lib.Fin
open import Lib.List
open import Lib.Bool
-- Wrapper type, used to ensure that El is constructor-headed.
record String : Set where
constructor string
field unString : P.String
-- Codes for types supporting equality
data EqU : Set where
nat : EqU
bool : EqU
string : EqU
unit : EqU
fin : Nat -> EqU
list : EqU -> EqU
pair : EqU -> EqU -> EqU
El : EqU -> Set
El nat = Nat
El bool = Bool
El string = String
El unit = Unit
El (fin n) = Fin n
El (list u) = List (El u)
El (pair u v) = El u × El v
primitive primStringEquality : P.String -> P.String -> Bool
infix 30 _==_
_==_ : {u : EqU} -> El u -> El u -> Bool
_==_ {nat} n m = n =Nat= m
_==_ {fin n} i j = finEq i j
_==_ {bool} false y = not y
_==_ {bool} true y = y
_==_ {string} (string x) (string y) = primStringEquality x y
_==_ {unit} _ _ = true
_==_ {list u} [] [] = true
_==_ {list u} (x :: xs) (y :: ys) = x == y && xs == ys
_==_ {list u} _ _ = false
_==_ {pair u v} (x₁ , y₁) (x₂ , y₂) = x₁ == x₂ && y₁ == y₂
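
-- Worked example: with the codes above, equality at a compound code computes
-- componentwise, e.g. at (list (pair nat bool)) two lists are compared
-- elementwise, each element by _==_ {pair nat bool}, which in turn conjoins
-- _==_ {nat} and _==_ {bool}.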
|
[STATEMENT]
lemma REAL_E: "\<lbrakk>val_type v = REAL; \<And>b. v = RealVal b \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>val_type v = REAL; \<And>b. v = RealVal b \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P
[PROOF STEP]
by (cases v) auto |
||| Module for parsing and encoding LSP messages, updated to version 3.16.
|||
||| References:
||| [1] https://microsoft.github.io/language-server-protocol/specifications/specification-3-16/
|||
||| (C) The Idris Community, 2021
module Language.LSP.Message
import public Language.LSP.Message.CallHierarchy
import public Language.LSP.Message.Cancel
import public Language.LSP.Message.ClientCapabilities
import public Language.LSP.Message.CodeAction
import public Language.LSP.Message.CodeLens
import public Language.LSP.Message.Command
import public Language.LSP.Message.Completion
import public Language.LSP.Message.Declaration
import public Language.LSP.Message.Definition
import public Language.LSP.Message.Derive
import public Language.LSP.Message.Diagnostics
import public Language.LSP.Message.DocumentColor
import public Language.LSP.Message.DocumentFormatting
import public Language.LSP.Message.DocumentHighlight
import public Language.LSP.Message.DocumentLink
import public Language.LSP.Message.DocumentSymbols
import public Language.LSP.Message.FoldingRange
import public Language.LSP.Message.Hover
import public Language.LSP.Message.Implementation
import public Language.LSP.Message.Initialize
import public Language.LSP.Message.LinkedEditingRange
import public Language.LSP.Message.Location
import public Language.LSP.Message.Markup
import public Language.LSP.Message.Message
import public Language.LSP.Message.Method
import public Language.LSP.Message.Moniker
import public Language.LSP.Message.Progress
import public Language.LSP.Message.References
import public Language.LSP.Message.Registration
import public Language.LSP.Message.RegularExpressions
import public Language.LSP.Message.Rename
import public Language.LSP.Message.SelectionRange
import public Language.LSP.Message.SemanticTokens
import public Language.LSP.Message.ServerCapabilities
import public Language.LSP.Message.SignatureHelp
import public Language.LSP.Message.TextDocument
import public Language.LSP.Message.Trace
import public Language.LSP.Message.URI
import public Language.LSP.Message.Utils
import public Language.LSP.Message.Window
import public Language.LSP.Message.Workspace
|
```python
# reference https://socratic.org/questions/the-top-and-bottom-margins-of-a-poster-are-4-cm-and-the-side-margins-are-each-6-
from IPython.display import Image
from IPython.core.display import HTML
from sympy import *; x,h,y,n,t = symbols("x h y n t"); C, D = symbols("C D", real=True)
Image(url= "https://i.imgur.com/LA701Wi.png")
```
```python
A,a,b = symbols("A a b")
A = 382 + 2*(a*8) + 2*(b*8)-4*(8*8)
A
```
$\displaystyle 16 a + 16 b + 126$
```python
Eq(A,a*b)
```
$\displaystyle 16 a + 16 b + 126 = a b$
```python
pprint(solve(Eq(A,a*b),a)) # A(b)
```
⎡2⋅(8⋅b + 63)⎤
⎢────────────⎥
⎣ b - 16 ⎦
```python
simplify((2*(8*b + 63)/(b - 16))*b)
```
$\displaystyle \frac{2 b \left(8 b + 63\right)}{b - 16}$
```python
simplify(diff((2*(8*b + 63)/(b - 16))*b))
```
$\displaystyle \frac{16 \left(b^{2} - 32 b - 126\right)}{b^{2} - 32 b + 256}$
```python
print(simplify(diff((2*(8*b + 63)/(b - 16))*b)))
```
16*(b**2 - 32*b - 126)/(b**2 - 32*b + 256)
```python
b**2 - 32*b - 126
```
$\displaystyle b^{2} - 32 b - 126$
```python
solve(b**2 - 32*b - 126,b)
```
[16 - sqrt(382), 16 + sqrt(382)]
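Only the root $b = 16 + \sqrt{382}$ is admissible (the other root is negative). Substituting it back into $a(b)$, a quick check with the symbols already defined above shows the minimizing poster is square:
```python
bopt = 16 + sqrt(382)               # admissible critical point
aopt = 2*(8*bopt + 63)/(bopt - 16)  # a(b) from the constraint solved earlier
simplify(aopt - bopt), bopt.evalf() # aopt == bopt = 16 + sqrt(382) ≈ 35.5
```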
```python
Image(url= "https://i.imgur.com/3RToGO0.png")
```
|
Formal statement is: lemma add_monom: "monom a n + monom b n = monom (a + b) n" Informal statement is: The sum of two monomials with the same exponent is a monomial with the sum of the coefficients and the same exponent. |
Applications for the 2019 Fellowship are now closed. Applications for a 2020 Fellowship will open on August 1, 2019.
In 2020, the HARPO Fellowship residency can take place either January 6-31, February 3-28, March 2-27, August 31-September 25, October 5-30, or November 2-27.
The HARPO Emerging Artist Fellow is integrated into SFAI’s International Thematic Residency Program and may also share space with a variety of local and international Fellows sponsored by government agencies including the Taiwan Ministry of Culture and the Greece Fulbright Commission. HARPO Fellows are not required to engage with the 2020 residency theme, but may do so at their discretion. Ultimately, the semi-structured programming and unique environment at SFAI allows the Artist Fellow to be as interactive or private as they wish with other residents. For details about our International Thematic Residency Program, please visit our Residency Information and Residency FAQ.
For more information about past recipients of the Emerging Artist Fellowship Residency, please visit this site and/or www.harpofoundation.org.
Annual Call for Applications opens August 1st.
Annual Application Deadline is October 1st, 11:59pm.
Letters of Recommendation Deadline: October 8, 11:59pm.
In addition to the $35 application fee, SFAI requires a refundable $150 security deposit to be paid upon acceptance. All other expenses, materials, and equipment are the responsibility of the fellow. |
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc converted for use with FLUKA -------
cc - oct'13 -------
SUBROUTINE PYTBHG(Q1,Q2,P1,P2,P3,MT,MB,RMB,MHP,AMP2)
C
C CONVENTIONS AND INPUT/OUTPUT DEFINITIONS:
C
C INPUT: Q1,Q2 ARE ENTERING 4-MOMENTA OF INITIAL GLUONS OR QUARKS;
C P1, P2 ARE THE TOP AND BOTTOM OUTGOING 4-MOMENTA;
C P3 IS OUTGOING CHARGED HIGGS 4-MOMENTA.
C (NB FOR ALL 4-MOMENTA P(4) IS TIME-COMPONENT)
C "PHYSICAL PARAMETERS" INPUT:
C MT,MB TOP AND BOTTOM MASSES;
C MHP CHARGED HIGGS MASS
C FURTHER PARAMETERS INPUT IS NEEDED FROM COMMON/PARAM/ (SEE BELOW)
C
C OUTPUT: AMP2 IS MATRIX ELEMENT (AMPLITUDE**2) FOR GG->TB H^+
C (NB AMP2 IS TRULY AMPLITUDE SQUARED, I.E. WITHOUT ANY
C PHASE SPACE FACTORS INCLUDED. IT INCLUDES COLOUR AND COUPLING
C FACTORS, AS EXPLICIT BELOW. ACCORDINGLY, FOR EXAMPLE THE TOTAL
C CROSS-SECTION SHOULD BE (SYMBOLICALLY):
C SIGMA = INTEGRATE [PARTON DENSITY FUNCTIONS * 3-PARTICLE FINAL
C STATE PHASE-SPACE (WITH STANDARD NORMALIZATION) * AMP2 ]
C
IMPLICIT DOUBLE PRECISION(A-H, O-Z)
IMPLICIT INTEGER(I-N)
DOUBLE PRECISION MT,MB,MW,MHP
DIMENSION Q1(4),Q2(4),P1(4),P2(4),P3(4)
include 'inc/pydat1'
include 'inc/pydat2'
include 'inc/pymssm'
include 'inc/pyctbh'
C !THE RELEVANT INPUT PARAMETERS ABOVE ARE NEEDED FOR CALCULATION
C BUT ARE NOT DEFINED HERE SO THAT ONE MAY CHOOSE/VARY THEIR VALUES:
C ACCORDINGLY, WHEN CALLING THESE SUBROUTINES, PLEASE SUPPLY VIA
C THIS COMMON/PARAM/ YOUR PREFERRED ALPHA, ALPHAS,..AND TANB
C (TAN BETA) VALUES
C
C THE NORMALIZED V,A COUPLINGS ARE DEFINED BELOW AND USED BOTH
C IN THIS ROUTINE AND IN THE TOP WIDTH CALCULATION PYTBHB(..).
PI = 4*ATAN(1.D0)
MW = SQRT(MW2)
C
C COLLECTING THE RELEVANT OVERALL FACTORS:
C 8X8 INITIAL GLUON COLOR AVERAGE, 2X2 GLUON SPIN AVERAGE
PS=1.D0/(8.D0*8.D0 *2.D0*2.D0)
C COUPLING CONSTANT (OVERALL NORMALIZATION)
FACT=(4.D0*PI*ALPHA)*(4.D0*PI*ALPHAS)**2/SW2/2.D0
C NB ALPHA IS E^2/4/PI, BUT BETTER DEFINED IN TERMS OF G_FERMI:
C ALPHA= SQRT(2.D0)*GF*SW2*MW**2/PI
C ALPHAS IS ALPHA_STRONG;
C SW2 IS SIN(THETA_W)**2.
C
C VTB=.998D0
C VTB IS TOP-BOTTOM CKM MATRIX ELEMENT (APPROXIMATE VALUE HERE)
C
V = ( MT/MW/TANB +RMB/MW*TANB)/2.D0
A = (-MT/MW/TANB +RMB/MW*TANB)/2.D0
C V AND A ARE (NORMALIZED) VECTOR AND AXIAL TBH^+ COUPLINGS
C
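C EXPLICITLY, V = (MT/TANB + RMB*TANB)/(2*MW) AND
C A = (-MT/TANB + RMB*TANB)/(2*MW): THE TOP-YUKAWA TERM DOMINATES
C AT SMALL TANB, THE BOTTOM-YUKAWA TERM AT LARGE TANB
C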
C REDEFINING P2 INGOING FROM OVERALL MOMENTUM CONSERVATION
C (BECAUSE P2 INGOING WAS USED IN OUR GRAPH CALCULATION CONVENTIONS)
DO 100 KK=1,4
P2(KK)=P3(KK)-Q1(KK)-Q2(KK)+P1(KK)
100 CONTINUE
C DEFINING VARIOUS RELEVANT 4-SCALAR PRODUCTS:
S = 2*PYTBHS(Q1,Q2)
P1Q1=PYTBHS(Q1,P1)
P1Q2=PYTBHS(P1,Q2)
P2Q1=PYTBHS(P2,Q1)
P2Q2=PYTBHS(P2,Q2)
P1P2=PYTBHS(P1,P2)
C
C TOP WIDTH CALCULATION
CALL PYTBHB(MT,MB,MHP,BR,GAMT)
C GAMT IS THE TOP WIDTH: T->BH^+ AND/OR T->B W^+
C THEN DEFINE TOP (RESONANT) PROPAGATOR:
A1INV= S -2*P1Q1 -2*P1Q2
A1 =A1INV/(A1INV**2+ (GAMT*MT)**2)
C (I.E. INTRODUCE THE TOP WIDTH IN A1 TO REGULARISE THE POLE)
C NB: A12 = A1*A1 BUT CORRECT EXPRESSION BELOW BECAUSE OF
C THE TOP WIDTH
A12 = 1.D0/(A1INV**2+ (GAMT*MT)**2)
A2 =1.D0/(S +2*P2Q1 +2*P2Q2)
C NOTE A2 IS B PROPAGATOR, DOES NOT NEED A WIDTH
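C (FOR REFERENCE: WRITING X=A1INV, A1 = X/(X**2+(GAMT*MT)**2) IS THE
C REAL PART OF THE BREIT-WIGNER PROPAGATOR 1/(X-I*GAMT*MT) AND A12 ITS
C MODULUS SQUARED, SO A12 REDUCES TO A1**2 IN THE ZERO-WIDTH LIMIT)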
C NOW COMES THE AMP**2:
C NB COLOR FACTOR (COMING FROM GRAPHS) ALREADY INCLUDED IN
C THE EXPRESSIONS BELOW
V18=0.D0
A18=0.D0
V18= 640*A1/3+640*A2/3+32*A1*A2*MB**2-368*A12*MB*MT-
&512*A1*A2*MB*MT/3-
&368*A2**2*MB*MT+32*A1*A2*MT**2+496*A12*P1P2/3+
&320*A1*A2*P1P2+496*A2**2*P1P2/3+128*A1*MB*MT**3/(3*P1Q1**2)+
&128*A1*MT**4/(3*P1Q1**2)-256*A12*MB*MT**5/(3*P1Q1**2)+
&256*A1*MT**2*P1P2/(3*P1Q1**2)-256*A12*MT**4*P1P2/(3*P1Q1**2)+
&8/(3*P1Q1)-32*A1*MB*MT/P1Q1-56*A2*MB*MT/(3*P1Q1)+
&88*A1*MT**2/(3*P1Q1)+72*A2*MT**2/P1Q1+
&704*A12*MB*MT**3/(3*P1Q1)-224*A1*A2*MB*MT**3/(3*P1Q1)+
&104*A1*P1P2/(3*P1Q1)+48*A2*P1P2/P1Q1+
&128*A1*A2*MB*MT*P1P2/(3*P1Q1)+512*A12*MT**2*P1P2/(3*P1Q1)-
&448*A1*A2*MT**2*P1P2/(3*P1Q1)-32*A1*A2*P1P2**2/P1Q1-
&656*A1*A2*P1Q1/3-224*A2**2*P1Q1+128*A1*MB*MT**3/(3*P1Q2**2)+
&128*A1*MT**4/(3*P1Q2**2)-256*A12*MB*MT**5/(3*P1Q2**2)+
&256*A1*MT**2*P1P2/(3*P1Q2**2)-256*A12*MT**4*P1P2/(3*P1Q2**2)+
&256*A1*MT**2*P1Q1/(3*P1Q2**2)+256*A12*MB*MT**3*P1Q1/(3*P1Q2**2)+
&8/(3*P1Q2)-32*A1*MB*MT/P1Q2-56*A2*MB*MT/(3*P1Q2)
V18=V18+88*A1*MT**2/(3*P1Q2)+72*A2*MT**2/P1Q2+
&704*A12*MB*MT**3/(3*P1Q2)-224*A1*A2*MB*MT**3/(3*P1Q2)+
&104*A1*P1P2/(3*P1Q2)+48*A2*P1P2/P1Q2+
&128*A1*A2*MB*MT*P1P2/(3*P1Q2)+512*A12*MT**2*P1P2/(3*P1Q2)-
&448*A1*A2*MT**2*P1P2/(3*P1Q2)-32*A1*A2*P1P2**2/P1Q2-
&32*A1*MB*MT**3/(3*P1Q1*P1Q2)-32*A1*MT**4/(3*P1Q1*P1Q2)+
&64*A12*MB*MT**5/(3*P1Q1*P1Q2)+16*P1P2/(3*P1Q1*P1Q2)-
&64*A1*MT**2*P1P2/(3*P1Q1*P1Q2)+64*A12*MT**4*P1P2/(3*P1Q1*P1Q2)+
&112*A1*P1Q1/P1Q2+272*A2*P1Q1/(3*P1Q2)-
&272*A1*A2*MB**2*P1Q1/(3*P1Q2)+208*A12*MB*MT*P1Q1/(3*P1Q2)-
&400*A1*A2*MB*MT*P1Q1/(3*P1Q2)-80*A1*A2*MT**2*P1Q1/P1Q2+
&96*A12*P1P2*P1Q1/P1Q2-320*A1*A2*P1P2*P1Q1/P1Q2-
&544*A1*A2*P1Q1**2/(3*P1Q2)-656*A1*A2*P1Q2/3-224*A2**2*P1Q2+
&256*A1*MT**2*P1Q2/(3*P1Q1**2)+256*A12*MB*MT**3*P1Q2/(3*P1Q1**2)+
&112*A1*P1Q2/P1Q1+272*A2*P1Q2/(3*P1Q1)-
&272*A1*A2*MB**2*P1Q2/(3*P1Q1)+208*A12*MB*MT*P1Q2/(3*P1Q1)-
&400*A1*A2*MB*MT*P1Q2/(3*P1Q1)-80*A1*A2*MT**2*P1Q2/P1Q1
V18=V18+96*A12*P1P2*P1Q2/P1Q1-320*A1*A2*P1P2*P1Q2/P1Q1-
&544*A1*A2*P1Q2**2/(3*P1Q1)+128*A2*MB**4/(3*P2Q1**2)+
&128*A2*MB**3*MT/(3*P2Q1**2)-256*A2**2*MB**5*MT/(3*P2Q1**2)+
&256*A2*MB**2*P1P2/(3*P2Q1**2)-256*A2**2*MB**4*P1P2/(3*P2Q1**2)+
&256*A2*MB**2*P1Q1/(3*P2Q1**2)-256*A2**2*MB**4*P1Q1/(3*P2Q1**2)-
&64*MB**3*MT**3/(3*P1Q2**2*P2Q1**2)-
&64*MB**2*MT**2*P1P2/(3*P1Q2**2*P2Q1**2)-
&64*MB**2*MT**2*P1Q1/(3*P1Q2**2*P2Q1**2)+
&64*MB**3*MT/(3*P1Q2*P2Q1**2)+
&256*A2*MB**3*MT*P1P2/(3*P1Q2*P2Q1**2)+
&256*A2*MB**2*P1P2**2/(3*P1Q2*P2Q1**2)+
&256*A2*MB**3*MT*P1Q1/(3*P1Q2*P2Q1**2)+
&512*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q1**2)+
&256*A2*MB**2*P1Q1**2/(3*P1Q2*P2Q1**2)-
&256*A2**2*MB**4*P1Q2/(3*P2Q1**2)-8/(3*P2Q1)-72*A1*MB**2/P2Q1-
&88*A2*MB**2/(3*P2Q1)+56*A1*MB*MT/(3*P2Q1)+32*A2*MB*MT/P2Q1+
&224*A1*A2*MB**3*MT/(3*P2Q1)-704*A2**2*MB**3*MT/(3*P2Q1)
V18=V18-48*A1*P1P2/P2Q1-104*A2*P1P2/(3*P2Q1)+
&448*A1*A2*MB**2*P1P2/(3*P2Q1)-512*A2**2*MB**2*P1P2/(3*P2Q1)-
&128*A1*A2*MB*MT*P1P2/(3*P2Q1)+32*A1*A2*P1P2**2/P2Q1-
&16*P1P2/(3*P1Q1*P2Q1)-32*A1*MB*MT*P1P2/(3*P1Q1*P2Q1)-
&32*A2*MB*MT*P1P2/(3*P1Q1*P2Q1)-
&64*A1*A2*MB*MT*P1P2**2/(3*P1Q1*P2Q1)-
&64*A1*A2*P1P2**3/(3*P1Q1*P2Q1)-256*A2*P1Q1/(3*P2Q1)+
&448*A1*A2*MB**2*P1Q1/(3*P2Q1)-368*A2**2*MB**2*P1Q1/(3*P2Q1)+
&224*A1*A2*MB*MT*P1Q1/(3*P2Q1)+304*A1*A2*P1P2*P1Q1/(3*P2Q1)-
&64*MB*MT**3/(3*P1Q2**2*P2Q1)-
&256*A1*MB*MT**3*P1P2/(3*P1Q2**2*P2Q1)-
&256*A1*MT**2*P1P2**2/(3*P1Q2**2*P2Q1)+
&64*MT**2*P1Q1/(3*P1Q2**2*P2Q1)-
&128*A1*MB**2*MT**2*P1Q1/(3*P1Q2**2*P2Q1)-
&128*A1*MB*MT**3*P1Q1/(3*P1Q2**2*P2Q1)-
&256*A1*MT**2*P1P2*P1Q1/(3*P1Q2**2*P2Q1)-4*MB**2/(3*P1Q2*P2Q1)+
&64*MB*MT/(3*P1Q2*P2Q1)-128*A2*MB**3*MT/(3*P1Q2*P2Q1)
V18=V18-4*MT**2/(3*P1Q2*P2Q1)-128*A1*MB**2*MT**2/(3*P1Q2*P2Q1)-
&128*A2*MB**2*MT**2/(3*P1Q2*P2Q1)-128*A1*MB*MT**3/(3*P1Q2*P2Q1)-
&112*A2*MB**2*P1P2/(3*P1Q2*P2Q1)-32*A1*MB*MT*P1P2/(3*P1Q2*P2Q1)-
&32*A2*MB*MT*P1P2/(3*P1Q2*P2Q1)-112*A1*MT**2*P1P2/(3*P1Q2*P2Q1)-
&48*A1*P1P2**2/(P1Q2*P2Q1)-48*A2*P1P2**2/(P1Q2*P2Q1)+
&512*A1*A2*MB*MT*P1P2**2/(3*P1Q2*P2Q1)+
&512*A1*A2*P1P2**3/(3*P1Q2*P2Q1)-8*MB*MT*P1P2/(3*P1Q1*P1Q2*P2Q1)-
&8*MT**2*P1P2/(3*P1Q1*P1Q2*P2Q1)+
&32*A1*MB*MT**3*P1P2/(3*P1Q1*P1Q2*P2Q1)-
&16*P1P2**2/(3*P1Q1*P1Q2*P2Q1)+
&32*A1*MT**2*P1P2**2/(3*P1Q1*P1Q2*P2Q1)+8*P1Q1/(3*P1Q2*P2Q1)-
&160*A1*MB**2*P1Q1/(3*P1Q2*P2Q1)-272*A2*MB**2*P1Q1/(3*P1Q2*P2Q1)+
&56*A1*MB*MT*P1Q1/(3*P1Q2*P2Q1)+200*A2*MB*MT*P1Q1/(3*P1Q2*P2Q1)-
&48*A1*P1P2*P1Q1/(P1Q2*P2Q1)-256*A2*P1P2*P1Q1/(3*P1Q2*P2Q1)+
&256*A1*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q1)+
&256*A1*A2*MB*MT*P1P2*P1Q1/(P1Q2*P2Q1)+
&1024*A1*A2*P1P2**2*P1Q1/(3*P1Q2*P2Q1)
V18=V18-272*A2*P1Q1**2/(3*P1Q2*P2Q1)+
&256*A1*A2*MB**2*P1Q1**2/(3*P1Q2*P2Q1)+
&256*A1*A2*MB*MT*P1Q1**2/(3*P1Q2*P2Q1)+
&512*A1*A2*P1P2*P1Q1**2/(3*P1Q2*P2Q1)+16*A2*P1Q2/(3*P2Q1)+
&64*A1*A2*MB**2*P1Q2/P2Q1+32*A2**2*MB**2*P1Q2/(3*P2Q1)+
&112*A1*A2*MB*MT*P1Q2/(3*P2Q1)+368*A1*A2*P1P2*P1Q2/(3*P2Q1)+
&32*A2*P1P2*P1Q2/(3*P1Q1*P2Q1)-
&32*A1*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q1)-
&32*A1*A2*MB*MT*P1P2*P1Q2/(3*P1Q1*P2Q1)-
&64*A1*A2*P1P2**2*P1Q2/(3*P1Q1*P2Q1)+224*A12*P2Q1+
&656*A1*A2*P2Q1/3-256*A1*MT**2*P2Q1/(3*P1Q1**2)+
&256*A12*MT**4*P2Q1/(3*P1Q1**2)-256*A1*P2Q1/(3*P1Q1)+
&224*A1*A2*MB*MT*P2Q1/(3*P1Q1)-368*A12*MT**2*P2Q1/(3*P1Q1)+
&448*A1*A2*MT**2*P2Q1/(3*P1Q1)+304*A1*A2*P1P2*P2Q1/(3*P1Q1)+
&256*A12*MT**4*P2Q1/(3*P1Q2**2)+
&256*A12*MT**2*P1Q1*P2Q1/(3*P1Q2**2)+16*A1*P2Q1/(3*P1Q2)+
&112*A1*A2*MB*MT*P2Q1/(3*P1Q2)+32*A12*MT**2*P2Q1/(3*P1Q2)
V18=V18+64*A1*A2*MT**2*P2Q1/P1Q2+368*A1*A2*P1P2*P2Q1/(3*P1Q2)+
&16*A1*MT**2*P2Q1/(3*P1Q1*P1Q2)-64*A12*MT**4*P2Q1/(3*P1Q1*P1Q2)+
&640*A12*P1Q1*P2Q1/(3*P1Q2)+544*A1*A2*P1Q1*P2Q1/(3*P1Q2)+
&32*A12*P1Q2*P2Q1/P1Q1+944*A1*A2*P1Q2*P2Q1/(3*P1Q1)+
&128*A2*MB**4/(3*P2Q2**2)+128*A2*MB**3*MT/(3*P2Q2**2)-
&256*A2**2*MB**5*MT/(3*P2Q2**2)+256*A2*MB**2*P1P2/(3*P2Q2**2)-
&256*A2**2*MB**4*P1P2/(3*P2Q2**2)-
&64*MB**3*MT**3/(3*P1Q1**2*P2Q2**2)-
&64*MB**2*MT**2*P1P2/(3*P1Q1**2*P2Q2**2)+
&64*MB**3*MT/(3*P1Q1*P2Q2**2)+
&256*A2*MB**3*MT*P1P2/(3*P1Q1*P2Q2**2)+
&256*A2*MB**2*P1P2**2/(3*P1Q1*P2Q2**2)-
&256*A2**2*MB**4*P1Q1/(3*P2Q2**2)+256*A2*MB**2*P1Q2/(3*P2Q2**2)-
&256*A2**2*MB**4*P1Q2/(3*P2Q2**2)-
&64*MB**2*MT**2*P1Q2/(3*P1Q1**2*P2Q2**2)+
&256*A2*MB**3*MT*P1Q2/(3*P1Q1*P2Q2**2)+
&512*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q2**2)
V18=V18+256*A2*MB**2*P1Q2**2/(3*P1Q1*P2Q2**2)-
&256*A2*MB**2*P2Q1/(3*P2Q2**2)-256*A2**2*MB**3*MT*P2Q1/(3*P2Q2**2)+
&64*MB**2*MT**2*P2Q1/(3*P1Q1**2*P2Q2**2)+
&64*MB**2*P2Q1/(3*P1Q1*P2Q2**2)-
&128*A2*MB**3*MT*P2Q1/(3*P1Q1*P2Q2**2)-
&128*A2*MB**2*MT**2*P2Q1/(3*P1Q1*P2Q2**2)-
&256*A2*MB**2*P1P2*P2Q1/(3*P1Q1*P2Q2**2)+
&256*A2**2*MB**2*P1Q1*P2Q1/(3*P2Q2**2)-
&256*A2*MB**2*P1Q2*P2Q1/(3*P1Q1*P2Q2**2)-8/(3*P2Q2)-
&72*A1*MB**2/P2Q2-88*A2*MB**2/(3*P2Q2)+56*A1*MB*MT/(3*P2Q2)+
&32*A2*MB*MT/P2Q2+224*A1*A2*MB**3*MT/(3*P2Q2)-
&704*A2**2*MB**3*MT/(3*P2Q2)-48*A1*P1P2/P2Q2-
&104*A2*P1P2/(3*P2Q2)+448*A1*A2*MB**2*P1P2/(3*P2Q2)-
&512*A2**2*MB**2*P1P2/(3*P2Q2)-128*A1*A2*MB*MT*P1P2/(3*P2Q2)+
&32*A1*A2*P1P2**2/P2Q2-64*MB*MT**3/(3*P1Q1**2*P2Q2)-
&256*A1*MB*MT**3*P1P2/(3*P1Q1**2*P2Q2)-
&256*A1*MT**2*P1P2**2/(3*P1Q1**2*P2Q2)-4*MB**2/(3*P1Q1*P2Q2)
V18=V18+64*MB*MT/(3*P1Q1*P2Q2)-128*A2*MB**3*MT/(3*P1Q1*P2Q2)-
&4*MT**2/(3*P1Q1*P2Q2)-128*A1*MB**2*MT**2/(3*P1Q1*P2Q2)-
&128*A2*MB**2*MT**2/(3*P1Q1*P2Q2)-128*A1*MB*MT**3/(3*P1Q1*P2Q2)-
&112*A2*MB**2*P1P2/(3*P1Q1*P2Q2)-32*A1*MB*MT*P1P2/(3*P1Q1*P2Q2)-
&32*A2*MB*MT*P1P2/(3*P1Q1*P2Q2)-112*A1*MT**2*P1P2/(3*P1Q1*P2Q2)-
&48*A1*P1P2**2/(P1Q1*P2Q2)-48*A2*P1P2**2/(P1Q1*P2Q2)+
&512*A1*A2*MB*MT*P1P2**2/(3*P1Q1*P2Q2)+
&512*A1*A2*P1P2**3/(3*P1Q1*P2Q2)+16*A2*P1Q1/(3*P2Q2)+
&64*A1*A2*MB**2*P1Q1/P2Q2+32*A2**2*MB**2*P1Q1/(3*P2Q2)+
&112*A1*A2*MB*MT*P1Q1/(3*P2Q2)+368*A1*A2*P1P2*P1Q1/(3*P2Q2)-
&16*P1P2/(3*P1Q2*P2Q2)-32*A1*MB*MT*P1P2/(3*P1Q2*P2Q2)-
&32*A2*MB*MT*P1P2/(3*P1Q2*P2Q2)-
&64*A1*A2*MB*MT*P1P2**2/(3*P1Q2*P2Q2)-
&64*A1*A2*P1P2**3/(3*P1Q2*P2Q2)-8*MB*MT*P1P2/(3*P1Q1*P1Q2*P2Q2)-
&8*MT**2*P1P2/(3*P1Q1*P1Q2*P2Q2)+
&32*A1*MB*MT**3*P1P2/(3*P1Q1*P1Q2*P2Q2)-
&16*P1P2**2/(3*P1Q1*P1Q2*P2Q2)
V18=V18+32*A1*MT**2*P1P2**2/(3*P1Q1*P1Q2*P2Q2)+
&32*A2*P1P2*P1Q1/(3*P1Q2*P2Q2)-
&32*A1*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q2)-
&32*A1*A2*MB*MT*P1P2*P1Q1/(3*P1Q2*P2Q2)-
&64*A1*A2*P1P2**2*P1Q1/(3*P1Q2*P2Q2)-256*A2*P1Q2/(3*P2Q2)+
&448*A1*A2*MB**2*P1Q2/(3*P2Q2)-368*A2**2*MB**2*P1Q2/(3*P2Q2)+
&224*A1*A2*MB*MT*P1Q2/(3*P2Q2)+304*A1*A2*P1P2*P1Q2/(3*P2Q2)+
&64*MT**2*P1Q2/(3*P1Q1**2*P2Q2)-
&128*A1*MB**2*MT**2*P1Q2/(3*P1Q1**2*P2Q2)-
&128*A1*MB*MT**3*P1Q2/(3*P1Q1**2*P2Q2)-
&256*A1*MT**2*P1P2*P1Q2/(3*P1Q1**2*P2Q2)+8*P1Q2/(3*P1Q1*P2Q2)-
&160*A1*MB**2*P1Q2/(3*P1Q1*P2Q2)-272*A2*MB**2*P1Q2/(3*P1Q1*P2Q2)+
&56*A1*MB*MT*P1Q2/(3*P1Q1*P2Q2)+200*A2*MB*MT*P1Q2/(3*P1Q1*P2Q2)-
&48*A1*P1P2*P1Q2/(P1Q1*P2Q2)-256*A2*P1P2*P1Q2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB*MT*P1P2*P1Q2/(P1Q1*P2Q2)+
&1024*A1*A2*P1P2**2*P1Q2/(3*P1Q1*P2Q2)
V18=V18-272*A2*P1Q2**2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB**2*P1Q2**2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB*MT*P1Q2**2/(3*P1Q1*P2Q2)+
&512*A1*A2*P1P2*P1Q2**2/(3*P1Q1*P2Q2)-32*A2*MB**4/(3*P2Q1*P2Q2)-
&32*A2*MB**3*MT/(3*P2Q1*P2Q2)+64*A2**2*MB**5*MT/(3*P2Q1*P2Q2)+
&16*P1P2/(3*P2Q1*P2Q2)-64*A2*MB**2*P1P2/(3*P2Q1*P2Q2)+
&64*A2**2*MB**4*P1P2/(3*P2Q1*P2Q2)+8*MB**2*P1P2/(3*P1Q1*P2Q1*P2Q2)+
&8*MB*MT*P1P2/(3*P1Q1*P2Q1*P2Q2)-
&32*A2*MB**3*MT*P1P2/(3*P1Q1*P2Q1*P2Q2)+
&16*P1P2**2/(3*P1Q1*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2**2/(3*P1Q1*P2Q1*P2Q2)-
&16*A2*MB**2*P1Q1/(3*P2Q1*P2Q2)+64*A2**2*MB**4*P1Q1/(3*P2Q1*P2Q2)+
&8*MB**2*P1P2/(3*P1Q2*P2Q1*P2Q2)+8*MB*MT*P1P2/(3*P1Q2*P2Q1*P2Q2)-
&32*A2*MB**3*MT*P1P2/(3*P1Q2*P2Q1*P2Q2)+
&16*P1P2**2/(3*P1Q2*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2**2/(3*P1Q2*P2Q1*P2Q2)+
&16*MB*MT*P1P2**2/(3*P1Q1*P1Q2*P2Q1*P2Q2)
V18=V18+16*P1P2**3/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q1*P2Q2)-
&16*A2*MB**2*P1Q2/(3*P2Q1*P2Q2)+64*A2**2*MB**4*P1Q2/(3*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q1*P2Q2)+272*A1*P2Q1/(3*P2Q2)+
&112*A2*P2Q1/P2Q2-80*A1*A2*MB**2*P2Q1/P2Q2-
&400*A1*A2*MB*MT*P2Q1/(3*P2Q2)+208*A2**2*MB*MT*P2Q1/(3*P2Q2)-
&272*A1*A2*MT**2*P2Q1/(3*P2Q2)-320*A1*A2*P1P2*P2Q1/P2Q2+
&96*A2**2*P1P2*P2Q1/P2Q2+256*A1*MB*MT**3*P2Q1/(3*P1Q1**2*P2Q2)+
&512*A1*MT**2*P1P2*P2Q1/(3*P1Q1**2*P2Q2)-8*P2Q1/(3*P1Q1*P2Q2)-
&200*A1*MB*MT*P2Q1/(3*P1Q1*P2Q2)-56*A2*MB*MT*P2Q1/(3*P1Q1*P2Q2)+
&272*A1*MT**2*P2Q1/(3*P1Q1*P2Q2)+160*A2*MT**2*P2Q1/(3*P1Q1*P2Q2)+
&256*A1*P1P2*P2Q1/(3*P1Q1*P2Q2)+48*A2*P1P2*P2Q1/(P1Q1*P2Q2)-
&256*A1*A2*MB*MT*P1P2*P2Q1/(P1Q1*P2Q2)-
&256*A1*A2*MT**2*P1P2*P2Q1/(3*P1Q1*P2Q2)-
&1024*A1*A2*P1P2**2*P2Q1/(3*P1Q1*P2Q2)-
&544*A1*A2*P1Q1*P2Q1/(3*P2Q2)-640*A2**2*P1Q1*P2Q1/(3*P2Q2)-
&32*A1*P1P2*P2Q1/(3*P1Q2*P2Q2)
V18=V18+32*A1*A2*MB*MT*P1P2*P2Q1/(3*P1Q2*P2Q2)+
&32*A1*A2*MT**2*P1P2*P2Q1/(3*P1Q2*P2Q2)+
&64*A1*A2*P1P2**2*P2Q1/(3*P1Q2*P2Q2)-
&32*A1*MT**2*P1P2*P2Q1/(3*P1Q1*P1Q2*P2Q2)+
&64*A1*A2*P1P2*P1Q1*P2Q1/(3*P1Q2*P2Q2)-
&944*A1*A2*P1Q2*P2Q1/(3*P2Q2)-32*A2**2*P1Q2*P2Q1/P2Q2+
&256*A1*MT**2*P1Q2*P2Q1/(3*P1Q1**2*P2Q2)+
&96*A1*P1Q2*P2Q1/(P1Q1*P2Q2)+96*A2*P1Q2*P2Q1/(P1Q1*P2Q2)-
&128*A1*A2*MB**2*P1Q2*P2Q1/(3*P1Q1*P2Q2)-
&256*A1*A2*MB*MT*P1Q2*P2Q1/(P1Q1*P2Q2)-
&128*A1*A2*MT**2*P1Q2*P2Q1/(3*P1Q1*P2Q2)-
&512*A1*A2*P1P2*P1Q2*P2Q1/(P1Q1*P2Q2)-
&512*A1*A2*P1Q2**2*P2Q1/(3*P1Q1*P2Q2)+544*A1*A2*P2Q1**2/(3*P2Q2)-
&256*A1*MT**2*P2Q1**2/(3*P1Q1**2*P2Q2)-
&272*A1*P2Q1**2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB*MT*P2Q1**2/(3*P1Q1*P2Q2)+
&256*A1*A2*MT**2*P2Q1**2/(3*P1Q1*P2Q2)
V18=V18+512*A1*A2*P1P2*P2Q1**2/(3*P1Q1*P2Q2)+
&512*A1*A2*P1Q2*P2Q1**2/(3*P1Q1*P2Q2)+224*A12*P2Q2+
&656*A1*A2*P2Q2/3+256*A12*MT**4*P2Q2/(3*P1Q1**2)+
&16*A1*P2Q2/(3*P1Q1)+112*A1*A2*MB*MT*P2Q2/(3*P1Q1)+
&32*A12*MT**2*P2Q2/(3*P1Q1)+64*A1*A2*MT**2*P2Q2/P1Q1+
&368*A1*A2*P1P2*P2Q2/(3*P1Q1)-256*A1*MT**2*P2Q2/(3*P1Q2**2)+
&256*A12*MT**4*P2Q2/(3*P1Q2**2)-256*A1*P2Q2/(3*P1Q2)+
&224*A1*A2*MB*MT*P2Q2/(3*P1Q2)-368*A12*MT**2*P2Q2/(3*P1Q2)+
&448*A1*A2*MT**2*P2Q2/(3*P1Q2)+304*A1*A2*P1P2*P2Q2/(3*P1Q2)+
&16*A1*MT**2*P2Q2/(3*P1Q1*P1Q2)-64*A12*MT**4*P2Q2/(3*P1Q1*P1Q2)+
&32*A12*P1Q1*P2Q2/P1Q2+944*A1*A2*P1Q1*P2Q2/(3*P1Q2)+
&256*A12*MT**2*P1Q2*P2Q2/(3*P1Q1**2)+
&640*A12*P1Q2*P2Q2/(3*P1Q1)+544*A1*A2*P1Q2*P2Q2/(3*P1Q1)-
&256*A2*MB**2*P2Q2/(3*P2Q1**2)-256*A2**2*MB**3*MT*P2Q2/(3*P2Q1**2)+
&64*MB**2*MT**2*P2Q2/(3*P1Q2**2*P2Q1**2)+
&64*MB**2*P2Q2/(3*P1Q2*P2Q1**2)-
&128*A2*MB**3*MT*P2Q2/(3*P1Q2*P2Q1**2)
V18=V18-128*A2*MB**2*MT**2*P2Q2/(3*P1Q2*P2Q1**2)-
&256*A2*MB**2*P1P2*P2Q2/(3*P1Q2*P2Q1**2)-
&256*A2*MB**2*P1Q1*P2Q2/(3*P1Q2*P2Q1**2)+
&256*A2**2*MB**2*P1Q2*P2Q2/(3*P2Q1**2)+272*A1*P2Q2/(3*P2Q1)+
&112*A2*P2Q2/P2Q1-80*A1*A2*MB**2*P2Q2/P2Q1-
&400*A1*A2*MB*MT*P2Q2/(3*P2Q1)+208*A2**2*MB*MT*P2Q2/(3*P2Q1)-
&272*A1*A2*MT**2*P2Q2/(3*P2Q1)-320*A1*A2*P1P2*P2Q2/P2Q1+
&96*A2**2*P1P2*P2Q2/P2Q1-32*A1*P1P2*P2Q2/(3*P1Q1*P2Q1)+
&32*A1*A2*MB*MT*P1P2*P2Q2/(3*P1Q1*P2Q1)+
&32*A1*A2*MT**2*P1P2*P2Q2/(3*P1Q1*P2Q1)+
&64*A1*A2*P1P2**2*P2Q2/(3*P1Q1*P2Q1)-944*A1*A2*P1Q1*P2Q2/(3*P2Q1)-
&32*A2**2*P1Q1*P2Q2/P2Q1+256*A1*MB*MT**3*P2Q2/(3*P1Q2**2*P2Q1)+
&512*A1*MT**2*P1P2*P2Q2/(3*P1Q2**2*P2Q1)+
&256*A1*MT**2*P1Q1*P2Q2/(3*P1Q2**2*P2Q1)-8*P2Q2/(3*P1Q2*P2Q1)-
&200*A1*MB*MT*P2Q2/(3*P1Q2*P2Q1)-56*A2*MB*MT*P2Q2/(3*P1Q2*P2Q1)+
&272*A1*MT**2*P2Q2/(3*P1Q2*P2Q1)+160*A2*MT**2*P2Q2/(3*P1Q2*P2Q1)+
&256*A1*P1P2*P2Q2/(3*P1Q2*P2Q1)+48*A2*P1P2*P2Q2/(P1Q2*P2Q1)
V18=V18-256*A1*A2*MB*MT*P1P2*P2Q2/(P1Q2*P2Q1)-
&256*A1*A2*MT**2*P1P2*P2Q2/(3*P1Q2*P2Q1)-
&1024*A1*A2*P1P2**2*P2Q2/(3*P1Q2*P2Q1)-
&32*A1*MT**2*P1P2*P2Q2/(3*P1Q1*P1Q2*P2Q1)+
&96*A1*P1Q1*P2Q2/(P1Q2*P2Q1)+96*A2*P1Q1*P2Q2/(P1Q2*P2Q1)-
&128*A1*A2*MB**2*P1Q1*P2Q2/(3*P1Q2*P2Q1)-
&256*A1*A2*MB*MT*P1Q1*P2Q2/(P1Q2*P2Q1)-
&128*A1*A2*MT**2*P1Q1*P2Q2/(3*P1Q2*P2Q1)-
&512*A1*A2*P1P2*P1Q1*P2Q2/(P1Q2*P2Q1)-
&512*A1*A2*P1Q1**2*P2Q2/(3*P1Q2*P2Q1)-544*A1*A2*P1Q2*P2Q2/(3*P2Q1)-
&640*A2**2*P1Q2*P2Q2/(3*P2Q1)+
&64*A1*A2*P1P2*P1Q2*P2Q2/(3*P1Q1*P2Q1)+544*A1*A2*P2Q2**2/(3*P2Q1)-
&256*A1*MT**2*P2Q2**2/(3*P1Q2**2*P2Q1)-
&272*A1*P2Q2**2/(3*P1Q2*P2Q1)+
&256*A1*A2*MB*MT*P2Q2**2/(3*P1Q2*P2Q1)+
&256*A1*A2*MT**2*P2Q2**2/(3*P1Q2*P2Q1)+
&512*A1*A2*P1P2*P2Q2**2/(3*P1Q2*P2Q1)
V18=V18+512*A1*A2*P1Q1*P2Q2**2/(3*P1Q2*P2Q1)+
&384*A12*MB*MT*P1Q1**2/S**2+
&384*A12*P1P2*P1Q1**2/S**2+2688*A12*MB*MT*P1Q1*P1Q2/S**2+
&2688*A12*P1P2*P1Q1*P1Q2/S**2+384*A12*MB*MT*P1Q2**2/S**2+
&384*A12*P1P2*P1Q2**2/S**2+768*A1*A2*MB*MT*P1Q1*P2Q1/S**2+
&768*A1*A2*P1P2*P1Q1*P2Q1/S**2+2688*A1*A2*MB*MT*P1Q2*P2Q1/S**2+
&2688*A1*A2*P1P2*P1Q2*P2Q1/S**2-960*A12*P1Q1*P1Q2*P2Q1/S**2-
&960*A1*A2*P1Q1*P1Q2*P2Q1/S**2+960*A12*P1Q2**2*P2Q1/S**2+
&960*A1*A2*P1Q2**2*P2Q1/S**2+384*A2**2*MB*MT*P2Q1**2/S**2+
&384*A2**2*P1P2*P2Q1**2/S**2-960*A1*A2*P1Q2*P2Q1**2/S**2-
&960*A2**2*P1Q2*P2Q1**2/S**2+2688*A1*A2*MB*MT*P1Q1*P2Q2/S**2+
&2688*A1*A2*P1P2*P1Q1*P2Q2/S**2+960*A12*P1Q1**2*P2Q2/S**2+
&960*A1*A2*P1Q1**2*P2Q2/S**2+768*A1*A2*MB*MT*P1Q2*P2Q2/S**2+
&768*A1*A2*P1P2*P1Q2*P2Q2/S**2-960*A12*P1Q1*P1Q2*P2Q2/S**2-
&960*A1*A2*P1Q1*P1Q2*P2Q2/S**2+2688*A2**2*MB*MT*P2Q1*P2Q2/S**2+
&2688*A2**2*P1P2*P2Q1*P2Q2/S**2+960*A1*A2*P1Q1*P2Q1*P2Q2/S**2+
&960*A2**2*P1Q1*P2Q1*P2Q2/S**2+960*A1*A2*P1Q2*P2Q1*P2Q2/S**2+
&960*A2**2*P1Q2*P2Q1*P2Q2/S**2+384*A2**2*MB*MT*P2Q2**2/S**2
V18=V18+384*A2**2*P1P2*P2Q2**2/S**2-960*A1*A2*P1Q1*P2Q2**2/S**2-
&960*A2**2*P1Q1*P2Q2**2/S**2+96*A1*MB*MT/S+96*A2*MB*MT/S-
&768*A2**2*MB**3*MT/S-768*A12*MB*MT**3/S-192*A1*P1P2/S-
&192*A2*P1P2/S-768*A2**2*MB**2*P1P2/S-2304*A1*A2*MB*MT*P1P2/S-
&768*A12*MT**2*P1P2/S-2304*A1*A2*P1P2**2/S-
&96*A1*MB*MT**3/(P1Q1*S)-192*A2*MB*MT*P1P2/(P1Q1*S)-
&96*A1*MT**2*P1P2/(P1Q1*S)-192*A2*P1P2**2/(P1Q1*S)-192*A1*P1Q1/S-
&144*A2*P1Q1/S-384*A1*A2*MB**2*P1Q1/S-480*A2**2*MB**2*P1Q1/S-
&480*A12*MB*MT*P1Q1/S+96*A1*A2*MB*MT*P1Q1/S-
&864*A12*P1P2*P1Q1/S-672*A1*A2*P1P2*P1Q1/S-96*A1*A2*P1Q1**2/S-
&96*A1*MB*MT**3/(P1Q2*S)-192*A2*MB*MT*P1P2/(P1Q2*S)-
&96*A1*MT**2*P1P2/(P1Q2*S)-192*A2*P1P2**2/(P1Q2*S)-
&48*A1*MB*MT*P1Q1/(P1Q2*S)+96*A2*MB*MT*P1Q1/(P1Q2*S)-
&48*A1*MT**2*P1Q1/(P1Q2*S)-192*A1*P1P2*P1Q1/(P1Q2*S)-
&192*A2*P1P2*P1Q1/(P1Q2*S)+192*A1*A2*MB*MT*P1P2*P1Q1/(P1Q2*S)+
&192*A1*A2*P1P2**2*P1Q1/(P1Q2*S)-192*A1*P1Q1**2/(P1Q2*S)-
&192*A2*P1Q1**2/(P1Q2*S)+192*A1*A2*MB**2*P1Q1**2/(P1Q2*S)
V18=V18-192*A12*MB*MT*P1Q1**2/(P1Q2*S)+
&96*A1*A2*MB*MT*P1Q1**2/(P1Q2*S)+
&192*A1*A2*P1P2*P1Q1**2/(P1Q2*S)-192*A1*P1Q2/S-144*A2*P1Q2/S-
&384*A1*A2*MB**2*P1Q2/S-480*A2**2*MB**2*P1Q2/S-
&480*A12*MB*MT*P1Q2/S+96*A1*A2*MB*MT*P1Q2/S-
&864*A12*P1P2*P1Q2/S-672*A1*A2*P1P2*P1Q2/S-
&48*A1*MB*MT*P1Q2/(P1Q1*S)+96*A2*MB*MT*P1Q2/(P1Q1*S)-
&48*A1*MT**2*P1Q2/(P1Q1*S)-192*A1*P1P2*P1Q2/(P1Q1*S)-
&192*A2*P1P2*P1Q2/(P1Q1*S)+192*A1*A2*MB*MT*P1P2*P1Q2/(P1Q1*S)+
&192*A1*A2*P1P2**2*P1Q2/(P1Q1*S)-576*A1*A2*P1Q1*P1Q2/S-
&96*A1*A2*P1Q2**2/S-192*A1*P1Q2**2/(P1Q1*S)-
&192*A2*P1Q2**2/(P1Q1*S)+192*A1*A2*MB**2*P1Q2**2/(P1Q1*S)-
&192*A12*MB*MT*P1Q2**2/(P1Q1*S)+96*A1*A2*MB*MT*P1Q2**2/(P1Q1*S)+
&192*A1*A2*P1P2*P1Q2**2/(P1Q1*S)+96*A2*MB**3*MT/(P2Q1*S)+
&96*A2*MB**2*P1P2/(P2Q1*S)+192*A1*MB*MT*P1P2/(P2Q1*S)+
&192*A1*P1P2**2/(P2Q1*S)+96*A1*MB**2*P1Q1/(P2Q1*S)+
&192*A2*MB**2*P1Q1/(P2Q1*S)+96*A1*MB*MT*P1Q1/(P2Q1*S)+
&192*A1*A2*MB**3*MT*P1Q1/(P2Q1*S)+192*A1*P1P2*P1Q1/(P2Q1*S)
V18=V18+192*A1*A2*MB**2*P1P2*P1Q1/(P2Q1*S)+
&96*A1*A2*MB**2*P1Q1**2/(P2Q1*S)+
&192*A2*MB**3*MT*P1Q1/(P1Q2*P2Q1*S)+
&192*A2*MB**2*P1P2*P1Q1/(P1Q2*P2Q1*S)+
&96*A1*MB*MT*P1P2*P1Q1/(P1Q2*P2Q1*S)+
&96*A1*P1P2**2*P1Q1/(P1Q2*P2Q1*S)+
&96*A1*MB**2*P1Q1**2/(P1Q2*P2Q1*S)+
&192*A2*MB**2*P1Q1**2/(P1Q2*P2Q1*S)+
&48*A1*MB*MT*P1Q1**2/(P1Q2*P2Q1*S)+
&96*A1*P1P2*P1Q1**2/(P1Q2*P2Q1*S)+96*A1*MB**2*P1Q2/(P2Q1*S)+
&48*A2*MB**2*P1Q2/(P2Q1*S)-192*A1*A2*MB**3*MT*P1Q2/(P2Q1*S)-
&192*A1*A2*MB**2*P1P2*P1Q2/(P2Q1*S)-
&96*A1*A2*MB**2*P1Q2**2/(P2Q1*S)+144*A1*P2Q1/S+192*A2*P2Q1/S-
&96*A1*A2*MB*MT*P2Q1/S+480*A2**2*MB*MT*P2Q1/S+
&480*A12*MT**2*P2Q1/S+384*A1*A2*MT**2*P2Q1/S+
&672*A1*A2*P1P2*P2Q1/S+864*A2**2*P1P2*P2Q1/S+
&96*A2*MB*MT*P2Q1/(P1Q1*S)+192*A1*MT**2*P2Q1/(P1Q1*S)
V18=V18+96*A2*MT**2*P2Q1/(P1Q1*S)+
&192*A1*A2*MB*MT**3*P2Q1/(P1Q1*S)+
&192*A2*P1P2*P2Q1/(P1Q1*S)+192*A1*A2*MT**2*P1P2*P2Q1/(P1Q1*S)-
&192*A12*P1Q1*P2Q1/S-192*A2**2*P1Q1*P2Q1/S+
&48*A1*MT**2*P2Q1/(P1Q2*S)+96*A2*MT**2*P2Q1/(P1Q2*S)-
&192*A1*A2*MB*MT**3*P2Q1/(P1Q2*S)-
&192*A1*A2*MT**2*P1P2*P2Q1/(P1Q2*S)-
&96*A1*A2*MB*MT*P1Q1*P2Q1/(P1Q2*S)-
&192*A12*MT**2*P1Q1*P2Q1/(P1Q2*S)-
&96*A1*A2*MT**2*P1Q1*P2Q1/(P1Q2*S)-
&384*A1*A2*P1P2*P1Q1*P2Q1/(P1Q2*S)-384*A12*P1Q1**2*P2Q1/(P1Q2*S)-
&384*A1*A2*P1Q1**2*P2Q1/(P1Q2*S)-480*A12*P1Q2*P2Q1/S-
&960*A1*A2*P1Q2*P2Q1/S-480*A2**2*P1Q2*P2Q1/S+
&144*A1*P1Q2*P2Q1/(P1Q1*S)+96*A2*P1Q2*P2Q1/(P1Q1*S)-
&384*A1*A2*MB*MT*P1Q2*P2Q1/(P1Q1*S)-
&96*A12*MT**2*P1Q2*P2Q1/(P1Q1*S)+
&96*A1*A2*MT**2*P1Q2*P2Q1/(P1Q1*S)-
&576*A1*A2*P1P2*P1Q2*P2Q1/(P1Q1*S)-192*A12*P1Q2**2*P2Q1/(P1Q1*S)
V18=V18-384*A1*A2*P1Q2**2*P2Q1/(P1Q1*S)-96*A1*A2*P2Q1**2/S-
&96*A1*A2*MT**2*P2Q1**2/(P1Q1*S)+96*A1*A2*MT**2*P2Q1**2/(P1Q2*S)+
&288*A1*A2*P1Q2*P2Q1**2/(P1Q1*S)+96*A2*MB**3*MT/(P2Q2*S)+
&96*A2*MB**2*P1P2/(P2Q2*S)+192*A1*MB*MT*P1P2/(P2Q2*S)+
&192*A1*P1P2**2/(P2Q2*S)+96*A1*MB**2*P1Q1/(P2Q2*S)+
&48*A2*MB**2*P1Q1/(P2Q2*S)-192*A1*A2*MB**3*MT*P1Q1/(P2Q2*S)-
&192*A1*A2*MB**2*P1P2*P1Q1/(P2Q2*S)-
&96*A1*A2*MB**2*P1Q1**2/(P2Q2*S)+96*A1*MB**2*P1Q2/(P2Q2*S)+
&192*A2*MB**2*P1Q2/(P2Q2*S)+96*A1*MB*MT*P1Q2/(P2Q2*S)+
&192*A1*A2*MB**3*MT*P1Q2/(P2Q2*S)+192*A1*P1P2*P1Q2/(P2Q2*S)+
&192*A1*A2*MB**2*P1P2*P1Q2/(P2Q2*S)+
&192*A2*MB**3*MT*P1Q2/(P1Q1*P2Q2*S)+
&192*A2*MB**2*P1P2*P1Q2/(P1Q1*P2Q2*S)+
&96*A1*MB*MT*P1P2*P1Q2/(P1Q1*P2Q2*S)+
&96*A1*P1P2**2*P1Q2/(P1Q1*P2Q2*S)+96*A1*A2*MB**2*P1Q2**2/(P2Q2*S)+
&96*A1*MB**2*P1Q2**2/(P1Q1*P2Q2*S)+
&192*A2*MB**2*P1Q2**2/(P1Q1*P2Q2*S)
V18=V18+48*A1*MB*MT*P1Q2**2/(P1Q1*P2Q2*S)+
&96*A1*P1P2*P1Q2**2/(P1Q1*P2Q2*S)-48*A2*MB**2*P2Q1/(P2Q2*S)+
&96*A1*MB*MT*P2Q1/(P2Q2*S)-48*A2*MB*MT*P2Q1/(P2Q2*S)-
&192*A1*P1P2*P2Q1/(P2Q2*S)-192*A2*P1P2*P2Q1/(P2Q2*S)+
&192*A1*A2*MB*MT*P1P2*P2Q1/(P2Q2*S)+
&192*A1*A2*P1P2**2*P2Q1/(P2Q2*S)-
&192*A1*MB*MT**3*P2Q1/(P1Q1*P2Q2*S)-
&96*A2*MB*MT*P1P2*P2Q1/(P1Q1*P2Q2*S)-
&192*A1*MT**2*P1P2*P2Q1/(P1Q1*P2Q2*S)-
&96*A2*P1P2**2*P2Q1/(P1Q1*P2Q2*S)+
&96*A1*A2*MB**2*P1Q1*P2Q1/(P2Q2*S)+
&192*A2**2*MB**2*P1Q1*P2Q1/(P2Q2*S)+
&96*A1*A2*MB*MT*P1Q1*P2Q1/(P2Q2*S)+
&384*A1*A2*P1P2*P1Q1*P2Q1/(P2Q2*S)-96*A1*P1Q2*P2Q1/(P2Q2*S)-
&144*A2*P1Q2*P2Q1/(P2Q2*S)-96*A1*A2*MB**2*P1Q2*P2Q1/(P2Q2*S)+
&96*A2**2*MB**2*P1Q2*P2Q1/(P2Q2*S)+
&384*A1*A2*MB*MT*P1Q2*P2Q1/(P2Q2*S)
V18=V18+576*A1*A2*P1P2*P1Q2*P2Q1/(P2Q2*S)-
&96*A2*MB**2*P1Q2*P2Q1/(P1Q1*P2Q2*S)+
&48*A1*MB*MT*P1Q2*P2Q1/(P1Q1*P2Q2*S)+
&48*A2*MB*MT*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&96*A1*MT**2*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&96*A1*P1P2*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&96*A2*P1P2*P1Q2*P2Q1/(P1Q1*P2Q2*S)+
&96*A1*A2*P1Q1*P1Q2*P2Q1/(P2Q2*S)+288*A1*A2*P1Q2**2*P2Q1/(P2Q2*S)-
&96*A1*P1Q2**2*P2Q1/(P1Q1*P2Q2*S)-96*A2*P1Q2**2*P2Q1/(P1Q1*P2Q2*S)+
&192*A1*P2Q1**2/(P2Q2*S)+192*A2*P2Q1**2/(P2Q2*S)-
&96*A1*A2*MB*MT*P2Q1**2/(P2Q2*S)+192*A2**2*MB*MT*P2Q1**2/(P2Q2*S)-
&192*A1*A2*MT**2*P2Q1**2/(P2Q2*S)-192*A1*A2*P1P2*P2Q1**2/(P2Q2*S)+
&48*A2*MB*MT*P2Q1**2/(P1Q1*P2Q2*S)+
&192*A1*MT**2*P2Q1**2/(P1Q1*P2Q2*S)+
&96*A2*MT**2*P2Q1**2/(P1Q1*P2Q2*S)+
&96*A2*P1P2*P2Q1**2/(P1Q1*P2Q2*S)-384*A1*A2*P1Q1*P2Q1**2/(P2Q2*S)-
&384*A2**2*P1Q1*P2Q1**2/(P2Q2*S)-384*A1*A2*P1Q2*P2Q1**2/(P2Q2*S)
V18=V18-192*A2**2*P1Q2*P2Q1**2/(P2Q2*S)+
&96*A1*P1Q2*P2Q1**2/(P1Q1*P2Q2*S)+
&96*A2*P1Q2*P2Q1**2/(P1Q1*P2Q2*S)+144*A1*P2Q2/S+192*A2*P2Q2/S-
&96*A1*A2*MB*MT*P2Q2/S+480*A2**2*MB*MT*P2Q2/S+
&480*A12*MT**2*P2Q2/S+384*A1*A2*MT**2*P2Q2/S+
&672*A1*A2*P1P2*P2Q2/S+864*A2**2*P1P2*P2Q2/S+
&48*A1*MT**2*P2Q2/(P1Q1*S)+96*A2*MT**2*P2Q2/(P1Q1*S)-
&192*A1*A2*MB*MT**3*P2Q2/(P1Q1*S)-
&192*A1*A2*MT**2*P1P2*P2Q2/(P1Q1*S)-480*A12*P1Q1*P2Q2/S-
&960*A1*A2*P1Q1*P2Q2/S-480*A2**2*P1Q1*P2Q2/S+
&96*A2*MB*MT*P2Q2/(P1Q2*S)+192*A1*MT**2*P2Q2/(P1Q2*S)+
&96*A2*MT**2*P2Q2/(P1Q2*S)+192*A1*A2*MB*MT**3*P2Q2/(P1Q2*S)+
&192*A2*P1P2*P2Q2/(P1Q2*S)+192*A1*A2*MT**2*P1P2*P2Q2/(P1Q2*S)+
&144*A1*P1Q1*P2Q2/(P1Q2*S)+96*A2*P1Q1*P2Q2/(P1Q2*S)-
&384*A1*A2*MB*MT*P1Q1*P2Q2/(P1Q2*S)-
&96*A12*MT**2*P1Q1*P2Q2/(P1Q2*S)+
&96*A1*A2*MT**2*P1Q1*P2Q2/(P1Q2*S)
V18=V18-576*A1*A2*P1P2*P1Q1*P2Q2/(P1Q2*S)-
&192*A12*P1Q1**2*P2Q2/(P1Q2*S)-
&384*A1*A2*P1Q1**2*P2Q2/(P1Q2*S)-192*A12*P1Q2*P2Q2/S-
&192*A2**2*P1Q2*P2Q2/S-96*A1*A2*MB*MT*P1Q2*P2Q2/(P1Q1*S)-
&192*A12*MT**2*P1Q2*P2Q2/(P1Q1*S)-
&96*A1*A2*MT**2*P1Q2*P2Q2/(P1Q1*S)-
&384*A1*A2*P1P2*P1Q2*P2Q2/(P1Q1*S)-384*A12*P1Q2**2*P2Q2/(P1Q1*S)-
&384*A1*A2*P1Q2**2*P2Q2/(P1Q1*S)-48*A2*MB**2*P2Q2/(P2Q1*S)+
&96*A1*MB*MT*P2Q2/(P2Q1*S)-48*A2*MB*MT*P2Q2/(P2Q1*S)-
&192*A1*P1P2*P2Q2/(P2Q1*S)-192*A2*P1P2*P2Q2/(P2Q1*S)+
&192*A1*A2*MB*MT*P1P2*P2Q2/(P2Q1*S)+
&192*A1*A2*P1P2**2*P2Q2/(P2Q1*S)-96*A1*P1Q1*P2Q2/(P2Q1*S)-
&144*A2*P1Q1*P2Q2/(P2Q1*S)-96*A1*A2*MB**2*P1Q1*P2Q2/(P2Q1*S)+
&96*A2**2*MB**2*P1Q1*P2Q2/(P2Q1*S)+
&384*A1*A2*MB*MT*P1Q1*P2Q2/(P2Q1*S)+
&576*A1*A2*P1P2*P1Q1*P2Q2/(P2Q1*S)+288*A1*A2*P1Q1**2*P2Q2/(P2Q1*S)-
&192*A1*MB*MT**3*P2Q2/(P1Q2*P2Q1*S)
V18=V18-96*A2*MB*MT*P1P2*P2Q2/(P1Q2*P2Q1*S)-
&192*A1*MT**2*P1P2*P2Q2/(P1Q2*P2Q1*S)-
&96*A2*P1P2**2*P2Q2/(P1Q2*P2Q1*S)-
&96*A2*MB**2*P1Q1*P2Q2/(P1Q2*P2Q1*S)+
&48*A1*MB*MT*P1Q1*P2Q2/(P1Q2*P2Q1*S)
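C     V18BIS CONTINUES THE SAME SUM; THE MACHINE-GENERATED EXPRESSION
C     IS SPLIT, PRESUMABLY TO STAY WITHIN COMPILER LIMITS ON STATEMENT
C     SIZE, AND THE PIECES ARE RECOMBINED BELOW (V18=V18+V18BIS).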
V18BIS=
&48*A2*MB*MT*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A1*MT**2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A1*P1P2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A2*P1P2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A1*P1Q1**2*P2Q2/(P1Q2*P2Q1*S)-96*A2*P1Q1**2*P2Q2/(P1Q2*P2Q1*S)+
&96*A1*A2*MB**2*P1Q2*P2Q2/(P2Q1*S)+
&192*A2**2*MB**2*P1Q2*P2Q2/(P2Q1*S)+
&96*A1*A2*MB*MT*P1Q2*P2Q2/(P2Q1*S)+
&384*A1*A2*P1P2*P1Q2*P2Q2/(P2Q1*S)+
&96*A1*A2*P1Q1*P1Q2*P2Q2/(P2Q1*S)-576*A1*A2*P2Q1*P2Q2/S+
&96*A1*A2*P1Q1*P2Q1*P2Q2/(P1Q2*S)+96*A1*A2*P1Q2*P2Q1*P2Q2/(P1Q1*S)-
&96*A1*A2*P2Q2**2/S+96*A1*A2*MT**2*P2Q2**2/(P1Q1*S)-
&96*A1*A2*MT**2*P2Q2**2/(P1Q2*S)+288*A1*A2*P1Q1*P2Q2**2/(P1Q2*S)+
&192*A1*P2Q2**2/(P2Q1*S)+192*A2*P2Q2**2/(P2Q1*S)-
&96*A1*A2*MB*MT*P2Q2**2/(P2Q1*S)+192*A2**2*MB*MT*P2Q2**2/(P2Q1*S)-
&192*A1*A2*MT**2*P2Q2**2/(P2Q1*S)-192*A1*A2*P1P2*P2Q2**2/(P2Q1*S)
V18BIS=V18BIS-384*A1*A2*P1Q1*P2Q2**2/(P2Q1*S)-
&192*A2**2*P1Q1*P2Q2**2/(P2Q1*S)+
&48*A2*MB*MT*P2Q2**2/(P1Q2*P2Q1*S)+
&192*A1*MT**2*P2Q2**2/(P1Q2*P2Q1*S)+
&96*A2*MT**2*P2Q2**2/(P1Q2*P2Q1*S)+
&96*A2*P1P2*P2Q2**2/(P1Q2*P2Q1*S)+96*A1*P1Q1*P2Q2**2/(P1Q2*P2Q1*S)+
&96*A2*P1Q1*P2Q2**2/(P1Q2*P2Q1*S)-384*A1*A2*P1Q2*P2Q2**2/(P2Q1*S)-
&384*A2**2*P1Q2*P2Q2**2/(P2Q1*S)+512*A1*A2*S/3-
&128*A1*MT**2*S/(3*P1Q1**2)-128*A12*MB*MT**3*S/(3*P1Q1**2)-
&152*A1*S/(3*P1Q1)+152*A12*MB*MT*S/(3*P1Q1)+
&128*A1*A2*MB*MT*S/(3*P1Q1)+112*A1*A2*MT**2*S/(3*P1Q1)-
&16*A12*P1P2*S/P1Q1+152*A1*A2*P1P2*S/(3*P1Q1)-
&128*A1*MT**2*S/(3*P1Q2**2)-128*A12*MB*MT**3*S/(3*P1Q2**2)-
&152*A1*S/(3*P1Q2)+152*A12*MB*MT*S/(3*P1Q2)+
&128*A1*A2*MB*MT*S/(3*P1Q2)+112*A1*A2*MT**2*S/(3*P1Q2)-
&16*A12*P1P2*S/P1Q2+152*A1*A2*P1P2*S/(3*P1Q2)-
&16*A1*MB*MT*S/(3*P1Q1*P1Q2)+32*A12*MB*MT**3*S/(3*P1Q1*P1Q2)
V18BIS=V18BIS-16*A1*P1P2*S/(3*P1Q1*P1Q2)+
&272*A1*A2*P1Q1*S/(3*P1Q2)+
&272*A1*A2*P1Q2*S/(3*P1Q1)-128*A2*MB**2*S/(3*P2Q1**2)-
&128*A2**2*MB**3*MT*S/(3*P2Q1**2)+
&32*MB**2*MT**2*S/(3*P1Q2**2*P2Q1**2)+32*MB**2*S/(3*P1Q2*P2Q1**2)-
&64*A2*MB**3*MT*S/(3*P1Q2*P2Q1**2)-
&64*A2*MB**2*MT**2*S/(3*P1Q2*P2Q1**2)-
&128*A2*MB**2*P1P2*S/(3*P1Q2*P2Q1**2)-
&128*A2*MB**2*P1Q1*S/(3*P1Q2*P2Q1**2)+
&128*A2**2*MB**2*P1Q2*S/(3*P2Q1**2)+152*A2*S/(3*P2Q1)-
&112*A1*A2*MB**2*S/(3*P2Q1)-128*A1*A2*MB*MT*S/(3*P2Q1)-
&152*A2**2*MB*MT*S/(3*P2Q1)-152*A1*A2*P1P2*S/(3*P2Q1)+
&16*A2**2*P1P2*S/P2Q1+8*A1*A2*MB**3*MT*S/(3*P1Q1*P2Q1)+
&16*A1*A2*MB**2*MT**2*S/(3*P1Q1*P2Q1)+
&8*A1*A2*MB*MT**3*S/(3*P1Q1*P2Q1)-8*A1*P1P2*S/(3*P1Q1*P2Q1)-
&8*A2*P1P2*S/(3*P1Q1*P2Q1)+8*A1*A2*MB**2*P1P2*S/(3*P1Q1*P2Q1)+
&16*A1*A2*MB*MT*P1P2*S/(3*P1Q1*P2Q1)
V18BIS=V18BIS+8*A1*A2*MT**2*P1P2*S/(3*P1Q1*P2Q1)+
&32*A1*A2*P1P2**2*S/(3*P1Q1*P2Q1)-32*A2**2*P1Q1*S/(3*P2Q1)-
&32*MT**2*S/(3*P1Q2**2*P2Q1)+64*A1*MB**2*MT**2*S/(3*P1Q2**2*P2Q1)+
&64*A1*MB*MT**3*S/(3*P1Q2**2*P2Q1)+
&128*A1*MT**2*P1P2*S/(3*P1Q2**2*P2Q1)-12*S/(P1Q2*P2Q1)+
&24*A1*MB**2*S/(P1Q2*P2Q1)-64*A1*A2*MB**3*MT*S/(3*P1Q2*P2Q1)+
&24*A2*MT**2*S/(P1Q2*P2Q1)-128*A1*A2*MB**2*MT**2*S/(3*P1Q2*P2Q1)-
&64*A1*A2*MB*MT**3*S/(3*P1Q2*P2Q1)+56*A1*P1P2*S/(3*P1Q2*P2Q1)+
&56*A2*P1P2*S/(3*P1Q2*P2Q1)-64*A1*A2*MB**2*P1P2*S/(3*P1Q2*P2Q1)-
&128*A1*A2*MB*MT*P1P2*S/(3*P1Q2*P2Q1)-
&64*A1*A2*MT**2*P1P2*S/(3*P1Q2*P2Q1)-
&256*A1*A2*P1P2**2*S/(3*P1Q2*P2Q1)+4*P1P2*S/(3*P1Q1*P1Q2*P2Q1)+
&8*A1*MB*MT*P1P2*S/(3*P1Q1*P1Q2*P2Q1)-
&8*A1*MT**2*P1P2*S/(3*P1Q1*P1Q2*P2Q1)+136*A2*P1Q1*S/(3*P1Q2*P2Q1)-
&128*A1*A2*MB**2*P1Q1*S/(3*P1Q2*P2Q1)-
&128*A1*A2*MB*MT*P1Q1*S/(3*P1Q2*P2Q1)-
&256*A1*A2*P1P2*P1Q1*S/(3*P1Q2*P2Q1)-160*A2**2*P1Q2*S/(3*P2Q1)
V18BIS=V18BIS+16*A1*A2*P1P2*P1Q2*S/(3*P1Q1*P2Q1)-
&32*A12*P2Q1*S/(3*P1Q1)-
&128*A12*MT**2*P2Q1*S/(3*P1Q2**2)-160*A12*P2Q1*S/(3*P1Q2)-
&128*A2*MB**2*S/(3*P2Q2**2)-128*A2**2*MB**3*MT*S/(3*P2Q2**2)+
&32*MB**2*MT**2*S/(3*P1Q1**2*P2Q2**2)+32*MB**2*S/(3*P1Q1*P2Q2**2)-
&64*A2*MB**3*MT*S/(3*P1Q1*P2Q2**2)-
&64*A2*MB**2*MT**2*S/(3*P1Q1*P2Q2**2)-
&128*A2*MB**2*P1P2*S/(3*P1Q1*P2Q2**2)+
&128*A2**2*MB**2*P1Q1*S/(3*P2Q2**2)-
&128*A2*MB**2*P1Q2*S/(3*P1Q1*P2Q2**2)+152*A2*S/(3*P2Q2)-
&112*A1*A2*MB**2*S/(3*P2Q2)-128*A1*A2*MB*MT*S/(3*P2Q2)-
&152*A2**2*MB*MT*S/(3*P2Q2)-152*A1*A2*P1P2*S/(3*P2Q2)+
&16*A2**2*P1P2*S/P2Q2-32*MT**2*S/(3*P1Q1**2*P2Q2)+
&64*A1*MB**2*MT**2*S/(3*P1Q1**2*P2Q2)+
&64*A1*MB*MT**3*S/(3*P1Q1**2*P2Q2)+
&128*A1*MT**2*P1P2*S/(3*P1Q1**2*P2Q2)-12*S/(P1Q1*P2Q2)+
&24*A1*MB**2*S/(P1Q1*P2Q2)-64*A1*A2*MB**3*MT*S/(3*P1Q1*P2Q2)
V18BIS=V18BIS+24*A2*MT**2*S/(P1Q1*P2Q2)-
&128*A1*A2*MB**2*MT**2*S/(3*P1Q1*P2Q2)-
&64*A1*A2*MB*MT**3*S/(3*P1Q1*P2Q2)+56*A1*P1P2*S/(3*P1Q1*P2Q2)+
&56*A2*P1P2*S/(3*P1Q1*P2Q2)-64*A1*A2*MB**2*P1P2*S/(3*P1Q1*P2Q2)-
&128*A1*A2*MB*MT*P1P2*S/(3*P1Q1*P2Q2)-
&64*A1*A2*MT**2*P1P2*S/(3*P1Q1*P2Q2)-
&256*A1*A2*P1P2**2*S/(3*P1Q1*P2Q2)-160*A2**2*P1Q1*S/(3*P2Q2)+
&8*A1*A2*MB**3*MT*S/(3*P1Q2*P2Q2)+
&16*A1*A2*MB**2*MT**2*S/(3*P1Q2*P2Q2)+
&8*A1*A2*MB*MT**3*S/(3*P1Q2*P2Q2)-8*A1*P1P2*S/(3*P1Q2*P2Q2)-
&8*A2*P1P2*S/(3*P1Q2*P2Q2)+8*A1*A2*MB**2*P1P2*S/(3*P1Q2*P2Q2)+
&16*A1*A2*MB*MT*P1P2*S/(3*P1Q2*P2Q2)+
&8*A1*A2*MT**2*P1P2*S/(3*P1Q2*P2Q2)+
&32*A1*A2*P1P2**2*S/(3*P1Q2*P2Q2)+4*P1P2*S/(3*P1Q1*P1Q2*P2Q2)+
&8*A1*MB*MT*P1P2*S/(3*P1Q1*P1Q2*P2Q2)-
&8*A1*MT**2*P1P2*S/(3*P1Q1*P1Q2*P2Q2)+
&16*A1*A2*P1P2*P1Q1*S/(3*P1Q2*P2Q2)-32*A2**2*P1Q2*S/(3*P2Q2)
V18BIS=V18BIS+136*A2*P1Q2*S/(3*P1Q1*P2Q2)-
&128*A1*A2*MB**2*P1Q2*S/(3*P1Q1*P2Q2)-
&128*A1*A2*MB*MT*P1Q2*S/(3*P1Q1*P2Q2)-
&256*A1*A2*P1P2*P1Q2*S/(3*P1Q1*P2Q2)-16*A2*MB*MT*S/(3*P2Q1*P2Q2)+
&32*A2**2*MB**3*MT*S/(3*P2Q1*P2Q2)-16*A2*P1P2*S/(3*P2Q1*P2Q2)-
&4*P1P2*S/(3*P1Q1*P2Q1*P2Q2)+8*A2*MB**2*P1P2*S/(3*P1Q1*P2Q1*P2Q2)-
&8*A2*MB*MT*P1P2*S/(3*P1Q1*P2Q1*P2Q2)-4*P1P2*S/(3*P1Q2*P2Q1*P2Q2)+
&8*A2*MB**2*P1P2*S/(3*P1Q2*P2Q1*P2Q2)-
&8*A2*MB*MT*P1P2*S/(3*P1Q2*P2Q1*P2Q2)+
&2*MB**3*MT*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)+
&4*MB**2*MT**2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)+
&2*MB*MT**3*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&2*MB**2*P1P2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&4*MB*MT*P1P2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&2*MT**2*P1P2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&8*P1P2**2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)+
&8*A2*P1P2*P1Q1*S/(3*P1Q2*P2Q1*P2Q2)
V18BIS=V18BIS+8*A2*P1P2*P1Q2*S/(3*P1Q1*P2Q1*P2Q2)+
&272*A1*A2*P2Q1*S/(3*P2Q2)-
&128*A1*MT**2*P2Q1*S/(3*P1Q1**2*P2Q2)-136*A1*P2Q1*S/(3*P1Q1*P2Q2)+
&128*A1*A2*MB*MT*P2Q1*S/(3*P1Q1*P2Q2)+
&128*A1*A2*MT**2*P2Q1*S/(3*P1Q1*P2Q2)+
&256*A1*A2*P1P2*P2Q1*S/(3*P1Q1*P2Q2)-
&16*A1*A2*P1P2*P2Q1*S/(3*P1Q2*P2Q2)+
&8*A1*P1P2*P2Q1*S/(3*P1Q1*P1Q2*P2Q2)+
&256*A1*A2*P1Q2*P2Q1*S/(3*P1Q1*P2Q2)-
&128*A12*MT**2*P2Q2*S/(3*P1Q1**2)-160*A12*P2Q2*S/(3*P1Q1)-
&32*A12*P2Q2*S/(3*P1Q2)+272*A1*A2*P2Q2*S/(3*P2Q1)-
&16*A1*A2*P1P2*P2Q2*S/(3*P1Q1*P2Q1)-
&128*A1*MT**2*P2Q2*S/(3*P1Q2**2*P2Q1)-136*A1*P2Q2*S/(3*P1Q2*P2Q1)+
&128*A1*A2*MB*MT*P2Q2*S/(3*P1Q2*P2Q1)+
&128*A1*A2*MT**2*P2Q2*S/(3*P1Q2*P2Q1)+
&256*A1*A2*P1P2*P2Q2*S/(3*P1Q2*P2Q1)+
&8*A1*P1P2*P2Q2*S/(3*P1Q1*P1Q2*P2Q1)
V18BIS=V18BIS+256*A1*A2*P1Q1*P2Q2*S/(3*P1Q2*P2Q1)+
&8*A12*MB*MT*S**2/(3*P1Q1*P1Q2)+16*A12*P1P2*S**2/(3*P1Q1*P1Q2)-
&8*A1*A2*P1P2*S**2/(3*P1Q1*P2Q1)+4*A1*P1P2*S**2/(3*P1Q1*P1Q2*P2Q1)-
&8*A1*A2*P1P2*S**2/(3*P1Q2*P2Q2)+4*A1*P1P2*S**2/(3*P1Q1*P1Q2*P2Q2)+
&8*A2**2*MB*MT*S**2/(3*P2Q1*P2Q2)+16*A2**2*P1P2*S**2/(3*P2Q1*P2Q2)-
&4*A2*P1P2*S**2/(3*P1Q1*P2Q1*P2Q2)-
&4*A2*P1P2*S**2/(3*P1Q2*P2Q1*P2Q2)+
&2*P1P2*S**2/(3*P1Q1*P1Q2*P2Q1*P2Q2)
C
A18 = 640*A1/3+640*A2/3+32*A1*A2*MB**2+368*A12*MB*MT+
&512*A1*A2*MB*MT/3+
&368*A2**2*MB*MT+32*A1*A2*MT**2+496*A12*P1P2/3+
&320*A1*A2*P1P2+496*A2**2*P1P2/3-128*A1*MB*MT**3/(3*P1Q1**2)+
&128*A1*MT**4/(3*P1Q1**2)+256*A12*MB*MT**5/(3*P1Q1**2)+
&256*A1*MT**2*P1P2/(3*P1Q1**2)-256*A12*MT**4*P1P2/(3*P1Q1**2)+
&8/(3*P1Q1)+32*A1*MB*MT/P1Q1+56*A2*MB*MT/(3*P1Q1)+
&88*A1*MT**2/(3*P1Q1)+72*A2*MT**2/P1Q1-
&704*A12*MB*MT**3/(3*P1Q1)+224*A1*A2*MB*MT**3/(3*P1Q1)+
&104*A1*P1P2/(3*P1Q1)+48*A2*P1P2/P1Q1-
&128*A1*A2*MB*MT*P1P2/(3*P1Q1)+512*A12*MT**2*P1P2/(3*P1Q1)-
&448*A1*A2*MT**2*P1P2/(3*P1Q1)-32*A1*A2*P1P2**2/P1Q1-
&656*A1*A2*P1Q1/3-224*A2**2*P1Q1-128*A1*MB*MT**3/(3*P1Q2**2)+
&128*A1*MT**4/(3*P1Q2**2)+256*A12*MB*MT**5/(3*P1Q2**2)+
&256*A1*MT**2*P1P2/(3*P1Q2**2)-256*A12*MT**4*P1P2/(3*P1Q2**2)+
&256*A1*MT**2*P1Q1/(3*P1Q2**2)-256*A12*MB*MT**3*P1Q1/(3*P1Q2**2)+
&8/(3*P1Q2)+32*A1*MB*MT/P1Q2+56*A2*MB*MT/(3*P1Q2)
A18=A18+88*A1*MT**2/(3*P1Q2)+72*A2*MT**2/P1Q2-
&704*A12*MB*MT**3/(3*P1Q2)+224*A1*A2*MB*MT**3/(3*P1Q2)+
&104*A1*P1P2/(3*P1Q2)+48*A2*P1P2/P1Q2-
&128*A1*A2*MB*MT*P1P2/(3*P1Q2)+512*A12*MT**2*P1P2/(3*P1Q2)-
&448*A1*A2*MT**2*P1P2/(3*P1Q2)-32*A1*A2*P1P2**2/P1Q2+
&32*A1*MB*MT**3/(3*P1Q1*P1Q2)-32*A1*MT**4/(3*P1Q1*P1Q2)-
&64*A12*MB*MT**5/(3*P1Q1*P1Q2)+16*P1P2/(3*P1Q1*P1Q2)-
&64*A1*MT**2*P1P2/(3*P1Q1*P1Q2)+64*A12*MT**4*P1P2/(3*P1Q1*P1Q2)+
&112*A1*P1Q1/P1Q2+272*A2*P1Q1/(3*P1Q2)-
&272*A1*A2*MB**2*P1Q1/(3*P1Q2)-208*A12*MB*MT*P1Q1/(3*P1Q2)+
&400*A1*A2*MB*MT*P1Q1/(3*P1Q2)-80*A1*A2*MT**2*P1Q1/P1Q2+
&96*A12*P1P2*P1Q1/P1Q2-320*A1*A2*P1P2*P1Q1/P1Q2-
&544*A1*A2*P1Q1**2/(3*P1Q2)-656*A1*A2*P1Q2/3-224*A2**2*P1Q2+
&256*A1*MT**2*P1Q2/(3*P1Q1**2)-256*A12*MB*MT**3*P1Q2/(3*P1Q1**2)+
&112*A1*P1Q2/P1Q1+272*A2*P1Q2/(3*P1Q1)-
&272*A1*A2*MB**2*P1Q2/(3*P1Q1)-208*A12*MB*MT*P1Q2/(3*P1Q1)+
&400*A1*A2*MB*MT*P1Q2/(3*P1Q1)-80*A1*A2*MT**2*P1Q2/P1Q1
A18=A18+96*A12*P1P2*P1Q2/P1Q1-320*A1*A2*P1P2*P1Q2/P1Q1-
&544*A1*A2*P1Q2**2/(3*P1Q1)+128*A2*MB**4/(3*P2Q1**2)-
&128*A2*MB**3*MT/(3*P2Q1**2)+256*A2**2*MB**5*MT/(3*P2Q1**2)+
&256*A2*MB**2*P1P2/(3*P2Q1**2)-256*A2**2*MB**4*P1P2/(3*P2Q1**2)+
&256*A2*MB**2*P1Q1/(3*P2Q1**2)-256*A2**2*MB**4*P1Q1/(3*P2Q1**2)+
&64*MB**3*MT**3/(3*P1Q2**2*P2Q1**2)-
&64*MB**2*MT**2*P1P2/(3*P1Q2**2*P2Q1**2)-
&64*MB**2*MT**2*P1Q1/(3*P1Q2**2*P2Q1**2)-
&64*MB**3*MT/(3*P1Q2*P2Q1**2)-
&256*A2*MB**3*MT*P1P2/(3*P1Q2*P2Q1**2)+
&256*A2*MB**2*P1P2**2/(3*P1Q2*P2Q1**2)-
&256*A2*MB**3*MT*P1Q1/(3*P1Q2*P2Q1**2)+
&512*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q1**2)+
&256*A2*MB**2*P1Q1**2/(3*P1Q2*P2Q1**2)-
&256*A2**2*MB**4*P1Q2/(3*P2Q1**2)-8/(3*P2Q1)-72*A1*MB**2/P2Q1-
&88*A2*MB**2/(3*P2Q1)-56*A1*MB*MT/(3*P2Q1)-32*A2*MB*MT/P2Q1-
&224*A1*A2*MB**3*MT/(3*P2Q1)+704*A2**2*MB**3*MT/(3*P2Q1)
A18=A18-48*A1*P1P2/P2Q1-104*A2*P1P2/(3*P2Q1)+
&448*A1*A2*MB**2*P1P2/(3*P2Q1)-512*A2**2*MB**2*P1P2/(3*P2Q1)+
&128*A1*A2*MB*MT*P1P2/(3*P2Q1)+32*A1*A2*P1P2**2/P2Q1-
&16*P1P2/(3*P1Q1*P2Q1)+32*A1*MB*MT*P1P2/(3*P1Q1*P2Q1)+
&32*A2*MB*MT*P1P2/(3*P1Q1*P2Q1)+
&64*A1*A2*MB*MT*P1P2**2/(3*P1Q1*P2Q1)-
&64*A1*A2*P1P2**3/(3*P1Q1*P2Q1)-256*A2*P1Q1/(3*P2Q1)+
&448*A1*A2*MB**2*P1Q1/(3*P2Q1)-368*A2**2*MB**2*P1Q1/(3*P2Q1)-
&224*A1*A2*MB*MT*P1Q1/(3*P2Q1)+304*A1*A2*P1P2*P1Q1/(3*P2Q1)+
&64*MB*MT**3/(3*P1Q2**2*P2Q1)+
&256*A1*MB*MT**3*P1P2/(3*P1Q2**2*P2Q1)-
&256*A1*MT**2*P1P2**2/(3*P1Q2**2*P2Q1)+
&64*MT**2*P1Q1/(3*P1Q2**2*P2Q1)-
&128*A1*MB**2*MT**2*P1Q1/(3*P1Q2**2*P2Q1)+
&128*A1*MB*MT**3*P1Q1/(3*P1Q2**2*P2Q1)-
&256*A1*MT**2*P1P2*P1Q1/(3*P1Q2**2*P2Q1)-4*MB**2/(3*P1Q2*P2Q1)-
&64*MB*MT/(3*P1Q2*P2Q1)+128*A2*MB**3*MT/(3*P1Q2*P2Q1)
A18=A18-4*MT**2/(3*P1Q2*P2Q1)-128*A1*MB**2*MT**2/(3*P1Q2*P2Q1)-
&128*A2*MB**2*MT**2/(3*P1Q2*P2Q1)+128*A1*MB*MT**3/(3*P1Q2*P2Q1)-
&112*A2*MB**2*P1P2/(3*P1Q2*P2Q1)+32*A1*MB*MT*P1P2/(3*P1Q2*P2Q1)+
&32*A2*MB*MT*P1P2/(3*P1Q2*P2Q1)-112*A1*MT**2*P1P2/(3*P1Q2*P2Q1)-
&48*A1*P1P2**2/(P1Q2*P2Q1)-48*A2*P1P2**2/(P1Q2*P2Q1)-
&512*A1*A2*MB*MT*P1P2**2/(3*P1Q2*P2Q1)+
&512*A1*A2*P1P2**3/(3*P1Q2*P2Q1)+8*MB*MT*P1P2/(3*P1Q1*P1Q2*P2Q1)-
&8*MT**2*P1P2/(3*P1Q1*P1Q2*P2Q1)-
&32*A1*MB*MT**3*P1P2/(3*P1Q1*P1Q2*P2Q1)-
&16*P1P2**2/(3*P1Q1*P1Q2*P2Q1)+
&32*A1*MT**2*P1P2**2/(3*P1Q1*P1Q2*P2Q1)+8*P1Q1/(3*P1Q2*P2Q1)-
&160*A1*MB**2*P1Q1/(3*P1Q2*P2Q1)-272*A2*MB**2*P1Q1/(3*P1Q2*P2Q1)-
&56*A1*MB*MT*P1Q1/(3*P1Q2*P2Q1)-200*A2*MB*MT*P1Q1/(3*P1Q2*P2Q1)-
&48*A1*P1P2*P1Q1/(P1Q2*P2Q1)-256*A2*P1P2*P1Q1/(3*P1Q2*P2Q1)+
&256*A1*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q1)-
&256*A1*A2*MB*MT*P1P2*P1Q1/(P1Q2*P2Q1)+
&1024*A1*A2*P1P2**2*P1Q1/(3*P1Q2*P2Q1)
A18=A18-272*A2*P1Q1**2/(3*P1Q2*P2Q1)+
&256*A1*A2*MB**2*P1Q1**2/(3*P1Q2*P2Q1)-
&256*A1*A2*MB*MT*P1Q1**2/(3*P1Q2*P2Q1)+
&512*A1*A2*P1P2*P1Q1**2/(3*P1Q2*P2Q1)+16*A2*P1Q2/(3*P2Q1)+
&64*A1*A2*MB**2*P1Q2/P2Q1+32*A2**2*MB**2*P1Q2/(3*P2Q1)-
&112*A1*A2*MB*MT*P1Q2/(3*P2Q1)+368*A1*A2*P1P2*P1Q2/(3*P2Q1)+
&32*A2*P1P2*P1Q2/(3*P1Q1*P2Q1)-
&32*A1*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q1)+
&32*A1*A2*MB*MT*P1P2*P1Q2/(3*P1Q1*P2Q1)-
&64*A1*A2*P1P2**2*P1Q2/(3*P1Q1*P2Q1)+224*A12*P2Q1+
&656*A1*A2*P2Q1/3-256*A1*MT**2*P2Q1/(3*P1Q1**2)+
&256*A12*MT**4*P2Q1/(3*P1Q1**2)-256*A1*P2Q1/(3*P1Q1)-
&224*A1*A2*MB*MT*P2Q1/(3*P1Q1)-368*A12*MT**2*P2Q1/(3*P1Q1)+
&448*A1*A2*MT**2*P2Q1/(3*P1Q1)+304*A1*A2*P1P2*P2Q1/(3*P1Q1)+
&256*A12*MT**4*P2Q1/(3*P1Q2**2)+
&256*A12*MT**2*P1Q1*P2Q1/(3*P1Q2**2)+16*A1*P2Q1/(3*P1Q2)-
&112*A1*A2*MB*MT*P2Q1/(3*P1Q2)+32*A12*MT**2*P2Q1/(3*P1Q2)
A18=A18+64*A1*A2*MT**2*P2Q1/P1Q2+368*A1*A2*P1P2*P2Q1/(3*P1Q2)+
&16*A1*MT**2*P2Q1/(3*P1Q1*P1Q2)-64*A12*MT**4*P2Q1/(3*P1Q1*P1Q2)+
&640*A12*P1Q1*P2Q1/(3*P1Q2)+544*A1*A2*P1Q1*P2Q1/(3*P1Q2)+
&32*A12*P1Q2*P2Q1/P1Q1+944*A1*A2*P1Q2*P2Q1/(3*P1Q1)+
&128*A2*MB**4/(3*P2Q2**2)-128*A2*MB**3*MT/(3*P2Q2**2)+
&256*A2**2*MB**5*MT/(3*P2Q2**2)+256*A2*MB**2*P1P2/(3*P2Q2**2)-
&256*A2**2*MB**4*P1P2/(3*P2Q2**2)+
&64*MB**3*MT**3/(3*P1Q1**2*P2Q2**2)-
&64*MB**2*MT**2*P1P2/(3*P1Q1**2*P2Q2**2)-
&64*MB**3*MT/(3*P1Q1*P2Q2**2)-
&256*A2*MB**3*MT*P1P2/(3*P1Q1*P2Q2**2)+
&256*A2*MB**2*P1P2**2/(3*P1Q1*P2Q2**2)-
&256*A2**2*MB**4*P1Q1/(3*P2Q2**2)+256*A2*MB**2*P1Q2/(3*P2Q2**2)-
&256*A2**2*MB**4*P1Q2/(3*P2Q2**2)-
&64*MB**2*MT**2*P1Q2/(3*P1Q1**2*P2Q2**2)-
&256*A2*MB**3*MT*P1Q2/(3*P1Q1*P2Q2**2)+
&512*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q2**2)
A18=A18+256*A2*MB**2*P1Q2**2/(3*P1Q1*P2Q2**2)-
&256*A2*MB**2*P2Q1/(3*P2Q2**2)+256*A2**2*MB**3*MT*P2Q1/(3*P2Q2**2)+
&64*MB**2*MT**2*P2Q1/(3*P1Q1**2*P2Q2**2)+
&64*MB**2*P2Q1/(3*P1Q1*P2Q2**2)+
&128*A2*MB**3*MT*P2Q1/(3*P1Q1*P2Q2**2)-
&128*A2*MB**2*MT**2*P2Q1/(3*P1Q1*P2Q2**2)-
&256*A2*MB**2*P1P2*P2Q1/(3*P1Q1*P2Q2**2)+
&256*A2**2*MB**2*P1Q1*P2Q1/(3*P2Q2**2)-
&256*A2*MB**2*P1Q2*P2Q1/(3*P1Q1*P2Q2**2)-8/(3*P2Q2)-
&72*A1*MB**2/P2Q2-88*A2*MB**2/(3*P2Q2)-56*A1*MB*MT/(3*P2Q2)-
&32*A2*MB*MT/P2Q2-224*A1*A2*MB**3*MT/(3*P2Q2)+
&704*A2**2*MB**3*MT/(3*P2Q2)-48*A1*P1P2/P2Q2-
&104*A2*P1P2/(3*P2Q2)+448*A1*A2*MB**2*P1P2/(3*P2Q2)-
&512*A2**2*MB**2*P1P2/(3*P2Q2)+128*A1*A2*MB*MT*P1P2/(3*P2Q2)+
&32*A1*A2*P1P2**2/P2Q2+64*MB*MT**3/(3*P1Q1**2*P2Q2)+
&256*A1*MB*MT**3*P1P2/(3*P1Q1**2*P2Q2)-
&256*A1*MT**2*P1P2**2/(3*P1Q1**2*P2Q2)-4*MB**2/(3*P1Q1*P2Q2)
A18=A18-64*MB*MT/(3*P1Q1*P2Q2)+128*A2*MB**3*MT/(3*P1Q1*P2Q2)-
&4*MT**2/(3*P1Q1*P2Q2)-128*A1*MB**2*MT**2/(3*P1Q1*P2Q2)-
&128*A2*MB**2*MT**2/(3*P1Q1*P2Q2)+128*A1*MB*MT**3/(3*P1Q1*P2Q2)-
&112*A2*MB**2*P1P2/(3*P1Q1*P2Q2)+32*A1*MB*MT*P1P2/(3*P1Q1*P2Q2)+
&32*A2*MB*MT*P1P2/(3*P1Q1*P2Q2)-112*A1*MT**2*P1P2/(3*P1Q1*P2Q2)-
&48*A1*P1P2**2/(P1Q1*P2Q2)-48*A2*P1P2**2/(P1Q1*P2Q2)-
&512*A1*A2*MB*MT*P1P2**2/(3*P1Q1*P2Q2)+
&512*A1*A2*P1P2**3/(3*P1Q1*P2Q2)+16*A2*P1Q1/(3*P2Q2)+
&64*A1*A2*MB**2*P1Q1/P2Q2+32*A2**2*MB**2*P1Q1/(3*P2Q2)-
&112*A1*A2*MB*MT*P1Q1/(3*P2Q2)+368*A1*A2*P1P2*P1Q1/(3*P2Q2)-
&16*P1P2/(3*P1Q2*P2Q2)+32*A1*MB*MT*P1P2/(3*P1Q2*P2Q2)+
&32*A2*MB*MT*P1P2/(3*P1Q2*P2Q2)+
&64*A1*A2*MB*MT*P1P2**2/(3*P1Q2*P2Q2)-
&64*A1*A2*P1P2**3/(3*P1Q2*P2Q2)+8*MB*MT*P1P2/(3*P1Q1*P1Q2*P2Q2)-
&8*MT**2*P1P2/(3*P1Q1*P1Q2*P2Q2)-
&32*A1*MB*MT**3*P1P2/(3*P1Q1*P1Q2*P2Q2)-
&16*P1P2**2/(3*P1Q1*P1Q2*P2Q2)
A18=A18+32*A1*MT**2*P1P2**2/(3*P1Q1*P1Q2*P2Q2)+
&32*A2*P1P2*P1Q1/(3*P1Q2*P2Q2)-
&32*A1*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q2)+
&32*A1*A2*MB*MT*P1P2*P1Q1/(3*P1Q2*P2Q2)-
&64*A1*A2*P1P2**2*P1Q1/(3*P1Q2*P2Q2)-256*A2*P1Q2/(3*P2Q2)+
&448*A1*A2*MB**2*P1Q2/(3*P2Q2)-368*A2**2*MB**2*P1Q2/(3*P2Q2)-
&224*A1*A2*MB*MT*P1Q2/(3*P2Q2)+304*A1*A2*P1P2*P1Q2/(3*P2Q2)+
&64*MT**2*P1Q2/(3*P1Q1**2*P2Q2)-
&128*A1*MB**2*MT**2*P1Q2/(3*P1Q1**2*P2Q2)+
&128*A1*MB*MT**3*P1Q2/(3*P1Q1**2*P2Q2)-
&256*A1*MT**2*P1P2*P1Q2/(3*P1Q1**2*P2Q2)+8*P1Q2/(3*P1Q1*P2Q2)-
&160*A1*MB**2*P1Q2/(3*P1Q1*P2Q2)-272*A2*MB**2*P1Q2/(3*P1Q1*P2Q2)-
&56*A1*MB*MT*P1Q2/(3*P1Q1*P2Q2)-200*A2*MB*MT*P1Q2/(3*P1Q1*P2Q2)-
&48*A1*P1P2*P1Q2/(P1Q1*P2Q2)-256*A2*P1P2*P1Q2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q2)-
&256*A1*A2*MB*MT*P1P2*P1Q2/(P1Q1*P2Q2)+
&1024*A1*A2*P1P2**2*P1Q2/(3*P1Q1*P2Q2)
A18=A18-272*A2*P1Q2**2/(3*P1Q1*P2Q2)+
&256*A1*A2*MB**2*P1Q2**2/(3*P1Q1*P2Q2)-
&256*A1*A2*MB*MT*P1Q2**2/(3*P1Q1*P2Q2)+
&512*A1*A2*P1P2*P1Q2**2/(3*P1Q1*P2Q2)-32*A2*MB**4/(3*P2Q1*P2Q2)+
&32*A2*MB**3*MT/(3*P2Q1*P2Q2)-64*A2**2*MB**5*MT/(3*P2Q1*P2Q2)+
&16*P1P2/(3*P2Q1*P2Q2)-64*A2*MB**2*P1P2/(3*P2Q1*P2Q2)+
&64*A2**2*MB**4*P1P2/(3*P2Q1*P2Q2)+8*MB**2*P1P2/(3*P1Q1*P2Q1*P2Q2)-
&8*MB*MT*P1P2/(3*P1Q1*P2Q1*P2Q2)+
&32*A2*MB**3*MT*P1P2/(3*P1Q1*P2Q1*P2Q2)+
&16*P1P2**2/(3*P1Q1*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2**2/(3*P1Q1*P2Q1*P2Q2)-
&16*A2*MB**2*P1Q1/(3*P2Q1*P2Q2)+64*A2**2*MB**4*P1Q1/(3*P2Q1*P2Q2)+
&8*MB**2*P1P2/(3*P1Q2*P2Q1*P2Q2)-8*MB*MT*P1P2/(3*P1Q2*P2Q1*P2Q2)+
&32*A2*MB**3*MT*P1P2/(3*P1Q2*P2Q1*P2Q2)+
&16*P1P2**2/(3*P1Q2*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2**2/(3*P1Q2*P2Q1*P2Q2)-
&16*MB*MT*P1P2**2/(3*P1Q1*P1Q2*P2Q1*P2Q2)
A18=A18+16*P1P2**3/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2*P1Q1/(3*P1Q2*P2Q1*P2Q2)-
&16*A2*MB**2*P1Q2/(3*P2Q1*P2Q2)+64*A2**2*MB**4*P1Q2/(3*P2Q1*P2Q2)-
&32*A2*MB**2*P1P2*P1Q2/(3*P1Q1*P2Q1*P2Q2)+272*A1*P2Q1/(3*P2Q2)+
&112*A2*P2Q1/P2Q2-80*A1*A2*MB**2*P2Q1/P2Q2+
&400*A1*A2*MB*MT*P2Q1/(3*P2Q2)-208*A2**2*MB*MT*P2Q1/(3*P2Q2)-
&272*A1*A2*MT**2*P2Q1/(3*P2Q2)-320*A1*A2*P1P2*P2Q1/P2Q2+
&96*A2**2*P1P2*P2Q1/P2Q2-256*A1*MB*MT**3*P2Q1/(3*P1Q1**2*P2Q2)+
&512*A1*MT**2*P1P2*P2Q1/(3*P1Q1**2*P2Q2)-8*P2Q1/(3*P1Q1*P2Q2)+
&200*A1*MB*MT*P2Q1/(3*P1Q1*P2Q2)+56*A2*MB*MT*P2Q1/(3*P1Q1*P2Q2)+
&272*A1*MT**2*P2Q1/(3*P1Q1*P2Q2)+160*A2*MT**2*P2Q1/(3*P1Q1*P2Q2)+
&256*A1*P1P2*P2Q1/(3*P1Q1*P2Q2)+48*A2*P1P2*P2Q1/(P1Q1*P2Q2)+
&256*A1*A2*MB*MT*P1P2*P2Q1/(P1Q1*P2Q2)-
&256*A1*A2*MT**2*P1P2*P2Q1/(3*P1Q1*P2Q2)-
&1024*A1*A2*P1P2**2*P2Q1/(3*P1Q1*P2Q2)-
&544*A1*A2*P1Q1*P2Q1/(3*P2Q2)-640*A2**2*P1Q1*P2Q1/(3*P2Q2)-
&32*A1*P1P2*P2Q1/(3*P1Q2*P2Q2)
A18=A18-32*A1*A2*MB*MT*P1P2*P2Q1/(3*P1Q2*P2Q2)+
&32*A1*A2*MT**2*P1P2*P2Q1/(3*P1Q2*P2Q2)+
&64*A1*A2*P1P2**2*P2Q1/(3*P1Q2*P2Q2)-
&32*A1*MT**2*P1P2*P2Q1/(3*P1Q1*P1Q2*P2Q2)+
&64*A1*A2*P1P2*P1Q1*P2Q1/(3*P1Q2*P2Q2)-
&944*A1*A2*P1Q2*P2Q1/(3*P2Q2)-32*A2**2*P1Q2*P2Q1/P2Q2+
&256*A1*MT**2*P1Q2*P2Q1/(3*P1Q1**2*P2Q2)+
&96*A1*P1Q2*P2Q1/(P1Q1*P2Q2)+96*A2*P1Q2*P2Q1/(P1Q1*P2Q2)-
&128*A1*A2*MB**2*P1Q2*P2Q1/(3*P1Q1*P2Q2)+
&256*A1*A2*MB*MT*P1Q2*P2Q1/(P1Q1*P2Q2)-
&128*A1*A2*MT**2*P1Q2*P2Q1/(3*P1Q1*P2Q2)-
&512*A1*A2*P1P2*P1Q2*P2Q1/(P1Q1*P2Q2)-
&512*A1*A2*P1Q2**2*P2Q1/(3*P1Q1*P2Q2)+544*A1*A2*P2Q1**2/(3*P2Q2)-
&256*A1*MT**2*P2Q1**2/(3*P1Q1**2*P2Q2)-
&272*A1*P2Q1**2/(3*P1Q1*P2Q2)-
&256*A1*A2*MB*MT*P2Q1**2/(3*P1Q1*P2Q2)+
&256*A1*A2*MT**2*P2Q1**2/(3*P1Q1*P2Q2)
A18=A18+512*A1*A2*P1P2*P2Q1**2/(3*P1Q1*P2Q2)+
&512*A1*A2*P1Q2*P2Q1**2/(3*P1Q1*P2Q2)+224*A12*P2Q2+
&656*A1*A2*P2Q2/3+256*A12*MT**4*P2Q2/(3*P1Q1**2)+
&16*A1*P2Q2/(3*P1Q1)-112*A1*A2*MB*MT*P2Q2/(3*P1Q1)+
&32*A12*MT**2*P2Q2/(3*P1Q1)+64*A1*A2*MT**2*P2Q2/P1Q1+
&368*A1*A2*P1P2*P2Q2/(3*P1Q1)-256*A1*MT**2*P2Q2/(3*P1Q2**2)+
&256*A12*MT**4*P2Q2/(3*P1Q2**2)-256*A1*P2Q2/(3*P1Q2)-
&224*A1*A2*MB*MT*P2Q2/(3*P1Q2)-368*A12*MT**2*P2Q2/(3*P1Q2)+
&448*A1*A2*MT**2*P2Q2/(3*P1Q2)+304*A1*A2*P1P2*P2Q2/(3*P1Q2)+
&16*A1*MT**2*P2Q2/(3*P1Q1*P1Q2)-64*A12*MT**4*P2Q2/(3*P1Q1*P1Q2)+
&32*A12*P1Q1*P2Q2/P1Q2+944*A1*A2*P1Q1*P2Q2/(3*P1Q2)+
&256*A12*MT**2*P1Q2*P2Q2/(3*P1Q1**2)+
&640*A12*P1Q2*P2Q2/(3*P1Q1)+544*A1*A2*P1Q2*P2Q2/(3*P1Q1)-
&256*A2*MB**2*P2Q2/(3*P2Q1**2)+256*A2**2*MB**3*MT*P2Q2/(3*P2Q1**2)+
&64*MB**2*MT**2*P2Q2/(3*P1Q2**2*P2Q1**2)+
&64*MB**2*P2Q2/(3*P1Q2*P2Q1**2)+
&128*A2*MB**3*MT*P2Q2/(3*P1Q2*P2Q1**2)
A18=A18-128*A2*MB**2*MT**2*P2Q2/(3*P1Q2*P2Q1**2)-
&256*A2*MB**2*P1P2*P2Q2/(3*P1Q2*P2Q1**2)-
&256*A2*MB**2*P1Q1*P2Q2/(3*P1Q2*P2Q1**2)+
&256*A2**2*MB**2*P1Q2*P2Q2/(3*P2Q1**2)+272*A1*P2Q2/(3*P2Q1)+
&112*A2*P2Q2/P2Q1-80*A1*A2*MB**2*P2Q2/P2Q1+
&400*A1*A2*MB*MT*P2Q2/(3*P2Q1)-208*A2**2*MB*MT*P2Q2/(3*P2Q1)-
&272*A1*A2*MT**2*P2Q2/(3*P2Q1)-320*A1*A2*P1P2*P2Q2/P2Q1+
&96*A2**2*P1P2*P2Q2/P2Q1-32*A1*P1P2*P2Q2/(3*P1Q1*P2Q1)-
&32*A1*A2*MB*MT*P1P2*P2Q2/(3*P1Q1*P2Q1)+
&32*A1*A2*MT**2*P1P2*P2Q2/(3*P1Q1*P2Q1)+
&64*A1*A2*P1P2**2*P2Q2/(3*P1Q1*P2Q1)-944*A1*A2*P1Q1*P2Q2/(3*P2Q1)-
&32*A2**2*P1Q1*P2Q2/P2Q1-256*A1*MB*MT**3*P2Q2/(3*P1Q2**2*P2Q1)+
&512*A1*MT**2*P1P2*P2Q2/(3*P1Q2**2*P2Q1)+
&256*A1*MT**2*P1Q1*P2Q2/(3*P1Q2**2*P2Q1)-8*P2Q2/(3*P1Q2*P2Q1)+
&200*A1*MB*MT*P2Q2/(3*P1Q2*P2Q1)+56*A2*MB*MT*P2Q2/(3*P1Q2*P2Q1)+
&272*A1*MT**2*P2Q2/(3*P1Q2*P2Q1)+160*A2*MT**2*P2Q2/(3*P1Q2*P2Q1)+
&256*A1*P1P2*P2Q2/(3*P1Q2*P2Q1)+48*A2*P1P2*P2Q2/(P1Q2*P2Q1)
A18=A18+256*A1*A2*MB*MT*P1P2*P2Q2/(P1Q2*P2Q1)-
&256*A1*A2*MT**2*P1P2*P2Q2/(3*P1Q2*P2Q1)-
&1024*A1*A2*P1P2**2*P2Q2/(3*P1Q2*P2Q1)-
&32*A1*MT**2*P1P2*P2Q2/(3*P1Q1*P1Q2*P2Q1)+
&96*A1*P1Q1*P2Q2/(P1Q2*P2Q1)+96*A2*P1Q1*P2Q2/(P1Q2*P2Q1)-
&128*A1*A2*MB**2*P1Q1*P2Q2/(3*P1Q2*P2Q1)+
&256*A1*A2*MB*MT*P1Q1*P2Q2/(P1Q2*P2Q1)-
&128*A1*A2*MT**2*P1Q1*P2Q2/(3*P1Q2*P2Q1)-
&512*A1*A2*P1P2*P1Q1*P2Q2/(P1Q2*P2Q1)-
&512*A1*A2*P1Q1**2*P2Q2/(3*P1Q2*P2Q1)-544*A1*A2*P1Q2*P2Q2/(3*P2Q1)-
&640*A2**2*P1Q2*P2Q2/(3*P2Q1)+
&64*A1*A2*P1P2*P1Q2*P2Q2/(3*P1Q1*P2Q1)+544*A1*A2*P2Q2**2/(3*P2Q1)-
&256*A1*MT**2*P2Q2**2/(3*P1Q2**2*P2Q1)-
&272*A1*P2Q2**2/(3*P1Q2*P2Q1)-
&256*A1*A2*MB*MT*P2Q2**2/(3*P1Q2*P2Q1)+
&256*A1*A2*MT**2*P2Q2**2/(3*P1Q2*P2Q1)+
&512*A1*A2*P1P2*P2Q2**2/(3*P1Q2*P2Q1)
A18=A18+512*A1*A2*P1Q1*P2Q2**2/(3*P1Q2*P2Q1)-
&384*A12*MB*MT*P1Q1**2/S**2+
&384*A12*P1P2*P1Q1**2/S**2-2688*A12*MB*MT*P1Q1*P1Q2/S**2+
&2688*A12*P1P2*P1Q1*P1Q2/S**2-384*A12*MB*MT*P1Q2**2/S**2+
&384*A12*P1P2*P1Q2**2/S**2-768*A1*A2*MB*MT*P1Q1*P2Q1/S**2+
&768*A1*A2*P1P2*P1Q1*P2Q1/S**2-2688*A1*A2*MB*MT*P1Q2*P2Q1/S**2+
&2688*A1*A2*P1P2*P1Q2*P2Q1/S**2-960*A12*P1Q1*P1Q2*P2Q1/S**2-
&960*A1*A2*P1Q1*P1Q2*P2Q1/S**2+960*A12*P1Q2**2*P2Q1/S**2+
&960*A1*A2*P1Q2**2*P2Q1/S**2-384*A2**2*MB*MT*P2Q1**2/S**2+
&384*A2**2*P1P2*P2Q1**2/S**2-960*A1*A2*P1Q2*P2Q1**2/S**2-
&960*A2**2*P1Q2*P2Q1**2/S**2-2688*A1*A2*MB*MT*P1Q1*P2Q2/S**2+
&2688*A1*A2*P1P2*P1Q1*P2Q2/S**2+960*A12*P1Q1**2*P2Q2/S**2+
&960*A1*A2*P1Q1**2*P2Q2/S**2-768*A1*A2*MB*MT*P1Q2*P2Q2/S**2+
&768*A1*A2*P1P2*P1Q2*P2Q2/S**2-960*A12*P1Q1*P1Q2*P2Q2/S**2-
&960*A1*A2*P1Q1*P1Q2*P2Q2/S**2-2688*A2**2*MB*MT*P2Q1*P2Q2/S**2+
&2688*A2**2*P1P2*P2Q1*P2Q2/S**2+960*A1*A2*P1Q1*P2Q1*P2Q2/S**2+
&960*A2**2*P1Q1*P2Q1*P2Q2/S**2+960*A1*A2*P1Q2*P2Q1*P2Q2/S**2
A18=A18+960*A2**2*P1Q2*P2Q1*P2Q2/S**2-
&384*A2**2*MB*MT*P2Q2**2/S**2+
&384*A2**2*P1P2*P2Q2**2/S**2-960*A1*A2*P1Q1*P2Q2**2/S**2-
&960*A2**2*P1Q1*P2Q2**2/S**2-96*A1*MB*MT/S-96*A2*MB*MT/S+
&768*A2**2*MB**3*MT/S+768*A12*MB*MT**3/S-192*A1*P1P2/S-
&192*A2*P1P2/S-768*A2**2*MB**2*P1P2/S+2304*A1*A2*MB*MT*P1P2/S-
&768*A12*MT**2*P1P2/S-2304*A1*A2*P1P2**2/S+
&96*A1*MB*MT**3/(P1Q1*S)+192*A2*MB*MT*P1P2/(P1Q1*S)-
&96*A1*MT**2*P1P2/(P1Q1*S)-192*A2*P1P2**2/(P1Q1*S)-192*A1*P1Q1/S-
&144*A2*P1Q1/S-384*A1*A2*MB**2*P1Q1/S-480*A2**2*MB**2*P1Q1/S+
&480*A12*MB*MT*P1Q1/S-96*A1*A2*MB*MT*P1Q1/S-
&864*A12*P1P2*P1Q1/S-672*A1*A2*P1P2*P1Q1/S-96*A1*A2*P1Q1**2/S+
&96*A1*MB*MT**3/(P1Q2*S)+192*A2*MB*MT*P1P2/(P1Q2*S)-
&96*A1*MT**2*P1P2/(P1Q2*S)-192*A2*P1P2**2/(P1Q2*S)+
&48*A1*MB*MT*P1Q1/(P1Q2*S)-96*A2*MB*MT*P1Q1/(P1Q2*S)-
&48*A1*MT**2*P1Q1/(P1Q2*S)-192*A1*P1P2*P1Q1/(P1Q2*S)-
&192*A2*P1P2*P1Q1/(P1Q2*S)-192*A1*A2*MB*MT*P1P2*P1Q1/(P1Q2*S)
A18=A18+192*A1*A2*P1P2**2*P1Q1/(P1Q2*S)-192*A1*P1Q1**2/(P1Q2*S)-
&192*A2*P1Q1**2/(P1Q2*S)+192*A1*A2*MB**2*P1Q1**2/(P1Q2*S)+
&192*A12*MB*MT*P1Q1**2/(P1Q2*S)-96*A1*A2*MB*MT*P1Q1**2/(P1Q2*S)+
&192*A1*A2*P1P2*P1Q1**2/(P1Q2*S)-192*A1*P1Q2/S-144*A2*P1Q2/S-
&384*A1*A2*MB**2*P1Q2/S-480*A2**2*MB**2*P1Q2/S+
&480*A12*MB*MT*P1Q2/S-96*A1*A2*MB*MT*P1Q2/S-
&864*A12*P1P2*P1Q2/S-672*A1*A2*P1P2*P1Q2/S+
&48*A1*MB*MT*P1Q2/(P1Q1*S)-96*A2*MB*MT*P1Q2/(P1Q1*S)-
&48*A1*MT**2*P1Q2/(P1Q1*S)-192*A1*P1P2*P1Q2/(P1Q1*S)-
&192*A2*P1P2*P1Q2/(P1Q1*S)-192*A1*A2*MB*MT*P1P2*P1Q2/(P1Q1*S)+
&192*A1*A2*P1P2**2*P1Q2/(P1Q1*S)-576*A1*A2*P1Q1*P1Q2/S-
&96*A1*A2*P1Q2**2/S-192*A1*P1Q2**2/(P1Q1*S)-
&192*A2*P1Q2**2/(P1Q1*S)+192*A1*A2*MB**2*P1Q2**2/(P1Q1*S)+
&192*A12*MB*MT*P1Q2**2/(P1Q1*S)-96*A1*A2*MB*MT*P1Q2**2/(P1Q1*S)+
&192*A1*A2*P1P2*P1Q2**2/(P1Q1*S)-96*A2*MB**3*MT/(P2Q1*S)+
&96*A2*MB**2*P1P2/(P2Q1*S)-192*A1*MB*MT*P1P2/(P2Q1*S)+
&192*A1*P1P2**2/(P2Q1*S)+96*A1*MB**2*P1Q1/(P2Q1*S)
A18=A18+192*A2*MB**2*P1Q1/(P2Q1*S)-96*A1*MB*MT*P1Q1/(P2Q1*S)-
&192*A1*A2*MB**3*MT*P1Q1/(P2Q1*S)+192*A1*P1P2*P1Q1/(P2Q1*S)+
&192*A1*A2*MB**2*P1P2*P1Q1/(P2Q1*S)+
&96*A1*A2*MB**2*P1Q1**2/(P2Q1*S)-
&192*A2*MB**3*MT*P1Q1/(P1Q2*P2Q1*S)+
&192*A2*MB**2*P1P2*P1Q1/(P1Q2*P2Q1*S)-
&96*A1*MB*MT*P1P2*P1Q1/(P1Q2*P2Q1*S)+
&96*A1*P1P2**2*P1Q1/(P1Q2*P2Q1*S)+
&96*A1*MB**2*P1Q1**2/(P1Q2*P2Q1*S)+
&192*A2*MB**2*P1Q1**2/(P1Q2*P2Q1*S)-
&48*A1*MB*MT*P1Q1**2/(P1Q2*P2Q1*S)+
&96*A1*P1P2*P1Q1**2/(P1Q2*P2Q1*S)+96*A1*MB**2*P1Q2/(P2Q1*S)+
&48*A2*MB**2*P1Q2/(P2Q1*S)+192*A1*A2*MB**3*MT*P1Q2/(P2Q1*S)-
&192*A1*A2*MB**2*P1P2*P1Q2/(P2Q1*S)-
&96*A1*A2*MB**2*P1Q2**2/(P2Q1*S)+144*A1*P2Q1/S+192*A2*P2Q1/S+
&96*A1*A2*MB*MT*P2Q1/S-480*A2**2*MB*MT*P2Q1/S+
&480*A12*MT**2*P2Q1/S+384*A1*A2*MT**2*P2Q1/S
A18=A18+672*A1*A2*P1P2*P2Q1/S+864*A2**2*P1P2*P2Q1/S-
&96*A2*MB*MT*P2Q1/(P1Q1*S)+192*A1*MT**2*P2Q1/(P1Q1*S)+
&96*A2*MT**2*P2Q1/(P1Q1*S)-192*A1*A2*MB*MT**3*P2Q1/(P1Q1*S)+
&192*A2*P1P2*P2Q1/(P1Q1*S)+192*A1*A2*MT**2*P1P2*P2Q1/(P1Q1*S)-
&192*A12*P1Q1*P2Q1/S-192*A2**2*P1Q1*P2Q1/S+
&48*A1*MT**2*P2Q1/(P1Q2*S)+96*A2*MT**2*P2Q1/(P1Q2*S)+
&192*A1*A2*MB*MT**3*P2Q1/(P1Q2*S)-
&192*A1*A2*MT**2*P1P2*P2Q1/(P1Q2*S)+
&96*A1*A2*MB*MT*P1Q1*P2Q1/(P1Q2*S)-
&192*A12*MT**2*P1Q1*P2Q1/(P1Q2*S)-
&96*A1*A2*MT**2*P1Q1*P2Q1/(P1Q2*S)-
&384*A1*A2*P1P2*P1Q1*P2Q1/(P1Q2*S)-384*A12*P1Q1**2*P2Q1/(P1Q2*S)-
&384*A1*A2*P1Q1**2*P2Q1/(P1Q2*S)-480*A12*P1Q2*P2Q1/S-
&960*A1*A2*P1Q2*P2Q1/S-480*A2**2*P1Q2*P2Q1/S+
&144*A1*P1Q2*P2Q1/(P1Q1*S)+96*A2*P1Q2*P2Q1/(P1Q1*S)+
&384*A1*A2*MB*MT*P1Q2*P2Q1/(P1Q1*S)-
&96*A12*MT**2*P1Q2*P2Q1/(P1Q1*S)
A18=A18+96*A1*A2*MT**2*P1Q2*P2Q1/(P1Q1*S)-
&576*A1*A2*P1P2*P1Q2*P2Q1/(P1Q1*S)-192*A12*P1Q2**2*P2Q1/(P1Q1*S)-
&384*A1*A2*P1Q2**2*P2Q1/(P1Q1*S)-96*A1*A2*P2Q1**2/S-
&96*A1*A2*MT**2*P2Q1**2/(P1Q1*S)+96*A1*A2*MT**2*P2Q1**2/(P1Q2*S)+
&288*A1*A2*P1Q2*P2Q1**2/(P1Q1*S)-96*A2*MB**3*MT/(P2Q2*S)+
&96*A2*MB**2*P1P2/(P2Q2*S)-192*A1*MB*MT*P1P2/(P2Q2*S)+
&192*A1*P1P2**2/(P2Q2*S)+96*A1*MB**2*P1Q1/(P2Q2*S)+
&48*A2*MB**2*P1Q1/(P2Q2*S)+192*A1*A2*MB**3*MT*P1Q1/(P2Q2*S)-
&192*A1*A2*MB**2*P1P2*P1Q1/(P2Q2*S)-
&96*A1*A2*MB**2*P1Q1**2/(P2Q2*S)+96*A1*MB**2*P1Q2/(P2Q2*S)+
&192*A2*MB**2*P1Q2/(P2Q2*S)-96*A1*MB*MT*P1Q2/(P2Q2*S)-
&192*A1*A2*MB**3*MT*P1Q2/(P2Q2*S)+192*A1*P1P2*P1Q2/(P2Q2*S)+
&192*A1*A2*MB**2*P1P2*P1Q2/(P2Q2*S)-
&192*A2*MB**3*MT*P1Q2/(P1Q1*P2Q2*S)+
&192*A2*MB**2*P1P2*P1Q2/(P1Q1*P2Q2*S)-
&96*A1*MB*MT*P1P2*P1Q2/(P1Q1*P2Q2*S)+
&96*A1*P1P2**2*P1Q2/(P1Q1*P2Q2*S)+96*A1*A2*MB**2*P1Q2**2/(P2Q2*S)
A18=A18+96*A1*MB**2*P1Q2**2/(P1Q1*P2Q2*S)+
&192*A2*MB**2*P1Q2**2/(P1Q1*P2Q2*S)-
&48*A1*MB*MT*P1Q2**2/(P1Q1*P2Q2*S)+
&96*A1*P1P2*P1Q2**2/(P1Q1*P2Q2*S)-48*A2*MB**2*P2Q1/(P2Q2*S)-
&96*A1*MB*MT*P2Q1/(P2Q2*S)+48*A2*MB*MT*P2Q1/(P2Q2*S)-
&192*A1*P1P2*P2Q1/(P2Q2*S)-192*A2*P1P2*P2Q1/(P2Q2*S)-
&192*A1*A2*MB*MT*P1P2*P2Q1/(P2Q2*S)+
&192*A1*A2*P1P2**2*P2Q1/(P2Q2*S)+
&192*A1*MB*MT**3*P2Q1/(P1Q1*P2Q2*S)+
&96*A2*MB*MT*P1P2*P2Q1/(P1Q1*P2Q2*S)-
&192*A1*MT**2*P1P2*P2Q1/(P1Q1*P2Q2*S)-
&96*A2*P1P2**2*P2Q1/(P1Q1*P2Q2*S)+
&96*A1*A2*MB**2*P1Q1*P2Q1/(P2Q2*S)+
&192*A2**2*MB**2*P1Q1*P2Q1/(P2Q2*S)-
&96*A1*A2*MB*MT*P1Q1*P2Q1/(P2Q2*S)+
&384*A1*A2*P1P2*P1Q1*P2Q1/(P2Q2*S)-96*A1*P1Q2*P2Q1/(P2Q2*S)-
&144*A2*P1Q2*P2Q1/(P2Q2*S)-96*A1*A2*MB**2*P1Q2*P2Q1/(P2Q2*S)
A18=A18+96*A2**2*MB**2*P1Q2*P2Q1/(P2Q2*S)-
&384*A1*A2*MB*MT*P1Q2*P2Q1/(P2Q2*S)+
&576*A1*A2*P1P2*P1Q2*P2Q1/(P2Q2*S)-
&96*A2*MB**2*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&48*A1*MB*MT*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&48*A2*MB*MT*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&96*A1*MT**2*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&96*A1*P1P2*P1Q2*P2Q1/(P1Q1*P2Q2*S)-
&96*A2*P1P2*P1Q2*P2Q1/(P1Q1*P2Q2*S)+
&96*A1*A2*P1Q1*P1Q2*P2Q1/(P2Q2*S)+288*A1*A2*P1Q2**2*P2Q1/(P2Q2*S)-
&96*A1*P1Q2**2*P2Q1/(P1Q1*P2Q2*S)-96*A2*P1Q2**2*P2Q1/(P1Q1*P2Q2*S)+
&192*A1*P2Q1**2/(P2Q2*S)+192*A2*P2Q1**2/(P2Q2*S)+
&96*A1*A2*MB*MT*P2Q1**2/(P2Q2*S)-192*A2**2*MB*MT*P2Q1**2/(P2Q2*S)-
&192*A1*A2*MT**2*P2Q1**2/(P2Q2*S)-192*A1*A2*P1P2*P2Q1**2/(P2Q2*S)-
&48*A2*MB*MT*P2Q1**2/(P1Q1*P2Q2*S)+
&192*A1*MT**2*P2Q1**2/(P1Q1*P2Q2*S)+
&96*A2*MT**2*P2Q1**2/(P1Q1*P2Q2*S)
A18=A18+96*A2*P1P2*P2Q1**2/(P1Q1*P2Q2*S)-
&384*A1*A2*P1Q1*P2Q1**2/(P2Q2*S)-
&384*A2**2*P1Q1*P2Q1**2/(P2Q2*S)-384*A1*A2*P1Q2*P2Q1**2/(P2Q2*S)-
&192*A2**2*P1Q2*P2Q1**2/(P2Q2*S)+96*A1*P1Q2*P2Q1**2/(P1Q1*P2Q2*S)+
&96*A2*P1Q2*P2Q1**2/(P1Q1*P2Q2*S)+144*A1*P2Q2/S+192*A2*P2Q2/S+
&96*A1*A2*MB*MT*P2Q2/S-480*A2**2*MB*MT*P2Q2/S+
&480*A12*MT**2*P2Q2/S+384*A1*A2*MT**2*P2Q2/S+
&672*A1*A2*P1P2*P2Q2/S+864*A2**2*P1P2*P2Q2/S+
&48*A1*MT**2*P2Q2/(P1Q1*S)+96*A2*MT**2*P2Q2/(P1Q1*S)+
&192*A1*A2*MB*MT**3*P2Q2/(P1Q1*S)-
&192*A1*A2*MT**2*P1P2*P2Q2/(P1Q1*S)-480*A12*P1Q1*P2Q2/S-
&960*A1*A2*P1Q1*P2Q2/S-480*A2**2*P1Q1*P2Q2/S-
&96*A2*MB*MT*P2Q2/(P1Q2*S)+192*A1*MT**2*P2Q2/(P1Q2*S)+
&96*A2*MT**2*P2Q2/(P1Q2*S)-192*A1*A2*MB*MT**3*P2Q2/(P1Q2*S)+
&192*A2*P1P2*P2Q2/(P1Q2*S)+192*A1*A2*MT**2*P1P2*P2Q2/(P1Q2*S)+
&144*A1*P1Q1*P2Q2/(P1Q2*S)+96*A2*P1Q1*P2Q2/(P1Q2*S)+
&384*A1*A2*MB*MT*P1Q1*P2Q2/(P1Q2*S)
A18=A18-96*A12*MT**2*P1Q1*P2Q2/(P1Q2*S)+
&96*A1*A2*MT**2*P1Q1*P2Q2/(P1Q2*S)-
&576*A1*A2*P1P2*P1Q1*P2Q2/(P1Q2*S)-192*A12*P1Q1**2*P2Q2/(P1Q2*S)-
&384*A1*A2*P1Q1**2*P2Q2/(P1Q2*S)-192*A12*P1Q2*P2Q2/S-
&192*A2**2*P1Q2*P2Q2/S+96*A1*A2*MB*MT*P1Q2*P2Q2/(P1Q1*S)-
&192*A12*MT**2*P1Q2*P2Q2/(P1Q1*S)-
&96*A1*A2*MT**2*P1Q2*P2Q2/(P1Q1*S)-
&384*A1*A2*P1P2*P1Q2*P2Q2/(P1Q1*S)-384*A12*P1Q2**2*P2Q2/(P1Q1*S)-
&384*A1*A2*P1Q2**2*P2Q2/(P1Q1*S)-48*A2*MB**2*P2Q2/(P2Q1*S)-
&96*A1*MB*MT*P2Q2/(P2Q1*S)+48*A2*MB*MT*P2Q2/(P2Q1*S)-
&192*A1*P1P2*P2Q2/(P2Q1*S)-192*A2*P1P2*P2Q2/(P2Q1*S)-
&192*A1*A2*MB*MT*P1P2*P2Q2/(P2Q1*S)+
&192*A1*A2*P1P2**2*P2Q2/(P2Q1*S)-96*A1*P1Q1*P2Q2/(P2Q1*S)-
&144*A2*P1Q1*P2Q2/(P2Q1*S)-96*A1*A2*MB**2*P1Q1*P2Q2/(P2Q1*S)+
&96*A2**2*MB**2*P1Q1*P2Q2/(P2Q1*S)-
&384*A1*A2*MB*MT*P1Q1*P2Q2/(P2Q1*S)+
&576*A1*A2*P1P2*P1Q1*P2Q2/(P2Q1*S)+288*A1*A2*P1Q1**2*P2Q2/(P2Q1*S)
A18=A18+192*A1*MB*MT**3*P2Q2/(P1Q2*P2Q1*S)+
&96*A2*MB*MT*P1P2*P2Q2/(P1Q2*P2Q1*S)-
&192*A1*MT**2*P1P2*P2Q2/(P1Q2*P2Q1*S)-
&96*A2*P1P2**2*P2Q2/(P1Q2*P2Q1*S)-
&96*A2*MB**2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&48*A1*MB*MT*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&48*A2*MB*MT*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A1*MT**2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A1*P1P2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A2*P1P2*P1Q1*P2Q2/(P1Q2*P2Q1*S)-
&96*A1*P1Q1**2*P2Q2/(P1Q2*P2Q1*S)-96*A2*P1Q1**2*P2Q2/(P1Q2*P2Q1*S)+
&96*A1*A2*MB**2*P1Q2*P2Q2/(P2Q1*S)+
&192*A2**2*MB**2*P1Q2*P2Q2/(P2Q1*S)-
&96*A1*A2*MB*MT*P1Q2*P2Q2/(P2Q1*S)+
&384*A1*A2*P1P2*P1Q2*P2Q2/(P2Q1*S)+
&96*A1*A2*P1Q1*P1Q2*P2Q2/(P2Q1*S)-576*A1*A2*P2Q1*P2Q2/S+
&96*A1*A2*P1Q1*P2Q1*P2Q2/(P1Q2*S)+96*A1*A2*P1Q2*P2Q1*P2Q2/(P1Q1*S)
A18=A18-96*A1*A2*P2Q2**2/S+96*A1*A2*MT**2*P2Q2**2/(P1Q1*S)-
&96*A1*A2*MT**2*P2Q2**2/(P1Q2*S)+288*A1*A2*P1Q1*P2Q2**2/(P1Q2*S)+
&192*A1*P2Q2**2/(P2Q1*S)+192*A2*P2Q2**2/(P2Q1*S)+
&96*A1*A2*MB*MT*P2Q2**2/(P2Q1*S)-192*A2**2*MB*MT*P2Q2**2/(P2Q1*S)-
&192*A1*A2*MT**2*P2Q2**2/(P2Q1*S)-192*A1*A2*P1P2*P2Q2**2/(P2Q1*S)-
&384*A1*A2*P1Q1*P2Q2**2/(P2Q1*S)-192*A2**2*P1Q1*P2Q2**2/(P2Q1*S)-
&48*A2*MB*MT*P2Q2**2/(P1Q2*P2Q1*S)+
&192*A1*MT**2*P2Q2**2/(P1Q2*P2Q1*S)+
&96*A2*MT**2*P2Q2**2/(P1Q2*P2Q1*S)+
&96*A2*P1P2*P2Q2**2/(P1Q2*P2Q1*S)+96*A1*P1Q1*P2Q2**2/(P1Q2*P2Q1*S)+
&96*A2*P1Q1*P2Q2**2/(P1Q2*P2Q1*S)-384*A1*A2*P1Q2*P2Q2**2/(P2Q1*S)-
&384*A2**2*P1Q2*P2Q2**2/(P2Q1*S)+512*A1*A2*S/3-
&128*A1*MT**2*S/(3*P1Q1**2)+128*A12*MB*MT**3*S/(3*P1Q1**2)-
&152*A1*S/(3*P1Q1)-152*A12*MB*MT*S/(3*P1Q1)-
&128*A1*A2*MB*MT*S/(3*P1Q1)+112*A1*A2*MT**2*S/(3*P1Q1)-
&16*A12*P1P2*S/P1Q1+152*A1*A2*P1P2*S/(3*P1Q1)-
&128*A1*MT**2*S/(3*P1Q2**2)+128*A12*MB*MT**3*S/(3*P1Q2**2)
A18=A18-152*A1*S/(3*P1Q2)-152*A12*MB*MT*S/(3*P1Q2)-
&128*A1*A2*MB*MT*S/(3*P1Q2)+112*A1*A2*MT**2*S/(3*P1Q2)-
&16*A12*P1P2*S/P1Q2+152*A1*A2*P1P2*S/(3*P1Q2)+
&16*A1*MB*MT*S/(3*P1Q1*P1Q2)-32*A12*MB*MT**3*S/(3*P1Q1*P1Q2)-
&16*A1*P1P2*S/(3*P1Q1*P1Q2)+272*A1*A2*P1Q1*S/(3*P1Q2)+
&272*A1*A2*P1Q2*S/(3*P1Q1)-128*A2*MB**2*S/(3*P2Q1**2)+
&128*A2**2*MB**3*MT*S/(3*P2Q1**2)+
&32*MB**2*MT**2*S/(3*P1Q2**2*P2Q1**2)+32*MB**2*S/(3*P1Q2*P2Q1**2)
A18BIS=
&64*A2*MB**3*MT*S/(3*P1Q2*P2Q1**2)-
&64*A2*MB**2*MT**2*S/(3*P1Q2*P2Q1**2)-
&128*A2*MB**2*P1P2*S/(3*P1Q2*P2Q1**2)-
&128*A2*MB**2*P1Q1*S/(3*P1Q2*P2Q1**2)+
&128*A2**2*MB**2*P1Q2*S/(3*P2Q1**2)+152*A2*S/(3*P2Q1)-
&112*A1*A2*MB**2*S/(3*P2Q1)+128*A1*A2*MB*MT*S/(3*P2Q1)+
&152*A2**2*MB*MT*S/(3*P2Q1)-152*A1*A2*P1P2*S/(3*P2Q1)+
&16*A2**2*P1P2*S/P2Q1-8*A1*A2*MB**3*MT*S/(3*P1Q1*P2Q1)+
&16*A1*A2*MB**2*MT**2*S/(3*P1Q1*P2Q1)-
&8*A1*A2*MB*MT**3*S/(3*P1Q1*P2Q1)-8*A1*P1P2*S/(3*P1Q1*P2Q1)-
&8*A2*P1P2*S/(3*P1Q1*P2Q1)+8*A1*A2*MB**2*P1P2*S/(3*P1Q1*P2Q1)-
&16*A1*A2*MB*MT*P1P2*S/(3*P1Q1*P2Q1)+
&8*A1*A2*MT**2*P1P2*S/(3*P1Q1*P2Q1)+
&32*A1*A2*P1P2**2*S/(3*P1Q1*P2Q1)-32*A2**2*P1Q1*S/(3*P2Q1)-
&32*MT**2*S/(3*P1Q2**2*P2Q1)+64*A1*MB**2*MT**2*S/(3*P1Q2**2*P2Q1)-
&64*A1*MB*MT**3*S/(3*P1Q2**2*P2Q1)
A18BIS=A18BIS+128*A1*MT**2*P1P2*S/(3*P1Q2**2*P2Q1)-
&12*S/(P1Q2*P2Q1)+
&24*A1*MB**2*S/(P1Q2*P2Q1)+64*A1*A2*MB**3*MT*S/(3*P1Q2*P2Q1)+
&24*A2*MT**2*S/(P1Q2*P2Q1)-128*A1*A2*MB**2*MT**2*S/(3*P1Q2*P2Q1)+
&64*A1*A2*MB*MT**3*S/(3*P1Q2*P2Q1)+56*A1*P1P2*S/(3*P1Q2*P2Q1)+
&56*A2*P1P2*S/(3*P1Q2*P2Q1)-64*A1*A2*MB**2*P1P2*S/(3*P1Q2*P2Q1)+
&128*A1*A2*MB*MT*P1P2*S/(3*P1Q2*P2Q1)-
&64*A1*A2*MT**2*P1P2*S/(3*P1Q2*P2Q1)-
&256*A1*A2*P1P2**2*S/(3*P1Q2*P2Q1)+4*P1P2*S/(3*P1Q1*P1Q2*P2Q1)-
&8*A1*MB*MT*P1P2*S/(3*P1Q1*P1Q2*P2Q1)-
&8*A1*MT**2*P1P2*S/(3*P1Q1*P1Q2*P2Q1)+136*A2*P1Q1*S/(3*P1Q2*P2Q1)-
&128*A1*A2*MB**2*P1Q1*S/(3*P1Q2*P2Q1)+
&128*A1*A2*MB*MT*P1Q1*S/(3*P1Q2*P2Q1)-
&256*A1*A2*P1P2*P1Q1*S/(3*P1Q2*P2Q1)-160*A2**2*P1Q2*S/(3*P2Q1)+
&16*A1*A2*P1P2*P1Q2*S/(3*P1Q1*P2Q1)-32*A12*P2Q1*S/(3*P1Q1)-
&128*A12*MT**2*P2Q1*S/(3*P1Q2**2)-160*A12*P2Q1*S/(3*P1Q2)-
&128*A2*MB**2*S/(3*P2Q2**2)+128*A2**2*MB**3*MT*S/(3*P2Q2**2)
A18BIS=A18BIS+32*MB**2*MT**2*S/(3*P1Q1**2*P2Q2**2)+
&32*MB**2*S/(3*P1Q1*P2Q2**2)+
&64*A2*MB**3*MT*S/(3*P1Q1*P2Q2**2)-
&64*A2*MB**2*MT**2*S/(3*P1Q1*P2Q2**2)-
&128*A2*MB**2*P1P2*S/(3*P1Q1*P2Q2**2)+
&128*A2**2*MB**2*P1Q1*S/(3*P2Q2**2)-
&128*A2*MB**2*P1Q2*S/(3*P1Q1*P2Q2**2)+152*A2*S/(3*P2Q2)-
&112*A1*A2*MB**2*S/(3*P2Q2)+128*A1*A2*MB*MT*S/(3*P2Q2)+
&152*A2**2*MB*MT*S/(3*P2Q2)-152*A1*A2*P1P2*S/(3*P2Q2)+
&16*A2**2*P1P2*S/P2Q2-32*MT**2*S/(3*P1Q1**2*P2Q2)+
&64*A1*MB**2*MT**2*S/(3*P1Q1**2*P2Q2)-
&64*A1*MB*MT**3*S/(3*P1Q1**2*P2Q2)+
&128*A1*MT**2*P1P2*S/(3*P1Q1**2*P2Q2)-12*S/(P1Q1*P2Q2)+
&24*A1*MB**2*S/(P1Q1*P2Q2)+64*A1*A2*MB**3*MT*S/(3*P1Q1*P2Q2)+
&24*A2*MT**2*S/(P1Q1*P2Q2)-128*A1*A2*MB**2*MT**2*S/(3*P1Q1*P2Q2)+
&64*A1*A2*MB*MT**3*S/(3*P1Q1*P2Q2)+56*A1*P1P2*S/(3*P1Q1*P2Q2)+
&56*A2*P1P2*S/(3*P1Q1*P2Q2)-64*A1*A2*MB**2*P1P2*S/(3*P1Q1*P2Q2)
A18BIS=A18BIS+128*A1*A2*MB*MT*P1P2*S/(3*P1Q1*P2Q2)-
&64*A1*A2*MT**2*P1P2*S/(3*P1Q1*P2Q2)-
&256*A1*A2*P1P2**2*S/(3*P1Q1*P2Q2)-160*A2**2*P1Q1*S/(3*P2Q2)-
&8*A1*A2*MB**3*MT*S/(3*P1Q2*P2Q2)+
&16*A1*A2*MB**2*MT**2*S/(3*P1Q2*P2Q2)-
&8*A1*A2*MB*MT**3*S/(3*P1Q2*P2Q2)-8*A1*P1P2*S/(3*P1Q2*P2Q2)-
&8*A2*P1P2*S/(3*P1Q2*P2Q2)+8*A1*A2*MB**2*P1P2*S/(3*P1Q2*P2Q2)-
&16*A1*A2*MB*MT*P1P2*S/(3*P1Q2*P2Q2)+
&8*A1*A2*MT**2*P1P2*S/(3*P1Q2*P2Q2)+
&32*A1*A2*P1P2**2*S/(3*P1Q2*P2Q2)+4*P1P2*S/(3*P1Q1*P1Q2*P2Q2)-
&8*A1*MB*MT*P1P2*S/(3*P1Q1*P1Q2*P2Q2)-
&8*A1*MT**2*P1P2*S/(3*P1Q1*P1Q2*P2Q2)+
&16*A1*A2*P1P2*P1Q1*S/(3*P1Q2*P2Q2)-32*A2**2*P1Q2*S/(3*P2Q2)+
&136*A2*P1Q2*S/(3*P1Q1*P2Q2)-128*A1*A2*MB**2*P1Q2*S/(3*P1Q1*P2Q2)+
&128*A1*A2*MB*MT*P1Q2*S/(3*P1Q1*P2Q2)-
&256*A1*A2*P1P2*P1Q2*S/(3*P1Q1*P2Q2)+16*A2*MB*MT*S/(3*P2Q1*P2Q2)-
&32*A2**2*MB**3*MT*S/(3*P2Q1*P2Q2)-16*A2*P1P2*S/(3*P2Q1*P2Q2)
A18BIS=A18BIS-4*P1P2*S/(3*P1Q1*P2Q1*P2Q2)+
&8*A2*MB**2*P1P2*S/(3*P1Q1*P2Q1*P2Q2)+
&8*A2*MB*MT*P1P2*S/(3*P1Q1*P2Q1*P2Q2)-4*P1P2*S/(3*P1Q2*P2Q1*P2Q2)+
&8*A2*MB**2*P1P2*S/(3*P1Q2*P2Q1*P2Q2)+
&8*A2*MB*MT*P1P2*S/(3*P1Q2*P2Q1*P2Q2)-
&2*MB**3*MT*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)+
&4*MB**2*MT**2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&2*MB*MT**3*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&2*MB**2*P1P2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)+
&4*MB*MT*P1P2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&2*MT**2*P1P2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)-
&8*P1P2**2*S/(3*P1Q1*P1Q2*P2Q1*P2Q2)+
&8*A2*P1P2*P1Q1*S/(3*P1Q2*P2Q1*P2Q2)+
&8*A2*P1P2*P1Q2*S/(3*P1Q1*P2Q1*P2Q2)+272*A1*A2*P2Q1*S/(3*P2Q2)-
&128*A1*MT**2*P2Q1*S/(3*P1Q1**2*P2Q2)-136*A1*P2Q1*S/(3*P1Q1*P2Q2)-
&128*A1*A2*MB*MT*P2Q1*S/(3*P1Q1*P2Q2)+
&128*A1*A2*MT**2*P2Q1*S/(3*P1Q1*P2Q2)
A18BIS=A18BIS+256*A1*A2*P1P2*P2Q1*S/(3*P1Q1*P2Q2)-
&16*A1*A2*P1P2*P2Q1*S/(3*P1Q2*P2Q2)+
&8*A1*P1P2*P2Q1*S/(3*P1Q1*P1Q2*P2Q2)+
&256*A1*A2*P1Q2*P2Q1*S/(3*P1Q1*P2Q2)-
&128*A12*MT**2*P2Q2*S/(3*P1Q1**2)-160*A12*P2Q2*S/(3*P1Q1)-
&32*A12*P2Q2*S/(3*P1Q2)+272*A1*A2*P2Q2*S/(3*P2Q1)-
&16*A1*A2*P1P2*P2Q2*S/(3*P1Q1*P2Q1)-
&128*A1*MT**2*P2Q2*S/(3*P1Q2**2*P2Q1)-136*A1*P2Q2*S/(3*P1Q2*P2Q1)-
&128*A1*A2*MB*MT*P2Q2*S/(3*P1Q2*P2Q1)+
&128*A1*A2*MT**2*P2Q2*S/(3*P1Q2*P2Q1)+
&256*A1*A2*P1P2*P2Q2*S/(3*P1Q2*P2Q1)+
&8*A1*P1P2*P2Q2*S/(3*P1Q1*P1Q2*P2Q1)+
&256*A1*A2*P1Q1*P2Q2*S/(3*P1Q2*P2Q1)-
&8*A12*MB*MT*S**2/(3*P1Q1*P1Q2)+16*A12*P1P2*S**2/(3*P1Q1*P1Q2)-
&8*A1*A2*P1P2*S**2/(3*P1Q1*P2Q1)+4*A1*P1P2*S**2/(3*P1Q1*P1Q2*P2Q1)-
&8*A1*A2*P1P2*S**2/(3*P1Q2*P2Q2)+4*A1*P1P2*S**2/(3*P1Q1*P1Q2*P2Q2)-
&8*A2**2*MB*MT*S**2/(3*P2Q1*P2Q2)+16*A2**2*P1P2*S**2/(3*P2Q1*P2Q2)
A18BIS=A18BIS-4*A2*P1P2*S**2/(3*P1Q1*P2Q1*P2Q2)-
&4*A2*P1P2*S**2/(3*P1Q2*P2Q1*P2Q2)+
&2*P1P2*S**2/(3*P1Q1*P1Q2*P2Q1*P2Q2)
C
V18=V18+V18BIS
A18=A18+A18BIS
V910 =-48*A12*MB*MT-48*A2**2*MB*MT-48*A12*P1P2-48*A2**2*P1P2-
&384*A12*MB*MT*P1Q1*P1Q2/S**2-384*A12*P1P2*P1Q1*P1Q2/S**2-
&384*A1*A2*MB*MT*P1Q2*P2Q1/S**2-384*A1*A2*P1P2*P1Q2*P2Q1/S**2+
&192*A12*P1Q1*P1Q2*P2Q1/S**2+192*A1*A2*P1Q1*P1Q2*P2Q1/S**2-
&192*A12*P1Q2**2*P2Q1/S**2-192*A1*A2*P1Q2**2*P2Q1/S**2+
&192*A1*A2*P1Q2*P2Q1**2/S**2+192*A2**2*P1Q2*P2Q1**2/S**2-
&384*A1*A2*MB*MT*P1Q1*P2Q2/S**2-384*A1*A2*P1P2*P1Q1*P2Q2/S**2-
&192*A12*P1Q1**2*P2Q2/S**2-192*A1*A2*P1Q1**2*P2Q2/S**2+
&192*A12*P1Q1*P1Q2*P2Q2/S**2+192*A1*A2*P1Q1*P1Q2*P2Q2/S**2-
&384*A2**2*MB*MT*P2Q1*P2Q2/S**2-384*A2**2*P1P2*P2Q1*P2Q2/S**2-
&192*A1*A2*P1Q1*P2Q1*P2Q2/S**2-192*A2**2*P1Q1*P2Q1*P2Q2/S**2-
&192*A1*A2*P1Q2*P2Q1*P2Q2/S**2-192*A2**2*P1Q2*P2Q1*P2Q2/S**2+
&192*A1*A2*P1Q1*P2Q2**2/S**2+192*A2**2*P1Q1*P2Q2**2/S**2+
&96*A12*MB*MT*P1Q1/S-96*A1*A2*MB*MT*P1Q1/S+
&96*A12*P1P2*P1Q1/S-96*A1*A2*P1P2*P1Q1/S+96*A12*MB*MT*P1Q2/S-
&96*A1*A2*MB*MT*P1Q2/S+96*A12*P1P2*P1Q2/S-96*A1*A2*P1P2*P1Q2/S+
&96*A1*A2*MB*MT*P2Q1/S-96*A2**2*MB*MT*P2Q1/S
V910=V910+96*A1*A2*P1P2*P2Q1/S-
&96*A2**2*P1P2*P2Q1/S+96*A12*P1Q2*P2Q1/S+
&192*A1*A2*P1Q2*P2Q1/S+96*A2**2*P1Q2*P2Q1/S+
&96*A1*A2*MB*MT*P2Q2/S-96*A2**2*MB*MT*P2Q2/S+
&96*A1*A2*P1P2*P2Q2/S-96*A2**2*P1P2*P2Q2/S+96*A12*P1Q1*P2Q2/S+
&192*A1*A2*P1Q1*P2Q2/S+96*A2**2*P1Q1*P2Q2/S
C
A910 = 48*A12*MB*MT+48*A2**2*MB*MT-48*A12*P1P2-48*A2**2*P1P2+
&384*A12*MB*MT*P1Q1*P1Q2/S**2-384*A12*P1P2*P1Q1*P1Q2/S**2+
&384*A1*A2*MB*MT*P1Q2*P2Q1/S**2-384*A1*A2*P1P2*P1Q2*P2Q1/S**2+
&192*A12*P1Q1*P1Q2*P2Q1/S**2+192*A1*A2*P1Q1*P1Q2*P2Q1/S**2-
&192*A12*P1Q2**2*P2Q1/S**2-192*A1*A2*P1Q2**2*P2Q1/S**2+
&192*A1*A2*P1Q2*P2Q1**2/S**2+192*A2**2*P1Q2*P2Q1**2/S**2+
&384*A1*A2*MB*MT*P1Q1*P2Q2/S**2-384*A1*A2*P1P2*P1Q1*P2Q2/S**2-
&192*A12*P1Q1**2*P2Q2/S**2-192*A1*A2*P1Q1**2*P2Q2/S**2+
&192*A12*P1Q1*P1Q2*P2Q2/S**2+192*A1*A2*P1Q1*P1Q2*P2Q2/S**2+
&384*A2**2*MB*MT*P2Q1*P2Q2/S**2-384*A2**2*P1P2*P2Q1*P2Q2/S**2-
&192*A1*A2*P1Q1*P2Q1*P2Q2/S**2-192*A2**2*P1Q1*P2Q1*P2Q2/S**2-
&192*A1*A2*P1Q2*P2Q1*P2Q2/S**2-192*A2**2*P1Q2*P2Q1*P2Q2/S**2+
&192*A1*A2*P1Q1*P2Q2**2/S**2+192*A2**2*P1Q1*P2Q2**2/S**2-
&96*A12*MB*MT*P1Q1/S+96*A1*A2*MB*MT*P1Q1/S+
&96*A12*P1P2*P1Q1/S-96*A1*A2*P1P2*P1Q1/S-96*A12*MB*MT*P1Q2/S+
&96*A1*A2*MB*MT*P1Q2/S+96*A12*P1P2*P1Q2/S-96*A1*A2*P1P2*P1Q2/S-
&96*A1*A2*MB*MT*P2Q1/S+96*A2**2*MB*MT*P2Q1/S
A910=A910+96*A1*A2*P1P2*P2Q1/S-
&96*A2**2*P1P2*P2Q1/S+96*A12*P1Q2*P2Q1/S+
&192*A1*A2*P1Q2*P2Q1/S+96*A2**2*P1Q2*P2Q1/S-
&96*A1*A2*MB*MT*P2Q2/S+96*A2**2*MB*MT*P2Q2/S+
&96*A1*A2*P1P2*P2Q2/S-96*A2**2*P1P2*P2Q2/S+96*A12*P1Q1*P2Q2/S+
&192*A1*A2*P1Q1*P2Q2/S+96*A2**2*P1Q1*P2Q2/S
C
C FINAL RESULT;
C
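C     AMP2 COMBINES THE TWO COUPLING PIECES (PRESUMABLY VECTOR V AND
C     AXIAL A, FROM THE NAMING): FACT*PS*VTB**2 TIMES V**2*(V18+V910)
C     PLUS A**2*(A18+A910).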
AMP2= FACT*PS*VTB**2*(V**2 *(V18 +V910)+A**2 *(A18+A910) )
END
|
Suppose $f$ is a holomorphic function on the open ball $B(\xi, r)$ and continuous on the closed ball $\overline{B}(\xi, r)$. If $|f(\xi)|$ is smaller than $|f(z)|$ for all $z$ on the boundary of $\overline{B}(\xi, r)$, then $f$ has a zero in $B(\xi, r)$. |
{-# LANGUAGE DeriveDataTypeable, DeriveGeneric, OverloadedStrings,
RecordWildCards #-}
-- |
-- Module : Statistics.Resampling.Bootstrap
-- Copyright : (c) 2009, 2011 Bryan O'Sullivan
-- License : BSD3
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- The bootstrap method for statistical inference.
module Statistics.Resampling.Bootstrap
(
Estimate(..)
, bootstrapBCA
, scale
-- * References
-- $references
) where
import Control.Applicative ((<$>), (<*>))
import Control.DeepSeq (NFData)
import Control.Exception (assert)
import Control.Monad.Par (parMap, runPar)
import Data.Aeson (FromJSON, ToJSON)
import Data.Binary (Binary)
import Data.Binary (put, get)
import Data.Data (Data)
import Data.Typeable (Typeable)
import Data.Vector.Unboxed ((!))
import GHC.Generics (Generic)
import Statistics.Distribution (cumulative, quantile)
import Statistics.Distribution.Normal
import Statistics.Resampling (Resample(..), jackknife)
import Statistics.Sample (mean)
import Statistics.Types (Estimator, Sample)
import qualified Data.Vector.Unboxed as U
import qualified Statistics.Resampling as R
-- | A point and interval estimate computed via an 'Estimator'.
data Estimate = Estimate {
estPoint :: {-# UNPACK #-} !Double
-- ^ Point estimate.
, estLowerBound :: {-# UNPACK #-} !Double
-- ^ Lower bound of the estimate interval (i.e. the lower bound of
-- the confidence interval).
, estUpperBound :: {-# UNPACK #-} !Double
-- ^ Upper bound of the estimate interval (i.e. the upper bound of
-- the confidence interval).
, estConfidenceLevel :: {-# UNPACK #-} !Double
-- ^ Confidence level of the confidence intervals.
} deriving (Eq, Read, Show, Typeable, Data, Generic)
instance FromJSON Estimate
instance ToJSON Estimate
instance Binary Estimate where
put (Estimate w x y z) = put w >> put x >> put y >> put z
get = Estimate <$> get <*> get <*> get <*> get
instance NFData Estimate
-- | Multiply the point, lower bound, and upper bound in an 'Estimate'
-- by the given value.
scale :: Double -- ^ Value to multiply by.
-> Estimate -> Estimate
scale f e@Estimate{..} = e {
estPoint = f * estPoint
, estLowerBound = f * estLowerBound
, estUpperBound = f * estUpperBound
}
estimate :: Double -> Double -> Double -> Double -> Estimate
estimate pt lb ub cl =
assert (lb <= ub) .
assert (cl > 0 && cl < 1) $
Estimate { estPoint = pt
, estLowerBound = lb
, estUpperBound = ub
, estConfidenceLevel = cl
}
data T = {-# UNPACK #-} !Double :< {-# UNPACK #-} !Double
infixl 2 :<
-- | Bias-corrected accelerated (BCA) bootstrap. This adjusts for both
-- bias and skewness in the resampled distribution.
bootstrapBCA :: Double -- ^ Confidence level
-> Sample -- ^ Sample data
-> [Estimator] -- ^ Estimators
-> [Resample] -- ^ Resampled data
-> [Estimate]
bootstrapBCA confidenceLevel sample estimators resamples
| confidenceLevel > 0 && confidenceLevel < 1
= runPar $ parMap (uncurry e) (zip estimators resamples)
| otherwise = error "Statistics.Resampling.Bootstrap.bootstrapBCA: confidence level outside (0,1) range"
where
e est (Resample resample)
| U.length sample == 1 || isInfinite bias =
estimate pt pt pt confidenceLevel
| otherwise =
estimate pt (resample ! lo) (resample ! hi) confidenceLevel
where
pt = R.estimate est sample
lo = max (cumn a1) 0
where a1 = bias + b1 / (1 - accel * b1)
b1 = bias + z1
hi = min (cumn a2) (ni - 1)
where a2 = bias + b2 / (1 - accel * b2)
b2 = bias - z1
z1 = quantile standard ((1 - confidenceLevel) / 2)
cumn = round . (*n) . cumulative standard
bias = quantile standard (probN / n)
where probN = fromIntegral . U.length . U.filter (<pt) $ resample
ni = U.length resample
n = fromIntegral ni
accel = sumCubes / (6 * (sumSquares ** 1.5))
where (sumSquares :< sumCubes) = U.foldl' f (0 :< 0) jack
f (s :< c) j = s + d2 :< c + d2 * d
where d = jackMean - j
d2 = d * d
jackMean = mean jack
jack = jackknife est sample
-- $references
--
-- * Davison, A.C; Hinkley, D.V. (1997) Bootstrap methods and their
-- application. <http://statwww.epfl.ch/davison/BMA/>
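-- A minimal usage sketch, not part of the original module.  It assumes
-- 'resample' from Statistics.Resampling with signature
-- GenIO -> [Estimator] -> Int -> Sample -> IO [Resample], and a 'Mean'
-- estimator constructor; both names are assumptions about the
-- surrounding package version, so adjust them to the API you have.
--
-- > import qualified Data.Vector.Unboxed as U
-- > import Statistics.Resampling (resample)
-- > import Statistics.Types (Estimator(Mean))
-- > import System.Random.MWC (createSystemRandom)
-- >
-- > meanEstimate :: IO Estimate
-- > meanEstimate = do
-- >   let sample = U.fromList [1.1, 2.3, 0.7, 1.9, 2.8, 1.4]
-- >   gen       <- createSystemRandom
-- >   resamples <- resample gen [Mean] 1000 sample  -- 1000 resamples
-- >   case bootstrapBCA 0.95 sample [Mean] resamples of
-- >     [est] -> return est
-- >     _     -> fail "expected exactly one estimate"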
|
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Frederic Blanqui, 2005-06-17
general results on pairs
*)
Set Implicit Arguments.
From CoLoR Require Import LogicUtil.
Section S.
Variables (A B : Type)
(eqdecA : forall x y : A, {x=y}+{~x=y})
(eqdecB : forall x y : B, {x=y}+{~x=y}).
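(* Decidable equality on [A] and [B] lifts to decidable equality on [A*B]. *)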
Lemma eq_pair_dec : forall x y : A*B, {x=y}+{~x=y}.
Proof.
intros (x1,x2) (y1,y2). case (eqdecA x1 y1); intro.
subst y1. case (eqdecB x2 y2); intro. subst y2. auto.
right. unfold not. intro. injection H. intro. cong.
right. unfold not. intro. injection H. intros. cong.
Qed.
End S.
|
module Data.SnocVect.Elem
import Data.SnocVect
import Decidable.Equality
%default total
public export
data Elem : SnocVect k a -> a -> Type where
Here : Elem (sx :< x) x
There : Elem sx x -> Elem (sx :< y) x
export
{sx : SnocVect 0 a} -> Uninhabited (Elem sx x) where
uninhabited Here impossible
uninhabited (There x) impossible
neitherHereNorThere : DecEq a => {0 x,y : a} ->
Not (x = y) ->
Not (Elem sy x) ->
Not (Elem (sy :< y) x)
neitherHereNorThere Refl _ Here impossible
neitherHereNorThere _ g (There z) = g z
export
isElem : DecEq a => (x : a) -> (sx : SnocVect k a) -> Dec (Elem sx x)
isElem x [<] = No absurd
isElem x (sx :< y) with (decEq x y)
isElem y (sx :< y) | (Yes Refl) = Yes Here
isElem x (sx :< y) | (No contra) with (isElem x sx)
isElem x (sx :< y) | (No _) | (Yes prf) = Yes (There prf)
isElem x (sx :< y) | (No yNotX) | (No xNotInSx) =
No (neitherHereNorThere yNotX xNotInSx)
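-- A usage sketch (assuming snoc-bracket literals resolve to `SnocVect`):
-- `isElem 2 [< 1, 2, 3]` reduces to `Yes (There Here)`, since 2 sits one
-- snoc below the most recent element.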
|
State Before: R : Type u_1
inst✝ : Ring R
⊢ ↑μ * ↑ζ = 1 State After: no goals Tactic: rw [← coe_coe, ← intCoe_mul, moebius_mul_coe_zeta, intCoe_one] |
[STATEMENT]
lemma nyinitcls_emptyD: "\<lbrakk>nyinitcls G s = {}; is_class G C\<rbrakk> \<Longrightarrow> initd C s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>nyinitcls G s = {}; is_class G C\<rbrakk> \<Longrightarrow> initd C s
[PROOF STEP]
unfolding nyinitcls_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>{C. is_class G C \<and> \<not> initd C s} = {}; is_class G C\<rbrakk> \<Longrightarrow> initd C s
[PROOF STEP]
by fast |
(* Title: HOL/MicroJava/DFA/Product.thy
Author: Tobias Nipkow
Copyright 2000 TUM
*)
section \<open>Products as Semilattices\<close>
theory Product
imports Err
begin
definition le :: "'a ord \<Rightarrow> 'b ord \<Rightarrow> ('a * 'b) ord" where
"le rA rB == %(a,b) (a',b'). a <=_rA a' & b <=_rB b'"
definition sup :: "'a ebinop \<Rightarrow> 'b ebinop \<Rightarrow> ('a * 'b)ebinop" where
"sup f g == %(a1,b1)(a2,b2). Err.sup Pair (a1 +_f a2) (b1 +_g b2)"
definition esl :: "'a esl \<Rightarrow> 'b esl \<Rightarrow> ('a * 'b ) esl" where
"esl == %(A,rA,fA) (B,rB,fB). (A \<times> B, le rA rB, sup fA fB)"
abbreviation
lesubprod_sntax :: "'a * 'b \<Rightarrow> 'a ord \<Rightarrow> 'b ord \<Rightarrow> 'a * 'b \<Rightarrow> bool"
("(_ /<='(_,_') _)" [50, 0, 0, 51] 50)
where "p <=(rA,rB) q == p <=_(le rA rB) q"
lemma unfold_lesub_prod:
"p <=(rA,rB) q == le rA rB p q"
by (simp add: lesub_def)
lemma le_prod_Pair_conv [iff]:
"((a1,b1) <=(rA,rB) (a2,b2)) = (a1 <=_rA a2 & b1 <=_rB b2)"
by (simp add: lesub_def le_def)
lemma less_prod_Pair_conv:
"((a1,b1) <_(Product.le rA rB) (a2,b2)) =
(a1 <_rA a2 & b1 <=_rB b2 | a1 <=_rA a2 & b1 <_rB b2)"
apply (unfold lesssub_def)
apply simp
apply blast
done
lemma order_le_prod [iff]:
"order(Product.le rA rB) = (order rA & order rB)"
apply (unfold Semilat.order_def)
apply simp
apply meson
done
lemma acc_le_prodI [intro!]:
"\<lbrakk> acc r\<^sub>A; acc r\<^sub>B \<rbrakk> \<Longrightarrow> acc(Product.le r\<^sub>A r\<^sub>B)"
apply (unfold acc_def)
apply (rule wf_subset)
apply (erule wf_lex_prod)
apply assumption
apply (auto simp add: lesssub_def less_prod_Pair_conv lex_prod_def)
done
lemma unfold_plussub_lift2:
"e1 +_(lift2 f) e2 == lift2 f e1 e2"
by (simp add: plussub_def)
lemma plus_eq_Err_conv [simp]:
assumes "x:A" and "y:A"
and "semilat(err A, Err.le r, lift2 f)"
shows "(x +_f y = Err) = (~(? z:A. x <=_r z & y <=_r z))"
proof -
have plus_le_conv2:
"\<And>r f z. \<lbrakk> z : err A; semilat (err A, r, f); OK x : err A; OK y : err A;
OK x +_f OK y <=_r z\<rbrakk> \<Longrightarrow> OK x <=_r z \<and> OK y <=_r z"
by (rule Semilat.plus_le_conv [OF Semilat.intro, THEN iffD1])
from assms show ?thesis
apply (rule_tac iffI)
apply clarify
apply (drule OK_le_err_OK [THEN iffD2])
apply (drule OK_le_err_OK [THEN iffD2])
apply (drule Semilat.lub [OF Semilat.intro, of _ _ _ "OK x" _ "OK y"])
apply assumption
apply assumption
apply simp
apply simp
apply simp
apply simp
apply (case_tac "x +_f y")
apply assumption
apply (rename_tac "z")
apply (subgoal_tac "OK z: err A")
apply (frule plus_le_conv2)
apply assumption
apply simp
apply blast
apply simp
apply (blast dest: Semilat.orderI [OF Semilat.intro] order_refl)
apply blast
apply (erule subst)
apply (unfold semilat_def err_def closed_def)
apply simp
done
qed
lemma err_semilat_Product_esl:
"\<And>L1 L2. \<lbrakk> err_semilat L1; err_semilat L2 \<rbrakk> \<Longrightarrow> err_semilat(Product.esl L1 L2)"
apply (unfold esl_def Err.sl_def)
apply (simp (no_asm_simp) only: split_tupled_all)
apply simp
apply (simp (no_asm) only: semilat_Def)
apply (simp (no_asm_simp) only: Semilat.closedI [OF Semilat.intro] closed_lift2_sup)
apply (simp (no_asm) only: unfold_lesub_err Err.le_def unfold_plussub_lift2 sup_def)
apply (auto elim: semilat_le_err_OK1 semilat_le_err_OK2
simp add: lift2_def split: err.split)
apply (blast dest: Semilat.orderI [OF Semilat.intro])
apply (blast dest: Semilat.orderI [OF Semilat.intro])
apply (rule OK_le_err_OK [THEN iffD1])
apply (erule subst, subst OK_lift2_OK [symmetric], rule Semilat.lub [OF Semilat.intro])
apply simp
apply simp
apply simp
apply simp
apply simp
apply simp
apply (rule OK_le_err_OK [THEN iffD1])
apply (erule subst, subst OK_lift2_OK [symmetric], rule Semilat.lub [OF Semilat.intro])
apply simp
apply simp
apply simp
apply simp
apply simp
apply simp
done
end
|
/-
Copyright (c) 2018 Andreas Swerdlow. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andreas Swerdlow
-/
import ring_theory.subring
variables {F : Type*} [field F] (S : set F)
class is_subfield extends is_subring S : Prop :=
(inv_mem : ∀ {x : F}, x ≠ 0 → x ∈ S → x⁻¹ ∈ S)
namespace field
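/-- `closure S` is the set of quotients `y / z` with `y`, `z` in the ring
closure of `S` and `z ≠ 0`; `subset_closure` and `closure_subset` below show
it is the smallest subfield containing `S`. -/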
def closure : set F :=
{ x | ∃ y ∈ ring.closure S, ∃ z ∈ ring.closure S, z ≠ 0 ∧ y / z = x }
variables {S}
theorem ring_closure_subset : ring.closure S ⊆ closure S :=
λ x hx, ⟨x, hx, 1, is_submonoid.one_mem _, one_ne_zero, div_one x⟩
instance closure.is_submonoid : is_submonoid (closure S) :=
{ mul_mem := by rintros _ _ ⟨p, hp, q, hq, hq0, rfl⟩ ⟨r, hr, s, hs, hs0, rfl⟩;
exact ⟨p * r, is_submonoid.mul_mem hp hr, q * s, is_submonoid.mul_mem hq hs, mul_ne_zero hq0 hs0, (div_mul_div _ _ hq0 hs0).symm⟩,
one_mem := ring_closure_subset $ is_submonoid.one_mem _ }
instance closure.is_subfield : is_subfield (closure S) :=
{ add_mem := begin
rintros _ _ ⟨p, hp, q, hq, hq0, rfl⟩ ⟨r, hr, s, hs, hs0, rfl⟩,
exact ⟨p * s + q * r, is_add_submonoid.add_mem (is_submonoid.mul_mem hp hs) (is_submonoid.mul_mem hq hr),
q * s, is_submonoid.mul_mem hq hs, mul_ne_zero hq0 hs0, (div_add_div p r hq0 hs0).symm⟩
end,
zero_mem := ring_closure_subset $ is_add_submonoid.zero_mem _,
neg_mem := begin
rintros _ ⟨p, hp, q, hq, hq0, rfl⟩,
exact ⟨-p, is_add_subgroup.neg_mem hp, q, hq, hq0, neg_div q p⟩
end,
inv_mem := begin
rintros _ hp0 ⟨p, hp, q, hq, hq0, rfl⟩,
exact ⟨q, hq, p, hp, (div_ne_zero_iff hq0).1 hp0, (inv_div ((div_ne_zero_iff hq0).1 hp0) hq0).symm⟩
end }
theorem mem_closure {a : F} (ha : a ∈ S) : a ∈ closure S :=
ring_closure_subset $ ring.mem_closure ha
theorem subset_closure : S ⊆ closure S :=
λ _, mem_closure
theorem closure_subset {T : set F} [is_subfield T] (H : S ⊆ T) : closure S ⊆ T :=
by rintros _ ⟨p, hp, q, hq, hq0, rfl⟩; exact is_submonoid.mul_mem (ring.closure_subset H hp)
(is_subfield.inv_mem hq0 $ ring.closure_subset H hq)
theorem closure_subset_iff (s t : set F) [is_subfield t] : closure s ⊆ t ↔ s ⊆ t :=
⟨set.subset.trans subset_closure, closure_subset⟩
theorem closure_mono {s t : set F} (H : s ⊆ t) : closure s ⊆ closure t :=
closure_subset $ set.subset.trans H subset_closure
end field
|
[STATEMENT]
lemma weight_spmf_conv_pmf_None: "weight_spmf p = 1 - pmf p None"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. weight_spmf p = 1 - pmf p None
[PROOF STEP]
by(simp add: pmf_None_eq_weight_spmf) |
subroutine fprime( ei, emxs ,ne1, ne3, ne, ik0, xsec, xsnorm,chia,
1 vrcorr, vicorr, cchi)
c calculate f' including solid state and lifetime effects.
c using algorithm in Ankudinov, Rehr danes paper.
c the output correction is returned via cchi. The rest is input
c mu(omega) = xsec + xsnorm*chia + (cchi)
implicit double precision (a-h, o-z)
include '../HEADERS/const.h'
include '../HEADERS/dim.h'
dimension xsnorm(nex), omega(nex)
complex*16 emxs(nex), xsec(nex), chia(nex), cchi(nex)
complex*16 xmu(nex), aa, bb, temp
c complex*16 c1, ec, x1, x2
complex*16 xmup(nex)
dimension emp(nex)
parameter (eps4 = 1.0d-4)
complex*16 lorenz, funlog, value
external lorenz, funlog
dimension dout(7,nex)
c character*72 string
c dimension oscstr(14),enosc(14)
integer ient
data ient /0/
c$$$c read data from fpf0.dat
c$$$ open (unit=16, file='fpf0.dat', status='old', iostat=ios)
c$$$ read (16,*) string
c$$$ read (16,*) eatom
c$$$ read (16,*) nosc
c$$$ do 5 i=1, nosc
c$$$ read (16,*) oscstr(i), enosc(i)
c$$$ 5 continue
c$$$c the rest is f0(Q) and is not currently needed
c$$$ close (unit=16)
c$$$ call json_read_fpf0(nosc, oscstr, enosc)
ient = ient+1
ifp = 1
efermi = dble(emxs(ne1+1))
xloss = dimag(emxs(1))
ne2 = ne-ne1-ne3
if (ne2.gt.0) then
c DANES
do 10 ie = 1,ne1
10 xmu(ie) = coni*xsnorm(ie) + xsnorm(ie)*chia(ie)
do 11 ie = ne1+1,ne1+ne2
11 xmu (ie) = xsnorm(ie)*chia(ie)
do 12 ie = ne-ne3+1, ne
12 xmu (ie) = coni*xsnorm(ie)
else
c FPRIME
do 13 ie = 1,ne
13 xmu (ie) = xsec(ie) + xsnorm(ie)*chia(ie)
endif
if (abs(vrcorr).gt.eps4) then
bb = xmu(ik0)
efermi = efermi - vrcorr
do 20 ie = 1,ne1
20 omega(ie) = dble(emxs(ie))
call terpc(omega, xmu ,ne1, 1, efermi, bb)
do 30 ie = 1, ne2
30 emxs(ne1+ie) = emxs(ne1+ie) - vrcorr
if (abs(xmu(ik0)).gt. eps4) bb = bb/xmu(ik0)
c rescale values on vertical axis
do 60 ie = ne1+1, ne-ne3
60 xmu(ie) = xmu (ie) * bb
endif
if (vicorr.gt.eps4) then
xloss = xloss + vicorr
do 40 ie=1,ne2
40 omega(ie) = dimag(emxs(ne1+ie))
call terpc(omega, xmu(ne1+1) ,ne2, 1, xloss, aa)
do 50 ie = 1, ne1
xx = vicorr**2 /(vicorr**2 + (dble(emxs(ie))-efermi)**2)
xmu(ie) = xmu(ie)*(1.0d0 - xx) + aa * xx
emxs(ie) = emxs(ie) + coni*vicorr
50 continue
endif
do 200 ie = 1, ne1
c cycle over energy points on horizontal grid
dout(1,ie) = dble(emxs(ie)) * hart
dele = dble(emxs(ie)) - efermi
c delp correspond to pole with negative frequency
c see Sakurai for details
delp = -dele - 2*ei
c delp = dele
c dele = delp
cchi(ie) = 0
if (ne2.gt.0) then
if (abs(dele).lt.eps4) dele = 0.0d0
w1 = dimag(emxs(ne1+1))
w2 = dimag(emxs(ne1+2))
w3 = dimag(emxs(ne1+3))
c matsubara pole
temp = lorenz(xloss,w1,dele)*xmu(ne1+1)*2*coni*w1
temp = temp + lorenz(xloss,w1,delp)*xmu(ne1+1)*2*coni*w1
dout(2,ie)=dble(temp)
c sommerfeld correction
temp = coni*w1**2/ 6*(lorenz(xloss,w3,dele)*xmu(ne1+3)-
2 lorenz(xloss,w2,dele)*xmu(ne1+2)) / (w3-w2)
dout(3,ie)=dble(temp)
cchi(ie) = lorenz(xloss,w1,dele)*xmu(ne1+1) *2*coni*w1
1 + coni * w1**2 / 6 * (lorenz(xloss,w3,dele)*xmu(ne1+3)-
2 lorenz(xloss,w2,dele)*xmu(ne1+2)) / (w3-w2)
c from negative pole has additional minus sign
cchi(ie) = cchi(ie) +
1 lorenz(xloss,w1,delp)*xmu(ne1+1) *2*coni*w1
1 + coni * w1**2 / 6 * (lorenz(xloss,w3,delp)*xmu(ne1+3)-
2 lorenz(xloss,w2,delp)*xmu(ne1+2)) / (w3-w2)
c theta function contribution only for positive pole
if (dele .lt. eps4) cchi(ie) = cchi(ie) - xmu(ie)
if (abs(dele).lt.eps4) cchi(ie) = cchi(ie) + xmu(ie)/2
c anomalous contribution
temp = 0
wp = 2*ei
if (dele.ge.eps4) temp = xmu(ie)
if (abs(dele).lt.eps4) temp = xmu(ie)/2
temp = temp + xmu(ik0)* funlog(1,xloss,wp,dele)
c xmu(iko) + xsec(ik0) if n3 >0
dout(4,ie)=dble(temp)
c integration over vertical axis to final point
n1 = ne1+2
n2 = ne-ne3
call fpint (emxs, xmu, n1, n2, dele, xloss, eps4, efermi,
1 value)
cchi(ie) = cchi(ie) + value
c add contribution from other pole
call fpint (emxs, xmu, n1, n2, delp, xloss, eps4, efermi,
1 value)
cchi(ie) = cchi(ie) + value
endif
c integration over horizontal axis to final point
temp = 0
if (ne2.gt.0) then
c DANES
n1 = ne1-ik0 + 1
do 120 i = ik0, ne1
emp(i-ik0+1) = dble(emxs(i))
xmup(i-ik0+1) = coni*xsnorm(i)
120 continue
do 130 i = 1, ne3
emp(i+n1) = dble(emxs(i+ne-ne3))
xmup(i+n1) = xmu(i+ne-ne3)
130 continue
n2 = n1 + ne3
else
c FPRIME
n1 = 0
do 140 i = 1, ne1
if (n1.eq.0 .and. dble(emxs(i)).gt. dble(emxs(ne1+1)))
1 n1 = i
140 continue
do 150 i = 1, ne3
emp(i) = dble(emxs(ne1+i))
xmup(i) = xmu(ne1+i)
150 continue
n2 = ne3
endif
call fpintp (emp, xmup , n2, dele, xloss, efermi, value)
temp = temp + value
c add contribution from other pole
call fpintp (emp, xmup , n2, delp, xloss, efermi, value)
temp = temp + value
c was used before
cc contribution to fp from poles of the core states
c temp=0
c do 110 i=2, nosc
cc eif = E_f- E_i in hartrees
cc eif = enosc(i)-enosc(1)
cc deltaf = deltaf - oscstr(i)*2*alpinv**2/eif
c temp = temp + alpinv**2 * oscstr(i)* (dele -
c 1 enosc(i)+efermi-1)/ ((dele-enosc(i)+efermi-1)**2+xloss**2)
c temp = temp + alpinv**2 * oscstr(i)* (delp -
c 1 enosc(i)+efermi-1)/ ((delp-enosc(i)+efermi-1)**2+xloss**2)
c 110 continue
dout(5,ie) = dble(temp)
cchi(ie) = cchi(ie) + temp
c total contribution (not normalized)
temp = xmu(ie) + cchi(ie)
dout(6,ie) = dble(temp)
c (integral w2 to wmax) minus (cusp formula)
dout (7,ie) = dout(6,ie)-dout(4,ie)
200 continue
c restore the input energy mesh
if (vicorr.gt.eps4) then
do 250 ie = 1, ne1
250 emxs(ie) = emxs(ie) - coni*vicorr
endif
if (abs(vrcorr).gt.eps4) then
do 260 ie = 1, ne2
260 emxs(ne1+ie) = emxs(ne1+ie) + vrcorr
endif
c if (ient.eq.1) then
open(unit=3,file='danes.dat', status='unknown', iostat=ios)
write(3,310) '# E matsub. sommerf. anomal. tail, total, differ.'
310 format (a)
do 300 ie = 1, ne1
write(3,320) (dout(i,ie), i=1,7)
320 format ( 7(1x,1pe11.4))
300 continue
close(unit=3)
c endif
return
end
complex*16 function funlog (icase, xloss, w, dele)
c anomalous fp should have all main features of total fp
c except smooth difference
c analytic expression for anomalous fp (without integral)
c is obtained by adding and subtracting G(Ef + i*Gamma) / E-w
c and performing integral for Im axis analytically
c icase = 1 simplified expression (compared to 2)
c icase=2 use real w
c icase=3 pure imaginary w (absolute value is input)
implicit double precision (a-h, o-z)
include '../HEADERS/const.h'
parameter (eps4 = 1.0d-4)
if (icase.eq.1) then
if (abs(dele).ge.eps4) then
funlog= coni/2/pi*
1 (log((-xloss+coni*dele)/w)+ log((xloss+coni*dele)/w))
else
funlog= coni/pi*log(abs(xloss/w))
endif
elseif (icase.eq.2) then
if (abs(dele).ge.eps4) then
funlog= coni/2/pi* (w+coni*xloss) * (
1 ( log((-xloss+coni*dele)/w)) / (w+dele+coni*xloss) +
2 ( log(( xloss+coni*dele)/w)) / (w+dele-coni*xloss))
else
funlog= coni/pi*(log(abs(xloss/w)))*
1 (1 + coni*xloss/(w-coni*xloss))
endif
elseif (icase.eq.3) then
if (abs(dele).ge.eps4) then
funlog= -(w+xloss)/2/pi* (
1 log((-xloss+coni*dele)/w) / (dele+coni*(w+xloss)) +
2 log(( xloss+coni*dele)/w) / (dele+coni*(w-xloss)) )
else
funlog= coni/pi* log(abs(xloss/w))*
1 (1 + xloss/(w-xloss))
endif
endif
return
end
subroutine fpint (emxs, xmu, n1, n2, dele, xloss, eps4, efermi,
1 value)
c performs integral for fp calculations between points n1 and n2.
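c method note (descriptive): on each mesh interval the product
c xmu(z)*(z-dele) is approximated linearly in z (coefficients aa, bb
c below) and the remaining Lorentzian kernel is integrated analytically
c by partial fractions, which yields the logarithmic terms.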
implicit double precision (a-h, o-z)
include '../HEADERS/const.h'
include '../HEADERS/dim.h'
complex*16 emxs(nex), xmu(nex), value
complex*16 z1, z2, aa, bb, c1
c last interval - similar to Matsubara pole ( shift and - sign)
c notice that this also works for horizontal axis if last value
c is small
z1 = emxs(n2)-efermi
z2 = emxs(n2-1)-efermi
value = - coni/pi * (z1-dele) / (xloss**2+(z1-dele)**2)
1 *xmu(n2) * (2 * (z1-z2))
c all other intervals
do 300 i = n1, n2-2
z1 = emxs(i) - efermi
z2 = emxs(i+1) - efermi
bb=(xmu(i+1)*(z2-dele) - xmu(i)*(z1-dele)) / xloss / (z2-z1)
aa = xmu(i)*(z1-dele)/xloss - bb * z1
c1 = (aa+bb*(dele+coni*xloss )) / 2 /coni
if (abs(dele-dble(z1)).lt.eps4 .and.
1 abs(dele-dble(z2)).lt.eps4) then
value = value - coni/pi *c1*
1 log( abs((z2-dele-coni*xloss)/(z1-dele-coni*xloss)) )
else
value = value - coni/pi *c1*
1 log((z2-dele-coni*xloss)/(z1-dele-coni*xloss))
endif
c1 = -(aa+bb*(dele-coni*xloss )) / 2 /coni
value = value - coni/pi *c1*
1 log((z2-dele+coni*xloss)/(z1-dele+coni*xloss))
300 continue
return
end
subroutine fpintp (em, xmu, n2, dele, xloss, efermi, value)
c performs integral for fp calculations between points 1 and n2.
c and adds tail to infinity
implicit double precision (a-h, o-z)
include '../HEADERS/const.h'
include '../HEADERS/dim.h'
dimension em(nex)
complex*16 xmu(nex), value
complex*16 z1, z2, aa, bb, cc
value = 0
c all intervals
do 300 i = 1, n2-1
x1 = em(i) - efermi
x2 = em(i+1) - efermi
de = (x2-x1)/2
x0 = (em(i) + em(i+1)) / 2
call terpc(em, xmu, n2, 3, x0, aa)
bb=(xmu(i+1) - xmu(i)) / (x2-x1)
cc = (xmu(i+1) - aa - bb * de) / de**2
z1 = dele - x0 + efermi - coni*xloss
z2 = dele - x0 + efermi + coni*xloss
value = value + 2*de*bb + 2*z1*de*cc +
1 log((de-z1)/(-de-z1)) * (aa+bb*z1+cc*z1**2)
value = value + 2*de*bb + 2*z2*de*cc +
1 log((de-z2)/(-de-z2)) * (aa+bb*z2+cc*z2**2)
300 continue
c tail of xmu to infinity approximated by aa/(w-bb)**2
x1 = em(n2-1)
x2 = em(n2)
a = sqrt ( dble(xmu(n2-1)/xmu(n2)) )
b = ( a*x1 - x2) / (a-1)
if (b.gt. x1) b = 0
aa = xmu(n2) * (x2-b)**2
z1 = dele -coni*xloss - b
z2 = dele +coni*xloss - b
x0 = x2 - b
value = value + log( x0/(x0-z1) ) *aa/z1**2 - aa/z1/x0
value = value + log( x0/(x0-z2) ) *aa/z2**2 - aa/z2/x0
c multiply by constant factor
value = - coni /2 /pi *value
return
end
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Jeremy Avigad, Yury Kudryashov
-/
import order.filter.cofinite
import order.zorn_atoms
/-!
# Ultrafilters
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
An ultrafilter is a minimal (maximal in the set order) proper filter.
In this file we define
* `ultrafilter.of`: an ultrafilter that is less than or equal to a given filter;
* `ultrafilter`: subtype of ultrafilters;
* `ultrafilter.pure`: `pure x` as an `ultrafilter`;
* `ultrafilter.map`, `ultrafilter.bind`, `ultrafilter.comap` : operations on ultrafilters;
* `hyperfilter`: the ultrafilter extending the cofinite filter.
-/
universes u v
variables {α : Type u} {β : Type v} {γ : Type*}
open set filter function
open_locale classical filter
/-- `filter α` is an atomic type: for every filter there exists an ultrafilter that is less than or
equal to this filter. -/
instance : is_atomic (filter α) :=
is_atomic.of_is_chain_bounded $ λ c hc hne hb,
⟨Inf c, (Inf_ne_bot_of_directed' hne (show is_chain (≥) c, from hc.symm).directed_on hb).ne,
λ x hx, Inf_le hx⟩
/-- An ultrafilter is a minimal (maximal in the set order) proper filter. -/
@[protect_proj]
structure ultrafilter (α : Type*) extends filter α :=
(ne_bot' : ne_bot to_filter)
(le_of_le : ∀ g, filter.ne_bot g → g ≤ to_filter → to_filter ≤ g)
namespace ultrafilter
variables {f g : ultrafilter α} {s t : set α} {p q : α → Prop}
instance : has_coe_t (ultrafilter α) (filter α) := ⟨ultrafilter.to_filter⟩
instance : has_mem (set α) (ultrafilter α) := ⟨λ s f, s ∈ (f : filter α)⟩
lemma unique (f : ultrafilter α) {g : filter α} (h : g ≤ f)
(hne : ne_bot g . tactic.apply_instance) : g = f :=
le_antisymm h $ f.le_of_le g hne h
instance ne_bot (f : ultrafilter α) : ne_bot (f : filter α) := f.ne_bot'
protected lemma is_atom (f : ultrafilter α) : is_atom (f : filter α) :=
⟨f.ne_bot.ne, λ g hgf, by_contra $ λ hg, hgf.ne $ f.unique hgf.le ⟨hg⟩⟩
@[simp, norm_cast] lemma mem_coe : s ∈ (f : filter α) ↔ s ∈ f := iff.rfl
lemma coe_injective : injective (coe : ultrafilter α → filter α)
| ⟨f, h₁, h₂⟩ ⟨g, h₃, h₄⟩ rfl := by congr
lemma eq_of_le {f g : ultrafilter α} (h : (f : filter α) ≤ g) : f = g :=
coe_injective (g.unique h)
@[simp, norm_cast] lemma coe_le_coe {f g : ultrafilter α} : (f : filter α) ≤ g ↔ f = g :=
⟨λ h, eq_of_le h, λ h, h ▸ le_rfl⟩
@[simp, norm_cast] lemma coe_inj : (f : filter α) = g ↔ f = g := coe_injective.eq_iff
@[ext] lemma ext ⦃f g : ultrafilter α⦄ (h : ∀ s, s ∈ f ↔ s ∈ g) : f = g :=
coe_injective $ filter.ext h
lemma le_of_inf_ne_bot (f : ultrafilter α) {g : filter α} (hg : ne_bot (↑f ⊓ g)) : ↑f ≤ g :=
le_of_inf_eq (f.unique inf_le_left hg)
lemma le_of_inf_ne_bot' (f : ultrafilter α) {g : filter α} (hg : ne_bot (g ⊓ f)) : ↑f ≤ g :=
f.le_of_inf_ne_bot $ by rwa inf_comm
lemma inf_ne_bot_iff {f : ultrafilter α} {g : filter α} : ne_bot (↑f ⊓ g) ↔ ↑f ≤ g :=
⟨le_of_inf_ne_bot f, λ h, (inf_of_le_left h).symm ▸ f.ne_bot⟩
lemma disjoint_iff_not_le {f : ultrafilter α} {g : filter α} : disjoint ↑f g ↔ ¬↑f ≤ g :=
by rw [← inf_ne_bot_iff, ne_bot_iff, ne.def, not_not, disjoint_iff]
@[simp] lemma compl_not_mem_iff : sᶜ ∉ f ↔ s ∈ f :=
⟨λ hsc, le_principal_iff.1 $ f.le_of_inf_ne_bot
⟨λ h, hsc $ mem_of_eq_bot$ by rwa compl_compl⟩, compl_not_mem⟩
@[simp] lemma frequently_iff_eventually : (∃ᶠ x in f, p x) ↔ ∀ᶠ x in f, p x :=
compl_not_mem_iff
alias frequently_iff_eventually ↔ _root_.filter.frequently.eventually _
lemma compl_mem_iff_not_mem : sᶜ ∈ f ↔ s ∉ f := by rw [← compl_not_mem_iff, compl_compl]
lemma diff_mem_iff (f : ultrafilter α) : s \ t ∈ f ↔ s ∈ f ∧ t ∉ f :=
inter_mem_iff.trans $ and_congr iff.rfl compl_mem_iff_not_mem
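-- Illustrative usage (example only, not part of mathlib): an ultrafilter
-- decides every set, so `s ∈ f` follows from `sᶜ ∉ f`.
example (h : sᶜ ∉ f) : s ∈ f := compl_not_mem_iff.1 h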
/-- If `sᶜ ∉ f ↔ s ∈ f`, then `f` is an ultrafilter. The other implication is given by
`ultrafilter.compl_not_mem_iff`. -/
def of_compl_not_mem_iff (f : filter α) (h : ∀ s, sᶜ ∉ f ↔ s ∈ f) : ultrafilter α :=
{ to_filter := f,
ne_bot' := ⟨λ hf, by simpa [hf] using h⟩,
le_of_le := λ g hg hgf s hs, (h s).1 $ λ hsc, by exactI compl_not_mem hs (hgf hsc) }
/-- If `f : filter α` is an atom, then it is an ultrafilter. -/
def of_atom (f : filter α) (hf : is_atom f) : ultrafilter α :=
{ to_filter := f,
ne_bot' := ⟨hf.1⟩,
le_of_le := λ g hg, (_root_.is_atom_iff.1 hf).2 g hg.ne }
lemma nonempty_of_mem (hs : s ∈ f) : s.nonempty := nonempty_of_mem hs
lemma ne_empty_of_mem (hs : s ∈ f) : s ≠ ∅ := (nonempty_of_mem hs).ne_empty
@[simp] lemma empty_not_mem : ∅ ∉ f := empty_not_mem f
@[simp] lemma le_sup_iff {u : ultrafilter α} {f g : filter α} : ↑u ≤ f ⊔ g ↔ ↑u ≤ f ∨ ↑u ≤ g :=
not_iff_not.1 $ by simp only [← disjoint_iff_not_le, not_or_distrib, disjoint_sup_right]
@[simp] lemma union_mem_iff : s ∪ t ∈ f ↔ s ∈ f ∨ t ∈ f :=
by simp only [← mem_coe, ← le_principal_iff, ← sup_principal, le_sup_iff]
lemma mem_or_compl_mem (f : ultrafilter α) (s : set α) : s ∈ f ∨ sᶜ ∈ f :=
or_iff_not_imp_left.2 compl_mem_iff_not_mem.2
protected lemma em (f : ultrafilter α) (p : α → Prop) :
(∀ᶠ x in f, p x) ∨ ∀ᶠ x in f, ¬p x :=
f.mem_or_compl_mem {x | p x}
lemma eventually_or : (∀ᶠ x in f, p x ∨ q x) ↔ (∀ᶠ x in f, p x) ∨ ∀ᶠ x in f, q x :=
union_mem_iff
lemma eventually_not : (∀ᶠ x in f, ¬p x) ↔ ¬∀ᶠ x in f, p x := compl_mem_iff_not_mem
lemma eventually_imp : (∀ᶠ x in f, p x → q x) ↔ (∀ᶠ x in f, p x) → ∀ᶠ x in f, q x :=
by simp only [imp_iff_not_or, eventually_or, eventually_not]
lemma finite_sUnion_mem_iff {s : set (set α)} (hs : s.finite) : ⋃₀ s ∈ f ↔ ∃t∈s, t ∈ f :=
finite.induction_on hs (by simp) $ λ a s ha hs his,
by simp [union_mem_iff, his, or_and_distrib_right, exists_or_distrib]
lemma finite_bUnion_mem_iff {is : set β} {s : β → set α} (his : is.finite) :
(⋃i∈is, s i) ∈ f ↔ ∃i∈is, s i ∈ f :=
by simp only [← sUnion_image, finite_sUnion_mem_iff (his.image s), bex_image_iff]
/-- Pushforward for ultrafilters. -/
def map (m : α → β) (f : ultrafilter α) : ultrafilter β :=
of_compl_not_mem_iff (map m f) $ λ s, @compl_not_mem_iff _ f (m ⁻¹' s)
@[simp, norm_cast] lemma coe_map (m : α → β) (f : ultrafilter α) :
(map m f : filter β) = filter.map m ↑f := rfl
@[simp] lemma mem_map {m : α → β} {f : ultrafilter α} {s : set β} :
s ∈ map m f ↔ m ⁻¹' s ∈ f := iff.rfl
@[simp] lemma map_id (f : ultrafilter α) : f.map id = f := coe_injective map_id
@[simp] lemma map_id' (f : ultrafilter α) : f.map (λ x, x) = f := map_id _
@[simp] lemma map_map (f : ultrafilter α) (m : α → β) (n : β → γ) :
(f.map m).map n = f.map (n ∘ m) :=
coe_injective map_map
/-- The pullback of an ultrafilter along an injection whose range is large with respect to the given
ultrafilter. -/
def comap {m : α → β} (u : ultrafilter β) (inj : injective m)
(large : set.range m ∈ u) : ultrafilter α :=
{ to_filter := comap m u,
ne_bot' := u.ne_bot'.comap_of_range_mem large,
le_of_le := λ g hg hgu, by { resetI,
simp only [← u.unique (map_le_iff_le_comap.2 hgu), comap_map inj, le_rfl] } }
@[simp] lemma mem_comap {m : α → β} (u : ultrafilter β) (inj : injective m)
(large : set.range m ∈ u) {s : set α} :
s ∈ u.comap inj large ↔ m '' s ∈ u :=
mem_comap_iff inj large
@[simp, norm_cast] lemma coe_comap {m : α → β} (u : ultrafilter β) (inj : injective m)
(large : set.range m ∈ u) : (u.comap inj large : filter α) = filter.comap m u := rfl
@[simp] lemma comap_id (f : ultrafilter α) (h₀ : injective (id : α → α) := injective_id)
(h₁ : range id ∈ f := by { rw range_id, exact univ_mem}) :
f.comap h₀ h₁ = f :=
coe_injective comap_id
@[simp] lemma comap_comap (f : ultrafilter γ) {m : α → β} {n : β → γ} (inj₀ : injective n)
(large₀ : range n ∈ f) (inj₁ : injective m) (large₁ : range m ∈ f.comap inj₀ large₀)
(inj₂ : injective (n ∘ m) := inj₀.comp inj₁)
(large₂ : range (n ∘ m) ∈ f := by { rw range_comp, exact image_mem_of_mem_comap large₀ large₁ }) :
(f.comap inj₀ large₀).comap inj₁ large₁ = f.comap inj₂ large₂ :=
coe_injective comap_comap
/-- The principal ultrafilter associated to a point `x`. -/
instance : has_pure ultrafilter :=
⟨λ α a, of_compl_not_mem_iff (pure a) $ λ s, by simp⟩
@[simp] lemma mem_pure {a : α} {s : set α} : s ∈ (pure a : ultrafilter α) ↔ a ∈ s := iff.rfl
@[simp] lemma coe_pure (a : α) : ↑(pure a : ultrafilter α) = (pure a : filter α) := rfl
@[simp] lemma map_pure (m : α → β) (a : α) : map m (pure a) = pure (m a) := rfl
@[simp] lemma comap_pure {m : α → β} (a : α) (inj : injective m) (large) :
comap (pure $ m a) inj large = pure a :=
coe_injective $ comap_pure.trans $
by rw [coe_pure, ←principal_singleton, ←image_singleton, preimage_image_eq _ inj]
lemma pure_injective : injective (pure : α → ultrafilter α) :=
λ a b h, filter.pure_injective (congr_arg ultrafilter.to_filter h : _)
instance [inhabited α] : inhabited (ultrafilter α) := ⟨pure default⟩
instance [nonempty α] : nonempty (ultrafilter α) := nonempty.map pure infer_instance
lemma eq_pure_of_finite_mem (h : s.finite) (h' : s ∈ f) : ∃ x ∈ s, f = pure x :=
begin
rw ← bUnion_of_singleton s at h',
rcases (ultrafilter.finite_bUnion_mem_iff h).mp h' with ⟨a, has, haf⟩,
exact ⟨a, has, eq_of_le (filter.le_pure_iff.2 haf)⟩
end
lemma eq_pure_of_finite [finite α] (f : ultrafilter α) : ∃ a, f = pure a :=
(eq_pure_of_finite_mem finite_univ univ_mem).imp $ λ a ⟨_, ha⟩, ha
lemma le_cofinite_or_eq_pure (f : ultrafilter α) : (f : filter α) ≤ cofinite ∨ ∃ a, f = pure a :=
or_iff_not_imp_left.2 $ λ h,
let ⟨s, hs, hfin⟩ := filter.disjoint_cofinite_right.1 (disjoint_iff_not_le.2 h),
⟨a, has, hf⟩ := eq_pure_of_finite_mem hfin hs
in ⟨a, hf⟩
/-- Monadic bind for ultrafilters, coming from the one on filters
defined in terms of map and join.-/
def bind (f : ultrafilter α) (m : α → ultrafilter β) : ultrafilter β :=
of_compl_not_mem_iff (bind ↑f (λ x, ↑(m x))) $ λ s,
by simp only [mem_bind', mem_coe, ← compl_mem_iff_not_mem, compl_set_of, compl_compl]
instance has_bind : has_bind ultrafilter := ⟨@ultrafilter.bind⟩
instance functor : functor ultrafilter := { map := @ultrafilter.map }
instance monad : monad ultrafilter := { map := @ultrafilter.map }
section
local attribute [instance] filter.monad filter.is_lawful_monad
instance is_lawful_monad : is_lawful_monad ultrafilter :=
{ id_map := assume α f, coe_injective (id_map f.1),
pure_bind := assume α β a f, coe_injective (pure_bind a (coe ∘ f)),
bind_assoc := assume α β γ f m₁ m₂, coe_injective (filter_eq rfl),
bind_pure_comp_eq_map := assume α β f x, coe_injective (bind_pure_comp_eq_map f x.1) }
end
/-- The ultrafilter lemma: Any proper filter is contained in an ultrafilter. -/
alias exists_le ← _root_.filter.exists_ultrafilter_le
/-- Construct an ultrafilter extending a given filter.
The ultrafilter lemma is the assertion that such a filter exists;
we use the axiom of choice to pick one. -/
noncomputable def of (f : filter α) [ne_bot f] : ultrafilter α :=
classical.some (exists_le f)
lemma of_le (f : filter α) [ne_bot f] : ↑(of f) ≤ f := classical.some_spec (exists_le f)
lemma of_coe (f : ultrafilter α) : of ↑f = f :=
coe_inj.1 $ f.unique (of_le f)
lemma exists_ultrafilter_of_finite_inter_nonempty (S : set (set α))
(cond : ∀ T : finset (set α), (↑T : set (set α)) ⊆ S → (⋂₀ (↑T : set (set α))).nonempty) :
∃ F : ultrafilter α, S ⊆ F.sets :=
begin
haveI : ne_bot (generate S) := generate_ne_bot_iff.2
(λ t hts ht, ht.coe_to_finset ▸ cond ht.to_finset (ht.coe_to_finset.symm ▸ hts)),
exact ⟨of (generate S), λ t ht, (of_le $ generate S) $ generate_sets.basic ht⟩
end
end ultrafilter
namespace filter
variables {f : filter α} {s : set α} {a : α}
open ultrafilter
lemma is_atom_pure : is_atom (pure a : filter α) := (pure a : ultrafilter α).is_atom
protected lemma ne_bot.le_pure_iff (hf : f.ne_bot) : f ≤ pure a ↔ f = pure a :=
⟨ultrafilter.unique (pure a), le_of_eq⟩
@[simp] lemma lt_pure_iff : f < pure a ↔ f = ⊥ := is_atom_pure.lt_iff
lemma le_pure_iff' : f ≤ pure a ↔ f = ⊥ ∨ f = pure a := is_atom_pure.le_iff
@[simp] lemma Iic_pure (a : α) : Iic (pure a : filter α) = {⊥, pure a} := is_atom_pure.Iic_eq
lemma mem_iff_ultrafilter : s ∈ f ↔ ∀ g : ultrafilter α, ↑g ≤ f → s ∈ g :=
begin
refine ⟨λ hf g hg, hg hf, λ H, by_contra $ λ hf, _⟩,
set g : filter ↥sᶜ := comap coe f,
haveI : ne_bot g := comap_ne_bot_iff_compl_range.2 (by simpa [compl_set_of]),
simpa using H ((of g).map coe) (map_le_iff_le_comap.mpr (of_le g))
end
lemma le_iff_ultrafilter {f₁ f₂ : filter α} : f₁ ≤ f₂ ↔ ∀ g : ultrafilter α, ↑g ≤ f₁ → ↑g ≤ f₂ :=
⟨λ h g h₁, h₁.trans h, λ h s hs, mem_iff_ultrafilter.2 $ λ g hg, h g hg hs⟩
/-- A filter equals the intersection of all the ultrafilters which contain it. -/
lemma supr_ultrafilter_le_eq (f : filter α) :
(⨆ (g : ultrafilter α) (hg : ↑g ≤ f), (g : filter α)) = f :=
eq_of_forall_ge_iff $ λ f', by simp only [supr_le_iff, ← le_iff_ultrafilter]
/-- The `tendsto` relation can be checked on ultrafilters. -/
lemma tendsto_iff_ultrafilter (f : α → β) (l₁ : filter α) (l₂ : filter β) :
tendsto f l₁ l₂ ↔ ∀ g : ultrafilter α, ↑g ≤ l₁ → tendsto f g l₂ :=
by simpa only [tendsto_iff_comap] using le_iff_ultrafilter
lemma exists_ultrafilter_iff {f : filter α} : (∃ (u : ultrafilter α), ↑u ≤ f) ↔ ne_bot f :=
⟨λ ⟨u, uf⟩, ne_bot_of_le uf, λ h, @exists_ultrafilter_le _ _ h⟩
lemma forall_ne_bot_le_iff {g : filter α} {p : filter α → Prop} (hp : monotone p) :
(∀ f : filter α, ne_bot f → f ≤ g → p f) ↔ ∀ f : ultrafilter α, ↑f ≤ g → p f :=
begin
refine ⟨λ H f hf, H f f.ne_bot hf, _⟩,
introsI H f hf hfg,
exact hp (of_le f) (H _ ((of_le f).trans hfg))
end
section hyperfilter
variables (α) [infinite α]
/-- The ultrafilter extending the cofinite filter. -/
noncomputable def hyperfilter : ultrafilter α := ultrafilter.of cofinite
variable {α}
lemma hyperfilter_le_cofinite : ↑(hyperfilter α) ≤ @cofinite α :=
ultrafilter.of_le cofinite
@[simp] lemma bot_ne_hyperfilter : (⊥ : filter α) ≠ hyperfilter α :=
(by apply_instance : ne_bot ↑(hyperfilter α)).1.symm
theorem nmem_hyperfilter_of_finite {s : set α} (hf : s.finite) : s ∉ hyperfilter α :=
λ hy, compl_not_mem hy $ hyperfilter_le_cofinite hf.compl_mem_cofinite
alias nmem_hyperfilter_of_finite ← _root_.set.finite.nmem_hyperfilter
theorem compl_mem_hyperfilter_of_finite {s : set α} (hf : set.finite s) :
sᶜ ∈ hyperfilter α :=
compl_mem_iff_not_mem.2 hf.nmem_hyperfilter
alias compl_mem_hyperfilter_of_finite ← _root_.set.finite.compl_mem_hyperfilter
theorem mem_hyperfilter_of_finite_compl {s : set α} (hf : set.finite sᶜ) :
s ∈ hyperfilter α :=
compl_compl s ▸ hf.compl_mem_hyperfilter
end hyperfilter
end filter
namespace ultrafilter
open filter
variables {m : α → β} {s : set α} {g : ultrafilter β}
lemma comap_inf_principal_ne_bot_of_image_mem (h : m '' s ∈ g) :
(filter.comap m g ⊓ 𝓟 s).ne_bot :=
filter.comap_inf_principal_ne_bot_of_image_mem g.ne_bot h
/-- Ultrafilter extending the inf of a comapped ultrafilter and a principal ultrafilter. -/
noncomputable def of_comap_inf_principal (h : m '' s ∈ g) : ultrafilter α :=
@of _ (filter.comap m g ⊓ 𝓟 s) (comap_inf_principal_ne_bot_of_image_mem h)
lemma of_comap_inf_principal_mem (h : m '' s ∈ g) : s ∈ of_comap_inf_principal h :=
begin
let f := filter.comap m g ⊓ 𝓟 s,
haveI : f.ne_bot := comap_inf_principal_ne_bot_of_image_mem h,
have : s ∈ f := mem_inf_of_right (mem_principal_self s),
exact le_def.mp (of_le _) s this
end
lemma of_comap_inf_principal_eq_of_map (h : m '' s ∈ g) :
(of_comap_inf_principal h).map m = g :=
begin
let f := filter.comap m g ⊓ 𝓟 s,
haveI : f.ne_bot := comap_inf_principal_ne_bot_of_image_mem h,
apply eq_of_le,
calc filter.map m (of f) ≤ filter.map m f : map_mono (of_le _)
... ≤ (filter.map m $ filter.comap m g) ⊓ filter.map m (𝓟 s) : map_inf_le
... = (filter.map m $ filter.comap m g) ⊓ (𝓟 $ m '' s) : by rw map_principal
... ≤ g ⊓ (𝓟 $ m '' s) : inf_le_inf_right _ map_comap_le
... = g : inf_of_le_left (le_principal_iff.mpr h)
end
end ultrafilter
|
function [lat2,lon2,h2]=direct(lat1,lon1,h1,az,va,d,a,e2)
% DIRECT Computes direct (forward) geodetic problem.
% Determines coordinates of 2nd station given ellipsoidal
% coordinates of 1st station and azimuth, vertical angle
% and distance from 1st to 2nd station. If az,va are local
% astronomic, lat,lon must also be astronomic. If az,va
% are local geodetic, lat,lon must be local geodetic.
% Non-vectorized. See also INVERSE.
% Version: 2011-02-19
% Usage:   [lat2,lon2,h2]=direct(lat1,lon1,h1,az,va,d,a,e2)
% [lat2,lon2,h2]=direct(lat1,lon1,h1,az,va,d)
% Input: lat1 - ellipsoidal latitude of 1st station (rads)
% lon1 - ellipsoidal longitude of 1st station (rads)
% h1 - ellipsoidal ht. of 1st station (m)
% az - azimuth from station 1 to 2 (rads)
% va - vertical angle from 1 to 2 (rads)
% d - distance from 1 to 2 (m)
% a - ref. ellipsoid major semi-axis (m); default GRS80
% e2 - ref. ellipsoid eccentricity squared; default GRS80
% Output: lat2 - ellipsoidal latitude of 2nd station (rads)
% lon2 - ellipsoidal longitude of 2nd station (rads)
% h2 - ellipsoidal ht. of 2nd station (m)
% Copyright (c) 2011, Michael R. Craymer
% All rights reserved.
% Email: [email protected]
if nargin ~= 6 && nargin ~= 8
warning('Incorrect number of input arguments');
return
end
if nargin == 6
[a,b,e2]=refell('grs80');
end
[X1,Y1,Z1]=ell2xyz(lat1,lon1,h1,a,e2);
[dx,dy,dz]=sph2xyz(az,va,d);
[dX,dY,dZ]=lg2ct(dx,dy,dz,lat1,lon1);
X2=X1+dX;
Y2=Y1+dY;
Z2=Z1+dZ;
[lat2,lon2,h2]=xyz2ell(X2,Y2,Z2,a,e2);
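% Example usage (illustrative values only; GRS80 defaults via the
% 6-argument form):
%   [lat2,lon2,h2]=direct(45*pi/180,-75*pi/180,100,pi/4,0.01,1000);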
|
<center>
<h1> <b>Numerical Methods</b> </h1>
<h3> <b>Second semester 2021</b> </h3>
<br>
<h1> <b>Practical 2: Time-stepping schemes</b> </h1>
<h3> Course head: Pablo Dmitruk </h3>
<br>
<h3> Submission deadline: September 24, 2021, 23:59 </h3>
</center>
### Submitted by: **FILL IN YOUR NAMES**
- [Exercises](#ejercicios)
- [Theory review](#explicacion)
<a name="ejercicios"></a>
# **Exercises**
## **Problem 1: Order reduction**
Rewrite the following initial value problems as first-order systems.
\begin{alignat}{7}
\mathrm{a)}& \quad \ddot{y} + \mu \dot{y} + \omega^2_0 y &&= \cos(t), \qquad \qquad &&y(0) = y_0, \qquad \dot{y}(0) = \dot{y}_0; \\
\mathrm{b)}& \quad \ddot{y} - \mu(1-t^2) \dot{y} + y &&= 0, \qquad \qquad &&y(0) = y_0, \qquad \dot{y}(0) = \dot{y_0}; \\
\mathrm{c)}& \quad \dddot{y} + y \ddot{y} + \beta\left(1 - \dot{y}^2 \right) &&= h(t), \qquad \qquad &&y(0) = y_0, \qquad \dot{y}(0) = \dot{y_0}, \qquad \ddot{y}(0) = \ddot{y_0}.
\end{alignat}
**Your solution here**
---
## **Problem 2: Derivation of multistep methods**
Prove the consistency of the following time-integration schemes and state the order of accuracy of each:
$\text{a)}$ two-step Adams-Bashforth:
\begin{equation*}
y^{n+1} = y^n + \frac{k}{2} (3 f^n - f^{n-1});
\end{equation*}
$\text{b)}$ one-step Adams-Moulton (of maximal order):
\begin{equation}
y^{n+1} = y^n + \frac{k}{2} (f^{n+1} + f^n);
\end{equation}
$\text{c)}$ two-step Adams-Moulton:
\begin{equation*}
y^{n+1} = y^n + \frac{k}{12} (5f^{n+1} + 8f^n - f^{n-1}).
\end{equation*}
**Your solution here**
---
## **Problem 3: Order verification for multistep methods**
For each of the methods analyzed in the previous problem:
$\text{a)}$ numerically integrate the initial value problem
\begin{equation*}
\dot{y} = -y + \cos(t), \qquad \qquad y(0) = 3/2,
\end{equation*}
for $0\le t \le 6$ using the time steps $k \in \{1\times 10^{-1}, 3\times 10^{-2}, 1 \times 10^{-2}, 3 \times 10^{-3}, 1 \times 10^{-3} \}$. You may use the fact that this problem admits $y=e^{-t} + (\sin(t) + \cos(t))/2$ as an analytic solution to initialize the first steps of the integration;
<br>
$\text{b)}$ plot the infinity norm of the error as a function of $k$ and verify that you find the expected order of convergence.
```python
# Your solution here
```
---
## **Problem 4: Order verification for predictor-corrector methods**
Repeat the study carried out in problem 3 for the following predictor-corrector time integrators:
$\text{a)}$ Matsuno;
$\text{b)}$ Heun.
In both cases consider only one iteration of the corrector step.
```python
# Your solution here
```
---
## **Problem 5: Order verification for Runge-Kutta**
Repeat what was done in problems 3 and 4 for the following multistage (Runge-Kutta) integrators:
$a)$ two-stage midpoint Runge-Kutta (RK2);
$b)$ four-stage Runge-Kutta (RK4).
```python
# Your solution here
```
---
## **Problem 6: Integration of the damped harmonic oscillator**
Integrate the damped oscillator equation
\begin{equation*}
\ddot{x} + \mu \dot{x} + \omega^2_0 x = 0, \qquad \qquad x(0) = x_0, \qquad \dot{x}(0) = \dot{x}_0,
\end{equation*}
for $0\le t \le 5$ using:
$\text{a)}$ the Matsuno method (with a single iteration of the prediction-correction process);
$\text{b)}$ the midpoint Runge-Kutta method (RK2).
Consider in particular $\omega_0 = 2$, $\mu=0.1$, $x_0 = 1$ and $\dot{x}_0 = 0$. Integrate the problem for $k \in \{10^{-1}, 10^{-2}, 10^{-3}\}$. Using the analytic solution $x(t) = e^{-\mu t/2} \bigg(\cos(\tilde{\omega}t) + \mu \sin(\tilde{\omega}t)/2 \tilde \omega\bigg)$, with $\tilde \omega = (4\omega_0^2-\mu^2)^{1/2}/2$, verify for both integrators that the errors in the final position $x(5)$ and the final velocity $\dot{x}(5)$ decrease at the expected rate as the time step is reduced.
```python
# Your solution here
```
---
## **Problem 7: Stability when integrating an oscillator**
Consider again the initial value problem of the previous exercise (i.e. the damped harmonic oscillator), but this time take $\omega_0 = 30$, $\mu=0.5$, $t_f=1$ and the time steps $k \in \{ 8 \times 10^{-2}, 4 \times 10^{-2}, 1 \times 10^{-2} \}$.
Note that the problem involves two well-separated time scales, one of magnitude $1/30$ ($1/\omega_0$) and another of magnitude $2$ ($1/\mu$). It is not unusual in physics to run into a problem with multiple scales where we are interested in the dynamics associated with only one of them. For example, in this problem we might only be interested in studying the energy dissipation rate, for which the $1/30$ time scale is not decisive.
For these parameters, integrate the problem using the following second-order time integrators:
$\text{a)}$ Runge-Kutta (midpoint);
$\text{b)}$ second-order Adams-Moulton. _Hint: you may first show that the system of equations_
\begin{equation*}
\mathbf{y}^{n+1} = \mathbf{y}^n + \frac{k}{2} \left( A \mathbf{y}^n + A \mathbf{y}^{n+1} \right),
\end{equation*}
can be solved as
\begin{equation*}
\mathbf{y}^{n+1} = \tilde{A}^{-1} \left(\mathbf{y}^n + \frac{k}{2} A \mathbf{y}^n\right),
\end{equation*}
_with $\tilde{A} = \mathbb{I} - k A/2$, where $\mathbb{I}$ is the identity matrix._
In both cases plot the analytic solution and compare it with the numerically obtained solution. _Hint: plot only ordinates in the interval $(-3/2,\ 3/2)$._
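For concreteness, a minimal sketch of the hint (one possible implementation under the stated parameters, not the required solution), which advances the oscillator written as a first-order system by solving one $2\times 2$ linear system per step:
```python
import numpy as np

# Trapezoidal (second-order Adams-Moulton) steps for y' = A y,
# i.e. y^{n+1} = (I - kA/2)^{-1} (I + kA/2) y^n, as in the hint.
omega0, mu, k, tf = 30.0, 0.5, 1e-2, 1.0
A = np.array([[0.0, 1.0],
              [-omega0**2, -mu]])  # companion matrix of x'' + mu x' + omega0^2 x = 0
I = np.eye(2)
y = np.array([1.0, 0.0])           # initial condition [x0, xdot0]
for _ in range(int(round(tf / k))):
    y = np.linalg.solve(I - 0.5 * k * A, y + 0.5 * k * (A @ y))
print(y)  # state [x, xdot] at t_f
```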
```python
# Your solution here
```
---
## **Problem 8: Stability regions**
Analytically find the stability regions for the following predictor-corrector methods:
$\text{a)}$ Matsuno;
$\text{b)}$ Heun.
Plot the stability regions and compare them with those of the forward Euler and second-order Adams-Moulton methods, respectively. _Hint: the following code plots in blue the points that satisfy the inequality $| 1 - \bar{\lambda} | < 1$_:
```python
import numpy as np
import matplotlib.pyplot as plt
# Create a square grid of Nx x Ny points on
# (x0,y0) x (x1,y1) and define a complex variable on it
Nx, Ny = 1000, 1000
x0, x1 = -2, 2
y0, y1 = -2, 2
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
lamda = x[:,None] + 1j*y[None,:] # Define the complex variable
arg = 1 - lamda # Argument whose modulus we test
# Plot
fig, ax = plt.subplots(1, 1, figsize=(8,4), constrained_layout=True)
ax.imshow((np.abs(arg) < 1).T, extent=[x[0], x[-1], y[0], y[-1]],
cmap="GnBu", vmin=0, vmax=1);
```
**Your solution here**
```python
# Your solution here
```
---
## **Problem 9: Numerical verification of stability regions**
Numerically verify the stability regions found in problem 8. To do so you may build on the following function which, given a 2D array $\lambda_{pq} = x_{p} + i y_{q}$ (where $x$ and $y$ are arrays of real numbers), returns an array with `True` where $\lambda$ is stable and `False` where it is not for the forward Euler scheme:
```python
def estabilidad_euler(dt, lamda):
pasos = 50
estabilidad = np.ones_like(lamda, dtype=bool)
for i in range(lamda.shape[0]):
for j in range(lamda.shape[1]):
y = 1
for n in range(0, pasos):
y = y + dt*(lamda[i,j]*y)
if np.abs(y) > 2:
estabilidad[i,j] = False
break
return estabilidad
```
```python
# Your solution here
```
---
## **Problem 10: (Nonlinear) pendulum**
The task in this exercise is to write two time integrators of different orders for the pendulum equation without the small-amplitude approximation. We will also account for friction with the air. This problem is governed by the equation
\begin{equation*}
\ddot \theta + \mu \dot \theta + \omega_0^2 \mathrm{sen}(\theta) = 0, \qquad \qquad \theta(0) = \theta_0, \qquad \dot \theta(0) = \dot \theta_0, \qquad 0 \le t \le t_f,
\end{equation*}
where, as we saw in the previous practical, $\omega_0 = \sqrt{g/\ell}$ is the natural frequency of the system ($g$ and $\ell$ are the gravitational acceleration and the length of the inextensible string from which the mass hangs, respectively), and $\mu$ is the magnitude (in units of frequency) of the friction with the medium.
With the tools seen in this practical we can solve this second-order ordinary equation by writing it as a system of two first-order equations:
$\text{a)}$ Write the above equation as a system of first-order equations.
**Your solution here**
$\text{b)}$ Write two functions, `rk2` and `rk4`, which, given $\theta_0$ and $\dot \theta_0$, $\omega_0$, $\mu$, the time step and $t_f$, solve the pendulum equation at the requested instants. As their names indicate, `rk2` must perform the integration using a second-order (midpoint) Runge-Kutta method, while `rk4` must do the same using a fourth-order method.
```python
import numpy as np
import matplotlib.pyplot as plt
def rk2(th0, omega0, mu, dt, tf):
"""
Integra la ecuación del péndulo usando un método de Runge-Kutta
de segundo orden:
Entrada:
- th0: Arreglo de 2 elementos. th[0] es la posición y th[1] la velocidad
- omega0: frecuencia natural del péndulo.
- mu: coeficiente de roce del péndulo con el medio circundante.
- dt: paso temporal a utilizar.
- tf: tiempo hasta el cual se desea integrar.
Salida:
- th: Arreglo de (N, 2), con N la cantidad de pasos temporales
(incluye la condición inicial). th[:,0] contiene la posición y
th[:,1] la velocidad.
"""
# Completar con el integrador RK2
# ...
# ...
# ...
# Finalmente, convierto theta al intervalo [-pi,pi)
th[:, 0] = np.arctan2(np.sin(th[:,0]), np.cos(th[:,0]))
return th
def rk4(th0, omega0, mu, dt, tf):
"""
Integra la ecuación del péndulo usando un método de Runge-Kutta
de cuarto orden.
Entrada:
- th0: Arreglo de 2 elementos. th[0] es la posición y th[1] la velocidad
- omega0: frecuencia natural del péndulo.
- mu: coeficiente de roce del péndulo con el medio circundante.
- dt: paso temporal a utilizar.
- tf: tiempo hasta el cual se desea integrar.
Salida:
- th: Arreglo de (N, 2), con N la cantidad de pasos temporales
(incluye la condición inicial). th[:,0] contiene la posición y
th[:,1] la velocidad.
"""
# Completar con el integrador RK4
# ...
# ...
# ...
# Finalmente, convierto theta al intervalo [-pi,pi)
th[:, 0] = np.arctan2(np.sin(th[:,0]), np.cos(th[:,0]))
return th
# DO NOT EDIT BELOW THIS LINE
#-------------------------------------------------------------------------------
def estetizar_graficos(ax, titulo, etiqueta_x, etiqueta_y):
"""
Dado el par de ejes `ax` coloca el título y las etiquetas de los ejes
x e y. Además de existir crea también las leyendas y agregra una grilla.
"""
ax.legend()
ax.grid()
ax.set_title(titulo)
ax.set_xlabel(etiqueta_x)
ax.set_ylabel(etiqueta_y)
```
### Position and velocity without friction
Unlike the oscillator solved in the previous practical, this time we do not have an analytic solution (at least in terms of elementary functions) against which to compare in order to verify that our solution works.
In such cases, one way to check whether our _solver_ behaves reasonably is to analyze the solutions it returns in limiting cases, situations in which we can exploit our physical knowledge of the problem.
$\text{c)}$ Think qualitatively about what the motion should look like in the following cases:
<ol type="i">
<li> $\theta_0 = 0.1$, $\dot \theta_0 = 0$;</li>
<li> $\theta_0 = \pi - 0.1$, $\dot \theta_0 = 0$;</li>
<li> $\theta_0 = \pi$, $\dot \theta_0 = 0$;</li>
<li> $\theta_0 = 0$, $\dot \theta_0 = 2\omega_0 + 0.01$ ($2\omega_0$ corresponds to the maximum difference in gravitational potential energy between any two points).</li>
</ol>
Then, for both Runge-Kutta methods, integrate the equation for each initial condition with $\omega_0 = 2$, $\mu=0$, $k=10^{-3}$ and $t_f = 10$. Plot the resulting position and velocity. In every case verify that you recover the expected behavior.
```python
# Position and velocity without friction
#-------------------------------
# Figure and axes
fig, axs = plt.subplots(2, 4, figsize=(16,4), constrained_layout=True)
dt = # FILL IN: time step
# Integrate for the first set of initial conditions
th_2 = # FILL IN: store in th_2 the result of integrating with RK2
th_4 = # FILL IN: store in th_4 the result of integrating with RK4
#-------------------------------------------------------------------------------
# DO NOT EDIT THIS SECTION
#-------------------------------------------------------------------------------
t = np.arange(th_2.shape[0])*dt
axs[0,0].plot(t, th_2[:,0], label="RK2")
axs[0,0].plot(t, th_4[:,0], label="RK4")
axs[1,0].plot(t, th_2[:,1], label="RK2")
axs[1,0].plot(t, th_4[:,1], label="RK4")
estetizar_graficos(axs[0,0], r"$\theta_0 = 0.1 \ \wedge \ \dot \theta_0 = 0$",
"$t$", r"$\theta$")
estetizar_graficos(axs[1,0], "", "$t$", r"$\dot \theta$")
#-------------------------------------------------------------------------------
# Integrate for the second set of initial conditions
th_2 = # FILL IN: store in th_2 the result of integrating with RK2
th_4 = # FILL IN: store in th_4 the result of integrating with RK4
#-------------------------------------------------------------------------------
# DO NOT EDIT THIS SECTION
#-------------------------------------------------------------------------------
t = np.arange(th_2.shape[0])*dt
axs[0,1].plot(t, th_2[:,0], label="RK2")
axs[0,1].plot(t, th_4[:,0], label="RK4")
axs[1,1].plot(t, th_2[:,1], label="RK2")
axs[1,1].plot(t, th_4[:,1], label="RK4")
estetizar_graficos(axs[0,1], r"$\theta_0 = \pi-0.1 \ \wedge \ \dot \theta_0=0$",
"$t$", r"$\theta$")
estetizar_graficos(axs[1,1], "", "$t$", r"$\dot \theta$")
#-------------------------------------------------------------------------------
# Integrate for the third set of initial conditions
th_2 = # FILL IN: store in th_2 the result of integrating with RK2
th_4 = # FILL IN: store in th_4 the result of integrating with RK4
#-------------------------------------------------------------------------------
# DO NOT EDIT THIS SECTION
#-------------------------------------------------------------------------------
t = np.arange(th_2.shape[0])*dt
axs[0,2].plot(t, th_2[:,0], label="RK2")
axs[0,2].plot(t, th_4[:,0], label="RK4")
axs[1,2].plot(t, th_2[:,1], label="RK2")
axs[1,2].plot(t, th_4[:,1], label="RK4")
estetizar_graficos(axs[0,2], r"$\theta_0 = \pi \ \wedge \ \dot \theta_0 = 0$",
"$t$", r"$\theta$")
estetizar_graficos(axs[1,2], "", "$t$", r"$\dot \theta$")
#-------------------------------------------------------------------------------
# Integrate for the fourth set of initial conditions
th_2 = # FILL IN: store in th_2 the result of integrating with RK2
th_4 = # FILL IN: store in th_4 the result of integrating with RK4
#-------------------------------------------------------------------------------
# DO NOT EDIT THIS SECTION
#-------------------------------------------------------------------------------
t = np.arange(th_2.shape[0])*dt
axs[0,3].plot(t, th_2[:,0], label="RK2")
axs[0,3].plot(t, th_4[:,0], label="RK4")
axs[1,3].plot(t, th_2[:,1], label="RK2")
axs[1,3].plot(t, th_4[:,1], label="RK4")
estetizar_graficos(axs[0,3], r"$\theta_0 = 0 \ \wedge \ " +\
r"\dot \theta_0 = 2\omega_0 + 0,01$", "$t$", r"$\theta$")
estetizar_graficos(axs[1,3], "", "$t$", r"$\dot \theta$");
```
### Energy conservation
Besides verifying that the motion is qualitatively consistent with our physical knowledge, for the frictionless case we know that the mechanical energy must be conserved, which we can define as
\begin{equation*}
E(t) = \dot \theta(t)^2 + 4 \omega_0^2 \sin^2\left(\frac{\theta(t)}{2} \right).
\end{equation*}
$\text{d)}$ Write a function `energia` that takes $\theta$, $\dot \theta$ and $\omega_0$ as inputs and returns the mechanical energy at each instant. Using this function, verify that energy conservation holds approximately at all times, i.e., $|\Delta E| = |E(t) - E(0)| \ll 1$, for all the cases studied in the previous item, this time taking $t_f=75$.
```python
# Energy-conservation study for the frictionless case
def energia(th, omega):
    """
    Computes the mechanical energy at every time.
    Input:
        - th: Array of shape (N,2) with the position ([:,0]) and
          the angular velocity ([:,1]).
        - omega: Natural frequency of the system.
    Output:
        - energia: Array of shape (N).
    """
    # FILL IN
    return energia
# Figures, axes and time step
fig, axs = plt.subplots(1, 4, figsize=(16,4), constrained_layout=True)
dt = 1e-3
offset = int(2/1e-3)
# FILL IN: compute the energy variation at every instant using RK2
# for the first set of initial conditions and assign it to the variable
# DeltaE_2.
# FILL IN: compute the energy variation at every instant using RK4
# for the first set of initial conditions and assign it to the variable
# DeltaE_4.
# ----------------------------- DO NOT EDIT ------------------------------------
t = np.arange(0, DeltaE_2.shape[0])*dt
axs[0].semilogy(t[offset:], DeltaE_2[offset:], label="RK2")
axs[0].semilogy(t[offset:], DeltaE_4[offset:], label="RK4")
estetizar_graficos(axs[0], r"$\theta_0 = 0.1 \ \wedge \ \dot{\theta}_0 = 0$",
"$t$", r"$|\Delta E|$")
# ------------------------------------------------------------------------------
# FILL IN: compute the energy variation at every instant using RK2
# for the second set of initial conditions and assign it to the variable
# DeltaE_2.
# FILL IN: compute the energy variation at every instant using RK4
# for the second set of initial conditions and assign it to the variable
# DeltaE_4.
# ----------------------------- DO NOT EDIT ------------------------------------
t = np.arange(0, DeltaE_2.shape[0])*dt
axs[1].semilogy(t[offset:], DeltaE_2[offset:], label="RK2")
axs[1].semilogy(t[offset:], DeltaE_4[offset:], label="RK4")
estetizar_graficos(axs[1], r"$\theta_0 = \pi-0.1 \ \wedge \ \dot{\theta}_0 = 0$",
"$t$", r"$|\Delta E|$")
# ------------------------------------------------------------------------------
# FILL IN: compute the energy variation at every instant using RK2
# for the third set of initial conditions and assign it to the variable
# DeltaE_2.
# FILL IN: compute the energy variation at every instant using RK4
# for the third set of initial conditions and assign it to the variable
# DeltaE_4.
# ----------------------------- DO NOT EDIT ------------------------------------
t = np.arange(0, DeltaE_2.shape[0])*dt
axs[2].semilogy(t[offset:], DeltaE_2[offset:], label="RK2")
axs[2].semilogy(t[offset:], DeltaE_4[offset:], label="RK4")
estetizar_graficos(axs[2], r"$\theta_0 = \pi \ \wedge \ \dot{\theta}_0 = 0$",
"$t$", r"$|\Delta E|$")
# ------------------------------------------------------------------------------
# FILL IN: compute the energy variation at every instant using RK2
# for the fourth set of initial conditions and assign it to the variable
# DeltaE_2.
# FILL IN: compute the energy variation at every instant using RK4
# for the fourth set of initial conditions and assign it to the variable
# DeltaE_4.
# ----------------------------- DO NOT EDIT ------------------------------------
t = np.arange(0, DeltaE_2.shape[0])*dt
axs[3].semilogy(t[offset:], DeltaE_2[offset:], label="RK2")
axs[3].semilogy(t[offset:], DeltaE_4[offset:], label="RK4")
estetizar_graficos(axs[3], r"$\theta_0 = 0 \ \wedge \ " +\
                   r"\dot \theta_0 = 2\omega_0 + 0.01$", "$t$", r"$|\Delta E|$");
# ------------------------------------------------------------------------------
```
### Reproducing the true period
Also using energy conservation, and provided the motion starts from rest, we can obtain an analytic expression for the period of the motion:
\begin{equation*}
T = \frac{4}{\omega_0} K\left( \sin^2\left( \frac{\theta_0}{2}\right)\right),
\end{equation*}
where $\theta_0$ is the initial position and $K$ is the complete elliptic integral of the first kind.
$\text{e)}$ For each time integrator, compute the difference between the period given by the analytic expression and the one obtained from your simulations for the set of initial positions $[\boldsymbol \theta_0]_j = 0.01 + j\Delta \theta_0$, $0 \le j < 20$ and $\Delta \theta_0 = 0.1234$ (that is, 20 equispaced points between $0.01$ and $3\pi/4$). In every case take the initial angular velocity to be zero. Analyze in particular the following cases:
<ol type="i">
<li>$ \omega_0 = 1, \ \mu=0, \ k=1 \times 10^{-2},\ t_f = 250 $; </li>
<li>$ \omega_0 = 10, \ \mu=0, \ k=1 \times 10^{-2},\ t_f = 250 $; </li>
<li>$ \omega_0 = 10, \ \mu=0, \ k=1 \times 10^{-3},\ t_f = 250 $. </li>
</ol>
As an aid, we provide functions that compute both periods.
```python
def periodo_analitico(th0, omega0):
"""
Calcula el período del péndulo sin roce dada la posición inicial y la
frecuencia natural del sistema.
Entradas:
- th0: Número de punto flotante con la posición inicial de la masa.
- omega0: Frecuencia natural del sistema.
Salida:
- Período del péndulo.
"""
import scipy.special as spspecial
return 4/omega0*spspecial.ellipk(np.sin(th0/2)**2)
def periodo_datos(th, dt):
"""
Calcula el período del péndulo sin roce dada una serie de posiciones
theta(t) y el paso temporal entre muestras.
Entradas:
- th: Vector de N elementos con la posición de la masa para N instantes
consecutivos.
- dt: Espaciamemiento entre muestras.
Salida:
- Período del péndulo.
"""
import scipy.signal as spsignal
    # Use a Blackman-Harris window to reduce Gibbs oscillations
    con_ventana = th*spsignal.blackmanharris(th.size)
    # Zero-pad to interpolate the spectrum
    ceros = 10000
    T_previo = 0
    T_actual = 1
    # Try increasing padding levels until converging to 3 decimal digits
while np.abs((T_previo-T_actual)/T_actual) > 1e-3:
ceros = 2*ceros
relleno = np.pad(con_ventana, ceros, mode="constant")
RELLENO = np.fft.rfft(relleno)
f = np.fft.rfftfreq(relleno.size, d=dt)
        # Find the FFT peak and extract its frequency. Convert to T.
ind = np.argmax(np.abs(RELLENO))
T_previo = T_actual
T_actual = 1/f[ind]
return T_actual
# Figure and axes
fig, axs = plt.subplots(1, 3, figsize=(12,4), constrained_layout=True)
# Time vector
dt, tf = 1e-2, 250
pasos = int(round(tf/dt))
t = np.arange(0, pasos+1)*dt
# Set of initial conditions and period differences
N = 20
conds_ini = np.linspace(1e-2, 3*np.pi/4, N)
DeltaT_2 = np.zeros(N)
DeltaT_4 = np.zeros(N)
# Periods for omega_0 = 1
for n, ini in enumerate(conds_ini):
    # FILL IN: obtain the period difference for the current initial
    # condition and store it in DeltaT_2[n] (for RK2) and DeltaT_4[n] (for RK4).
# -------------------- DO NOT EDIT THIS SECTION --------------------------------
axs[0].plot(conds_ini, DeltaT_2, "x", c="C0", label="RK2", markersize=10)
axs[0].plot(conds_ini, DeltaT_4, "o", c="C1", label="RK4")
estetizar_graficos(axs[0], r"$\omega_0=1, \ \Delta t=1\times 10^{-2}$",
r"$\theta_0$", "$T$")
# ------------------------------------------------------------------------------
# Periods for omega_0 = 10
for n, ini in enumerate(conds_ini):
    # FILL IN: obtain the period difference for the current initial
    # condition and store it in DeltaT_2[n] (for RK2) and DeltaT_4[n] (for RK4).
# -------------------- DO NOT EDIT THIS SECTION --------------------------------
axs[1].plot(conds_ini, DeltaT_2, "x", c="C0", label="RK2", markersize=10)
axs[1].plot(conds_ini, DeltaT_4, "o", c="C1", label="RK4")
estetizar_graficos(axs[1], r"$\omega_0=10, \ \Delta t=1\times 10^{-2}$",
r"$\theta_0$", "$T$")
# ------------------------------------------------------------------------------
# Time vector
dt, tf = 1e-3, 250
pasos = int(round(tf/dt))
t = np.arange(0, pasos+1)*dt
# Periods for omega0 = 10 with a smaller k
for n, ini in enumerate(conds_ini):
    # FILL IN: obtain the period difference for the current initial
    # condition and store it in DeltaT_2[n] (for RK2) and DeltaT_4[n] (for RK4).
# -------------------- DO NOT EDIT THIS SECTION --------------------------------
axs[2].plot(conds_ini, DeltaT_2, "x", c="C0", label="RK2", markersize=10)
axs[2].plot(conds_ini, DeltaT_4, "o", c="C1", label="RK4")
estetizar_graficos(axs[2], r"$\omega_0=10, \ \Delta t=1 \times 10^{-3}$",
r"$\theta_0$", "$T$");
# ------------------------------------------------------------------------------
```
**Note**: the previous cell may take 5 to 10 minutes to run. Try reducing the number of initial conditions `N` to 2 or 3 until you are confident you are getting reasonable results.
### Position and velocity with friction
Having obtained satisfactory results in the previous items, you can now be fairly confident that the integrators you wrote evolve the nonlinear term acceptably. It remains to run some tests to check that the results for the cases with friction also match expectations.
$\text{f)}$ Taking $\theta_0=0.01$, $\omega_0 = 1$, $k=10^{-2}$, $t_f=100$, and for both time integrators, integrate in time the cases:
<ol type="i">
<li>$\mu = 0.1$ (underdamped); </li>
<li>$\mu = 10$ (overdamped). </li>
</ol>
Verify that, qualitatively, the position and the angular velocity display the expected behavior.
```python
# Figure, axes and time parameters
fig, axs = plt.subplots(2, 2, figsize=(8,8), constrained_layout=True)
dt, tf = 1e-2, 100
# FILL IN: with the solutions for RK2 (th_2) and RK4 (th_4) in the case
# mu = 0.1.
th_2 = #...
th_4 = #...
# --------------------------- DO NOT EDIT THIS SECTION ---------------------------
t = np.arange(0, th_2.shape[0])*dt
axs[0,0].plot(t, th_2[:,0], label="RK2")
axs[0,0].plot(t, th_4[:,0], label="RK4")
estetizar_graficos(axs[0,0], "Underdamped", "$t$", r"$\theta$")
axs[1,0].plot(t, th_2[:,1], label="RK2")
axs[1,0].plot(t, th_4[:,1], label="RK4")
estetizar_graficos(axs[1,0], "", "$t$", r"$\dot\theta$")
# ------------------------------------------------------------------------------
# FILL IN: with the solutions for RK2 (th_2) and RK4 (th_4) in the case
# mu = 10
th_2 = #...
th_4 = #...
# --------------------------- DO NOT EDIT THIS SECTION ---------------------------
t = np.arange(0, th_2.shape[0])*dt
axs[0,1].plot(t, th_2[:,0], label="RK2")
axs[0,1].plot(t, th_4[:,0], label="RK4")
estetizar_graficos(axs[0,1], "Overdamped", "$t$", r"$\theta$")
axs[1,1].plot(t, th_2[:,1], label="RK2")
axs[1,1].plot(t, th_4[:,1], label="RK4")
estetizar_graficos(axs[1,1], "", "$t$", r"$\dot\theta$");
# ------------------------------------------------------------------------------
```
### Balance between mechanical energy loss and dissipated power
In terms of mechanical energy we no longer have conservation in the case with dissipation. However, it is straightforward to derive the following balance equation
\begin{equation*}
\dot E (t) = - 2 \mu \dot \theta^2(t) = P_\mu(t),
\end{equation*}
where $P_\mu$ is the power dissipated by the friction force.
$\text{g)}$ For the cases studied in the previous item, check whether the above balance equation holds (approximately). To do so, plot the quantity $|\dot{E} - P_\mu|$ as a function of time.
```python
# Balance between energy variation and dissipation
def potencia_roce(th_punto, mu):
    """
    Computes the power delivered to/dissipated from the system by air friction.
    Input:
        - th_punto: Array of (N) elements with the angular velocity of the
          mass at N instants of time.
        - mu: Coefficient of the friction force (in units of frequency)
    Output:
        - potencia: Array of (N) elements with the power delivered/dissipated
          by the friction force at each instant.
    """
    return # FILL IN
def variacion_energia(energia, dt):
"""
Calcula la variación temporal de la energía mecánica usando diferencias
finitas centradas de 6to orden. En los extremos del intervalo, donde no se
tienen suficientes datos, se devuelve 0.
Entrada:
- energia: Arreglo de (N) elementos con la energía mecánica para N
N instantes consecutivos.
- dt: Espaciamiento entre muestras de la energía mecánica.
Salida:
- var: Arreglo de (N) elementos con la variación temporal de la energía
mecánica.
"""
var = np.zeros_like(energia)
var = -1*np.roll(energia, 3) + 9*np.roll(energia, 2)
var += -45*np.roll(energia, 1) + 45*np.roll(energia, -1)
var += -9*np.roll(energia, -2) + np.roll(energia, -3)
var = var/(60*dt)
var[-3:] = 0
var[:3] = 0
return var
# Figures, axes and time step
fig, axs = plt.subplots(1, 2, figsize=(8,4), constrained_layout=True)
dt = 1e-2
offset = int(round(2/dt))
# Integration for mu = 0.1
# FILL IN: compute the absolute value of the difference between the energy
# variation and the dissipated power. Store your results in the variables
# bal_2 (for the second-order integration) and bal_4 (for the fourth-order one).
# --------------------------- DO NOT EDIT THIS SECTION ---------------------------
t = np.arange(bal_2.shape[0])*dt
axs[0].semilogy(t[offset:], bal_2[offset:], label="RK2")
axs[0].semilogy(t[offset:], bal_4[offset:], label="RK4")
estetizar_graficos(axs[0], "Underdamped",
"$t$", r"$|\partial_t E - P_\mu|$")
# ------------------------------------------------------------------------------
# Integration for mu = 10
# FILL IN: compute the absolute value of the difference between the energy
# variation and the dissipated power. Store your results in the variables
# bal_2 (for the second-order integration) and bal_4 (for the fourth-order one).
# --------------------------- DO NOT EDIT THIS SECTION ---------------------------
t = np.arange(bal_2.shape[0])*dt
axs[1].semilogy(t[offset:], bal_2[offset:], label="RK2")
axs[1].semilogy(t[offset:], bal_4[offset:], label="RK4")
estetizar_graficos(axs[1], "Overdamped",
"$t$", r"$|\partial_t E - P_\mu|$")
# ------------------------------------------------------------------------------
```
### Analysis of results
$\text{h)}$ **Briefly** describe the results obtained in the previous items.
**Your solution here**
---
---
<a name="explicacion"></a>
# **Time-stepping schemes**
## **Time integration**
In this practical we are interested in numerically solving the problem
\begin{align*}
\dot y &= f(t, y),\\
y(t_0) &= y_0,
\end{align*}
where, to lighten the notation, from now on we use $\dot y = \mathrm{d}y/\mathrm{d}t$.
Note that, unlike what we saw in the previous practical, we now know the derivative and want to recover $y^n$ (that is, the value of $y$ on a discrete set of values of $t$, $t^n$). Then, assuming $y^j, f(t^j, y^j)$ known for $0 \le j \le n$, one way to determine $y^{n+1}$ is to use Taylor expansions. For example, expanding $y^{n+1}$ around $y^n$, or $y^n$ around $y^{n+1}$, we obtain respectively
\begin{alignat*}{5}
y^{n+1} &= y^n &+& \dot y^n k &+& \mathcal{O}(k^2),\\
y^n &= y^{n+1} &-& \dot y^{n+1} k &+& \mathcal{O}(k^2),
\end{alignat*}
where $k = \Delta t$ is the time step and $f^n = f(t^n, y^n)$. From these expansions we can readily solve for two methods of second **local order**
\begin{align}
y^{n+1} &= y^n + f^n k, \tag{forward Euler}\\
y^{n+1} &= y^n + f^{n+1}k. \tag{backward Euler}
\end{align}
As the labels indicate, these methods are called **forward Euler** and **backward Euler**, respectively. They determine $y^{n+1}$ to second order if we know $y^n$ and $f^n$ or $f^{n+1}$ exactly (or, at least, to order $k^2$ and $k$, respectively). However, this is rarely the case: the usual situation is to start from an initial condition and apply time integrators iteratively until reaching the solution at some desired final time $t_f$. After applying the method $N=t_f/k$ times, the order becomes $N \mathcal{O}(k^2) = t_f \mathcal{O}(k)$. Consequently, the **global order**, that is, the error incurred when integrating up to some time $t$, is $\mathcal{O}(k)$ for the Euler methods (forward and backward). We will find this one-order-of-magnitude gap between the local order and the global order for every time scheme we study, at least while they remain **stable**, a concept we will discuss later. Unless explicitly stated otherwise, **when we refer to the order of a time integrator we mean its global order**.
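For concreteness, a minimal sketch (the test problem $\dot y = -y$, $y(0)=1$, and the values of $k$ are chosen purely for illustration) showing that the global error of forward Euler at $t_f=1$ scales as $\mathcal{O}(k)$:
```python
import numpy as np

# Forward Euler on y' = -y, y(0) = 1; exact solution y(t) = exp(-t).
def euler(k, tf=1.0):
    y = 1.0
    for _ in range(int(round(tf / k))):
        y = y + k * (-y)
    return y

for k in [1e-1, 5e-2, 2.5e-2]:
    err = abs(euler(k) - np.exp(-1.0))
    print(f"k = {k:.4f}  error = {err:.3e}")  # error roughly halves with k
```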
One point we have not yet dwelt on is that, if we know $f$ functionally and have $y^n$, the forward Euler equation is immediate to solve. The backward version, however, is an implicit expression for $y^{n+1}$ (since $f^{n+1}$ depends on $y^{n+1}$). This poses no great difficulty (in infinite precision) if $f^{n+1}$ is linear, but it is not trivial in general, usually requiring iterative methods to find $y^{n+1}$. Methods that, given all the information at $t^j, y^j$, $j\le n$, determine $y^{n+1}$ explicitly are called **explicit methods**. Those that require solving an implicit equation (like backward Euler) are called **implicit methods**. When using implicit methods it is common to pair them with some iterative scheme, for instance the Newton-Raphson algorithm, until the desired tolerance is reached. Put this way, implicit methods might seem an unnecessary complication; however, when discussing stability we will see that they can be very advantageous for some problems, while explicit methods are a better option in other cases.
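For example, a minimal sketch (the nonlinear right-hand side $f(y) = -y^3$ and the tolerances are illustrative choices) of one backward Euler step solved with Newton-Raphson on $g(y) = y - y^n - k f(y)$:
```python
# One backward Euler step for y' = f(y) = -y**3, finding the root of
# g(y) = y - y_n - k*f(y) with Newton-Raphson iterations.
def backward_euler_step(y_n, k, tol=1e-12, max_iter=50):
    f = lambda y: -y**3
    dfdy = lambda y: -3 * y**2
    y = y_n  # initial guess: the previous value
    for _ in range(max_iter):
        g = y - y_n - k * f(y)
        dg = 1.0 - k * dfdy(y)
        y_new = y - g / dg
        if abs(y_new - y) < tol:
            break
        y = y_new
    return y_new

print(backward_euler_step(1.0, 0.1))  # one implicit step from y(0) = 1
```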
To construct higher-order methods, the most popular strategies (and the ones we will see in this course) include:
- **<u>Linear multistep methods</u>**: similarly to what we did with finite differences, $y^{n+1}$ is proposed as a function of multiple values $y^j$ and $f^j$, instead of using only $y^n$ and $f^n/f^{n+1}$ as we did for the Euler methods.
- **<u>Predictor-corrector methods</u>**: they generate an approximate solution from low-order methods (the _prediction_) which is then improved using some interpolation algorithm (the _correction_).
- **<u>Runge-Kutta methods</u>**: instead of values of $y$ and $f$ at previous/later steps, they use intermediate values of $f$, such as $f^{n+\frac{1}{2}} = f\left(t^n + \frac{k}{2}, y\left(t^n+\frac{k}{2}\right)\right)$. They can be seen as a special case of predictor-corrector methods, although their widespread use has earned them a name of their own.
Before considering these strategies in detail, let us see why we will focus only on integrating $y$ from its first derivative $\dot y$. Spoiler: an ODE of order $N$ can be reduced to a system of $N$ first-order ODEs. It is worth noting, however, that time integrators for higher-order ODEs do exist, although we will not cover them in this course. When we study partial differential equations (PDEs) in later practicals, working with integrators for the first time derivative will not be a limitation either, as we will show in due course.
### **Reducing an ODE (ordinary differential equation) to first order**
Although in this course we will only see time integrators for systems of first-order ODEs, this places no restriction on the problems we can solve. As you have probably seen in previous courses, any ODE of order $N$ can be rewritten as a system of $N$ first-order ODEs. The simplest way to see this is with an example. Given the second-order initial value problem
\begin{equation*}
\dot{y} \ddot{y}\cos(t) + t^2\dot{y}^2 = y \ln(y), \qquad y(t_0) = y_0 \quad \wedge \quad \dot{y}(t_0) = \dot{y}_0,
\end{equation*}
we can substitute $u_1 = y$, $u_2 = \dot y$, and in this way obtain the system
\begin{align*}
\dot{u}_1 &= u_2, \qquad \qquad &&u_1(t_0) = y_0, \\
\dot{u}_2 &= \frac{u_1 \ln(u_1)- t^2 u_2^2}{u_2 \cos(t)}, \qquad \qquad &&u_2(t_0) = \dot{y}_0,
\end{align*}
which is a system of 2 coupled first-order differential equations, expressible as
\begin{align*}
\dot{\mathbf{u}} &= \mathbf{F}(t, \mathbf{u}), \\ \mathbf{u}(t_0) &= \mathbf{u_0},
\end{align*}
with
\begin{equation*}
\mathbf{u} = \begin{pmatrix}
u_1 \\
u_2 \\
\end{pmatrix}, \qquad \mathbf{u}_0=\begin{pmatrix}
y_0\\
\dot{y}_0
\end{pmatrix}, \qquad \mathbf{F}=\begin{pmatrix}
u_2 \\
\dfrac{u_1 \ln(u_1) - t^2 u_2^2}{u_2 \cos(t)}
\end{pmatrix}.
\end{equation*}
We see, then, that by rewriting our problem, the methods presented below are readily applicable to higher-order equations. Moreover, to lighten the notation, we will present the time integration schemes for a single first-order ODE. All the methods generalize straightforwardly to systems of ODEs, as we will see at the end with a full example (a minimal sketch follows below).
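As a quick sketch of this generalization (our own example, deliberately simpler than the one above: the harmonic oscillator $\ddot y = -y$), a forward Euler step applied to a system is just the scalar formula with vectors:
```python
import numpy as np

# Reduce y'' = -y (with y(0) = 1, y'(0) = 0) to u0' = u1, u1' = -u0
def F(t, u):
    return np.array([ u[1], -u[0] ])

k, pasos = 1e-3, 1000
u = np.array([ 1.0, 0.0 ])    # u = (y, dy/dt)
for n in range(pasos):
    u = u + k*F(n*k, u)       # Vector-valued forward Euler step
print(u[0], np.cos(pasos*k))  # Numerical vs exact y(t) = cos(t)
```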
### **Linear multistep methods**
These methods are called linear because they propose a linear combination (hence _linear_) of $y^j$ and $f^j$ using information from several time steps (hence _multistep_). In general, at a given time $t^n$ we can write them as
\begin{equation*}
\sum_{j=0}^s \alpha^j y^{n+j} = k \sum_{j=0}^s \beta^j f^{n+j}, \tag{1}
\end{equation*}
with specific choices of $\boldsymbol \alpha$ and $\boldsymbol \beta$ yielding particular realizations of linear multistep methods. Although at first glance the choice of $\boldsymbol \alpha$ and $\boldsymbol \beta$ may seem trivial, i.e., tune the $2s+2$ degrees of freedom so as to obtain schemes of order $\mathcal{O}(k^{2s})$, if we insist on stable integrators (we will see later why they are the only ones of interest), the highest order of approximation attainable from equation $(1)$ is $\mathcal{O}(k^{s+2})$ (Dahlquist barrier). Hence the many proposals for how to allocate the remaining degrees of freedom.
**One difficulty of multistep methods is that**, by construction, **we must know $y^j$ and $f^j$ at several instants**, information we lack if all we have is an initial condition $y^0$. **In practice, a usual way around this is to start the integration with another method** (e.g., Euler with very high temporal resolution — so as to preserve the order of approximation —, Runge-Kutta, predictor-corrector, etc.). Once enough steps are available, we switch to the desired multistep method.
In the problem sets we will see two families of linear multistep methods: the **Adams-Bashforth** methods (explicit) and the **Adams-Moulton** methods (implicit). Together they are known as _Adams_ methods$^\dagger$ and are given by the expression
\begin{equation*}
y^{n+s} - y^{n+s-1} = k\sum_{j=0}^s \beta^j f^{n+j},
\end{equation*}
that is, they arise from choosing $\alpha^0 = \dots = \alpha^{s-2} = 0$, $\alpha^{s-1} = -1$, and $\alpha^s = 1$.
$^\dagger$: <font size=2>the same Adams who first postulated the existence of Neptune.</font>
#### **Adams-Bashforth**
In the Adams-Bashforth method one seeks an explicit scheme, and therefore sets $\beta^s = 0$, that is,
\begin{equation*}
y^{n+s} - y^{n+s-1} = k\sum_{j=0}^{s-1} \beta^j f^{n+j} \tag{Adams-Bashforth}
\end{equation*}
It only remains to determine the coefficients $\beta^{0}, \dots, \beta^{s-1}$, and with them we will have a formula that yields $y^{n+s}$ given $y^{n+s-1}$ and $f^{n}, \dots, f^{n+s-1}$. To that end we naturally demand consistency and seek to maximize the order of approximation to $y^{n+s}$. Let $Y$ be an exact solution of the ODE in question; substituting it into the last equation we get
\begin{gather*}
\left[ Y + \dot{Y}k + \ddot{Y}\frac{k^2}{2} + \dots \right]_{t^{n+s-1}} - Y^{n+s-1} + \delta Y = k \left\{ \beta^{s-1} f^{n+s-1} + \beta^{s-2} \left[ f - \dot{f} k + \ddot{f} \frac{k^2}{2} + \dots \right]_{t^{n+s-1}} + \\
+ \ldots + \beta^0 \left[ f - \dot{f} (s-1)k + \ddot{f} \frac{(s-1)^2 k^2}{2} + \dots \right]_{t^{n+s-1}} \right\}
\end{gather*}
where we replaced $Y^{n+s}$ by its Taylor expansion around $Y^{n+s-1}$, $f^{n+s-j}$ by the corresponding expansions around $f^{n+s-1}$, and $\delta Y$ is the truncation error. Regrouping, we obtain
\begin{equation}
\delta Y = k \dot Y^{n+s-1} \left[ \beta^{s-1} + \beta^{s-2} + \ldots + \beta^0 - 1\right] + \\
+ k^2 \ddot Y \left[ - \beta^{s-2} - \ldots - (s-1) \beta^0 - \frac{1}{2} \right] + \dots, \tag{Truncation error}
\end{equation}
where we used $f^j = \dot Y^j$, $\dot f^j = \ddot Y^j$, and so on.
We see that consistency requires $\sum_j \beta^j = 1$; this result is general and applies to all linear multistep methods. The remaining $s$ coefficients are obtained by cancelling as many powers of $k$ as possible, so as to achieve the best order of approximation for the number of steps considered.
##### **Deriving the coefficients for a 3-step method**
As an example, let us derive the Adams-Bashforth method that uses 3 steps ($s=3$). That is, we must determine $\beta^0, \beta^1, \beta^2$ ($\beta^3 = 0$ since the method is explicit, i.e., an Adams-Bashforth method). To do so, we write the particular case of the previous equation
\begin{gather*}
\delta Y = k \dot Y \left[ \beta^2 + \beta^1 + \beta^0 - 1\right] + k^2 \ddot Y \left[ -\beta^1 - 2\beta^0 - \frac{1}{2} \right] + \\
+ k^3 \dddot Y \left[ \frac{\beta^1}{2} + \frac{2^2}{2} \beta^0 - \frac{1}{6} \right] + \mathcal{O}(k^4),
\end{gather*}
and demanding that as many leading orders as possible vanish (i.e., the brackets multiplying $k$, $k^2$, $k^3$) we get
\begin{equation*}
\begin{pmatrix}
1 & 1 & 1 \\
-2 & -1 & 0 \\
2 & \frac{1}{2} & 0
\end{pmatrix}
\begin{pmatrix}
\beta^0\\
\beta^1\\
\beta^2
\end{pmatrix}
= \begin{pmatrix}
1 \\
\frac{1}{2} \\
\frac{1}{6}
\end{pmatrix},
\end{equation*}
whose solution is
\begin{equation*}
\beta^0 = \frac{5}{12}, \qquad \beta^1 = - \frac{16}{12}, \qquad \beta^2 = \frac{23}{12},
\end{equation*}
obtaining a method with $\delta Y = \mathcal{O}(k^4)$, that is, a method of order $3$ (recall that the global order is one order of magnitude lower than the one-step error). In general, an $s$-step Adams-Bashforth method has (global) order $s$.
In a more operational notation, we can write the result as
\begin{equation}
y^{n+1} = y^n + \frac{k}{12} \left( 23 f^n - 16 f^{n-1} + 5 f^{n-2} \right).
\end{equation}
Let us see the method we just derived in action. Consider the initial value problem
\begin{equation*}
\dot y (t) = -y(t) + \sin(t), \qquad y(0)=1/2,
\end{equation*}
for $0 \le t \le 10$, whose analytical solution is $y(t) = e^{-t} + [\sin(t) - \cos(t)]/2$. We will use this analytical solution to handle the initialization problem. This strategy is not available when the analytical solution is unknown, in which case the integration must be started with other methods, as mentioned above.
```python
import numpy as np
import matplotlib.pyplot as plt
dt = 2.5e-1                # Time step
y0 = 1/2                   # Initial condition
tf = 10                    # Final integration time
pasos = int(round(tf/dt))  # Number of steps
y = np.zeros( pasos+1 )    # Array to store the integration
# Fill in the first three steps from the analytical solution
y[0] = y0
y[1] = np.exp(-dt) + (np.sin(dt) - np.cos(dt) )/2
y[2] = np.exp(-2*dt) + (np.sin(2*dt)-np.cos(2*dt))/2
# Integrate using AB3
for n in range(2, pasos):
    tn  = n*dt      # t^n
    tn1 = (n-1)*dt  # t^{n-1}
    tn2 = (n-2)*dt  # t^{n-2}
    fn  = -y[n]   + np.sin(tn)   # f^n
    fn1 = -y[n-1] + np.sin(tn1)  # f^{n-1}
    fn2 = -y[n-2] + np.sin(tn2)  # f^{n-2}
    y[n+1] = y[n] + (23*fn - 16*fn1 + 5*fn2)*dt/12  # Explicit integration
# Plot
t = np.arange(0, y.size)*dt
fig, ax = plt.subplots(1, 1, figsize=(8,4), constrained_layout=True)
ax.plot(t, y, label=r"AB3 ($k=2.5 \times 10^{-1}$)", c="C1", lw=4)
ax.plot(t, np.exp(-t) + (np.sin(t)-np.cos(t))/2, "--k", label="Exact solution")
ax.legend()
ax.set_title(r"Integration of $\dot{y} = -y + \sin(t)$, $y(0)=1/2$")
ax.set_xlabel("$t$")
ax.set_ylabel("$y$");
```
##### **Summary of Adams-Bashforth methods**
Below is a table with the coefficients multiplying each evaluation of $f$ for the Adams-Bashforth methods up to order $4$.

| $\mathrm{Steps}$ | $\mathrm{Order}$ | $f^n$ | $f^{n-1}$ | $f^{n-2}$ | $f^{n-3}$ |
|------------------|------------------|----------------|----------------|----------------|---------------|
| $1$ | $1$ | $1$ | $0$ | $0$ | $0$ |
| $2$ | $2$ |$\frac{3}{2}$ | $-\frac{1}{2}$ | $0$ | $0$ |
| $3$ | $3$ |$\frac{23}{12}$ |$-\frac{16}{12}$| $\frac{5}{12}$ | $0$ |
| $4$ | $4$ |$\frac{55}{24}$ |$-\frac{59}{24}$| $\frac{37}{24}$|$-\frac{9}{24}$|

Note that for $s=1$ we recover the forward Euler method. A short snippet reproducing these coefficients follows.
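The hand derivation above generalizes directly: the order conditions form a linear system for the $\beta^j$. The following sketch (our own; it uses SymPy, which is preinstalled in Colab) solves that system exactly for any number of steps $s$, reproducing the rows of the table. The conditions are written in the compact form $\sum_j \beta^j d_j^{\,m} = 1/(m+1)$, with $d_j = j-(s-1)$ the time offsets in units of $k$; these are scaled versions of the rows derived by hand above.
```python
import sympy as sp

def coefs_ab(s):
    """Exact coefficients beta^0, ..., beta^{s-1} of s-step Adams-Bashforth."""
    beta = sp.symbols(f"beta0:{s}")
    d = [ j - (s - 1) for j in range(s) ]  # Time offsets in units of k
    # Order conditions: sum_j beta^j * d_j^m = 1/(m+1), for m = 0, ..., s-1
    eqs = [ sp.Eq(sum(b*dj**m for b, dj in zip(beta, d)), sp.Rational(1, m+1))
            for m in range(s) ]
    return sp.solve(eqs, beta)

print(coefs_ab(3))  # {beta0: 5/12, beta1: -4/3, beta2: 23/12}
print(coefs_ab(4))  # Reproduces the order-4 row of the table
```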
#### **Adams-Moulton**
In the Adams-Moulton methods one follows the same logic as in Adams-Bashforth, but the restriction $\beta^s = 0$ is relaxed, resulting in an implicit method. The appropriate coefficients are obtained in a completely analogous way, giving the following relation for the truncation error
\begin{equation}
\delta Y = k \dot Y^{n+s-1} \left[ \beta^s + \beta^{s-1} + \beta^{s-2} + \ldots + \beta^0 - 1\right] + k^2 \ddot Y \left[ \beta^s - \beta^{s-2} - \ldots - (s-1) \beta^0 - \frac{1}{2} \right] + \dots,
\end{equation}
which naturally has one more degree of freedom, $\beta^s$, than the expression found for Adams-Bashforth.
##### **Deriving the coefficients for a 3-step method**
Analogously to the explicit case, let us compute the coefficients for the implicit (Adams-Moulton) 3-step method. This means finding $\beta^3$, $\beta^2$, $\beta^1$, and $\beta^0$. To do so, we write the truncation error up to the appropriate order
\begin{gather*}
\delta Y = k \dot Y \left[ \beta^3 + \beta^2 + \beta^1 + \beta^0 - 1\right] + k^2 \ddot Y \left[ \beta^3 -\beta^1 - 2\beta^0 - \frac{1}{2} \right] + \\
+ k^3 \dddot Y \left[ \frac{\beta^3}{2} + \frac{\beta^1}{2} + \frac{2^2}{2} \beta^0 - \frac{1}{6} \right] + k^4 \ddddot Y \left[ \frac{\beta^3}{6} - \frac{\beta^1}{6} - \frac{2^3}{6} \beta^0 - \frac{1}{24} \right] + \mathcal{O}(k^5).
\end{gather*}
We again demand that as many leading orders as possible vanish, i.e., the brackets multiplying $k$, $k^2$, $k^3$, and $k^4$. Note that, since we have one extra coefficient, we can cancel one more term than in the Adams-Bashforth case. We then have
\begin{equation*}
\begin{pmatrix}
1 & 1 & 1 & 1 \\
-2 & -1 & 0 & 1 \\
2 & \frac{1}{2} & 0 & \frac{1}{2}\\
-\frac{4}{3} & -\frac{1}{6} & 0 & \frac{1}{6}
\end{pmatrix}
\begin{pmatrix}
\beta^0\\
\beta^1\\
\beta^2\\
\beta^3
\end{pmatrix}
= \begin{pmatrix}
1 \\
\frac{1}{2} \\
\frac{1}{6} \\
\frac{1}{24} \\
\end{pmatrix},
\end{equation*}
whose solution is
\begin{equation*}
\beta^0 = \frac{1}{24}, \qquad \beta^1 = - \frac{5}{24}, \qquad \beta^2 = \frac{19}{24}, \qquad \beta^3 = \frac{9}{24}
\end{equation*}
obtaining a method with $\delta Y = \mathcal{O}(k^5)$, that is, of (global) order $4$. In general, an $s$-step Adams-Moulton method has (global) order $s+1$.
In a more operational notation, we can write the result as
\begin{equation}
y^{n+1} = y^n + \frac{k}{24} \left( 9 f^{n+1} + 19 f^n - 5 f^{n-1} + f^{n-2} \right),
\end{equation}
which is an implicit expression for $y^{n+1}$.
As before, consider the initial value problem
\begin{equation*}
\dot y (t) = -y(t) + \sin(t), \qquad \qquad y(0) = 1/2,
\end{equation*}
for $0 \le t \le 10$. As before, we will use the analytical solution $y(t) = e^{-t} + \left[\sin(t) - \cos(t) \right]/2$ to start the integration. Moreover, since this equation is linear, the implicitness of the method poses no difficulty: we can solve for $y^{n+1}$ as follows
\begin{align*}
y^{n+1} &= y^n + \frac{k}{24} \left[9\left(-y^{n+1} + \sin(t^{n+1})\right) + 19f^n - 5f^{n-1} + f^{n-2} \right]\\
&= \left[y^n + \frac{k}{24} \left(9\ \sin(t^{n+1}) + 19f^n - 5f^{n-1} + f^{n-2} \right) \right] \frac{1}{1 + \dfrac{9}{24}k}.
\end{align*}
This does not generalize to nonlinear problems, where some iterative scheme is needed to find $y^{n+1}$.
```python
import numpy as np
import matplotlib.pyplot as plt
dt = 2.5e-1                # Time step
y0 = 1/2                   # Initial condition
tf = 10                    # Final integration time
pasos = int(round(tf/dt))  # Number of steps
y = np.zeros( pasos+1 )    # Array to store the integration
# Initial condition and first iterations (taken from the analytical solution)
y[0] = y0
y[1] = np.exp(-dt) + (np.sin(dt) - np.cos(dt) )/2
y[2] = np.exp(-2*dt) + (np.sin(2*dt)-np.cos(2*dt))/2
# Integrate using AM4 (3 steps, 4th order)
for n in range(2, pasos):
    ts  = (n+1)*dt  # t^{n+1} (next time)
    tn  = n*dt      # t^n
    tn1 = (n-1)*dt  # t^{n-1}
    tn2 = (n-2)*dt  # t^{n-2}
    fn  = -y[n]   + np.sin(tn)   # f^n
    fn1 = -y[n-1] + np.sin(tn1)  # f^{n-1}
    fn2 = -y[n-2] + np.sin(tn2)  # f^{n-2}
    y[n+1] = (y[n] + (9*np.sin(ts) + 19*fn - 5*fn1 + fn2)*dt/24)/(1+9/24*dt)
# Plot
t = np.arange(0, y.size)*dt
fig, ax = plt.subplots(1, 1, figsize=(8,4), constrained_layout=True)
ax.plot(t, y, label=r"AM4 ($k=2.5 \times 10^{-1}$)", c="C1", lw=4)
ax.plot(t, np.exp(-t) + (np.sin(t)-np.cos(t))/2, "--k", label="Exact solution")
ax.legend()
ax.set_title(r"Integration of $\dot{y} = -y + \sin(t)$, $y(0)=1/2$")
ax.set_xlabel("$t$")
ax.set_ylabel("$y$");
```
##### **Summary of Adams-Moulton methods**

| $\mathrm{Steps}$ | $\mathrm{Order}$ | $f^{n+1}$ | $f^n$ | $f^{n-1}$ | $f^{n-2}$ | $f^{n-3}$ |
|------------------|------------------|-----------------|-----------------|------------------|-----------------|-----------------|
| $1$ | $1$ | $1$ | $0$ | $0$ | $0$ | $0$ |
| $1$ | $2$ |$\frac{1}{2}$ |$\frac{1}{2}$ | $0$ | $0$ | $0$ |
| $2$ | $3$ |$\frac{5}{12}$ |$\frac{8}{12}$ |$-\frac{1}{12}$ | $0$ | $0$ |
| $3$ | $4$ |$\frac{9}{24}$ |$\frac{19}{24}$ |$-\frac{5}{24}$ |$\frac{1}{24}$ | $0$ |
| $4$ | $5$ |$\frac{251}{720}$|$\frac{646}{720}$|$-\frac{264}{720}$|$\frac{106}{720}$|$-\frac{19}{720}$|

We see that the $1$-step case is degenerate, admitting two possible schemes. The first-order one is none other than the backward Euler method, while the second-order scheme is known as the trapezoidal rule.
#### **Other linear multistep methods**
Without going through the derivations, we can mention other popular families of linear multistep methods. One option, for instance, is to look for centered time operators of the form
\begin{equation*}
y^{n+s}-y^{n+s-2} = k\sum_{j=0}^s \beta^j f^{n+j}.
\end{equation*}
The explicit methods ($\beta^s=0$) associated with this choice are called _Nyström methods_, while the implicit ones are known as _Milne-Simpson_ methods; they have order $s$ and $s+1$, respectively. A Nyström method you saw in the lectures is the one known as _**leapfrog**_, given by
\begin{equation*}
y^{n+1} = y^{n-1} + 2k f^n,
\end{equation*}
which, as you saw in the lectures, exhibits a computational mode (i.e., a _non-physical_ mode), as the short sketch below illustrates.
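As a quick numerical illustration (our own sketch, not part of the original notes), applying leapfrog to $\dot y = -y$ exposes the computational mode: the true solution decays, but the spurious root of the scheme grows with alternating sign.
```python
import numpy as np

k, pasos = 0.1, 300
y = np.zeros( pasos+1 )
y[0] = 1.0
y[1] = np.exp(-k)  # Start-up value taken from the exact solution
for n in range(1, pasos):
    y[n+1] = y[n-1] + 2*k*(-y[n])  # Leapfrog step for f = -y
# The last values alternate in sign with huge amplitude: the computational mode
print(y[-2], y[-1], "   exact:", np.exp(-pasos*k))
```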
Additionally, and in the opposite spirit to the Adams methods, one can seek methods with $\beta^0 = \beta^1 = \ldots = \beta^{s-1} = 0$, tuning $\beta^s$ and $\boldsymbol \alpha$. These schemes are called **_backward differentiation formulas_** (BDF).
### **Predictor-corrector methods**
As mentioned earlier, predictor-corrector methods use two time integrators and combine them. The most common technique combines an explicit method with an implicit one, as follows
1. **<u>Predictor step ($P$)</u>**: We use an explicit method to obtain an estimate of $y^{n+1}$, which we call $y^{*n+1}$; for example, using forward Euler:
\begin{equation*}
y^{*n+1} = y^n + kf^n.
\end{equation*}
2. **<u>Evaluation step ($E$)</u>**: From the estimate of $y^{n+1}$ obtained in the previous step, we compute $f(t^{n+1}, y^{*n+1})$, that is, $f^{*n+1}$.
3. **<u>Corrector step ($C$)</u>**: Calling $f(t^{n+1}, y^{*n+1})$ simply $f^{*n+1}$, we evaluate an implicit scheme with it. Using backward Euler as an example, we obtain:
\begin{equation}
y^{n+1} = y^n + kf^{*n+1} = y^n + kf(t^{n+1}, y^n + kf^n). \tag{Matsuno}
\end{equation}
The method we just found is known as the **_Matsuno method_** and is $\mathcal{O}(k)$. Analogously, we could have used the trapezoidal scheme (second-order Adams-Moulton) instead of backward Euler, obtaining
\begin{equation}
y^{n+1} = y^n + \frac{k}{2} \left( f^{*n+1} + f^n \right) = y^n + \frac{k}{2}\left[f(t^{n+1}, y^n + kf^n) + f(t^n, y^n) \right], \tag{Heun}
\end{equation}
known as **_Heun's method_**, whose global order is $\mathcal{O}(k^2)$.
Note that in both cases we end up with explicit methods, derived from combining an explicit and an implicit one.
It is worth mentioning that this process can be iterated, using the results of $(\mathrm{Matsuno})$ and $(\mathrm{Heun})$ as new candidates $y^{*n+1}$, obtaining a new value of $f^{*n+1}$ and applying the corrector again to get $y^{n+1}$. If the correction is applied $c$ times, the resulting method is usually denoted $P(EC)^c$; a small sketch follows below.
Although predictor-corrector methods can be studied more formally, we will not do so in this course. We only mention that another common combination is Adams-Bashforth (predictor) with Adams-Moulton (corrector), known as _Adams-Bashforth-Moulton methods_.
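Here is a minimal sketch of the $P(EC)^c$ idea (our own code, with forward Euler as predictor and the trapezoidal rule as corrector, so that $c=1$ reproduces Heun's method):
```python
import numpy as np

def paso_pec(f, tn, yn, k, c=1):
    """One P(EC)^c step: forward Euler predictor, trapezoidal corrector."""
    fn = f(tn, yn)
    ys = yn + k*fn               # P: predict y^{*n+1}
    for _ in range(c):
        fs = f(tn + k, ys)       # E: evaluate f^{*n+1}
        ys = yn + k*(fn + fs)/2  # C: correct with the trapezoidal rule
    return ys

# Usage on the test problem of these notes, dy/dt = -y + sin(t):
f = lambda t, y: -y + np.sin(t)
print(paso_pec(f, 0.0, 0.5, 0.25, c=1))  # Heun
print(paso_pec(f, 0.0, 0.5, 0.25, c=3))  # P(EC)^3
```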
### **Runge-Kutta methods**
Unlike multistep methods, which use values of $y$ and $f$ computed at previous steps (or at the next step if implicit), the idea of Runge-Kutta methods is to use several evaluations of $f$ between $t^n$ and $t^{n+1}$ (called _stages_) to build a higher-order approximation.
A priori this might seem inefficient: multistep methods reuse information we already have, while Runge-Kutta spends new evaluations that serve only the current step. However, Runge-Kutta methods need less storage. They also have different stability properties than multistep methods, and may therefore be better suited to some ODEs.
Runge-Kutta methods are classified by their number of stages (just as multistep methods are by their number of steps). For example, a two-stage method reads
\begin{equation*}
y^{n+1} = y^{n} + k \left( \alpha_1 f^n_1 + \alpha_2 f^n_2 \right),
\end{equation*}
where $f^n_1=f(t^n_1, y^n_1)$ and $f^n_2=f(t^n_2, y^n_2)$ are evaluations of $f$ between $t^n$ and $t^{n+1}$, i.e., $t^n \le t^n_1 \le t^n_2 \le t^{n+1}$. The same idea applies to methods with more stages. _**Note that we will only cover explicit Runge-Kutta methods, as they are the most widely used**_.
In general, and unlike multistep methods, for Runge-Kutta it is not enough to fix a number of stages $s$ and maximize the order of approximation to obtain a unique method. However, if we restrict ourselves to methods whose evaluations of $f$ are equispaced in time (though in some cases repeated), we obtain the following schemes:
* _**Second order**_:
\begin{align}
y^{n+1} &= y^n + R_2, \tag{Midpoint} \\
R_1 &= kf\left( t^n, y^n \right), \\
R_2 &= kf\left( t^n + \frac{k}{2}, y^n + \frac{R_1}{2}\right)
\end{align}
<br>
\begin{align}
y^{n+1} &= y^n + \frac{1}{2} \left( R_1 + R_2 \right), \tag{Heun} \\
R_1 &= kf\left( t^n, y^n \right), \\
R_2 &= kf\left( t^n + k, y^n + R_1\right)
\end{align}
* _**Third order**_:
\begin{align}
y^{n+1} &= y^n + \frac{1}{4}\left( R_1 + 3R_3 \right), \tag{RK3} \\
R_1 &= kf\left( t^n, y^n \right), \\
R_2 &= kf\left( t^n + \frac{k}{3}, y^n + \frac{R_1}{3}\right), \\
R_3 &= kf\left( t^n + 2\frac{k}{3}, y^n + 2\frac{R_2}{3} \right).
\end{align}
* _**Fourth order**_:
\begin{align}
y^{n+1} &= y^n + \frac{1}{6}\left( R_1 + 2R_2 + 2R_3 + R_4\right), \tag{RK4} \\
R_1 &= kf\left( t^n, y^n \right), \\
R_2 &= kf\left( t^n + \frac{k}{2}, y^n + \frac{R_1}{2}\right), \\
R_3 &= kf\left( t^n + \frac{k}{2}, y^n + \frac{R_2}{2} \right), \\
R_4 &= kf\left( t^n + k, y^n + R_3 \right).
\end{align}
We see that there are two second-order methods. The first is called _**improved Euler**_ or _**midpoint**_ method, while the other is the already familiar Heun's method. The latter shows, as mentioned earlier, that although it is not entirely usual, Runge-Kutta methods can be regarded as predictor-corrector schemes, only operating on intermediate instants (i.e., stages) rather than between steps.
Additionally, we once again recover forward Euler as the degenerate case of a 1-stage Runge-Kutta method.
As before, to illustrate the use of Runge-Kutta methods, let us integrate the initial value problem
\begin{equation*}
\dot y(t) = -y(t) + \sin(t), \qquad \qquad y(0) = 1/2,
\end{equation*}
for $0\le t\le 10$ using a third-order Runge-Kutta method.
```python
import numpy as np
import matplotlib.pyplot as plt
dt = 2.5e-1                # Time step
y0 = 1/2                   # Initial condition
tf = 10                    # Final integration time
pasos = int(round(tf/dt))  # Number of steps
# Array to store the integration
y = np.zeros( pasos+1 )
y[0] = y0
# Integrate using RK3
for n in range(0, pasos):
    t1 = n*dt
    R1 = dt*(-y[n] + np.sin(t1))             # First stage
    t2 = t1 + dt/3
    R2 = dt*(-(y[n] + R1/3) + np.sin(t2))    # Second stage
    t3 = t2 + dt/3
    R3 = dt*(-(y[n] + 2*R2/3) + np.sin(t3))  # Last stage
    y[n+1] = y[n] + (R1 + 3*R3)/4            # Combine the stages
# Plot
t = np.arange(0, y.size)*dt
fig, ax = plt.subplots(1, 1, figsize=(8,4), constrained_layout=True)
ax.plot(t, y, label=r"RK3 ($k=2.5 \times 10^{-1}$)", c="C1", lw=4)
ax.plot(t, np.exp(-t) + (np.sin(t)-np.cos(t))/2, "--k", label="Exact solution")
ax.legend()
ax.set_title(r"Integration of $\dot{y} = -y + \sin(t)$, $y(0)=1/2$")
ax.set_xlabel("$t$")
ax.set_ylabel("$y$");
```
### **Application to a higher-order ODE**
Let us now consider the following initial value problem
\begin{equation*}
\ddot{y} + \frac{2}{t^2+1} (y - t\dot y) = \bigg( \cos(t) + t\sin(t) \bigg)\frac{2}{t^2+1} - \cos(t), \qquad \qquad y(0) = 2, \qquad \dot{y}(0) = 0.
\end{equation*}
This problem has solution $y(t) = 1-t^2 + \cos(t)$.
Note that defining $u_0 = y$, $u_1 = \dot{y}$, we can reduce this second-order ODE to a system of first-order ODEs as follows
\begin{align*}
\dot{u}_0 &= u_1, \qquad \qquad & u_0(0) = 2,\\
\dot{u}_1 &= -\frac{2}{t^2 + 1} (u_0 - tu_1) + \bigg( \cos(t) + t\sin(t) \bigg)\frac{2}{t^2+1} - \cos(t), & u_1(0) = 0.
\end{align*}
Let us solve this system using a third-order Runge-Kutta method (RK3).
```python
import numpy as np
import matplotlib.pyplot as plt
dt = 1e-2                     # Time step
tf = 2                        # Final integration time
pasos = int(round(tf/dt))     # Number of steps
u = np.zeros( (pasos+1, 2) )  # Array for u_0 (u[:,0]) and u_1 (u[:,1])
# Initial conditions
u[0,0] = 2
u[0,1] = 0
# Arrays to store the intermediate Runge-Kutta stages
R1 = np.zeros( 2 )
R2 = np.zeros( 2 )
R3 = np.zeros( 2 )
# Integrate with third-order Runge-Kutta (RK3)
for n in range(0, pasos):
    # First stage
    t1 = n*dt
    R1[0] = dt*u[n,1]
    R1[1] = -2*dt/(t1**2+1)*(u[n,0] - t1*u[n,1]) \
            + dt*((np.cos(t1) + t1*np.sin(t1))*2/(t1**2+1) - np.cos(t1) )
    # Second stage
    t2 = t1 + dt/3
    R2[0] = dt*( u[n,1] + R1[1]/3 )
    R2[1] = -2*dt/(t2**2+1)*( u[n,0]+ R1[0]/3 - t2*(u[n,1]+R1[1]/3) ) \
            + dt*((np.cos(t2) + t2*np.sin(t2))*2/(t2**2+1) - np.cos(t2) )
    # Third stage
    t3 = t1 + 2*dt/3
    R3[0] = dt*( u[n,1]+2*R2[1]/3 )
    R3[1] = -2*dt/(t3**2+1)*( u[n,0]+ 2*R2[0]/3 - t3*(u[n,1]+2*R2[1]/3)) \
            + dt*((np.cos(t3) + t3*np.sin(t3))*2/(t3**2+1) - np.cos(t3) )
    # Combine stages
    u[n+1] = u[n] + (R1 + 3*R3)*1/4
# Plot
t = np.arange(u.shape[0])*dt
fig, ax = plt.subplots(1, 1, figsize=(8,4), constrained_layout=True)
ax.plot(t, u[:,0], label="Numerical", lw=6, c="C1")
ax.plot(t, (1-t**2) + np.cos(t), "--k", label="Exact")
ax.legend()
ax.set_title(r"Integration of $\ddot y + \frac{2}{t^2+1}(y-t\dot{y})= " +
             r"(\cos(t)+t\sin(t))\frac{2}{t^2+1}-\cos(t)$" +
             "\n with $y(0) = 2$ and $\\dot{y}(0) = 0$")
ax.set_xlabel("$t$")
ax.set_ylabel("$y$");
```
## **Stability**
### **Stability regions**
A very important question we have avoided so far is whether the numerical solution converges to the true one.
We mentioned in the previous problem set that a requirement for a scheme to be convergent is consistency. That is, in the limit of an infinitely dense grid, the discrete operator applied to the true solution $Y$ must converge to the action of the continuous operator (this is how we saw it in the previous set; there are other equivalent definitions).
However, when marching in time, consistency alone does not guarantee that the numerical solution converges. If our numerical solution initially contains a small error $\epsilon$, that is,
$y^0 = Y^0 + \epsilon$, what happens to $\epsilon$ as we integrate in time?
Answering this question is the purpose of stability analysis. We will only present some general considerations about the stability of different schemes, not every way of studying the stability of a time integrator, which is a broad topic.
One way to analyze stability is to ask whether the proposed time scheme remains bounded when applied to the equation
\begin{equation*}
\dot y = \lambda y,
\end{equation*}
where $\lambda \in \mathbb{C}$. Note the similarity with the Fourier analysis of errors you saw in the lectures (here we look at the amplitude error instead of the phase error). For $\mathrm{Re}(\lambda) < 0$, a proper time scheme should return a bounded solution; however, we will see that this is not always the case.
In general, for a given time scheme there will be values of $\Delta t$ for which the discrete solution of this equation remains bounded, and others for which it does not. These are the _**stability**_ and instability _**regions**_ of the method, respectively.
### **Examples of stability regions**
#### **Forward Euler**
As an example we can study the forward Euler method, for which the stability analysis reads
\begin{equation*}
y^{n+1} = y^n + k \lambda y^n = (1+k\lambda)y^n.
\end{equation*}
Writing this in terms of the initial condition, we see that
\begin{equation*}
y^n = (1+\bar{\lambda})^n y^0
\end{equation*}
with $\bar \lambda = k \lambda$, which remains bounded only if $|1 + \bar{\lambda}| < 1$. We use the variable $\bar \lambda$ because, for any scheme, discretizing the equation leaves us with the combination $\bar{\lambda} = k \lambda$.
We thus found that the _**stability region**_ of forward Euler is $|1+k\lambda| < 1$ (i.e., a circle of radius $1$ in the complex plane centered at $z=-1$). The sketch below checks this numerically.
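A two-line numerical check (our own sketch): for $\lambda=-1$, the step $k=1.5$ lies inside the circle ($|1+\bar\lambda| = 0.5$), while $k=2.5$ lies outside ($|1+\bar\lambda| = 1.5$).
```python
lam, y0 = -1.0, 1.0
for k in (1.5, 2.5):
    y = y0
    for n in range(50):
        y = (1 + k*lam)*y  # Forward Euler applied to dy/dt = lam*y
    print(f"k = {k}: |1 + k*lam| = {abs(1 + k*lam)}, y after 50 steps = {y:.3e}")
```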
#### **Trapezoidal**
If we now consider the trapezoidal method we have
\begin{equation*}
y^{n+1} = y^n + \frac{\bar{\lambda}}{2} (y^n + y^{n+1}) \qquad \Longrightarrow \qquad y^{n+1}\left(1 - \frac{\bar \lambda}{2} \right) = y^n \left(1 + \frac{\bar \lambda}{2} \right)
\end{equation*}
and therefore
\begin{equation*}
y^n = y_0 \left[ \dfrac{1 + \dfrac{\bar \lambda}{2}}{1 - \dfrac{\bar \lambda}{2}}\right]^n.
\end{equation*}
The stability region is then given by $|1+\bar \lambda/2| < |1 - \bar \lambda/2|$, which holds whenever $\mathrm{Re}(\bar \lambda) < 0$; that is, the trapezoidal method is _**unconditionally stable**_ (stable for every $k$).
### **Stiffness of an ODE**
The previous section dealt only with the stability of linear ODEs. In the nonlinear case, however, given a solution $y^*$ of the equation at time $t^*$ (obtained, for instance, numerically), we can linearize the equation around $(t^*, y^*)$ and perform a linear stability analysis in a neighborhood of $(t^*, y^*)$. This strategy, which we will not use in this course, works acceptably well in a large number of cases.
After this discussion it should be clear that **the stability of an ODE integration depends both on the chosen time scheme and on the ODE itself**. This observation motivates the label **stiff** for differential equations that require very small time steps to keep their integration stable. An alternative formulation of the concept is to call stiff those differential equations that are extremely hard to integrate with explicit methods which, as we will see below, tend to have smaller stability regions.
Even though it usually refers to differential equations, and this is how we will use it in this course, the label stiff is more properly applied to the initial value problem as a whole. For example, an ODE may be more or less stable to integrate depending on its initial condition (i.e., on the particular solution), or on the interval where we seek a solution.
To see the concepts of stiffness and stability at work, consider the example
\begin{equation*}
\dot y(t) = -100\left[y(t) - \cos(t) \right] - \sin(t), \qquad \qquad y(0) = 1,
\end{equation*}
for $0\le t \le 1$. This initial value problem has solution $y(t) = \cos(t)$, for which the term in square brackets vanishes identically.
Let us see what happens numerically
```python
import numpy as np
import matplotlib.pyplot as plt
dt = 1e-1                  # Time step
y0 = 1                     # Initial condition
tf = 1                     # Final integration time
pasos = int(round(tf/dt))  # Number of steps
# Initialize y for the two integrators I will use
y_ex1 = np.zeros( pasos+1 )
y_im  = np.zeros( pasos+1 )
# Write the initial condition
y_ex1[0] = y0
y_im[0]  = y0
# Integrate with forward Euler (explicit) and backward Euler (implicit)
for n in range(0, pasos):
    t_ex = n*dt      # Current time
    t_im = (n+1)*dt  # Next time
    # Forward Euler
    y_ex1[n+1] = y_ex1[n] + dt*(-100*(y_ex1[n]-np.cos(t_ex)) - np.sin(t_ex))
    # Backward Euler; since the ODE is linear, we can solve for y^{n+1}
    y_im[n+1] = (y_im[n] + dt*(100*np.cos(t_im) - np.sin(t_im))) / (1+100*dt)
# Now try reducing the step by a factor of 10
dt2 = dt/10
pasos2 = int(round(tf/dt2))
y_ex2 = np.zeros( pasos2+1 )
y_ex2[0] = y0
for n in range(0, pasos2):
    t_ex = n*dt2
    # Forward Euler
    y_ex2[n+1] = y_ex2[n] + dt2*(-100*(y_ex2[n]-np.cos(t_ex)) - np.sin(t_ex))
# Plot
t1 = np.arange(0, y_ex1.size)*dt
t2 = np.arange(0, y_ex2.size)*dt2
fig, ax = plt.subplots(1, figsize=(8,4), constrained_layout=True)
ax.plot(t1, y_ex1, label=r"Forward Euler ($k=1\times10^{-1}$)")
ax.plot(t2, y_ex2, label=r"Forward Euler ($k=1\times10^{-2}$)", lw=16,
        alpha=.5)
ax.plot(t1, y_im, label=r"Backward Euler ($k=1\times10^{-1}$)", lw=8, alpha=0.8)
ax.plot(t1, np.cos(t1), '--k', label="Exact solution", lw=2)
ax.legend(frameon=False, loc="upper left")
ax.set_title(r"Integration of $\dot y = -100(y-\cos(t)) - \sin(t)$," +
             r" $y(0)=1$")
ax.set_xlabel("$t$")
ax.set_ylabel("$y$")
ax.set_ylim(0, 2);
```
We see that the explicit method diverges for $k=1\times 10^{-1}$, while the implicit one remains bounded and, moreover, converges to the exact solution. This follows from the previous section: linearizing the equation gives $\lambda = -100$, and $\bar \lambda = k \lambda = -10$ lies outside the stability region of forward Euler.
Alternatively, we can understand this phenomenon as follows. Note that, for a slightly different initial condition, the bracketed term dominates at early times, driving the solution toward something very close to $\cos(t)$ over a time scale of $0.01$ ($1/100$). The explicit method fails to resolve this time scale and ends up diverging.
This is why an equally valid formulation of stiffness, but a more interesting one for this course, is the following:
> **A stiff ODE is one that involves widely disparate scales.**
Note that, in our case, the trouble with the proposed ODE is that we care about the behavior at times $\mathcal{O}(1)$ (the order of the period of $\cos(t)$); yet, to obtain an acceptable solution for this low-frequency behavior, we must also resolve the $\mathcal{O}(0.01)$ scales. Physically speaking, that is why our explicit integrator struggles, needing 10 times more steps to converge to the correct solution.
I hope this also clarifies the importance of implicit methods which, in the nonlinear case, may demand considerably more work per iteration than explicit ones. In exchange, they generally remain stable with larger time steps. Throughout the rest of the course we will see that the choice between an explicit and an implicit scheme is problem-dependent.
### **Summary of stability regions**
Below are diagrams sketching the stability regions of some families of time schemes. In all cases the diagrams are in units of $\bar \lambda$ (note the different axis limits in each panel).
<center><i>(Figure: stability regions of the Adams-Bashforth, Adams-Moulton, backward differentiation, and Runge-Kutta families.)</i></center>
The _leapfrog_ method is left out of this figure: it is only stable in the region $\mathrm{Re}(\bar \lambda) = 0 \ \ \wedge \ \ |\mathrm{Im}(\bar \lambda)| \le 1$, making it appropriate only for undamped oscillation problems.
For the explicit methods (Adams-Bashforth and Runge-Kutta), and for Adams-Moulton of order 3 and higher, the stability regions are the interiors of each curve (shaded in the plots). For Adams-Moulton of order 1 the stability region is the exterior of the circle, while for order 2 it is the whole half-plane $\mathrm{Re}(\bar \lambda) \le 0$. For backward differentiation, the stability regions are the exteriors of each curve.
Remember that the order-1 Runge-Kutta and Adams-Bashforth methods coincide, both being forward Euler. In turn, the Adams-Moulton schemes of orders 1 and 2 are backward Euler and the trapezoidal rule, respectively.
Note also that for multistep methods the stability regions shrink as the order increases, whereas for Runge-Kutta (up to order 4) the opposite happens; the sketch below reproduces the Runge-Kutta boundaries.
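For the Runge-Kutta family these boundaries are easy to generate (a sketch of our own): an explicit method of order $p \le 4$ with $p$ stages applied to $\dot y = \lambda y$ has amplification factor $R(\bar\lambda) = \sum_{m=0}^{p} \bar\lambda^m/m!$, and the stability region is $|R(\bar\lambda)| \le 1$.
```python
import numpy as np
import matplotlib.pyplot as plt
from math import factorial

x, y = np.meshgrid(np.linspace(-5, 2, 400), np.linspace(-4, 4, 400))
z = x + 1j*y  # Grid of complex values of k*lambda
fig, ax = plt.subplots(figsize=(5,5), constrained_layout=True)
for p in range(1, 5):
    R = sum(z**m/factorial(m) for m in range(p+1))  # Amplification factor
    ax.contour(x, y, np.abs(R), levels=[1], colors=f"C{p-1}")
ax.axhline(0, c="k", lw=.5); ax.axvline(0, c="k", lw=.5)
ax.set_xlabel(r"$\mathrm{Re}(\bar\lambda)$")
ax.set_ylabel(r"$\mathrm{Im}(\bar\lambda)$")
ax.set_title(r"RK1-RK4 stability boundaries, $|R(\bar\lambda)|=1$");
```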
### **Relation between convergence, consistency, and stability**
The importance of stability is summarized by Dahlquist's theorem$^\dagger$, which states that, for a numerical scheme approximating an ODE,
\begin{equation}
\mathrm{Convergent} \qquad \Longleftrightarrow \qquad \mathrm{Consistent} \wedge \mathrm{Stable} \tag{Dahlquist's theorem}
\end{equation}
## **A capstone example: the Lane-Emden equation**
Let us now consider the following nonlinear initial value problem
\begin{equation*}
t^2 \ddot{y} + 2t \dot{y} + t^2 y^\gamma = 0, \qquad \qquad y(0) = 1, \qquad \dot{y}(0) = 0.
\end{equation*}
This equation is known as the Lane-Emden equation and describes (in dimensionless form) the pressure and density as functions of radius for a self-gravitating fluid sphere in hydrostatic equilibrium, under the polytropic approximation.
Defining $u_0 = y$, $u_1 = \dot{y}$, we can reduce this second-order ODE to a system of first-order ODEs as follows
\begin{align*}
\dot{u}_0 &= u_1, \qquad \qquad & u_0(0) = 1,\\
\dot{u}_1 &= - u_0^\gamma - \frac{2u_1}{t} & u_1(0) = 0.
\end{align*}
Note that when integrating we will face a singularity when evaluating $\dot{u}_1$ at $t=0$. It can be shown that, for every value of $\gamma$, solutions of the Lane-Emden equation satisfy $\ddot y (0) = - y(0)^\gamma/3$. We will exploit this result to preserve the integrator's order. Note that if we ignored this fact we could still integrate the equation one way or another, but high-order integrators would see their order of accuracy degraded.
We will solve this system using a third-order (i.e., 2-step) Adams-Moulton method. To do so, we must somehow obtain $\mathbf{u}^1$ (the solution at the first time step), after which we can apply
\begin{equation*}
\mathbf u^{n+1} = \mathbf u^n + \frac{k}{12} \left( 5 \mathbf f^{n+1} + 8 \mathbf f^n - \mathbf f^{n-1} \right).
\end{equation*}
This implicit equation for $\mathbf u^{n+1}$ (it appears inside $\mathbf f^{n+1}$ and explicitly on the left-hand side) will be solved with a Newton-Krylov iterative method (specifically, [LGMRES](https://epubs.siam.org/doi/10.1137/S0895479803422014)), which searches for roots of the function
\begin{equation*}
\mathbf g(\mathbf x) = \mathbf x - \mathbf u^n - \frac{k}{12} \left( 5 \mathbf f(\mathbf x) + 8 \mathbf f^n - \mathbf f^{n-1} \right),
\end{equation*}
where $\mathbf f(t^{n+1}, \mathbf{x})$ is the Lane-Emden system evaluated at $(t^{n+1}, \mathbf x)$. It is worth noting that you do not need to know how Newton-Krylov algorithms work; it is enough to know that they find roots efficiently, and that we can access one through SciPy, a library preinstalled in Colab, via [`scipy.optimize.newton_krylov`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton_krylov.html).
To obtain $\mathbf{u}^1$ we will use another method of the Adams-Moulton family, in particular a second-order one. We will apply the same root-finding idea mentioned above, but with the scheme
\begin{equation*}
\mathbf u^1 = \mathbf u^0 + \frac{k}{2} (\mathbf f^1 + \mathbf f^0).
\end{equation*}
However, this method generates $\mathbf u^1$ with a lower order of accuracy than the integrator we will use afterwards (2 versus 3). To preserve the overall accuracy, the initialization must use a time step $k'$ smaller than the step $k$ used for the rest of the integration. Splitting each step of size $k$ into $n'$ substeps of size $k' = k/n'$, the order-2 local errors accumulate as $n' \mathcal{O}(k'^3) = \mathcal{O}(k k'^2)$, which stays within the $\mathcal{O}(k^4)$ one-step error of the order-3 method provided $k' \le k^{3/2}$. It therefore suffices to take the number of substeps as
\begin{equation*}
n' = \lceil k^{1-\frac{3}{2}} \rceil = \lceil k^{-\frac{1}{2}} \rceil, \qquad \qquad k' = \frac{k}{n'},
\end{equation*}
with $\lceil \cdot \rceil$ the ceiling function (the 3 and the 2 in the exponent come from the orders of the two methods).
The following code defines a function that initializes as many steps as desired for the Lane-Emden equation. It can also be used to initialize methods of other orders.
```python
def inicializar(cond_inicial, gamma, dt, pasos, orden):
    """ Initializes Lane-Emden using second-order Adams-Moulton (trapezoidal),
    with a sub-step chosen so as to keep the subsequent order of
    approximation consistent.
    Input:
        - cond_inicial: array of dimensions (2) with the initial
          conditions for u0 and u1.
        - gamma: polytropic index.
        - dt: time step of the method that will continue the integration;
        - pasos: number of steps to generate using AM2;
        - orden: order of the method that will continue the integration.
    Returns:
        - u: array of dimensions (pasos, 2) with the Lane-Emden equation
          integrated over the requested number of steps."""
    import numpy as np
    import scipy.optimize as spoptimize
    # Number of initializer sub-steps per step of the
    # higher-order integrator.
    pasos_ini = dt**(1-orden/2)
    # pasos_ini is most likely not an integer. Take the first integer above
    # pasos_ini and define the initializer's dt accordingly
    pasos_ini = int(np.ceil(pasos_ini))
    dt_ini = dt/pasos_ini
    # Create an array to store the integration and add the initial condition
    u_ini = np.zeros( (pasos_ini*pasos+1, 2) )
    u_ini[0] = cond_inicial
    fn  = np.zeros( 2 )  # Where f^n will be stored
    fn1 = np.zeros( 2 )  # Where f^{n-1} will be stored
    # Integrate using the implicit trapezoidal rule (AM2)
    for n in range(0, pasos_ini*pasos):
        ts = (n+1)*dt_ini  # t^{n+1}
        tn = n*dt_ini      # t^n
        fn[0] = u_ini[n,1]                            # f_1^n
        fn[1] = -u_ini[n,0]**gamma - 2*u_ini[n,1]/tn  # f_2^n
        # Handle the singularity at t=0 for y''
        # (uses y''(0) = -y(0)^gamma/3, with y(0) = 1).
        if n==0: fn[1] = -1/3
        # Initial guess for the root search of y^{n+1}, using Euler
        est = u_ini[n] + dt_ini*fn
        # Function whose roots we seek
        def f_raices(us):
            fs = np.array([ us[1], -us[0]**gamma - 2*us[1]/ts ])  # f^{n+1}
            return us - u_ini[n] - (fs + fn)*dt_ini/2             # AM2
        # Find the roots with Newton-Krylov (i.e., obtain u_ini^{n+1})
        u_ini[n+1] = spoptimize.newton_krylov(f_raices, est)
    # Return only the values of u at the original time step
    return u_ini[pasos_ini::pasos_ini]
```
**After running that cell**, you can run the following code to integrate the Lane-Emden equation with the proposed mechanism.
```python
# We will integrate Lane-Emden with third-order Adams-Moulton (2 steps)
import numpy as np
import scipy.optimize as spoptimize
import matplotlib.pyplot as plt
# The next two lines filter some warnings generated by the root-finding
# method. Do not use them in your own codes.
import warnings
warnings.filterwarnings('ignore')
# For some values of gamma the analytical solution is known; exploit it.
def sol_analitica(t, gamma):
    if gamma == 0:
        return np.array([ 1-t**2/6, -t/3 ])
    if gamma == 1:
        return np.array([ np.sin(t)/t, (t*np.cos(t) - np.sin(t))/t**2 ])
    if gamma == 5:
        return np.array([ 1/np.sqrt(1+t**2/3), -np.sqrt(3)*t/(t**2+3)**1.5 ])
tf = 10     # Final integration "time"
gamma = 5   # Polytropic index
analitica = gamma in [0, 1, 5]        # Check if an analytical solution exists
dts = np.array([ 1e-1, 1e-2, 1e-3 ])  # Time steps to explore
# If there is no analytical solution, pick a single dt to integrate.
if not analitica: dts = np.array([1e-3])
fn  = np.zeros( 2 )  # Where f^n will be stored
fn1 = np.zeros( 2 )  # Where f^{n-1} will be stored
# If an analytical solution exists, store the error of each integration
if analitica: errs = np.zeros( (dts.size, 2) )
# Integrate for each dt
for i, dt in enumerate(dts):
    pasos = int(round(tf/dt))     # Number of steps
    u = np.zeros( (pasos+1, 2) )  # Array for u_0 (u[:,0]) and u_1 (u[:,1])
    # Initial conditions
    u[0,0] = 1
    u[0,1] = 0
    # Initialize the step that cannot be done with AM3.
    u[1] = inicializar(u[0], gamma, dt, 1, 3)
    # Integrate with third-order Adams-Moulton (AM3)
    for n in range(1, pasos):
        ts  = (n+1)*dt  # t^{n+1}
        tn  = n*dt      # t^n
        tn1 = (n-1)*dt  # t^{n-1}
        if n == 1: tn1 = 1e-50  # Avoid 0/0 when evaluating f at t=0.
        fn[0]  = u[n,1]                        # f_1^n
        fn[1]  = -u[n,0]**gamma - 2*u[n,1]/tn  # f_2^n
        fn1[0] = u[n-1,1]                           # f_1^{n-1}
        fn1[1] = -u[n-1,0]**gamma - 2*u[n-1,1]/tn1  # f_2^{n-1}
        # Handle the singularity of y'' at t=0
        if n==1: fn1[1] = -1/3
        # Initial guess for the root search of y^{n+1}, using Euler
        est = u[n] + dt*fn
        # Function whose roots we seek
        def f_raices(us):
            fs = np.array([ us[1], -us[0]**gamma - 2*us[1]/ts ])  # f^{n+1}
            return us - u[n] - ( 5*fs + 8*fn - fn1)*dt/12         # AM3
        # Find the roots with Newton-Krylov (i.e., obtain u^{n+1})
        u[n+1] = spoptimize.newton_krylov(f_raices, est)
    # If an analytical solution exists, compute the error
    if analitica: errs[i] = np.abs(u[n+1] - sol_analitica(ts, gamma))
# Plot
if analitica:
    fig, axs = plt.subplots(1, 2, figsize=(8,4), constrained_layout=True)
else:
    fig, ax = plt.subplots(1, 1, figsize=(4,4), constrained_layout=True)
    axs = [ax]
t = np.arange(u.shape[0])*dt
axs[0].plot(t, u[:,0], c="C1", label="Numerical", lw=6)
axs[0].set_xlabel("$t$")
axs[0].set_ylabel("$y$")
fig.suptitle(f"Lane-Emden equation for $\gamma={gamma}$", fontsize=16)
axs[0].set_title("Solution $y(t)$")
if analitica:
    axs[0].plot(t, sol_analitica(t,gamma)[0], "--k", label="Exact")
    axs[1].loglog(1/dts, errs[:,0], 'x', label="$y$")
    axs[1].loglog(1/dts, errs[:,1], 'o', label="$\dot{y}$")
    axs[1].loglog(1/dts, 1e-1*dts**3, "--k", label="$\propto k^3$")
    axs[1].legend()
    axs[1].set_title(f"Errors in $y$ and $y'$ at $t={t[-1]:.1f}$")
    axs[1].set_xlabel("$1/k$")
    axs[1].set_ylabel("Error")
axs[0].legend();
```
As you can check yourselves, the proposed code exploits the fact that an analytical solution exists for $\gamma \in \{0, 1, 5 \}$ and computes the integration error. You can verify that for these values of $\gamma$ the accuracy order of the method is as expected (i.e., $\propto k^3$), except for $\gamma = 0$, where in every case the error is close to that associated with finite arithmetic precision.
Finally, it is worth noting that the use of an implicit method in this example is purely pedagogical, so as to illustrate its use in a nonlinear case and to address the initialization problem. For a mildly stiff equation like this one, explicit algorithms such as RK4 are more efficient.
## **Adaptive time stepping**
One element we did not want to leave unmentioned, although we will not cover it in this course, is that the methods above can be reformulated, with greater or lesser difficulty, to work with a variable $k$. In this way, when the ODE behaves more stiffly $k$ is reduced, while if the integration later becomes less stiff the step can be enlarged. Numerous automatic ODE solvers, such as `scipy.integrate.odeint`, implement this strategy; a brief sketch using SciPy follows below.
However, this strategy is not always computationally optimal when the physical problem at hand is well understood. For example, when integrating the Navier-Stokes equations, the range of time scales that must be resolved correctly is known a priori, and fixed-$k$ strategies may therefore be more appropriate.
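As a brief sketch (our own; `scipy.integrate.solve_ivp` is SciPy's modern adaptive interface, closely related to the `odeint` mentioned above), an adaptive Runge-Kutta solver handles the stiff example from the stability section without us choosing $k$ by hand:
```python
import numpy as np
from scipy.integrate import solve_ivp

f = lambda t, y: -100*(y - np.cos(t)) - np.sin(t)
sol = solve_ivp(f, (0, 1), [1.0], method="RK45", rtol=1e-6, atol=1e-9)
print(f"{sol.t.size} accepted steps,",
      "max error:", np.abs(sol.y[0] - np.cos(sol.t)).max())
```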
(* ************************************************************************** *)
(** * Basic tactics *)
(* ************************************************************************** *)
(** This file collects a number of basic tactics for better proof automation,
structuring large proofs, or rewriting. Many of the definitions have been
ported from ssreflect. *)
(** Symbols starting with [hahn__] are internal. *)
Require Import Bool Arith ZArith String.
Require ClassicalFacts.
Require Export ClassicalDescription FunctionalExtensionality.
Open Scope bool_scope.
Open Scope list_scope.
Set Implicit Arguments.
Unset Strict Implicit.
(** Set up hint databases *)
Create HintDb hahn discriminated. (* General stuff, used by done *)
Create HintDb hahn_refl discriminated. (* Decidable equalities *)
Create HintDb hahn_full discriminated. (* Expensive lemmas *)
(** Shorthand for applying functional extensionality. *)
Ltac exten := apply functional_extensionality.
(* ************************************************************************** *)
(** ** Coercion of [bool] into [Prop] *)
(* ************************************************************************** *)
(** Coercion of bools into Prop *)
Coercion is_true (b : bool) : Prop := b = true.
(** Hints for auto *)
Lemma hahn__true_is_true : true.
Proof. reflexivity. Qed.
Lemma hahn__not_false_is_true : ~ false.
Proof. discriminate. Qed.
Hint Resolve hahn__true_is_true hahn__not_false_is_true.
(* ************************************************************************** *)
(** ** Very basic automation *)
(* ************************************************************************** *)
(** Set up for basic simplification *)
(** Adaptation of the ssreflect "[done]" tactic. *)
Ltac hahn__basic_done :=
solve [trivial with hahn | simple apply sym_equal; trivial | discriminate | contradiction].
Ltac done := trivial with hahn; hnf; intros;
solve [try hahn__basic_done; split;
try hahn__basic_done; split;
try hahn__basic_done; split;
try hahn__basic_done; split;
try hahn__basic_done; split; hahn__basic_done
| match goal with H : ~ _ |- _ => solve [case H; trivial] end].
(** A variant of the ssr "done" tactic that performs "eassumption". *)
Ltac edone := try eassumption; trivial; hnf; intros;
solve [try eassumption; try hahn__basic_done; split;
try eassumption; try hahn__basic_done; split;
try eassumption; try hahn__basic_done; split;
try eassumption; try hahn__basic_done; split;
try eassumption; try hahn__basic_done; split;
try eassumption; hahn__basic_done
| match goal with H : ~ _ |- _ => solve [case H; trivial] end].
Tactic Notation "by" tactic(tac) := (tac; done).
Tactic Notation "eby" tactic(tac) := (tac; edone).
(* ************************************************************************** *)
(** ** Equality types *)
(* ************************************************************************** *)
Module Equality.
Definition axiom T (e : T -> T -> bool) :=
forall x y, reflect (x = y) (e x y).
Structure mixin_of T := Mixin {op : T -> T -> bool; _ : axiom op}.
Notation class_of := mixin_of (only parsing).
Section ClassDef.
Structure type := Pack {sort; _ : class_of sort; _ : Type}.
Definition class cT' :=
match cT' return class_of (sort cT') with @Pack _ c _ => c end.
Definition pack (T: Type) c := @Pack T c T.
End ClassDef.
Module Exports.
Coercion sort : type >-> Sortclass.
Notation eqType := type.
Notation EqMixin := Mixin.
Notation EqType T m := (@pack T m).
End Exports.
End Equality.
Export Equality.Exports.
Definition eq_op T := Equality.op (Equality.class T).
Arguments eq_op {T}.
Lemma eqE : forall T x, eq_op x = Equality.op (Equality.class T) x.
Proof. done. Qed.
Lemma eqP : forall T, Equality.axiom (@eq_op T).
Proof. by unfold eq_op; destruct T as [? []]. Qed.
Arguments eqP [T] x y.
(*
Notation "x == y" := (eq_op x y)
(at level 70, no associativity) : bool_scope.
Notation "x == y :> T" := ((x : T) == (y : T))
(at level 70, y at next level) : bool_scope.
Notation "x != y" := (negb (x == y))
(at level 70, no associativity) : bool_scope.
Notation "x != y :> T" := (negb (x == y :> T))
(at level 70, y at next level) : bool_scope.
*)
Lemma hahn__internal_eqP :
forall (T: eqType) (x y : T), reflect (x = y) (eq_op x y).
Proof. apply eqP. Qed.
Lemma neqP : forall (T: eqType) (x y: T), reflect (x <> y) (negb (eq_op x y)).
Proof. intros; case eqP; constructor; auto. Qed.
Lemma beq_refl : forall (T : eqType) (x : T), eq_op x x.
Proof. by intros; case eqP. Qed.
Lemma beq_sym : forall (T : eqType) (x y : T), (eq_op x y) = (eq_op y x).
Proof. intros; do 2 case eqP; congruence. Qed.
Hint Resolve beq_refl : hahn.
Hint Rewrite beq_refl : hahn_trivial.
Notation eqxx := beq_refl.
(** Comparison for [nat] *)
Fixpoint eqn_rec (x y: nat) {struct x} :=
match x, y with
| O, O => true
| S x, S y => eqn_rec x y
| _, _ => false
end.
Definition eqn := match tt with tt => eqn_rec end.
Lemma eqnP: forall x y, reflect (x = y) (eqn x y).
Proof.
induction x; destruct y; try (constructor; done).
change (eqn (S x) (S y)) with (eqn x y).
case IHx; constructor; congruence.
Qed.
Canonical Structure nat_eqMixin := EqMixin eqnP.
Canonical Structure nat_eqType := Eval hnf in EqType nat nat_eqMixin.
Lemma eqnE : eqn = (@eq_op _).
Proof. done. Qed.
(* ************************************************************************** *)
(** ** Basic simplification tactics *)
(* ************************************************************************** *)
Lemma hahn__negb_rewrite : forall b, negb b -> b = false.
Proof. by intros []. Qed.
Lemma hahn__andb_split : forall b1 b2, b1 && b2 -> b1 /\ b2.
Proof. by intros [] []. Qed.
Lemma hahn__nandb_split : forall b1 b2, b1 && b2 = false -> b1 = false \/ b2 = false.
Proof. intros [] []; auto. Qed.
Lemma hahn__orb_split : forall b1 b2, b1 || b2 -> b1 \/ b2.
Proof. intros [] []; auto. Qed.
Lemma hahn__norb_split : forall b1 b2, b1 || b2 = false -> b1 = false /\ b2 = false.
Proof. intros [] []; auto. Qed.
Lemma hahn__eqb_split : forall b1 b2 : bool, (b1 -> b2) -> (b2 -> b1) -> b1 = b2.
Proof. intros [] [] H H'; unfold is_true in *; auto using sym_eq. Qed.
Lemma hahn__beq_rewrite : forall (T : eqType) (x1 x2 : T), eq_op x1 x2 -> x1 = x2.
Proof. by intros *; case eqP. Qed.
(** Set up for basic simplification: database of reflection lemmas *)
Hint Resolve hahn__internal_eqP neqP : hahn_refl.
Ltac hahn__complaining_inj f H :=
let X := fresh in
(match goal with | [|- ?P ] => set (X := P) end);
injection H; clear H; intros; subst X;
try subst.
Ltac hahn__clarify1 :=
try subst;
repeat match goal with
| [H: is_true (andb _ _) |- _] =>
let H' := fresh H in case (hahn__andb_split H); clear H; intros H' H
| [H: is_true (negb ?x) |- _] => rewrite (hahn__negb_rewrite H) in *
| [H: is_true ?x |- _] => rewrite H in *
| [H: ?x = true |- _] => rewrite H in *
| [H: ?x = false |- _] => rewrite H in *
| [H: is_true (eq_op _ _) |- _] => generalize (hahn__beq_rewrite H); clear H; intro H
| [H: @existT _ _ _ _ = @existT _ _ _ _ |- _] => apply inj_pair2 in H; try subst
| [H: ?f _ = ?f _ |- _] => hahn__complaining_inj f H
| [H: ?f _ _ = ?f _ _ |- _] => hahn__complaining_inj f H
| [H: ?f _ _ _ = ?f _ _ _ |- _] => hahn__complaining_inj f H
| [H: ?f _ _ _ _ = ?f _ _ _ _ |- _] => hahn__complaining_inj f H
| [H: ?f _ _ _ _ _ = ?f _ _ _ _ _ |- _] => hahn__complaining_inj f H
| [H: ?f _ _ _ _ _ _ = ?f _ _ _ _ _ _ |- _] => hahn__complaining_inj f H
| [H: ?f _ _ _ _ _ _ _ = ?f _ _ _ _ _ _ _ |- _] => hahn__complaining_inj f H
end; try done.
(** Perform injections & discriminations on all hypotheses *)
Ltac clarify :=
hahn__clarify1;
repeat match goal with
| H1: ?x = Some _, H2: ?x = None |- _ => rewrite H2 in H1; discriminate
| H1: ?x = Some _, H2: ?x = Some _ |- _ => rewrite H2 in H1; hahn__clarify1
end; (* autorewrite with hahn_trivial; *) try done.
(** Kill simple goals that require up to two econstructor calls. *)
Ltac vauto :=
(clarify; try edone;
try [> econstructor; (solve [edone | [> econstructor; edone]])]).
Ltac inv x := inversion x; clarify.
Ltac simpls := simpl in *; try done.
Ltac ins := simpl in *; try done; intros.
Ltac hahn__clarsimp1 :=
clarify; (autorewrite with hahn_trivial hahn in * );
(autorewrite with hahn_trivial in * ); try done;
clarify; auto 1 with hahn.
Ltac clarsimp := intros; simpl in *; hahn__clarsimp1.
Ltac autos := clarsimp; auto with hahn.
Tactic Notation "econs" := econstructor.
Tactic Notation "econs" int_or_var(x) := econstructor x.
(* ************************************************************************** *)
(** Destruct but give useful names *)
(* ************************************************************************** *)
Definition NW (P: unit -> Prop) : Prop := P tt.
Notation "\u27ea x : t \u27eb" := (NW (fun x => t)) (at level 80, x ident, no associativity).
Notation "<< x : t >>" := (NW (fun x => t))
(at level 80, x ident, no associativity, only parsing).
Notation "\u27ea t \u27eb" := (NW (fun _ => t)) (at level 79, no associativity, format "\u27ea t \u27eb").
Ltac unnw := unfold NW in *.
Ltac rednw := red; unnw.
Hint Unfold NW.
Ltac splits :=
intros; unfold NW;
repeat match goal with
| [ |- _ /\ _ ] => split
end.
Ltac esplits :=
intros; unfold NW;
repeat match goal with
| [ |- @ex _ _ ] => eexists
| [ |- _ /\ _ ] => split
| [ |- @sig _ _ ] => eexists
| [ |- @sigT _ _ ] => eexists
| [ |- @prod _ _ ] => split
end.
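(** A standalone sanity check (a minimal sketch): [esplits] introduces the
    existential witness as an evar and splits the conjunction, after which
    [reflexivity] instantiates the witness. *)
Goal exists n : nat, n = 0 /\ 0 = n.
Proof. esplits; reflexivity. Qed.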
(** Destruct, but no case split *)
Ltac desc :=
repeat match goal with
| H: is_true (eq_op _ _) |- _ => generalize (hahn__beq_rewrite H); clear H; intro H
| H : exists x, NW (fun y => _) |- _ =>
let x' := fresh x in let y' := fresh y in destruct H as [x' y']; red in y'
| H : exists x, ?p |- _ =>
let x' := fresh x in destruct H as [x' H]
| H : ?p /\ ?q |- _ =>
let x' := match p with | NW (fun z => _) => fresh z | _ => H end in
let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in
destruct H as [x' y'];
match p with | NW _ => red in x' | _ => idtac end;
match q with | NW _ => red in y' | _ => idtac end
| H : is_true (_ && _) |- _ =>
let H' := fresh H in case (hahn__andb_split H); clear H; intros H H'
| H : (_ || _) = false |- _ =>
let H' := fresh H in case (hahn__norb_split H); clear H; intros H H'
| H : ?x = ?x |- _ => clear H
(* | H: is_true ?x |- _ => eapply elimT in H; [|solve [trivial with hahn_refl]]
| H: ?x = true |- _ => eapply elimT in H; [|solve [trivial with hahn_refl]]
| H: ?x = false |- _ => eapply elimFn in H; [|solve [trivial with hahn_refl]]
| H: ?x = false |- _ => eapply elimF in H; [|solve [trivial with hahn_refl]] *)
end.
Ltac des :=
repeat match goal with
| H: is_true (eq_op _ _) |- _ => generalize (hahn__beq_rewrite H); clear H; intro H
| H : exists x, NW (fun y => _) |- _ =>
let x' := fresh x in let y' := fresh y in destruct H as [x' y']; red in y'
| H : exists x, ?p |- _ =>
let x' := fresh x in destruct H as [x' H]
| H : ?p /\ ?q |- _ =>
let x' := match p with | NW (fun z => _) => fresh z | _ => H end in
let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in
destruct H as [x' y'];
match p with | NW _ => red in x' | _ => idtac end;
match q with | NW _ => red in y' | _ => idtac end
| H : is_true (_ && _) |- _ =>
let H' := fresh H in case (hahn__andb_split H); clear H; intros H H'
| H : (_ || _) = false |- _ =>
let H' := fresh H in case (hahn__norb_split H); clear H; intros H H'
| H : ?x = ?x |- _ => clear H
| H : ?p <-> ?q |- _ =>
let x' := match p with | NW (fun z => _) => fresh z | _ => H end in
let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in
destruct H as [x' y'];
match p with | NW _ => unfold NW at 1 in x'; red in y' | _ => idtac end;
match q with | NW _ => unfold NW at 1 in y'; red in x' | _ => idtac end
| H : ?p \/ ?q |- _ =>
let x' := match p with | NW (fun z => _) => fresh z | _ => H end in
let y' := match q with | NW (fun z => _) => fresh z | _ => H end in
destruct H as [x' | y'];
[ match p with | NW _ => red in x' | _ => idtac end
| match q with | NW _ => red in y' | _ => idtac end]
| H : is_true (_ || _) |- _ => case (hahn__orb_split H); clear H; intro H
| H : (_ && _) = false |- _ => case (hahn__nandb_split H); clear H; intro H
end.
Ltac cdes H :=
let H' := fresh H in assert (H' := H); red in H'; desc.
Ltac des_if_asm :=
clarify;
repeat
match goal with
| H: context[ match ?x with _ => _ end ] |- _ =>
match (type of x) with
| { _ } + { _ } => destruct x; clarify
| bool =>
let Heq := fresh "Heq" in
let P := fresh in
evar(P: Prop);
assert (Heq: reflect P x) by (subst P; trivial with hahn_refl);
subst P; destruct Heq as [Heq|Heq]
| _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify
end
end.
Ltac des_if_goal :=
clarify;
repeat
match goal with
| |- context[match ?x with _ => _ end] =>
match (type of x) with
| { _ } + { _ } => destruct x; clarify
| bool =>
let Heq := fresh "Heq" in
let P := fresh in
evar(P: Prop);
assert (Heq: reflect P x) by (subst P; trivial with hahn_refl);
subst P; destruct Heq as [Heq|Heq]
| _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify
end
end.
Ltac des_if :=
clarify;
repeat
match goal with
| |- context[match ?x with _ => _ end] =>
match (type of x) with
| { _ } + { _ } => destruct x; clarify
| bool =>
let Heq := fresh "Heq" in
let P := fresh in
evar(P: Prop);
assert (Heq: reflect P x) by (subst P; trivial with hahn_refl);
subst P; destruct Heq as [Heq|Heq]
| _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify
end
| H: context[ match ?x with _ => _ end ] |- _ =>
match (type of x) with
| { _ } + { _ } => destruct x; clarify
| bool =>
let Heq := fresh "Heq" in
let P := fresh in
evar(P: Prop);
assert (Heq: reflect P x) by (subst P; trivial with hahn_refl);
subst P; destruct Heq as [Heq|Heq]
| _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify
end
end.
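(** Usage sketch: [des_if] case-splits on every [if]/[match] scrutinee in the
    goal and the hypotheses, recording each case as an equation [Heq] (or, for
    booleans with a registered reflection lemma, as a [reflect]-derived
    hypothesis). *)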
Ltac des_eqrefl :=
match goal with
| H: context[match ?X with _ => _ end Logic.eq_refl] |- _ =>
let EQ := fresh "EQ" in
let id' := fresh "x" in
revert H;
generalize (Logic.eq_refl X); generalize X at 1 3;
intros id' EQ; destruct id'; intros H
| |- context[match ?X with _ => _ end Logic.eq_refl] =>
let EQ := fresh "EQ" in
let id' := fresh "x" in
generalize (Logic.eq_refl X); generalize X at 1 3;
intros id' EQ; destruct id'
end.
Ltac desf_asm := clarify; des; des_if_asm.
Ltac desf := clarify; des; des_if.
Ltac clarassoc := clarsimp; autorewrite with hahn_trivial hahn hahnA in *; try done.
Ltac hahn__hacksimp1 :=
clarsimp;
match goal with
| H: _ |- _ => solve [rewrite H; clear H; clarsimp
|rewrite <- H; clear H; clarsimp]
| _ => solve [f_equal; clarsimp]
end.
Ltac hacksimp :=
clarsimp;
try match goal with
| H: _ |- _ => solve [rewrite H; clear H; clarsimp
|rewrite <- H; clear H; clarsimp]
| |- context[match ?p with _ => _ end] => solve [destruct p; hahn__hacksimp1]
| _ => solve [f_equal; clarsimp]
end.
Ltac clarify_not :=
repeat (match goal with
| H : ~ False |- _ => clear H
| H : ~ ~ _ |- _ => apply NNPP in H
| H : ~ _ |- _ => apply imply_to_and in H; desc
| H : ~ _ |- _ => apply not_or_and in H; desc
| H : ~ _ |- _ => apply not_and_or in H; des
| H : ~ _ |- _ => apply not_all_ex_not in H; desc
end; clarify).
Tactic Notation "tertium_non_datur" constr(P) :=
destruct (classic P); clarify_not.
Tactic Notation "tertium_non_datur" constr(P) "as" simple_intropattern(pattern) :=
destruct (classic P) as pattern; clarify_not.
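(** Usage sketch: [tertium_non_datur (P /\ Q)] case-splits classically on
    [P /\ Q]; in the negative branch, [clarify_not] pushes the negation
    inward, yielding the cases [~ P] and [~ Q]. *)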
(* ************************************************************************** *)
(** ** Unification helpers *)
(* ************************************************************************** *)
Tactic Notation "pattern_lhs" uconstr(term) :=
match goal with
|- _ ?lhs _ =>
let P := fresh in
pose (P := lhs); pattern term in P; change lhs with P; subst P
end.
Tactic Notation "pattern_rhs" uconstr(term) :=
match goal with
|- _ _ ?rhs =>
let P := fresh in
pose (P := rhs); pattern term in P; change rhs with P; subst P
end.
(* ************************************************************************** *)
(** ** Exploiting a hypothesis *)
(* ************************************************************************** *)
Tactic Notation "forward" tactic1(tac) :=
let foo := fresh in
evar (foo : Prop); cut (foo); subst foo; cycle 1; [tac|].
Tactic Notation "forward" tactic1(tac) "as" simple_intropattern(H) :=
let foo := fresh in
evar (foo : Prop); cut (foo); subst foo; cycle 1; [tac|intros H].
Tactic Notation "specialize_full" ident(H) :=
let foo := fresh in
evar (foo : Prop); cut (foo); subst foo; cycle 1; [eapply H|try clear H; intro H].
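(** Usage sketch: [forward tac as H] runs [tac] to establish an auxiliary
    fact (whose statement is inferred via an evar) and introduces it as
    hypothesis [H]; [specialize_full H] similarly re-proves the premises of an
    existing hypothesis [H] as separate subgoals, reintroducing the
    strengthened [H]. *)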
(** Exploit an assumption (adapted from CompCert). *)
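(** Usage sketch: given [H : forall x, P x -> Q x], [exploit H] opens
    subgoals for the arguments of [H] and turns the main goal [G] into
    [Q ?x -> G], to be finished with [intro]. *)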
Ltac exploit x :=
refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _ _) _)
|| refine ((fun x y => y x) (x _ _ _) _)
|| refine ((fun x y => y x) (x _ _) _)
|| refine ((fun x y => y x) (x _) _). |
-- Andreas, 2018-04-16, issue #3033, reported by Christian Sattler
-- The DotPatternCtx was not used inside dot patterns in ConcreteToAbstract.
postulate
A : Set
B : Set
a : A
f : A → B
data C : B → Set where
c : C (f a)
foo : (b : B) → C b → Set
foo .{!f a!} c = A -- give "f a" here
-- WAS: foo .f a c = A
-- NOW: foo .(f a) c = A
|
[STATEMENT]
lemma closed_err_types:
"wf_prog wf_mb P \<Longrightarrow> closed (err (types P)) (lift2 (sup P))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf_prog wf_mb P \<Longrightarrow> closed (err (types P)) (lift2 (SemiType.sup P))
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf_prog wf_mb P \<Longrightarrow> closed (err (types P)) (lift2 (SemiType.sup P))
[PROOF STEP]
apply (unfold closed_def plussub_def lift2_def sup_def')
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf_prog wf_mb P \<Longrightarrow> \<forall>x\<in>err (types P). \<forall>y\<in>err (types P). (case x of Err \<Rightarrow> Err | OK x \<Rightarrow> case y of Err \<Rightarrow> Err | OK T\<^sub>2 \<Rightarrow> if is_refT x \<and> is_refT T\<^sub>2 then OK (if x = NT then T\<^sub>2 else if T\<^sub>2 = NT then x else Class (exec_lub (subcls1 P) (super P) (the_Class x) (the_Class T\<^sub>2))) else if x = T\<^sub>2 then OK x else Err) \<in> err (types P)
[PROOF STEP]
apply (frule acyclic_subcls1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>wf_prog wf_mb P; acyclic (subcls1 P)\<rbrakk> \<Longrightarrow> \<forall>x\<in>err (types P). \<forall>y\<in>err (types P). (case x of Err \<Rightarrow> Err | OK x \<Rightarrow> case y of Err \<Rightarrow> Err | OK T\<^sub>2 \<Rightarrow> if is_refT x \<and> is_refT T\<^sub>2 then OK (if x = NT then T\<^sub>2 else if T\<^sub>2 = NT then x else Class (exec_lub (subcls1 P) (super P) (the_Class x) (the_Class T\<^sub>2))) else if x = T\<^sub>2 then OK x else Err) \<in> err (types P)
[PROOF STEP]
apply (frule single_valued_subcls1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>wf_prog wf_mb P; acyclic (subcls1 P); single_valued (subcls1 P)\<rbrakk> \<Longrightarrow> \<forall>x\<in>err (types P). \<forall>y\<in>err (types P). (case x of Err \<Rightarrow> Err | OK x \<Rightarrow> case y of Err \<Rightarrow> Err | OK T\<^sub>2 \<Rightarrow> if is_refT x \<and> is_refT T\<^sub>2 then OK (if x = NT then T\<^sub>2 else if T\<^sub>2 = NT then x else Class (exec_lub (subcls1 P) (super P) (the_Class x) (the_Class T\<^sub>2))) else if x = T\<^sub>2 then OK x else Err) \<in> err (types P)
[PROOF STEP]
apply (auto simp: is_type_def is_refT_def is_class_is_subcls split: err.split ty.splits)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x5 x5a. \<lbrakk>wf_prog wf_mb P; acyclic (subcls1 P); single_valued (subcls1 P); x5 \<noteq> x5a; P \<turnstile> x5a \<preceq>\<^sup>* Object; P \<turnstile> x5 \<preceq>\<^sup>* Object\<rbrakk> \<Longrightarrow> P \<turnstile> exec_lub (subcls1 P) (super P) x5 x5a \<preceq>\<^sup>* Object
[PROOF STEP]
apply (blast dest!: is_lub_exec_lub is_lubD is_ubD intro!: is_ubI superI)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
(* Title: JinjaDCI/Common/Objects.thy
Author: David von Oheimb, Susannah Mansky
Copyright 1999 Technische Universitaet Muenchen, 2019-20 UIUC
Based on the Jinja theory Common/Objects.thy by David von Oheimb
*)
section \<open> Objects and the Heap \<close>
theory Objects imports TypeRel Value begin
subsection\<open> Objects \<close>
type_synonym
fields = "vname \<times> cname \<rightharpoonup> val" \<comment> \<open>field name, defining class, value\<close>
type_synonym
obj = "cname \<times> fields" \<comment> \<open>class instance with class name and fields\<close>
type_synonym
sfields = "vname \<rightharpoonup> val" \<comment> \<open>field name to value\<close>
definition obj_ty :: "obj \<Rightarrow> ty"
where
"obj_ty obj \<equiv> Class (fst obj)"
\<comment> \<open> initializes a given list of fields \<close>
definition init_fields :: "((vname \<times> cname) \<times> staticb \<times> ty) list \<Rightarrow> fields"
where
"init_fields FDTs \<equiv> (map_of \<circ> map (\<lambda>((F,D),b,T). ((F,D),default_val T))) FDTs"
definition init_sfields :: "((vname \<times> cname) \<times> staticb \<times> ty) list \<Rightarrow> sfields"
where
"init_sfields FDTs \<equiv> (map_of \<circ> map (\<lambda>((F,D),b,T). (F,default_val T))) FDTs"
\<comment> \<open>a new, blank object with default values for instance fields:\<close>
definition blank :: "'m prog \<Rightarrow> cname \<Rightarrow> obj"
where
"blank P C \<equiv> (C,init_fields (ifields P C))"
\<comment> \<open>a new, blank object with default values for static fields:\<close>
definition sblank :: "'m prog \<Rightarrow> cname \<Rightarrow> sfields"
where
"sblank P C \<equiv> init_sfields (isfields P C)"
(* replaced all vname, cname in below with `char list' and \<rightharpoonup> with returned option
so that pretty printing works -SM *)
translations
(type) "fields" <= (type) "char list \<times> char list \<Rightarrow> val option"
(type) "obj" <= (type) "char list \<times> fields"
(type) "sfields" <= (type) "char list \<Rightarrow> val option"
subsection\<open> Heap \<close>
type_synonym heap = "addr \<rightharpoonup> obj"
(* replaced addr with nat and \<rightharpoonup> with returned option so that pretty printing works -SM *)
translations
(type) "heap" <= (type) "nat \<Rightarrow> obj option"
abbreviation
cname_of :: "heap \<Rightarrow> addr \<Rightarrow> cname" where
"cname_of hp a == fst (the (hp a))"
definition new_Addr :: "heap \<Rightarrow> addr option"
where
"new_Addr h \<equiv> if \<exists>a. h a = None then Some(LEAST a. h a = None) else None"
definition cast_ok :: "'m prog \<Rightarrow> cname \<Rightarrow> heap \<Rightarrow> val \<Rightarrow> bool"
where
"cast_ok P C h v \<equiv> v = Null \<or> P \<turnstile> cname_of h (the_Addr v) \<preceq>\<^sup>* C"
definition hext :: "heap \<Rightarrow> heap \<Rightarrow> bool" ("_ \<unlhd> _" [51,51] 50)
where
"h \<unlhd> h' \<equiv> \<forall>a C fs. h a = Some(C,fs) \<longrightarrow> (\<exists>fs'. h' a = Some(C,fs'))"
primrec typeof_h :: "heap \<Rightarrow> val \<Rightarrow> ty option" ("typeof\<^bsub>_\<^esub>")
where
"typeof\<^bsub>h\<^esub> Unit = Some Void"
| "typeof\<^bsub>h\<^esub> Null = Some NT"
| "typeof\<^bsub>h\<^esub> (Bool b) = Some Boolean"
| "typeof\<^bsub>h\<^esub> (Intg i) = Some Integer"
| "typeof\<^bsub>h\<^esub> (Addr a) = (case h a of None \<Rightarrow> None | Some(C,fs) \<Rightarrow> Some(Class C))"
lemma new_Addr_SomeD:
"new_Addr h = Some a \<Longrightarrow> h a = None"
(*<*)by(fastforce simp: new_Addr_def split:if_splits intro:LeastI)(*>*)
lemma [simp]: "(typeof\<^bsub>h\<^esub> v = Some Boolean) = (\<exists>b. v = Bool b)"
(*<*)by(induct v) auto(*>*)
lemma [simp]: "(typeof\<^bsub>h\<^esub> v = Some Integer) = (\<exists>i. v = Intg i)"
(*<*)by(cases v) auto(*>*)
lemma [simp]: "(typeof\<^bsub>h\<^esub> v = Some NT) = (v = Null)"
(*<*)by(cases v) auto(*>*)
lemma [simp]: "(typeof\<^bsub>h\<^esub> v = Some(Class C)) = (\<exists>a fs. v = Addr a \<and> h a = Some(C,fs))"
(*<*)by(cases v) auto(*>*)
lemma [simp]: "h a = Some(C,fs) \<Longrightarrow> typeof\<^bsub>(h(a\<mapsto>(C,fs')))\<^esub> v = typeof\<^bsub>h\<^esub> v"
(*<*)by(induct v) (auto simp:fun_upd_apply)(*>*)
text\<open> For literal values the first parameter of @{term typeof} can be
set to @{term empty} because they do not contain addresses: \<close>
abbreviation
typeof :: "val \<Rightarrow> ty option" where
"typeof v == typeof_h Map.empty v"
lemma typeof_lit_typeof:
"typeof v = Some T \<Longrightarrow> typeof\<^bsub>h\<^esub> v = Some T"
(*<*)by(cases v) auto(*>*)
lemma typeof_lit_is_type:
"typeof v = Some T \<Longrightarrow> is_type P T"
(*<*)by (induct v) (auto simp:is_type_def)(*>*)
subsection \<open> Heap extension @{text"\<unlhd>"} \<close>
lemma hextI: "\<forall>a C fs. h a = Some(C,fs) \<longrightarrow> (\<exists>fs'. h' a = Some(C,fs')) \<Longrightarrow> h \<unlhd> h'"
(*<*)by(auto simp: hext_def)(*>*)
lemma hext_objD: "\<lbrakk> h \<unlhd> h'; h a = Some(C,fs) \<rbrakk> \<Longrightarrow> \<exists>fs'. h' a = Some(C,fs')"
(*<*)by(auto simp: hext_def)(*>*)
lemma hext_refl [iff]: "h \<unlhd> h"
(*<*)by (rule hextI) fast(*>*)
lemma hext_new [simp]: "h a = None \<Longrightarrow> h \<unlhd> h(a\<mapsto>x)"
(*<*)by (rule hextI) (auto simp:fun_upd_apply)(*>*)
lemma hext_trans: "\<lbrakk> h \<unlhd> h'; h' \<unlhd> h'' \<rbrakk> \<Longrightarrow> h \<unlhd> h''"
(*<*)by (rule hextI) (fast dest: hext_objD)(*>*)
lemma hext_upd_obj: "h a = Some (C,fs) \<Longrightarrow> h \<unlhd> h(a\<mapsto>(C,fs'))"
(*<*)by (rule hextI) (auto simp:fun_upd_apply)(*>*)
lemma hext_typeof_mono: "\<lbrakk> h \<unlhd> h'; typeof\<^bsub>h\<^esub> v = Some T \<rbrakk> \<Longrightarrow> typeof\<^bsub>h'\<^esub> v = Some T"
(*<*)
proof(cases v)
case Addr assume "h \<unlhd> h'" and "typeof\<^bsub>h\<^esub> v = \<lfloor>T\<rfloor>"
then show ?thesis using Addr by(fastforce simp:hext_def)
qed simp_all
(*>*)
subsection\<open> Static field information function \<close>
datatype init_state = Done | Processing | Prepared | Error
\<comment> \<open>@{term Done} = initialized\<close>
\<comment> \<open>@{term Processing} = currently being initialized\<close>
\<comment> \<open>@{term Prepared} = uninitialized and not currently being initialized\<close>
\<comment> \<open>@{term Error} = previous initialization attempt resulted in erroneous state\<close>
inductive iprog :: "init_state \<Rightarrow> init_state \<Rightarrow> bool" ("_ \<le>\<^sub>i _" [51,51] 50)
where
[simp]: "Prepared \<le>\<^sub>i i"
| [simp]: "Processing \<le>\<^sub>i Done"
| [simp]: "Processing \<le>\<^sub>i Error"
| [simp]: "i \<le>\<^sub>i i"
lemma iprog_Done[simp]: "(Done \<le>\<^sub>i i) = (i = Done)"
by(simp only: iprog.simps, simp)
lemma iprog_Error[simp]: "(Error \<le>\<^sub>i i) = (i = Error)"
by(simp only: iprog.simps, simp)
lemma iprog_Processing[simp]: "(Processing \<le>\<^sub>i i) = (i = Done \<or> i = Error \<or> i = Processing)"
by(simp only: iprog.simps, simp)
lemma iprog_trans: "\<lbrakk> i \<le>\<^sub>i i'; i' \<le>\<^sub>i i'' \<rbrakk> \<Longrightarrow> i \<le>\<^sub>i i''"
(*<*)by(case_tac i; case_tac i') simp_all(*>*)
subsection\<open> Static Heap \<close>
text \<open>The static heap (sheap) is used for storing information about static
field values and initialization status for classes.\<close>
type_synonym
sheap = "cname \<rightharpoonup> sfields \<times> init_state"
translations
(type) "sheap" <= (type) "char list \<Rightarrow> (sfields \<times> init_state) option"
definition shext :: "sheap \<Rightarrow> sheap \<Rightarrow> bool" ("_ \<unlhd>\<^sub>s _" [51,51] 50)
where
"sh \<unlhd>\<^sub>s sh' \<equiv> \<forall>C sfs i. sh C = Some(sfs,i) \<longrightarrow> (\<exists>sfs' i'. sh' C = Some(sfs',i') \<and> i \<le>\<^sub>i i')"
lemma shextI: "\<forall>C sfs i. sh C = Some(sfs,i) \<longrightarrow> (\<exists>sfs' i'. sh' C = Some(sfs',i') \<and> i \<le>\<^sub>i i') \<Longrightarrow> sh \<unlhd>\<^sub>s sh'"
(*<*)by(auto simp: shext_def)(*>*)
lemma shext_objD: "\<lbrakk> sh \<unlhd>\<^sub>s sh'; sh C = Some(sfs,i) \<rbrakk> \<Longrightarrow> \<exists>sfs' i'. sh' C = Some(sfs', i') \<and> i \<le>\<^sub>i i'"
(*<*)by(auto simp: shext_def)(*>*)
lemma shext_refl [iff]: "sh \<unlhd>\<^sub>s sh"
(*<*)by (rule shextI) auto(*>*)
lemma shext_new [simp]: "sh C = None \<Longrightarrow> sh \<unlhd>\<^sub>s sh(C\<mapsto>x)"
(*<*)by (rule shextI) (auto simp:fun_upd_apply)(*>*)
lemma shext_trans: "\<lbrakk> sh \<unlhd>\<^sub>s sh'; sh' \<unlhd>\<^sub>s sh'' \<rbrakk> \<Longrightarrow> sh \<unlhd>\<^sub>s sh''"
(*<*)by (rule shextI) (fast dest: iprog_trans shext_objD)(*>*)
lemma shext_upd_obj: "\<lbrakk> sh C = Some (sfs,i); i \<le>\<^sub>i i' \<rbrakk> \<Longrightarrow> sh \<unlhd>\<^sub>s sh(C\<mapsto>(sfs',i'))"
(*<*)by (rule shextI) (auto simp:fun_upd_apply)(*>*)
end
|
theory exercise_2_5
imports Main
begin
fun sum_upto::"nat\<Rightarrow>nat"
where
"sum_upto 0=0"|
"sum_upto n=n+(sum_upto(n-1))"
value "sum_upto 10"
lemma sum_upto_su:"sum_upto n=n*(n+1) div 2"
apply(induction n)
apply(auto)
done
end |
SUBROUTINE init_modular_coils (nvariables, xvariables, nfp)
!-----------------------------------------------
! M o d u l e s
!-----------------------------------------------
USE modular_coils
IMPLICIT NONE
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: nc, i, n, nfp
INTEGER :: nvariables, modes
REAL(rprec) :: xvariables(*)
!-----------------------------------------------
nc = nmod_coils_per_period
nfper = nfp
nmod_coils = nc*nfper
nmid = (nc+1)/2
nodd = MOD(nc,2)
IF ((nodd.eq.0) .and. (.not.lsymm)) nmid = nmid + 1
nvariables = 0
nmod_coeffs = 0
IF (nc .le. 0) RETURN
! Initialize the variables to values of unique coil parameters
! and count the number of variables
n = 0
! First consider the CASE with coils on both symmetry planes at
! phi = 0 and phi = pi/nfp (lsymm = F). This implies that the number
! of coils per field period must be even (nodd = 0).
IF ((nodd .eq. 0) .and. (.not.lsymm)) THEN
! Symmetry coil at phi = 0.
i = 1
DO modes = 1, nf_phi
n = n + 1
xvariables(n) = phis(i,modes)
END DO
DO modes = 1, nf_rho
n = n + 1
xvariables(n) = rhos(i,modes)
END DO
! Coils 2 through nmid-1.
DO i = 2, nmid-1
modes = 0
n = n + 1
xvariables(n) = phic(i,modes)
DO modes = 1,nf_phi
n = n + 1
xvariables(n) = phic(i,modes)
n = n + 1
xvariables(n) = phis(i,modes)
END DO
DO modes = 1,nf_rho
n = n + 1
xvariables(n) = rhos(i,modes)
END DO
END DO
! Symmetry coil at phi = pi/nfp.
i = nmid
DO modes = 1, nf_phi
n = n + 1
xvariables(n) = phis(i,modes)
END DO
DO modes = 1, nf_rho
n = n + 1
xvariables(n) = rhos(i,modes)
END DO
! Next consider the cases with a coil on phi = 0 (lsymm = T), or
! on phi = pi/nfp (lsymm = F), but not both. Then there may be an
! even number of coils per period (nodd = 0) or an odd number of
! coils per period (nodd = 1).
ELSE
DO i = 1, nmid-nodd
modes = 0
n = n + 1
xvariables(n) = phic(i,modes)
DO modes = 1,nf_phi
n = n + 1
xvariables(n) = phic(i,modes)
n = n + 1
xvariables(n) = phis(i,modes)
END DO
DO modes = 1,nf_rho
n = n + 1
xvariables(n) = rhos(i,modes)
END DO
END DO
IF (nodd .eq. 1) THEN
i = nmid
DO modes = 1, nf_phi
n = n + 1
xvariables(n) = phis(i,modes)
END DO
DO modes = 1, nf_rho
n = n + 1
xvariables(n) = rhos(i,modes)
END DO
END IF
END IF ! END IF ((nodd .eq. 0) .and. (lsymm .eqv. .false.))
nmod_coeffs = n
nvariables = n
END SUBROUTINE init_modular_coils
|
# Threads
using ContextTracking
using Logging
@ctx function foo()
@memo x = 1
bar(current_task(), threadid())
sleep(0.5) # make it slow and yield to other green threads
end
@ctx function bar(task, thread_id)
c = context()
@info "inside bar" task thread_id c.data[:x]
end
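# Sketch of the mechanics (as suggested by the output captured below): `@ctx`
# runs the function inside a tracked context, `@memo` records `x` into it, and
# the nested `@ctx` function `bar` reads the same value back via
# `context().data[:x]`, even across tasks and threads.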
asyncmap(i -> foo(), 1:3);
#=
julia> asyncmap(i -> foo(), 1:3);
┌ Info: inside bar
│ task = Task (runnable) @0x0000000127810010
│ thread_id = 1
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x0000000127810250
│ thread_id = 1
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x0000000127810490
│ thread_id = 1
└ c.data[:x] = 1
=#
# Regular experimental threading
using Base.Threads: @threads
@show Threads.nthreads();
#=
julia> @show Threads.nthreads();
Threads.nthreads() = 4
=#
@threads for i in 1:5
foo()
end
#=
julia> @threads for i in 1:5
foo()
end
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874490
│ thread_id = 3
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874010
│ thread_id = 1
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b8746d0
│ thread_id = 4
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874250
│ thread_id = 2
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874010
│ thread_id = 1
└ c.data[:x] = 1
=#
# New threading
using Base.Threads: @spawn
for i in 1:5
@spawn foo()
end
#=
julia> for i in 1:5
@spawn foo()
end
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874910
│ thread_id = 2
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874b50
│ thread_id = 3
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b875210
│ thread_id = 2
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874d90
│ thread_id = 4
└ c.data[:x] = 1
┌ Info: inside bar
│ task = Task (runnable) @0x000000010b874fd0
│ thread_id = 1
└ c.data[:x] = 1
=#
|
If $f$ is an orthogonal transformation, then $f$ maps the closed ball of radius $r$ centered at $x$ to the closed ball of radius $r$ centered at $f(x)$. |
State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.156386
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x : E
s : Set E
hs : StarConvex 𝕜 0 s
hx : x ∈ s
t : 𝕜
ht : 1 ≤ t
⊢ x ∈ t • s State After: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.156386
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x : E
s : Set E
hs : StarConvex 𝕜 0 s
hx : x ∈ s
t : 𝕜
ht : 1 ≤ t
⊢ t⁻¹ • x ∈ s Tactic: rw [mem_smul_set_iff_inv_smul_mem₀ (zero_lt_one.trans_le ht).ne'] State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.156386
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x : E
s : Set E
hs : StarConvex 𝕜 0 s
hx : x ∈ s
t : 𝕜
ht : 1 ≤ t
⊢ t⁻¹ • x ∈ s State After: no goals Tactic: exact hs.smul_mem hx (by positivity) (inv_le_one ht) State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.156386
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x : E
s : Set E
hs : StarConvex 𝕜 0 s
hx : x ∈ s
t : 𝕜
ht : 1 ≤ t
⊢ 0 ≤ t⁻¹ State After: no goals Tactic: positivity |
/-
Copyright (c) 2020 Zhouhang Zhou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Zhouhang Zhou
-/
import algebra.support
/-!
# Indicator function
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
- `indicator (s : set α) (f : α → β) (a : α)` is `f a` if `a ∈ s` and is `0` otherwise.
- `mul_indicator (s : set α) (f : α → β) (a : α)` is `f a` if `a ∈ s` and is `1` otherwise.
## Implementation note
In mathematics, an indicator function or a characteristic function is a function
used to indicate membership of an element in a set `s`,
having the value `1` for all elements of `s` and the value `0` otherwise.
But since it is usually used to restrict a function to a certain set `s`,
we let the indicator function take the value `f x` for some function `f`, instead of `1`.
If the usual indicator function is needed, just set `f` to be the constant function `λx, 1`.
The indicator function is implemented non-computably, to avoid having to pass around `decidable`
arguments. This is in contrast with the design of `pi.single` or `set.piecewise`.
## Tags
indicator, characteristic
-/
open_locale big_operators
open function
variables {α β ι M N : Type*}
namespace set
section has_one
variables [has_one M] [has_one N] {s t : set α} {f g : α → M} {a : α}
/-- `indicator s f a` is `f a` if `a ∈ s`, `0` otherwise. -/
noncomputable def indicator {M} [has_zero M] (s : set α) (f : α → M) : α → M
| x := by haveI := classical.dec_pred (∈ s); exact if x ∈ s then f x else 0
/-- `mul_indicator s f a` is `f a` if `a ∈ s`, `1` otherwise. -/
@[to_additive]
noncomputable def mul_indicator (s : set α) (f : α → M) : α → M
| x := by haveI := classical.dec_pred (∈ s); exact if x ∈ s then f x else 1
@[simp, to_additive] lemma piecewise_eq_mul_indicator [decidable_pred (∈ s)] :
s.piecewise f 1 = s.mul_indicator f :=
funext $ λ x, @if_congr _ _ _ _ (id _) _ _ _ _ iff.rfl rfl rfl
@[to_additive] lemma mul_indicator_apply (s : set α) (f : α → M) (a : α) [decidable (a ∈ s)] :
mul_indicator s f a = if a ∈ s then f a else 1 := by convert rfl
@[simp, to_additive] lemma mul_indicator_of_mem (h : a ∈ s) (f : α → M) :
mul_indicator s f a = f a :=
by { letI := classical.dec (a ∈ s), exact if_pos h }
@[simp, to_additive] lemma mul_indicator_of_not_mem (h : a ∉ s) (f : α → M) :
mul_indicator s f a = 1 :=
by { letI := classical.dec (a ∈ s), exact if_neg h }
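-- Illustrative sanity checks (a sketch; these use the additive versions that
-- `to_additive` generates, and the fact that membership in a set-builder
-- singleton is definitionally an equality):
example : ({0} : set ℕ).indicator (λ _, (2 : ℕ)) 0 = 2 :=
indicator_of_mem rfl _
example : ({0} : set ℕ).indicator (λ _, (2 : ℕ)) 1 = 0 :=
indicator_of_not_mem nat.one_ne_zero _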
@[to_additive] lemma mul_indicator_eq_one_or_self (s : set α) (f : α → M) (a : α) :
mul_indicator s f a = 1 ∨ mul_indicator s f a = f a :=
begin
by_cases h : a ∈ s,
{ exact or.inr (mul_indicator_of_mem h f) },
{ exact or.inl (mul_indicator_of_not_mem h f) }
end
@[simp, to_additive] lemma mul_indicator_apply_eq_self :
s.mul_indicator f a = f a ↔ (a ∉ s → f a = 1) :=
by letI := classical.dec (a ∈ s); exact ite_eq_left_iff.trans (by rw [@eq_comm _ (f a)])
@[simp, to_additive] lemma mul_indicator_eq_self : s.mul_indicator f = f ↔ mul_support f ⊆ s :=
by simp only [funext_iff, subset_def, mem_mul_support, mul_indicator_apply_eq_self, not_imp_comm]
@[to_additive] lemma mul_indicator_eq_self_of_superset (h1 : s.mul_indicator f = f) (h2 : s ⊆ t) :
t.mul_indicator f = f :=
by { rw mul_indicator_eq_self at h1 ⊢, exact subset.trans h1 h2 }
@[simp, to_additive] lemma mul_indicator_apply_eq_one :
mul_indicator s f a = 1 ↔ (a ∈ s → f a = 1) :=
by letI := classical.dec (a ∈ s); exact ite_eq_right_iff
@[simp, to_additive] lemma mul_indicator_eq_one :
mul_indicator s f = (λ x, 1) ↔ disjoint (mul_support f) s :=
by simp only [funext_iff, mul_indicator_apply_eq_one, set.disjoint_left, mem_mul_support,
not_imp_not]
@[simp, to_additive] lemma mul_indicator_eq_one' :
mul_indicator s f = 1 ↔ disjoint (mul_support f) s :=
mul_indicator_eq_one
@[to_additive] lemma mul_indicator_apply_ne_one {a : α} :
s.mul_indicator f a ≠ 1 ↔ a ∈ s ∩ mul_support f :=
by simp only [ne.def, mul_indicator_apply_eq_one, not_imp, mem_inter_iff, mem_mul_support]
@[simp, to_additive] lemma mul_support_mul_indicator :
function.mul_support (s.mul_indicator f) = s ∩ function.mul_support f :=
ext $ λ x, by simp [function.mem_mul_support, mul_indicator_apply_eq_one]
/-- If a multiplicative indicator function is not equal to `1` at a point, then that point is in the
set. -/
@[to_additive "If an additive indicator function is not equal to `0` at a point, then that point is
in the set."]
lemma mem_of_mul_indicator_ne_one (h : mul_indicator s f a ≠ 1) : a ∈ s :=
not_imp_comm.1 (λ hn, mul_indicator_of_not_mem hn f) h
@[to_additive] lemma eq_on_mul_indicator : eq_on (mul_indicator s f) f s :=
λ x hx, mul_indicator_of_mem hx f
@[to_additive] lemma mul_support_mul_indicator_subset : mul_support (s.mul_indicator f) ⊆ s :=
λ x hx, hx.imp_symm (λ h, mul_indicator_of_not_mem h f)
@[simp, to_additive] lemma mul_indicator_mul_support : mul_indicator (mul_support f) f = f :=
mul_indicator_eq_self.2 subset.rfl
@[simp, to_additive] lemma mul_indicator_range_comp {ι : Sort*} (f : ι → α) (g : α → M) :
mul_indicator (range f) g ∘ f = g ∘ f :=
by letI := classical.dec_pred (∈ range f); exact piecewise_range_comp _ _ _
@[to_additive] lemma mul_indicator_congr (h : eq_on f g s) :
mul_indicator s f = mul_indicator s g :=
funext $ λx, by { simp only [mul_indicator], split_ifs, { exact h h_1 }, refl }
@[simp, to_additive] lemma mul_indicator_univ (f : α → M) : mul_indicator (univ : set α) f = f :=
mul_indicator_eq_self.2 $ subset_univ _
@[simp, to_additive] lemma mul_indicator_empty (f : α → M) : mul_indicator (∅ : set α) f = λa, 1 :=
mul_indicator_eq_one.2 $ disjoint_empty _
@[to_additive] lemma mul_indicator_empty' (f : α → M) : mul_indicator (∅ : set α) f = 1 :=
mul_indicator_empty f
variable (M)
@[simp, to_additive] lemma mul_indicator_one (s : set α) :
mul_indicator s (λx, (1:M)) = λx, (1:M) :=
mul_indicator_eq_one.2 $ by simp only [mul_support_one, empty_disjoint]
@[simp, to_additive] lemma mul_indicator_one' {s : set α} : s.mul_indicator (1 : α → M) = 1 :=
mul_indicator_one M s
variable {M}
@[to_additive] lemma mul_indicator_mul_indicator (s t : set α) (f : α → M) :
mul_indicator s (mul_indicator t f) = mul_indicator (s ∩ t) f :=
funext $ λx, by { simp only [mul_indicator], split_ifs, repeat {simp * at * {contextual := tt}} }
@[simp, to_additive] lemma mul_indicator_inter_mul_support (s : set α) (f : α → M) :
mul_indicator (s ∩ mul_support f) f = mul_indicator s f :=
by rw [← mul_indicator_mul_indicator, mul_indicator_mul_support]
@[to_additive] lemma comp_mul_indicator (h : M → β) (f : α → M) {s : set α} {x : α}
[decidable_pred (∈ s)] :
h (s.mul_indicator f x) = s.piecewise (h ∘ f) (const α (h 1)) x :=
by letI := classical.dec_pred (∈ s); convert s.apply_piecewise f (const α 1) (λ _, h)
@[to_additive] lemma mul_indicator_comp_right {s : set α} (f : β → α) {g : α → M} {x : β} :
mul_indicator (f ⁻¹' s) (g ∘ f) x = mul_indicator s g (f x) :=
by { simp only [mul_indicator], split_ifs; refl }
@[to_additive] lemma mul_indicator_image {s : set α} {f : β → M} {g : α → β} (hg : injective g)
{x : α} : mul_indicator (g '' s) f (g x) = mul_indicator s (f ∘ g) x :=
by rw [← mul_indicator_comp_right, preimage_image_eq _ hg]
@[to_additive] lemma mul_indicator_comp_of_one {g : M → N} (hg : g 1 = 1) :
mul_indicator s (g ∘ f) = g ∘ (mul_indicator s f) :=
begin
funext,
simp only [mul_indicator],
split_ifs; simp [*]
end
@[to_additive] lemma comp_mul_indicator_const (c : M) (f : M → N) (hf : f 1 = 1) :
(λ x, f (s.mul_indicator (λ x, c) x)) = s.mul_indicator (λ x, f c) :=
(mul_indicator_comp_of_one hf).symm
@[to_additive] lemma mul_indicator_preimage (s : set α) (f : α → M) (B : set M) :
(mul_indicator s f)⁻¹' B = s.ite (f ⁻¹' B) (1 ⁻¹' B) :=
by letI := classical.dec_pred (∈ s); exact piecewise_preimage s f 1 B
@[to_additive] lemma mul_indicator_one_preimage (s : set M) :
t.mul_indicator 1 ⁻¹' s ∈ ({set.univ, ∅} : set (set α)) :=
begin
classical,
rw [mul_indicator_one', preimage_one],
split_ifs; simp
end
@[to_additive] lemma mul_indicator_const_preimage_eq_union (U : set α) (s : set M) (a : M)
[decidable (a ∈ s)] [decidable ((1 : M) ∈ s)] :
U.mul_indicator (λ x, a) ⁻¹' s = (if a ∈ s then U else ∅) ∪ (if (1 : M) ∈ s then Uᶜ else ∅) :=
begin
rw [mul_indicator_preimage, preimage_one, preimage_const],
split_ifs; simp [← compl_eq_univ_diff]
end
@[to_additive] lemma mul_indicator_const_preimage (U : set α) (s : set M) (a : M) :
U.mul_indicator (λ x, a) ⁻¹' s ∈ ({set.univ, U, Uᶜ, ∅} : set (set α)) :=
begin
classical,
rw [mul_indicator_const_preimage_eq_union],
split_ifs; simp
end
lemma indicator_one_preimage [has_zero M] (U : set α) (s : set M) :
U.indicator 1 ⁻¹' s ∈ ({set.univ, U, Uᶜ, ∅} : set (set α)) :=
indicator_const_preimage _ _ 1
@[to_additive] lemma mul_indicator_preimage_of_not_mem (s : set α) (f : α → M)
{t : set M} (ht : (1:M) ∉ t) :
(mul_indicator s f)⁻¹' t = f ⁻¹' t ∩ s :=
by simp [mul_indicator_preimage, pi.one_def, set.preimage_const_of_not_mem ht]
@[to_additive] lemma mem_range_mul_indicator {r : M} {s : set α} {f : α → M} :
r ∈ range (mul_indicator s f) ↔ (r = 1 ∧ s ≠ univ) ∨ (r ∈ f '' s) :=
by simp [mul_indicator, ite_eq_iff, exists_or_distrib, eq_univ_iff_forall, and_comm, or_comm,
@eq_comm _ r 1]
@[to_additive] lemma mul_indicator_rel_mul_indicator {r : M → M → Prop} (h1 : r 1 1)
(ha : a ∈ s → r (f a) (g a)) :
r (mul_indicator s f a) (mul_indicator s g a) :=
by { simp only [mul_indicator], split_ifs with has has, exacts [ha has, h1] }
end has_one
section monoid
variables [mul_one_class M] {s t : set α} {f g : α → M} {a : α}
@[to_additive] lemma mul_indicator_union_mul_inter_apply (f : α → M) (s t : set α) (a : α) :
mul_indicator (s ∪ t) f a * mul_indicator (s ∩ t) f a =
mul_indicator s f a * mul_indicator t f a :=
by by_cases hs : a ∈ s; by_cases ht : a ∈ t; simp *
@[to_additive] lemma mul_indicator_union_mul_inter (f : α → M) (s t : set α) :
mul_indicator (s ∪ t) f * mul_indicator (s ∩ t) f = mul_indicator s f * mul_indicator t f :=
funext $ mul_indicator_union_mul_inter_apply f s t
@[to_additive] lemma mul_indicator_union_of_not_mem_inter (h : a ∉ s ∩ t) (f : α → M) :
mul_indicator (s ∪ t) f a = mul_indicator s f a * mul_indicator t f a :=
by rw [← mul_indicator_union_mul_inter_apply f s t, mul_indicator_of_not_mem h, mul_one]
@[to_additive] lemma mul_indicator_union_of_disjoint (h : disjoint s t) (f : α → M) :
mul_indicator (s ∪ t) f = λa, mul_indicator s f a * mul_indicator t f a :=
funext $ λa, mul_indicator_union_of_not_mem_inter (λ ha, h.le_bot ha) _
@[to_additive] lemma mul_indicator_mul (s : set α) (f g : α → M) :
mul_indicator s (λa, f a * g a) = λa, mul_indicator s f a * mul_indicator s g a :=
by { funext, simp only [mul_indicator], split_ifs, { refl }, rw mul_one }
@[to_additive] lemma mul_indicator_mul' (s : set α) (f g : α → M) :
mul_indicator s (f * g) = mul_indicator s f * mul_indicator s g :=
mul_indicator_mul s f g
@[simp, to_additive] lemma mul_indicator_compl_mul_self_apply (s : set α) (f : α → M) (a : α) :
mul_indicator sᶜ f a * mul_indicator s f a = f a :=
classical.by_cases (λ ha : a ∈ s, by simp [ha]) (λ ha, by simp [ha])
@[simp, to_additive] lemma mul_indicator_compl_mul_self (s : set α) (f : α → M) :
mul_indicator sᶜ f * mul_indicator s f = f :=
funext $ mul_indicator_compl_mul_self_apply s f
@[simp, to_additive] lemma mul_indicator_self_mul_compl_apply (s : set α) (f : α → M) (a : α) :
mul_indicator s f a * mul_indicator sᶜ f a = f a :=
classical.by_cases (λ ha : a ∈ s, by simp [ha]) (λ ha, by simp [ha])
@[simp, to_additive] lemma mul_indicator_self_mul_compl (s : set α) (f : α → M) :
mul_indicator s f * mul_indicator sᶜ f = f :=
funext $ mul_indicator_self_mul_compl_apply s f
@[to_additive] lemma mul_indicator_mul_eq_left {f g : α → M}
(h : disjoint (mul_support f) (mul_support g)) :
(mul_support f).mul_indicator (f * g) = f :=
begin
refine (mul_indicator_congr $ λ x hx, _).trans mul_indicator_mul_support,
have : g x = 1, from nmem_mul_support.1 (disjoint_left.1 h hx),
rw [pi.mul_apply, this, mul_one]
end
@[to_additive] lemma mul_indicator_mul_eq_right {f g : α → M}
(h : disjoint (mul_support f) (mul_support g)) :
(mul_support g).mul_indicator (f * g) = g :=
begin
refine (mul_indicator_congr $ λ x hx, _).trans mul_indicator_mul_support,
have : f x = 1, from nmem_mul_support.1 (disjoint_right.1 h hx),
rw [pi.mul_apply, this, one_mul]
end
@[to_additive] lemma mul_indicator_mul_compl_eq_piecewise
[decidable_pred (∈ s)] (f g : α → M) :
s.mul_indicator f * sᶜ.mul_indicator g = s.piecewise f g :=
begin
ext x,
by_cases h : x ∈ s,
{ rw [piecewise_eq_of_mem _ _ _ h, pi.mul_apply, set.mul_indicator_of_mem h,
set.mul_indicator_of_not_mem (set.not_mem_compl_iff.2 h), mul_one] },
{ rw [piecewise_eq_of_not_mem _ _ _ h, pi.mul_apply, set.mul_indicator_of_not_mem h,
set.mul_indicator_of_mem (set.mem_compl h), one_mul] },
end
/-- `set.mul_indicator` as a `monoid_hom`. -/
@[to_additive "`set.indicator` as an `add_monoid_hom`."]
noncomputable def mul_indicator_hom {α} (M) [mul_one_class M] (s : set α) : (α → M) →* (α → M) :=
{ to_fun := mul_indicator s,
map_one' := mul_indicator_one M s,
map_mul' := mul_indicator_mul s }
end monoid
section distrib_mul_action
variables {A : Type*} [add_monoid A] [monoid M] [distrib_mul_action M A]
lemma indicator_smul_apply (s : set α) (r : α → M) (f : α → A) (x : α) :
indicator s (λ x, r x • f x) x = r x • indicator s f x :=
by { dunfold indicator, split_ifs, exacts [rfl, (smul_zero (r x)).symm] }
lemma indicator_smul (s : set α) (r : α → M) (f : α → A) :
indicator s (λ (x : α), r x • f x) = λ (x : α), r x • indicator s f x :=
funext $ indicator_smul_apply s r f
lemma indicator_const_smul_apply (s : set α) (r : M) (f : α → A) (x : α) :
indicator s (λ x, r • f x) x = r • indicator s f x :=
indicator_smul_apply s (λ x, r) f x
lemma indicator_const_smul (s : set α) (r : M) (f : α → A) :
indicator s (λ (x : α), r • f x) = λ (x : α), r • indicator s f x :=
funext $ indicator_const_smul_apply s r f
end distrib_mul_action
section group
variables {G : Type*} [group G] {s t : set α} {f g : α → G} {a : α}
@[to_additive] lemma mul_indicator_inv' (s : set α) (f : α → G) :
mul_indicator s (f⁻¹) = (mul_indicator s f)⁻¹ :=
(mul_indicator_hom G s).map_inv f
@[to_additive] lemma mul_indicator_inv (s : set α) (f : α → G) :
mul_indicator s (λa, (f a)⁻¹) = λa, (mul_indicator s f a)⁻¹ :=
mul_indicator_inv' s f
@[to_additive] lemma mul_indicator_div (s : set α) (f g : α → G) :
mul_indicator s (λ a, f a / g a) =
λ a, mul_indicator s f a / mul_indicator s g a :=
(mul_indicator_hom G s).map_div f g
@[to_additive] lemma mul_indicator_div' (s : set α) (f g : α → G) :
mul_indicator s (f / g) = mul_indicator s f / mul_indicator s g :=
mul_indicator_div s f g
@[to_additive indicator_compl'] lemma mul_indicator_compl (s : set α) (f : α → G) :
mul_indicator sᶜ f = f * (mul_indicator s f)⁻¹ :=
eq_mul_inv_of_mul_eq $ s.mul_indicator_compl_mul_self f
lemma indicator_compl {G} [add_group G] (s : set α) (f : α → G) :
indicator sᶜ f = f - indicator s f :=
by rw [sub_eq_add_neg, indicator_compl']
@[to_additive indicator_diff'] lemma mul_indicator_diff (h : s ⊆ t) (f : α → G) :
mul_indicator (t \ s) f = mul_indicator t f * (mul_indicator s f)⁻¹ :=
eq_mul_inv_of_mul_eq $ by { rw [pi.mul_def, ←mul_indicator_union_of_disjoint, diff_union_self,
union_eq_self_of_subset_right h], exact disjoint_sdiff_self_left }
lemma indicator_diff {G : Type*} [add_group G] {s t : set α} (h : s ⊆ t) (f : α → G) :
indicator (t \ s) f = indicator t f - indicator s f :=
by rw [indicator_diff' h, sub_eq_add_neg]
end group
section comm_monoid
variables [comm_monoid M]
/-- Consider a product of `g i (f i)` over a `finset`. Suppose `g` is a
function such as `pow`, which maps a second argument of `1` to
`1`. Then if `f` is replaced by the corresponding multiplicative indicator
function, the `finset` may be replaced by a possibly larger `finset`
without changing the value of the sum. -/
@[to_additive] lemma prod_mul_indicator_subset_of_eq_one [has_one N] (f : α → N)
(g : α → N → M) {s t : finset α} (h : s ⊆ t) (hg : ∀ a, g a 1 = 1) :
∏ i in s, g i (f i) = ∏ i in t, g i (mul_indicator ↑s f i) :=
begin
rw ← finset.prod_subset h _,
{ apply finset.prod_congr rfl,
intros i hi,
congr,
symmetry,
exact mul_indicator_of_mem hi _ },
{ refine λ i hi hn, _,
convert hg i,
exact mul_indicator_of_not_mem hn _ }
end
/-- Consider a sum of `g i (f i)` over a `finset`. Suppose `g` is a
function such as multiplication, which maps a second argument of 0 to
0. (A typical use case would be a weighted sum of `f i * h i` or `f i
• h i`, where `f` gives the weights that are multiplied by some other
function `h`.) Then if `f` is replaced by the corresponding indicator
function, the `finset` may be replaced by a possibly larger `finset`
without changing the value of the sum. -/
add_decl_doc set.sum_indicator_subset_of_eq_zero
/-- Taking the product of an indicator function over a possibly larger `finset` is the same as
taking the original function over the original `finset`. -/
@[to_additive "Summing an indicator function over a possibly larger `finset` is the same as summing
the original function over the original `finset`."]
lemma prod_mul_indicator_subset (f : α → M) {s t : finset α} (h : s ⊆ t) :
∏ i in s, f i = ∏ i in t, mul_indicator ↑s f i :=
prod_mul_indicator_subset_of_eq_one _ (λ a b, b) h (λ _, rfl)
@[to_additive] lemma _root_.finset.prod_mul_indicator_eq_prod_filter
(s : finset ι) (f : ι → α → M) (t : ι → set α) (g : ι → α) [decidable_pred (λ i, g i ∈ t i)]:
∏ i in s, mul_indicator (t i) (f i) (g i) = ∏ i in s.filter (λ i, g i ∈ t i), f i (g i) :=
begin
refine (finset.prod_filter_mul_prod_filter_not s (λ i, g i ∈ t i) _).symm.trans _,
refine eq.trans _ (mul_one _),
exact congr_arg2 (*)
(finset.prod_congr rfl $ λ x hx, mul_indicator_of_mem (finset.mem_filter.1 hx).2 _)
(finset.prod_eq_one $ λ x hx, mul_indicator_of_not_mem (finset.mem_filter.1 hx).2 _)
end
@[to_additive] lemma mul_indicator_finset_prod (I : finset ι) (s : set α) (f : ι → α → M) :
mul_indicator s (∏ i in I, f i) = ∏ i in I, mul_indicator s (f i) :=
(mul_indicator_hom M s).map_prod _ _
@[to_additive] lemma mul_indicator_finset_bUnion {ι} (I : finset ι)
(s : ι → set α) {f : α → M} : (∀ (i ∈ I) (j ∈ I), i ≠ j → disjoint (s i) (s j)) →
mul_indicator (⋃ i ∈ I, s i) f = λ a, ∏ i in I, mul_indicator (s i) f a :=
begin
classical,
refine finset.induction_on I _ _,
{ intro h, funext, simp },
assume a I haI ih hI,
funext,
rw [finset.prod_insert haI, finset.set_bUnion_insert, mul_indicator_union_of_not_mem_inter, ih _],
{ assume i hi j hj hij,
exact hI i (finset.mem_insert_of_mem hi) j (finset.mem_insert_of_mem hj) hij },
simp only [not_exists, exists_prop, mem_Union, mem_inter_iff, not_and],
assume hx a' ha',
refine disjoint_left.1 (hI a (finset.mem_insert_self _ _) a' (finset.mem_insert_of_mem ha') _) hx,
exact (ne_of_mem_of_not_mem ha' haI).symm
end
@[to_additive] lemma mul_indicator_finset_bUnion_apply {ι} (I : finset ι)
(s : ι → set α) {f : α → M} (h : ∀ (i ∈ I) (j ∈ I), i ≠ j → disjoint (s i) (s j)) (x : α) :
mul_indicator (⋃ i ∈ I, s i) f x = ∏ i in I, mul_indicator (s i) f x :=
by rw set.mul_indicator_finset_bUnion I s h
end comm_monoid
section mul_zero_class
variables [mul_zero_class M] {s t : set α} {f g : α → M} {a : α}
lemma indicator_mul (s : set α) (f g : α → M) :
indicator s (λa, f a * g a) = λa, indicator s f a * indicator s g a :=
by { funext, simp only [indicator], split_ifs, { refl }, rw mul_zero }
lemma indicator_mul_left (s : set α) (f g : α → M) :
indicator s (λa, f a * g a) a = indicator s f a * g a :=
by { simp only [indicator], split_ifs, { refl }, rw [zero_mul] }
lemma indicator_mul_right (s : set α) (f g : α → M) :
indicator s (λa, f a * g a) a = f a * indicator s g a :=
by { simp only [indicator], split_ifs, { refl }, rw [mul_zero] }
lemma inter_indicator_mul {t1 t2 : set α} (f g : α → M) (x : α) :
(t1 ∩ t2).indicator (λ x, f x * g x) x = t1.indicator f x * t2.indicator g x :=
by { rw [← set.indicator_indicator], simp [indicator] }
end mul_zero_class
section mul_zero_one_class
variables [mul_zero_one_class M]
lemma inter_indicator_one {s t : set α} :
(s ∩ t).indicator (1 : _ → M) = s.indicator 1 * t.indicator 1 :=
funext (λ _, by simpa only [← inter_indicator_mul, pi.mul_apply, pi.one_apply, one_mul])
lemma indicator_prod_one {s : set α} {t : set β} {x : α} {y : β} :
(s ×ˢ t).indicator (1 : _ → M) (x, y) = s.indicator 1 x * t.indicator 1 y :=
by { classical, simp [indicator_apply, ←ite_and] }
variables (M) [nontrivial M]
lemma indicator_eq_zero_iff_not_mem {U : set α} {x : α} :
indicator U 1 x = (0 : M) ↔ x ∉ U :=
by { classical, simp [indicator_apply, imp_false] }
lemma indicator_eq_one_iff_mem {U : set α} {x : α} :
indicator U 1 x = (1 : M) ↔ x ∈ U :=
by { classical, simp [indicator_apply, imp_false] }
lemma indicator_one_inj {U V : set α} (h : indicator U (1 : α → M) = indicator V 1) : U = V :=
by { ext, simp_rw [← indicator_eq_one_iff_mem M, h] }
end mul_zero_one_class
section order
variables [has_one M] {s t : set α} {f g : α → M} {a : α} {y : M}
section
variables [has_le M]
@[to_additive] lemma mul_indicator_apply_le' (hfg : a ∈ s → f a ≤ y) (hg : a ∉ s → 1 ≤ y) :
mul_indicator s f a ≤ y :=
begin
by_cases ha : a ∈ s,
{ simpa [ha] using hfg ha },
{ simpa [ha] using hg ha },
end
@[to_additive] lemma mul_indicator_le' (hfg : ∀ a ∈ s, f a ≤ g a) (hg : ∀ a ∉ s, 1 ≤ g a) :
mul_indicator s f ≤ g :=
λ a, mul_indicator_apply_le' (hfg _) (hg _)
@[to_additive] lemma le_mul_indicator_apply {y} (hfg : a ∈ s → y ≤ g a) (hf : a ∉ s → y ≤ 1) :
y ≤ mul_indicator s g a :=
@mul_indicator_apply_le' α Mᵒᵈ ‹_› _ _ _ _ _ hfg hf
@[to_additive] lemma le_mul_indicator (hfg : ∀ a ∈ s, f a ≤ g a) (hf : ∀ a ∉ s, f a ≤ 1) :
f ≤ mul_indicator s g :=
λ a, le_mul_indicator_apply (hfg _) (hf _)
end
variables [preorder M]
@[to_additive indicator_apply_nonneg]
lemma one_le_mul_indicator_apply (h : a ∈ s → 1 ≤ f a) : 1 ≤ mul_indicator s f a :=
le_mul_indicator_apply h (λ _, le_rfl)
@[to_additive indicator_nonneg]
lemma one_le_mul_indicator (h : ∀ a ∈ s, 1 ≤ f a) (a : α) : 1 ≤ mul_indicator s f a :=
one_le_mul_indicator_apply (h a)
@[to_additive] lemma mul_indicator_apply_le_one (h : a ∈ s → f a ≤ 1) : mul_indicator s f a ≤ 1 :=
mul_indicator_apply_le' h (λ _, le_rfl)
@[to_additive] lemma mul_indicator_le_one (h : ∀ a ∈ s, f a ≤ 1) (a : α) :
mul_indicator s f a ≤ 1 :=
mul_indicator_apply_le_one (h a)
@[to_additive] lemma mul_indicator_le_mul_indicator (h : f a ≤ g a) :
mul_indicator s f a ≤ mul_indicator s g a :=
mul_indicator_rel_mul_indicator le_rfl (λ _, h)
attribute [mono] mul_indicator_le_mul_indicator indicator_le_indicator
@[to_additive] lemma mul_indicator_le_mul_indicator_of_subset (h : s ⊆ t) (hf : ∀ a, 1 ≤ f a)
(a : α) :
mul_indicator s f a ≤ mul_indicator t f a :=
mul_indicator_apply_le' (λ ha, le_mul_indicator_apply (λ _, le_rfl) (λ hat, (hat $ h ha).elim))
(λ ha, one_le_mul_indicator_apply (λ _, hf _))
@[to_additive] lemma mul_indicator_le_self' (hf : ∀ x ∉ s, 1 ≤ f x) : mul_indicator s f ≤ f :=
mul_indicator_le' (λ _ _, le_rfl) hf
@[to_additive] lemma mul_indicator_Union_apply {ι M} [complete_lattice M] [has_one M]
(h1 : (⊥:M) = 1) (s : ι → set α) (f : α → M) (x : α) :
mul_indicator (⋃ i, s i) f x = ⨆ i, mul_indicator (s i) f x :=
begin
by_cases hx : x ∈ ⋃ i, s i,
{ rw [mul_indicator_of_mem hx],
rw [mem_Union] at hx,
refine le_antisymm _ (supr_le $ λ i, mul_indicator_le_self' (λ x hx, h1 ▸ bot_le) x),
rcases hx with ⟨i, hi⟩,
exact le_supr_of_le i (ge_of_eq $ mul_indicator_of_mem hi _) },
{ rw [mul_indicator_of_not_mem hx],
simp only [mem_Union, not_exists] at hx,
simp [hx, ← h1] }
end
end order
section canonically_ordered_monoid
variables [canonically_ordered_monoid M]
@[to_additive] lemma mul_indicator_le_self (s : set α) (f : α → M) :
mul_indicator s f ≤ f :=
mul_indicator_le_self' $ λ _ _, one_le _
@[to_additive] lemma mul_indicator_apply_le {a : α} {s : set α} {f g : α → M}
(hfg : a ∈ s → f a ≤ g a) :
mul_indicator s f a ≤ g a :=
mul_indicator_apply_le' hfg $ λ _, one_le _
@[to_additive] lemma mul_indicator_le {s : set α} {f g : α → M} (hfg : ∀ a ∈ s, f a ≤ g a) :
mul_indicator s f ≤ g :=
mul_indicator_le' hfg $ λ _ _, one_le _
end canonically_ordered_monoid
lemma indicator_le_indicator_nonneg {β} [linear_order β] [has_zero β] (s : set α) (f : α → β) :
s.indicator f ≤ {x | 0 ≤ f x}.indicator f :=
begin
intro x,
classical,
simp_rw indicator_apply,
split_ifs,
{ exact le_rfl, },
{ exact (not_le.mp h_1).le, },
{ exact h_1, },
{ exact le_rfl, },
end
lemma indicator_nonpos_le_indicator {β} [linear_order β] [has_zero β] (s : set α) (f : α → β) :
{x | f x ≤ 0}.indicator f ≤ s.indicator f :=
@indicator_le_indicator_nonneg α βᵒᵈ _ _ s f
end set
@[to_additive] lemma monoid_hom.map_mul_indicator
{M N : Type*} [mul_one_class M] [mul_one_class N] (f : M →* N)
(s : set α) (g : α → M) (x : α) :
f (s.mul_indicator g x) = s.mul_indicator (f ∘ g) x :=
congr_fun (set.mul_indicator_comp_of_one f.map_one).symm x
|
Formal statement is: lemma finite_bounded_log: "finite {z::complex. norm z \<le> b \<and> exp z = w}" Informal statement is: The set of complex numbers $z$ such that $|z| \leq b$ and $e^z = w$ is finite. |
lemma diameter_closure: assumes "bounded S" shows "diameter(closure S) = diameter S" |
-- TODO
-- Basic Tarski-style semantics, for soundness only.
module BasicT.Semantics.BasicTarski where
open import BasicT.Syntax.Common public
-- Tarski models.
record Model : Set₁ where
infix 3 ⊩ᵅ_
field
-- Forcing for atomic propositions.
⊩ᵅ_ : Atom → Set
open Model {{…}} public
-- Forcing in a particular model.
module _ {{_ : Model}} where
infix 3 ⊩_
⊩_ : Ty → Set
⊩ α P = ⊩ᵅ P
⊩ A ▻ B = ⊩ A → ⊩ B
⊩ A ∧ B = ⊩ A × ⊩ B
⊩ ⊤ = 𝟙
⊩ BOOL = 𝔹
⊩ NAT = ℕ
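  -- For instance, the forcing of a conjunction unfolds componentwise:
  -- ⊩ BOOL ∧ NAT reduces to 𝔹 × ℕ.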
infix 3 ⊩⋆_
⊩⋆_ : Cx Ty → Set
⊩⋆ ∅ = 𝟙
⊩⋆ Ξ , A = ⊩⋆ Ξ × ⊩ A
-- Entailment, or forcing in all models.
infix 3 ⊨_
⊨_ : Ty → Set₁
⊨ A = ∀ {{_ : Model}} → ⊩ A
-- Forcing in a particular model, for sequents.
module _ {{_ : Model}} where
infix 3 ⊩_⇒_
⊩_⇒_ : Cx Ty → Ty → Set
⊩ Γ ⇒ A = ⊩⋆ Γ → ⊩ A
infix 3 ⊩_⇒⋆_
⊩_⇒⋆_ : Cx Ty → Cx Ty → Set
⊩ Γ ⇒⋆ Ξ = ⊩⋆ Γ → ⊩⋆ Ξ
-- Entailment, or forcing in all models, for sequents.
infix 3 _⊨_
_⊨_ : Cx Ty → Ty → Set₁
Γ ⊨ A = ∀ {{_ : Model}} → ⊩ Γ ⇒ A
infix 3 _⊨⋆_
_⊨⋆_ : Cx Ty → Cx Ty → Set₁
Γ ⊨⋆ Ξ = ∀ {{_ : Model}} → ⊩ Γ ⇒⋆ Ξ
-- Additional useful equipment, for sequents.
module _ {{_ : Model}} where
lookup : ∀ {A Γ} → A ∈ Γ → ⊩ Γ ⇒ A
lookup top (γ , a) = a
lookup (pop i) (γ , b) = lookup i γ
⟦λ⟧ : ∀ {A B Γ} → ⊩ Γ , A ⇒ B → ⊩ Γ ⇒ A ▻ B
⟦λ⟧ f γ = λ a → f (γ , a)
_⟦$⟧_ : ∀ {A B Γ} → ⊩ Γ ⇒ A ▻ B → ⊩ Γ ⇒ A → ⊩ Γ ⇒ B
(f ⟦$⟧ g) γ = f γ $ g γ
⟦S⟧ : ∀ {A B C Γ} → ⊩ Γ ⇒ A ▻ B ▻ C → ⊩ Γ ⇒ A ▻ B → ⊩ Γ ⇒ A → ⊩ Γ ⇒ C
⟦S⟧ f g a γ = S (f γ) (g γ) (a γ)
_⟦,⟧_ : ∀ {A B Γ} → ⊩ Γ ⇒ A → ⊩ Γ ⇒ B → ⊩ Γ ⇒ A ∧ B
(a ⟦,⟧ b) γ = a γ , b γ
⟦π₁⟧ : ∀ {A B Γ} → ⊩ Γ ⇒ A ∧ B → ⊩ Γ ⇒ A
⟦π₁⟧ s γ = π₁ (s γ)
⟦π₂⟧ : ∀ {A B Γ} → ⊩ Γ ⇒ A ∧ B → ⊩ Γ ⇒ B
⟦π₂⟧ s γ = π₂ (s γ)
|
[STATEMENT]
lemma mono_prop_set_equiv:
assumes "mono_prop P"
and "equiv A B"
shows "setify P A \<longleftrightarrow> setify P B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. setify P A = setify P B
[PROOF STEP]
by (meson assms(1) assms(2) local.equiv_def sep_algebra.mono_prop_set sep_algebra_axioms) |
# MAE 3120 Methods of Engineering Experiments
>__Philippe M Bardet__
>__Mechanical and Aerospace Engineering__
>__The George Washington University__
# Module 01: Introduction to measurement systems
This class focuses mainly on experiments, but many of the concepts seen here also apply in other fields where analytical thinking is needed. In fact, with the advancement of computers, one speaks more and more of numerical experiments. The stock market could even be considered one huge real-time experiment...
This is an introductory lecture, and we need to establish a common (rigorous) language for the rest of the class (and your career). We will introduce many definitions that we will use for the rest of the semester. Most of the definitions and notions introduced here should be mere review at this point (I hope).
Along with the common language, we will also adopt a notation convention that is consistent throughout the class. Depending on the textbook you choose to follow, its convention might differ slightly from the one adopted here.
## DIKW pyramid
In engineering, knowledge could be defined as a model that describes and (ideally) predicts the behavior of a complex system. Data acquisition, analysis, and model development can be placed in the context of the wisdom pyramid (or DIKW pyramid), a concept developed over the years in the field of information theory.
https://en.wikipedia.org/wiki/DIKW_pyramid
Starting from the bottom of the pyramid:
__data__: signal readings from sensors/transducers.
__information__: "organized or structured data, which has been processed in such a way that the information now has relevance for a specific purpose or context, and is therefore meaningful, valuable, useful and relevant." Information is data given context: "know what".
__knowledge__: "organization and processing to convey understanding, experience, and accumulated learning". It can be seen as having an engineering model that describes a phenomenon or system: "why is".
__wisdom__: "why do"; the ability to judge when and how a model applies to predict new behaviors.
As an example, consider taking data at three set points: the raw readings are data; the mean values extracted from them are information; the trend line fitted through the means is knowledge; and knowing when that trend line can legitimately be used is wisdom.
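The short sketch below (hypothetical numbers; standard NumPy calls) walks the same readings through the pyramid:

```python
# A minimal DIKW sketch with made-up readings (all numbers are illustrative).
import numpy as np

# Data: repeated raw readings taken at three set points.
x = np.array([1.0, 2.0, 3.0])                 # independent variable (set points)
readings = np.array([[2.1, 1.9, 2.0],         # repeated readings at x = 1
                     [4.2, 3.8, 4.0],         # repeated readings at x = 2
                     [6.1, 5.9, 6.0]])        # repeated readings at x = 3

# Information: the raw readings reduced to a mean value per set point.
y_mean = readings.mean(axis=1)

# Knowledge: a model (here a linear trend line) fitted through the means.
slope, intercept = np.polyfit(x, y_mean, 1)
print(f"model: y = {slope:.2f} x + {intercept:.2f}")

# Wisdom: knowing when the model applies -- interpolation within x = 1..3 is
# justified here, but extrapolation far outside that range is not.
```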
(Figure: an alternative representation of the DIKW pyramid.)
## Goals of experiments
Experiments serve two main purposes:
>__1- Engineering/scientific experimentation__:
>The goal is to seek new information. For example, when developing a new product, one needs to know: how hot does it get? When will it fail? Another example would be determining a model that describes the behavior of a system.
>__2- Operational system__:
>The goal is to monitor and control processes, in other words to create a reliable operational system. This is generally applied to existing equipment (or equipment under design), rather than used to design new equipment (the first application of measurement). For example, this could be the A/C control system of a room: one needs to measure temperature and regulate the heating/cooling based on a set point.
It is convenient to think of the measurement process with a block diagram.
## A brief history of measurement (length)
In Ancient Egypt (~3,000 BC) people used a measure called a CUBIT. This word comes from the Latin word “cubitum,” which meant elbow, and it was the length of a person’s outstretched forearm - from the elbow to the tip of the middle finger. It was based on the Pharaoh’s body: the ROYAL CUBIT. A stick was marked with this distance and copies were distributed to merchants throughout the land. When the Pharaoh died and a new Pharaoh took the throne, a new Royal Cubit came into being, and a new stick and its copies had to be made and sent across the land. Plus, there were OTHER problems: the lengths that people wanted to measure were sometimes shorter or longer than a cubit. So other lengths like the PALM, DIGIT, and FOOT were used. 7 PALMS was the same as 1 CUBIT. The ROYAL FOOT was equal to about 18 fingers or ⅔ of a Royal Cubit.
Alongside the Ancient Egyptians, people throughout the Mediterranean used parts of their bodies to create units of measurement. Mediterranean sailors used Fathoms to measure depth. The Hebrew people of long ago used a measurement called a SPAN. Hand spans are still a unit used to measure horses! The people of Ancient Greece adapted the Egyptian-Hebrew measurements and added more measurements based upon multiples of fingers. Ancient Romans created a measurement based on the width of the thumb, or “uncia” in Latin. That’s where the word INCH comes from. Roman armies measured the distance from one step to another using PACES. A PACE was equal to 2 Egyptian cubits and is still used to describe speed in foot races. The word MILE comes from the Latin word “milliare,” a distance of 1,000 paces covered by the Roman army at a forced march.
As the Roman empire spread, the Roman measuring units became the accepted system of measurement throughout Europe. A Roman FOOT was equal to 12 UNCIA. These uncia came to England and over time became INCHES. Like the Pharaohs of Ancient Egypt, the King of England at the time also wanted to STANDARDIZE (or make the same) the units of measurement across the land based on his own body. A royal decree went out: a YARD was to be the distance from the nose to the tip of the middle finger of the outstretched arm...or about 3 FEET. From this time, all units were derived from the King’s foot and yard. The English Mile was derived from that. This English or Customary system spread back down through Europe and across the ocean to North America with the English who arrived here. As trade and communication increased again, once again a more uniform or regulated system was needed. It took several hundred years for the Customary system to become more and more dependable and standardized. In 1855, a new distance for a yard was formalized. This was still about the size of the original Roman yard. Since then all of the other units of measurement for length have been derived from multiplications and divisions of the Yard. 1 Inch was 1/36 of a yard. 1 Foot was ⅓ yard.
While the English or Customary system came to be widely used in Europe, and is still used here in the United States, today there is another widely used system of measurement that is not based on the measurements of the human body. Its beginnings can be found in the 1600s in Europe, when people began to talk about finding better STANDARDS for measurement. In the 1790s, a group of French scientists decided to create a new standard of measurement that would be unchangeable. The name for the unit of measure chosen was the METER; it was based upon a measurement of the Earth. The system they developed is called the METRIC SYSTEM and it is still in use today, throughout France, Europe and almost every nation in the world. All scientists use the Metric system because it is the most precise. The length of the meter was taken to be one ten-millionth of the distance from the North Pole to the equator along a line of longitude near Paris, France. The word meter comes from the Greek word “metron,” which means to measure. The metric system is based on multiples of 10.
## Dimensions and Units
For measured data to be useful, one needs to have a common language: i.e. a unique definition of dimensions (length, time, etc.) with associated dimensional units (meter, second, etc.).
There are two types of dimensions:
>__1- Primary or base.__ 7 in total.
\begin{array}{l l l}
\hline
\mathrm{primary\, dimension} & \mathrm{symbol} & \mathrm{unit} \\
\hline
\text{mass} & m & \mathrm{kg}\\
\mathrm{length} & L & \mathrm{m}\\
\mathrm{time} & t & \mathrm{s}\\
\mathrm{thermodynamic\,temperature} & T& \mathrm{K}\\
\mathrm{electrical \,current} & I & \mathrm{A}\\
\mathrm{luminous\,intensity} & C& \mathrm{cd,\,candela}\\
\mathrm{amount\,of\,substance} & mol & \mathrm{mole}\\
\hline
\end{array}
>__2- Secondary or derived.__ They are made of a combination of primary/base dimensions
\begin{equation}
\mathrm{force} = \frac{ m \times L}{t^2}
\end{equation}
All other dimensions and units can be derived as combinations of the primaries. Here is a table with examples.
\begin{array}{l l l l}
\hline
\mathrm{secondary\, dimension} & \mathrm{Symbol} & \mathrm{unit} & \text{unit name}\\
\hline
\mathrm{force} & F & \mathrm{N= kg \cdot m/s^2} & \text{Newton}\\
\mathrm{pressure} & P \,(p) & \mathrm{Pa = N/m^2} & \text{Pascal}\\
\mathrm{energy} & E & \mathrm{J = N \cdot m = kg \cdot m^2 / s^2} & \text{Joule} \\
\mathrm{power} & \dot{W} \, (P) & \mathrm{W = N \cdot m / s = kg \cdot m^2 / s^3} & \text{Watt}\\
\hline
\end{array}
To avoid confusion we will use the SI (International System of Units).
Please also note the notation used to report units and symbols. We will use the same notation throughout the class, and you will use it throughout your career. In scientific notation:
> - mathematical symbols are reported as italic (e.g. temperature $T$, pressure $P$, velocity $U$),
> - while units are reported in roman (upright) font, separated by a space from the value they characterize (e.g. $P$ = 100 Pa, $U$ = 5 m/s, $T$ = 400 K).
Anecdote: the Mars Climate Orbiter crashed in 1999 due to a unit-conversion problem: one piece of software reported thruster impulse in pound-force seconds while the rest of the system expected newton-seconds. The official report attributed the loss to the ''failure to use metric units''.
While we will use the SI system in the class it is useful to know how to convert dimensions from one unit system to another (i.e. imperial to SI). Here are some useful quantities to keep handy.
### Unit conversion
\begin{array}{l l l}
\hline
\mathrm{length} & & \\
\hline
1 \,\mathrm{in} & = & 25.4 \times 10^{-3}\,\mathrm{m}\\
1 \,\mathrm{ft} & = & 0.3048 \,\mathrm{m} \\
& = & 12 \,\mathrm{in}\\
1\, \AA & = & 10^{-10}\,\mathrm{m} \\
1\,\mathrm{mile \,(statute)} & = & 1,609 \,\mathrm{m}\\
1 \,\mathrm{mile \,(nautical)} & = & 1,852 \,\mathrm{m} \\
%
\hline
\mathrm{volume} & & \\
\hline
1 \,\mathrm{l\, (liter)} & = & 10^{-3}\,\mathrm{m}^3 \\
1\,\mathrm{ in}^3 & = & 16.387 \,\mathrm{cm}^3\\
1 \,\mathrm{gal\, (U.S.\, liq.)} & = & 3.785\,\mathrm{l} \\
1 \,\mathrm{gal\, (U.S.\, dry)} & = & 1.164\,\mathrm{ U.S.-liq.\, gal}\\
1 \,\mathrm{gal \,(British)} & = & 1.201\,\mathrm{ U.S.-liq.\, gal}\\
%
\hline
\mathrm{mass} & &\\
\hline
1 \,\mathrm{lb \,(mass)} & = & 0.454\,\mathrm{ kg}\\
%
\hline
\mathrm{force}& &\\
\hline
1 \,\mathrm{N }& = & 1\,\mathrm{ kg\cdot m/s}^2\\
1\,\mathrm{ dyne} & = & 10^{-5}\,\mathrm{ N}\\
1 \,\mathrm{lb \,(force)} & = & 4.448 \,\mathrm{N}\\
%
\hline
\mathrm{energy} & & \\
\hline
1 \,\mathrm{J }& = & 1\, \mathrm{kg \cdot m}^2/\mathrm{s}^2\\
1 \,\mathrm{BTU} & = & 1,055.1\,\mathrm{ J}\\
1 \,\mathrm{cal} & \equiv & 4.184\,\mathrm{ J}\\
1 \,\mathrm{kg-TNT} & \equiv & 4.184\,\mathrm{ MJ}\\
%
\hline
\mathrm{power} & & \\
\hline
1 \,\mathrm{W} & \equiv & 1 \,\mathrm{J/s}\\
1 \,\mathrm{HP\, (imperial)} & \equiv & 745.7 \,\mathrm{W}\\
1 \,\mathrm{HP\, (metric)} & \equiv & 735.5 \,\mathrm{W}\\
\hline
\end{array}
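Since conversions like these come up constantly, it can be convenient to script them. Below is a minimal Python sketch using factors from the table above (the helper names are just illustrative):
```python
# Minimal unit-conversion helpers; factors taken from the table above.
IN_TO_M = 25.4e-3   # 1 in = 25.4e-3 m
LBF_TO_N = 4.448    # 1 lbf = 4.448 N

def in_to_m(length_in):
    """Convert a length from inches to meters."""
    return length_in * IN_TO_M

def lbf_to_N(force_lbf):
    """Convert a force from pound-force to newtons."""
    return force_lbf * LBF_TO_N

print(in_to_m(12.0))  # 0.3048 m, i.e. 1 ft
print(lbf_to_N(1.0))  # 4.448 N
```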
### Dimensionless numbers
\begin{array}{l l l}
\mathrm{Reynolds\, number} & Re & U L / \nu \\
\mathrm{Mach\, number} & M & U/a \\
\mathrm{Prandtl\, number} & Pr & \mu c_p / k = \nu / \kappa \\
\mathrm{Strouhal\, number} & St & L/U \tau \\
\mathrm{Knudsen\, number} & Kn & \Lambda / L \\
\mathrm{Peclet\, number} & Pe & U L / \kappa = Pr \cdot Re \\
\mathrm{Schmidt\, number} & Sc & \nu / D \\
\mathrm{Lewis\, number} & Le & \kappa / D \\
\end{array}
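As a quick sanity check on how these groups are used, here is a short Python sketch evaluating two of them for air flow over a model; the property values are typical room-temperature numbers assumed for illustration:
```python
# Reynolds and Mach numbers for air at ~300 K (illustrative values).
U = 30.0      # flow speed, m/s (assumed)
L = 0.1       # characteristic length, m (assumed)
nu = 1.5e-5   # kinematic viscosity of air, m^2/s (approximate)
a = 347.0     # speed of sound in air at 300 K, m/s (approximate)

Re = U * L / nu  # ratio of inertial to viscous forces
M = U / a        # ratio of flow speed to speed of sound
print(Re, M)     # Re ~ 2e5, M ~ 0.086
```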
### Useful constants
Avogadro's number:
\begin{align*}
N_A & = 6.022\, 1367 \times 10^{23} \mathrm{\, molecules/(mol)} %\nolabel
\end{align*}
Boltzman constant:
\begin{align*}
k_B & = 1.380\, 69 \times 10^{-23} \mathrm{\, J/K} \\
k_B T & = 2.585 \times 10^{-2} \mathrm{\,eV} \sim \frac{1}{40} \mathrm{\, eV, at \,} T = 300 \mathrm{K}
\end{align*}
Universal gas constant:
\begin{align*}
R_u & = 8.314\, 510 \mathrm{\, J/(mol} \cdot \mathrm{K)}
\end{align*}
Earth radius (at equator):
\begin{align*}
r_{earth} & = 6\,378.1370 \mathrm{\, km}
\end{align*}
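As a quick check of the $k_B T \sim \frac{1}{40}$ eV rule of thumb quoted above:
```python
# Verify k_B*T at room temperature in electron-volts.
k_B = 1.380649e-23  # Boltzmann constant, J/K
q_e = 1.602177e-19  # J per eV
T = 300.0           # K

print(k_B * T / q_e)  # ~0.0259 eV, i.e. roughly 1/40 eV
```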
## Dimensional analysis
Now that we know the primary dimensions, we can make use of them to reduce the number of experimental runs one needs to perform. This is the foundation of dimensional analysis, which you have seen in MAE 3126 (Fluid Mechanics). We will see other techniques to reduce the number of experimental runs, such as Taguchi arrays, in a few weeks when we treat design of experiments. The benefit of dimensional analysis is best seen through the graph below:
Please review your notes of Fluid Mechanics on dimensional analysis and the method of repeating variables (also called Buckingham $\Pi$ theorem)
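As a reminder of how this plays out, consider the classic drag-on-a-sphere example (a standard textbook illustration): the drag force depends on four dimensional parameters, yet the method of repeating variables collapses the problem to a single relationship between two dimensionless groups.
\begin{align*}
F_D = f(\rho, U, D, \mu)
\quad \Rightarrow \quad
\frac{F_D}{\rho U^2 D^2} = \phi\!\left(\frac{\rho U D}{\mu}\right) = \phi(Re)
\end{align*}
With $n = 5$ variables and $k = 3$ primary dimensions ($m$, $L$, $t$), the Buckingham $\Pi$ theorem gives $n - k = 2$ dimensionless groups, so one curve of drag coefficient versus Reynolds number replaces a four-parameter test matrix.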
## Errors and Uncertainties
An __error__ is defined as:
\begin{align*}
\epsilon = x_m - x_{true}
\end{align*}
where $x_m$ is the measured value and $x_{true}$ the true value. The problem is that we do not always know the true value (in fact, we rarely do). This will lead to the concept of uncertainty.
Errors can be categorized into two types:
>__1- Systematic or bias error__: These are errors that are consistent or repeatable. For example, if I use a ruler with the first 3 mm missing, all of the measurements will be offset by 3 mm.
>__2- Random or precision error__: errors that are inconsistent or unrepeatable. This will be seen as scatter in the measured data. For example, this could be caused by electromagnetic noise in a voltmeter (with implications for grounding and shielding of the instrument).
Systematic vs random errors can be best seen visually:
In light of the two types of errors defined above, one would like to define mathematical formulas to quantify them.
__systematic/bias error__
\begin{align*}
\epsilon_b = x_m - x_{true}
\end{align*}
_Question_: What are sources of bias errors?
How can we reduce bias errors?
__relative mean bias error__: non-dimensional (normalized) form of the mean bias error.
\begin{align*}
\frac{<x_m> - x_{true}}{x_{true}}
\end{align*}
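For concreteness, here is a small sketch computing the bias and relative mean bias error for a set of readings against a known reference; the numbers are made up for illustration:
```python
import numpy as np

# Hypothetical readings of a 100.0 Pa reference pressure (illustrative values).
x_true = 100.0
x_m = np.array([100.4, 100.5, 100.3, 100.6, 100.4])

bias = np.mean(x_m) - x_true                 # mean bias error, Pa
rel_bias = (np.mean(x_m) - x_true) / x_true  # relative mean bias error
print(bias, rel_bias)                        # 0.44 Pa and 0.0044 (0.44 %)
```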
__random/precision error__
\begin{align*}
\epsilon_p = x_m - <x_m>
\end{align*}
_example_: We have five temperature measurements: Can you find the maximum precision error?
The __overall precision error or standard error__ is found by computing the standard deviation, $S$, divided by the square root of the number of samples, $n$.
\begin{align*}
\epsilon_{op} = \frac{S}{\sqrt{n}}
\end{align*}
```python
import numpy
T = [372.80, 373.00, 372.90, 373.30, 373.10]  # five temperature readings (K)
Tm = numpy.mean(T)  # sample mean
print(Tm)
ep_max = 373.30 - Tm  # largest deviation from the mean = maximum precision error
print(ep_max)
```
373.02
0.28000000000002956
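Continuing with the same five readings, the standard error follows directly from the definition above (a quick sketch using `numpy`):
```python
import numpy
T = [372.80, 373.00, 372.90, 373.30, 373.10]
S = numpy.std(T, ddof=1)        # sample standard deviation (K)
ep_op = S / numpy.sqrt(len(T))  # standard error, S/sqrt(n)
print(S, ep_op)                 # S ~ 0.19 K, standard error ~ 0.086 K
```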
_Question_: Are five measurements enough to quantify the precision error? We also need a statistic that represents the __mean__ precision error. We will see this soon.
Now that we have described the two types of errors and hinted at how to estimate them, let's look at instruments specifically.
### Calibration
Calibration aims to determine and improve the accuracy of an instrument. Calibration can be accomplished in 3 manners: 1- comparison with a primary standard (such as those maintained by NIST, like the mass or meter defined earlier), 2- comparison with a secondary standard, such as another instrument of known and higher accuracy, 3- comparison with a known input source.
Here are examples of primary standards for temperature that are ''easy'' to implement in any lab:
\begin{array}{l l}
\hline
\mathrm{definition} & \text{temperature (K)}\\
\hline
\mathrm{triple\, point\, of\, hydrogen} & 13.8033 \\
\mathrm{triple\, point\, of\, oxygen} & 54.3584\\
\mathrm{triple\, point\, of\, water} & 273.16 \\
\mathrm{Ice\, point} & 273.15 \\
\mathrm{normal\, boiling\, point\, of\, water} & 373.15 \\
\hline
\end{array}
Calibration can be done in a static and/or dynamic manner. It is a very important step to verify the accuracy of an instrument or sensor. In most laboratories, calibration has to be performed regularly. Some commercial entities are specialised in doing so.
Before each important test campaign, or after a company recertifies an instrument, the results have to be documented for traceability. Here is an example for a load balance.
### Uncertainty
The concept of __uncertainty__ needs to be taken into account when we conduct experiments. Uncertainty can be defined as (S.J. Kline) ''What we think the error would be if we could and did measure it by calibration''. Taking data is a very small part of doing an experiment and we are going to spend a lot of time doing uncertainty analysis.
An error, $\epsilon$, has a particular sign and magnitude (see the equations above). If it is known, then it can be removed from the measurements (through calibration, for example). Any remaining error that does not have a known sign and magnitude cannot be removed. We will define an uncertainty $\pm u$ as the range that contains the remaining (unknown) errors.
Because we are making measurements in an uncertain world, we need to be able to express our __confidence level__ in our results. This will require a set of sophisticated statistical tools.
__Uncertainty analysis__ is an extremely important tool and step in experiments, and we are going to spend a significant amount of time on it during the class. This analysis is typically performed in the experimental planning phase (to help determine the appropriate components to use in our instrumentation chain, seen in the first figure). An extensive analysis is also performed after the campaign to characterize the actual uncertainties in the measurement.
### Instrument rating
When selecting an instrument for a measurement, one has many options. Ideally, one would like to select a system that will meet our requirements for the measurement (such as the expected range, but also the uncertainty) while not breaking the bank... Luckily, manufacturers report a lot of data with their sensors/transducers that can help us make an educated guess of the expected performance without having to characterize it ourselves. Here is an example from Omega Scientific for a pressure transducer:
Let's define a few of the terms used.
__Accuracy__ is the difference between the true and measured values.
\begin{align*}
u_a = x_m - x_{true}
\end{align*}
A small difference between the true and measured values corresponds to high accuracy, and vice-versa. It can be expressed as a percentage of reading, a percentage of full-scale, or an absolute value. Accuracy can be assessed and improved by calibrating the system.
The __precision__ of an instrument is the reading minus the average of the readings. It characterizes the random error of the instrument output, i.e. the reproducibility of the instrument.
\begin{align*}
u_p = x_m - <x_m>
\end{align*}
_Questions_:
>Can we improve precision by calibrating the system?
> Is there a limit up to which we can improve the accuracy of a system?
If we recall our previous definitions for errors, you will remark that they are the same as the two terms introduced above, and they imply that we compare the instrument readings to the true, known value. However, in most cases we do not know the true value; instead we are only confident that we are within a certain range ($\pm$) of the true value. Therefore, to be consistent with the definitions introduced so far, we should use the term uncertainty, not error, when describing experimental results (except for a few cases).
Accuracy and precision are the two main categories of uncertainties in our measurements; however, each is made up of elemental components. A non-exhaustive list includes:
__resolution__: the smallest change or increment in the measurand that the instrument can detect. Note that for a digital instrument, resolution is associated with the number of digits on the display, i.e. a 5-digit Digital Multi-Meter (DMM) has better resolution than a 4-digit DMM. The values reported by the DMM will be at $\pm$ the last digit.
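As a toy sketch of what finite display resolution does to a reading (a simple quantization model, not a specific DMM):
```python
# Toy model: a display with a fixed number of decimals quantizes the reading.
def dmm_reading(v_true, decimals):
    """Round the input to the display resolution."""
    return round(v_true, decimals)

v = 1.234567  # 'true' voltage, V (assumed)
print(dmm_reading(v, 3))  # 1.235  -> resolution 0.001 V
print(dmm_reading(v, 4))  # 1.2346 -> resolution 0.0001 V
```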
__sensitivity__: the change in instrument output per unit change in the measurand, i.e. the slope of the (static) calibration curve. A more sensitive instrument produces a larger output change for the same input change.
Other elemental sources of error include zero offset, linearity, sensitivity drift, hysteresis, etc.
## Experiment planning
|
lemma higher_deriv_compose_linear: fixes z::complex assumes f: "f holomorphic_on T" and S: "open S" and T: "open T" and z: "z \<in> S" and fg: "\<And>w. w \<in> S \<Longrightarrow> u * w \<in> T" shows "(deriv ^^ n) (\<lambda>w. f (u * w)) z = u^n * (deriv ^^ n) f (u * z)" |
import os
# import re
# import pandas as pd
import numpy as np
from karon import Sample
from karon.decorators import readwrite, requires
from karon.io import ExcelIO
# from karon.tree.build import from_parent
from karon.tree.build import generate_tree
# from karon.tree import NLRTree
# from karon.tree import LRNTree
# from karon.tree import empty_like
from karon.tree.util import get, put
from karon.specialized import as_opnode
from karon.io.pandas import to_dataframe
@readwrite
@requires("name", "parent name")
def generic(**contents):
return Sample(**contents)
@readwrite
@requires("name", "parent name")
def build(**contents):
return Sample(**contents)
@readwrite
@requires("name", "parent name")
def mechanical(**contents):
return Sample(**contents)
@readwrite
@requires("name", "parent name")
def porosity(**contents):
return Sample(**contents)
def propagate(parent_key, child_key=None, overwrite=False):
"""
Creates an agent that propagates specific keys from parent to child
through the tree.
:param parent_key: Key from Node.contents that holds the value to be
propagated to the child.
:type parent_key: str
:param child_key: Key in the child to place the result. By default, is
the same as parent_key.
:type child_key: str
:param overwrite: Whether to overwrite values already present in the
children. Default: False.
:return: Unary function, signature: f(tree-like)
"""
def get_from_parent(node):
return get(parent_key)(node)
def put_in_child(node):
if overwrite or (child_key not in node.contents):
value = get_from_parent(node.parent)
if value is not None:
put(child_key)(node, value)
def func(root):
root = as_opnode(root)
root.puts(put_in_child)
if child_key is None:
child_key = parent_key
return func
def aggregate(gets, reduce=None):
"""
Creates an agent that aggregates and, optionally, performs a reduction
on values collected from descendant nodes in a tree.
:param gets: Function to extract the feature/features that are to
be aggregated.
:type gets: unary function, signature: f(Node)
:param reduce: Function that accepts the list of results returned from
this aggregation and applies a transform. While this need not return
a scalar, an archetype for this function is to perform a reduction,
such as a mean.
:type reduce: unary function, signature: f(list-like)
:return: Unary function, signature: f(tree-like)
"""
def func(root):
root = as_opnode(root)
root.gets(gets, callback=reduce)
return func
# example usage
def mean(alist):
try:
return np.mean([x for x in alist if (x is not None)])
except (TypeError, ValueError):
return alist
def strcmp(*transforms):
def func(lhs, rhs):
l = str(lhs)
r = str(rhs)
for t in transforms:
l = t(l)
r = t(r)
return (l == r)
return func
reader = ExcelIO(build=build,
mechanical=mechanical,
porosity=porosity,
default=generic)
nodes = reader.load(os.path.join('..', 'tests', 'data', 'example.xlsx'))
lineage = generate_tree(
get_nodeid=get('name'),
get_parent=get('parent name'),
cmp=strcmp(str.lower, str.strip))(nodes)
aggregator = aggregate(
get("modulus (GPa)"),
reduce=lambda node, arr: put("average modulus")(node, mean(arr)))
propagator = propagate("spot size (um)")
for root in lineage:
aggregator(root)
propagator(root)
to_dataframe(nodes).to_excel('output.xlsx', index=False)
##### OO-based example #####
# class Example(object):
# _uid_key: str
# _parent_key: str
#
# def __init__(self, uid: str, parent: str):
# """
# Create an example object for reading and processing a structured
# data object into a hierarchical data object.
#
# :param uid: Key (column name) that uniquely identifies a sample.
# :type uid: str
# :param parent: Key (column name) that identifies from which sample
# a child node descends.
# :type parent: str
# """
# self._uid_key = uid
# self._parent_key = parent
# self._filenames = []
# self._nodes = {}
# self.roots = []
#
# def get_uid(self, entry):
# """Returns the unique identifier for the given dictionary."""
# try:
# key = [k for k in entry.keys()
# if re.match(self._uid_key, k, flags=re.IGNORECASE)][0]
# except IndexError:
# raise KeyError(f'The unique identifier field ({self._uid_key})'
# f'was not found.')
# return entry[key]
#
# @staticmethod
# def node_generator_from_sheetname(sheetname):
# return {
# 'build': build,
# 'mechanical': mechanical,
# 'porosity': porosity
# }.get(sheetname, generic)
#
# def excel_reader(self, filename):
# """
# Reads data from an excel file and maps the nodes to their names.
#
# :param filename: (str) Excel file containing sample information.
# :return: (dict) Maps UIDs to the nodes.
# """
# nodes = {}
# # get sheet names
# xls = pd.ExcelFile(filename)
# for sheetName in xls.sheet_names:
# # read in each sheet
# df = read_excel(filename, sheetName)
# # read nodes into dictionary
# for entry in df.to_dict('records'):
# uid = self.get_uid(entry)
# if uid in nodes:
# raise KeyError('Sample names must be unique. '
# '{} is duplicated'.format(uid))
# else:
# nodes[uid] = Example.node_generator_from_sheetname(
# sheetName)(**entry)
# # return dictionary of nodes.
# return nodes
#
# @staticmethod
# def filetype(filename):
# """
# Guess the file type from the file name.
#
# :param filename: Name of the file to be read.
# :type filename: str
# :return: Keyword/phrase to describe the file type.
# :rtype: str
# """
# if (filename.lower().endswith('xls') or
# filename.lower().endswith('xlsx')):
# return 'excel'
# else:
# raise ValueError(f"Filetype of {filename} could not be identified.")
#
# def read(self, *filenames):
# """
# Reads the list of filenames. They types are inferred using
# the filetype function.
#
# :param filenames: Filenames to be read.
# :type filenames: tuple of str
# :return: None. (Updates the list of nodes.)
# """
# for afilename in filenames:
# # get reader
# reader = {
# 'excel': self.excel_reader
# }[Example.filetype(afilename)]
# # read this file
# nodes = reader(afilename)
# # save that this file was read
# self._filenames.append(afilename)
# # look for duplicate nodes
# intersection = set(
# self._nodes.keys()).intersection(set(nodes.keys()))
# if len(intersection) != 0:
# raise KeyError(f'At least one duplicate entry ({intersection}) '
# f'found while reading {filenames}')
# else:
# self._nodes.update(nodes)
# self.build_trees()
#
# def build_trees(self):
# """
# Build trees based on Excel structure.
#
# :return: None. (Updates list of roots in this instance.)
# """
# self.roots = from_parent(self._nodes, key=self._parent_key)
#
# def get_all_keys(self):
# self.build_trees()
# keys = set()
# for root in self.roots:
# for node in NLRTree(root):
# keys = keys.union(set(node.contents.keys()))
# return tuple(keys)
#
# def propagate(self, *keys, overwrite: bool = False):
# """
# Passes information down to the child nodes, if they are writeable.
#
# :param key: The properties/features to be passed down to the children.
# If no key is given inherit all keys.
# :type key: str
# :param overwrite: Should the value being pushed to the children
# overwrite an existing entry? Default: False.
# :type overwrite: bool
# :param callback: Unary function generator that takes
# :return: None
# """
# def get_from(root, key):
# def func(node):
# nodeval = node.contents.get(key, '')
# if nodeval in ('', None) or overwrite:
# node.contents[key] = rootval
# return
#
# rootval = root.contents.get(key, '')
# return func
#
# # propagate all keys, if no key is given
# if len(keys) == 0 or keys[0] is None:
# keys = self.get_all_keys()
# # pass
# # run through all keys
# for akey in keys:
# for root in self.roots:
# for node in NLRTree(root):
# push = get_from(node, akey)
# for sub in NLRTree(node):
# sub.puts(func=push)
#
# @staticmethod
# def mean(key):
# def func(node, vec):
# vec = [x for x in vec if x is not None]
# try:
# if len(vec) > 0:
# node.contents[key] = np.mean(vec)
# except TypeError:
# pass
# return func
#
# @staticmethod
# def store(key):
# def func(node, vec):
# node.contents[key] = vec
# return func
#
# @staticmethod
# def fetch(key):
# def func(node):
# return node.contents.get(key, None)
# return func
#
# def aggregate(self, *keys, reduce=None):
# """
# Applies a reduction operation to each node. The aggregation is
# done from the root node downward so that the attributes for the
# descendant nodes are not populated until after the predecessor
# nodes.
#
# :param keys: Feature to reduce and combine into the each parent node.
# If none are given, then reduce on all keys.
# :type key: tuple(str)
# :param reduce: Function generator used to reduce the collected
# results. This generator takes a single parameter, a string,
# that indicates where the result should be stored. The default
# is a function that simply stores the vector-result of the
# aggregation. See examples.
# :type reduce: unary function
# :return: None.
# """
# # handle the default values for aggregate
# reduce = {
# None: Example.store,
# 'store': Example.store}.get(reduce, reduce)
# # reduce on all keys, if no key is given
# if len(keys) == 0 or keys[0] is None:
# keys = self.get_all_keys()
# # reduce on keys twice: once to populate each node from its
# # descendents, and once again to populate antecedents from
# # descendents that were populated in the first iteration.
# for iteration in range(2):
# for key in keys:
# for root in self.roots:
# # perform reduction
# for node in NLRTree(root):
# # if node contains a value, do not replace with
# # the reduced value.
# if key in node.contents:
# continue
# else:
# node.gets(func=Example.fetch(key),
# callback=reduce(key))
|
# Contributions to SBDF reader functionality provided by PDF Solutions, Inc. (C) 2021
"""
TODOS:
* Return table/column metadata as well as the table data
* Support Decimal type
* Support _ValueArrayEncodingId.RUN_LENGTH array type
* Contemplate making an SBDF writer
"""
from contextlib import ExitStack
from pathlib import Path
from typing import Any, BinaryIO, Dict, Hashable, List, Tuple, Union, cast
import numpy as np
import pandas as pd
try:
from tqdm import tqdm
except ImportError:
tqdm = None
from .array import (
PackedArray,
PackedBitArray,
PackedPlainArray,
next_bytes_as_packed_array,
unpack_bit_array,
unpack_packed_array,
)
from .base import SectionTypeId, ValueTypeId, next_bytes_as_int, next_bytes_as_str
from .metadata import Metadatum, next_bytes_as_column_metadata, next_bytes_as_metadata
def _next_bytes_as_section_id(file: BinaryIO) -> int:
"""Reads section type id from file."""
magic_number = next_bytes_as_int(file)
if magic_number != 0xDF:
raise ValueError("Section magic number 1 not found")
magic_number = next_bytes_as_int(file)
if magic_number != 0x5B:
raise ValueError("Section magic number 2 not found")
section_id = next_bytes_as_int(file)
return section_id
def import_data( # noqa: C901
sbdf_file: Union[str, Path],
strings_as_categories: bool = False,
skip_strings: bool = False,
progress_bar: bool = True,
) -> pd.DataFrame:
"""Import data from an SBDF file and create a pandas DataFrame.
TODO: document keyword arguments
"""
# prevent edge cases for skip_strings option
if skip_strings and strings_as_categories:
raise ValueError("Strings cannot be both skipped and treated as categories")
# establish a master context manager for the duration of reading the file
with ExitStack() as read_context:
# open the SBDF file, managing context using the master context
file = read_context.enter_context(Path(sbdf_file).open("rb"))
# if we have tqdm, create and add progress bar managed by master read context
pbar = None
if tqdm is not None:
pbar = read_context.enter_context(
tqdm(desc="Reading File", unit="row", disable=not progress_bar)
)
# read file header
section_id = _next_bytes_as_section_id(file)
assert section_id == SectionTypeId.FILEHEADER
version_major = next_bytes_as_int(file)
version_minor = next_bytes_as_int(file)
if (version_major, version_minor) != (1, 0):
v = f"{version_major}.{version_minor}"
msg = f"Only version 1.0 supported, but version {v} encountered."
raise ValueError(msg)
# read table metadata
section_id = _next_bytes_as_section_id(file)
assert section_id == SectionTypeId.TABLEMETADATA
table_metadata = { # noqa F841
md.name: md.value for md in next_bytes_as_metadata(file)
}
# TODO: parse table metadata into a form that can be returned
# read column metadata
n_columns = next_bytes_as_int(file, n_bytes=4)
column_metadata_fields: Tuple[Metadatum, ...] = next_bytes_as_metadata(
file, skip_values=True
)
column_metadatas: Tuple[Dict[str, Any], ...] = tuple(
{
md.name: md.value
for md in next_bytes_as_column_metadata(file, column_metadata_fields)
}
for _ in range(n_columns)
)
# TODO: parse column metadata into a form that can be returned
column_names: Tuple[Hashable, ...] = tuple(
md_dict["Name"] for md_dict in column_metadatas
)
column_types = tuple(
ValueTypeId(md_dict["DataType"][0]) for md_dict in column_metadatas
)
# read table content as arrays packed into bytes objects
rows_per_slice: List[int] = []
table_slices: List[Dict[Hashable, PackedArray]] = []
table_slice_nulls: List[Dict[Hashable, PackedBitArray]] = []
while True:
current_slice: Dict[Hashable, PackedArray] = dict()
current_slice_nulls: Dict[Hashable, PackedBitArray] = dict()
# read next table slice
section_id = _next_bytes_as_section_id(file)
if section_id == SectionTypeId.TABLEEND:
break
if section_id != SectionTypeId.TABLESLICE:
raise ValueError(f"Expected table slice ID, got {section_id} instead")
slice_n_columns = next_bytes_as_int(file, n_bytes=4)
assert slice_n_columns == n_columns
# read each column slice in the table slice
for column_name in column_names:
section_id = _next_bytes_as_section_id(file)
assert section_id == SectionTypeId.COLUMNSLICE
col_vals = next_bytes_as_packed_array(file)
# handle column properties (ignoring all but IsInvalid)
n_properties = next_bytes_as_int(file, n_bytes=4)
for _ in range(n_properties):
property_name = next_bytes_as_str(file)
property_value = cast(
PackedBitArray, next_bytes_as_packed_array(file)
)
# we only care about the "IsInvalid" property, which defines nulls
if property_name == "IsInvalid":
current_slice_nulls[column_name] = property_value
current_slice[column_name] = col_vals
n_row_in_slice = next(iter(current_slice.values())).n
rows_per_slice.append(n_row_in_slice)
if pbar is not None:
pbar.update(n_row_in_slice)
table_slices.append(current_slice)
table_slice_nulls.append(current_slice_nulls)
# concatenate column slices and missing mask slices into single packed objects
col_name_iter = column_names
if tqdm is not None:
col_name_iter = tqdm(
col_name_iter,
desc="Concatenating Column Slice Data",
unit="col",
disable=not progress_bar,
)
packed_full_columns = {}
packed_missing_masks = {}
for col_name in col_name_iter:
chunks = tuple(ts.pop(col_name) for ts in table_slices)
array_type = type(chunks[0]) if len(chunks) > 0 else PackedPlainArray
packed_full_columns[col_name] = array_type.concatenate(chunks) # type: ignore
packed_missing_masks[col_name] = PackedBitArray.concatenate(
tuple(
tsn.pop(col_name, PackedBitArray.empty(n))
for tsn, n in zip(table_slice_nulls, rows_per_slice)
)
)
# unpack columns from bytes objects into numpy arrays
col_name_type_iter = zip(column_names, column_types)
if tqdm is not None:
col_name_type_iter = tqdm(
col_name_type_iter,
desc="Unpacking Data",
unit="col",
disable=not progress_bar,
total=n_columns,
)
pandas_data = {}
for col_name, col_type in col_name_type_iter:
# skip strings if setting enabled
if skip_strings and col_type == ValueTypeId.STRING:
del packed_full_columns[col_name]
pandas_data[col_name] = pd.Categorical.from_codes(
codes=np.zeros(sum(rows_per_slice), dtype=np.uint8),
categories=["<SKIPPED>"],
)
continue
# unpack column to array otherwise
packed = packed_full_columns.pop(col_name)
if isinstance(packed, PackedPlainArray):
col_array = unpack_packed_array(packed, strings_as_categories)
elif isinstance(packed, PackedBitArray):
col_array = unpack_bit_array(packed)
else:
raise RuntimeError(
"Unable to parse file correctly, we thought we had a packed "
"array, but we didn't!"
)
pandas_data[col_name] = col_array
# unpack and apply missing masks
col_name_type_iter = zip(column_names, column_types)
if tqdm is not None:
col_name_type_iter = tqdm(
col_name_type_iter,
desc="Handling Missing Values",
unit="col",
disable=not progress_bar,
total=n_columns,
)
for col_name, col_type in col_name_type_iter:
missing_mask = unpack_bit_array(packed_missing_masks.pop(col_name))
if missing_mask.any():
col_array = pandas_data[col_name]
missing_value = (
None
if col_type
in (ValueTypeId.BINARY, ValueTypeId.DECIMAL, ValueTypeId.STRING)
else np.nan
)
needs_copy = (
not col_array.flags.writeable if hasattr(col_array, "flags") else False
)
# convert numpy-native binary array to Python object array for nullability
dtype = "O" if col_type == ValueTypeId.BINARY else None
col_array = pd.Series(col_array, copy=needs_copy, dtype=dtype)
col_array.loc[missing_mask] = missing_value
col_array = col_array.values
pandas_data[col_name] = col_array
# create dataframe and return
df = pd.DataFrame(pandas_data)
return df
|
import topology.continuous_function.basic
import topology.instances.real
import topology.path_connected
import intervals
/-!
# Homotopy
In this file, we define homotopies between continuous functions. Note in particular that we are
defining homotopies to be `to_fun : C(X × ℝ, Y)` instead of `to_fun : C(X × I, Y)`. This is because
the subtypes can be annoying to work with, and we don't actually care about the value that the
homotopy takes outside of `X × [0, 1]`.
## Implementation Notes
The definition of homotopies is inspired by the file HOL-Library/Homotopy, by Lawrence Paulson. In
particular, we define a general `homotopy_with f₀ f₁ P`, which is a homotopy between `f₀` and `f₁`,
where all of the intermediate maps satisfy the property `P`. In particular, this general definition
allows us to define homotopy, homotopy between paths, homotopy between loops and homotopy relative
to a subset all using the same definition.
## Key Declarations
- `homotopy_with f₀ f₁ P` - A homotopy between `f₀` and `f₁`, where all of the intermediate maps
satisfy the property `P`.
- `homotopy f₀ f₁` - A homotopy between `f₀` and `f₁`.
-/
noncomputable theory
variables {X Y : Type _} [topological_space X] [topological_space Y]
/--
A homotopy between `f₀` and `f₁`, with a proposition `P` restricting the intermediate maps.
-/
@[nolint has_inhabited_instance] -- if `P` is always `false`, then there are no homotopies.
structure homotopy_with (f₀ f₁ : C(X, Y)) (P : (X → Y) → Prop) :=
(to_fun : C(X × ℝ, Y))
(to_fun_zero' : ∀ x, to_fun (x, 0) = f₀ x)
(to_fun_one' : ∀ x, to_fun (x, 1) = f₁ x)
(prop : ∀ t, P(λ x, to_fun (x, t)))
namespace homotopy_with
variables {f₀ f₁ f₂ : C(X, Y)} {P : (X → Y) → Prop}
instance : has_coe_t (homotopy_with f₀ f₁ P) (C(X × ℝ, Y)) := ⟨homotopy_with.to_fun⟩
instance : has_coe_to_fun (homotopy_with f₀ f₁ P) := ⟨_, λ h, h.to_fun.to_fun⟩
@[continuity]
lemma continuous (h : homotopy_with f₀ f₁ P) : continuous h := h.to_fun.continuous
@[simp] lemma to_fun_zero (h : homotopy_with f₀ f₁ P) (x : X) : h (x, 0) = f₀ x :=
h.to_fun_zero' x
@[simp] lemma to_fun_one (h : homotopy_with f₀ f₁ P) (x : X) : h (x, 1) = f₁ x :=
h.to_fun_one' x
@[simp] lemma coe_coe_apply_eq_coe (h : homotopy_with f₀ f₁ P) (x : X × ℝ) :
(h : C(X × ℝ, Y)) x = h x := rfl
/--
If `f₀` satisfies the property `P`, then we have a `homotopy_with f₀ f₀ P`.
-/
def refl (hP : P f₀) : homotopy_with f₀ f₀ P :=
{ to_fun :=
{ to_fun := λ p, f₀ p.1 },
to_fun_zero' := by simp only [continuous_map.coe_mk, implies_true_iff, eq_self_iff_true],
to_fun_one' := by simp only [continuous_map.coe_mk, implies_true_iff, eq_self_iff_true],
prop := λ t, hP }
/--
If `f₀` and `f₁` agree on every input, and `f₀` satisfies the property `P`, then we have a `homotopy_with f₀ f₁ P`.
-/
def of_refl (hP : P f₀) (h : f₀ = f₁) : homotopy_with f₀ f₁ P :=
{ to_fun := { to_fun := λ p, f₀ p.1 },
to_fun_zero' := by simp only [continuous_map.coe_mk, implies_true_iff, eq_self_iff_true],
to_fun_one' := by simp only [continuous_map.coe_mk, implies_true_iff, eq_self_iff_true, h],
prop := λ t, hP }
/--
If we have `h : homotopy_with f₀ f₁ P`, we can define a `homotopy_with f₁ f₀ P` by reversing the
direction of the homotopy.
-/
def symm (h : homotopy_with f₀ f₁ P) : homotopy_with f₁ f₀ P :=
{ to_fun :=
{ to_fun := λ p, h (p.1, 1 - p.2) },
to_fun_zero' := by simp,
to_fun_one' := by simp,
prop := λ t,
begin
simp only [continuous_map.coe_mk],
apply h.prop,
end }
/--
If we have `h₀ : homotopy_with f₀ f₁ P` and `h₁ : homotopy_with f₁ f₂ P`, we can define a
`homotopy_with f₀ f₂ P` by 'gluing' the homotopies together.
-/
def trans (h₀ : homotopy_with f₀ f₁ P) (h₁ : homotopy_with f₁ f₂ P) : homotopy_with f₀ f₂ P :=
{ to_fun :=
{ to_fun := λ p, if p.2 ≤ 1/2 then h₀ (p.1, 2 * p.2) else h₁ (p.1, 2 * p.2 - 1),
continuous_to_fun := begin
apply continuous.if; [skip, continuity, continuity],
intros a ha,
rw frontier_snd_le at ha,
obtain ⟨ha₁, ha₂⟩ := ha,
simp only [*, one_div, set.mem_singleton_iff, to_fun_one, mul_inv_cancel, ne.def,
not_false_iff, bit0_eq_zero, one_ne_zero, to_fun_zero, sub_self] at *,
end },
to_fun_zero' := λ x, by simp only [one_div, zero_le_one, inv_nonneg, if_true,
continuous_map.coe_mk, zero_le_bit0, to_fun_zero, mul_zero],
to_fun_one' := λ x, by norm_num,
prop := λ t, begin
simp only [continuous_map.coe_mk, set.mem_singleton_iff, to_fun_one, mul_inv_cancel,
ne.def, not_false_iff, bit0_eq_zero, one_ne_zero, to_fun_zero, sub_self],
split_ifs,
{ apply h₀.prop },
{ apply h₁.prop }
end }
end homotopy_with
/--
A `homotopy f₀ f₁` is defined to be a `homotopy_with f₀ f₁ P`, where `P` is always `true`.
-/
abbreviation homotopy (f₀ f₁ : C(X, Y)) := homotopy_with f₀ f₁ (λ f, true)
namespace homotopy
variables {f₀ f₁ f₂ : C(X, Y)}
/--
For `homotopy f₀ f₀`, the property `P` in `homotopy_with.refl` is always satisfied, so we add in
this definition so we don't need to prove `true` every time.
-/
def refl (f₀ : C(X, Y)) : homotopy f₀ f₀ := homotopy_with.refl trivial
/--
The property `P` in `homotopy_with.refl` is always satisfied for `homotopy`, so we add in this
definition so we don't need to prove `true` every time.
-/
def of_refl (h : f₀ = f₁) : homotopy f₀ f₁ := homotopy_with.of_refl trivial h
end homotopy
/--
Two continuous functions `f₀` and `f₁` are homotopic if there exists a `homotopy f₀ f₁`.
-/
def homotopic (f₀ f₁ : C(X, Y)) := nonempty (homotopy f₀ f₁)
lemma homotopic.equiv : equivalence (@homotopic X Y _ _) :=
⟨λ f, ⟨homotopy.refl f⟩, λ f g ⟨h⟩, ⟨h.symm⟩, λ f₀ f₁ f₂ ⟨h₀⟩ ⟨h₁⟩, ⟨h₀.trans h₁⟩⟩
|
lemma add_monom: "monom a n + monom b n = monom (a + b) n" |
{-# OPTIONS --prop --rewriting --confluence-check #-}
open import Agda.Builtin.Nat
open import Agda.Builtin.Equality
{-# BUILTIN REWRITE _≡_ #-}
data _≐_ {ℓ} {A : Set ℓ} (x : A) : A → Prop ℓ where
refl : x ≐ x
postulate
subst : ∀ {ℓ ℓ′} {A : Set ℓ} (P : A → Set ℓ′)
→ (x y : A) → x ≐ y → P x → P y
subst-rew : ∀ {ℓ ℓ′} {A : Set ℓ} (P : A → Set ℓ′)
→ {x : A} (e : x ≐ x) (p : P x) → subst P x x e p ≡ p
{-# REWRITE subst-rew #-}
data Box (A : Prop) : Set where
box : A -> Box A
foo : (A : Prop)(x y : A)(P : Box A → Set)(p : P (box x)) → subst P (box x) (box y) refl p ≐ p
foo A x y P p = refl -- refl does not type check
|
[STATEMENT]
lemma COND_upd_uinfo_no_oracle:
"no_oracle ainfo uinfo \<Longrightarrow> no_oracle ainfo (upd_uinfo uinfo fld)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. True \<Longrightarrow> True
[PROOF STEP]
by (auto simp add: upd_uinfo_def) |
\name{set_size}
\alias{set_size}
\title{
Set Sizes
}
\description{
Set Sizes
}
\usage{
set_size(m)
}
\arguments{
\item{m}{A combination matrix returned by \code{\link{make_comb_mat}}.}
}
\value{
A vector of set sizes.
}
\examples{
set.seed(123)
lt = list(a = sample(letters, 10),
b = sample(letters, 15),
c = sample(letters, 20))
m = make_comb_mat(lt)
set_size(m)
}
|
///
/// config.hpp
///
/// Copyright (c) 2009-2014 Nous Xiong (348944179 at qq dot com)
///
/// Distributed under the Boost Software License, Version 1.0. (See accompanying
/// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
///
/// See https://github.com/nousxiong/gce for latest version.
///
#ifndef GCE_CONFIG_HPP
#define GCE_CONFIG_HPP
#include <boost/predef.h>
#include <gce/user.hpp>
/// Suppress some vc warnings.
#ifdef BOOST_COMP_MSVC
# pragma warning(disable : 4251 4231 4660 4275 4355 4244 4307)
#endif
/// Ensure occupy entire cache(s) line.
#define GCE_CACHE_ALIGNED_VAR(type, var) \
type var; \
byte_t pad_##var[(sizeof(type)/GCE_CACHE_LINE_SIZE + 1)*GCE_CACHE_LINE_SIZE - sizeof(type)];
#endif /// GCE_CONFIG_HPP
|
#include "strutil.h"
#include <cstring>
#include <iostream>
#include <boost/algorithm/string/predicate.hpp>
bool strutil::iequals(std::string a, std::string b) {
return boost::iequals(a, b);
}
char *strutil::time_cstr(time_t *t) {
char *time_str = ctime(t);
time_str[strlen(time_str) - 1] = '\0';
return time_str;
}
|
[STATEMENT]
lemma foldr_does_nothing_to_xf:
"\<lbrakk> \<And>x s. x \<in> set xs \<Longrightarrow> xf (f x s) = xf s \<rbrakk> \<Longrightarrow> xf (foldr f xs s) = xf s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>x s. x \<in> set xs \<Longrightarrow> xf (f x s) = xf s) \<Longrightarrow> xf (foldr f xs s) = xf s
[PROOF STEP]
by (induct xs, simp_all) |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Signs
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Sign where
open import Relation.Binary using (Decidable)
open import Relation.Binary.PropositionalEquality using (_≡_; refl)
open import Relation.Nullary using (yes; no)
-- Signs.
data Sign : Set where
- : Sign
+ : Sign
-- Decidable equality.
infix 4 _≟_
_≟_ : Decidable {A = Sign} _≡_
- ≟ - = yes refl
- ≟ + = no λ()
+ ≟ - = no λ()
+ ≟ + = yes refl
-- The opposite sign.
opposite : Sign → Sign
opposite - = +
opposite + = -
-- "Multiplication".
infixl 7 _*_
_*_ : Sign → Sign → Sign
+ * s₂ = s₂
- * s₂ = opposite s₂
|
State Before: x y : PGame
⊢ x < y ∨ (x ≈ y) ∨ y ⧏ x State After: x y : PGame
⊢ x < y ∨ (x ≈ y) ∨ y < x ∨ x ‖ y Tactic: rw [lf_iff_lt_or_fuzzy, Fuzzy.swap_iff] State Before: x y : PGame
⊢ x < y ∨ (x ≈ y) ∨ y < x ∨ x ‖ y State After: no goals Tactic: exact lt_or_equiv_or_gt_or_fuzzy x y |
module Oscar.Prelude where
module _ where -- Objectevel
open import Agda.Primitive public
using ()
renaming ( Level to Ł
; lzero to ∅̂
; lsuc to ↑̂_
; _⊔_ to _∙̂_ )
infix 0 Ø_
Ø_ : ∀ 𝔬 → Set (↑̂ 𝔬)
Ø_ 𝔬 = Set 𝔬
Ø₀ = Ø ∅̂
Ø₁ = Ø (↑̂ ∅̂)
postulate
magic : ∀ {a} {A : Ø a} → A
module _ where -- Function
infixr 9 _∘_
_∘_ : ∀ {a b c}
{A : Set a} {B : A → Set b} {C : {x : A} → B x → Set c} →
(∀ {x} (y : B x) → C y) → (g : (x : A) → B x) →
((x : A) → C (g x))
f ∘ g = λ x → f (g x)
infixr 9 _∘′_
_∘′_ : ∀ {a b c} {A : Set a} {B : Set b} {C : Set c} →
(B → C) → (A → B) → (A → C)
f ∘′ g = f ∘ g
¡ : ∀ {𝔬} {𝔒 : Ø 𝔬} → 𝔒 → 𝔒
¡ 𝓞 = 𝓞
infixl -10 ¡
syntax ¡ {𝔒 = A} x = x ofType A
¡[_] : ∀ {𝔬} (𝔒 : Ø 𝔬) → 𝔒 → 𝔒
¡[ _ ] = ¡
_∋_ : ∀ {a} (A : Set a) → A → A
A ∋ x = x
_∞ : ∀ {a} {A : Set a} → A → ∀ {b} {B : Set b} → B → A
_∞ x = λ _ → x
_∞⟦_⟧ : ∀ {a} {A : Set a} → A → ∀ {b} (B : Set b) → B → A
x ∞⟦ B ⟧ = _∞ x {B = B}
_∞₁ : ∀ ..{a} ..{A : Set a} → A → ∀ ..{b} ..{B : Set b} → ∀ ..{h} ..{H : Set h} → .(_ : B) .{_ : H} → A
_∞₁ f _ = f
_∞₃ : ∀ ..{a} ..{A : Set a} → A → ∀ ..{b} ..{B : Set b} → ∀ ..{h₁ h₂ h₃} ..{H₁ : Set h₁} ..{H₂ : Set h₂} ..{H₃ : Set h₃} → .(_ : B) .{_ : H₁} .{_ : H₂} .{_ : H₃} → A
_∞₃ f _ = f
hid : ∀ {a} {A : Set a} {x : A} → A
hid {x = x} = x
it : ∀ {a} {A : Set a} {{x : A}} → A
it {{x}} = x
{-# INLINE it #-}
! = it
asInstance : ∀ {a b} {A : Set a} {B : A → Set b} (x : A) → (∀ {{x}} → B x) → B x
asInstance x f = f {{x}}
{-# INLINE asInstance #-}
flip : ∀ {a b c} {A : Set a} {B : Set b} {C : A → B → Set c} → (∀ x y → C x y) → ∀ y x → C x y
flip f x y = f y x
{-# INLINE flip #-}
infixr -20 _$_
_$_ : ∀ {a b} {A : Set a} {B : A → Set b} → (∀ x → B x) → ∀ x → B x
f $ x = f x
infixr -20 _$′_
_$′_ : ∀ {a b} {A : Set a} {B : Set b} → (A → B) → A → B
f $′ x = f x
-- The S combinator. (Written infix as in Conor McBride's paper
-- "Outrageous but Meaningful Coincidences: Dependent type-safe syntax
-- and evaluation".)
_ˢ_ : ∀ {a b c}
{A : Set a} {B : A → Set b} {C : (x : A) → B x → Set c} →
((x : A) (y : B x) → C x y) →
(g : (x : A) → B x) →
((x : A) → C x (g x))
f ˢ g = λ x → f x (g x)
infixr 0 case_of_ case_return_of_
case_of_ : ∀ {a b} {A : Set a} {B : Set b} → A → (A → B) → B
case x of f = f x
case_return_of_ : ∀ {a b} {A : Set a} (x : A) (B : A → Set b) → (∀ x → B x) → B x
case x return B of f = f x
infixl 8 _on_
_on_ : ∀ {a b c} {A : Set a} {B : A → Set b} {C : ∀ x y → B x → B y → Set c} →
(∀ {x y} (z : B x) (w : B y) → C x y z w) → (f : ∀ x → B x) → ∀ x y →
C x y (f x) (f y)
h on f = λ x y → h (f x) (f y)
{-# INLINE _on_ #-}
Function : ∀ {a} → Ø a → Ø a → Ø a
Function A B = A → B
Function⟦_⟧ : ∀ a → Ø a → Ø a → Ø a
Function⟦ a ⟧ = Function {a = a}
MFunction : ∀ {a b} (M : Ø a → Ø b) → Ø a → Ø a → Ø b
MFunction M A B = M A → M B
Arrow : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔞} {𝔟} → (𝔛 → Ø 𝔞) → (𝔛 → Ø 𝔟) → 𝔛 → 𝔛 → Ø 𝔞 ∙̂ 𝔟
Arrow 𝔄 𝔅 x y = 𝔄 x → 𝔅 y
module _ where
Extension : ∀ {𝔬} {𝔒 : Ø 𝔬} {𝔭} (𝔓 : 𝔒 → Ø 𝔭) → 𝔒 → 𝔒 → Ø 𝔭
Extension 𝔓 = Arrow 𝔓 𝔓
module _ where
_⟨_⟩→_ : ∀ {𝔬} {𝔒 : Ø 𝔬} → 𝔒 → ∀ {𝔭} → (𝔒 → Ø 𝔭) → 𝔒 → Ø 𝔭
m ⟨ 𝔓 ⟩→ n = Extension 𝔓 m n
π̂ : ∀ {𝔵} ℓ (𝔛 : Ø 𝔵) → Ø 𝔵 ∙̂ ↑̂ ℓ
π̂ ℓ 𝔛 = 𝔛 → Ø ℓ
infixl 21 _←̂_
_←̂_ = π̂
π̇ : ∀ {𝔞 𝔟} (𝔄 : Ø 𝔞) (𝔅 : 𝔄 → Ø 𝔟) → Ø 𝔞 ∙̂ 𝔟
π̇ 𝔄 𝔅 = (𝓐 : 𝔄) → 𝔅 𝓐
infixl 20 π̇
syntax π̇ 𝔄 (λ 𝓐 → 𝔅𝓐) = 𝔅𝓐 ← 𝓐 ≔ 𝔄
π̇-hidden-quantifier-syntax = π̇
infixl 20 π̇-hidden-quantifier-syntax
syntax π̇-hidden-quantifier-syntax 𝔄 (λ _ → 𝔅𝓐) = 𝔅𝓐 ← 𝔄
π̂² : ∀ {𝔞} ℓ → Ø 𝔞 → Ø 𝔞 ∙̂ ↑̂ ℓ
π̂² ℓ 𝔄 = ℓ ←̂ 𝔄 ← 𝔄
_→̂²_ : ∀ {𝔞} → Ø 𝔞 → ∀ ℓ → Ø 𝔞 ∙̂ ↑̂ ℓ
_→̂²_ 𝔒 ℓ = π̂² ℓ 𝔒
record Lift {a ℓ} (A : Set a) : Set (a ∙̂ ℓ) where
instance constructor lift
field lower : A
open Lift public
record Wrap {𝔵} (𝔛 : Ø 𝔵) : Ø 𝔵 where
constructor ∁
field
π₀ : 𝔛
open Wrap public
∀̇ : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔞}
→ (∀ ℓ (𝔄 : Ø 𝔞) → Ø 𝔞 ∙̂ ↑̂ ℓ)
→ ∀ ℓ → (𝔛 → Ø 𝔞) → Ø 𝔵 ∙̂ 𝔞 ∙̂ ↑̂ ℓ
∀̇ Q ℓ 𝔄 = ∀ {x} → Q ℓ (𝔄 x)
Ṙelation : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔞} ℓ → (𝔞 ←̂ 𝔛) → Ø 𝔵 ∙̂ 𝔞 ∙̂ ↑̂ ℓ
Ṙelation ℓ P = Wrap (∀̇ π̂² ℓ P)
Pointwise : ∀ {𝔞} {𝔄 : Ø 𝔞} {𝔟} {𝔅 : Ø 𝔟} {ℓ} → 𝔅 →̂² ℓ → (𝔅 ← 𝔄) → (𝔄 → 𝔅) → Ø 𝔞 ∙̂ ℓ
Pointwise _≈_ = λ f g → ∀ x → f x ≈ g x
Ṗroperty : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔬} ℓ → (𝔵 ∙̂ 𝔬 ∙̂ ↑̂ ℓ) ←̂ (𝔬 ←̂ 𝔛)
Ṗroperty ℓ P = Wrap (∀̇ π̂ ℓ P)
LeftṖroperty : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔶} {𝔜 : 𝔛 → Ø 𝔶} {𝔯} → ∀ ℓ → ((x : 𝔛) → 𝔜 x → Ø 𝔯) → 𝔛 → Ø 𝔶 ∙̂ 𝔯 ∙̂ ↑̂ ℓ
LeftṖroperty ℓ _↦_ = Ṗroperty ℓ ∘ _↦_
ArrowṖroperty : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔭₁ 𝔭₂} → ∀ ℓ → (𝔛 → Ø 𝔭₁) → (𝔛 → Ø 𝔭₂) → 𝔛 → Ø 𝔵 ∙̂ 𝔭₁ ∙̂ 𝔭₂ ∙̂ ↑̂ ℓ
ArrowṖroperty ℓ 𝔒₁ 𝔒₂ = LeftṖroperty ℓ (Arrow 𝔒₁ 𝔒₂)
module _ where
infixr 5 _,_
record Σ {𝔬} (𝔒 : Ø 𝔬) {𝔭} (𝔓 : 𝔒 → Ø 𝔭) : Ø 𝔬 ∙̂ 𝔭 where
instance constructor _,_
field
π₀ : 𝔒
π₁ : 𝔓 π₀
open Σ public
infixr 5 _,,_
record Σ′ {𝔬} (𝔒 : Ø 𝔬) {𝔭} (𝔓 : Ø 𝔭) : Ø 𝔬 ∙̂ 𝔭 where
instance constructor _,,_
field
π₀ : 𝔒
π₁ : 𝔓
open Σ′ public
_×_ : ∀ {𝔬₁ 𝔬₂} (𝔒₁ : Ø 𝔬₁) (𝔒₂ : Ø 𝔬₂) → Ø 𝔬₁ ∙̂ 𝔬₂
_×_ O₁ O₂ = Σ O₁ (λ _ → O₂)
∃_ : ∀ {𝔬} {𝔒 : Ø 𝔬} {𝔭} (𝔓 : 𝔒 → Ø 𝔭) → Ø 𝔬 ∙̂ 𝔭
∃_ = Σ _
uncurry : ∀ {a b c} {A : Set a} {B : A → Set b} {C : ∀ x → B x → Set c} →
(∀ x (y : B x) → C x y) → (p : Σ A B) → C (π₀ p) (π₁ p)
uncurry f (x , y) = f x y
uncurry′ : ∀ {a b c} {A : Set a} {B : A → Set b} {C : Set c} →
(∀ x → B x → C) → Σ A B → C
uncurry′ f (x , y) = f x y
curry : ∀ {a b c} {A : Set a} {B : A → Set b} {C : Σ A B → Set c} →
(∀ p → C p) → ∀ x (y : B x) → C (x , y)
curry f x y = f (x , y)
ExtensionṖroperty : ∀ {𝔵} {𝔛 : Ø 𝔵} {𝔬} {ℓ̇}
ℓ (𝔒 : 𝔛 → Ø 𝔬) (_↦_ : ∀̇ π̂² ℓ̇ 𝔒)
→ Ø 𝔵 ∙̂ 𝔬 ∙̂ ↑̂ ℓ ∙̂ ℓ̇
ExtensionṖroperty ℓ 𝔒 _↦_ = Σ (Ṗroperty ℓ 𝔒) (λ P → ∀ {x} {f g : 𝔒 x} → f ↦ g → Extension (π₀ P) f g)
LeftExtensionṖroperty : ∀ {𝔶} {𝔜 : Ø 𝔶} {𝔵} {𝔛 : 𝔜 → Ø 𝔵} {𝔬} {ℓ̇}
ℓ (𝔒 : (y : 𝔜) → 𝔛 y → Ø 𝔬) (_↦_ : ∀ {y} → ∀̇ π̂² ℓ̇ (𝔒 y))
→ 𝔜
→ Ø 𝔵 ∙̂ 𝔬 ∙̂ ↑̂ ℓ ∙̂ ℓ̇
LeftExtensionṖroperty ℓ 𝔒 _↦_ y = ExtensionṖroperty ℓ (𝔒 y) _↦_
ArrowExtensionṖroperty : ∀
{𝔵} {𝔛 : Ø 𝔵}
{𝔬₁} ℓ (𝔒₁ : 𝔛 → Ø 𝔬₁)
{𝔬₂} (𝔒₂ : 𝔛 → Ø 𝔬₂)
→ ∀ {ℓ̇} (_↦_ : ∀̇ π̂² ℓ̇ 𝔒₂)
→ 𝔛
→ Ø 𝔵 ∙̂ 𝔬₁ ∙̂ 𝔬₂ ∙̂ ↑̂ ℓ ∙̂ ℓ̇
ArrowExtensionṖroperty ℓ 𝔒₁ 𝔒₂ _↦_ = LeftExtensionṖroperty ℓ (Arrow 𝔒₁ 𝔒₂) (Pointwise _↦_)
record Instance {a} (A : Set a) : Set a where
constructor ∁
field {{x}} : A
mkInstance : ∀ {a} {A : Set a} → A → Instance A
mkInstance x = ∁ {{x}}
|
Require Import TestCommon.
Require Import Regularity.
Open Scope L2GMu.
Definition id := Λ => (λ (##0) => (#0)).
Definition id_typ := ∀ (##0 ==> ##0).
Ltac simpl_op := cbn; try case_if; auto.
(* Ltac solve_simple_type := repeat ((* let L := gather_vars in try apply typing_abs with L; *) intros; econstructor; eauto; cbn; try case_if; eauto). *)
Ltac crush_simple_type := repeat (cbv; (try case_if); econstructor; eauto).
Lemma well_typed_id : {empty, emptyΔ, empty} ⊢(Treg) id ∈ id_typ.
cbv; autotyper1.
Qed.
Lemma well_formed_id :
term id
/\ type id_typ
/\ wft empty emptyΔ id_typ.
destruct* (typing_regular well_typed_id).
Qed.
Definition id_app := (id <|| typ_unit <| trm_unit).
Lemma id_app_types : {empty, emptyΔ, empty} ⊢(Treg) id_app ∈ typ_unit.
Proof.
cbv.
autotyper1.
instantiate (1 := (##0 ==> ##0)).
auto.
autotyper1.
auto.
Qed.
Ltac crush_eval := repeat (try (apply eval_finish; eauto); econstructor; simpl_op).
Lemma id_app_evals : evals id_app trm_unit.
Proof.
crush_eval.
Unshelve. fs. fs. fs. fs.
Qed.
Require Import Preservation.
Lemma preservation_evals : forall Σ e T TT e',
{Σ, emptyΔ, empty} ⊢(TT) e ∈ T ->
evals e e' ->
{Σ, emptyΔ, empty} ⊢(Tgen) e' ∈ T.
Proof.
introv Typ Ev.
eapply Tgen_from_any in Typ.
induction Ev.
- apply* IHEv.
lets HP: preservation_thm.
unfold preservation in HP.
apply* HP.
- auto.
Qed.
Eval cbn in (preservation_evals _ _ _ _ _ id_app_types id_app_evals).
Definition let_id_app := trm_let (id) (#0 <|| typ_unit <| trm_unit).
Lemma let_id_app_types : {empty, emptyΔ, empty} ⊢(Treg) let_id_app ∈ typ_unit.
Proof.
cbv.
autotyper1.
4: {
instantiate (1 := (##0 ==> ##0)).
cbn. autotyper1.
}
autotyper1.
autotyper1.
autotyper1.
auto.
Qed.
Lemma let_id_app_evals : evals let_id_app trm_unit.
Proof.
crush_eval.
Unshelve.
fs. fs. fs. fs. fs. fs. fs. fs. fs. fs. fs.
Qed.
Definition loop := fixs (typ_unit ==> typ_unit) => λ typ_unit => (#1 <| #0).
Lemma loop_type : {empty, emptyΔ, empty} ⊢(Treg) loop ∈ (typ_unit ==> typ_unit).
Proof.
cbv.
autotyper1.
Qed.
Definition divergent := loop <| trm_unit.
Lemma divergent_type : {empty, emptyΔ, empty} ⊢(Treg) divergent ∈ typ_unit.
Proof.
cbv. autotyper1.
Qed.
Compute divergent_type.
Lemma divergent_diverges : evals divergent divergent.
Proof.
cbv.
econstructor.
- crush_eval.
- unfold open_ee. cbn; repeat case_if.
econstructor.
+ crush_eval.
+ repeat case_if.
apply eval_finish.
Unshelve.
fs. fs. fs. fs. fs. fs. fs.
Qed.
|
State Before: R✝ : Type u
S : Type v
T : Type w
ι✝ : Type y
a b : R✝
m n : ℕ
inst✝³ : CommSemiring R✝
p✝ q : R✝[X]
x✝ : R✝
inst✝² : CommSemiring S
f : R✝ →+* S
R : Type u_1
inst✝¹ : CommRing R
inst✝ : IsDomain R
ι : Type u_2
s : Finset ι
p : ι → R[X]
x : R
⊢ IsRoot (∏ j in s, p j) x ↔ ∃ i, i ∈ s ∧ IsRoot (p i) x State After: no goals Tactic: simp only [IsRoot, eval_prod, Finset.prod_eq_zero_iff] |
MODULE vacmod
USE vacmod0
USE vac_persistent
USE vmec_input, ONLY: lasym
USE vmec_params, ONLY: signgs
USE vparams, ONLY: zero, one, c2p0, cp5
USE mgrid_mod, ONLY: nr0b, np0b, nz0b,
1 rminb, zminb, rmaxb, zmaxb, delrb, delzb
IMPLICIT NONE
C-----------------------------------------------
C L o c a l P a r a m e t e r s
C-----------------------------------------------
REAL(rprec), PARAMETER :: p5 = cp5, two = c2p0
C-----------------------------------------------
C L o c a l V a r i a b l e s
C-----------------------------------------------
INTEGER :: nfper, nvper
REAL(rprec), DIMENSION(:), ALLOCATABLE, TARGET :: potvac
REAL(rprec), DIMENSION(:), ALLOCATABLE :: bvecsav, amatsav,
1 bexni, brv, bphiv, bzv, bsqvac, bsqvac0, r1b, rub, rvb, z1b,
2 zub, zvb, bexu, bexv, bexn, auu, auv, avv, snr, snv, snz, drv,
3 guu_b, guv_b, gvv_b, rzb2, rcosuv, rsinuv,
5 bredge, bpedge, bzedge
REAL(rprec), DIMENSION(:), ALLOCATABLE :: raxis_nestor,
1 zaxis_nestor
REAL(rprec) :: bsubvvac, pi2,
2 pi3, pi4, alp, alu, alv, alvp, onp, onp2
END MODULE vacmod
|
[STATEMENT]
lemma map_graph_comp: "map_graph (g \<circ>\<^sub>m f) = (map_graph f) O (map_graph g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_graph (g \<circ>\<^sub>m f) = map_graph f O map_graph g
[PROOF STEP]
apply (auto simp add: map_comp_def map_graph_def relcomp_unfold)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a b. (case f a of None \<Rightarrow> None | Some x \<Rightarrow> g x) = Some b \<Longrightarrow> \<exists>y. f a = Some y \<and> g y = Some b
[PROOF STEP]
apply (rename_tac a b)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a b. (case f a of None \<Rightarrow> None | Some x \<Rightarrow> g x) = Some b \<Longrightarrow> \<exists>y. f a = Some y \<and> g y = Some b
[PROOF STEP]
apply (case_tac "f a", auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
[STATEMENT]
lemma ord_iso_Sup_pres:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
let ?g = "the_inv f"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
assume h: "ord_iso f"
[PROOF STATE]
proof (state)
this:
ord_iso f
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
hence a: "mono ?g"
[PROOF STATE]
proof (prove)
using this:
ord_iso f
goal (1 subgoal):
1. mono (the_inv f)
[PROOF STEP]
by (simp add: ord_iso_the_inv)
[PROOF STATE]
proof (state)
this:
mono (the_inv f)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
mono (the_inv f)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
fix X :: "'a::complete_lattice set"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
fix y :: "'b::complete_lattice"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
have "(f (\<Squnion>X) \<le> y) = (\<Squnion>X \<le> ?g y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f (\<Squnion> X) \<le> y) = (\<Squnion> X \<le> the_inv f y)
[PROOF STEP]
by (metis (mono_tags, lifting) UNIV_I f_the_inv_into_f h monoD ord_embed_alt ord_embed_inj ord_iso_alt)
[PROOF STATE]
proof (state)
this:
(f (\<Squnion> X) \<le> y) = (\<Squnion> X \<le> the_inv f y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(f (\<Squnion> X) \<le> y) = (\<Squnion> X \<le> the_inv f y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
have "... = (\<forall>x \<in> X. x \<le> ?g y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Squnion> X \<le> the_inv f y) = (\<forall>x\<in>X. x \<le> the_inv f y)
[PROOF STEP]
by (simp add: Sup_le_iff)
[PROOF STATE]
proof (state)
this:
(\<Squnion> X \<le> the_inv f y) = (\<forall>x\<in>X. x \<le> the_inv f y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Squnion> X \<le> the_inv f y) = (\<forall>x\<in>X. x \<le> the_inv f y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
have "... = (\<forall>x \<in> X. f x \<le> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>x\<in>X. x \<le> the_inv f y) = (\<forall>x\<in>X. f x \<le> y)
[PROOF STEP]
by (metis (mono_tags, lifting) UNIV_I f_the_inv_into_f h monoD ord_embed_alt ord_embed_inj ord_iso_alt)
[PROOF STATE]
proof (state)
this:
(\<forall>x\<in>X. x \<le> the_inv f y) = (\<forall>x\<in>X. f x \<le> y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<forall>x\<in>X. x \<le> the_inv f y) = (\<forall>x\<in>X. f x \<le> y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
have "... = (\<Squnion> (f ` X) \<le> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>x\<in>X. f x \<le> y) = (\<Squnion> (f ` X) \<le> y)
[PROOF STEP]
by (simp add: SUP_le_iff)
[PROOF STATE]
proof (state)
this:
(\<forall>x\<in>X. f x \<le> y) = (\<Squnion> (f ` X) \<le> y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(f (\<Squnion> X) \<le> y) = (\<Squnion> (f ` X) \<le> y)
[PROOF STEP]
have "(f (\<Squnion>X) \<le> y) = (\<Squnion> (f ` X) \<le> y)"
[PROOF STATE]
proof (prove)
using this:
(f (\<Squnion> X) \<le> y) = (\<Squnion> (f ` X) \<le> y)
goal (1 subgoal):
1. (f (\<Squnion> X) \<le> y) = (\<Squnion> (f ` X) \<le> y)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(f (\<Squnion> X) \<le> y) = (\<Squnion> (f ` X) \<le> y)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
(f (\<Squnion> X) \<le> ?y2) = (\<Squnion> (f ` X) \<le> ?y2)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
hence "f (\<Squnion>X) = \<Squnion> (f ` X)"
[PROOF STATE]
proof (prove)
using this:
(f (\<Squnion> X) \<le> ?y2) = (\<Squnion> (f ` X) \<le> ?y2)
goal (1 subgoal):
1. f (\<Squnion> X) = \<Squnion> (f ` X)
[PROOF STEP]
by (meson dual_order.antisym order_refl)
[PROOF STATE]
proof (state)
this:
f (\<Squnion> X) = \<Squnion> (f ` X)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
f (\<Squnion> ?X2) = \<Squnion> (f ` ?X2)
goal (1 subgoal):
1. ord_iso f \<Longrightarrow> Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
f (\<Squnion> ?X2) = \<Squnion> (f ` ?X2)
goal (1 subgoal):
1. Sup \<circ> (`) f = f \<circ> Sup
[PROOF STEP]
unfolding fun_eq_iff
[PROOF STATE]
proof (prove)
using this:
f (\<Squnion> ?X2) = \<Squnion> (f ` ?X2)
goal (1 subgoal):
1. \<forall>x. (Sup \<circ> (`) f) x = (f \<circ> Sup) x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Sup \<circ> (`) f = f \<circ> Sup
goal:
No subgoals!
[PROOF STEP]
qed |
// (C) Copyright Edward Diener 2019
// Use, modification and distribution are subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt).
#if !defined(BOOST_TTI_DETAIL_MEM_FUN_TEMPLATE_HPP)
#define BOOST_TTI_DETAIL_MEM_FUN_TEMPLATE_HPP
#include <boost/function_types/is_member_function_pointer.hpp>
#include <boost/function_types/property_tags.hpp>
#include <boost/mpl/and.hpp>
#include <boost/mpl/logical.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/eval_if.hpp>
#include <boost/mpl/vector.hpp>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/array/enum.hpp>
#include <boost/type_traits/detail/yes_no_type.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/tti/detail/dcomp_mem_fun_template.hpp>
#include <boost/tti/detail/ddeftype.hpp>
#include <boost/tti/detail/dnullptr.hpp>
#include <boost/tti/detail/dptmf.hpp>
#include <boost/tti/detail/dmacro_sunfix.hpp>
#include <boost/tti/detail/denclosing_type.hpp>
#include <boost/tti/gen/namespace_gen.hpp>
#define BOOST_TTI_DETAIL_TRAIT_HAS_TYPES_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
template<class BOOST_TTI_DETAIL_TP_PMEMF,class BOOST_TTI_DETAIL_TP_C> \
struct BOOST_PP_CAT(trait,_detail_hmft_types) \
{ \
template<BOOST_TTI_DETAIL_TP_PMEMF> \
struct helper BOOST_TTI_DETAIL_MACRO_SUNFIX ; \
\
template<class BOOST_TTI_DETAIL_TP_EC> \
static ::boost::type_traits::yes_type chkt(helper<&BOOST_TTI_DETAIL_TP_EC::template name<BOOST_PP_ARRAY_ENUM(pparray)> > *); \
\
template<class BOOST_TTI_DETAIL_TP_EC> \
static ::boost::type_traits::no_type chkt(...); \
\
typedef boost::mpl::bool_<sizeof(chkt<BOOST_TTI_DETAIL_TP_C>(BOOST_TTI_DETAIL_NULLPTR))==sizeof(::boost::type_traits::yes_type)> type; \
}; \
/**/
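// Detection mechanism: the chkt overload taking helper<&EC::template name<...>>*
// is viable only when the enclosing type actually has a member function template
// `name` invocable with the given template arguments; otherwise overload
// resolution falls back to the variadic no_type overload, and the sizeof
// comparison records the outcome in the nested ::type.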
#define BOOST_TTI_DETAIL_TRAIT_CTMF_INVOKE_TEMPLATE(trait,name,pparray) \
BOOST_TTI_DETAIL_TRAIT_HAS_TYPES_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
template<class BOOST_TTI_DETAIL_TP_T,class BOOST_TTI_DETAIL_TP_R,class BOOST_TTI_DETAIL_TP_FS,class BOOST_TTI_DETAIL_TP_TAG> \
struct BOOST_PP_CAT(trait,_detail_hmft_ctmf_invoke_template) : \
BOOST_PP_CAT(trait,_detail_hmft_types) \
< \
typename BOOST_TTI_NAMESPACE::detail::ptmf_seq<BOOST_TTI_DETAIL_TP_T,BOOST_TTI_DETAIL_TP_R,BOOST_TTI_DETAIL_TP_FS,BOOST_TTI_DETAIL_TP_TAG>::type, \
BOOST_TTI_DETAIL_TP_T \
> \
{ \
}; \
/**/
#define BOOST_TTI_DETAIL_TRAIT_HAS_CALL_TYPES_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
BOOST_TTI_DETAIL_TRAIT_CTMF_INVOKE_TEMPLATE(trait,name,pparray) \
template<class BOOST_TTI_DETAIL_TP_T,class BOOST_TTI_DETAIL_TP_R,class BOOST_TTI_DETAIL_TP_FS,class BOOST_TTI_DETAIL_TP_TAG> \
struct BOOST_PP_CAT(trait,_detail_hmft_call_types) : \
boost::mpl::eval_if \
< \
BOOST_TTI_NAMESPACE::detail::enclosing_type<BOOST_TTI_DETAIL_TP_T>, \
BOOST_PP_CAT(trait,_detail_hmft_ctmf_invoke_template) \
< \
BOOST_TTI_DETAIL_TP_T, \
BOOST_TTI_DETAIL_TP_R, \
BOOST_TTI_DETAIL_TP_FS, \
BOOST_TTI_DETAIL_TP_TAG \
>, \
boost::mpl::false_ \
> \
{ \
}; \
/**/
#define BOOST_TTI_DETAIL_TRAIT_CHECK_HAS_COMP_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
BOOST_TTI_DETAIL_TRAIT_HAS_COMP_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
template<class BOOST_TTI_DETAIL_TP_T> \
struct BOOST_PP_CAT(trait,_detail_hmft_check_comp) : \
BOOST_PP_CAT(trait,_detail_hcmft)<BOOST_TTI_DETAIL_TP_T> \
{ \
BOOST_MPL_ASSERT((boost::function_types::is_member_function_pointer<BOOST_TTI_DETAIL_TP_T>)); \
}; \
/**/
#define BOOST_TTI_DETAIL_TRAIT_HAS_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
BOOST_TTI_DETAIL_TRAIT_HAS_CALL_TYPES_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
BOOST_TTI_DETAIL_TRAIT_CHECK_HAS_COMP_MEMBER_FUNCTION_TEMPLATE(trait,name,pparray) \
template<class BOOST_TTI_DETAIL_TP_T,class BOOST_TTI_DETAIL_TP_R,class BOOST_TTI_DETAIL_TP_FS,class BOOST_TTI_DETAIL_TP_TAG> \
struct BOOST_PP_CAT(trait,_detail_hmft) : \
boost::mpl::eval_if \
< \
boost::mpl::and_ \
< \
boost::is_same<BOOST_TTI_DETAIL_TP_R,BOOST_TTI_NAMESPACE::detail::deftype>, \
boost::is_same<BOOST_TTI_DETAIL_TP_FS,boost::mpl::vector<> >, \
boost::is_same<BOOST_TTI_DETAIL_TP_TAG,boost::function_types::null_tag> \
>, \
BOOST_PP_CAT(trait,_detail_hmft_check_comp)<BOOST_TTI_DETAIL_TP_T>, \
BOOST_PP_CAT(trait,_detail_hmft_call_types)<BOOST_TTI_DETAIL_TP_T,BOOST_TTI_DETAIL_TP_R,BOOST_TTI_DETAIL_TP_FS,BOOST_TTI_DETAIL_TP_TAG> \
> \
{ \
}; \
/**/
#endif // BOOST_TTI_DETAIL_MEM_FUN_TEMPLATE_HPP
|
7 Senses in CI begins this Sunday!
7 Senses of Contact Improvisation, at Studio Azul, Berkeley, CA.
Idiomatics of Contact Improv meets Exploratory Release Technique and other stuff. Postmodern dance techniques broken down slowly with repetition. Expanding the vocabulary of physics in contact, improvising with movement scores: concepts to play with solo or in contact with others. Returning to neutral. Integrating the physical, emotional, and mental.
Moving into the Spring Equinox, we'll gain our equilibrium despite a fear-inducing government and focus on developing a heightened awareness of the spidey senses during Women's History Month.
***All genders welcome (feminists only, of course). ***All levels (beginners are welcome).
If Jane Austen had contact improvisation, it would all have made sense!
(Sense and Sensibility = Body and Mind).
Find freedom in movement through the IDIOMATICS of the senses, exploratory TECHNIQUES, and the CURIOSITY within the creative process.
*INTUITION: (Developing your 6th sense) The ability to predict and support movement, as well as opening doors to the unconscious stories of the body/mind. Using aesthetic feedback scores, somatic transference, creative restrictions, blindfolded exercises, RE-membering natural origins, and body-part mapping.
*EQUILIBRIUM: Finding balance and proprioceptive awareness in solo and in contact with others. Learning anatomically sound pathways of movement, head/tail connection, spiraling in and out of the floor, falling off your axis, turning upside down, working with your range of motion, traveling and changing direction/orientation.
**Note: The class builds on itself, so drop-in is not preferred unless you are an advanced/experienced dancer or are attending the first classes in sequence. Pro-rated attendance is fine; you must contact the email below.
*Partial Scholarships available upon request.
Imagine all the dots are people.[:]::[::]:.[:..].:.:.:. ~secret codes~Etc.
|
Require Import UniMath.Foundations.Preamble.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.Isos.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.CategoryTheory.Core.Univalence.
Require Import
UniMath.CategoryTheory.opp_precat
UniMath.CategoryTheory.yoneda
UniMath.CategoryTheory.categories.HSET.Core
UniMath.CategoryTheory.categories.HSET.MonoEpiIso.
Require Import
UniMath.CategoryTheory.RepresentableFunctors.Bifunctor
UniMath.CategoryTheory.RepresentableFunctors.Precategories.
Require Import UniMath.MoreFoundations.Tactics.
Local Open Scope cat.
Local Open Scope Cat.
Definition isUniversal {C:category} {X:[C^op,HSET]} {c:C} (x:c ⇒ X)
:= ∏ (c':C), isweq (λ f : c' --> c, x ⟲ f).
Definition Universal {C:category} (X:[C^op,HSET]) (c:C)
:= ∑ (x:c ⇒ X), isUniversal x.
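(* Unfolding the definitions: an element x : c ⇒ X is universal when, for every
   c', precomposition f ↦ x ⟲ f is a weak equivalence (c' --> c) ≃ (c' ⇒ X);
   this is the usual universal-property formulation of representability. *)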
Lemma z_iso_Universal_weq {C:category} {X Y:[C^op,HSET]} (c:C) :
z_iso X Y -> Universal X c ≃ Universal Y c.
Proof.
intro i.
set (I := (functor_z_iso_pointwise_if_z_iso
C^op HSET (homset_property HSET) X Y (pr1 i) (pr2 i))).
unshelve refine (weqbandf _ _ _ _).
- apply hset_z_iso_equiv_weq. unfold arrow, functor_object_application. exact (I c).
- simpl; intros x. apply weqonsecfibers; intro b. apply weqiff.
+ unshelve refine (twooutof3c_iff_1_homot _ _ _ _ _).
* exact (pr1 i ◽ opp_ob b).
* intro f; simpl.
exact (eqtohomot (nat_trans_ax (pr1 i) _ _ f) x).
* exact (hset_z_iso_is_equiv _ _ (I b)).
+ apply isapropisweq.
+ apply isapropisweq.
Defined.
Definition Representation {C:category} (X:[C^op,HSET]) : UU
:= ∑ (c:C), Universal X c.
Definition isRepresentable {C:category} (X:[C^op,HSET]) := ∥ Representation X ∥.
Lemma isaprop_Representation {C: univalent_category} (X:[C^op,HSET]) :
isaprop (@Representation C X).
Proof.
Abort.
Definition z_iso_Representation_weq {C:category} {X Y:[C^op,HSET]} :
z_iso X Y -> Representation X ≃ Representation Y.
Proof.
intros i. apply weqfibtototal; intro c. apply z_iso_Universal_weq; assumption.
Defined.
(* categories of functors with representations *)
Definition RepresentedFunctor (C:category) : category
:= categoryWithStructure [C^op,HSET] Representation.
Definition toRepresentation {C:category} (X : RepresentedFunctor C) :
Representation (pr1 X)
:= pr2 X.
Definition RepresentableFunctor (C:category) : category
:= categoryWithStructure [C^op,HSET] isRepresentable.
Definition toRepresentableFunctor {C:category} :
RepresentedFunctor C ⟶ RepresentableFunctor C :=
functorWithStructures (λ c, hinhpr).
(* make a representation of a functor *)
Definition makeRepresentation {C:category} {c:C} {X:[C^op,HSET]} (x:c ⇒ X) :
(∏ (c':C), UniqueConstruction (λ f : c' --> c, x ⟲ f)) -> Representation X.
Proof.
intros bij. exists c. exists x. intros c'. apply set_bijection_to_weq.
- exact (bij c').
- apply setproperty.
Defined.
(* universal aspects of represented functors *)
Definition universalObject {C:category} {X:[C^op,HSET]} (r:Representation X) : C
:= pr1 r.
Definition universalElement {C:category} {X:[C^op,HSET]} (r:Representation X) :
universalObject r ⇒ X
:= pr1 (pr2 r).
Coercion universalElement : Representation >-> pr1hSet.
Definition universalProperty {C:category} {X:[C^op,HSET]} (r:Representation X) (c:C) :
c --> universalObject r ≃ (c ⇒ X)
:= make_weq (λ f : c --> universalObject r, r ⟲ f)
(pr2 (pr2 r) c).
Definition universalMap {C:category} {X:[C^op,HSET]} (r:Representation X) {c:C} :
c ⇒ X -> c --> universalObject r
:= invmap (universalProperty _ _).
Notation "r \\ x" := (universalMap r x) (at level 50, left associativity) : cat.
Definition universalMap' {C:category} {X:[C^op^op,HSET]} (r:Representation X) {c:C} :
X ⇐ c -> c <-- universalObject r
:= invmap (universalProperty _ _).
Notation "x // r" := (universalMap' r x) (at level 50, left associativity) : cat.
Definition universalMapProperty {C:category} {X:[C^op,HSET]} (r:Representation X)
{c:C} (x : c ⇒ X) :
r ⟲ (r \\ x) = x
:= homotweqinvweq (universalProperty r c) x.
Definition mapUniqueness {C:category} (X:[C^op,HSET]) (r : Representation X) (c:C)
(f g: c --> universalObject r) :
r ⟲ f = r ⟲ g -> f = g
:= invmaponpathsweq (universalProperty _ _) _ _.
Definition universalMapUniqueness {C:category} {X:[C^op,HSET]} {r:Representation X}
{c:C} (x : c ⇒ X) (f : c --> universalObject r) :
r ⟲ f = x -> f = r \\ x
:= pathsweq1 (universalProperty r c) f x.
Definition universalMapIdentity {C:category} {X:[C^op,HSET]} (r:Representation X) :
r \\ r = identity _.
Proof.
apply pathsinv0. apply universalMapUniqueness. apply arrow_mor_id.
Qed.
Definition universalMapUniqueness' {C:category} {X:[C^op,HSET]} {r:Representation X}
{c:C} (x : c ⇒ X) (f : c --> universalObject r) :
f = r \\ x -> r ⟲ f = x
:= pathsweq1' (universalProperty r c) f x.
Lemma univ_arrow_mor_assoc {C:category} {a b:C} {Z:[C^op,HSET]}
(f : a --> b) (z : b ⇒ Z) (t : Representation Z) :
(t \\ z) ∘ f = t \\ (z ⟲ f).
Proof.
apply universalMapUniqueness.
unshelve refine (arrow_mor_mor_assoc _ _ _ @ _).
apply maponpaths.
apply universalMapProperty.
Qed.
(* the universal object as a functor *)
Lemma uOF_identity {C:category} {X:[C^op,HSET]} (r:Representation X) :
r \\ (identity X ⟳ r) = identity _.
Proof.
unfold nat_trans_id; simpl.
unshelve refine (transportb (λ k, _ \\ k = _) (identityFunction' _ _) _).
apply universalMapIdentity.
Qed.
Lemma uOF_comp {C:category} {X Y Z:[C^op,HSET]}
(r:Representation X)
(s:Representation Y)
(t:Representation Z)
(p:X-->Y) (q:Y-->Z) :
t \\ ((q ∘ p) ⟳ r) = (t \\ (q ⟳ s)) ∘ (s \\ (p ⟳ r)).
Proof.
unshelve refine (transportf (λ k, _ \\ k = _) (nattrans_nattrans_arrow_assoc _ _ _) _).
unshelve refine (_ @ !univ_arrow_mor_assoc _ _ _).
apply maponpaths.
unshelve refine (_ @ nattrans_arrow_mor_assoc _ _ _).
apply (maponpaths (λ k, q ⟳ k)).
apply pathsinv0.
apply universalMapProperty.
Qed.
Definition universalObjectFunctor (C:category) : RepresentedFunctor C ⟶ C.
Proof.
unshelve refine (makeFunctor _ _ _ _).
- intro X. exact (universalObject (pr2 X)).
- intros X Y p; simpl. exact (pr2 Y \\ (p ⟳ pr2 X)).
- intros X; simpl. apply uOF_identity.
- intros X Y Z p q; simpl. apply uOF_comp.
Defined.
Definition universalObjectFunctor_on_map (C:category) {X Y:RepresentedFunctor C} (p:X-->Y) :
universalObjectFunctor C ▭ p = pr2 Y \\ (p ⟳ pr2 X).
Proof.
reflexivity.
Defined.
Lemma universalObjectFunctor_comm (C:category) {X Y:RepresentedFunctor C} (p:X-->Y) :
p ⟳ universalElement (pr2 X) = universalElement (pr2 Y) ⟲ universalObjectFunctor C ▭ p.
Proof.
change (universalObjectFunctor C ▭ p) with (pr2 Y \\ (p ⟳ pr2 X)).
apply pathsinv0, universalMapProperty.
Defined.
(** transferring universal properties between isomorphic objects *)
Definition isUniversal_isom {C:category} {X:[C^op,HSET]} {c c':C}
(x:c ⇒ X) (f : z_iso c' c) :
isUniversal x <-> isUniversal (x ⟲ f).
Proof.
Abort.
(** transferring representability via embeddings and isomorphisms of categories *)
Definition embeddingRepresentability {C D:category}
{X:[C^op,HSET]} {Y:[D^op,HSET]}
(s:Representation Y)
(i:categoryEmbedding C D) :
z_iso (Y □ functorOp (opp_ob (pr1 i))) X ->
(∑ c, i c = universalObject s) -> Representation X.
Proof.
intros j ce.
apply (z_iso_Representation_weq j).
exists (pr1 ce).
exists (transportf (λ d, Y ◾ d : hSet) (!pr2 ce) s).
intro c'. apply (twooutof3c (# i) (λ g, _ ⟲ g)).
- apply (pr2 i).
- induction (! pr2 ce). exact (weqproperty (universalProperty _ _)).
Defined.
Definition isomorphismRepresentability {C D:category}
{X:[C^op,HSET]} {Y:[D^op,HSET]}
(s:Representation Y)
(i:categoryIsomorphism C D) :
z_iso (Y □ functorOp (opp_ob (pr1 (pr1 i)))) X -> Representation X
:= λ j, embeddingRepresentability s i j (iscontrpr1 (pr2 i (universalObject s))).
(*** Some standard functors to consider representing *)
(** the functor represented by an object *)
Definition Hom1 {C:category} (c:C) : [C^op,HSET].
Proof.
unshelve refine (makeFunctor_op _ _ _ _).
- intro b. exact (Hom C b c).
- intros b a f g; simpl. exact (g ∘ f).
- abstract (intros b; simpl; apply funextsec; intro g; apply id_left) using _L_.
- abstract (intros i j k f g; simpl; apply funextsec; intro h;
rewrite <- assoc; reflexivity) using _L_.
Defined.
Lemma Hom1_Representation {C:category} (c:C) : Representation (Hom1 c).
Proof.
exists c. exists (identity c). intro b. apply (isweqhomot (idweq _)).
- abstract (intro f; unfold arrow_morphism_composition; unfold Hom1; simpl;
apply pathsinv0, id_right) using _R_.
- abstract (apply weqproperty) using _T_.
Defined.
(** maps from Hom1 to functors *)
Lemma compose_SET {X Y Z:HSET} (f:X-->Y) (g:Y-->Z) : g∘f = λ x, g(f x).
Proof.
reflexivity.
Defined.
Definition element_to_nattrans {C:category} (X:[C^op,HSET]) (c:C) :
c ⇒ X -> Hom1 c --> X.
Proof.
intros x. unshelve refine (makeNattrans_op _ _).
- unfold Hom1; simpl; intros b f. exact (x ⟲ f).
- abstract (intros a b f; apply funextsec; intro g; apply arrow_mor_mor_assoc) using _L_.
Defined.
(** representable functors are isomorphic to one represented by an object *)
Theorem Representation_to_z_iso {C:category} (X:[C^op,HSET]) (r:Representation X) :
z_iso (Hom1 (universalObject r)) X.
Proof.
refine (z_iso_from_nat_z_iso _ ((element_to_nattrans X (universalObject r) (universalElement r)),,_)).
intro b. apply (pr2 (weq_iff_z_iso_SET _)). exact (pr2 (pr2 r) b).
Defined.
(** initial and final objects and zero maps *)
Definition UnitFunctor (C:category) : [C,SET].
unshelve refine (_,,_).
{ exists (λ c, unitset). exact (λ a b f t, t). }
{ split.
{ intros a. reflexivity. }
{ intros a b c f g. reflexivity. } }
Defined.
Definition TerminalObject (C:category) := Representation (UnitFunctor C^op).
Definition terminalObject {C} (t:TerminalObject C) : ob C := universalObject t.
Definition terminalArrow {C} (t:TerminalObject C) (c:ob C) :
Hom C c (terminalObject t)
:= t \\ tt.
Definition InitialObject (C:category) := TerminalObject C^op.
Definition initialObject {C} (i:InitialObject C) : ob C := universalObject i.
Definition initialArrow {C} (i:InitialObject C) (c:ob C) :
Hom C (initialObject i) c
:= rm_opp_mor (tt // i).
Definition init_to_opp {C:category} : InitialObject C -> TerminalObject C^op
:= λ i, i.
Definition term_to_opp {C:category} : TerminalObject C -> InitialObject C^op.
Proof.
intros. unfold InitialObject. now induction (opp_opp_precat C).
Defined.
(** zero objects, as an alternative to ZeroObject.v *)
Definition ZeroObject (C:category)
:= ∑ z:ob C, Universal (UnitFunctor C^op) z × Universal (UnitFunctor C^op^op) z.
Definition zero_to_terminal (C:category) : ZeroObject C -> TerminalObject C
:= λ z, pr1 z ,, pr1 (pr2 z).
Definition zero_to_initial (C:category) : ZeroObject C -> InitialObject C
:= λ z, pr1 z ,, pr2 (pr2 z).
Definition zero_opp (C:category) : ZeroObject C -> ZeroObject C^op.
Proof.
intro z. induction z as [z k]. exists z.
induction (opp_opp_precat C).
exact (pr2 k,,pr1 k).
Defined.
Definition hasZeroObject (C:category) := ∥ ZeroObject C ∥.
Definition haszero_opp (C:category) : hasZeroObject C -> hasZeroObject C^op
:= hinhfun (zero_opp C).
Definition zeroMap' (C:category) (a b:ob C) (o:ZeroObject C) : Hom C a b
:= (zero_to_initial C o \\ tt) ∘ (zero_to_terminal C o \\ tt).
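(* The zero map a --> b factors through the zero object z: the terminal
   arrow a --> z followed by the initial arrow z --> b. *)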
Lemma zero_eq_zero_opp (C:category) (a b:ob C) (o:ZeroObject C) :
zeroMap' C^op (opp_ob b) (opp_ob a) (zero_opp C o)
=
opp_mor (zeroMap' C a b o).
Proof.
intros.
try reflexivity.
Abort.
(** binary products and coproducts *)
Definition HomPair {C:category} (a b:C) : [C^op,SET].
Proof.
unshelve refine (makeFunctor_op _ _ _ _).
- intro c. exact (Hom C c a × Hom C c b) % set.
- simpl. intros c d f x. exact (pr1 x ∘ f ,, pr2 x ∘ f).
- abstract (simpl; intro c; apply funextsec; intro x;
apply dirprodeq; apply id_left) using _B_.
- abstract (simpl; intros c d e f g;
apply funextsec; intro x;
apply dirprodeq; apply pathsinv0, assoc) using _C_.
Defined.
Definition HomPair_1 {C:category} (a b c:C) :
(((HomPair a b : C^op ⟶ SET) c : hSet) -> Hom C c a)
:= pr1.
Definition HomPair_2 {C:category} (a b c:C) :
(((HomPair a b : C^op ⟶ SET) c : hSet) -> Hom C c b)
:= pr2.
Definition BinaryProduct {C:category} (a b:C) :=
Representation (HomPair a b).
Definition BinaryProducts (C:category) := ∏ (a b:C), BinaryProduct a b.
Definition pr_1 {C:category} {a b:C} (prod : BinaryProduct a b) :
universalObject prod --> a
:= pr1 (universalElement prod).
Definition pr_2 {C:category} {a b:C} (prod : BinaryProduct a b) :
universalObject prod --> b
:= pr2 (universalElement prod).
Definition binaryProductMap {C:category} {a b:C} (prod : BinaryProduct a b)
{c:C} : c --> a -> c --> b -> c --> universalObject prod
:= λ f g, prod \\ (f,,g).
Definition binaryProduct_pr_1_eqn {C:category} {a b:C} (prod : BinaryProduct a b)
{c:C} (f : c --> a) (g : c --> b) :
pr_1 prod ∘ binaryProductMap prod f g = f
:= maponpaths (HomPair_1 a b (opp_ob c)) (pr2 (pr1 (pr2 (pr2 prod) c (f,,g)))).
Definition binaryProduct_pr_2_eqn {C:category} {a b:C} (prod : BinaryProduct a b)
{c:C} (f : c --> a) (g : c --> b) :
pr_2 prod ∘ binaryProductMap prod f g = g
:= maponpaths (HomPair_2 a b (opp_ob c)) (pr2 (pr1 (pr2 (pr2 prod) c (f,,g)))).
Lemma binaryProductMapUniqueness {C:category} {a b:C} (prod : BinaryProduct a b)
{c:C} (f g : Hom C c (universalObject prod)) :
pr_1 prod ∘ f = pr_1 prod ∘ g ->
pr_2 prod ∘ f = pr_2 prod ∘ g -> f = g.
Proof.
intros r s. apply mapUniqueness. apply dirprodeq. exact r. exact s.
Defined.
Definition binaryProductMap_2 {C:category} {a b a' b':C}
(prod : BinaryProduct a b)
(prod' : BinaryProduct a' b')
(f : a --> a')
(g : b --> b')
: rm_opp_ob (universalObject prod) --> rm_opp_ob (universalObject prod').
Proof.
unshelve refine (binaryProductMap _ _ _).
{ exact (f ∘ pr_1 prod). }
{ exact (g ∘ pr_2 prod). }
Defined.
Definition BinarySum {C:category} (a b:C) :=
BinaryProduct (opp_ob a) (opp_ob b).
Definition BinarySums (C:category) := ∏ (a b:C), BinarySum a b.
Lemma binarySumsToProducts {C:category} :
BinarySums C -> BinaryProducts C^op.
Proof.
intros sum. exact sum.
Defined.
Lemma binaryProductToSums {C:category} :
BinaryProducts C -> BinarySums C^op.
Proof.
intro prod. exact prod.
Defined.
Definition in_1 {C:category} {a b:C} (sum : BinarySum a b) :
Hom C a (universalObject sum)
:= pr_1 sum.
Definition in_2 {C:category} {a b:C} (sum : BinarySum a b) :
Hom C b (universalObject sum)
:= pr_2 sum.
Definition binarySumProperty {C:category} {a b c:C} (f:a-->c) (g:b-->c) :=
isUniversal ((f ,, g) : HomPair (opp_ob a) (opp_ob b) ◾ c : hSet).
Definition binarySumMap {C:category} {a b:C} (sum : BinarySum a b)
{c:C} : a --> c -> b --> c -> rm_opp_ob (universalObject sum) --> c
:= λ f g, rm_opp_mor (sum \\ (opp_mor f,,opp_mor g)).
Definition binarySum_in_1_eqn {C:category} {a b:C} (sum : BinarySum a b)
{c:C} (f : a --> c) (g : b --> c) :
binarySumMap sum f g ∘ in_1 sum = f
:= maponpaths (HomPair_1 (opp_ob a) (opp_ob b) c) ((pr2 (pr1 (pr2 (pr2 sum) c (f,,g))))).
Definition binarySum_in_2_eqn {C:category} {a b:C} (sum : BinarySum a b)
{c:C} (f : a --> c) (g : b --> c) :
binarySumMap sum f g ∘ in_2 sum = g
:= maponpaths (HomPair_2 (opp_ob a) (opp_ob b) c) ((pr2 (pr1 (pr2 (pr2 sum) c (f,,g))))).
Lemma binarySumMapUniqueness {C:category} {a b:C} (sum : BinarySum a b)
{c:C} (f g : Hom C (rm_opp_ob (universalObject sum)) c) :
f ∘ in_1 sum = g ∘ in_1 sum ->
f ∘ in_2 sum = g ∘ in_2 sum -> f = g.
Proof.
intros r s. apply opp_mor_eq, mapUniqueness, dirprodeq; assumption.
Defined.
Definition binarySumMap_2 {C:category} {a b a' b':C}
(sum : BinarySum a b)
(sum' : BinarySum a' b')
(f : a --> a')
(g : b --> b')
: rm_opp_ob (universalObject sum) --> rm_opp_ob (universalObject sum').
Proof.
unshelve refine (binarySumMap _ _ _).
{ exact (in_1 sum' ∘ f). }
{ exact (in_2 sum' ∘ g). }
Defined.
(** products and coproducts *)
Definition HomFamily (C:category) {I} (c:I -> ob C) : C^op ⟶ SET.
Proof.
unshelve refine (_,,_).
- unshelve refine (_,,_).
+ intros x. exact (∏ i, Hom C x (c i)) % set.
+ intros x y f p i; simpl; simpl in p.
exact (compose (C:=C) f (p i)).
- abstract (split;
[ intros a; apply funextsec; intros f; apply funextsec; intros i; simpl;
apply id_left
| intros x y z p q;
apply funextsec; intros f; apply funextsec; intros i; simpl;
apply pathsinv0, assoc]) using _L_.
Defined.
Definition Product {C:category} {I} (c:I -> ob C)
:= Representation (HomFamily C c).
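(* A product of the family c is a representation of x ↦ ∏ i, Hom C x (c i);
   pr_ below extracts the i-th projection from the universal element. *)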
Definition pr_ {C:category} {I} {c:I -> ob C} (prod : Product c) (i:I) :
universalObject prod --> c i
:= universalElement prod i.
Definition productMapExistence {C:category} {I} {c:I -> ob C} (prod : Product c)
{a:C} :
(∏ i, Hom C a (c i)) -> Hom C a (universalObject prod)
:= λ f, prod \\ f.
Lemma productMapUniqueness {C:category} {I} {c:I -> ob C} (prod : Product c)
{a:C} (f g : Hom C a (universalObject prod)) :
(∏ i, pr_ prod i ∘ f = pr_ prod i ∘ g) -> f = g.
Proof.
intro e. apply mapUniqueness. apply funextsec; intro i. apply e.
Defined.
Definition Sum {C:category} {I} (c:I -> ob C)
:= Representation (HomFamily C^op c).
Definition in_ {C:category} {I} {c:I -> ob C} (sum : Sum c) (i:I) :
c i --> universalObject sum
:= rm_opp_mor (universalElement sum i).
Definition sumMapExistence {C:category} {I} {c:I -> ob C} (sum : Sum c)
{a:C} :
(∏ i, Hom C (c i) a) -> Hom C (universalObject sum) a
:= λ f, f // sum.
Lemma sumMapUniqueness {C:category} {I} {c:I -> ob C} (sum : Sum c)
{a:C} (f g : Hom C (universalObject sum) a) :
(∏ i, f ∘ in_ sum i = g ∘ in_ sum i) -> f = g.
Proof.
intro e. apply opp_mor_eq, mapUniqueness. apply funextsec; intro i. apply e.
Defined.
(** equalizers and coequalizers *)
Definition Equalization {C:category} {c d:C} (f g:c-->d) :
C^op ⟶ SET.
Proof.
unshelve refine (makeFunctor_op _ _ _ _).
- intro b. unshelve refine (_,,_).
+ exact (∑ p:b --> c, f∘p = g∘p).
+ abstract (apply isaset_total2;
[ apply homset_property
| intro; apply isasetaprop; apply homset_property]) using _L_.
- intros b a e w; simpl in *. exists (pr1 w ∘ e).
abstract (rewrite <- 2? assoc; apply maponpaths; exact (pr2 w)) using _M_.
- abstract (
intros b; apply funextsec; intro w; apply subtypePath;
[ intro; apply homset_property
| simpl; apply id_left]) using _N_.
- abstract (
intros a'' a' a r s; apply funextsec;
intro w; apply subtypePath;
[ intro; apply homset_property
| apply pathsinv0, assoc ]) using _O_.
Defined.
Definition Equalizer {C:category} {c d:C} (f g:c-->d) :=
Representation (Equalization f g).
Definition equalizerMap {C:category} {c d:C} {f g:c-->d} (eq : Equalizer f g) :
universalObject eq --> c
:= pr1 (universalElement eq).
Definition equalizerEquation {C:category} {c d:C} {f g:c-->d} (eq : Equalizer f g) :
f ∘ equalizerMap eq = g ∘ equalizerMap eq
:= pr2 (universalElement eq).
Definition Coequalizer {C:category} {c d:C} (f g:c-->d) :=
Representation (Equalization (opp_mor f) (opp_mor g)).
Definition coequalizerMap {C:category} {c d:C} {f g:c-->d} (coeq : Coequalizer f g) :
d --> universalObject coeq
:= pr1 (universalElement coeq).
Definition coequalizerEquation {C:category} {c d:C} {f g:c-->d} (coeq : Coequalizer f g) :
coequalizerMap coeq ∘ f = coequalizerMap coeq ∘ g
:= pr2 (universalElement coeq).
(** pullbacks and pushouts *)
Definition PullbackCone {C:category} {a b c:C} (f:a-->c) (g:b-->c) :
C^op ⟶ SET.
Proof.
intros.
unshelve refine (makeFunctor_op _ _ _ _).
- intros t. unshelve refine (_,,_).
+ exact (∑ (p: t --> a × t --> b), f ∘ pr1 p = g ∘ pr2 p).
+ abstract (apply isaset_total2;
[ apply isasetdirprod; apply homset_property
| intro; apply isasetaprop; apply homset_property]) using _L_.
- intros t u p w; simpl in *.
exists (pr1 (pr1 w) ∘ p,, pr2 (pr1 w) ∘ p).
abstract (
simpl; rewrite <- 2? assoc; apply maponpaths; exact (pr2 w)) using _M_.
- abstract (intros t; simpl; apply funextsec; intro w;
induction w as [w eq]; induction w as [p q];
simpl in *; unshelve refine (two_arg_paths_f _ _);
[ rewrite 2? id_left; reflexivity
| apply proofirrelevance; apply homset_property]) using _N_.
- abstract (
intros r s t p q; simpl in *; apply funextsec; intro w;
unshelve refine (total2_paths2_f _ _);
[ simpl; rewrite 2? assoc; reflexivity
| apply proofirrelevance; apply homset_property]) using _P_.
Defined.
Definition Pullback {C:category} {a b c:C} (f:a-->c) (g:b-->c) :=
Representation (PullbackCone f g).
Definition pb_1 {C:category} {a b c:C} {f:a-->c} {g:b-->c} (pb : Pullback f g) :
universalObject pb --> a
:= pr1 (pr1 (universalElement pb)).
Definition pb_2 {C:category} {a b c:C} {f:a-->c} {g:b-->c} (pb : Pullback f g) :
universalObject pb --> b
:= pr2 (pr1 (universalElement pb)).
Definition pb_eqn {C:category} {a b c:C} {f:a-->c} {g:b-->c} (pb : Pullback f g) :
f ∘ pb_1 pb = g ∘ pb_2 pb
:= pr2 (universalElement pb).
Definition Pushout {C:category} {a b c:C} (f:a-->b) (g:a-->c) :=
Representation (PullbackCone (opp_mor f) (opp_mor g)).
Definition po_1 {C:category} {a b c:C} {f:a-->b} {g:a-->c} (po : Pushout f g) :
b --> universalObject po
:= pr1 (pr1 (universalElement po)).
Definition po_2 {C:category} {a b c:C} {f:a-->b} {g:a-->c} (po : Pushout f g) :
c --> universalObject po
:= pr2 (pr1 (universalElement po)).
Definition po_eqn {C:category} {a b c:C} {f:a-->c} {g:a-->c} (po : Pushout f g) :
po_1 po ∘ f = po_2 po ∘ g
:= pr2 (universalElement po).
(** kernels and cokernels *)
Definition Annihilator (C:category) (zero:ZeroMaps C) {c d:C} (f:c --> d) :
C^op ⟶ SET.
Proof.
unshelve refine (_,,_).
{ unshelve refine (_,,_).
{ intro b. exists (∑ g:Hom C b c, f ∘ g = pr1 zero b d).
abstract (apply isaset_total2; [ apply setproperty |
intro g; apply isasetaprop; apply homset_property ]) using _L_. }
{ intros a b p ge; simpl.
exists (pr1 ge ∘ opp_mor p).
{ abstract (
unshelve refine (! assoc _ _ _ @ _); rewrite (pr2 ge);
apply (pr2 (pr2 zero) _ _ _ _)) using _M_. } } }
{ abstract (split;
[ intros x; apply funextsec; intros [r rf0];
apply subtypePath;
[ intro; apply homset_property
| simpl; unfold opp_mor; apply id_left ]
| intros w x y t u; apply funextsec; intros [r rf0];
apply subtypePath;
[ intro; apply homset_property
| simpl; unfold opp_mor; apply pathsinv0, assoc ] ]) using _N_. }
Defined.
Definition Kernel {C:category} (zero:ZeroMaps C) {c d:ob C} (f:c --> d) :=
Representation (Annihilator C zero f).
Definition Cokernel {C:category} (zero:ZeroMaps C) {c d:ob C} (f:c --> d) :=
Representation (Annihilator C^op (ZeroMaps_opp C zero) f).
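(* A kernel of f is a universal arrow annihilating f: its universal element is
   a morphism into c whose composite with f is the zero map (kernelEquation
   below), and Cokernel is the dual notion, obtained in C^op. *)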
Definition kernelMap {C:category} {zero:ZeroMaps C} {c d:ob C} {f:c --> d}
(r : Kernel zero f) : universalObject r --> c
:= pr1 (universalElement r).
Definition kernelEquation {C:category} {zero:ZeroMaps C} {c d:ob C} {f:c --> d}
(ker : Kernel zero f) :
f ∘ kernelMap ker = pr1 zero _ _
:= pr2 (universalElement ker).
Definition cokernelMap {C:category} {zero:ZeroMaps C} {c d:ob C} {f:c --> d}
(r : Cokernel zero f) : d --> universalObject r
:= pr1 (universalElement r).
Definition cokernelEquation {C:category} {zero:ZeroMaps C} {c d:ob C} {f:c --> d}
(coker : Cokernel zero f) :
cokernelMap coker ∘ f = pr1 zero _ _
:= pr2 (universalElement coker).
(** fibers of maps between functors *)
Definition fiber {C:category} {X Y:[C^op,SET]} (p : X --> Y) {c:C} (y : c ⇒ Y) :
C^op ⟶ SET.
Proof.
unshelve refine (makeFunctor_op _ _ _ _).
- intro b.
exists (∑ fx : (b --> c) × (b ⇒ X), p ⟳ pr2 fx = y ⟲ pr1 fx).
abstract (apply isaset_total2;
[ apply isaset_dirprod, setproperty; apply homset_property
| intros [f x]; apply isasetaprop; apply setproperty ]) using _K_.
- simpl; intros b b' g fxe.
exists (pr1 (pr1 fxe) ∘ g,, pr2 (pr1 fxe) ⟲ g).
abstract (simpl; rewrite nattrans_arrow_mor_assoc, arrow_mor_mor_assoc;
apply maponpaths; exact (pr2 fxe)) using _M_.
- abstract (intro b; apply funextsec; intro w;
induction w as [w e]; induction w as [f x]; simpl;
unshelve refine (two_arg_paths_f _ _);
[ apply dirprodeq; [ apply id_left | apply arrow_mor_id ]
| apply setproperty]) using _R_.
- abstract (intros b b' b'' g g''; apply funextsec; intro w;
induction w as [w e]; induction w as [f x]; simpl;
unshelve refine (total2_paths2_f _ _);
[ apply dirprodeq;
[ apply pathsinv0, assoc | apply arrow_mor_mor_assoc ]
| apply setproperty ]) using _T_.
Defined.
(* this is representability of a map between two functors, in the sense of
Grothendieck. See EGA Chapter 0. *)
Definition Representation_Map {C:category} {X Y:[C^op,SET]} (p : X --> Y) :=
∏ (c : C) (y : c ⇒ Y), Representation (fiber p y).
Definition isRepresentable_Map {C:category} {X Y:[C^op,SET]} (p : X --> Y) :=
∏ (c : C) (y : c ⇒ Y), isRepresentable (fiber p y).
(** limits and colimits *)
Definition cone {I C:category} (c:C) (D: [I,C]) : UU
:= ∑ (φ : ∏ i, Hom C c (D ◾ i)),
∏ i j (e : i --> j), D ▭ e ∘ φ i = φ j.
Lemma cone_eq {C I:category} (c:C^op) (D: I⟶C) (p q:cone (C:=C) c D) :
pr1 p ~ pr1 q -> p = q.
Proof.
intros h. apply subtypePath.
{ intro r.
apply impred_isaprop; intro i;
apply impred_isaprop; intro j;
apply impred_isaprop; intro e.
apply homset_property. }
apply funextsec; intro i; apply h.
Qed.
Definition cone_functor {I C:category} : [I,C] ⟶ [C^op,SET].
Proof.
intros.
unshelve refine (_,,_).
{ unshelve refine (_,,_).
{ intros D. unshelve refine (_,,_).
{ unshelve refine (_,,_).
- intro c. exists (cone (C:=C) c D).
abstract (
apply isaset_total2;
[ apply impred_isaset; intro i; apply homset_property
| intros φ;
apply impred_isaset; intro i;
apply impred_isaset; intro j;
apply impred_isaset; intro e; apply isasetaprop;
apply homset_property]) using LLL.
- simpl; intros a b f φ.
exists (λ i, pr1 φ i ∘ f).
abstract (
intros i j e; simpl;
rewrite <- assoc;
apply maponpaths;
apply (pr2 φ)) using _M_. }
{ abstract (split;
[ intro c; simpl;
apply funextsec; intro p;
apply cone_eq;
intro i; simpl;
apply id_left
| intros a b c f g; simpl; apply funextsec; intro p;
apply cone_eq; simpl; intro i; apply pathsinv0, assoc ]) using _N_. } }
{ intros D D' f; simpl.
unshelve refine (_,,_).
- simpl. unfold cone. intros c φ.
unshelve refine (_,,_).
+ intros i. exact (pr1 f i ∘ pr1 φ i).
+ abstract (
simpl; intros i j e; assert (L := pr2 φ i j e); simpl in L;
rewrite <- L; rewrite <- assoc; rewrite <- assoc;
apply maponpaths; apply pathsinv0; apply nat_trans_ax) using _P_.
- abstract (intros a b g; simpl;
apply funextsec; intro p; apply cone_eq; intro i; simpl;
apply pathsinv0, assoc) using _Q_. } }
{ abstract (split;
[ intros D; simpl;
apply nat_trans_eq;
[ exact (homset_property SET)
| intros c; apply funextsec; intro φ; simpl;
apply cone_eq; intro i; apply id_right]
| intros D D' D'' p q; apply nat_trans_eq;
[ apply homset_property
| intro c; apply funextsec; intro K; apply cone_eq; intros i; apply assoc ]]). }
Defined.
Definition cocone_functor {I C:category} : [I,C]^op ⟶ [C^op^op,SET] :=
cone_functor □ functorOp.
Definition Limit {C I:category} (D: I⟶C) := Representation (cone_functor D).
Definition Colimit {C I:category} (D: I⟶C) := Representation (cocone_functor D).
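(* Limits and colimits are thus representations of the cone and cocone
   functors; proj_ and inj_ below extract the legs of the universal (co)cone,
   and proj_comm / inj_comm record their compatibility with the diagram. *)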
Definition proj_ {C I:category} {D: I⟶C} (lim:Limit D) (i:I) : universalObject lim --> D i.
Proof.
intros. exact ((pr1 (universalElement lim) i)).
Defined.
Definition inj_ {C I:category} {D: I⟶C} (colim:Colimit D) (i:I) : D i --> universalObject colim.
Proof.
intros. exact ((pr1 (universalElement colim) i)).
Defined.
Definition proj_comm {C I:category} {D: I⟶C} (lim:Limit D) {i j:I} (f:i-->j) :
# D f ∘ proj_ lim i = proj_ lim j.
Proof.
intros. exact (pr2 (universalElement lim) _ _ f).
Defined.
Definition inj_comm {C I:category} {D: I⟶C} (colim:Colimit D) {i j:I} (f:i-->j) :
inj_ colim j ∘ # D f = inj_ colim i.
Proof.
intros. exact (pr2 (universalElement colim) _ _ f).
Defined.
Definition Limits (C:category) := ∏ (I:category) (D: I⟶C), Limit D.
Definition Colimits (C:category) := ∏ (I:category) (D: I⟶C), Colimit D.
Definition lim_functor (C:category) (lim:Limits C) (I:category) :
[I,C] ⟶ C
:= universalObjectFunctor C □ addStructure cone_functor (lim I).
Definition colim_functor (C:category) (colim:Colimits C) (I:category) :
[I,C] ⟶ C
:= functorRmOp (
universalObjectFunctor C^op □ addStructure cocone_functor (colim I)).
Lemma bifunctor_assoc_repn {B C:category} (X : [B, [C^op,SET]]) :
(∏ b, Representation (X ◾ b)) -> Representation (bifunctor_assoc X).
Proof.
intro r. set (X' := addStructure X r).
change (categoryWithStructure [C ^op, SET] Representation) with (RepresentedFunctor C) in X'.
set (F := universalObjectFunctor C □ X').
exists F. unshelve refine (_,,_).
{ unshelve refine (_,,_).
{ intro b. exact (universalElement (r b)). }
{ abstract (intros b b' f; exact (!universalObjectFunctor_comm C (X' ▭ f))) using _K_. } }
{ intro F'. apply UniqueConstruction_to_weq.
split.
{ intro x'. unfold arrow in x'.
unshelve refine (_,,_).
{ unshelve refine (makeNattrans _ _).
{ intro b. exact (r b \\ pr1 x' b). }
{ abstract (intros b b' f; simpl;
unshelve refine (univ_arrow_mor_assoc (F' ▭ f) (pr1 x' b') (r b') @ _);
intermediate_path (r b' \\ (X ▭ f ⟳ pr1 x' b));
[ apply maponpaths, (pr2 x' b b' f)
| unfold F;
rewrite comp_func_on_mor;
rewrite (universalObjectFunctor_on_map C (X' ▭ f));
change (pr2 (X' ◾ b')) with (r b');
change (pr2 (X' ◾ b)) with (r b);
change (X' ▭ f) with (X ▭ f);
unshelve refine (_ @ !univ_arrow_mor_assoc _ _ _);
apply maponpaths;
rewrite <- nattrans_arrow_mor_assoc;
apply (maponpaths (λ k, X ▭ f ⟳ k));
apply pathsinv0;
exact (universalMapProperty (r b) (pr1 x' b)) ]) using _R_. } }
{ abstract (unshelve refine (total2_paths_f _ _);
[ simpl; apply funextsec; intro b; unshelve refine (universalMapProperty _ _)
| apply funextsec; intro b;
apply funextsec; intro b';
apply funextsec; intro f; simpl; apply setproperty ] ) using _L_. } }
{ abstract (intros p q e; apply nat_trans_eq;
[ apply homset_property
| intros b; apply (mapUniqueness _ (r b) _ (p ◽ b) (q ◽ b));
exact (maponpaths (λ k, pr1 k b) e)]) using _M_. } }
Defined.
Theorem functorcategoryTerminalObject (B C:category) :
TerminalObject C -> TerminalObject [B,C].
Proof.
intro t.
apply (@z_iso_Representation_weq _ (bifunctor_assoc (constantFunctor B (UnitFunctor C^op)))).
{ unshelve refine (makeNatiso _ _).
{ intros F. apply hset_equiv_z_iso.
unfold bifunctor_assoc; simpl.
unshelve refine (weq_iso _ _ _ _).
- intros _. exact tt.
- intros x. unshelve refine (_,,_).
+ unfold θ_1; simpl. intro b. exact tt.
+ eqn_logic.
- simpl. intros w. apply subtypePath.
{ intros f. apply impred; intro b; apply impred; intro b'; apply impred; intro g. apply isasetunit. }
apply funextfun; intro b. apply isapropunit.
- eqn_logic. }
{ eqn_logic. } }
{ apply bifunctor_assoc_repn; intro b. exact t. }
Defined.
Goal ∏ B C t b,
universalObject(functorcategoryTerminalObject B C t) ◾ b = universalObject t.
reflexivity.
Defined.
Definition binaryProductFunctor {B C:category} (F G:[B,C]) : [B,[C^op,SET]].
Proof.
unshelve refine (makeFunctor _ _ _ _).
- intro b. exact (HomPair (F ◾ b) (G ◾ b)).
- intros b b' f.
unshelve refine (makeNattrans_op _ _).
+ intros c w. exact (F ▭ f ∘ pr1 w ,, G ▭ f ∘ pr2 w).
+ abstract (intros c c' g; simpl; apply funextsec; intro v;
apply dirprodeq; ( simpl; apply pathsinv0, assoc )) using _L_.
- abstract (intro b; apply nat_trans_eq;
[ apply homset_property
| intro c; simpl;
apply funextsec; intro v;
apply dirprodeq;
( simpl; rewrite functor_on_id; rewrite id_right; reflexivity )]) using _L_.
- abstract (intros b b' b'' f g; apply nat_trans_eq;
[ apply homset_property
| intro c; apply funextsec; intro w; apply dirprodeq ;
( simpl; rewrite functor_on_comp; rewrite assoc; reflexivity) ]) using _L_.
Defined.
Lemma BinaryProductFunctorAssoc {B C : category}
(prod : BinaryProducts C)
(F G : [B, C]) :
z_iso (bifunctor_assoc (binaryProductFunctor F G)) (HomPair F G).
Proof.
unshelve refine (makeNatiso (C := [B, C]^op) _ _).
{ intro H. apply hset_equiv_z_iso.
unshelve refine (weq_iso _ _ _ _).
{ intros w.
unshelve refine (_,,_).
{ unshelve refine (makeNattrans _ _).
{ intro b. exact (pr1 (pr1 w b)). }
{ abstract (intros b b' f; exact (maponpaths dirprod_pr1 (pr2 w b b' f))) using _L_. } }
{ unshelve refine (makeNattrans _ _).
{ intro b. exact (pr2 (pr1 w b)). }
{ abstract (intros b b' f; exact (maponpaths dirprod_pr2 (pr2 w b b' f))) using _L_. } } }
{ simpl. intros pq.
unshelve refine (_,,_).
{ intros b. exact (pr1 pq b ,, pr2 pq b). }
{ abstract (intros b b' f; simpl;
apply dirprodeq; ( simpl; apply nattrans_naturality )) using _L_. } }
{ abstract (intros w;
unshelve refine (total2_paths_f _ _);
[ apply funextsec; intro b; apply pathsinv0; reflexivity
| (apply funextsec; intro b;
apply funextsec; intro b';
apply funextsec; intro f;
apply isaset_dirprod; apply homset_property) ]) using _M_. }
{ abstract (intros pq; apply dirprodeq;
( apply nat_trans_eq;
[ apply homset_property | intro b; reflexivity ] )) using _L_. } }
{ abstract (intros H H' p;
apply funextsec; intros v;
apply dirprodeq;
( simpl; apply nat_trans_eq;
[ apply homset_property
| intros b; unfold makeNattrans; simpl; reflexivity ] )) using _L_. }
Defined.
Theorem functorBinaryProduct {B C:category} :
BinaryProducts C -> BinaryProducts [B,C].
Proof.
intros prod F G. unshelve refine (z_iso_Representation_weq _ _).
{ exact (bifunctor_assoc (binaryProductFunctor F G)). }
{ now apply BinaryProductFunctorAssoc. }
{ apply bifunctor_assoc_repn. intro b. apply prod. }
Defined.
Lemma functorBinaryProduct_eqn {B C:category} (prod : BinaryProducts C)
(F G : [B,C]) (b:B) :
universalObject (functorBinaryProduct prod F G) ◾ b
=
universalObject (prod (F ◾ b) (G ◾ b)).
Proof.
reflexivity.
Defined.
Lemma functorBinaryProduct_map_eqn {B C:category} (prod : BinaryProducts C)
(F G F' G' : [B,C]) (p:F-->F') (q:G-->G') (b:B) :
binaryProductMap_2 (functorBinaryProduct prod F G) (functorBinaryProduct prod F' G') p q ◽ b
=
binaryProductMap_2 (prod (F ◾ b) (G ◾ b)) (prod (F' ◾ b) (G' ◾ b)) (p ◽ b) (q ◽ b).
Proof.
reflexivity.
Defined.
Lemma HomPairOp {B C : category} (F G : [B, C]) :
z_iso (HomPair (functorOp F) (functorOp G) □ functorOp')
(HomPair (opp_ob F) (opp_ob G)).
(* This should be replaced by a general statement where [B,C]^op and
[B^op,C^op] are replaced by arbitrary isomorphic categories. And there
should be lemmas saying that having binary sums or products is preserved by
isomorphisms of categories. *)
Proof.
unshelve refine (makeNatiso _ _).
{ intros H. apply hset_equiv_z_iso.
apply weqdirprodf; exact (invweq (isomorphismOnMor functorOpIso H _)). }
{ abstract (intros H J p; apply funextsec; intro w;
apply dirprodeq;
( apply nat_trans_eq; [ apply homset_property | reflexivity ] )). }
Defined.
Theorem functorBinarySum {B C:category} :
BinarySums C -> BinarySums [B,C].
Proof.
intros sum F G.
exact (isomorphismRepresentability
(functorBinaryProduct (binarySumsToProducts sum)
(functorOp F) (functorOp G))
functorOpIso
(HomPairOp F G)).
Defined.
Lemma functorBinarySum_eqn {B C:category} (sum : BinarySums C)
(F G : [B,C]) (b:B) :
universalObject (functorBinarySum sum F G) ◾ b
=
universalObject (sum (F ◾ b) (G ◾ b)).
Proof.
reflexivity.
Defined.
Lemma functorBinarySum_map_eqn {B C:category} (sum : BinarySums C)
(F G F' G' : [B,C]) (p:F-->F') (q:G-->G') (b:B) :
binarySumMap_2 (functorBinarySum sum F G) (functorBinarySum sum F' G') p q ◽ b
=
binarySumMap_2 (sum (F ◾ b) (G ◾ b)) (sum (F' ◾ b) (G' ◾ b)) (p ◽ b) (q ◽ b).
Proof.
try reflexivity.
(* This failure might be what prevents using this framework with
SubstitutionSystems on the branch "colimits". Since
[functorBinaryProduct_map_eqn] admits a trivial proof, that's an argument
for replacing the proof of functorBinarySum by one that's parallel to the
proof of functorBinaryProduct, rather than deducing it as a corollary.
Maybe then we could also write [universalObject sum] instead of [rm_opp_ob
(universalObject sum)] *)
Abort.
Theorem functorLimits (B C:category) : Limits C -> Limits [B,C].
Proof.
intros lim I D.
unfold Limits, Limit in lim.
set (D' := bifunctor_comm _ _ _ D).
assert (M := bifunctor_assoc_repn (cone_functor □ D') (λ b, lim I (D' ◾ b))); clear lim.
exists (universalObject M).
unfold Representation in M.
Abort.
Theorem functorColimits (B C:category) : Colimits C -> Colimits [B,C].
Proof.
Abort.
(* --- *)
|
[STATEMENT]
lemma DERIV_Uniq: "\<exists>\<^sub>\<le>\<^sub>1D. DERIV f x :> D"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>\<^sub>\<le>\<^sub>1 D. (f has_field_derivative D) (at x)
[PROOF STEP]
by (simp add: DERIV_unique Uniq_def) |
Require Import VST.floyd.base2.
Require Import VST.floyd.client_lemmas.
Require Import VST.floyd.nested_field_lemmas.
Require Import VST.floyd.mapsto_memory_block.
Require Import VST.floyd.reptype_lemmas.
Require Import VST.floyd.data_at_rec_lemmas.
Require Import VST.floyd.field_at.
Require Import VST.floyd.field_compat.
Require Import VST.floyd.closed_lemmas.
Require Import VST.floyd.nested_pred_lemmas.
(*Require Import VST.floyd.unfold_data_at.*)
Local Open Scope logic.
Fixpoint fold_right_sepcon' (l: list(environ->mpred)) : environ -> mpred :=
match l with
| nil => emp
| b::nil => b
| b::r => b * fold_right_sepcon' r
end.
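(* Same as fold_right sepcon emp except that the nonempty case omits the
   trailing emp; fold_right_sepcon'_eq below shows the two coincide. *)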
Lemma fold_right_sepcon'_eq:
fold_right_sepcon' = @fold_right (environ->mpred) _ sepcon emp.
Proof.
extensionality l rho.
induction l; auto.
simpl.
destruct l. simpl. rewrite sepcon_emp. auto.
f_equal; auto.
Qed.
Lemma orp_dup {A}{ND: NatDed A}: forall P: A, P || P = P.
Proof. intros. apply pred_ext.
apply orp_left; apply derives_refl.
apply orp_right1; apply derives_refl.
Qed.
Lemma unsigned_repr_range: forall i, 0 <= i -> 0 <= Ptrofs.unsigned (Ptrofs.repr i) <= i.
Proof.
intros.
rewrite Ptrofs.unsigned_repr_eq.
pose proof Z.mod_le i Ptrofs.modulus H.
pose proof Z.mod_bound_pos i Ptrofs.modulus H.
set (x := Ptrofs.modulus) in *.
revert x H0 H1.
rewrite Ptrofs.modulus_power.
intros.
unfold Ptrofs.zwordsize, Ptrofs.wordsize, Wordsize_Ptrofs.wordsize in x.
destruct (Archi.ptr64);
(compute in x; subst x; spec H0; [omega| ]; spec H1; omega).
Qed.
Lemma tc_globalvar_sound:
forall Delta i t gz idata rho,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some t ->
gvar_volatile gz = false ->
gvar_init gz = idata ->
tc_environ Delta rho ->
globvar2pred (globals_of_env rho) (i, gz) rho |-- init_data_list2pred idata (readonly2share (gvar_readonly gz)) (eval_var i t rho) rho.
Proof.
intros.
unfold globvar2pred.
simpl.
destruct_var_types i.
destruct_glob_types i.
unfold globals_of_env.
unfold eval_var.
rewrite Heqo0, Heqo1, H1, H2.
auto.
Qed.
Lemma tc_globalvar_sound':
forall Delta i t gv idata rho,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some t ->
gvar_volatile gv = false ->
gvar_init gv = idata ->
tc_environ Delta rho ->
globvar2pred (globals_of_env rho) (i, gv) rho |--
init_data_list2pred idata (readonly2share (gvar_readonly gv)) (globals_of_env rho i) rho.
Proof.
intros.
unfold globvar2pred.
simpl.
destruct_glob_types i.
unfold globals_of_env.
rewrite Heqo0, H1, H2.
auto.
Qed.
Definition zero_of_type (t: type) : val :=
match t with
| Tfloat _ _ => Vfloat Float.zero
| _ => Vint Int.zero
end.
Definition eval_sgvar (id: ident) (ty: type) (rho: environ) :=
match Map.get (ge_of rho) id with
| Some b => Vptr b Ptrofs.zero
| None => Vundef
end.
(*
Lemma eval_sgvar_lemma1:
forall (F: val -> mpred) ofs id t,
F Vundef |-- FF ->
`F (`(offset_val ofs) (eval_sgvar id t)) =
(EX v:val, local (locald_denote (sgvar id v)) && `(F (offset_val ofs v))).
Proof.
intros.
extensionality rho.
unfold_lift. unfold local, lift1.
unfold eval_sgvar.
simpl.
apply pred_ext.
unfold sgvar_denote.
destruct (Map.get (ge_of rho) id).
apply exp_right with (Vptr b Ptrofs.zero).
normalize.
eapply derives_trans; [ apply H | ].
apply FF_left.
unfold sgvar_denote.
apply exp_left; intro; normalize.
destruct (Map.get (ge_of rho) id).
subst. auto.
contradiction.
Qed.
*)
Definition init_data2pred' {cs: compspecs}
(Delta: tycontext) (gv: globals) (d: init_data) (sh: share) (v: val) : mpred :=
match d with
| Init_int8 i => mapsto sh tuchar v (Vint (Int.zero_ext 8 i))
| Init_int16 i => mapsto sh tushort v (Vint (Int.zero_ext 16 i))
| Init_int32 i => mapsto sh tuint v (Vint i)
| Init_int64 i => mapsto sh tulong v (Vlong i)
| Init_float32 r => mapsto sh tfloat v (Vsingle r)
| Init_float64 r => mapsto sh tdouble v (Vfloat r)
| Init_space n => memory_block sh n v
| Init_addrof symb ofs =>
match (var_types Delta) ! symb, (glob_types Delta) ! symb with
| None, Some (Tarray t n' att) =>
mapsto sh (Tpointer t noattr) v (offset_val (Ptrofs.unsigned ofs) (gv symb))
| None, Some t => mapsto sh (Tpointer t noattr) v (offset_val (Ptrofs.unsigned ofs) (gv symb))
| Some _, Some _ => mapsto_ sh (Tpointer Tvoid noattr) v
| _, _ => TT
end
end.
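(* init_data2pred' restates the meaning of a single initializer in terms of
   mapsto / memory_block at a given address; init_data2pred_rejigger below
   shows that, under suitable side conditions, init_data2pred entails it. *)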
Lemma unsigned_repr_le: forall i, 0 <= i -> Int.unsigned (Int.repr i) <= i.
Proof.
intros.
rewrite Int.unsigned_repr_eq.
apply Zmod_le; try assumption.
pose proof Int.Z_mod_modulus_range i.
omega.
Qed.
Lemma mapsto_aligned:
forall t ch, access_mode t = By_value ch ->
forall sh b z p,
mapsto sh t (Vptr b z) p
|-- !! (Memdata.align_chunk ch | Ptrofs.unsigned z).
Proof.
intros.
unfold mapsto. simpl.
rewrite H.
if_tac.
simple_if_tac.
apply FF_left.
apply orp_left. normalize. clear H H0.
rewrite (res_predicates.address_mapsto_align).
match goal with |- ?A |-- ?B => change (predicates_hered.derives A B) end.
intros ? ?. destruct H. apply H0.
normalize.
clear.
rewrite (res_predicates.address_mapsto_align).
match goal with |- ?A |-- ?B => change (predicates_hered.derives A B) end.
intros ? ?. destruct H. apply H0.
simple_if_tac.
apply FF_left.
normalize.
Qed.
Lemma sizeof_Tpointer {cs: compspecs} : forall t,
sizeof (Tpointer t noattr) = if Archi.ptr64 then 8 else 4.
Proof.
intros.
simpl. reflexivity.
Qed.
Lemma init_data_size_space {cs: compspecs}:
forall t, init_data_size (Init_space (sizeof t)) = sizeof t.
Proof. intros.
pose proof (sizeof_pos t).
unfold init_data_size. rewrite Z.max_l; auto. omega.
Qed.
Lemma init_data2pred_rejigger {cs: compspecs}:
forall (Delta : tycontext) (idata : init_data) (rho : environ)
(sh : Share.t) (b : block) ofs v,
tc_environ Delta rho ->
v = Vptr b (Ptrofs.repr 0) ->
readable_share sh ->
init_data2pred idata sh (offset_val ofs v) rho
|-- init_data2pred' Delta (globals_of_env rho) idata sh (offset_val ofs v).
Proof.
intros until v.
intros H7 H8 RS.
assert (H6:=I).
unfold init_data2pred', init_data2pred.
rename H8 into H8'.
assert (H8: offset_val ofs v = Vptr b (Ptrofs.repr ofs)).
rewrite H8'; simpl. rewrite Ptrofs.add_zero_l; auto.
clear H8'.
simpl.
destruct idata; super_unfold_lift; try apply derives_refl.
* unfold init_data_size in H6.
assert (Ptrofs.max_unsigned = Ptrofs.modulus-1) by computable.
pose proof (Z.le_max_l z 0).
rewrite H8.
apply mapsto_zeros_memory_block; auto.
* destruct_var_types i eqn:Hv&Hv'; rewrite ?Hv, ?Hv';
destruct_glob_types i eqn:Hg&Hg'; rewrite ?Hg, ?Hg';
try solve [simpl; apply TT_right].
+ rewrite H8. cancel.
+ replace (offset_val (Ptrofs.unsigned i0) (globals_of_env rho i)) with (Vptr b0 i0).
replace (mapsto sh (Tpointer Tvoid noattr) (offset_val ofs v) (Vptr b0 i0))
with (mapsto sh (Tpointer t noattr) (offset_val ofs v) (Vptr b0 i0)).
destruct t; auto.
unfold mapsto; simpl.
destruct (offset_val ofs v); auto. rewrite !if_true by auto. rewrite andb_false_r.
apply derives_refl.
unfold mapsto; simpl.
destruct (offset_val ofs v); auto. rewrite !if_true by auto. rewrite andb_false_r.
reflexivity.
unfold globals_of_env. rewrite Hg'. simpl. rewrite Ptrofs.add_zero_l.
f_equal. rewrite Ptrofs.repr_unsigned; auto.
Qed.
Lemma readable_readonly2share: forall ro, readable_share (readonly2share ro).
Proof.
intros. apply initialize.readable_readonly2share.
Qed.
Lemma unpack_globvar {cs: compspecs}:
forall Delta gz i t gv idata,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some t ->
(complete_legal_cosu_type (gvar_info gv) && is_aligned cenv_cs ha_env_cs la_env_cs (gvar_info gv) 0 = true)%bool ->
gvar_volatile gv = false ->
gvar_info gv = t ->
gvar_init gv = idata :: nil ->
init_data_size idata <= sizeof t ->
sizeof t <= Ptrofs.max_unsigned ->
local (`and (tc_environ Delta) (fun rho =>gz = globals_of_env rho)) && globvar2pred gz (i, gv) |--
`(init_data2pred' Delta gz idata (readonly2share (gvar_readonly gv)) (gz i)).
Proof.
intros.
go_lowerx. subst gz.
eapply derives_trans; [eapply tc_globalvar_sound'; try eassumption | ].
assert (RS:= readable_readonly2share (gvar_readonly gv)).
forget (readonly2share (gvar_readonly gv)) as sh.
autorewrite with subst norm1 norm2; normalize.
unfold init_data_list2pred.
rewrite sepcon_emp.
destruct (globvar_eval_var _ _ _ _ H7 H H0) as [b [? ?]].
assert (globals_of_env rho i = offset_val 0 (globals_of_env rho i)).
unfold globals_of_env.
rewrite H9. reflexivity.
rewrite H10 at 1.
apply derives_trans with
(init_data2pred' Delta (globals_of_env rho) idata sh
(offset_val 0 (globals_of_env rho i))).
+ rewrite andb_true_iff in H1; destruct H1.
eapply init_data2pred_rejigger; eauto; try omega.
unfold globals_of_env; rewrite H9; reflexivity.
+
unfold init_data2pred'.
rewrite <- H10.
destruct idata; unfold_lift;
try (rewrite H8; simpl; rewrite Ptrofs.add_zero_l; auto);
try apply derives_refl.
Qed.
Fixpoint id2pred_star {cs: compspecs}
(Delta: tycontext) (gz: globals) (sh: share) (v: val) (dl: list init_data) : environ->mpred :=
match dl with
| d::dl' => `(init_data2pred' Delta gz d sh v)
* id2pred_star Delta gz sh (offset_val (init_data_size d) v) dl'
| nil => emp
end.
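(* id2pred_star lays the initializers out end to end: each step asserts
   init_data2pred' for the head at address v and recurses at v advanced by
   init_data_size of the head. *)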
Arguments id2pred_star cs Delta gz sh v dl rho / .
Lemma init_data_size_pos : forall a, init_data_size a >= 0.
Proof.
destruct a; simpl; try omega.
pose proof (Zmax_spec z 0).
destruct (zlt 0 z); omega.
destruct Archi.ptr64; omega.
Qed.
Lemma init_data_list_size_pos : forall a, init_data_list_size a >= 0.
Proof.
induction a; simpl.
omega.
pose proof (init_data_size_pos a); omega.
Qed.
Definition globvar_all_aligned {cs: compspecs} gv : bool :=
forallb (fun a =>andb
(init_data_size a mod hardware_alignof ha_env_cs (gvar_info gv) =? 0)
(init_data_size a mod alignof (gvar_info gv) =? 0))
(gvar_init gv).
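(* Checks that every initializer's size is a multiple of both the hardware
   alignment and the alignment of the variable's type, so that the running
   offset in unpack_globvar_star stays aligned after each initializer. *)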
Lemma unpack_globvar_star {cs: compspecs}:
forall Delta gz i gv,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some (gvar_info gv) ->
(complete_legal_cosu_type (gvar_info gv) && is_aligned cenv_cs ha_env_cs la_env_cs (gvar_info gv) 0)% bool = true ->
gvar_volatile gv = false ->
(globvar_all_aligned gv = true) ->
init_data_list_size (gvar_init gv) <= sizeof (gvar_info gv) <= Ptrofs.max_unsigned ->
local (`and (tc_environ Delta) (fun rho =>gz = globals_of_env rho)) && globvar2pred gz (i, gv) |--
id2pred_star Delta gz (readonly2share (gvar_readonly gv)) (gz i) (gvar_init gv).
Proof.
intros until 4.
intros H5.
unfold globvar_all_aligned in H5.
remember (gvar_info gv) as t eqn:H3; symmetry in H3.
remember (gvar_init gv) as idata eqn:H4; symmetry in H4.
intros.
go_lowerx. subst gz.
eapply derives_trans; [eapply tc_globalvar_sound'; eassumption | ].
normalize.
autorewrite with subst norm1 norm2; normalize.
match goal with |- _ |-- ?F _ _ _ _ _ _ _ => change F with @id2pred_star end.
normalize.
autorewrite with subst norm1 norm2; normalize.
assert (RS:= readable_readonly2share (gvar_readonly gv)).
forget (readonly2share (gvar_readonly gv)) as sh.
set (ofs:=0%Z).
assert (alignof t | Ptrofs.unsigned (Ptrofs.repr ofs)) by (subst ofs; simpl; apply Z.divide_0_r).
destruct (globvar_eval_var _ _ _ _ H7 H H0) as [b [_ H9']].
unfold globals_of_env. rewrite H9'.
remember (Vptr b Ptrofs.zero) as x.
assert (H10: x = offset_val ofs x) by (subst ofs x; reflexivity).
rewrite H10 at 1.
clear H10.
assert (H11: init_data_list_size idata + ofs <= sizeof t) by (unfold ofs; omega).
assert (H12: sizeof t <= Ptrofs.max_unsigned) by omega.
assert (0 <= ofs) by (unfold ofs; omega).
fold (globals_of_env rho).
match goal with |- _ |-- ?F _ _ _ _ _ _ _ => change F with @id2pred_star end.
replace x with (offset_val ofs x) at 2. 2: subst x; normalize.
change 0 with ofs in H1.
clearbody ofs.
revert ofs H1 H5 H8 H9 H9' H11 H12.
clear dependent gv. clear H H0 H6.
induction idata; simpl; auto; intros.
match goal with |- _ |-- _ * ?F _ _ _ _ _ _ _ => change F with @id2pred_star end.
apply sepcon_derives.
*
clear IHidata.
rewrite andb_true_iff in H1 by (pose proof (init_data_list_size_pos idata); omega).
pose proof (init_data_size_pos a).
pose proof (init_data_list_size_pos idata).
assert (Ptrofs.max_unsigned = Ptrofs.modulus-1) by computable.
destruct H1.
rewrite Ptrofs.unsigned_repr in H8 by omega.
eapply init_data2pred_rejigger; eauto; try tauto;
clear x Heqx; clear RS H7 H9' b.
* specialize (IHidata (ofs + init_data_size a)).
rewrite offset_offset_val.
pose proof (init_data_list_size_pos idata).
pose proof (init_data_size_pos a).
rewrite Ptrofs.unsigned_repr in H8 by omega.
rewrite !andb_true_iff in H5.
destruct H5 as [[H5a H5b] H5].
assert (hardware_alignof ha_env_cs t | init_data_size a). {
clear - H5a.
assert (hardware_alignof ha_env_cs t > 0). {
eapply hardware_alignof_pos; eauto.
apply cenv_consistent.
apply ha_env_cs_consistent.
apply ha_env_cs_complete.
}
rewrite Z.eqb_eq in H5a.
rewrite Z.mod_divide in H5a by omega. auto.
}
assert (alignof t | init_data_size a). {
clear - H5b.
pose proof (alignof_pos t).
rewrite Z.eqb_eq in H5b.
rewrite Z.mod_divide in H5b by omega. auto.
}
assert (Halign: is_aligned cenv_cs ha_env_cs la_env_cs t (ofs + init_data_size a) = true). {
clear - H1 H2.
rewrite andb_true_iff in H1; destruct H1; auto.
unfold is_aligned, is_aligned_aux in *.
rewrite andb_true_iff in H0|-*; destruct H0; split; auto.
rewrite Z.eqb_eq in H1|-*.
destruct (zeq (hardware_alignof ha_env_cs t) 0).
rewrite e. apply Zmod_0_r.
rewrite Z.mod_divide in H1|-* by auto.
apply Z.divide_add_r; auto.
}
apply IHidata; clear IHidata; try omega; auto.
rewrite andb_true_iff in H1|-*; destruct H1; split; auto.
rewrite Ptrofs.unsigned_repr by omega.
apply Z.divide_add_r; auto.
Qed.
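(* [inttype2init_data] maps an integer size class to the matching CompCert
   initializer constructor; booleans are stored as bytes, but [notboolsize]
   below rules [IBool] out of the array lemmas. *)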
Definition inttype2init_data (sz: intsize) : (int -> init_data) :=
match sz with
| IBool => Init_int8
| I8 => Init_int8
| I16 => Init_int16
| I32 => Init_int32
end.
Definition notboolsize (sz: intsize) : Prop :=
match sz with IBool => False | _ => True end.
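(* The next two lemmas repackage an [id2pred_star] built from integer
   initializers as a [data_at] for an array of [Tint sz sign noattr]
   (respectively the special case [tint]). *)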
Lemma id2pred_star_ZnthV_Tint {cs: compspecs} :
forall Delta gz sh n (v: val) (data: list int) sz sign mdata
(NBS: notboolsize sz),
n = Zlength mdata ->
mdata = map (inttype2init_data sz) data ->
!! isptr v && !! align_compatible (Tint sz sign noattr) v &&
!! (offset_strict_in_range (sizeof (Tint sz sign noattr) * n)) v &&
id2pred_star Delta gz sh v mdata |--
`(data_at sh (tarray (Tint sz sign noattr) n)
(map (Basics.compose Vint (Cop.cast_int_int sz sign)) data) v).
Proof.
intros. subst n mdata.
replace (Zlength (map (inttype2init_data sz) data)) with (Zlength data)
by (repeat rewrite Zlength_correct; rewrite map_length; auto).
go_lowerx.
match goal with |- ?F _ _ _ _ _ _ _ |-- _ => change F with @id2pred_star end.
change (offset_strict_in_range (sizeof (Tint sz sign noattr) * Zlength data) v) in H1.
assert (offset_strict_in_range (sizeof (Tint sz sign noattr) * 0) v) by
(unfold offset_strict_in_range; destruct v; auto; pose proof Ptrofs.unsigned_range i; omega).
unfold tarray.
set (t := Tint sz sign noattr) in *.
revert v H H0 H1 H2; induction data; intros.
*
rewrite Zlength_nil. unfold data_at, field_at; simpl.
unfold at_offset; simpl.
unfold nested_field_type; simpl.
rewrite data_at_rec_eq. unfold aggregate_pred.aggregate_pred.array_pred.
unfold aggregate_pred.array_pred. simpl.
repeat apply andp_right; auto; try apply prop_right; try reflexivity.
hnf. simpl.
split3; auto.
split3; auto.
hnf. destruct v; auto. replace (sizeof (Tarray (Tint sz sign noattr) 0 noattr)) with 0 by (destruct sz; simpl; auto).
pose proof Ptrofs.unsigned_range i; omega.
hnf; destruct v; auto. apply align_compatible_rec_Tarray. intros. omega.
*
rewrite Zlength_cons.
simpl map.
unfold id2pred_star; fold @id2pred_star.
erewrite (split2_data_at_Tarray sh t (Z.succ (Zlength data)) 1).
4: rewrite sublist_same.
4: apply eq_refl.
2: list_solve. 2: list_solve. 2: auto. 2: list_solve. 2: apply eq_refl. 2: apply eq_refl.
rewrite (sublist_one) by list_solve.
autorewrite with sublist.
rewrite sublist_1_cons.
rewrite sublist_same by list_solve.
apply sepcon_derives.
+
clear IHdata.
fold (tarray t 1). erewrite data_at_singleton_array_eq by apply eq_refl.
rewrite <- (mapsto_data_at sh t (Vint (Cop.cast_int_int sz sign a)) (Vint (Cop.cast_int_int sz sign a)) v); try reflexivity; auto.
2: subst t; destruct sz, sign; reflexivity.
2:{
destruct v; auto. red.
assert (sizeof t > 0).
subst t; simpl. destruct sz; computable.
clearbody t.
clear - H1 H3.
rewrite Zlength_cons in H1. simpl in H1.
unfold Z.succ in H1. rewrite Z.mul_add_distr_l in H1.
pose proof (Zlength_nonneg data).
rewrite Z.mul_1_r in H1.
assert (0 <= sizeof t * Zlength data)
by (apply Z.mul_nonneg_nonneg; omega).
omega.
}
subst t.
normalize.
unfold init_data2pred', inttype2init_data.
destruct sz; try contradiction NBS;
unfold_lift; unfold tuchar, tushort, tuint; rewrite <- (mapsto_unsigned_signed Unsigned sign sh);
apply derives_refl.
+
replace (init_data_size (inttype2init_data sz a)) with (sizeof t)
by (subst t; destruct sz; simpl; auto).
assert (H8: align_compatible t (offset_val (sizeof t) v)
/\ offset_strict_in_range (sizeof t * Zlength data) (offset_val (sizeof t) v)
/\ offset_strict_in_range (sizeof t * 0) (offset_val (sizeof t) v)). {
clear IHdata.
rewrite Zlength_cons in H1. unfold Z.succ in H1.
rewrite Z.mul_add_distr_l in H1. rewrite Z.mul_1_r in H1.
rewrite Z.mul_0_r in H2.
pose proof (sizeof_pos t). pose proof (Zlength_nonneg data).
destruct v; try contradiction.
pose proof (Ptrofs.unsigned_range i).
assert (Ptrofs.max_unsigned = Ptrofs.modulus-1) by computable.
rewrite Z.mul_0_r in *.
assert (0 <= sizeof t * Zlength data) by (apply Z.mul_nonneg_nonneg; omega).
unfold offset_strict_in_range, offset_val in *.
unfold align_compatible in H0|-*.
unfold Ptrofs.add.
rewrite (Ptrofs.unsigned_repr (sizeof t))
by (unfold Ptrofs.max_unsigned, Ptrofs.modulus, Ptrofs.wordsize, Wordsize_Ptrofs.wordsize;
clear; subst t; destruct sz,sign, Archi.ptr64; simpl; omega).
rewrite Ptrofs.unsigned_repr.
split3; try omega.
assert (exists ch, access_mode t = By_value ch)
by (clear; subst t; destruct sz,sign; eexists; reflexivity).
destruct H8 as [ch ?].
apply align_compatible_rec_by_value_inv with (ch:=ch) in H0; auto.
apply align_compatible_rec_by_value with (ch:=ch); auto.
apply Z.divide_add_r; auto.
clear - H8. subst t. destruct sz,sign; inv H8; simpl; apply Z.divide_refl.
unfold Ptrofs.max_unsigned.
omega.
}
destruct H8 as [H8a [H8b H8c]].
eapply derives_trans; [ apply IHdata | ]; clear IHdata; auto.
replace (Z.succ (Zlength data) - 1) with (Zlength data) by (clear; omega).
apply derives_refl'; f_equal.
unfold field_address0.
rewrite if_true.
unfold offset_val. destruct v; simpl; auto. f_equal.
subst t; destruct sz,sign; reflexivity.
eapply field_compatible0_cons_Tarray.
reflexivity.
hnf; simpl. split3; auto.
destruct v; try contradiction.
split3; auto; red.
unfold sizeof; fold sizeof.
pose proof (Zlength_nonneg data).
rewrite Z.max_r by omega.
unfold offset_strict_in_range in H1. rewrite Zlength_cons in H1.
omega.
apply align_compatible_rec_Tarray; intros.
unfold align_compatible, offset_val in H8a.
assert (exists ch, access_mode t = By_value ch)
by (clear; subst t; destruct sz,sign; eexists; reflexivity).
destruct H4 as [ch ?].
eapply align_compatible_rec_by_value; try eassumption.
simpl in H0.
eapply align_compatible_rec_by_value_inv in H0; try eassumption.
apply Z.divide_add_r; auto.
apply Z.divide_mul_l; auto.
clear - t H4.
subst t.
destruct sz,sign; inv H4; try apply Z.divide_refl.
pose proof (Zlength_nonneg data); omega.
Qed.
Lemma id2pred_star_ZnthV_tint {cs: compspecs}:
forall Delta gz sh n (v: val) (data: list int) mdata,
n = Zlength mdata ->
mdata = map Init_int32 data ->
!! isptr v && !! align_compatible tint v &&
!! offset_strict_in_range (sizeof tint * n) v &&
id2pred_star Delta gz sh v mdata |--
`(data_at sh (tarray tint n) (map Vint data) v).
Proof. intros; apply id2pred_star_ZnthV_Tint; auto; apply Coq.Init.Logic.I.
Qed.
Lemma offset_zero_globals_of_env: forall rho i,
offset_val 0 (globals_of_env rho i) = globals_of_env rho i.
Proof.
intros.
unfold globals_of_env.
destruct (Map.get (ge_of rho) i); simpl; auto.
Qed.
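(* [unpack_globvar_array] specializes [unpack_globvar_star] to globals of
   integer-array type, going all the way to a [data_at]. *)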
Lemma unpack_globvar_array {cs: compspecs}:
forall t sz sign (data: list int) n Delta gz i gv,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some (gvar_info gv) ->
gvar_info gv = tarray t n ->
gvar_volatile gv = false ->
globvar_all_aligned gv = true ->
t = Tint sz sign noattr ->
forall (NBS: notboolsize sz),
n = Zlength (gvar_init gv) ->
gvar_init gv = map (inttype2init_data sz) data ->
init_data_list_size (gvar_init gv) <= sizeof (gvar_info gv) <= Ptrofs.max_unsigned ->
 local (`and (tc_environ Delta) (fun rho => gz = globals_of_env rho)) && globvar2pred gz (i, gv) |--
`(data_at (readonly2share (gvar_readonly gv))
(tarray (Tint sz sign noattr) n)
(map (Basics.compose Vint (Cop.cast_int_int sz sign)) data)
(gz i)).
Proof.
intros until 4. intros Hgal; intros. subst t.
match goal with |- ?A |-- _ =>
erewrite (add_andp A (local (tc_environ Delta)))
end.
2: solve [apply andp_left1; unfold local, lift1; intro rho; apply prop_derives; intros [? ?]; auto].
match goal with |- ?A |-- _ =>
erewrite (add_andp A (local (`isptr (eval_var i (tarray (Tint sz sign noattr) n)))))
end.
2:{
go_lowerx. apply prop_right. eapply eval_var_isptr; eauto.
right; split; auto. rewrite <- H1; auto.
}
eapply derives_trans.
apply andp_right.
apply andp_left1. apply andp_left1. apply andp_left1. apply derives_refl.
apply andp_derives; [ apply andp_derives; [ eapply unpack_globvar_star; try eassumption; try reflexivity | apply derives_refl] | apply derives_refl].
* rewrite andb_true_iff.
split; rewrite H1.
reflexivity.
unfold is_aligned, is_aligned_aux. rewrite andb_true_iff; split.
destruct sz, sign; simpl; auto.
rewrite Z.mod_0_l. reflexivity.
destruct sz, sign; simpl; computable.
*
(* rewrite H1.*) (* rewrite H3.*) rewrite H5.
rewrite <- andp_assoc.
apply andp_left1.
go_lowerx.
eapply derives_trans; [| apply (id2pred_star_ZnthV_Tint Delta (globals_of_env rho)); auto].
instantiate (1 := rho).
2: rewrite <- H5; auto.
match goal with |- ?F _ _ _ _ _ _ _ |-- _ => change F with @id2pred_star end.
subst gz.
normalize. clear H8.
rewrite H1 in H6.
assert (headptr (globals_of_env rho i)). {
unfold globals_of_env. destruct (globvar_eval_var _ _ _ _ H3 H H0) as [b [_ H10]]. rewrite H10.
exists b; auto.
}
assert (align_compatible (Tint sz sign noattr) (globals_of_env rho i)). {
destruct H7 as [b ?]. rewrite H7.
assert (exists ch, access_mode (Tint sz sign noattr) = By_value ch)
by (clear; destruct sz,sign; eexists; reflexivity).
destruct H8 as [ch ?].
eapply align_compatible_rec_by_value; try eassumption.
rewrite Ptrofs.unsigned_zero.
apply Z.divide_0_r.
}
apply headptr_isptr in H7.
simpl andp. fold (sizeof (Tint sz sign noattr)).
assert (offset_strict_in_range (sizeof (Tint sz sign noattr) * n) (globals_of_env rho i)). {
unfold offset_strict_in_range.
destruct (globals_of_env rho i) eqn:?H; auto.
rewrite H5 in H6; simpl in H6.
pose proof initial_world.zlength_nonneg _ (gvar_init gv).
rewrite Z.max_r in H6 by omega.
fold (sizeof (Tint sz sign noattr)) in H6.
unfold Ptrofs.max_unsigned in H6.
pose proof init_data_list_size_pos (gvar_init gv).
simpl in H8.
unfold globals_of_env in H9. destruct (Map.get (ge_of rho) i) eqn:?H; inv H9.
rewrite Ptrofs.unsigned_zero.
split; try omega.
rewrite Z.add_0_l.
apply Z.mul_nonneg_nonneg.
clear; pose proof (sizeof_pos (Tint sz sign noattr)); omega.
apply Zlength_nonneg.
}
normalize.
match goal with |- _ |-- ?F _ _ _ _ _ _ _ => change F with @id2pred_star end.
apply derives_refl.
Qed.
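(* The [process_globvar*] lemmas below are the semax-level counterparts:
   each peels one [(i, gv)] entry off [globvars2pred] in a precondition and
   replaces it with the corresponding unpacked assertion. *)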
Lemma process_globvar':
forall {cs: compspecs} {Espec: OracleKind} Delta P Q R (i: ident)
gz gv gvs SF c Post (idata : init_data) t,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some t ->
(complete_legal_cosu_type (gvar_info gv) && is_aligned cenv_cs ha_env_cs la_env_cs (gvar_info gv) 0)%bool = true ->
gvar_volatile gv = false ->
gvar_info gv = t ->
gvar_init gv = (idata::nil) ->
init_data_size idata <= sizeof t ->
sizeof t <= Ptrofs.max_unsigned ->
semax Delta (PROPx P (LOCALx (gvars gz::Q) (SEPx R))
* id2pred_star Delta gz
(readonly2share (gvar_readonly gv))
(gz i) (idata ::nil) * globvars2pred gz gvs * SF)
c Post ->
semax Delta (PROPx P (LOCALx (gvars gz::Q) (SEPx R))
* globvars2pred gz ((i,gv)::gvs) * SF)
c Post.
Proof.
intros.
eapply semax_pre; [ | apply H7]; clear H7.
pose proof (unpack_globvar Delta gz i t gv idata H H0 H1 H2 H3 H4 H5 H6).
clear H H0 H1 H2 H3 H4 H5 H6.
rewrite <- insert_local.
forget (PROPx P (LOCALx Q (SEPx R))) as PQR.
unfold globvars2pred.
change (lift_S (LiftEnviron Prop)) with environ in *.
unfold lift2.
change (fun rho : environ => gz = globals_of_env rho)
with (locald_denote (gvars gz)) in H7|-*.
go_lowerx.
normalize.
apply sepcon_derives; auto.
rewrite sepcon_assoc.
apply sepcon_derives; auto.
apply sepcon_derives; auto.
unfold local, lift1 in H7. specialize (H7 rho). simpl in H7. rewrite prop_true_andp in H7 by (split; auto).
apply H7.
Qed.
Lemma process_globvar_array:
forall {cs: compspecs} {Espec: OracleKind} Delta gz P Q R (i: ident)
gv gvs SF c Post (n: Z) (t: type) (sz : intsize) (sign : signedness) (data : list int),
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some (gvar_info gv) ->
gvar_info gv = tarray t n ->
gvar_volatile gv = false ->
globvar_all_aligned gv = true ->
t = Tint sz sign noattr ->
notboolsize sz ->
n = Zlength (gvar_init gv) ->
gvar_init gv = map (inttype2init_data sz) data ->
init_data_list_size (gvar_init gv) <= sizeof (gvar_info gv) <=
Ptrofs.max_unsigned ->
semax Delta (PROPx P (LOCALx (gvars gz :: Q)
(SEPx ((data_at
(readonly2share (gvar_readonly gv))
(tarray (Tint sz sign noattr) n)
(map (Vint oo Cop.cast_int_int sz sign) data) (gz i))
:: R)))
* globvars2pred gz gvs * SF)
c Post ->
semax Delta (PROPx P (LOCALx (gvars gz :: Q) (SEPx R))
* globvars2pred gz ((i,gv)::gvs) * SF)
c Post.
Proof.
intros until 4. intro Hgal; intros.
eapply semax_pre; [ | apply H8]. clear H8.
pose proof (unpack_globvar_array _ _ _ _ _ _ gz _ _ H H0 H1 H2 Hgal H3 H4 H5 H6 H7).
clear H H0 H1 H2 H3 H4 H5 H6 H7.
rewrite <- !insert_local.
rewrite <- insert_SEP.
forget (PROPx P (LOCALx Q (SEPx R))) as PQR.
unfold globvars2pred.
change (lift_S (LiftEnviron Prop)) with environ in *.
unfold lift2.
change (fun rho : environ => gz = globals_of_env rho)
with (locald_denote (gvars gz)) in H8|-*.
go_lowerx.
normalize.
apply sepcon_derives; auto.
pull_right (PQR rho).
apply sepcon_derives; auto.
apply sepcon_derives; auto.
unfold local, lift1 in H8. specialize (H8 rho). simpl in H8. rewrite prop_true_andp in H8 by (split; auto).
apply H8.
Qed.
Lemma process_globvar_star':
forall {cs: compspecs} {Espec: OracleKind} Delta gz P Q R (i: ident)
gv gvs SF c Post,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some (gvar_info gv) ->
(complete_legal_cosu_type (gvar_info gv) && is_aligned cenv_cs ha_env_cs la_env_cs (gvar_info gv) 0)%bool = true ->
gvar_volatile gv = false ->
globvar_all_aligned gv = true ->
init_data_list_size (gvar_init gv) <= sizeof (gvar_info gv) <=
Ptrofs.max_unsigned ->
semax Delta (PROPx P (LOCALx (gvars gz :: Q)
(SEPx R))
* (id2pred_star Delta gz (readonly2share (gvar_readonly gv))
(gz i) (gvar_init gv))
* globvars2pred gz gvs * SF)
c Post ->
semax Delta (PROPx P (LOCALx (gvars gz :: Q) (SEPx R))
* globvars2pred gz ((i,gv)::gvs) * SF)
c Post.
Proof.
intros.
eapply semax_pre; [ | apply H5]. clear H5.
pose proof (unpack_globvar_star _ gz _ _ H H0 H1 H2 H3 H4).
clear H H0 H1 H2 H3 H4.
rewrite <- !insert_local.
forget (PROPx P (LOCALx Q (SEPx R))) as PQR.
unfold globvars2pred.
change (lift_S (LiftEnviron Prop)) with environ in *.
unfold lift2.
change (fun rho : environ => gz = globals_of_env rho)
with (locald_denote (gvars gz)) in H5|-*.
go_lowerx.
normalize.
apply sepcon_derives; auto.
pull_right (PQR rho).
apply sepcon_derives; auto.
apply sepcon_derives; auto.
unfold local, lift1 in H5. specialize (H5 rho). simpl in H5.
rewrite prop_true_andp in H5 by (split; auto).
apply H5.
Qed.
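(* [init_datalist2pred'] is an offset-passing variant of [id2pred_star]
   (an [mpred] rather than an [environ -> mpred]); it is what
   [halfprocess_globvar_star] leaves in the SEP clause. *)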
Fixpoint init_datalist2pred' {cs: compspecs}
(Delta: tycontext) (gv: globals) (dl: list init_data) (sh: share) (ofs: Z) (v: val) : mpred :=
match dl with
| d::dl' => init_data2pred' Delta gv d sh (offset_val ofs v)
* init_datalist2pred' Delta gv dl' sh (ofs + init_data_size d) v
| nil => emp
end.
Lemma halfprocess_globvar_star:
forall {cs: compspecs} {Espec: OracleKind} Delta gz P Q R (i: ident)
gv gvs SF c Post,
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some (gvar_info gv) ->
(complete_legal_cosu_type (gvar_info gv) && is_aligned cenv_cs ha_env_cs la_env_cs (gvar_info gv) 0)%bool = true ->
gvar_volatile gv = false ->
init_data_list_size (gvar_init gv) <= sizeof (gvar_info gv) <=
Ptrofs.max_unsigned ->
semax Delta (PROPx P (LOCALx (gvars gz :: Q)
(SEPx (init_datalist2pred' Delta gz (gvar_init gv) (readonly2share (gvar_readonly gv)) 0 (gz i)
::R)))
* globvars2pred gz gvs * SF)
c Post ->
semax Delta (PROPx P (LOCALx (gvars gz :: Q) (SEPx R))
* globvars2pred gz ((i,gv)::gvs) * SF)
c Post.
Proof.
intros.
eapply semax_pre; [ | apply H4]. clear H4.
unfold globvars2pred; fold globvars2pred.
go_lowerx.
unfold lift2. simpl.
normalize.
rewrite prop_true_andp by (split; auto).
cancel.
unfold globvar2pred.
simpl.
rewrite H2.
pose proof (readable_readonly2share (gvar_readonly gv)).
forget (readonly2share (gvar_readonly gv)) as sh.
rewrite <- offset_zero_globals_of_env at 1.
set (ofs:=0).
clearbody ofs.
revert ofs; induction (gvar_init gv); intros.
apply derives_refl.
apply sepcon_derives.
destruct (globvar_eval_var _ _ _ _ H4 H H0) as [b [? ?]].
eapply init_data2pred_rejigger; eauto.
unfold globals_of_env.
rewrite H10. reflexivity.
fold init_data_list2pred.
fold init_datalist2pred'.
spec IHl.
simpl in H3.
pose proof (init_data_size_pos a).
omega.
eapply derives_trans; [ | apply IHl].
rewrite offset_offset_val.
auto.
Qed.
Lemma map_instantiate:
forall {A B} (f: A -> B) (x: A) (y: list B) z,
y = map f z -> f x :: y = map f (x :: z).
Proof. intros. subst. reflexivity. Qed.
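(* [main_pre_start] (and its extended-oracle variant below) unfold the
   canonical precondition of [main] into a PROP/LOCAL/SEP assertion conjoined
   with the global-variable predicate, the starting point for
   [expand_main_pre]. *)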
Lemma main_pre_start:
forall prog u gv,
main_pre prog u gv = (PROP() LOCAL(gvars gv) SEP())%assert * globvars2pred gv (prog_vars prog).
Proof.
intros.
unfold main_pre.
unfold globvars2pred, PROPx, LOCALx, SEPx.
unfold lift2.
extensionality rho.
simpl.
normalize.
unfold gvars_denote. unfold_lift. unfold local, lift1.
fold (globals_of_env rho).
apply pred_ext; intros; normalize.
rewrite prop_true_andp by auto.
auto.
Qed.
Lemma main_pre_ext_start:
forall {Espec : OracleKind} prog u gv ora,
main_pre_ext prog ora u gv = (PROP() LOCAL(gvars gv) SEP(has_ext ora))%assert * globvars2pred gv (prog_vars prog).
Proof.
intros.
unfold main_pre_ext.
unfold globvars2pred, PROPx, LOCALx, SEPx.
unfold lift2.
extensionality rho.
simpl.
normalize.
unfold gvars_denote. unfold_lift. unfold local, lift1.
fold (globals_of_env rho).
rewrite sepcon_comm.
apply pred_ext; intros; normalize.
rewrite prop_true_andp by auto.
auto.
Qed.
Lemma process_globvar_space:
forall {cs: compspecs} {Espec: OracleKind} Delta P Q R (i: ident)
gz gv gvs SF c Post t,
gvar_info gv = t ->
(var_types Delta) ! i = None ->
(glob_types Delta) ! i = Some t ->
(complete_legal_cosu_type (gvar_info gv) && is_aligned cenv_cs ha_env_cs la_env_cs (gvar_info gv) 0)%bool = true ->
gvar_volatile gv = false ->
gvar_init gv = (Init_space (sizeof t)::nil) ->
sizeof t <= Ptrofs.max_unsigned ->
semax Delta (PROPx P (LOCALx (gvars gz::Q) (SEPx (data_at_ (readonly2share (gvar_readonly gv)) t (gz i) :: R)))
* globvars2pred gz gvs * SF)
c Post ->
semax Delta (PROPx P (LOCALx (gvars gz::Q) (SEPx R))
* globvars2pred gz ((i,gv)::gvs) * SF)
c Post.
Proof.
intros until t. intros H3; intros.
eapply semax_pre; [ | apply H6]; clear H6.
rewrite <- insert_SEP.
rewrite <- insert_local.
forget (PROPx P (LOCALx Q (SEPx R))) as PQR.
assert (H7 := unpack_globvar Delta gz i t gv _ H H0 H1 H2 H3 H4).
spec H7.
simpl. pose proof (sizeof_pos t). rewrite Z.max_l by omega. omega.
specialize (H7 H5).
go_lowerx.
unfold globvars2pred; fold globvars2pred.
simpl map.
unfold fold_right; fold (fold_right sepcon emp (map (globvar2pred gz) gvs)).
unfold lift2.
normalize.
apply sepcon_derives; auto.
pull_left (PQR rho).
rewrite sepcon_assoc.
apply sepcon_derives; auto.
apply sepcon_derives; auto.
specialize (H7 rho).
unfold_lift in H7. unfold local, lift1 in H7.
simpl in H7.
rewrite prop_true_andp in H7 by auto.
eapply derives_trans; [ apply H7 | ].
unfold_lift.
assert_PROP (isptr (globals_of_env rho i)) by (saturate_local; apply prop_right; auto).
assert (headptr (globals_of_env rho i)).
hnf. unfold globals_of_env in H9|-*. destruct (Map.get (ge_of rho) i); try contradiction. eauto.
rewrite memory_block_data_at_; auto.
subst t.
rewrite andb_true_iff in H1; destruct H1.
pose proof (la_env_cs_sound 0 (gvar_info gv) H1 H3).
apply headptr_field_compatible; auto.
apply I.
assert (Ptrofs.modulus = Ptrofs.max_unsigned + 1) by computable.
omega.
Qed.
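(* [process_one_globvar] tries each processing lemma above in turn, peeling
   one global off [globvars2pred] and then normalizing the resulting share
   expressions to [Ews]/[Ers]. *)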
Ltac process_one_globvar :=
first
[ simple eapply process_globvar_space;
[simpl; reflexivity | reflexivity | reflexivity | reflexivity | reflexivity | reflexivity | simpl; computable | ]
| simple eapply process_globvar';
[reflexivity | reflexivity | reflexivity | reflexivity | reflexivity | reflexivity
| reflexivity | compute; congruence | ]
| simple eapply process_globvar_array;
[reflexivity | reflexivity | reflexivity | reflexivity | reflexivity | reflexivity | apply Coq.Init.Logic.I
| compute; clear; congruence
| repeat eapply map_instantiate; symmetry; apply map_nil
| compute; split; clear; congruence | ]
| simple eapply process_globvar_star';
[reflexivity | reflexivity | reflexivity | reflexivity
| reflexivity | compute; split; clear; congruence
| simpl gvar_info; simpl gvar_readonly; simpl readonly2share;
change (Share.lub extern_retainer Tsh) with Ews
]
| simple eapply halfprocess_globvar_star;
[reflexivity | reflexivity | reflexivity | reflexivity|
simpl; compute; split; clear; congruence | ]
];
change (Share.lub extern_retainer _) with Ews;
change (Share.lub extern_retainer _) with Ers;
try change (Vint oo _) with (Vint oo id);
fold_types;
rewrite ?Combinators.compose_id_right.
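(* The [move_globfield_into_SEP*] lemmas shuffle the conjuncts produced by
   unpacking a global into the SEP clause of the canonical assertion. *)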
Lemma move_globfield_into_SEP:
forall {cs: compspecs}{Espec: OracleKind} Delta P Q R
(S1: mpred) (S2 S3 S4: environ -> mpred) c Post,
semax Delta (PROPx P (LOCALx Q (SEPx (S1::R))) * S2 * S3 * S4) c Post ->
semax Delta (PROPx P (LOCALx Q (SEPx R)) * (`S1 * S2) * S3 * S4) c Post.
Proof.
intros.
eapply semax_pre0; [ | eassumption].
rewrite <- insert_SEP.
rewrite <- !sepcon_assoc.
pull_left (`S1).
auto.
Qed.
Lemma move_globfield_into_SEP':
forall {cs: compspecs}{Espec: OracleKind} Delta P Q R
(f: val -> localdef)
(g: val -> mpred)
(h: val -> val) (S2 S3 S4: environ -> mpred) c Post,
(forall x: val,
semax Delta (PROPx P (LOCALx (f x :: Q) (SEPx ((g (h x))::R))) * S2 * S3 * S4) c Post) ->
semax Delta (PROPx P (LOCALx Q (SEPx R)) * ((EX x:val, local (locald_denote (f x)) && `(g (h x))) * S2) * S3 * S4) c Post.
Proof.
intros.
normalize.
apply extract_exists_pre; intro x.
eapply semax_pre0; [ | apply (H x)].
clear.
rewrite <- insert_SEP.
rewrite <- insert_local.
rewrite local_sepcon_assoc1.
rewrite !local_sepcon_assoc2.
rewrite !local_sepcon_assoc1.
apply andp_derives; auto.
rewrite <- !sepcon_assoc.
pull_left (`(g (h x))).
apply derives_refl.
Qed.
(*
Lemma move_globfield_into_SEP'':
forall {cs: compspecs}{Espec: OracleKind} Delta P Q R
(i: ident) (v: val)
(g: val -> mpred)
(h: val -> val) (S2 S3 S4: environ -> mpred) c Post,
In (gvar i v) Q ->
semax Delta (PROPx P (LOCALx Q (SEPx ((g (h v))::R))) * S2 * S3 * S4) c Post ->
semax Delta (PROPx P (LOCALx Q (SEPx R)) * ((EX x:val, local (locald_denote (gvar i x)) && `(g (h x))) * S2) * S3 * S4) c Post.
Proof.
intros.
normalize.
apply extract_exists_pre; intro x.
eapply semax_pre0; [ | apply H0].
clear - H.
rewrite <- insert_SEP.
go_lowerx.
normalize.
cancel.
clear - H2 H H1.
revert H H1; induction Q; intros.
inv H. simpl in H. destruct H. subst a.
simpl in H1.
destruct H1.
clear - H2 H.
hnf in H,H2.
destruct (Map.get (ve_of rho) i) as [[? ?]|]. contradiction.
destruct (Map.get (ge_of rho) i); try contradiction.
subst. auto.
destruct H1.
auto.
Qed.
*)
Lemma move_globfield_into_SEP0:
forall {cs: compspecs}{Espec: OracleKind} Delta
(S0 S3 S4: environ -> mpred) c Post,
semax Delta (S0 * S3 * S4) c Post ->
semax Delta (S0 * emp * S3 * S4) c Post.
Proof.
intros.
rewrite sepcon_emp; auto.
Qed.
Lemma offset_val_unsigned_repr: forall i p,
offset_val (Ptrofs.unsigned (Ptrofs.repr i)) p = offset_val i p.
Proof.
intros.
unfold offset_val.
unfold Ptrofs.add.
rewrite Ptrofs.repr_unsigned.
auto.
Qed.
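(* [process_idstar] runs [process_one_globvar], simplifies the resulting
   [id2pred_star], and moves its conjuncts one by one into SEP. *)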
Ltac process_idstar :=
process_one_globvar;
lazymatch goal with |- semax _ (_ * ?A * _ * _) _ _ =>
let p := fresh "p" in set (p:=A);
simpl in p;
unfold id2pred_star, init_data2pred' in p;
simpl PTree.get in p; simpl zeq in p;
cbv beta iota zeta in p;
simpl init_data_size in p;
revert p; rewrite ?offset_offset_val; intro p; simpl Z.add in p;
subst p;
repeat first
[simple apply move_globfield_into_SEP
(* | simple eapply move_globfield_into_SEP''; [ now repeat econstructor | ] *)
| simple apply move_globfield_into_SEP'; intros ?gvar0 (*;
lazymatch goal with
| |- semax _ ((PROPx _ (LOCALx (gvar ?A ?B :: _) _)) * _ * _ * _) _ _ =>
let n := fresh "v" A in rename B into n
| |- _ => idtac
end*)
];
simple apply move_globfield_into_SEP0
| |- semax _ (_ * _ * _) _ _ => idtac
end.
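(* Once every global has been processed, the leftover [globvars2pred gv nil]
   is trivial and can be dropped. *)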
Lemma eliminate_globvars2pred_nil:
forall {cs: compspecs}{Espec: OracleKind} Delta PQR gv SF c Post,
semax Delta (PQR * SF) c Post ->
semax Delta (PQR * globvars2pred gv nil * SF) c Post.
Proof.
intros.
eapply semax_pre; [ | apply H].
go_lowerx; normalize.
Qed.
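(* [expand_main_pre] is the top-level entry point: rewrite [main_pre] (or
   [main_pre_ext]), process every global of the program, then discard the
   empty remainder. *)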
Ltac expand_main_pre :=
(rewrite main_pre_start || rewrite main_pre_ext_start);
unfold prog_vars, prog_vars'; simpl globvars2pred;
repeat process_idstar;
apply eliminate_globvars2pred_nil;
rewrite ?offset_val_unsigned_repr;
simpl readonly2share.
[STATEMENT]
lemma "R - 1\<^sub>\<pi> \<subseteq> S - 1\<^sub>\<pi> \<Longrightarrow> (R \<parallel> T) - 1\<^sub>\<pi> \<subseteq> (S \<parallel> T) - 1\<^sub>\<pi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R - 1\<^sub>\<pi> \<subseteq> S - 1\<^sub>\<pi> \<Longrightarrow> R \<parallel> T - 1\<^sub>\<pi> \<subseteq> S \<parallel> T - 1\<^sub>\<pi>
[PROOF STEP]
nitpick
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R - 1\<^sub>\<pi> \<subseteq> S - 1\<^sub>\<pi> \<Longrightarrow> R \<parallel> T - 1\<^sub>\<pi> \<subseteq> S \<parallel> T - 1\<^sub>\<pi>
[PROOF STEP]
oops
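(* nitpick is Isabelle's counterexample finder; following it with oops
   abandons the proof, the usual pattern when a counterexample to the stated
   inclusion has been found. *)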
import analysis.special_functions.exp
import analysis.special_functions.log.basic
/- Tactics you may consider:
   - apply
   - exact
   - have
   - norm_num: deals with numerical expressions
-/
open real
variables a b c d e : ℝ
#check le_refl
#check le_refl b
#check add_pos
#check add_le_add
#check add_le_add_left
#check exp_pos
#check exp_pos d
#check exp_le_exp.mpr
#check log_le_log
-- BEGIN
example (h₀ : d ≤ e) : c + exp (a + d) ≤ c + exp (a + e) :=
begin
apply add_le_add,
apply le_refl c,
apply exp_le_exp.mpr,
apply add_le_add,
apply le_refl a,
apply h₀,
end
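-- A term-mode variant of the example above (a sketch under the same
-- imports): the chain of `apply`s collapses to a single proof term.
example (h₀ : d ≤ e) : c + exp (a + d) ≤ c + exp (a + e) :=
add_le_add (le_refl c) (exp_le_exp.mpr (add_le_add (le_refl a) h₀))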
example : (0 : ℝ) < 1 :=
by norm_num
example (h : a ≤ b) : log (1 + exp a) ≤ log (1 + exp b) :=
begin
have h₀ : 0 < 1 + exp a,
{ apply add_pos,
norm_num,
exact exp_pos a},
have h₁ : 0 < 1 + exp b,
{ apply add_pos,
norm_num,
exact exp_pos b},
apply (log_le_log h₀ h₁).mpr,
{ apply add_le_add_left,
apply exp_le_exp.mpr h, },
end
-- END