State Before: α : Type u
β : Type v
γ : Type w
ι : Type x
inst✝² : PseudoMetricSpace α
inst✝¹ : PseudoMetricSpace β
inst✝ : PseudoMetricSpace γ
K : ℝ≥0
f : α → β
x y : α
r : ℝ
h : ∀ (x y : α), dist (f x) (f y) ≤ dist x y
⊢ ∀ (x y : α), dist (f x) (f y) ≤ ↑1 * dist x y State After: no goals Tactic: simpa only [NNReal.coe_one, one_mul] using h |
open import Data.Product using ( _×_ ; _,_ )
open import Data.Sum using ( _⊎_ ; inj₁ ; inj₂ )
open import FRP.LTL.Time.Interval using ( _⊑_ ; _~_ ; _⌢_∵_ )
open import FRP.LTL.ISet.Core using ( ISet ; [_] ; _,_ ; M⟦_⟧ ; splitM⟦_⟧ ; subsumM⟦_⟧ )
module FRP.LTL.ISet.Sum where
_∨_ : ISet → ISet → ISet
A ∨ B = [ (λ i → M⟦ A ⟧ i ⊎ M⟦ B ⟧ i) , split , subsum ] where
  split : ∀ i j i~j →
    (M⟦ A ⟧ (i ⌢ j ∵ i~j) ⊎ M⟦ B ⟧ (i ⌢ j ∵ i~j)) →
      ((M⟦ A ⟧ i ⊎ M⟦ B ⟧ i) × (M⟦ A ⟧ j ⊎ M⟦ B ⟧ j))
  split i j i~j (inj₁ σ) with splitM⟦ A ⟧ i j i~j σ
  split i j i~j (inj₁ σ) | (σ₁ , σ₂) = (inj₁ σ₁ , inj₁ σ₂)
  split i j i~j (inj₂ τ) with splitM⟦ B ⟧ i j i~j τ
  split i j i~j (inj₂ τ) | (τ₁ , τ₂) = (inj₂ τ₁ , inj₂ τ₂)
  subsum : ∀ i j → (i ⊑ j) → (M⟦ A ⟧ j ⊎ M⟦ B ⟧ j) → (M⟦ A ⟧ i ⊎ M⟦ B ⟧ i)
  subsum i j i⊑j (inj₁ σ) = inj₁ (subsumM⟦ A ⟧ i j i⊑j σ)
  subsum i j i⊑j (inj₂ τ) = inj₂ (subsumM⟦ B ⟧ i j i⊑j τ)
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
a := -0.74860:
b := 0.06001:
c := 3.60073:
d := 0.90000:
f_num := (z, xt) -> sqrt(1 - z^2)*(a + b*xt):
f_den := (rs, xs0, xs1) -> c + d*(xs0 + xs1) + rs:
f := (rs, zeta, xt, xs0, xs1) -> f_num(zeta, xt)/f_den(rs, xs0, xs1):
|
function [l, L_rf, L_sf, L_k, L_seg, L_n] = ...
retroProjHmgLinFromPinHoleOnRob(Rf, Sf, k, seg, n)
% RETROPROJHMGLINFROMPINHOLEONROB  Retro-project a homogeneous line from a pin-hole camera mounted on a robot.
% Copyright 2009 Teresa Vidal.
if nargout == 1
ls = invPinHoleHmgLin(k, seg, n) ;
lr = fromFrameHmgLin(Sf, ls);
l = fromFrameHmgLin(Rf, lr);
else % Jacobians requested
[ls, LS_seg, LS_n, LS_k] = invPinHoleHmgLin(seg, n, k) ;
[lr, LR_sf, LR_ls] = fromFrameHmgLin(Sf, ls);
[l, L_rf, L_lr] = fromFrameHmgLin(Rf, lr);
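% Chain rule: compose the Jacobians of the two frame transforms with those
% of the inverse pin-hole model to obtain derivatives w.r.t. each input.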
L_sf = L_lr*LR_sf;
L_ls = L_lr*LR_ls;
L_k = L_ls*LS_k;
L_seg = L_ls*LS_seg;
L_n = L_ls*LS_n;
end
% ========== End of function - Start GPL license ==========
% # START GPL LICENSE
%---------------------------------------------------------------------
%
% This file is part of SLAMTB, a SLAM toolbox for Matlab.
%
% SLAMTB is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% SLAMTB is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with SLAMTB. If not, see <http://www.gnu.org/licenses/>.
%
%---------------------------------------------------------------------
% SLAMTB is Copyright:
% Copyright (c) 2008-2010, Joan Sola @ LAAS-CNRS,
% Copyright (c) 2010-2013, Joan Sola,
% Copyright (c) 2014-2015, Joan Sola @ IRI-UPC-CSIC,
% SLAMTB is Copyright 2009
% by Joan Sola, Teresa Vidal-Calleja, David Marquez and Jean Marie Codol
% @ LAAS-CNRS.
% See on top of this file for its particular copyright.
% # END GPL LICENSE
|
ORLANDO, Fla. โ Federal workers are coming up on a second empty paycheck as the partial government shutdown continues. Although many, though not all, of the workers will eventually get back pay, for now, they're draining savings and trying to keep creditors at bay.
If you're a federal worker trying to survive the shutdown, here's where you can find assistance.
Be sure to visit the U.S. Office of Personnel Management website to find resources to help with payments to banks and other companies, including documents and letters you will need.
If you have a business or group helping federal workers, send us an email at [email protected], and we'll add it here.
United Way's 211 Crisis Line is open 24/7 and can help connect people with services ranging from utility assistance and eviction prevention to finding food pantries and other services. You can reach a specialist by dialing 211 or texting your zip code to 898-211.
The 211 website also has a list of phone companies and banks offering help for workers.
In Central Florida, the Heart of Florida United Way chapter has a resources page for people who need help.
The American Legion will help Coast Guardsmen with families who will not be getting paid starting this week. Head to the Legion website to find out how to apply for funds through the Temporary Financial Assistance Program.
The American Legion's Project: VetRelief will help provide financial assistance for single Coast Guardsmen without children who live in Florida. Membership in the Legion is not required; applicants need only be active-duty military members or veterans. An application can be found on the Project: VetRelief website.
Second Harvest has a page dedicated to the shutdown on its website. It includes a food finder tool to find local food pantries, and a community resources page for other types of assistance.
It also has information on SNAP benefits and other government nutrition programs; recipients have been told to begin looking for ways to stretch those benefits as the shutdown lingers on. SNAP reportedly has funding through February.
Several utility companies are offering payment extension or assistance programs. We'll add more to this list as we get them.
Duke Energy has partnered with local agencies for payment assistance programs. You can find more information on the Duke Energy website. Customers can also request payment deferments by calling the customer service line at 1-800-700-8744, Monday-Friday, 7 a.m. to 9 p.m.
Florida Power and Light has a payment extension program for its customers. Head to the FPL website to request an extension.
Orlando Utilities Commission customers are eligible for payment extensions, late-fee waivers, and referrals to utility assistance programs. To make the arrangement, Orlando/Orange County residents should call 407-423-9018. St. Cloud/Osceola County residents should call 407-957-7373.
Pharmaceutical company Lilly has a program for anyone who needs help paying for insulin. Find more information on the Lilly website.
Rocco's Tacos and Tequila Bar: Any furloughed government employee with proper identification can get a one-time $20 credit. The deal is good through the end of the shutdown. More details are on the Rocco's Tacos Facebook page. There are Rocco's Tacos locations on Sand Lake Road in Orlando, and on Westshore Boulevard in Tampa.
Rock & Brews: The rock-inspired restaurant founded by KISS musicians Gene Simmons and Paul Stanley is offering a free pulled pork sandwich or strawberry fields salad to TSA workers with proper ID, now until the workers are paid again. There are three Rock & Brews locations in Central Florida: Kissimmee, Orlando, and Oviedo.
Madame Tussauds and Sea Life Orlando Aquarium: Free admission offered to federal workers affected by the shutdown. Workers must show a valid ID to receive one free ticket.
Tortuga Pools: The pool care company in Brevard County is offering free pool care to unpaid government employees. Call 321-405-9360 for more info.
Central Florida Zoo: The zoo in Sanford is offering free single-day admission to federal employees impacted by the shutdown, plus up to three guests. The offer also includes contractors who work within the federal government. The offer must be redeemed in person, and guests must show a valid ID badge or other ID.
G.I. Tax Service and Marco's Pizza: G.I. Tax Service in Melbourne is partnering with Marco's Pizza to offer $1 one-topping medium pizzas to furloughed government workers. To get the deal, visit one of the two G.I. Tax locations to pick up a card good for the pizza at one of two Marco's Pizza locations.
Sorelli Hair Studio and Spa, Melbourne: The salon is offering free haircuts and half-price color retouch services for furloughed workers and their immediate families.
IKEA Orlando: Starting January 24, IKEA Orlando is offering free breakfasts to federal workers who have a valid government ID. The offer includes an assortment of breakfast items and a drink and is available Monday-Friday, 9:30-11 a.m. or until the partial government shutdown is over.
Your Kid's Urgent Care: Families of furloughed workers affected by the government shutdown can visit any Your Kid's Urgent Care location in the Orlando and Tampa Bay areas for free. Services will be offered to immediate family members of federal workers who are under 21 years old. Co-pays won't be collected and your insurance won't be billed. Bring a valid ID.
Conti Moore Law: Conti Moore Law is offering free legal services (family law) to any furloughed federal employees.
Free dog and cat food: The Brevard Humane Society is offering free dog and cat food for federal employees and government contractors either working without pay or not working during the shutdown. Email the humane society at [email protected]. You'll get a reply with a pickup day at the Cocoa Adoption Center at 1020 Cox Road. A valid government ID must be presented.
Free draft beer for donations: If you would like to donate unopened and unexpired dog or cat food, bring it to Dog N' Bone British Pub at 9 Stone Street in Historic Cocoa Village, which is issuing vouchers for one free draft beer to those who donate.
Orlando Philharmonic: The Orlando Philharmonic is offering free tickets to furloughed federal workers.
Garage Door and Spring Repair LLC: Orlando-area garage door repair company is offering to fix the garage doors of those affected by the government shutdown free of charge. Call 407-969-8795.
Unique Barbershop: Free haircuts for federal workers and their families. Must present valid government ID. Just head to their shop at 4441B Old Winter Garden Road, Orlando, 32811. |
\name{unzoom-HilbertCurve-method}
\alias{unzoom,HilbertCurve-method}
\alias{unzoom}
\title{
Transform zoomed positions to their original values
}
\description{
Transform zoomed positions to their original values
}
\usage{
\S4method{unzoom}{HilbertCurve}(object, x)
}
\arguments{
\item{object}{A \code{\link{HilbertCurve-class}} object.}
\item{x}{positions.}
}
\details{
This is a reverse function of \code{\link{zoom,HilbertCurve-method}}.
The function is used internally.
}
\value{
A numeric vector of original positions.
}
\author{
Zuguang Gu <[email protected]>
}
\examples{
hc = HilbertCurve(1, 2)
z = zoom(hc, 1.5)
unzoom(hc, z)
}
|
program mn_depth
!
! Set depth to be a minimum level
!
! USAGE:
! min_depth file_in file_out level
!
!
use iso_fortran_env
use netcdf
implicit none
integer(int32) :: i,j,min_level
integer(int32) :: im,ip,jm,jp
integer(int32) :: nxt,nyt,nzt ! Size of model T grid
integer(int32) :: ncid_out,depth_id_out ! NetCDF ids
integer(int32) :: ncid_topo, depth_id ! NetCDF ids
integer(int32) :: ncid_lev, lev_id ! NetCDF ids
integer(int32) :: dids_topo_out(2) ! NetCDF ids
integer(int32) :: dids_topo(2) ! NetCDF ids
integer(int32) :: dids_lev(1) ! NetCDF ids
integer(int32) :: zlen ! length of zeta array
real(real32),allocatable,dimension(:,:) :: depth
real(real64) :: zeta
real(real64), dimension(:), allocatable :: zeta_arr
real(real32) :: min_depth, max_depth
character*128 :: file_in,file_out,level
real(real32), parameter :: missing_value = -1e30
if( command_argument_count() /= 3 ) then
write(*,*) 'ERROR: Incorrect number of arguments'
write(*,*) 'Usage: min_depth file_in file_out level'
stop 1
endif
call get_command_argument(1,file_in)
call get_command_argument(2,file_out)
call get_command_argument(3,level)
read(level,*) min_level
! Get info on the grid from input
call handle_error(nf90_open('ocean_vgrid.nc',nf90_nowrite,ncid_lev))
call handle_error(nf90_inq_varid(ncid_lev,'zeta',lev_id))
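! Assumption: zeta in ocean_vgrid.nc is a super-grid with 2*nz+1 points,
! so the depth at the bottom of model level k is taken from index 2*k+1.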
call handle_error(nf90_get_var(ncid_lev,lev_id,zeta,start=[2*min_level+1]))
min_depth=zeta
call handle_error(nf90_inquire_variable(ncid_lev,lev_id,dimids=dids_lev))
call handle_error(nf90_inquire_dimension(ncid_lev,dids_lev(1),len=zlen))
call handle_error(nf90_get_var(ncid_lev,lev_id,zeta,start=[zlen]))
max_depth=zeta
call handle_error(nf90_close(ncid_lev))
write(*,*) 'Setting minimum depth to ',min_depth
write(*,*) 'Setting maximum depth to ',max_depth
call handle_error(nf90_open(trim(file_in),nf90_nowrite,ncid_topo))
call handle_error(nf90_inq_dimid(ncid_topo,'xx',dids_topo(1)))
call handle_error(nf90_inq_dimid(ncid_topo,'yy',dids_topo(2)))
call handle_error(nf90_inquire_dimension(ncid_topo,dids_topo(1),len=nxt))
call handle_error(nf90_inquire_dimension(ncid_topo,dids_topo(2),len=nyt))
call handle_error(nf90_inq_varid(ncid_topo,'depth',depth_id))
allocate(depth(nxt,nyt))
call handle_error(nf90_get_var(ncid_topo,depth_id,depth))
call handle_error(nf90_close(ncid_topo))
! Reset depth
do j=1,nyt
do i=1,nxt
if(depth(i,j) > 0.0 ) then
depth(i,j) = min(max(depth(i,j),min_depth),max_depth)
else
depth(i,j) = missing_value
endif
enddo
enddo
call handle_error(nf90_create(trim(file_out),ior(nf90_netcdf4,nf90_clobber),ncid_out))
call handle_error(nf90_def_dim(ncid_out,'xx',nxt,dids_topo_out(1)))
call handle_error(nf90_def_dim(ncid_out,'yy',nyt,dids_topo_out(2)))
call handle_error(nf90_def_var(ncid_out,'depth',nf90_float,dids_topo_out,depth_id_out, &
chunksizes=[nxt/10,nyt/10], &
deflate_level=1,shuffle=.true.))
call handle_error(nf90_put_att(ncid_out,depth_id_out,'missing_value',missing_value))
call handle_error(nf90_put_att(ncid_out,depth_id_out,'long_name','depth'))
call handle_error(nf90_put_att(ncid_out,depth_id_out,'units','m'))
call handle_error(nf90_put_att(ncid_out,depth_id_out,'lakes_removed','yes'))
call handle_error(nf90_put_att(ncid_out,depth_id_out,'minimum_depth',min_depth))
call handle_error(nf90_put_att(ncid_out,depth_id_out,'minimum_levels',min_level))
call handle_error(nf90_put_att(ncid_out,nf90_global,'original_file',trim(file_in)))
call handle_error(nf90_enddef(ncid_out))
call handle_error(nf90_put_var(ncid_out,depth_id_out,depth))
call handle_error(nf90_close(ncid_out))
contains
subroutine handle_error(error_flag,isfatal,err_string)
! Simple error handle for NetCDF
integer(int32),intent(in) :: error_flag
logical, intent(in),optional :: isfatal
character(*), intent(in),optional :: err_string
logical :: fatal
fatal = .true.
if(present(isfatal)) fatal=isfatal
if ( error_flag /= nf90_noerr ) then
if ( fatal ) then
write(*,*) 'FATAL ERROR:',nf90_strerror(error_flag)
if (present(err_string)) write(*,*) trim(err_string)
stop
endif
endif
end subroutine handle_error
end program mn_depth
|
Bosi uses molecular gastronomy to create some items on the menu in an effort to enhance their flavours, such as freeze-drying cabbage to create a purée. The restaurant has received mixed reviews from critics, but has been listed in The World's 50 Best Restaurants since 2010, and was named by Egon Ronay as the best restaurant in the UK in 2005. The Good Food Guide ranked Hibiscus as the eighth-best restaurant in the UK in the 2013 edition. It has also been awarded five AA Rosettes.
|
\section{Past Work}
Asure's primary focus within the social security field is on pension insurance. As part of our ongoing research, we have ported specific aspects of the German pension system to the Ethereum blockchain. Based on both our hands-on experience and our expertise from years of working in the insurance field, we developed the theoretical backbone of how a decentralized pension system is supposed to function, as well as a proof-of-concept implementation of such a system.
\subsection{Research on the blockchain technology and automation}
Asure's CTO, Fabian Raetz, did a research project at the University of Applied Sciences and Arts Dortmund in 2013, in which he analyzed the emerging blockchain technologies and their possible applications. \cite{fraetz}
\newline
In 2014, a small team led by Paul Mizel and Fabian Raetz developed their own blockchain-based currency (NRJ Coin) as a proof of concept and tested different kinds of blockchain issues and economic systems. \cite{nrjcoin}
\newline
In late 2015, Paul Mizel built a team in Kiev for the AI-based innovation projects ``Insure Chat'', ``Insure Assistant'', and ``Insure Advisor''. The resulting applications were fully automated chatbots for support, claim management, and other tasks, with a unique learning mechanism and connections to social platforms like Facebook, Telegram, Skype, and others.\newline
Tech stack: IBM Watson, Microsoft Bot Framework, MS Luis, .NET.
\newline
Algorithms used: Text mining, regression analysis, SVMs, neural networks.
\subsection{German Pension System}
In order to demonstrate the potential of blockchain-based social security, Asure created a prototype based on the model of the German statutory pay-as-you-go pension system.
\newline\newline
The Asure dApp will become the reference implementation for dApps using the Asure blockchain and platform.
\newline\newline
It will feature
\begin{itemize}
\item a technical feasibility study of the German statutory pension system implemented on the Ethereum blockchain and the Asure protocol/platform.
\item a complete wallet implementation.
\item an overview and management of your insurance policies.
\item an insurance store to find and buy insurance policies.
\end{itemize}
Please try out the Asure dApp, which currently runs on the Ethereum Rinkeby testnet:
\url{https://dapp.asure.io}
\subsection{Decentralized Pension System}
To demonstrate that blockchain can solve problems globally, Asure also developed a prototype of a global pension system which is fully decentralized and hence lies neither in the hands of governments nor of any insurance company.
This is an alpha-phase experiment designed to show how social security systems can be improved in the future with the help of blockchain technology.
The idea is to implement a pay-as-you-go pension system on the Ethereum blockchain. Members pay their contributions in ETH and receive ERC20 tokens in return. No contributions are invested in the capital market, and therefore no interest is earned. Instead, the paid-in ETH is used directly to pay outstanding pension claims. How much pension is paid out depends on how many pension tokens a pensioner holds, i.e. on how much he or she has contributed to the system.
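The following minimal sketch (illustrative only, not the Asure implementation; all names are hypothetical) restates the redistribution idea: each period, the paid-in ETH is split among pensioners in proportion to the pension tokens they hold.
\begin{verbatim}
# Hypothetical sketch of the pay-as-you-go redistribution described above.
def distribute(period_contributions_eth, pensioner_tokens):
    # pensioner_tokens: dict mapping pensioner id -> pension tokens held
    total_tokens = sum(pensioner_tokens.values())
    if total_tokens == 0:
        return {}
    return {p: period_contributions_eth * tokens / total_tokens
            for p, tokens in pensioner_tokens.items()}
\end{verbatim}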
As a rule, pay-as-you-go systems only work because states introduce mandatory social security systems and can thus guarantee a stable number of members and contribution payments. In a decentralized pension system, nobody can be forced to become a member. Asure therefore builds several incentives into membership that are intended to lead to mass acceptance.
In the decentralized pension system, as in a classic one, whoever makes higher contributions gets a higher pension. The longevity of pay-ins plays a role as well: the longer one makes regular pay-ins, the longer the pension is paid out.
\begin{figure}[H]
\centering
\includegraphics[width=5.0in]{img/pension.png}
\caption{PAYG Model}
\label{fig:payg}
\end{figure}
The Asure decentralized pension dApp currently runs on the Ethereum Rinkeby testnet. It was developed during the ETHBerlin hackathon and can be accessed via the following link:
\url{https://ethberlin.asure.io}
A pension is a bet that what one pays in will be matched, if not exceeded, by the payout. The decentralized pension is based on the German pension system and implements a ``generation contract'': the young generation pays the older generation according to its possibilities and, in return, the pension entitlements are tokenized in the form of pension entitlement tokens (PET).
\newline\newline
\subsubsection*{Incentive models developed within the project}
The system does not administer members' ages, thereby avoiding fraud and the need for proof of age. Time is divided into periods, where one period is one month. Within each period, deposits can be made. For each period a target price is fixed, which can shift if the median of the previous period's deposits differs strongly from the target price.
If the maximum number of periods has been paid in, the maximum number of pension payments is also possible. Let's assume that the maximum number of periods is 480, equal to 40 years. For monthly payments over 40 years, there is a claim to 40 years of pension. If someone has used the system for only 2 years, the claim amounts to roughly one month only. The incentive to use the system to the maximum thus rewards participants with more pension entitlement periods.
\begin{eqnarray}
entitlementMonths = \frac{payedMonths^2}{12 \cdot 40~\mathrm{years}}
\end{eqnarray}
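For example, under this formula 24 paid months (2 years) yield $24^2/(12 \cdot 40) = 1.2$ entitlement months, whereas the full 480 paid months yield $480^2/480 = 480$ entitlement months, i.e. 40 years of pension.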
\begin{figure}[H]
\centering
\includegraphics[width=3.0in]{img/pension_years.png}
\caption{Decentralized pension: paid vs.\ received years}
\label{fig:pension_years}
\end{figure}
Since everyone can pay different amounts into the system, the maximum payer is granted at most a double pension entitlement. All those who pay in more than the period's target price receive more PET, up to a maximum of 2 per period. The maximum achievable is 960 PET, which allows a later claim to twice as much in the redistribution as someone who accumulates 480 PET.
\begin{eqnarray}
DPT = \begin{cases} 1 + \frac{amount-amount_{max}}
{targetPrice - amount_{max}}
* DTP_{bonus} & amount \geq targetPrice\\
\frac{amount - amount_{min}}
{targetPrice - amount_{min}}
* DTP_{bonus} & otherwise\end{cases}
\end{eqnarray}
\begin{eqnarray}
targetPrice - amount_{max} \neq 0 \quad and \quad targetPrice - amount_{min} \neq 0
\end{eqnarray}
As a further incentive for early adopters, the system provides a bonus with a multiplier of 1.5 that is planned to approach 1.0 logarithmically over the years.
\begin{eqnarray}
DTP_{bonus} = f(year) = 1.5 - 0.12 \cdot \log(year)
\end{eqnarray}
\begin{figure}[H]
\centering
\includegraphics[width=3.0in]{img/pension_bonus.png}
\caption{Decentralized pension bonus by year}
\label{fig:pension_bonus}
\end{figure}
If everyone leaves the system, the last remaining participants are rewarded more, which guarantees that the system stays lucrative; with zero participants, the system is reset to its initial state.
The limitation to a maximum of 2 PET per period (or, with the 1.5 factor, initially 3 PET in the first years) could make it attractive to pay into the system through several accounts; the system counters this because PETs are not transferable.
With the help of these incentives, the transparent design, and the DAO approach, this will be started as a social experiment on the Ethereum mainnet after the necessary simulations and parameter adjustments.
\subsubsection*{Benefits}
An independent crypto pension has many advantages. The intergenerational contract provides inflation security. It is autonomous and decentralized, following the idea of the DAO, and there is no intermediary. Privacy is preserved because no personal data is needed to participate in the system. It is completely transparent, as all transactions are on the blockchain, and it is also open source.
\subsubsection*{Read more}
We summarized our ideas on how a redistribution-based peer-to-peer pension system might look and shared our results with the broader community.
\newline
Depot Paper: \url{https://www.asure.network/asure.depot.en.pdf}
|
[STATEMENT]
lemma AE_all_imp_countable:
assumes "countable {x. Q x}"
shows "(AE x in M. \<forall>y. Q y \<longrightarrow> P x y) = (\<forall>y. Q y \<longrightarrow> (AE x in M. P x y))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (AE x in M. \<forall>y. Q y \<longrightarrow> P x y) = (\<forall>y. Q y \<longrightarrow> (AE x in M. P x y))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
countable {x. Q x}
goal (1 subgoal):
1. (AE x in M. \<forall>y. Q y \<longrightarrow> P x y) = (\<forall>y. Q y \<longrightarrow> (AE x in M. P x y))
[PROOF STEP]
by (auto dest: AE_ball_countable) |
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.natural_isomorphism
import logic.equiv.basic
/-!
# Full and faithful functors
We define typeclasses `full` and `faithful`, decorating functors.
Use `F.map_injective` to retrieve the fact that `F.map` is injective when `[faithful F]`,
and `F.preimage` to obtain preimages of morphisms when `[full F]`.
We prove some basic "cancellation" lemmas for full and/or faithful functors.
See `category_theory.equivalence` for the fact that a functor is an equivalence if and only if
it is fully faithful and essentially surjective.
-/
-- declare the `v`'s first; see `category_theory.category` for an explanation
universes vโ vโ vโ uโ uโ uโ
namespace category_theory
variables {C : Type uโ} [category.{vโ} C] {D : Type uโ} [category.{vโ} D]
/--
A functor `F : C โฅค D` is full if for each `X Y : C`, `F.map` is surjective.
In fact, we use a constructive definition, so the `full F` typeclass contains data,
specifying a particular preimage of each `f : F.obj X โถ F.obj Y`.
See <https://stacks.math.columbia.edu/tag/001C>.
-/
class full (F : C โฅค D) :=
(preimage : โ {X Y : C} (f : (F.obj X) โถ (F.obj Y)), X โถ Y)
(witness' : โ {X Y : C} (f : (F.obj X) โถ (F.obj Y)), F.map (preimage f) = f . obviously)
restate_axiom full.witness'
attribute [simp] full.witness
/--
A functor `F : C โฅค D` is faithful if for each `X Y : C`, `F.map` is injective.
See <https://stacks.math.columbia.edu/tag/001C>.
-/
class faithful (F : C โฅค D) : Prop :=
(map_injective' [] : โ {X Y : C}, function.injective (@functor.map _ _ _ _ F X Y) . obviously)
restate_axiom faithful.map_injective'
namespace functor
variables {X Y : C}
lemma map_injective (F : C โฅค D) [faithful F] :
function.injective $ @functor.map _ _ _ _ F X Y :=
faithful.map_injective F
lemma map_iso_injective (F : C โฅค D) [faithful F] :
function.injective $ @functor.map_iso _ _ _ _ F X Y :=
ฮป i j h, iso.ext (map_injective F (congr_arg iso.hom h : _))
/-- The specified preimage of a morphism under a full functor. -/
def preimage (F : C โฅค D) [full F] (f : F.obj X โถ F.obj Y) : X โถ Y :=
full.preimage.{vโ vโ} f
@[simp] lemma image_preimage (F : C โฅค D) [full F] {X Y : C} (f : F.obj X โถ F.obj Y) :
F.map (preimage F f) = f :=
by unfold preimage; obviously
end functor
section
variables {F : C โฅค D} [full F] [faithful F] {X Y Z : C}
@[simp] lemma preimage_id : F.preimage (๐ (F.obj X)) = ๐ X :=
F.map_injective (by simp)
@[simp] lemma preimage_comp (f : F.obj X โถ F.obj Y) (g : F.obj Y โถ F.obj Z) :
F.preimage (f โซ g) = F.preimage f โซ F.preimage g :=
F.map_injective (by simp)
@[simp] lemma preimage_map (f : X โถ Y) :
F.preimage (F.map f) = f :=
F.map_injective (by simp)
variables (F)
namespace functor
/-- If `F : C ⥤ D` is fully faithful, every isomorphism `F.obj X ≅ F.obj Y` has a preimage. -/
@[simps]
def preimage_iso (f : (F.obj X) ≅ (F.obj Y)) : X ≅ Y :=
{ hom := F.preimage f.hom,
inv := F.preimage f.inv,
hom_inv_id' := F.map_injective (by simp),
inv_hom_id' := F.map_injective (by simp), }
@[simp] lemma preimage_iso_map_iso (f : X ≅ Y) :
F.preimage_iso (F.map_iso f) = f :=
by { ext, simp, }
end functor
/--
If the image of a morphism under a fully faithful functor is an isomorphism,
then the original morphism is also an isomorphism.
-/
lemma is_iso_of_fully_faithful (f : X โถ Y) [is_iso (F.map f)] : is_iso f :=
โจโจF.preimage (inv (F.map f)),
โจF.map_injective (by simp), F.map_injective (by simp)โฉโฉโฉ
/-- If `F` is fully faithful, we have an equivalence of hom-sets `X โถ Y` and `F X โถ F Y`. -/
@[simps]
def equiv_of_fully_faithful {X Y} : (X โถ Y) โ (F.obj X โถ F.obj Y) :=
{ to_fun := ฮป f, F.map f,
inv_fun := ฮป f, F.preimage f,
left_inv := ฮป f, by simp,
right_inv := ฮป f, by simp }
/-- If `F` is fully faithful, we have an equivalence of iso-sets `X ≅ Y` and `F X ≅ F Y`. -/
@[simps]
def iso_equiv_of_fully_faithful {X Y} : (X ≅ Y) ≃ (F.obj X ≅ F.obj Y) :=
{ to_fun := ฮป f, F.map_iso f,
inv_fun := ฮป f, F.preimage_iso f,
left_inv := ฮป f, by simp,
right_inv := ฮป f, by { ext, simp, } }
end
section
variables {E : Type*} [category E] {F G : C โฅค D} (H : D โฅค E) [full H] [faithful H]
/-- We can construct a natural transformation between functors by constructing a
natural transformation between those functors composed with a fully faithful functor. -/
@[simps]
def nat_trans_of_comp_fully_faithful (ฮฑ : F โ H โถ G โ H) : F โถ G :=
{ app := ฮป X, (equiv_of_fully_faithful H).symm (ฮฑ.app X),
naturality' := ฮป X Y f, by { dsimp, apply H.map_injective, simpa using ฮฑ.naturality f, } }
/-- We can construct a natural isomorphism between functors by constructing a natural isomorphism
between those functors composed with a fully faithful functor. -/
@[simps]
def nat_iso_of_comp_fully_faithful (i : F ⋙ H ≅ G ⋙ H) : F ≅ G :=
nat_iso.of_components
(ฮป X, (iso_equiv_of_fully_faithful H).symm (i.app X))
(ฮป X Y f, by { dsimp, apply H.map_injective, simpa using i.hom.naturality f, })
lemma nat_iso_of_comp_fully_faithful_hom (i : F ⋙ H ≅ G ⋙ H) :
(nat_iso_of_comp_fully_faithful H i).hom = nat_trans_of_comp_fully_faithful H i.hom :=
by { ext, simp [nat_iso_of_comp_fully_faithful], }
lemma nat_iso_of_comp_fully_faithful_inv (i : F ⋙ H ≅ G ⋙ H) :
(nat_iso_of_comp_fully_faithful H i).inv = nat_trans_of_comp_fully_faithful H i.inv :=
by { ext, simp [โpreimage_comp], dsimp, simp, }
end
end category_theory
namespace category_theory
variables {C : Type uโ} [category.{vโ} C]
instance full.id : full (๐ญ C) :=
{ preimage := ฮป _ _ f, f }
instance faithful.id : faithful (๐ญ C) := by obviously
variables {D : Type uโ} [category.{vโ} D] {E : Type uโ} [category.{vโ} E]
variables (F F' : C โฅค D) (G : D โฅค E)
instance faithful.comp [faithful F] [faithful G] : faithful (F โ G) :=
{ map_injective' := ฮป _ _ _ _ p, F.map_injective (G.map_injective p) }
lemma faithful.of_comp [faithful $ F โ G] : faithful F :=
{ map_injective' := ฮป X Y, (F โ G).map_injective.of_comp }
section
variables {F F'}
/-- If `F` is full, and naturally isomorphic to some `F'`, then `F'` is also full. -/
def full.of_iso [full F] (α : F ≅ F') : full F' :=
{ preimage := ฮป X Y f, F.preimage ((ฮฑ.app X).hom โซ f โซ (ฮฑ.app Y).inv),
witness' := ฮป X Y f, by simp [โnat_iso.naturality_1 ฮฑ], }
lemma faithful.of_iso [faithful F] (α : F ≅ F') : faithful F' :=
{ map_injective' := ฮป X Y f f' h, F.map_injective
(by rw [โnat_iso.naturality_1 ฮฑ.symm, h, nat_iso.naturality_1 ฮฑ.symm]) }
end
variables {F G}
lemma faithful.of_comp_iso {H : C ⥤ E} [ℋ : faithful H] (h : F ⋙ G ≅ H) : faithful F :=
@faithful.of_comp _ _ _ _ _ _ F G (faithful.of_iso h.symm)
alias faithful.of_comp_iso โ category_theory.iso.faithful_of_comp
-- We could prove this from `faithful.of_comp_iso` using `eq_to_iso`,
-- but that would introduce a cyclic import.
lemma faithful.of_comp_eq {H : C โฅค E} [โ : faithful H] (h : F โ G = H) : faithful F :=
@faithful.of_comp _ _ _ _ _ _ F G (h.symm โธ โ)
alias faithful.of_comp_eq โ eq.faithful_of_comp
variables (F G)
/-- โDivideโ a functor by a faithful functor. -/
protected def faithful.div (F : C โฅค E) (G : D โฅค E) [faithful G]
(obj : C โ D) (h_obj : โ X, G.obj (obj X) = F.obj X)
(map : ฮ {X Y}, (X โถ Y) โ (obj X โถ obj Y))
(h_map : โ {X Y} {f : X โถ Y}, G.map (map f) == F.map f) :
C โฅค D :=
{ obj := obj,
map := @map,
map_id' :=
begin
assume X,
apply G.map_injective,
apply eq_of_heq,
transitivity F.map (๐ X), from h_map,
rw [F.map_id, G.map_id, h_obj X]
end,
map_comp' :=
begin
assume X Y Z f g,
apply G.map_injective,
apply eq_of_heq,
transitivity F.map (f โซ g), from h_map,
rw [F.map_comp, G.map_comp],
congr' 1;
try { exact (h_obj _).symm };
exact h_map.symm
end }
-- This follows immediately from `functor.hext` (`functor.hext h_obj @h_map`),
-- but importing `category_theory.eq_to_hom` causes an import loop:
-- category_theory.eq_to_hom โ category_theory.opposites โ
-- category_theory.equivalence โ category_theory.fully_faithful
lemma faithful.div_comp (F : C โฅค E) [faithful F] (G : D โฅค E) [faithful G]
(obj : C โ D) (h_obj : โ X, G.obj (obj X) = F.obj X)
(map : ฮ {X Y}, (X โถ Y) โ (obj X โถ obj Y))
(h_map : โ {X Y} {f : X โถ Y}, G.map (map f) == F.map f) :
(faithful.div F G obj @h_obj @map @h_map) โ G = F :=
begin
casesI F with F_obj _ _ _, casesI G with G_obj _ _ _,
unfold faithful.div functor.comp,
unfold_projs at h_obj,
have: F_obj = G_obj โ obj := (funext h_obj).symm,
substI this,
congr,
funext,
exact eq_of_heq h_map
end
lemma faithful.div_faithful (F : C โฅค E) [faithful F] (G : D โฅค E) [faithful G]
(obj : C โ D) (h_obj : โ X, G.obj (obj X) = F.obj X)
(map : ฮ {X Y}, (X โถ Y) โ (obj X โถ obj Y))
(h_map : โ {X Y} {f : X โถ Y}, G.map (map f) == F.map f) :
faithful (faithful.div F G obj @h_obj @map @h_map) :=
(faithful.div_comp F G _ h_obj _ @h_map).faithful_of_comp
instance full.comp [full F] [full G] : full (F โ G) :=
{ preimage := ฮป _ _ f, F.preimage (G.preimage f) }
/-- If `F โ G` is full and `G` is faithful, then `F` is full. -/
def full.of_comp_faithful [full $ F โ G] [faithful G] : full F :=
{ preimage := ฮป X Y f, (F โ G).preimage (G.map f),
witness' := ฮป X Y f, G.map_injective ((F โ G).image_preimage _) }
/-- If `F โ G` is full and `G` is faithful, then `F` is full. -/
def full.of_comp_faithful_iso {F : C ⥤ D} {G : D ⥤ E} {H : C ⥤ E} [full H] [faithful G]
(h : F ⋙ G ≅ H) : full F :=
@full.of_comp_faithful _ _ _ _ _ _ F G (full.of_iso h.symm) _
/--
Given a natural isomorphism between `F โ H` and `G โ H` for a fully faithful functor `H`, we
can 'cancel' it to give a natural iso between `F` and `G`.
-/
def fully_faithful_cancel_right {F G : C ⥤ D} (H : D ⥤ E)
[full H] [faithful H] (comp_iso : F ⋙ H ≅ G ⋙ H) : F ≅ G :=
nat_iso.of_components
(ฮป X, H.preimage_iso (comp_iso.app X))
(ฮป X Y f, H.map_injective (by simpa using comp_iso.hom.naturality f))
@[simp]
lemma fully_faithful_cancel_right_hom_app {F G : C ⥤ D} {H : D ⥤ E}
[full H] [faithful H] (comp_iso : F ⋙ H ≅ G ⋙ H) (X : C) :
(fully_faithful_cancel_right H comp_iso).hom.app X = H.preimage (comp_iso.hom.app X) :=
rfl
@[simp]
lemma fully_faithful_cancel_right_inv_app {F G : C ⥤ D} {H : D ⥤ E}
[full H] [faithful H] (comp_iso : F ⋙ H ≅ G ⋙ H) (X : C) :
(fully_faithful_cancel_right H comp_iso).inv.app X = H.preimage (comp_iso.inv.app X) :=
rfl
end category_theory
|
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin, Kenny Lau, Robert Y. Lewis
! This file was ported from Lean 3 source module group_theory.eckmann_hilton
! leanprover-community/mathlib commit 448144f7ae193a8990cb7473c9e9a01990f64ac7
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.Group.Defs
/-!
# Eckmann-Hilton argument
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
The Eckmann-Hilton argument says that if a type carries two monoid structures that distribute
over one another, then they are equal, and in addition commutative.
The main application lies in proving that higher homotopy groups (`πₙ` for `n ≥ 2`) are commutative.
## Main declarations
* `eckmann_hilton.comm_monoid`: If a type carries a unital magma structure that distributes
over a unital binary operation, then the magma is a commutative monoid.
* `eckmann_hilton.comm_group`: If a type carries a group structure that distributes
over a unital binary operation, then the group is commutative.
-/
universe u
namespace EckmannHilton
variable {X : Type u}
-- mathport name: «expr < > »
local notation a " <" m "> " b => m a b
#print EckmannHilton.IsUnital /-
/-- `is_unital m e` expresses that `e : X` is a left and right unit
for the binary operation `m : X โ X โ X`. -/
structure IsUnital (m : X โ X โ X) (e : X) extends IsLeftId _ m e, IsRightId _ m e : Prop
#align eckmann_hilton.is_unital EckmannHilton.IsUnital
-/
/- warning: eckmann_hilton.mul_one_class.is_unital -> EckmannHilton.MulOneClass.isUnital is a dubious translation:
lean 3 declaration is
forall {X : Type.{u1}} [G : MulOneClass.{u1} X], EckmannHilton.IsUnital.{u1} X (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X G))) (OfNat.ofNat.{u1} X 1 (OfNat.mk.{u1} X 1 (One.one.{u1} X (MulOneClass.toHasOne.{u1} X G))))
but is expected to have type
forall {X : Type.{u1}} [G : MulOneClass.{u1} X], EckmannHilton.IsUnital.{u1} X (fun ([email protected]._hyg.284 : X) ([email protected]._hyg.286 : X) => HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X G)) [email protected]._hyg.284 [email protected]._hyg.286) (OfNat.ofNat.{u1} X 1 (One.toOfNat1.{u1} X (MulOneClass.toOne.{u1} X G)))
Case conversion may be inaccurate. Consider using '#align eckmann_hilton.mul_one_class.is_unital EckmannHilton.MulOneClass.isUnitalโ'. -/
@[to_additive EckmannHilton.AddZeroClass.IsUnital]
theorem MulOneClass.isUnital [G : MulOneClass X] : IsUnital (ยท * ยท) (1 : X) :=
IsUnital.mk (by infer_instance) (by infer_instance)
#align eckmann_hilton.mul_one_class.is_unital EckmannHilton.MulOneClass.isUnital
#align eckmann_hilton.add_zero_class.is_unital EckmannHilton.AddZeroClass.IsUnital
variable {mโ mโ : X โ X โ X} {eโ eโ : X}
variable (hโ : IsUnital mโ eโ) (hโ : IsUnital mโ eโ)
variable (distrib : โ a b c d, ((a <mโ> b) <mโ> c <mโ> d) = (a <mโ> c) <mโ> b <mโ> d)
include hโ hโ distrib
#print EckmannHilton.one /-
/-- If a type carries two unital binary operations that distribute over each other,
then they have the same unit elements.
In fact, the two operations are the same, and give a commutative monoid structure,
see `eckmann_hilton.comm_monoid`. -/
theorem one : eโ = eโ := by
simpa only [hโ.left_id, hโ.right_id, hโ.left_id, hโ.right_id] using Distrib eโ eโ eโ eโ
#align eckmann_hilton.one EckmannHilton.one
-/
#print EckmannHilton.mul /-
/-- If a type carries two unital binary operations that distribute over each other,
then these operations are equal.
In fact, they give a commutative monoid structure, see `eckmann_hilton.comm_monoid`. -/
theorem mul : mโ = mโ := by
funext a b
calc
mโ a b = mโ (mโ a eโ) (mโ eโ b) := by
simp only [one hโ hโ Distrib, hโ.left_id, hโ.right_id, hโ.left_id, hโ.right_id]
_ = mโ a b := by simp only [Distrib, hโ.left_id, hโ.right_id, hโ.left_id, hโ.right_id]
#align eckmann_hilton.mul EckmannHilton.mul
-/
#print EckmannHilton.mul_comm /-
/-- If a type carries two unital binary operations that distribute over each other,
then these operations are commutative.
In fact, they give a commutative monoid structure, see `eckmann_hilton.comm_monoid`. -/
theorem mul_comm : IsCommutative _ mโ :=
โจfun a b => by simpa [mul hโ hโ Distrib, hโ.left_id, hโ.right_id] using Distrib eโ a b eโโฉ
#align eckmann_hilton.mul_comm EckmannHilton.mul_comm
-/
#print EckmannHilton.mul_assoc /-
/-- If a type carries two unital binary operations that distribute over each other,
then these operations are associative.
In fact, they give a commutative monoid structure, see `eckmann_hilton.comm_monoid`. -/
theorem mul_assoc : IsAssociative _ mโ :=
โจfun a b c => by simpa [mul hโ hโ Distrib, hโ.left_id, hโ.right_id] using Distrib a b eโ cโฉ
#align eckmann_hilton.mul_assoc EckmannHilton.mul_assoc
-/
omit hโ hโ distrib
/- warning: eckmann_hilton.comm_monoid -> EckmannHilton.commMonoid is a dubious translation:
lean 3 declaration is
forall {X : Type.{u1}} {mโ : X -> X -> X} {eโ : X}, (EckmannHilton.IsUnital.{u1} X mโ eโ) -> (forall [h : MulOneClass.{u1} X], (forall (a : X) (b : X) (c : X) (d : X), Eq.{succ u1} X (mโ (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X h)) a b) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X h)) c d)) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X h)) (mโ a c) (mโ b d))) -> (CommMonoid.{u1} X))
but is expected to have type
forall {X : Type.{u1}} {mโ : X -> X -> X} {eโ : X}, (EckmannHilton.IsUnital.{u1} X mโ eโ) -> (forall [h : MulOneClass.{u1} X], (forall (a : X) (b : X) (c : X) (d : X), Eq.{succ u1} X (mโ (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X h)) a b) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X h)) c d)) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X h)) (mโ a c) (mโ b d))) -> (CommMonoid.{u1} X))
Case conversion may be inaccurate. Consider using '#align eckmann_hilton.comm_monoid EckmannHilton.commMonoidโ'. -/
/-- If a type carries a unital magma structure that distributes over a unital binary
operation, then the magma structure is a commutative monoid. -/
@[reducible,
to_additive
"If a type carries a unital additive magma structure that distributes over\na unital binary operations, then the additive magma structure is a commutative additive monoid."]
def commMonoid [h : MulOneClass X]
(distrib : โ a b c d, ((a * b) <mโ> c * d) = (a <mโ> c) * b <mโ> d) : CommMonoid X :=
{ h with
mul := (ยท * ยท)
one := 1
mul_comm := (mul_comm hโ MulOneClass.isUnital Distrib).comm
mul_assoc := (mul_assoc hโ MulOneClass.isUnital Distrib).and_assoc }
#align eckmann_hilton.comm_monoid EckmannHilton.commMonoid
#align eckmann_hilton.add_comm_monoid EckmannHilton.addCommMonoid
/- warning: eckmann_hilton.comm_group -> EckmannHilton.commGroup is a dubious translation:
lean 3 declaration is
forall {X : Type.{u1}} {mโ : X -> X -> X} {eโ : X}, (EckmannHilton.IsUnital.{u1} X mโ eโ) -> (forall [G : Group.{u1} X], (forall (a : X) (b : X) (c : X) (d : X), Eq.{succ u1} X (mโ (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X (Monoid.toMulOneClass.{u1} X (DivInvMonoid.toMonoid.{u1} X (Group.toDivInvMonoid.{u1} X G))))) a b) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X (Monoid.toMulOneClass.{u1} X (DivInvMonoid.toMonoid.{u1} X (Group.toDivInvMonoid.{u1} X G))))) c d)) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toHasMul.{u1} X (Monoid.toMulOneClass.{u1} X (DivInvMonoid.toMonoid.{u1} X (Group.toDivInvMonoid.{u1} X G))))) (mโ a c) (mโ b d))) -> (CommGroup.{u1} X))
but is expected to have type
forall {X : Type.{u1}} {mโ : X -> X -> X} {eโ : X}, (EckmannHilton.IsUnital.{u1} X mโ eโ) -> (forall [G : Group.{u1} X], (forall (a : X) (b : X) (c : X) (d : X), Eq.{succ u1} X (mโ (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X (Monoid.toMulOneClass.{u1} X (DivInvMonoid.toMonoid.{u1} X (Group.toDivInvMonoid.{u1} X G))))) a b) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X (Monoid.toMulOneClass.{u1} X (DivInvMonoid.toMonoid.{u1} X (Group.toDivInvMonoid.{u1} X G))))) c d)) (HMul.hMul.{u1, u1, u1} X X X (instHMul.{u1} X (MulOneClass.toMul.{u1} X (Monoid.toMulOneClass.{u1} X (DivInvMonoid.toMonoid.{u1} X (Group.toDivInvMonoid.{u1} X G))))) (mโ a c) (mโ b d))) -> (CommGroup.{u1} X))
Case conversion may be inaccurate. Consider using '#align eckmann_hilton.comm_group EckmannHilton.commGroupโ'. -/
/-- If a type carries a group structure that distributes over a unital binary operation,
then the group is commutative. -/
@[reducible,
to_additive
"If a type carries an additive group structure that\ndistributes over a unital binary operation, then the additive group is commutative."]
def commGroup [G : Group X] (distrib : โ a b c d, ((a * b) <mโ> c * d) = (a <mโ> c) * b <mโ> d) :
CommGroup X :=
{ EckmannHilton.commMonoid hโ Distrib, G with }
#align eckmann_hilton.comm_group EckmannHilton.commGroup
#align eckmann_hilton.add_comm_group EckmannHilton.addCommGroup
end EckmannHilton
|
This week in Washington, Cliff Floyd is expecting the best – but bracing for the worst.
“I’m expecting good, clean baseball,” said Floyd, who was 3-for-18 with six strikeouts against Dontrelle Willis and was given yesterday off. “I think if Pedro [Martinez] hits [Jose Guillen], we’re going to fight.
Floyd has never understood why, when a pitcher hits a position player, a position player on the other team is targeted.
He said heโd be seething if his team had been hit six times in a series, as the Nationals were.
“But my point over there is, hit the guy who’s throwing,” Floyd said. “He’s going to pitch again, sooner or later.
The Mets left fielder nearly charged the mound against Houstonโs Roy Oswalt last season, and he acknowledged talking about retaliating (as he did at the time) is nonsense that must stop.
Batting .200 (3-for-15) this season, Floyd worked on his mechanics in the indoor cage before the 3-2 victory over Florida.
The Mets realigned their rotation after Saturdayโs rainout, and Victor Zambrano will start Thursday in Washington. Zambrano (strained left hamstring) was originally scheduled to start yesterday.
Brian Bannister and Pedro Martinez will stay on turn and pitch Tuesday and Wednesday. Tom Glavine and Steve Trachsel will flip-flop and work on Friday and Saturday so Glavine can stay on turn.
Carlos Delgado was inconspicuous during the seventh-inning rendition of “God Bless America,” when a handful of his teammates stood on the top step or above it. . . . Saturday’s rain washed away starts for Chris Woodward and Ramon Castro. Yesterday, Paul Lo Duca and Anderson Hernandez were back at catcher and second base, respectively. “We’ve got an off-day [today], and I didn’t want guys playing every day to get three days off,” Willie Randolph said. “That’s just the way it works out.” The Mets play day games after night games on Thursday and Saturday, so Castro should start a couple of games. Woodward pinch-hit in the sixth and whiffed against Willis. |
From events, to meetings, to gatherings, it's a chic, unique, stylish piece that stands out and makes a statement....just like you.
Each time you use it, remember who you are, what you stand for and be prepared to share YOUR definition!
Your body is your temple. Treat it well and feel good about how you fuel it.
I AM FemFab Necklace - Limited Edition - SOLD OUT! |
/*
* Copyright 2019 Tomasz bla Fortuna. All rights reserved.
* License: MIT
*/
#ifndef _UTILS_H_
#define _UTILS_H_
#include <algorithm>
#include <cassert>
#include <charconv>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <boost/algorithm/string.hpp>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
/** Measure execution time of a lambda */
template<typename Fn>
int measure(std::string desc, Fn execute) {
using std::chrono::system_clock;
auto start = system_clock::now();
execute();
auto took = system_clock::now() - start;
if (desc.length() > 0) {
const double s = took.count() / 1e9;
if (s < 1) {
std::cout << desc << " took " << s * 1000 << "ms"
<< std::endl;
} else {
std::cout << desc << " took " << s << "s"
<< std::endl;
}
}
return took.count();
}
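/* Example usage (illustrative):
 *   std::vector<int> v(1000000, 42);
 *   measure("std::sort", [&v]() { std::sort(v.begin(), v.end()); });
 */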
/** Fill structure with data and measure time */
template<typename T, typename D>
void test_generation(const std::string &name, T &algo, const D &data) {
measure(name + " generation",
[&algo, &data] () {
int id = 0;
for (auto &item: data) {
algo.add(item, id);
/* Simulate a client ID / ptr */
id++;
}
});
}
/** Query structure for a given IP, possibly mutate IPs between queries */
template<typename T, typename Fn>
void test_query(const std::string &name, T &algo,
Fn mutate_ip,
const int tests = 5000000) {
int found = 0, nx = 0;
auto took = measure("",
[&] () {
for (int i = 0; i < tests; i++) {
const auto test_ip = mutate_ip(i);
const int ret = algo.query(test_ip);
if (ret == -1) {
nx += 1;
} else {
found += 1;
}
}
});
const double per_s = tests / (took / 1e9);
const double ns_per_q = 1.0 * took / tests;
std::cout
<< name << " finished:" << std::endl
<< " found=" << 100.0 * found / (found + nx) << "%"
<< " (" << found << " / " << nx << ")" << std::endl
<< " queries " << tests << " in " << took / 1e9 << "s -> "
<< per_s / 1e6 << " Mq/s; "
<< ns_per_q << " ns/q"
<< std::endl;
}
/** Convert dotted IPv4 notation to a host-byte-order integer */
uint32_t ip_to_hl(const std::string &addr) {
in_addr ip_parsed;
int ret = inet_aton(addr.c_str(), &ip_parsed);
if (ret == 0)
throw std::exception();
uint32_t ip_network = ntohl(ip_parsed.s_addr);
return ip_network;
}
/** Parse IP / mask */
template<typename K>
void ip_from_string(const std::string &addr_mask, K &ip_n, int &mask_n) {
constexpr static int BITS_TOTAL = (8 * sizeof(K));
std::string addr;
size_t found = addr_mask.find("/");
if (found == std::string::npos) {
mask_n = -1;
addr = addr_mask;
} else {
addr = addr_mask.substr(0, found);
std::string mask_s = addr_mask.substr(found + 1, addr_mask.size());
mask_n = std::stoi(mask_s);
}
if constexpr (BITS_TOTAL == 32) {
in_addr ip_parsed;
int ret = inet_pton(AF_INET, addr.c_str(), &ip_parsed);
if (ret == 0)
throw std::runtime_error("Unable to parse IPv4 address");
ip_n = ntohl(ip_parsed.s_addr);
} else if constexpr (BITS_TOTAL == 128) {
in6_addr ip_parsed;
int ret = inet_pton(AF_INET6, addr.c_str(), &ip_parsed);
if (ret == 0)
throw std::runtime_error("Unable to parse IPv6 address");
/* Convert IPv6 to host order, so that bitshifts work ok */
ip_n = 0;
for (int i=0; i<16; i++) {
ip_n |= ((K)ip_parsed.s6_addr[i]) << (120 - 8*i);
}
} else {
throw std::runtime_error("IP address of unknown length");
}
}
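/* Example (illustrative): ip_from_string<uint32_t>("10.0.0.0/8", ip, mask)
 * sets ip to 0x0A000000 (host byte order) and mask to 8; an address given
 * without a mask suffix sets mask to -1. */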
void show_mem_usage(bool quiet = false)
{
const std::string prefix = "VmRSS";
std::ifstream status("/proc/self/status");
std::string buffer;
std::vector<std::string> columns;
static int last_rss_kb = -1;
while (std::getline(status, buffer)) {
if (boost::starts_with(buffer, prefix)) {
boost::split(columns, buffer, boost::is_any_of(" "),
boost::algorithm::token_compress_on);
assert(columns.size() >= 2);
const int rss_kb = atoi(columns[1].c_str());
if (not quiet) {
std::cout << "-> Process RSS: " << rss_kb << "kB;";
if (last_rss_kb != -1) {
std::cout << " difference: " << rss_kb - last_rss_kb << "kB";
}
std::cout << std::endl;
}
last_rss_kb = rss_kb;
break;
}
}
}
uint32_t fastrand(void) {
static unsigned long next = 1;
next = next * 1103515245 + 12345;
return((unsigned)(next/65536) % RAND_MAX);
}
/** Using random input data (networks) generate random query data (IPs). */
std::vector<uint32_t> get_rnd_test_data(std::vector<std::string> &input_data,
int count=5000000) {
const int input_len = input_data.size();
std::vector<uint32_t> data;
std::string addr;
for (int i = 0; i < count; i++) {
auto addr_mask = input_data[fastrand() % input_len];
uint32_t netip;
int mask_n;
ip_from_string<uint32_t>(addr_mask, netip, mask_n);
assert(mask_n != -1);
uint32_t mask = 0xffffffff << (32 - mask_n);
uint32_t host_rnd = fastrand() & ~mask;
uint32_t rnd_ip = netip | host_rnd;
data.push_back(rnd_ip);
}
return data;
};
/** Load subnets from file and sort them by mask */
std::vector<std::string> load_test_data(const std::string &path) {
std::ifstream in(path);
std::string line;
std::vector<std::string> addresses;
while (getline(in, line)) {
addresses.push_back(line);
}
/* Sort by mask */
std::sort(addresses.begin(),
addresses.end(),
[](const std::string &a, const std::string &b) {
int mask_a = 0, mask_b = 0;
size_t found = a.find("/");
assert(found != std::string::npos);
std::from_chars(a.data() + found+1, a.data() + a.size(),
mask_a);
found = b.find("/");
std::from_chars(b.data() + found+1, b.data() + b.size(),
mask_b);
return mask_a < mask_b;
});
return addresses;
}
#endif
|
(*
Copyright (C) 2020 Susi Lehtola
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
(* prefix:
mgga_x_r2scan_params *params;
assert(p->params != NULL);
params = (mgga_x_r2scan_params * )(p->params);
*)
$include "mgga_x_rscan.mpl"
$include "mgga_x_scan.mpl"
(* eqn S6 *)
r2scan_alpha := (x, t) -> (t - x^2/8)/(K_FACTOR_C + params_a_eta*x^2/8):
(* f(alpha) replaced with a polynomial for alpha in [0, 2.5], eqn S7 *)
r2scan_f_alpha_neg := a -> exp(-params_a_c1*a/(1 - a)):
r2scan_f_alpha := (a, ff) -> my_piecewise5(a <= 0, r2scan_f_alpha_neg(m_min(a, 0)), a <= 2.5, rscan_f_alpha_small(m_min(a, 2.5), ff), rscan_f_alpha_large(m_max(a, 2.5))):
(* eqn S11 *)
Cn := 20/27 + params_a_eta*5/3:
(* eqn S12 *)
C2 := ff -> -add(i*ff[9-i], i=1..8) * (1-scan_h0x):
(* eqn S10; this is analogous to scan_y *)
r2scan_x := (p, ff) -> (Cn*C2(ff)*exp(-p^2/params_a_dp2^4)+MU_GE)*p:
r2scan_f := (x, u, t) -> (scan_h1x(r2scan_x(scan_p(x), rscan_fx)) + r2scan_f_alpha(r2scan_alpha(x, t), rscan_fx) * (scan_h0x - scan_h1x(r2scan_x(scan_p(x), rscan_fx))))*scan_gx(x):
f := (rs, z, xt, xs0, xs1, u0, u1, t0, t1) -> mgga_exchange(r2scan_f, rs, z, xs0, xs1, u0, u1, t0, t1):
|
\problemname{Hello World!}
\section*{Input}
There is no input for this problem.
\section*{Output}
Output should contain one line, containing the string ``Hello World!''.
|
module Extra.File
import System.File
||| `fputc` in C
%foreign "C:fputc,libc"
export
prim__fputc : Int -> FilePtr -> PrimIO Int
||| `fputc` exposed via higher-level Idris 2 primitives
export
fputc : HasIO io => Bits8 -> File -> io (Either FileError ())
fputc b (FHandle ptr) = do
let c = cast b
c' <- primIO $ prim__fputc c ptr
pure $ if c' == c then Right () else Left FileWriteError
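-- Illustrative use (file name and error handling are placeholders):
--   do Right f <- openFile "out.bin" WriteTruncate
--        | Left err => pure ()
--      ignore $ fputc 65 f
--      closeFile f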
|
using JuliaInterpreter
using Test
@testset "core" begin
@test JuliaInterpreter.is_quoted_type(QuoteNode(Int32), :Int32)
@test !JuliaInterpreter.is_quoted_type(QuoteNode(Int32), :Int64)
@test !JuliaInterpreter.is_quoted_type(QuoteNode(Int32(0)), :Int32)
@test !JuliaInterpreter.is_quoted_type(Int32, :Int32)
end
|
# Create actors and reviews
actors_vector <- c("Jack Nicholson","Shelley Duvall","Danny Lloyd","Scatman Crothers","Barry Nelson")
reviews_factor <- factor(c("Good", "OK", "Good", "Perfect", "Bad", "Perfect", "Good"),
ordered = TRUE, levels = c("Bad", "OK", "Good", "Perfect"))
# Create shining_list
shining_list <- list(title="The Shining", actors=actors_vector, reviews=reviews_factor)
print(shining_list)
str(shining_list)
# Add both the year and director to shining_list: shining_list_ext
shining_list_ext <- c(shining_list, year=1980, director="Stanley Kubrick")
# Have a look at the structure of shining_list_ext
str(shining_list_ext) |
module Window (
hamming
, hammingC
, c
, Window
, CWindow
) where
import Data.Vector
import qualified Data.Complex as C
type Window = Vector Double -- Windowing function
type CWindow = Vector (C.Complex Double) -- Windowing function
-- quick conversion to complex
c :: Num a => a -> C.Complex a
c = (C.:+ 0)
-- A Hamming window of size m.
hamming :: Int -> Window
hamming m = generate m hamming' where
hamming' n = 0.54 - 0.46*cos(2 * pi * (fromIntegral n)/(fromIntegral m-1))
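-- Example: hamming 5 evaluates (approximately) to [0.08, 0.54, 1.0, 0.54, 0.08].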
-- A Hamming window of type Complex Double, of size m
hammingC :: Int -> CWindow
hammingC m = generate m hamming' where
hamming' n = c $ 0.54 - 0.46*cos(2 * pi * (fromIntegral n)/(fromIntegral m-1))
|
{-# OPTIONS --cubical --safe #-}
module Data.Empty.Base where
open import Cubical.Data.Empty using (⊥; ⊥-elim; isProp⊥) public
open import Level
infix 4.5 ¬_
¬_ : Type a → Type a
¬ A = A → ⊥
|
theory Memdata
imports Main Integers Values AST Archi
begin
(** In-memory representation of values. *)
section \<open>Properties of memory chunks\<close>
(** Memory reads and writes are performed by quantities called memory chunks,
encoding the type, size and signedness of the chunk being addressed.
The following functions extract the size information from a chunk. *)
fun size_chunk_nat :: "memory_chunk \<Rightarrow> nat" where
"size_chunk_nat Mint8signed = 1"
| "size_chunk_nat Mint8unsigned = 1"
| "size_chunk_nat Mint16signed = 2"
| "size_chunk_nat Mint16unsigned = 2"
| "size_chunk_nat Mint32 = 4"
| "size_chunk_nat Mint64 = 8"
| "size_chunk_nat Mfloat32 = 4"
| "size_chunk_nat Mfloat64 = 8"
| "size_chunk_nat Many32 = 4"
| "size_chunk_nat Many64 = 8"
lemma size_chunk_nat_pos[iff]: "0 < size_chunk_nat chunk"
by (cases chunk) auto
fun size_chunk :: "memory_chunk \<Rightarrow> Z" where
"size_chunk chunk = int (size_chunk_nat chunk)"
(** Memory reads and writes must respect alignment constraints:
the byte offset of the location being addressed should be an exact
multiple of the natural alignment for the chunk being addressed.
This natural alignment is defined by the following
[align_chunk] function. Some target architectures
(e.g. PowerPC and x86) have no alignment constraints, which we could
reflect by taking [align_chunk chunk = 1]. However, other architectures
have stronger alignment requirements. The following definition is
appropriate for PowerPC, ARM and x86. *)
fun align_chunk :: "memory_chunk \<Rightarrow> Z" where
"align_chunk Mint8signed = 1"
| "align_chunk Mint8unsigned = 1"
| "align_chunk Mint16signed = 2"
| "align_chunk Mint16unsigned = 2"
| "align_chunk Mint32 = 4"
| "align_chunk Mint64 = 8"
| "align_chunk Mfloat32 = 4"
| "align_chunk Mfloat64 = 4"
| "align_chunk Many32 = 4"
| "align_chunk Many64 = 4"
lemma align_dvd_size[iff]: "align_chunk chunk dvd int (size_chunk_nat chunk)"
apply (cases chunk)
by auto
lemma align_dvd_size_mult: "align_chunk chunk dvd i*size_chunk chunk"
apply (cases chunk)
by auto
datatype quantity = Q32 | Q64
fun size_quantity_nat :: "quantity \<Rightarrow> nat" where
"size_quantity_nat Q32 = 4" | "size_quantity_nat Q64 = 8"
lemma [iff]: "(0::nat) < size_quantity_nat q"
apply (cases q)
by auto
lemma size_quantity_nat_set: "size_quantity_nat q \<in> {4,8}"
apply (cases q)
by auto
section \<open>Memory values\<close>
(** A ``memory value'' is a byte-sized quantity that describes the current
content of a memory cell. It can be either:
- a concrete 8-bit integer;
- a byte-sized fragment of an opaque value;
- the special constant [Undef] that represents uninitialized memory.
*)
(** Values stored in memory cells. *)
datatype memval =
Undef
| Byte m_byte
| Fragment val quantity nat
subsection \<open>Encoding and decoding integers\<close>
(** We define functions to convert between integers and lists of bytes
of a given length *)
fun bytes_of_int :: "nat \<Rightarrow> int \<Rightarrow> (m_byte list)" where
"bytes_of_int 0 _ = []"
| "bytes_of_int (Suc m) x = Byte.repr x # bytes_of_int m (x div 256)"
fun int_of_bytes :: "(m_byte list) \<Rightarrow> int" where
"int_of_bytes [] = 0"
| "int_of_bytes (b # l') = Byte.unsigned b + int_of_bytes l' * 256"
lemma int_to_bytes_to_int[simp]: "int_of_bytes (bytes_of_int n x) = (x mod (2 ^ (8 * n)))"
proof (induction n x rule: bytes_of_int.induct)
case (1 uu)
then show ?case
by auto
next
case (2 m x)
note steps =
mod_div_mult_eq[of "x mod 2 ^ (8 + 8 * m)" "256", symmetric]
mod_exp_eq[of x "8 + 8 * m" "8", simplified]
div_exp_mod_exp_eq[of x 8 "8 * m", simplified]
show ?case
apply (simp add: 2)
apply (subst steps(1))
apply (subst steps(2))
apply (subst steps(3))
by (simp add: take_bit_eq_mod)
qed
lemma length_bytes_of_int[simp]: "length (bytes_of_int sz i) = sz"
apply (induction sz i rule: bytes_of_int.induct)
by (auto)
lemma bytes_of_int_truncate: "x = (2 ^ (8*sz)) \<Longrightarrow> bytes_of_int sz (i mod x) = bytes_of_int sz i"
apply (induction sz i arbitrary: x rule: bytes_of_int.induct)
apply (auto simp add: Byte.repr_mod)
subgoal for m x
using div_exp_mod_exp_eq[of x 8 "8*m"]
by auto
done
declare bytes_of_int.simps[simp del] int_of_bytes.simps[simp del]
definition rev_if_be :: "(m_byte list) \<Rightarrow> (m_byte list)" where
"rev_if_be l = (if Archi.big_endian then List.rev l else l)"
(* only valid for 64bit *)
definition encode_int :: "nat \<Rightarrow> int \<Rightarrow> (m_byte list)" where
"encode_int sz x = rev_if_be (bytes_of_int sz x)"
lemma encode_int_length[simp]:
"length (encode_int sz x) = sz"
unfolding encode_int_def by simp
lemma encode_int_truncate: "x = (2 ^ (8*sz)) \<Longrightarrow> encode_int sz (i mod x) = encode_int sz i"
unfolding encode_int_def
using bytes_of_int_truncate by simp
lemma encode_int_truncate':
assumes "LENGTH('w::len) = 8*sz"
shows "(encode_int sz (uint (UCAST('x::len \<rightarrow> 'w) i))) = (encode_int sz (uint i))"
apply (simp add: unsigned_ucast_eq take_bit_eq_mod)
using assms(1) encode_int_truncate
by simp
definition decode_int :: "(m_byte list) \<Rightarrow> int" where
"decode_int b = int_of_bytes (rev_if_be b)"
lemma encode_decode_int[simp]: "decode_int (encode_int sz x) = (x mod (2 ^ (8 * sz)))"
unfolding encode_int_def decode_int_def
using int_to_bytes_to_int
by auto
subsection \<open>Encoding and decoding values\<close>
definition inj_bytes :: "(m_byte list) \<Rightarrow> (memval list)" where
"inj_bytes bl = List.map Byte bl"
lemma inj_bytes_length[simp]:
"length (inj_bytes bl) = length bl"
unfolding inj_bytes_def by simp
lemma inj_bytes_valid[iff]:
"Undef \<notin> set (inj_bytes bl)"
unfolding inj_bytes_def
by auto
fun proj_bytes :: "(memval list) \<Rightarrow> ((m_byte list) option)" where
"proj_bytes [] = Some []"
| "proj_bytes (Byte b # vl) = (case proj_bytes vl of None \<Rightarrow> None | Some bl \<Rightarrow> Some(b # bl))"
| "proj_bytes _ = None"
lemma proj_inj_bytes[simp]: "proj_bytes (inj_bytes bs) = Some bs"
unfolding inj_bytes_def
by (induction bs) auto
lemma proj_bytes_undef[simp]: "proj_bytes (replicate l Undef) = None" if "l > 0"
using that
by (cases l) auto
fun inj_value_rec :: "nat \<Rightarrow> val \<Rightarrow> quantity \<Rightarrow> (memval list)" where
"inj_value_rec 0 v q = []"
| "inj_value_rec (Suc m) v q = Fragment v q m # inj_value_rec m v q"
lemma inj_value_rec_n: "n > 0 \<Longrightarrow> inj_value_rec n v q = Fragment v q (n-1) # inj_value_rec (n-1) v q"
by (metis Suc_diff_1 inj_value_rec.simps(2))
lemma inj_value_rec_length[simp]: "length (inj_value_rec s v q) = s"
apply (induction s)
by auto
lemma inj_value_rec_valid[iff]:
"Undef \<notin> set (inj_value_rec s v q)"
apply (induction s)
by auto
definition inj_value :: "quantity \<Rightarrow> val \<Rightarrow> (memval list)" where
"inj_value q v = inj_value_rec (size_quantity_nat q) v q"
lemma inj_value_first: "inj_value q v = Fragment v q (size_quantity_nat q - 1) # inj_value_rec (size_quantity_nat q - 1) v q"
unfolding inj_value_def
using inj_value_rec_n by simp
lemma inj_value_length[simp]: "length (inj_value q v) = size_quantity_nat q"
unfolding inj_value_def
by simp
lemma inj_value_valid[iff]:
"Undef \<notin> set (inj_value q v)"
unfolding inj_value_def
by simp
fun check_value :: "nat \<Rightarrow> val \<Rightarrow> quantity \<Rightarrow> (memval list) \<Rightarrow> bool" where
"check_value 0 v q [] = True"
| "check_value (Suc m) v q (Fragment v' q' m' # vl') =
(v = v' \<and> q = q' \<and> m = m' \<and> check_value m v q vl')"
| "check_value _ v q _ = False"
lemma check_inj_value[simp]: "check_value sz v q (inj_value_rec sz v q) = True"
apply (induction sz v q rule: inj_value_rec.induct)
by auto
definition proj_value :: "quantity \<Rightarrow> (memval list) \<Rightarrow> val" where
"proj_value q vl = (case vl of
(Fragment v q' n # vl') \<Rightarrow> (if check_value (size_quantity_nat q) v q vl then v else Vundef)
| _ \<Rightarrow> Vundef)"
lemma proj_inj_value[simp]: "proj_value q (inj_value q v) = v"
unfolding inj_value_def proj_value_def
apply (subst (2) inj_value_rec_n)
apply (cases q)
by auto
lemma proj_bytes_inj_value[simp]: "proj_bytes (inj_value v q) = None"
unfolding inj_value_def
apply (subst inj_value_rec_n)
by auto
lemma proj_value_undef[simp]: "proj_value x (Undef # l) = Vundef"
unfolding proj_value_def by simp
lemma proj_value_undef'[simp]: "proj_value q (replicate l Undef) = Vundef"
unfolding proj_value_def
by (cases l) auto
fun encode_val :: "memory_chunk \<Rightarrow> val \<Rightarrow> (memval list)" where
"encode_val chunk v =
(case (v, chunk) of
(Vint n, Mint8signed) \<Rightarrow> inj_bytes (encode_int 1 (uint n))
| (Vint n, Mint8unsigned) \<Rightarrow> inj_bytes (encode_int 1 (uint n))
| (Vint n, Mint16signed) \<Rightarrow> inj_bytes (encode_int 2 (uint n))
| (Vint n, Mint16unsigned) \<Rightarrow> inj_bytes (encode_int 2 (uint n))
| (Vint n, Mint32) \<Rightarrow> inj_bytes (encode_int 4 (uint n))
| (Vptr b ofs, Mint32) \<Rightarrow> if Archi.ptr64 then replicate 4 Undef else inj_value Q32 v
| (Vlong n, Mint64) \<Rightarrow> inj_bytes (encode_int 8 (uint n))
| (Vptr b ofs, Mint64) \<Rightarrow> if Archi.ptr64 then inj_value Q64 v else replicate 8 Undef
| (Vsingle n, Mfloat32) \<Rightarrow> inj_bytes (encode_int 4 (uint (fp32_of_float n)))
| (Vfloat n, Mfloat64) \<Rightarrow> inj_bytes (encode_int 8 (uint (fp64_of_float n)))
| (_, Many32) \<Rightarrow> inj_value Q32 v
| (_, Many64) \<Rightarrow> inj_value Q64 v
| (_, _) \<Rightarrow> replicate (size_chunk_nat chunk) Undef
)"
lemma encode_val_length[simp]:
"length (encode_val chunk v) = size_chunk_nat chunk"
apply (cases chunk; cases v)
by auto
lemma encode_val_length'[simp]: "int (length (encode_val chunk v)) = size_chunk chunk"
using encode_val_length by auto
fun decode_val :: "memory_chunk \<Rightarrow> (memval list) \<Rightarrow> val" where
"decode_val chunk vl =
(case proj_bytes vl of
Some bl \<Rightarrow>
(case chunk of
Mint8signed \<Rightarrow> Vint(scast (Byte.repr (decode_int bl)))
| Mint8unsigned \<Rightarrow> Vint(ucast (Byte.repr (decode_int bl)))
| Mint16signed \<Rightarrow> Vint(scast (Short.repr (decode_int bl)))
| Mint16unsigned \<Rightarrow> Vint(ucast (Short.repr (decode_int bl)))
| Mint32 \<Rightarrow> Vint(Int.repr(decode_int bl))
| Mint64 \<Rightarrow> Vlong(Int64.repr(decode_int bl))
| Mfloat32 \<Rightarrow> Vsingle(Float32_repr (decode_int bl))
| Mfloat64 \<Rightarrow> Vfloat(Float64_repr (decode_int bl))
| Many32 \<Rightarrow> Vundef
| Many64 \<Rightarrow> Vundef
)
| None \<Rightarrow>
(case chunk of
Mint32 \<Rightarrow> if Archi.ptr64 then Vundef else Val.load_result chunk (proj_value Q32 vl)
| Many32 \<Rightarrow> Val.load_result chunk (proj_value Q32 vl)
| Mint64 \<Rightarrow> if Archi.ptr64 then Val.load_result chunk (proj_value Q64 vl) else Vundef
| Many64 \<Rightarrow> Val.load_result chunk (proj_value Q64 vl)
| _ \<Rightarrow> Vundef
)
)"
lemma load_result[simp]:
"decode_val chunk (encode_val chunk v) = Val.load_result chunk v"
apply (cases chunk; cases v)
by (auto)
section \<open>Compatibility with memory injections\<close>
(** Relating two memory values according to a memory injection. *)
inductive memval_inject :: "meminj \<Rightarrow> memval \<Rightarrow> memval \<Rightarrow> bool" where
memval_inject_byte:
"memval_inject f (Byte n) (Byte n)"
| memval_inject_frag:
"Val.inject f v1 v2 \<Longrightarrow>
memval_inject f (Fragment v1 q n) (Fragment v2 q n)"
| memval_inject_undef:
"memval_inject f Undef mv"
lemmas encode_simps = encode_val.simps decode_val.simps
declare encode_simps[simp del]
end |
@testset "Pzero" begin
Random.seed!(0)
nsamps = 50000
@testset "same dist" begin
log_bf = cdd.compute_log_bf(Pzero(NC=10000, NT=10000, QC=1000, QT=1000), nsamps)
@test abs(log_bf) < 2
end
@testset "diff dist" begin
log_bf = cdd.compute_log_bf(Pzero(NC=10000, NT=10000, QC=9000, QT=1000), nsamps)
@test abs(log_bf) > 10
end
end
|
\section{Implementation}
%While there are existing bindings to these socket protocols for languages such as python~\cite{snakeoil,pyscrc}, we present the first such binding for the functional programming language Haskell.
|
[STATEMENT]
lemma infty_inf_unbox:
"Num a \<noteq> top"
"top \<noteq> Num a"
"Infty = top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Num a \<noteq> top &&& top \<noteq> Num a &&& Infty = top
[PROOF STEP]
by (auto simp add: top_infty_def) |
module Libra.Constants
import Data.Vect
%access export
%default total
{-
Constants
-}
||| Lowercase and uppercase alphabetic characters
Alphabet : Vect 52 Char
Alphabet = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' ]
||| Numerical characters from 0 through 9
Digits : Vect 10 Char
Digits = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]
||| Alphanumerical characters corresponding to: [a-zA-Z0-9]
Alphanumeric : Vect 62 Char
Alphanumeric = Alphabet ++ Digits
|
import data.matrix.notation
import .short_exact_sequence
noncomputable theory
open category_theory
open category_theory.limits
universes v u
namespace homological_complex
variables (C : Type u) [category.{v} C] [abelian C]
variables {ฮน : Type*} {c : complex_shape ฮน}
abbreviation Fst : chain_complex (short_exact_sequence C) โ โฅค
homological_complex C (complex_shape.down โ) :=
(short_exact_sequence.Fst C).map_homological_complex _
abbreviation Snd : chain_complex (short_exact_sequence C) โ โฅค
homological_complex C (complex_shape.down โ) :=
(short_exact_sequence.Snd C).map_homological_complex _
abbreviation Trd : chain_complex (short_exact_sequence C) โ โฅค
homological_complex C (complex_shape.down โ) :=
(short_exact_sequence.Trd C).map_homological_complex _
abbreviation Fst_Snd : Fst C โถ Snd C :=
nat_trans.map_homological_complex (short_exact_sequence.f_nat C) _
abbreviation Snd_Trd : Snd C โถ Trd C :=
nat_trans.map_homological_complex (short_exact_sequence.g_nat C) _
variables {C}
variables (A : chain_complex (short_exact_sequence C) โ)
instance Fst_Snd_mono (n : โ) : mono (((Fst_Snd C).app A).f n) := (A.X n).mono'
instance Snd_Trd_epi (n : โ) : epi (((Snd_Trd C).app A).f n) := (A.X n).epi'
lemma Fst_Snd_Trd_exact (n : โ) : exact (((Fst_Snd C).app A).f n) (((Snd_Trd C).app A).f n) :=
(A.X n).exact'
end homological_complex |
function X = matrandcong(m,n,gamma)
%MATRANDCONG Create a random matrix with a fixed congruence.
%
% X = MATRANDCONG(M,N,GAMMA) creates a matrix X of size M x N such
% that each column of X has norm 1 and any two columns of X have an inner
% product equal to GAMMA.
%
% Based on code from Evrim Acar and the paper G. Tomasi and R. Bro, A
% comparison of algorithms for fitting the PARAFAC model, Computational
% Statistics & Data Analysis, 50: 1700-1734, 2006.
%
% See also MATRANDORTH, MATRANDNORM, CREATE_PROBLEM, CREATE_GUESS.
%
%MATLAB Tensor Toolbox.
%Copyright 2015, Sandia Corporation.
% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others.
% http://www.sandia.gov/~tgkolda/TensorToolbox.
% Copyright (2015) Sandia Corporation. Under the terms of Contract
% DE-AC04-94AL85000, there is a non-exclusive license for use of this
% work by or on behalf of the U.S. Government. Export of this data may
% require a license from the United States Government.
% The full license terms can be found in the file LICENSE.txt
CG = gamma * ones(n,n) + (1-gamma) * eye(n);
CGR = chol(CG);
X = randn(m,n);
[Q,~] = qr(X,0);
X = Q * CGR;
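% Illustrative check (not part of the toolbox interface): because X = Q*CGR and
% Q has orthonormal columns, X'*X = CGR'*CGR = CG, so each column of X has unit
% norm and any two columns have inner product gamma. For example:
%   X = matrandcong(100, 5, 0.3);
%   G = X' * X;   % diag(G) is all ones; off-diagonal entries equal 0.3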
|
function [Com] = Data_Input(Time,ComandoC,ComandoC0,startCC,ComandoR,ComandoR0,startCR,ComandoS,FreqS,ComandoS0,FreqS0,startCS)
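% Builds a composite command signal Com(t) = constant + ramp + sinusoid.
% Before each start time (startCC, startCR, startCS) the initial values
% (ComandoC0, ComandoR0, ComandoS0 with FreqS0) are used; afterwards the final
% values apply. The ramp keeps the offset K = ComandoR0*startCR so it stays
% continuous at the switch, and the sine is re-phased to start at startCS.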
if(Time < startCC)
ComC=ComandoC0;
else
ComC=ComandoC;
end
if(Time < startCR)
ComR=ComandoR0;
startR=0;
K=0;
else
ComR=ComandoR;
startR=startCR;
K=ComandoR0*startCR;
end
if(Time < startCS)
ComS=ComandoS0;
Freq=FreqS0;
T=0;
else
ComS=ComandoS;
Freq=FreqS;
T=startCS;
end
Com=ComC+K+ComR*(Time-startR)+ComS*sin(Freq*(Time-T));
end |
.ds TL "Block Driver"
.NH "Example of a Block Driver"
.PP
This section gives an example driver for a block device:
the \*(CO driver for the AT hard disk.
This driver is described in the article
.B at
in the \*(CO Lexicon.
The source is kept in directory
.BR /etc/conf/at/src/at.c .
.PP
In the following, code appears in monospaced font; comments appear in Roman.
.SH "Preliminaries"
.PP
The following code prefaces the driver.
.Sh "Header Files"
.PP
.B at
uses the following header files.
Because header files have changed drastically for \*(CO 4.2, you should note
carefully the suite included here:
.DM
#include <sys/cmn_err.h>
#include <sys/inline.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <stdlib.h>
.DE
.DM
#include <kernel/typed.h>
#include <sys/coherent.h>
#include <sys/uproc.h>
#include <sys/fdisk.h>
#include <sys/hdioctl.h>
#include <sys/buf.h>
#include <sys/con.h>
#include <sys/devices.h>
.DE
.Sh "Manifest Constants"
.PP
.B at
uses the following manifest constants and macros:
.DM
#define LOCAL
.DE
.DM
.ta 3.0i
#define HDBASE 0x01F0 /* Port base */
#define SOFTLIM 6 /* (7) num of retries before diag */
#define HARDLIM 8 /* number of retries before fail */
#define BADLIM 100 /* num to stop recov if flagged bad */
.DE
.DM
.ta 3.0i
#define BIT(n) (1 << (n))
.DE
.DM
.ta 3.0i
#define CMOSA 0x70 /* write cmos address to this port */
#define CMOSD 0x71 /* read cmos data through this port */
.DE
.DM
.ta 2.0i 3.5i
/*
* I/O Port Addresses
*/
#define DATA_REG (HDBASE + 0) /* data (r/w) */
#define AUX_REG (HDBASE + 1) /* error(r), write precomp cyl/4 (w) */
#define NSEC_REG (HDBASE + 2) /* sector count (r/w) */
#define SEC_REG (HDBASE + 3) /* sector number (r/w) */
#define LCYL_REG (HDBASE + 4) /* low cylinder (r/w) */
#define HCYL_REG (HDBASE + 5) /* high cylinder (r/w) */
#define HDRV_REG (HDBASE + 6) /* drive/head (r/w) (D <<4)+(1 << H) */
#define CSR_REG (HDBASE + 7) /* status (r), command (w) */
#define HF_REG (HDBASE + 0x206) /* Usually 0x3F6 */
.DE
.DM
.ta 2.0i 3.5i
/*
* Error from AUX_REG (r)
*/
#define DAM_ERR BIT(0) /* data address mark not found */
#define TR0_ERR BIT(1) /* track 000 not found */
#define ABT_ERR BIT(2) /* aborted command */
#define ID_ERR BIT(4) /* id not found */
#define ECC_ERR BIT(6) /* data ecc error */
#define BAD_ERR BIT(7) /* bad block detect */
.DE
.DM
.ta 2.0i 3.5i
/*
* Status from CSR_REG (r)
*/
#define ERR_ST BIT(0) /* error occurred */
#define INDEX_ST BIT(1) /* index pulse */
#define SOFT_ST BIT(2) /* soft (corrected) ECC error */
#define DRQ_ST BIT(3) /* data request */
#define SKC_ST BIT(4) /* seek complete */
#define WFLT_ST BIT(5) /* improper drive operation */
#define RDY_ST BIT(6) /* drive is ready */
#define BSY_ST BIT(7) /* controller is busy */
.DE
.DM
.ta 2.0i 3.5i
/*
* Commands to CSR_REG (w)
*/
#define RESTORE(rate) (0x10 +(rate)) /* X */
#define SEEK(rate) (0x70 +(rate)) /* X */
#define READ_CMD (0x20) /* X */
#define WRITE_CMD (0x30) /* X */
#define FORMAT_CMD (0x50) /* X */
#define VERIFY_CMD (0x40) /* X */
#define DIAGNOSE_CMD (0x90) /* X */
#define SETPARM_CMD (0x91) /* X */
.DE
.DM
.ta 2.0i 3.5i
/*
* Device States.
*/
#define SIDLE 0 /* controller idle */
#define SRETRY 1 /* seeking */
#define SREAD 2 /* reading */
#define SWRITE 3 /* writing */
.DE
.Sh "Function Declarations"
.PP
The following declares the functions used in the driver.
.DM
.ta 2.0i 3.5i
/*
* Forward Referenced Functions.
*/
LOCAL void atreset ();
LOCAL int atdequeue ();
LOCAL void atstart ();
LOCAL void atdefer ();
LOCAL int aterror ();
LOCAL void atrecov ();
LOCAL void atdone ();
.DE
.Sh "Macros"
.PP
.B at
uses the following macros:
.DM
.ta 2.0i 3.5i
#define NOTBUSY() ((inb (ATSREG) & BSY_ST) == 0)
#define DATAREQUESTED() ((inb (ATSREG) & DRQ_ST) != 0)
#define ATDRQ() (DATAREQUESTED () ? 1 : atdrq ())
#define ATBSYW(u) (NOTBUSY () ? 1 : myatbsyw (u))
.DE
.Sh "Global and Static Variables"
.PP
.B at
uses the following global and static variables:
.DM
extern typed_space boot_gift;
extern short at_drive_ct;
.DE
The following are used throughout the driver:
.IP \fBATSECS\fR
This is the number of seconds to wait for an expected interrupt.
.IP \fBATSREG\fR
This must be 3F6 for most new IDE drives; or
1F7 for Perstor controllers and some old IDE drives.
Either value works with most drives.
.IP \fBatparm\fR
This holds drive parameters.
If initialized to zero,
the driver will try to use values it read from the BIOS during real-mode
startup.
.DM
extern unsigned ATSECS;
extern unsigned ATSREG;
extern struct hdparm_s atparm [];
.DE
.PP
The next line gives the global variable that holds the partition parameters,
as copied from the disk.
There are
.B "N_ATDRV * NPARTN"
positions for the user partitions,
plus
.B N_ATDRV
additional partitions to span each drive.
.PP
When aligning partitions on cylinder boundaries, the
optimal partition size is 14,280 blocks (2 * 3 * 4 * 5 * 7 * 17); whereas
an acceptable partition size is 7,140 blocks (3 * 4 * 5 * 7 * 17).
.DM
static struct fdisk_s pparm [N_ATDRV * NPARTN + N_ATDRV];
.DE
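.PP
For orientation only (the values here are assumed for illustration, not taken
from the headers), if
.B N_ATDRV
were 2 and
.B NPARTN
were 4, the array would be laid out as follows:
.DM
/* Hypothetical layout, assuming N_ATDRV == 2 and NPARTN == 4 */
/* pparm[0..3] user partitions a..d of drive 0 */
/* pparm[4..7] user partitions a..d of drive 1 */
/* pparm[8..9] whole-drive entries for drives 0 and 1 */
.DE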
The following structure
.B at
holds information about the disk controller.
There exists one copy of this structure for each AT controller.
.DM
.ta 0.5i 3.5i
static struct at {
BUF *at_actf; /* Link to first */
BUF *at_actl; /* Link to last */
paddr_t at_addr; /* Source/Dest virtual address */
daddr_t at_bno; /* Block # on disk */
unsigned at_nsec; /* # of sectors on current transfer */
unsigned at_drv;
unsigned at_head;
unsigned at_cyl;
unsigned at_sec;
unsigned at_partn;
unsigned char at_dtype [N_ATDRV]; /* drive type, 0 if unused */
unsigned char at_tries;
unsigned char at_state;
unsigned at_totalsec;
} at;
.DE
Finally, this last variable holds the template of the message
to be displayed when an AT drive times out.
.DM
static char timeout_msg [] = "at%d: TO\en";
.DE
.SH "Load Routine"
.PP
.B atload()
is the routine that the kernel executes when this driver is loaded.
Under \*(CO 4.2, it is executed once, when the kernel is booted.
.PP
This function resets the controller, grabs the interrupt vector, and
sets up the drive characteristics.
.DM
LOCAL void
atload ()
{
unsigned int u;
struct hdparm_s * dp;
struct { unsigned short off, seg; } p;
.DE
.DM
if (at_drive_ct <= 0)
return;
/* Flag drives 0, 1 as present or not. */
at.at_dtype [0] = 1;
at.at_dtype [1] = at_drive_ct > 1 ? 1 : 0;
.DE
.DM
.DE
.DM
/* Obtain Drive Characteristics. */
for (u = 0, dp = atparm; u < at_drive_ct; ++ dp, ++ u) {
struct hdparm_s int_dp;
unsigned short ncyl = _CHAR2_TO_USHORT (dp->ncyl);
.DE
.DM
if (ncyl == 0) {
/*
* Not patched.
*
* If tertiary boot sent us parameters,
* Use "fifo" routines to fetch them.
* This only gives us ncyl, nhead, and nspt.
* Make educated guesses for other parameters:
* Set landc to ncyl, wpcc to -1.
* Set ctrl to 0 or 8 depending on head count.
*
* Follow INT 0x41/46 to get drive static BIOS drive
* parameters, if any.
*
* If there were no parameters from tertiary boot,
* or if INT 0x4? nhead and nspt match tboot parms,
* use "INT" parameters (will give better match on
* wpcc, landc, and ctrl fields, which tboot can't
* give us).
*/
.DE
.DM
FIFO * ffp;
typed_space * tp;
int found, parm_int;
.DE
.DM
if (F_NULL != (ffp = fifo_open (& boot_gift, 0))) {
for (found = 0; ! found && (tp = fifo_read (ffp)); ) {
BIOS_DISK * bdp = (BIOS_DISK *)tp->ts_data;
if ((T_BIOS_DISK == tp->ts_type) &&
(u == bdp->dp_drive) ) {
found = 1;
_NUM_TO_CHAR2(dp->ncyl,
bdp->dp_cylinders);
dp->nhead = bdp->dp_heads;
dp->nspt = bdp->dp_sectors;
_NUM_TO_CHAR2(dp->wpcc, 0xffff);
_NUM_TO_CHAR2(dp->landc,
bdp->dp_cylinders);
if (dp->nhead > 8)
dp->ctrl |= 8;
}
}
fifo_close (ffp);
}
.DE
.DM
if (u == 0)
parm_int = 0x41;
else /* (u == 1) */
parm_int = 0x46;
pxcopy ((paddr_t)(parm_int * 4), & p, sizeof p, SEL_386_KD);
pxcopy ((paddr_t)(p.seg <<4L)+ p.off,
& int_dp, sizeof (int_dp), SEL_386_KD);
.DE
.DM
if (! found || (dp->nhead == int_dp.nhead &&
dp->nspt == int_dp.nspt)) {
* dp = int_dp;
printf ("Using INT 0x%x", parm_int);
} else
printf ("Using INT 0x13(08)");
.DE
.DM
} else {
printf ("Using patched");
/*
* Avoid incomplete patching.
*/
if (at.at_dtype [u] == 0)
at.at_dtype [u] = 1;
if (dp->nspt == 0)
dp->nspt = 17;
}
.DE
.DM
#if VERBOSE > 0
printf (" drive %d parameters\en", u);
cmn_err (CE_CONT,
"at%d: ncyl=%d nhead=%d wpcc=%d eccl=%d ctrl=%d landc=%d "
"nspt=%d\en", u, _CHAR2_TO_USHORT (dp->ncyl), dp->nhead,
_CHAR2_TO_USHORT (dp->wpcc), dp->eccl, dp->ctrl,
_CHAR2_TO_USHORT (dp->landc), dp->nspt);
#endif
}
.DE
.DM
/* Initialize Drive Size. */
for (u = 0, dp = atparm; u < at_drive_ct; ++ dp, ++ u) {
if (at.at_dtype [u] == 0)
continue;
.DE
.DM
pparm [N_ATDRV * NPARTN + u].p_size =
(long) _CHAR2_TO_USHORT (dp->ncyl) * dp->nhead *
dp->nspt;
}
.DE
.DM
/* Initialize Drive Controller. */
atreset ();
}
.DE
.SH "Unload Routine"
.PP
Function
.B atunload()
is called when this driver is unloaded from memory \(em or would be, if
\*(CO 4.2 supported loadable drivers.
.DM
LOCAL void
atunload ()
{
}
.DE
.SH "Reset the Controller"
.PP
Function
.B atreset()
resets the hard-disk controller and defines drive characteristics.
.DM
LOCAL void
atreset ()
{
int u;
struct hdparm_s * dp;
.DE
.DM
/* Reset controller for a minimum of 4.8 microseconds. */
outb (HF_REG, 4);
for (u = 100; -- u != 0;)
/* DO NOTHING */ ;
outb (HF_REG, atparm [0].ctrl & 0x0F);
ATBSYW (0);
.DE
.DM
if (inb (AUX_REG) != 0x01) {
/*
* Some IDE drives always timeout on initial reset.
* So don't report first timeout.
*/
static one_bad;
.DE
.DM
if (one_bad)
printf ("at: hd controller reset timeout\en");
else
one_bad = 1;
}
.DE
.DM
/* Initialize drive parameters. */
for (u = 0, dp = atparm; u < at_drive_ct; ++ dp, ++ u) {
if (at.at_dtype [u] == 0)
continue;
ATBSYW (u);
.DE
.DM
/* Set drive characteristics. */
outb (HF_REG, dp->ctrl);
outb (HDRV_REG, 0xA0 + (u << 4) + dp->nhead - 1);
.DE
.DM
outb (AUX_REG, _CHAR2_TO_USHORT (dp->wpcc) / 4);
outb (NSEC_REG, dp->nspt);
outb (SEC_REG, 0x01);
outb (LCYL_REG, dp->ncyl [0]);
outb (HCYL_REG, dp->ncyl [1]);
outb (CSR_REG, SETPARM_CMD);
ATBSYW (u);
.DE
.DM
/* Restore heads. */
outb (CSR_REG, RESTORE (0));
ATBSYW (u);
}
}
.DE
.SH "Open Routine"
.PP
Function
.B atopen()
is called when a user's application invokes the system call
.B open()
for an AT device.
A pointer to this function appears in field
.B c_open
of the
.B CON
structure at the end of this driver.
.PP
This function validates the minor device (that is, it ensures that the
user is attempting to open a device that exists), and updates
the partition table if necessary.
.DM
LOCAL void
atopen (dev, mode)
dev_t dev;
{
int d; /* drive */
int p; /* partition */
.DE
.DM
p = minor (dev) % (N_ATDRV * NPARTN);
if (minor (dev) & SDEV) {
d = minor (dev) % N_ATDRV;
p += N_ATDRV * NPARTN;
} else
d = minor (dev) / NPARTN;
.DE
.DM
if (d >= N_ATDRV || at.at_dtype [d] == 0) {
printf ("atopen: drive %d not present ", d);
set_user_error (ENXIO);
return;
}
if (minor (dev) & SDEV)
return;
.DE
.DM
/* If partition not defined read partition characteristics. */
if (pparm [p].p_size == 0)
fdisk (makedev (major (dev), SDEV + d), & pparm [d * NPARTN]);
.DE
.DM
/* Ensure partition lies within drive boundaries and is non-zero size. */
if (pparm [p].p_base + pparm [p].p_size >
pparm [d + N_ATDRV * NPARTN].p_size) {
printf ("atopen: p_size too big ");
set_user_error (EINVAL);
} else if (pparm [p].p_size == 0) {
printf ("atopen: p_size zero ");
set_user_error (ENODEV);
}
}
.DE
.SH "Read Routine"
.PP
Function
.B atread()
is called when a user's application invokes the system call
.B read()
for an AT device.
A pointer to this function appears in field
.B c_read
of the
.B CON
structure at the end of this driver.
This function simply invokes the common code for processing raw I/O.
.DM
LOCAL void
atread (dev, iop)
dev_t dev;
IO *iop;
{
ioreq (NULL, iop, dev, BREAD, BFRAW | BFBLK | BFIOC);
}
.DE
.SH "Write Routine"
.PP
Function
.B atwrite()
is called when a user's application invokes the system call
.B write()
for an AT device.
A pointer to this function appears in field
.B c_write
of the
.B CON
structure at the end of this driver.
This function simply invokes the common code for processing raw I/O.
.DM
LOCAL void
atwrite (dev, iop)
dev_t dev;
IO *iop;
{
ioreq (NULL, iop, dev, BWRITE, BFRAW | BFBLK | BFIOC);
}
.DE
.SH "ioctl Routine"
.PP
Function
.B atioctl()
is called when a user's application invokes the system call
.B ioctl()
for an AT device.
A pointer to this function appears in field
.B c_ioctl
of the
.B CON
structure at the end of this driver.
This function validates the minor device and
updates the partition table if necessary.
.DM
LOCAL void
atioctl (dev, cmd, vec)
dev_t dev;
int cmd;
char *vec;
{
int d;
.DE
.DM
/* Identify drive number. */
if (minor (dev) & SDEV)
d = minor (dev) % N_ATDRV;
else
d = minor (dev) / NPARTN;
.DE
.DM
/* Identify input / output request. */
switch (cmd) {
case HDGETA:
/* Get hard disk attributes. */
kucopy (atparm + d, vec, sizeof (atparm [0]));
break;
.DE
.DM
case HDSETA:
/* Set hard disk attributes. */
ukcopy (vec, atparm + d, sizeof (atparm [0]));
at.at_dtype [d] = 1; /* set drive type nonzero */
pparm [N_ATDRV * NPARTN + d].p_size =
(long) _CHAR2_TO_USHORT (atparm [d].ncyl) *
atparm [d].nhead * atparm [d].nspt;
atreset ();
break;
.DE
.DM
default:
set_user_error (EINVAL);
break;
}
}
.DE
.SH "Watch for Interrupts"
.PP
Function
.B atwatch()
watches for interrupts.
If
.B drvl[AT_MAJOR]
is greater than zero, this function decrements it.
If it decrements to zero, it simulates a hardware interrupt.
.DM
LOCAL void
atwatch()
{
BUF * bp = at.at_actf;
int s;
.DE
.DM
s = sphi ();
if (-- drvl [AT_MAJOR].d_time > 0) {
spl (s);
return;
}
.DE
.DM
/* Reset hard disk controller, cancel request. */
atreset ();
if (at.at_tries ++ < SOFTLIM) {
atstart ();
} else {
printf ("at%d%c: bno=%lu head=%u cyl=%u nsec=%u tsec=%d "
"dsec=%d <Watchdog Timeout>\en", at.at_drv,
(bp->b_dev & SDEV) ? 'x' : at.at_partn % NPARTN + 'a',
bp->b_bno, at.at_head, at.at_cyl, at.at_nsec,
at.at_totalsec, inb (NSEC_REG));
.DE
.DM
at.at_actf->b_flag |= BFERR;
atdone (at.at_actf);
}
spl (s);
}
.DE
.SH "Block Function"
.PP
Function
.B atblock()
queues a block to the disk.
It also ensures that the transfer is within the disk partition.
.DM
LOCAL void
atblock (bp)
BUF * bp;
{
struct fdisk_s * pp;
int partn = minor (bp->b_dev) % (N_ATDRV * NPARTN);
int s;
.DE
.DM
bp->b_resid = bp->b_count;
if (minor (bp->b_dev) & SDEV)
partn += N_ATDRV * NPARTN;
pp = pparm + partn;
.DE
.DM
/* Check for read at end of partition. */
if (bp->b_req == BREAD && bp->b_bno == pp->p_size) {
bdone (bp);
return;
}
.DE
.DM
/* Range check disk region. */
if (bp->b_bno + (bp->b_count / BSIZE) > pp->p_size ||
bp->b_count % BSIZE != 0 || bp->b_count == 0) {
bp->b_flag |= BFERR;
bdone (bp);
return;
}
.DE
.DM
s = sphi ();
bp->b_actf = NULL;
if (at.at_actf == NULL)
at.at_actf = bp;
else
at.at_actl->b_actf = bp;
at.at_actl = bp;
spl (s);
.DE
.DM
if (at.at_state == SIDLE)
if (atdequeue ())
atstart ();
}
.DE
.SH "Dequeue a Request"
.PP
Function
.B atdequeue()
obtains the next request for disk I/O.
.DM
LOCAL int
atdequeue ()
{
BUF * bp;
struct fdisk_s * pp;
struct hdparm_s * dp;
unsigned int nspt;
ldiv_t addr;
unsigned short secs;
unsigned short newsec;
.DE
.DM
at.at_tries = 0;
if ((bp = at.at_actf) == NULL)
return 0;
.DE
.DM
at.at_partn = minor (bp->b_dev) % (N_ATDRV * NPARTN);
if (minor (bp->b_dev) & SDEV) {
at.at_partn += N_ATDRV * NPARTN;
at.at_drv = minor (bp->b_dev) % N_ATDRV;
} else
at.at_drv = minor (bp->b_dev) / NPARTN;
.DE
.DM
nspt = atparm [at.at_drv].nspt;
at.at_addr = bp->b_paddr;
pp = pparm + at.at_partn;
at.at_bno = pp->p_base + bp->b_bno;
.DE
.DM
dp = atparm + at.at_drv;
addr = ldiv (at.at_bno, dp->nspt);
at.at_sec = addr.rem + 1;
addr = ldiv (addr.quot, dp->nhead);
at.at_cyl = addr.quot;
at.at_head = addr.rem;
.DE
The following code was added to the driver for \*(CO 4.2, to speed I/O
on the AT disk.
The following explains how this speed-up works.
.PP
It is unclear why, but IDE writes appear always to lose a revolution,
even though reads work comfortably.
This may be caused by IDE drives trying to maintain the synchronous
semantics of the write, or it may be due to
the \*(CO kernel's not making the read time and the
slack being taken up by track-buffering.
.PP
In either case, \*(CO gains a vast improvement in throughput for writes and
a modest gain for reads by looking ahead in the request chain and
coalescing separate requests to consecutive blocks into a single
multi-sector request.
.DM
newsec = secs = bp->b_count / BSIZE;
while (bp->b_actf != NULL && bp->b_actf->b_bno == bp->b_bno + secs &&
bp->b_actf->b_req == bp->b_req &&
bp->b_actf->b_dev == bp->b_dev) {
/*
* Take care to bound the length of the combined request to a
* single byte count of sectors.
*/
bp = bp->b_actf;
.DE
.DM
if (newsec + (secs = bp->b_count / BSIZE) > 256)
break;
newsec += secs;
}
at.at_totalsec = at.at_nsec = newsec;
return 1;
}
.DE
.SH "Send Data to the Disk"
.PP
Function
.B atsend()
actually moves data onto the disk.
.DM
LOCAL void
atsend (addr)
paddr_t addr;
{
addr = P2P (addr);
repoutsw (DATA_REG, (unsigned short *) __PTOV (addr), BSIZE / 2);
}
.DE
.SH "Receive Data from the Disk"
.PP
Function
.B atrecv()
actually receives data from the disk.
.DM
LOCAL void
atrecv (addr)
paddr_t addr;
{
addr = P2P (addr);
repinsw (DATA_REG, (unsigned short *) __PTOV (addr), BSIZE / 2);
}
.DE
.SH "Abandon a Request"
.PP
Function
.B atabandon()
abandons a request for disk I/O.
.DM
LOCAL void
atabandon ()
{
buf_t *bp;
.DE
.DM
/* Abandon this operation. */
while ((bp = at.at_actf) != NULL) {
at.at_actf = bp->b_actf;
bp->b_flag |= BFERR;
bdone (bp);
}
at.at_state = SIDLE;
}
.DE
.SH "Start a Read/Write Operation"
.PP
Function
.B atstart()
starts or restarts the next disk read/write operation.
.DM
LOCAL void
atstart ()
{
struct hdparm_s * dp;
.DE
.DM
/* Check for repeated access to most recently identified bad track. */
ATBSYW (at.at_drv);
dp = atparm + at.at_drv;
outb (HF_REG, dp->ctrl);
outb (HDRV_REG, (at.at_drv << 4) + at.at_head + 0xA0);
.DE
.DM
outb (NSEC_REG, at.at_nsec);
outb (SEC_REG, at.at_sec);
outb (LCYL_REG, at.at_cyl);
outb (HCYL_REG, at.at_cyl >> 8);
.DE
.DM
if (inb (NSEC_REG) != (at.at_nsec & 0xFF)) {
/*
* If we get here, things are confused. We should reset the
* controller and retry whatever operation we want to start
* now.
*/
drvl [AT_MAJOR].d_time = 1;
return;
}
.DE
.DM
if (at.at_actf->b_req == BWRITE) {
outb (CSR_REG, WRITE_CMD);
while (ATDRQ () == 0) {
atabandon ();
return;
}
atsend (at.at_addr);
at.at_state = SWRITE;
} else {
outb (CSR_REG, READ_CMD);
at.at_state = SREAD;
}
.DE
.DM
drvl [AT_MAJOR].d_time = ATSECS;
}
.DE
.SH "Interrupt Handler"
.PP
Function
.B atintr()
handles interrupts.
It clears the interrupt, and defers its processing until a more suitable time.
.DM
void
atintr ()
{
(void) inb (CSR_REG); /* clears controller interrupt */
atdefer ();
}
.DE
.SH "Defer Service of an Interrupt"
.PP
Function
.B atdefer()
actually services the hard-disk interrupt.
It transfers the required data, and updates the state of the device.
.DM
LOCAL void
atdefer ()
{
BUF * bp = at.at_actf;
switch (at.at_state) {
case SRETRY:
atstart ();
break;
.DE
.DM
case SREAD:
/* Check for I/O error before waiting for data. */
if (aterror ()) {
atrecov ();
break;
}
.DE
.DM
/* Wait for data, or forever. */
if (ATDRQ () == 0) {
atabandon ();
break;
}
.DE
.DM
/* Read data block.*/
atrecv (at.at_addr);
.DE
.DM
/* Check for I/O error after reading data. */
if (aterror ()) {
atrecov ();
break;
}
.DE
.DM
/*
* Every time we transfer a block, bump the timeout to prevent
* very large multisector transfers from timing out due to
* sheer considerations of volume.
*/
drvl [AT_MAJOR].d_time = ATSECS * 2;
.DE
.DM
at.at_addr += BSIZE;
bp->b_resid -= BSIZE;
at.at_tries = 0;
at.at_bno ++;
.DE
.DM
/*
* Check for end of transfer (total, or simply part of a large
* combined request).
*/
if (-- at.at_nsec == 0)
atdone (bp);
else if (bp->b_resid == 0) {
at.at_addr = (at.at_actf = bp->b_actf)->b_paddr;
bdone (bp);
}
break;
.DE
.DM
case SWRITE:
/* Check for I/O error. */
if (aterror ()) {
atrecov ();
break;
}
.DE
.DM
/* bump timeout again, for reasons given above. */
drvl [AT_MAJOR].d_time = ATSECS * 2;
.DE
.DM
at.at_addr += BSIZE;
bp->b_resid -= BSIZE;
at.at_tries = 0;
at.at_bno ++;
.DE
.DM
/*
* Check for end of transfer, either the real end or the end
* of a block boundary in a combined transfer.
*/
if (-- at.at_nsec == 0) {
atdone (bp);
break;
} else if (bp->b_resid == 0)
at.at_addr = bp->b_actf->b_paddr;
.DE
.DM
/* Wait for ability to send data, or forever. */
while (ATDRQ () == 0) {
atabandon ();
break;
}
.DE
.DM
/* Send data block. */
atsend (at.at_addr);
if (bp->b_resid == 0) {
at.at_actf = bp->b_actf;
bdone (bp);
}
}
}
.DE
.SH "Check for an Error"
.PP
Function
.B aterror()
checks for drive error.
If it finds an error, it increments the error count and prints a
message that reports the error.
It returns zero if it did not find an error, and one if it did.
.DM
LOCAL int
aterror ()
{
BUF * bp = at.at_actf;
int csr;
int aux;
.DE
.DM
if ((csr = inb (ATSREG)) & (ERR_ST | WFLT_ST)) {
aux = inb (AUX_REG);
.DE
.DM
if (aux & BAD_ERR) {
at.at_tries = BADLIM;
} else if (++ at.at_tries < SOFTLIM)
return 1;
.DE
.DM
printf ("at%d%c: bno =%lu head =%u cyl =%u",
at.at_drv,
(bp->b_dev & SDEV) ? 'x' : at.at_partn % NPARTN + 'a',
(bp->b_count / BSIZE) + bp->b_bno - at.at_nsec,
at.at_head, at.at_cyl);
.DE
.DM
if ((csr & (RDY_ST | WFLT_ST)) != RDY_ST)
printf (" csr =%x", csr);
if (aux & (DAM_ERR | TR0_ERR | ID_ERR | ECC_ERR | ABT_ERR))
printf (" aux =%x", aux);
.DE
.DM
if (aux & BAD_ERR)
printf (" <Block Flagged Bad>");
if (at.at_tries < HARDLIM)
printf (" retrying...");
printf ("\en");
return 1;
}
return 0;
}
.DE
.SH "Attempt to Recover from an Error"
.PP
Function
.B atrecov()
attempts to recover from a reported error.
.DM
LOCAL void
atrecov ()
{
BUF * bp = at.at_actf;
int cmd = SEEK (0);
int cyl = at.at_cyl;
.DE
.DM
switch (at.at_tries) {
case 1:
case 2:
/* Move in 1 cylinder, then retry operation */
if (--cyl < 0)
cyl += 2;
break;
.DE
.DM
case 3:
case 4:
/* Move out 1 cylinder, then retry operation */
if (++ cyl >= _CHAR2_TO_USHORT (atparm [at.at_drv].ncyl))
cyl -= 2;
break;
.DE
.DM
case 5:
case 6:
/* Seek to cylinder 0, then retry operation */
cyl = 0;
break;
.DE
.DM
default:
/* Restore drive, then retry operation */
cmd = RESTORE (0);
cyl = 0;
break;
}
.DE
.DM
/* Retry operation [after repositioning head] */
if (at.at_tries < HARDLIM) {
drvl [AT_MAJOR].d_time = cmd == RESTORE (0) ? ATSECS * 2 :
ATSECS;
outb (LCYL_REG, cyl);
outb (HCYL_REG, cyl >> 8);
outb (HDRV_REG, (at.at_drv << 4) + 0xA0);
outb (CSR_REG, cmd);
at.at_state = SRETRY;
} else {
.DE
.DM
/* Give up on block. */
bp->b_flag |= BFERR;
atdone (bp);
}
}
.DE
.SH "Release the Current I/O Buffer"
.PP
Function
.B atdone()
releases the current I/O buffer.
.DM
LOCAL void
atdone (bp)
BUF * bp;
{
at.at_actf = bp->b_actf;
drvl [AT_MAJOR].d_time = 0;
at.at_state = SIDLE;
.DE
.DM
if (atdequeue ())
atstart ();
bdone (bp);
}
.DE
.SH "Indicate the Drive Is Not Busy"
.PP
Function
.B notBusy()
indicates that the drive is not busy.
See macro
.BR NOTBUSY() ,
defined above.
.DM
int
notBusy ()
{
return NOTBUSY ();
}
.DE
.SH "Indicate Whether Data Have Been Requested"
.PP
Function
.B dataRequested()
indicates whether data have been requested.
See macro
.BR DATAREQUESTED() ,
defined above.
.DM
int
dataRequested ()
{
return DATAREQUESTED ();
}
.DE
.SH "Report a Timeout, First Version"
.PP
Function
.B _report_timeout()
actually prints the message that reports that an I/O operation has
timed out.
.DM
static int report_scheduled;
static int report_drv;
LOCAL void
_report_timeout ()
{
printf (timeout_msg, report_drv);
report_scheduled = 0;
}
.DE
.SH "Report a Timeout, Second Version"
.PP
Function
.B report_timeout()
manages the task of reporting that an I/O request has timed out.
.DM
LOCAL void
report_timeout (unit)
int unit;
{
short s = sphi();
if (report_scheduled == 0) {
report_scheduled = 1;
spl(s);
.DE
.DM
report_drv = unit;
defer (_report_timeout);
} else
spl (s);
}
.DE
.SH "Wait Until the Controller Is Freed"
.PP
Function
.B myatbsyw()
waits while the controller is busy.
It returns zero if the driver timed out while executing this I/O task;
or a non-zero value if it did not.
.DM
int
myatbsyw (unit)
int unit;
{
if (busyWait (notBusy, ATSECS * HZ))
return 1;
report_timeout (unit);
return 0;
}
.DE
.SH "Wait for Controller to Initiate Request"
.PP
Function
.B atdrq()
waits for the controller to initiate a request.
It returns zero if the driver timed out while waiting; or one if it
did not.
.DM
int
atdrq ()
{
if (busyWait (dataRequested, ATSECS /* * HZ */))
return 1;
report_timeout (at.at_drv);
return 0;
}
.DE
.SH "The CON Structure"
.PP
Finally, the following gives the
.B CON
structure for this driver.
This structure contains pointers to the functions to be invoked by
the kernel's system calls.
For details on this structure, see the entry for
.B CON
in this manual's Lexicon.
.DM
CON atcon = {
DFBLK | DFCHR, /* Flags */
AT_MAJOR, /* Major index */
atopen, /* Open */
NULL, /* Close */
atblock, /* Block */
atread, /* Read */
atwrite, /* Write */
atioctl, /* Ioctl */
NULL, /* Powerfail */
atwatch, /* Timeout */
atload, /* Load */
atunload /* Unload */
};
.DE
.SH "Where To Go From Here"
.PP
The following section gives an example of a driver for a character device.
The kernel functions invoked in this driver are described in this manual's
Lexicon.
|
[STATEMENT]
lemma truncate_down_eq_zero_iff[simp]: "truncate_down prec x = 0 \<longleftrightarrow> x = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (truncate_down prec x = 0) = (x = 0)
[PROOF STEP]
by (metis not_less_iff_gr_or_eq truncate_down_less_zero_iff truncate_down_pos truncate_down_zero) |
/*
* BRAINS
* (B)LR (R)everberation-mapping (A)nalysis (I)n AGNs with (N)ested (S)ampling
* Yan-Rong Li, [email protected]
* Thu, Aug 4, 2016
*/
/*!
* \file dnest_con.c
* \brief run dnest for continuum analysis
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_interp.h>
#include <mpi.h>
#include "brains.h"
DNestFptrSet *fptrset_con;
/*!
 * This function runs dnest sampling for the continuum.
*/
int dnest_con(int argc, char **argv)
{
int i;
num_params = parset.n_con_recon + num_params_var;
par_range_model = malloc( num_params * sizeof(double *));
par_prior_gaussian = malloc(num_params * sizeof(double *));
for(i=0; i<num_params; i++)
{
par_range_model[i] = malloc(2*sizeof(double));
par_prior_gaussian[i] = malloc(2*sizeof(double));
}
par_prior_model = malloc( num_params * sizeof(int));
par_fix = (int *) malloc(num_params * sizeof(int));
par_fix_val = (double *) malloc(num_params * sizeof(double));
fptrset_con = dnest_malloc_fptrset();
/* setup functions used for dnest*/
fptrset_con->from_prior = from_prior_con;
fptrset_con->perturb = perturb_con;
fptrset_con->print_particle = print_particle_con;
fptrset_con->restart_action = restart_action_con;
fptrset_con->accept_action = accept_action_con;
fptrset_con->kill_action = kill_action_con;
fptrset_con->read_particle = read_particle_con;
if(parset.flag_exam_prior != 1)
{
fptrset_con->log_likelihoods_cal = log_likelihoods_cal_con;
fptrset_con->log_likelihoods_cal_initial = log_likelihoods_cal_initial_con;
fptrset_con->log_likelihoods_cal_restart = log_likelihoods_cal_restart_con;
}
else
{
fptrset_con->log_likelihoods_cal = log_likelihoods_cal_con_exam;
fptrset_con->log_likelihoods_cal_initial = log_likelihoods_cal_con_exam;
fptrset_con->log_likelihoods_cal_restart = log_likelihoods_cal_con_exam;
}
set_par_range_con();
/* setup fixed parameters */
for(i=0; i<num_params; i++)
{
par_fix[i] = 0;
par_fix_val[i] = -DBL_MAX;
}
/* fix systematic error of continuum */
if(parset.flag_con_sys_err != 1)
{
par_fix[0] = 1;
par_fix_val[0] = log(1.0);
}
print_par_names_con();
  /* run dnest unless we only want to print the parameter names */
if(parset.flag_para_name != 1)
logz_con = dnest(argc, argv, fptrset_con, num_params, dnest_options_file);
dnest_free_fptrset(fptrset_con);
return 0;
}
/*!
 * This function sets the parameter range.
*/
void set_par_range_con()
{
int i;
/* variability parameters */
for(i=0; i<num_params_drw; i++)
{
par_range_model[i][0] = var_range_model[i][0];
par_range_model[i][1] = var_range_model[i][1];
par_prior_model[i] = UNIFORM;
par_prior_gaussian[i][0] = 0.0;
par_prior_gaussian[i][1] = 0.0;
}
/* parameters for long-term trend */
for(i=num_params_drw; i<num_params_drw + num_params_trend; i++)
{
par_range_model[i][0] = var_range_model[3][0];
par_range_model[i][1] = var_range_model[3][1];
par_prior_model[i] = GAUSSIAN;
par_prior_gaussian[i][0] = 0.0;
par_prior_gaussian[i][1] = 1.0;
}
/* parameter for trend difference */
for(i= num_params_drw + num_params_trend; i<num_params_var; i++)
{
par_range_model[i][0] = var_range_model[4 + i - (num_params_drw + num_params_trend)][0];
par_range_model[i][1] = var_range_model[4 + i - (num_params_drw + num_params_trend)][1];
par_prior_model[i] = UNIFORM;
par_prior_gaussian[i][0] = 0.0;
par_prior_gaussian[i][1] = 0.0;
}
/* continuum light curve parameters */
for(i=num_params_var; i<num_params; i++)
{
par_range_model[i][0] = var_range_model[4+num_params_difftrend][0];
par_range_model[i][1] = var_range_model[4+num_params_difftrend][1];
par_prior_model[i] = GAUSSIAN;
par_prior_gaussian[i][0] = 0.0;
par_prior_gaussian[i][1] = 1.0;
}
return;
}
/*!
* print names and prior ranges for parameters
*
*/
void print_par_names_con()
{
if(thistask!= roottask)
return;
int i, j;
FILE *fp;
char fname[BRAINS_MAX_STR_LENGTH], str_fmt[BRAINS_MAX_STR_LENGTH];
sprintf(fname, "%s/%s", parset.file_dir, "data/para_names_con.txt");
fp = fopen(fname, "w");
if(fp == NULL)
{
fprintf(stderr, "# Error: Cannot open file %s.\n", fname);
exit(0);
}
strcpy(str_fmt, "%4d %-15s %10.6f %10.6f %4d %4d %15.6e\n");
printf("# Print parameter name in %s\n", fname);
fprintf(fp, "#*************************************************\n");
fprint_version(fp);
fprintf(fp, "#*************************************************\n");
fprintf(fp, "%4s %-15s %10s %10s %4s %4s %15s\n", "#", "Par", "Min", "Max", "Prior", "Fix", "Val");
i=0;
fprintf(fp, str_fmt, i, "sys_err_con", par_range_model[i][0], par_range_model[i][1], par_prior_model[i],
par_fix[i], par_fix_val[i]);
i++;
fprintf(fp, str_fmt, i, "sigmad", par_range_model[i][0], par_range_model[i][1], par_prior_model[i],
par_fix[i], par_fix_val[i]);
i++;
fprintf(fp, str_fmt, i, "taud", par_range_model[i][0], par_range_model[i][1], par_prior_model[i],
par_fix[i], par_fix_val[i]);
for(j=0; j<num_params_trend; j++)
{
i++;
fprintf(fp, str_fmt, i, "trend", par_range_model[i][0], par_range_model[i][1], par_prior_model[i],
par_fix[i], par_fix_val[i]);
}
for(j=0; j<num_params_difftrend; j++)
{
i++;
fprintf(fp, str_fmt, i, "diff trend", par_range_model[i][0], par_range_model[i][1], par_prior_model[i],
par_fix[i], par_fix_val[i]);
}
for(j=0; j<parset.n_con_recon; j++)
{
i++;
fprintf(fp, str_fmt, i, "time series", par_range_model[i][0], par_range_model[i][1], par_prior_model[i],
par_fix[i], par_fix_val[i]);
}
fclose(fp);
}
/*!
 * This function generates a sample from the prior.
*/
void from_prior_con(void *model)
{
int i;
double *pm = (double *)model;
for(i=0; i<num_params; i++)
{
if(par_prior_model[i] == GAUSSIAN )
{
pm[i] = dnest_randn() * par_prior_gaussian[i][1] + par_prior_gaussian[i][0];
dnest_wrap(&pm[i], par_range_model[i][0], par_range_model[i][1]);
}
else
{
pm[i] = var_range_model[i][0] + dnest_rand()*(par_range_model[i][1] - par_range_model[i][0]);
}
}
for(i=0; i<num_params_var; i++)
{
if(par_fix[i] == 1)
pm[i] = par_fix_val[i];
}
  /* all parameters need to be updated at the initial step */
which_parameter_update = -1;
}
/*!
 * This function calculates the log likelihood probability.
*/
double log_likelihoods_cal_con(const void *model)
{
double logL;
logL = prob_con_variability_semiseparable(model);
return logL;
}
/*!
 * This function calculates the log likelihood probability at the initial step.
*/
double log_likelihoods_cal_initial_con(const void *model)
{
double logL;
logL = prob_con_variability_initial_semiseparable(model);
return logL;
}
/*!
 * This function calculates the log likelihood probability at the restart step.
*/
double log_likelihoods_cal_restart_con(const void *model)
{
double logL;
logL = prob_con_variability_initial_semiseparable(model);
return logL;
}
/*!
 * This function generates a new move of the parameters.
*/
double perturb_con(void *model)
{
double *pm = (double *)model;
double logH = 0.0, limit1, limit2, width, rnd;
int which, which_level;
/* sample variability parameters more frequently */
do
{
rnd = dnest_rand();
if(rnd < 0.1)
which = dnest_rand_int(num_params_var);
else
which = dnest_rand_int(parset.n_con_recon) + num_params_var;
}while(par_fix[which] == 1);
which_parameter_update = which;
/* level-dependent width */
which_level_update = dnest_get_which_level_update();
which_level = which_level_update > (size_levels - 10)?(size_levels-10):which_level_update;
if( which_level > 0)
{
limit1 = limits[(which_level-1) * num_params *2 + which *2];
limit2 = limits[(which_level-1) * num_params *2 + which *2 + 1];
width = limit2 - limit1;
}
else
{
width = ( par_range_model[which][1] - par_range_model[which][0] );
}
if(par_prior_model[which] == GAUSSIAN)
{
logH -= (-0.5*pow((pm[which] - par_prior_gaussian[which][0])/par_prior_gaussian[which][1], 2.0) );
pm[which] += dnest_randh() * width;
dnest_wrap(&pm[which], par_range_model[which][0], par_range_model[which][1]);
logH += (-0.5*pow((pm[which] - par_prior_gaussian[which][0])/par_prior_gaussian[which][1], 2.0) );
}
else
{
pm[which] += dnest_randh() * width;
dnest_wrap(&(pm[which]), par_range_model[which][0], par_range_model[which][1]);
}
return logH;
}
/*!
 * This function prints the particle to the file.
*/
void print_particle_con(FILE *fp, const void *model)
{
int i;
double *pm = (double *)model;
for(i=0; i<num_params; i++)
{
fprintf(fp, "%e ", pm[i] );
}
fprintf(fp, "\n");
}
/*!
 * This function reads the particle from the file.
*/
void read_particle_con(FILE *fp, void *model)
{
int j;
double *psample = (double *)model;
for(j=0; j < dnest_num_params; j++)
{
if(fscanf(fp, "%lf", psample+j) < 1)
{
printf("%f\n", *psample);
fprintf(stderr, "#Error: Cannot read file %s.\n", options.sample_file);
exit(0);
}
}
return;
}
void restart_action_con(int iflag)
{
return;
}
void accept_action_con()
{
int param;
param = which_parameter_update;
/* only update prob when variability parameters are updated. */
if(param < num_params_var)
{
prob_con_particles[which_particle_update] = prob_con_particles_perturb[which_particle_update];
}
return;
}
void kill_action_con(int i, int i_copy)
{
prob_con_particles[i] = prob_con_particles[i_copy];
return;
}
/*!
 * Likelihood used when examining the prior: it always returns zero so that sampling follows the prior only.
*/
double log_likelihoods_cal_con_exam(const void *model)
{
return 0.0;
} |
Require Export SpecializedCategory.
Require Import Common.
Set Implicit Arguments.
Set Asymmetric Patterns.
Set Universe Polymorphism.
Section DCategory.
Variable O : Type.
Local Ltac simpl_eq := subst_body; hnf in *; simpl in *; intros; destruct_type @inhabited; simpl in *;
repeat constructor;
repeat subst;
auto;
simpl_transitivity.
Let DiscreteCategory_Morphism (s d : O) := s = d.
Definition DiscreteCategory_Compose (s d d' : O) (m : DiscreteCategory_Morphism d d') (m' : DiscreteCategory_Morphism s d) :
DiscreteCategory_Morphism s d'.
simpl_eq.
Defined.
Definition DiscreteCategory_Identity o : DiscreteCategory_Morphism o o.
simpl_eq.
Defined.
Global Arguments DiscreteCategory_Compose [s d d'] m m' /.
Global Arguments DiscreteCategory_Identity o /.
Definition DiscreteCategory : @SpecializedCategory O.
refine (@Build_SpecializedCategory _
DiscreteCategory_Morphism
DiscreteCategory_Identity
DiscreteCategory_Compose
_
_
_);
abstract (
unfold DiscreteCategory_Compose, DiscreteCategory_Identity;
simpl_eq
).
Defined.
End DCategory.
|
module Problem2 where
open import Problem1
infixr 40 _โบ_
data Vec (A : Set) : Nat -> Set where
ฮต : Vec A zero
_โบ_ : {n : Nat} -> A -> Vec A n -> Vec A (suc n)
-- 2.1
vec : {A : Set}{n : Nat} -> A -> Vec A n
vec {n = zero } x = ฮต
vec {n = suc n} x = x โบ vec x
-- 2.2
infixl 80 _<*>_
_<*>_ : {A B : Set}{n : Nat} -> Vec (A -> B) n -> Vec A n -> Vec B n
ฮต <*> ฮต = ฮต
(f โบ fs) <*> (x โบ xs) = f x โบ fs <*> xs
-- 2.3
map : {A B : Set}{n : Nat} -> (A -> B) -> Vec A n -> Vec B n
map f xs = vec f <*> xs
-- 2.4
zip : {A B C : Set}{n : Nat} -> (A -> B -> C) ->
Vec A n -> Vec B n -> Vec C n
zip f xs ys = vec f <*> xs <*> ys
|
// Copyright 2019-present MongoDB Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cast_core/actors/NopMetrics.hpp>
#include <string>
#include <boost/log/trivial.hpp>
#include <gennylib/Cast.hpp>
namespace genny::actor {
/** @private */
struct NopMetrics::PhaseConfig {
/** record data about each iteration */
metrics::Operation operation;
explicit PhaseConfig(PhaseContext& context, ActorId actorId)
: operation{context.operation("Iterate", actorId)} {}
};
void NopMetrics::run() {
for (auto&& config : _loop) {
auto prev_ctx = std::unique_ptr<metrics::OperationContext>();
for (auto _ : config) {
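            // Finish the previous iteration's context only when the next one
            // begins (and once more after the loop), so each recorded operation
            // spans one full pass of this no-op loop.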
if (prev_ctx) {
prev_ctx->success();
}
prev_ctx = std::make_unique<metrics::OperationContext>(config->operation.start());
}
if (prev_ctx) {
prev_ctx->success();
}
}
}
NopMetrics::NopMetrics(genny::ActorContext& context)
: Actor(context), _loop{context, NopMetrics::id()} {}
namespace {
auto registerNopMetrics = genny::Cast::registerDefault<genny::actor::NopMetrics>();
}
} // namespace genny::actor
|
State Before: R : Type u
Sโ : Type v
A : Type w
B : Type uโ
M : Type vโ
instโยนยฒ : CommSemiring R
instโยนยน : Semiring Sโ
instโยนโฐ : AddCommMonoid A
instโโน : Algebra R Sโ
instโโธ : Module Sโ A
instโโท : Module R A
instโโถ : IsScalarTower R Sโ A
S : Type u_1
T : Type u_2
instโโต : CommSemiring S
instโโด : Semiring T
instโยณ : Algebra R S
instโยฒ : Algebra R T
instโยน : Algebra S T
instโ : IsScalarTower R S T
x : S
a : Set S
hx : x โ span R a
โข โ(algebraMap S T) x โ span R (โ(algebraMap S T) '' a) State After: R : Type u
Sโ : Type v
A : Type w
B : Type uโ
M : Type vโ
instโยนยฒ : CommSemiring R
instโยนยน : Semiring Sโ
instโยนโฐ : AddCommMonoid A
instโโน : Algebra R Sโ
instโโธ : Module Sโ A
instโโท : Module R A
instโโถ : IsScalarTower R Sโ A
S : Type u_1
T : Type u_2
instโโต : CommSemiring S
instโโด : Semiring T
instโยณ : Algebra R S
instโยฒ : Algebra R T
instโยน : Algebra S T
instโ : IsScalarTower R S T
x : S
a : Set S
hx : x โ span R a
โข โ y, y โ span R a โง โ(โR (Algebra.linearMap S T)) y = โ(algebraMap S T) x Tactic: rw [span_algebraMap_image_of_tower, mem_map] State Before: R : Type u
Sโ : Type v
A : Type w
B : Type uโ
M : Type vโ
instโยนยฒ : CommSemiring R
instโยนยน : Semiring Sโ
instโยนโฐ : AddCommMonoid A
instโโน : Algebra R Sโ
instโโธ : Module Sโ A
instโโท : Module R A
instโโถ : IsScalarTower R Sโ A
S : Type u_1
T : Type u_2
instโโต : CommSemiring S
instโโด : Semiring T
instโยณ : Algebra R S
instโยฒ : Algebra R T
instโยน : Algebra S T
instโ : IsScalarTower R S T
x : S
a : Set S
hx : x โ span R a
โข โ y, y โ span R a โง โ(โR (Algebra.linearMap S T)) y = โ(algebraMap S T) x State After: no goals Tactic: exact โจx, hx, rflโฉ |
# add structure file and popmap
convert_treemix <- function(file, popmap) {
x <- read.table(file, sep = "\t")
# remove the population identification column or not
if (is.na(x[1, 2]) == TRUE) {
x <- cbind(x[,1], x[,3:ncol(x)])
}
x <- x[2:nrow(x),]
bi_allelic <- c()
for(a in 2:ncol(x)) { #determine which SNPs are bi-allelic
bi <- x[,a]
bi <- unique(bi)
bi <- bi[ bi != 0 ]
bi <- length(bi)
bi_allelic <- c(bi_allelic, bi)
}
bi_allelic <- bi_allelic == 2
x2 <- x[,bi_allelic]
###determine possible alleles of each SNP
allele.a <- c()
allele.b <- c()
for(b in 2:ncol(x2)) {
alleles <- unique(x2[, b])
alleles <- alleles[ alleles != 0 ]
allele.a <- c(allele.a, alleles[1])
allele.b <- c(allele.b, alleles[2])
}
# match individuals to populations
pops <- read.table(popmap,sep="\t")
inds <- as.character(pops[,1])
species <- as.character(pops[,2])
pops <- cbind(inds, species)
unique.species <- unique(species)
# population allele frequencies loop
allele.freqs <- list()
for(a in 1:length(unique.species)) {
z.a <- c()
z.b <- c()
inds.rep <- as.vector(pops[pops[,2]==unique.species[a],1])
inds.rep <- match(inds.rep,x2[,1])
inds.rep <- c(inds.rep, inds.rep+1)
inds.rep <- x2[inds.rep,2:ncol(x2)]
for(b in 1:ncol(inds.rep)) {
snp.rep <- inds.rep[,b]
snp.rep <- snp.rep[snp.rep != 0]
z.a <- c(z.a, length(snp.rep[snp.rep == allele.a[b]]))
z.b <- c(z.b, length(snp.rep[snp.rep == allele.b[b]]))
}
allele.freqs[[a]] <- paste(z.a, ",", z.b, sep="")
}
# put all lists together and add column names
for(a in 1:length(allele.freqs)) {
if(a == 1) {
final <- allele.freqs[[a]]
} else {
final <- cbind(final, allele.freqs[[a]])
}
}
colnames(final) <- unique.species
write.table(final, file = "treemix", sep = " ", row.names = FALSE, quote=FALSE)
}
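# Illustrative call (file names below are hypothetical):
#   convert_treemix(file = "populations.str", popmap = "popmap.txt")
# This writes a space-delimited file named "treemix" holding, for each
# population, "countA,countB" of the two observed alleles at each bi-allelic SNP.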
|
lemma dist_triangle_eq: fixes x y z :: "'a::real_inner" shows "dist x z = dist x y + dist y z \<longleftrightarrow> norm (x - y) *\<^sub>R (y - z) = norm (y - z) *\<^sub>R (x - y)" |
[STATEMENT]
lemma OclIsKindOf\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y_OclAny_strict1[simp] : "(invalid::OclAny) .oclIsKindOf(OclAny) = invalid"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. invalid.oclIsKindOf(OclAny) = invalid
[PROOF STEP]
by(rule ext, simp add: invalid_def bot_option_def
OclIsKindOf\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y_OclAny) |
{-# OPTIONS --cubical --safe --no-import-sorts #-}
module Cubical.Algebra.Algebra where
open import Cubical.Algebra.Algebra.Base public
|
import numpy as np
from generativemagic.arrays import np_index
from generativemagic.effect import DECK_TOP, DECK_BOTTOM
from generativemagic.movement import Movement
class ScallopCut(Movement):
"""Cuts to a scalloped, shorted, svengalied card. Simple, perfect cut to a card."""
def __init__(self, scallop_card: int, cut_to: int = DECK_TOP):
self._cut_to = cut_to
self._scallop_card = scallop_card
def apply(self, sequence, chosen=None):
position = np_index(sequence, self._scallop_card)
if self._cut_to == DECK_BOTTOM:
return np.roll(sequence, len(sequence) - position - 1)
return np.roll(sequence, -position)
def __repr__(self):
return f"ScallopCut({self._scallop_card})"
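# Example usage sketch (deck values are hypothetical):
#
#     deck = np.arange(52)
#     ScallopCut(scallop_card=17).apply(deck)            # card 17 rolled to the top
#     ScallopCut(17, cut_to=DECK_BOTTOM).apply(deck)     # card 17 rolled to the bottom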
|
function pde = constant_fun(bm,bp,r,x0,y0,z0,coef)
%% USAGE: constant exact solution and data for the 3D interface Poisson problem
% Last Modified: 02/21/2020 by Xu Zhang
%% PDE Structure
pde = struct('intf',@intf,...
'exactu1',@exactu1,'exactu2',@exactu2,'exactu3',@exactu3,...
'um1',@um1,'um2',@um2,'um3',@um3,'up1',@up1,'up2',@up2,'up3',@up3,...
'Dxu',@Dxu,'Dxum',@Dxum,'Dxup',@Dxup,'Dyu',@Dyu,...
'Dyum',@Dyum,'Dyup',@Dyup,'Dzu',@Dzu,'Dzum',@Dzum,'Dzup',@Dzup,...
'f1',@f1,'f2',@f2,'f3',@f3,...
'fm1',@fm1,'fm2',@fm2,'fm3',@fm3,...
'fp1',@fp1,'fp2',@fp2,'fp3',@fp3,...
'A',@A,'Am',@Am,'Ap',@Ap,'one',@one);
pde.bm = bm;
pde.bp = bp;
%% interface function
function u = intf(x,y,z)
u = ((x-x0).^2 + (y-y0).^2 + (z-z0).^2).^(1/2)/r-1;
end
%% exact solution
function u = exactu1(x,y,z)
u = um1(x,y,z);
id = intf(x,y,z) > 0;
u(id) = up1(x(id),y(id),z(id));
end
function u = exactu2(x,y,z)
u = um2(x,y,z);
id = intf(x,y,z) > 0;
u(id) = up2(x(id),y(id),z(id));
end
function u = exactu3(x,y,z)
u = um3(x,y,z);
id = intf(x,y,z) > 0;
u(id) = up3(x(id),y(id),z(id));
end
function u = um1(x,y,z)
u = coef*ones(size(x));
end
function u = um2(x,y,z)
u = coef*ones(size(x));
end
function u = um3(x,y,z)
u = coef*ones(size(x));
end
function u = up1(x,y,z)
u = coef*ones(size(x));
end
function u = up2(x,y,z)
u = coef*ones(size(x));
end
function u = up3(x,y,z)
u = coef*ones(size(x));
end
%% Boundary Function
function u = gD1(x,y,z)
u = exactu1(x,y,z);
end
function u = gD2(x,y,z)
u = exactu2(x,y,z);
end
function u = gD3(x,y,z)
u = exactu3(x,y,z);
end
%% Derivative of the exact solution
function u = Dxu(x,y,z)
u = Dxum(x,y,z);
id = intf(x,y,z) > 0;
u(id) = Dxup(x(id),y(id),z(id));
end
function u = Dyu(x,y,z)
u = Dyum(x,y,z);
id = intf(x,y,z) > 0;
u(id) = Dyup(x(id),y(id),z(id));
end
function u = Dzu(x,y,z)
u = Dzum(x,y,z);
id = intf(x,y,z) > 0;
u(id) = Dzup(x(id),y(id),z(id));
end
function u = Dxum(x,y,z)
u = zeros(size(x));
end
function u = Dyum(x,y,z)
u = zeros(size(x));
end
function u = Dzum(x,y,z)
u = zeros(size(x));
end
function u = Dxup(x,y,z)
u = zeros(size(x));
end
function u = Dyup(x,y,z)
u = zeros(size(x));
end
function u = Dzup(x,y,z)
u = zeros(size(x));
end
function u = Duker(x,y,z)
u = zeros(size(x));
end
%% right hand side function
function u = f1(x,y,z)
u = fm1(x,y,z);
id = intf(x,y,z) > 0;
u(id) = fp1(x(id),y(id),z(id));
end
function u = f2(x,y,z)
u = fm2(x,y,z);
id = intf(x,y,z) > 0;
u(id) = fp2(x(id),y(id),z(id));
end
function u = f3(x,y,z)
u = fm3(x,y,z);
id = intf(x,y,z) > 0;
u(id) = fp3(x(id),y(id),z(id));
end
function u = fm1(x,y,z)
u = coef*ones(size(x));
end
function u = fm2(x,y,z)
u = coef*ones(size(x));
end
function u = fm3(x,y,z)
u = coef*ones(size(x));
end
function u = fp1(x,y,z)
u = coef*ones(size(x));
end
function u = fp2(x,y,z)
u = coef*ones(size(x));
end
function u = fp3(x,y,z)
u = coef*ones(size(x));
end
%% Diffusion coefficient function
function u = A(x,y,z)
u = Am(x,y,z);
id = intf(x,y,z) > 0;
u(id) = Ap(x(id),y(id),z(id));
end
function u = Am(x,y,z)
u = bm*ones(size(x));
end
function u = Ap(x,y,z)
u = bp*ones(size(x));
end
%% Other function
function u = one(x,y,z)
u = ones(size(x));
end
end |
REBOL [
System: "REBOL [R3] Language Interpreter and Run-time Environment"
Title: "Source File Database"
Rights: {
Copyright 2012 REBOL Technologies
REBOL is a trademark of REBOL Technologies
}
License: {
Licensed under the Apache License, Version 2.0
See: http://www.apache.org/licenses/LICENSE-2.0
}
Author: "Carl Sassenrath"
Purpose: {
Lists of files used for creating makefiles.
}
]
core: [
a-constants.c
a-globals.c
a-lib.c
b-boot.c
b-init.c
c-do.c
c-error.c
c-frame.c
c-function.c
c-port.c
c-task.c
c-word.c
d-crash.c
d-dump.c
d-print.c
f-blocks.c
f-deci.c
f-dtoa.c
f-enbase.c
f-extension.c
f-math.c
f-modify.c
f-qsort.c
f-random.c
f-round.c
f-series.c
f-stubs.c
l-scan.c
l-types.c
m-gc.c
m-pools.c
m-series.c
n-control.c
n-data.c
n-io.c
n-loop.c
n-math.c
n-sets.c
n-strings.c
n-system.c
p-clipboard.c
p-console.c
p-dir.c
p-dns.c
p-event.c
p-file.c
p-net.c
s-cases.c
s-crc.c
s-file.c
s-find.c
s-make.c
s-mold.c
s-ops.c
s-trim.c
s-unicode.c
t-bitset.c
t-block.c
t-char.c
t-datatype.c
t-date.c
t-decimal.c
t-event.c
t-function.c
t-integer.c
t-logic.c
t-map.c
t-money.c
t-none.c
t-object.c
t-pair.c
t-port.c
t-string.c
t-time.c
t-tuple.c
t-typeset.c
t-utype.c
t-vector.c
t-word.c
u-compress.c
u-dialect.c
u-md5.c
u-parse.c
u-sha1.c
u-zlib.c
]
made: [
make-boot.r core/b-boot.c
make-headers.r include/tmp-funcs.h
make-host-ext.r include/host-ext-graphics.h
make-host-init.r include/host-init.h
make-os-ext.r include/host-lib.h
make-reb-lib.r include/reb-lib.h
]
os: [
host-main.c
host-args.c
host-device.c
host-stdio.c
dev-net.c
dev-dns.c
]
os-win32: [
host-lib.c
dev-stdio.c
dev-file.c
dev-event.c
dev-clipboard.c
]
os-win32g: [
host-graphics.c
host-event.c
host-window.c
host-draw.c
host-text.c
]
os-posix: [
host-lib.c
host-readline.c
dev-stdio.c
dev-event.c
dev-file.c
]
boot-files: [
version.r
graphics.r
draw.r
shape.r
text.r
]
mezz-files: [
; prot-http.r
; view-colors.r
; view-funcs.r
]
tools: [
make-host-init.r
make-host-ext.r
form-header.r
]
|
import numpy as np
from openpathsampling.integration_tools import (
openmm, md, error_if_no_mdtraj, unit, error_if_no_simtk_unit, HAS_OPENMM
)
if HAS_OPENMM:
# this is in case we directly import tools (e.g., for
# trajectory_to/from_mdtraj) when we don't have OpenMM installed. In
# that case, we skip these imports (engines/openmm/__init__.py prevents
# them from being made)
from .snapshot import Snapshot
from openpathsampling.engines.topology import Topology, MDTrajTopology
try:
# openmm >= 7.6
from openmm.app import internal as _internal
except ImportError:
# openmm < 7.6
from simtk.openmm.app import internal as _internal
reducePeriodicBoxVectors = _internal.unitcell.reducePeriodicBoxVectors
from openpathsampling.engines import Trajectory, NoEngine, SnapshotDescriptor
__author__ = 'Jan-Hendrik Prinz'
class TopologyEngine(NoEngine):
_default_options = {}
def __init__(self, topology):
descriptor = SnapshotDescriptor.construct(
Snapshot,
{
'n_atoms': topology.n_atoms,
'n_spatial': topology.n_spatial
}
)
super(NoEngine, self).__init__(
descriptor=descriptor
)
self.topology = topology
@property
def mdtraj_topology(self):
return self.topology.mdtraj
def to_dict(self):
return {
'topology': self.topology,
}
class FileEngine(TopologyEngine):
_default_options = {}
def __init__(self, topology, filename):
super(FileEngine, self).__init__(
topology=topology
)
self.filename = filename
def to_dict(self):
return {
'topology': self.topology,
'filename': self.filename
}
class OpenMMToolsTestsystemEngine(TopologyEngine):
_default_options = {}
def __init__(self, topology, testsystem_name):
super(OpenMMToolsTestsystemEngine, self).__init__(
topology=topology
)
self.testsystem_name = testsystem_name
def to_dict(self):
return {
'topology': self.topology,
'testsystem_name': self.testsystem_name
}
def snapshot_from_pdb(pdb_file, simple_topology=False):
"""
Construct a Snapshot from the first frame in a pdb file without velocities
Parameters
----------
pdb_file : str
The filename of the .pdb file to be used
simple_topology : bool
if `True` only a simple topology with n_atoms will be created.
This cannot be used with complex CVs but loads and stores very fast
Returns
-------
:class:`openpathsampling.engines.Snapshot`
the constructed Snapshot
"""
    snap = ops_load_trajectory(pdb_file)[0]
    if simple_topology:
        coords = snap.coordinates
        if hasattr(coords, 'value_in_unit'):
            # strip units to read the (n_atoms, n_spatial) shape
            coords = coords.value_in_unit(unit.nanometer)
        topology = Topology(*np.asarray(coords).shape)
    else:
        topology = snap.topology
snapshot = Snapshot.construct(
coordinates=snap.coordinates,
box_vectors=snap.box_vectors,
velocities=snap.velocities,
engine=FileEngine(topology, pdb_file)
)
return snapshot
def topology_from_pdb(pdb_file, simple_topology=False):
"""
Construct a Topology from the first frame in a pdb file without velocities
Parameters
----------
pdb_file : str
The filename of the .pdb file to be used
simple_topology : bool
if `True` only a simple topology with n_atoms will be created.
This cannot be used with complex CVs but loads and stores very fast
Returns
-------
:class:`openpathsampling.engines.Snapshot`
the constructed Snapshot
"""
pdb = md.load(pdb_file)
if simple_topology:
topology = Topology(*pdb.xyz[0].shape)
else:
topology = MDTrajTopology(pdb.topology)
return topology
def snapshot_from_testsystem(testsystem, simple_topology=False,
periodic=True):
"""
Construct a Snapshot from openmm topology and state objects
Parameters
----------
    testsystem : openmmtools.TestSystem
        the openmmtools test system supplying topology, system, and positions
simple_topology : bool
if `True` only a simple topology with n_atoms will be created.
This cannot be used with complex CVs but loads and stores very fast
periodic : bool
True (default) if system is periodic; if False, box vectors are None
Returns
-------
:class:`openpathsampling.engines.Snapshot`
the constructed Snapshot
"""
error_if_no_simtk_unit("snapshot_from_testsystem")
u_nm = unit.nanometers
u_ps = unit.picoseconds
velocities = unit.Quantity(np.zeros(testsystem.positions.shape),
u_nm / u_ps)
if simple_topology:
topology = Topology(*testsystem.positions.shape)
else:
topology = MDTrajTopology(md.Topology.from_openmm(testsystem.topology))
if periodic:
sys_box_vectors = testsystem.system.getDefaultPeriodicBoxVectors()
box_vectors = np.array([v / u_nm for v in sys_box_vectors]) * u_nm
else:
box_vectors = None
snapshot = Snapshot.construct(
coordinates=testsystem.positions,
box_vectors=box_vectors,
velocities=velocities,
engine=OpenMMToolsTestsystemEngine(topology, testsystem.name)
)
return snapshot
def trajectory_from_mdtraj(mdtrajectory, simple_topology=False,
velocities=None):
"""
Construct a Trajectory object from an mdtraj.Trajectory object
Parameters
----------
mdtrajectory : mdtraj.Trajectory
Input mdtraj.Trajectory
simple_topology : bool
if `True` only a simple topology with n_atoms will be created.
This cannot be used with complex CVs but loads and stores very fast
velocities : np.array
velocities in units of nm/ps
Returns
-------
openpathsampling.engines.Trajectory
the constructed Trajectory instance
"""
error_if_no_simtk_unit("trajectory_from_mdtraj")
trajectory = Trajectory()
u_nm = unit.nanometer
u_ps = unit.picosecond
vel_unit = u_nm / u_ps
if simple_topology:
topology = Topology(*mdtrajectory.xyz[0].shape)
else:
topology = MDTrajTopology(mdtrajectory.topology)
if velocities is None:
empty_vel = unit.Quantity(np.zeros(mdtrajectory.xyz[0].shape),
vel_unit)
if mdtrajectory.unitcell_vectors is not None:
box_vects = unit.Quantity(mdtrajectory.unitcell_vectors,
unit.nanometers)
else:
box_vects = [None] * len(mdtrajectory)
engine = TopologyEngine(topology)
for frame_num in range(len(mdtrajectory)):
# mdtraj trajectories only have coordinates and box_vectors
coord = unit.Quantity(mdtrajectory.xyz[frame_num], u_nm)
if velocities is not None:
vel = unit.Quantity(velocities[frame_num], vel_unit)
else:
vel = empty_vel
box_v = box_vects[frame_num]
statics = Snapshot.StaticContainer(
coordinates=coord,
box_vectors=box_v,
engine=engine
)
kinetics = Snapshot.KineticContainer(velocities=vel,
engine=engine)
snap = Snapshot(
statics=statics,
kinetics=kinetics,
engine=engine
)
trajectory.append(snap)
return trajectory
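# Example usage sketch (file names are hypothetical):
#
#     mdt = md.load("traj.xtc", top="conf.gro")
#     ops_traj = trajectory_from_mdtraj(mdt)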
def empty_snapshot_from_openmm_topology(topology, simple_topology=False):
"""
Return an empty snapshot from an openmm.Topology object
Velocities will be set to zero.
Parameters
----------
topology : openmm.Topology
the topology representing the structure and number of atoms
simple_topology : bool
if `True` only a simple topology with n_atoms will be created.
This cannot be used with complex CVs but loads and stores very fast
Returns
-------
openpathsampling.engines.Snapshot
the complete snapshot with zero coordinates and velocities
"""
error_if_no_simtk_unit("empty_snapshot_from_openmm_topology")
u_nm = unit.nanometers
u_ps = unit.picoseconds
    n_atoms = topology.getNumAtoms()
    # read the box before `topology` is rebound to an OPS topology below
    box_vectors = topology.getPeriodicBoxVectors()
    if simple_topology:
        topology = Topology(n_atoms, 3)
    else:
        error_if_no_mdtraj("empty_snapshot_from_openmm_topology")
        topology = MDTrajTopology(md.Topology.from_openmm(topology))
    snapshot = Snapshot.construct(
        coordinates=unit.Quantity(np.zeros((n_atoms, 3)), u_nm),
        box_vectors=box_vectors,
velocities=unit.Quantity(np.zeros((n_atoms, 3)), u_nm / u_ps),
engine=TopologyEngine(topology)
)
return snapshot
def to_openmm_topology(obj):
"""
Contruct an openmm.Topology file out of a Snapshot or Configuration
object. This uses the mdtraj.Topology in the Configuration as well as
the box_vectors.
Parameters
----------
obj : openpathsampling.engines.BaseSnapshot or Configuration
the object to be used in the topology construction
Returns
-------
openmm.Topology
an object representing an openmm.Topology
"""
if obj.topology is not None:
if hasattr(obj.topology, 'mdtraj'):
openmm_topology = obj.topology.mdtraj.to_openmm()
box_size_dimension = np.linalg.norm(
obj.box_vectors.value_in_unit(unit.nanometer), axis=1)
openmm_topology.setUnitCellDimensions(box_size_dimension)
return openmm_topology
else:
return None
def trajectory_to_mdtraj(trajectory, md_topology=None):
"""
Construct a `mdtraj.Trajectory` object from an :obj:`Trajectory` object
Parameters
----------
trajectory : :obj:`openpathsampling.engines.Trajectory`
Input Trajectory
Returns
-------
:obj:`mdtraj.Trajectory`
the constructed Trajectory instance
"""
if not hasattr(trajectory, 'to_mdtraj'):
try:
_ = len(trajectory)
except TypeError:
trajectory = Trajectory([trajectory])
else:
trajectory = Trajectory(trajectory)
# For now, let's keep all the code in one place, and better for
# engines.openmm.tools to require engines.trajectory than vice versa
return trajectory.to_mdtraj(md_topology)
def ops_load_trajectory(filename, **kwargs):
error_if_no_mdtraj("ops_load_trajectory")
return trajectory_from_mdtraj(md.load(filename, **kwargs))
# ops_load_trajectory and the mdtraj stuff is not OpenMM-specific
def reduced_box_vectors(snapshot):
"""Reduced box vectors for a snapshot (with units)
See also
--------
reduce_trajectory_box_vectors
Parameters
----------
snapshot : :class:`.Snapshot`
input snapshot
Returns
-------
:class:`.Snapshot`
snapshot with correctly reduced box vectors
"""
nm = unit.nanometer
return np.array(
reducePeriodicBoxVectors(snapshot.box_vectors).value_in_unit(nm)
) * nm
def reduce_trajectory_box_vectors(trajectory):
"""Trajectory with reduced box vectors.
OpenMM has strict requirements on the box vectors describing the unit
cell. In some cases, such as trajectories loaded from files that have
rounded the box vectors, these conditions might not be satisfied. This
method forces the box vectors to meet OpenMM's criteria.
Parameters
----------
trajectory : :class:`.Trajectory`
input trajectory
Returns
-------
:class:`.Trajectory`
trajectory with correctly reduced box vectors
"""
return Trajectory([
snap.copy_with_replacement(box_vectors=reduced_box_vectors(snap))
for snap in trajectory
])
def load_trr(trr_file, top, velocities=True):
"""Load a TRR file, ready for use as input to an OpenMMEngine.
This is a single method to handle several peculiarities of both the TRR
format (which rounds some values) and OpenMM (which has certain
requirements of box vectors), plus the possibility that you'll want
velocities.
Parameters
----------
trr_file : string
name of TRR file to load
top : string
name of topology (e.g., ``.gro``) file to use. See MDTraj
documentation on md.load.
velocities : bool
whether to also load velocities from the TRR file; default ``True``
Return
------
:class:`.Trajectory`
the OPS trajectory, with OpenMM-reduced box vectors and velocities
(if requested)
"""
mdt = md.load(trr_file, top=top)
trr = md.formats.TRRTrajectoryFile(trr_file)
if velocities:
vel = trr._read(n_frames=len(mdt), atom_indices=None,
get_velocities=True)[5]
else:
vel = None
traj = trajectory_from_mdtraj(mdt, velocities=vel)
return reduce_trajectory_box_vectors(traj)
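# Example usage sketch (file names are hypothetical):
#
#     traj = load_trr("md_run.trr", top="conf.gro", velocities=True)
#     len(traj)   # number of OPS snapshots, box vectors already OpenMM-reduced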
def n_dofs_from_system(system):
"""Get the number of degrees of freedom from an Openmm System
Parameters
----------
system : :class:`simtk.openmm.System`
object describing the system
Returns
-------
int :
number of degrees of freedom
"""
# dof calculation based on OpenMM's StateDataReporter
n_spatial = 3
n_particles = system.getNumParticles()
dofs_particles = sum([n_spatial for i in range(n_particles)
if system.getParticleMass(i) > 0*unit.dalton])
    dofs_constraints = system.getNumConstraints()
dofs_motion_removers = 0
has_cm_motion_remover = any(
type(system.getForce(i)) == openmm.CMMotionRemover
for i in range(system.getNumForces())
)
if has_cm_motion_remover:
dofs_motion_removers += 3
    dofs = dofs_particles - dofs_constraints - dofs_motion_removers
return dofs
def has_constraints_from_system(system):
"""Get the number of degrees of freedom from an Openmm System
Parameters
----------
system : :class:`openmm.System`
object describing the system
Returns
-------
bool :
whether there are constraints
"""
return system.getNumConstraints() > 0
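# Example usage sketch (assumes the optional openmmtools package is installed):
#
#     from openmmtools import testsystems
#     ts = testsystems.AlanineDipeptideVacuum()
#     n_dofs_from_system(ts.system), has_constraints_from_system(ts.system)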
|
# Advanced Expression Manipulation
```python
from sympy import *
x, y, z = symbols('x y z')
```
For each exercise, fill in the function according to its docstring.
## Creating expressions from classes
Create the following objects without using any mathematical operators like `+`, `-`, `*`, `/`, or `**` by explicitly using the classes `Add`, `Mul`, and `Pow`. You may use `x` instead of `Symbol('x')` and `4` instead of `Integer(4)`.
$$x^2 + 4xyz$$
$$x^{(x^y)}$$
$$x - \frac{y}{z}$$
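As a quick illustration of the pattern (using a different expression from the exercises), `2*x + y**3` can be written out explicitly as:

```python
Add(Mul(2, x), Pow(y, 3))   # same object as 2*x + y**3
```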
```python
def explicit_classes1():
"""
Returns the expression x**2 + 4*x*y*z, built using SymPy classes explicitly.
>>> explicit_classes1()
x**2 + 4*x*y*z
"""
```
```python
explicit_classes1()
```
```python
def explicit_classes2():
"""
Returns the expression x**(x**y), built using SymPy classes explicitly.
>>> explicit_classes2()
x**(x**y)
"""
```
```python
explicit_classes2()
```
```python
def explicit_classes3():
"""
Returns the expression x - y/z, built using SymPy classes explicitly.
>>> explicit_classes3()
x - y/z
"""
```
```python
explicit_classes3()
```
## Nested args
```python
expr = x**2 - y*(2**(x + 3) + z)
```
Use nested `.args` calls to get the 3 in expr.
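As a reminder of how `.args` behaves (not the answer itself), each call returns the top-level children of an expression, and leaves have empty args:

```python
expr2 = x + 2*y
expr2.args                    # the top-level children, e.g. (x, 2*y)
[a.args for a in expr2.args]  # one level deeper: [(), (2, y)]
```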
```python
def nested_args():
"""
Get the 3 in the above expression.
>>> nested_args()
3
"""
```
```python
nested_args()
```
## Traversal
Write a post-order traversal function that prints each node.
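The general recipe is a recursion over `expr.args`. For instance, a function that merely counts the nodes of an expression tree (a different task than the exercise) looks like:

```python
def count_nodes(expr):
    # each subexpression is one node; leaves have empty .args
    return 1 + sum(count_nodes(arg) for arg in expr.args)

count_nodes(x**2 + y)   # Add, Pow, x, 2, y  ->  5
```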
```python
def post(expr):
"""
Post-order traversal
>>> expr = x**2 - y*(2**(x + 3) + z)
>>> post(expr)
-1
y
2
3
x
x + 3
2**(x + 3)
z
2**(x + 3) + z
-y*(2**(x + 3) + z)
x
2
x**2
x**2 - y*(2**(x + 3) + z)
"""
```
```python
post(expr)
```
```python
for i in postorder_traversal(expr):
print(i)
```
|
chapter {* R7: Complete binary trees *}
theory R7_Arboles_binarios_completos
imports Main
begin
text {*
En esta relaciรณn se piden demostraciones automรกticas (lo mรกs cortas
posibles). Para ello, en algunos casos es necesario incluir lemas
auxiliares (que se demuestran automรกticamente) y usar ejercicios
anteriores.
---------------------------------------------------------------------
  Exercise 1. Define the datatype arbol to represent binary trees
  that carry no information in the nodes or in the leaves. For
  example, the tree
ยท
/ \
/ \
ยท ยท
/ \ / \
ยท ยท ยท ยท
  is represented by "N (N H H) (N H H)".
---------------------------------------------------------------------
*}
datatype arbol = H | N arbol arbol
value "N (N H H) (N H H) = (N (N H H) (N H H) :: arbol)"
text {*
---------------------------------------------------------------------
  Exercise 2. Define the function
hojas :: "arbol => nat"
  such that (hojas a) is the number of leaves of the tree a. For example,
hojas (N (N H H) (N H H)) = 4
---------------------------------------------------------------------
*}
fun hojas :: "arbol => nat" where
"hojas H = 1"
| "hojas (N i d) = hojas i + hojas d"
value "hojas (N (N H H) (N H H)) = 4"
text {*
---------------------------------------------------------------------
  Exercise 4. Define the function
profundidad :: "arbol => nat"
  such that (profundidad a) is the depth of the tree a. For example,
profundidad (N (N H H) (N H H)) = 2
---------------------------------------------------------------------
*}
fun profundidad :: "arbol => nat" where
"profundidad H = 0"
| "profundidad (N i d) = 1 + (max (profundidad i)(profundidad d))"
value "profundidad (N (N H H) (N H H)) = 2"
text {*
---------------------------------------------------------------------
  Exercise 5. Define the function
abc :: "nat \<Rightarrow> arbol"
  such that (abc n) is the complete binary tree of depth n. For example,
abc 3 = N (N (N H H) (N H H)) (N (N H H) (N H H))
---------------------------------------------------------------------
*}
fun abc :: "nat \<Rightarrow> arbol" where
"abc 0 = H"
| "abc (Suc n) = (N (abc n) (abc n))"
value "abc 3 = N (N (N H H) (N H H)) (N (N H H) (N H H))"
text {*
---------------------------------------------------------------------
Ejercicio 6. Un รกrbol binario a es completo respecto de la medida f si
a es una hoja o bien a es de la forma (N i d) y se cumple que tanto i
como d son รกrboles binarios completos respecto de f y, ademรกs,
f(i) = f(r).
Definir la funciรณn
es_abc :: "(arbol => 'a) => arbol => bool
tal que (es_abc f a) se verifica si a es un รกrbol binario completo
respecto de f.
---------------------------------------------------------------------
*}
fun es_abc :: "(arbol => 'a) => arbol => bool" where
"es_abc _ H = True"
| "es_abc f (N i d) = (es_abc f i \<and> es_abc f d \<and> (f i = f d))"
text {*
---------------------------------------------------------------------
  Note. (size a) is the number of nodes of the tree a. For example,
size (N (N H H) (N H H)) = 3
---------------------------------------------------------------------
*}
value "size (N (N H H) (N H H)) = 3"
value "size (N (N (N H H) (N H H)) (N (N H H) (N H H))) = 7"
text {*
---------------------------------------------------------------------
  Note. We have 3 measure functions on trees: number of leaves,
  number of nodes, and depth. Each one gives rise to a notion of
  completeness. In the following exercises we prove that the three
  notions of completeness coincide.
---------------------------------------------------------------------
*}
text {*
---------------------------------------------------------------------
  Exercise 7. Prove that a binary tree a is complete with respect to
  the depth iff it is complete with respect to the number of leaves.
---------------------------------------------------------------------
*}
lemma arbol_profundidad_respecto_num_hojas:
assumes "es_abc profundidad n"
shows "hojas n = 2^(profundidad n)"
using assms
by (induct n) auto
lemma lej7: "es_abc profundidad a = es_abc hojas a"
by (induct a) (auto simp add: arbol_profundidad_respecto_num_hojas)
text {*
---------------------------------------------------------------------
  Exercise 8. Prove that a binary tree a is complete with respect to
  the number of leaves iff it is complete with respect to the number of nodes.
---------------------------------------------------------------------
*}
lemma arbol_completo_respecto_num_hojas:
assumes "es_abc hojas n"
shows "Suc(size n) = hojas n"
using assms
by (induct n) auto
lemma lej8: "es_abc hojas a = es_abc size a"
by (induct a) (auto simp add:arbol_completo_respecto_num_hojas [symmetric])
text {*
---------------------------------------------------------------------
  Exercise 9. Prove that a binary tree a is complete with respect to
  the depth iff it is complete with respect to the number of nodes.
---------------------------------------------------------------------
*}
lemma arbol_completo_respecto_profundidad: "es_abc profundidad n = es_abc size n"
by (simp add: lej7 lej8)
text {*
---------------------------------------------------------------------
  Exercise 10. Prove that (abc n) is a complete binary tree.
---------------------------------------------------------------------
*}
lemma lej10: "es_abc profundidad (abc n)"
by (induct n) auto
text {*
---------------------------------------------------------------------
  Exercise 11. Prove that if a is a complete binary tree with respect
  to the depth, then a is equal to
  (abc (profundidad a)).
---------------------------------------------------------------------
*}
lemma lej11:
assumes " es_abc profundidad n"
shows "n = (abc (profundidad n))"
using assms
by (induct n) auto
text {*
---------------------------------------------------------------------
  Exercise 12. Find a measure f such that (es_abc f) is different from
  (es_abc size).
---------------------------------------------------------------------
*}
lemma "es_abc f n = es_abc size n"
  quickcheck
  oops
end |
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__110.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash_nodata_cub Protocol Case Study*}
theory n_flash_nodata_cub_lemma_on_inv__110 imports n_flash_nodata_cub_base
begin
section{*All lemmas on causal relation between inv__110 and some rule r*}
lemma n_PI_Remote_GetVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__110:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__110:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__110:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__110:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__110:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__110:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__110:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__110:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__110:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__110:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_InvAck_1Vsinv__110:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_InvAck_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_Get_PutVsinv__110:
assumes a1: "(r=n_PI_Local_Get_Put )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__110:
assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__0 N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__110:
assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__1 N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX__part__0Vsinv__110:
assumes a1: "(r=n_PI_Local_GetX_PutX__part__0 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX__part__1Vsinv__110:
assumes a1: "(r=n_PI_Local_GetX_PutX__part__1 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_PutXVsinv__110:
assumes a1: "(r=n_PI_Local_PutX )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true)) s))"
have "?P2 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true))) s))"
have "?P1 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_ReplaceVsinv__110:
assumes a1: "(r=n_PI_Local_Replace )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutVsinv__110:
assumes a1: "(r=n_NI_Local_Put )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_Put))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutXAcksDoneVsinv__110:
assumes a1: "(r=n_NI_Local_PutXAcksDone )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__110 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__110:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__110:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__110:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__1Vsinv__110:
assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__0Vsinv__110:
assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__110:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__110:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__110:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Put_HomeVsinv__110:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__110:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ShWbVsinv__110:
assumes a1: "r=n_NI_ShWb N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__110:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_GetX_Nak_HomeVsinv__110:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Nak_HomeVsinv__110:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__110:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__110:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__110:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_GetVsinv__110:
assumes a1: "r=n_PI_Local_Get_Get " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_HomeVsinv__110:
assumes a1: "r=n_NI_Nak_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__110:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__110:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__110 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
import Control.ST
import Control.ST.ImplicitCall
import Network.Socket
data SocketState = Ready | Bound | Listening | Open | Closed
data CloseOK : SocketState -> Type where
CloseOpen : CloseOK Open
CloseListening : CloseOK Listening
interface Sockets (m : Type -> Type) where
Sock : SocketState -> Type
socket : SocketType -> ST m (Either () Var) [addIfRight (Sock Ready)]
bind : (sock : Var) -> (addr : Maybe SocketAddress) -> (port : Port) ->
ST m (Either () ()) [sock ::: Sock Ready :-> (Sock Closed `or` Sock Bound)]
listen : (sock : Var) -> ST m (Either () ())
[sock ::: Sock Bound :-> (Sock Closed `or` Sock Listening)]
accept : (sock : Var) -> ST m (Either () Var)
[sock ::: Sock Listening, addIfRight $ Sock Open]
connect : (sock : Var) -> SocketAddress -> Port ->
ST m (Either () ()) [sock ::: Sock Ready :-> (Sock Closed `or` Sock Open)]
send : (sock : Var) -> String ->
ST m (Either () ()) [sock ::: Sock Open :-> (Sock Closed `or` Sock Open)]
recv : (sock : Var) -> ST m (Either () String)
[sock ::: Sock Open :-> (Sock Closed `or` Sock Open)]
close : (sock : Var) -> {auto prf : CloseOK st} -> ST m () [sock ::: Sock st :-> Sock Closed]
remove : (sock : Var) -> ST m () [Remove sock (Sock Closed)]
echoServer : (ConsoleIO m, Sockets m) => (sock : Var) ->
ST m () [remove sock (Sock {m} Listening)]
echoServer sock =
do
Right new <- accept sock
| Left err => do close sock; remove sock
Right msg <- recv new
| Left err => do close sock; remove sock; remove new
Right ok <- send new ("You said " ++ msg)
| Left err => do remove new; close sock; remove sock
close new; remove new; echoServer sock
startServer : (ConsoleIO m, Sockets m) => ST m () []
startServer =
do
Right sock <- socket Stream
| Left err => pure ()
Right ok <- bind sock Nothing 9442
| Left err => remove sock
Right ok <- listen sock
| Left err => remove sock
echoServer sock
implementation Sockets IO where
Sock _ = State Socket
socket ty =
do
Right sock <- lift $ Socket.socket AF_INET ty 0
| Left err => pure (Left ())
lbl <- new sock
pure (Right lbl)
bind sock addr port =
do
ok <- lift $ bind !(read sock) addr port
if ok /= 0
then pure (Left ())
else pure (Right ())
listen sock =
do
ok <- lift $ listen !(read sock)
if ok /= 0
then pure (Left ())
else pure (Right ())
accept sock =
do
Right (conn, addr) <- lift $ accept !(read sock)
| Left err => pure (Left ())
lbl <- new conn
returning (Right lbl) (toEnd lbl)
connect sock addr port =
do
ok <- lift $ connect !(read sock) addr port
if ok /= 0
then pure (Left ())
else pure (Right ())
close sock =
do
lift $ close !(read sock)
pure ()
remove sock = delete sock
send sock msg =
do
Right _ <- lift $ send !(read sock) msg
| Left _ => pure (Left ())
pure (Right ())
recv sock =
do
Right (msg, len) <- lift $ recv !(read sock) 1024
| Left _ => pure (Left ())
pure (Right msg)
|
-- Andreas, 2015-02-26
-- {-# OPTIONS -v interaction:100 #-}
data D : Set where
c : D
goal : D
goal = {! !} -- C-c C-r gave a parse error here, as there was a (single) space.
g1 : D
g1 = {! !}
g2 : D
g2 = {! !}
-- works now
|
/-
Copyright (c) 2020 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import ring_theory.integrally_closed
import ring_theory.valuation.integers
/-!
# Integral elements over the ring of integers of a valuation
The ring of integers is integrally closed inside the original ring.
-/
universes u v w
open_locale big_operators
namespace valuation
namespace integers
section comm_ring
variables {R : Type u} {ฮโ : Type v} [comm_ring R] [linear_ordered_comm_group_with_zero ฮโ]
variables {v : valuation R ฮโ} {O : Type w} [comm_ring O] [algebra O R] (hv : integers v O)
include hv
open polynomial
lemma mem_of_integral {x : R} (hx : is_integral O x) : x โ v.integer :=
let โจp, hpm, hpxโฉ := hx in le_of_not_lt $ ฮป (hvx : 1 < v x), begin
rw [hpm.as_sum, evalโ_add, evalโ_pow, evalโ_X, evalโ_finset_sum, add_eq_zero_iff_eq_neg] at hpx,
replace hpx := congr_arg v hpx, refine ne_of_gt _ hpx,
rw [v.map_neg, v.map_pow],
refine v.map_sum_lt' (zero_lt_one.trans_le (one_le_pow_of_one_le' hvx.le _)) (ฮป i hi, _),
rw [evalโ_mul, evalโ_pow, evalโ_C, evalโ_X, v.map_mul, v.map_pow, โ one_mul (v x ^ p.nat_degree)],
cases (hv.2 $ p.coeff i).lt_or_eq with hvpi hvpi,
{ exact mul_lt_mulโ hvpi (pow_lt_powโ hvx $ finset.mem_range.1 hi) },
{ erw hvpi, rw [one_mul, one_mul], exact pow_lt_powโ hvx (finset.mem_range.1 hi) }
end
protected lemma integral_closure : integral_closure O R = โฅ :=
bot_unique $ ฮป r hr, let โจx, hxโฉ := hv.3 (hv.mem_of_integral hr) in algebra.mem_bot.2 โจx, hxโฉ
end comm_ring
section fraction_field
variables {K : Type u} {ฮโ : Type v} [field K] [linear_ordered_comm_group_with_zero ฮโ]
variables {v : valuation K ฮโ} {O : Type w} [comm_ring O] [is_domain O]
variables [algebra O K] [is_fraction_ring O K]
variables (hv : integers v O)
lemma integrally_closed : is_integrally_closed O :=
(is_integrally_closed.integral_closure_eq_bot_iff K).mp (valuation.integers.integral_closure hv)
end fraction_field
end integers
end valuation
|
Formal statement is: lemma has_contour_integral_bound_circlepath_strong: "\<lbrakk>(f has_contour_integral i) (circlepath z r); finite k; 0 \<le> B; 0 < r; \<And>x. \<lbrakk>norm(x - z) = r; x \<notin> k\<rbrakk> \<Longrightarrow> norm(f x) \<le> B\<rbrakk> \<Longrightarrow> norm i \<le> B*(2*pi*r)" Informal statement is: If $f$ has a contour integral $i$ along the circle of radius $r > 0$ centered at $z$, and the norm of $f$ is bounded by $B$ at every point of the circle outside a finite exceptional set $k$, then the norm of $i$ is at most $B \cdot 2 \pi r$.
State Before: ฮฑ : Type u_1
instโยฒ : CanonicallyOrderedAddMonoid ฮฑ
instโยน : Sub ฮฑ
instโ : OrderedSub ฮฑ
a b c d : ฮฑ
โข 0 < a - b โ ยฌa โค b State After: no goals Tactic: rw [pos_iff_ne_zero, Ne.def, tsub_eq_zero_iff_le] |
# Upbit Open API
#
# ## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [[email protected]]
#
# OpenAPI spec version: 1.0.0
# Contact: [email protected]
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' Orderbook Class
#'
#' @field market
#' @field timestamp
#' @field total_ask_size
#' @field total_bid_size
#' @field orderbook_units
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
Orderbook <- R6::R6Class(
'Orderbook',
public = list(
`market` = NULL,
`timestamp` = NULL,
`total_ask_size` = NULL,
`total_bid_size` = NULL,
`orderbook_units` = NULL,
initialize = function(`market`, `timestamp`, `total_ask_size`, `total_bid_size`, `orderbook_units`){
if (!missing(`market`)) {
stopifnot(is.character(`market`), length(`market`) == 1)
self$`market` <- `market`
}
if (!missing(`timestamp`)) {
self$`timestamp` <- `timestamp`
}
if (!missing(`total_ask_size`)) {
stopifnot(is.numeric(`total_ask_size`), length(`total_ask_size`) == 1)
self$`total_ask_size` <- `total_ask_size`
}
if (!missing(`total_bid_size`)) {
stopifnot(is.numeric(`total_bid_size`), length(`total_bid_size`) == 1)
self$`total_bid_size` <- `total_bid_size`
}
if (!missing(`orderbook_units`)) {
stopifnot(is.list(`orderbook_units`), length(`orderbook_units`) != 0)
lapply(`orderbook_units`, function(x) stopifnot(R6::is.R6(x)))
self$`orderbook_units` <- `orderbook_units`
}
},
toJSON = function() {
OrderbookObject <- list()
if (!is.null(self$`market`)) {
OrderbookObject[['market']] <- self$`market`
}
if (!is.null(self$`timestamp`)) {
OrderbookObject[['timestamp']] <- self$`timestamp`
}
if (!is.null(self$`total_ask_size`)) {
OrderbookObject[['total_ask_size']] <- self$`total_ask_size`
}
if (!is.null(self$`total_bid_size`)) {
OrderbookObject[['total_bid_size']] <- self$`total_bid_size`
}
if (!is.null(self$`orderbook_units`)) {
OrderbookObject[['orderbook_units']] <- lapply(self$`orderbook_units`, function(x) x$toJSON())
}
OrderbookObject
},
fromJSON = function(OrderbookJson) {
OrderbookObject <- jsonlite::fromJSON(OrderbookJson)
if (!is.null(OrderbookObject$`market`)) {
self$`market` <- OrderbookObject$`market`
}
if (!is.null(OrderbookObject$`timestamp`)) {
self$`timestamp` <- OrderbookObject$`timestamp`
}
if (!is.null(OrderbookObject$`total_ask_size`)) {
self$`total_ask_size` <- OrderbookObject$`total_ask_size`
}
if (!is.null(OrderbookObject$`total_bid_size`)) {
self$`total_bid_size` <- OrderbookObject$`total_bid_size`
}
if (!is.null(OrderbookObject$`orderbook_units`)) {
self$`orderbook_units` <- lapply(OrderbookObject$`orderbook_units`, function(x) {
orderbook_unitsObject <- OrderbookUnit$new()
orderbook_unitsObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE))
orderbook_unitsObject
})
}
},
toJSONString = function() {
sprintf(
'{
"market": %s,
"timestamp": %s,
"total_ask_size": %d,
"total_bid_size": %d,
"orderbook_units": [%s]
}',
self$`market`,
self$`timestamp`,
self$`total_ask_size`,
self$`total_bid_size`,
lapply(self$`orderbook_units`, function(x) paste(x$toJSON(), sep=","))
)
},
fromJSONString = function(OrderbookJson) {
OrderbookObject <- jsonlite::fromJSON(OrderbookJson)
self$`market` <- OrderbookObject$`market`
self$`timestamp` <- OrderbookObject$`timestamp`
self$`total_ask_size` <- OrderbookObject$`total_ask_size`
self$`total_bid_size` <- OrderbookObject$`total_bid_size`
self$`orderbook_units` <- lapply(OrderbookObject$`orderbook_units`, function(x) OrderbookUnit$new()$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE)))
}
)
)
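# Minimal usage sketch (illustrative values only; assumes the generated OrderbookUnit
# class from the same package is available):
#
#   ob <- Orderbook$new(
#     `market` = "KRW-BTC",
#     `timestamp` = 1535520000000,
#     `total_ask_size` = 8.9,
#     `total_bid_size` = 2.9,
#     `orderbook_units` = list(OrderbookUnit$new())
#   )
#   ob$toJSON()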
|
Formal statement is: lemma fundamental_theorem_of_algebra: assumes nc: "\<not> constant (poly p)" shows "\<exists>z::complex. poly p z = 0" Informal statement is: If a complex polynomial $p$ is not constant, then it has a complex root.
function [ fn ] = eph_name( denum, year )
%eph_name creates a naming convection for the file names of the binary ephemerides
fn = Ephem.asc_name( denum, year );
if ~isempty(fn)
fn(1:3) = 'eph';
end
end
|
SUBROUTINE AF_PFLV ( report, isflv, ieflv, iret )
C************************************************************************
C* AF_PFLV *
C* *
C* This subroutine decodes and stores the flight level data from within *
C* a PIREP report. *
C* *
C* AF_PFLV ( REPORT, ISFLV, IEFLV, IRET ) *
C* *
C* Input parameters: *
C* REPORT CHAR* PIREP report *
C* ISFLV INTEGER Pointer to start of flight level*
C* data within REPORT *
C* IEFLV INTEGER Pointer to end of flight level *
C* data within REPORT *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* *
C** *
C* Log: *
C* J. Ator/NP12 09/96 *
C* J. Ator/NP12 10/96 Remove calls to ERRRPT *
C* J. Ator/NP12 08/97 New interface format, style changes *
C* J. Ator/NCEP 11/99 Declare field variable locally *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
INCLUDE 'afcmn.cmn'
C*
CHARACTER*(*) report
C*
CHARACTER field*(MXLENF)
C-----------------------------------------------------------------------
iret = 0
itsflv = isflv
C
CALL AF_GFLD ( report, ieflv, itsflv, field, lenf, ier )
IF ( ier .ne. 0 ) THEN
RETURN
ELSE IF ( lenf .eq. 3 ) THEN
CALL AF_FLVL ( field (1:3), ierflv )
END IF
C*
RETURN
END
|
(* Title: HOL/TLA/Memory/MemoryParameters.thy
Author: Stephan Merz, University of Munich
*)
section {* RPC-Memory example: Memory parameters *}
theory MemoryParameters
imports RPCMemoryParams
begin
(* the memory operations *)
datatype memOp = read Locs | "write" Locs Vals
consts
(* memory locations and contents *)
MemLoc :: "Locs set"
MemVal :: "Vals set"
(* some particular values *)
OK :: "Vals"
BadArg :: "Vals"
MemFailure :: "Vals"
NotAResult :: "Vals" (* defined here for simplicity *)
(* the initial value stored in each memory cell *)
InitVal :: "Vals"
axiomatization where
(* basic assumptions about the above constants and predicates *)
BadArgNoMemVal: "BadArg ~: MemVal" and
MemFailNoMemVal: "MemFailure ~: MemVal" and
InitValMemVal: "InitVal : MemVal" and
NotAResultNotVal: "NotAResult ~: MemVal" and
NotAResultNotOK: "NotAResult ~= OK" and
NotAResultNotBA: "NotAResult ~= BadArg" and
NotAResultNotMF: "NotAResult ~= MemFailure"
lemmas [simp] =
BadArgNoMemVal MemFailNoMemVal InitValMemVal NotAResultNotVal
NotAResultNotOK NotAResultNotBA NotAResultNotMF
NotAResultNotOK [symmetric] NotAResultNotBA [symmetric] NotAResultNotMF [symmetric]
lemma MemValNotAResultE: "[| x : MemVal; (x ~= NotAResult ==> P) |] ==> P"
using NotAResultNotVal by blast
end
|
Formal statement is: lemma [code abstract]: "coeffs (pCons a p) = a ## coeffs p" Informal statement is: The coefficient list of the polynomial `pCons a p` is obtained by prepending the new constant coefficient $a$ to the coefficient list of $p$.
Formal statement is: lemma prime_elem_const_poly_iff: fixes c :: "'a :: semidom" shows "prime_elem [:c:] \<longleftrightarrow> prime_elem c" Informal statement is: A constant polynomial is a prime element if and only if its constant is a prime element. |
"""
UnderlyingMDP(m::POMDP)
Transform `POMDP` `m` into an `MDP` where the states are fully observed.
UnderlyingMDP(m::MDP)
Return `m`
"""
struct UnderlyingMDP{P <: POMDP, S, A} <: MDP{S, A}
pomdp::P
end
function UnderlyingMDP(pomdp::POMDP{S, A, O}) where {S,A,O}
P = typeof(pomdp)
return UnderlyingMDP{P,S,A}(pomdp)
end
UnderlyingMDP(m::MDP) = m
POMDPs.transition(mdp::UnderlyingMDP{P, S, A}, s::S, a::A) where {P,S,A}= transition(mdp.pomdp, s, a)
POMDPs.initialstate_distribution(mdp::UnderlyingMDP) = initialstate_distribution(mdp.pomdp)
POMDPs.initialstate(mdp::UnderlyingMDP, rng::AbstractRNG) = initialstate(mdp.pomdp, rng)
POMDPs.states(mdp::UnderlyingMDP) = states(mdp.pomdp)
POMDPs.actions(mdp::UnderlyingMDP) = actions(mdp.pomdp)
POMDPs.reward(mdp::UnderlyingMDP{P, S, A}, s::S, a::A) where {P,S,A} = reward(mdp.pomdp, s, a)
POMDPs.reward(mdp::UnderlyingMDP{P, S, A}, s::S, a::A, sp::S) where {P,S,A} = reward(mdp.pomdp, s, a, sp)
POMDPs.isterminal(mdp ::UnderlyingMDP{P, S, A}, s::S) where {P,S,A} = isterminal(mdp.pomdp, s)
POMDPs.discount(mdp::UnderlyingMDP) = discount(mdp.pomdp)
POMDPs.stateindex(mdp::UnderlyingMDP{P, S, A}, s::S) where {P,S,A} = stateindex(mdp.pomdp, s)
POMDPs.stateindex(mdp::UnderlyingMDP{P, Int, A}, s::Int) where {P,A} = stateindex(mdp.pomdp, s) # fix ambiguity with src/convenience
POMDPs.stateindex(mdp::UnderlyingMDP{P, Bool, A}, s::Bool) where {P,A} = stateindex(mdp.pomdp, s)
POMDPs.actionindex(mdp::UnderlyingMDP{P, S, A}, a::A) where {P,S,A} = actionindex(mdp.pomdp, a)
POMDPs.actionindex(mdp::UnderlyingMDP{P,S, Int}, a::Int) where {P,S} = actionindex(mdp.pomdp, a)
POMDPs.actionindex(mdp::UnderlyingMDP{P,S, Bool}, a::Bool) where {P,S} = actionindex(mdp.pomdp, a)
POMDPs.gen(d::DDNOut, mdp::UnderlyingMDP, s, a, rng) = gen(d, mdp.pomdp, s, a, rng)
POMDPs.gen(d::DDNNode, mdp::UnderlyingMDP, s, a, rng) = gen(d, mdp.pomdp, s, a, rng)
POMDPs.gen(mdp::UnderlyingMDP, s, a, rng) = gen(mdp.pomdp, s, a, rng)
# deprecated in POMDPs v0.8
POMDPs.n_actions(mdp::UnderlyingMDP) = n_actions(mdp.pomdp)
POMDPs.n_states(mdp::UnderlyingMDP) = n_states(mdp.pomdp)
POMDPs.generate_s(mdp::UnderlyingMDP, s, a, rng::AbstractRNG) = generate_s(mdp.pomdp, s, a, rng)
POMDPs.generate_sr(mdp::UnderlyingMDP, s, a, rng::AbstractRNG) = generate_sr(mdp.pomdp, s, a, rng)
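# Minimal usage sketch (illustrative only; assumes a concrete POMDP such as TigerPOMDP from
# POMDPModels.jl and the same POMDPs.jl API version targeted by the methods above):
#
#   pomdp = TigerPOMDP()
#   mdp   = UnderlyingMDP(pomdp)              # view the POMDP as a fully observed MDP
#   d     = initialstate_distribution(mdp)    # forwarded to the wrapped POMDP
#   s     = rand(Random.GLOBAL_RNG, d)        # sample an underlying state
#   a     = first(actions(mdp))               # the action space is passed straight through
#   r     = reward(mdp, s, a)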
|
section \<open> Deadlock \<close>
theory ITree_Deadlock
imports ITree_Divergence
begin
text \<open> Deadlock is an interaction with no visible event \<close>
definition deadlock :: "('e, 'r) itree" where
"deadlock = Vis {}\<^sub>p"
lemma stable_deadlock [simp]: "stable deadlock"
by (simp add: deadlock_def)
lemma deadlock_trace_to: "deadlock \<midarrow>tr\<leadsto> P \<longleftrightarrow> tr = [] \<and> P = deadlock"
by (auto simp add: deadlock_def)
lemma pure_deadlock: "pure_itree deadlock"
by (simp add: deadlock_trace_to pure_itree_def)
lemma div_free_deadlock: "div_free deadlock"
by (metis deadlock_def div_free_run run_empty)
lemma pure_itree_disj_cases:
assumes "pure_itree P"
shows "(\<exists> n v. P = Sils n (Ret v)) \<or> (\<exists> n. P = Sils n deadlock) \<or> P = diverge"
unfolding deadlock_def
by (metis assms itree_disj_cases pure_itree_Vis pure_itree_trace_to trace_of_Sils)
lemma pure_itree_cases [case_names rets deadlock diverge, consumes 1]:
assumes "pure_itree P"
"\<And> n v. P = Sils n (Ret v) \<Longrightarrow> Q" "\<And> n. P = Sils n deadlock \<Longrightarrow> Q" "P = diverge \<Longrightarrow> Q"
shows Q
by (meson assms pure_itree_disj_cases)
lemma deadlock_bind [simp]: "deadlock \<bind> P = deadlock"
by (metis (no_types, lifting) deadlock_def run_bind run_empty)
lemma retvals_deadlock [simp]: "\<^bold>R(deadlock) = {}"
by (simp add: deadlock_def)
definition deadlock_free :: "('e, 'r) itree \<Rightarrow> bool" where
"deadlock_free P = (\<forall> tr. \<not> P \<midarrow>tr\<leadsto> deadlock)"
lemma deadlock_free_deadlock: "deadlock_free deadlock = False"
by (simp add: deadlock_free_def deadlock_trace_to)
lemma deadlock_free_Ret: "deadlock_free (\<checkmark> x)"
by (simp add: deadlock_def deadlock_free_def)
lemma deadlock_free_Sil: "deadlock_free (Sil P) = deadlock_free P"
by (metis deadlock_free_def itree.disc(5) stable_deadlock trace_to_Sil trace_to_SilE)
lemma deadlock_free_VisI:
assumes
"dom(F) \<noteq> {}" "\<And> e. e \<in> dom(F) \<Longrightarrow> deadlock_free (F(e)\<^sub>p)"
shows "deadlock_free (Vis F)"
by (metis assms deadlock_def deadlock_free_def itree.inject(3) pdom_zero trace_to_VisE)
lemma deadlock_free_VisE:
assumes "deadlock_free (Vis F)"
"\<lbrakk> dom(F) \<noteq> {}; \<And> e. e \<in> pdom(F) \<Longrightarrow> deadlock_free (F(e)\<^sub>p) \<rbrakk> \<Longrightarrow> thesis"
shows thesis
by (metis assms deadlock_def deadlock_free_deadlock deadlock_free_def pdom_empty_iff_dom_empty trace_to.intros(3))
lemma deadlock_free_Vis:
"deadlock_free (Vis F) = (dom(F) \<noteq> {} \<and> (\<forall>e\<in>pdom(F). deadlock_free (F(e)\<^sub>p)))"
by (auto intro: deadlock_free_VisI elim: deadlock_free_VisE)
lemma deadlock_free_bindI: "\<lbrakk> deadlock_free P; \<forall> s\<in>\<^bold>R(P). deadlock_free (Q s) \<rbrakk> \<Longrightarrow> deadlock_free (P \<bind> Q)"
apply (auto elim!:trace_to_bindE bind_VisE' simp add: deadlock_def deadlock_free_def)
apply (metis retvals_traceI trace_to_Nil)
apply (meson retvals_traceI)
done
lemma deadlock_free_bind_iff:
"deadlock_free (P \<bind> Q) \<longleftrightarrow> (deadlock_free P \<and> (\<forall> s\<in>\<^bold>R(P). deadlock_free (Q s)))"
apply (auto intro: deadlock_free_bindI)
apply (auto simp add: deadlock_free_def retvals_def)
apply (metis deadlock_bind trace_to_bind_left)
apply (meson trace_to_bind)
done
lemma deadlock_free_Vis_prism_fun:
"wb_prism c \<Longrightarrow> deadlock_free (Vis (prism_fun c A (\<lambda> x. (P x, Ret (f x))))) = (\<exists>v\<in>A. P v)"
by (auto simp add: deadlock_free_Vis dom_prism_fun prism_fun_apply deadlock_free_Ret)
end |
{-# OPTIONS --without-K --safe #-}
open import Categories.Category.Core using (Category)
open import Categories.Functor.Bifunctor using (Bifunctor)
module Categories.Diagram.Cowedge {o โ e oโฒ โโฒ eโฒ} {C : Category o โ e} {D : Category oโฒ โโฒ eโฒ}
(F : Bifunctor (Category.op C) C D) where
private
module C = Category C
module D = Category D
open D
open HomReasoning
open Equiv
variable
A : Obj
open import Level
open import Categories.Functor
open import Categories.Functor.Construction.Constant
open import Categories.NaturalTransformation.Dinatural
open Functor F
record Cowedge : Set (levelOfTerm F) where
field
E : Obj
dinatural : DinaturalTransformation F (const E)
module dinatural = DinaturalTransformation dinatural
Cowedge-โ : (W : Cowedge) โ Cowedge.E W โ A โ Cowedge
Cowedge-โ {A = A} W f = record
{ E = A
; dinatural = extranaturalหก (ฮป X โ f โ dinatural.ฮฑ X)
(assoc โ โ-resp-โสณ (extranatural-commหก dinatural) โ sym-assoc)
}
where open Cowedge W
record Cowedge-Morphism (Wโ Wโ : Cowedge) : Set (levelOfTerm F) where
private
module Wโ = Cowedge Wโ
module Wโ = Cowedge Wโ
open DinaturalTransformation
field
u : Wโ.E โ Wโ.E
commute : โ {C} โ u โ Wโ.dinatural.ฮฑ C โ Wโ.dinatural.ฮฑ C
Cowedge-id : โ {W} โ Cowedge-Morphism W W
Cowedge-id {W} = record { u = D.id ; commute = D.identityหก }
Cowedge-Morphism-โ : {A B C : Cowedge} โ Cowedge-Morphism B C โ Cowedge-Morphism A B โ Cowedge-Morphism A C
Cowedge-Morphism-โ M N = record { u = u M โ u N ; commute = assoc โ (โ-resp-โสณ (commute N) โ commute M) }
where
open Cowedge-Morphism
open HomReasoning
|
#install.packages("FLCore", repo = "http://flr-project.org/R")
#library(devtools)
#install_github("ices-tools-prod/msy")
library(msy)
nsamp <- 1000 # number of stochatic runs for each option
Mstk<-SMS2FLStocks(sumfile=file.path(data.path,'summary.out'),
bio.interact=F, read.input=TRUE, read.output=TRUE,control=read.FLSMS.control())
SSB.R.year.first<[email protected]
SSB.R.year.last <[email protected]
SSB.R.year.first[SSB.R.year.first==-1]<[email protected]
SSB.R.year.last[SSB.R.year.last==-1]<[email protected]
# read recruiment years used
recruit.years<-matrix(head(scan(file='recruitment_years.in',comment.char='#'),-1),[email protected]@first.year.model+1 ,byrow=T)
colnames(recruit.years)<-c(as.character(seq([email protected],[email protected])))
dev<-'png'
nox<-1; noy<-1;
noxy<-nox*noy
i<-noxy
stk<-Mstk
class(stk)<-'FLStock'
# move recruitment to first Quarter
[email protected][1,,,1,,]<-as.vector(stock.n(stk)[1,,,3,,])
stk<-trim(stk,season=1,year=SSB.R.year.first[1]:(SSB.R.year.last[1]-1)) # Delete the most recent year (as driven by used S/R relation). SSB and recruit are there, but the rest of data is crap (first half-year only)
harvest.spwn(stk)<-0
m.spwn(stk)<-0
#[email protected]<[email protected]/1000
cat('\n',sp.names[1+first.VPA-1],'\n SSB:\n');print(ssb(stk))
save(stk,file=file.path(data.path,'NOP_FLR.Rdata'))
models<- c("Ricker", "Segreg", "Bevholt")
if (0==length(excl.years<-as.numeric(dimnames(recruit.years)[[2]][0==recruit.years[1,]]))) excl.years<-NULL
FIT<-eqsr_fit(stk, nsamp = nsamp, models = models,
method = "Buckland",
id.sr = paste(sp.names[1+first.VPA-1],', ',SSB.R.year.first[1],'-',SSB.R.year.last[1],sep=''),
remove.years = excl.years)
if (i>=noxy) {
if (dev=='png') cleanup()
newplot(dev,filename=paste('equisim',1,sep='_'),nox,noy,Portrait=T);
i<-0
}
eqsr_plot(FIT,Scale=0.001,n=nsamp)
dev.off()
if (dev=='png') cleanup()
print(FIT$sr.det)
|
\clearpage
\subsection{Compound Statement} % (fold)
\label{sub:compound_statement}
\nameref{sub:branching} and \nameref{sub:looping} statements need to be able to include a number of instructions within their paths. Many languages manage this by allowing only a \emph{single} statement in each of these paths, and then providing the \emph{compound statement}: a single statement that groups multiple statements together (see the short sketch at the end of this section).
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{./topics/control-flow/diagrams/CompoundStatement}
\caption{A compound statement is a statement that can contain other statements}
\label{fig:branching-compound-statement}
\end{figure}
\mynote{
\begin{itemize}
\item \emph{Compound Statement} is a \textbf{term} used to describe a way of grouping \emph{actions}, allowing you to create a single statement that contains multiple statements.
\item Compound Statements are useful when combined with \nameref{sub:branching} and \nameref{sub:looping} statements. Allowing you to put multiple statements within a path.
\end{itemize}
}
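For illustration, the following C-style sketch groups two statements into a single compound statement so that both run in one branching path (the syntax, the \texttt{read\_pin} call, and the variables are purely illustrative and not tied to any particular language):

\begin{verbatim}
if (attempts < 3)
{                              /* start of the compound statement */
    read_pin(&pin);            /* first grouped statement         */
    attempts = attempts + 1;   /* second grouped statement        */
}                              /* end of the compound statement   */
\end{verbatim}

Without the braces only the first statement would belong to the branch; wrapping both in a compound statement lets the branch treat them as one.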
% subsection compound_statements (end) |
[STATEMENT]
lemma spies_evs_rev: "spies evs = spies (rev evs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. knows Spy evs = knows Spy (rev evs)
[PROOF STEP]
apply (induct_tac "evs")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. knows Spy [] = knows Spy (rev [])
2. \<And>a list. knows Spy list = knows Spy (rev list) \<Longrightarrow> knows Spy (a # list) = knows Spy (rev (a # list))
[PROOF STEP]
apply (rename_tac [2] a b)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. knows Spy [] = knows Spy (rev [])
2. \<And>a b. knows Spy b = knows Spy (rev b) \<Longrightarrow> knows Spy (a # b) = knows Spy (rev (a # b))
[PROOF STEP]
apply (induct_tac [2] "a")
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. knows Spy [] = knows Spy (rev [])
2. \<And>a b x1 x2 x3. knows Spy b = knows Spy (rev b) \<Longrightarrow> knows Spy (Says x1 x2 x3 # b) = knows Spy (rev (Says x1 x2 x3 # b))
3. \<And>a b x1 x2. knows Spy b = knows Spy (rev b) \<Longrightarrow> knows Spy (Gets x1 x2 # b) = knows Spy (rev (Gets x1 x2 # b))
4. \<And>a b x1 x2. knows Spy b = knows Spy (rev b) \<Longrightarrow> knows Spy (Notes x1 x2 # b) = knows Spy (rev (Notes x1 x2 # b))
[PROOF STEP]
apply (simp_all (no_asm_simp) add: spies_Says_rev spies_Gets_rev spies_Notes_rev)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
Private flying is most associated with the traditional form of factory-produced two and four-seater, single piston-engine training and touring aircraft. Examples of these are the Cessna 152, Cessna 172, and Piper PA-28 Cherokee, all with their origins in the 1950s, and the more modern designs of Cirrus. The average cost per hour to fly such aircraft has been estimated to be £133, compared to an estimated £77 per hour for gliders, and a reported £35 per hour for microlights. Recent trends have seen an increase in the use of microlights, and also in recreational helicopter flying following the introduction of smaller and cheaper machines such as the Robinson R22 and R44. Another growth area in private flying in recent years has been in the use of amateur-built aircraft, such as the Van's Aircraft RV-4 and the Europa.
|
#Dependencies
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine, inspect, func
from sqlalchemy import distinct
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from flask import Flask, jsonify
import datetime as dt
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
#reflect
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
measurement = Base.classes.measurement
station = Base.classes.station
# 2. Create an app, being sure to pass __name__
app = Flask(__name__)
#Home Page
@app.route("/")
def home():
return (
f"Welcome to the Homework API Home Page!<br>"
f"Available Routes:<br>"
f"/api/v1.0/precipitation<br>"
f"/api/v1.0/stations<br>"
f"/api/v1.0/tobs<br>"
f"/api/v1.0/yyyy-mm-dd<br>"
f"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br>"
f"Please copy the format above to check the temps for the dates"
)
#Precipitation
@app.route("/api/v1.0/precipitation")
def precipiation():
#Open session and query
session = Session(engine)
year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results =session.query(measurement.date, measurement.prcp).\
filter(measurement.date > year_ago).\
order_by(measurement.date).all()
#Close session
session.close()
#Dictionary
precipitation = []
for date, prcp in results:
dict = {}
dict["date"] = date
dict["prcp"] = prcp
precipitation.append(dict)
return jsonify(precipitation)
#Stations
@app.route("/api/v1.0/stations")
def station():
#Open session and query
session = Session(engine)
results = session.query(measurement.station, func.count(measurement.station)).group_by(measurement.station).order_by(func.count(measurement.station).desc()).all()
stations = list(np.ravel(results))
#Close session
session.close()
return jsonify(stations)
#TOBS
@app.route("/api/v1.0/tobs")
def tobs():
#Open session and query
session = Session(engine)
year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
query =session.query(measurement.tobs).\
filter(measurement.date >= year_ago).\
filter(measurement.station == 'USC00519281').all()
tobs = list(np.ravel(query))
#Close session
session.close()
return jsonify(tobs)
#Dates
@app.route("/api/v1.0/<start_date>")
def date(start_date):
#Open session and query
session = Session(engine)
temps1 = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
filter(measurement.date >= start_date).all()
#Close session
session.close()
    return jsonify(list(np.ravel(temps1)))
#Dates
@app.route("/api/v1.0/<start_date>/<end_date>")
def date2(start_date, end_date):
#Open session and query
session = Session(engine)
temps2 = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
filter(measurement.date >= start_date).filter(measurement.date <= end_date).all()
#Close session
session.close()
    return jsonify(list(np.ravel(temps2)))
if __name__ == '__main__':
app.run(debug=True)
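# Example requests once the server is running locally (URLs assume Flask's default
# development host/port; the dates below are illustrative):
#   curl http://127.0.0.1:5000/api/v1.0/precipitation
#   curl http://127.0.0.1:5000/api/v1.0/stations
#   curl http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23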
|
! { dg-do run }
!
! PR fortran/42769
! The static resolution of A%GET used to be incorrectly simplified to MOD2's
! MY_GET instead of the original MOD1's MY_GET, depending on the order in which
! MOD1 and MOD2 were use-associated.
!
! Original testcase by Salvator Filippone <[email protected]>
! Reduced by Janus Weil <[email protected]>
module mod1
type :: t1
contains
procedure, nopass :: get => my_get
end type
contains
subroutine my_get(i)
i = 2
end subroutine
end module
module mod2
contains
subroutine my_get(i) ! must have the same name as the function in mod1
i = 5
end subroutine
end module
call test1()
call test2()
contains
subroutine test1()
use mod2
use mod1
type(t1) :: a
call a%get(j)
if (j /= 2) call abort
end subroutine test1
subroutine test2()
use mod1
use mod2
type(t1) :: a
call a%get(j)
if (j /= 2) call abort
end subroutine test2
end
|
lemma connected_componentI: "connected T \<Longrightarrow> T \<subseteq> S \<Longrightarrow> x \<in> T \<Longrightarrow> y \<in> T \<Longrightarrow> connected_component S x y" |
import algebra.category.Group.limits
import category_theory.limits.concrete_category
universes u v
namespace Ab
open category_theory
open category_theory.limits
variables {J : Type u} [category_theory.small_category J] (K : J โฅค Ab.{max u v})
lemma comp_apply {A B C : Ab} (f : A โถ B) (g : B โถ C) (a : A) :
(f โซ g) a = g (f a) := rfl
instance : add_comm_group (K โ category_theory.forget _).sections :=
{ add := ฮป u v, โจ u + v, ฮป i j f, begin
have u2 := u.2 f,
have v2 := v.2 f,
dsimp only [functor.comp_map, pi.add_apply, forget_map_eq_coe, subtype.val_eq_coe] at โข u2 v2,
simp only [u2, v2, map_add],
endโฉ,
add_assoc := ฮป a b c, by { ext, simp only [add_assoc, subtype.coe_mk] },
zero := โจ0, ฮป i j f, by { dsimp only [functor.comp_map, pi.zero_apply,
forget_map_eq_coe], rw [map_zero] }โฉ,
zero_add := ฮป a, by { ext, simp only [subtype.coe_mk, zero_add] },
add_zero := ฮป a, by { ext, simp only [subtype.coe_mk, add_zero] },
neg := ฮป t, โจ -t, begin
intros i j f,
have t2 := t.2 f,
dsimp only [functor.comp_map, pi.neg_apply, forget_map_eq_coe, subtype.val_eq_coe] at โข t2,
simp only [map_neg, t2],
end โฉ,
add_left_neg := ฮป a, by { ext, change - (a.1 x) + a.1 x = 0, simp only [add_left_neg] },
add_comm := ฮป a b, by { ext, change (a.1 x) + (b.1 x) = (b.1 x) + (a.1 x), rw [add_comm] } }
def explicit_limit_cone : cone K :=
{ X := AddCommGroup.of (K โ category_theory.forget _).sections,
ฯ :=
{ app := ฮป j,
{ to_fun := ฮป t, t.1 j,
map_zero' := rfl,
map_add' := ฮป x y, rfl },
naturality' := begin
intros i j f,
ext,
simpa using (x.2 f).symm,
end } }
def explicit_limit_cone_is_limit : is_limit (explicit_limit_cone K) :=
{ lift := ฮป S,
{ to_fun := ฮป t, โจฮป j, S.ฯ.app j t, begin
intros i j f,
dsimp,
rw โ S.w f,
refl,
endโฉ,
map_zero' := by { ext, dsimp, simpa },
map_add' := ฮป x y, by { ext, dsimp, simpa } },
fac' := begin
intros S j,
ext,
refl,
end,
uniq' := begin
intros S m hm,
ext,
simpa [โ hm],
end }
noncomputable
def barx : preserves_limit K (forget Ab.{max u v}) :=
preserves_limits_of_shape.preserves_limit
noncomputable
instance foo {K : J โฅค Ab.{u}} : preserves_limit K (forget Ab.{u}) :=
barx.{u u} K
lemma is_limit_ext {K : J โฅค Ab.{u}} (C : limit_cone K) (x y : C.cone.X)
(h : โ j : J, C.cone.ฯ.app j x = C.cone.ฯ.app j y) : x = y :=
limits.concrete.is_limit_ext K C.2 _ _ h
noncomputable
instance forget_creates_limit (J : Type u) [small_category J] (K : J โฅค Ab.{max u v}) :
creates_limit K (forget Ab) :=
creates_limit_of_reflects_iso $ ฮป C hC,
{ lifted_cone := explicit_limit_cone _,
valid_lift := (types.limit_cone_is_limit _).unique_up_to_iso hC,
makes_limit := explicit_limit_cone_is_limit _ }
-- Do we have this somewhere else?
noncomputable
instance forget_creates_limits : creates_limits (forget Ab.{max u v}) :=
by { constructor, introsI J hJ, constructor, intros K, apply Ab.forget_creates_limit.{_ u} J K }
-- Do we have this somewhere else?
noncomputable
instance forget_creates_limits' : creates_limits (forget Ab.{u}) :=
Ab.forget_creates_limits.{u u}
end Ab
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 14:12:58 2021
@author: Cam
"""
import scipy
from scipy import integrate
from scipy.integrate import solve_ivp
from matplotlib import pyplot as plt
import numpy as np
#x2 = lambda x: x**2
#integral = integrate.quad(x2,0,4)
#exact = 4**3/3.0
#print(integral)
#print(exact)
# def expon_decay(t,y):
# return -0.5*y
# sol = integrate.solve_ivp(expon_decay,[0,10],[10])
# y = sol.y.T
# t = sol.t
# plt.plot(t,y)
# def lotkavolterra(t, z, a, b, c, d):
# x, y = z
# return [a*x - b*x*y, -c*y + d*x*y]
# sol = solve_ivp(lotkavolterra, [0, 15], [10, 2], args=(1.5, 1, 3, 1),
# dense_output=True)
# t = np.linspace(0, 15, 300)
# z = sol.sol(t)
# plt.plot(t, z.T)
# plt.xlabel('t')
# plt.legend(['x', 'y'], shadow=True)
# plt.title('Lotka-Volterra System')
# plt.show()
h = 0.01
t = np.array([0.0, 10.0])
yinit = np.array([0.4, -0.7, 21.0])
def myFunc(t, y):
# Lorenz system
sigma = 10.0
rho = 28.0
beta = 8.0/3.0
dy = np.zeros((len(y)))
dy[0] = sigma*(y[1] - y[0])
dy[1] = y[0]*(rho - y[2]) - y[1]
dy[2] = y[0]*y[1] - beta*y[2]
return dy
sol_lorenz = solve_ivp(myFunc,t,yinit,max_step=.01,method='RK45')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(sol_lorenz.y[0], sol_lorenz.y[1], sol_lorenz.y[2])
ax.set_xlabel('x', fontsize=15)
ax.set_ylabel('y', fontsize=15)
ax.set_zlabel('z', fontsize=15) |
section "Common Basis Theory"
theory Base_FDS
imports "HOL-Library.Pattern_Aliases"
begin
declare Let_def [simp]
text \<open>Lemma \<open>size_prod_measure\<close>, when declared with the \<open>measure_function\<close> attribute,
enables \<open>fun\<close> to prove termination of a larger class of functions automatically.
By default, \<open>fun\<close> only tries lexicographic combinations of the sizes of the parameters.
With \<open>size_prod_measure\<close> enabled it also tries measures based on the sum of the sizes
of different parameters.
To alert the reader whenever such a more subtle termination proof is taking place
the lemma is not enabled all the time but only when it is needed.
\<close>
lemma size_prod_measure:
"is_measure f \<Longrightarrow> is_measure g \<Longrightarrow> is_measure (size_prod f g)"
by (rule is_measure_trivial)
end
|
State Before: M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
a b : R
ha : a ∈ AddSubmonoid.closure ↑S
hb : b ∈ AddSubmonoid.closure ↑S
⊢ a * b ∈ AddSubmonoid.closure ↑S State After: M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
b : R
hb : b ∈ AddSubmonoid.closure ↑S
⊢ ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * b ∈ AddSubmonoid.closure ↑S Tactic: revert a State Before: M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
b : R
hb : b ∈ AddSubmonoid.closure ↑S
⊢ ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * b ∈ AddSubmonoid.closure ↑S State After: case refine'_1
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ ∀ (x : R), x ∈ ↑S → (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) x
case refine'_2
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) 0
case refine'_3
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ ∀ (x y : R),
    (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) x →
      (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) y →
        (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) (x + y) Tactic: refine' @AddSubmonoid.closure_induction _ _ _
    (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S)
    _ hb _ _ _ <;> clear hb b State Before: case refine'_1
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ ∀ (x : R), x ∈ ↑S → (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) x State After: no goals Tactic: exact fun r hr b hb => MulMemClass.mul_right_mem_add_closure hb hr State Before: case refine'_2
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) 0 State After: no goals Tactic: exact fun _ => by simp only [mul_zero, (AddSubmonoid.closure (S : Set R)).zero_mem] State Before: M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
a✝ : R
x✝ : a✝ ∈ AddSubmonoid.closure ↑S
⊢ a✝ * 0 ∈ AddSubmonoid.closure ↑S State After: no goals Tactic: simp only [mul_zero, (AddSubmonoid.closure (S : Set R)).zero_mem] State Before: case refine'_3
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ ∀ (x y : R),
    (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) x →
      (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) y →
        (fun z => ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * z ∈ AddSubmonoid.closure ↑S) (x + y) State After: case refine'_3
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ ∀ (x y : R),
    (∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * x ∈ AddSubmonoid.closure ↑S) →
      (∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * y ∈ AddSubmonoid.closure ↑S) →
        ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * x + a * y ∈ AddSubmonoid.closure ↑S Tactic: simp_rw [mul_add] State Before: case refine'_3
M : Type u_2
A : Type ?u.259525
B : Type ?u.259528
R : Type u_1
inst✝² : NonUnitalNonAssocSemiring R
inst✝¹ : SetLike M R
inst✝ : MulMemClass M R
S : M
⊢ ∀ (x y : R),
    (∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * x ∈ AddSubmonoid.closure ↑S) →
      (∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * y ∈ AddSubmonoid.closure ↑S) →
        ∀ {a : R}, a ∈ AddSubmonoid.closure ↑S → a * x + a * y ∈ AddSubmonoid.closure ↑S State After: no goals Tactic: exact fun r s hr hs b hb => (AddSubmonoid.closure (S : Set R)).add_mem (hr hb) (hs hb) |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Short Sectioned Assignment
% LaTeX Template
% Version 1.0 (5/5/12)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Frits Wenneker (http://www.howtotex.com)
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[paper=a4, fontsize=11pt]{scrartcl} % A4 paper and 11pt font size
\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\usepackage{fourier} % Use the Adobe Utopia font for the document - comment this line to return to the LaTeX default
\usepackage[english]{babel} % English language/hyphenation
\usepackage{amsmath,amsfonts,amsthm} % Math packages
\usepackage{lipsum} % Used for inserting dummy 'Lorem ipsum' text into the template
\usepackage{sectsty} % Allows customizing section commands
\allsectionsfont{\centering \normalfont\scshape} % Make all sections centered, the default font and small caps
\usepackage{fancyhdr} % Custom headers and footers
\pagestyle{fancyplain} % Makes all pages in the document conform to the custom headers and footers
\fancyhead{} % No page header - if you want one, create it in the same way as the footers below
\fancyfoot[L]{} % Empty left footer
\fancyfoot[C]{} % Empty center footer
\fancyfoot[R]{\thepage} % Page numbering for right footer
\renewcommand{\headrulewidth}{0pt} % Remove header underlines
\renewcommand{\footrulewidth}{0pt} % Remove footer underlines
\setlength{\headheight}{13.6pt} % Customize the height of the header
\numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\setlength\parindent{0pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text
%----------------------------------------------------------------------------------------
% TITLE SECTION
%----------------------------------------------------------------------------------------
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height
\title{
\normalfont \normalsize
\textsc{How to Learn to Code} \\ [25pt] % Your university, school and/or department name(s)
\horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule
\huge R Syllabus \\ % The assignment title
\horrule{2pt} \\[0.5cm] % Thick bottom horizontal rule
}
\author{Amy Pomeroy} % ADD YOUR NAME HERE IF YOU CONTRIBUTE!!
\date{\normalsize\today} % Today's date or a custom date
\begin{document}
\maketitle % Print the title
%----------------------------------------------------------------------------------------
% FIRST CLASS
%----------------------------------------------------------------------------------------
\section{First Class - The Basics}
The goal of this class is to introduce the basics of R and get students comfortable working in RStudio. It is also a good time to make sure that all students have R and RStudio up and running on their computers. A brief sketch of the kind of code covered in this class follows the list of expectations below.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Use the basic math operators (+, -, *, /)
\item Understand the assignment operator (<-) and how to use it
\item Understand what a function is, how to call one, and know some basic built-in functions
\item Understand the three most common data classes (character, numeric, logical)
\item Apply the basic comparison operators (>, <, ==, >=, <=)
\item Compare objects and predict how data classes change when objects are compared
\end{enumerate}
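The following minimal sketch illustrates the kind of code students should be comfortable reading and writing by the end of this class; the particular numbers are only illustrative.
\begin{verbatim}
# Basic math and assignment
x <- (2 + 4) * 3        # assignment with <-; x is 18
y <- x / 9              # y is 2

# Calling functions and checking data classes
sqrt(x)                 # a built-in function
class(x)                # "numeric"
class("hello")          # "character"
class(x > y)            # "logical"

# Comparison operators
x > y                   # TRUE
y == 2                  # TRUE
y >= 3                  # FALSE
\end{verbatim}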
%----------------------------------------------------------------------------------------
% SECOND CLASS
%----------------------------------------------------------------------------------------
\section{Second Class - Data Structures}
Begin by reviewing the material from the previous class (5-10 minutes). Then go over the four basic data structures, emphasizing their similarities and differences. Finally, discuss how to subset each structure, again emphasizing similarities and differences. A short subsetting sketch follows the list of expectations below.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Understand the basic R data structures (vector, matrix, list, data frame)
\item Subset the four basic data structures
\end{enumerate}
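A minimal sketch of the four structures and their subsetting, with illustrative values:
\begin{verbatim}
# The four basic data structures
v <- c(10, 20, 30, 40)                         # vector
m <- matrix(1:6, nrow = 2)                     # matrix (2 rows, 3 columns)
l <- list(name = "sample", values = v)         # list
d <- data.frame(id = 1:3, score = c(5, 7, 9))  # data frame

# Subsetting each structure
v[2]              # second element of the vector
m[1, 3]           # row 1, column 3 of the matrix
l$values          # the "values" element of the list
d$score[2]        # second entry of the score column
d[d$score > 5, ]  # rows of the data frame where score exceeds 5
\end{verbatim}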
%----------------------------------------------------------------------------------------
% THIRD CLASS
%----------------------------------------------------------------------------------------
\section{Third Class - Plotting Data}
Start this class by introducing how to import data from a csv file. Then review subsetting using examples from the imported data, since understanding how to subset the data will make plotting much easier. Then go over the arguments of the basic plot function; a short plotting sketch follows the list of expectations below.
It would be good to make a lesson plan for this class yourself with data that you find interesting. Please write it up in the same format as the other documents and save it to the GitHub repository so others can use it.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Import data from a csv file format
\item Use the arguments of the plot function
\item Make basic plots
\end{enumerate}
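A minimal sketch of importing and plotting; the file name and column names here are purely illustrative and should be replaced by whatever dataset you choose for your own lesson plan.
\begin{verbatim}
# Import data from a csv file
weather <- read.csv("weather.csv")

# Subset the rows of interest before plotting
july <- weather[weather$month == 7, ]

# A basic plot with common arguments
plot(july$day, july$temp,
     type = "p", col = "blue", pch = 19,
     xlab = "Day of month", ylab = "Temperature",
     main = "July temperatures")
\end{verbatim}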
%----------------------------------------------------------------------------------------
% FOURTH CLASS
%----------------------------------------------------------------------------------------
\section{Fourth Class - Control Statements}
This is typically the most challenging class for a lot of students. It does not require a review of plotting to be successful. Make sure to start with very simple examples and build complexity only as students show they understand. This is a really important concept and takes some patience to teach well. A short sketch of the three control statements follows the list of expectations below.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Implement the three basic control statements in R (for-loops, if/else statements, and while statements)
\item Learn the and/or operators for combining logical statements
\end{enumerate}
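A minimal sketch of the three control statements and the logical operators, with illustrative values:
\begin{verbatim}
# for-loop with an if/else statement and the "and" operator
for (i in 1:6) {
  if (i %% 2 == 0 && i > 2) {
    print(paste(i, "is even and greater than 2"))
  } else {
    print(paste(i, "is odd or small"))
  }
}

# while-loop
total <- 0
while (total < 20) {
  total <- total + 7
}
total   # 21
\end{verbatim}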
%----------------------------------------------------------------------------------------
% FIFTH CLASS
%----------------------------------------------------------------------------------------
\section{Fifth Class - Functions}
If your students are struggling with control statements, it would be good to do more practice with them today and push this lesson back a day. Today's goal is to teach how to write and use functions in R. Be sure to emphasize why students would want to write their own functions and how functions can help in their research. A small function sketch follows the list of expectations below.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Write and run a basic function in R
\item Understand function environments and how functions find things
\item Understand the "do not repeat yourself" (DRY) principle
\end{enumerate}
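A minimal sketch of a user-defined function; the function name and data are illustrative.
\begin{verbatim}
# A small function that avoids repeating the same calculation (DRY)
standardize <- function(x) {
  centered <- x - mean(x)   # lives only in the function's environment
  centered / sd(x)          # the last expression is returned
}

scores <- c(12, 15, 9, 20)
standardize(scores)

# "centered" does not leak into the global workspace:
exists("centered")   # FALSE (when run from a clean workspace)
\end{verbatim}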
%----------------------------------------------------------------------------------------
% SIXTH CLASS
%----------------------------------------------------------------------------------------
\section{Sixth Class - Packages}
You may not reach this lesson if your students struggled with control statements, and that's okay; you can always hand out the lecture notes to those students who are interested. The focus of this lecture is on reproducible coding (something we can all work on). A short package sketch follows the list of expectations below.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Install and load R packages
\item Consider some principles of reproducible research
\item Know the basic components of an R package
\item Create a simple R package using RStudio and roxygen2
\end{enumerate}
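A minimal sketch of installing and loading a package, plus the roxygen2 comment style used when building a package; ggplot2 is only an example of a CRAN package and any package would do.
\begin{verbatim}
# Install once, then load in every session that uses it
install.packages("ggplot2")
library(ggplot2)

# A roxygen2-documented function, as it would appear inside a package's R/ folder
#' Add two numbers
#'
#' @param a,b numeric values
#' @return their sum
#' @export
add_two <- function(a, b) {
  a + b
}
\end{verbatim}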
%----------------------------------------------------------------------------------------
% SEVENTH AND EIGHTH CLASSES
%----------------------------------------------------------------------------------------
\section{Seventh and Eighth Classes - Final Projects}
Devote the last two classes to working on a final project of your choosing. This can be done individually or in groups. Some sample projects will be provided.
%----------------------------------------------------------------------------------------
\end{document} |
%default total
tailRecId : (a -> Either a b) -> a -> b
tailRecId f a = case f a of
Left a2 => tailRecId f a2
Right b => b
iamvoid : Void
iamvoid = tailRecId go ()
where go : () -> Either () Void
go () = Left ()
|
lemma uniformly_continuous_on_extension_on_closure:
  fixes f::"'a::metric_space \<Rightarrow> 'b::complete_space"
  assumes uc: "uniformly_continuous_on X f"
  obtains g where "uniformly_continuous_on (closure X) g"
    "\<And>x. x \<in> X \<Longrightarrow> f x = g x"
    "\<And>Y h x. X \<subseteq> Y \<Longrightarrow> Y \<subseteq> closure X \<Longrightarrow> continuous_on Y h \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> f x = h x) \<Longrightarrow> x \<in> Y \<Longrightarrow> h x = g x" |
import Lvl
open import Type
module Data.List.Relation.Sublist {ℓ} {T : Type{ℓ}} where
open import Data.List using (List ; ∅ ; _⊰_)
open import Logic
-- Whether a list's elements are contained in another list in order.
-- Examples:
--   [1,2,3] ⊑ [1,2,3]
--   [1,2,3] ⊑ [1,2,3,4]
--   [1,2,3] ⊑ [0,1,2,3]
--   [1,2,3] ⊑ [0,1,10,2,20,3,30]
--   [0,10,20,30] ⊑ [0,1,10,2,20,3,30]
data _⊑_ : List(T) → List(T) → Stmt{ℓ} where
  empty : (∅ ⊑ ∅)
  use : ∀{x}{l₁ l₂} → (l₁ ⊑ l₂) → ((x ⊰ l₁) ⊑ (x ⊰ l₂))
  skip : ∀{x}{l₁ l₂} → (l₁ ⊑ l₂) → (l₁ ⊑ (x ⊰ l₂))
-- Whether a list's elements are contained in another list in order while not containing the same sublist.
-- Examples:
--   [1,2,3] ⊏ [1,2,3,4]
--   [1,2,3] ⊏ [0,1,2,3]
--   [1,2,3] ⊏ [0,1,10,2,20,3,30]
data _⊏_ : List(T) → List(T) → Stmt{ℓ} where
  use : ∀{x}{l₁ l₂} → (l₁ ⊏ l₂) → ((x ⊰ l₁) ⊏ (x ⊰ l₂))
  skip : ∀{x}{l₁ l₂} → (l₁ ⊑ l₂) → (l₁ ⊏ (x ⊰ l₂))
|
(* Title: HOL/Auth/n_mutualEx_lemma_inv__5_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_mutualEx Protocol Case Study*}
theory n_mutualEx_lemma_inv__5_on_rules imports n_mutualEx_lemma_on_inv__5
begin
section{*All lemmas on causal relation between inv__5*}
lemma lemma_inv__5_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv0 p__Inv1. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv0~=p__Inv1\<and>f=inv__5 p__Inv0 p__Inv1)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i. i\<le>N\<and>r=n_Try i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Crit i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Exit i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Idle i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Try i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_TryVsinv__5) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_CritVsinv__5) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Exit i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_ExitVsinv__5) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Idle i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_IdleVsinv__5) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
[STATEMENT]
lemma Qp_poly_tuple_Cons:
assumes "is_poly_tuple n fs"
assumes "f \<in> carrier (Q\<^sub>p[\<X>\<^bsub>k\<^esub>])"
assumes "k \<le>n"
shows "is_poly_tuple n (f#fs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_poly_tuple n (f # fs)
[PROOF STEP]
using is_poly_tuple_Cons[of n fs f] poly_ring_car_mono[of k n] assms
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>is_poly_tuple n fs; f \<in> carrier (Q\<^sub>p [\<X>\<^bsub>n\<^esub>])\<rbrakk> \<Longrightarrow> is_poly_tuple n (f # fs)
k \<le> n \<Longrightarrow> carrier (Q\<^sub>p [\<X>\<^bsub>k\<^esub>]) \<subseteq> carrier (Q\<^sub>p [\<X>\<^bsub>n\<^esub>])
is_poly_tuple n fs
f \<in> carrier (Q\<^sub>p [\<X>\<^bsub>k\<^esub>])
k \<le> n
goal (1 subgoal):
1. is_poly_tuple n (f # fs)
[PROOF STEP]
by blast |
/* blas/blas.c
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2009 Gerard Jungman & Brian
* Gough
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* GSL implementation of BLAS operations for vectors and dense
* matrices. Note that GSL native storage is row-major. */
#include <config.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_cblas.h>
#include <gsl/gsl_blas_types.h>
#include <gsl/gsl_blas.h>
/* ========================================================================
* Level 1
* ========================================================================
*/
/* CBLAS defines vector sizes in terms of int. GSL defines sizes in
terms of size_t, so we need to convert these into integers. There
is the possibility of overflow here. FIXME: Maybe this could be
caught */
#define INT(X) ((int)(X))
int
gsl_blas_sdsdot (float alpha, const gsl_vector_float * X,
const gsl_vector_float * Y, float *result)
{
if (X->size == Y->size)
{
*result =
cblas_sdsdot (INT (X->size), alpha, X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dsdot (const gsl_vector_float * X, const gsl_vector_float * Y,
double *result)
{
if (X->size == Y->size)
{
*result =
cblas_dsdot (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_sdot (const gsl_vector_float * X, const gsl_vector_float * Y,
float *result)
{
if (X->size == Y->size)
{
*result =
cblas_sdot (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_ddot (const gsl_vector * X, const gsl_vector * Y, double *result)
{
if (X->size == Y->size)
{
*result =
cblas_ddot (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_cdotu (const gsl_vector_complex_float * X,
const gsl_vector_complex_float * Y, gsl_complex_float * dotu)
{
if (X->size == Y->size)
{
cblas_cdotu_sub (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), GSL_COMPLEX_P (dotu));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_cdotc (const gsl_vector_complex_float * X,
const gsl_vector_complex_float * Y, gsl_complex_float * dotc)
{
if (X->size == Y->size)
{
cblas_cdotc_sub (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), GSL_COMPLEX_P (dotc));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zdotu (const gsl_vector_complex * X, const gsl_vector_complex * Y,
gsl_complex * dotu)
{
if (X->size == Y->size)
{
cblas_zdotu_sub (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), GSL_COMPLEX_P (dotu));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zdotc (const gsl_vector_complex * X, const gsl_vector_complex * Y,
gsl_complex * dotc)
{
if (X->size == Y->size)
{
cblas_zdotc_sub (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), GSL_COMPLEX_P (dotc));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* Norms of vectors */
float
gsl_blas_snrm2 (const gsl_vector_float * X)
{
return cblas_snrm2 (INT (X->size), X->data, INT (X->stride));
}
double
gsl_blas_dnrm2 (const gsl_vector * X)
{
return cblas_dnrm2 (INT (X->size), X->data, INT (X->stride));
}
float
gsl_blas_scnrm2 (const gsl_vector_complex_float * X)
{
return cblas_scnrm2 (INT (X->size), X->data, INT (X->stride));
}
double
gsl_blas_dznrm2 (const gsl_vector_complex * X)
{
return cblas_dznrm2 (INT (X->size), X->data, INT (X->stride));
}
/* Absolute sums of vectors */
float
gsl_blas_sasum (const gsl_vector_float * X)
{
return cblas_sasum (INT (X->size), X->data, INT (X->stride));
}
double
gsl_blas_dasum (const gsl_vector * X)
{
return cblas_dasum (INT (X->size), X->data, INT (X->stride));
}
float
gsl_blas_scasum (const gsl_vector_complex_float * X)
{
return cblas_scasum (INT (X->size), X->data, INT (X->stride));
}
double
gsl_blas_dzasum (const gsl_vector_complex * X)
{
return cblas_dzasum (INT (X->size), X->data, INT (X->stride));
}
/* Maximum elements of vectors */
CBLAS_INDEX_t
gsl_blas_isamax (const gsl_vector_float * X)
{
return cblas_isamax (INT (X->size), X->data, INT (X->stride));
}
CBLAS_INDEX_t
gsl_blas_idamax (const gsl_vector * X)
{
return cblas_idamax (INT (X->size), X->data, INT (X->stride));
}
CBLAS_INDEX_t
gsl_blas_icamax (const gsl_vector_complex_float * X)
{
return cblas_icamax (INT (X->size), X->data, INT (X->stride));
}
CBLAS_INDEX_t
gsl_blas_izamax (const gsl_vector_complex * X)
{
return cblas_izamax (INT (X->size), X->data, INT (X->stride));
}
/* Swap vectors */
int
gsl_blas_sswap (gsl_vector_float * X, gsl_vector_float * Y)
{
if (X->size == Y->size)
{
cblas_sswap (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dswap (gsl_vector * X, gsl_vector * Y)
{
if (X->size == Y->size)
{
cblas_dswap (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
    }
}
int
gsl_blas_cswap (gsl_vector_complex_float * X, gsl_vector_complex_float * Y)
{
if (X->size == Y->size)
{
cblas_cswap (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zswap (gsl_vector_complex * X, gsl_vector_complex * Y)
{
if (X->size == Y->size)
{
cblas_zswap (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* Copy vectors */
int
gsl_blas_scopy (const gsl_vector_float * X, gsl_vector_float * Y)
{
if (X->size == Y->size)
{
cblas_scopy (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dcopy (const gsl_vector * X, gsl_vector * Y)
{
if (X->size == Y->size)
{
cblas_dcopy (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_ccopy (const gsl_vector_complex_float * X,
gsl_vector_complex_float * Y)
{
if (X->size == Y->size)
{
cblas_ccopy (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zcopy (const gsl_vector_complex * X, gsl_vector_complex * Y)
{
if (X->size == Y->size)
{
cblas_zcopy (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* Compute Y = alpha X + Y */
int
gsl_blas_saxpy (float alpha, const gsl_vector_float * X, gsl_vector_float * Y)
{
if (X->size == Y->size)
{
cblas_saxpy (INT (X->size), alpha, X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_daxpy (double alpha, const gsl_vector * X, gsl_vector * Y)
{
if (X->size == Y->size)
{
cblas_daxpy (INT (X->size), alpha, X->data, INT (X->stride), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_caxpy (const gsl_complex_float alpha,
const gsl_vector_complex_float * X,
gsl_vector_complex_float * Y)
{
if (X->size == Y->size)
{
cblas_caxpy (INT (X->size), GSL_COMPLEX_P (&alpha), X->data,
INT (X->stride), Y->data, INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zaxpy (const gsl_complex alpha, const gsl_vector_complex * X,
gsl_vector_complex * Y)
{
if (X->size == Y->size)
{
cblas_zaxpy (INT (X->size), GSL_COMPLEX_P (&alpha), X->data,
INT (X->stride), Y->data, INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* Generate rotation */
int
gsl_blas_srotg (float a[], float b[], float c[], float s[])
{
cblas_srotg (a, b, c, s);
return GSL_SUCCESS;
}
int
gsl_blas_drotg (double a[], double b[], double c[], double s[])
{
cblas_drotg (a, b, c, s);
return GSL_SUCCESS;
}
/* Apply rotation to vectors */
int
gsl_blas_srot (gsl_vector_float * X, gsl_vector_float * Y, float c, float s)
{
if (X->size == Y->size)
{
cblas_srot (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), c, s);
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_drot (gsl_vector * X, gsl_vector * Y, const double c, const double s)
{
if (X->size == Y->size)
{
cblas_drot (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), c, s);
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* Generate modified rotation */
int
gsl_blas_srotmg (float d1[], float d2[], float b1[], float b2, float P[])
{
cblas_srotmg (d1, d2, b1, b2, P);
return GSL_SUCCESS;
}
int
gsl_blas_drotmg (double d1[], double d2[], double b1[], double b2, double P[])
{
cblas_drotmg (d1, d2, b1, b2, P);
return GSL_SUCCESS;
}
/* Apply modified rotation */
int
gsl_blas_srotm (gsl_vector_float * X, gsl_vector_float * Y, const float P[])
{
if (X->size == Y->size)
{
cblas_srotm (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), P);
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_drotm (gsl_vector * X, gsl_vector * Y, const double P[])
{
if (X->size == Y->size)
{
cblas_drotm (INT (X->size), X->data, INT (X->stride), Y->data,
INT (Y->stride), P);
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* Scale vector */
void
gsl_blas_sscal (float alpha, gsl_vector_float * X)
{
cblas_sscal (INT (X->size), alpha, X->data, INT (X->stride));
}
void
gsl_blas_dscal (double alpha, gsl_vector * X)
{
cblas_dscal (INT (X->size), alpha, X->data, INT (X->stride));
}
void
gsl_blas_cscal (const gsl_complex_float alpha, gsl_vector_complex_float * X)
{
cblas_cscal (INT (X->size), GSL_COMPLEX_P (&alpha), X->data,
INT (X->stride));
}
void
gsl_blas_zscal (const gsl_complex alpha, gsl_vector_complex * X)
{
cblas_zscal (INT (X->size), GSL_COMPLEX_P (&alpha), X->data,
INT (X->stride));
}
void
gsl_blas_csscal (float alpha, gsl_vector_complex_float * X)
{
cblas_csscal (INT (X->size), alpha, X->data, INT (X->stride));
}
void
gsl_blas_zdscal (double alpha, gsl_vector_complex * X)
{
cblas_zdscal (INT (X->size), alpha, X->data, INT (X->stride));
}
/* ===========================================================================
* Level 2
* ===========================================================================
*/
/* GEMV */
int
gsl_blas_sgemv (CBLAS_TRANSPOSE_t TransA, float alpha,
const gsl_matrix_float * A, const gsl_vector_float * X,
float beta, gsl_vector_float * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if ((TransA == CblasNoTrans && N == X->size && M == Y->size)
|| (TransA == CblasTrans && M == X->size && N == Y->size))
{
cblas_sgemv (CblasRowMajor, TransA, INT (M), INT (N), alpha, A->data,
INT (A->tda), X->data, INT (X->stride), beta, Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dgemv (CBLAS_TRANSPOSE_t TransA, double alpha, const gsl_matrix * A,
const gsl_vector * X, double beta, gsl_vector * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if ((TransA == CblasNoTrans && N == X->size && M == Y->size)
|| (TransA == CblasTrans && M == X->size && N == Y->size))
{
cblas_dgemv (CblasRowMajor, TransA, INT (M), INT (N), alpha, A->data,
INT (A->tda), X->data, INT (X->stride), beta, Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_cgemv (CBLAS_TRANSPOSE_t TransA, const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_vector_complex_float * X,
const gsl_complex_float beta, gsl_vector_complex_float * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if ((TransA == CblasNoTrans && N == X->size && M == Y->size)
|| (TransA == CblasTrans && M == X->size && N == Y->size)
|| (TransA == CblasConjTrans && M == X->size && N == Y->size))
{
cblas_cgemv (CblasRowMajor, TransA, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), X->data,
INT (X->stride), GSL_COMPLEX_P (&beta), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zgemv (CBLAS_TRANSPOSE_t TransA, const gsl_complex alpha,
const gsl_matrix_complex * A, const gsl_vector_complex * X,
const gsl_complex beta, gsl_vector_complex * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if ((TransA == CblasNoTrans && N == X->size && M == Y->size)
|| (TransA == CblasTrans && M == X->size && N == Y->size)
|| (TransA == CblasConjTrans && M == X->size && N == Y->size))
{
cblas_zgemv (CblasRowMajor, TransA, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), X->data,
INT (X->stride), GSL_COMPLEX_P (&beta), Y->data,
INT (Y->stride));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* HEMV */
int
gsl_blas_chemv (CBLAS_UPLO_t Uplo, const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_vector_complex_float * X,
const gsl_complex_float beta, gsl_vector_complex_float * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size || N != Y->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_chemv (CblasRowMajor, Uplo, INT (N), GSL_COMPLEX_P (&alpha), A->data,
INT (A->tda), X->data, INT (X->stride), GSL_COMPLEX_P (&beta),
Y->data, INT (Y->stride));
return GSL_SUCCESS;
}
int
gsl_blas_zhemv (CBLAS_UPLO_t Uplo, const gsl_complex alpha,
const gsl_matrix_complex * A, const gsl_vector_complex * X,
const gsl_complex beta, gsl_vector_complex * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size || N != Y->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zhemv (CblasRowMajor, Uplo, INT (N), GSL_COMPLEX_P (&alpha), A->data,
INT (A->tda), X->data, INT (X->stride), GSL_COMPLEX_P (&beta),
Y->data, INT (Y->stride));
return GSL_SUCCESS;
}
/* SYMV */
int
gsl_blas_ssymv (CBLAS_UPLO_t Uplo, float alpha, const gsl_matrix_float * A,
const gsl_vector_float * X, float beta, gsl_vector_float * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size || N != Y->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ssymv (CblasRowMajor, Uplo, INT (N), alpha, A->data, INT (A->tda),
X->data, INT (X->stride), beta, Y->data, INT (Y->stride));
return GSL_SUCCESS;
}
int
gsl_blas_dsymv (CBLAS_UPLO_t Uplo, double alpha, const gsl_matrix * A,
const gsl_vector * X, double beta, gsl_vector * Y)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size || N != Y->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dsymv (CblasRowMajor, Uplo, INT (N), alpha, A->data, INT (A->tda),
X->data, INT (X->stride), beta, Y->data, INT (Y->stride));
return GSL_SUCCESS;
}
/* TRMV */
int
gsl_blas_strmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix_float * A,
gsl_vector_float * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_strmv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
int
gsl_blas_dtrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix * A, gsl_vector * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dtrmv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
int
gsl_blas_ctrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix_complex_float * A,
gsl_vector_complex_float * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ctrmv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
int
gsl_blas_ztrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix_complex * A,
gsl_vector_complex * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ztrmv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
/* TRSV */
int
gsl_blas_strsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix_float * A,
gsl_vector_float * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_strsv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
int
gsl_blas_dtrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix * A, gsl_vector * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dtrsv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
int
gsl_blas_ctrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix_complex_float * A,
gsl_vector_complex_float * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ctrsv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
int
gsl_blas_ztrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, const gsl_matrix_complex * A,
gsl_vector_complex * X)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (N != X->size)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ztrsv (CblasRowMajor, Uplo, TransA, Diag, INT (N), A->data,
INT (A->tda), X->data, INT (X->stride));
return GSL_SUCCESS;
}
/* GER */
int
gsl_blas_sger (float alpha, const gsl_vector_float * X,
const gsl_vector_float * Y, gsl_matrix_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (X->size == M && Y->size == N)
{
cblas_sger (CblasRowMajor, INT (M), INT (N), alpha, X->data,
INT (X->stride), Y->data, INT (Y->stride), A->data,
INT (A->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dger (double alpha, const gsl_vector * X, const gsl_vector * Y,
gsl_matrix * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (X->size == M && Y->size == N)
{
cblas_dger (CblasRowMajor, INT (M), INT (N), alpha, X->data,
INT (X->stride), Y->data, INT (Y->stride), A->data,
INT (A->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* GERU */
int
gsl_blas_cgeru (const gsl_complex_float alpha,
const gsl_vector_complex_float * X,
const gsl_vector_complex_float * Y,
gsl_matrix_complex_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (X->size == M && Y->size == N)
{
cblas_cgeru (CblasRowMajor, INT (M), INT (N), GSL_COMPLEX_P (&alpha),
X->data, INT (X->stride), Y->data, INT (Y->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zgeru (const gsl_complex alpha, const gsl_vector_complex * X,
const gsl_vector_complex * Y, gsl_matrix_complex * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (X->size == M && Y->size == N)
{
cblas_zgeru (CblasRowMajor, INT (M), INT (N), GSL_COMPLEX_P (&alpha),
X->data, INT (X->stride), Y->data, INT (Y->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* GERC */
int
gsl_blas_cgerc (const gsl_complex_float alpha,
const gsl_vector_complex_float * X,
const gsl_vector_complex_float * Y,
gsl_matrix_complex_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (X->size == M && Y->size == N)
{
cblas_cgerc (CblasRowMajor, INT (M), INT (N), GSL_COMPLEX_P (&alpha),
X->data, INT (X->stride), Y->data, INT (Y->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zgerc (const gsl_complex alpha, const gsl_vector_complex * X,
const gsl_vector_complex * Y, gsl_matrix_complex * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (X->size == M && Y->size == N)
{
cblas_zgerc (CblasRowMajor, INT (M), INT (N), GSL_COMPLEX_P (&alpha),
X->data, INT (X->stride), Y->data, INT (Y->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* HER */
int
gsl_blas_cher (CBLAS_UPLO_t Uplo, float alpha,
const gsl_vector_complex_float * X,
gsl_matrix_complex_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_cher (CblasRowMajor, Uplo, INT (M), alpha, X->data, INT (X->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
int
gsl_blas_zher (CBLAS_UPLO_t Uplo, double alpha, const gsl_vector_complex * X,
gsl_matrix_complex * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zher (CblasRowMajor, Uplo, INT (N), alpha, X->data, INT (X->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
/* HER2 */
int
gsl_blas_cher2 (CBLAS_UPLO_t Uplo, const gsl_complex_float alpha,
const gsl_vector_complex_float * X,
const gsl_vector_complex_float * Y,
gsl_matrix_complex_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N || Y->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_cher2 (CblasRowMajor, Uplo, INT (N), GSL_COMPLEX_P (&alpha), X->data,
INT (X->stride), Y->data, INT (Y->stride), A->data,
INT (A->tda));
return GSL_SUCCESS;
}
int
gsl_blas_zher2 (CBLAS_UPLO_t Uplo, const gsl_complex alpha,
const gsl_vector_complex * X, const gsl_vector_complex * Y,
gsl_matrix_complex * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N || Y->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zher2 (CblasRowMajor, Uplo, INT (N), GSL_COMPLEX_P (&alpha), X->data,
INT (X->stride), Y->data, INT (Y->stride), A->data,
INT (A->tda));
return GSL_SUCCESS;
}
/* SYR */
int
gsl_blas_ssyr (CBLAS_UPLO_t Uplo, float alpha, const gsl_vector_float * X,
gsl_matrix_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ssyr (CblasRowMajor, Uplo, INT (N), alpha, X->data, INT (X->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
int
gsl_blas_dsyr (CBLAS_UPLO_t Uplo, double alpha, const gsl_vector * X,
gsl_matrix * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dsyr (CblasRowMajor, Uplo, INT (N), alpha, X->data, INT (X->stride),
A->data, INT (A->tda));
return GSL_SUCCESS;
}
/* SYR2 */
int
gsl_blas_ssyr2 (CBLAS_UPLO_t Uplo, float alpha, const gsl_vector_float * X,
const gsl_vector_float * Y, gsl_matrix_float * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N || Y->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ssyr2 (CblasRowMajor, Uplo, INT (N), alpha, X->data, INT (X->stride),
Y->data, INT (Y->stride), A->data, INT (A->tda));
return GSL_SUCCESS;
}
int
gsl_blas_dsyr2 (CBLAS_UPLO_t Uplo, double alpha, const gsl_vector * X,
const gsl_vector * Y, gsl_matrix * A)
{
const size_t M = A->size1;
const size_t N = A->size2;
if (M != N)
{
GSL_ERROR ("matrix must be square", GSL_ENOTSQR);
}
else if (X->size != N || Y->size != N)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dsyr2 (CblasRowMajor, Uplo, INT (N), alpha, X->data, INT (X->stride),
Y->data, INT (Y->stride), A->data, INT (A->tda));
return GSL_SUCCESS;
}
/*
* ===========================================================================
 * Level 3
* ===========================================================================
*/
/* GEMM */
int
gsl_blas_sgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB,
float alpha, const gsl_matrix_float * A,
const gsl_matrix_float * B, float beta, gsl_matrix_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (TransA == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (TransA == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (TransB == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (TransB == CblasNoTrans) ? B->size2 : B->size1;
if (M == MA && N == NB && NA == MB) /* [MxN] = [MAxNA][MBxNB] */
{
cblas_sgemm (CblasRowMajor, TransA, TransB, INT (M), INT (N), INT (NA),
alpha, A->data, INT (A->tda), B->data, INT (B->tda), beta,
C->data, INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB,
double alpha, const gsl_matrix * A, const gsl_matrix * B,
double beta, gsl_matrix * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (TransA == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (TransA == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (TransB == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (TransB == CblasNoTrans) ? B->size2 : B->size1;
if (M == MA && N == NB && NA == MB) /* [MxN] = [MAxNA][MBxNB] */
{
cblas_dgemm (CblasRowMajor, TransA, TransB, INT (M), INT (N), INT (NA),
alpha, A->data, INT (A->tda), B->data, INT (B->tda), beta,
C->data, INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_cgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_matrix_complex_float * B,
const gsl_complex_float beta, gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (TransA == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (TransA == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (TransB == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (TransB == CblasNoTrans) ? B->size2 : B->size1;
if (M == MA && N == NB && NA == MB) /* [MxN] = [MAxNA][MBxNB] */
{
cblas_cgemm (CblasRowMajor, TransA, TransB, INT (M), INT (N), INT (NA),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data,
INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB,
const gsl_complex alpha, const gsl_matrix_complex * A,
const gsl_matrix_complex * B, const gsl_complex beta,
gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (TransA == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (TransA == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (TransB == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (TransB == CblasNoTrans) ? B->size2 : B->size1;
if (M == MA && N == NB && NA == MB) /* [MxN] = [MAxNA][MBxNB] */
{
cblas_zgemm (CblasRowMajor, TransA, TransB, INT (M), INT (N), INT (NA),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data,
INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* SYMM */
int
gsl_blas_ssymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, float alpha,
const gsl_matrix_float * A, const gsl_matrix_float * B,
float beta, gsl_matrix_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
const size_t MB = B->size1;
const size_t NB = B->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && (M == MA && N == NB && NA == MB))
|| (Side == CblasRight && (M == MB && N == NA && NB == MA)))
{
cblas_ssymm (CblasRowMajor, Side, Uplo, INT (M), INT (N), alpha,
A->data, INT (A->tda), B->data, INT (B->tda), beta,
C->data, INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dsymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, double alpha,
const gsl_matrix * A, const gsl_matrix * B, double beta,
gsl_matrix * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
const size_t MB = B->size1;
const size_t NB = B->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && (M == MA && N == NB && NA == MB))
|| (Side == CblasRight && (M == MB && N == NA && NB == MA)))
{
cblas_dsymm (CblasRowMajor, Side, Uplo, INT (M), INT (N), alpha,
A->data, INT (A->tda), B->data, INT (B->tda), beta,
C->data, INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_csymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_matrix_complex_float * B,
const gsl_complex_float beta, gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
const size_t MB = B->size1;
const size_t NB = B->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && (M == MA && N == NB && NA == MB))
|| (Side == CblasRight && (M == MB && N == NA && NB == MA)))
{
cblas_csymm (CblasRowMajor, Side, Uplo, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data,
INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zsymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
const gsl_complex alpha, const gsl_matrix_complex * A,
const gsl_matrix_complex * B, const gsl_complex beta,
gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
const size_t MB = B->size1;
const size_t NB = B->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && (M == MA && N == NB && NA == MB))
|| (Side == CblasRight && (M == MB && N == NA && NB == MA)))
{
cblas_zsymm (CblasRowMajor, Side, Uplo, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data,
INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* HEMM */
int
gsl_blas_chemm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_matrix_complex_float * B,
const gsl_complex_float beta, gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
const size_t MB = B->size1;
const size_t NB = B->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && (M == MA && N == NB && NA == MB))
|| (Side == CblasRight && (M == MB && N == NA && NB == MA)))
{
cblas_chemm (CblasRowMajor, Side, Uplo, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data,
INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_zhemm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
const gsl_complex alpha, const gsl_matrix_complex * A,
const gsl_matrix_complex * B, const gsl_complex beta,
gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
const size_t MB = B->size1;
const size_t NB = B->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && (M == MA && N == NB && NA == MB))
|| (Side == CblasRight && (M == MB && N == NA && NB == MA)))
{
cblas_zhemm (CblasRowMajor, Side, Uplo, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data,
INT (C->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* SYRK */
int
gsl_blas_ssyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, float alpha,
const gsl_matrix_float * A, float beta, gsl_matrix_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t J = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t K = (Trans == CblasNoTrans) ? A->size2 : A->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != J)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ssyrk (CblasRowMajor, Uplo, Trans, INT (N), INT (K), alpha, A->data,
INT (A->tda), beta, C->data, INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_dsyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha,
const gsl_matrix * A, double beta, gsl_matrix * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t J = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t K = (Trans == CblasNoTrans) ? A->size2 : A->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != J)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dsyrk (CblasRowMajor, Uplo, Trans, INT (N), INT (K), alpha, A->data,
INT (A->tda), beta, C->data, INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_csyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_complex_float beta, gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t J = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t K = (Trans == CblasNoTrans) ? A->size2 : A->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != J)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_csyrk (CblasRowMajor, Uplo, Trans, INT (N), INT (K),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda),
GSL_COMPLEX_P (&beta), C->data, INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_zsyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans,
const gsl_complex alpha, const gsl_matrix_complex * A,
const gsl_complex beta, gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t J = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t K = (Trans == CblasNoTrans) ? A->size2 : A->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != J)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zsyrk (CblasRowMajor, Uplo, Trans, INT (N), INT (K),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda),
GSL_COMPLEX_P (&beta), C->data, INT (C->tda));
return GSL_SUCCESS;
}
/* HERK */
int
gsl_blas_cherk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, float alpha,
const gsl_matrix_complex_float * A, float beta,
gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t J = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t K = (Trans == CblasNoTrans) ? A->size2 : A->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != J)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_cherk (CblasRowMajor, Uplo, Trans, INT (N), INT (K), alpha, A->data,
INT (A->tda), beta, C->data, INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_zherk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha,
const gsl_matrix_complex * A, double beta,
gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t J = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t K = (Trans == CblasNoTrans) ? A->size2 : A->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != J)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zherk (CblasRowMajor, Uplo, Trans, INT (N), INT (K), alpha, A->data,
INT (A->tda), beta, C->data, INT (C->tda));
return GSL_SUCCESS;
}
/* SYR2K */
int
gsl_blas_ssyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, float alpha,
const gsl_matrix_float * A, const gsl_matrix_float * B,
float beta, gsl_matrix_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (Trans == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (Trans == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (Trans == CblasNoTrans) ? B->size2 : B->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != MA || N != MB || NA != NB)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_ssyr2k (CblasRowMajor, Uplo, Trans, INT (N), INT (NA), alpha, A->data,
INT (A->tda), B->data, INT (B->tda), beta, C->data,
INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_dsyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha,
const gsl_matrix * A, const gsl_matrix * B, double beta,
gsl_matrix * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (Trans == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (Trans == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (Trans == CblasNoTrans) ? B->size2 : B->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != MA || N != MB || NA != NB)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_dsyr2k (CblasRowMajor, Uplo, Trans, INT (N), INT (NA), alpha, A->data,
INT (A->tda), B->data, INT (B->tda), beta, C->data,
INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_csyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_matrix_complex_float * B,
const gsl_complex_float beta, gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (Trans == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (Trans == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (Trans == CblasNoTrans) ? B->size2 : B->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != MA || N != MB || NA != NB)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_csyr2k (CblasRowMajor, Uplo, Trans, INT (N), INT (NA),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data, INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_zsyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans,
const gsl_complex alpha, const gsl_matrix_complex * A,
const gsl_matrix_complex * B, const gsl_complex beta,
gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (Trans == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (Trans == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (Trans == CblasNoTrans) ? B->size2 : B->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != MA || N != MB || NA != NB)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zsyr2k (CblasRowMajor, Uplo, Trans, INT (N), INT (NA),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), GSL_COMPLEX_P (&beta), C->data, INT (C->tda));
return GSL_SUCCESS;
}
/* HER2K */
int
gsl_blas_cher2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
const gsl_matrix_complex_float * B, float beta,
gsl_matrix_complex_float * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (Trans == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (Trans == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (Trans == CblasNoTrans) ? B->size2 : B->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != MA || N != MB || NA != NB)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_cher2k (CblasRowMajor, Uplo, Trans, INT (N), INT (NA),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), beta, C->data, INT (C->tda));
return GSL_SUCCESS;
}
int
gsl_blas_zher2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans,
const gsl_complex alpha, const gsl_matrix_complex * A,
const gsl_matrix_complex * B, double beta,
gsl_matrix_complex * C)
{
const size_t M = C->size1;
const size_t N = C->size2;
const size_t MA = (Trans == CblasNoTrans) ? A->size1 : A->size2;
const size_t NA = (Trans == CblasNoTrans) ? A->size2 : A->size1;
const size_t MB = (Trans == CblasNoTrans) ? B->size1 : B->size2;
const size_t NB = (Trans == CblasNoTrans) ? B->size2 : B->size1;
if (M != N)
{
GSL_ERROR ("matrix C must be square", GSL_ENOTSQR);
}
else if (N != MA || N != MB || NA != NB)
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
cblas_zher2k (CblasRowMajor, Uplo, Trans, INT (N), INT (NA),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda), beta, C->data, INT (C->tda));
return GSL_SUCCESS;
}
/* TRMM */
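/* Note added for clarity (not in the original source): TRMM computes
 * B := alpha * op(A) * B when Side == CblasLeft and
 * B := alpha * B * op(A) when Side == CblasRight, where op(A) is the
 * MA-by-MA triangular matrix A, possibly transposed.  This is why the
 * dimension checks below require M == MA on the left and N == MA on the
 * right. */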
int
gsl_blas_strmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, float alpha,
const gsl_matrix_float * A, gsl_matrix_float * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_strmm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
alpha, A->data, INT (A->tda), B->data, INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dtrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha,
const gsl_matrix * A, gsl_matrix * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_dtrmm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
alpha, A->data, INT (A->tda), B->data, INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_ctrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
gsl_matrix_complex_float * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_ctrmm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_ztrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag,
const gsl_complex alpha, const gsl_matrix_complex * A,
gsl_matrix_complex * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_ztrmm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
/* TRSM */
int
gsl_blas_strsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, float alpha,
const gsl_matrix_float * A, gsl_matrix_float * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_strsm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
alpha, A->data, INT (A->tda), B->data, INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_dtrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha,
const gsl_matrix * A, gsl_matrix * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_dtrsm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
alpha, A->data, INT (A->tda), B->data, INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_ctrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag,
const gsl_complex_float alpha,
const gsl_matrix_complex_float * A,
gsl_matrix_complex_float * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_ctrsm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
int
gsl_blas_ztrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo,
CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag,
const gsl_complex alpha, const gsl_matrix_complex * A,
gsl_matrix_complex * B)
{
const size_t M = B->size1;
const size_t N = B->size2;
const size_t MA = A->size1;
const size_t NA = A->size2;
if (MA != NA)
{
GSL_ERROR ("matrix A must be square", GSL_ENOTSQR);
}
if ((Side == CblasLeft && M == MA) || (Side == CblasRight && N == MA))
{
cblas_ztrsm (CblasRowMajor, Side, Uplo, TransA, Diag, INT (M), INT (N),
GSL_COMPLEX_P (&alpha), A->data, INT (A->tda), B->data,
INT (B->tda));
return GSL_SUCCESS;
}
else
{
GSL_ERROR ("invalid length", GSL_EBADLEN);
}
}
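/* Usage sketch (illustrative only, not part of the original source): solving
 * the triangular system A * X = B in place with gsl_blas_dtrsm, overwriting B
 * with the solution X.  The matrices and their contents are hypothetical.
 *
 *   gsl_matrix *A = gsl_matrix_alloc (n, n);   -- upper triangular, nonsingular
 *   gsl_matrix *B = gsl_matrix_alloc (n, m);   -- right-hand sides
 *   ... fill A and B ...
 *   gsl_blas_dtrsm (CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit,
 *                   1.0, A, B);
 *
 * On success B holds X; the call fails with GSL_EBADLEN if the dimensions do
 * not satisfy the checks above.
 */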
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e174m3_8limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
module Language.Reflection.Types
-- inspired by https://github.com/MarcelineVQ/idris2-elab-deriving/
import public Language.Reflection.Pretty
import public Language.Reflection.Syntax
import public Language.Reflection
import Text.PrettyPrint.Prettyprinter
%language ElabReflection
--------------------------------------------------------------------------------
-- Utilities
--------------------------------------------------------------------------------
public export
Res : Type -> Type
Res = Either String
Eq Namespace where
(MkNS xs) == (MkNS ys) = xs == ys
Eq Name where
(UN a) == (UN x) = a == x
(MN a b) == (MN x y) = a == x && b == y
(NS a b) == (NS x y) = a == x && b == y
(DN a b) == (DN x y) = a == x && b == y
(RF a) == (RF x) = a == x
_ == _ = False
--------------------------------------------------------------------------------
-- General Types
--------------------------------------------------------------------------------
||| Constructor of a data type
public export
record Con where
constructor MkCon
name : Name
args : List NamedArg
type : TTImp
||| Tries to look up a constructor by name.
export
getCon : Name -> Elab Con
getCon n = do (n',tt) <- lookupName n
(args,tpe) <- unPiNamed tt
pure $ MkCon n' args tpe
export
Pretty Con where
prettyPrec p (MkCon n args tpe) = applyH p "MkCon" [n, args, tpe]
||| Information about a data type
|||
||| @name : Name of the data type
||| Note: There is no guarantee that the name will be fully
||| qualified
||| @args : Type arguments of the data type
||| @cons : List of data constructors
public export
record TypeInfo where
constructor MkTypeInfo
name : Name
args : List NamedArg
cons : List Con
export
Pretty TypeInfo where
pretty (MkTypeInfo name args cons) =
let head = applyH Open "MkTypeInfo" [name, args]
cons = indent 2 $ vsep (map pretty cons)
in vsep [head,cons]
||| Tries to get information about the data type specified
||| by name. The name need not be fully qualified, but
||| needs to be unambiguous.
export
getInfo' : Name -> Elab TypeInfo
getInfo' n =
do (n',tt) <- lookupName n
(args,IType _) <- unPiNamed tt
| (_,_) => fail "Type declaration does not end in IType"
conNames <- getCons n'
cons <- traverse getCon conNames
pure (MkTypeInfo n' args cons)
||| macro version of `getInfo'`
export %macro
getInfo : Name -> Elab TypeInfo
getInfo = getInfo'
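-- Usage sketch (added for illustration; the data type name is an example,
-- not part of this module): since `getInfo` is a %macro, it can be invoked
-- directly at the term level, e.g.
--
--   maybeInfo : TypeInfo
--   maybeInfo = getInfo "Maybe"
--
-- assuming the string literal is converted to a `Name` via the `FromString`
-- instance exported by Language.Reflection.Syntax.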
||| Tries to get the name of the sole constructor
||| of the data type specified by name. Fails if
||| the name is ambiguous, or if the data type
||| in question does not have exactly one constructor.
export %macro
singleCon : Name -> Elab Name
singleCon n = do (MkTypeInfo _ _ cs) <- getInfo' n
(c::Nil) <- pure cs | _ => fail "not a single constructor"
pure $ name c
--------------------------------------------------------------------------------
-- Parameterized Types
--------------------------------------------------------------------------------
||| Explicit arg of a data constructor
|||
||| The `hasParam` flag indicates whether one of the
||| type parameters of the data type appears
||| in the argument's type.
|||
||| For instance, in the following data type, arguments
||| a1 and a3 would have `hasParam` set to `True`.
|||
||| ```
||| data MyData : (f : Type -> Type) -> (a : Type) -> Type where
||| A : (a1 : f Int) -> (a2 : String) -> MyData f a
||| B : (a3 : f a) -> MyData f a
||| ```
public export
record ExplicitArg where
constructor MkExplicitArg
name : Name
tpe : TTImp
hasParam : Bool
export
Pretty ExplicitArg where
prettyPrec p (MkExplicitArg n tpe hasParam) =
applyH p "MkExplicitArg" [n, tpe, hasParam]
||| Constructor of a parameterized data type.
|||
||| We only accept two kinds of arguments for
||| such a constructor: implicit arguments
||| corresponding to the parameters of the data type
||| and explicit arguments.
|||
||| See `ParamTypeInfo` for examples of what is
||| allowed and what is not.
public export
record ParamCon where
constructor MkParamCon
name : Name
explicitArgs : List ExplicitArg
export
Pretty ParamCon where
prettyPrec p (MkParamCon n explicitArgs) =
applyH p "MkParamCon" [n, explicitArgs]
export
hasParamTypes : ParamCon -> List TTImp
hasParamTypes = mapMaybe hasParamType . explicitArgs
where hasParamType : ExplicitArg -> Maybe TTImp
hasParamType (MkExplicitArg _ t True) = Just t
hasParamType _ = Nothing
||| Information about a parameterized data type.
|||
||| The constructors of such a data type are only
||| allowed to have two kinds of arguments:
||| Implicit arguments corresponding to the data
||| type's parameters and explicit arguments.
|||
||| Auto implicits or existentials are not allowed.
|||
||| Below are some examples of valid parameterized data types
|||
||| ```
||| data Foo a = Val a | Nope
|||
||| data Reader : (m : Type -> Type) -> (e : Type) -> (a : Type) -> Type where
||| MkReader : (run : e -> m a) -> Reader m e a
|||
||| data Wrapper : (n : Nat) -> (t : Type) -> Type where
||| Wrap : Vect n t -> Wrapper n t
||| ```
|||
||| Examples of data types that are *not* valid parameterized data types:
|||
||| Indexed type families:
|||
||| ```
||| data GADT : (t : Type) -> Type where
||| A : GADT Int
||| B : GADT ()
||| Any : a -> GADT a
||| ```
|||
||| Existentials:
|||
||| ```
||| data Forget : Type where
||| DoForget : a -> Forget
||| ```
|||
||| Constraint types:
|||
||| ```
||| data ShowableForget : Type where
|||   ShowForget : Show a => a -> ShowableForget
||| ```
public export
record ParamTypeInfo where
constructor MkParamTypeInfo
name : Name
params : List (Name,TTImp)
cons : List ParamCon
export
Pretty ParamTypeInfo where
pretty (MkParamTypeInfo name params cons) =
let head = applyH Open "MkParamTypeInfo" [name, toList params]
cons = indent 2 $ vsep (map pretty cons)
in vsep [head,cons]
-- Given a Vect of type parameters (from the surrounding
-- data type), tries to extract a list of type parameter names
-- from the type declaration of a constructor.
private
conParams : (con : Name) -> Vect n a -> TTImp -> Res $ Vect n Name
conParams con as t = run as (snd $ unApp t)
where err : String
err = show con
++ ": Constructor type arguments do not match "
++ "data type type arguments."
run : Vect k a -> List TTImp -> Res $ Vect k Name
run [] [] = Right []
run (_ :: as) ((IVar _ n) :: ts) = (n ::) <$> run as ts
run _ _ = Left err
-- Renames all type parameter names in an argument's
-- type according to the given Vect of pairs.
-- Returns the renamed type and `True` if at least
-- one parameter was found, `False` otherwise.
private
rename : Vect n (Name,Name) -> TTImp -> (TTImp, Bool)
rename ns (IVar x n) = case lookup n ns of
Nothing => (IVar x n, False)
Just n' => (IVar x n', True)
rename ns (IPi x y z w a r) = let (a',ba) = rename ns a
(r',br) = rename ns r
in (IPi x y z w a' r', ba || br)
rename ns (IApp x y z) = let (y',by) = rename ns y
(z',bz) = rename ns z
in (IApp x y' z', by || bz)
rename _ t = (t, False)
private
implicitErr : (con: Name) -> (n : Name) -> Res a
implicitErr con n = Left $ show con
++ ": Non-explicit constructor argument \""
++ show n
++ "\" is not a type parameter."
private
indicesErr : (con : Name) -> (ns : Vect k Name) -> Res a
indicesErr con ns = Left $ show con ++ ": Type indices found: " ++ show ns
-- For a constructor, takes a list of type parameter
-- names and tries to remove the corresponding implicit
-- arguments from the head of the given argument list.
-- Extracts explicit argument names and types from the rest of
-- the list.
--
-- Fails if : a) Not all values in `names` are present as implicit
-- function arguments
-- b) The function has additional non-implicit arguments
private
argPairs : (con : Name)
-> Vect n (Name,Name)
-> List NamedArg
-> Res $ List ExplicitArg
argPairs con names = run names
where delete : Name -> Vect (S k) (Name,a) -> Res $ Vect k (Name,a)
delete m ((n,a) :: ns) =
if m == n then Right ns
else case ns of
[] => implicitErr con m
ns@(_::_) => ((n,a) ::) <$> delete m ns
mkArg : NamedArg -> Res ExplicitArg
mkArg (MkArg _ ExplicitArg n t) = let (t',isP) = rename names t
in Right $ MkExplicitArg n t' isP
mkArg (MkArg _ _ n _) = implicitErr con n
run : Vect k (Name,a) -> List NamedArg -> Res $ List ExplicitArg
run [] as = traverse mkArg as
run ps@(_::_) ((MkArg _ ImplicitArg n _) :: t) = run !(delete n ps) t
run ps _ = indicesErr con (map fst ps)
private
paramCon : Vect n Name -> Con -> Res $ ParamCon
paramCon ns (MkCon n as t) = do params <- conParams n ns t
args <- argPairs n (zip params ns) as
pure $ MkParamCon n args
private
toParamTypeInfo : TypeInfo -> Res ParamTypeInfo
toParamTypeInfo (MkTypeInfo n as cs) =
do ps <- traverse expPair as
let ns = map fst $ fromList ps
cs' <- traverse (paramCon ns) cs
pure $ MkParamTypeInfo n ps cs'
where expPair : NamedArg -> Res (Name,TTImp)
expPair (MkArg _ ExplicitArg n t) = Right (n,t)
expPair _ = Left $ show n
++ ": Non-explicit type arguments are not supported"
||| Returns information about a parameterized data type
||| specified by the given (probably not fully qualified) name.
|||
||| The implementation makes sure that all occurrences of
||| type parameters in the constructors have been given
||| the same names as the occurrences in the type declaration.
export
getParamInfo' : Name -> Elab ParamTypeInfo
getParamInfo' n = do ti <- getInfo' n
(Right pt) <- pure (toParamTypeInfo ti)
| (Left err) => fail err
pure pt
||| macro version of `getParamInfo'`.
export %macro
getParamInfo : Name -> Elab ParamTypeInfo
getParamInfo = getParamInfo'
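-- Usage sketch (added for illustration; the data type is hypothetical and
-- not part of this module):
--
--   data Pair a b = MkPair a b
--
--   pairInfo : ParamTypeInfo
--   pairInfo = getParamInfo "Pair"
--
-- `pairInfo` then records the two parameters `a` and `b` together with the
-- single constructor `MkPair` and its two explicit arguments.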
|
(* Title: HOL/Auth/Guard/Analz.thy
Author: Frederic Blanqui, University of Cambridge Computer Laboratory
Copyright 2001 University of Cambridge
*)
section\<open>Decomposition of Analz into two parts\<close>
theory Analz imports Extensions begin
text\<open>decomposition of \<^term>\<open>analz\<close> into two parts:
\<^term>\<open>pparts\<close> (for pairs) and analz of \<^term>\<open>kparts\<close>\<close>
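(* Worked example (comment added for illustration, not part of the original
   theory): for H = {\<lbrace>Nonce n, Crypt K X\<rbrace>} the pair itself is the only
   element of pparts H, while kparts H = {Nonce n, Crypt K X}; the final
   lemma analz_pparts_kparts_eq then yields
   analz H = pparts H \<union> analz (kparts H). *)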
subsection\<open>messages that do not contribute to analz\<close>
inductive_set
pparts :: "msg set => msg set"
for H :: "msg set"
where
Inj [intro]: "\<lbrakk>X \<in> H; is_MPair X\<rbrakk> \<Longrightarrow> X \<in> pparts H"
| Fst [dest]: "\<lbrakk>\<lbrace>X,Y\<rbrace> \<in> pparts H; is_MPair X\<rbrakk> \<Longrightarrow> X \<in> pparts H"
| Snd [dest]: "\<lbrakk>\<lbrace>X,Y\<rbrace> \<in> pparts H; is_MPair Y\<rbrakk> \<Longrightarrow> Y \<in> pparts H"
subsection\<open>basic facts about \<^term>\<open>pparts\<close>\<close>
lemma pparts_is_MPair [dest]: "X \<in> pparts H \<Longrightarrow> is_MPair X"
by (erule pparts.induct, auto)
lemma Crypt_notin_pparts [iff]: "Crypt K X \<notin> pparts H"
by auto
lemma Key_notin_pparts [iff]: "Key K \<notin> pparts H"
by auto
lemma Nonce_notin_pparts [iff]: "Nonce n \<notin> pparts H"
by auto
lemma Number_notin_pparts [iff]: "Number n \<notin> pparts H"
by auto
lemma Agent_notin_pparts [iff]: "Agent A \<notin> pparts H"
by auto
lemma pparts_empty [iff]: "pparts {} = {}"
by (auto, erule pparts.induct, auto)
lemma pparts_insertI [intro]: "X \<in> pparts H \<Longrightarrow> X \<in> pparts (insert Y H)"
by (erule pparts.induct, auto)
lemma pparts_sub: "\<lbrakk>X \<in> pparts G; G \<subseteq> H\<rbrakk> \<Longrightarrow> X \<in> pparts H"
by (erule pparts.induct, auto)
lemma pparts_insert2 [iff]: "pparts (insert X (insert Y H))
= pparts {X} Un pparts {Y} Un pparts H"
by (rule eq, (erule pparts.induct, auto)+)
lemma pparts_insert_MPair [iff]: "pparts (insert \<lbrace>X,Y\<rbrace> H)
= insert \<lbrace>X,Y\<rbrace> (pparts ({X,Y} \<union> H))"
apply (rule eq, (erule pparts.induct, auto)+)
apply (rule_tac Y=Y in pparts.Fst, auto)
apply (erule pparts.induct, auto)
by (rule_tac X=X in pparts.Snd, auto)
lemma pparts_insert_Nonce [iff]: "pparts (insert (Nonce n) H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert_Crypt [iff]: "pparts (insert (Crypt K X) H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert_Key [iff]: "pparts (insert (Key K) H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert_Agent [iff]: "pparts (insert (Agent A) H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert_Number [iff]: "pparts (insert (Number n) H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert_Hash [iff]: "pparts (insert (Hash X) H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert: "X \<in> pparts (insert Y H) \<Longrightarrow> X \<in> pparts {Y} \<union> pparts H"
by (erule pparts.induct, blast+)
lemma insert_pparts: "X \<in> pparts {Y} \<union> pparts H \<Longrightarrow> X \<in> pparts (insert Y H)"
by (safe, erule pparts.induct, auto)
lemma pparts_Un [iff]: "pparts (G \<union> H) = pparts G \<union> pparts H"
by (rule eq, erule pparts.induct, auto dest: pparts_sub)
lemma pparts_pparts [iff]: "pparts (pparts H) = pparts H"
by (rule eq, erule pparts.induct, auto)
lemma pparts_insert_eq: "pparts (insert X H) = pparts {X} Un pparts H"
by (rule_tac A=H in insert_Un, rule pparts_Un)
lemmas pparts_insert_substI = pparts_insert_eq [THEN ssubst]
lemma in_pparts: "Y \<in> pparts H \<Longrightarrow> \<exists>X. X \<in> H \<and> Y \<in> pparts {X}"
by (erule pparts.induct, auto)
subsection\<open>facts about \<^term>\<open>pparts\<close> and \<^term>\<open>parts\<close>\<close>
lemma pparts_no_Nonce [dest]: "\<lbrakk>X \<in> pparts {Y}; Nonce n \<notin> parts {Y}\<rbrakk>
\<Longrightarrow> Nonce n \<notin> parts {X}"
by (erule pparts.induct, simp_all)
subsection\<open>facts about \<^term>\<open>pparts\<close> and \<^term>\<open>analz\<close>\<close>
lemma pparts_analz: "X \<in> pparts H \<Longrightarrow> X \<in> analz H"
by (erule pparts.induct, auto)
lemma pparts_analz_sub: "\<lbrakk>X \<in> pparts G; G \<subseteq> H\<rbrakk> \<Longrightarrow> X \<in> analz H"
by (auto dest: pparts_sub pparts_analz)
subsection\<open>messages that contribute to analz\<close>
inductive_set
kparts :: "msg set => msg set"
for H :: "msg set"
where
Inj [intro]: "\<lbrakk>X \<in> H; not_MPair X\<rbrakk> \<Longrightarrow> X \<in> kparts H"
| Fst [intro]: "\<lbrakk>\<lbrace>X,Y\<rbrace> \<in> pparts H; not_MPair X\<rbrakk> \<Longrightarrow> X \<in> kparts H"
| Snd [intro]: "\<lbrakk>\<lbrace>X,Y\<rbrace> \<in> pparts H; not_MPair Y\<rbrakk> \<Longrightarrow> Y \<in> kparts H"
subsection\<open>basic facts about \<^term>\<open>kparts\<close>\<close>
lemma kparts_not_MPair [dest]: "X \<in> kparts H \<Longrightarrow> not_MPair X"
by (erule kparts.induct, auto)
lemma kparts_empty [iff]: "kparts {} = {}"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insertI [intro]: "X \<in> kparts H \<Longrightarrow> X \<in> kparts (insert Y H)"
by (erule kparts.induct, auto dest: pparts_insertI)
lemma kparts_insert2 [iff]: "kparts (insert X (insert Y H))
= kparts {X} \<union> kparts {Y} \<union> kparts H"
by (rule eq, (erule kparts.induct, auto)+)
lemma kparts_insert_MPair [iff]: "kparts (insert \<lbrace>X,Y\<rbrace> H)
= kparts ({X,Y} \<union> H)"
by (rule eq, (erule kparts.induct, auto)+)
lemma kparts_insert_Nonce [iff]: "kparts (insert (Nonce n) H)
= insert (Nonce n) (kparts H)"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert_Crypt [iff]: "kparts (insert (Crypt K X) H)
= insert (Crypt K X) (kparts H)"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert_Key [iff]: "kparts (insert (Key K) H)
= insert (Key K) (kparts H)"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert_Agent [iff]: "kparts (insert (Agent A) H)
= insert (Agent A) (kparts H)"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert_Number [iff]: "kparts (insert (Number n) H)
= insert (Number n) (kparts H)"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert_Hash [iff]: "kparts (insert (Hash X) H)
= insert (Hash X) (kparts H)"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert: "X \<in> kparts (insert X H) \<Longrightarrow> X \<in> kparts {X} \<union> kparts H"
by (erule kparts.induct, (blast dest: pparts_insert)+)
lemma kparts_insert_fst [rule_format,dest]: "X \<in> kparts (insert Z H) \<Longrightarrow>
X \<notin> kparts H \<longrightarrow> X \<in> kparts {Z}"
by (erule kparts.induct, (blast dest: pparts_insert)+)
lemma kparts_sub: "\<lbrakk>X \<in> kparts G; G \<subseteq> H\<rbrakk> \<Longrightarrow> X \<in> kparts H"
by (erule kparts.induct, auto dest: pparts_sub)
lemma kparts_Un [iff]: "kparts (G \<union> H) = kparts G \<union> kparts H"
by (rule eq, erule kparts.induct, auto dest: kparts_sub)
lemma pparts_kparts [iff]: "pparts (kparts H) = {}"
by (rule eq, erule pparts.induct, auto)
lemma kparts_kparts [iff]: "kparts (kparts H) = kparts H"
by (rule eq, erule kparts.induct, auto)
lemma kparts_insert_eq: "kparts (insert X H) = kparts {X} \<union> kparts H"
by (rule_tac A=H in insert_Un, rule kparts_Un)
lemmas kparts_insert_substI = kparts_insert_eq [THEN ssubst]
lemma in_kparts: "Y \<in> kparts H \<Longrightarrow> \<exists>X. X \<in> H \<and> Y \<in> kparts {X}"
by (erule kparts.induct, auto dest: in_pparts)
lemma kparts_has_no_pair [iff]: "has_no_pair (kparts H)"
by auto
subsection\<open>facts about \<^term>\<open>kparts\<close> and \<^term>\<open>parts\<close>\<close>
lemma kparts_no_Nonce [dest]: "\<lbrakk>X \<in> kparts {Y}; Nonce n \<notin> parts {Y}\<rbrakk>
\<Longrightarrow> Nonce n \<notin> parts {X}"
by (erule kparts.induct, auto)
lemma kparts_parts: "X \<in> kparts H \<Longrightarrow> X \<in> parts H"
by (erule kparts.induct, auto dest: pparts_analz)
lemma parts_kparts: "X \<in> parts (kparts H) \<Longrightarrow> X \<in> parts H"
by (erule parts.induct, auto dest: kparts_parts
intro: parts.Fst parts.Snd parts.Body)
lemma Crypt_kparts_Nonce_parts [dest]: "\<lbrakk>Crypt K Y \<in> kparts {Z};
Nonce n \<in> parts {Y}\<rbrakk> \<Longrightarrow> Nonce n \<in> parts {Z}"
by auto
subsection\<open>facts about \<^term>\<open>kparts\<close> and \<^term>\<open>analz\<close>\<close>
lemma kparts_analz: "X \<in> kparts H \<Longrightarrow> X \<in> analz H"
by (erule kparts.induct, auto dest: pparts_analz)
lemma kparts_analz_sub: "\<lbrakk>X \<in> kparts G; G \<subseteq> H\<rbrakk> \<Longrightarrow> X \<in> analz H"
by (erule kparts.induct, auto dest: pparts_analz_sub)
lemma analz_kparts [rule_format,dest]: "X \<in> analz H \<Longrightarrow>
Y \<in> kparts {X} \<longrightarrow> Y \<in> analz H"
by (erule analz.induct, auto dest: kparts_analz_sub)
lemma analz_kparts_analz: "X \<in> analz (kparts H) \<Longrightarrow> X \<in> analz H"
by (erule analz.induct, auto dest: kparts_analz)
lemma analz_kparts_insert: "X \<in> analz (kparts (insert Z H)) \<Longrightarrow> X \<in> analz (kparts {Z} \<union> kparts H)"
by (rule analz_sub, auto)
lemma Nonce_kparts_synth [rule_format]: "Y \<in> synth (analz G)
\<Longrightarrow> Nonce n \<in> kparts {Y} \<longrightarrow> Nonce n \<in> analz G"
by (erule synth.induct, auto)
lemma kparts_insert_synth: "\<lbrakk>Y \<in> parts (insert X G); X \<in> synth (analz G);
Nonce n \<in> kparts {Y}; Nonce n \<notin> analz G\<rbrakk> \<Longrightarrow> Y \<in> parts G"
apply (drule parts_insert_substD, clarify)
apply (drule in_sub, drule_tac X=Y in parts_sub, simp)
apply (auto dest: Nonce_kparts_synth)
done
lemma Crypt_insert_synth:
"\<lbrakk>Crypt K Y \<in> parts (insert X G); X \<in> synth (analz G); Nonce n \<in> kparts {Y}; Nonce n \<notin> analz G\<rbrakk>
\<Longrightarrow> Crypt K Y \<in> parts G"
by (metis Fake_parts_insert_in_Un Nonce_kparts_synth UnE analz_conj_parts synth_simps(5))
subsection\<open>analz is pparts + analz of kparts\<close>
lemma analz_pparts_kparts: "X \<in> analz H \<Longrightarrow> X \<in> pparts H \<or> X \<in> analz (kparts H)"
by (erule analz.induct, auto)
lemma analz_pparts_kparts_eq: "analz H = pparts H Un analz (kparts H)"
by (rule eq, auto dest: analz_pparts_kparts pparts_analz analz_kparts_analz)
lemmas analz_pparts_kparts_substI = analz_pparts_kparts_eq [THEN ssubst]
lemmas analz_pparts_kparts_substD = analz_pparts_kparts_eq [THEN sym, THEN ssubst]
end
|
/*
Copyright [2020] [IBM Corporation]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef _MCAS_NUPM_ARENA_
#define _MCAS_NUPM_ARENA_
#include <nupm/region_descriptor.h>
#include <common/logging.h>
#include <common/string_view.h>
#include <gsl/pointers> /* not_null */
#include <sys/uio.h> /* ::iovec */
#include <cstddef>
#include <vector>
namespace nupm
{
struct registry_memory_mapped;
struct space_registered;
}
struct arena
: private common::log_source
{
using region_descriptor = nupm::region_descriptor;
using registry_memory_mapped = nupm::registry_memory_mapped;
using space_registered = nupm::space_registered;
using string_view = common::string_view;
arena(const common::log_source &ls) : common::log_source(ls) {}
virtual ~arena() {}
virtual void debug_dump() const = 0;
virtual region_descriptor region_get(const string_view &id) = 0;
virtual region_descriptor region_create(const string_view &id, gsl::not_null<registry_memory_mapped *> mh, std::size_t size) = 0;
virtual void region_resize(gsl::not_null<space_registered *> mh, std::size_t size) = 0;
/* It is unknown whether region_erase may be used on an open region.
* arena_fs assumes that it may, just as ::unlink can be used against
* an open file.
*/
virtual void region_erase(const string_view &id, gsl::not_null<registry_memory_mapped *> mh) = 0;
virtual std::size_t get_max_available() = 0;
virtual bool is_file_backed() const = 0;
protected:
using common::log_source::debug_level;
};
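/* Usage sketch (illustrative only; the concrete arena implementation, the
 * registry object and the region name below are assumptions, not part of
 * this header).  Given some object `a` of a class derived from arena and a
 * registry `reg`:
 *
 *   auto rd = a.region_create("pool-0", &reg, std::size_t(1) << 30);
 *   ... use the memory described by the returned region_descriptor ...
 *   a.region_erase("pool-0", &reg);
 *
 * As noted above, region_erase may be issued while the region is still open,
 * analogous to ::unlink on an open file.
 */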
#endif
|
\section{The Sorting Problem}
%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Algorithms}
% \begin{center}
% An algorithm is a sequence of operations \\
% that transform the input into the output.
% \end{center}
\begin{center}
What is an algorithm? \qquad \pause What is computation?
\end{center}
\pause
\fignocaption{width = 0.50\textwidth}{figs/algorithm-def.pdf}
\pause
\vspace{-0.60cm}
\begin{center}
\textcolor{red}{Correctness!}
\end{center}
\pause
\begin{description}[Effectiveness:]
\item[Definiteness:] precisely defined operations
\pause
\item[Finiteness:] termination
\pause
\item[Effectiveness:] a reasonable model; basic operations % RAM {\scriptsize (Random-Access Machine)} model
\pause
\begin{itemize}
% \item unrealistic: \texttt{sort} operation
% \item realistic: arithmetic, data movement, and control
% \pause
\item for sorting: compare, swap
\end{itemize}
\end{description}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Sorting}
The sorting problem:
\begin{description}
\item[Input:] A sequence of $n$ integers $A$:\seq{$a_1, a_2, \cdots, a_n$}.
\item[Output:] A permutation $A'$:\seq{$a'_1, a'_2, \ldots, a'_n$} of $A$ \emph{s.t.} $a'_1 \le a'_2 \le \cdots \le a'_n$ {\small (non-decreasing order)}.
\end{description}
\[
3\quad 1\quad 4\quad 2\quad \Longrightarrow 1\quad 2\quad 3\quad 4
\]
\pause
\vspace{0.50cm}
\fignocaption{width = 0.50\textwidth}{figs/sorting-alg-def.pdf}
% sortable
% A little more formalism: ordering relation ``$<$'' on $A$.
% \vspace{0.20cm}
% $\forall a, b, c \in A$,
% \begin{description}[Transitivity:]
% \item[Trichotomy:] $a < b, a = b, a > b$
% \item[Transitivity:] $a < b \land b < c \implies a < c$
% \end{description}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Inversions}
\[
A = a_1\quad a_2\quad \cdots\quad a_i\quad \cdots\quad a_j\quad \cdots\quad a_n.
\]
\begin{center}
If $i < j$ and $a_{i} > a_{j}$, then $(a_i, a_j)$ is an \textcolor{red}{\bf inversion}.\\[8pt] \pause
\textcolor{blue}{Adjacent} inversion: $(a_i, a_{i+1})$
\end{center}
\pause
\vspace{-0.50cm}
\begin{columns}
\column{0.50\textwidth}
\fignocaption{width = 0.50\textwidth}{figs/inversions-example.pdf}
\column{0.50\textwidth}
{\small
\begin{center}
\#inversions = 3\\
\#adjacent inversions = 2
\end{center}
}
\end{columns}
\pause
\begin{columns}
\column{0.50\textwidth}
\fignocaption{width = 0.50\textwidth}{figs/inversions-example-nonincreasing.pdf}
\column{0.50\textwidth}
{\small
\begin{center}
\#inversions = 3 + 2 + 1 = 6\\
\#adjacent inversions = 3
\end{center}
}
\end{columns}
\pause
\begin{columns}
\column{0.50\textwidth}
\fignocaption{width = 0.50\textwidth}{figs/inversions-example-nondecreasing.pdf}
\column{0.50\textwidth}
{\small
\begin{center}
\#inversions = 0\\
\#adjacent inversions = 0
\end{center}
}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Inversions}
\begin{center}
\fbox{\textcolor{blue}{Theorem:} $A$ is sorted $\iff$ $A$ has no adjacent inversions.}
\end{center}
\begin{align*}
\onslide<2->{A \text{ is sorted } \Longrightarrow A \text{ has no adjacent inversions}.}
\end{align*}
\vspace{-0.50cm}
\begin{align*}
\onslide<3->{A \text{ has no adjacent inversions } &\Longrightarrow \forall i \in [1,n-1]: a_{i} \le a_{i+1} \\}
\onslide<4->{&\Longrightarrow A \text{ is sorted}.}
\end{align*}
\end{frame}
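% Worked illustration of the theorem (comment only, not rendered): for
% A = 2 1 3 the adjacent inversion (2,1) witnesses that A is not sorted;
% swapping the adjacent pair gives 1 2 3, which has no adjacent inversions
% and is therefore sorted by the theorem.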
%%%%%%%%%%%%%%%%%%%%
|
Load GradVer21Determ.
Import Semantics.
Require Import Coq.Logic.Classical_Pred_Type.
Definition PLIFTm1 (f : phi -> phi) : (pphi -> pphi) :=
fun pp1 => fun p2 => exists p1, pp1 p1 /\ f p1 = p2.
Definition PLIFTp1 (f : phi -> phi -> Prop) : (pphi -> pphi) :=
fun pp1 => fun p2 => exists p1, pp1 p1 /\ f p1 p2.
Definition PLIFTp3 (f : phi -> phi -> phi -> phi -> Prop) : (pphi -> pphi -> pphi -> pphi) :=
fun pp1 pp2 pp3 => fun px => exists p1 p2 p3, pp1 p1 /\ pp2 p2 /\ pp3 p3 /\ f p1 p2 p3 px.
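(* Intuition (comment added for clarity, not part of the original
   development): PLIFTm1 f pp is the direct image of the set pp under f,
   i.e. the set of all f p with pp p.  PLIFTp1 is the analogous image when
   f is a relation rather than a function, and PLIFTp3 lifts a relation
   with three inputs pointwise over three input sets. *)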
Definition GLIFTmp1 (f : phi -> phi) (gf : gphi -> gphi -> Prop) : Prop :=
forall gp1 gp2 pp gp2',
gGamma gp1 pp ->
gAlpha (PLIFTm1 f pp) gp2 ->
gf gp1 gp2' ->
gphiEquals gp2' gp2.
Definition GLIFTm1 (f : phi -> phi) (gf : gphi -> gphi) : Prop :=
forall gp1 gp2 pp,
gGamma gp1 pp ->
gAlpha (PLIFTm1 f pp) gp2 ->
gphiEquals (gf gp1) gp2.
Definition GLIFTpp1 (f : phi -> phi -> Prop) (gf : gphi -> gphi -> Prop) : Prop :=
forall gp1 gp2 pp gp2',
gGamma gp1 pp ->
gAlpha (PLIFTp1 f pp) gp2 ->
gf gp1 gp2' ->
gphiEquals gp2' gp2.
Definition GLIFTpp1x (f : phi -> phi -> Prop) (gf : gphi -> gphi -> Prop) : Prop :=
forall gp1 gp2 pp,
gGamma gp1 pp ->
gf gp1 gp2 ->
gAlpha (PLIFTp1 f pp) gp2.
Definition GLIFTpp3 (f : phi -> phi -> phi -> phi -> Prop) (gf : gphi -> gphi -> gphi -> gphi -> Prop) : Prop :=
forall gp1 gp2 gp3 gpx pp1 pp2 pp3 gpx',
gGamma gp1 pp1 ->
gGamma gp2 pp2 ->
gGamma gp3 pp3 ->
gAlpha (PLIFTp3 f pp1 pp2 pp3) gpx ->
gf gp1 gp2 gp3 gpx' ->
gphiEquals gpx' gpx.
(* (* monotonic function with respect to phiImplies *)
Definition pmFun (f : phi -> phi -> Prop) : Prop :=
forall x1 y1 x2 y2,
f x1 y1 ->
f x2 y2 ->
phiImplies x1 x2 ->
phiImplies y1 y2. *)
(* monotonic function with respect to phiImplies *)
Definition pmFun (f : phi -> phi -> Prop) : Prop :=
forall x1 y1 x2 y2,
f x1 y1 ->
f x2 y2 ->
phiImplies x1 x2 ->
phiImplies y1 y2.
(* special condition *)
Definition gFun (f : phi -> phi -> Prop) : Prop :=
forall pa pb pc, f pb pc ->
forall p1, gGamma' (pa, pc) p1 ->
exists p2
p3, gGamma' (pa, pb) p2 /\
f p2 p3 /\
phiImplies p1 p3.
(* determinism, goodness transfer, sanity, ... *)
Definition goodFun (f : phi -> phi -> Prop) : Prop :=
(forall x y,
f x y ->
good x ->
good y)
/\
(forall x y1 y2,
f x y1 ->
f x y2 ->
y1 = y2).
Definition liftable (f : phi -> phi -> Prop) : Prop :=
goodFun f /\
pmFun f /\
gFun f.
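(* Reading guide (comment added for clarity, not part of the original proof
   script): a relation f on formulas is "liftable" when it (1) preserves
   goodness and is deterministic (goodFun), (2) is monotone with respect to
   phiImplies (pmFun), and (3) interacts well with the concretization
   gGamma' of gradual formulas (gFun).  liftableTrans below shows that
   liftability is closed under relational composition. *)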
Lemma liftableTrans : forall f1 f2,
liftable f1 ->
liftable f2 ->
liftable (fun x1 x3 => exists x2, f1 x1 x2 /\ f2 x2 x3).
Proof.
unfold liftable.
intros. unf.
split.
unfold goodFun in *. unf.
split.
intros. unf.
eapply H2 in H10; eauto.
intros. unf.
assert (x0 = x1). eapp H6.
subst. eapp H7.
split.
unfold pmFun in *.
intros. unf.
eapply H0; eauto.
unfold gFun in *.
intros. unf.
assert (H8' := H8).
eapply H3 in H8; eauto. unf.
assert (H4' := H4).
eapply H5 in H4; eauto. unf.
destruct pa.
- inv H7. inv H9. inv H6. simpl in *.
unfold gGamma'. simpl.
exists pb.
exists pc.
splau.
splau.
admit.
- inv H7. inv H9. inv H6. simpl in *.
assert (pc = x1). eapp H1. subst.
assert (x = x3). eapp H2. subst.
exists pb.
exists x1.
cut.
Admitted.
Definition simpleLift (f : phi -> phi -> Prop) : (gphi -> gphi -> Prop) :=
fun gp1 gp2 =>
gGood gp1 /\
gGood gp2 /\
fst gp1 = fst gp2 /\
f (snd gp1) (snd gp2).
(* liftable functions are in fact simply liftable *)
Lemma GLIFT_liftable : forall f,
goodFun f ->
pmFun f ->
gFun f ->
GLIFTpp1x f (simpleLift f).
Proof.
unfold GLIFTpp1x, PLIFTp1,
simpleLift, pmFun, goodFun, gFun.
intros.
destruct gp1, gp2. simpl in *.
unf. subst.
rename H4 into go0.
rename H3 into go1.
rename H2 into ga.
rename H0 into mon.
rename H1 into mag.
rename H6 into goF.
rename H8 into det.
rename H7 into ff.
inv ga.
clear H.
constructor.
- constructor.
* assert (exists x, gGamma' (b0, p1) x) as ee.
unfold gGamma'. simpl.
destruct b0.
inv go1. apply hasWellFormedSubtype in H. unf.
exists x. splau. eca.
eex.
invE ee xx.
eapply (mag b0) in ff; eauto.
unf.
exists x0.
exists x.
tauto.
* intros. unf.
apply goF in H1; auto.
unfold gGamma' in *.
destruct b0; simpl in *; cut.
subst.
inv go0.
split. apply H.
inv H0; cut.
- assumption.
- repeat intro. unf.
unfold gGamma' in *.
destruct b0; simpl in *.
* unf.
eapply mon in H2; eauto.
* subst.
eapp det.
- unfold pincl in *.
intros.
assert (ffx := ff).
eapply mag in ff; eauto. unf.
assert (gGamma' gp' x0).
apply H0.
eex.
destruct gp'.
unfold gGamma' in *. simpl in *.
destruct b, b0; unf; subst.
* splau.
eapp (phiImpliesTrans p2 x0 p3).
* assert (x0 = p1).
eapp det. subst.
splau.
* admit. (*contradictory*)
* eapp det.
Admitted.
Definition liftableImplies (p : phi) (p1 p2 : phi) : Prop :=
p1 = p2 /\
phiImplies p1 p.
Theorem liftableImplies_ : forall p, liftable (liftableImplies p).
Proof.
split.
split; intros.
inv H. assumption.
inv H. inv H0. congruence.
split.
unfold pmFun. intros.
inv H. inv H0.
assumption.
unfold gFun. intros.
inv H.
exists p1.
exists p1.
splau.
splau.
splau.
eapp (phiImpliesTrans p1 pc p0).
unfold gGamma' in *. simpl in *.
destruct pa; unf; cut.
Qed.
Definition liftableAppend (p : phi) (p1 p2 : phi) : Prop :=
p2 = p1 ++ p /\
(good p1 -> good (p1 ++ p)) /\
(forall p'',(good p'' /\ phiImplies p'' (p1 ++ p)) ->
exists p' , good p' /\ phiImplies p' (p1) /\ phiImplies p'' (p' ++ p) /\ good (p' ++ p)).
Theorem liftableAppend_ : forall p, liftable (liftableAppend p).
Proof.
split.
split; intros.
inv H. apply H2. assumption.
inv H. inv H0. congruence.
split.
unfold pmFun. intros.
inv H. inv H0.
repeat intro.
apply evalphiSymm.
apply evalphiSymm in H.
apply evalphiApp in H. unf.
apply evalphiAppRev; cut.
unfold gFun. intros.
inv H. unf.
unfold gGamma' in *. simpl in *.
destruct pa.
- assert (H00 := H0).
apply H1 in H0. unf.
exists x. exists (x ++ p0).
splau.
splau.
split. congruence.
splau.
intros. unf.
exists x.
cut.
- subst.
exists pb. exists (pb ++ p0).
splau.
splau.
split. congruence.
split. assumption.
apply H1.
Qed.
Definition minWith {T:Type} (pred : T -> Prop) (lt : T -> T -> Prop) : T -> Prop :=
fun x => pred x /\ (forall y, pred y -> lt x y).
Definition liftableWOvar (x : x) (p1 p2 : phi) : Prop :=
(good p1 -> good p2) /\
(minWith (fun p => phiImplies p1 p /\ ~ In x (FV p)) phiImplies p2).
Theorem liftableWOvar_ : forall x, liftable (liftableWOvar x).
Proof.
split.
split; intros.
intros. apply H. assumption.
intros. admit. (*the case when implemented*)
split.
unfold pmFun. intros.
inv H. inv H0.
unfold minWith in *.
unf.
apply H7.
splau.
eapp (phiImpliesTrans x1 x2 y2).
unfold gFun. intros.
unfold gGamma' in *. simpl in *.
destruct pa.
- unf.
eexists. (* take minimum self-framed pb (if multiple choices, look at p1) *)
eexists. (* follows from evaluation of WO *)
admit.
- subst.
exists pb.
exists pc.
cut.
Admitted.
Definition liftableWOacc (a : A'_s) (p1 p2 : phi) : Prop :=
(phiImplies p1 [phiAcc (fst a) (snd a)]) /\
(good p1 -> good p2) /\
(minWith (fun p => phiImplies p1 p /\ (forall px, phiImplies p px /\ ~ In a (staticFootprint px) /\ ~ In a (staticFootprintX px))) phiImplies p2).
Theorem liftableWOacc_ : forall x, liftable (liftableWOacc x).
Proof.
Admitted.
Fixpoint liftableWOaccs (a : A_s) (p1 p2 : phi) : Prop :=
match a with
| [] => p1 = p2
| (a' :: a) => exists pp, liftableWOacc a' p1 pp /\
liftableWOaccs a pp p2
end.
Theorem liftableWOaccs_ : forall x, liftable (liftableWOaccs x).
Proof.
induction x.
- simpl.
split.
split; cut.
split.
unfold pmFun. intros. subst. auto.
unfold gFun. intros. subst. eex.
- simpl.
apply liftableTrans.
* apply liftableWOacc_.
* assumption.
Qed.
Definition liftableWOaccsX (p : phi) (p1 p2 : phi) : Prop :=
phiImplies p1 p /\
liftableWOaccs (staticFootprint p) p1 p2.
Theorem liftableWOaccsX_ : forall x, liftable (liftableWOaccsX x).
Proof.
intros.
assert (li := liftableWOaccs_ (staticFootprint x)).
unfold liftableWOaccsX.
inv li. inv H0.
split.
inv H.
split; intros; unf.
eapply H0; eauto.
eapply H3; eauto.
split.
- unfold pmFun in *. intros. unf.
eapply H1; eauto.
- unfold gFun in *. intros. unf.
eapply H2 in H5; eauto.
unf.
exists x0.
exists x1.
splau.
splau.
splau.
unfold gGamma' in H0.
simpl in *.
destruct pa.
* unf.
eapp phiImpliesTrans.
* subst.
assumption.
Qed.
Definition liftablePS2
(x1 : x) (y1 : x)
(x2 : x) (y2 : x)
(p1 p2 : phi) : Prop :=
p2 = phiSubsts2 x1 y1 x2 y2 p1
.
Theorem liftablePS2_ : forall x1 y1 x2 y2, liftable (liftablePS2 x1 y1 x2 y2).
Proof.
Admitted.
Definition liftablePS3
(x1 : x) (y1 : x)
(x2 : x) (y2 : x)
(x3 : x) (y3 : x)
(p1 p2 : phi) : Prop :=
p2 = phiSubsts3 x1 y1 x2 y2 x3 y3 p1
.
Theorem liftablePS3_ : forall x1 y1 x2 y2 x3 y3, liftable (liftablePS3 x1 y1 x2 y2 x3 y3).
Proof.
Admitted.
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
Top level architecture related proofs.
*)
theory Arch_R
imports Untyped_R Finalise_R
begin
context begin interpretation Arch . (*FIXME: arch_split*)
declare is_aligned_shiftl [intro!]
declare is_aligned_shiftr [intro!]
definition
"asid_ci_map i \<equiv>
case i of X64_A.MakePool frame slot parent base \<Rightarrow>
X64_H.MakePool frame (cte_map slot) (cte_map parent) base"
definition
"valid_aci' aci \<equiv> case aci of MakePool frame slot parent base \<Rightarrow>
\<lambda>s. cte_wp_at' (\<lambda>c. cteCap c = NullCap) slot s \<and>
cte_wp_at' (\<lambda>cte. \<exists>idx. cteCap cte = UntypedCap False frame pageBits idx) parent s \<and>
descendants_of' parent (ctes_of s) = {} \<and>
slot \<noteq> parent \<and>
ex_cte_cap_to' slot s \<and>
sch_act_simple s \<and>
is_aligned base asid_low_bits \<and> asid_wf base"
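(* Comment added for orientation (not part of the original theory):
   asid_ci_map translates an abstract ASID-control invocation into its
   executable-spec counterpart, and valid_aci' is the concrete-level
   precondition corresponding to the abstract valid_aci used in pac_corres
   below. *)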
lemma vp_strgs':
"valid_pspace' s \<longrightarrow> pspace_distinct' s"
"valid_pspace' s \<longrightarrow> pspace_aligned' s"
"valid_pspace' s \<longrightarrow> valid_mdb' s"
by auto
lemma safe_parent_strg':
"cte_wp_at' (\<lambda>cte. cteCap cte = UntypedCap False frame pageBits idx) p s \<and>
descendants_of' p (ctes_of s) = {} \<and>
valid_pspace' s
\<longrightarrow> safe_parent_for' (ctes_of s) p (ArchObjectCap (ASIDPoolCap frame base))"
apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of)
apply (case_tac cte)
apply (simp add: isCap_simps)
apply (subst conj_comms)
apply (rule context_conjI)
apply (drule ctes_of_valid_cap', fastforce)
apply (clarsimp simp: valid_cap'_def capAligned_def)
apply (drule is_aligned_no_overflow)
apply (clarsimp simp: capRange_def asid_low_bits_def bit_simps)
apply (clarsimp simp: sameRegionAs_def2 isCap_simps capRange_def asid_low_bits_def bit_simps)
done
lemma descendants_of'_helper:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q (descendants_of' t (null_filter' (ctes_of s)))\<rbrace>
\<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q (descendants_of' t (ctes_of s))\<rbrace>"
apply (clarsimp simp:valid_def)
apply (subst null_filter_descendants_of')
prefer 2
apply fastforce
apply simp
done
lemma createObject_typ_at':
"\<lbrace>\<lambda>s. koTypeOf ty = otype \<and> is_aligned ptr (objBitsKO ty) \<and>
pspace_aligned' s \<and> pspace_no_overlap' ptr (objBitsKO ty) s\<rbrace>
createObjects' ptr (Suc 0) ty 0
\<lbrace>\<lambda>rv s. typ_at' otype ptr s\<rbrace>"
supply
is_aligned_neg_mask_eq[simp del]
is_aligned_neg_mask_weaken[simp del]
apply (clarsimp simp:createObjects'_def alignError_def split_def | wp hoare_unless_wp | wpc )+
apply (clarsimp simp:obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+
apply (subgoal_tac "ps_clear ptr (objBitsKO ty)
(s\<lparr>ksPSpace := \<lambda>a. if a = ptr then Some ty else ksPSpace s a\<rparr>)")
apply (simp add:ps_clear_def)+
apply (rule ccontr)
apply (drule int_not_emptyD)
apply clarsimp
apply (unfold pspace_no_overlap'_def)
apply (erule allE)+
apply (erule(1) impE)
apply (subgoal_tac "x \<in> {x..x + 2 ^ objBitsKO y - 1}")
apply (fastforce simp:is_aligned_neg_mask_eq p_assoc_help)
apply (drule(1) pspace_alignedD')
apply (clarsimp simp: is_aligned_no_wrap' p_assoc_help)
done
lemma retype_region2_ext_retype_region_ArchObject:
"retype_region ptr n us (ArchObject x)=
retype_region2 ptr n us (ArchObject x)"
apply (rule ext)
apply (simp add:retype_region_def retype_region2_def bind_assoc
retype_region2_ext_def retype_region_ext_def default_ext_def)
apply (rule ext)
apply (intro monad_eq_split_tail ext)+
apply simp
apply simp
apply (simp add:gets_def get_def bind_def return_def simpler_modify_def )
apply (rule_tac x = xc in fun_cong)
apply (rule_tac f = do_extended_op in arg_cong)
apply (rule ext)
apply simp
apply simp
done
lemma set_cap_device_and_range_aligned:
"is_aligned ptr sz \<Longrightarrow> \<lbrace>\<lambda>_. True\<rbrace>
set_cap
(cap.UntypedCap dev ptr sz idx)
aref
\<lbrace>\<lambda>rv s.
\<exists>slot.
cte_wp_at
(\<lambda>c. cap_is_device c = dev \<and>
up_aligned_area ptr sz \<subseteq> cap_range c)
slot s\<rbrace>"
apply (subst is_aligned_neg_mask_eq[symmetric])
apply simp
apply (wp set_cap_device_and_range)
done
lemma pac_corres:
"asid_ci_map i = i' \<Longrightarrow>
corres dc
(einvs and ct_active and valid_aci i)
(invs' and ct_active' and valid_aci' i')
(perform_asid_control_invocation i)
(performASIDControlInvocation i')"
supply
is_aligned_neg_mask_eq[simp del]
is_aligned_neg_mask_weaken[simp del]
apply (cases i)
apply (rename_tac word1 prod1 prod2 word2)
apply (clarsimp simp: asid_ci_map_def)
apply (simp add: perform_asid_control_invocation_def placeNewObject_def2
performASIDControlInvocation_def)
apply (rule corres_name_pre)
apply (clarsimp simp:valid_aci_def valid_aci'_def cte_wp_at_ctes_of cte_wp_at_caps_of_state)
apply (subgoal_tac "valid_cap' (capability.UntypedCap False word1 pageBits idx) s'")
prefer 2
apply (case_tac ctea)
apply clarsimp
apply (erule ctes_of_valid_cap')
apply fastforce
apply (frule valid_capAligned)
apply (clarsimp simp: capAligned_def page_bits_def)
apply (rule corres_guard_imp)
apply (rule corres_split)
prefer 2
apply (erule detype_corres)
apply (simp add:pageBits_def)
apply (rule corres_split[OF _ getSlotCap_corres])
apply (rule_tac F = " pcap = (cap.UntypedCap False word1 pageBits idxa)" in corres_gen_asm)
apply (rule corres_split[OF _ updateFreeIndex_corres])
apply (rule corres_split)
prefer 2
apply (simp add: retype_region2_ext_retype_region_ArchObject )
apply (rule corres_retype [where ty="Inl (KOArch (KOASIDPool F))" for F,
unfolded APIType_map2_def makeObjectKO_def,
THEN createObjects_corres',simplified,
where val = "makeObject::asidpool"])
apply simp
apply (simp add: objBits_simps obj_bits_api_def arch_kobj_size_def
default_arch_object_def archObjSize_def)+
apply (simp add: obj_relation_retype_def default_object_def
default_arch_object_def objBits_simps archObjSize_def)
apply (simp add: other_obj_relation_def asid_pool_relation_def)
apply (simp add: makeObject_asidpool const_def inv_def)
apply (rule range_cover_full)
apply (simp add:obj_bits_api_def arch_kobj_size_def default_arch_object_def)+
apply (rule corres_split)
prefer 2
apply (rule cins_corres_simple, simp, rule refl, rule refl)
apply (rule_tac F="asid_low_bits_of word2 = 0" in corres_gen_asm)
apply (simp add: is_aligned_mask dc_def[symmetric])
apply (rule corres_split [where P=\<top> and P'=\<top> and r'="\<lambda>t t'. t = t' o ucast"])
prefer 2
apply (clarsimp simp: state_relation_def arch_state_relation_def)
apply (rule corres_trivial)
apply (rule corres_modify)
apply (thin_tac "x \<in> state_relation" for x)
apply (clarsimp simp: state_relation_def arch_state_relation_def o_def)
apply (rule ext)
apply clarsimp
apply (erule_tac P = "x = asid_high_bits_of word2" in notE)
apply (rule word_eqI[rule_format])
apply (drule_tac x1="ucast x" in bang_eq [THEN iffD1])
apply (erule_tac x=n in allE)
apply (simp add: word_size nth_ucast)
apply wp+
apply (strengthen safe_parent_strg[where idx = "2^pageBits"])
apply (strengthen invs_valid_objs invs_distinct
invs_psp_aligned invs_mdb
| simp cong:conj_cong)+
apply (wp retype_region_plain_invs[where sz = pageBits]
retype_cte_wp_at[where sz = pageBits])+
apply (strengthen vp_strgs'
safe_parent_strg'[where idx = "2^pageBits"])
apply (simp cong: conj_cong)
apply (wp createObjects_valid_pspace'
[where sz = pageBits and ty="Inl (KOArch (KOASIDPool undefined))"])
apply (simp add: makeObjectKO_def)+
apply (simp add:objBits_simps archObjSize_def range_cover_full valid_cap'_def)+
apply (fastforce elim!: canonical_address_neq_mask)
apply (rule in_kernel_mappings_neq_mask, (simp add: valid_cap'_def bit_simps)+)[1]
apply (clarsimp simp:valid_cap'_def)
apply (wp createObject_typ_at'
createObjects_orig_cte_wp_at'[where sz = pageBits])
apply (rule descendants_of'_helper)
apply (wp createObjects_null_filter'
[where sz = pageBits and ty="Inl (KOArch (KOASIDPool undefined))"])
apply (clarsimp simp:is_cap_simps)
apply (simp add: free_index_of_def)
apply (clarsimp simp: conj_comms obj_bits_api_def arch_kobj_size_def
objBits_simps archObjSize_def default_arch_object_def
pred_conj_def)
apply (clarsimp simp: conj_comms
| strengthen invs_mdb invs_valid_pspace)+
apply (simp add:region_in_kernel_window_def)
apply (wp set_untyped_cap_invs_simple[where sz = pageBits]
set_cap_cte_wp_at
set_cap_caps_no_overlap[where sz = pageBits]
set_cap_no_overlap
set_cap_device_and_range_aligned[where dev = False,simplified]
set_untyped_cap_caps_overlap_reserved[where sz = pageBits])+
apply (clarsimp simp: conj_comms obj_bits_api_def arch_kobj_size_def
objBits_simps archObjSize_def default_arch_object_def
makeObjectKO_def range_cover_full
simp del: capFreeIndex_update.simps
| strengthen invs_valid_pspace' invs_pspace_aligned'
invs_pspace_distinct'
exI[where x="makeObject :: asidpool"])+
apply (wp updateFreeIndex_forward_invs'
updateFreeIndex_pspace_no_overlap'
updateFreeIndex_caps_no_overlap''
updateFreeIndex_descendants_of2
updateFreeIndex_cte_wp_at
updateFreeIndex_caps_overlap_reserved
| simp add: descendants_of_null_filter' split del: if_split)+
apply (wp get_cap_wp)+
apply (subgoal_tac "word1 && ~~ mask pageBits = word1 \<and> pageBits \<le> word_bits \<and> word_size_bits \<le> pageBits")
prefer 2
apply (clarsimp simp:bit_simps word_bits_def is_aligned_neg_mask_eq)
apply (simp only:delete_objects_rewrite)
apply wp+
apply (clarsimp simp: conj_comms)
apply (clarsimp simp: conj_comms ex_disj_distrib
| strengthen invs_valid_pspace' invs_pspace_aligned'
invs_pspace_distinct')+
apply (wp deleteObjects_invs'[where p="makePoolParent i'"]
deleteObjects_cte_wp_at'
deleteObjects_descendants[where p="makePoolParent i'"])
apply (clarsimp split del: if_split simp:valid_cap'_def)
apply (wp hoare_vcg_ex_lift
deleteObjects_caps_no_overlap''[where slot="makePoolParent i'"]
deleteObject_no_overlap
deleteObjects_ct_active'[where cref="makePoolParent i'"])
apply (clarsimp simp: is_simple_cap_def valid_cap'_def max_free_index_def is_cap_simps
cong: conj_cong)
apply (strengthen empty_descendants_range_in')
apply (wp deleteObjects_descendants[where p="makePoolParent i'"]
deleteObjects_cte_wp_at'
deleteObjects_null_filter[where p="makePoolParent i'"])
apply (clarsimp simp:invs_mdb max_free_index_def invs_untyped_children)
apply (subgoal_tac "detype_locale x y sa" for x y)
prefer 2
apply (simp add:detype_locale_def)
apply (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2
empty_descendants_range_in invs_untyped_children)
apply (intro conjI)
apply (clarsimp)
apply (erule(1) caps_of_state_valid)
subgoal by (fastforce simp:cte_wp_at_caps_of_state
descendants_range_def2 empty_descendants_range_in)
apply (fold_subgoals (prefix))[2]
subgoal premises prems using prems by (clarsimp simp:invs_def valid_state_def)+
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (drule detype_locale.non_null_present)
apply (fastforce simp:cte_wp_at_caps_of_state)
apply simp
apply (frule_tac ptr = "(aa,ba)" in detype_invariants [rotated 3])
apply fastforce
apply simp
apply (simp add: cte_wp_at_caps_of_state)
apply (simp add: is_cap_simps)
apply (simp add:empty_descendants_range_in descendants_range_def2)
apply (frule intvl_range_conv[where bits = pageBits])
apply (clarsimp simp:pageBits_def word_bits_def)
apply (clarsimp simp: invs_valid_objs cte_wp_at_caps_of_state range_cover_full
invs_psp_aligned invs_distinct cap_master_cap_simps is_cap_simps
is_simple_cap_def)
apply (clarsimp simp: conj_comms)
apply (rule conjI, clarsimp simp: is_aligned_asid_low_bits_of_zero)
apply (frule ex_cte_cap_protects)
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:empty_descendants_range_in)
apply fastforce
apply (rule subset_refl)
apply fastforce
apply clarsimp
apply (rule conjI, clarsimp)
apply (rule conjI, clarsimp simp: clear_um_def)
apply (simp add:detype_clear_um_independent)
apply (rule conjI,erule caps_no_overlap_detype[OF descendants_range_caps_no_overlapI])
apply (clarsimp simp:is_aligned_neg_mask_eq cte_wp_at_caps_of_state)
apply (simp add:empty_descendants_range_in)+
apply (rule conjI)
apply clarsimp
apply (drule_tac p = "(aa,ba)" in cap_refs_in_kernel_windowD2[OF caps_of_state_cteD])
apply fastforce
apply (clarsimp simp: region_in_kernel_window_def valid_cap_def
cap_aligned_def is_aligned_neg_mask_eq detype_def clear_um_def)
apply (rule conjI, rule pspace_no_overlap_subset,
rule pspace_no_overlap_detype[OF caps_of_state_valid])
apply (simp add:invs_psp_aligned invs_valid_objs is_aligned_neg_mask_eq)+
apply (clarsimp simp: detype_def clear_um_def detype_ext_def valid_sched_def valid_etcbs_def
st_tcb_at_kh_def obj_at_kh_def st_tcb_at_def obj_at_def is_etcb_at_def)
apply (simp add: detype_def clear_um_def)
apply (drule_tac x = "cte_map (aa,ba)" in pspace_relation_cte_wp_atI[OF state_relation_pspace_relation])
apply (simp add:invs_valid_objs)+
apply clarsimp
apply (drule cte_map_inj_eq)
apply ((fastforce simp:cte_wp_at_caps_of_state)+)[5]
apply (clarsimp simp:cte_wp_at_caps_of_state invs_valid_pspace' conj_comms cte_wp_at_ctes_of
valid_cap_simps')
apply (strengthen refl)
apply clarsimp
apply (frule empty_descendants_range_in')
apply (intro conjI,
simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2
null_filter_descendants_of'[OF null_filter_simp']
capAligned_def asid_low_bits_def)
apply (erule descendants_range_caps_no_overlapI')
apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq)
apply (simp add:empty_descendants_range_in')
apply (simp add:word_bits_def bit_simps)
apply (rule is_aligned_weaken)
apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified])
apply (simp add:pageBits_def)
apply clarsimp
apply (drule(1) cte_cap_in_untyped_range)
apply (fastforce simp:cte_wp_at_ctes_of)
apply assumption+
apply fastforce
apply simp
apply clarsimp
apply (drule (1) cte_cap_in_untyped_range)
apply (fastforce simp add: cte_wp_at_ctes_of)
apply assumption+
apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of)
apply fastforce
apply simp
done
(* FIXME x64: move *)
definition
ioport_data_relation :: "io_port_invocation_data \<Rightarrow> ioport_invocation_data \<Rightarrow> bool"
where
"ioport_data_relation d d' \<equiv> case d of
X64_A.IOPortIn8 \<Rightarrow> d' = IOPortIn8
| X64_A.IOPortIn16 \<Rightarrow> d' = IOPortIn16
| X64_A.IOPortIn32 \<Rightarrow> d' = IOPortIn32
| X64_A.IOPortOut8 w \<Rightarrow> d' = IOPortOut8 w
| X64_A.IOPortOut16 w \<Rightarrow> d' = IOPortOut16 w
| X64_A.IOPortOut32 w \<Rightarrow> d' = IOPortOut32 w"
definition
ioport_invocation_map :: "io_port_invocation \<Rightarrow> ioport_invocation \<Rightarrow> bool"
where
"ioport_invocation_map i i' \<equiv> case i of
X64_A.IOPortInvocation iop dat \<Rightarrow> \<exists>dat'. i' = IOPortInvocation iop dat' \<and> ioport_data_relation dat dat'"
definition
ioport_control_inv_relation :: "io_port_control_invocation \<Rightarrow> ioport_control_invocation \<Rightarrow> bool"
where
"ioport_control_inv_relation i i' \<equiv> case i of
IOPortControlInvocation f l slot slot' \<Rightarrow>
(i' = IOPortControlIssue f l (cte_map slot) (cte_map slot'))"
definition
ioport_control_inv_valid' :: "ioport_control_invocation \<Rightarrow> kernel_state \<Rightarrow> bool"
where
"ioport_control_inv_valid' i \<equiv> case i of
IOPortControlIssue f l ptr ptr' \<Rightarrow> (cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) ptr and
cte_wp_at' (\<lambda>cte. cteCap cte = ArchObjectCap IOPortControlCap) ptr' and
ex_cte_cap_to' ptr and real_cte_at' ptr and
(\<lambda>s. {f..l} \<inter> issued_ioports' (ksArchState s) = {}) and K (f \<le> l))"
definition
archinv_relation :: "arch_invocation \<Rightarrow> Arch.invocation \<Rightarrow> bool"
where
"archinv_relation ai ai' \<equiv> case ai of
arch_invocation.InvokePageTable pti \<Rightarrow>
\<exists>pti'. ai' = InvokePageTable pti' \<and> page_table_invocation_map pti pti'
| arch_invocation.InvokePageDirectory pdi \<Rightarrow>
\<exists>pdi'. ai' = InvokePageDirectory pdi' \<and> page_directory_invocation_map pdi pdi'
| arch_invocation.InvokePDPT pdpti \<Rightarrow>
\<exists>pdpti'. ai' = InvokePDPT pdpti' \<and> pdpt_invocation_map pdpti pdpti'
| arch_invocation.InvokePage pgi \<Rightarrow>
\<exists>pgi'. ai' = InvokePage pgi' \<and> page_invocation_map pgi pgi'
| arch_invocation.InvokeASIDControl aci \<Rightarrow>
\<exists>aci'. ai' = InvokeASIDControl aci' \<and> aci' = asid_ci_map aci
| arch_invocation.InvokeASIDPool ap \<Rightarrow>
\<exists>ap'. ai' = InvokeASIDPool ap' \<and> ap' = asid_pool_invocation_map ap
| arch_invocation.InvokeIOPort iopi \<Rightarrow>
\<exists>iopi'. ai' = InvokeIOPort iopi' \<and> ioport_invocation_map iopi iopi'
| arch_invocation.InvokeIOPortControl iopci \<Rightarrow>
\<exists>iopci'. ai' = InvokeIOPortControl iopci' \<and> ioport_control_inv_relation iopci iopci'"
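(* archinv_relation ties an abstract architecture invocation to its executable
   counterpart: the constructors must match, and the arguments must be related by the
   corresponding per-invocation relation or map defined above. *)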
definition
valid_arch_inv' :: "Arch.invocation \<Rightarrow> kernel_state \<Rightarrow> bool"
where
"valid_arch_inv' ai \<equiv> case ai of
InvokePageTable pti \<Rightarrow> valid_pti' pti
| InvokePageDirectory pdi \<Rightarrow> valid_pdi' pdi
| InvokePDPT pdpti \<Rightarrow> valid_pdpti' pdpti
| InvokePage pgi \<Rightarrow> valid_page_inv' pgi
| InvokeASIDControl aci \<Rightarrow> valid_aci' aci
| InvokeASIDPool ap \<Rightarrow> valid_apinv' ap
| InvokeIOPort i \<Rightarrow> \<top>
| InvokeIOPortControl ic \<Rightarrow> ioport_control_inv_valid' ic"
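(* valid_arch_inv' dispatches to the per-invocation validity predicates on the
   executable side; plain I/O port invocations need no additional precondition, while
   port-control invocations are constrained by ioport_control_inv_valid'. *)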
lemma mask_vmrights_corres:
"maskVMRights (vmrights_map R) (rightsFromWord d) =
vmrights_map (mask_vm_rights R (data_to_rights d))"
by (clarsimp simp: rightsFromWord_def data_to_rights_def
vmrights_map_def Let_def maskVMRights_def
mask_vm_rights_def nth_ucast
validate_vm_rights_def vm_read_write_def
vm_kernel_only_def vm_read_only_def
split: bool.splits)
lemma vm_attributes_corres:
"vmattributes_map (attribs_from_word w) = attribsFromWord w"
by (clarsimp simp: attribsFromWord_def attribs_from_word_def
Let_def vmattributes_map_def)
lemma check_vp_corres:
"corres (ser \<oplus> dc) \<top> \<top>
(check_vp_alignment sz w)
(checkVPAlignment sz w)"
apply (simp add: check_vp_alignment_def checkVPAlignment_def)
apply (cases sz, simp_all add: corres_returnOk unlessE_whenE is_aligned_mask)
apply ((rule corres_guard_imp, rule corres_whenE, rule refl, auto)[1])+
done
lemma checkVP_wpR [wp]:
"\<lbrace>\<lambda>s. vmsz_aligned w sz \<longrightarrow> P () s\<rbrace>
checkVPAlignment sz w \<lbrace>P\<rbrace>, -"
apply (simp add: checkVPAlignment_def)
by (wpsimp wp: hoare_whenE_wp simp: is_aligned_mask vmsz_aligned_def)
lemma asidHighBits [simp]:
"asidHighBits = asid_high_bits"
by (simp add: asidHighBits_def asid_high_bits_def)
declare word_unat_power [symmetric, simp del]
crunch inv [wp]: "X64_H.decodeInvocation" "P"
(wp: crunch_wps mapME_x_inv_wp getASID_wp
simp: forME_x_def crunch_simps)
lemma case_option_corresE:
assumes nonec: "corres r Pn Qn (nc >>=E f) (nc' >>=E g)"
and somec: "\<And>v'. corres r (Ps v') (Qs v') (sc v' >>=E f) (sc' v' >>=E g)"
shows "corres r (case_option Pn Ps v) (case_option Qn Qs v) (case_option nc sc v >>=E f) (case_option nc' sc' v >>=E g)"
apply (cases v)
apply simp
apply (rule nonec)
apply simp
apply (rule somec)
done
lemma cap_relation_Untyped_eq:
"cap_relation c (UntypedCap d p sz f) = (c = cap.UntypedCap d p sz f)"
by (cases c) auto
declare check_vp_alignment_inv[wp del]
lemma select_ext_fa:
"free_asid_select asid_tbl \<in> S
\<Longrightarrow> ((select_ext (\<lambda>_. free_asid_select asid_tbl) S) :: (3 word) det_ext_monad)
= return (free_asid_select asid_tbl)"
by (simp add: select_ext_def get_def gets_def bind_def assert_def return_def fail_def)
lemma select_ext_fap:
"free_asid_pool_select p b \<in> S
\<Longrightarrow> ((select_ext (\<lambda>_. free_asid_pool_select p b) S) :: (9 word) det_ext_monad)
= return (free_asid_pool_select p b)"
by (simp add: select_ext_def get_def gets_def bind_def assert_def return_def)
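(* select_ext_fa and select_ext_fap: when the selected free ASID (respectively free
   ASID-pool slot) is known to lie in the candidate set S, select_ext collapses to a
   plain return, eliminating the nondeterministic choice in the ASID-related decode
   proofs below. *)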
lemma vs_refs_pages_ptI:
"pt x = pte \<Longrightarrow> pte_ref_pages pte = Some r'
\<Longrightarrow> (VSRef (ucast x) (Some APageTable), r') \<in> vs_refs_pages (ArchObj (PageTable pt))"
apply (simp add: vs_refs_pages_def)
apply (rule image_eqI[rotated], rule graph_ofI[where x=x], simp)
apply simp
done
lemmas vs_refs_pages_pt_smallI
= vs_refs_pages_ptI[where pte="X64_A.pte.SmallPagePTE x y z" for x y z,
unfolded pte_ref_pages_def, simplified, OF _ refl]
lemma vs_refs_pages_pdI:
"pd x = pde \<Longrightarrow> pde_ref_pages pde = Some r'
\<Longrightarrow> (VSRef (ucast x) (Some APageDirectory), r') \<in> vs_refs_pages (ArchObj (PageDirectory pd))"
apply (simp add: vs_refs_pages_def)
apply (rule image_eqI[rotated], rule graph_ofI[where x=x], simp)
apply simp
done
lemmas vs_refs_pages_pd_largeI
  = vs_refs_pages_pdI[where pde="X64_A.pde.LargePagePDE x y z" for x y z,
unfolded pde_ref_pages_def, simplified, OF _ refl]
lemma vs_refs_pages_pdptI:
"pdpt x = pdpte \<Longrightarrow> pdpte_ref_pages pdpte = Some r'
\<Longrightarrow> (VSRef (ucast x) (Some APDPointerTable), r') \<in> vs_refs_pages (ArchObj (PDPointerTable pdpt))"
apply (simp add: vs_refs_pages_def)
apply (rule image_eqI[rotated], rule graph_ofI[where x=x], simp)
apply simp
done
lemmas vs_refs_pages_pdpt_hugeI
  = vs_refs_pages_pdptI[where pdpte="X64_A.pdpte.HugePagePDPTE x y z" for x y z,
unfolded pdpte_ref_pages_def, simplified, OF _ refl]
lemma userVTop_conv[simp]: "userVTop = user_vtop"
by (simp add: userVTop_def user_vtop_def X64.pptrUserTop_def)
lemma find_vspace_for_asid_lookup_slot [wp]:
"\<lbrace>pspace_aligned and valid_vspace_objs\<rbrace>
find_vspace_for_asid asid
\<lbrace>\<lambda>rv. \<exists>\<rhd> (lookup_pml4_slot rv vptr && ~~ mask pml4_bits)\<rbrace>, -"
apply (rule hoare_pre)
apply (rule hoare_post_imp_R)
apply (rule hoare_vcg_R_conj)
apply (rule hoare_vcg_R_conj)
apply (rule find_vspace_for_asid_inv [where P="\<top>", THEN valid_validE_R])
apply (rule find_vspace_for_asid_lookup)
apply (rule find_vspace_for_asid_aligned_pm)
apply clarsimp
apply (subst lookup_pml4_slot_eq)
apply (auto simp: bit_simps)
done
lemmas vmsz_aligned_imp_aligned
= vmsz_aligned_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN is_aligned_weaken]
lemma corres_splitEE':
assumes "corres_underlying sr nf nf' (f \<oplus> r') P P' a c"
assumes "\<And>rv rv'. r' rv rv'
\<Longrightarrow> corres_underlying sr nf nf' (f \<oplus> r) (R rv) (R' rv') (b rv) (d rv')"
assumes "\<lbrace>Q\<rbrace> a \<lbrace>R\<rbrace>,\<lbrace>\<top>\<top>\<rbrace>" "\<lbrace>Q'\<rbrace> c \<lbrace>R'\<rbrace>,\<lbrace>\<top>\<top>\<rbrace>"
shows "corres_underlying sr nf nf' (f \<oplus> r) (P and Q) (P' and Q') (a >>=E (\<lambda>rv. b rv)) (c >>=E (\<lambda>rv'. d rv'))"
by (rule corres_splitEE; rule assms)
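(* corres_splitEE' is corres_splitEE with the error-case postconditions trivialised and
   the premises given as separate named assumptions; this is the form used repeatedly in
   decode_page_inv_corres below. *)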
lemma decode_page_inv_corres:
"\<lbrakk>cap = arch_cap.PageCap d p R mt sz opt; acap_relation cap cap';
list_all2 cap_relation (map fst excaps) (map fst excaps');
list_all2 (\<lambda>s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \<rbrakk> \<Longrightarrow>
corres (ser \<oplus> archinv_relation)
(invs and valid_cap (cap.ArchObjectCap cap) and
cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and
(\<lambda>s. \<forall>x\<in>set excaps. s \<turnstile> fst x \<and> cte_wp_at (\<lambda>_. True) (snd x) s))
(invs' and valid_cap' (capability.ArchObjectCap cap') and
(\<lambda>s. \<forall>x\<in>set excaps'. valid_cap' (fst x) s \<and> cte_wp_at' (\<lambda>_. True) (snd x) s))
(decode_page_invocation l args slot cap excaps)
(decodeX64FrameInvocation l args (cte_map slot) cap' excaps')"
apply (simp add: decode_page_invocation_def decodeX64FrameInvocation_def Let_def isCap_simps
split del: if_split)
apply (cases "invocation_type l = ArchInvocationLabel X64PageMap")
apply (case_tac "\<not>(2 < length args \<and> excaps \<noteq> [])", clarsimp split: list.splits)
apply (simp add: Let_def neq_Nil_conv)
apply (elim exE conjE)
apply (rename_tac pm_cap pm_cap_cnode pm_cap_slot excaps_rest)
apply (clarsimp split: list.split, intro conjI impI allI; clarsimp)
apply (rename_tac vaddr rights_mask attr pd_cap' excaps'_rest args_rest)
apply (rule corres_guard_imp)
apply (rule_tac P="\<nexists>pm asid. pm_cap = cap.ArchObjectCap (arch_cap.PML4Cap pm (Some asid))"
in corres_symmetric_bool_cases[where Q=\<top> and Q'=\<top>, OF refl])
apply (case_tac pm_cap; clarsimp; rename_tac pm_acap pm_acap'; case_tac pm_acap; clarsimp)
apply (rule corres_splitEE'[where r'="(=)" and P=\<top> and P'=\<top>])
apply (clarsimp simp: corres_returnOkTT)
apply (rule_tac F="pm_cap = cap.ArchObjectCap (arch_cap.PML4Cap (fst rv) (Some (snd rv)))"
in corres_gen_asm)
apply (clarsimp cong: option.case_cong)
apply (rename_tac vspace asid)
apply wpfix \<comment> \<open>get asid and vspace parameters in schematic preconditions\<close>
apply (rule_tac P=
"(opt = None \<and> (user_vtop < vaddr \<or> user_vtop < vaddr + 2 ^ pageBitsForSize sz))
\<or> (\<exists>asid' vaddr'. opt = Some (asid', vaddr')
\<and> (asid' \<noteq> asid \<or> mt \<noteq> VMVSpaceMap \<or> vaddr' \<noteq> vaddr))"
in corres_symmetric_bool_cases[where Q=\<top> and Q'=\<top>, OF refl]; clarsimp)
apply (case_tac opt; clarsimp)
apply (case_tac "asid' \<noteq> asid"; clarsimp)
apply (case_tac "mt \<noteq> VMVSpaceMap"; clarsimp)
apply (rule corres_splitEE'[where r'=dc])
apply (case_tac opt; clarsimp simp: whenE_def)
apply (rule corres_returnOkTT, simp)
apply (rule corres_returnOkTT, simp)
apply (rule corres_splitEE'[OF corres_lookup_error[OF find_vspace_for_asid_corres]], simp)
apply (rule whenE_throwError_corres; simp)
apply (rule corres_splitEE'[where r'=dc, OF check_vp_corres])
apply (rule corres_splitEE'[OF create_mapping_entries_corres]
; simp add: mask_vmrights_corres vm_attributes_corres)
apply (rule corres_splitEE'[OF ensure_safe_mapping_corres], assumption)
apply (rule corres_returnOkTT)
\<comment> \<open>program split done, now prove resulting preconditions and Hoare triples\<close>
apply (simp add: archinv_relation_def page_invocation_map_def)
apply wpsimp+
apply (wp createMappingEntries_wf)
apply wpsimp+
apply (wp find_vspace_for_asid_wp)
apply (wp findVSpaceForASID_vs_at_wp)
apply wpsimp
apply wpsimp
apply wpsimp
apply wpsimp
apply (clarsimp simp: cte_wp_at_caps_of_state cong: conj_cong)
apply (rename_tac pm_ptr asid')
apply (prop_tac "is_aligned pm_ptr pml4_bits")
apply (clarsimp simp: valid_cap_simps cap_aligned_def pml4_bits_def)
apply (clarsimp simp: invs_implies)
apply (case_tac "asid' = asid"; clarsimp)
apply (prop_tac "0 < asid \<and> asid_wf asid \<and> asid \<noteq> 0", clarsimp simp: valid_cap_simps)
apply clarsimp
apply (prop_tac "vspace_at_asid asid pm_ptr s \<longrightarrow> (\<exists>ref. (ref \<rhd> pm_ptr) s)")
apply (fastforce simp: vspace_at_asid_def)
apply (case_tac opt; clarsimp simp: valid_cap_simps)
using aligned_sum_le_user_vtop le_user_vtop_canonical_address le_user_vtop_less_pptr_base word_not_le
apply blast
apply fastforce
\<comment> \<open>PageUnmap\<close>
apply (simp split del: if_split)
apply (cases "invocation_type l = ArchInvocationLabel X64PageUnmap")
apply simp
apply (rule corres_returnOk)
apply (clarsimp simp: archinv_relation_def page_invocation_map_def)
\<comment> \<open>PageGetAddress\<close>
apply (cases "invocation_type l = ArchInvocationLabel X64PageGetAddress")
apply simp
apply (rule corres_returnOk)
apply (clarsimp simp: archinv_relation_def page_invocation_map_def)
by (clarsimp split: invocation_label.splits arch_invocation_label.splits split del: if_split)
lemma VMReadWrite_vmrights_map[simp]: "vmrights_map vm_read_write = VMReadWrite"
by (simp add: vmrights_map_def vm_read_write_def)
lemma decode_page_table_inv_corres:
"\<lbrakk>cap = arch_cap.PageTableCap p opt; acap_relation cap cap';
list_all2 cap_relation (map fst excaps) (map fst excaps');
list_all2 (\<lambda>s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \<rbrakk> \<Longrightarrow>
corres (ser \<oplus> archinv_relation)
(invs and valid_cap (cap.ArchObjectCap cap) and
cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and
(\<lambda>s. \<forall>x\<in>set excaps. s \<turnstile> fst x \<and> cte_wp_at (\<lambda>_. True) (snd x) s))
(invs' and valid_cap' (capability.ArchObjectCap cap') and
(\<lambda>s. \<forall>x\<in>set excaps'. valid_cap' (fst x) s \<and> cte_wp_at' (\<lambda>_. True) (snd x) s))
(decode_page_table_invocation l args slot cap excaps)
(decodeX64PageTableInvocation l args (cte_map slot) cap' excaps')"
apply (simp add: decode_page_table_invocation_def decodeX64PageTableInvocation_def Let_def
isCap_simps
split del: if_split)
apply (cases "invocation_type l = ArchInvocationLabel X64PageTableMap")
apply (simp split: invocation_label.split arch_invocation_label.splits split del: if_split)
apply (simp split: list.split, intro conjI impI allI, simp_all)[1]
apply (clarsimp simp: neq_Nil_conv Let_def)
apply (rule whenE_throwError_corres_initial, simp+)
apply (simp split: cap.split arch_cap.split option.split,
intro conjI allI impI, simp_all)[1]
apply (rule whenE_throwError_corres_initial, simp+)
apply (rule corres_guard_imp)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_lookup_error)
apply (rule find_vspace_for_asid_corres[OF refl])
apply (rule whenE_throwError_corres, simp, simp)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_lookup_error)
apply (rule lookup_pd_slot_corres)
apply (rule corres_splitEE)
prefer 2
apply simp
apply (rule get_pde_corres')
apply (simp add: unlessE_whenE)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_whenE)
apply clarsimp
              apply (case_tac old_pde; simp)[1]
apply (rule corres_trivial)
apply simp
apply simp
apply (rule corres_trivial)
apply (rule corres_returnOk)
apply (clarsimp simp: archinv_relation_def page_table_invocation_map_def)
apply (clarsimp simp: attribs_from_word_def filter_frame_attrs_def
attribsFromWord_def Let_def)
apply ((clarsimp cong: if_cong
| wp hoare_whenE_wp hoare_vcg_all_lift_R getPDE_wp get_pde_wp
| wp (once) hoare_drop_imps)+)[6]
apply (clarsimp intro!: validE_R_validE)
apply (rule_tac Q'="\<lambda>rv s. pspace_aligned s \<and> valid_vspace_objs s \<and> valid_arch_state s \<and>
equal_kernel_mappings s \<and> valid_global_objs s \<and>
(\<exists>ref. (ref \<rhd> rv) s) \<and> typ_at (AArch APageMapL4) rv s \<and>
is_aligned rv pml4_bits"
in hoare_post_imp_R[rotated])
apply fastforce
apply (wpsimp | wp (once) hoare_drop_imps)+
apply (fastforce simp: valid_cap_def mask_def)
apply (clarsimp simp: valid_cap'_def)
apply fastforce
\<comment> \<open>PageTableUnmap\<close>
apply (clarsimp simp: isCap_simps)+
apply (cases "invocation_type l = ArchInvocationLabel X64PageTableUnmap")
apply (clarsimp simp: unlessE_whenE liftE_bindE)
apply (rule stronger_corres_guard_imp)
apply (rule corres_symb_exec_r_conj)
apply (rule_tac F="isArchCap isPageTableCap (cteCap cteVal)"
in corres_gen_asm2)
apply (rule corres_split[OF _ final_cap_corres[where ptr=slot]])
apply (drule mp)
apply (clarsimp simp: isCap_simps final_matters'_def)
apply (rule whenE_throwError_corres)
apply simp
apply simp
apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def
page_table_invocation_map_def)
apply (wp getCTE_wp' | wp (once) hoare_drop_imps)+
apply (clarsimp)
apply (rule no_fail_pre, rule no_fail_getCTE)
apply (erule conjunct2)
apply (clarsimp simp: cte_wp_at_caps_of_state cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp add: cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp: cte_wp_at_ctes_of cap_rights_update_def acap_rights_update_def
cte_wp_at_caps_of_state)
apply (drule pspace_relation_ctes_ofI[OF _ caps_of_state_cteD, rotated],
erule invs_pspace_aligned', clarsimp+)
apply (simp add: isCap_simps)
apply (simp add: isCap_simps split del: if_split)
by (clarsimp split: invocation_label.splits arch_invocation_label.splits)
lemma decode_page_directory_inv_corres:
"\<lbrakk>cap = arch_cap.PageDirectoryCap p opt; acap_relation cap cap';
list_all2 cap_relation (map fst excaps) (map fst excaps');
list_all2 (\<lambda>s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \<rbrakk> \<Longrightarrow>
corres (ser \<oplus> archinv_relation)
(invs and valid_cap (cap.ArchObjectCap cap) and
cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and
(\<lambda>s. \<forall>x\<in>set excaps. s \<turnstile> fst x \<and> cte_wp_at (\<lambda>_. True) (snd x) s))
(invs' and valid_cap' (capability.ArchObjectCap cap') and
(\<lambda>s. \<forall>x\<in>set excaps'. valid_cap' (fst x) s \<and> cte_wp_at' (\<lambda>_. True) (snd x) s))
(decode_page_directory_invocation l args slot cap excaps)
(decodeX64PageDirectoryInvocation l args (cte_map slot) cap' excaps')"
apply (simp add: decode_page_directory_invocation_def decodeX64PageDirectoryInvocation_def Let_def
isCap_simps
split del: if_split)
apply (cases "invocation_type l = ArchInvocationLabel X64PageDirectoryMap")
apply (simp split: invocation_label.split arch_invocation_label.splits split del: if_split)
apply (simp split: list.split, intro conjI impI allI, simp_all)[1]
apply (clarsimp simp: neq_Nil_conv Let_def)
apply (rule whenE_throwError_corres_initial, simp+)
apply (simp split: cap.split arch_cap.split option.split,
intro conjI allI impI, simp_all)[1]
apply (rule whenE_throwError_corres_initial, simp+)
apply (rule corres_guard_imp)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_lookup_error)
apply (rule find_vspace_for_asid_corres[OF refl])
apply (rule whenE_throwError_corres, simp, simp)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_lookup_error)
apply (rule lookup_pdpt_slot_corres)
apply (rule corres_splitEE)
prefer 2
apply simp
apply (rule get_pdpte_corres')
apply (simp add: unlessE_whenE)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_whenE)
apply clarsimp
              apply (case_tac old_pdpte; simp)[1]
apply (rule corres_trivial)
apply simp
apply simp
apply (rule corres_trivial)
apply (rule corres_returnOk)
apply (clarsimp simp: archinv_relation_def page_directory_invocation_map_def)
apply (clarsimp simp: attribs_from_word_def filter_frame_attrs_def
attribsFromWord_def Let_def)
apply ((clarsimp cong: if_cong
| wp hoare_whenE_wp hoare_vcg_all_lift_R getPDPTE_wp get_pdpte_wp
| wp (once) hoare_drop_imps)+)[6]
apply (clarsimp intro!: validE_R_validE)
apply (rule_tac Q'="\<lambda>rv s. pspace_aligned s \<and> valid_vspace_objs s \<and> valid_arch_state s \<and>
equal_kernel_mappings s \<and> valid_global_objs s \<and>
(\<exists>ref. (ref \<rhd> rv) s) \<and> typ_at (AArch APageMapL4) rv s \<and>
is_aligned rv pml4_bits"
in hoare_post_imp_R[rotated])
apply fastforce
apply (wpsimp | wp (once) hoare_drop_imps)+
apply (fastforce simp: valid_cap_def mask_def)
apply (clarsimp simp: valid_cap'_def)
apply fastforce
\<comment> \<open>PageDirectoryUnmap\<close>
apply (clarsimp simp: isCap_simps)+
apply (cases "invocation_type l = ArchInvocationLabel X64PageDirectoryUnmap")
apply (clarsimp simp: unlessE_whenE liftE_bindE)
apply (rule stronger_corres_guard_imp)
apply (rule corres_symb_exec_r_conj)
apply (rule_tac F="isArchCap isPageDirectoryCap (cteCap cteVal)"
in corres_gen_asm2)
apply (rule corres_split[OF _ final_cap_corres[where ptr=slot]])
apply (drule mp)
apply (clarsimp simp: isCap_simps final_matters'_def)
apply (rule whenE_throwError_corres)
apply simp
apply simp
apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def
page_directory_invocation_map_def)
apply (wp getCTE_wp' | wp (once) hoare_drop_imps)+
apply (clarsimp)
apply (rule no_fail_pre, rule no_fail_getCTE)
apply (erule conjunct2)
apply (clarsimp simp: cte_wp_at_caps_of_state cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp add: cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp: cte_wp_at_ctes_of cap_rights_update_def acap_rights_update_def
cte_wp_at_caps_of_state)
apply (drule pspace_relation_ctes_ofI[OF _ caps_of_state_cteD, rotated],
erule invs_pspace_aligned', clarsimp+)
apply (simp add: isCap_simps)
apply (simp add: isCap_simps split del: if_split)
by (clarsimp split: invocation_label.splits arch_invocation_label.splits)
lemma decode_pdpt_inv_corres:
"\<lbrakk>cap = arch_cap.PDPointerTableCap p opt; acap_relation cap cap';
list_all2 cap_relation (map fst excaps) (map fst excaps');
list_all2 (\<lambda>s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \<rbrakk> \<Longrightarrow>
corres (ser \<oplus> archinv_relation)
(invs and valid_cap (cap.ArchObjectCap cap) and
cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and
(\<lambda>s. \<forall>x\<in>set excaps. s \<turnstile> fst x \<and> cte_wp_at (\<lambda>_. True) (snd x) s))
(invs' and valid_cap' (capability.ArchObjectCap cap') and
(\<lambda>s. \<forall>x\<in>set excaps'. valid_cap' (fst x) s \<and> cte_wp_at' (\<lambda>_. True) (snd x) s))
(decode_pdpt_invocation l args slot cap excaps)
(decodeX64PDPointerTableInvocation l args (cte_map slot) cap' excaps')"
apply (simp add: decode_pdpt_invocation_def decodeX64PDPointerTableInvocation_def Let_def
isCap_simps
split del: if_split)
apply (cases "invocation_type l = ArchInvocationLabel X64PDPTMap")
apply (simp split: invocation_label.split arch_invocation_label.splits split del: if_split)
apply (simp split: list.split, intro conjI impI allI, simp_all)[1]
apply (clarsimp simp: neq_Nil_conv Let_def)
apply (rule whenE_throwError_corres_initial, simp+)
apply (simp split: cap.split arch_cap.split option.split,
intro conjI allI impI, simp_all)[1]
apply (rule whenE_throwError_corres_initial, simp+)
apply (rule corres_guard_imp)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_lookup_error)
apply (rule find_vspace_for_asid_corres[OF refl])
apply (rule whenE_throwError_corres, simp, simp)
apply (rule corres_splitEE)
prefer 2
apply simp
apply (rule get_pml4e_corres')
apply (simp add: unlessE_whenE)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_whenE)
apply clarsimp
              apply (case_tac old_pml4e; simp)[1]
apply (rule corres_trivial)
apply simp
apply simp
apply (rule corres_trivial)
apply (rule corres_returnOk)
apply (clarsimp simp: archinv_relation_def pdpt_invocation_map_def)
apply (clarsimp simp: attribs_from_word_def filter_frame_attrs_def
attribsFromWord_def Let_def)
apply ((clarsimp cong: if_cong
| wp hoare_whenE_wp hoare_vcg_all_lift_R getPML4E_wp get_pml4e_wp
| wp (once) hoare_drop_imps)+)
apply (fastforce simp: valid_cap_def mask_def intro!: page_map_l4_pml4e_at_lookupI)
apply (clarsimp simp: valid_cap'_def)
apply fastforce
\<comment> \<open>PDPTUnmap\<close>
apply (clarsimp simp: isCap_simps)+
apply (cases "invocation_type l = ArchInvocationLabel X64PDPTUnmap")
apply (clarsimp simp: unlessE_whenE liftE_bindE)
apply (rule stronger_corres_guard_imp)
apply (rule corres_symb_exec_r_conj)
apply (rule_tac F="isArchCap isPDPointerTableCap (cteCap cteVal)"
in corres_gen_asm2)
apply (rule corres_split[OF _ final_cap_corres[where ptr=slot]])
apply (drule mp)
apply (clarsimp simp: isCap_simps final_matters'_def)
apply (rule whenE_throwError_corres)
apply simp
apply simp
apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def
pdpt_invocation_map_def)
apply (wp getCTE_wp' | wp (once) hoare_drop_imps)+
apply (clarsimp)
apply (rule no_fail_pre, rule no_fail_getCTE)
apply (erule conjunct2)
apply (clarsimp simp: cte_wp_at_caps_of_state cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp: cte_wp_at_ctes_of cap_rights_update_def acap_rights_update_def
cte_wp_at_caps_of_state)
apply (drule pspace_relation_ctes_ofI[OF _ caps_of_state_cteD, rotated],
erule invs_pspace_aligned', clarsimp+)
apply (simp add: isCap_simps)
apply (simp add: isCap_simps split del: if_split)
by (clarsimp split: invocation_label.splits arch_invocation_label.splits)
lemma ensure_port_op_allowed_corres:
"\<lbrakk>cap = arch_cap.IOPortCap f l; acap_relation cap cap'\<rbrakk> \<Longrightarrow>
     corres (ser \<oplus> dc) (valid_cap (cap.ArchObjectCap cap) and K (w \<le> 0xFFFF \<and> (x = 1 \<or> x = 2 \<or> x = 4))) (valid_cap' (ArchObjectCap cap'))
(ensure_port_operation_allowed cap w x)
(ensurePortOperationAllowed cap' w x)"
apply (simp add: ensure_port_operation_allowed_def ensurePortOperationAllowed_def)
apply (rule corres_gen_asm)
apply (rule corres_guard_imp)
apply (rule corres_split_eqrE)
apply (rule corres_split_eqrE)
apply (rule corres_whenE, simp)
apply clarsimp
apply clarsimp
apply (rule corres_assertE_assume)
apply (rule impI, assumption)+
apply wp+
apply (rule corres_assertE_assume; (rule impI, assumption))
apply wp+
apply (clarsimp simp: valid_cap_def; elim disjE; clarsimp)
apply (subst add.commute, subst no_olen_add, simp add: word_le_def)+
apply (clarsimp simp: valid_cap'_def; elim disjE; clarsimp)
apply (subst add.commute, subst no_olen_add, simp add: word_le_def)+
done
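(* ensure_port_op_allowed_corres: the abstract and executable permission checks for an
   I/O port access agree, given that the start port fits in 16 bits and the access
   width is 1, 2 or 4 bytes. *)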
lemma ucast_ucast_ioport_max [simp]:
"UCAST(16 \<rightarrow> 32) (UCAST(64 \<rightarrow> 16) y) \<le> 0xFFFF"
by word_bitwise
lemma decode_port_inv_corres:
"\<lbrakk>cap = arch_cap.IOPortCap f l; acap_relation cap cap' \<rbrakk> \<Longrightarrow>
corres (ser \<oplus> archinv_relation)
(invs and valid_cap (cap.ArchObjectCap cap))
(invs' and valid_cap' (capability.ArchObjectCap cap'))
(decode_port_invocation label args cap)
(decodeX64PortInvocation label args slot cap' extraCaps')"
apply (simp add: decode_port_invocation_def decodeX64PortInvocation_def)
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortIn8")
apply (simp add: Let_def isCap_simps whenE_def)
apply (intro conjI impI)
apply (clarsimp simp: neq_Nil_conv)
apply (rule corres_guard_imp)
apply (rule corres_split_norE)
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_invocation_map_def ioport_data_relation_def)
apply (rule ensure_port_op_allowed_corres, simp, simp)
apply wpsimp+
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortIn16")
apply (simp add: Let_def isCap_simps whenE_def)
apply (intro conjI impI)
apply (clarsimp simp: neq_Nil_conv)
apply (rule corres_guard_imp)
apply (rule corres_split_norE)
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_invocation_map_def ioport_data_relation_def)
apply (rule ensure_port_op_allowed_corres, simp, simp)
apply wpsimp+
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortIn32")
apply (simp add: Let_def isCap_simps whenE_def)
apply (intro conjI impI)
apply (clarsimp simp: neq_Nil_conv)
apply (rule corres_guard_imp)
apply (rule corres_split_norE)
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_invocation_map_def ioport_data_relation_def)
apply (rule ensure_port_op_allowed_corres, simp, simp)
apply wpsimp+
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortOut8")
apply (simp add: Let_def isCap_simps whenE_def)
apply (clarsimp simp: neq_Nil_conv split: list.splits)+
apply (rule corres_guard_imp)
apply (rule corres_split_norE)
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_invocation_map_def ioport_data_relation_def)
apply (rule ensure_port_op_allowed_corres, simp, simp)
apply wpsimp+
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortOut16")
apply (simp add: Let_def isCap_simps whenE_def)
apply (clarsimp simp: neq_Nil_conv split: list.splits)+
apply (rule corres_guard_imp)
apply (rule corres_split_norE)
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_invocation_map_def ioport_data_relation_def)
apply (rule ensure_port_op_allowed_corres, simp, simp)
apply wpsimp+
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortOut32")
apply (simp add: Let_def isCap_simps whenE_def)
apply (clarsimp simp: neq_Nil_conv split: list.splits)+
apply (rule corres_guard_imp)
apply (rule corres_split_norE)
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_invocation_map_def ioport_data_relation_def)
apply (rule ensure_port_op_allowed_corres, simp, simp)
apply wpsimp+
apply (clarsimp simp: isCap_simps Let_def split: arch_invocation_label.splits invocation_label.splits)
done
lemma free_range_corres:
"(\<not> foldl (\<lambda>x y. x \<or> f y) False (ls::'a::len word list)) = (set ls \<inter> Collect f = {})"
apply (subst foldl_fun_or_alt)
apply (fold orList_def)
apply (simp only: orList_False)
by auto
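(* free_range_corres: the folded disjunction over a word list is False exactly when the
   list is disjoint from the set characterised by f; this links the executable
   port-range check to its set-based abstract description. *)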
lemma is_ioport_range_free_corres:
"f \<le> l \<Longrightarrow> corres (=) \<top> \<top>
(is_ioport_range_free f l)
(isIOPortRangeFree f l)"
apply (clarsimp simp: is_ioport_range_free_def isIOPortRangeFree_def)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF _ corres_gets_allocated_io_ports])
apply (rule corres_return_eq_same)
apply (auto simp: free_range_corres)
done
lemma isIOPortRangeFree_wp:
"\<lbrace>\<lambda>s. \<forall>rv. (rv \<longrightarrow> {f..l} \<inter> issued_ioports' (ksArchState s) = {}) \<longrightarrow> Q rv s\<rbrace> isIOPortRangeFree f l \<lbrace>Q\<rbrace>"
apply (wpsimp simp: isIOPortRangeFree_def)
apply (subst free_range_corres)
apply (clarsimp simp: issued_ioports'_def)
by (simp add: disjoint_iff_not_equal)
lemma decode_ioport_control_inv_corres:
"\<lbrakk>list_all2 cap_relation caps caps'; cap = arch_cap.IOPortControlCap; acap_relation cap cap'\<rbrakk> \<Longrightarrow>
corres (ser \<oplus> archinv_relation)
(invs and (\<lambda>s. \<forall>cp \<in> set caps. s \<turnstile> cp))
(invs' and (\<lambda>s. \<forall>cp \<in> set caps'. s \<turnstile>' cp))
(decode_ioport_control_invocation label args slot cap caps)
(decodeX64PortInvocation label args (cte_map slot) cap' caps')"
supply if_splits[split del]
apply (clarsimp simp: decode_ioport_control_invocation_def X64_H.decodeX64PortInvocation_def Let_def)
apply (cases "invocation_type label = ArchInvocationLabel X64IOPortControlIssue")
apply (clarsimp split: if_splits simp: isCap_simps)
apply (rule conjI, clarsimp split del: if_splits)
prefer 2
apply clarsimp
apply (cases caps, simp)
apply (auto split: arch_invocation_label.splits list.splits invocation_label.splits
simp: length_Suc_conv list_all2_Cons1 whenE_rangeCheck_eq liftE_bindE split_def)[2]
apply (cases caps, simp split: list.split)
apply (case_tac "\<exists>n. length args = Suc (Suc (Suc (Suc n)))",
clarsimp simp: length_Suc_conv list_all2_Cons1 whenE_rangeCheck_eq
liftE_bindE split_def)
prefer 2 apply (auto split: list.split)[1]
apply (clarsimp simp: Let_def)
apply (rule corres_guard_imp)
apply (rule whenE_throwError_corres)
apply clarsimp
apply clarsimp
apply (rule corres_split_eqr[OF _ is_ioport_range_free_corres])
apply (clarsimp simp: unlessE_whenE)
apply (rule whenE_throwError_corres)
apply clarsimp
apply clarsimp
apply (clarsimp simp: lookupTargetSlot_def)
apply (rule corres_splitEE[OF _ lsfc_corres])
apply (rule corres_splitEE[OF _ ensure_empty_corres])
apply (rule corres_returnOkTT)
apply (clarsimp simp: archinv_relation_def ioport_control_inv_relation_def)
apply clarsimp
apply wp
apply wp
apply (clarsimp simp: cap_relation_def)
apply clarsimp
apply wpsimp
apply wpsimp
apply (clarsimp simp: word_le_not_less)
apply clarsimp
apply (wpsimp wp: is_ioport_range_free_wp)
apply clarsimp
apply (wpsimp wp: isIOPortRangeFree_wp)
apply (clarsimp simp: invs_valid_objs)
apply (clarsimp simp: invs_valid_objs' invs_pspace_aligned')
apply (clarsimp simp: isCap_simps split: invocation_label.splits arch_invocation_label.splits)
done
lemma dec_arch_inv_corres:
notes check_vp_inv[wp del] check_vp_wpR[wp]
(* FIXME: check_vp_inv shadowed check_vp_wpR. Instead,
check_vp_wpR should probably be generalised to replace check_vp_inv. *)
shows
"\<lbrakk> acap_relation arch_cap arch_cap';
list_all2 cap_relation (map fst excaps) (map fst excaps');
list_all2 (\<lambda>s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \<rbrakk> \<Longrightarrow>
corres
(ser \<oplus> archinv_relation)
(invs and valid_cap (cap.ArchObjectCap arch_cap) and
cte_wp_at ((=) (cap.ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x\<in>set excaps. s \<turnstile> fst x \<and> cte_at (snd x) s))
(invs' and valid_cap' (capability.ArchObjectCap arch_cap') and
(\<lambda>s. \<forall>x\<in>set excaps'. s \<turnstile>' fst x \<and> cte_at' (snd x) s))
(arch_decode_invocation (mi_label mi) args (to_bl cptr') slot
arch_cap excaps)
(Arch.decodeInvocation (mi_label mi) args cptr'
(cte_map slot) arch_cap' excaps')"
apply (simp add: arch_decode_invocation_def
X64_H.decodeInvocation_def
decodeX64MMUInvocation_def
split del: if_split)
apply (cases arch_cap)
\<comment> \<open>ASIDPoolCap\<close>
apply (simp add: isCap_simps isIOCap_def decodeX64MMUInvocation_def
decodeX64ASIDPoolInvocation_def Let_def
split del: if_split)
apply (cases "invocation_type (mi_label mi) \<noteq> ArchInvocationLabel X64ASIDPoolAssign")
apply (simp split: invocation_label.split arch_invocation_label.split)
apply (rename_tac word1 word2)
apply (cases "excaps", simp)
apply (cases "excaps'", simp)
apply clarsimp
apply (case_tac a, simp_all)[1]
apply (rename_tac arch_capa)
apply (case_tac arch_capa, simp_all)[1]
apply (rename_tac word3 option)
apply (case_tac option, simp_all)[1]
apply (rule corres_guard_imp)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_trivial [where r="ser \<oplus> (\<lambda>p p'. p = p' o ucast)"])
apply (clarsimp simp: state_relation_def arch_state_relation_def)
apply (rule whenE_throwError_corres, simp)
apply (simp add: lookup_failure_map_def)
apply simp
apply (rule_tac P="\<lambda>s. asid_table (asid_high_bits_of word2) = Some word1 \<longrightarrow> asid_pool_at word1 s" and
P'="pspace_aligned' and pspace_distinct'" in corres_inst)
apply (simp add: liftME_return)
apply (rule whenE_throwError_corres_initial, simp)
apply auto[1]
apply (rule corres_guard_imp)
apply (rule corres_splitEE)
prefer 2
apply simp
apply (rule get_asid_pool_corres_inv'[OF refl])
apply (simp add: bindE_assoc)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_whenE)
apply (subst conj_assoc [symmetric])
apply (subst assocs_empty_dom_comp [symmetric])
apply (rule dom_ucast_eq)
apply (rule corres_trivial)
apply simp
apply simp
apply (rule_tac F="- dom pool \<inter> {x. ucast x + word2 \<noteq> 0} \<noteq> {}" in corres_gen_asm)
apply (frule dom_hd_assocsD)
apply (simp add: select_ext_fap[simplified free_asid_pool_select_def]
free_asid_pool_select_def)
apply (simp add: returnOk_liftE[symmetric])
apply (rule corres_returnOk)
apply (simp add: archinv_relation_def asid_pool_invocation_map_def)
apply (rule hoare_pre, wp hoare_whenE_wp)
apply (clarsimp simp: ucast_fst_hd_assocs)
apply (wp hoareE_TrueI hoare_whenE_wp getASID_wp | simp)+
apply ((clarsimp simp: p2_low_bits_max | rule TrueI impI)+)[2]
apply (wp hoare_whenE_wp getASID_wp)+
apply (clarsimp simp: valid_cap_def)
apply auto[1]
\<comment> \<open>ASIDControlCap\<close>
apply (simp add: isCap_simps isIOCap_def decodeX64MMUInvocation_def
Let_def decodeX64ASIDControlInvocation_def
split del: if_split)
apply (cases "invocation_type (mi_label mi) \<noteq> ArchInvocationLabel X64ASIDControlMakePool")
apply (simp split: invocation_label.split arch_invocation_label.split)
apply (subgoal_tac "length excaps' = length excaps")
prefer 2
apply (simp add: list_all2_iff)
apply (cases args, simp)
apply (rename_tac a0 as)
apply (case_tac as, simp)
apply (rename_tac a1 as')
apply (cases excaps, simp)
apply (rename_tac excap0 exs)
apply (case_tac exs)
apply (auto split: list.split)[1]
apply (rename_tac excap1 exss)
apply (case_tac excap0)
apply (rename_tac c0 slot0)
apply (case_tac excap1)
apply (rename_tac c1 slot1)
apply (clarsimp simp: Let_def split del: if_split)
apply (cases excaps', simp)
apply (case_tac list, simp)
apply (rename_tac c0' exs' c1' exss')
apply (clarsimp split del: if_split)
apply (rule corres_guard_imp)
apply (rule corres_splitEE [where r'="\<lambda>p p'. p = p' o ucast"])
prefer 2
apply (rule corres_trivial)
apply (clarsimp simp: state_relation_def arch_state_relation_def)
apply (rule corres_splitEE)
prefer 2
apply (rule corres_whenE)
apply (subst assocs_empty_dom_comp [symmetric])
apply (simp add: o_def)
apply (rule dom_ucast_eq_8)
apply (rule corres_trivial, simp, simp)
apply (simp split del: if_split)
apply (rule_tac F="- dom (asidTable \<circ> ucast) \<inter> {x. x \<le> 2 ^ asid_high_bits - 1} \<noteq> {}" in corres_gen_asm)
apply (drule dom_hd_assocsD)
apply (simp add: select_ext_fa[simplified free_asid_select_def]
free_asid_select_def o_def returnOk_liftE[symmetric] split del: if_split)
apply (thin_tac "fst a \<notin> b \<and> P" for a b P)
apply (case_tac "isUntypedCap a \<and> capBlockSize a = objBits (makeObject::asidpool) \<and> \<not> capIsDevice a")
prefer 2
apply (rule corres_guard_imp)
apply (rule corres_trivial)
apply (case_tac ad, simp_all add: isCap_simps
split del: if_split)[1]
apply (case_tac x21, simp_all split del: if_split)[1]
apply (clarsimp simp: objBits_simps archObjSize_def
split del: if_split)
apply clarsimp
apply (rule TrueI)+
apply (clarsimp simp: isCap_simps cap_relation_Untyped_eq lookupTargetSlot_def
objBits_simps archObjSize_def bindE_assoc split_def)
apply (rule corres_splitEE)
prefer 2
apply (rule ensure_no_children_corres, rule refl)
apply (rule corres_splitEE)
prefer 2
apply (erule lsfc_corres, rule refl)
apply (rule corres_splitEE)
prefer 2
apply (rule ensure_empty_corres)
apply clarsimp
apply (rule corres_returnOk[where P="\<top>"])
apply (clarsimp simp add: archinv_relation_def asid_ci_map_def split_def)
apply (clarsimp simp add: ucast_assocs[unfolded o_def] split_def
filter_map asid_high_bits_def)
apply (simp add: ord_le_eq_trans [OF word_n1_ge])
apply (wp hoare_drop_imps)+
apply (simp add: o_def validE_R_def)
apply (fastforce simp: asid_high_bits_def)
apply clarsimp
apply (simp add: null_def split_def asid_high_bits_def
word_le_make_less)
apply (subst hd_map, assumption)
(* need abstract guard to show list nonempty *)
apply (simp add: word_le_make_less)
apply (subst ucast_ucast_len)
apply (drule hd_in_set)
apply simp
apply fastforce
\<comment> \<open>IOPortCap\<close>
apply (simp add: isCap_simps isIOCap_def Let_def split del: if_split)
apply (rule corres_guard_imp, rule decode_port_inv_corres; simp)
\<comment> \<open>IOPortControlCap\<close>
apply (simp add: isCap_simps isIOCap_def Let_def split del: if_split)
apply (rule corres_guard_imp, rule decode_ioport_control_inv_corres; simp)
\<comment> \<open>PageCap\<close>
apply (rename_tac word cap_rights vmpage_size option)
apply (simp add: isCap_simps isIOCap_def decodeX64MMUInvocation_def Let_def
split del: if_split)
apply (rule decode_page_inv_corres; simp)
\<comment> \<open>PageTableCap\<close>
apply (simp add: isCap_simps isIOCap_def decodeX64MMUInvocation_def Let_def
split del: if_split)
apply (rule decode_page_table_inv_corres; simp)
\<comment> \<open>PageDirectoryCap\<close>
apply (simp add: isCap_simps isIOCap_def decodeX64MMUInvocation_def Let_def
split del: if_split)
apply (rule decode_page_directory_inv_corres; simp)
\<comment> \<open>PDPointerTableCap\<close>
apply (simp add: isCap_simps isIOCap_def decodeX64MMUInvocation_def Let_def
split del: if_split)
apply (rule decode_pdpt_inv_corres; simp)
\<comment> \<open>PML4Cap - no invocations\<close>
apply (clarsimp simp: isCap_simps isIOCap_def decodeX64MMUInvocation_def Let_def
split del: if_split)
done
lemma not_InvokeIOPort_rel: "\<lbrakk>archinv_relation ai ai'; \<forall>x. ai \<noteq> arch_invocation.InvokeIOPort x\<rbrakk> \<Longrightarrow>
\<forall>y. ai' \<noteq> InvokeIOPort y"
by (clarsimp simp: archinv_relation_def split: arch_invocation.splits)
lemma not_InvokeIOPort_perform_simp': "\<forall>y. ai' \<noteq> InvokeIOPort y \<Longrightarrow>
(case ai' of invocation.InvokeIOPort x \<Rightarrow> performX64PortInvocation ai'
| _ \<Rightarrow> performX64MMUInvocation ai') = performX64MMUInvocation ai'"
by (case_tac ai'; clarsimp)
lemmas not_InvokeIOPort_perform_simp[simp] = not_InvokeIOPort_perform_simp'[OF not_InvokeIOPort_rel]
lemma port_in_corres[corres]:
"no_fail \<top> a \<Longrightarrow> corres (=) \<top> \<top> (port_in a) (portIn a)"
apply (clarsimp simp: port_in_def portIn_def)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr)
apply wpsimp
apply (rule corres_machine_op[OF corres_Id], simp+)
by wpsimp+
lemma port_out_corres[@lift_corres_args, corres]:
"no_fail \<top> (a w) \<Longrightarrow> corres (=) \<top> \<top> (port_out a w) (portOut a w)"
apply (clarsimp simp: port_out_def portOut_def)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr)
apply wpsimp
apply (rule corres_machine_op[OF corres_Id], simp+)
apply wpsimp+
done
lemma perform_port_inv_corres:
"\<lbrakk>archinv_relation ai ai'; ai = arch_invocation.InvokeIOPort x\<rbrakk>
\<Longrightarrow> corres (intr \<oplus> (=))
(einvs and ct_active and valid_arch_inv ai)
(invs' and ct_active' and valid_arch_inv' ai')
(liftE (perform_io_port_invocation x))
(performX64PortInvocation ai')"
apply (clarsimp simp: perform_io_port_invocation_def performX64PortInvocation_def
archinv_relation_def ioport_invocation_map_def)
apply (case_tac x; clarsimp)
apply (corressimp corres: port_in_corres simp: ioport_data_relation_def)
by (auto simp: no_fail_in8 no_fail_in16 no_fail_in32
no_fail_out8 no_fail_out16 no_fail_out32)
crunches setIOPortMask
for valid_pspace'[wp]: valid_pspace'
and valid_cap'[wp]: "valid_cap' c"
lemma setIOPortMask_invs':
"\<lbrace>invs' and (\<lambda>s. \<not> b \<longrightarrow> (\<forall>cap'\<in>ran (cteCaps_of s). cap_ioports' cap' \<inter> {f..l} = {}))\<rbrace> setIOPortMask f l b \<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (wpsimp wp: setIOPortMask_ioports' simp: invs'_def valid_state'_def setIOPortMask_def simp_del: fun_upd_apply)
apply (clarsimp simp: foldl_map foldl_fun_upd_value valid_global_refs'_def global_refs'_def
valid_arch_state'_def valid_machine_state'_def)
apply (case_tac b; clarsimp simp: valid_ioports'_simps foldl_fun_upd_value)
apply (drule_tac x=cap in bspec, assumption)
apply auto[1]
apply (drule_tac x=cap in bspec, assumption)
by auto
lemma valid_ioports_issuedD':
"\<lbrakk>valid_ioports' s; cteCaps_of s src = Some cap\<rbrakk> \<Longrightarrow> cap_ioports' cap \<subseteq> issued_ioports' (ksArchState s)"
apply (clarsimp simp: valid_ioports'_def all_ioports_issued'_def)
by auto
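(* valid_ioports_issuedD': every I/O port covered by a capability in the CTE heap has
   been issued; this is what the safe_parent_for' argument for IOPortControlCap in
   perform_ioport_control_inv_corres relies on. *)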
lemma perform_ioport_control_inv_corres:
"\<lbrakk>archinv_relation ai ai'; ai = arch_invocation.InvokeIOPortControl x\<rbrakk> \<Longrightarrow>
corres (intr \<oplus> (=))
(einvs and ct_active and valid_arch_inv ai)
(invs' and ct_active' and valid_arch_inv' ai')
(liftE (do perform_ioport_control_invocation x; return [] od))
(performX64PortInvocation ai')"
apply (clarsimp simp: perform_ioport_control_invocation_def performX64PortInvocation_def
archinv_relation_def ioport_control_inv_relation_def)
apply (case_tac x; clarsimp simp: bind_assoc simp del: split_paired_All)
apply (rule corres_guard_imp)
apply (rule corres_split_nor[OF _ set_ioport_mask_corres])
apply (rule corres_split_nor[OF _ cins_corres_simple])
apply (rule corres_return_eq_same, simp)
apply (clarsimp simp: cap_relation_def)
apply simp
apply simp
apply wpsimp
apply wpsimp
apply (clarsimp simp: is_simple_cap_def is_cap_simps)
apply wpsimp
apply (strengthen invs_distinct[mk_strg] invs_psp_aligned_strg invs_strgs)
apply (wpsimp wp: set_ioport_mask_invs set_ioport_mask_safe_parent_for)
apply (clarsimp simp: is_simple_cap'_def isCap_simps)
apply wpsimp
apply (strengthen invs_mdb'[mk_strg])
apply (wpsimp wp: setIOPortMask_invs')
apply (clarsimp simp: invs_valid_objs valid_arch_inv_def valid_iocontrol_inv_def cte_wp_at_caps_of_state)
apply (rule conjI, clarsimp)
apply (clarsimp simp: safe_parent_for_def safe_parent_for_arch_def)
apply (clarsimp simp: invs_pspace_distinct' invs_pspace_aligned' valid_arch_inv'_def ioport_control_inv_valid'_def
valid_cap'_def capAligned_def word_bits_def)
apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of)
apply (case_tac ctea)
apply (clarsimp simp: isCap_simps sameRegionAs_def3)
apply (drule_tac src=p in valid_ioports_issuedD'[OF invs_valid_ioports'])
apply (fastforce simp: cteCaps_of_def)
apply force
done
lemma arch_ioport_inv_case_simp:
"\<lbrakk>archinv_relation ai ai';
\<nexists>x. ai = arch_invocation.InvokeIOPort x;
\<nexists>x. ai = arch_invocation.InvokeIOPortControl x\<rbrakk>
\<Longrightarrow> (case ai' of
invocation.InvokeIOPort x \<Rightarrow> performX64PortInvocation ai'
| invocation.InvokeIOPortControl x \<Rightarrow>
performX64PortInvocation ai'
| _ \<Rightarrow> performX64MMUInvocation ai') = performX64MMUInvocation ai'"
by (clarsimp simp: archinv_relation_def split: invocation.splits arch_invocation.splits)
lemma inv_arch_corres:
"archinv_relation ai ai' \<Longrightarrow>
corres (intr \<oplus> (=))
(einvs and ct_active and valid_arch_inv ai)
(invs' and ct_active' and valid_arch_inv' ai')
(arch_perform_invocation ai) (Arch.performInvocation ai')"
apply (clarsimp simp: arch_perform_invocation_def
X64_H.performInvocation_def
performX64MMUInvocation_def)
apply (cases "\<exists>x. ai = arch_invocation.InvokeIOPort x")
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp[OF perform_port_inv_corres[where ai=ai, simplified]];
clarsimp simp: archinv_relation_def)
apply (cases "\<exists>x. ai = arch_invocation.InvokeIOPortControl x")
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp[OF perform_ioport_control_inv_corres[where ai=ai, simplified]];
clarsimp simp: archinv_relation_def)
apply (subst arch_ioport_inv_case_simp; simp)
apply (clarsimp simp: archinv_relation_def)
apply (clarsimp simp: performX64MMUInvocation_def)
apply (cases ai)
apply (clarsimp simp: archinv_relation_def performX64MMUInvocation_def)
apply (rule corres_guard_imp, rule corres_split_nor, rule corres_trivial, simp)
apply (rule perform_page_table_corres; wpsimp)
apply wpsimp+
apply (fastforce simp: valid_arch_inv_def)
apply (fastforce simp: valid_arch_inv'_def)
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp, rule corres_split_nor, rule corres_trivial, simp)
apply (rule perform_page_directory_corres; wpsimp)
apply wpsimp+
apply (fastforce simp: valid_arch_inv_def)
apply (fastforce simp: valid_arch_inv'_def)
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp, rule corres_split_nor, rule corres_trivial, simp)
apply (rule perform_pdpt_corres; wpsimp)
apply wpsimp+
apply (fastforce simp: valid_arch_inv_def)
apply (fastforce simp: valid_arch_inv'_def)
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp, rule corres_split_nor, rule corres_trivial, simp)
apply (rule perform_page_corres; wpsimp)
apply wpsimp+
apply (fastforce simp: valid_arch_inv_def)
apply (fastforce simp: valid_arch_inv'_def)
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp, rule corres_split_nor, rule corres_trivial, simp)
apply (rule pac_corres; wpsimp)
apply wpsimp+
apply (fastforce simp: valid_arch_inv_def)
apply (fastforce simp: valid_arch_inv'_def)
apply (clarsimp simp: archinv_relation_def)
apply (rule corres_guard_imp, rule corres_split_nor, rule corres_trivial, simp)
apply (rule pap_corres; wpsimp)
apply wpsimp+
apply (fastforce simp: valid_arch_inv_def)
apply (fastforce simp: valid_arch_inv'_def)
apply clarsimp
apply clarsimp
done
lemma asid_pool_typ_at_ext':
"asid_pool_at' = obj_at' (\<top>::asidpool \<Rightarrow> bool)"
apply (rule ext)+
apply (simp add: typ_at_to_obj_at_arches)
done
lemma st_tcb_strg':
"st_tcb_at' P p s \<longrightarrow> tcb_at' p s"
by (auto simp: pred_tcb_at')
lemma performASIDControlInvocation_tcb_at':
"\<lbrace>st_tcb_at' active' p and invs' and ct_active' and valid_aci' aci\<rbrace>
performASIDControlInvocation aci
\<lbrace>\<lambda>y. tcb_at' p\<rbrace>"
apply (rule hoare_name_pre_state)
apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits)
apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong)
apply (wp static_imp_wp |simp add:placeNewObject_def2)+
apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+
apply (clarsimp simp: projectKO_opts_defs)
apply (strengthen st_tcb_strg' [where P=\<top>])
apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"]
hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False]
deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp
updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+
apply (case_tac ctea)
apply (clarsimp)
apply (frule ctes_of_valid_cap')
apply (simp add:invs_valid_objs')+
apply (clarsimp simp:valid_cap'_def capAligned_def cte_wp_at_ctes_of)
apply (strengthen refl order_refl
pred_tcb'_weakenE[mk_strg I E])
apply (clarsimp simp: conj_comms invs_valid_pspace' isCap_simps
descendants_range'_def2 empty_descendants_range_in')
apply (frule ctes_of_valid', clarsimp, simp,
drule capFreeIndex_update_valid_cap'[where fb="2 ^ pageBits", rotated -1],
simp_all)
apply (simp add: pageBits_def is_aligned_def untypedBits_defs)
apply (simp add: valid_cap_simps' range_cover_def objBits_simps archObjSize_def untypedBits_defs
capAligned_def unat_eq_0 and_mask_eq_iff_shiftr_0[symmetric]
word_bw_assocs)
apply clarsimp
apply (drule(1) cte_cap_in_untyped_range,
fastforce simp add: cte_wp_at_ctes_of, assumption, simp_all)
apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of)
apply clarsimp
done
crunch tcb_at'[wp]: performX64PortInvocation "tcb_at' t"
lemma invokeArch_tcb_at':
"\<lbrace>invs' and valid_arch_inv' ai and ct_active' and st_tcb_at' active' p\<rbrace>
Arch.performInvocation ai
\<lbrace>\<lambda>rv. tcb_at' p\<rbrace>"
apply (simp add: X64_H.performInvocation_def performX64MMUInvocation_def)
apply (wpsimp simp: performX64MMUInvocation_def pred_tcb_at' valid_arch_inv'_def
wp: performASIDControlInvocation_tcb_at')
done
(* FIXME random place to have these *)
lemma pspace_no_overlap_queuesL1 [simp]:
"pspace_no_overlap' w sz (ksReadyQueuesL1Bitmap_update f s) = pspace_no_overlap' w sz s"
by (simp add: pspace_no_overlap'_def)
(* FIXME random place to have these *)
lemma pspace_no_overlap_queuesL2 [simp]:
"pspace_no_overlap' w sz (ksReadyQueuesL2Bitmap_update f s) = pspace_no_overlap' w sz s"
by (simp add: pspace_no_overlap'_def)
crunch pspace_no_overlap'[wp]: setThreadState "pspace_no_overlap' w s"
(simp: unless_def)
lemma sts_cte_cap_to'[wp]:
"\<lbrace>ex_cte_cap_to' p\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. ex_cte_cap_to' p\<rbrace>"
by (wp ex_cte_cap_to'_pres)
lemma valid_slots_lift':
assumes t: "\<And>T p. \<lbrace>typ_at' T p\<rbrace> f \<lbrace>\<lambda>rv. typ_at' T p\<rbrace>"
shows "\<lbrace>valid_slots' x\<rbrace> f \<lbrace>\<lambda>rv. valid_slots' x\<rbrace>"
apply (clarsimp simp: valid_slots'_def)
apply (case_tac x, clarsimp split: vmpage_entry.splits)
apply safe
apply (rule hoare_pre, wp hoare_vcg_const_Ball_lift t valid_pde_lift' valid_pte_lift' valid_pdpte_lift', simp)+
done
lemma sts_valid_arch_inv':
"\<lbrace>valid_arch_inv' ai\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. valid_arch_inv' ai\<rbrace>"
apply (cases ai, simp_all add: valid_arch_inv'_def)
apply (clarsimp simp: valid_pdpti'_def split: pdptinvocation.splits)
apply (intro allI conjI impI)
apply wpsimp+
apply (clarsimp simp: valid_pdi'_def split: page_directory_invocation.splits)
apply (intro allI conjI impI)
apply wpsimp+
apply (clarsimp simp: valid_pti'_def split: page_table_invocation.splits)
apply (intro allI conjI impI)
apply (wp | simp)+
apply (rename_tac page_invocation)
apply (case_tac page_invocation, simp_all add: valid_page_inv'_def)[1]
apply (wp valid_slots_lift' |simp)+
apply (clarsimp simp: valid_aci'_def split: asidcontrol_invocation.splits)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (rule hoare_pre, wp)
apply clarsimp
apply (clarsimp simp: valid_apinv'_def split: asidpool_invocation.splits)
apply (rule hoare_pre, wp)
apply simp
apply wp
apply (clarsimp simp: ioport_control_inv_valid'_def split:ioport_control_invocation.splits)
apply wpsimp
done
lemma inv_ASIDPool: "inv ASIDPool = (\<lambda>v. case v of ASIDPool a \<Rightarrow> a)"
apply (rule ext)
apply (case_tac v)
apply simp
apply (rule inv_f_f, rule inj_onI)
apply simp
done
lemma eq_arch_update':
"ArchObjectCap cp = cteCap cte \<Longrightarrow> is_arch_update' (ArchObjectCap cp) cte"
by (clarsimp simp: is_arch_update'_def isCap_simps)
lemma lookup_pdpt_slot_no_fail_corres[simp]:
"lookupPDPTSlotFromPDPT pt vptr
= (do stateAssert (pd_pointer_table_at' pt) []; return (lookup_pdpt_slot_no_fail pt vptr) od)"
by (simp add: lookup_pdpt_slot_no_fail_def lookupPDPTSlotFromPDPT_def
mask_def checkPDPTAt_def word_size_bits_def)
lemma lookup_pd_slot_no_fail_corres[simp]:
"lookupPDSlotFromPD pt vptr
= (do stateAssert (page_directory_at' pt) []; return (lookup_pd_slot_no_fail pt vptr) od)"
by (simp add: lookup_pd_slot_no_fail_def lookupPDSlotFromPD_def
mask_def checkPDAt_def word_size_bits_def)
lemma lookup_pt_slot_no_fail_corres[simp]:
"lookupPTSlotFromPT pt vptr
= (do stateAssert (page_table_at' pt) []; return (lookup_pt_slot_no_fail pt vptr) od)"
by (simp add: lookup_pt_slot_no_fail_def lookupPTSlotFromPT_def
mask_def checkPTAt_def word_size_bits_def)
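(* On the executable side the no-fail slot lookups are a typing state assertion followed
   by the pure slot computation of the abstract spec; declaring these equations [simp]
   lets subsequent proofs unfold them directly. *)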
lemma decode_page_inv_wf[wp]:
"cap = (arch_capability.PageCap word vmrights mt vmpage_size d option) \<Longrightarrow>
\<lbrace>invs' and valid_cap' (capability.ArchObjectCap cap ) and
cte_wp_at' ((=) (capability.ArchObjectCap cap) \<circ> cteCap) slot and
(\<lambda>s. \<forall>x\<in>set excaps. cte_wp_at' ((=) (fst x) \<circ> cteCap) (snd x) s) and
sch_act_simple\<rbrace>
decodeX64FrameInvocation label args slot cap excaps
\<lbrace>valid_arch_inv'\<rbrace>, -"
apply (simp add: decodeX64FrameInvocation_def Let_def isCap_simps
cong: if_cong split del: if_split)
apply (cases "invocation_type label = ArchInvocationLabel X64PageMap")
apply (simp add: split_def split del: if_split
cong: list.case_cong prod.case_cong)
apply (rule hoare_pre)
apply (wp createMappingEntries_wf checkVP_wpR whenE_throwError_wp hoare_vcg_const_imp_lift_R
| wpc | simp add: valid_arch_inv'_def valid_page_inv'_def | wp (once) hoare_drop_imps)+
apply (clarsimp simp: neq_Nil_conv invs_valid_objs' linorder_not_le
cte_wp_at_ctes_of)
apply (drule ctes_of_valid', fastforce)+
apply (drule_tac t="cteCap cte" in sym)
apply (clarsimp simp: valid_cap'_def ptBits_def pageBits_def)
apply (clarsimp simp: is_arch_update'_def isCap_simps capAligned_def vmsz_aligned_def)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps)
apply (rule conjI)
apply (erule is_aligned_addrFromPPtr_n, case_tac vmpage_size; simp add: bit_simps)
apply (subgoal_tac "x < pptr_base", simp add: pptr_base_def)
apply (fastforce simp flip: word_le_not_less
intro: le_user_vtop_less_pptr_base
elim: word_add_increasing[where w="w-1" for w, simplified algebra_simps]
is_aligned_no_overflow)
apply clarsimp
apply (erule is_aligned_addrFromPPtr_n, case_tac vmpage_size; simp add: bit_simps)
apply (cases "invocation_type label = ArchInvocationLabel X64PageUnmap")
apply (simp split del: if_split)
apply (rule hoare_pre, wp)
apply (clarsimp simp: valid_arch_inv'_def valid_page_inv'_def)
apply (thin_tac "Ball S P" for S P)
apply (erule cte_wp_at_weakenE')
apply (clarsimp simp: is_arch_update'_def isCap_simps)
apply (cases "invocation_type label = ArchInvocationLabel X64PageGetAddress")
apply (simp split del: if_split)
apply (rule hoare_pre, wp)
apply (clarsimp simp: valid_arch_inv'_def valid_page_inv'_def)
by (simp add:throwError_R'
split: invocation_label.splits arch_invocation_label.splits)
lemma decode_page_table_inv_wf[wp]:
"arch_cap = PageTableCap word option \<Longrightarrow>
\<lbrace>invs' and valid_cap' (capability.ArchObjectCap arch_cap) and
cte_wp_at' ((=) (capability.ArchObjectCap arch_cap) \<circ> cteCap) slot and
(\<lambda>s. \<forall>x\<in>set excaps. cte_wp_at' ((=) (fst x) \<circ> cteCap) (snd x) s) and
sch_act_simple\<rbrace>
decodeX64PageTableInvocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv'\<rbrace>, - "
apply (simp add: decodeX64PageTableInvocation_def Let_def isCap_simps split del: if_split cong: if_cong)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp isFinalCapability_inv getPDE_wp
| wpc
| simp add: valid_arch_inv'_def valid_pti'_def if_apply_def2
| wp (once) hoare_drop_imps)+)
apply (clarsimp simp: linorder_not_le isCap_simps cte_wp_at_ctes_of)
apply (frule eq_arch_update')
apply (case_tac option; clarsimp)
apply (drule_tac t="cteCap ctea" in sym, simp)
apply (clarsimp simp: is_arch_update'_def isCap_simps valid_cap'_def capAligned_def)
apply (thin_tac "Ball S P" for S P)+
apply (drule ctes_of_valid', fastforce)+
apply (clarsimp simp: valid_cap'_def bit_simps is_aligned_addrFromPPtr_n
invs_valid_objs' and_not_mask[symmetric])
apply (clarsimp simp: mask_def X64.pptrBase_def X64.pptrUserTop_def user_vtop_def)
apply word_bitwise
apply auto
done
lemma decode_page_directory_inv_wf[wp]:
"arch_cap = PageDirectoryCap word option \<Longrightarrow>
\<lbrace>invs' and valid_cap' (capability.ArchObjectCap arch_cap) and
cte_wp_at' ((=) (capability.ArchObjectCap arch_cap) \<circ> cteCap) slot and
(\<lambda>s. \<forall>x\<in>set excaps. cte_wp_at' ((=) (fst x) \<circ> cteCap) (snd x) s) and
sch_act_simple\<rbrace>
decodeX64PageDirectoryInvocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv'\<rbrace>, - "
apply (simp add: decodeX64PageDirectoryInvocation_def Let_def isCap_simps split del: if_split cong: if_cong)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp isFinalCapability_inv getPDPTE_wp
| wpc
| simp add: valid_arch_inv'_def valid_pdi'_def if_apply_def2
| wp (once) hoare_drop_imps)+)
apply (clarsimp simp: linorder_not_le isCap_simps cte_wp_at_ctes_of)
apply (frule eq_arch_update')
apply (case_tac option; clarsimp)
apply (drule_tac t="cteCap ctea" in sym, simp)
apply (clarsimp simp: is_arch_update'_def isCap_simps valid_cap'_def capAligned_def)
apply (thin_tac "Ball S P" for S P)+
apply (drule ctes_of_valid', fastforce)+
apply (clarsimp simp: valid_cap'_def bit_simps is_aligned_addrFromPPtr_n
invs_valid_objs' and_not_mask[symmetric])
apply (clarsimp simp: mask_def X64.pptrBase_def X64.pptrUserTop_def user_vtop_def)
apply word_bitwise
apply auto
done
lemma decode_pdpt_inv_wf[wp]:
"arch_cap = PDPointerTableCap word option \<Longrightarrow>
\<lbrace>invs' and valid_cap' (capability.ArchObjectCap arch_cap) and
cte_wp_at' ((=) (capability.ArchObjectCap arch_cap) \<circ> cteCap) slot and
(\<lambda>s. \<forall>x\<in>set excaps. cte_wp_at' ((=) (fst x) \<circ> cteCap) (snd x) s) and
sch_act_simple\<rbrace>
decodeX64PDPointerTableInvocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv'\<rbrace>, - "
apply (simp add: decodeX64PDPointerTableInvocation_def Let_def isCap_simps split del: if_split cong: if_cong)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp isFinalCapability_inv getPML4E_wp
| wpc
| simp add: valid_arch_inv'_def valid_pdpti'_def if_apply_def2
| wp (once) hoare_drop_imps)+)
apply (clarsimp simp: linorder_not_le isCap_simps cte_wp_at_ctes_of)
apply (frule eq_arch_update')
apply (case_tac option; clarsimp)
apply (drule_tac t="cteCap ctea" in sym, simp)
apply (clarsimp simp: is_arch_update'_def isCap_simps valid_cap'_def capAligned_def)
apply (thin_tac "Ball S P" for S P)+
apply (drule ctes_of_valid', fastforce)+
apply (clarsimp simp: valid_cap'_def bit_simps is_aligned_addrFromPPtr_n
invs_valid_objs' and_not_mask[symmetric])
apply (clarsimp simp: mask_def X64.pptrBase_def X64.pptrUserTop_def user_vtop_def)
apply word_bitwise
apply auto
done
lemma decode_port_inv_wf:
"arch_cap = IOPortCap f l \<Longrightarrow>
\<lbrace>\<top>\<rbrace>
decodeX64PortInvocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv'\<rbrace>, - "
apply (clarsimp simp: decodeX64PortInvocation_def Let_def isCap_simps split del: if_split cong: if_cong)
by (wpsimp simp: valid_arch_inv'_def)
lemma decode_port_control_inv_wf:
"arch_cap = IOPortControlCap \<Longrightarrow>
\<lbrace>\<lambda>s. invs' s \<and> (\<forall>cap \<in> set caps. s \<turnstile>' cap)
\<and> (\<forall>cap \<in> set caps. \<forall>r \<in> cte_refs' cap (irq_node' s). ex_cte_cap_to' r s)
\<and> cte_wp_at' (\<lambda>cte. cteCap cte = ArchObjectCap IOPortControlCap) slot s\<rbrace>
decodeX64PortInvocation label args slot arch_cap caps
\<lbrace>valid_arch_inv'\<rbrace>, -"
apply (clarsimp simp add: decodeX64PortInvocation_def Let_def split_def
unlessE_whenE isCap_simps lookupTargetSlot_def
split del: if_split cong: if_cong list.case_cong prod.case_cong
arch_invocation_label.case_cong)
apply (rule hoare_pre)
apply (simp add: rangeCheck_def unlessE_whenE lookupTargetSlot_def valid_arch_inv'_def
ioport_control_inv_valid'_def
cong: list.case_cong prod.case_cong
| wp whenE_throwError_wp ensureEmptySlot_stronger isIOPortRangeFree_wp
| wpc
| wp (once) hoare_drop_imps)+
by (auto simp: invs_valid_objs')
lemma arch_decodeInvocation_wf[wp]:
notes ensureSafeMapping_inv[wp del]
shows "\<lbrace>invs' and valid_cap' (ArchObjectCap arch_cap) and
cte_wp_at' ((=) (ArchObjectCap arch_cap) o cteCap) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at' ((=) (fst x) o cteCap) (snd x) s) and
(\<lambda>s. \<forall>x \<in> set excaps. \<forall>r \<in> cte_refs' (fst x) (irq_node' s). ex_cte_cap_to' r s) and
(\<lambda>s. \<forall>x \<in> set excaps. s \<turnstile>' fst x) and
sch_act_simple\<rbrace>
Arch.decodeInvocation label args cap_index slot arch_cap excaps
\<lbrace>valid_arch_inv'\<rbrace>,-"
apply (cases arch_cap)
\<comment> \<open>ASIDPool cap\<close>
apply (simp add: decodeX64MMUInvocation_def X64_H.decodeInvocation_def
Let_def split_def isCap_simps isIOCap_def decodeX64ASIDPoolInvocation_def
cong: if_cong split del: if_split)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp getASID_wp|
wpc|
simp add: valid_arch_inv'_def valid_apinv'_def)+)[1]
apply (clarsimp simp: word_neq_0_conv valid_cap'_def valid_arch_inv'_def valid_apinv'_def)
apply (rule conjI)
apply (erule cte_wp_at_weakenE')
apply (simp, drule_tac t="cteCap c" in sym, simp)
apply (subst (asm) conj_assoc [symmetric])
apply (subst (asm) assocs_empty_dom_comp [symmetric])
apply (drule dom_hd_assocsD)
apply (simp add: capAligned_def asid_wf_def)
apply (elim conjE)
apply (subst field_simps, erule is_aligned_add_less_t2n)
apply assumption
apply (simp add: asid_low_bits_def asid_bits_def)
apply assumption
\<comment> \<open>ASIDControlCap\<close>
apply (simp add: decodeX64MMUInvocation_def X64_H.decodeInvocation_def
Let_def split_def isCap_simps isIOCap_def decodeX64ASIDControlInvocation_def
cong: if_cong invocation_label.case_cong arch_invocation_label.case_cong list.case_cong prod.case_cong
split del: if_split)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp ensureEmptySlot_stronger|
wpc|
simp add: valid_arch_inv'_def valid_aci'_def is_aligned_shiftl_self
split del: if_split)+)[1]
apply (rule_tac Q'=
"\<lambda>rv. K (fst (hd [p\<leftarrow>assocs asidTable . fst p \<le> 2 ^ asid_high_bits - 1 \<and> snd p = None])
<< asid_low_bits \<le> 2 ^ asid_bits - 1) and
real_cte_at' rv and
ex_cte_cap_to' rv and
cte_wp_at' (\<lambda>cte. \<exists>idx. cteCap cte = (UntypedCap False frame pageBits idx)) (snd (excaps!0)) and
sch_act_simple and
(\<lambda>s. descendants_of' (snd (excaps!0)) (ctes_of s) = {}) "
in hoare_post_imp_R)
apply (simp add: lookupTargetSlot_def)
apply wp
apply (clarsimp simp: cte_wp_at_ctes_of asid_wf_def)
apply (simp split del: if_split)
apply (wp ensureNoChildren_sp whenE_throwError_wp|wpc)+
apply clarsimp
apply (rule conjI)
apply (clarsimp simp: null_def neq_Nil_conv)
apply (drule filter_eq_ConsD)
apply clarsimp
apply (rule shiftl_less_t2n)
apply (simp add: asid_bits_def asid_low_bits_def asid_high_bits_def)
apply unat_arith
apply (simp add: asid_bits_def)
apply clarsimp
apply (rule conjI, fastforce)
apply (clarsimp simp: cte_wp_at_ctes_of objBits_simps archObjSize_def)
\<comment> \<open>IOPortCap\<close>
apply (simp add: decodeX64MMUInvocation_def X64_H.decodeInvocation_def
Let_def split_def isCap_simps isIOCap_def valid_arch_inv'_def
cong: if_cong split del: if_split)
apply (wp decode_port_inv_wf, simp+)
\<comment> \<open>IOPortControlCap\<close>
apply (simp add: decodeX64MMUInvocation_def X64_H.decodeInvocation_def Let_def isCap_simps
split_def isIOCap_def
cong: if_cong
split del: if_split)
apply (wp decode_port_control_inv_wf, simp)
apply (clarsimp simp: cte_wp_at_ctes_of)
\<comment> \<open>PageCap\<close>
apply (simp add: decodeX64MMUInvocation_def isCap_simps X64_H.decodeInvocation_def Let_def isIOCap_def
cong: if_cong split del: if_split)
apply (wp, simp+)
\<comment> \<open>PageTableCap\<close>
apply (simp add: decodeX64MMUInvocation_def isCap_simps X64_H.decodeInvocation_def isIOCap_def Let_def
cong: if_cong split del: if_split)
apply (wpsimp, simp+)
\<comment> \<open>PageDirectoryCap\<close>
apply (simp add: decodeX64MMUInvocation_def isCap_simps X64_H.decodeInvocation_def isIOCap_def Let_def
cong: if_cong split del: if_split)
apply (wpsimp, simp+)
\<comment> \<open>PDPointerTableCap\<close>
apply (simp add: decodeX64MMUInvocation_def isCap_simps X64_H.decodeInvocation_def isIOCap_def Let_def
cong: if_cong split del: if_split)
apply (wpsimp, simp+)
\<comment> \<open>PML4Cap\<close>
apply (simp add: decodeX64MMUInvocation_def isCap_simps X64_H.decodeInvocation_def isIOCap_def Let_def
cong: if_cong split del: if_split)
by (wpsimp)
crunch nosch[wp]: setMRs "\<lambda>s. P (ksSchedulerAction s)"
(ignore: getRestartPC setRegister transferCapsToSlots
wp: hoare_drop_imps hoare_vcg_split_case_option
mapM_wp'
simp: split_def zipWithM_x_mapM)
crunches performX64MMUInvocation, performX64PortInvocation
for nosch [wp]: "\<lambda>s. P (ksSchedulerAction s)"
(simp: crunch_simps
wp: crunch_wps getObject_cte_inv getASID_wp)
lemmas setObject_cte_st_tcb_at' [wp] = setCTE_pred_tcb_at' [unfolded setCTE_def]
crunch st_tcb_at': performPageDirectoryInvocation, performPageTableInvocation,
performPageInvocation, performPDPTInvocation,
performASIDPoolInvocation, performX64PortInvocation "st_tcb_at' P t"
(wp: crunch_wps getASID_wp getObject_cte_inv simp: crunch_simps)
lemma performASIDControlInvocation_st_tcb_at':
"\<lbrace>st_tcb_at' (P and (\<noteq>) Inactive and (\<noteq>) IdleThreadState) t and
valid_aci' aci and invs' and ct_active'\<rbrace>
performASIDControlInvocation aci
\<lbrace>\<lambda>y. st_tcb_at' P t\<rbrace>"
apply (rule hoare_name_pre_state)
apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits)
apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong)
apply (rule hoare_pre)
apply (wp createObjects_orig_obj_at'[where P="P \<circ> tcbState", folded st_tcb_at'_def]
updateFreeIndex_pspace_no_overlap' getSlotCap_wp
hoare_vcg_ex_lift
deleteObjects_cte_wp_at' deleteObjects_invs_derivatives
deleteObjects_st_tcb_at'
static_imp_wp
| simp add: placeNewObject_def2)+
apply (case_tac ctea)
apply (clarsimp)
apply (frule ctes_of_valid_cap')
apply (simp add:invs_valid_objs')+
apply (clarsimp simp:valid_cap'_def capAligned_def cte_wp_at_ctes_of)
apply (rule conjI)
apply clarsimp
apply (drule (1) cte_cap_in_untyped_range)
apply (fastforce simp add: cte_wp_at_ctes_of)
apply assumption+
subgoal by (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of)
subgoal by fastforce
apply simp
apply (rule conjI,assumption)
apply (clarsimp simp:invs_valid_pspace' objBits_simps archObjSize_def
range_cover_full descendants_range'_def2 isCap_simps)
apply (intro conjI)
apply (fastforce simp:empty_descendants_range_in')+
apply clarsimp
apply (drule (1) cte_cap_in_untyped_range)
apply (fastforce simp add: cte_wp_at_ctes_of)
apply assumption+
apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of)
apply fastforce
apply simp
apply auto
done
crunch aligned': "Arch.finaliseCap" pspace_aligned'
(wp: crunch_wps getASID_wp simp: crunch_simps)
lemmas arch_finalise_cap_aligned' = finaliseCap_aligned'
crunch distinct': "Arch.finaliseCap" pspace_distinct'
(wp: crunch_wps getASID_wp simp: crunch_simps)
lemmas arch_finalise_cap_distinct' = finaliseCap_distinct'
crunch nosch [wp]: "Arch.finaliseCap" "\<lambda>s. P (ksSchedulerAction s)"
(wp: crunch_wps getASID_wp simp: crunch_simps updateObject_default_def)
crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t"
(wp: crunch_wps getASID_wp simp: crunch_simps)
crunch typ_at' [wp]: "Arch.finaliseCap" "\<lambda>s. P (typ_at' T p s)"
(wp: crunch_wps getASID_wp simp: crunch_simps)
crunch cte_wp_at': "Arch.finaliseCap" "cte_wp_at' P p"
(wp: crunch_wps getASID_wp simp: crunch_simps)
lemma invs_asid_table_strengthen':
"invs' s \<and> asid_pool_at' ap s \<and> asid \<le> 2 ^ asid_high_bits - 1 \<longrightarrow>
invs' (s\<lparr>ksArchState :=
x64KSASIDTable_update (\<lambda>_. (x64KSASIDTable \<circ> ksArchState) s(asid \<mapsto> ap)) (ksArchState s)\<rparr>)"
apply (clarsimp simp: invs'_def valid_state'_def)
apply (rule conjI)
apply (clarsimp simp: valid_global_refs'_def global_refs'_def)
apply (clarsimp simp: valid_arch_state'_def)
apply (clarsimp simp: valid_asid_table'_def ran_def)
apply (rule conjI)
apply (clarsimp split: if_split_asm)
apply fastforce
apply (rule conjI)
apply (clarsimp simp: valid_pspace'_def)
apply (simp add: valid_machine_state'_def)
apply (clarsimp simp: valid_ioports'_simps)
done
lemma ex_cte_not_in_untyped_range:
"\<lbrakk>(ctes_of s) cref = Some (CTE (capability.UntypedCap d ptr bits idx) mnode);
descendants_of' cref (ctes_of s) = {}; invs' s;
ex_cte_cap_wp_to' (\<lambda>_. True) x s; valid_global_refs' s\<rbrakk>
\<Longrightarrow> x \<notin> {ptr .. ptr + 2 ^ bits - 1}"
apply clarsimp
apply (drule(1) cte_cap_in_untyped_range)
apply (fastforce simp:cte_wp_at_ctes_of)+
done
lemma ucast_asid_high_btis_of_le [simp]:
"ucast (asid_high_bits_of w) \<le> (2 ^ asid_high_bits - 1 :: machine_word)"
apply (simp add: asid_high_bits_of_def)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less)
apply simp
apply (simp add: asid_high_bits_def)
done
lemma performASIDControlInvocation_invs' [wp]:
"\<lbrace>invs' and ct_active' and valid_aci' aci\<rbrace>
performASIDControlInvocation aci \<lbrace>\<lambda>y. invs'\<rbrace>"
apply (rule hoare_name_pre_state)
apply (clarsimp simp: performASIDControlInvocation_def valid_aci'_def
placeNewObject_def2 cte_wp_at_ctes_of
split: asidcontrol_invocation.splits)
apply (rename_tac w1 w2 w3 w4 cte ctea idx)
apply (case_tac ctea)
apply (clarsimp)
apply (frule ctes_of_valid_cap')
apply fastforce
apply (rule hoare_pre)
apply (wp hoare_vcg_const_imp_lift)
apply (strengthen invs_asid_table_strengthen')
apply (wp cteInsert_simple_invs)
apply (wp createObjects'_wp_subst[OF
createObjects_no_cte_invs[where sz = pageBits and ty="Inl (KOArch (KOASIDPool pool))" for pool]]
createObjects_orig_cte_wp_at'[where sz = pageBits] hoare_vcg_const_imp_lift
|simp add: makeObjectKO_def projectKOs asid_pool_typ_at_ext' valid_cap'_def cong: rev_conj_cong
|strengthen safe_parent_strg'[where idx= "2^ pageBits"])+
apply (rule hoare_vcg_conj_lift)
apply (rule descendants_of'_helper)
apply (wp createObjects_null_filter'
[where sz = pageBits and ty="Inl (KOArch (KOASIDPool ap))" for ap]
createObjects_valid_pspace'
[where sz = pageBits and ty="Inl (KOArch (KOASIDPool ap))" for ap]
| simp add: makeObjectKO_def projectKOs asid_pool_typ_at_ext' valid_cap'_def
cong: rev_conj_cong)+
apply (simp add: objBits_simps archObjSize_def valid_cap'_def capAligned_def range_cover_full)
apply (wp createObjects'_wp_subst[OF createObjects_ex_cte_cap_to[where sz = pageBits]]
createObjects_orig_cte_wp_at'[where sz = pageBits]
hoare_vcg_const_imp_lift
|simp add: makeObjectKO_def projectKOs asid_pool_typ_at_ext' valid_cap'_def
not_ioport_cap_safe_ioport_insert' isCap_simps
canonical_address_neq_mask
cong: rev_conj_cong
|strengthen safe_parent_strg'[where idx = "2^ pageBits"]
| rule in_kernel_mappings_neq_mask
| simp add: bit_simps)+
apply (simp add:asid_pool_typ_at_ext'[symmetric])
apply (wp createObject_typ_at')
apply (simp add: objBits_simps archObjSize_def valid_cap'_def
capAligned_def range_cover_full makeObjectKO_def
projectKOs asid_pool_typ_at_ext'
cong: rev_conj_cong)
apply (clarsimp simp:conj_comms
descendants_of_null_filter'
| strengthen invs_pspace_aligned' invs_pspace_distinct'
invs_pspace_aligned' invs_valid_pspace')+
apply (wp updateFreeIndex_forward_invs'
updateFreeIndex_cte_wp_at
updateFreeIndex_pspace_no_overlap'
updateFreeIndex_caps_no_overlap''
updateFreeIndex_descendants_of2
updateFreeIndex_caps_overlap_reserved
updateCap_cte_wp_at_cases static_imp_wp
getSlotCap_wp)+
apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask
| strengthen invs_valid_pspace' invs_pspace_aligned'
invs_pspace_distinct' empty_descendants_range_in')+
apply (wp deleteObjects_invs'[where p="makePoolParent aci"]
hoare_vcg_ex_lift
deleteObjects_caps_no_overlap''[where slot="makePoolParent aci"]
deleteObject_no_overlap
deleteObjects_cap_to'[where p="makePoolParent aci"]
deleteObjects_ct_active'[where cref="makePoolParent aci"]
deleteObjects_descendants[where p="makePoolParent aci"]
deleteObjects_cte_wp_at'
deleteObjects_null_filter[where p="makePoolParent aci"])
apply (frule valid_capAligned)
apply (clarsimp simp: invs_mdb' invs_valid_pspace' capAligned_def
cte_wp_at_ctes_of is_simple_cap'_def isCap_simps)
apply (strengthen refl ctes_of_valid_cap'[mk_strg I E])
apply (clarsimp simp: conj_comms invs_valid_objs')
apply (frule_tac ptr="w1" in descendants_range_caps_no_overlapI'[where sz = pageBits])
apply (fastforce simp:is_aligned_neg_mask_eq cte_wp_at_ctes_of)
apply (simp add:empty_descendants_range_in')
apply (frule(1) if_unsafe_then_capD'[OF _ invs_unsafe_then_cap',rotated])
apply (fastforce simp:cte_wp_at_ctes_of)
apply (drule ex_cte_not_in_untyped_range[rotated -2])
apply (simp add:invs_valid_global')+
apply (drule ex_cte_not_in_untyped_range[rotated -2])
apply (simp add:invs_valid_global')+
apply (subgoal_tac "is_aligned (2 ^ pageBits) minUntypedSizeBits")
prefer 2
apply (rule is_aligned_weaken)
apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1, simplified])
apply (simp add: pageBits_def untypedBits_defs)
apply (frule_tac cte="CTE (capability.UntypedCap False a b c) m" for a b c m in valid_global_refsD', clarsimp)
apply (simp add: is_aligned_neg_mask_eq Int_commute)
by (auto simp:empty_descendants_range_in' objBits_simps max_free_index_def
archObjSize_def asid_low_bits_def word_bits_def
range_cover_full descendants_range'_def2 is_aligned_mask
null_filter_descendants_of'[OF null_filter_simp'] bit_simps
valid_cap_simps' mask_def)+
lemma dmo_out8_invs'[wp]:
"\<lbrace>invs'\<rbrace> doMachineOp (out8 a b) \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (wp dmo_invs' no_irq_out8 no_irq)
apply clarsimp
apply (drule_tac P4="\<lambda>m'. underlying_memory m' p = underlying_memory m p"
in use_valid[where P=P and Q="\<lambda>_. P" for P])
apply (simp add: out8_def machine_op_lift_def
machine_rest_lift_def split_def | wp)+
done
lemma dmo_out16_invs'[wp]:
"\<lbrace>invs'\<rbrace> doMachineOp (out16 a b) \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (wp dmo_invs' no_irq_out16 no_irq)
apply clarsimp
apply (drule_tac P4="\<lambda>m'. underlying_memory m' p = underlying_memory m p"
in use_valid[where P=P and Q="\<lambda>_. P" for P])
apply (simp add: out16_def machine_op_lift_def
machine_rest_lift_def split_def | wp)+
done
lemma dmo_out32_invs'[wp]:
"\<lbrace>invs'\<rbrace> doMachineOp (out32 a b) \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (wp dmo_invs' no_irq_out32 no_irq)
apply clarsimp
apply (drule_tac P4="\<lambda>m'. underlying_memory m' p = underlying_memory m p"
in use_valid[where P=P and Q="\<lambda>_. P" for P])
apply (simp add: out32_def machine_op_lift_def
machine_rest_lift_def split_def | wp)+
done
lemma dmo_in8_invs'[wp]:
"\<lbrace>invs'\<rbrace> doMachineOp (in8 a) \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (wp dmo_invs' no_irq_in8 no_irq)
apply clarsimp
apply (drule_tac P4="\<lambda>m'. underlying_memory m' p = underlying_memory m p"
in use_valid[where P=P and Q="\<lambda>_. P" for P])
apply (simp add: in8_def machine_op_lift_def
machine_rest_lift_def split_def | wp)+
done
lemma dmo_in16_invs'[wp]:
"\<lbrace>invs'\<rbrace> doMachineOp (in16 a) \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (wp dmo_invs' no_irq_in16 no_irq)
apply clarsimp
apply (drule_tac P4="\<lambda>m'. underlying_memory m' p = underlying_memory m p"
in use_valid[where P=P and Q="\<lambda>_. P" for P])
apply (simp add: in16_def machine_op_lift_def
machine_rest_lift_def split_def | wp)+
done
lemma dmo_in32_invs'[wp]:
"\<lbrace>invs'\<rbrace> doMachineOp (in32 a) \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (wp dmo_invs' no_irq_in32 no_irq)
apply clarsimp
apply (drule_tac P4="\<lambda>m'. underlying_memory m' p = underlying_memory m p"
in use_valid[where P=P and Q="\<lambda>_. P" for P])
apply (simp add: in32_def machine_op_lift_def
machine_rest_lift_def split_def | wp)+
done
lemma setIOPortMask_safe_ioport_insert':
"\<lbrace>\<lambda>s. (\<forall>cap\<in>ran (cteCaps_of s). cap_ioports' ac \<inter> cap_ioports' cap = {}) \<and>
ac = ArchObjectCap (IOPortCap f l)\<rbrace>
setIOPortMask f l True
\<lbrace>\<lambda>rv s. safe_ioport_insert' ac NullCap s\<rbrace>"
supply fun_upd_apply[simp del]
apply (clarsimp simp: safe_ioport_insert'_def issued_ioports'_def setIOPortMask_def)
apply wpsimp
by (clarsimp simp: cte_wp_at_ctes_of foldl_map foldl_fun_upd_value)
lemma setIOPortMask_cte_cap_to'[wp]:
"\<lbrace>ex_cte_cap_to' p\<rbrace> setIOPortMask f l b \<lbrace>\<lambda>rv. ex_cte_cap_to' p\<rbrace>"
by (wp ex_cte_cap_to'_pres)
lemma arch_performInvocation_invs':
"\<lbrace>invs' and ct_active' and valid_arch_inv' invocation\<rbrace>
Arch.performInvocation invocation
\<lbrace>\<lambda>rv. invs'\<rbrace>"
unfolding X64_H.performInvocation_def
apply (cases invocation, simp_all add: performX64MMUInvocation_def valid_arch_inv'_def,
(wp|wpc|simp)+)
apply (clarsimp simp: performX64PortInvocation_def)
apply (wpsimp simp: portIn_def portOut_def)
apply (clarsimp simp: invs'_def cur_tcb'_def)
apply (clarsimp simp: performX64PortInvocation_def)
apply (wpsimp wp: cteInsert_simple_invs setIOPortMask_invs' setIOPortMask_safe_ioport_insert')
apply (clarsimp simp: ioport_control_inv_valid'_def valid_cap'_def capAligned_def word_bits_def
is_simple_cap'_def isCap_simps)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (rule conjI, clarsimp)
apply (rule conjI, clarsimp simp: safe_parent_for'_def)
apply (case_tac ctea)
apply (clarsimp simp: isCap_simps sameRegionAs_def3)
apply (drule_tac src=p in valid_ioports_issuedD'[OF invs_valid_ioports'])
apply (fastforce simp: cteCaps_of_def)
apply force
by (force simp: cteCaps_of_def ran_def valid_ioports'_simps dest!: invs_valid_ioports')
end
end
|
Require Import FiatFormal.Data.Context.
Require Import FiatFormal.Data.List.
Require Import FiatFormal.Tactics.
Require Import FiatFormal.Norm.
(* Chain of evaluation contexts.
This carries the evaluation contexts needed to reduce
a list of expressions in left-to-right order.
Given a list of expressions:
xs = [x1 x2 x3 x4 x5 x6 x7 x8]
We want to prove that if each one is either a value or can be reduced
to one, then we can evaluate the whole list of expressions to a list
of values.
vs = [v1 v2 v3 v4 v5 v6 v7 v8]
To do this we need a list of evaluation contexts as follows,
where XX is the expression currently being evaluated.
   C0: [XX x2 x3 x4 x5 x6 x7 x8]
   C1: [v1 XX x3 x4 x5 x6 x7 x8]
   C2: [v1 v2 XX x4 x5 x6 x7 x8]
   C3: [v1 v2 v3 XX x5 x6 x7 x8]
   ...
   C7: [v1 v2 v3 v4 v5 v6 v7 XX]
The proof using these contexts proceeds *backwards* over
the number of expressions that are known to be values.
1) If all expressions are values then we're already done.
2) For some context C, if we can reduce (C v) to all values,
and we can reduce x to v, then we can also reduce (C x) to
all values. This reasoning is similar to that used by the
eval_expansion lemma.
3) Producing the base-level context (C0) is trivial. We don't
need to show that expressions to the left are already values,
because there aren't any.
*)
Section Chain.
Variable exp : Type.
Variable Val : exp -> Prop.
Variable Steps : exp -> exp -> Prop.
Variable Steps_val : forall x1 x2, Val x1 -> Steps x1 x2 -> x2 = x1.
Inductive CHAIN : list exp -> list exp -> Prop :=
| EcDone
: forall vs
, Forall Val vs
-> CHAIN vs vs
| EcCons
: forall x v vs C
, exps_ctx Val C
-> Steps x v -> Val v
-> CHAIN (C v) vs
-> CHAIN (C x) vs.
Hint Constructors CHAIN.
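(* Illustrative sketch: for a two-element list [x1; x2] with
   Steps x1 v1, Steps x2 v2 and Val v1, Val v2 (hypothetical names),
   a derivation of CHAIN [x1; x2] [v1; v2] is built backwards:
     EcDone                                   : CHAIN [v1; v2] [v1; v2]
     EcCons with context  fun e => [v1; e]
       (an XscCons over an XscHead)           : CHAIN [v1; x2] [v1; v2]
     EcCons with context  fun e => [e; x2]
       (an XscHead with tail [x2])            : CHAIN [x1; x2] [v1; v2]  *)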
(* Add an already-evaluated expression to a chain.
This effectively indicates that the expression was pre-evaluated
and doesn't need to be evaluated again during the reduction. *)
Lemma chain_extend
: forall v xs ys
, Val v
-> CHAIN xs ys
-> CHAIN (v :: xs) (v :: ys).
Proof.
intros v xs ys HW HC.
induction HC; auto.
lets D1: (@XscCons exp) Val v H. auto.
lets D2: EcCons D1 H0 H1 IHHC. auto.
Qed.
(* Make the chain of evaluation contexts needed to evaluate
some expressions to values *)
Lemma make_chain
: forall xs vs
, Forall2 Steps xs vs
-> Forall Val vs
-> CHAIN xs vs.
Proof.
intros. gen vs.
induction xs as [xs | x]; intros.
Case "xs = nil".
inverts H. auto.
Case "xs = xs' :> v".
destruct vs as [vs | v].
SCase "vs = nil".
nope.
SCase "vs = vs' :> v".
inverts H.
inverts H0.
have (CHAIN xs vs). clear IHxs.
(* Build the property of STEPS we want to pass to exps_ctx2_run *)
assert (Forall2
(fun x v => Steps x v /\ Val v /\ (Val x -> v = x)) xs vs) as HS.
eapply (@Forall2_impl_in exp exp Steps); auto.
intros. nforall. rip.
(* Either all the xs are already whnfX,
or there is a context where one can step *)
lets HR: (@exps_ctx2_run exp) Val HS. clear HS.
inverts HR.
SSCase "xs all whnfX".
assert (Forall2 eq xs vs).
eapply (@Forall2_impl_in exp exp Steps (@eq exp) xs vs).
nforall. intros. symmetry. eauto. auto.
assert (xs = vs).
apply Forall2_eq. auto. subst.
lets C1: (@XscHead exp) vs.
lets D1: EcCons x v (v :: vs) C1 H4.
eauto.
SSCase "something steps".
(* unpack the evaluation contexts *)
dest C1. dest C2.
destruct H0 as [x'].
destruct H0 as [v'].
rip.
lets HC1: exps_ctx2_left H0.
lets HC2: exps_ctx2_right H0.
assert (Val v').
eapply exps_ctx_Forall; eauto.
lets E1: (@XscCons exp) C1 H2 HC1.
lets E2: (@XscCons exp) C2 H2 HC2.
lets E3: (@XscHead exp) (C1 x').
lets F1: EcCons x v (v :: C2 v') E3.
apply F1; auto. clear F1.
eapply chain_extend; auto.
Qed.
End Chain.
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e152m17_7limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
The endpoint of the arc of a circle with radius $r$ and center $z$ from angle $s$ to angle $t$ is $z + r\exp(it)$. |
State Before: R : Type u
S : Type v
a b c d : R
n m : โ
instโ : Semiring R
p q r : R[X]
h : degree p = 1
โข โC (coeff p 1) * X + โC (coeff p 0) = โC (leadingCoeff p) * X + โC (coeff p 0) State After: R : Type u
S : Type v
a b c d : R
n m : โ
instโ : Semiring R
p q r : R[X]
h : degree p = 1
โข โC (coeff p 1) * X + โC (coeff p 0) = โC (coeff p One.one) * X + โC (coeff p 0) Tactic: simp only [leadingCoeff, natDegree_eq_of_degree_eq_some h] State Before: R : Type u
S : Type v
a b c d : R
n m : โ
instโ : Semiring R
p q r : R[X]
h : degree p = 1
โข โC (coeff p 1) * X + โC (coeff p 0) = โC (coeff p One.one) * X + โC (coeff p 0) State After: no goals Tactic: rfl |
function val=calcRMSE(xTrue,xEst,is3D)
%%CALCRMSE Compute the scalar root-mean-squared error (RMSE) of estimates
% compared to true values.
%
%INPUTS: xTrue The truth data. This is either an xDimXNumSamples matrix or
% an xDimXNXnumSamples matrix. The latter formulation is
% useful when the MSE over multiple Monte Carlo runs of an
% entire length-N track is desired. In the first formulation,
% we take N=1. Alternatively, if the same true value is used
% for all numSamples, then xTrue can just be an xDimXN matrix.
% Alternatively, if xTrue is the same for all numSamples and
% all N, then just an xDimX1 matrix can be passed. N and
% numSamples are inferred from xEst.
% xEst An xDimXnumSamples set of estimates or an xDimXNXnumSamples
% set of estimates (if values at N times are desired).
% is3D An optional boolean indicating whether xEst is 3D. This is only used if
% xEst is a matrix. In such an instance, there is an ambiguity
% whether xEst is truly 2D, so N=1, or whether numSamples=1
% and xEst is 3D with the third dimension being 1. If this
% parameter is omitted or an empty matrix is passed, it is
% assumed that N=1.
%
%OUTPUTS: val A 1XN vector of scalar RMSE values.
%
%The RMSE is discussed in relation to alternative error measures in [1]
%and is given in Equation 1 of [1].
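%In the notation used here (matching the computation in the code below), for
%each time index k the returned value is
% val(k)=sqrt((1/numSamples)*sum_{s=1}^{numSamples} norm(xEst(:,k,s)-xTrue(:,k,s))^2)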
%
%EXAMPLE:
%Here, we show that the RMSE of samples of a Gaussian random variable is
%related to the square root of the trace of a covariance matrix.
% R=[28, 4, 10;
% 4, 22, 16;
% 10, 16, 16];%The covariance matrix.
% xTrue=[10;-20;30];
% numRuns=100000;
% xEst=GaussianD.rand(numRuns,xTrue,R);
% rootTrace=sqrt(trace(R))
% val=calcRMSE(xTrue,xEst)
%One will see that the root-trace and the sample RMSE are close.
%
%REFERENCES:
%[1] X. R. Li and Z. Zhao, "Measures of performance for evaluation of
% estimators and filters," in Proceedings of SPIE: Conference on Signal
% and Data processing of Small Targets, vol. 4473, San Diego, CA, 29
% Jul. 2001, pp. 530-541.
%
%February 2017 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
if(nargin<3||isempty(is3D))
is3D=false;
end
xDim=size(xEst,1);
if(ismatrix(xEst)&&is3D==false)
N=1;
numSamples=size(xEst,2);
xEst=reshape(xEst,[xDim,1,numSamples]);
if(size(xTrue,2)==1)
%If the true values are the same for all samples.
xTrue=repmat(xTrue,[1,1,numSamples]);
else
xTrue=reshape(xTrue,[xDim,1,numSamples]);
end
else
N=size(xEst,2);
numSamples=size(xEst,3);
if(ismatrix(xTrue))
if(size(xTrue,2)==1)
%If the true values are the same for all samples and for all N.
xTrue=repmat(xTrue,[1,N,numSamples]);
else
%If the true values are the same for all samples.
xTrue=repmat(xTrue,[1,1,numSamples]);
end
end
end
val=zeros(1,N);
for k=1:N
val(k)=sqrt(sum(sum((xEst(:,k,:)-xTrue(:,k,:)).^2,3)/numSamples));
end
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
Require Export SfLib.
Require Export Smtrg.
Module SmtrgProp.
Import Smtrg.Smtrg.
Theorem unique_typing : forall Gamma t T1 T2,
Gamma |- t \in T1 ->
Gamma |- t \in T2 ->
T1 = T2 .
Proof with auto.
intros Gamma t Ta Tb HTa HTb.
generalize dependent Tb.
has_type_cases (induction HTa) Case ; intros Tb HTb; inversion HTb.
Case "T_Var". rewrite H in H2. injection H2. auto.
Case "T_Abs".
assert (T12 = T1).
apply IHHTa. assumption.
rewrite H5. reflexivity.
Case "T_App".
assert (TArrow T11 T12 = TArrow T0 Tb). apply IHHTa1. assumption.
injection H5. auto.
Case "T_Unit". reflexivity.
Case "T_Pair".
f_equal.
apply IHHTa1...
apply IHHTa2...
Case "T_Fst".
assert (TProd T1 T2 = TProd Tb T3).
apply IHHTa...
injection H3...
Case "T_Snd".
assert (TProd T1 T2 = TProd T0 Tb).
apply IHHTa...
injection H3...
Case "T_Fix".
assert (TArrow T T = TArrow Tb Tb)...
injection H3...
Case "T_Formula". reflexivity.
Case "T_If". apply IHHTa1 ; assumption.
Qed.
Theorem progress : forall t T,
empty |- t \in T ->
value t \/ exists t', t ==> t'.
Proof with eauto.
intros t T Ht.
remember (@empty ty) as Gamma.
has_type_cases (induction Ht) Case; subst Gamma.
Ltac isvalue v_xx := left ; apply v_xx.
Case "T_Var".
(* contradiction: variables aren't typed in empty context *)
unfold empty in H ; discriminate H.
Case "T_Abs". isvalue v_abs.
Case "T_App".
(* t1 t2.
If t1 steps, then we make progress with ST_App,
otherwise, t1 must be an abstraction, so we make progress with ST_AppAbs.
*)
right. destruct IHHt1...
SCase "t1 is a value".
v_cases (inversion H) SSCase; subst; try solve by inversion.
SSCase "v_abs".
exists ([x0:=t2]t).
apply ST_AppAbs ; assumption.
SCase "t1 steps".
inversion H as [t1' Hstp].
exists (tapp t1' t2).
apply ST_App ; assumption.
Case "T_Unit". isvalue v_unit.
Case "T_Pair". isvalue v_pair.
Case "T_Fst".
(* fst t
If t steps, then we make progress with ST_Fst,
otherwise, t must be a pair, so we make progress with ST_FstPair.
*)
right. destruct IHHt...
SCase "t is a value".
v_cases (inversion H) SSCase; subst; try solve by inversion.
SSCase "v_pair".
exists t1.
apply ST_FstPair ; assumption.
SCase "t steps".
inversion H as [t' Hstp].
exists (tfst t').
apply ST_Fst ; assumption.
Case "T_Snd".
(* snd t
If t steps, then we make progress with ST_Snd,
otherwise, t must be a pair, so we make progress with ST_SndPair.
*)
right. destruct IHHt...
SCase "t is a value".
v_cases (inversion H) SSCase; subst; try solve by inversion.
SSCase "v_pair".
exists t2.
apply ST_SndPair ; assumption.
SCase "t steps".
inversion H as [t' Hstp].
exists (tsnd t').
apply ST_Snd ; assumption.
Case "T_Fix".
(* makes progress by ST_Fix *)
right.
exists (tapp t (tfix t)).
apply ST_Fix ; assumption.
Case "T_Formula". isvalue v_formula.
Case "T_If".
(* makes progress by one of
ST_IfAbs, ST_IfUnit, ST_IfPair, ST_IfFormula,
ST_IfLeft, or ST_IfRight, depending on
whether t1 and t2 are values or can step,
which we know by induction
*)
right.
destruct (IHHt1 (eq_refl empty)) as [Hvalue1 | Hsteps1];
destruct (IHHt2 (eq_refl empty)) as [Hvalue2 | Hsteps2].
SCase "t1 and t2 values".
v_cases (destruct Hvalue1) SSCase ;
(* Both values must have the same type *)
destruct Hvalue2 ; inversion Ht1 ; inversion Ht2 ; subst ; try discriminate.
SSCase "v_abs".
(* We know the argument types match for both lambdas *)
injection H7; intros ; subst.
(* So apply ST_IfAbs *)
exists (tabs x T0 (tif f (tapp (tabs x0 T0 t) (tvar x)) (tapp (tabs x1 T0 t0) (tvar x)))).
apply ST_IfAbs.
SSCase "v_unit". exists tunit ; apply ST_IfUnit.
SSCase "v_pair". exists (tpair (tif f t1 t2) (tif f t0 t3)) ; apply ST_IfPair.
SSCase "v_formula". exists (tformula (Sat.fite f f0 f1)) ; apply ST_IfFormula.
SCase "t1 value, t2 steps".
destruct Hsteps2 as [t2'].
exists (tif f t1 t2') ; apply ST_IfRight ; assumption.
SCase "t1 steps, t2 value".
destruct Hsteps1 as [t1'].
exists (tif f t1' t2) ; apply ST_IfLeft ; assumption.
SCase "t1 steps, t2 steps".
destruct Hsteps1 as [t1'].
exists (tif f t1' t2) ; apply ST_IfLeft ; assumption.
Qed.
Inductive appears_free_in : id -> tm -> Prop :=
| afi_var : forall x,
appears_free_in x (tvar x)
| afi_app1 : forall x t1 t2,
appears_free_in x t1 -> appears_free_in x (tapp t1 t2)
| afi_app2 : forall x t1 t2,
appears_free_in x t2 -> appears_free_in x (tapp t1 t2)
| afi_abs : forall x y T11 t12,
y <> x ->
appears_free_in x t12 ->
appears_free_in x (tabs y T11 t12)
| afi_pair1 : forall x t1 t2,
appears_free_in x t1 ->
appears_free_in x (tpair t1 t2)
| afi_pair2 : forall x t1 t2,
appears_free_in x t2 ->
appears_free_in x (tpair t1 t2)
| afi_fst : forall x t,
appears_free_in x t ->
appears_free_in x (tfst t)
| afi_snd : forall x t,
appears_free_in x t ->
appears_free_in x (tsnd t)
| afi_fix : forall x t,
appears_free_in x t ->
appears_free_in x (tfix t)
| afi_if1 : forall x f t1 t2,
appears_free_in x t1 ->
appears_free_in x (tif f t1 t2)
| afi_if2 : forall x f t1 t2,
appears_free_in x t2 ->
appears_free_in x (tif f t1 t2)
.
Tactic Notation "afi_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "afi_var"
| Case_aux c "afi_app1" | Case_aux c "afi_app2"
| Case_aux c "afi_abs"
| Case_aux c "afi_pair1" | Case_aux c "afi_pair2"
| Case_aux c "afi_fst" | Case_aux c "afi_snd"
| Case_aux c "afi_fix"
| Case_aux c "afi_if1" | Case_aux c "afi_if2"
].
Hint Constructors appears_free_in.
Definition closed (t:tm) :=
forall x, ~ appears_free_in x t.
Lemma free_in_context : forall x t T Gamma,
appears_free_in x t ->
Gamma |- t \in T ->
exists T', Gamma x = Some T'.
Proof.
intros x t T Gamma H H0. generalize dependent Gamma.
generalize dependent T.
afi_cases (induction H) Case;
intros; try solve [inversion H0; eauto].
Case "afi_abs".
inversion H1; subst.
apply IHappears_free_in in H7.
rewrite extend_neq in H7; assumption.
Qed.
Lemma closed_typing : forall t T,
empty |- t \in T ->
closed t.
Proof.
intros t T HT.
unfold closed.
intro x.
unfold not.
intro Hafi.
absurd (exists (T':ty), empty x = Some T').
unfold not. intro Hsilly. inversion Hsilly. inversion H.
apply free_in_context with t T; assumption.
Qed.
Lemma context_invariance : forall Gamma Gamma' t T,
Gamma |- t \in T ->
(forall x, appears_free_in x t -> Gamma x = Gamma' x) ->
Gamma' |- t \in T.
Proof with eauto.
intros.
generalize dependent Gamma'.
has_type_cases (induction H) Case; intros; auto.
Case "T_Var".
apply T_Var. rewrite <- H0...
Case "T_Abs".
apply T_Abs.
apply IHhas_type. intros x1 Hafi.
(* the only tricky step... the [Gamma'] we use to
instantiate is [extend Gamma x T11] *)
unfold extend. destruct (eq_id_dec x0 x1)...
Case "T_App". apply T_App with T11...
Case "T_Fst". apply T_Fst with T2...
Case "T_Snd". apply T_Snd with T1...
Qed.
Lemma substitution_preserves_typing : forall Gamma x U t v T,
extend Gamma x U |- t \in T ->
empty |- v \in U ->
Gamma |- [x:=v]t \in T.
Proof with eauto.
intros Gamma x U t v T Ht Ht'.
generalize dependent Gamma. generalize dependent T.
t_cases (induction t) Case; intros T Gamma H;
(* in each case, we'll want to get at the derivation of H *)
inversion H; subst; simpl...
Case "tvar".
rename i into y. destruct (eq_id_dec x y).
SCase "x=y".
subst.
rewrite extend_eq in H2.
inversion H2; subst. clear H2.
eapply context_invariance... intros x Hcontra.
destruct (free_in_context _ _ T empty Hcontra) as [T' HT']...
inversion HT'.
SCase "x<>y".
apply T_Var. rewrite extend_neq in H2...
Case "tabs".
rename i into y. apply T_Abs.
destruct (eq_id_dec x y).
SCase "x=y".
eapply context_invariance...
subst.
intros x Hafi. unfold extend.
destruct (eq_id_dec y x)...
SCase "x<>y".
apply IHt. eapply context_invariance...
intros z Hafi. unfold extend.
destruct (eq_id_dec y z)...
subst. rewrite neq_id...
Qed.
Theorem preservation : forall t t' T,
empty |- t \in T ->
t ==> t' ->
empty |- t' \in T.
Proof with eauto.
remember (@empty ty) as Gamma.
intros t t' T HT. generalize dependent t'.
Ltac doesnt_step := inversion HE.
has_type_cases (induction HT) Case;
intros t' HE; subst.
Case "T_Var". doesnt_step.
Case "T_Abs". doesnt_step.
Case "T_App".
(* ST_AppAbs: app (abs x t1) t2 steps to t1[x=t2]
and substitution preserves typing.
ST_App: t1 t2 steps to app t1' t2
By induction, t1' has the same type as t1, so t1' t2 has the
same type as t1 t2.
*)
inversion HE; subst.
SCase "ST_AppAbs".
apply substitution_preserves_typing with T11...
inversion HT1...
SCase "ST_App".
apply T_App with T11 ;
[ apply IHHT1 ; [ reflexivity | assumption ]
| assumption].
Case "T_Unit". doesnt_step.
Case "T_Pair". doesnt_step.
Case "T_Fst".
(* ST_FstPair: fst (t1, t2) steps to t1
ST_Fst: fst t steps to fst t'
t goes to t', which has the same type as t by induction.
Then fst t' has the same type as fst t.
*)
inversion HE; subst.
SCase "ST_FstPair".
inversion HT ; subst. assumption.
SCase "ST_Fst".
apply T_Fst with T2.
apply IHHT; [reflexivity | assumption].
Case "T_Snd".
(* ST_SndPair: snd (t1, t2) steps to t2
ST_Snd: snd t steps to snd t'
t goes to t', which has the same type as t by induction.
Then snd t' has the same type as snd t.
*)
inversion HE; subst.
SCase "ST_SndPair".
inversion HT ; subst. assumption.
SCase "ST_Snd".
apply T_Snd with T1;
apply IHHT; [ reflexivity | assumption].
Case "T_Fix".
(* tfix t must step to (tapp t (tfix t)),
and because (tfix t) has type T, and t has type T -> T,
(tapp t (tfix t)) also has type T.
*)
inversion HE. subst.
apply T_App with T; [ assumption | apply T_Fix ; assumption].
Case "T_Formula". doesnt_step.
Case "T_If".
inversion HE ; subst.
SCase "IfAbs".
inversion HT1 ; subst ; inversion HT2 ; subst.
apply T_Abs.
apply T_If.
apply T_App with T0.
apply context_invariance with empty.
apply T_Abs ; assumption.
intros x0 Hfree.
absurd (appears_free_in x0 (tabs x1 T0 t0)).
assert (closed (tabs x1 T0 t0)).
apply closed_typing with (TArrow T0 T12) ; assumption.
unfold closed in H.
specialize H with x0. apply H.
apply Hfree.
apply T_Var. auto.
apply T_App with T0.
apply context_invariance with empty.
assumption.
intros x0 Hfree.
absurd (appears_free_in x0 (tabs x2 T0 t3)).
assert (closed (tabs x2 T0 t3)).
apply closed_typing with (TArrow T0 T12) ; assumption.
unfold closed in H.
specialize H with x0. apply H.
apply Hfree.
apply T_Var. auto.
SCase "IfUnit". inversion HT2. apply T_Unit.
SCase "IfPair".
inversion HT1 ; subst ; inversion HT2; subst.
apply T_Pair; apply T_If ; assumption.
SCase "IfFormula". inversion HT1. apply T_Formula.
SCase "IfLeft".
apply T_If.
apply (IHHT1 (eq_refl empty)) ; assumption.
assumption.
SCase "IfRight".
apply T_If.
assumption.
apply (IHHT2 (eq_refl empty)) ; assumption.
Qed.
Definition stuck (t:tm) : Prop :=
(normal_form step) t /\ ~ value t.
Corollary soundness : forall t t' T,
empty |- t \in T ->
t ==>* t' ->
~(stuck t').
Proof.
intros t t' T Hhas_type Hmulti. unfold stuck.
intros [Hnf Hnot_val]. unfold normal_form in Hnf.
induction Hmulti. apply Hnot_val.
destruct (progress x0 T).
apply Hhas_type. apply H.
contradiction.
apply IHHmulti.
apply (preservation x0 y0). apply Hhas_type. apply H.
apply Hnf. apply Hnot_val.
Qed.
Theorem step_deterministic : forall t t1 t2 T,
empty |- t \in T ->
t ==> t1 ->
t ==> t2 ->
t1 = t2
.
Proof.
intro t.
t_cases (induction t) Case;
(* terms which are values are solved by inversion on Hstep1 *)
intros ty1 ty2 T HT Hstep1 Hstep2 ; inversion Hstep1 ; subst ; inversion Hstep2 ; subst.
Case "tapp".
SCase "tf = tabs x0 T0 t0".
SSCase "tf = tabs x0 T0 t0". reflexivity.
SSCase "tf steps". inversion H2. (* tabs can't step *)
SCase "tf steps".
SSCase "tf = tabs". inversion H2. (* tabs can't step *)
SSCase "tf steps". (* by induction, tf steps to the same place *)
f_equal.
inversion HT.
apply IHt1 with (TArrow T11 T) ; assumption.
Case "tfst".
SCase "fst pair".
SSCase "fst pair". reflexivity.
SSCase "fst". inversion H0. (* pair can't step *)
SCase "fst".
SSCase "fst pair". inversion H0.
SSCase "fst".
f_equal.
inversion HT.
apply IHt with (TProd T T2) ; assumption.
Case "tsnd".
SCase "snd pair".
SSCase "snd pair". reflexivity.
SSCase "snd". inversion H0.
SCase "snd".
SSCase "snd pair". inversion H0.
SSCase "snd".
f_equal.
inversion HT.
apply IHt with (TProd T1 T) ; assumption.
Case "tfix". reflexivity.
Case "tif".
SCase "IfAbs".
SSCase "IfAbs". reflexivity.
SSCase "IfLeft". inversion H3.
SSCase "IfRight". inversion H4.
SCase "IfUnit".
SSCase "IfUnit". reflexivity.
SSCase "IfLeft". inversion H3.
SSCase "IfRight". inversion H4.
SCase "IfPair".
SSCase "IfPair". reflexivity.
SSCase "IfLeft". inversion H3.
SSCase "IfRight". inversion H4.
SCase "IfFormula".
SSCase "IfPair". reflexivity.
SSCase "IfLeft". inversion H3.
SSCase "IfRight". inversion H4.
SCase "IfLeft".
SSCase "IfAbs". inversion H3.
SSCase "IfUnit". inversion H3.
SSCase "IfPair". inversion H3.
SSCase "IfFormula". inversion H3.
SSCase "IfLeft".
f_equal.
inversion HT ; subst.
apply IHt1 with T ; assumption.
SSCase "IfRight".
destruct H4 ; inversion H3.
SCase "IfRight".
SSCase "IfAbs". inversion H4.
SSCase "IfUnit". inversion H4.
SSCase "IfPair". inversion H4.
SSCase "IfFormula". inversion H4.
SSCase "IfLeft". destruct H3 ; inversion H5.
SSCase "IfRight".
f_equal.
inversion HT ; subst.
apply IHt2 with T ; assumption.
Qed.
End SmtrgProp.
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
#
# Frontier
#
#F _DFUnion(<list_frontiers>) merges frontiers.
#F Multiple frontiers result from multiple outgoing paths.
#F
#F For example t3 = add(t1,t2) will have the frontier
#F [ [t3], _DFUnion([DefFrontier(t1), DefFrontier(t2)]) ]
#F
#F The parameter is a list of frontiers, where each frontier
#F is a list of sets of variables.
#F
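#F For example (illustrative): merging a depth-2 frontier with a depth-1
#F frontier carries the shorter frontier's last set forward:
#F _DFUnion([ [ [t3], [t1,t2] ], [ [t4] ] ]) -> [ [t3,t4], [t1,t2,t4] ]
#F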
_DFUnion := function(list_frontiers)
local depths, maxdepth, curset, res, numfrontiers, d, j;
numfrontiers := Length(list_frontiers);
depths := List(list_frontiers, Length);
maxdepth := Maximum0(depths);
res := [];
for d in [1..maxdepth] do
curset := Set([]);
for j in [1..numfrontiers] do
if d <= depths[j] then
curset := UnionSet(curset, list_frontiers[j][d]);
else
# if a frontier is shorter than the longest one, we carry over its last entry.
# For instance, the frontier of t5 in { t3=add(t1,t2); t5=add(t3, t4) }
# is [[t5], [t3, t4], [t1, t2, t4]].
curset := UnionSet(curset, list_frontiers[j][depths[j]]);
fi;
od;
Add(res, curset);
od;
return res;
end;
#F DefFrontier(<cmd>, <depth>) - computes the so-called def-frontier for a command.
#F The function can be defined inductively as:
#F DF(t, d) == DF(t.def, d), where t.def is the 'assign' defining t
#F DF(assign(t, ... ), 0) == [ [t] ]
#F DF(assign(t, f(a1,a2,...)), 1) == [ [t], [a1,a2,...] ]
#F DF(assign(t, f(a1,a2,...)), n) == [ [t], [a1,a2,...] ] concat
#F _DFUnion(DF(a1,n-1), DF(a2,n-1), ...)
#F
#F In plain words it returns the minimal set of variables that fully define a given
#F location (or cmd.loc).
#F
#F For instance, the frontier of t5 in { t3=add(t1,t2); t5=add(t3, t4) }
#F is [[t5], [t3, t4], [t1, t2, t4]].
#F
#F Note (!!!): MarkDefUse(code) must be called on the corresponding code object,
#F so that locations can be connected with their definitions
#F
#F If <depth> is negative, then the maximally deep frontier is computed.
#F
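#F Example (hypothetical usage): with the code { t3:=add(t1,t2); t5:=add(t3,t4) }
#F processed by MarkDefUse, one would expect
#F DefFrontier(t5, 2); # -> [ [t5], [t3,t4], [t1,t2,t4] ]
#F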
DefFrontier := (cmd, depth) -> Cond(
IsLoc(cmd), let(def := DefLoc(cmd), When(def=false, [[cmd]], DefFrontier(def, depth))),
depth = 0, [Set([cmd.loc])],
# IsLoc(cmd.exp), [Set([cmd.loc])],
let(args := ArgsExp(cmd.exp),
Concatenation([Set([cmd.loc])],
_DFUnion(
List(args, a -> DefFrontier(a, depth-1))))));
DefCollect := (cmd, depth) -> Cond(
depth = 0, cmd.loc,
IsLoc(cmd.exp), cmd.loc,
depth = 1, cmd.exp,
SubstLeaves(Copy(cmd.exp), var,
v -> let(def := DefLoc(v), When(def=false, v, DefCollect(def, depth-1)))));
AddFrontier := x -> Cond(
IsLoc(x), let(def := DefLoc(x),
Cond(def = false, x, AddFrontier(def.exp))),
ObjId(x) in [add,sub],
ApplyFunc(ObjId(x), List(x.args, AddFrontier)),
x);
AddFrontierD := (x,d) -> Cond(
d = 0, x,
IsLoc(x), let(def := DefLoc(x),
Cond(def = false, x, AddFrontierD(def.exp, d))),
ObjId(x) in [add,sub],
ApplyFunc(ObjId(x), List(x.args, a -> AddFrontierD(a, d-1))),
x);
AddFrontierDM := (x,d) -> Cond(
d = 0,
[x,false],
IsLoc(x), let(def := DefLoc(x),
Cond(def = false, [x,false], AddFrontierDM(def.exp, d))),
d = 1,
[x,ObjId(x)=mul],
ObjId(x) in [add,sub],
let(args := List(x.args, a -> AddFrontierDM(a, d-1)),
Cond(ForAll(args, a->a[2]=false), [x,false],
[ApplyFunc(ObjId(x), List([1..Length(args)], i->Cond(args[i][2], args[i][1], x.args[i]))), true])),
[x,ObjId(x)=mul]);
# NOTE: memoization
#
Class(madd, add, rec(
redundancy := self >> self.constantRedundancy(), # or self.termRedundancy(),
terms := self >> Map(self.args, a->a.args[2]),
constants := self >> Map(self.args, a->a.args[1].v),
nonTrivialConstants := self >> Filtered(Map(self.args, a->AbsFloat(a.args[1].v)), c->c<>1),
constantRedundancy := self >> let(
constants := self.nonTrivialConstants(),
constants <> [] and Length(constants)<>Length(Set(constants))),
termRedundancy := self >> let(
locs := Filtered(self.terms(), IsLoc),
locs <> [] and Length(locs) <> Length(Set(locs))),
factorConstants := meth(self)
local constants, res, t, c, a, i, fac_indices, l1, l2;
res := []; constants := []; l1:=[]; fac_indices := Set([]);
for a in self.args do
a := Copy(a);
c := a.args[1].v; t := a.args[2];
if c in [1,-1] then
Add(constants, c); Add(res, a);
elif c in constants then
i := Position(constants, c);
res[i].args[2] := add(res[i].args[2], t); AddSet(fac_indices,i);
elif -c in constants then
i := Position(constants, -c);
res[i].args[2] := sub(res[i].args[2], t); AddSet(fac_indices,i);
else
Add(constants, c); Add(res, a);
fi;
od;
l1 := res{fac_indices};
l2 := res{Difference([1..Length(res)], fac_indices)};
if l1=[] then return FoldL1(l2,add); fi;
l1 := FoldL1(l1,add);
if l2=[] then return l1; fi;
l2 := FoldL1(l2,add);
return add(l1, l2);
end,
factorTerms := meth(self)
local terms, res, t, c, a, i, fac_indices, l1, l2;
res := []; terms := []; fac_indices := Set([]);
for a in self.args do
a := Copy(a);
c := a.args[1].v; t := a.args[2];
if IsLoc(t) and t in terms then
i := Position(terms, t);
res[i].args[1].v := res[i].args[1].v + c; AddSet(fac_indices,i);
else
Add(terms, t); Add(res, a);
fi;
od;
l1 := res{fac_indices};
l2 := res{Difference([1..Length(res)], fac_indices)};
l1 := Filtered(l1, a->a.args[1].v <> 0);
l2 := Filtered(l2, a->a.args[1].v <> 0);
if l1=[] and l2=[] then return V(0); fi;
if l1=[] then return FoldL1(l2,add); fi;
l1 := FoldL1(l1,add);
if l2=[] then return l1; fi;
l2 := FoldL1(l2,add);
return add(l1, l2);
end
));
# "MultiAdd" is a normal form for linear expressions
# MultiAdd = madd(mul(c1,e1), mul(c2,e2), ...)
#
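# For example, the linear expression 2*x + 3*y - x would take the form
# madd(mul(2,x), mul(3,y), mul(-1,x)); factorTerms then merges the two x
# terms into a single mul(1,x) (an illustrative sketch of the intent).
#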
Class(ToMultiAdd, RuleSet, rec(
__call__ := (self, e) >> TDA(madd(mul(1,e)), self, SpiralDefaults)));
RewriteRules(ToMultiAdd, rec(
EliminateAdd := Rule(add, e->ApplyFunc(madd, List(e.args, a->mul(1,a)))),
EliminateSub := Rule(sub, e->madd(mul(1,e.args[1]), mul(-1, e.args[2]))),
MulMul := Rule([mul, @(1,Value), [mul, @(2,Value), @(3)]],
e -> mul(@(1).val.v * @(2).val.v, @(3).val)),
MaddSinkMul := ARule(madd, [[mul, @(1,Value), @(2,madd)]],
e -> List(@(2).val.args, a->mul(@(1).val*a.args[1], a.args[2]))),
FlattenMaddMadd := ARule(madd, [@(1,madd)], e->@(1).val.args)
));
NODE := x -> RCSE.node(TDouble, x);
REMOVE_NODE := x -> RCSE.remove_node(x);
deepCheck := function(c, d)
local frontier, linexp;
frontier := AddFrontierD(c.exp, d);
linexp := ToMultiAdd(frontier);
if not ObjId(linexp)=madd then return [c.loc,c.loc,"none"]; fi;
if linexp.constantRedundancy() then return [c.loc, linexp, "constant"];
elif linexp.termRedundancy() then return [c.loc, linexp, "term"];
else return [c.loc, c.loc, "none"];
fi;
end;
deepCheckM := function(c, d)
local frontier, linexp;
frontier := AddFrontierDM(c.exp, d)[1];
linexp := ToMultiAdd(frontier);
if not ObjId(linexp)=madd then return [c.loc,c.loc,"none"]; fi;
if linexp.constantRedundancy() then return [c.loc, linexp, "constant"];
elif linexp.termRedundancy() then return [c.loc, linexp, "term"];
else return [c.loc, c.loc, "none"];
fi;
end;
_DeepSimplifyCmd := function(c, d, frontier_func)
local node, frontier, linexp;
frontier := frontier_func(c.exp, d);
linexp := ToMultiAdd(frontier);
if not ObjId(linexp)=madd then return c; fi;
if linexp.constantRedundancy() then
linexp := linexp.factorConstants();
if IsValue(linexp) then node := linexp;
else node := ApplyFunc(ObjId(linexp), List(linexp.args, NODE));
fi;
c.exp := node;
elif linexp.termRedundancy() then
linexp := linexp.factorTerms();
if IsValue(linexp) then node := linexp;
else node := ApplyFunc(ObjId(linexp), List(linexp.args, NODE));
fi;
c.exp := node;
fi;
return c;
end;
DeepSimplifyCmd := function(c, d1, d2)
if PatternMatch(c.exp, [mul, @(1,Value), @(2,var,v->DefLoc(v)<>false)], empty_cx()) and
PatternMatch(DefLoc(@(2).val).exp, [mul, @(3,Value), @(4)], empty_cx()) then
c.exp := mul(@(1).val.v * @(3).val.v, @(4).val);
fi;
c := _DeepSimplifyCmd(c, d1, AddFrontierD);
c := _DeepSimplifyCmd(c, d2, (x,d)->AddFrontierDM(x,d)[1]);
return c;
end;
Declare(DeepSimplifyChain);
DEPTH1 := 2; # AddFrontierD
DEPTH2 := 3; # AddFrontierDM
DefUnbound := function(e, bound)
local unbound, defs;
unbound := Difference(e.free(), bound);
defs := Filtered(List(unbound, DefLoc), d->d<>false and not Same(d,e));
return Concatenation(Concatenation(List(defs, d -> DefUnbound(d, bound))), defs);
end;
_DeepSimplifyChain := function(code, bound)
local orig, c, res, cmds, args, supp, supplocs;
res := chain();
for c in code.cmds do
if ObjId(c) <> assign then Error("Can't handle 'c' of type '", ObjId(c), "'"); fi;
orig := c.exp;
DeepSimplifyCmd(c,DEPTH1,DEPTH2);
if IsVar(c.loc) then AddSet(bound, c.loc); fi;
supp := chain(DefUnbound(c.exp, bound));
supplocs := List(supp.cmds, cmd->cmd.loc);
DoForAll(supplocs, REMOVE_NODE);
UniteSet(bound, supplocs);
supp := FlattenCode(_DeepSimplifyChain(supp, bound));
Add(supp.cmds, c);
supp := RCSE(supp);
Append(res.cmds, supp.cmds);
od;
return res;
end;
DeepSimplifyChain := code -> _DeepSimplifyChain(code, code.free());
DeepSimplify := function(code)
local c, frontier, linexp, free, undefined;
code := BinSplit(code);
RCSE.flush();
MarkDefUse(code);
return SubstBottomUp(code, chain, DeepSimplifyChain);
end;
_CheckUninitializedChain := function(code, def, uninit)
local def1, def2, c, use;
for c in code.cmds do
if ObjId(c)=assign then
use := VarArgsExp(c.exp);
UniteSet(uninit, Filtered(use, x->not x in def));
AddSet(def, c.loc);
elif ObjId(c)=chain then
[def, uninit] := _CheckUninitializedChain(c, def, uninit);
elif ObjId(c)=IF then
[def1, uninit] := _CheckUninitializedChain(c.then_cmd, ShallowCopy(def), uninit);
[def2, uninit] := _CheckUninitializedChain(c.else_cmd, ShallowCopy(def), uninit);
def := Union(def1, def2);
fi;
od;
return [def, uninit];
end;
CheckUninitializedChain := function(code)
local def, uninit;
Constraint(ObjId(code)=chain);
def := Set([]);
uninit := Set([]);
[def, uninit] := _CheckUninitializedChain(code, def, uninit);
return uninit;
end;
reds := chain -> Filtered(chain.cmds, c -> deepCheck(c,DEPTH1)[3] <>"none" or
deepCheckM(c, DEPTH2)[3] <>"none");
DoDeepSimplify := function(code, num_iterations)
local c, frontier, linexp, free, undefined, i;
Print("Orig cost : ", ArithCostCode(code), "\n");
for i in [1..num_iterations] do
code := DeepSimplify(code); Print("DS : ", ArithCostCode(code), "\n");
code := CopyPropagate(code); Print("CP : ", ArithCostCode(code), "\n");
code := CopyPropagate(code); Print("CP : ", ArithCostCode(code), "\n");
code := CopyPropagate(code); Print("CP : ", ArithCostCode(code), "\n");
code := BinSplit(code);
code := RCSE(code); Print("RCSE : ", ArithCostCode(code), "\n");
od;
return code;
end;
# err := [];
# LIMIT := 1; ww:=DeepSimplify(Copy(w));; cm := CMatrix(pp(ww));
# if inf_norm(cm-dm) > 1e-10 then Print("ERROR\n"); Add(err, [Copy(ww), Copy(w), inf_norm(cm-dm)]); else Print("OK\n"); fi;
# LIMIT := 1; w:=DeepSimplify(Copy(ww));; cm := CMatrix(pp(w));
# if inf_norm(cm-dm) > 1e-10 then Print("ERROR\n"); err := Add(err, [Copy(w), Copy(ww), inf_norm(cm-dm)]); else Print("OK\n"); fi;
|
import data.nat.prime tactic.norm_num
structure Z_p (p : โ) (hp : nat.prime p) :=
(f : โ โ fin p)
namespace Z_p
variables (p : โ) (hp : nat.prime p)
variables {p}
theorem nat.prime.gt_zero : p > 0 := lt_of_lt_of_le (by norm_num : 0 < 2) (nat.prime.ge_two hp)
variables (p)
def pre_add_aux (n : โ) : fin p ร bool := (โจn % p, nat.mod_lt _ (nat.prime.gt_zero hp)โฉ, n โฅ p)
variables (p hp)
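-- Digit-wise addition of two base-p digit streams: the `bool` component of
-- each pair records whether that position overflowed, i.e. the carry that the
-- next position consumes.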
def pre_add (f g : โ โ fin p) : โ โ (fin p ร bool)
| 0 := pre_add_aux p hp ((f 0).val + (g 0).val)
| (nat.succ n) := pre_add_aux p hp ((f (n+1)).val + (g (n+1)).val + (if (pre_add n).2 then 1 else 0))
def add (f g : โ โ fin p) : โ โ fin p := prod.fst โ pre_add p hp f g
instance : has_add (Z_p p hp) := โจฮป m n, โจhp, add p hp m.f n.fโฉโฉ
def zero : Z_p p hp := โจhp, ฮป n, โจ0, nat.prime.gt_zero hpโฉโฉ
instance : has_zero (Z_p p hp) := โจzero p hpโฉ
def pre_neg_aux (n : โ) : fin p ร bool := (if h : n = 0 then (โจ0, nat.prime.gt_zero hpโฉ, ff) else (โจp-n, nat.sub_lt_self (nat.prime.gt_zero hp) (nat.pos_of_ne_zero h)โฉ, tt))
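-- Digit-wise negation: the `bool` component records whether a nonzero digit
-- has been seen so far (a pending borrow); once it is set, every higher digit
-- d is mapped to p - 1 - d instead of p - d (or 0).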
def pre_neg (f : โ โ fin p) : โ โ (fin p ร bool)
| 0 := pre_neg_aux p hp (f 0).val
| (nat.succ n) := (if (pre_neg n).2 then (โจp - nat.succ ((f (n+1)).val), nat.sub_lt_self (nat.prime.gt_zero hp) (nat.zero_lt_succ _)โฉ, tt) else pre_neg_aux p hp (f (n+1)).val)
def neg (f : โ โ fin p) : โ โ fin p := prod.fst โ pre_neg p hp f
instance : has_neg (Z_p p hp) := โจฮป n, โจhp, neg p hp n.fโฉโฉ
protected theorem ext : ฮ (m n : Z_p p hp), (โ i, m.f i = n.f i) โ m = n
| โจhpf, fโฉ โจhpg, gโฉ h := begin congr, exact funext h end
theorem zero_add (m : Z_p p hp) : m + 0 = m :=
begin
apply Z_p.ext,
intro i,
unfold has_add.add,
unfold add,
have h : pre_add p hp m.f (0 : Z_p p hp).f i = (m.f i, ff),
unfold has_zero.zero,
unfold zero,
induction i with i ih,
unfold pre_add,
unfold pre_add_aux,
dsimp,
rw nat.add_zero,
cases m.f 0,
congr,
dsimp,
exact nat.mod_eq_of_lt is_lt,
dsimp,
unfold to_bool,
apply if_neg,
exact not_le_of_lt is_lt,
unfold pre_add at *,
unfold pre_add_aux at *,
dsimp at *,
rw ih,
dsimp,
rw if_neg,
rw nat.add_zero,
rw nat.add_zero,
cases m.f (i+1),
congr,
dsimp,
exact nat.mod_eq_of_lt is_lt,
dsimp,
unfold to_bool,
apply if_neg,
exact not_le_of_lt is_lt,
trivial,
dsimp,
unfold function.comp,
rw h
end
end Z_p
|
header {* Normalizing Derivative *}
theory NDerivative
imports
Regular_Exp
begin
subsection {* Normalizing operations *}
text {* associativity, commutativity, idempotence, zero *}
fun nPlus :: "'a::order rexp \<Rightarrow> 'a rexp \<Rightarrow> 'a rexp"
where
"nPlus Zero r = r"
| "nPlus r Zero = r"
| "nPlus (Plus r s) t = nPlus r (nPlus s t)"
| "nPlus r (Plus s t) =
(if r = s then (Plus s t)
else if le_rexp r s then Plus r (Plus s t)
else Plus s (nPlus r t))"
| "nPlus r s =
(if r = s then r
else if le_rexp r s then Plus r s
else Plus s r)"
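(* for instance, nPlus (Atom a) (Plus (Atom a) r) rewrites to Plus (Atom a) r:
   the duplicate summand is dropped (idempotence) without changing the language *)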
lemma lang_nPlus[simp]: "lang (nPlus r s) = lang (Plus r s)"
by (induction r s rule: nPlus.induct) auto
text {* associativity, zero, one *}
fun nTimes :: "'a::order rexp \<Rightarrow> 'a rexp \<Rightarrow> 'a rexp"
where
"nTimes Zero _ = Zero"
| "nTimes _ Zero = Zero"
| "nTimes One r = r"
| "nTimes r One = r"
| "nTimes (Times r s) t = Times r (nTimes s t)"
| "nTimes r s = Times r s"
lemma lang_nTimes[simp]: "lang (nTimes r s) = lang (Times r s)"
by (induction r s rule: nTimes.induct) (auto simp: conc_assoc)
primrec norm :: "'a::order rexp \<Rightarrow> 'a rexp"
where
"norm Zero = Zero"
| "norm One = One"
| "norm (Atom a) = Atom a"
| "norm (Plus r s) = nPlus (norm r) (norm s)"
| "norm (Times r s) = nTimes (norm r) (norm s)"
| "norm (Star r) = Star (norm r)"
lemma lang_norm[simp]: "lang (norm r) = lang r"
by (induct r) auto
primrec nderiv :: "'a::order \<Rightarrow> 'a rexp \<Rightarrow> 'a rexp"
where
"nderiv _ Zero = Zero"
| "nderiv _ One = Zero"
| "nderiv a (Atom b) = (if a = b then One else Zero)"
| "nderiv a (Plus r s) = nPlus (nderiv a r) (nderiv a s)"
| "nderiv a (Times r s) =
(let r's = nTimes (nderiv a r) s
in if nullable r then nPlus r's (nderiv a s) else r's)"
| "nderiv a (Star r) = nTimes (nderiv a r) (Star r)"
lemma lang_nderiv: "lang (nderiv a r) = Deriv a (lang r)"
by (induction r) (auto simp: Let_def nullable_iff)
lemma deriv_no_occurrence:
"x \<notin> atoms r \<Longrightarrow> nderiv x r = Zero"
by (induction r) auto
lemma atoms_nPlus[simp]: "atoms (nPlus r s) = atoms r \<union> atoms s"
by (induction r s rule: nPlus.induct) auto
lemma atoms_nTimes: "atoms (nTimes r s) \<subseteq> atoms r \<union> atoms s"
by (induction r s rule: nTimes.induct) auto
lemma atoms_norm: "atoms (norm r) \<subseteq> atoms r"
by (induction r) (auto dest!:subsetD[OF atoms_nTimes])
lemma atoms_nderiv: "atoms (nderiv a r) \<subseteq> atoms r"
by (induction r) (auto simp: Let_def dest!:subsetD[OF atoms_nTimes])
end
|
{-|
Module : MachineLearning.Model.Regression
Description : TIR expression data structures
Copyright : (c) Fabricio Olivetti de Franca, 2022
License : GPL-3
Maintainer : [email protected]
Stability : experimental
Portability : POSIX
Fitting functions for the coefficients.
-}
module MachineLearning.Model.Regression
( fitTask
, predictTask
, evalPenalty
, applyMeasures
, nonlinearFit
, tirToMatrix
) where
import Data.Bifunctor
import Data.SRTree (evalFun, Function(..), OptIntPow(..), evalFun, inverseFunc)
import MachineLearning.TIR (TIR(..), Individual(..), Dataset, Constraint, assembleTree, replaceConsts)
import qualified Data.Vector as V
import qualified Data.Vector.Storable as VS
import Data.Vector ((!))
import Data.List (nub)
import Data.Vector.Storable (Vector, splitAt)
import Numeric.LinearAlgebra ((<\>), Matrix)
import Numeric.GSL.Fitting (nlFitting, FittingMethod(..))
import Prelude hiding (splitAt)
import qualified Numeric.LinearAlgebra as LA
import MachineLearning.TIR (Individual(..))
import MachineLearning.Utils.Config (Task(..), Penalty(..))
import MachineLearning.Model.Measure (Measure(..))
-- * IT specific stuff
createQ :: Vector Double -> LA.Matrix Double -> LA.Matrix Double
createQ ys = LA.fromColumns . map (*ys) . LA.toColumns
{-# INLINE createQ #-}
avg :: Vector Double -> Vector Double
avg ys = LA.fromList [LA.sumElements ys / fromIntegral (LA.size ys)]
{-# INLINE avg #-}
-- | transform a data matrix using a TIR expression. This function returns
-- a tuple with the transformed data of the numerator and denominator, respectivelly.
-- Each column of the transformed data represents one term of the TIR expression.
tirToMatrix :: Dataset Double -> TIR -> (LA.Matrix Double, LA.Matrix Double)
tirToMatrix xss (TIR _ p q) = bimap (LA.fromColumns . (bias:)) LA.fromColumns (p', q')
where
bias = V.head xss
xss' = V.tail xss
sigma2mtx = map (\(_, g, ps) -> evalFun g $ evalPi ps)
evalPi = foldr (\(ix, k) acc -> acc * (xss' ! ix ^^ k)) 1
p' = sigma2mtx p
q' = sigma2mtx q
{-# INLINE tirToMatrix #-}
-- | Fits a linear model using l2-penalty
ridge :: Matrix Double -> Vector Double -> Matrix Double
ridge a b = oA <\> oB
where
mu = 0.01
a' = LA.tr a
b' = LA.tr $ LA.asColumn b
oA = (a' <> LA.tr a') + (mu * LA.ident (LA.rows a'))
oB = a' <> LA.tr b'
{-# INLINE ridge #-}
-- | Predicts a linear model
predict :: Matrix Double -> Vector Double -> Vector Double
predict xs w | LA.cols xs == LA.size w = xs LA.#> w
| otherwise = error $ "predict: " ++ show (LA.size xs) ++ show (LA.size w)
{-# INLINE predict #-}
-- | Solve the OLS *zss*w = ys*
solveOLS :: Matrix Double -> Vector Double -> Vector Double
solveOLS zss ys = zss <\> ys
{-# INLINE solveOLS #-}
-- | Applies OLS and returns a Solution
-- if the expression is invalid, it returns Infinity as a fitness
--regress :: Matrix Double -> Vector Double -> [Vector Double]
regress :: TIR -> Dataset Double -> Vector Double -> [Vector Double]
regress tir xss ys = [ws]
where
(zssP, zssQ) = tirToMatrix xss tir
ys' = evalFun (inverseFunc $ _funY tir) ys
zssQy = createQ ys' zssQ
zss = if LA.cols zssQ >= 1
then zssP LA.||| negate zssQy
else zssP
ws = if LA.cols zss == 1
then avg ys'
else solveOLS zss ys'
-- regress zss ys = [solveOLS zss ys]
{-# INLINE regress #-}
-- | Applies conjugate gradient for binary classification
--classify :: Matrix Double -> Vector Double -> [Vector Double]
classify :: Int -> TIR -> Dataset Double -> Vector Double -> [Vector Double]
classify niter tir xss ys = [ws]
where
ws = nonlinearFit niter zssP zssQ ys sigmoid dsigmoid theta0
theta0 = LA.konst 0 (LA.cols zssP + LA.cols zssQ)
(zssP, zssQ) = tirToMatrix xss tir
-- | Applies conjugate gradient for one-vs-all classification
--classifyMult :: Matrix Double -> Vector Double -> [Vector Double]
classifyMult :: Int -> TIR -> Dataset Double -> Vector Double -> [Vector Double]
classifyMult niter tir xss ys = zipWith minimize yss theta0
where
numLabels = length $ nub $ LA.toList ys
yss = map f [0 .. numLabels-1]
minimize y t = nonlinearFit niter zssP zssQ y sigmoid dsigmoid t
theta0 = replicate numLabels $ LA.konst 0 (LA.cols zssP + LA.cols zssQ)
(zssP, zssQ) = tirToMatrix xss tir
f sample = VS.map (\a -> if round a == sample then 1 else 0) ys
-- | chooses the appropriate fitting function
--fitTask :: Task -> Matrix Double -> Vector Double -> [Vector Double]
fitTask :: Task -> TIR -> Dataset Double -> Vector Double -> [Vector Double]
fitTask Regression = regress
fitTask (RegressionNL niter) = regressNL niter
fitTask (Classification niter) = classify niter
fitTask (ClassMult niter) = classifyMult niter
{-# INLINE fitTask #-}
-- | sigmoid function for classification.
sigmoid :: Floating a => a -> a
sigmoid z = 1 / (1+exp(-z))
{-# INLINE sigmoid #-}
-- | derivative sigmoid function for classification.
dsigmoid :: Floating a => a -> a
dsigmoid z = sigmoid z * (1 - sigmoid z)
{-# INLINE dsigmoid #-}
-- | chooses the appropriate prediction function
predictTask :: Task -> [Vector Double] -> Vector Double
predictTask _ [] = error "predictTask: empty coefficients matrix"
predictTask Regression yss = head yss
predictTask (RegressionNL _) yss = head yss
predictTask (Classification _) yss = sigmoid $ head yss
predictTask (ClassMult _) yss = LA.vector $ map (fromIntegral . LA.maxIndex) $ LA.toRows $ LA.fromColumns $ map sigmoid yss
{-# INLINE predictTask #-}
-- | evals the penalty function
evalPenalty :: Penalty -> Int -> Double -> Double
evalPenalty NoPenalty _ _ = 0.0
evalPenalty (Len c) len _ = fromIntegral len * c
evalPenalty (Shape c) _ val = val*c
{-# INLINE evalPenalty #-}
-- | applies a list of performance measures
applyMeasures :: [Measure] -> Vector Double -> Vector Double -> [Double]
applyMeasures measures ys ysHat = map ((`uncurry` (ys, ysHat)) . _fun) measures
{-# INLINE applyMeasures #-}
regressNL :: Int -> TIR -> Dataset Double -> Vector Double -> [Vector Double]
regressNL niter tir xss ys = [ws]
where
ws = nonlinearFit niter zssP zssQ ys f f' theta0
f = evalFun $ _funY tir
f' = derivative $ _funY tir
--theta0 = LA.konst 1 (LA.cols zssP + LA.cols zssQ)
theta0 = head $ regress tir xss ys
(zssP, zssQ) = tirToMatrix xss tir
-- | Non-linear optimization using Levenberg-Marquardt method.
--nonlinearFit :: Monad m => Vector Double -> Matrix Double -> Matrix Double -> Vector Double -> m (Vector Double)
nonlinearFit :: Int
-> Matrix Double
-> Matrix Double
-> Vector Double
-> (Vector Double -> Vector Double)
-> (Vector Double -> Vector Double)
-> Vector Double
-> Vector Double
nonlinearFit niter zssP zssQ ys f f' theta0 = fst $ nlFitting LevenbergMarquardtScaled 1e-6 1e-6 niter model' jacob' theta0
where
model' = model f ys zssP zssQ
jacob' = jacob f' zssP zssQ
-- | calculates the error given the parameter vector beta
model :: (Vector Double -> Vector Double) -> Vector Double -> Matrix Double -> Matrix Double -> Vector Double -> Vector Double
model f ys zssP zssQ beta
| LA.cols zssQ == 0 = f ysHat_P - ys
| otherwise = f ysHat - ys
where
(betaP, betaQ) = splitAt (LA.cols zssP) beta
ysHat_P = predict zssP betaP
ysHat_Q = if LA.cols zssQ == 0 then 0 else predict zssQ betaQ
ysHat = ysHat_P / (1 + ysHat_Q)
-- | calculates the Jacobian given the parameter vector beta. Doesn't support
-- the outer transformation function.
jacob :: (Vector Double -> Vector Double) -> Matrix Double -> Matrix Double -> Vector Double -> Matrix Double
jacob f zssP zssQ beta | LA.cols zssQ == 0 = zssP
| otherwise = LA.fromColumns $ pjac <> qjac
where
(betaP, betaQ) = splitAt (LA.cols zssP) beta
ysHat_P = predict zssP betaP
ysHat_Q = predict zssQ betaQ
ysHat = f $ ysHat_P / (1 + ysHat_Q)
pjac = [ysHat * c / ysHat_Q | c <- LA.toColumns zssP]
qjac = [-ysHat * c * (ysHat_P / (1 + ysHat_Q)^2) | c <- LA.toColumns zssQ]
derivative :: (Eq val, Floating val) => Function -> val -> val
derivative Id = const 1
derivative Abs = \x -> x / abs x
derivative Sin = cos
derivative Cos = negate.sin
derivative Tan = recip . (**2.0) . cos
derivative Sinh = cosh
derivative Cosh = sinh
derivative Tanh = (1-) . (**2.0) . tanh
derivative ASin = recip . sqrt . (1-) . (^2)
derivative ACos = negate . recip . sqrt . (1-) . (^2)
derivative ATan = recip . (1+) . (^2)
derivative ASinh = recip . sqrt . (1+) . (^2)
derivative ACosh = \x -> 1 / (sqrt (x-1) * sqrt (x+1))
derivative ATanh = recip . (1-) . (^2)
derivative Sqrt = recip . (2*) . sqrt
derivative Square = (2*)
derivative Exp = exp
derivative Log = recip
{-# INLINE derivative #-}
-- (w1 * p1 + w2 * p2) / (1 + w3 * p3 + w4 * p4)
-- d/dw1 = p1 / (1 + w3 * p3 + w4 * p4)
-- d/dw2 = p2 / (1 + w3 * p3 + w4 * p4)
-- d/dw3 = -p3 * (w1 * p2 + w2 * p2) / (1 + w3 * p3 + w4 * p4)^2
{-
toEv :: SRTree Int Double -> (V.Vector Double -> Double)
toEv (Var !ix) = (`V.unsafeIndex` ix)
toEv (Const !val) = const val
toEv (Add !l !r) = jn (+) (toEv l) (toEv r)
toEv (Mul !l !r) = jn (*) (toEv l) (toEv r)
toEv (Fun Exp !t) = exp . toEv t
{-# INLINE toEv #-}
jn :: (Double -> Double -> Double) -> (V.Vector Double -> Double) -> (V.Vector Double -> Double) -> (V.Vector Double -> Double)
jn op f g = \x -> op (f x) (g x)
{-# INLINE jn #-}
-}
|
# Numpy (Numeric Python)
> - As its name suggests, a Python package for **numerical work in Python**
> - Provides functions for **implementing linear algebra** and for **scientific computing**
> - (key) Uses the `ndarray` multidimensional array, which makes **vectorized arithmetic** possible
> - Uses **broadcasting** so that data with different shapes can still be combined in one operation
>> - Not offered by most traditional languages
>> - An extremely powerful feature; very efficient for large-scale (big data) computation
## Installing and importing Numpy
> - In the previous lessons we learned that classes and functions can be imported from a module and used directly.
- However, when the class is not one you wrote yourself, or the required package is not installed on the current machine, it can be installed with a simple command.
>> - The `pip` and `conda` commands: Python library managers that make it easy to install open-source libraries
> - 1. From a console: **`pip` `install` `[package name]`** or **`conda` `install` `[package name]`**
> - 2. From a Jupyter notebook: **`!pip` `install` `[package name]`**
> - If your Python environment was set up with Anaconda, Numpy is already installed by default
```python
# Install Numpy from inside a Jupyter notebook
!pip install numpy
```
    DEPRECATION: Configuring installation scheme with distutils config files is deprecated and will no longer work in the near future. If you are using a Homebrew or Linuxbrew Python, please see discussion at https://github.com/Homebrew/homebrew-core/issues/76621
Requirement already satisfied: numpy in /opt/homebrew/lib/python3.9/site-packages (1.21.2)
```python
# Import the package to use Numpy
import numpy as np
# By convention the alias np is used.
# Almost every Python user sticks to this nickname, so we strongly recommend you follow it as well.
```
## A quick look at linear algebra for data analysis
numpy is at its core a package for numerical computing. Understanding even a little linear algebra lets you work with data much more deeply.
Source: https://art28.github.io/blog/linear-algebra-1/
### How each kind of data is written, with examples
#### Scalar
1, 3.14, any real number or integer
#### Vector
[1, 2, 3, 4], "a string"
#### 3 x 4 matrix
[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 0, 11, 12]]
#### 2 x 3 x 4 tensor
[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 0, 11, 12]],
[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 0, 11, 12]]]
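As a quick illustrative sketch (using only plain numpy and the literals above), each extra level of nesting adds one dimension to the array:
```python
import numpy as np

scalar = np.array(3.14)              # 0-dimensional
vector = np.array([1, 2, 3, 4])      # 1-dimensional
matrix = np.array([[1, 2, 3, 4],
                   [5, 6, 7, 8],
                   [9, 0, 11, 12]])  # 2-dimensional, shape (3, 4)
tensor = np.array([matrix, matrix])  # 3-dimensional, shape (2, 3, 4)

print(scalar.ndim, vector.ndim, matrix.ndim, tensor.ndim)  # 0 1 2 3
```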
### Linear algebra expressed as data
Source: https://m.blog.naver.com/nabilera1/221978354680
### Arithmetic by data representation
> Scalar +, -, *, / -> the result is a scalar
Vector +, -, dot product -> + and - give a vector, the dot product gives a scalar
Matrix +, -, *, /
Tensor +, -, *, /
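A small sketch of the rules above (standard numpy only): adding or subtracting two vectors gives another vector, while their dot product collapses to a scalar.
```python
import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([4, 3, 2, 1])

print(a + b)         # vector + vector -> vector: [5 5 5 5]
print(a - b)         # vector - vector -> vector: [-3 -1  1  3]
print(np.dot(a, b))  # dot product -> scalar: 1*4 + 2*3 + 3*2 + 4*1 = 20
```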
### Special operations frequently used in data analysis
The dot product of two vectors
$$\begin{bmatrix}1 & 2 & 3 & 4 \end{bmatrix} \times \begin{bmatrix}1 \\ 2 \\ 3 \\ 4 \end{bmatrix} = 1 * 1 + \
2 * 2 + 3 * 3 + 4 * 4 = 30$$
# $$ A^TA $$
#### For the dot product of two vectors to be defined
1. The facing dimensions (lengths) of the two vectors must be equal.
2. The vector on the left of the operation must be transposed.
Source: https://ko.wikipedia.org/wiki/%EC%A0%84%EC%B9%98%ED%96%89%EB%A0%AC
#### Expressing an equation as a vector dot product
$$y = \begin{bmatrix}1 & 2 & 1 \end{bmatrix} \times \begin{bmatrix}x_1 \\ x_2 \\ x_3 \\ \end{bmatrix} = 1 * x_1 + \
2 * x_2 + 1 * x_3 = x_1 + 2x_2 + x_3$$
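The two dot products written out above can be checked directly (for 1-D arrays `@` and `np.dot` behave the same); the x values below are just illustrative numbers:
```python
import numpy as np

v = np.array([1, 2, 3, 4])
print(v @ v)                # 1*1 + 2*2 + 3*3 + 4*4 = 30

# y = x1 + 2*x2 + x3 as a dot product of a weight vector and a variable vector
w = np.array([1, 2, 1])
x = np.array([10, 20, 30])  # example values for x1, x2, x3
print(w @ x)                # 10 + 40 + 30 = 80
```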
## Broadcasting
> numpy operations in Python support broadcasting.
When operating on arrays, the smaller-shaped array is stretched to the length of the larger one before the operation is applied.
Source: http://www.astroml.org/book_figures/appendix/fig_broadcast_visual.html
```python
np.arange(3).reshape((3,1))+np.arange(3)
```
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
## Numpy functions (universal functions)
> `numpy` provides a wide range of functions for numerical computation.
>> Basic structure of a computation function call
ex) **`np.sum`**(target array, axis=axis along which to operate)
**`dtype()`**
### Arithmetic operations
- **`prod()`**
- **`dot()`**
- **`sum()`**
- **`cumprod()`**
- **`cumsum()`**
- **`abs()`**
 - **`square()`**
- **`sqrt()`**
- **`exp()`**
- **`log()`**
### Statistical operations
- **`mean()`**
- **`std()`**
- **`var()`**
- **`max()`**
- **`min()`**
- **`argmax()`**
- **`argmin()`**
### Logical operations
- **`arange()`**
- **`isnan()`**
- **`isinf()`**
- **`unique()`**
### Miscellaneous
- **`shape()`**
- **`reshape()`**
- **`ndim()`**
- **`transpose()`**
Reference for the various computation functions: https://numpy.org/doc/stable/reference/routines.math.html
### Practicing numpy functions
```python
# Datasets for the function examples
test_list = [1, 2, 3, 4]
test_list2 = [[1, 3], [5, 7]]
test_flist = [1, 3.14, -4.5]
test_list_2nd = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
test_list_3rd = [[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[1, 2, 3, 4],
[5, 6, 7, 8]]]
test_exp = [0, 1, 10]
test_nan = [0, np.nan, np.inf]
```
```python
# Product of all elements
np.prod(test_list)
```
24
```python
# Sum
np.sum(test_list)
```
10
```python
# Cumulative product, returned as a vector
np.cumprod(test_list)
```
array([ 1, 2, 6, 24])
```python
# Cumulative sum
# Frequently used for daily/monthly running sales totals
np.cumsum(test_list)
```
array([ 1, 3, 6, 10])
```python
# Absolute value
np.abs(test_flist)
```
array([1. , 3.14, 4.5 ])
```python
# Square root
np.sqrt(test_list)
```
array([1. , 1.41421356, 1.73205081, 2. ])
```python
# Square
```
```python
# exp
```
```python
# Logarithm
```
### Statistic values
```python
# Mean
np.mean(test_list)
```
2.5
```python
# Standard deviation
np.std(test_list)
```
1.118033988749895
```python
# Variance
np.var(test_list)
```
1.25
```python
# Maximum
```
```python
# Minimum
```
```python
test_list_2nd
```
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
```python
# Returns the index at which the maximum value is located
# The output is an index
np.argmax(test_list_2nd)
```
8
```python
# Index of the minimum value
np.argmin(test_list_2nd)
```
0
```python
# Setting a range
# Works the same way as the built-in range() function
# for i in range(0, 100, 10):
#     print(i)
# (start, stop (exclusive), step)
np.arange(10, 100, 10)
```
array([10, 20, 30, 40, 50, 60, 70, 80, 90])
```python
# Setting a range, take 2
# (start, stop, number of points)
np.linspace(0, 10, 50) # 50 evenly spaced points from 0 to 10
```
array([ 0. , 0.20408163, 0.40816327, 0.6122449 , 0.81632653,
1.02040816, 1.2244898 , 1.42857143, 1.63265306, 1.83673469,
2.04081633, 2.24489796, 2.44897959, 2.65306122, 2.85714286,
3.06122449, 3.26530612, 3.46938776, 3.67346939, 3.87755102,
4.08163265, 4.28571429, 4.48979592, 4.69387755, 4.89795918,
5.10204082, 5.30612245, 5.51020408, 5.71428571, 5.91836735,
6.12244898, 6.32653061, 6.53061224, 6.73469388, 6.93877551,
7.14285714, 7.34693878, 7.55102041, 7.75510204, 7.95918367,
8.16326531, 8.36734694, 8.57142857, 8.7755102 , 8.97959184,
9.18367347, 9.3877551 , 9.59183673, 9.79591837, 10. ])
```python
test_nan
```
[0, nan, inf]
```python
# Check for missing values (NaN)
np.isnan(test_nan)
```
array([False, True, False])
```python
# Check for divergence (infinity)
np.isinf(test_nan)
```
array([False, False, True])
```python
test_list_3rd
```
[[[1, 2, 3, 4], [5, 6, 7, 8]],
[[1, 2, 3, 4], [5, 6, 7, 8]],
[[1, 2, 3, 4], [5, 6, 7, 8]]]
```python
# Unique values
np.unique(test_list_3rd)
len(np.unique(test_list_3rd))
```
8
```python
# Check the structure (shape) of the data, i.e. its dimensions
# Read the shape starting from the innermost (column) axis
np.shape(test_list_3rd)
```
(3, 2, 4)
```python
# Change the shape of the data
# When is reshape possible? The total number of elements must stay the same.
np.reshape(test_list_3rd, (4,6))
np.reshape(test_list_3rd, (2,2,6))
```
array([[[1, 2, 3, 4, 5, 6],
[7, 8, 1, 2, 3, 4]],
[[5, 6, 7, 8, 1, 2],
[3, 4, 5, 6, 7, 8]]])
```python
test_list_3rd
```
[[[1, 2, 3, 4], [5, 6, 7, 8]],
[[1, 2, 3, 4], [5, 6, 7, 8]],
[[1, 2, 3, 4], [5, 6, 7, 8]]]
```python
# Check the number of dimensions of the data
np.ndim(test_list_3rd)
# This is the geometric notion of the data's dimensionality, and data analysis talks about dimensions using the same convention
```
3
```python
test_list_2nd
```
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
```python
# Transpose
np.transpose(test_list_2nd)
```
array([[1, 4, 7],
[2, 5, 8],
[3, 6, 9]])
```python
test_list
```
[1, 2, 3, 4]
```python
```
## Numpy array (arrays and matrices)
> - The data structure at the heart of numpy operations.
- Easier to create than a list, with the advantage of **much faster computation**.
- **Supports broadcasting**.
- However, it can only store data of **a single type**.
- Arrays can also be created with numpy's built-in functions.
>> Basic structure of the array-creation call
ex) **`np.array(data to convert into an array)`**
ex) **`np.arange(start, end, step_forward)`**
### Practicing numpy arrays
```python
# Convert existing data structures to arrays
test_array = np.array(test_list)
test_array2 = np.array(test_list2)
test_farray = np.array(test_flist)
test_array_2nd = np.array(test_list_2nd)
test_array_3rd = np.array(test_list_3rd)
```
```python
# Check the created array
test_array
```
array([1, 2, 3, 4])
```python
array_list = [1,2,4.5]
```
```python
# Check that only values of a single type are stored
array_test = np.array(array_list)
```
```python
array_test # the overall dtype is promoted in the order int -> float -> str
```
array([1. , 2. , 4.5])
```python
# A 2-D array
test_list_2nd
# The array prints 2-D data more readably
test_array_2nd
```
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
```python
# A 3-D array
test_list_3rd
test_array_3rd
```
array([[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[1, 2, 3, 4],
[5, 6, 7, 8]]])
```python
# Create with the np.arange function
np.arange(25).reshape(5,5)
```
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
```python
np.arange(1,25).reshape(2,12)
```
array([[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]])
### Creating arrays of special forms with functions
Basic structure of the calls
> ex) **`np.ones([shape of the structure])`**
>> The shape can be given only as an integer, a **[ ]** list, or a **( )** tuple.
- ones()
- zeros()
- empty()
- eye()
```python
# Create an array initialized with ones
np.ones([3,3])
```
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
```python
# Initialized with zeros
np.zeros((5,5))
```
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
```python
# "Empty" (uninitialized) values
np.empty((2,2))
```
array([[0., 0.],
[0., 0.]])
```python
# Identity matrix
# A x identity matrix = A
# Often used to make shapes line up so that an otherwise mismatched operation becomes possible
np.eye(4,4)
# Example sketch:
# date  sales
# date  sales
# date  sales
# date  sales
# date  sales
# date   date   date   date   date
# sales  bal.   bal.   bal.   bal.
# bal.   sales  bal.   bal.   bal.
# bal.   bal.   sales  bal.   bal.
```
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
```python
np.arange(16).reshape(4,4) @ np.eye(4,4)
```
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
```python
test_array @ np.transpose(test_array)
```
array([1, 2, 3, 4])
### Array attributes and built-in methods
`np.array` objects come with useful methods for arithmetic and statistical operations. We will also look at the ones for multidimensional, geometric operations.
> Basic structure of attribute access
ex) **`test_array.ndim`**
Frequently used attributes: `shape`, `dtype`, `ndim`
> Basic structure of method calls
ex) **`test_array.prod()`**
Unlike the np.sum() we studied earlier, these methods are called on the array itself instead of passing the array as an argument.
#### Array attributes
```python
test_array_3rd
```
array([[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[1, 2, 3, 4],
[5, 6, 7, 8]]])
```python
# Data type of the array
test_array_3rd.dtype
```
dtype('int64')
```python
# Shape of the array
test_array_3rd.shape
```
(3, 2, 4)
```python
# Number of dimensions
test_array_3rd.ndim
```
3
```python
# Transpose
np.transpose(test_array_3rd)
test_array_3rd.T
```
array([[[1, 1, 1],
[5, 5, 5]],
[[2, 2, 2],
[6, 6, 6]],
[[3, 3, 3],
[7, 7, 7]],
[[4, 4, 4],
[8, 8, 8]]])
#### Array methods
```python
# Calling the built-in methods
test_array.mean()
np.sqrt(test_array)
```
array([1. , 1.41421356, 1.73205081, 2. ])
```python
# The methods share names and keyword arguments with the numpy functions.
test_array.prod()
test_array.sum(axis=0)
...
...
```
### Array operations
Being a package built for numerical computing, numpy supports convenient array arithmetic. Let's run a few array operations to see how they differ from the same operations on other data structures.
```python
test_list = [1,2,3,4,5]
test_list2 = [x*2 for x in test_list]
test_list2
```
[2, 4, 6, 8, 10]
```python
# Array addition, subtraction, multiplication, division
test_array = np.array(test_list)
test_array > 1
```
array([False, True, True, True, True])
```python
# Create a large dataset to compare the actual difference in computation speed
big_list = [x for x in range(400000)]
big_array = np.array(big_list)
len(big_list), len(big_array)
```
(400000, 400000)
```python
# Add 1 to every element of the list
big_list2 = [x+1 for x in big_list] # a list comprehension is faster than a plain for loop (compare the timings)
# for index, item in enumerate(big_list):
# big_list[index] = item + 1
```
UsageError: Line magic function `%%time` not found.
```python
# Use the array's vectorized behaviour instead
big_array + 1
```
array([ 1, 2, 3, ..., 399998, 399999, 400000])
```python
# Matrix product
first_array = np.arange(15).reshape(5, 3)
second_array = np.arange(15).reshape(3, 5)
```
```python
first_array.shape, second_array.shape
```
((5, 3), (3, 5))
```python
# Matrix product operation
first_array @ second_array
```
array([[ 25, 28, 31, 34, 37],
[ 70, 82, 94, 106, 118],
[115, 136, 157, 178, 199],
[160, 190, 220, 250, 280],
[205, 244, 283, 322, 361]])
### Weighted sum of vectors
The dot product can be used to compute a weighted sum. A **weighted sum** does not simply add up several values; each value is first multiplied by a weight, and the products are then summed.
If the data vector is $x=[x_1, \cdots, x_N]^T$ and the weight vector is $w=[w_1, \cdots, w_N]^T$, the weighted sum of the data vector is:
$$
\begin{align}
w_1 x_1 + \cdots + w_N x_N = \sum_{i=1}^N w_i x_i
\end{align}
$$
Written as a product of the vectors $x$ and $w$, this value is simply $w^Tx$ or $x^Tw$.
When shopping, think of each item's price as the data vector and each item's quantity as the weights; their dot product gives the total amount to pay.
```python
# Weighted-sum practice problem
# We want to build a portfolio of Samsung Electronics, Celltrion, and Kakao.
# The prices per share are 80,000 KRW, 270,000 KRW, and 160,000 KRW respectively.
# Compute the purchase amount for 100 shares of Samsung Electronics, 30 shares of Celltrion, and 50 shares of Kakao.
price = np.array([80000,270000,160000])
item = np.array([100,30,50])
price @ item.T
# The result still comes out even though the shapes "don't match" => for a 1-D array, item.T is the same as item, so @ works either way
price @ item
```
24100000
### Array indexing and slicing (very important)
> A data structure is basically a bundle of data together with the container that manages that bundle.
We use data structures for analysis, but sometimes we need to access their contents directly.
>> What is **indexing**?
A command that accesses a single item inside the container; the index is the item's position number.
>> What is **slicing**?
A command that accesses several items inside the container at once.
The syntax for indexing and slicing is basically the same as for lists.
#### Practicing indexing and slicing
```python
# Create an array with values from 10 to 19
test = np.arange(10,20)
```
```python
# From index 0 up to index 3 (exclusive)
test[:3]
```
array([10, 11, 12])
```python
# From index 4 to the end
test[4:]
```
array([14, 15, 16, 17, 18, 19])
```python
# The last three elements
test[-3:]
```
array([17, 18, 19])
```python
# Every third index starting from 0
test[::3]
```
array([10, 13, 16, 19])
#### Let's try several indexing and slicing approaches
```python
index_test2 = np.array(range(25)).reshape([5, 5])
index_test2
```
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
```python
index_test2[2:,1:4]
```
array([[11, 12, 13],
[16, 17, 18],
[21, 22, 23]])
```python
index_test2[:2,2:]
```
array([[2, 3, 4],
[7, 8, 9]])
```python
index_test3 = np.arange(40).reshape(2, 5, 4)
index_test3
```
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]],
[[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31],
[32, 33, 34, 35],
[36, 37, 38, 39]]])
```python
index_test3[0,3:5,1:3]
```
array([[13, 14],
[17, 18]])
```python
index_test3[0:,0:,1]
```
array([[ 1, 5, 9, 13, 17],
[21, 25, 29, 33, 37]])
## Fancy indexing
A way of selecting data in numpy using a boolean vector produced by a vectorized comparison.
```python
pet = np.array(['๊ฐ','๊ณ ์์ด','๊ณ ์์ด','ํ์คํฐ','๊ฐ','ํ์คํฐ'])
num = np.array([1,2,3,4,5,6])
indexing_test = np.random.randn(6,5) # randn: generate a matrix of random numbers
```
```python
indexing_test
```
array([[ 1.21892182, -0.57278769, -2.01837856, 0.37751666, -2.14632145],
[ 1.72627741, 0.19978955, -0.45559384, 2.3741275 , 0.61280794],
[ 0.38249301, -0.10195438, 1.1064851 , 0.14316647, -0.01103692],
[ 0.89879404, -0.08147537, -0.02147208, 0.77555509, -0.45326144],
[ 0.12338784, -0.00431352, -1.31633818, 0.85516829, 0.29007829],
[-0.17169369, 0.42409219, 1.69908292, 1.43223254, -1.29304307]])
```python
pet == '๊ฐ'
```
array([ True, False, False, False, True, False])
```python
indexing_test[pet=='๊ฐ']
```
array([[ 1.21892182, -0.57278769, -2.01837856, 0.37751666, -2.14632145],
[ 0.12338784, -0.00431352, -1.31633818, 0.85516829, 0.29007829]])
```python
indexing_test[~(pet=='๊ฐ')]
```
array([[ 1.72627741, 0.19978955, -0.45559384, 2.3741275 , 0.61280794],
[ 0.38249301, -0.10195438, 1.1064851 , 0.14316647, -0.01103692],
[ 0.89879404, -0.08147537, -0.02147208, 0.77555509, -0.45326144],
[-0.17169369, 0.42409219, 1.69908292, 1.43223254, -1.29304307]])
```python
(pet=='๊ฐ') | (pet=='ํ์คํฐ')
```
array([ True, False, False, True, True, True])
```python
indexing_test[(pet=='๊ฐ') | (pet=='ํ์คํฐ')]
```
array([[ 1.21892182, -0.57278769, -2.01837856, 0.37751666, -2.14632145],
[ 0.89879404, -0.08147537, -0.02147208, 0.77555509, -0.45326144],
[ 0.12338784, -0.00431352, -1.31633818, 0.85516829, 0.29007829],
[-0.17169369, 0.42409219, 1.69908292, 1.43223254, -1.29304307]])
```python
num > 3
```
array([False, False, False, True, True, True])
```python
indexing_test[num>3]
```
array([[ 0.89879404, -0.08147537, -0.02147208, 0.77555509, -0.45326144],
[ 0.12338784, -0.00431352, -1.31633818, 0.85516829, 0.29007829],
[-0.17169369, 0.42409219, 1.69908292, 1.43223254, -1.29304307]])
```python
# Booleans are counted as 1 and 0 in arithmetic
(pet =='๊ฐ').sum()
```
2
```python
# True if at least one element is True
(pet =='๊ฐ').any()
```
True
```python
(pet =='๊ฐ').all()
```
False
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Membership for containers
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Container.Membership where
open import Level using (_โ_)
open import Relation.Unary using (Pred)
open import Relation.Binary.PropositionalEquality
open import Data.Container.Core
open import Data.Container.Relation.Unary.Any
module _ {s p} {C : Container s p} {x} {X : Set x} where
infix 4 _โ_
_โ_ : X โ Pred (โฆ C โง X) (p โ x)
x โ xs = โ C (_โก_ x) xs
|