text
stringlengths 0
3.34M
|
---|
function data = fixtrialdef(data)

% FIXTRIALDEF adds a sampleinfo field (and, if possible, a trialinfo field)
% to the raw data structure. The sample information is taken from the trl
% matrix found in the configuration; when that is absent or inconsistent
% with the data, a trl matrix is constructed on the fly, assuming that the
% trials are consecutive segments of a continuous recording.

% Copyright (C) 2009-2010, Robert Oostenveld and Jan-Mathijs Schoffelen

if isfield(data, 'sampleinfo')
  % nothing to be done
  return;
end

if ~isfield(data, 'cfg')
  % fieldtrip raw data structures are expected to have a cfg
  data.cfg = [];
end

hastrial   = isfield(data, 'trial');
hastime    = isfield(data, 'time');
hasfsample = isfield(data, 'fsample');

if ~hasfsample && hastime
  % estimate the sampling rate from the time axis of the first trial
  data.fsample = median(1./diff(data.time{1}));
end

if hastrial,
  ntrial = length(data.trial);
else
  % the data is not raw, infer the number of trials from the dimord
  ntrial = dimlength(data, 'rpt');
  if ~isfinite(ntrial) && strcmp(data.dimord(1:6), 'rpttap') && isfield(data, 'cumtapcnt'),
    ntrial = numel(data.cumtapcnt);
  elseif ~isfinite(ntrial)
    ntrial = 1;
  end
end

trl = ft_findcfg(data.cfg, 'trl');

% determine the number of samples in each trial
nsmp = zeros(ntrial,1);
if hastrial,
  for i=1:ntrial
    nsmp(i) = size(data.trial{i}, 2);
  end
elseif ~isempty(trl)
  nsmp = trl(:,2) - trl(:,1) + 1;
end

% a trl matrix from the configuration is only usable if it describes the
% same number of trials as the data, each with the same number of samples
if ~isempty(trl) && (size(trl,1)~=numel(nsmp) || any(nsmp~=trl(:,2)-trl(:,1)+1))
  warning_once('the trial definition in the configuration is inconsistent with the actual data');
  trl = [];
end

if isempty(trl)
  warning_once('the data does not contain a trial definition, assuming that the trials are consecutive segments of a continuous recording');
  % construct a trial definition on the fly, assume that the trials are
  % consecutive segments of a continuous recording
  if ntrial==1,
    begsample = 1;
  else
    begsample = cat(1, 0, cumsum(nsmp(1:end-1))) + 1;
  end
  endsample = begsample + nsmp - 1;

  % the offset encodes the latency of the first sample relative to time zero
  offset = zeros(ntrial,1);
  if hastime,
    for i=1:ntrial
      offset(i) = time2offset(data.time{i}, data.fsample);
    end
  end
  trl = [begsample endsample offset];
end

if ~isfield(data, 'sampleinfo') && ~isempty(trl)
  data.sampleinfo = trl(:, 1:2);
elseif ~isfield(data, 'sampleinfo') && isempty(trl)
  warning_once('failed to create sampleinfo field');
end

if (~isfield(data, 'trialinfo') || isempty(data.trialinfo)) && ~isempty(trl) && size(trl, 2) > 3,
  % columns beyond the third of the trl matrix hold user-specified trial information
  data.trialinfo = trl(:, 4:end);
end

% if data is not raw then it does not make sense to keep the sampleinfo
if ~hastrial && isfield(data, 'sampleinfo')
  data = rmfield(data, 'sampleinfo');
end
|
[STATEMENT]
lemma div_const_unit_poly: "is_unit c \<Longrightarrow> p div [:c:] = smult (1 div c) p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_unit c \<Longrightarrow> p div [:c:] = smult ((1::'a) div c) p
[PROOF STEP]
by (simp add: is_unit_const_poly_iff unit_eq_div1) |
(* Title: HOL/Auth/n_germanSymIndex_lemma_inv__30_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_inv__30_on_rules imports n_germanSymIndex_lemma_on_inv__30
begin
section{*All lemmas on causal relation between inv__30*}
(* Main result of this theory: the invariant inv__30 is preserved by every
   rule of the German protocol.  The proof performs an exhaustive case split
   over all fourteen parameterised rule schemas; each case is discharged by
   the corresponding per-rule lemma imported from
   n_germanSymIndex_lemma_on_inv__30. *)
lemma lemma_inv__30_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__30 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
proof -
(* c1: any rule r in rules N is an instance of one of the fourteen schemas *)
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
(* one "moreover" block per rule schema; each case is closed by metis with
   the matching n_<rule>Vsinv__30 lemma *)
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqEVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__30) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__30) done
}
(* the case split c1 is exhaustive, so the cases above cover every rule *)
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
The offset of a constant polynomial is itself. |
[STATEMENT]
lemma le_quasi_borel_iff:
"X \<le> Y \<longleftrightarrow> (if qbs_space X = qbs_space Y then qbs_Mx Y \<subseteq> qbs_Mx X else qbs_space X \<subset> qbs_space Y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (X \<le> Y) = (if qbs_space X = qbs_space Y then qbs_Mx Y \<subseteq> qbs_Mx X else qbs_space X \<subset> qbs_space Y)
[PROOF STEP]
by(auto elim: less_eq_quasi_borel.cases intro: less_eq_quasi_borel.intros) |
In December 1789, after five years of diplomatic wrangling about the terms of the Treaty of Mangalore that had ended the Second Anglo-Mysore War, the ruler of Mysore Tipu Sultan again declared war on the British East India Company and their allies in Southern India. For the next two years the war continued as British forces and their allies drove the Mysore armies back towards the capital of Seringapatam. Both sides were reliant on supply by sea to maintain their campaigns inland: the British forces were supported from their major ports at Bombay and Madras, later stationing additional forces at the small port of Tellicherry inside Mysore territory. The Mysorean forces were supplied through Mangalore by French ships. France had been an ally of Tipu Sultan's father Hyder Ali during the Second Anglo-Mysore War and although the political instability caused by the French Revolution in Europe prevented active involvement, they ensured that their ships kept up a supply of equipment to Mysore throughout the war.
|
The complex conjugate of the sum of two complex numbers is the sum of the complex conjugates of the two complex numbers. |
Setup Auto Delivery for Replacement Normal Brush Heads - 2 Pack | SP-2FC1B for Remington.
Refresh your skincare routine with the REMINGTON® Normal Brush Head Replacement 2 Pack. It's perfect for everyday cleansing on all skin types. Compatible with REMINGTON® Skin Care Products | Models: EP7070, FC500, FC1000 & FC1500.
Refresh your skincare routine with the REMINGTON Normal Brush Head Replacement.
Everyday cleaning for all skin types and skincare routines.
What Can This Normal Brush Head Do?
It's perfect for everyday cleansing on all skin types.
Designed for everyday use with a mixture of bristles to gently clean your skin.
Powered cleansing suitable for all skin types. |
lemma continuous_imp_tendsto: assumes "continuous (at x0) f" and "x \<longlonglongrightarrow> x0" shows "(f \<circ> x) \<longlonglongrightarrow> (f x0)" |
%DIF PREAMBLE START
\RequirePackage{xcolor}
\definecolor{MYDELCOLOR}{rgb}{0.464,0.531,0.598} % grey color for deleted text
\definecolor{MYADDCOLOR}{rgb}{0.55, 0.0, 0.55} % magenta color for added text
\RequirePackage{graphicx}%DIF PREAMBLE
\RequirePackage{tikz} %DIF PREAMBLE
\RequirePackage[normalem]{ulem} %DIF PREAMBLE
\providecommand{\DIFadd}[1]{{\protect\color{MYADDCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFdel}[1]{{\protect\color{MYDELCOLOR}\sout{#1}}} %DIF PREAMBLE
\providecommand{\DIFaddcite}[1]{{\protect\color{MYADDCOLOR}\hypersetup{citecolor = MYADDCOLOR}#1}}
\providecommand{\DIFdelcite}[1]{{\protect\color{MYDELCOLOR}\hypersetup{citecolor = MYDELCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFdelmath}[1]{{\protect\color{MYDELCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFaddmath}[1]{{\protect\color{MYADDCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFdelenv}[1]{{\protect\color{MYDELCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFaddenv}[1]{{\protect\color{MYADDCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFdeltab}[1]{{\protect\color{MYDELCOLOR}#1}} %DIF PREAMBLE
\providecommand{\DIFaddtab}[1]{{\protect\color{MYADDCOLOR}#1}} %DIF PREAMBLE
%DIF PREAMBLE
\providecommand{\DIFdelfig}[1]{ \begin{tikzpicture}
\node[anchor=south west,inner sep=0] (image) at (0,0) {\protect\color{MYDELCOLOR}
#1
};
\begin{scope}[x={(image.south east)},y={(image.north west)}]
\draw[MYDELCOLOR,line width=1 mm] (0, 1)--(1, 0); % cross line 1
\draw[MYDELCOLOR,line width=1 mm] (0, 0)--(1, 1); % cross line 2
\draw[MYDELCOLOR,line width=1 mm] (0, 0)--(0, 1)--(1,1)--(1,0)--(0,0)--(0,1); % box
\end{scope}
\end{tikzpicture} }
%DIF PREAMBLE
\providecommand{\DIFaddfig}[1]{ \begin{tikzpicture}
\node[anchor=south west,inner sep=0] (image) at (0,0) {\protect\color{MYADDCOLOR}
#1
};
\begin{scope}[x={(image.south east)},y={(image.north west)}]
\draw[MYADDCOLOR,line width=1 mm] (0, 0)--(0, 1)--(1,1)--(1,0)--(0,0)--(0,1); % box
\end{scope}
\end{tikzpicture} }
%DIF PREAMBLE END
\documentclass[preprint,12pt]{elsarticle}
\usepackage{amsmath,amssymb,bm}
\usepackage[colorlinks=true,citecolor=green]{hyperref}
\usepackage{verbatim}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{booktabs}
\usepackage{csvsimple}
\newcommand{\matr}[1]{\mathbf{#1}}
\newcommand{\vect}[1]{\mathbf{#1}}
\newcommand{\ud}{\mathrm{d}}
\renewcommand{\vec}[1]{\mathbf{#1}}
\newcommand{\veca}[2]{\mathbf{#1}{#2}}
\renewcommand{\bm}[1]{\mathbf{#1}}
\newcommand{\bs}[1]{\boldsymbol{#1}}
\graphicspath{{figs/}{../../figures/}}
\journal{Composite Structures}
\begin{document}
\begin{frontmatter}
\title{Elastic constants identification of woven fabric reinforced composites by using guided wave dispersion curves and genetic algorithm}
\address[IFFM]{Institute of Fluid Flow Machinery, Polish Academy of Sciences, Poland}
\author{Pawel Kudela\corref{cor1}\fnref{IFFM}}
\ead{[email protected]}
\author{Maciej Radzienski\fnref{IFFM}}
\author{Piotr Fiborek \fnref{IFFM}}
\author{Tomasz Wandowski \fnref{IFFM}}
\cortext[cor1]{Corresponding author}
\begin{abstract}
Typically, material properties are estimated by destructive tests and used in computational models in the design and analysis of structures. This approach is well-established in relation to isotropic homogeneous materials. However, if this approach is used for composite laminates, inaccuracies can arise that lead to vastly different stress distributions, strain rates, natural frequencies, and velocities of propagating elastic waves. In order to account for this problem, an alternative method is proposed, which utilises the Lamb wave propagation phenomenon and an optimisation technique. Propagating Lamb waves are highly sensitive to changes in material parameters and are often used for structural health monitoring of structures. In the proposed approach, the elastic constants, which are utilised to determine dispersion curves of Lamb waves, are optimised to achieve a good correlation between model predictions and experimental observations. The dispersion curves of Lamb waves were calculated by using the semi-analytical spectral element method. The resulting dispersion curves were compared with experimental measurements of full wavefield data conducted by scanning laser Doppler vibrometer and processed by 3D Fourier transform. Next, elastic constants were \DIFdel{optimised} \DIFadd{determined} by using a genetic algorithm which resulted in a good correlation between numerical and experimental dispersion curves.
\end{abstract}
\begin{keyword}
Lamb waves \sep dispersion curves \sep semi-analytical spectral element method \sep composite laminates \sep elastic constants.
\end{keyword}
\end{frontmatter}
\section{Introduction}
Elastic constant values are often difficult to obtain, especially for anisotropic media. These values are indispensable for the design of a structure which fulfils assumed requirements (strength, stiffness, vibration characteristics). For years elastic constants of isotropic materials have been estimated during destructive testing and strain gauge or static displacement recordings~\cite{Wang2000}. Similar results can be achieved by dynamic tests which rely on natural frequencies~\cite{Wang2000a, Wesolowski2009,Beluch2014}. Measurements of velocities of bulk waves (longitudinal and shear) can also be used for the determination of elastic constants~\cite{Rose1999}.
For anisotropic media, the determination of elastic constants is more complex. Destructive testing can still be used to determine some of the constants. Additional constants can be obtained by special cube-cutting procedures followed by destructive testing~\cite{Rose1991} or by using bulk wave measurement protocol for orthotropic media~\cite{Rose1999}. However, such experiments are cumbersome and expensive.
\DIFadd{Ultrasonic techniques usually are based on measurements of the time of flight and corresponding velocities of bulk waves (longitudinal and shear)~\cite{Castellano2014}.
Bulk wave propagation is defined by the small wavelength compared to the thickness of the plate.
In turn, small wavelengths correspond to high-frequency components which usually are strongly attenuated.
Hence, there is an upper bound frequency limit.
Another problem arises from the assumption of the infinite thickness of the plate in the approximation of bulk waves.
Neglecting the effect of plate boundaries causes errors in the time of flight estimation~\cite{Martens2017}.}
It should be noted that other ultrasonic methods exist such as the leaky Lamb wave technique~\cite{Karim1990,Karim1990a}. In this technique, a specimen is immersed in a liquid tank and insonified with an acoustic beam at various incident angles and frequencies. The obtained pattern provides a unique fingerprint of the underlying mechanical elasticity tensor at the insonified material spot. The method has been improved over the years by using the pulsed ultrasonic polar scan and inverse \DIFdelcite{methods~\cite{Kersemans2014,Martens2017}.} \DIFaddcite{methods~\cite{Martens2017,Kersemans2014,Martens2019a}.}
Due to simplicity, non-destructive methods based on one-sided Lamb wave propagation measurements have been evolving over the years.
Lamb waves were named after their discoverer, Horace Lamb, who developed the theory of their propagation in 1917~\cite{Lamb1917}. Interestingly, Lamb was not able to physically generate the waves he discovered. This was achieved by Worlton~\cite{Worlton1961} in 1961, who also noticed their potential usefulness for damage detection. Lamb waves are defined as a type of elastic waves that propagate in infinite media bounded by two surfaces and arise as a result of the superposition of multiple reflections of longitudinal waves and shear vertical waves from the bounding surfaces. In the case of these waves, medium particle oscillations are very complex in character. Depending on the distribution of displacements on the top and bottom bounding surface, two modes of Lamb waves appear: symmetric, denoted as S0, S1, S2, \ldots, and antisymmetric, denoted as A0, A1, A2, \ldots. One should note that the number of these modes is infinite.
The analytic solution proposed by Lamb is limited to isotropic materials and infinite, unbounded media. The solution has a highly nonlinear character, and numerical methods must be used to obtain dispersion curves. The numerical approach, based on Lamb solution, for calculation of dispersion curves for isotropic materials, can be found in a book by Rose~\cite{Rose1999}.
Many studies have been devoted to the calculation of dispersion curves of Lamb waves propagating in composite laminates but only a few are related to the identification of elastic constants. Rose et al.~\cite{Rose1987} investigated Lamb wave propagation in unidirectional, two-directional, and quasi-isotropic graphite-epoxy composite panels. They derived polar characteristics of phase and group velocities of the low-frequency S0 Lamb mode. Mal et al.~\cite{Mal1993} and Karim et al.~\cite{Karim1990} determined the dynamic elastic moduli of the fibre-reinforced composite. The elastic constants were derived by inversion of a set of data measured by the leaky Lamb wave technique. In 1995 Rogers~\cite{Rogers1995} proposed a technique to measure the isotropic elastic constants of plate materials using Rayleigh-Lamb waves. In his paper, he presents the effect of an increase in longitudinal wave velocities or changes in Young's modulus or Poisson's ratio on the dispersion curves.
Dean et al.~\cite{Dean2008} determined elastic constants and thickness of the aluminium by minimising the error function comprised of the theoretical dispersion curves of Lamb waves and experimental data. They utilised full-field wavelength measurements of single-mode narrowband Lamb waves. Grinberg et al.~\cite{Grimberg2010} determined in-plane material parameters of composite fibre reinforced polymer (CFRP) from the equations for phase velocity of S0 and A0 modes at low-frequency range.
The paper by Bartoli et al.~\cite{Bartoli2006} deals with a semi-analytical finite element (SAFE) method for modelling wave propagation in waveguides of arbitrary cross-section. The method simply requires the finite element discretisation of the cross-section of the waveguide and assumes harmonic motion along the wave propagation direction. The general SAFE technique is extended to account for viscoelastic material damping by incorporating complex stiffness matrices. The dispersive solutions are obtained in terms of phase velocity, group velocity (for undamped media), energy velocity (for damped media), attenuation, and cross-sectional mode shapes. Taupin et al.~\cite{Taupin2011} applied the SAFE method to analyse composite laminates of various stacking sequences.
A new semi-analytical method using 3D elasticity theory was derived by Wang and Juan~\cite{Wang2007}. The group velocities of multiple higher-order Lamb waves obtained by the proposed model agree well with experimental measurements. However, the studies covered only wave propagation phenomenon in thin quasi-isotropic laminate.
Pol and Banerjee~\cite{Pol2013} derived a simplified 2D semi-analytical model based on a global matrix method to investigate the dispersion characteristics of propagating guided wave modes in multilayered composite laminates due to transient surface excitations. A relatively thin symmetric eight layered cross-ply composite laminate subjected to both narrowband and broadband surface excitations was considered. Comparison of group velocity curves obtained from the developed model and LS-DYNA has shown good agreement.
The paper by Beluch and Burczy\'{n}ski~\cite{Beluch2014} deals with the two-scale approach to the identification of material constants in composite materials. Structures made of unidirectional fibre-reinforced composites were examined. However, instead of Lamb wave data for optimisation of elastic constants, static displacements and eigenfrequencies were used. Additionally, a plane strain state was assumed in the model.
In 2016 Ong et al.~\cite{Ong2016} proposed a technique for determination of elastic properties of the woven composite panel using the Lamb wave dispersion characteristics. The investigated CFRP panel comprised of 16 plies with such a sequence so that it could be treated as quasi-isotropic material. A simple 2D plane strain model was assumed in the numerical simulations. The material properties were found by fitting dispersion curves from a numerical simulation with experimental data by the particle swarm optimisation method.
From the review of the literature, no research evidence was found to date which deals with the rapid and robust identification of elastic material properties of highly anisotropic composite structures. Therefore, it is essential to develop a robust material identification methodology for the inspection of a wide range of composite structures. Moreover, there is no method which enables online tracking of changes in elastic material properties.
The subject of this study focuses exclusively on an approach in which access to only one side of the specimen is required. In the proposed approach, dispersion curves of Lamb waves are utilised for determination of elastic constants in composite laminates. The paper is organized as follows: section~\ref{sec:dispersion_curves} describes a numerical method for calculation of dispersion curves of laminated composites, section~\ref{sec:experiment} is related to extraction of dispersion curves from full wavefield measurements, section~\ref{sec:optimization} describes the optimisation of elastic constants by using a genetic algorithm (GA) followed by conclusions.
\section{Dispersion curves of guided waves \label{sec:dispersion_curves}}
\subsection{Semi-analytical model}
Lamb wave dispersion phenomenon is related to wavenumber \(k\) dependency on frequency \(f\).
Lamb wave dispersion curves depend on elasticity constants of the material in which Lamb waves propagate.
Moreover, in composite laminates, Lamb waves behaviour is strictly related to the angle of propagation.
Parameters, such as wave attenuation, phase velocity, and group velocity, which are directly related to dispersion curves, depend on the angle of propagation.
Therefore, for proper material characterization, it is necessary to consider dispersion curves of Lamb waves at various angles of propagation.
The physical model of a plate-like waveguide is shown in Fig.~\ref{fig:layered_composite_SASE}.
The waveguide can generally be composed of anisotropic viscoelastic materials but for \DIFdel{simplicity} \DIFadd{simplicity,} only orthotropic material is shown in Fig.~\ref{fig:layered_composite_SASE} in which reinforcing fibres are at angle \(\theta\) \DIFdel{in} \DIFadd{with} respect to \(z\) axis.
The current mathematical model is a modification of the semi-analytical finite element (SAFE) method proposed in~\cite{Bartoli2006}.
The modification includes the application of spectral elements instead of classic finite elements through the thickness of a laminate, preserving wave equation in the propagation direction.
Hence, we propose to name it the semi-analytical spectral element (SASE) method. Moreover, instead of two-dimensional approximation of cross-section of the laminate, one-dimensional spectral elements were applied. Four-node spectral element is shown in Fig.~\ref{fig:layered_composite_SASE}. It has a non-uniform distribution of nodes and three degrees of freedom per node.
\begin{figure} [h!]
\centering
\includegraphics[width=\textwidth]{figure1.png}
\caption{SASE model of wave propagation along with degrees of freedom of a mono-dimensional four-node spectral element.}
\label{fig:layered_composite_SASE}
\end{figure}
Additionally, according to the concept proposed by Taupin et~al.~\cite{Taupin2011}, equations for dispersion curves are derived so that the solution can be obtained for
an arbitrary angle of propagation \(\beta\) shown in Fig.~\ref{fig:layered_composite_SASE}.
The wave propagation direction corresponds to the wavevector \(\vect{k}\) defined as:
\begin{equation}
\vect{k} = k \cos (\beta)\hat{ \vect{z}} - k \sin (\beta) \hat{\vect{y}},
\label{eq:wavevector}\end{equation}
where \(\hat{ \vect{z}}\) and \(\hat{\vect{y}}\) are unit vectors.
The general wave equation has a form of eigenvalue problem:
\begin{equation}
\left[\matr{A} - \omega^2\matr{M} \right] \vect{U} =0,
\label{eq:eig_dispersion}\end{equation}
where \(\omega\) is the angular frequency, \(\matr{M}\) is the mass matrix,
\(\matr{U}\) is the nodal displacement vector, and the matrix \(\matr{A}\) can be
defined as:
\begin{equation}
\begin{aligned}
\matr{A} & = k^2\left(s^2 \,\matr{K}_{22} + c^2\, \matr{K}_{33} - c s \, \matr{K}_{23} - c s\, \matr{K}_{32}\right) \\
& + i k\, \matr{T}^T\left(-c\, \matr{K}_{13} - s \, \matr{K}_{21} + s \, \matr{K}_{12} + c \, \matr{K}_{31}\right) \matr{T} +\matr{K}_{11},
\end{aligned}
\label{eq:dispersion}\end{equation}
where \(s = \sin(\beta)\), \(c = \cos(\beta)\), \(i = \sqrt{-1}\), and \(\beta\) is the angle of guided wave propagation.
The transformation matrix \(\matr{T}\) is diagonal and it is introduced in order to eliminate imaginary elements from Eq.~(\ref{eq:dispersion}) (see~\cite{Bartoli2006} for more details).
It should be noted that the system of equations~(\ref{eq:eig_dispersion}) explicitly depends on the angle \(\beta\).
Predicting the anisotropic behaviour of guided wave properties makes it necessary to loop over each direction considered.
Stiffness matrices \(\matr{K}_{mn}\) from Eq.~(\ref{eq:dispersion}) depend on elastic constants of composite laminate and relations between displacements and strains (more detailed derivation can be found in Appendix).
The definitions of these matrices on an elemental level are:
\begin{equation}
\matr{k}_{mn}^e= \int \limits_{(e)} \matr{B}_m^{T} \matr{C}_{\theta}^e \, \matr{B}_n\, \ud x,
\label{eq:stiffness_matrix_e}\end{equation}
\DIFdel{where \(\matr{C}_{\theta}\) is the elastic tensor and \(\matr{B}\) is the matrix relating
displacements and strains.
Equation~(\ref{eq:eig_dispersion}) can} be \DIFdel{solved numerically} in \DIFdelcite{two ways:
\begin{itemize}
\item as a standard eigenvalue problem \(\omega (k)\) (assuming given real values
of wavenumbers \(k\))
\item as a second-order polynomial eigenvalue problem \(k(\omega)\) for given
frequencies \(\omega\).
\end{itemize}
In the later case,} \DIFdel{the solution consists of recasting Eq.~(\ref{eq:eig_dispersion}) to a
first-order eigensystem by doubling its algebraic size. The obtained wavenumbers are
then of a complex character. This provides information about both the wave dispersion
(real part of the wavenumbers) and the attenuation of the waves (imaginary part of the wavenumbers). However, due to the fact that the priority is the computation time and
that the information about the wave attenuation is not necessary, it is preferable to
solve the standard eigenvalue problem \(\omega (k)\).}
\DIFadd{where \(\matr{C}_{\theta}\) is the elastic tensor and \(\matr{B}\) is the matrix relating displacements and strains.
Equation~(\ref{eq:eig_dispersion}) can be solved numerically in two ways:
\begin{itemize}
\item as a standard eigenvalue problem \(\omega (k)\) (assuming given real values of wavenumbers \(k\))
\item as a second-order polynomial eigenvalue problem \(k(\omega)\) for given frequencies \(\omega\).
\end{itemize}
In the latter case, the solution consists of recasting Eq.~(\ref{eq:eig_dispersion}) to a first-order eigensystem by doubling its algebraic size~\cite{Bartoli2006}.
The obtained wavenumbers are then of a complex character.
This provides information about both the wave dispersion (real part of the wavenumbers) and the attenuation of the waves (imaginary part of the wavenumbers).
However, in the proposed approach the priority is the computation time.
Therefore, the attenuation effect is neglected and the standard eigenvalue problem \(\omega (k)\) is solved.
Stiffness matrix components \(\matr{k}_{mn}^e\) from Eq.~(\ref{eq:stiffness_matrix_e}) depend on an angle \(\theta\) related to the layer orientation in the stacking sequence of a composite laminate.
Hence, the stiffness matrix of material \(\tilde{\matr{C}}\) corresponding to composite lamina is rotated by an angle \(\theta\) using a standard calculation~\cite{Bartoli2006,Taupin2011}:
}
\begin{equation}
\tilde{ \matr{C}_{\theta}}= \matr{R}_1(\theta) \,\tilde{\matr{C}} \,\matr{R}_2^{-1}(\theta).
\label{eq:elasticity_tensor}\end{equation}
It should be noted that in general, the stiffness matrix of material can be complex:
\begin{equation}
\tilde{\matr{C}}= \matr{C} - i \bs{\eta},
\label{eq:complex_elasticity_tensor}\end{equation}
where \(\matr{C} \) is the elastic stiffness tensor (matrix of elastic constants) and \(\bs{\eta}\) is the viscosity tensor.
However, viscosity is not considered here.
The matrix of elastic constants of an orthotropic linear elastic material can be written as:
\begin{equation}
\matr{C} = \left[\begin{array}{cccccc} C_{11} & C_{12}& C_{13} & 0&0&0\\[2pt]
C_{12}& C_{22} & C_{23}& 0&0&0\\[2pt]
C_{13}&C_{23}&C_{33}&0&0&0\\[2pt]
0& 0 &0&C_{44}& 0&0\\[2pt]
0&0&0&0&C_{55}&0\\[2pt]
0&0&0&0&0&C_{66}
\end{array}\right].
\label{eq:elastic_constatns}\end{equation}
It means that there are 9 independent coefficients which should be determined by using an optimisation technique.
But this is only applicable for the case in which the same material is used in each lamina and the layer orientation is known so that the Eq.~(\ref{eq:elasticity_tensor}) can be used.
However, if the stacking sequence is unknown or there is a misalignment in the layer orientation, the more general case should be considered in which the matrix of elastic constants is more populated:
\begin{equation}
\matr{C}_{\theta} = \left[\begin{array}{cccccc} C_{\theta 11} & C_{\theta 12}& C_{\theta 13} & 0&0&C_{\theta 16}\\[2pt]
C_{\theta 12}& C_{\theta 22} & C_{\theta 23}& 0 &0&C_{\theta 26}\\[2pt]
C_{\theta 13}&C_{\theta 23}&C_{\theta 33}&0&0&C_{\theta 36}\\[2pt]
0& 0&0&C_{\theta 44}& C_{\theta 45}&0\\[2pt]
0&0&0&C_{\theta 45}&C_{\theta 55}&0\\[2pt]
C_{\theta 16}&C_{\theta 26} &C_{\theta 36}&0&0&C_{\theta 66}
\end{array}\right],
\label{eq:elastic_constatns_theta}\end{equation}
which gives 13 independent elastic constants per lamina.
Of course, the determination of such a large number of elastic constants by optimisation methods is prohibitive due to high computation cost.
\DIFadd{
In the proposed approach it is assumed that the stacking sequence is known and the same type of prepreg is used in each layer.
Therefore, only 9 independent coefficients must be determined.
}
\subsection{\DIFdel{Reduction of the number of variables for optimisation}}
\subsection{Parametric studies of dispersion curves \label{sec:parametric}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\DIFadd{
The SASE model was used for parametric studies of dispersion curves. The influence
of plain-weave textile-reinforced composite material properties on dispersion curves
was analysed.
The initial values of constants characterising composite material are given in Table~\ref{tab:matprop}.
The influence of each C-tensor component on dispersion curves was studied separately.
The variability range of each parameter was assumed as \(\pm\)30\% with
respect to initial values.
It was assumed in the SASE model that the laminate is composed of 8 layers of a total thickness of 3.9 mm.
The same orientation angle (0\(^{\circ}\)) for each ply was assumed.
\begin{table}[h!]
\renewcommand{\arraystretch}{1.3}
%\centering \footnotesize
\caption{Initial values of elastic constants used in parametric studies; Units: [GPa].}
\begin{center}
\begin{tabular}{ccccccccc}
\toprule
\(C_{11}\) & \(C_{12}\) & \(C_{13}\) & \(C_{22}\) & \(C_{23}\) & \(C_{33}\) &
\(C_{44}\) & \(C_{55}\) & \(C_{66}\) \\
\midrule
50 &5& 5& 50 & 5 & 9 & 3 & 3 & 3\\
\bottomrule
\end{tabular}
\end{center}
\label{tab:matprop}
\end{table}
The dispersion curves shown in all figures presented here are in the form \(k(f)\) where \(f=\omega/(2 \pi)\) is the frequency measured in hertz.
Black curves are calculated for initial values of material properties given in Table~\ref{tab:matprop}, red curves represent changes in dispersion curves caused by the increase of these parameters whereas blue curves represent changes in dispersion curves due to decreasing values of these parameters.
There are 11 solutions covering the range of \(\pm\)30\% for each investigated parameter which are presented in Figs.~\ref{fig:C11}--\ref{fig:C66}.
The influence of \(C_{11}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C11}.
Looking at the greatest wavenumber values at frequency 300 kHz in Fig.~\ref{fig:C11_0}, the following propagation modes occur in order: A0, symmetric SH0, A1 and S0.
\(C_{11}\) elastic constant at angle 0\(^{\circ}\) affects two modes, namely A0 and S0 mode.
The influence on the latter one is greater.
It should be noted that an increase of \(C_{11}\) elastic constant causes decrease of wavenumber values.
Looking at the greatest wavenumber values at frequency 300 kHz in Fig.~\ref{fig:C11_45}, the following propagation modes occur in order: A0, S0, symmetric SH0 and A1.
All modes are affected at angle 45\(^{\circ}\).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\includegraphics{figure2a.png}
\caption{}
\label{fig:C11_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\includegraphics{figure2b.png}
\caption{}
\label{fig:C11_45}
\end{subfigure}
\caption{The influence of \(C_{11}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C11}
\end{figure}
The influence of \(C_{12}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C12}.
It has the least influence on dispersion curves.
Small changes can be observed at angle 45\(^{\circ}\).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure3a.png}
\caption{}
\label{fig:C12_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure3b.png}
\caption{}
\label{fig:C12_45}
\end{subfigure}
\caption{The influence of \(C_{12}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C12}
\end{figure}
The influence of \(C_{13}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C13}.
Only S0 mode is affected at angle 0\(^{\circ}\) (Fig.~\ref{fig:C13_0}).
It should be noted that an increase of \(C_{13}\) elastic constant causes an increase of wavenumber values which is opposite behaviour with respect to \(C_{11}\) elastic constant (Fig.~\ref{fig:C11}).
Apart from the influence on S0 mode, additional influence is observed around 500 kHz at angle 45\(^{\circ}\) (Fig.~\ref{fig:C13_45}).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure4a.png}
\caption{}
\label{fig:C13_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure4b.png}
\caption{}
\label{fig:C13_45}
\end{subfigure}
\caption{The influence of \(C_{13}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C13}
\end{figure}
The influence of \(C_{22}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C22}.
It can be seen that the dispersion curves at angle 0\(^{\circ}\) are not affected whereas the influence on dispersion curves at angle 45\(^{\circ}\) is the same as in case of \(C_{11}\) elastic constant (compare Fig.~\ref{fig:C11_45}).
Such behaviour is expected because \(C_{11}\) elastic constant corresponds to the principal direction at angle 0\(^{\circ}\) whereas \(C_{22}\) elastic constant corresponds to the principal direction at angle 90\(^{\circ}\).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure5a.png}
\caption{}
\label{fig:C22_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure5b.png}
\caption{}
\label{fig:C22_45}
\end{subfigure}
\caption{The influence of \(C_{22}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C22}
\end{figure}
The influence of \(C_{23}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C23}.
It has no effect on dispersion curves at angle 0\(^{\circ}\).
The behaviour at angle 45\(^{\circ}\) is the same as in the case of the \(C_{13}\) elastic constant (compare Fig.~\ref{fig:C23_45} with Fig.~\ref{fig:C13_45}).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure6a.png}
\caption{}
\label{fig:C23_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure6b.png}
\caption{}
\label{fig:C23_45}
\end{subfigure}
\caption{The influence of \(C_{23}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C23}
\end{figure}
The influence of \(C_{33}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C33}.
The influence of \(C_{33}\) elastic constant is highly localised because it substantially affects S0 mode in frequency range about 250--500 kHz.
Other wave modes are unaffected except A1 mode at angle 45\(^{\circ}\) and frequency range about 400--500 kHz.
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure7a.png}
\caption{}
\label{fig:C33_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure7b.png}
\caption{}
\label{fig:C33_45}
\end{subfigure}
\caption{The influence of \(C_{33}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C33}
\end{figure}
The influence of \(C_{44}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C44}.
It influences the A1 mode at angle 0\(^{\circ}\) to the greatest extent.
A0 mode, A1 mode and S0 mode are affected at angle 45\(^{\circ}\) but S0 mode is affected only at frequencies above about 300 kHz.
It should be noted that the influence of \(C_{44}\) elastic constant on A0 and S0 mode increases with increasing frequencies whereas the influence on A1 mode is opposite.
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure8a.png}
\caption{}
\label{fig:C44_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure8b.png}
\caption{}
\label{fig:C44_45}
\end{subfigure}
\caption{The influence of \(C_{44}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C44}
\end{figure}
The influence of \(C_{55}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C55}.
It can be noticed that at angle 0\(^{\circ}\), A0 mode is strongly affected as well as S0 mode at frequencies above about 300 kHz.
The influence of \(C_{55}\) elastic constant on the dispersion curves at angle 45\(^{\circ}\) is the same as in case of \(C_{44}\) elastic constant (compare Fig.~\ref{fig:C55_45} with Fig.~\ref{fig:C44_45}).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure14a.png}
\caption{}
\label{fig:C55_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure14b.png}
\caption{}
\label{fig:C55_45}
\end{subfigure}
\caption{The influence of \(C_{55}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C55}
\end{figure}
The influence of \(C_{66}\) elastic constant on the dispersion curves is shown in Fig.~\ref{fig:C66}.
The changes of the \(C_{66}\) elastic constant affect, to the greatest extent, the symmetric SH mode and the A1 mode at angle 0\(^{\circ}\).
Only a slight influence on S0 mode is observed at angle 45\(^{\circ}\).
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure15a.png}
\caption{}
\label{fig:C66_0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics[]{figure15b.png}
\caption{}
\label{fig:C66_45}
\end{subfigure}
\caption{The influence of \(C_{66}\) elastic constant on the dispersion curves for selected angle \(\beta\): (a) 0\(^{\circ}\) and (b) 45\(^{\circ}\).}
\label{fig:C66}
\end{figure}
It should be underlined that in most cases the influence of particular \(C_{ij}\) elastic constant on the dispersion curves is unique.
The influence on specific modes is different.
Even certain modes are only affected at a specific frequency range.
These findings give the basis to state that the dispersion curves can be used for identification of elastic constants.
It is expected that better accuracy can be obtained by considering more modes and a wider frequency range.
Moreover, the dependence of dispersion curves on the angle of propagation is significant.
Hence, it should be considered during the construction of an objective function used for the determination of elastic constants.
}
\section{Experimental measurements \label{sec:experiment}}
Guided wave propagation was analysed in carbon/epoxy laminate \DIFdel{composed} reinforced
by 16 layers of plain weave fabric. The prepregs GG 205 P (fibres Toray FT 300 - 3K
200 tex) by G. Angeloni and epoxy resin IMP503Z-HT by Impregnatex Compositi were
used for fabrication of the specimen in the autoclave. The composite laminate
dimensions were 1200\(\times\)1200~mm. The average thickness was 3.9\(\pm\)0.1
mm.
The total weight of the specimen was 8550 g, hence, based on the volume of the specimen, the density is about~1522.4~kg/m\textsuperscript{3}.
The parameters describing the geometry of a plain weave \DIFdel{textile reinforced} \DIFadd{textile-reinforced} composite are given in Table~\ref{tab:weave_geo}.
\begin{table}[h]
\renewcommand{\arraystretch}{1.3}
\centering \footnotesize
\caption{The geometry of a plain weave textile-reinforced composite [mm].}
%\begin{tabular}{@{}ccccccc@{}} % remove spaces from vertical lines
\begin{tabular}{cccccc}
%\hline
\toprule
\multicolumn{4}{c}{\textbf{width} } & \multicolumn{2}{c}{\textbf{thickness} } \\
% \hline \hline
\cmidrule(lr){1-4} \cmidrule(lr){5-6}
fill & warp & fill gap& warp gap& fill & warp\\
%\hline
\(a_f\) &\(a_w\)& \(g_f\) & \(g_w\) & \(h_f\)& \(h_w\) \\
%\hline
%\midrule
\cmidrule(lr){1-2} \cmidrule(lr){3-4} \cmidrule(lr){5-6}
1.92 &2.0& 0.05& 0.05 & 0.121875 & 0.121875 \\
%\hline
\bottomrule
\end{tabular}
\label{tab:weave_geo}
\end{table}
\DIFadd{The experimental setup is shown in Fig.~\ref{fig:setup}.
It consists of a waveform generator, signal amplifier, piezoelectric actuator, specimen, scanning laser Doppler vibrometer (SLDV) head operated by controller and software.}
\begin{figure} [h!]
\centering
\DIFaddfig{\includegraphics[width=\textwidth]{figure16.png}}
\caption{\DIFadd{Experimental setup.}}
\label{fig:setup}
\end{figure}
The piezoelectric transducer disc of 10 mm diameter was bonded to the surface of the specimen at their centre.
A chirp signal in the frequency range of 0-500 kHz lasting 200~\(\mu\)s was applied to the piezoelectric transducer.
Sampling frequency was 1.28~MHz. Full wavefield measurements of guided waves were conducted by using
\DIFdel{scanning laser Doppler vibrometer} \DIFadd{SLDV} (Polytec PSV-400) on the central area of width 0.726 m and length 0.726 m.
A grid of 499\(\times\)499 measurement points covering the surface of the specimen was used.
\DIFdel{Retro-reflective}
\DIFadd{The retro-reflective} tape was applied to the measurement area for improving \DIFdel{a} \DIFadd{the} quality of laser signal.
Measurements were taken 40 times at each grid point and averaged in order to increase the signal-to-noise ratio.
3D Fourier transform was applied to the full wavefield data in the space-time domain
(\(x\), \(y\), \(t\)).
Next, the 3D matrix was transformed from (\(k_x\), \(k_y\), \(\omega\)) coordinates to cylindrical coordinates (\(\beta\), \(k\), \(f\)).
Interpolation was employed to obtain 2D images (\(k\), \(f\)) representing dispersion curves \(k(f)\) at selected angles \(\beta = 0^{\circ} \ldots 90^{\circ}\) with the step of \(15^{\circ}\), resulting in nine 2D matrices.
Let's denote these matrices representing dispersion curves by \(\matr{D}_{\beta}\). The size of the matrix \(\matr{D}_{\beta}\) in the current approach was \(n_k=512 \times n_f= 512\), where \(n_k\) is the number of wavenumber points
and \(n_f\) is the number of frequency points.
An example of such a matrix at \DIFadd{an} angle \(60^{\circ}\) is presented in Fig.~\ref{fig:initial_optimized}.
The dispersion curves presented in Fig.~\ref{fig:dispersion60deg_initial} \DIFadd{are} calculated by using \DIFadd{the} SASE model and material properties \DIFadd{which are} given in Table~\ref{tab:matprop}.
\DIFadd{They are} in the form of \DIFdel{yellow} \DIFadd{white} curves \DIFdel{are} overlayed on the image for reference.
It can be seen that there is a high discrepancy between semi-analytic dispersion curves and \DIFadd{the} experimental one.
\section{Optimisation \label{sec:optimization}}
\subsection{Genetic algorithm parameters}
The following GA parameters were used for optimisation:
\begin{itemize}
\item Number of individuals per subpopulation: 100
\item Maximum number of generations: 70
\item Generation gap: 0.9
\item Number of variables in objective function: \DIFdel{6 (indirect approach), 9 (direct approach)} \DIFadd{9}
\item Precision of binary representation of variables: 12-bit.
\end{itemize}
Generation gap of 0.9 means that 90\% chromosomes from the old population are replaced by 90\% \DIFdel{best found} \DIFadd{best-found} chromosomes from new population while preserving 10\% \DIFdel{best found} \DIFadd{best-found} chromosomes from \DIFadd{the} old population.
The probability of mutation \(P_m\) is calculated as follows~\cite{Chipperfield1994}:
\begin{equation}
P_m = 0.7/L,
\end{equation}
where \(L\) is the length of the chromosome structure.
The upper and lower bounds of variables were set as \(\pm\)50\% with respect to initial values given in Table~\ref{tab:matprop}.
\subsection{Objective function}
The optimisation problem can be described as a minimization of an objective function:
\begin{equation}
\min_j \sum_{\beta}\sum_{m} \| k^{SASE}_{m}(\beta, \omega,\matr{C}^{(j)}) -k^{EXP}_{m}(\beta,\omega) \|,
\label{eq:error_fun}\end{equation}
where \(m\) denotes the mode of propagating wave.
The objective function is the norm of a difference between dispersion curves calculated by using \DIFadd{the} SASE model \(k^{SASE}_{m}(\beta, \omega,\matr{C}^{(j)})\) and dispersion curves from experiment \(k^{EXP}_{m}(\beta,\omega)\).
However, in the
\DIFdel{model} \DIFadd{model,} we have actual curves for each propagation mode \(m\) depending on the matrix of elastic constants \(\matr{C}^{(j)}\) and propagation angle \(\beta\) but the data from the experiment have a form of a matrix (image) \(\matr{D}^{EXP}_{\beta}\) at each angle \(\beta\) (see Fig.~\ref{fig:initial_optimized}).
Extraction of dispersion curves \(k^{EXP}_{m}(\beta,\omega)\) from images \(\matr{D}^{EXP}_{\beta}\) is cumbersome and could lead to inherent errors.
Moreover, in order to minimise the error function in \DIFdel{Eq.~\ref{eq:error_fun},} \DIFadd{Eq.~(\ref{eq:error_fun}),} dispersion curves in \DIFadd{the} SASE model must be sorted (which increases computation costs).
Therefore, dispersion curves corresponding to all modes in the investigated frequency range, i.e. \(m=1,\ldots, 6\) calculated by using \DIFadd{the} SASE model were converted to an image \(\matr{D}^{SASE}_{\beta}\) in which dimensions are the same as in the experiment (\(n_k=512 \times n_f= 512\)).
Pixels of the image \(\matr{D}^{SASE}_{\beta}\) which correspond to dispersion curves are assigned the value 1 whereas the remaining pixels have the value 0.
It leads to a logical matrix, or mask, which can be applied for filtering experimental images:
\begin{equation}
\tilde{\matr{D}}^{\beta} = \matr{D}^{SASE}_{\beta} .* \matr{D}^{EXP}_{\beta} ,
\label{eq:objective_fun}
\end{equation}
where \(.*\) is the \DIFdel{element wise multiplication (such notation is used in Matlab).} \DIFadd{element-wise multiplication.}
If dispersion curves from \DIFadd{the} SASE model align well with high values of experimental images, it leads to high values in \DIFadd{the} filtered image and vice versa.
The objective function value can be finally calculated as:
\begin{equation}
\tilde{F} = \frac{(-1)}{n_k \, n_f} \cdot \sum_{\beta} \sum_{i=1}^{n_k} \sum_{j=1}^{n_f} \tilde{D}_{ij}^{\beta}.
\label{eq:objective_fun_val}\end{equation}
In this \DIFdel{way} \DIFadd{way,} rich experimental data is fully used and roots sorting of \DIFadd{the} SASE dispersion curves is not necessary.
Additionally, for convenience, the objective function scaling can be applied in the form:
\begin{equation}
F = a \, \tilde{F} + b,
\end{equation}
where \(a\) and \(b\) are scaling parameters. The parameters can be selected so that the values of \DIFadd{the} objective function are positive (\(a=100\), \(b=360\) were assumed in the current studies).
It should be added that the calculation of the objective function is quite computationally intensive.
It takes about 5 minutes on Intel Xeon X5660 2.8~GHz to compute 100 evaluations of the objective function.
\subsection{GA convergence}
The proposed objective function enables quite fast convergence of GA.
An exemplary convergence of objective function with increasing generation number is shown in Fig.~\ref{fig:GAconvergence}.
Objective function values were calculated for the best chromosome and also for the mean value of all chromosomes in a generation.
Objective function stabilises in both cases after about 40 generations.
\begin{figure} [h!]
\centering
\includegraphics{figure9.png}
\caption{GA convergence in \DIFdel{direct} \DIFadd{the proposed} approach.}
\label{fig:GAconvergence}
\end{figure}
\subsection{Results and discussion}
\DIFadd{
It has been shown in Fig.~\ref{fig:dispersion60deg_initial} that even for deliberately assumed values of constants characterising the composite material (Table~\ref{tab:matprop}) there is a huge discrepancy between the model and experimental data. The best way to improve the model is by an identification of all necessary parameters by using optimisation methods.
The application of objective function given by Eq.~(\ref{eq:objective_fun_val}) leads to excellent agreement between numerical and experimental dispersion curves at angle \(60^{\circ}\) as it is shown in Fig.~\ref{fig:dispersion60deg}.
It should be added that a linear colour scale is applied for all experimental images.
}
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure10a.png}
\caption{initial parameters}
\label{fig:dispersion60deg_initial}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure10b.png}
\caption{optimised}
\label{fig:dispersion60deg}
\end{subfigure}
\caption{Dispersion curves at angle \(\beta = 60^{\circ}\); \DIFdel{yellow} \DIFadd{white} curves: SASE
model;
image: experiment. }
\label{fig:initial_optimized}
\end{figure}
\DIFadd{
However, at angles \(0^{\circ}\) (Fig.~\ref{fig:dispersion0deg_direct_SH0}) and \(15^{\circ}\) (Fig.~\ref{fig:dispersion15deg_direct_SH0}) some discrepancies related to the behaviour of the shear horizontal wave mode are observed. The dispersion curve corresponding to the SH0 mode (red curve) tends to overlap with another dispersion curve (the A0 mode), contributing significantly to the objective function value. Additionally, the wavenumber values of the SH0 mode are overestimated (see Fig.~\ref{fig:dispersion15deg_direct_SH0}). In other words, the velocity of the modelled SH0 mode is lower than in the experiment.
In order to alleviate the issue with the SH0 mode, we modified the matrix
\(\matr{D}^{SASE}_{\beta} \) from Eq.~(\ref{eq:objective_fun}).
For the dispersion curve \(m=2\) corresponding to the SH0 mode, instead of value 1, we inserted the following angle-dependent values:
}
\begin{equation}
\left. \matr{D}^{SASE}_{0^{\circ}}\right\vert_{m=2} = 0.1, \, \left. \matr{D}^{SASE}_{15^{\circ}}\right\vert_{m=2} = 2.0, \, \left. \matr{D}^{SASE}_{30^{\circ}}\right\vert_{m=2} = 1.5\label{eq:objective_fun_mod}
\end{equation}
\DIFadd{
and symmetrically for the remaining angles with respect to 45\(^{\circ}\).
Therefore the matrix \(\matr{D}^{SASE}_{\beta} \) is no longer a logical matrix but it is still functioning as a filter mask with tweaked weights.
It should be noted that the amplitudes of SH0 mode are much lower than other modes (see images e.g. in Fig.~\ref{fig:dispersion15deg_direct_SH0}).
The goal of the application of these weights is to increase the importance of SH0 mode so that it has a similar contribution to the objective function as other modes.
The dispersion curves calculated by using modified objective function fit well the experimental data as it is shown in Fig.~\ref{fig:optimized_direct} for each considered angle.
Also, the SH0 mode is properly aligned with experimental data.
}
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{\textwidth}
\centering
\includegraphics{figure12a.png}
\caption{}
\label{fig:dispersion0deg_direct_SH0}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}
\centering
\includegraphics{figure12b.png}
\caption{}
\label{fig:dispersion15deg_direct_SH0}
\end{subfigure}
\caption{Dispersion curves for optimised elastic constants \DIFdel{in \textbf{direct method}} at angles \(\beta\): (a) 0\(^{\circ}\), (b) 15\(^{\circ}\); yellow curves: SASE model; red
curve: SH0 mode; image: experiment. }
\label{fig:SH0_problem}
\end{figure}
\begin{figure} [h!]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure13a.png}
\caption{}
\label{fig:dispersion0deg_direct}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure13b.png}
\caption{}
\label{fig:dispersion15deg_direct}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure13c.png}
\caption{}
\label{fig:dispersion30deg_direct}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure13d.png}
\caption{}
\label{fig:dispersion45deg_direct}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure13e.png}
\caption{}
\label{fig:dispersion75deg_direct}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.47\textwidth}
\centering
\includegraphics{figure13f.png}
\caption{}
\label{fig:dispersion90deg_direct}
\end{subfigure}
\caption{Dispersion curves for optimised elastic constants \DIFdel{in \textbf{direct method}} at angles \(\beta\): (a) 0\(^{\circ}\), (b) 15\(^{\circ}\), (c) 30\(^{\circ}\), (d)
45\(^{\circ}\),
(e) 75\(^{\circ}\), (f) 90\(^{\circ}\); \DIFdel{yellow} \DIFadd{white} curves: SASE model; image: experiment. }
\label{fig:optimized_direct}
\end{figure}
\DIFdel{It seems that from the perspective of wave propagation modelling in composite laminates, it is easier to select properties of composite material constituents than composite ply. It is because data related to the mechanical properties of the fibres and epoxy matrix is widely available. However, it has been shown in Fig.~\ref{fig:dispersion60deg_initial} that even for deliberately assumed values of constants characterising the composite material constituents (Table~\ref{tab:matprop}) there is a huge discrepancy between the model and experimental data.}
The \DIFdel{easiest way to improve the model is by tweaking the volume fraction of reinforcing fibres (grid search approach). But this method does not guarantee a good match between numerical and experimental dispersion curves.} \DIFadd{GA algorithm was run 15 times.}
The \DIFdel{best option is an identification of all necessary parameters by using} optimisation \DIFdel{methods which leads to excellent agreement between numerical and experimental dispersion curves as it is shown} \DIFadd{results are given} in \DIFdel{Fig.~\ref{fig:dispersion60deg}. It should be added that} \DIFadd{Table~\ref{tab:csv_results}.
They have} a \DIFdel{linear colour scale is applied for all experimental images.} \DIFadd{relatively small spread as evidenced by the quite low values of the standard deviation.}
\begin{table}[h!]
\caption{GA optimisation results \DIFdel{for the indirect approach} based on statistics (mean \(\mu\) and standard
deviation \(\sigma\)) of 15 GA \DIFdel{runs.} \DIFadd{runs; Units of elastic constants: [GPa].}}
\DIFdeltab{
\renewcommand{\arraystretch}{1.3}
\centering \footnotesize
\begin{tabular}{lrrr} \toprule
&\multicolumn{3}{c}{\textbf{indirect method}} \\
\cmidrule(lr){2-4}
&Best & \(\mu\) & \(\sigma\)\\
\cmidrule(lr){2-4}
\csvreader[table head=\toprule ,
late after line=\\ ]
{results_indirect_50.csv}{Row=\constantst,Tbest=\tbest,Tmean=\tmean,Tstd=\tstd}
{\constantst & \tbest & \tmean & \tstd}
\bottomrule
\end{tabular}
\label{tab:csv_indirect_results}
}
\DIFaddtab{
\renewcommand{\arraystretch}{1.3}
\centering \footnotesize
\begin{tabular}{crrr} \toprule
&Best & \(\mu\) & \(\sigma\)\\
\cmidrule{1-4}
\csvreader[table head=\toprule ,
late after line=\\ ]
{results_indirect_direct_50.csv}{Row=\constants,Cbest=\cbest,Cmean=\cmean,Cstd=\cstd,Cdbest=\cdbest,Cdmean=\cdmean,Cdstd=\cdstd}
{\constants & \cdbest & \cdmean & \cdstd}
\bottomrule
\end{tabular}
\label{tab:csv_results}
}
\end{table}
\DIFadd{
In our opinion, a further reduction of the standard deviation is possible by taking into account a larger frequency range and, in turn, more modes of propagating waves. However, the limitation here is the SLDV. A higher frequency range can probably be considered by using the newest lasers.
\clearpage
}
\section{Conclusions}
This work has demonstrated that the proposed method based on guided wave dispersion curves combined with the genetic algorithm is well suited for identification of elastic constants of woven fabric reinforced composites. \DIFdel{Two approaches have
been investigated: indirect and direct. In the indirect approach, material properties of
composite constituents are selected as optimisation variables which are next used
for calculation of elastic constants of lamina by using micromechanics and
homogenisation techniques. On the other hand, in the direct approach, the elastic
constants of the composite laminate are selected as optimisation variables. The
motivation behind the indirect approach is the fact that fewer variables are involved in
the optimisation process. However, it has been found that for parameter space
bounds \(\pm\)50\% in respect to the initial values, in spite of low value of the
objective function, the indirect method leads to ambiguous results. Much better}
\DIFadd{Satisfactory} results have been obtained by the \DIFdel{direct} \DIFadd{proposed} method.
This fact is demonstrated by the dispersion curves calculated by the SASE model which matches very well the experimental data coming from the SLDV measurements.
The advantage of the proposed methodology is one-sided non-contact measurements with contact excitation which can be conducted even directly on existing structures without the necessity of preparation of special samples.
The results from this study imply that the matrix of elastic constants of fabric reinforced composite can be estimated with a quite small spread which is connected with the nature of GA.
However, a number of potential sources of measurement errors, as well as the limitation of the devised methodology should be considered.
The most important source of errors lies in the variability of the thickness of the specimen.
There is an inherent deviation in the thickness of a composite that occurs during manufacturing.
The thickness variability cannot be taken into account in the SASE model -- averaged thickness must be assumed in the model instead.
Another important issue is the temperature influence on propagating guided waves.
Since SLDV measurements can take even a few hours, temperature variation during the measurement period can affect the material properties of the investigated specimen. \DIFdel{A less important source of errors comes from the measurements of the length and width of the scanned area.}
It should be added that the accuracy of the proposed methodology depends on the frequency range and the respective number of modes which can be captured experimentally.
It should be underlined that the devised methodology can be applied to composite laminates of a higher anisotropy level. Further studies on composite laminates reinforced by unidirectional fibres are ongoing.
\appendix
\section{Derivation of stiffness and mass matrices}
Taking into account the wavevector given in Eq.~(\ref{eq:wavevector}), the
displacement field can be expressed in terms of the wavenumber \(k\) and
propagation angle \(\beta\) as~\cite{Taupin2011}:
\begin{equation}
\vect{u}(x,y,z,t) = \matr{U}(x) \exp \left[ i (\omega t + k \sin (\beta) y - k \cos (\beta) z)\right].
\end{equation}
The discretisation is done only through the thickness of the plate (along \(x\) axis),
while the propagation kernel depends on the two other space variables \(y\) and \(z\).
The displacement at an arbitrary point in the plate can be written in terms of shape
functions \( \matr{N}\), nodal displacements \( \vect{d}\), and the propagation term:
\begin{equation}
\vect{u}^{(e)}(x,y,z,t) = \matr{N} \vect{d}^{(e)} \exp \left[ i (\omega t + k \sin (\beta) y - k \cos (\beta) z)\right],
\end{equation}
where
\begin{equation}
\vect{d}^{(e)} = \left[ u_{x,1} \, u_{y,1} \, u_{z,1} \ldots \, u_{x,n} \, u_{y,n} \, u_{z,n} \right]^T,
\end{equation}
and \(u_{x,j}\) denotes the nodal \(x\) displacement component at the \(j\)-th node of
element and \(n\) denotes the number of nodes per element.
The strain vector \(\bs{\varepsilon}\) for the element is given by:
\begin{equation}
\bs{\varepsilon}= \left[ \matr{B}_1 -i k_y \matr{B}_2 -i k_z \matr{B}_3 \right] \vect{d}^{(e)} \exp \left[ i (\omega t + k \sin (\beta) y - k \cos (\beta) z)\right]
\end{equation}
\begin{equation}
\matr{B}_1= \matr{L}_x \matr{N}_{,x},\; \matr{B}_2= \matr{L}_y \matr{N},\; \matr{B}_3= \matr{L}_z \matr{N}.
\end{equation}
The matrices \( \matr{L}_x \), \( \matr{L}_y \) and \( \matr{L}_z \) are defined as:
\begin{equation}
\begin{split}
& \matr{L}_x = \left[\begin{array}{ccc}
1 & 0 & 0 \\[4pt]
0&0&0\\[4pt]
0 &0&0 \\[4pt]
0&0&0\\[4pt]
0&0&1\\[4pt]
0&1&0
\end{array} \right],
\end{split} \quad
\begin{split}
& \matr{L}_y = \left[\begin{array}{ccc}
0&0&0\\[4pt]
0&1&0\\[4pt]
0 &0&0\\[4pt]
0&0&1\\[4pt]
0&0&0\\[4pt]
1&0&0
\end{array} \right],
\end{split} \quad
\begin{split}
& \matr{L}_z = \left[\begin{array}{ccc}
0&0&0\\[4pt]
0&0&0\\[4pt]
0 &0&1\\[4pt]
0&1&0\\[4pt]
1&0&0\\[4pt]
0&0&0
\end{array} \right],
\end{split}
\label{eq:selectors}\end{equation}
and \(\matr{N}\) is the matrix of shape functions:
\begin{equation}
\begin{split}
& \matr{N} = \left[\begin{array}{cccccccccc}
\varphi_1 & 0 & 0 & \varphi_2 & 0 & 0& \ldots & \varphi_n & 0 & 0\\[4pt]
0&\varphi_1&0 & 0&\varphi_2&0 & \ldots& 0&\varphi_n&0\\[4pt]
0 &0&\varphi_1 & 0 &0&\varphi_2 & \ldots& 0 &0&\varphi_n
\end{array} \right].
\end{split}
\end{equation}
The shape functions are Lagrange polynomials spanned over Gauss-Lobatto-Legendre points (see~\cite{Kudela2007} for more details).
In \DIFadd{the} practical application of the SASE \DIFdel{method} \DIFadd{method,} the order of Lagrange polynomials is usually 3-5.
The standard steps of the variational formulation and its discretisation lead to the elemental stiffness and mass matrices which can be defined as:
\begin{equation}
\matr{k}_{mn}^e= \int \limits_{(e)} \matr{B}_m^{T} \matr{C}_{\theta}^e \, \matr{B}_n\, \ud x ,
\label{eq:stiffness_matrix}\end{equation}
\begin{equation}
\matr{m}^e= \int \limits_{(e)}\rho \matr{N}^{T} \, \matr{N}\, \ud x .
\end{equation}
It should be noted that it is assumed that at least one spectral element is used per layer of composite laminate.
The global matrices are obtained by standard assembly procedures:
\begin{equation}
\matr{K}_{mn}= \bigcup_{e=1}^{n_e} \matr{k}_{mn}^{e} \; \textrm{and} \; \matr{M}= \bigcup_{e=1}^{n_e} \matr{m}^{e}.
\end{equation}
\section*{Funding}
The research was funded by the Polish National Science Center under grant agreement no 2018/29/B/ST8/00045.
\section*{Declaration of interest}
The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.
\section*{Data availability}
The raw/processed data required to reproduce these findings cannot be shared at this time as they are being used in an ongoing study.
\bibliographystyle{num_order}
\bibliography{Identification_GA_R2}
\end{document} |
If $f$ converges to $a$ and $g$ is eventually closer to $b$ than $f$ is to $a$, then $g$ converges to $b$. |
{-# OPTIONS --rewriting --confluence-check #-}
open import Agda.Builtin.Bool
open import Agda.Builtin.Equality
open import Agda.Builtin.Equality.Rewrite
module _ where
data Unit : Set where
unit : Unit
-- Symmetry of propositional equality: an x ≡ y proof flips to y ≡ x.
sym : {A : Set} {x y : A} → x ≡ y → y ≡ x
sym refl = refl
-- Transport: rewrite a proof of P x along x ≡ y to a proof of P y.
subst : {A : Set} (P : A → Set) {x y : A} → x ≡ y → P x → P y
subst P refl p = p
module _ (p : false ≡ true) where
block : {A : Set} → Unit → A → A
block unit x = x
r : ∀ u → block u false ≡ true
r unit = p
{-# REWRITE r #-}
r′ : ∀ u → block u false ≡ true
r′ u = refl
lazy : false ≡ true
lazy = r′ unit
T : Bool → Set
T true = Bool
T false = Bool → Bool
module _ (p : false ≡ true) where
bool : (Bool → Bool) → Bool
bool = subst T (lazy p)
fun : Bool → (Bool → Bool)
fun = subst T (sym (lazy p))
omega : Bool → Bool
omega = λ x → fun x x
loop : Bool
loop = omega (bool omega)
-- omega = λ p x → x x
-- loop = λ p → <BLACKHOLE>
|
[STATEMENT]
lemma (in encoding) divergence_respection_cond:
shows "enc_respects_divergence = (\<forall>S. \<lbrakk>S\<rbrakk> \<longmapsto>(Target)\<omega> \<longleftrightarrow> S \<longmapsto>(Source)\<omega>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. enc_respects_divergence = (\<forall>S. \<lbrakk>S\<rbrakk> \<longmapsto>Target\<omega> = S \<longmapsto>Source\<omega>)
[PROOF STEP]
by auto |
[STATEMENT]
lemma find_handler_prealloc_pres:
assumes "preallocated h"
and fh: "find_handler P a h frs sh = (xp',h',frs',sh')"
shows "preallocated h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. preallocated h'
[PROOF STEP]
using assms find_handler_heap[OF fh]
[PROOF STATE]
proof (prove)
using this:
preallocated h
find_handler P a h frs sh = (xp', h', frs', sh')
h' = h
goal (1 subgoal):
1. preallocated h'
[PROOF STEP]
by simp |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PartialTypeSignatures #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
module Lib where
import Control.Monad.Except (MonadError,
runExceptT,
throwError)
import Control.Monad.IO.Class (MonadIO, liftIO)
import Data.ByteString (ByteString)
import Data.FileEmbed (embedFile)
import Data.Monoid ((<>))
import Data.Scientific (Scientific,
toRealFloat)
import Debug.Trace
import GHC.TypeLits ()
import Graphics.Plot (mplot)
import Graphics.Rendering.Chart.Backend.Diagrams (toFile)
import Graphics.Rendering.Chart.Easy (def, layout_title,
line, plot, points,
re, (.=))
import Numeric.GSL.Minimization (MinimizeMethod (NMSimplex),
MinimizeMethodD (SteepestDescent, VectorBFGS2),
minimizeD,
minimizeV,
minimizeVD)
import Numeric.LinearAlgebra (Container, accum,
matFunc, tr, ( #> ),
(<.>))
import qualified Numeric.LinearAlgebra as Matrix
import Numeric.LinearAlgebra.Data (Matrix, Vector,
asColumn, asRow,
cols, cond,
dropColumns,
dropRows, rows,
takeColumns,
takeRows, (><),
(|||))
import qualified Numeric.LinearAlgebra.Data as MData
import Numeric.LinearAlgebra.Static (L, dim, matrix, mul)
import qualified Numeric.LinearAlgebra.Static as Static
import Text.Megaparsec (Dec, ParseError,
Token, char,
newline, parse,
sepBy)
import Text.Megaparsec.ByteString (Parser)
import Text.Megaparsec.Lexer (number)
-- | Application-level failures surfaced through 'MonadError'.
data Error =
  -- | The embedded data file could not be parsed.
  ParseFailed (ParseError (Token ByteString) Dec)
  deriving (Show, Eq)
------------------------------------------------------------
-- Parsing.
-- | Parse comma-separated numeric rows, one row per line, discarding
-- empty rows (e.g. those produced by trailing newlines).
rawDataParser :: Parser [[Scientific]]
rawDataParser =
  filter (not . null) <$> (number `sepBy` char ',') `sepBy` newline
-- | Convert parsed rows of 'Scientific' values into a dense
-- hmatrix 'Matrix' of 'Double's.
toMatrix :: [[Scientific]] -> Matrix Double
toMatrix rows' = Matrix.fromLists (map (map toRealFloat) rows')
-- | The wine data set, embedded into the binary at compile time.
wineData :: ByteString
wineData = $(embedFile "data/wine.data")
-- | Parser for the embedded wine data as a feature matrix.
wineParser :: Parser (Matrix Double)
wineParser = toMatrix <$> rawDataParser
-- | Parse the embedded wine data into a matrix, throwing 'ParseFailed'
-- if the embedded file is malformed.
loadData
  :: (MonadError Error m, MonadIO m)
  => m (Matrix Double)
loadData = either (throwError . ParseFailed) pure (parse wineParser "" wineData)
------------------------------------------------------------
-- Matrices
-- | 3x2 example matrix with entries 1..6, statically sized.
a :: L 3 2
a = matrix [1 .. 6]
-- | 2x1 example column vector.
b :: L 2 1
b = matrix [1 .. 2]
-- | Product a * b; the 3x1 shape is checked at compile time.
c :: L 3 1
c = mul a b
------------------------------------------------------------
-- Linear Regression
-- | Zero-initialised parameter vector of length @n@.
initialTheta :: Int -> Vector Double
initialTheta n = MData.vector [0 | _ <- [1 .. n]]
------------------------------------------------------------
-- Charts
-- | Sample an amplitude-modulated sine wave at the given x positions:
-- a slow envelope (period ~90) scaling a faster carrier (period ~10).
signal :: [Double] -> [(Double, Double)]
signal = map sample
  where
    sample x =
      let envelope = (sin (x * 3.14159 / 45) + 1) / 2
          carrier  = sin (x * 3.14159 / 5)
      in (x, envelope * carrier)
-- | Render the example amplitude-modulation chart to @example.svg@
-- (line for the dense sampling, points for the sparse one).
writeChart :: IO ()
writeChart =
  toFile def "example.svg" $ do
    layout_title .= "Amplitude Modulation"
    plot (line "am" [signal [0,0.5 .. 800]])
    plot (points "am points" (signal [0,7 .. 800]))
------------------------------------------------------------
-- Main
-- | Load the embedded data set, run the training/evaluation pipeline,
-- and print either the 'Result' or the parse 'Error'.
main :: IO ()
main = do
  result <-
    runExceptT $ do
      dataset <- loadData
      process dataset
  print result
-- | Outcome of training together with its held-out evaluation.
data Result = Result
  { crossValidationSet :: Matrix Double    -- ^ held-out design matrix
  , crossValidationResult :: Vector Double -- ^ held-out labels
  , finalTheta :: Vector Double            -- ^ optimised parameters
  , finalHyp :: Vector Double              -- ^ predictions on the held-out set
  , finalCost :: Double                    -- ^ cost on the held-out set
  } deriving (Show)
-- | Train a one-vs-rest logistic regression for wine class 1 and
-- evaluate it on a small held-out cross-validation set.
--
-- Fix: the optimiser previously ran on the full design matrix @x@/@y@,
-- which included the cross-validation rows (data leakage); it now
-- trains on the training split only.  The 'Result' interface is
-- unchanged.
process
  :: (MonadIO m, MonadError Error m)
  => Matrix Double -> m Result
process dataSet = do
  -- Elementwise indicator: 1 where the value equals n, else 0.
  let is n v = cond v n 0 1 0
  -- The class (stored in the first column) we classify against.
  let target = 1
  -- Hand-tuned per-feature scaling (diagonal, 13 features) to bring
  -- the features to comparable magnitudes.
  let scale :: Matrix Double =
        accum
          (MData.ident 13)
          (*)
          [((0, 0), 0.1), ((3, 3), 0.1), ((4, 4), 0.01), ((12, 12), 0.001)]
  let m = rows dataSet
  let f = cols dataSet
  let y = is target $ MData.flatten $ takeColumns 1 dataSet
  -- Design matrix: bias column of ones followed by the scaled features.
  let x = Matrix.col (replicate m 1.0) ||| ((dropColumns 1 dataSet) <> scale)
  let cvSize = 10
  let trainingX = dropRows cvSize x
  let cvX = takeRows cvSize x
  let trainingY = MData.fromList $ drop cvSize $ MData.toList y
  let cvY = MData.fromList $ take cvSize $ MData.toList y
  let theta = initialTheta f
  -- NOTE(review): the tolerances read 10e-6 (= 1e-5); confirm 1e-6 was
  -- not intended.  The optimisation path (second component) is unused.
  let (finalTheta, _) =
        minimizeVD
          SteepestDescent -- VectorBFGS2
          10e-6
          50
          10e-6
          0.1
          (costFn trainingX trainingY)
          (gradFn trainingX trainingY)
          theta
  pure $
    Result
      cvX
      cvY
      finalTheta
      (hypothesis cvX finalTheta)
      (costFn cvX cvY finalTheta)
-- | Standard logistic function, mapping any real number into (0, 1).
sigmoid
  :: Floating a
  => a -> a
sigmoid z = recip (1.0 + exp (negate z))
-- | Predicted probabilities: sigmoid applied elementwise to the linear
-- response @x #> theta@.
hypothesis :: Matrix Double -> Vector Double -> Vector Double
hypothesis x theta = sigmoid (x #> theta)
-- | Logistic-regression cost (negative average log-likelihood):
-- J(theta) = (1/m) * sum(-y*log h - (1-y)*log(1-h)), h = sigmoid(x.theta).
--
-- Fix: removed leftover 'traceShowId' debug output (it printed the cost
-- to stderr on every optimiser evaluation) and the local binding that
-- shadowed 'Prelude.all'.
costFn :: Matrix Double -> Vector Double -> Vector Double -> Double
costFn x y theta = vsum (leftTerm - rightTerm) / fromIntegral (rows x)
  where
    h = hypothesis x theta
    -- y is 0/1, so exactly one of the two terms is live per sample.
    leftTerm = (-y) * log h
    rightTerm = (1.0 - y) * log (1.0 - h)
-- | Gradient of 'costFn' with respect to theta: (1/m) * x^T (h - y).
gradFn :: Matrix Double -> Vector Double -> Vector Double -> Vector Double
gradFn x y theta = (tr x #> (hypothesis x theta - y)) / fromIntegral (rows x)
-- | Sum of all elements of a vector.
vsum :: Vector Double -> Double
vsum v = sum (MData.toList v)
|
%!TEX root = ../main.tex
\section{Summary and discussion}
|
# Regression test for the eikonal forward problem: compare high-order
# vs low-order solves, then check that the source-parallelised code
# paths reproduce the serial results exactly.
# get a small mesh
domain = [0 2.0 0 2.2 0 1.2]
n = [12;8; 7]
M = getRegularMesh(domain,n)
# sources / receivers on top edge
idx = reshape(collect(1:prod(n.+1)),tuple(n.+1...))
ib = idx[:,1,:];
n_nodes = prod(n.+1);
# Q selects the boundary nodes as source columns; R reuses them as receivers.
Q = SparseMatrixCSC(1.0I, n_nodes, n_nodes)
Q = Q[:,vec(ib)]
R = copy(Q)
# get param without parallelization (true => high-order scheme)
pFor = getEikonalInvParam(M,Q,R,true)
# random slowness model around 1.0
m0 = 0.2*rand(Float64,tuple(n.+1...)) .+ 1.0
dho, = getData(vec(m0),pFor)
pFor.HO = false;
dlo, = getData(vec(m0),pFor)
# high- and low-order solutions should agree to within 5% relative error
@test norm(dho-dlo)/norm(dho) < 0.05
# parallelize over sources: gather per-worker results back by source index
pForp,continuationDivision,SourcesSubInd = getEikonalInvParam(M,Q,R,true,nworkers())
dphor, = getData(vec(m0),pForp)
dpho = zeros(size(dho))
for k=1:length(dphor)
	dpho[:,SourcesSubInd[k]] = fetch(dphor[k])
end
pForp,continuationDivision,SourcesSubInd = getEikonalInvParam(M,Q,R,false,nworkers())
dplor, = getData(vec(m0),pForp)
dplo = zeros(size(dho))
for k=1:length(dplor)
	dplo[:,SourcesSubInd[k]] = fetch(dplor[k])
end
# parallel results must match the serial ones to machine precision
@test norm(dpho-dho)/norm(dho) < 1e-12
@test norm(dplo-dlo)/norm(dlo) < 1e-12
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.sums.basic
/-!
# Associator for binary disjoint union of categories.
The associator functor `((C ⊕ D) ⊕ E) ⥤ (C ⊕ (D ⊕ E))` and its inverse form an equivalence.
-/
universes v u
open category_theory
open sum
namespace category_theory.sum
variables (C : Type u) [category.{v} C]
(D : Type u) [category.{v} D]
(E : Type u) [category.{v} E]
/--
The associator functor `(C ⊕ D) ⊕ E ⥤ C ⊕ (D ⊕ E)` for sums of categories.
-/
def associator : (C ⊕ D) ⊕ E ⥤ C ⊕ (D ⊕ E) :=
{ obj := λ X, match X with
| inl (inl X) := inl X
| inl (inr X) := inr (inl X)
| inr X := inr (inr X)
end,
map := λ X Y f, match X, Y, f with
| inl (inl X), inl (inl Y), f := f
| inl (inr X), inl (inr Y), f := f
| inr X, inr Y, f := f
end }
@[simp] lemma associator_obj_inl_inl (X) : (associator C D E).obj (inl (inl X)) = inl X := rfl
@[simp] lemma associator_obj_inl_inr (X) : (associator C D E).obj (inl (inr X)) = inr (inl X) := rfl
@[simp] lemma associator_obj_inr (X) : (associator C D E).obj (inr X) = inr (inr X) := rfl
@[simp] lemma associator_map_inl_inl {X Y : C} (f : inl (inl X) ⟶ inl (inl Y)) :
(associator C D E).map f = f := rfl
@[simp] lemma associator_map_inl_inr {X Y : D} (f : inl (inr X) ⟶ inl (inr Y)) :
(associator C D E).map f = f := rfl
@[simp] lemma associator_map_inr {X Y : E} (f : inr X ⟶ inr Y) :
(associator C D E).map f = f := rfl
/--
The inverse associator functor `C ⊕ (D ⊕ E) ⥤ (C ⊕ D) ⊕ E` for sums of categories.
-/
def inverse_associator : C ⊕ (D ⊕ E) ⥤ (C ⊕ D) ⊕ E :=
{ obj := λ X, match X with
| inl X := inl (inl X)
| inr (inl X) := inl (inr X)
| inr (inr X) := inr X
end,
map := λ X Y f, match X, Y, f with
| inl X, inl Y, f := f
| inr (inl X), inr (inl Y), f := f
| inr (inr X), inr (inr Y), f := f
end }
@[simp] lemma inverse_associator_obj_inl (X) :
(inverse_associator C D E).obj (inl X) = inl (inl X) := rfl
@[simp] lemma inverse_associator_obj_inr_inl (X) :
(inverse_associator C D E).obj (inr (inl X)) = inl (inr X) := rfl
@[simp] lemma inverse_associator_obj_inr_inr (X) :
(inverse_associator C D E).obj (inr (inr X)) = inr X := rfl
/--
The equivalence of categories expressing associativity of sums of categories.
-/
def associativity : (C ⊕ D) ⊕ E ≌ C ⊕ (D ⊕ E) :=
equivalence.mk (associator C D E) (inverse_associator C D E)
  (nat_iso.of_components (λ X, eq_to_iso (by tidy)) (by tidy))
  (nat_iso.of_components (λ X, eq_to_iso (by tidy)) (by tidy))
instance associator_is_equivalence : is_equivalence (associator C D E) :=
(by apply_instance : is_equivalence (associativity C D E).functor)
instance inverse_associator_is_equivalence : is_equivalence (inverse_associator C D E) :=
(by apply_instance : is_equivalence (associativity C D E).inverse)
-- TODO unitors?
-- TODO pentagon natural transformation? ...satisfying?
end category_theory.sum
|
Formal statement is: lemma sgn_eq: "sgn z = z / complex_of_real (cmod z)" Informal statement is: The signum function of a complex number is equal to the complex number divided by its modulus. |
Formal statement is: lemma joinable_connected_component_eq: "\<lbrakk>connected T; T \<subseteq> S; connected_component_set S x \<inter> T \<noteq> {}; connected_component_set S y \<inter> T \<noteq> {}\<rbrakk> \<Longrightarrow> connected_component_set S x = connected_component_set S y" Informal statement is: If $T$ is a connected subset of $S$ and the connected components of $x$ and of $y$ in $S$ both intersect $T$, then $x$ and $y$ have the same connected component in $S$.
open import FRP.JS.List using ( List ; [] ; _∷_ ; build ) renaming ( length to llength )
open import FRP.JS.Char using ( Char ) renaming ( _<_ to _<C_ ; _≟_ to _≟C_ )
open import FRP.JS.Nat using ( ℕ )
open import FRP.JS.Bool using ( Bool ; true ; false ; _∧_ ; _∨_ )
module FRP.JS.String where
infixr 5 _++_
infix 4 _≟_
open import FRP.JS.Primitive public using ( String )
private
primitive
primStringAppend : String → String → String
primStringEquality : String → String → Bool
primStringToList : String → List Char
_++_ : String → String → String
_++_ = primStringAppend
{-# COMPILED_JS _++_ function(x) { return function(y) { return x + y; }; } #-}
_≟_ : String → String → Bool
_≟_ = primStringEquality
{-# COMPILED_JS _≟_ function(x) { return function(y) { return x === y; }; } #-}
buildChars : (ℕ → Char) → ℕ → List Char
buildChars = build
toList : String → List Char
toList = primStringToList
{-# COMPILED_JS toList function(s) {
return exports.buildChars(function(n) { return s.charAt(n); },s.length);
} #-}
length : String → ℕ
length s = llength (toList s)
{-# COMPILED_JS length function(s) { return s.length; } #-}
-- Lexicographic strict order on character lists: a proper prefix is
-- smaller; otherwise the first differing character decides.
_<*_ : List Char → List Char → Bool
as <* [] = false
[] <* (b ∷ bs) = true
(a ∷ as) <* (b ∷ bs) = (a <C b) ∨ ((a ≟C b) ∧ (as <* bs))
-- String strict order via character-list comparison.
-- NOTE(review): the JS backend compiles this to native `<` on strings
-- (UTF-16 code-unit order); confirm it agrees with the Agda definition
-- for the character range in use.
_<_ : String → String → Bool
s < t = toList s <* toList t
{-# COMPILED_JS _<_ function(x) { return function(y) { return x < y; }; } #-}
-- Non-strict order: equal, or strictly less.
_≤_ : String → String → Bool
s ≤ t = (s ≟ t) ∨ (s < t)
{-# COMPILED_JS _≤_ function(x) { return function(y) { return x <= y; }; } #-}
|
State Before: α : Type u_1
β : Type ?u.15405
γ : Type ?u.15408
r : Rel α β
s : Set α
⊢ image Eq s = s State After: case h
α : Type u_1
β : Type ?u.15405
γ : Type ?u.15408
r : Rel α β
s : Set α
x : α
⊢ x ∈ image Eq s ↔ x ∈ s Tactic: ext x State Before: case h
α : Type u_1
β : Type ?u.15405
γ : Type ?u.15408
r : Rel α β
s : Set α
x : α
⊢ x ∈ image Eq s ↔ x ∈ s State After: no goals Tactic: simp [mem_image] |
module Render (cfileSink) where
import Control.Monad
import Data.ByteString.Builder
import Data.Complex
import IQ
import Pipes
import qualified Pipes.Prelude as P
import System.IO
-- | Concatenate every @n@ consecutive upstream values into a single
-- downstream value; runs for as long as upstream produces.
--
-- Fix: a non-positive @n@ previously spun forever yielding 'mempty'
-- without ever awaiting; it is now clamped to a chunk size of 1.
-- Behaviour for @n >= 1@ is unchanged.
foldChunk :: (Monad m, Monoid a) => Int -> Pipe a a m ()
foldChunk n = forever $ go (max 1 n) mempty
  where go 0 acc = yield acc
        go k acc = do
          x <- await
          go (k - 1) (acc <> x)
-- | Serialise one IQ sample as two little-endian 32-bit floats:
-- real part first, then imaginary.
renderSample :: IQ -> Builder
renderSample (real :+ imag) = floatLE real <> floatLE imag
-- | Stream IQ samples to a handle, serialising each with
-- 'renderSample' and batching 100 samples per 'hPutBuilder' write to
-- amortise I/O overhead.
cfileSink :: MonadIO m => Handle -> Consumer IQ m ()
cfileSink h = P.map renderSample
          >-> foldChunk 100
          >-> P.mapM_ (liftIO . hPutBuilder h)
{-# INLINE cfileSink #-}
|
State Before: α : Type u
β : Type v
γ : Type w
δ : Type x
inst✝ : OrderedAddCommGroup β
f g : α → β
a : α
s : Set α
l : Filter α
hf : IsMinOn f s a
hg : IsMaxOn g s a
⊢ IsMinOn (fun x => f x - g x) s a State After: no goals Tactic: simpa only [sub_eq_add_neg] using hf.add hg.neg |
# Placeholder test suite for the SeisConvert package.
using SeisConvert
using Test
@testset "SeisConvert.jl" begin
    # Write your own tests here.
end
|
(* Title: HOL/Auth/n_mesi_lemma_on_inv__3.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_mesi Protocol Case Study*}
theory n_mesi_lemma_on_inv__3 imports n_mesi_base
begin
section{*All lemmas on causal relation between inv__3 and some rule r*}
(* Rule n_t1 preserves invariant inv__3: case-split on whether the rule
   index i coincides with one of the invariant's indices (p__Inv2,
   p__Inv0) or neither; in the first two cases the invariant holds
   directly after the step (?P1), in the disjoint case the rule does not
   touch the invariant's state (also discharged via ?P1 here). *)
lemma n_t1Vsinv__3:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_t1 i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_t1 i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_t2Vsinv__3:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_t2 N i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_t2 N i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "((formEval (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)) s))\<or>((formEval (andForm (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i=p__Inv0)"
have "((formEval (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) s))\<or>((formEval (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) s))\<or>((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) s))\<or>((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "((formEval (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))\<or>((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))\<or>((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))\<or>((formEval (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))\<or>((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident 
''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm 
(andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))\<or>((formEval (andForm (andForm (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))" by auto
moreover {
assume c1: "((formEval (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (andForm (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I)) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (andForm (andForm (andForm (andForm (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const I))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv2)) (Const E)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const I)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const M)))) (neg (eqn (IVar (Para (Ident ''state'') p__Inv0)) (Const E)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_t3 preserves invariant inv__3: for any rule instance n_t3 N i and any
   invariant instance inv__3 p__Inv0 p__Inv2 (distinct indices bounded by N),
   one of the three disjuncts ?P1/?P2/?P3 of invHoldForRule holds. *)
lemma n_t3Vsinv__3:
  assumes a1: "(\<exists> i. i\<le>N\<and>r=n_t3 N i)" and
  a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2)"
  shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
  (* Fix witnesses for the rule index and the invariant parameters. *)
  from a1 obtain i where a1:"i\<le>N\<and>r=n_t3 N i" apply fastforce done
  from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2" apply fastforce done
  (* Exhaustive case split on how i relates to the two invariant indices;
     each case discharges ?P1 directly by auto. *)
  have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
  moreover {
    assume b1: "(i=p__Inv2)"
    have "?P1 s"
    proof(cut_tac a1 a2 b1, auto) qed
    then have "invHoldForRule s f r (invariants N)" by auto
  }
  moreover {
    assume b1: "(i=p__Inv0)"
    have "?P1 s"
    proof(cut_tac a1 a2 b1, auto) qed
    then have "invHoldForRule s f r (invariants N)" by auto
  }
  moreover {
    assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
    have "?P1 s"
    proof(cut_tac a1 a2 b1, auto) qed
    then have "invHoldForRule s f r (invariants N)" by auto
  }
  ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_t4 preserves invariant inv__3; same proof shape as n_t3Vsinv__3:
   obtain witnesses, split on the relation of i to the invariant indices,
   and discharge ?P1 in every case. *)
lemma n_t4Vsinv__3:
  assumes a1: "(\<exists> i. i\<le>N\<and>r=n_t4 N i)" and
  a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2)"
  shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
  (* Fix witnesses for the rule index and the invariant parameters. *)
  from a1 obtain i where a1:"i\<le>N\<and>r=n_t4 N i" apply fastforce done
  from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2" apply fastforce done
  (* Exhaustive case split on the rule index. *)
  have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
  moreover {
    assume b1: "(i=p__Inv2)"
    have "?P1 s"
    proof(cut_tac a1 a2 b1, auto) qed
    then have "invHoldForRule s f r (invariants N)" by auto
  }
  moreover {
    assume b1: "(i=p__Inv0)"
    have "?P1 s"
    proof(cut_tac a1 a2 b1, auto) qed
    then have "invHoldForRule s f r (invariants N)" by auto
  }
  moreover {
    assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
    have "?P1 s"
    proof(cut_tac a1 a2 b1, auto) qed
    then have "invHoldForRule s f r (invariants N)" by auto
  }
  ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
end
|
Mason Thomas provides legal services to accident victims and employees. They are also versed in equine (horse) law.
Mason Thomas has served clients in complex lawsuits and routine claims against drivers, big businesses, insurance companies, and employers. Their personal injury lawyers have handled nearly 200 jury trials involving car accidents, truck accidents, wrongful death, and products liability. They concentrate on serious personal injury claims and employment law violations, providing a strong voice for people who have suffered injury and damages due to negligence, workplace harassment and discrimination. They handle a wide range of employment law disputes and serve California seniors in elder law matters involving nursing home injuries and financial abuse.
Their attorneys are happy to answer any questions you may have about your options after a motor vehicle accident, workplace accident, slip-and-fall accident, or violation of your employee rights. They offer free case evaluations for accident victims and California employees, and can be contacted by calling or by confidential email using the form provided.
Mason Thomas offers personal service to clients involved in state and federal litigation in the Sacramento area and throughout California. Their employment law and personal injury lawyers maintain offices in Davis, although they are also willing to meet with clients at home or in the hospital if necessary.
wiki:calbar:36454 Stephen A. Mason CalBar Profile Admitted to the bar on 1965112
wiki:calbar:78946 Bradley S. Thomas CalBar Profile Admitted to the bar on 19771228
wiki:calbar:248553 John C. Bridges CalBar Profile Admitted to the bar on 2007310
wiki:calbar:261219 Kelly L. Thomas CalBar Profile Admitted to the bar on 20081212
Up the creek without a paddle? Check out the Legal Services page.
|
module Meriv.Core.ExternRelation
|
lemma norm_le_infnorm: fixes x :: "'a::euclidean_space" shows "norm x \<le> sqrt DIM('a) * infnorm x" |
Formal statement is: lemma sup_measure_F_mono: "finite I \<Longrightarrow> J \<subseteq> I \<Longrightarrow> sup_measure.F id J \<le> sup_measure.F id I" Informal statement is: If $I$ is a finite set of measures and $J \subseteq I$, then the supremum of the measures in $J$ is less than or equal to the supremum of the measures in $I$.
# Delegate to the Java `aggregate(SecondMoment)` method of the wrapped
# ThirdMoment object; returns nothing (Java `void`).
# NOTE(review): semantics are those of the underlying Java class
# (presumably Apache Commons Math) — confirm against its javadoc.
function aggregate(obj::ThirdMoment, arg0::SecondMoment)
    return jcall(obj, "aggregate", void, (SecondMoment,), arg0)
end
# Delegate to the Java `clear()` method of the wrapped ThirdMoment object;
# returns nothing (Java `void`).
function clear(obj::ThirdMoment)
    return jcall(obj, "clear", void, ())
end
# Delegate to the Java `copy()` method; returns a new ThirdMoment proxy.
function copy(obj::ThirdMoment)
    return jcall(obj, "copy", ThirdMoment, ())
end
# Delegate to the Java `getResult()` method; returns the current value of the
# statistic as a Java double (Float64 on the Julia side).
function get_result(obj::ThirdMoment)
    return jcall(obj, "getResult", jdouble, ())
end
# Delegate to the Java `increment(double)` method, feeding one new data value
# into the statistic; returns nothing (Java `void`).
function increment(obj::ThirdMoment, arg0::jdouble)
    return jcall(obj, "increment", void, (jdouble,), arg0)
end
|
[STATEMENT]
lemma coprime_exp_mod: "coprime lu p \<Longrightarrow> n \<noteq> 0 \<Longrightarrow> lu mod p ^ n \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>comm_monoid_mult_class.coprime lu p; n \<noteq> 0\<rbrakk> \<Longrightarrow> lu mod p ^ n \<noteq> 0
[PROOF STEP]
using prime
[PROOF STATE]
proof (prove)
using this:
prime p
goal (1 subgoal):
1. \<lbrakk>comm_monoid_mult_class.coprime lu p; n \<noteq> 0\<rbrakk> \<Longrightarrow> lu mod p ^ n \<noteq> 0
[PROOF STEP]
by fastforce |
%!TEX root = ../thesis.tex
% ******************************* Thesis Appendix B ********************************
\chapter{User Evaluation Interview Transcripts}
Below you will find the sample questions asked for each group, detailed descriptions and relevant transcript
snippets for each problem statement.
Any specifics regarding course, staff and event details have been anonymised.
Here is the participant characterisations table again.
\begin{table}[!h]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{\textwidth}{>{\bfseries}lX}
Participant & Characterisation \\
\toprule
Educator A & lecturer in higher education for over 20 years, and an experienced higher education
administrator \\\midrule
Educator F & lecturer in higher education for over 10 years \\\midrule
Student C & a university course representative for 3 years, which involves collecting and
communicating student feedback and attending staff-student liaison meetings \\\midrule
Student E & a university course representative for 2 years and a peer assisted learning leader
for 1 year \\\bottomrule
\end{tabularx}
\end{table}
\section{Structured Questions}
These questions are structured as statements, which request for a response first on a Likert-type scale of 1 to 5:
\begin{enumerate}
\setlength\itemsep{0em}
\item Strongly disagree
\item Disagree
\item Neither agree nor disagree
\item Agree
\item Strongly agree
\end{enumerate}
The participant then explains why and how they've come to this opinion, with reference to the demonstrator system.
\newpage
\textbf{Q1. The features of the system communicate assessment expectations very well.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabular}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{Dandelion}3 & \cellcolor{green}5 & \cellcolor{SpringGreen}4 & \cellcolor{SpringGreen}4 \\
\hline
\end{tabular}
\end{table}
\textit{Educator A}: "It was clear what the assessment types were... I don't think (the demonstration)
focused on the details of what the assessments required... but in terms of the features of the system,
yes it does have the potential to communicate (the expectations) well. If (the assessment brief) is
poorly written then it does not, if it is well written than it does, that is not down to the function of the system."
\textit{Educator F}: "I think this is more than we currently provide to our students... it would be nice if
(the list of knowledge required) has direct links to particular lectures or materials."
\textit{Student E}: "When it shows the objectives... before they upload the submission...
so they can't say they've forgotten about it. But it is a bit too much in your face, it would stress me out."\\
\textbf{Q2. The features of the system improve transparency in assessment procedures.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{Dandelion}3 & \cellcolor{green}5 & \cellcolor{green}5 & \cellcolor{green}5 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "Current systems can do it very badly or very well. It makes it more transparent than
the systems that don't provide that level of transparency already now. Some of them already do. For example,
you can build the criteria-based marking scheme in your system into other systems. If you force people to do that
in a consistent way, yes it does."
\textit{Educator F}: "It absolutely does...
especially when a student can see what the assessor has picked (for each criteria)."
\textit{Student E}: "I think the marking criteria, and seeing what we got from the marker is really useful
because we don't have that currently. I think that is really good."\\
\textbf{Q3. The features of the system make curriculum personalisation convenient.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{green}5 & \cellcolor{green}5 & \cellcolor{green}5 & \cellcolor{SpringGreen}4 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "Yes it does if what you are doing is collecting blocks. It was presented in a way that was
easy to see what it was."
\textit{Student E}: "(I like that) only your chosen modules are shown (in the Ongoing Modules page). At the moment, our system is very crowded."\\
\textbf{Q4. The features of the system provide good (administrative/ pastoral) support for curriculum personalisation.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{green}5 & \cellcolor{green}5 & \cellcolor{SpringGreen}4 & \cellcolor{Dandelion}3 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "I think the interesting thing is the ability to chat with advisors about them.
There is also the potential for the chats to be stored.
I think having that (chat) record that can be revisited is a good thing as long as (it is kept private)."
\textit{Educator F}: "I like the fact that you can pick your modules while asking your tutor or support staff for more advice.
It could be administrative issues as well to the support staff. When I see (my students) choose their modules they sometimes
choose it blindly because they don't really have an idea what they want, so this could provide a lot more information and support.
It is actually good for the organisation as well... having a more accurate headcount as the students are more confident about their choices."
\textit{Student C}: "It's not a lot of admin work and it's a set of straightforward work, but you will need a few more people
ready to support."
\textit{Student E}: "I think it should be your decision what modules you want to do. The titles and programme outcomes...
the system should just decide automatically based on the modules you chose. I don't want to give my supervisor the power
to approve my decisions when I am paying (tuition)."\\
\textbf{Q5. The system can reduce tension and disagreements between educators and students.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{SpringGreen}4 & \cellcolor{Dandelion}3 & \cellcolor{Dandelion}3 & \cellcolor{SpringGreen}4 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "I think these things are best measured in practice. I think it would because of the focus on
criteria-based statements... it doesn't stop people from arguing with the criteria, (students) are not really interested
in criteria they are interested in grades. So it wouldn't get rid of disagreements and tension, but it would reduce it
and focus the discussion on the criteria."
\textit{Educator F}: "(The features) increase the provenance and transparency in how we grade and mark,
that's a very good thing and that's what blockchain is all about, but I am not sure if this reduces the tension.
I think it will most likely help people resolve disagreements, not to reduce them to begin with."
\textit{Student C}: "If there is a disagreement, the student cannot report back in the system. The student should be
able to start a conversation and argue their case... that should be on the blockchain."
\textit{Student E}: "Somewhat, having the marking criteria and objectives on the page of submission means that they are the most
recent version when you submit. The feedback on marking scheme also makes more sense when you read it.
But it (could) stress students out more."\\
\textbf{Q6. The features of the system make educational history more transparent and trustworthy.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{green}5 & \cellcolor{green}5 & \cellcolor{Dandelion}3 & \cellcolor{green}5 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "Yes it does because you've got immutable records."
\textit{Student C}: "If your conversations go wrong, you lose trust. Sometimes when people speak face to face
it is a different interaction than what they do on a computer."
\textit{Student E}: "It is very transparent. It is black and white. If you are a recruiter, you'd know it is official
and not made up."\\
\textbf{Q7. The access control features of the system preserve student privacy.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{SpringGreen}4 & \cellcolor{green}5 & \cellcolor{green}5 & \cellcolor{green}5 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "That depends on the security of the blockchain. Also, it is not private between you and
the grader, or (institutional administrators)... privacy is not a right in all cases. (For the public readers,)
I think that's a really great idea, some universities are starting to do systems where you could make your transcript
available to people, (this system) puts you in control of it."
\textit{Educator F}: "Yes because you are allowed to set different levels of access based on someone's role and
that is essential."
\textit{Student C}: "I like that you've got a preview of what others can see and you can tap on and off."\\
\textbf{Q8. The features of the system increase trust in online education providers and credentials.}\\
\begin{table}[!ht]
% \caption{Participants in user evaluation interviews}
\centering
% \label{table:participants-eval}
\begin{tabularx}{0.325\textwidth}{|c|c|c|c|c|}
\hline
Participant & A & F & C & E \\
\hline
Scale Given & \cellcolor{SpringGreen}4 & \cellcolor{green}5 & \cellcolor{SpringGreen}4 & \cellcolor{SpringGreen}4 \\
\hline
\end{tabularx}
\end{table}
\textit{Educator A}: "If you are aggregating content from anywhere,
the quality of the content and assessments is the issue. The question is who is issuing the award,
because often credibility and trust is about the award issuer.
An institution might be more interested in protecting their reputation and use an instance of the blockchain
instead of participating in the global marketplace blockchain. A MOOC platform like Coursera will have to issue
their own awards. You cannot rely on the blockchain to give trust, unless you've got a crowd-sourced model,
where students and academics can review and rate courses."
% For existing (MOOC platforms) like coursera putting up their existing content,
% the quality of the content is the issue... for an institution offering courses through this platform,
% the credibility of the institution is the issue.
\textit{Educator F}: "I think if you ask me a year ago I wouldn't (be convinced), but now with bitcoin and
blockchain everywhere I think people are beginning to understand and it is becoming more mainstream in a way.
People have developed trust because they can understand the technology better."
\textit{Student C}: "There is clarity... and the simplicity of it. You can see everything. Some systems get rid
of your records after a period of time."\\
\section{Semi-Structured Questions}
\textbf{Q9. What are your thoughts on how the system conducts assessments and curriculum personalisation?}\\
\textit{Educator A}: "The system poses a lot of constraints on how assessment can be conducted.
The assessor should be able to override grades the rubric has calculated with an explicit justification...
that is transparent to the student."
\textit{Educator F}: "I think it will not replace existing systems, but as universities start to offer more
courses online to reduce cost and provide flexibility to students. I can see this system being extremely useful
for that situation."
\textit{Educator F}: "For traditional campus students, it could add value because it integrates functions of different systems,
such as blackboard (a course content delivery platform) and Wiseflow (a digital assessment platform)."
\textit{Educator F}: "There could be resistance from a lot of module leaders (on having detailed marking schemes)."
\textit{Student E}: "I do want to see my supervisors face to face. If everything happens on the system,
where is the trust in that? I want to know if they are behind that screen... a lot of students may not be comfortable
with video chats and recordings."\\
\textbf{Q10. Is the system useful? Would you consider enrolling into this marketplace platform in the future?}\\
\textit{Educator A}: "As a provider of content, yes. I think the market globally is going towards
individual providers of content. I think the UK is not the place of such a market but the US is.
Even for physical universities, that would be a good thing because they can recruit more students globally, and
take the pressure off their estate."
\textit{Educator A}: "I know of a case where a student from 20 years ago has come back to ask for his records
and a transcript, but the department no longer has them (after moving systems and records several times).
I think the student is trying to pull a fast one where he has never graduated. Recording assessments has massive value,
and gives much greater security."
\textit{Educator F}: "Potentially you can pick credits from Stanford and Harvard.
It makes sense from the student's point of view, and also to people who may not have access to institutions.
I would use it as a teacher as well."
\textit{Student C}: "Yes, just because it is easier and more convenient."
\textit{Student E}: "Yes I absolutely would. It makes you feel like every student has their own thing and
only you could see this. And I like the user experience as well."
|
{-# OPTIONS --universe-polymorphism #-}
module Issue204 where
open import Issue204.Dependency
-- Postulate an arbitrary universe level together with inhabitants of the
-- parameterised structures R and D imported from Issue204.Dependency;
-- this is a minimal regression test for universe polymorphism (issue #204).
postulate
  ℓ : Level
  r : R ℓ
  d : D ℓ

-- Opening the parameterised record/module applications must type-check.
-- NOTE(review): M is presumably a module exported by Issue204.Dependency —
-- confirm against that file.
open R r
open M d
|
Formal statement is: lemma bilinear_continuous_on_compose: fixes h :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space \<Rightarrow> 'c::real_normed_vector" and f :: "'d::t2_space \<Rightarrow> 'a" assumes "continuous_on S f" "continuous_on S g" "bilinear h" shows "continuous_on S (\<lambda>x. h (f x) (g x))" Informal statement is: If $h$ is bilinear and $f$ and $g$ are continuous on $S$, then the function $x \mapsto h(f(x), g(x))$ is continuous on $S$.
#ifndef GBNET_MODELORNOR
#define GBNET_MODELORNOR
#include <cmath>
#include <time.h>
#include <gsl/gsl_rstat.h>
#include "ModelBase.h"
#include "GraphORNOR.h"
namespace gbn
{
// Model specialisation built on ModelBase, wired to the OR-NOR graph
// structure declared in GraphORNOR.h.
class ModelORNOR: public ModelBase
{
private:
protected:
public:
    // Default-construct an empty model.
    ModelORNOR();
    ~ModelORNOR() override;
    // Construct from a network, an evidence dictionary, and an optional prior
    // set of active TFs. The 3x3 double array defaults to gbn::SPRIOR, and the
    // z/z0/t alpha/beta pairs default to 25.
    // NOTE(review): the trailing unnamed uint/bool/double parameters' meanings
    // are not evident from this header — confirm against the implementation file.
    ModelORNOR(const network_t, const evidence_dict_t, const prior_active_tf_set_t = prior_active_tf_set_t(),
        const double [3 * 3] = gbn::SPRIOR, double z_alpha = 25., double z_beta = 25., double z0_alpha = 25., double z0_beta = 25., double t_alpha = 25., double t_beta = 25.,
        unsigned int = 3, bool = true, bool = true, bool = false,
        double = 2., double = 2., double = 2., double = 2., double = 8., double = 2.);
};
}
#endif |
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$define xc_dimensions_2d
(* Empirical parameters of the 2D B86 functional.
   NOTE(review): "mgc" presumably abbreviates "modified gradient correction" —
   confirm against the libxc literature reference. *)
_2d_b86_mgc_beta := 0.003317:
_2d_b86_mgc_gamma := 0.008323:

(* Exchange enhancement factor as a function of the reduced gradient x. *)
_2d_b86_mgc_f := x -> 1 + _2d_b86_mgc_beta/X_FACTOR_2D_C*x^2/(1 + _2d_b86_mgc_gamma*x^2)^(3/4):

(* Entry point expected by the generator: GGA exchange energy assembled from
   the enhancement factor via the generic gga_exchange template. *)
f := (rs, zeta, xt, xs0, xs1) -> gga_exchange(_2d_b86_mgc_f, rs, zeta, xs0, xs1):
|
If $S$ is connected and every continuous function $f$ from $S$ to $\mathbb{C}$ with $f(z) \neq 0$ for all $z \in S$ has a continuous square root, then $S$ is simply connected. |
= = = Peterborough United = = =
|
#include <boost/mpl/aux_/sort_impl.hpp>
|
-- 2014-04-06 Andreas, issue reported by Andres Sicard-Ramirez
-- {-# OPTIONS --termination-depth=100 -v term.matrices:40 #-}
-- Unary (Peano) natural numbers.
data ℕ : Set where
  zero : ℕ
  succ : ℕ → ℕ
-- The following function is accepted by the termination checker in
-- Agda-2.3.2.2, but it is rejected by the termination checker in
-- the current development version. (The function was adapted from Lee,
-- Jones, and Ben-Amram, POPL '01).
-- Permuting function from Lee, Jones, and Ben-Amram (POPL '01): each recursive
-- call permutes the arguments, so no single argument decreases; termination
-- needs a lexicographic measure, which is why this probes termination-checker
-- strength (accepted by Agda 2.3.2.2, rejected by the then-current dev version).
p : ℕ → ℕ → ℕ → ℕ
p m n (succ r) = p m r n
p m (succ n) zero = p zero n m
p m zero zero = m
|
module plfa.part1.Induction where
import Relation.Binary.PropositionalEquality as Eq
open Eq using (_≡_; refl; cong; sym)
open Eq.≡-Reasoning
open import Data.Nat using (ℕ; zero; suc; _+_; _*_; _∸_)
-- Addition is associative; proof by induction on the first argument,
-- written out with equational reasoning (begin ... ∎).
+-assoc : ∀ (m n p : ℕ) → (m + n) + p ≡ m + (n + p)
+-assoc zero n p =
  begin
    (zero + n) + p
  ≡⟨⟩
    n + p
  ≡⟨⟩
    zero + (n + p)
  ∎
+-assoc (suc m) n p =
  begin
    (suc m + n) + p
  ≡⟨⟩
    suc (m + n) + p
  ≡⟨⟩
    suc ((m + n) + p)
  -- A relation is said to be a congruence for
  -- a given function if it is preserved by applying that function.
  -- If e is evidence that x ≡ y,
  -- then cong f e is evidence that f x ≡ f y,
  -- for any function f.
  -- The correspondence between proof by induction and
  -- definition by recursion is one of the most appealing
  -- aspects of Agda.
  -- cong : ∀ (f : A → B) {x y} → x ≡ y → f x ≡ f y
  --        ^- suc          ^- (m + n) + p ≡ m + (n + p)   (inductive hypothesis)
  -- ----------------------------------------------------------- (=> implies)
  --        suc ((m + n) + p) ≡ suc (m + (n + p))
  ≡⟨ cong suc (+-assoc m n p) ⟩
    suc (m + (n + p))
  -- cong : ∀ (f : A → B) {x y} → x ≡ y → f x ≡ f y
  -- cong f refl = refl
  ≡⟨⟩
    suc m + (n + p)
  ∎
-- Associativity instantiated at m = 2, unfolded step by step through nested
-- where-helpers for m = 1 and m = 0 (mirrors the inductive structure of +-assoc).
+-assoc-2 : ∀ (n p : ℕ) → (2 + n) + p ≡ 2 + (n + p)
+-assoc-2 n p =
  begin
    (2 + n) + p
  ≡⟨⟩
    suc (1 + n) + p
  ≡⟨⟩
    suc ((1 + n) + p)
  ≡⟨ cong suc (+-assoc-1 n p) ⟩
    suc (1 + (n + p))
  ≡⟨⟩
    2 + (n + p)
  ∎
  where
  -- The m = 1 case, reduced to the m = 0 case below.
  +-assoc-1 : ∀ (n p : ℕ) -> (1 + n) + p ≡ 1 + (n + p)
  +-assoc-1 n p =
    begin
      (1 + n) + p
    ≡⟨⟩
      suc (0 + n) + p
    ≡⟨⟩
      suc ((0 + n) + p)
    ≡⟨ cong suc (+-assoc-0 n p) ⟩
      suc (0 + (n + p))
    ≡⟨⟩
      1 + (n + p)
    ∎
    where
    -- The base case: both sides reduce definitionally.
    +-assoc-0 : ∀ (n p : ℕ) → (0 + n) + p ≡ 0 + (n + p)
    +-assoc-0 n p =
      begin
        (0 + n) + p
      ≡⟨⟩
        n + p
      ≡⟨⟩
        0 + (n + p)
      ∎
-- zero is a right identity of addition, by induction on m.
+-identityᴿ : ∀ (m : ℕ) → m + zero ≡ m
+-identityᴿ zero =
  begin
    zero + zero
  ≡⟨⟩
    zero
  ∎
+-identityᴿ (suc m) =
  begin
    suc m + zero
  ≡⟨⟩
    suc (m + zero)
  ≡⟨ cong suc (+-identityᴿ m) ⟩
    suc m
  ∎
-- suc can be pulled out of the second argument of addition;
-- by induction on the first argument.
+-suc : ∀ (m n : ℕ) → m + suc n ≡ suc (m + n)
+-suc zero n =
  begin
    zero + suc n
  ≡⟨⟩
    suc n
  ≡⟨⟩
    suc (zero + n)
  ∎
+-suc (suc m) n =
  begin
    suc m + suc n
  ≡⟨⟩
    suc (m + suc n)
  ≡⟨ cong suc (+-suc m n) ⟩
    suc (suc (m + n))
  ≡⟨⟩
    suc (suc m + n)
  ∎
-- Commutativity of addition, by induction on the second argument,
-- using +-identityᴿ for the base case and +-suc for the step.
+-comm : ∀ (m n : ℕ) → m + n ≡ n + m
+-comm m zero =
  begin
    m + zero
  ≡⟨ +-identityᴿ m ⟩
    m
  ≡⟨⟩
    zero + m
  ∎
+-comm m (suc n) =
  begin
    m + suc n
  ≡⟨ +-suc m n ⟩
    suc (m + n)
  ≡⟨ cong suc (+-comm m n) ⟩
    suc (n + m)
  ≡⟨⟩
    suc n + m
  ∎
-- Rearranging
-- We can apply associativity to
-- rearrange parentheses however we like.
+-rearrange
  : ∀ (m n p q : ℕ)
  → (m + n) + (p + q) ≡ m + (n + p) + q
+-rearrange m n p q =
  begin
    (m + n) + (p + q)
  ≡⟨ +-assoc m n (p + q) ⟩
    m + (n + (p + q))
  ≡⟨ cong (m +_) (sym (+-assoc n p q)) ⟩
    m + ((n + p) + q)
  -- +-assoc       : (m + n) + p ≡ m + (n + p)
  -- sym (+-assoc) : m + (n + p) ≡ (m + n) + p
  ≡⟨ sym (+-assoc m (n + p) q) ⟩
    (m + (n + p)) + q
  ∎
-- Associativity with rewrite
-- Rewriting avoids not only chains of
-- equations but also the need to invoke cong.
+-assoc' : ∀ (m n p : ℕ) → (m + n) + p ≡ m + (n + p)
+-assoc' zero n p = refl
+-assoc' (suc m) n p rewrite +-assoc' m n p = refl

-- Right identity, restated with rewrite.
+-identity' : ∀ (n : ℕ) → n + zero ≡ n
+-identity' zero = refl
+-identity' (suc n) rewrite +-identity' n = refl

-- +-suc, restated with rewrite.
+-suc' : ∀ (m n : ℕ) → m + suc n ≡ suc (m + n)
+-suc' zero n = refl
+-suc' (suc m) n rewrite +-suc' m n = refl

-- Commutativity; `|` chains two rewrites in sequence.
+-comm' : ∀ (m n : ℕ) → m + n ≡ n + m
+-comm' m zero rewrite +-identity' m = refl
+-comm' m (suc n) rewrite +-suc' m n | +-comm' m n = refl
-- Building proofs interactively
-- (same proof as +-assoc', kept as an interactive-development example).
+-assoc'' : ∀ (m n p : ℕ) → (m + n) + p ≡ m + (n + p)
+-assoc'' zero n p = refl
+-assoc'' (suc m) n p rewrite +-assoc'' m n p = refl
-- Exercise: swap the first two summands.
-- Note:
-- sym -- rewrites the left side of the Goal.
+-swap : ∀ (m n p : ℕ) → m + (n + p) ≡ n + (m + p)
+-swap zero n p = refl
+-swap (suc m) n p rewrite
    +-assoc'' m n p
  | +-suc n (m + p)
  | +-swap m n p
  = refl
-- Multiplication distributes over addition on the right.
-- Goal shape in the inductive case:
--   (suc m + n) * p ≡ suc m * p + n * p
--   p + (m * p + n * p) ≡ p + m * p + n * p
*-distrib-+ : ∀ (m n p : ℕ) → (m + n) * p ≡ m * p + n * p
*-distrib-+ zero n p = refl
*-distrib-+ (suc m) n p rewrite
    *-distrib-+ m n p
  | sym (+-assoc p (m * p) (n * p))
  = refl
-- Multiplication is associative.
-- Goal shape in the inductive case:
--   (n + m * n) * p ≡ n * p + m * (n * p)
*-assoc : ∀ (m n p : ℕ) → (m * n) * p ≡ m * (n * p)
*-assoc zero n p = refl
*-assoc (suc m) n p rewrite
    *-distrib-+ n (m * n) p
  | *-assoc m n p
  = refl
|
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: appsync
using AWS.Compat
using AWS.UUIDs
"""
CreateApiCache()
Creates a cache for the GraphQL API.
# Required Parameters
- `apiCachingBehavior`: Caching behavior. FULL_REQUEST_CACHING: All requests are fully cached. PER_RESOLVER_CACHING: Individual resolvers that you specify are cached.
- `apiId`: The GraphQL API Id.
- `ttl`: TTL in seconds for cache entries. Valid values are between 1 and 3600 seconds.
- `type`: The cache instance type. Valid values are SMALL MEDIUM LARGE XLARGE LARGE_2X LARGE_4X LARGE_8X (not available in all regions) LARGE_12X Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used. The following legacy instance types are available, but their use is discouraged: T2_SMALL: A t2.small instance type. T2_MEDIUM: A t2.medium instance type. R4_LARGE: A r4.large instance type. R4_XLARGE: A r4.xlarge instance type. R4_2XLARGE: A r4.2xlarge instance type. R4_4XLARGE: A r4.4xlarge instance type. R4_8XLARGE: A r4.8xlarge instance type.
# Optional Parameters
- `atRestEncryptionEnabled`: At rest encryption flag for cache. This setting cannot be updated after creation.
- `transitEncryptionEnabled`: Transit encryption flag when connecting to cache. This setting cannot be updated after creation.
"""
create_api_cache(apiCachingBehavior, apiId, ttl, type; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/ApiCaches", Dict{String, Any}("apiCachingBehavior"=>apiCachingBehavior, "ttl"=>ttl, "type"=>type); aws_config=aws_config)
create_api_cache(apiCachingBehavior, apiId, ttl, type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/ApiCaches", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("apiCachingBehavior"=>apiCachingBehavior, "ttl"=>ttl, "type"=>type), args)); aws_config=aws_config)
"""
CreateApiKey()
Creates a unique key that you can distribute to clients who are executing your API.
# Required Parameters
- `apiId`: The ID for your GraphQL API.
# Optional Parameters
- `description`: A description of the purpose of the API key.
- `expires`: The time from creation time after which the API key expires. The date is represented as seconds since the epoch, rounded down to the nearest hour. The default value for this parameter is 7 days from creation time. For more information, see .
"""
create_api_key(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/apikeys"; aws_config=aws_config)
create_api_key(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/apikeys", args; aws_config=aws_config)
"""
CreateDataSource()
Creates a DataSource object.
# Required Parameters
- `apiId`: The API ID for the GraphQL API for the DataSource.
- `name`: A user-supplied name for the DataSource.
- `type`: The type of the DataSource.
# Optional Parameters
- `description`: A description of the DataSource.
- `dynamodbConfig`: Amazon DynamoDB settings.
- `elasticsearchConfig`: Amazon Elasticsearch Service settings.
- `httpConfig`: HTTP endpoint settings.
- `lambdaConfig`: AWS Lambda settings.
- `relationalDatabaseConfig`: Relational database settings.
- `serviceRoleArn`: The AWS IAM service role ARN for the data source. The system assumes this role when accessing the data source.
"""
create_data_source(apiId, name, type; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/datasources", Dict{String, Any}("name"=>name, "type"=>type); aws_config=aws_config)
create_data_source(apiId, name, type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/datasources", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("name"=>name, "type"=>type), args)); aws_config=aws_config)
"""
CreateFunction()
Creates a Function object. A function is a reusable entity. Multiple functions can be used to compose the resolver logic.
# Required Parameters
- `apiId`: The GraphQL API ID.
- `dataSourceName`: The Function DataSource name.
- `functionVersion`: The version of the request mapping template. Currently the supported value is 2018-05-29.
- `name`: The Function name. The function name does not have to be unique.
# Optional Parameters
- `description`: The Function description.
- `requestMappingTemplate`: The Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template.
- `responseMappingTemplate`: The Function response mapping template.
"""
create_function(apiId, dataSourceName, functionVersion, name; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/functions", Dict{String, Any}("dataSourceName"=>dataSourceName, "functionVersion"=>functionVersion, "name"=>name); aws_config=aws_config)
create_function(apiId, dataSourceName, functionVersion, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/functions", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("dataSourceName"=>dataSourceName, "functionVersion"=>functionVersion, "name"=>name), args)); aws_config=aws_config)
"""
CreateGraphqlApi()
Creates a GraphqlApi object.
# Required Parameters
- `authenticationType`: The authentication type: API key, AWS IAM, OIDC, or Amazon Cognito user pools.
- `name`: A user-supplied name for the GraphqlApi.
# Optional Parameters
- `additionalAuthenticationProviders`: A list of additional authentication providers for the GraphqlApi API.
- `logConfig`: The Amazon CloudWatch Logs configuration.
- `openIDConnectConfig`: The OpenID Connect configuration.
- `tags`: A TagMap object.
- `userPoolConfig`: The Amazon Cognito user pool configuration.
- `xrayEnabled`: A flag indicating whether to enable X-Ray tracing for the GraphqlApi.
"""
create_graphql_api(authenticationType, name; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis", Dict{String, Any}("authenticationType"=>authenticationType, "name"=>name); aws_config=aws_config)
create_graphql_api(authenticationType, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("authenticationType"=>authenticationType, "name"=>name), args)); aws_config=aws_config)
"""
CreateResolver()
Creates a Resolver object. A resolver converts incoming requests into a format that a data source can understand and converts the data source's responses into GraphQL.
# Required Parameters
- `apiId`: The ID for the GraphQL API for which the resolver is being created.
- `fieldName`: The name of the field to attach the resolver to.
- `typeName`: The name of the Type.
# Optional Parameters
- `cachingConfig`: The caching configuration for the resolver.
- `dataSourceName`: The name of the data source for which the resolver is being created.
- `kind`: The resolver type. UNIT: A UNIT resolver type. A UNIT resolver is the default resolver type. A UNIT resolver enables you to execute a GraphQL query against a single data source. PIPELINE: A PIPELINE resolver type. A PIPELINE resolver enables you to execute a series of Function in a serial manner. You can use a pipeline resolver to execute a GraphQL query against multiple data sources.
- `pipelineConfig`: The PipelineConfig.
- `requestMappingTemplate`: The mapping template to be used for requests. A resolver uses a request mapping template to convert a GraphQL expression into a format that a data source can understand. Mapping templates are written in Apache Velocity Template Language (VTL). VTL request mapping templates are optional when using a Lambda data source. For all other data sources, VTL request and response mapping templates are required.
- `responseMappingTemplate`: The mapping template to be used for responses from the data source.
- `syncConfig`: The SyncConfig for a resolver attached to a versioned datasource.
"""
create_resolver(apiId, fieldName, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types/$(typeName)/resolvers", Dict{String, Any}("fieldName"=>fieldName); aws_config=aws_config)
create_resolver(apiId, fieldName, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types/$(typeName)/resolvers", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("fieldName"=>fieldName), args)); aws_config=aws_config)
"""
CreateType()
Creates a Type object.
# Required Parameters
- `apiId`: The API ID.
- `definition`: The type definition, in GraphQL Schema Definition Language (SDL) format. For more information, see the GraphQL SDL documentation.
- `format`: The type format: SDL or JSON.
"""
create_type(apiId, definition, format; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types", Dict{String, Any}("definition"=>definition, "format"=>format); aws_config=aws_config)
create_type(apiId, definition, format, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("definition"=>definition, "format"=>format), args)); aws_config=aws_config)
"""
DeleteApiCache()
Deletes an ApiCache object.
# Required Parameters
- `apiId`: The API ID.
"""
delete_api_cache(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/ApiCaches"; aws_config=aws_config)
delete_api_cache(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/ApiCaches", args; aws_config=aws_config)
"""
DeleteApiKey()
Deletes an API key.
# Required Parameters
- `apiId`: The API ID.
- `id`: The ID for the API key.
"""
delete_api_key(apiId, id; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/apikeys/$(id)"; aws_config=aws_config)
delete_api_key(apiId, id, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/apikeys/$(id)", args; aws_config=aws_config)
"""
DeleteDataSource()
Deletes a DataSource object.
# Required Parameters
- `apiId`: The API ID.
- `name`: The name of the data source.
"""
delete_data_source(apiId, name; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/datasources/$(name)"; aws_config=aws_config)
delete_data_source(apiId, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/datasources/$(name)", args; aws_config=aws_config)
"""
DeleteFunction()
Deletes a Function.
# Required Parameters
- `apiId`: The GraphQL API ID.
- `functionId`: The Function ID.
"""
delete_function(apiId, functionId; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/functions/$(functionId)"; aws_config=aws_config)
delete_function(apiId, functionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/functions/$(functionId)", args; aws_config=aws_config)
"""
DeleteGraphqlApi()
Deletes a GraphqlApi object.
# Required Parameters
- `apiId`: The API ID.
"""
delete_graphql_api(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)"; aws_config=aws_config)
delete_graphql_api(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)", args; aws_config=aws_config)
"""
DeleteResolver()
Deletes a Resolver object.
# Required Parameters
- `apiId`: The API ID.
- `fieldName`: The resolver field name.
- `typeName`: The name of the resolver type.
"""
delete_resolver(apiId, fieldName, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/types/$(typeName)/resolvers/$(fieldName)"; aws_config=aws_config)
delete_resolver(apiId, fieldName, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/types/$(typeName)/resolvers/$(fieldName)", args; aws_config=aws_config)
"""
DeleteType()
Deletes a Type object.
# Required Parameters
- `apiId`: The API ID.
- `typeName`: The type name.
"""
delete_type(apiId, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/types/$(typeName)"; aws_config=aws_config)
delete_type(apiId, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/types/$(typeName)", args; aws_config=aws_config)
"""
FlushApiCache()
Flushes an ApiCache object.
# Required Parameters
- `apiId`: The API ID.
"""
flush_api_cache(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/FlushCache"; aws_config=aws_config)
flush_api_cache(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/apis/$(apiId)/FlushCache", args; aws_config=aws_config)
"""
GetApiCache()
Retrieves an ApiCache object.
# Required Parameters
- `apiId`: The API ID.
"""
get_api_cache(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/ApiCaches"; aws_config=aws_config)
get_api_cache(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/ApiCaches", args; aws_config=aws_config)
"""
GetDataSource()
Retrieves a DataSource object.
# Required Parameters
- `apiId`: The API ID.
- `name`: The name of the data source.
"""
get_data_source(apiId, name; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/datasources/$(name)"; aws_config=aws_config)
get_data_source(apiId, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/datasources/$(name)", args; aws_config=aws_config)
"""
GetFunction()
Get a Function.
# Required Parameters
- `apiId`: The GraphQL API ID.
- `functionId`: The Function ID.
"""
get_function(apiId, functionId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/functions/$(functionId)"; aws_config=aws_config)
get_function(apiId, functionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/functions/$(functionId)", args; aws_config=aws_config)
"""
GetGraphqlApi()
Retrieves a GraphqlApi object.
# Required Parameters
- `apiId`: The API ID for the GraphQL API.
"""
get_graphql_api(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)"; aws_config=aws_config)
get_graphql_api(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)", args; aws_config=aws_config)
"""
GetIntrospectionSchema()
Retrieves the introspection schema for a GraphQL API.
# Required Parameters
- `apiId`: The API ID.
- `format`: The schema format: SDL or JSON.
# Optional Parameters
- `includeDirectives`: A flag that specifies whether the schema introspection should contain directives.
"""
get_introspection_schema(apiId, format; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/schema", Dict{String, Any}("format"=>format); aws_config=aws_config)
get_introspection_schema(apiId, format, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/schema", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("format"=>format), args)); aws_config=aws_config)
"""
GetResolver()
Retrieves a Resolver object.
# Required Parameters
- `apiId`: The API ID.
- `fieldName`: The resolver field name.
- `typeName`: The resolver type name.
"""
get_resolver(apiId, fieldName, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types/$(typeName)/resolvers/$(fieldName)"; aws_config=aws_config)
get_resolver(apiId, fieldName, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types/$(typeName)/resolvers/$(fieldName)", args; aws_config=aws_config)
"""
GetSchemaCreationStatus()
Retrieves the current status of a schema creation operation.
# Required Parameters
- `apiId`: The API ID.
"""
get_schema_creation_status(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/schemacreation"; aws_config=aws_config)
get_schema_creation_status(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/schemacreation", args; aws_config=aws_config)
"""
GetType()
Retrieves a Type object.
# Required Parameters
- `apiId`: The API ID.
- `format`: The type format: SDL or JSON.
- `typeName`: The type name.
"""
get_type(apiId, format, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types/$(typeName)", Dict{String, Any}("format"=>format); aws_config=aws_config)
get_type(apiId, format, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types/$(typeName)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("format"=>format), args)); aws_config=aws_config)
"""
ListApiKeys()
Lists the API keys for a given API. API keys are deleted automatically 60 days after they expire. However, they may still be included in the response until they have actually been deleted. You can safely call DeleteApiKey to manually delete a key before it's automatically deleted.
# Required Parameters
- `apiId`: The API ID.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
"""
list_api_keys(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/apikeys"; aws_config=aws_config)
list_api_keys(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/apikeys", args; aws_config=aws_config)
"""
ListDataSources()
Lists the data sources for a given API.
# Required Parameters
- `apiId`: The API ID.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
"""
list_data_sources(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/datasources"; aws_config=aws_config)
list_data_sources(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/datasources", args; aws_config=aws_config)
"""
ListFunctions()
List multiple functions.
# Required Parameters
- `apiId`: The GraphQL API ID.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
"""
list_functions(apiId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/functions"; aws_config=aws_config)
list_functions(apiId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/functions", args; aws_config=aws_config)
"""
ListGraphqlApis()
Lists your GraphQL APIs.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
"""
list_graphql_apis(; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis"; aws_config=aws_config)
list_graphql_apis(args::AbstractDict{String, Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis", args; aws_config=aws_config)
"""
ListResolvers()
Lists the resolvers for a given API and type.
# Required Parameters
- `apiId`: The API ID.
- `typeName`: The type name.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
"""
list_resolvers(apiId, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types/$(typeName)/resolvers"; aws_config=aws_config)
list_resolvers(apiId, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types/$(typeName)/resolvers", args; aws_config=aws_config)
"""
ListResolversByFunction()
List the resolvers that are associated with a specific function.
# Required Parameters
- `apiId`: The API ID.
- `functionId`: The Function ID.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.
"""
list_resolvers_by_function(apiId, functionId; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/functions/$(functionId)/resolvers"; aws_config=aws_config)
list_resolvers_by_function(apiId, functionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/functions/$(functionId)/resolvers", args; aws_config=aws_config)
"""
ListTagsForResource()
Lists the tags for a resource.
# Required Parameters
- `resourceArn`: The GraphqlApi ARN.
"""
list_tags_for_resource(resourceArn; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/tags/$(resourceArn)"; aws_config=aws_config)
list_tags_for_resource(resourceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/tags/$(resourceArn)", args; aws_config=aws_config)
"""
ListTypes()
Lists the types for a given API.
# Required Parameters
- `apiId`: The API ID.
- `format`: The type format: SDL or JSON.
# Optional Parameters
- `maxResults`: The maximum number of results you want the request to return.
- `nextToken`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
"""
list_types(apiId, format; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types", Dict{String, Any}("format"=>format); aws_config=aws_config)
list_types(apiId, format, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("GET", "/v1/apis/$(apiId)/types", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("format"=>format), args)); aws_config=aws_config)
"""
StartSchemaCreation()
Adds a new schema to your GraphQL API. This operation is asynchronous. Use to determine when it has completed.
# Required Parameters
- `apiId`: The API ID.
- `definition`: The schema definition, in GraphQL schema language format.
"""
start_schema_creation(apiId, definition; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/schemacreation", Dict{String, Any}("definition"=>definition); aws_config=aws_config)
start_schema_creation(apiId, definition, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/schemacreation", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("definition"=>definition), args)); aws_config=aws_config)
"""
TagResource()
Tags a resource with user-supplied tags.
# Required Parameters
- `resourceArn`: The GraphqlApi ARN.
- `tags`: A TagMap object.
"""
tag_resource(resourceArn, tags; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/tags/$(resourceArn)", Dict{String, Any}("tags"=>tags); aws_config=aws_config)
tag_resource(resourceArn, tags, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/tags/$(resourceArn)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("tags"=>tags), args)); aws_config=aws_config)
"""
UntagResource()
Untags a resource.
# Required Parameters
- `resourceArn`: The GraphqlApi ARN.
- `tagKeys`: A list of TagKey objects.
"""
untag_resource(resourceArn, tagKeys; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/tags/$(resourceArn)", Dict{String, Any}("tagKeys"=>tagKeys); aws_config=aws_config)
untag_resource(resourceArn, tagKeys, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("DELETE", "/v1/tags/$(resourceArn)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("tagKeys"=>tagKeys), args)); aws_config=aws_config)
"""
UpdateApiCache()
Updates the cache for the GraphQL API.
# Required Parameters
- `apiCachingBehavior`: Caching behavior. FULL_REQUEST_CACHING: All requests are fully cached. PER_RESOLVER_CACHING: Individual resolvers that you specify are cached.
- `apiId`: The GraphQL API Id.
- `ttl`: TTL in seconds for cache entries. Valid values are between 1 and 3600 seconds.
- `type`: The cache instance type. Valid values are SMALL MEDIUM LARGE XLARGE LARGE_2X LARGE_4X LARGE_8X (not available in all regions) LARGE_12X Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used. The following legacy instance types are available, but their use is discouraged: T2_SMALL: A t2.small instance type. T2_MEDIUM: A t2.medium instance type. R4_LARGE: A r4.large instance type. R4_XLARGE: A r4.xlarge instance type. R4_2XLARGE: A r4.2xlarge instance type. R4_4XLARGE: A r4.4xlarge instance type. R4_8XLARGE: A r4.8xlarge instance type.
"""
update_api_cache(apiCachingBehavior, apiId, ttl, type; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/ApiCaches/update", Dict{String, Any}("apiCachingBehavior"=>apiCachingBehavior, "ttl"=>ttl, "type"=>type); aws_config=aws_config)
update_api_cache(apiCachingBehavior, apiId, ttl, type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/ApiCaches/update", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("apiCachingBehavior"=>apiCachingBehavior, "ttl"=>ttl, "type"=>type), args)); aws_config=aws_config)
"""
UpdateApiKey()
Updates an API key. The key can be updated while it is not deleted.
# Required Parameters
- `apiId`: The ID for the GraphQL API.
- `id`: The API key ID.
# Optional Parameters
- `description`: A description of the purpose of the API key.
- `expires`: The time from update time after which the API key expires. The date is represented as seconds since the epoch. For more information, see .
"""
update_api_key(apiId, id; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/apikeys/$(id)"; aws_config=aws_config)
update_api_key(apiId, id, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/apikeys/$(id)", args; aws_config=aws_config)
"""
UpdateDataSource()
Updates a DataSource object.
# Required Parameters
- `apiId`: The API ID.
- `name`: The new name for the data source.
- `type`: The new data source type.
# Optional Parameters
- `description`: The new description for the data source.
- `dynamodbConfig`: The new Amazon DynamoDB configuration.
- `elasticsearchConfig`: The new Elasticsearch Service configuration.
- `httpConfig`: The new HTTP endpoint configuration.
- `lambdaConfig`: The new AWS Lambda configuration.
- `relationalDatabaseConfig`: The new relational database configuration.
- `serviceRoleArn`: The new service role ARN for the data source.
"""
update_data_source(apiId, name, type; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/datasources/$(name)", Dict{String, Any}("type"=>type); aws_config=aws_config)
update_data_source(apiId, name, type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/datasources/$(name)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("type"=>type), args)); aws_config=aws_config)
"""
UpdateFunction()
Updates a Function object.
# Required Parameters
- `apiId`: The GraphQL API ID.
- `dataSourceName`: The Function DataSource name.
- `functionId`: The function ID.
- `functionVersion`: The version of the request mapping template. Currently the supported value is 2018-05-29.
- `name`: The Function name.
# Optional Parameters
- `description`: The Function description.
- `requestMappingTemplate`: The Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template.
- `responseMappingTemplate`: The Function request mapping template.
"""
update_function(apiId, dataSourceName, functionId, functionVersion, name; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/functions/$(functionId)", Dict{String, Any}("dataSourceName"=>dataSourceName, "functionVersion"=>functionVersion, "name"=>name); aws_config=aws_config)
update_function(apiId, dataSourceName, functionId, functionVersion, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/functions/$(functionId)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("dataSourceName"=>dataSourceName, "functionVersion"=>functionVersion, "name"=>name), args)); aws_config=aws_config)
"""
UpdateGraphqlApi()
Updates a GraphqlApi object.
# Required Parameters
- `apiId`: The API ID.
- `name`: The new name for the GraphqlApi object.
# Optional Parameters
- `additionalAuthenticationProviders`: A list of additional authentication providers for the GraphqlApi API.
- `authenticationType`: The new authentication type for the GraphqlApi object.
- `logConfig`: The Amazon CloudWatch Logs configuration for the GraphqlApi object.
- `openIDConnectConfig`: The OpenID Connect configuration for the GraphqlApi object.
- `userPoolConfig`: The new Amazon Cognito user pool configuration for the GraphqlApi object.
- `xrayEnabled`: A flag indicating whether to enable X-Ray tracing for the GraphqlApi.
"""
update_graphql_api(apiId, name; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)", Dict{String, Any}("name"=>name); aws_config=aws_config)
update_graphql_api(apiId, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("name"=>name), args)); aws_config=aws_config)
"""
UpdateResolver()
Updates a Resolver object.
# Required Parameters
- `apiId`: The API ID.
- `fieldName`: The new field name.
- `typeName`: The new type name.
# Optional Parameters
- `cachingConfig`: The caching configuration for the resolver.
- `dataSourceName`: The new data source name.
- `kind`: The resolver type. UNIT: A UNIT resolver type. A UNIT resolver is the default resolver type. A UNIT resolver enables you to execute a GraphQL query against a single data source. PIPELINE: A PIPELINE resolver type. A PIPELINE resolver enables you to execute a series of Function in a serial manner. You can use a pipeline resolver to execute a GraphQL query against multiple data sources.
- `pipelineConfig`: The PipelineConfig.
- `requestMappingTemplate`: The new request mapping template. A resolver uses a request mapping template to convert a GraphQL expression into a format that a data source can understand. Mapping templates are written in Apache Velocity Template Language (VTL). VTL request mapping templates are optional when using a Lambda data source. For all other data sources, VTL request and response mapping templates are required.
- `responseMappingTemplate`: The new response mapping template.
- `syncConfig`: The SyncConfig for a resolver attached to a versioned datasource.
"""
update_resolver(apiId, fieldName, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types/$(typeName)/resolvers/$(fieldName)"; aws_config=aws_config)
update_resolver(apiId, fieldName, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types/$(typeName)/resolvers/$(fieldName)", args; aws_config=aws_config)
"""
UpdateType()
Updates a Type object.
# Required Parameters
- `apiId`: The API ID.
- `format`: The new type format: SDL or JSON.
- `typeName`: The new type name.
# Optional Parameters
- `definition`: The new definition.
"""
update_type(apiId, format, typeName; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types/$(typeName)", Dict{String, Any}("format"=>format); aws_config=aws_config)
update_type(apiId, format, typeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = appsync("POST", "/v1/apis/$(apiId)/types/$(typeName)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("format"=>format), args)); aws_config=aws_config)
|
module HTMLWriterTests
using Test
using Documenter
using Documenter: DocSystem
using Documenter.Writers.HTMLWriter: HTMLWriter, generate_version_file, expand_versions
"""
    verify_version_file(versionfile, entries)

Assert (via `@test`) that `versionfile` exists and that every string in
`entries` occurs in its contents, in the same relative order as given.
"""
function verify_version_file(versionfile, entries)
    @test isfile(versionfile)
    content = read(versionfile, String)
    idx = 1
    for entry in entries
        i = findnext(entry, content, idx)
        @test i !== nothing
        # Bail out after a recorded failure: `last(nothing)` would otherwise
        # raise a MethodError and abort the enclosing testset.
        i === nothing && return
        # Continue the search from the end of this match so order is enforced.
        idx = last(i)
    end
end
@testset "HTMLWriter" begin
@test isdir(HTMLWriter.ASSETS)
@test isdir(HTMLWriter.ASSETS_SASS)
@test isdir(HTMLWriter.ASSETS_THEMES)
for theme in HTMLWriter.THEMES
@test isfile(joinpath(HTMLWriter.ASSETS_SASS, "$(theme).scss"))
@test isfile(joinpath(HTMLWriter.ASSETS_THEMES, "$(theme).css"))
end
# asset handling
let asset = asset("https://example.com/foo.js")
@test asset.uri == "https://example.com/foo.js"
@test asset.class == :js
@test asset.islocal === false
end
let asset = asset("http://example.com/foo.js", class=:ico)
@test asset.uri == "http://example.com/foo.js"
@test asset.class == :ico
@test asset.islocal === false
end
let asset = asset("foo/bar.css", islocal=true)
@test asset.uri == "foo/bar.css"
@test asset.class == :css
@test asset.islocal === true
end
@test_throws Exception asset("ftp://example.com/foo.js")
@test_throws Exception asset("example.com/foo.js")
@test_throws Exception asset("foo.js")
@test_throws Exception asset("https://example.com/foo.js?q=1")
@test_throws Exception asset("https://example.com/foo.js", class=:error)
# HTML format object
@test Documenter.HTML() isa Documenter.HTML
@test_throws ArgumentError Documenter.HTML(collapselevel=-200)
@test_throws Exception Documenter.HTML(assets=["foo.js", 10])
@test_throws ArgumentError Documenter.HTML(footer="foo\n\nbar")
@test_throws ArgumentError Documenter.HTML(footer="# foo")
@test_throws ArgumentError Documenter.HTML(footer="")
@test Documenter.HTML(footer="foo bar [baz](https://github.com)") isa Documenter.HTML
# MathEngine
let katex = KaTeX()
@test length(katex.config) == 1
@test haskey(katex.config, :delimiters)
end
let katex = KaTeX(Dict(:foo => 1))
@test length(katex.config) == 2
@test haskey(katex.config, :delimiters)
@test haskey(katex.config, :foo)
end
let katex = KaTeX(Dict(:delimiters => 1, :foo => 2))
@test length(katex.config) == 2
@test haskey(katex.config, :delimiters)
@test katex.config[:delimiters] == 1
@test haskey(katex.config, :foo)
end
let mathjax = MathJax2()
@test length(mathjax.config) == 5
@test haskey(mathjax.config, :tex2jax)
@test haskey(mathjax.config, :config)
@test haskey(mathjax.config, :jax)
@test haskey(mathjax.config, :extensions)
@test haskey(mathjax.config, :TeX)
end
let mathjax = MathJax2(Dict(:foo => 1))
@test length(mathjax.config) == 6
@test haskey(mathjax.config, :tex2jax)
@test haskey(mathjax.config, :config)
@test haskey(mathjax.config, :jax)
@test haskey(mathjax.config, :extensions)
@test haskey(mathjax.config, :TeX)
@test haskey(mathjax.config, :foo)
end
let mathjax = MathJax2(Dict(:tex2jax => 1, :foo => 2))
@test length(mathjax.config) == 6
@test haskey(mathjax.config, :tex2jax)
@test haskey(mathjax.config, :config)
@test haskey(mathjax.config, :jax)
@test haskey(mathjax.config, :extensions)
@test haskey(mathjax.config, :TeX)
@test haskey(mathjax.config, :foo)
@test mathjax.config[:tex2jax] == 1
end
mktempdir() do tmpdir
versionfile = joinpath(tmpdir, "versions.js")
versions = ["stable", "dev",
"2.1.1", "v2.1.0", "v2.0.1", "v2.0.0",
"1.1.1", "v1.1.0", "v1.0.1", "v1.0.0",
"0.1.1", "v0.1.0"] # note no `v` on first ones
cd(tmpdir) do
for version in versions
mkdir(version)
end
end
# expanding versions
versions = ["stable" => "v^", "v#.#", "dev" => "dev"] # default to makedocs
entries, symlinks = expand_versions(tmpdir, versions)
@test entries == ["stable", "v2.1", "v2.0", "v1.1", "v1.0", "v0.1", "dev"]
@test symlinks == ["stable"=>"2.1.1", "v2.1"=>"2.1.1", "v2.0"=>"v2.0.1",
"v1.1"=>"1.1.1", "v1.0"=>"v1.0.1", "v0.1"=>"0.1.1",
"v2"=>"2.1.1", "v1"=>"1.1.1", "v2.1.1"=>"2.1.1",
"v1.1.1"=>"1.1.1", "v0.1.1"=>"0.1.1"]
generate_version_file(versionfile, entries)
verify_version_file(versionfile, entries)
versions = ["v#"]
entries, symlinks = expand_versions(tmpdir, versions)
@test entries == ["v2.1", "v1.1"]
@test symlinks == ["v2.1"=>"2.1.1", "v1.1"=>"1.1.1", "v2"=>"2.1.1", "v1"=>"1.1.1",
"v2.0"=>"v2.0.1", "v1.0"=>"v1.0.1", "v0.1"=>"0.1.1",
"v2.1.1"=>"2.1.1", "v1.1.1"=>"1.1.1", "v0.1.1"=>"0.1.1"]
generate_version_file(versionfile, entries)
verify_version_file(versionfile, entries)
versions = ["v#.#.#"]
entries, symlinks = expand_versions(tmpdir, versions)
@test entries == ["v2.1.1", "v2.1.0", "v2.0.1", "v2.0.0", "v1.1.1", "v1.1.0",
"v1.0.1", "v1.0.0", "v0.1.1", "v0.1.0"]
@test symlinks == ["v2.1.1"=>"2.1.1", "v1.1.1"=>"1.1.1", "v0.1.1"=>"0.1.1",
"v2"=>"2.1.1", "v1"=>"1.1.1", "v2.1"=>"2.1.1",
"v2.0"=>"v2.0.1", "v1.1"=>"1.1.1", "v1.0"=>"v1.0.1", "v0.1"=>"0.1.1"]
generate_version_file(versionfile, entries)
verify_version_file(versionfile, entries)
versions = ["v^", "devel" => "dev", "foobar", "foo" => "bar"]
entries, symlinks = @test_logs(
(:warn, "no match for `versions` entry `\"foobar\"`"),
(:warn, "no match for `versions` entry `\"foo\" => \"bar\"`"),
expand_versions(tmpdir, versions)
)
@test entries == ["v2.1", "devel"]
@test ("v2.1" => "2.1.1") in symlinks
@test ("devel" => "dev") in symlinks
generate_version_file(versionfile, entries)
verify_version_file(versionfile, entries)
versions = ["stable" => "v^", "dev" => "stable"]
@test_throws ArgumentError expand_versions(tmpdir, versions)
end
# Exhaustive Conversion from Markdown to Nodes.
@testset "MD2Node" begin
for mod in Base.Docs.modules
for (binding, multidoc) in DocSystem.getmeta(mod)
for (typesig, docstr) in multidoc.docs
md = Documenter.DocSystem.parsedoc(docstr)
@test string(HTMLWriter.mdconvert(md; footnotes=[])) isa String
end
end
end
end
end
end
|
[STATEMENT]
lemma new_element_get_M\<^sub>E\<^sub>l\<^sub>e\<^sub>m\<^sub>e\<^sub>n\<^sub>t:
"h \<turnstile> new_element \<rightarrow>\<^sub>h h' \<Longrightarrow> h \<turnstile> new_element \<rightarrow>\<^sub>r new_element_ptr \<Longrightarrow> ptr \<noteq> new_element_ptr
\<Longrightarrow> preserved (get_M\<^sub>E\<^sub>l\<^sub>e\<^sub>m\<^sub>e\<^sub>n\<^sub>t ptr getter) h h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>h \<turnstile> new_element \<rightarrow>\<^sub>h h'; h \<turnstile> new_element \<rightarrow>\<^sub>r new_element_ptr; ptr \<noteq> new_element_ptr\<rbrakk> \<Longrightarrow> preserved (get_M ptr getter) h h'
[PROOF STEP]
(* Discharged by auto after unfolding new_element_def, get_M_defs and
   preserved_def; the prod/option splits and the bind_returns_* eliminators
   handle the monadic plumbing. *)
by(auto simp add: new_element_def get_M_defs preserved_def
split: prod.splits option.splits elim!: bind_returns_result_E bind_returns_heap_E)
/-
Copyright (c) 2017 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Reid Barton
! This file was ported from Lean 3 source module category_theory.full_subcategory
! leanprover-community/mathlib commit c3291da49cfa65f0d43b094750541c0731edc932
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.Functor.FullyFaithful
/-!
# Induced categories and full subcategories
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
Given a category `D` and a function `F : C → D` from a type `C` to the
objects of `D`, there is an essentially unique way to give `C` a
category structure such that `F` becomes a fully faithful functor,
namely by taking $$ Hom_C(X, Y) = Hom_D(FX, FY) $$. We call this the
category induced from `D` along `F`.
As a special case, if `C` is a subtype of `D`,
this produces the full subcategory of `D` on the objects belonging to `C`.
In general the induced category is equivalent to the full subcategory of `D` on the
image of `F`.
## Implementation notes
It looks odd to make `D` an explicit argument of `induced_category`,
when it is determined by the argument `F` anyways. The reason to make `D`
explicit is in order to control its syntactic form, so that instances
like `induced_category.has_forget₂` (elsewhere) refer to the correct
form of D. This is used to set up several algebraic categories like
def CommMon : Type (u+1) := induced_category Mon (bundled.map @comm_monoid.to_monoid)
-- not `induced_category (bundled monoid) (bundled.map @comm_monoid.to_monoid)`,
-- even though `Mon = bundled monoid`!
-/
namespace CategoryTheory
universe v v₂ u₁ u₂
-- morphism levels before object levels. See note [category_theory universes].
section Induced
variable {C : Type u₁} (D : Type u₂) [Category.{v} D]
variable (F : C → D)
include F
/- warning: category_theory.induced_category -> CategoryTheory.InducedCategory is a dubious translation:
lean 3 declaration is
forall {C : Type.{u₁}} (D : Type.{u₂}) [_inst_1 : CategoryTheory.Category.{v, u₂} D], (C -> D) -> Type.{u₁}
but is expected to have type
forall {C : Type.{u₁}} (D : Type.{u₂}), (C -> D) -> Type.{u₁}
Case conversion may be inaccurate. Consider using '#align category_theory.induced_category CategoryTheory.InducedCategoryₓ'. -/
/-- `induced_category D F`, where `F : C → D`, is a typeclass synonym for `C`,
which provides a category structure so that the morphisms `X ⟶ Y` are the morphisms
in `D` from `F X` to `F Y`.
-/
@[nolint has_nonempty_instance unused_arguments]
def InducedCategory : Type u₁ :=
C
-- The carrier is definitionally `C`; the data of `D` and `F` is only used by
-- the `induced_category.category` instance below.
#align category_theory.induced_category CategoryTheory.InducedCategory
variable {D}
/- warning: category_theory.induced_category.has_coe_to_sort -> CategoryTheory.InducedCategory.hasCoeToSort is a dubious translation:
lean 3 declaration is
forall {C : Type.{u₁}} {D : Type.{u₂}} [_inst_1 : CategoryTheory.Category.{v, u₂} D] (F : C -> D) {α : Sort.{u_1}} [_inst_2 : CoeSort.{succ u₂, u_1} D α], CoeSort.{succ u₁, u_1} (CategoryTheory.InducedCategory.{v, u₁, u₂} C D _inst_1 F) α
but is expected to have type
forall {C : Type.{u₁}} {D : Type.{u₂}} (_inst_1 : C -> D) {F : Sort.{u_1}} [α : CoeSort.{succ u₂, u_1} D F], CoeSort.{succ u₁, u_1} (CategoryTheory.InducedCategory.{u₁, u₂} C D _inst_1) F
Case conversion may be inaccurate. Consider using '#align category_theory.induced_category.has_coe_to_sort CategoryTheory.InducedCategory.hasCoeToSortₓ'. -/
-- If the objects of `D` coerce to a sort, so do the objects of the induced
-- category: coerce the image `F c`.
instance InducedCategory.hasCoeToSort {α : Sort _} [CoeSort D α] :
CoeSort (InducedCategory D F) α :=
⟨fun c => ↥(F c)⟩
#align category_theory.induced_category.has_coe_to_sort CategoryTheory.InducedCategory.hasCoeToSort
#print CategoryTheory.InducedCategory.category /-
-- The induced category structure: hom-sets, identities and composition are all
-- taken in `D` between the images under `F`.
instance InducedCategory.category : Category.{v} (InducedCategory D F)
where
Hom X Y := F X ⟶ F Y
id X := 𝟙 (F X)
comp _ _ _ f g := f ≫ g
#align category_theory.induced_category.category CategoryTheory.InducedCategory.category
-/
#print CategoryTheory.inducedFunctor /-
/-- The forgetful functor from an induced category to the original category,
forgetting the extra data.
-/
@[simps]
def inducedFunctor : InducedCategory D F ⥤ D
where
obj := F
-- a morphism of the induced category *is* a morphism of `D`, so `map` is the identity
map x y f := f
#align category_theory.induced_functor CategoryTheory.inducedFunctor
-/
#print CategoryTheory.InducedCategory.full /-
-- `induced_functor F` is full: a morphism `F X ⟶ F Y` in `D` is by definition
-- a morphism `X ⟶ Y` of the induced category, so `preimage` is the identity.
instance InducedCategory.full : Full (inducedFunctor F) where preimage x y f := f
#align category_theory.induced_category.full CategoryTheory.InducedCategory.full
-/
#print CategoryTheory.InducedCategory.faithful /-
-- `induced_functor F` is faithful: it acts as the identity on hom-sets
-- (its `map` is `fun f => f`), so the proof obligations are discharged
-- by the default `where` structure.
instance InducedCategory.faithful : Faithful (inducedFunctor F) where
#align category_theory.induced_category.faithful CategoryTheory.InducedCategory.faithful
-/
end Induced
section FullSubcategory
-- A full subcategory is the special case of an induced category with F = subtype.val.
variable {C : Type u₁} [Category.{v} C]
variable (Z : C → Prop)
/--
A subtype-like structure for full subcategories. Morphisms just ignore the property. We don't use
actual subtypes since the simp-normal form `↑X` of `X.val` does not work well for full
subcategories.
See <https://stacks.math.columbia.edu/tag/001D>. We do not define 'strictly full' subcategories.
-/
@[ext, nolint has_nonempty_instance]
structure FullSubcategory where
-- the underlying object of `C`
obj : C
-- the (propositional) witness that `obj` satisfies the predicate `Z`
property : Z obj
#align category_theory.full_subcategory CategoryTheory.FullSubcategoryₓ
/- warning: category_theory.full_subcategory.category -> CategoryTheory.FullSubcategory.category is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Category.{u1, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z)
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Category.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.category CategoryTheory.FullSubcategory.categoryₓ'. -/
-- The category structure on a full subcategory is induced along the
-- projection `full_subcategory.obj` back into `C`.
instance FullSubcategory.category : Category.{v} (FullSubcategory Z) :=
InducedCategory.category FullSubcategory.obj
#align category_theory.full_subcategory.category CategoryTheory.FullSubcategory.category
/- warning: category_theory.full_subcategory_inclusion -> CategoryTheory.fullSubcategoryInclusion is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Functor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Functor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory_inclusion CategoryTheory.fullSubcategoryInclusionₓ'. -/
/-- The forgetful functor from a full subcategory into the original category
("forgetting" the condition).
-/
def fullSubcategoryInclusion : FullSubcategory Z ⥤ C :=
-- simply the induced functor along the `obj` projection
inducedFunctor FullSubcategory.obj
#align category_theory.full_subcategory_inclusion CategoryTheory.fullSubcategoryInclusion
/- warning: category_theory.full_subcategory_inclusion.obj -> CategoryTheory.fullSubcategoryInclusion.obj is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop) {X : CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z}, Eq.{succ u2} C (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z) X) (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z X)
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop) {X : CategoryTheory.FullSubcategory.{u2} C Z}, Eq.{succ u2} C (Prefunctor.obj.{succ u1, succ u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)) X) (CategoryTheory.FullSubcategory.obj.{u2} C Z X)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory_inclusion.obj CategoryTheory.fullSubcategoryInclusion.objₓ'. -/
-- On objects the inclusion functor is (definitionally) the `obj` projection.
@[simp]
theorem fullSubcategoryInclusion.obj {X} : (fullSubcategoryInclusion Z).obj X = X.obj :=
rfl
#align category_theory.full_subcategory_inclusion.obj CategoryTheory.fullSubcategoryInclusion.obj
/- warning: category_theory.full_subcategory_inclusion.map -> CategoryTheory.fullSubcategoryInclusion.map is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop) {X : CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z} {Y : CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z} {f : Quiver.Hom.{succ u1, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)))) X Y}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z) X) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z) Y)) (CategoryTheory.Functor.map.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z) X Y f) f
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop) {X : CategoryTheory.FullSubcategory.{u2} C Z} {Y : CategoryTheory.FullSubcategory.{u2} C Z} {f : Quiver.Hom.{succ u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z))) X Y}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)) X) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1 
(CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)) Y)) (Prefunctor.map.{succ u1, succ u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)) X Y f) f
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory_inclusion.map CategoryTheory.fullSubcategoryInclusion.mapₓ'. -/
-- On morphisms the inclusion functor is (definitionally) the identity.
@[simp]
theorem fullSubcategoryInclusion.map {X Y} {f : X ⟶ Y} : (fullSubcategoryInclusion Z).map f = f :=
rfl
#align category_theory.full_subcategory_inclusion.map CategoryTheory.fullSubcategoryInclusion.map
/- warning: category_theory.full_subcategory.full -> CategoryTheory.FullSubcategory.full is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Full.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Full.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.full CategoryTheory.FullSubcategory.fullₓ'. -/
-- Fullness of the inclusion is inherited from the induced-category construction.
instance FullSubcategory.full : Full (fullSubcategoryInclusion Z) :=
InducedCategory.full _
#align category_theory.full_subcategory.full CategoryTheory.FullSubcategory.full
/- warning: category_theory.full_subcategory.faithful -> CategoryTheory.FullSubcategory.faithful is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Faithful.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.InducedCategory.category.{u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.FullSubcategoryₓ.obj.{u1, u2} C _inst_1 Z)) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (Z : C -> Prop), CategoryTheory.Faithful.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) C _inst_1 (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.faithful CategoryTheory.FullSubcategory.faithfulₓ'. -/
-- Faithfulness of the inclusion is inherited from the induced-category construction.
instance FullSubcategory.faithful : Faithful (fullSubcategoryInclusion Z) :=
InducedCategory.faithful _
#align category_theory.full_subcategory.faithful CategoryTheory.FullSubcategory.faithful
variable {Z} {Z' : C → Prop}
/- warning: category_theory.full_subcategory.map -> CategoryTheory.FullSubcategory.map is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] {Z : C -> Prop} {Z' : C -> Prop}, (forall {{X : C}}, (Z X) -> (Z' X)) -> (CategoryTheory.Functor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 Z') (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z'))
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] {Z : C -> Prop} {Z' : C -> Prop}, (forall {{X : C}}, (Z X) -> (Z' X)) -> (CategoryTheory.Functor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C Z) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z) (CategoryTheory.FullSubcategory.{u2} C Z') (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 Z'))
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.map CategoryTheory.FullSubcategory.mapₓ'. -/
/-- An implication of predicates `Z → Z'` induces a functor between full subcategories. -/
@[simps]
def FullSubcategory.map (h : ∀ ⦃X⦄, Z X → Z' X) : FullSubcategory Z ⥤ FullSubcategory Z'
where
-- keep the underlying object, upgrading its property along `h`
obj X := ⟨X.1, h X.2⟩
-- morphisms are untouched
map X Y f := f
#align category_theory.full_subcategory.map CategoryTheory.FullSubcategory.map
-- `full_subcategory.map h` is the identity on morphisms, hence full and faithful.
instance (h : ∀ ⦃X⦄, Z X → Z' X) : Full (FullSubcategory.map h) where preimage X Y f := f
instance (h : ∀ ⦃X⦄, Z X → Z' X) : Faithful (FullSubcategory.map h) where
/- warning: category_theory.full_subcategory.map_inclusion -> CategoryTheory.FullSubcategory.map_inclusion is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] {Z : C -> Prop} {Z' : C -> Prop} (h : forall {{X : C}}, (Z X) -> (Z' X)), Eq.{succ (max u1 u2)} (CategoryTheory.Functor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 (fun (X : C) => Z X)) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 (fun (X : C) => Z X)) C _inst_1) (CategoryTheory.Functor.comp.{u1, u1, u1, u2, u2, u2} (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 (fun (X : C) => Z X)) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 (fun (X : C) => Z X)) (CategoryTheory.FullSubcategoryₓ.{u1, u2} C _inst_1 (fun (X : C) => Z' X)) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 (fun (X : C) => Z' X)) C _inst_1 (CategoryTheory.FullSubcategory.map.{u1, u2} C _inst_1 (fun (X : C) => Z X) (fun (X : C) => Z' X) h) (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z')) (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] {Z : C -> Prop} {Z' : C -> Prop} (h : forall {{X : C}}, (Z X) -> (Z' X)), Eq.{max (succ u2) (succ u1)} (CategoryTheory.Functor.{u1, u1, u2, u2} (CategoryTheory.FullSubcategory.{u2} C (fun (X : C) => Z X)) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 (fun (X : C) => Z X)) C _inst_1) (CategoryTheory.Functor.comp.{u1, u1, u1, u2, u2, u2} (CategoryTheory.FullSubcategory.{u2} C (fun (X : C) => Z X)) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 (fun (X : C) => Z X)) (CategoryTheory.FullSubcategory.{u2} C (fun (X : C) => Z' X)) (CategoryTheory.FullSubcategory.category.{u1, u2} C _inst_1 (fun (X : C) => Z' X)) C _inst_1 (CategoryTheory.FullSubcategory.map.{u1, u2} C _inst_1 (fun (X : C) => Z X) (fun (X : C) => Z' X) h) (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z')) (CategoryTheory.fullSubcategoryInclusion.{u1, u2} C _inst_1 Z)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.map_inclusion CategoryTheory.FullSubcategory.map_inclusionₓ'. -/
-- Composing `full_subcategory.map h` with the inclusion of the larger full
-- subcategory recovers (definitionally) the inclusion of the smaller one.
@[simp]
theorem FullSubcategory.map_inclusion (h : ∀ ⦃X⦄, Z X → Z' X) :
FullSubcategory.map h ⋙ fullSubcategoryInclusion Z' = fullSubcategoryInclusion Z :=
rfl
#align category_theory.full_subcategory.map_inclusion CategoryTheory.FullSubcategory.map_inclusion
section lift
variable {D : Type u₂} [Category.{v₂} D] (P Q : D → Prop)
/- warning: category_theory.full_subcategory.lift -> CategoryTheory.FullSubcategory.lift is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2), (forall (X : C), P (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X)) -> (CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2), (forall (X : C), P (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X)) -> (CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.lift CategoryTheory.FullSubcategory.liftₓ'. -/
/-- A functor which maps objects to objects satisfying a certain property induces a lift through
the full subcategory of objects satisfying that property. -/
@[simps]
def FullSubcategory.lift (F : C ⥤ D) (hF : ∀ X, P (F.obj X)) : C ⥤ FullSubcategory P
where
-- pair the image object with the proof that it satisfies `P`
obj X := ⟨F.obj X, hF X⟩
-- morphisms are mapped exactly as `F` maps them
map X Y f := F.map f
#align category_theory.full_subcategory.lift CategoryTheory.FullSubcategory.lift
/- warning: category_theory.full_subcategory.lift_comp_inclusion -> CategoryTheory.FullSubcategory.lift_comp_inclusion is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X)), CategoryTheory.Iso.{max u3 u2, max u1 u2 u3 u4} (CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (CategoryTheory.Functor.category.{u1, u2, u3, u4} C _inst_1 D _inst_2) (CategoryTheory.Functor.comp.{u1, u2, u2, u3, u4, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P)) F
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X)), CategoryTheory.Iso.{max u3 u2, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (CategoryTheory.Functor.category.{u1, u2, u3, u4} C _inst_1 D _inst_2) (CategoryTheory.Functor.comp.{u1, u2, u2, u3, u4, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P)) F
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.lift_comp_inclusion CategoryTheory.FullSubcategory.lift_comp_inclusionₓ'. -/
/-- Composing the lift of a functor through a full subcategory with the inclusion yields the
original functor. Unfortunately, this is not true by definition, so we only get a natural
isomorphism, but it is pointwise definitionally true, see
`full_subcategory.inclusion_obj_lift_obj` and `full_subcategory.inclusion_map_lift_map`. -/
def FullSubcategory.lift_comp_inclusion (F : C ⥤ D) (hF : ∀ X, P (F.obj X)) :
FullSubcategory.lift P F hF ⋙ fullSubcategoryInclusion P ≅ F :=
-- the components are identity isomorphisms; naturality is discharged by `simp`
NatIso.ofComponents (fun X => Iso.refl _) (by simp)
#align category_theory.full_subcategory.lift_comp_inclusion CategoryTheory.FullSubcategory.lift_comp_inclusion
/- warning: category_theory.full_subcategory.inclusion_obj_lift_obj -> CategoryTheory.fullSubcategoryInclusion_obj_lift_obj is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X)) {X : C}, Eq.{succ u4} D (CategoryTheory.Functor.obj.{u2, u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.InducedCategory.category.{u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.FullSubcategoryₓ.obj.{u2, u4} D _inst_2 P)) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) X)) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X)
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X)) {X : C}, Eq.{succ u4} D (Prefunctor.obj.{succ u2, succ u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u2, u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P)) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F 
hF)) X)) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.inclusion_obj_lift_obj CategoryTheory.fullSubcategoryInclusion_obj_lift_objₓ'. -/
/-- On objects, the full subcategory inclusion applied to the lift of `F`
is definitionally `F.obj X` (proved by `rfl`). -/
@[simp]
theorem CategoryTheory.fullSubcategoryInclusion_obj_lift_obj (F : C ⥤ D) (hF : ∀ X, P (F.obj X))
{X : C} : (fullSubcategoryInclusion P).obj ((FullSubcategory.lift P F hF).obj X) = F.obj X :=
rfl
#align category_theory.full_subcategory.inclusion_obj_lift_obj CategoryTheory.fullSubcategoryInclusion_obj_lift_obj
/- warning: category_theory.full_subcategory.inclusion_map_lift_map -> CategoryTheory.fullSubcategoryInclusion_map_lift_map is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X)) {X : C} {Y : C} (f : Quiver.Hom.{succ u1, u3} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) X Y), Eq.{succ u2} (Quiver.Hom.{succ u2, u4} D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.obj.{u2, u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.InducedCategory.category.{u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.FullSubcategoryₓ.obj.{u2, u4} D _inst_2 P)) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) X)) (CategoryTheory.Functor.obj.{u2, u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.InducedCategory.category.{u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.FullSubcategoryₓ.obj.{u2, u4} D _inst_2 P)) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) Y))) (CategoryTheory.Functor.map.{u2, u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) 
(CategoryTheory.InducedCategory.category.{u2, u4, u4} (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.FullSubcategoryₓ.obj.{u2, u4} D _inst_2 P)) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) X) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) Y) (CategoryTheory.Functor.map.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) X Y f)) (CategoryTheory.Functor.map.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X Y f)
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X)) {X : C} {Y : C} (f : Quiver.Hom.{succ u1, u3} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) X Y), Eq.{succ u2} (Quiver.Hom.{succ u2, u4} D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (Prefunctor.obj.{succ u2, succ u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u2, u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P)) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) 
(CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF)) X)) (Prefunctor.obj.{succ u2, succ u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u2, u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P)) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF)) Y))) (Prefunctor.map.{succ u2, succ u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) 
(CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u2, u2, u4, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) D _inst_2 (CategoryTheory.fullSubcategoryInclusion.{u2, u4} D _inst_2 P)) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF)) X) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF)) Y) (Prefunctor.map.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) 
(CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.Category.toCategoryStruct.{u2, u4} (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P))) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF)) X Y f)) (Prefunctor.map.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X Y f)
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.inclusion_map_lift_map CategoryTheory.fullSubcategoryInclusion_map_lift_mapₓ'. -/
/-- On morphisms, the full subcategory inclusion applied to the lift of `F`
is definitionally `F.map f` (proved by `rfl`). -/
theorem CategoryTheory.fullSubcategoryInclusion_map_lift_map (F : C ⥤ D) (hF : ∀ X, P (F.obj X))
{X Y : C} (f : X ⟶ Y) :
(fullSubcategoryInclusion P).map ((FullSubcategory.lift P F hF).map f) = F.map f :=
rfl
#align category_theory.full_subcategory.inclusion_map_lift_map CategoryTheory.fullSubcategoryInclusion_map_lift_map
-- Faithfulness of `F` transfers to `FullSubcategory.lift P F hF` along the
-- natural isomorphism `FullSubcategory.lift_comp_inclusion`.
instance (F : C ⥤ D) (hF : ∀ X, P (F.obj X)) [Faithful F] :
Faithful (FullSubcategory.lift P F hF) :=
Faithful.of_comp_iso (FullSubcategory.lift_comp_inclusion P F hF)
-- Fullness of `F` transfers to `FullSubcategory.lift P F hF`, again via the
-- natural isomorphism `FullSubcategory.lift_comp_inclusion`.
instance (F : C ⥤ D) (hF : ∀ X, P (F.obj X)) [Full F] : Full (FullSubcategory.lift P F hF) :=
Full.ofCompFaithfulIso (FullSubcategory.lift_comp_inclusion P F hF)
/- warning: category_theory.full_subcategory.lift_comp_map -> CategoryTheory.FullSubcategory.lift_comp_map is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (Q : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X)) (h : forall {{X : D}}, (P X) -> (Q X)), Eq.{succ (max u1 u2 u3 u4)} (CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 (fun (X : D) => Q X)) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 (fun (X : D) => Q X))) (CategoryTheory.Functor.comp.{u1, u2, u2, u3, u4, u4} C _inst_1 (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategoryₓ.{u2, u4} D _inst_2 (fun (X : D) => Q X)) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 (fun (X : D) => Q X)) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) (CategoryTheory.FullSubcategory.map.{u2, u4} D _inst_2 P (fun (X : D) => Q X) h)) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 Q F (fun (X : C) => h (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 F X) (hF X)))
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (P : D -> Prop) (Q : D -> Prop) (F : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) (hF : forall (X : C), P (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X)) (h : forall {{X : D}}, (P X) -> (Q X)), Eq.{max (max (max (succ u3) (succ u4)) (succ u1)) (succ u2)} (CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D (fun (X : D) => Q X)) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 (fun (X : D) => Q X))) (CategoryTheory.Functor.comp.{u1, u2, u2, u3, u4, u4} C _inst_1 (CategoryTheory.FullSubcategory.{u4} D P) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 P) (CategoryTheory.FullSubcategory.{u4} D (fun (X : D) => Q X)) (CategoryTheory.FullSubcategory.category.{u2, u4} D _inst_2 (fun (X : D) => Q X)) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 P F hF) (CategoryTheory.FullSubcategory.map.{u2, u4} D _inst_2 P (fun (X : D) => Q X) h)) (CategoryTheory.FullSubcategory.lift.{u1, u2, u3, u4} C _inst_1 D _inst_2 Q F (fun (X : C) => h (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 F) X) (hF X)))
Case conversion may be inaccurate. Consider using '#align category_theory.full_subcategory.lift_comp_map CategoryTheory.FullSubcategory.lift_comp_mapₓ'. -/
/-- Lifting `F` through `P` and then mapping along an implication `h : ∀ X, P X → Q X`
is definitionally equal to lifting `F` through `Q` directly (proved by `rfl`). -/
@[simp]
theorem FullSubcategory.lift_comp_map (F : C ⥤ D) (hF : ∀ X, P (F.obj X)) (h : ∀ ⦃X⦄, P X → Q X) :
FullSubcategory.lift P F hF ⋙ FullSubcategory.map h =
FullSubcategory.lift Q F fun X => h (hF X) :=
rfl
#align category_theory.full_subcategory.lift_comp_map CategoryTheory.FullSubcategory.lift_comp_map
end lift
end FullSubcategory
end CategoryTheory
|
State Before: a b : ℝ
n : ℕ
⊢ (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3 State After: a b : ℝ
n : ℕ
this : (∫ (x : ℝ) in a..b, sin x ^ 0 * cos x ^ (2 * 1 + 1)) = ∫ (u : ℝ) in sin a..sin b, u ^ 0 * (1 - u ^ 2) ^ 1
⊢ (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3 Tactic: have := @integral_sin_pow_mul_cos_pow_odd a b 0 1 State Before: a b : ℝ
n : ℕ
this : (∫ (x : ℝ) in a..b, sin x ^ 0 * cos x ^ (2 * 1 + 1)) = ∫ (u : ℝ) in sin a..sin b, u ^ 0 * (1 - u ^ 2) ^ 1
⊢ (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3 State After: a b : ℝ
n : ℕ
this : (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3
⊢ (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3 Tactic: norm_num at this State Before: a b : ℝ
n : ℕ
this : (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3
⊢ (∫ (x : ℝ) in a..b, cos x ^ 3) = sin b - sin a - (sin b ^ 3 - sin a ^ 3) / 3 State After: no goals Tactic: exact this |
[STATEMENT]
lemma normalise_language_equivalent[simp]:
"w \<Turnstile>\<^sub>n normalise \<phi> \<longleftrightarrow> w \<Turnstile>\<^sub>n \<phi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<Turnstile>\<^sub>n normalise \<phi> = w \<Turnstile>\<^sub>n \<phi>
[PROOF STEP]
using normalise_eq ltl_lang_equiv_def eq_implies_lang
[PROOF STATE]
proof (prove)
using this:
?\<phi> \<sim> normalise ?\<phi>
?\<phi> \<sim>\<^sub>L ?\<psi> \<equiv> \<forall>w. w \<Turnstile>\<^sub>n ?\<phi> = w \<Turnstile>\<^sub>n ?\<psi>
?\<phi> \<sim> ?\<psi> \<Longrightarrow> ?\<phi> \<sim>\<^sub>L ?\<psi>
goal (1 subgoal):
1. w \<Turnstile>\<^sub>n normalise \<phi> = w \<Turnstile>\<^sub>n \<phi>
[PROOF STEP]
by blast |
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
⊢ IsLocalMaxOn f (closure s) a
[PROOFSTEP]
rcases mem_nhdsWithin.1 h with ⟨U, Uo, aU, hU⟩
[GOAL]
case intro.intro.intro
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
U : Set X
Uo : IsOpen U
aU : a ∈ U
hU : U ∩ s ⊆ {x | (fun x => f x ≤ f a) x}
⊢ IsLocalMaxOn f (closure s) a
[PROOFSTEP]
refine' mem_nhdsWithin.2 ⟨U, Uo, aU, _⟩
[GOAL]
case intro.intro.intro
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
U : Set X
Uo : IsOpen U
aU : a ∈ U
hU : U ∩ s ⊆ {x | (fun x => f x ≤ f a) x}
⊢ U ∩ closure s ⊆ {x | (fun x => f x ≤ f a) x}
[PROOFSTEP]
rintro x ⟨hxU, hxs⟩
[GOAL]
case intro.intro.intro.intro
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
U : Set X
Uo : IsOpen U
aU : a ∈ U
hU : U ∩ s ⊆ {x | (fun x => f x ≤ f a) x}
x : X
hxU : x ∈ U
hxs : x ∈ closure s
⊢ x ∈ {x | (fun x => f x ≤ f a) x}
[PROOFSTEP]
refine' ContinuousWithinAt.closure_le _ _ continuousWithinAt_const hU
[GOAL]
case intro.intro.intro.intro.refine'_1
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
U : Set X
Uo : IsOpen U
aU : a ∈ U
hU : U ∩ s ⊆ {x | (fun x => f x ≤ f a) x}
x : X
hxU : x ∈ U
hxs : x ∈ closure s
⊢ x ∈ closure (U ∩ s)
[PROOFSTEP]
rwa [mem_closure_iff_nhdsWithin_neBot, nhdsWithin_inter_of_mem, ← mem_closure_iff_nhdsWithin_neBot]
[GOAL]
case intro.intro.intro.intro.refine'_1
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
U : Set X
Uo : IsOpen U
aU : a ∈ U
hU : U ∩ s ⊆ {x | (fun x => f x ≤ f a) x}
x : X
hxU : x ∈ U
hxs : x ∈ closure s
⊢ U ∈ 𝓝[s] x
[PROOFSTEP]
exact nhdsWithin_le_nhds (Uo.mem_nhds hxU)
[GOAL]
case intro.intro.intro.intro.refine'_2
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : Preorder Y
inst✝ : OrderClosedTopology Y
f g : X → Y
s : Set X
a : X
h : IsLocalMaxOn f s a
hc : ContinuousOn f (closure s)
U : Set X
Uo : IsOpen U
aU : a ∈ U
hU : U ∩ s ⊆ {x | (fun x => f x ≤ f a) x}
x : X
hxU : x ∈ U
hxs : x ∈ closure s
⊢ ContinuousWithinAt f (U ∩ s) x
[PROOFSTEP]
exact (hc _ hxs).mono ((inter_subset_right _ _).trans subset_closure)
|
include("utils.jl")
# Test suite for the manifold of symmetric matrices: point / tangent-vector
# validity checks, manifold dimensions, projections, and the generic manifold
# test suite run over several storage types and orthonormal-basis kinds.
@testset "SymmetricMatrices" begin
# 3×3 real symmetric matrices.
M=SymmetricMatrices(3,ℝ)
# A is deliberately NOT symmetric — used below for negative checks.
A = [1 2 3; 4 5 6; 7 8 9]
A_sym = [1 2 3; 2 5 -1; 3 -1 9]
# A_sym2 duplicates A_sym so mutating projections can be compared against
# an unmodified copy.
A_sym2 = [1 2 3; 2 5 -1; 3 -1 9]
B_sym = [1 2 3; 2 5 1; 3 1 -1]
# 3×3 complex symmetric matrices.
M_complex = SymmetricMatrices(3,ℂ)
# C is complex and not symmetric; D has the wrong size (2×2).
C = [1+im 1 im; 1 2 -im; im -im -1-im]
D = [1 0; 0 1];
X = zeros(3,3)
@testset "Real Symmetric Matrices Basics" begin
@test representation_size(M) == (3,3)
# Valid symmetric point is accepted; non-symmetric, complex-valued,
# wrongly-sized, and non-numeric candidates must all raise DomainError.
@test check_manifold_point(M,B_sym)===nothing
@test_throws DomainError is_manifold_point(M,A,true)
@test_throws DomainError is_manifold_point(M,C,true)
@test_throws DomainError is_manifold_point(M,D,true)
@test_throws DomainError is_manifold_point(M_complex, [:a :b :c; :b :d :e; :c :e :f],true)
# Tangent-vector checks require both the base point and the vector to be
# valid symmetric matrices of matching size and number field.
@test check_tangent_vector(M,B_sym,B_sym)===nothing
@test_throws DomainError is_tangent_vector(M,B_sym,A,true)
@test_throws DomainError is_tangent_vector(M,A,B_sym,true)
@test_throws DomainError is_tangent_vector(M,B_sym,D,true)
@test_throws DomainError is_tangent_vector(M,B_sym, 1*im * zero_tangent_vector(M,B_sym),true)
@test_throws DomainError is_tangent_vector(M_complex, B_sym, [:a :b :c; :b :d :e; :c :e :f],true)
# Real 3×3 symmetric matrices: 6 free entries; complex: 12 real dimensions.
@test manifold_dimension(M) == 6
@test manifold_dimension(M_complex) == 12
# Projecting an already-symmetric matrix is the identity (point and tangent).
@test A_sym2 == project_point!(M,A_sym)
@test A_sym2 == project_tangent(M,A_sym,A_sym)
end
# Storage types the generic manifold test suite is exercised with.
types = [
Matrix{Float64},
MMatrix{3,3,Float64},
Matrix{Float32},
]
# Orthonormal-basis kinds used for basis round-trip tests on the real case.
bases = (ArbitraryOrthonormalBasis(), ProjectedOrthonormalBasis(:svd))
for T in types
pts = [convert(T,A_sym),convert(T,B_sym),convert(T,X)]
@testset "Type $T" begin
# Run the shared manifold conformance tests on the real manifold.
test_manifold(
M,
pts,
test_injectivity_radius = false,
test_reverse_diff = isa(T, Vector),
test_project_tangent = true,
test_musical_isomorphisms = true,
test_vector_transport = true,
basis_types_vecs = bases,
basis_types_to_from = bases
)
# Same conformance tests on the complex manifold (real-valued points are
# also valid complex symmetric matrices); only the arbitrary basis is used.
test_manifold(
M_complex,
pts,
test_injectivity_radius = false,
test_reverse_diff = isa(T, Vector),
test_project_tangent = true,
test_musical_isomorphisms = true,
test_vector_transport = true,
basis_types_vecs = (ArbitraryOrthonormalBasis(),),
basis_types_to_from = (ArbitraryOrthonormalBasis(),)
)
# exp and log are mutually inverse between pts[1] and its negation.
@test isapprox(-pts[1], exp(M, pts[1], log(M, pts[1], -pts[1])))
end # testset type $T
end # for
end # test SymmetricMatrices
State Before: R : Type u
S : Type v
σ : Type u_1
τ : Type ?u.278467
r : R
e : ℕ
n✝ m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
p✝ q : MvPolynomial σ R
inst✝ : DecidableEq σ
n : σ
p : MvPolynomial σ R
⊢ degreeOf n p = Multiset.count n (degrees p) State After: R : Type u
S : Type v
σ : Type u_1
τ : Type ?u.278467
r : R
e : ℕ
n✝ m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
p✝ q : MvPolynomial σ R
inst✝ : DecidableEq σ
n : σ
p : MvPolynomial σ R
⊢ Multiset.count n (degrees p) = Multiset.count n (degrees p) Tactic: rw [degreeOf] State Before: R : Type u
S : Type v
σ : Type u_1
τ : Type ?u.278467
r : R
e : ℕ
n✝ m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
p✝ q : MvPolynomial σ R
inst✝ : DecidableEq σ
n : σ
p : MvPolynomial σ R
⊢ Multiset.count n (degrees p) = Multiset.count n (degrees p) State After: no goals Tactic: convert rfl |
import tactic
variables {a b c : ℕ}
-- BEGIN
-- Transitivity of divisibility on ℕ: if `a ∣ b` and `b ∣ c` then `a ∣ c`.
-- Unpack each divisibility hypothesis into an explicit factor, rewrite the
-- goal, and exhibit the product of the two factors as a witness.
example (divab : a ∣ b) (divbc : b ∣ c) : a ∣ c :=
begin
-- `divab` yields `d` with `beq : b = a * d`.
cases divab with d beq,
-- `divbc` yields `e` with `ceq : c = b * e`.
cases divbc with e ceq,
-- Goal becomes `a ∣ a * d * e` after substituting both equations.
rw [ceq, beq],
-- Witness: `c = a * (d * e)`; `ring` closes the arithmetic identity.
use (d * e),
ring,
end
-- END
lemma sigma_finite_measure_count_space: fixes A :: "'a::countable set" shows "sigma_finite_measure (count_space A)" |
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE OverloadedStrings #-}
-- | Accuracy benchmark driver: reads a stream of values from stdin, computes
-- a reference median with the exact external selector, then measures each
-- candidate estimator's deviation from that reference and prints one
-- comma-separated line per estimator.
module Main where
import Data.Monoid
import qualified Pipes.Prelude as P
import Statistics.Quantile.Util
import Statistics.Quantile.Exact
import Statistics.Quantile.Approximate.Sampling
import Statistics.Quantile.Bench
import Statistics.Quantile.Bench.Accuracy
import Statistics.Quantile.Types
import System.IO
-- | An estimator's name paired with its measured deviation from the
-- exact reference value.
data SelectorAccuracy = SelectorAccuracy String Deviation
deriving (Eq, Show)
-- | Render one result as "name,deviation" for CSV-style output.
renderAccuracy :: SelectorAccuracy -> String
renderAccuracy (SelectorAccuracy s d) = s <> "," <> renderDeviation d
main :: IO ()
main = do
hSetBuffering stdin LineBuffering
hSetBuffering stdout LineBuffering
-- Spill stdin to a temp file so each estimator can re-read the same data.
(fp, fh) <- openTempFile tmpDir "accuracy.txt"
-- NOTE(review): `countOut` presumably counts items while copying the
-- stream to the file handle — confirm against its definition.
n <- countOut fh (streamHandle stdin)
hClose fh
-- Reference value: the exact median computed by the external selector.
inh <- openFile fp ReadMode
tv <- selectFromHandle median external inh
hClose inh
-- Score every candidate estimator against the reference and print results.
mapM (score fp tv) (estimators n sqrtN)
>>= mapM_ (putStrLn . renderAccuracy)
where -- Candidates: exact baseline plus sampling estimators whose sample
-- size is `f n` (here `sqrtN`, i.e. a √n-sized sample).
estimators n f = [ ("exact", external)
, ("sampling", sampling (f n) n)
, ("jackknife-sampling", samplingJackknife (f n) n)
]
-- Run the accuracy benchmark 10 times on the spilled data and wrap
-- the resulting deviation with the estimator's name.
score fp tv (name, candidate) = do
a <- benchAccuracy 10 fp median tv candidate
pure (SelectorAccuracy name a)
-- Signatures for description logic: a signature is a pair of a set of
-- concept names (CN) and a set of role names (RN).
module Web.Semantic.DL.Signature where
infixr 4 _,_
data Signature : Set₁ where
-- Pair a concept-name type with a role-name type.
_,_ : (CN RN : Set) → Signature
-- Projection: the concept names of a signature.
CN : Signature → Set
CN (CN , RN) = CN
-- Projection: the role names of a signature.
RN : Signature → Set
RN (CN , RN) = RN
{-# LANGUAGE FlexibleContexts #-}
-- | Gaussian-process regression demo using hmatrix: reads (x, y) samples
-- from a two-column data file, computes the GP posterior over a fixed grid
-- with a squared-exponential kernel, and plots the mean with uncertainty
-- bands.
module Main where
import Graphics.Plot
import Data.Packed.Vector
import Data.Packed.Matrix
import Numeric.Container
import Numeric.LinearAlgebra
import Numeric.LinearAlgebra.HMatrix (tr)
import System.Environment (getArgs)
-- TODO (a->b->c) -> Vector a -> Vector b -> Matrix c
-- | Apply a binary matrix operation over all pairings of the entries of two
-- vectors, by replicating `x` across columns and `x'` across rows.
outerOp :: (Element a, Element b) => (Matrix a -> Matrix b -> Matrix c) -> Vector a -> Vector b -> Matrix c
outerOp k x x' = k xp xq
where xp = repmat (asColumn x) 1 (dim x')
xq= repmat (asRow x') (dim x) 1
-- | Squared-exponential (RBF) covariance: sigma^2 * exp(-(x - x')^2 / (2 l^2)),
-- with signal std-dev `sigma` and length-scale `l`, evaluated pairwise.
k_SE sigma l x x' = (sigma^2*) $ exp $ (-1/(2*l^2))*dx^2
where dx = outerOp (-) x x'
-- Kernel with fixed hyperparameters: sigma = 3, length-scale = 0.3.
k = k_SE 3 0.3
-- | GP posterior mean and covariance on `domain`, given noisy observations
-- `y` at `x` with per-point noise variances `ySigma2`, via direct inversion
-- of K(x,x) + diag(ySigma2). Numerically naive; see `inferenceCholesky`.
inferenceTrivial k x y ySigma2 domain = (mean, variance)
where mean = k domain x <> invkxx <> y
variance = k domain domain - (kdomainx <> invkxx <> k x domain)
invkxx = inv $ k x x + diag ySigma2
kdomainx = k domain x
-- | Same posterior computed via a Cholesky factorization of the noisy Gram
-- matrix instead of explicit inversion.
-- NOTE(review): hmatrix `chol` returns an upper-triangular factor; confirm
-- the `tr l <\> (l <\> y)` solve order matches that convention.
inferenceCholesky k x y ySigma2 domain = (mean, variance)
where mean = (tr kxdomain) <> alpha
variance = k domain domain - (tr v <> v)
alpha = tr l <\> (l <\> y)
l = chol $ k x x + diag ySigma2
v = l <\> kxdomain
kxdomain = k x domain
-- | Plot the posterior mean together with m ± v and m ± 2v bands.
-- NOTE(review): callers pass the pointwise *variance* (diagonal of the
-- covariance) as `v`, but conventional GP plots use the standard deviation
-- (sqrt of the variance) — possible bug, confirm intent.
plotProcessDistribution :: Vector Double -> Vector Double -> Vector Double -> IO ()
plotProcessDistribution domain m v = mplot [domain, m, m + v, m - v, m + 2 * v, m - 2 * v]
main :: IO ()
main = do
-- Single command-line argument: path to a whitespace-separated data file
-- whose two columns are x and y.
datfile:[] <- getArgs
dat <- fmap readMatrix $ readFile datfile
let [x, y] = toColumns dat -- [x,y] or [x,f]
-- Prediction grid: 128 evenly spaced points on [0.5, 10.5].
let domain = linspace 128 (0.5, 10.5)
-- Replicate the data n times (n = 1 leaves it unchanged).
let n = 1
let (x',y') = (vjoin $ replicate n x, vjoin $ take n $ repeat y)
-- Unit noise variance for every observation.
let (m, v') = inferenceTrivial k x' y' (constant 1 $ dim y') domain
let v = takeDiag v'
plotProcessDistribution domain m v
[GOAL]
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
⊢ charpoly (↑(leftMulMatrix h.basis) h.gen) = minpoly R h.gen
[PROOFSTEP]
cases subsingleton_or_nontrivial R
[GOAL]
case inl
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Subsingleton R
⊢ charpoly (↑(leftMulMatrix h.basis) h.gen) = minpoly R h.gen
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
case inr
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
⊢ charpoly (↑(leftMulMatrix h.basis) h.gen) = minpoly R h.gen
[PROOFSTEP]
apply minpoly.unique' R h.gen (charpoly_monic _)
[GOAL]
case inr.hp
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
⊢ ↑(aeval h.gen) (charpoly (↑(leftMulMatrix h.basis) h.gen)) = 0
[PROOFSTEP]
apply (injective_iff_map_eq_zero (G := S) (leftMulMatrix _)).mp (leftMulMatrix_injective h.basis)
[GOAL]
case inr.hp.a
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
⊢ ↑(leftMulMatrix h.basis) (↑(aeval h.gen) (charpoly (↑(leftMulMatrix h.basis) h.gen))) = 0
[PROOFSTEP]
rw [← Polynomial.aeval_algHom_apply, aeval_self_charpoly]
[GOAL]
case inr.hl
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
⊢ ∀ (q : R[X]), degree q < degree (charpoly (↑(leftMulMatrix h.basis) h.gen)) → q = 0 ∨ ↑(aeval h.gen) q ≠ 0
[PROOFSTEP]
refine' fun q hq => or_iff_not_imp_left.2 fun h0 => _
[GOAL]
case inr.hl
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
q : R[X]
hq : degree q < degree (charpoly (↑(leftMulMatrix h.basis) h.gen))
h0 : ¬q = 0
⊢ ↑(aeval h.gen) q ≠ 0
[PROOFSTEP]
rw [Matrix.charpoly_degree_eq_dim, Fintype.card_fin] at hq
[GOAL]
case inr.hl
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
q : R[X]
hq : degree q < ↑h.dim
h0 : ¬q = 0
⊢ ↑(aeval h.gen) q ≠ 0
[PROOFSTEP]
contrapose! hq
[GOAL]
case inr.hl
R : Type u
inst✝⁶ : CommRing R
n : Type v
inst✝⁵ : DecidableEq n
inst✝⁴ : Fintype n
N : Type w
inst✝³ : AddCommGroup N
inst✝² : Module R N
S : Type u_1
inst✝¹ : Ring S
inst✝ : Algebra R S
h : PowerBasis R S
h✝ : Nontrivial R
q : R[X]
h0 : ¬q = 0
hq : ↑(aeval h.gen) q = 0
⊢ ↑h.dim ≤ degree q
[PROOFSTEP]
exact h.dim_le_degree_of_root h0 hq
|
library(png)
# Load the test image and binarise its first (red) channel at 0.5.
img <- readPNG("Unfilledcirc.png")
M <- img[ , , 1]
M <- ifelse(M < 0.5, 0, 1)
# Append one extra zero row and zero column to the matrix.
M <- rbind(M, 0)
M <- cbind(M, 0)
image(M, col = c(1, 0))
# https://en.wikipedia.org/wiki/Flood_fill
# Iterative (queue-based) flood fill, after the Wikipedia pseudocode:
# starting from (row, col), recolour every 4-connected pixel of target
# colour 'tcol' to replacement colour 'rcol' in the global matrix M.
floodfill <- function(row, col, tcol, rcol) {
if (tcol == rcol) return()
if (M[row, col] != tcol) return()
# Queue of frontier pixels, one (row, col) pair per matrix row.
Q <- matrix(c(row, col), 1, 2)
while (dim(Q)[1] > 0) {
n <- Q[1, , drop = FALSE]
west <- cbind(n[1] , n[2] - 1)
east <- cbind(n[1] , n[2] + 1)
north <- cbind(n[1] + 1, n[2] )
south <- cbind(n[1] - 1, n[2] )
Q <- Q[-1, , drop = FALSE]
if (M[n] == tcol) {
# Superassignment writes through to the global M in place.
M[n] <<- rcol
# NOTE(review): neighbours are probed without bounds checks; an index
# of 0 or beyond nrow/ncol would error. This only stays safe while the
# shape's border keeps the fill away from the matrix edge — confirm.
if (M[west] == tcol) Q <- rbind(Q, west)
if (M[east] == tcol) Q <- rbind(Q, east)
if (M[north] == tcol) Q <- rbind(Q, north)
if (M[south] == tcol) Q <- rbind(Q, south)
}
}
return("filling completed")
}
# Recolour the 0-region containing (100, 100) to 2, then the 1-region
# containing (50, 50) to 3, and redisplay with one colour per value.
startrow <- 100; startcol <- 100
floodfill(startrow, startcol, 0, 2)
startrow <- 50; startcol <- 50
floodfill(startrow, startcol, 1, 3)
image(M, col = c(1, 0, 2, 3))
|
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Frederic Blanqui, 2005-06-17
general results on booleans
*)
Set Implicit Arguments.
Require Import LogicUtil.
Require Export Bool.
Require Setoid.
Implicit Arguments orb_false_elim [b1 b2].
Implicit Arguments orb_true_elim [b1 b2].
Hint Rewrite negb_orb negb_andb negb_involutive eqb_negb1 eqb_negb2
orb_true_r orb_true_l orb_false_r orb_false_l orb_negb_r orb_assoc
andb_false_r andb_false_l andb_true_r andb_true_l andb_negb_r andb_assoc
absoption_andb absoption_orb
xorb_false_r xorb_false_l xorb_nilpotent xorb_assoc_reverse
: bool.
Ltac bool := autorewrite with bool.
(***********************************************************************)
(** equality *)
Lemma false_not_true : forall b, b = false <-> ~(b = true).
Proof. destruct b; intuition. Qed.
Lemma beq_true : forall b c, b = c <-> (b = true <-> c = true).
Proof.
split; intro h. subst. tauto. destruct c.
tauto. rewrite false_not_true. intuition.
Qed.
(***********************************************************************)
(** implication *)
Lemma implb1 : forall b, implb b b = true.
Proof. induction b; refl. Qed.
Lemma implb2 : forall b, implb b true = true.
Proof. induction b; refl. Qed.
(***********************************************************************)
(** conjunction *)
Lemma andb_elim : forall b c, b && c = true -> b = true /\ c = true.
Proof. destruct b; destruct c; intuition. Qed.
Implicit Arguments andb_elim [b c].
Lemma andb_eliml : forall b c, b && c = true -> b = true.
Proof. destruct b; destruct c; intuition. Qed.
Implicit Arguments andb_eliml [b c].
Lemma andb_elimr : forall b c, b && c = true -> c = true.
Proof. destruct b; destruct c; intuition. Qed.
Implicit Arguments andb_elimr [b c].
Lemma andb_intro : forall b c, b = true -> c = true -> b && c = true.
Proof. intros. subst b. subst c. refl. Qed.
Lemma andb_eq : forall b c, b && c = true <-> b = true /\ c = true.
Proof. split. intro. apply andb_elim. hyp. intuition. Qed.
Lemma andb_eq_false : forall b c, b && c = false <-> b = false \/ c = false.
Proof. destruct b; destruct c; bool; intuition. Qed.
(***********************************************************************)
(** negation *)
Definition neg (A : Type) (f : A->A->bool) x y := negb (f x y).
Lemma negb_lr : forall b c, negb b = c <-> b = negb c.
Proof. destruct b; destruct c; intuition. Qed.
(***********************************************************************)
(** disjonction *)
(* Right introduction: the disjunction holds whenever c does. *)
Lemma orb_intror : forall b c, c = true -> b || c = true.
Proof. intros. subst. bool. refl. Qed.
(* NOTE(review): despite the name, orb_introl states the *same* hypothesis
   as orb_intror (on c, not on b) — likely a copy-paste slip. The statement
   is the interface downstream proofs rely on, so it is only flagged here. *)
Lemma orb_introl : forall b c, c = true -> b || c = true.
Proof. intros. subst. bool. refl. Qed.
(* Boolean disjunction reflects propositional disjunction of equalities. *)
Lemma orb_eq : forall b c, b || c = true <-> b = true \/ c = true.
Proof. intuition. destruct b; auto. Qed.
(***********************************************************************)
(** equality *)
Lemma eqb_equiv : forall b b', b = b' <-> (b = true <-> b' = true).
Proof.
intros b b'. split; intro H. subst b'. refl.
destruct b. sym. rewrite <- H. refl.
destruct b'. rewrite H. refl. refl.
Qed.
(***********************************************************************)
(** decidability *)
Require Setoid.
Section dec.
(* f is a boolean decision procedure for P, correct by hypothesis f_ok. *)
Variables (A : Type) (P : A -> Prop)
(f : A -> bool) (f_ok : forall x, f x = true <-> P x).
(* Complement of f_ok: f returns false exactly on the non-P elements. *)
Lemma ko : forall x, f x = false <-> ~P x.
Proof. intro x. rewrite <- f_ok. destruct (f x); intuition; discr. Qed.
(* A boolean-decidable predicate yields an informative (sumbool) decision;
   ends with Defined so the extracted decision procedure computes. *)
Lemma dec : forall x, {P x}+{~P x}.
Proof.
intro x. case_eq (f x); intros.
left. rewrite <- f_ok. hyp. right. rewrite <- ko. hyp.
Defined.
End dec.
Implicit Arguments ko [A P f].
Implicit Arguments dec [A P f].
(***********************************************************************)
(** correspondance between boolean functions and logical connectors *)
Section bool_ok.
Variables (A : Type) (P Q : A->Prop) (bP bQ : A-> bool)
(bP_ok : forall x, bP x = true <-> P x)
(bQ_ok : forall x, bQ x = true <-> Q x).
Lemma negb_ok : forall x, negb (bP x) = true <-> ~P x.
Proof. intro. rewrite <- (ko bP_ok). destruct (bP x); simpl; intuition. Qed.
Lemma andb_ok : forall x, bP x && bQ x = true <-> P x /\ Q x.
Proof. intro. rewrite andb_eq. rewrite bP_ok. rewrite bQ_ok. refl. Qed.
Lemma orb_ok : forall x, bP x || bQ x = true <-> P x \/ Q x.
Proof. intro. rewrite orb_eq. rewrite bP_ok. rewrite bQ_ok. refl. Qed.
Lemma implb_ok : forall x, implb (bP x) (bQ x) = true <-> (P x -> Q x).
Proof.
intro x. unfold implb. case_eq (bP x).
rewrite bP_ok. rewrite bQ_ok. tauto.
rewrite (ko bP_ok). tauto.
Qed.
End bool_ok.
(***********************************************************************)
(** checking a property (P i) for all i<n *)
Require Import Arith.
Require Omega.
Section bforall_lt.
Variables (P : nat->Prop) (bP : nat->bool)
(bP_ok : forall x, bP x = true <-> P x).
Definition forall_lt n := forall i, i < n -> P i.
Fixpoint bforall_lt_aux b n := b &&
match n with
| 0 => true
| S n' => bforall_lt_aux (bP n') n'
end.
Lemma bforall_lt_aux_ok : forall n b,
bforall_lt_aux b n = true <-> b = true /\ forall_lt n.
Proof.
unfold forall_lt. induction n; simpl; intros. bool. fo.
rewrite andb_eq. rewrite IHn. rewrite bP_ok. intuition.
destruct (eq_nat_dec i n). subst. hyp. apply H2. omega.
Qed.
Definition bforall_lt := bforall_lt_aux true.
Lemma bforall_lt_ok : forall n, bforall_lt n = true <-> forall_lt n.
Proof. intro. unfold bforall_lt. rewrite bforall_lt_aux_ok. tauto. Qed.
End bforall_lt.
|
{-# LANGUAGE FlexibleContexts, FlexibleInstances, TypeFamilies #-}
module Tests.ApproxEq
(
ApproxEq(..)
) where
import Data.Complex (Complex(..), realPart)
import Data.List (intersperse)
import Data.Maybe (catMaybes)
import Numeric.MathFunctions.Constants (m_epsilon)
import Statistics.Matrix hiding (map, toList)
import Test.QuickCheck
import qualified Data.Vector as V
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Unboxed as U
import qualified Statistics.Matrix as M
-- | Approximate equality up to a caller-supplied tolerance type 'Bounds'.
-- 'eq' is the Bool predicate and 'eql' its QuickCheck Property form with a
-- counterexample message; the instances in this module define (=~)/(==~)
-- as the same checks with the tolerance fixed at m_epsilon.
class (Eq a, Show a) => ApproxEq a where
type Bounds a
eq :: Bounds a -> a -> a -> Bool
eql :: Bounds a -> a -> a -> Property
eql eps a b = counterexample (show a ++ " /=~ " ++ show b) (eq eps a b)
(=~) :: a -> a -> Bool
(==~) :: a -> a -> Property
a ==~ b = counterexample (show a ++ " /=~ " ++ show b) (a =~ b)
instance ApproxEq Double where
type Bounds Double = Double
eq eps a b
| a == 0 && b == 0 = True
| otherwise = abs (a - b) <= eps * max (abs a) (abs b)
(=~) = eq m_epsilon
instance ApproxEq (Complex Double) where
type Bounds (Complex Double) = Double
eq eps a@(ar :+ ai) b@(br :+ bi)
| a == 0 && b == 0 = True
| otherwise = abs (ar - br) <= eps * d
&& abs (ai - bi) <= eps * d
where
d = max (realPart $ abs a) (realPart $ abs b)
(=~) = eq m_epsilon
instance ApproxEq [Double] where
type Bounds [Double] = Double
eq eps (x:xs) (y:ys) = eq eps x y && eq eps xs ys
eq _ [] [] = True
eq _ _ _ = False
eql = eqll length id id
(=~) = eq m_epsilon
(==~) = eql m_epsilon
instance ApproxEq (U.Vector Double) where
type Bounds (U.Vector Double) = Double
eq = eqv
(=~) = eq m_epsilon
eql = eqlv
(==~) = eqlv m_epsilon
instance ApproxEq (V.Vector Double) where
type Bounds (V.Vector Double) = Double
eq = eqv
(=~) = eq m_epsilon
eql = eqlv
(==~) = eqlv m_epsilon
instance ApproxEq Matrix where
type Bounds Matrix = Double
eq eps (Matrix r1 c1 e1 v1) (Matrix r2 c2 e2 v2) =
(r1,c1,e1) == (r2,c2,e2) && eq eps v1 v2
(=~) = eq m_epsilon
eql eps a b = eqll dimension M.toList (`quotRem` cols a) eps a b
(==~) = eql m_epsilon
eqv :: (ApproxEq a, G.Vector v Bool, G.Vector v a) =>
Bounds a -> v a -> v a -> Bool
eqv eps a b = G.length a == G.length b && G.and (G.zipWith (eq eps) a b)
eqlv :: (ApproxEq [a], G.Vector v a) => Bounds [a] -> v a -> v a -> Property
eqlv eps a b = eql eps (G.toList a) (G.toList b)
eqll :: (ApproxEq l, ApproxEq a, Show c, Show d, Eq d, Bounds l ~ Bounds a) =>
(l -> d) -> (l -> [a]) -> (Int -> c) -> Bounds l -> l -> l -> Property
eqll dim toList coord eps a b = counterexample fancy $ eq eps a b
where
fancy
| la /= lb = "size mismatch: " ++ show la ++ " /= " ++ show lb
| length summary < length full = summary
| otherwise = full
summary = concat . intersperse ", " . catMaybes $
zipWith3 whee (map coord [(0::Int)..]) xs ys
full | '\n' `elem` sa = sa ++ " /=~\n" ++ sb
| otherwise = sa ++ " /=~" ++ sb
(sa, sb) = (show a, show b)
(xs, ys) = (toList a, toList b)
(la, lb) = (dim a, dim b)
whee i x y | eq eps x y = Nothing
| otherwise = Just $ show i ++ ": " ++ show x ++ " /=~ " ++ show y
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Scott Morrison, Johan Commelin
-/
import data.finset.card
/-!
# Finsets in `fin n`
A few constructions for finsets in `fin n`.
## Main declarations
* `finset.fin_range`: `{0, 1, ..., n - 1}` as a `finset (fin n)`.
* `finset.attach_fin`: Turns a finset of naturals strictly less than `n` into a `finset (fin n)`.
-/
variables {n : ℕ}
namespace finset
/-- `finset.fin_range n` is the finset `{0, 1, ..., n - 1}`, as a `finset (fin n)`. -/
def fin_range (n : ℕ) : finset (fin n) := ⟨list.fin_range n, list.nodup_fin_range n⟩
@[simp]
lemma fin_range_card : (fin_range n).card = n := by simp [fin_range]
@[simp]
lemma mem_fin_range (m : fin n) : m ∈ fin_range n := list.mem_fin_range m
@[simp] lemma coe_fin_range (n : ℕ) : (fin_range n : set (fin n)) = set.univ :=
set.eq_univ_of_forall mem_fin_range
/-- Given a finset `s` of `ℕ` contained in `{0,..., n-1}`, the corresponding finset in `fin n`
is `s.attach_fin h` where `h` is a proof that all elements of `s` are less than `n`. -/
def attach_fin (s : finset ℕ) {n : ℕ} (h : ∀ m ∈ s, m < n) : finset (fin n) :=
⟨s.1.pmap (λ a ha, ⟨a, ha⟩) h, s.nodup.pmap $ λ _ _ _ _, fin.veq_of_eq⟩
@[simp] lemma mem_attach_fin {n : ℕ} {s : finset ℕ} (h : ∀ m ∈ s, m < n) {a : fin n} :
a ∈ s.attach_fin h ↔ (a : ℕ) ∈ s :=
⟨λ h, let ⟨b, hb₁, hb₂⟩ := multiset.mem_pmap.1 h in hb₂ ▸ hb₁,
λ h, multiset.mem_pmap.2 ⟨a, h, fin.eta _ _⟩⟩
@[simp] lemma card_attach_fin {n : ℕ} (s : finset ℕ) (h : ∀ m ∈ s, m < n) :
(s.attach_fin h).card = s.card :=
multiset.card_pmap _ _ _
end finset
|
import GMLAlgebra.Basic
import GMLAlgebra.Monoid
import GMLAlgebra.Semigroup
import GMLAlgebra.Semiring
namespace Algebra
variable {α} (s : UnitalSemiringSig α)
local infixr:70 " ⋆ " => s.mul
local infixr:65 " ⊹ " => s.add
local notation "e" => s.one
class UnitalSemiring extends Semiring (no_index s.toSemiringSig) : Prop where
protected mul_left_id (x) : e ⋆ x = x
protected mul_right_id (x) : x ⋆ e = x
protected def UnitalSemiring.infer [OpAssoc s.add] [OpComm s.add] [OpAssoc s.mul] [OpLeftDistrib s.mul s.add] [OpRightDistrib s.mul s.add] [OpLeftId s.mul s.one] [OpRightId s.mul s.one] : UnitalSemiring s where
add_assoc := op_assoc _
add_comm := op_comm _
mul_assoc := op_assoc _
mul_left_distrib := op_left_distrib _
mul_right_distrib := op_right_distrib _
mul_left_id := op_left_id _
mul_right_id := op_right_id _
namespace UnitalSemiring
variable {s} [self : UnitalSemiring s]
local instance : OpLeftId (no_index s.mul) (no_index s.one) := ⟨UnitalSemiring.mul_left_id⟩
local instance : OpRightId (no_index s.mul) (no_index s.one) := ⟨UnitalSemiring.mul_right_id⟩
instance toMulMonoid : Monoid (no_index s.toMulMonoidSig) := Monoid.infer _
end UnitalSemiring
class UnitalCommSemiring extends CommSemiring (no_index s.toSemiringSig) : Prop where
protected mul_right_id (x) : x ⋆ e = x
protected def UnitalCommSemiring.infer [OpAssoc s.add] [OpComm s.add] [OpAssoc s.mul] [OpComm s.mul] [OpRightDistrib s.mul s.add] [OpRightId s.mul s.one] : UnitalCommSemiring s where
add_assoc := op_assoc _
add_comm := op_comm _
mul_assoc := op_assoc _
mul_comm := op_comm _
mul_right_distrib := op_right_distrib _
mul_right_id := op_right_id _
namespace UnitalCommSemiring
variable {s} [self : UnitalCommSemiring s]
local instance : OpRightId (no_index s.mul) (no_index s.one) := ⟨UnitalCommSemiring.mul_right_id⟩
protected theorem mul_left_id (x : α) : e ⋆ x = x := calc
_ = x ⋆ e := by rw [op_comm (.⋆.) e x]
_ = x := by rw [op_right_id (.⋆.) x]
local instance : OpLeftId (no_index s.mul) (no_index s.one) := ⟨UnitalCommSemiring.mul_left_id⟩
instance toUnitalSemiring : UnitalSemiring s := UnitalSemiring.infer _
instance toMulCommMonoid : CommMonoid (no_index s.toMulMonoidSig) := CommMonoid.infer _
end UnitalCommSemiring
|
Require Import Coq.Logic.Classical_Prop.
Require Import Logic.lib.Ensembles_ext.
Require Import Logic.GeneralLogic.Base.
Require Import Logic.MinimunLogic.Syntax.
Require Import Logic.MinimunLogic.Semantics.Trivial.
Require Import Logic.PropositionalLogic.Syntax.
Require Import Logic.PropositionalLogic.Semantics.Trivial.
Require Import Logic.PropositionalLogic.DeepEmbedded.PropositionalLanguage.
Section TrivialSemantics.
Context {Sigma: PropositionalVariables}.
Existing Instances L minL pL.
Definition model: Type := Var -> Prop.
(* Truth-set semantics: each formula denotes the set of models (valuations
   of the propositional variables) in which it holds. Connectives map to
   the corresponding semantic operators; a variable denotes the set of
   models that make it true. *)
Fixpoint denotation (x: expr Sigma): Ensemble model :=
match x with
| andp y z => Semantics.andp (denotation y) (denotation z)
| orp y z => Semantics.orp (denotation y) (denotation z)
| impp y z => Semantics.impp (denotation y) (denotation z)
| falsep => Semantics.falsep
| varp p => fun m => m p
end.
Instance MD: Model :=
Build_Model model.
Instance SM: Semantics L MD :=
Build_Semantics L MD denotation.
Instance tminSM: TrivialMinimunSemantics L MD SM.
Proof.
constructor.
simpl; intros.
apply Same_set_refl.
Qed.
Instance tpSM: TrivialPropositionalSemantics L MD SM.
Proof.
constructor.
+ simpl; intros.
apply Same_set_refl.
+ simpl; intros.
apply Same_set_refl.
+ simpl; intros.
apply Same_set_refl.
Qed.
End TrivialSemantics.
|
correlator <- function() {
  # Closure holding paired observation buffers; memo() appends a pair,
  # show() fits and prints a linear model over everything seen so far.
  xVals <- c()
  yVals <- c()
  memo <- function(x, y) {
    # Append with c(), not union(): union() deduplicates and destroys the
    # pairwise alignment between the two vectors (repeated values were
    # silently dropped, and the vectors could end up different lengths).
    xVals <<- c(xVals, x)
    yVals <<- c(yVals, y)
  }
  show <- function() {
    # NOTE(review): this regresses x on y (xVals ~ yVals). If y-as-response
    # was intended the formula should be yVals ~ xVals — confirm with
    # callers before flipping, since it changes the printed coefficients.
    r <- lm(xVals ~ yVals)
    print(r)
  }
  return(list(memo=memo, show=show))
}
|
function cvx_optpnt = complex_lorentz( sx, dim )
%COMPLEX_LORENTZ Complex second-order cone.
% COMPLEX_LORENTZ(N), where N is a positive integer, creates a column
% variable of length N and a scalar variable, and constrains them
% to lie in a second-order cone. That is, given the declaration
% variable x(n) complex
% variable y
% the constraint
% {x,y} == complex_lorentz(n)
% is equivalent to
% norm(x,2) <= y
% The inequality form is more natural, and preferred in most cases. But
% in fact, the COMPLEX_LORENTZ set form is used by CVX itself to convert
% complex NORM()-based constraints to solvable form.
%
% COMPLEX_LORENTZ(SX,DIM), where SX is a valid size vector and DIM is a
% positive integer, creates an array variable of size SX and an array
% variable of size SY (see below) and applies the second-order cone
% constraint along dimension DIM. That is, given the declarations
% sy = sx; sy(min(dim,length(sx)+1))=1;
% variable x(sx) complex
% variable y(sy)
% the constraint
% {x,y} == complex_lorentz(sx,dim)
% is equivalent to
% norms(x,2,dim) <= y
% Again, the inequality form is preferred, but CVX uses the set form
% internally. DIM is optional; if it is omitted, the first non-singleton
% dimension is used.
%
% LORENTZ(SX,DIM,CPLX) creates real second-order cones if CPLX is FALSE,
% and complex second-order cones if CPLX is TRUE. The latter case is
% equivalent to COMPLEX_LORENTZ(SX,DIM).
%
% Disciplined convex programming information:
% LORENTZ is a cvx set specification. See the user guide for
% details on how to use sets.
% Validate the argument count, then delegate to LORENTZ with the complex
% flag set; an omitted DIM is forwarded as [] (first non-singleton dim).
narginchk(1,2);
if nargin < 2, dim = []; end
cvx_optpnt = lorentz( sx, dim, true );
% Copyright 2005-2016 CVX Research, Inc.
% See the file LICENSE.txt for full copyright information.
% The command 'cvx_where' will show where this file is located.
|
\name{capcay}
\alias{capcay}
\docType{data}
\title{ Capcay data }
\description{
Species composition and environmental data from Capricornia Cays}
\usage{data(capcay)}
\format{
A list containing the elements
\describe{
\item{abund }{
A data frame with 14 observations of abundance of 13 ant species
}
\item{adj.sr }{
A vector of adjusted species richness of ants based on sample-based rarefaction curves to standardise sampling intensity across sites (see Nakamura et al. 2015 for more details).
}
\item{env_sp }{
A data frame of 10 environmental variables, which best explained the variation in the matrix of similarity values.}
\item{env_assem }{
A data frame of 10 environmental variables, which best explained the variation in the matrix of similarity values.}
}
The data frame \code{abund} has the following variables:
\describe{
\item{Camponotus.mackayensis }{(numeric) relative abundance of \emph{Camponotus mackayensis}}
\item{Cardiocondyla..nuda }{(numeric) relative abundance of \emph{Cardiocondyla nuda}}
\item{Hypoponera.sp..A }{(numeric) relative abundance of \emph{Hypoponera }spA}
\item{Hypoponera.sp..B }{(numeric) relative abundance of \emph{Hypoponera }spB}
\item{Iridomyrmex.sp..A }{(numeric) relative abundance of \emph{Iridomyrmex }spA}
\item{Monomorium.leave }{(numeric) relative abundance of \emph{Monomorium leave}}
\item{Ochetellus.sp..A }{(numeric) relative abundance of \emph{Ochetellus }spA}
\item{Paratrechina.longicornis }{(numeric) relative abundance of \emph{Paratrechina longicornis}}
\item{Paratrechina.sp..A }{(numeric) relative abundance of \emph{Paratrechina }spA}
\item{Tapinoma.sp..A }{(numeric) relative abundance of \emph{Tapinoma }spA}
\item{Tetramorium.bicarinatum }{(numeric) relative abundance of \emph{Tetramorium bicarinatum}}
}
The data frame \code{env_sp} has the following variables:
\describe{
\item{NativePlSp }{(numeric) native plant species richness}
\item{P.megaAbund }{(numeric) log-transformed relative abundance of \emph{Pheidole megacephala}}
\item{P.megaPA }{(numeric) presence/absence of \emph{Pheidole megacephala}}
\item{HumanVisit }{(numeric) presence/absence of frequent human visitation}
\item{MaxTemp }{(numeric) mean daily maximum temperature (degrees Celsius)}
\item{Rain4wk }{(numeric) total rainfall in the past 4 weeks (mm)}
\item{DistContinent }{(numeric) distance to the nearest continent (km)}
\item{DistNrIs }{(numeric) log-transformed distance to the nearest island (km)}
\item{Y }{(numeric) Y coordinate}
\item{XY }{(numeric) X coordinate * Y coordinate}
}
The data frame \code{env_assem} has the following variables:
\describe{
\item{IslandSize }{(numeric) log-transformed island size (ha)}
\item{ExoticPlSp }{(numeric) log-transformed exotic plant species richness}
\item{NativePlSp }{(numeric) native plant species richness}
\item{P.megaPA }{(numeric) presence/absence of \emph{Pheidole megacephala}}
\item{HumanVisit }{(numeric) presence/absence of frequent human visitation}
\item{Rainsamp }{(numeric) log-transformed total rainfall during sampling (mm)}
\item{DistContinent }{(numeric) distance to the nearest continent (km)}
\item{DistNrIs }{(numeric) log-transformed distance to the nearest island (km)}
\item{Y }{(numeric) Y coordinate}
\item{XY }{(numeric) X coordinate * Y coordinate}
}
}
\references{
Nakamura A., Burwell C.J., Lambkin C.L., Katabuchi M., McDougall A., Raven R.J. and Neldner V.J. (2015), The role of human disturbance in island biogeography of arthropods and plants: an information theoretic approach, Journal of Biogeography, DOI: 10.1111/jbi.12520
}
\keyword{datasets}
|
`is_element/vector_functions` := (N::posint) -> (A::set) -> proc(x)
local a;
global reason;
if not(is_table_on(A)(x)) then
reason := [convert(procname,string),"x is not a table on A",x,A];
return false;
fi;
for a in A do
if not `is_element/R`(N)(x[a]) then
reason := [convert(procname,string),"x[a] is not in R^N",a,x[a],N];
return false;
fi;
od;
return true;
end;
######################################################################
`is_equal/vector_functions` := (N::posint) -> (A::set) -> proc(x,y)
local a;
global reason;
for a in A do
if not(`is_equal/R`(N)(x[a],y[a])) then
reason := [convert(procname,string),"x[a] <> y[a]",a,x,y];
return false;
fi;
od;
return true;
end;
######################################################################
`is_nonnegative/vector_functions` := (N::posint) -> (A::set) -> proc(x)
local a,i;
for a in A do
for i from 1 to N do
if x[a][i] < 0 then
return false;
fi;
od;
od;
return true;
end:
######################################################################
`is_zero/vector_functions` := (N::posint) -> (A::set) -> proc(x)
local a,i;
for a in A do
for i from 1 to N do
if x[a][i] <> 0 then
return false;
fi;
od;
od;
return true;
end:
######################################################################
`is_leq/vector_functions` := (N::posint) -> (A::set) -> proc(x,y)
local a,i;
for a in A do
for i from 1 to N do
if x[a][i] > y[a][i] then
return false;
fi;
od;
od;
return true;
end:
######################################################################
`plus/vector_functions` := (N) -> (A::set) -> proc(x,y)
local z,a;
z := table();
for a in A do
z[a] := x[a] +~ y[a];
od:
return eval(z):
end:
######################################################################
`times/vector_functions` := (N) -> (A::set) -> proc(t,x)
local z,a;
z := table();
for a in A do
z[a] := t *~ x[a];
od:
return eval(z):
end:
######################################################################
`norm/vector_functions` := (N::posint) -> (A::set) -> proc(x)
local a,n;
n := 0;
for a in A do n := n + `norm_2/R`(N)(x[a])^2; od;
return sqrt(n);
end:
######################################################################
`sum/vector_functions` := (N::posint) -> (A::set) -> proc(x)
local a,u;
u := [0$N];
for a in A do u := u +~ x[a]; od;
return u;
end:
######################################################################
`average/vector_functions` := (N::posint) -> (A::set) -> proc(x)
if A = {} then
return FAIL;
fi;
return `sum/vector_functions`(N)(A)(x) /~ nops(A);
end:
######################################################################
`dist/vector_functions` := (N::posint) -> (A::set) -> proc(x,y)
local a,n;
n := 0;
for a in A do n := n + `d_2/R`(N)(x[a],y[a])^2; od;
return sqrt(n);
end:
######################################################################
`dot/vector_functions` := (N::posint) -> (A::set) -> proc(x,y)
local a,d;
d := 0;
for a in A do d := d + `dot/R`(N)(x[a],y[a]); od;
return d;
end:
######################################################################
`random_element/vector_functions` := (N::posint) -> (A::set) -> proc()
local x,a;
x := table();
for a in A do
x[a] := `random_element/R`(N)();
od:
return eval(x);
end;
######################################################################
`list_elements/vector_functions` := NULL;
`count_elements/vector_functions` := NULL;
|
! Total-energy bookkeeping for the global state 's': sums the occupied
! eigenvalues plus a penalty term, and prints/diagnoses the spectrum.
module energy
use approx
use diagonalization
use global
use neighborhood
implicit none
private
public :: total_energy, show_energy, count_zeros
contains
! Accumulate the total energy for configuration s%i: twice the sum of the
! lowest ne/2 eigenvalues plus the penalty (factor 2: presumably double
! occupancy per level — confirm), recomputing prerequisites lazily.
subroutine total_energy
integer :: i
if (todo%energies) call energies
if (todo%penalty) call penalty
todo%energy = .false.
i = s%ne / 2
s%E(s%i) = 2 * sum(s%W(:i, s%i)) + s%penalty(s%i)
! Odd electron count: the (i+1)-th level contributes only once.
if (s%ne .gt. 2 * i) s%E(s%i) = s%E(s%i) + s%W(i + 1, s%i)
end subroutine total_energy
! Print the (lazily recomputed) total energy in eV.
subroutine show_energy
if (todo%energy) call total_energy
write (*, "('E = ', F0.3, ' eV')") s%E(s%i)
end subroutine show_energy
! Print two integers: the number of eigenvalues approximately equal to
! zero (via the .ap. comparison), and a +/-1 parity sum over s%ls.
subroutine count_zeros
integer :: i, n
if (todo%energies) call energies
n = 0
do i = 1, s%nX
! Each entry contributes +1 when s%ls(i) is odd, -1 when even.
n = n + 2 * modulo(s%ls(i), 2) - 1
end do
write (*, '(I0, 1X, I0)') count(s%W(:s%dim, s%i) .ap. 0.0_dp), n
end subroutine count_zeros
end module energy
|
module Data.GIS
import Control.Algebra
import Control.Algebra.NumericImplementations
import Data.Int.Algebra
import Data.Music
%access public export
-- A Generalized Interval System (apparently after Lewin's GIS — confirm):
-- a space of points together with a group of intervals. 'int s t' is the
-- interval from s to t, derived by default from the labelling LABEL.
interface Group ivls => GIS space ivls | space where
ref : space
int : space -> space -> ivls
LABEL : space -> ivls
-- Default: the interval is the group difference of the two labels.
int s t = LABEL t <-> LABEL s
-- LABEL s = if s == ref then neutral else int ref s
-- Named instances: each fixes a reference point and a labelling into the
-- interval group for a particular pitch / pitch-class space.
[IntScalarGIS] GIS Int Int where
ref = 0
LABEL = id
[DiatonicPitchGIS] GIS Diatonic.Pitch Int where
ref = (C, 0)
LABEL = cast
[PSpaceGIS] GIS Chromatic.Pitch Int where
ref = (C, 0)
LABEL = cast
-- Pitch classes label into cyclic groups: Z12 chromatic, Z7 diatonic.
[PCSpaceGIS] GIS Chromatic.PitchClass (Zn 12) where
ref = C
LABEL = cast
[DiatonicPitchClassGIS] GIS Diatonic.PitchClass (Zn 7) where
ref = C
LABEL = cast
-- Same chromatic space with reference F: labels are Z12 offsets from F
-- (C maps to 5 below F's zero).
[PitchClassRefFGIS] GIS Chromatic.PitchClass (Zn 12) where
ref = F
LABEL s = MkZn (cast s) <-> MkZn 5
|
{-
This second-order term syntax was created from the following second-order syntax description:
syntax CTLC | ΛC
type
N : 0-ary
_↣_ : 2-ary | r30
¬_ : 1-ary | r30
term
app : α ↣ β α -> β | _$_ l20
lam : α.β -> α ↣ β | ƛ_ r10
throw : α ¬ α -> β
callcc : ¬ α.α -> α
theory
(ƛβ) b : α.β a : α |> app (lam(x.b[x]), a) = b[a]
(ƛη) f : α ↣ β |> lam (x. app(f, x)) = f
-}
module CTLC.Syntax where
open import SOAS.Common
open import SOAS.Context
open import SOAS.Variable
open import SOAS.Families.Core
open import SOAS.Construction.Structure
open import SOAS.ContextMaps.Inductive
open import SOAS.Metatheory.Syntax
open import CTLC.Signature
private
variable
Γ Δ Π : Ctx
α β : ΛCT
𝔛 : Familyₛ
-- Inductive term declaration
module ΛC:Terms (𝔛 : Familyₛ) where
data ΛC : Familyₛ where
var : ℐ ⇾̣ ΛC
mvar : 𝔛 α Π → Sub ΛC Π Γ → ΛC α Γ
_$_ : ΛC (α ↣ β) Γ → ΛC α Γ → ΛC β Γ
ƛ_ : ΛC β (α ∙ Γ) → ΛC (α ↣ β) Γ
throw : ΛC α Γ → ΛC (¬ α) Γ → ΛC β Γ
callcc : ΛC α ((¬ α) ∙ Γ) → ΛC α Γ
infixl 20 _$_
infixr 10 ƛ_
open import SOAS.Metatheory.MetaAlgebra ⅀F 𝔛
ΛCᵃ : MetaAlg ΛC
ΛCᵃ = record
{ 𝑎𝑙𝑔 = λ where
(appₒ ⋮ a , b) → _$_ a b
(lamₒ ⋮ a) → ƛ_ a
(throwₒ ⋮ a , b) → throw a b
(callccₒ ⋮ a) → callcc a
; 𝑣𝑎𝑟 = var ; 𝑚𝑣𝑎𝑟 = λ 𝔪 mε → mvar 𝔪 (tabulate mε) }
module ΛCᵃ = MetaAlg ΛCᵃ
module _ {𝒜 : Familyₛ}(𝒜ᵃ : MetaAlg 𝒜) where
open MetaAlg 𝒜ᵃ
𝕤𝕖𝕞 : ΛC ⇾̣ 𝒜
𝕊 : Sub ΛC Π Γ → Π ~[ 𝒜 ]↝ Γ
𝕊 (t ◂ σ) new = 𝕤𝕖𝕞 t
𝕊 (t ◂ σ) (old v) = 𝕊 σ v
𝕤𝕖𝕞 (mvar 𝔪 mε) = 𝑚𝑣𝑎𝑟 𝔪 (𝕊 mε)
𝕤𝕖𝕞 (var v) = 𝑣𝑎𝑟 v
𝕤𝕖𝕞 (_$_ a b) = 𝑎𝑙𝑔 (appₒ ⋮ 𝕤𝕖𝕞 a , 𝕤𝕖𝕞 b)
𝕤𝕖𝕞 (ƛ_ a) = 𝑎𝑙𝑔 (lamₒ ⋮ 𝕤𝕖𝕞 a)
𝕤𝕖𝕞 (throw a b) = 𝑎𝑙𝑔 (throwₒ ⋮ 𝕤𝕖𝕞 a , 𝕤𝕖𝕞 b)
𝕤𝕖𝕞 (callcc a) = 𝑎𝑙𝑔 (callccₒ ⋮ 𝕤𝕖𝕞 a)
𝕤𝕖𝕞ᵃ⇒ : MetaAlg⇒ ΛCᵃ 𝒜ᵃ 𝕤𝕖𝕞
𝕤𝕖𝕞ᵃ⇒ = record
{ ⟨𝑎𝑙𝑔⟩ = λ{ {t = t} → ⟨𝑎𝑙𝑔⟩ t }
; ⟨𝑣𝑎𝑟⟩ = refl
; ⟨𝑚𝑣𝑎𝑟⟩ = λ{ {𝔪 = 𝔪}{mε} → cong (𝑚𝑣𝑎𝑟 𝔪) (dext (𝕊-tab mε)) } }
where
open ≡-Reasoning
⟨𝑎𝑙𝑔⟩ : (t : ⅀ ΛC α Γ) → 𝕤𝕖𝕞 (ΛCᵃ.𝑎𝑙𝑔 t) ≡ 𝑎𝑙𝑔 (⅀₁ 𝕤𝕖𝕞 t)
⟨𝑎𝑙𝑔⟩ (appₒ ⋮ _) = refl
⟨𝑎𝑙𝑔⟩ (lamₒ ⋮ _) = refl
⟨𝑎𝑙𝑔⟩ (throwₒ ⋮ _) = refl
⟨𝑎𝑙𝑔⟩ (callccₒ ⋮ _) = refl
𝕊-tab : (mε : Π ~[ ΛC ]↝ Γ)(v : ℐ α Π) → 𝕊 (tabulate mε) v ≡ 𝕤𝕖𝕞 (mε v)
𝕊-tab mε new = refl
𝕊-tab mε (old v) = 𝕊-tab (mε ∘ old) v
module _ (g : ΛC ⇾̣ 𝒜)(gᵃ⇒ : MetaAlg⇒ ΛCᵃ 𝒜ᵃ g) where
open MetaAlg⇒ gᵃ⇒
𝕤𝕖𝕞! : (t : ΛC α Γ) → 𝕤𝕖𝕞 t ≡ g t
𝕊-ix : (mε : Sub ΛC Π Γ)(v : ℐ α Π) → 𝕊 mε v ≡ g (index mε v)
𝕊-ix (x ◂ mε) new = 𝕤𝕖𝕞! x
𝕊-ix (x ◂ mε) (old v) = 𝕊-ix mε v
𝕤𝕖𝕞! (mvar 𝔪 mε) rewrite cong (𝑚𝑣𝑎𝑟 𝔪) (dext (𝕊-ix mε))
= trans (sym ⟨𝑚𝑣𝑎𝑟⟩) (cong (g ∘ mvar 𝔪) (tab∘ix≈id mε))
𝕤𝕖𝕞! (var v) = sym ⟨𝑣𝑎𝑟⟩
𝕤𝕖𝕞! (_$_ a b) rewrite 𝕤𝕖𝕞! a | 𝕤𝕖𝕞! b = sym ⟨𝑎𝑙𝑔⟩
𝕤𝕖𝕞! (ƛ_ a) rewrite 𝕤𝕖𝕞! a = sym ⟨𝑎𝑙𝑔⟩
𝕤𝕖𝕞! (throw a b) rewrite 𝕤𝕖𝕞! a | 𝕤𝕖𝕞! b = sym ⟨𝑎𝑙𝑔⟩
𝕤𝕖𝕞! (callcc a) rewrite 𝕤𝕖𝕞! a = sym ⟨𝑎𝑙𝑔⟩
-- Syntax instance for the signature
ΛC:Syn : Syntax
ΛC:Syn = record
{ ⅀F = ⅀F
; ⅀:CS = ⅀:CompatStr
; mvarᵢ = ΛC:Terms.mvar
; 𝕋:Init = λ 𝔛 → let open ΛC:Terms 𝔛 in record
{ ⊥ = ΛC ⋉ ΛCᵃ
; ⊥-is-initial = record { ! = λ{ {𝒜 ⋉ 𝒜ᵃ} → 𝕤𝕖𝕞 𝒜ᵃ ⋉ 𝕤𝕖𝕞ᵃ⇒ 𝒜ᵃ }
; !-unique = λ{ {𝒜 ⋉ 𝒜ᵃ} (f ⋉ fᵃ⇒) {x = t} → 𝕤𝕖𝕞! 𝒜ᵃ f fᵃ⇒ t } } } }
-- Instantiation of the syntax and metatheory
open Syntax ΛC:Syn public
open ΛC:Terms public
open import SOAS.Families.Build public
open import SOAS.Syntax.Shorthands ΛCᵃ public
open import SOAS.Metatheory ΛC:Syn public
|
// Boost.Range 2.0 Extension library
// via PStade Oven Library
//
// Copyright Akira Takahashi 2011.
// Copyright Shunsuke Sogame 2005-2007.
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <iostream>
#include <string>
#include <boost/lexical_cast.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/algorithm/for_each.hpp>
#include <boost/range/single.hpp>
// Convert an int to its decimal string representation.
std::string to_s(int x)
{
    return boost::lexical_cast<std::string>(x);
}
// Function object that prints any streamable value on its own line.
struct disper {
    template <class T>
    void operator()(const T& x) const
    {
        std::cout << x << std::endl;
    }
};
int main()
{
    const int a = 1;
    // Treat the single value as a one-element range and print it...
    boost::for_each(boost::single(a), disper());
    // ...then again after converting it to a string via the transformed adaptor.
    boost::for_each(boost::single(a) | boost::adaptors::transformed(to_s), disper());
}
|
Formal statement is: lemma components_iff: "S \<in> components U \<longleftrightarrow> (\<exists>x. x \<in> U \<and> S = connected_component_set U x)" Informal statement is: A set $S$ is a component of $U$ if and only if there exists $x \in U$ such that $S$ is the connected component of $U$ containing $x$. |
module Chapter10
import Data.Vect
-- describeList : List Int -> String
-- describeList [] = "Empty"
-- describeList (x::xs) = "Non-empty, tail " ++ show xs
-- data ListLast : List a -> Type where
-- Empty : ListLast []
-- NonEmpty: (xs: List a)-> (x:a) -> ListLast (xs ++ [x])
-- describeHelper : (input: List Int) -> (form: ListLast input) -> String
-- describeHelper [] Empty = ?describeHelper_rhs_1
-- describeHelper (xs ++ [x]) (NonEmpty xs x) = ?describeHelper_rhs_2
-- describeHelper [] form = ?describeHelper_rhs_1
-- describeHelper (_ :: _) Empty impossible
-- describeHelper (_ :: _) (NonEmpty _ _) impossible
--describeHelper [] Empty = "Empty"
--describeHelper (xs ++ [x]) (NonEmpty xs x) = ?hole
-- total
-- listLast : (xs: List a) -> ListLast xs
-- listLast [] = Empty
-- listLast (x :: xs) = case listLast xs of
-- Empty => NonEmpty [] x
-- NonEmpty ys y => NonEmpty (x::ys) y
-- describeListEnd : List Int -> String
-- describeListEnd input with (listLast input)
-- describeListEnd [] | Empty = "test"
-- describeListEnd (xs ++ [x]) | NonEmpty xs x = "test2"
-- recursive views
--data SnocList ty = Empty | Snoc (SnocList ty) ty
--reverseSnoc : SnocList ty -> List ty
--reverseSnoc Empty = []
--reverseSnoc (Snoc xs x) = x :: reverseSnoc xs
-- View of a list "from the right": a list is either empty or some prefix
-- `xs` followed by a final element `x`.
data SnocList : List a -> Type where
  Empty: SnocList []
  Snoc: (rec: SnocList xs) -> SnocList (xs ++ [x])
-- Extend a SnocList view of `input` across `rest`, one element at a time.
-- The rewrites re-associate list append so the indexed types line up.
snocListHelp : (snoc: SnocList input) -> (rest: List a) -> SnocList (input ++ rest)
snocListHelp {input = input} snoc [] = rewrite appendNilRightNeutral input in snoc
snocListHelp {input = input} snoc (x :: xs) =
  rewrite appendAssociative input [x] xs in
    snocListHelp (Snoc snoc {x}) xs
-- Compute the SnocList view of any list, starting from the empty view.
snocList : (input : List a) -> SnocList input
snocList xs = snocListHelp Empty xs
-- Reverse by consuming the SnocList view: the last element comes first.
myReverseHelper : (input: List a) -> SnocList input -> List a
myReverseHelper [] Empty = []
myReverseHelper (xs ++ [x]) (Snoc rec) = x :: myReverseHelper xs rec
-- Reverse a list via its SnocList view.
myReverse: List a -> List a
myReverse input = myReverseHelper input (snocList input)
--myReverse : List a -> List a
--myReverse xs = myReverseHelper xs (snocList input)
-- myReverseHelper (xs ++ [x]) (Snoc rec) = ?myReverseHelper_2
|
//==============================================================================
// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef BOOST_SIMD_OPERATOR_FUNCTIONS_FAST_DIVIDES_HPP_INCLUDED
#define BOOST_SIMD_OPERATOR_FUNCTIONS_FAST_DIVIDES_HPP_INCLUDED
#include <boost/simd/include/functor.hpp>
#include <boost/dispatch/include/functor.hpp>
namespace boost { namespace simd
{
namespace tag
{
/*!
@brief fast_divides generic tag
Represents the fast_divides function in generic contexts.
@par Models:
Hierarchy
**/
    /// Tag type marking fast_divides as an elementwise operation;
    /// dispatch() forwards to the ADL-found dispatching_fast_divides_.
    struct fast_divides_ : ext::elementwise_<fast_divides_>
    {
      /// @brief Parent hierarchy
      typedef ext::elementwise_<fast_divides_> parent;
      template<class... Args>
      static BOOST_FORCEINLINE BOOST_AUTO_DECLTYPE dispatch(Args&&... args)
      BOOST_AUTO_DECLTYPE_BODY( dispatching_fast_divides_( ext::adl_helper(), static_cast<Args&&>(args)... ) )
    };
}
namespace ext
{
    /// Fallback dispatcher for unrecognised sites/types: yields the
    /// generic dispatcher for the fast_divides_ tag.
    template<class Site, class... Ts>
    BOOST_FORCEINLINE generic_dispatcher<tag::fast_divides_, Site> dispatching_fast_divides_(adl_helper, boost::dispatch::meta::unknown_<Site>, boost::dispatch::meta::unknown_<Ts>...)
    {
      return generic_dispatcher<tag::fast_divides_, Site>();
    }
    /// Primary template for fast_divides implementations; specialised elsewhere.
    template<class... Args>
    struct impl_fast_divides_;
}
/*!
@par Semantic:
For every parameters of types respectively T0, T1:
@code
T0 r = fast_divides(a0,a1);
@endcode
is similar to:
@code
T0 r = a0*fast_rec(a1);
@endcode
@par Alias:
@c fast_div, @c fast_rdiv
@see @funcref{divides}, @funcref{rec}, @funcref{fast_rec}, @funcref{divs}, @funcref{divfloor},
@funcref{divceil}, @funcref{divround}, @funcref{divround2even}, @funcref{divfix}
@param a0
@param a1
@return a value of the same type as the second parameter
**/
  /// Generate the user-facing binary functions from the fast_divides_ tag.
  BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::fast_divides_ , fast_divides , 2)
  /// INTERNAL ONLY
  BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::fast_divides_ , fast_div , 2)
  /// INTERNAL ONLY
  BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::fast_divides_ , fast_rdiv , 2)
} }
#endif
|
theory Rushby
imports Main
begin
(* The domain of beings, with greater/less/equal comparison relations. *)
typedecl u -- "U_beings"
consts "g" :: "u \<Rightarrow> u \<Rightarrow> bool" (infixr "\<^bold>>" 54)
consts "k" :: "u \<Rightarrow> u \<Rightarrow> bool" (infixr "\<^bold><" 54)
consts "e" :: "u \<Rightarrow> u \<Rightarrow> bool" (infixr "\<^bold>=" 54)

(* Trichotomy of the comparison relations. *)
abbreviation Greater0 where
  "Greater0 \<equiv> \<forall> x y. x \<^bold>> y \<or> y \<^bold>> x \<or> x \<^bold>= y"

(* God: a being than which nothing is greater. *)
abbreviation God :: "u\<Rightarrow>bool" ("G") where
  "G \<equiv> \<lambda>x. \<not>(\<exists> y. (y \<^bold>> x))"

(* re x: x exists in reality (per Rushby's formalisation of Anselm). *)
consts "re" :: "u \<Rightarrow> bool"

(* Some god-like being exists (in the understanding). *)
abbreviation ExUnd where
  "ExUnd \<equiv> \<exists> x . God x"

(* Anything not existing in reality is exceeded by something. *)
abbreviation Greater1 where
  "Greater1 \<equiv> \<forall> x . (\<not>re x) \<longrightarrow> (\<exists> y . y \<^bold>> x)"
(* If something god-like exists and every non-real being is exceeded by
   something, then a god-like being exists in reality. *)
theorem "God!":
  assumes ExUnd
  assumes "Greater1"
  shows "\<exists> x. (G x \<and> re x)"
  (*sledgehammer*)
  using assms by blast
(* Any real being is greater than any non-real one. *)
abbreviation Greater2 where
  "Greater2 \<equiv> (\<forall> x y. (re x \<and> \<not> re y) \<longrightarrow> (x \<^bold>> y))"

(* Something exists in reality. *)
abbreviation "Ex_re" where
  "Ex_re \<equiv> \<exists> x. re x"

(* Variant of the argument using Greater2 plus non-emptiness of reality. *)
theorem "God!2":
  assumes ExUnd
  assumes "Greater2" and "Ex_re"
  shows "\<exists> x. (G x \<and> re x)"
  (*sledgehammer*)
  using assms by blast

abbreviation "P1" where
  "P1 \<equiv> \<exists> x. G x"

(* P1 is ExUnd restated, so it follows immediately. *)
theorem "P1!":
  assumes ExUnd
  shows P1 using assms by -
(* P F: F is a greater-making property. *)
consts "P" :: "(u \<Rightarrow> bool) \<Rightarrow> bool" -- "greater making property"

abbreviation "P_re" where
  "P_re \<equiv> P re"

(* subsetP FF: every property in the collection FF is greater-making. *)
abbreviation "subsetP" where
  "subsetP \<equiv> \<lambda> FF. \<forall>x. FF x \<longrightarrow> P x"

(* x > y iff x has every greater-making property y has, plus at least one more. *)
abbreviation "Greater3" where
  "Greater3 \<equiv> \<forall> x y . x \<^bold>> y \<longleftrightarrow> ((\<forall>F. P F \<longrightarrow> (F y \<longrightarrow> F x)) \<and> (\<exists>F. P F \<and> (F x \<and> \<not>F y)))"

(* Every collection of greater-making properties is exactly realised by a being. *)
abbreviation "Realization" where
  "Realization \<equiv> \<forall> FF. subsetP FF \<longrightarrow> (\<exists>x. \<forall>f. P f \<longrightarrow> (f x \<longleftrightarrow> FF f))"
  (* "Realization \<equiv> \<forall> FF \<subseteq> {x . P x} . \<exists>x. \<forall>f. f(x) \<longleftrightarrow> f \<in> FF"*)

(* aG: strictly greater than everything -- a strengthened notion of God. *)
abbreviation aG where
  "aG \<equiv> \<lambda>x . \<forall>y. x \<^bold>> y"

abbreviation ExUndA where
  "ExUndA \<equiv> \<exists>x. aG x"
(* With the property-based ordering: an aG being has every greater-making
   property, in particular real existence. *)
theorem "God!3":
  assumes P_re
  assumes Greater3
  assumes Realization
  assumes ExUndA
  shows "\<exists> x. (aG x \<and> re x)"
proof -
  from assms(4) have "\<exists>x. aG x" by -
  moreover {
    fix x
    assume gx: "aG x"
    then have "\<forall> y. (x \<^bold>> y)" by -
    (* unfold > to obtain that x has all greater-making properties *)
    from this assms(2) have "\<forall>y.(\<forall>F. P F \<longrightarrow> (F y \<longrightarrow> F x)) \<and> (\<exists>F. P F \<and> (F x \<and> \<not>F y))" by blast
    (* re is greater-making, hence x is real *)
    from this assms(1) have rx: "re x" by blast
    from gx rx have "aG x \<and> re x" by (rule conjI)
    hence "\<exists> x. (aG x \<and> re x)" by (rule exI)
  }
  ultimately show ?thesis by (rule exE)
qed
(* Generalisation of God!3: an aG being has *every* greater-making
   property f, not only real existence. *)
theorem "GodCanDoItAll":
  assumes P_re
  assumes Greater3
  assumes Realization
  assumes ExUndA
  shows "\<forall>f. P f \<longrightarrow> (\<exists>x. aG x \<and> f x)"
proof -
  {
    fix f
    assume "P f"
    from assms(4) have "\<exists>x. aG x" by -
    moreover {
      fix x
      assume gx: "aG x"
      from this assms(2) assms(3) have "\<forall> y. x \<^bold>> y" by blast
      (* x dominates everything, hence has all greater-making properties *)
      from this assms(2) have "\<forall>y.(\<forall>F. P F \<longrightarrow> (F y \<longrightarrow> F x)) \<and> (\<exists>F. P F \<and> (F x \<and> \<not>F y))" by blast
      from this assms(1) have rx: "f x" by blast
      from gx rx have "aG x \<and> f x" by (rule conjI)
      hence "\<exists> x. (aG x \<and> f x)" by (rule exI)
    }
    ultimately have "(\<exists>x. aG x \<and> f x)" by (rule exE)
  }
  thus ?thesis by blast
qed
(* D: distinguishing properties, axiomatised as a subclass of the
   greater-making ones. *)
consts D :: "(u \<Rightarrow> bool) \<Rightarrow> bool"
axiomatization where
  dsubstP: "\<forall>f. D f \<longrightarrow> P f"

(* do not further consider quasi id, because we have a different proof *)
(* quasi-identity: agreement on all non-distinguishing greater-making properties *)
abbreviation quasi_id where
  "quasi_id \<equiv> \<lambda>x y. \<forall>f. P f \<and> \<not>D f \<longrightarrow> f x = f y"

(* Weaker realization: some being has all properties in FF (not exactly FF). *)
abbreviation Realization_W where
  "Realization_W \<equiv> \<forall> FF. subsetP FF \<longrightarrow> (\<exists>x. \<forall>f. FF f \<longrightarrow> f x)"
end |
{-# OPTIONS --universe-polymorphism #-}
module Issue286 where
open import Common.Level
-- Booleans, bound to the built-ins so primitives can return them.
data Bool : Set where
  true false : Bool

{-# BUILTIN BOOL Bool #-}
{-# BUILTIN TRUE true #-}
{-# BUILTIN FALSE false #-}

-- Universe-polymorphic propositional equality.
data _≡_ {ℓ : Level} {A : Set ℓ} : A → A → Set ℓ where
  refl : {a : A} → a ≡ a

{-# BUILTIN EQUALITY _≡_ #-}

-- Erases an equality proof so it reduces to refl on definitionally equal endpoints.
primitive
  primEraseEquality : ∀ {a} {A : Set a} {x y : A} → x ≡ y → x ≡ y

{-# BUILTIN STRING String #-}

primitive
  primStringEquality : String → String → Bool

data Maybe (A : Set) : Set where
  just : A → Maybe A
  nothing : Maybe A
-- Decide string equality; on success the proof is a postulated, erased trustMe.
_≟_ : (s₁ s₂ : String) → Maybe (s₁ ≡ s₂)
s₁ ≟ s₂ with primStringEquality s₁ s₂
... | true = just (primEraseEquality trustMe)
  where postulate trustMe : _ ≡ _
... | false = nothing

-- Same decision procedure, matching on the result so the proof is a literal refl.
_≟′_ : (s₁ s₂ : String) → Maybe (s₁ ≡ s₂)
s₁ ≟′ s₂ with s₁ ≟ s₂
s ≟′ .s | just refl = just refl
_ ≟′ _ | nothing = nothing

test : Maybe ("" ≡ "")
test = "" ≟′ ""

-- The point of issue 286: the erased proof reduces, so test is just refl by computation.
ok : test ≡ just refl
ok = refl
|
[STATEMENT]
lemma HcompNml_Nml_Obj:
assumes "Nml t" and "Obj u" and "Src t = Trg u"
shows "t \<^bold>\<lfloor>\<^bold>\<star>\<^bold>\<rfloor> u = t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<^bold>\<lfloor>\<^bold>\<star>\<^bold>\<rfloor> u = t
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
Nml t
Obj u
Src t = Trg u
goal (1 subgoal):
1. t \<^bold>\<lfloor>\<^bold>\<star>\<^bold>\<rfloor> u = t
[PROOF STEP]
by (cases u, simp_all) |
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Global Matplotlib styling: inward tick marks, no top/right spines,
# and thicker axis lines and ticks for publication-quality figures.
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.major.width'] = 2
def get_combined_data(linelist1='linelist1.moog', linelist2='linelist2.moog'):
    """Read two MOOG line lists and merge them on (rounded) wavelength.

    Parameters
    ----------
    linelist1, linelist2 : str
        Paths to whitespace-separated line-list files with one header
        line and columns: wavelength, element, EP, loggf, EW.

    Returns
    -------
    pandas.DataFrame
        Outer merge of the two tables on wavelength (rounded to 0.01 so
        nearly-identical lines match), with '_1'/'_2' suffixes on the
        shared columns and a 'diff' column equal to EW_1 - EW_2 (NaN for
        lines present in only one list).
    """
    names = ('wavelength', 'element', 'EP', 'loggf', 'EW')
    # pd.read_table is deprecated; read_csv with a regex separator is equivalent.
    df1 = pd.read_csv(linelist1, sep=r'\s+', names=names, skiprows=1)
    df2 = pd.read_csv(linelist2, sep=r'\s+', names=names, skiprows=1)
    # Vectorized rounding instead of a Python-level list comprehension.
    df1['wavelength'] = df1['wavelength'].round(2)
    df2['wavelength'] = df2['wavelength'].round(2)
    df = pd.merge(df1, df2, how='outer', on='wavelength',
                  suffixes=('_1', '_2'))
    df['diff'] = df['EW_1'] - df['EW_2']
    return df
if __name__ == '__main__':
    df = get_combined_data()
    # Median and scatter of the EW differences; nan-aware because an outer
    # merge leaves NaN for lines present in only one list.
    m, s = np.nanmedian(df['diff']), np.nanstd(df['diff'])
    plt.figure()
    plt.plot(df['EW_1'], df['diff'], '.')
    # Mark the median and +/- one standard deviation across the EW range.
    plt.hlines([m, m+s, m-s], 4, 200)
    plt.xlabel(r'EW$_1$ [m$\AA$]')
    plt.ylabel(r'EW$_1$ - EW$_2$ [m$\AA$]')
    # plt.savefig('../linelist_comparison.pdf')
    plt.show()
|
#!/usr/bin/env Rscript
######## BEGIN FUNCTION BLOCK ##########
# Plot a log2-scaled bar chart of reads per exon count and write it to
# `outfile` (the .pdf/.png extension selects the graphics device).
#   d           : data frame whose second column is the exon count per read
#   outfile     : output path ending in .pdf or .png
#   input_width : line width for axes and bar borders
#   text_adjust : character expansion factor for axis text
make_image<-function(d,outfile,input_width,text_adjust) {
  # decide output type
  filex = substr(outfile,nchar(outfile)-2,nchar(outfile))
  if(filex=="pdf") {
    pdf(outfile,bg="#FFFFFF",height=4.5,width=7)
  } else if (filex=="png") {
    png(outfile,bg="#FFFFFF",width=480,height=240)
  } else {
    stop("Unsupported type for output file.\n",call.=FALSE)
  }
  recwid = input_width
  axcex = text_adjust
  par(oma=c(0.5,0.5,0.5,0.5))
  par(mar=c(4,5,2,0))
  # log2 transform with 0 -> 0 and 1 -> 1 so small counts remain plottable
  logtrans<-function(num) {
    if(num==0) {
      return(0)
    }
    if(num==1) {
      return(1)
    }
    return(log(num,2)+1)
  }
  # inverse of logtrans, used to label the y axis with raw counts
  untrans<-function(num) {
    if(num==0) {
      return(0)
    }
    if(num==1) {
      return(1)
    }
    return(2^(num-1))
  }
  # scaled negative-log transform; defined here but not used below
  neglogtrans<-function(num,lowest) {
    tymin = -1*log(lowest,2)
    return(-1*(-tymin+-1*log(num,2))/tymin)
  }
  # plot by exon distribution
  # find biggest bar height to size the y axis
  maxexon = 20
  biggest = 0
  for(i in seq(1,maxexon,1)) {
    currsize = length(d[d[,2]==i,1])
    if(currsize > biggest) { biggest = currsize }
  }
  # NOTE(review): this uses >= maxexon while the pooled bar below uses
  # > maxexon, so reads with exactly maxexon exons are counted in both
  # passes here -- confirm whether > was intended.
  currsize = length(d[d[,2]>=maxexon,1])
  if(currsize > biggest) { biggest = currsize }
  endspace = 4
  plot(1,type="n",xlim=c(0,maxexon+endspace),ylim=c(0,logtrans(biggest*2)+1),ylab="Number of Reads",bty="n",xlab="Exon Count",xaxt='n',yaxt='n',yaxs='i',cex.lab=axcex,yaxt='n')
  axispoints = seq(0,logtrans(biggest*2),1)
  axis(2,at=axispoints,labels=lapply(axispoints,untrans),cex.axis=axcex,lwd=recwid)
  axis(1,at=seq(1,maxexon,1),labels=seq(1,maxexon,1),cex.axis=axcex,lwd=recwid)
  mtext(paste(">",maxexon),side=1,at=maxexon+endspace-1.5,line=1,cex=axcex)
  # one bar per exon count from 1 to maxexon
  for(i in seq(1,maxexon)) {
    exon = length(d[d[,2]==i,1])
    rect(i+0.1-0.5,0,i+0.8-0.5,logtrans(exon),col="#777777",lwd=recwid)
  }
  # get the longest pooled: everything above maxexon in one final bar
  exon = length(d[d[,2]>maxexon,1])
  rect(maxexon+endspace-1+0.1-0.5,0,maxexon+endspace-1+0.8-0.5,logtrans(exon),col="#777777",lwd=recwid)
  dev.off()
}
# Command-line driver: <input (.tsv or .tsv.gz)> <output (.pdf|.png)>
#                      [line_width] [text_adjust]
args = commandArgs(trailingOnly=TRUE)
if(length(args)<2) {
  stop("Must supply input and output\n",call.=FALSE)
}
infile = args[1]
outfile = args[2]
# Open gzip-compressed inputs through a gzfile() connection and plain
# files directly. (The original branches were inverted: ".gz" inputs
# were read as plain text and plain inputs wrapped in gzfile().)
infilex = substr(infile,nchar(infile)-1,nchar(infile))
if(infilex=="gz") {
  d<-read.csv(gzfile(infile),sep="\t",header=FALSE)
} else {
  d<-read.csv(infile,sep="\t",header=FALSE)
}
# Optional numeric overrides; commandArgs() yields strings, so coerce.
input_width = 3
if(length(args) > 2) {
  input_width = as.numeric(args[3])
}
text_adjust = 1.25
if(length(args) > 3) {
  text_adjust = as.numeric(args[4])
}
make_image(d,outfile,input_width,text_adjust)
|
module EdgeTest
using Test
using ForneyLab: Interface, Edge, Variable, Interface, FactorNode, FactorGraph, currentGraph, addNode!, disconnect!, generateId
# Integration helper
# Minimal FactorNode implementation used to construct Interfaces in the tests.
mutable struct MockNode <: FactorNode
    id::Symbol
    interfaces::Vector{Interface}
    i::Dict{Symbol,Interface}

    function MockNode(; id=generateId(MockNode))
        # Register with the current graph, mirroring real node constructors.
        self = new(id, Interface[], Dict{Symbol,Interface}())
        addNode!(currentGraph(), self)
        return self
    end
end
@testset "Edge" begin
    # Edge should couple interfaces: both sides reference the edge, and
    # each interface records the other as its partner.
    FactorGraph()
    a = Interface(MockNode())
    b = Interface(MockNode())
    edge = Edge(Variable(), a, b)
    @test ===(edge.a, a)
    @test ===(edge.b, b)
    @test ===(a.edge, edge)
    @test ===(b.edge, edge)
    @test ===(a.partner, b)
    @test ===(b.partner, a)
end
@testset "disconnect!" begin
    FactorGraph()
    a = Interface(MockNode())
    b = Interface(MockNode())
    edge = Edge(Variable(), a, b)
    # disconnect! from 'a' side should decouple interfaces;
    # the remaining interface b is promoted to the edge's 'a' slot.
    disconnect!(edge, a)
    @test ===(edge.a, b)
    @test edge.b == nothing
    @test a.edge == nothing
    @test ===(b.edge, edge)
    @test a.partner == nothing
    @test b.partner == nothing

    FactorGraph()
    a = Interface(MockNode())
    b = Interface(MockNode())
    edge = Edge(Variable(), a, b)
    # disconnect! from 'b' side should decouple interfaces;
    # 'a' keeps its slot and only the 'b' slot is cleared.
    disconnect!(edge, b)
    @test ===(edge.a, a)
    @test edge.b == nothing
    @test ===(a.edge, edge)
    @test b.edge == nothing
    @test a.partner == nothing
    @test b.partner == nothing
end
end # module |
/**
* @file batch_zdotu_sub.c
*
* Part of API test for Batched BLAS routines.
*
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date
*
* @precisions normal z -> c d s
*
**/
#include <cblas.h>
#include "bblas.h"
#define COMPLEX
/* Batched unconjugated complex dot product: for each problem i,
 * dotu[i] = x[i]^T * y[i]. Zeroes the result array, validates the batch
 * arguments, then dispatches to the fixed-size kernel (all problems share
 * the first problem's n/incx/incy) or the variable-size kernel. Per-problem
 * status codes are written to info. */
void batch_zdotu_sub(
    const int *n,
    BBLAS_Complex64_t const * const *x,
    const int *incx,
    BBLAS_Complex64_t const * const *y,
    const int *incy,
    BBLAS_Complex64_t *dotu,
    const int batch_count, const enum BBLAS_OPTS batch_opts,
    int* info)
{
    /* Local variables */
    int first_index = 0;
    char func_name[15] = "batch_zdotu";

    /* Initialize the result accumulators (no-op when batch_count <= 0). */
    for (int batch_iter = 0; batch_iter < batch_count; batch_iter++)
    {
        dotu[batch_iter] = (BBLAS_Complex64_t)0.0;
    }

    /* Check input arguments.
     * NOTE(review): execution continues after reporting a negative
     * batch_count -- confirm whether an early return is intended. */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* Call fixed size code */
        batchf_zdotu_sub(
            n[first_index],
            x,
            incx[first_index],
            y,
            incy[first_index],
            dotu,
            batch_count, info);
    }
    else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Call variable size code */
        batchv_zdotu_sub(
            n,
            x,
            incx,
            y,
            incy,
            dotu,
            batch_count, info);
    }
    else
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef COMPLEX
|
! Driver: run each chunked-vs-single-call equivalence check for the
! frand123 generators and stop with a nonzero status on the first failure.
program testEquivalence
   use, intrinsic :: iso_c_binding
   use frand123
   implicit none

   ! state
   type( frand123State_t ) :: state
   ! didTestPass
   logical :: passed

   ! test frand123Double
   passed = equivalenceDouble( state )
   if( passed ) then
      write(*,*) 'Test frand123Double passed'
   else
      write(*,*) 'Test frand123Double failed'
      stop 1
   endif

   ! test frand123Single
   passed = equivalenceSingle( state )
   if( passed ) then
      write(*,*) 'Test frand123Single passed'
   else
      write(*,*) 'Test frand123Single failed'
      stop 1
   endif

   ! test frand123NormDouble
   passed = equivalenceNormDouble( state )
   if( passed ) then
      write(*,*) 'Test frand123NormDouble passed'
   else
      write(*,*) 'Test frand123NormDouble failed'
      stop 1
   endif

   ! test frand123NormSingle
   passed = equivalenceNormSingle( state )
   if( passed ) then
      write(*,*) 'Test frand123NormSingle passed'
   else
      write(*,*) 'Test frand123NormSingle failed'
      stop 1
   endif

   ! test frand123Integer64
   passed = equivalenceInteger64( state )
   if( passed ) then
      write(*,*) 'Test frand123Integer64 passed'
   else
      write(*,*) 'Test frand123Integer64 failed'
      stop 1
   endif

   ! test frand123Integer32
   passed = equivalenceInteger32( state )
   if( passed ) then
      write(*,*) 'Test frand123Integer32 passed'
   else
      write(*,*) 'Test frand123Integer32 failed'
      stop 1
   endif

contains
! test equivalence of generating 10 x 2 doubles and 1 x 20 doubles
   ! Returns .true. when 10 calls producing 2 doubles each reproduce the
   ! stream of one call producing 20 doubles from the same seeded state
   ! (tolerance 1e-15); prints any mismatching entries.
   logical function equivalenceDouble( state )
      implicit none
      type( frand123State_t ), intent( inout ) :: state
      integer( kind = c_int64_t ), dimension( 2 ) :: seed
      real( kind = c_double ), dimension( 20 ) :: r10x2, r1x20
      integer :: i
      ! overwrite in case of failure
      equivalenceDouble = .true.
      ! generate random numbers starting from same state
      seed = (/ 1, 2 /)
      call frand123Init( state, 4, 5, seed )
      do i = 1, 10
         call frand123Double( state, r10x2( 2*i-1:2*i ) )
      enddo
      call frand123Init( state, 4, 5, seed )
      call frand123Double( state, r1x20 )
      ! compare random numbers
      do i = 1, 20
         if( abs( r10x2( i ) - r1x20( i ) ) .gt. 1e-15 ) then
            write(*, '( "Entries ", I2, ": absolute difference: ", ES11.4 )' ) &
               i, abs( r10x2( i ) - r1x20( i ) )
            equivalenceDouble = .false.
         endif
      enddo
   end function equivalenceDouble
! test equivalence of generating 5 x 4 singles and 1 x 20 singles
   ! Returns .true. when 5 calls producing 4 singles each reproduce the
   ! stream of one call producing 20 singles from the same seeded state
   ! (tolerance 1e-7); prints any mismatching entries.
   logical function equivalenceSingle( state )
      implicit none
      type( frand123State_t ), intent( inout ) :: state
      integer( kind = c_int64_t ), dimension( 2 ) :: seed
      real( kind = c_float ), dimension( 20 ) :: r5x4, r1x20
      integer :: i
      ! overwrite in case of failure
      equivalenceSingle = .true.
      ! generate random numbers starting from same state
      seed = (/ 1, 2 /)
      call frand123Init( state, 4, 5, seed )
      do i = 1, 5
         call frand123Single( state, r5x4( 4*i-3:4*i ) )
      enddo
      call frand123Init( state, 4, 5, seed )
      call frand123Single( state, r1x20 )
      ! compare random numbers
      do i = 1, 20
         if( abs( r5x4( i ) - r1x20( i ) ) .gt. 1e-7 ) then
            write(*, '( "Entries ", I2, ": absolute difference: ", ES11.4 )' ) &
               i, abs( r5x4( i ) - r1x20( i ) )
            equivalenceSingle = .false.
         endif
      enddo
   end function equivalenceSingle
! test equivalence of generating 10 x 2 normal doubles and 1 x 20 normal doubles
   ! Same chunked-vs-single check for the normally distributed double
   ! generator, with distribution parameters 2 and 3 (presumably
   ! mean/stddev -- confirm against the frand123 interface).
   logical function equivalenceNormDouble( state )
      implicit none
      type( frand123State_t ), intent( inout ) :: state
      integer( kind = c_int64_t ), dimension( 2 ) :: seed
      real( kind = c_double ), dimension( 20 ) :: r10x2, r1x20
      integer :: i
      ! overwrite in case of failure
      equivalenceNormDouble = .true.
      ! generate random numbers starting from same state
      seed = (/ 1, 2 /)
      call frand123Init( state, 4, 5, seed )
      do i = 1, 10
         call frand123NormDouble( state, 2.d0, 3.d0, r10x2( 2*i-1:2*i ) )
      enddo
      call frand123Init( state, 4, 5, seed )
      call frand123NormDouble( state, 2.d0, 3.d0, r1x20 )
      ! compare random numbers
      do i = 1, 20
         if( abs( r10x2( i ) - r1x20( i ) ) .gt. 1e-15 ) then
            write(*, '( "Entries ", I2, ": absolute difference: ", ES11.4 )' ) &
               i, abs( r10x2( i ) - r1x20( i ) )
            equivalenceNormDouble = .false.
         endif
      enddo
   end function equivalenceNormDouble
! test equivalence of generating 5 x 4 normal singles and 1 x 20 normal singles
   ! Same chunked-vs-single check for the normally distributed single
   ! generator (parameters 2 and 3, tolerance 1e-7).
   logical function equivalenceNormSingle( state )
      implicit none
      type( frand123State_t ), intent( inout ) :: state
      integer( kind = c_int64_t ), dimension( 2 ) :: seed
      real( kind = c_float ), dimension( 20 ) :: r5x4, r1x20
      integer :: i
      ! overwrite in case of failure
      equivalenceNormSingle = .true.
      ! generate random numbers starting from same state
      seed = (/ 1, 2 /)
      call frand123Init( state, 4, 5, seed )
      do i = 1, 5
         call frand123NormSingle( state, 2., 3., r5x4( 4*i-3:4*i ) )
      enddo
      call frand123Init( state, 4, 5, seed )
      call frand123NormSingle( state, 2., 3., r1x20 )
      ! compare random numbers
      do i = 1, 20
         if( abs( r5x4( i ) - r1x20( i ) ) .gt. 1e-7 ) then
            write(*, '( "Entries ", I2, ": absolute difference: ", ES11.4 )' ) &
               i, abs( r5x4( i ) - r1x20( i ) )
            equivalenceNormSingle = .false.
         endif
      enddo
   end function equivalenceNormSingle
! test equivalence of generating 10 x 2 64-bit integers and 1 x 20 64-bit integers
   ! Same chunked-vs-single check for 64-bit integers; integers must
   ! match exactly, so compare with .ne. instead of a tolerance.
   logical function equivalenceInteger64( state )
      implicit none
      type( frand123State_t ), intent( inout ) :: state
      integer( kind = c_int64_t ), dimension( 2 ) :: seed
      integer( kind = c_int64_t ), dimension( 20 ) :: r10x2, r1x20
      integer :: i
      ! overwrite in case of failure
      equivalenceInteger64 = .true.
      ! generate random numbers starting from same state
      seed = (/ 1, 2 /)
      call frand123Init( state, 4, 5, seed )
      do i = 1, 10
         call frand123Integer64( state, r10x2( 2*i-1:2*i ) )
      enddo
      call frand123Init( state, 4, 5, seed )
      call frand123Integer64( state, r1x20 )
      ! compare random numbers
      do i = 1, 20
         if( r10x2( i ) .ne. r1x20( i ) ) then
            write(*, '( "Entries ", I2, ": difference: ", I20 )' ) &
               i, r10x2( i ) - r1x20( i )
            equivalenceInteger64 = .false.
         endif
      enddo
   end function equivalenceInteger64
! test equivalence of generating 5 x 4 32-bit integers and 1 x 20 32-bit integers
   ! Same chunked-vs-single check for 32-bit integers (exact comparison).
   logical function equivalenceInteger32( state )
      implicit none
      type( frand123State_t ), intent( inout ) :: state
      integer( kind = c_int64_t ), dimension( 2 ) :: seed
      integer( kind = c_int32_t ), dimension( 20 ) :: r5x4, r1x20
      integer :: i
      ! overwrite in case of failure
      equivalenceInteger32 = .true.
      ! generate random numbers starting from same state
      seed = (/ 1, 2 /)
      call frand123Init( state, 4, 5, seed )
      do i = 1, 5
         call frand123Integer32( state, r5x4( 4*i-3:4*i ) )
      enddo
      call frand123Init( state, 4, 5, seed )
      call frand123Integer32( state, r1x20 )
      ! compare random numbers
      do i = 1, 20
         if( r5x4( i ) .ne. r1x20( i ) ) then
            write(*, '( "Entries ", I2, ": difference: ", I20 )' ) &
               i, r5x4( i ) - r1x20( i )
            equivalenceInteger32 = .false.
         endif
      enddo
   end function equivalenceInteger32
end program testEquivalence
|
module Control.Monad.Dom
import public Control.Monad.Dom.DomIO
import public Control.Monad.Dom.Event
import public Control.Monad.Dom.Interface
|
State Before: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
⊢ (⨆ (n : ι), aeSeq hf p n) =ᵐ[μ] ⨆ (n : ι), f n State After: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
⊢ ↑↑μ {a | ¬(⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} = 0 Tactic: simp_rw [Filter.EventuallyEq, ae_iff, iSup_apply] State Before: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
⊢ ↑↑μ {a | ¬(⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} = 0 State After: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
h_ss : aeSeqSet hf p ⊆ {a | (⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a}
⊢ ↑↑μ {a | ¬(⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} = 0 Tactic: have h_ss : aeSeqSet hf p ⊆ { a : α | (⨆ i : ι, aeSeq hf p i a) = ⨆ i : ι, f i a } := by
intro x hx
congr
exact funext fun i => aeSeq_eq_fun_of_mem_aeSeqSet hf hx i State Before: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
h_ss : aeSeqSet hf p ⊆ {a | (⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a}
⊢ ↑↑μ {a | ¬(⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} = 0 State After: no goals Tactic: exact measure_mono_null (Set.compl_subset_compl.mpr h_ss) (measure_compl_aeSeqSet_eq_zero hf hp) State Before: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
⊢ aeSeqSet hf p ⊆ {a | (⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} State After: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
x : α
hx : x ∈ aeSeqSet hf p
⊢ x ∈ {a | (⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} Tactic: intro x hx State Before: ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
x : α
hx : x ∈ aeSeqSet hf p
⊢ x ∈ {a | (⨆ (i : ι), aeSeq hf p i a) = ⨆ (i : ι), f i a} State After: case e_s
ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
x : α
hx : x ∈ aeSeqSet hf p
⊢ (fun i => aeSeq hf p i x) = fun i => f i x Tactic: congr State Before: case e_s
ι : Sort u_2
α : Type u_3
β : Type u_1
γ : Type ?u.727906
inst✝³ : MeasurableSpace α
inst✝² : MeasurableSpace β
f : ι → α → β
μ : MeasureTheory.Measure α
p : α → (ι → β) → Prop
inst✝¹ : CompleteLattice β
inst✝ : Countable ι
hf : ∀ (i : ι), AEMeasurable (f i)
hp : ∀ᵐ (x : α) ∂μ, p x fun n => f n x
x : α
hx : x ∈ aeSeqSet hf p
⊢ (fun i => aeSeq hf p i x) = fun i => f i x State After: no goals Tactic: exact funext fun i => aeSeq_eq_fun_of_mem_aeSeqSet hf hx i |
with(LinearAlgebra):
with(Groebner):
# Exterior powers of matrices (implicitly using lexicographic ordering
# of the standard bases of exterior powers of R^n).
# k-th exterior power of a matrix A: the matrix of all k x k minors,
# rows/columns indexed by the k-subsets of row/column indices as produced
# by combinat[choose] (lexicographic order).
exterior_power := proc(k::nonnegint,A)
 local m,n,S,T,s,t;
 m,n := Dimension(A);
 S := combinat[choose](m,k);
 T := combinat[choose](n,k);
 Matrix([seq([seq(Determinant(SubMatrix(A,s,t)),t in T)],s in S)]);
end:
# If S is a subset of {1,..,n} then we have an associated shuffle
# permutation of {1,..,n}. The next function computes the signature.
# Signature of the shuffle permutation of {1..n} determined by a subset S:
# the permutation lists S in order followed by its complement in order.
# Computed as the sign of the Vandermonde-style product of differences.
shuffle_sgn := proc(n,S)
 local k,T,ST;
 k := nops(S);
 T := sort([op({seq(i,i=1..n)} minus S)]);
 ST := [op(S),op(T)];
 return signum(mul(mul(ST[j]-ST[i],j=i+1..n),i=1..n));
end:
# Given a matrix A we get an induced map of Koszul algebras. Each
# Koszul algebra has a natural Frobenius form, with using which we
# get a kind of dual map in the opposite direction, which is computed
# by the function below. For a square matrix A, the composite of the
# k'th exterior power and the k'th koszul adjoint (in either order)
# is multiplication by det(A).
# Adjoint (with respect to the Frobenius forms) of the k-th exterior power
# of A; entries are complementary minors weighted by shuffle signs.
koszul_adjoint := proc(k::nonnegint,A)
 local m,n,M,N,S,T,Sc,Tc,Se,Te,s,t,i,j;
 m,n := Dimension(A);
 M := {seq(i,i=1..m)};
 N := {seq(j,j=1..n)};
 S := combinat[choose](m,k);
 T := combinat[choose](n,k);
 # complements of the index subsets, kept sorted
 Sc := map(s -> sort([op(M minus {op(s)})]),S);
 Tc := map(t -> sort([op(N minus {op(t)})]),T);
 # signs of the shuffle permutations [S, S^c] and [T, T^c]
 Se := [seq([op(S[i]),op(Sc[i])],i=1..nops(S))];
 Se := map(u -> signum(mul(mul(u[j]-u[i],j=i+1..m),i=1..m)),Se);
 Te := [seq([op(T[i]),op(Tc[i])],i=1..nops(T))];
 Te := map(u -> signum(mul(mul(u[j]-u[i],j=i+1..n),i=1..n)),Te);
 M := Matrix(nops(T),nops(S));
 for i from 1 to nops(S) do
  for j from 1 to nops(T) do
   M[j,i] := Determinant(SubMatrix(A,Sc[i],Tc[j])) * Se[i] * Te[j];
  od:
 od:
 return M;
end:
# A list u of length n in a ring R gives a differential on the
# Koszul algebra of R^n. The function below gives the matrix of
# this differential with respect to the standard bases of the
# k'th and (k-1)'th exterior powers of R^n.
# Matrix of the Koszul differential (contraction with u) from the k-th to
# the (k-1)-th exterior power of R^n, in the standard subset bases.
koszul_d := proc(k::posint,u)
 local n,S,T,s,t,i,j,MT,M;
 n := nops(convert(u,list));
 S := combinat[choose](n,k);
 T := combinat[choose](n,k-1);
 MT := table():
 for s in S do
  for t in T do
   MT[t,s] := 0;
  od:
  # dropping the i-th index of s contributes (-1)^(i-1) * u[s[i]]
  for i from 1 to k do
   t := [seq(s[j],j=1..i-1),seq(s[j],j=i+1..k)];
   MT[t,s] := (-1)^(i-1) * u[s[i]];
  od;
 od:
 M := Matrix([seq([seq(MT[T[i],S[j]],j=1..nops(S))],i=1..nops(T))]);
 return M;
end:
# koszul_p can be set equal to a prime number if we want to work in that
# characteristic.
koszul_p := infinity;
# Reduce mod koszul_p (symmetric residues) when it is finite; identity otherwise.
koszul_pmod := (x) -> `if`(koszul_p=infinity,x,mods(x,koszul_p)):
# The function below sets up a number of global variables. The
# original v can be supplied as a list. Then v_vec will be the corresponding
# row vector. Also u_vec will be the vector of variables x[i], and w_vec
# will be the vector of powers x[i]^h, where h is minimal such that x[i]^h
# lies in I = (v[1],...,v[n]) for all i. Next, A and B will be matrices with
# u_vec = v_vec.A and v_vec = w_vec.B, which witnesses the inclusions
# (x[1]^(h),...,x[n]^(h)) <= I <= (x[1],...,x[n]). These matrices induce
# morphisms of Koszul complexes alpha : K(u) ->K(v) and beta: K(v)->K(w). It
# is easy to understand K(u) and K(w) in particular, they only have homology
# in degree zero. All three complexes can be regarded as commutative
# Frobenius algebras in the category of chain complexes, so there are adjoint
# morphisms alpha^! : K(v) -> K(u) and beta^! : K(w) -> K(v). The idea is
# to use these to show that K(v) also has homology concentrated in degree zero.
# Populate the global state described in the comment above from a list v
# of polynomials: the vectors u_vec/v_vec/w_vec, socle generators,
# Groebner data (Iv, Mv, Nv) and the witnessing matrices A and B with
# u_vec = v_vec.A and v_vec = w_vec.B. Errors if some variable is not
# nilpotent modulo the ideal generated by v.
koszul_setup := proc(v)
 global n,u_vec,v_vec,w_vec,x_vec,x_vars,Iv,Mv,Nv,
   soc_u,soc_v,soc_w,A,B,p;
 local a,b,c,i,h,v0,v1,x0,x1,char;
 n := nops(v);
 v_vec := Transpose(Vector(v));
 x_vec := Transpose(<seq(x[i],i=1..n)>);
 w_vec := x_vec;
 soc_w := 1;
 x_vars := tdeg(seq(x[i],i=1..n));
 # B[i,j]: coefficient of x[i] extracted by successively setting variables to 0
 a := table();
 v0 := v_vec;
 v1 := v_vec;
 for i from n to 1 by -1 do
  v0 := v1;
  v1 := subs(x[i]=0,v1);
  a[i] := koszul_pmod((v0 - v1)/x[i]);
 od;
 B := Matrix([seq(convert(a[i],list),i=1..n)]);
 soc_v := koszul_pmod(Determinant(B));
 # Groebner basis of (v) with conversion matrix Mv; Nv is normal form mod Iv
 char := `if`(koszul_p=infinity,NULL,characteristic = koszul_p);
 Iv,Mv := Basis(convert(v,list),x_vars,output=extended,char);
 Mv := Transpose(Matrix(Mv));
 Nv := (z) -> koszul_pmod(NormalForm(z,Iv,x_vars,char));
 # find the smallest h with x[i]^h in Iv for all i (bail out at 100)
 h := 1;
 x0 := map(Nv,convert(x_vec,list));
 x1 := x0;
 while h < 100 and x1 <> [0$n] do
  h := h+1;
  x1 := map(Nv,x0 *~ x1);
 od;
 if h >= 100 then
  error("Variables are not nilpotent mod given ideal");
 fi;
 u_vec := Transpose(Vector([seq(x[i]^h,i=1..n)]));
 soc_u := mul(x[i]^(h-1),i=1..n);
 # express each x[i]^h in terms of v via the NormalForm certificate
 a := NULL;
 b := NULL;
 for i from 1 to n do
  unassign('b');
  c := NormalForm(x[i]^h,Iv,x_vars,b,char);
  a := a,b;
 od;
 A := map(expand,Mv . Transpose(Matrix([a])));
 NULL;
end:
|
# copied from library carstm
stmv_hyperparameters = function( reference_sd, alpha=0.5, reference_mean=0 ) {
  # Generic PC ("penalised complexity") priors for INLA, scaled by the SD of
  # the data. param = c(u, alpha) throughout: P(sigma > u) = alpha;
  # see inla.doc("pc.prec").

  # shared pc.prec spec reused by the iid, rw2, ar1 and bym2 precision terms
  pc_prec = list(
    prior = "pc.prec",   # exponential decay on the SD scale
    param = c(reference_sd, alpha)
  )

  # pc.cor0: base model rho = 0; param = c(u, alpha) with u = sqrt(1 - rho),
  # rho = 0.5 here, so 10% probability weight on rho > 0.5 -- forces smooth
  # fields unless the data strongly demand autocorrelation.
  pc_rho = list(
    prior = "pc.cor0",
    param = c(sqrt(1 - 0.5), 0.1)
  )

  hyper = list(
    iid = list( prec = pc_prec ),
    # intercept mean informative, remaining fixed effects marginally diffuse;
    # see inla.set.control.fixed.default() for the defaults being overridden
    fixed = list(
      mean.intercept = reference_mean,
      prec.intercept = 1e-3,
      mean = 0,
      prec = 1e-2
    ),
    rw2 = list( prec = pc_prec ),
    ar1 = list( prec = pc_prec, rho = pc_rho ),
    # naming convention differs in group models: must be one of
    # theta / theta1 / rho / logit correlation
    ar1_group = list( rho = pc_rho ),
    # bym2: phi is the proportion of variance that is spatial;
    # param = c(phi = 0.5, alpha = 0.5), see bottom of inla.doc("bym2")
    bym2 = list(
      prec = pc_prec,
      phi = list(
        prior = "pc",
        param = c(0.5, 0.5)
      )
    )
  )
  return(hyper)
}
|
State Before: ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y State After: case pos
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y
case neg
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : ¬x ∈ support f
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y Tactic: by_cases hx : x ∈ f.support State Before: case pos
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y State After: case pos.intro.intro
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
k : ℕ
hk : k < card (support (cycleOf f x))
hk' : ↑(f ^ k) x = y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y Tactic: obtain ⟨k, hk, hk'⟩ := h.exists_pow_eq_of_mem_support hx State Before: case pos.intro.intro
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
k : ℕ
hk : k < card (support (cycleOf f x))
hk' : ↑(f ^ k) x = y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y State After: case pos.intro.intro.zero
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y
case pos.intro.intro.succ
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
k : ℕ
hk : Nat.succ k < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.succ k) x = y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y Tactic: cases' k with k State Before: case pos.intro.intro.zero
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y State After: case pos.intro.intro.zero.refine'_1
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ 0 < card (support (cycleOf f x))
case pos.intro.intro.zero.refine'_2
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ ↑(f ^ card (support (cycleOf f x))) x = y Tactic: refine' ⟨(f.cycleOf x).support.card, _, self_le_add_right _ _, _⟩ State Before: case pos.intro.intro.zero.refine'_1
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ 0 < card (support (cycleOf f x)) State After: case pos.intro.intro.zero.refine'_1
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ cycleOf f x ≠ 1 Tactic: refine' zero_lt_one.trans (one_lt_card_support_of_ne_one _) State Before: case pos.intro.intro.zero.refine'_1
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ cycleOf f x ≠ 1 State After: no goals Tactic: simpa using hx State Before: case pos.intro.intro.zero.refine'_2
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.zero) x = y
⊢ ↑(f ^ card (support (cycleOf f x))) x = y State After: case pos.intro.intro.zero.refine'_2
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : x = y
⊢ ↑(f ^ card (support (cycleOf f x))) x = y Tactic: simp only [Nat.zero_eq, pow_zero, coe_one, id_eq] at hk' State Before: case pos.intro.intro.zero.refine'_2
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
hk' : x = y
⊢ ↑(f ^ card (support (cycleOf f x))) x = y State After: case pos.intro.intro.zero.refine'_2
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x : α
f : Perm α
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
h : SameCycle f x x
⊢ ↑(f ^ card (support (cycleOf f x))) x = x Tactic: subst hk' State Before: case pos.intro.intro.zero.refine'_2
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x : α
f : Perm α
hx : x ∈ support f
hk : Nat.zero < card (support (cycleOf f x))
h : SameCycle f x x
⊢ ↑(f ^ card (support (cycleOf f x))) x = x State After: no goals Tactic: rw [← (isCycle_cycleOf _ <| mem_support.1 hx).orderOf, ← cycleOf_pow_apply_self,
pow_orderOf_eq_one, one_apply] State Before: case pos.intro.intro.succ
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
k : ℕ
hk : Nat.succ k < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.succ k) x = y
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y State After: no goals Tactic: exact ⟨k + 1, by simp, Nat.le_succ_of_le hk.le, hk'⟩ State Before: ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : x ∈ support f
k : ℕ
hk : Nat.succ k < card (support (cycleOf f x))
hk' : ↑(f ^ Nat.succ k) x = y
⊢ 0 < k + 1 State After: no goals Tactic: simp State Before: case neg
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : ¬x ∈ support f
⊢ ∃ i x_1 x_2, ↑(f ^ i) x = y State After: case neg
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : ¬x ∈ support f
⊢ ↑(f ^ 1) x = y Tactic: refine' ⟨1, zero_lt_one, by simp, _⟩ State Before: case neg
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : ¬x ∈ support f
⊢ ↑(f ^ 1) x = y State After: case neg.intro
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x : α
f : Perm α
hx : ¬x ∈ support f
k : ℤ
⊢ ↑(f ^ 1) x = ↑(f ^ k) x Tactic: obtain ⟨k, rfl⟩ := h State Before: case neg.intro
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x : α
f : Perm α
hx : ¬x ∈ support f
k : ℤ
⊢ ↑(f ^ 1) x = ↑(f ^ k) x State After: case neg.intro
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x : α
f : Perm α
hx : ↑f x = x
k : ℤ
⊢ ↑(f ^ 1) x = ↑(f ^ k) x Tactic: rw [not_mem_support] at hx State Before: case neg.intro
ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x : α
f : Perm α
hx : ↑f x = x
k : ℤ
⊢ ↑(f ^ 1) x = ↑(f ^ k) x State After: no goals Tactic: rw [pow_apply_eq_self_of_apply_eq_self hx, zpow_apply_eq_self_of_apply_eq_self hx] State Before: ι : Type ?u.2618181
α : Type u_1
β : Type ?u.2618187
inst✝¹ : DecidableEq α
inst✝ : Fintype α
f✝ g : Perm α
x y : α
f : Perm α
h : SameCycle f x y
hx : ¬x ∈ support f
⊢ 1 ≤ card (support (cycleOf f x)) + 1 State After: no goals Tactic: simp |
[STATEMENT]
lemma beta_preserves_typ_of: "typ_of r = Some T \<Longrightarrow> r \<rightarrow>\<^sub>\<beta> s \<Longrightarrow> typ_of s = Some T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>typ_of r = Some T; r \<rightarrow>\<^sub>\<beta> s\<rbrakk> \<Longrightarrow> typ_of s = Some T
[PROOF STEP]
by (metis beta_preserves_typ_of1 typ_of_def) |
(* Title: HOL/MicroJava/BV/Typing_Framework_JVM.thy
Author: Tobias Nipkow, Gerwin Klein
Copyright 2000 TUM
*)
section \<open>The Typing Framework for the JVM \label{sec:JVM}\<close>
theory Typing_Framework_JVM
imports "../DFA/Abstract_BV" JVMType EffectMono BVSpec
begin
(* Step function of the JVM bytecode verifier: lifts the per-instruction
   applicability check "app" and effect "eff" to a step_type over the
   error-lifted state set via err_step. *)
definition exec :: "jvm_prog \<Rightarrow> nat \<Rightarrow> ty \<Rightarrow> exception_table \<Rightarrow> instr list \<Rightarrow> JVMType.state step_type" where
"exec G maxs rT et bs ==
err_step (size bs) (\<lambda>pc. app (bs!pc) G maxs rT pc et) (\<lambda>pc. eff (bs!pc) G pc et)"
(* Optional (stack, registers) states: stacks of height at most maxs over the
   program's types, paired with maxr error-lifted register types. *)
definition opt_states :: "'c prog \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> (ty list \<times> ty err list) option set" where
"opt_states G maxs maxr \<equiv> opt (\<Union>{list n (types G) |n. n \<le> maxs} \<times> list maxr (err (types G)))"
subsection \<open>Executability of @{term check_bounded}\<close>
(* list_all'_rec P n xs: P holds for every element of xs, where each element
   is paired with its position offset by n (executable worker for list_all'). *)
primrec list_all'_rec :: "('a \<Rightarrow> nat \<Rightarrow> bool) \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> bool"
where
"list_all'_rec P n [] = True"
| "list_all'_rec P n (x#xs) = (P x n \<and> list_all'_rec P (Suc n) xs)"
(* list_all' P xs: P holds for every element of xs together with its index. *)
definition list_all' :: "('a \<Rightarrow> nat \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> bool" where
"list_all' P xs \<equiv> list_all'_rec P 0 xs"
(* Characterisation of the worker: the accumulator n only offsets the index. *)
lemma list_all'_rec:
"list_all'_rec P n xs = (\<forall>p < size xs. P (xs!p) (p+n))"
apply (induct xs arbitrary: n)
apply auto
apply (case_tac p)
apply auto
done
(* Logical characterisation of list_all', registered as a default iff rule. *)
lemma list_all' [iff]:
"list_all' P xs = (\<forall>n < size xs. P (xs!n) n)"
by (unfold list_all'_def) (simp add: list_all'_rec)
subsection \<open>Connecting JVM and Framework\<close>
(* The executable bound check implies the framework's boundedness predicate:
   all successor program counters computed by eff stay within the method. *)
lemma check_bounded_is_bounded:
"check_bounded ins et \<Longrightarrow> bounded (\<lambda>pc. eff (ins!pc) G pc et) (length ins)"
by (unfold bounded_def) (blast dest: check_boundedD)
lemma special_ex_swap_
(* not_None_eq as a default iff rule interferes with the script below,
   so it is disabled for the duration of this proof. *)
lemmas [iff del] = not_None_eq
(* Type preservation: for well-formed programs, the step function "exec"
   maps states in the verifier's state set to states in the same set.
   The long apply script below discharges one subgoal per instruction kind. *)
theorem exec_pres_type:
"wf_prog wf_mb S \<Longrightarrow>
pres_type (exec S maxs rT et bs) (size bs) (states S maxs maxr)"
apply (unfold exec_def JVM_states_unfold)
apply (rule pres_type_lift)
apply clarify
apply (case_tac s)
apply simp
apply (drule effNone)
apply simp
apply (simp add: eff_def xcpt_eff_def norm_eff_def)
apply (case_tac "bs!p")
apply clarsimp
apply (drule listE_nth_in, assumption)
apply fastforce
apply (fastforce simp add: not_None_eq)
apply (fastforce simp add: not_None_eq typeof_empty_is_type)
apply clarsimp
apply (erule disjE)
apply fastforce
apply clarsimp
apply (rule_tac x="1" in exI)
apply fastforce
apply clarsimp
apply (erule disjE)
apply (fastforce dest: field_fields fields_is_type)
apply (simp add: match_some_entry image_iff)
apply (rule_tac x=1 in exI)
apply fastforce
apply clarsimp
apply (erule disjE)
apply fastforce
apply (simp add: match_some_entry image_iff)
apply (rule_tac x=1 in exI)
apply fastforce
apply clarsimp
apply (erule disjE)
apply fastforce
apply clarsimp
apply (rule_tac x=1 in exI)
apply fastforce
defer
apply fastforce
apply fastforce
apply clarsimp
apply (rule_tac x="n'+2" in exI)
apply simp
apply clarsimp
apply (rule_tac x="Suc (Suc (Suc (length ST)))" in exI)
apply simp
apply clarsimp
apply (rule_tac x="Suc (Suc (Suc (Suc (length ST))))" in exI)
apply simp
apply fastforce
apply fastforce
apply fastforce
apply fastforce
apply clarsimp
apply (erule disjE)
apply fastforce
apply clarsimp
apply (rule_tac x=1 in exI)
apply fastforce
apply (erule disjE)
apply clarsimp
apply (drule method_wf_mdecl, assumption+)
apply (clarsimp simp add: wf_mdecl_def wf_mhead_def)
apply fastforce
apply clarsimp
apply (rule_tac x=1 in exI)
apply fastforce
done
(* restore the default simp/iff setup disabled above *)
lemmas [iff] = not_None_eq
(* Unfolds the JVM state ordering into the generic lattice combinators
   (Opt/Product/Listn/Err) of the abstract DFA framework. *)
lemma sup_state_opt_unfold:
"sup_state_opt G \<equiv> Opt.le (Product.le (Listn.le (subtype G)) (Listn.le (Err.le (subtype G))))"
by (simp add: sup_state_opt_def sup_state_def sup_loc_def sup_ty_opt_def)
(* Applicability of an instruction is monotone w.r.t. the state ordering. *)
lemma app_mono:
"app_mono (sup_state_opt G) (\<lambda>pc. app (bs!pc) G maxs rT pc et) (length bs) (opt_states G maxs maxr)"
by (unfold app_mono_def lesub_def) (blast intro: EffectMono.app_mono)
(* Concatenation of fixed-length lists over a carrier set A. *)
lemma list_appendI:
"\<lbrakk>a \<in> list x A; b \<in> list y A\<rbrakk> \<Longrightarrow> a @ b \<in> list (x+y) A"
apply (unfold list_def)
apply (simp (no_asm))
apply blast
done
(* Membership of a mapped list in the fixed-length carrier. *)
lemma list_map [simp]:
"(map f xs \<in> list (length xs) A) = (f ` set xs \<subseteq> A)"
apply (unfold list_def)
apply simp
done
(* OK-images inside the error-lifted carrier. *)
lemma [iff]:
"(OK ` A \<subseteq> err B) = (A \<subseteq> B)"
apply (unfold err_def)
apply blast
done
(* Replicated elements stay inside the fixed-length carrier. *)
lemma [intro]:
"x \<in> A \<Longrightarrow> replicate n x \<in> list n A"
by (induct n, auto)
(* Pointwise list ordering with equal pc components implies the framework's
   lesubstep_type ordering on successor lists. *)
lemma lesubstep_type_simple:
"a <=[Product.le (op =) r] b \<Longrightarrow> a \<le>|r| b"
apply (unfold lesubstep_type_def)
apply clarify
apply (simp add: set_conv_nth)
apply clarify
apply (drule le_listD, assumption)
apply (clarsimp simp add: lesub_def Product.le_def)
apply (rule exI)
apply (rule conjI)
apply (rule exI)
apply (rule conjI)
apply (rule sym)
apply assumption
apply assumption
apply assumption
done
(* Monotonicity of the effect function: larger input states yield pointwise
   larger successor lists (normal successors and exception successors). *)
lemma eff_mono:
"\<lbrakk>p < length bs; s <=_(sup_state_opt G) t; app (bs!p) G maxs rT pc et t\<rbrakk>
\<Longrightarrow> eff (bs!p) G p et s \<le>|sup_state_opt G| eff (bs!p) G p et t"
apply (unfold eff_def)
apply (rule lesubstep_type_simple)
apply (rule le_list_appendI)
apply (simp add: norm_eff_def)
apply (rule le_listI)
apply simp
apply simp
apply (simp add: lesub_def)
apply (case_tac s)
apply simp
apply (simp del: split_paired_All split_paired_Ex)
apply (elim exE conjE)
apply simp
apply (drule eff'_mono, assumption)
apply assumption
apply (simp add: xcpt_eff_def)
apply (rule le_listI)
apply simp
apply simp
apply (simp add: lesub_def)
apply (case_tac s)
apply simp
apply simp
apply (case_tac t)
apply simp
apply (clarsimp simp add: sup_state_conv)
done
(* The JVM state ordering is a partial order for well-structured programs
   (needs acyclicity of the subclass relation). *)
lemma order_sup_state_opt:
"ws_prog G \<Longrightarrow> order (sup_state_opt G)"
by (unfold sup_state_opt_unfold) (blast dest: acyclic_subcls1 order_widen)
(* Monotonicity of the full step function, obtained by lifting app_mono and
   eff_mono through the err_step construction. *)
theorem exec_mono:
"ws_prog G \<Longrightarrow> bounded (exec G maxs rT et bs) (size bs) \<Longrightarrow>
mono (JVMType.le G maxs maxr) (exec G maxs rT et bs) (size bs) (states G maxs maxr)"
apply (unfold exec_def JVM_le_unfold JVM_states_unfold)
apply (rule mono_lift)
apply (fold sup_state_opt_unfold opt_states_def)
apply (erule order_sup_state_opt)
apply (rule app_mono)
apply assumption
apply clarify
apply (rule eff_mono)
apply assumption+
done
(* The JVM type lattice is a semilattice, assembled from the generic
   Err/Opt/Product/Listn semilattice constructions. *)
theorem semilat_JVM_slI:
"ws_prog G \<Longrightarrow> semilat (JVMType.sl G maxs maxr)"
apply (unfold JVMType.sl_def stk_esl_def reg_sl_def)
apply (rule semilat_opt)
apply (rule err_semilat_Product_esl)
apply (rule err_semilat_upto_esl)
apply (rule err_semilat_JType_esl, assumption+)
apply (rule err_semilat_eslI)
apply (rule Listn_sl)
apply (rule err_semilat_JType_esl, assumption+)
done
(* View the packaged semilattice as its (carrier, order, sup) triple. *)
lemma sl_triple_conv:
"JVMType.sl G maxs maxr ==
(states G maxs maxr, JVMType.le G maxs maxr, JVMType.sup G maxs maxr)"
by (simp (no_asm) add: states_def JVMType.le_def JVMType.sup_def)
(* Parameter types of any method declared in a well-formed program are
   well-formed types of that program (via wf_mdecl of the declaring class). *)
lemma is_type_pTs:
"\<lbrakk> wf_prog wf_mb G; (C,S,fs,mdecls) \<in> set G; ((mn,pTs),rT,code) \<in> set mdecls \<rbrakk>
\<Longrightarrow> set pTs \<subseteq> types G"
proof
assume "wf_prog wf_mb G"
"(C,S,fs,mdecls) \<in> set G"
"((mn,pTs),rT,code) \<in> set mdecls"
hence "wf_mdecl wf_mb G C ((mn,pTs),rT,code)"
by (rule wf_prog_wf_mdecl)
hence "\<forall>t \<in> set pTs. is_type G t"
by (unfold wf_mdecl_def wf_mhead_def) auto
moreover
fix t assume "t \<in> set pTs"
ultimately
have "is_type G t" by blast
thus "t \<in> types G" ..
qed
(* Lifting rule for method-level properties: if every method body satisfying P
   also satisfies Q (given well-formedness context: the method lookup, its
   class, and well-typed parameters), then a program well-formed w.r.t. P is
   well-formed w.r.t. Q. *)
lemma jvm_prog_lift:
assumes wf:
"wf_prog (\<lambda>G C bd. P G C bd) G"
assumes rule:
"\<And>wf_mb C mn pTs C rT maxs maxl b et bd.
wf_prog wf_mb G \<Longrightarrow>
method (G,C) (mn,pTs) = Some (C,rT,maxs,maxl,b,et) \<Longrightarrow>
is_class G C \<Longrightarrow>
set pTs \<subseteq> types G \<Longrightarrow>
bd = ((mn,pTs),rT,maxs,maxl,b,et) \<Longrightarrow>
P G C bd \<Longrightarrow>
Q G C bd"
shows
"wf_prog (\<lambda>G C bd. Q G C bd) G"
using wf
apply (unfold wf_prog_def wf_cdecl_def)
apply clarsimp
apply (drule bspec, assumption)
apply (unfold wf_cdecl_mdecl_def)
apply clarsimp
apply (drule bspec, assumption)
apply (frule methd [OF wf [THEN wf_prog_ws_prog]], assumption+)
apply (frule is_type_pTs [OF wf], assumption+)
apply clarify
apply (drule rule [OF wf], assumption+)
apply (rule HOL.refl)
apply assumption+
done
end
|
using ExtremeVertexDesigns
using Test
# Top-level test suite for the ExtremeVertexDesigns package; currently a
# placeholder generated by the package skeleton.
@testset "ExtremeVertexDesigns.jl" begin
# Write your tests here.
end
If $S$ is an open connected set in $\mathbb{R}^n$ for $n \geq 2$, and $T$ is a countable set, then $S - T$ is connected. |
lemma norm_triangle_eq: fixes x y :: "'a::real_inner" shows "norm (x + y) = norm x + norm y \<longleftrightarrow> norm x *\<^sub>R y = norm y *\<^sub>R x" |
#include "FfmpegOutput.h"
#include "QtComponents/QtPluginView.h"
#include "Media/MediaPad.h"
#include "Media/MediaSampleFactory.h"
#include "Media/ImageSample.h"
#include "Media/BufferSample.h"
#include "Media/EventSample.h"
#include "ffmpegResources.h"
#include "ffmpegControls.h"
#include <boost/foreach.hpp>
extern "C"
{
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}
using namespace Limitless;
//FormatDescription::FormatDescription(AVOutputFormat *avFormat):
//avFormat(avFormat)
//{
// name=avFormat->name;
// fullName=avFormat->long_name;
//}
// Registers the filter with the plugin framework and seeds the whitelist of
// container/muxer names this output is willing to expose.
FfmpegOutput::FfmpegOutput(std::string name, SharedMediaFilter parent):
    MediaAutoRegister(name, parent),
    m_firstSample(true),
    m_firstAudioSample(true),
    m_avOutputFormat(nullptr),
    m_codecContextId(0),
    m_audioCodecContextId(0),
    m_audioConnected(false)
{
    // libav muxer short-names considered usable by this plugin
    const char *acceptedFormats[]=
    {
        "f4v", "flv", "h264", "hls", "mp4", "mpeg", "mpeg2video",
        "ogg", "webm", "mpegts", "yuv4mpegpipe", "rtp", "rtsp"
    };

    for(const char *acceptedFormat:acceptedFormats)
        m_accessibleFormats.push_back(acceptedFormat);
}
// No explicit cleanup: members release themselves; any open muxer is closed
// when the end-of-stream event is processed (see processSampleThread).
FfmpegOutput::~FfmpegOutput()
{
}
// One-time setup: registers ffmpeg, queries the available output formats,
// publishes the user-facing attributes and creates the two sink pads.
// Returns true unconditionally.
bool FfmpegOutput::initialize(const Attributes &attributes)
{
FfmpegResources::instance().registerAll();
queryFormats();
// collect the names of the formats discovered by queryFormats()
Strings avFormats;
for(int i=0; i<m_avFormats.size(); ++i)
{
avFormats.push_back(m_avFormats[i].name);
}
//default mp4
//	std::string avFormatName="mp4";
//	std::string avFormatName="h264";
std::string avFormatName="f4v";
//	std::string avFormatName="flv";
// fall back to the first available format if the default is unsupported;
// NOTE(review): assumes avFormats is non-empty here
Strings::iterator iter=std::find(avFormats.begin(), avFormats.end(), avFormatName);
if(iter == avFormats.end())
avFormatName=avFormats[0];
m_enabled=false;
m_recording=false;
m_audioRecording=false;
addAttribute("enable", false);
addAttribute("outputFormat", avFormatName, avFormats);
addAttribute("outputLocation", "untitled.mp4");
//	addAttribute("outputLocation", "test.mp4");
//	addAttribute("outputLocation", "test.h264");
//	addAttribute("outputLocation", "rtmp://localhost/live/test");
//	addAttribute("outputLocation", "rtmp://localhost:1935/live/stream");
// cache sample-type ids used to dispatch in processSampleThread
m_bufferSampleId=MediaSampleFactory::getTypeId("BufferSample");
m_ffmpegPacketSampleId=MediaSampleFactory::getTypeId("FfmpegPacketSample");
m_eventSampleId=Limitless::MediaSampleFactory::getTypeId("EventSample");
m_videoSinkPad=addSinkPad("Sink", "[{\"mime\":\"video/*\"}, {\"mime\":\"image/*\"}]");
m_audioSinkPad=addSinkPad("AudioSink", "[{\"mime\":\"audio/raw\"}]");
//	addSinkPad("[{\"mime\":\"audio/*\"}]");
return true;
}
// UI hook: the Qt controls view is currently disabled, so no view is
// provided (an empty SharedPluginView is returned).
SharedPluginView FfmpegOutput::getView()
{
//	if(m_view == SharedPluginView())
//	{
//		FfmpegControls *controls=new FfmpegControls(this);
//		m_view.reset(new QtPluginView(controls));
//	}
//	return m_view;
return SharedPluginView();
}
// Pipeline entry point: samples are queued and consumed asynchronously by
// processSampleThread (the queue's worker, started in onReady).
bool FfmpegOutput::processSample(SharedMediaPad sinkPad, SharedMediaSample sample)
{
m_outputQueue.push_back(PadSample(sinkPad, sample));
return true;
}
// Worker for m_outputQueue. Handles end-of-stream events (finalising the
// muxer once both tracks have stopped) and writes encoded packets, lazily
// opening the output container on the first video keyframe.
void FfmpegOutput::processSampleThread(PadSample padSample)
{
SharedMediaPad sinkPad=padSample.first;
SharedMediaSample sample=padSample.second;
if(sample->isType(m_eventSampleId))
{
SharedEventSample eventSample=boost::dynamic_pointer_cast<EventSample>(sample);
if(eventSample->getEvent()==Limitless::Event::EndOf)
{
bool stoppedRecording=false;
// mark the track belonging to this pad as finished
if(sinkPad==m_videoSinkPad)
{
if(m_recording)
{
m_recording=false;
m_firstSample=true;
stoppedRecording=true;
}
}
else if(sinkPad==m_audioSinkPad)
{
if(m_audioRecording)
{
m_audioRecording=false;
m_firstAudioSample=true;
stoppedRecording=true;
}
}
// once both tracks are done, write the trailer and close the container
if(stoppedRecording && !m_recording && !m_audioRecording)
{
av_write_trailer(m_avFormatContext);
avio_close(m_avFormatContext->pb);
std::string location=attribute("outputLocation")->toString();
std::string message="Closed "+location;
Log::message("FfmpegOutput", message);
}
}
}
// only encoded ffmpeg packets are written; other sample types are dropped
if(!sample->isType(m_ffmpegPacketSampleId))
return;
int avError;
if(m_enabled || m_recording)
{
if(sinkPad==m_videoSinkPad)
{
if(m_firstSample)
{
SharedFfmpegPacketSample ffmpegPacketSample=boost::dynamic_pointer_cast<FfmpegPacketSample>(sample);
AVPacket *packet=ffmpegPacketSample->getPacket();
//wait for key frame to start
if(!(packet->flags&AV_PKT_FLAG_KEY))
return;
std::string location=attribute("outputLocation")->toString();
// (re)build the muxer context and streams for this recording
setupFormat();
if(m_audioConnected)
setupAudioFormat();
if(!(m_avOutputFormat->flags & AVFMT_NOFILE))
{
std::string message="Opening "+location;
Log::message("FfmpegOutput", message);
avError=avio_open(&m_avFormatContext->pb, location.c_str(), AVIO_FLAG_WRITE);
if(avError!=0)
{
message="Failed to open "+location;
Log::error("FfmpegOutput", message);
m_enabled=false;
return;
}
}
// NOTE(review): avformat_write_header's return value is ignored
avError=avformat_write_header(m_avFormatContext, NULL);
m_firstSample=false;
m_recording=true;
m_audioRecording=true;
// remember the first pts so all output timestamps are rebased to zero
m_startPts=packet->pts;
writeSample(sample);
}
else
{
writeSample(sample);
}
}
else if(sinkPad==m_audioSinkPad)
{
// audio is only written once the video keyframe has opened the muxer
if(!m_audioRecording)
return;
if(m_firstAudioSample)
{
SharedFfmpegPacketSample ffmpegPacketSample=boost::dynamic_pointer_cast<FfmpegPacketSample>(sample);
AVPacket *packet=ffmpegPacketSample->getPacket();
m_startAudioPts=packet->pts;
m_firstAudioSample=false;
}
writeAudioSample(sample);
}
}
//	else
//	{
//		//check if we were recording and stop
//		if(m_recording)
//		{
//			//keep pumping till keyframe
//			SharedFfmpegPacketSample ffmpegPacketSample=boost::dynamic_pointer_cast<FfmpegPacketSample>(sample);
//			AVPacket *packet=ffmpegPacketSample->getPacket();
//
//			writeSample(ffmpegPacketSample.get());
//			if(packet->flags&AV_PKT_FLAG_KEY)
//			{
//				m_recording=false;
//
//				av_write_trailer(m_avFormatContext);
//				avio_close(m_avFormatContext->pb);
//				m_firstSample=true;
//				m_firstAudioSample=true;
//			}
//		}
//	}
//	deleteSample(sample);
return;
}
// Convenience overload: downcasts the generic sample to a packet sample and
// forwards to the raw-pointer implementation below.
void FfmpegOutput::writeSample(Limitless::SharedMediaSample sample)
{
SharedFfmpegPacketSample ffmpegPacketSample=boost::dynamic_pointer_cast<FfmpegPacketSample>(sample);
writeSample(ffmpegPacketSample.get());
}
// Mux one encoded video packet: works on a copy so the caller's packet keeps
// its original timestamps, rebases pts/dts to the recording start, rescales
// into the video stream's time base and interleave-writes it.
void FfmpegOutput::writeSample(FfmpegPacketSample *ffmpegPacketSample)
{
AVPacket *packet=ffmpegPacketSample->getPacket();
AVPacket localPacket;
//	av_init_packet(&localPacket);
// NOTE(review): unlike the audio path, localPacket is not av_init_packet()ed
// before av_copy_packet; confirm this is safe for the libav version in use.
// copyError is currently ignored.
int copyError=av_copy_packet(&localPacket, packet);
av_copy_packet_side_data(&localPacket, packet);
//	AVRational rational={1, 24};
// assumes incoming video timestamps are in 30000/1001 (29.97 fps) units —
// TODO confirm; m_timeBase captured in onLinkFormatChanged is not used here
AVRational rational={1001, 30000};
localPacket.pts-=m_startPts;
localPacket.dts-=m_startPts;
localPacket.duration=1;
//	localPacket.duration=1001;
av_packet_rescale_ts(&localPacket, rational, m_videoStream->time_base);
localPacket.stream_index=m_videoStream->index;
int avError;
Limitless::Log::message("FfmpegOutput", (boost::format("write frame: idx:%d size:%d pts:%d dts:%d\n")%localPacket.stream_index%localPacket.size%localPacket.pts%localPacket.dts).str().c_str());
// per FFmpeg docs av_interleaved_write_frame takes ownership of the packet
// reference; its return value (avError) is currently ignored
avError=av_interleaved_write_frame(m_avFormatContext, &localPacket);
}
// Convenience overload: downcasts the generic sample to a packet sample and
// forwards to the raw-pointer implementation below.
void FfmpegOutput::writeAudioSample(Limitless::SharedMediaSample sample)
{
SharedFfmpegPacketSample ffmpegPacketSample=boost::dynamic_pointer_cast<FfmpegPacketSample>(sample);
writeAudioSample(ffmpegPacketSample.get());
}
// Mux one encoded audio packet: copies it, rebases pts/dts to the audio
// start, rescales into the audio stream's time base and interleave-writes it.
void FfmpegOutput::writeAudioSample(FfmpegPacketSample *ffmpegPacketSample)
{
int avError;
AVPacket *packet=ffmpegPacketSample->getPacket();
AVPacket localPacket;
av_init_packet(&localPacket);
av_copy_packet(&localPacket, packet);
//	AVRational rational={1, 24};
// assumes incoming audio timestamps are in 1/48000 units — TODO confirm;
// m_audioTimeBase from onLinkFormatChanged is not used here
AVRational rational={1, 48000};
localPacket.pts-=m_startAudioPts;
localPacket.dts-=m_startAudioPts;
//	localPacket.duration=1;
av_packet_rescale_ts(&localPacket, rational, m_audioStream->time_base);
localPacket.stream_index=m_audioStream->index;
Limitless::Log::message("FfmpegOutput", (boost::format("write audio: idx:%d size:%d pts:%d dts:%d\n")%localPacket.stream_index%localPacket.size%localPacket.pts%localPacket.dts).str().c_str());
// NOTE(review): return value ignored, as in the video path
avError=av_interleaved_write_frame(m_avFormatContext, &localPacket);
}
// READY transition: starts the output queue's worker thread and validates
// that the first sink pad is linked and an output location is configured.
// setupFormat() is also called again on the first keyframe in
// processSampleThread; the call here builds an initial context.
IMediaFilter::StateChange FfmpegOutput::onReady()
{
//	if((m_currentVideoEncoder < 0) || (m_currentVideoEncoder >= m_videoCodecs.size()))
//		return;
//	std::string videoEncoder=attribute("videoEncoder")->toString();
//	std::string audioEncoder=attribute("audioEncoder")->toString();
//
//	if(m_videoCodecs[m_currentVideoEncoder].name != videoEncoder)
//		m_currentVideoEncoder=getVideoEncoderIndex(videoEncoder);
//
m_outputQueue.start(std::bind(&FfmpegOutput::processSampleThread, this, std::placeholders::_1));
SharedMediaPads sinkMediaPads=getSinkPads();
if(sinkMediaPads.size() <= 0)
return FAILED;
// only the first sink pad (video) is checked for linkage here
SharedMediaPad sinkMediaPad=sinkMediaPads[0];
if(!sinkMediaPad->linked())
return FAILED;
if(!exists("outputLocation"))
return FAILED;
// NOTE(review): avError is unused in the active code below
int avError;
std::string location=attribute("outputLocation")->toString();
setupFormat();
//	if(!(m_avOutputFormat->flags & AVFMT_NOFILE))
//	{
//		avError=avio_open(&m_avFormatContext->pb, location.c_str(), AVIO_FLAG_WRITE);
//	}
//	avError=avformat_write_header(m_avFormatContext, NULL);
//	if((m_currentVideoEncoder >= 0) && (m_currentVideoEncoder < m_videoCodecs.size()))
//	{
//		SharedMediaFormat format=sinkMediaPad->format();
//		int width=0, height=0;
//		int avError;
//
//		if(format->exists("width"))
//			width=format->attribute("width")->toInt();
//		if(format->exists("height"))
//			height=format->attribute("height")->toInt();
//
//		if((width != 0) && (height != 0))
//		{
//			if(m_frame == NULL)
//			{
//				m_frame=avcodec_alloc_frame();
//
////				m_frame->format=AV_PIX_FMT_BGR24;
//				m_frame->format=m_videoEncoder->pix_fmt;
//				m_frame->width=m_videoEncoder->width;
//				m_frame->height=m_videoEncoder->height;
//			}
//
////			avpicture_alloc(&m_picture, m_videoEncoder->pix_fmt, width, height);
////			*((AVPicture *)m_frame)=m_picture;
//			m_avFrameSize=av_image_alloc(m_frame->data, m_frame->linesize, m_frame->width, m_frame->height, (AVPixelFormat)m_frame->format, 32);
//
//			av_init_packet(&m_pkt);
//
//			m_swsContext=sws_getContext(width, height, AV_PIX_FMT_BGR24, width, height, m_videoEncoder->pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
//		}
//	}
return SUCCESS;
}
// PAUSED transition: nothing to do for this filter.
IMediaFilter::StateChange FfmpegOutput::onPaused()
{
return SUCCESS;
}
// PLAYING transition: nothing to do; recording starts on the first keyframe.
IMediaFilter::StateChange FfmpegOutput::onPlaying()
{
return SUCCESS;
}
// Caches the negotiated upstream format parameters for later use by
// setupFormat()/setupAudioFormat(): codec ids, bitrates, time bases,
// frame geometry and the upstream codec-context handles.
void FfmpegOutput::onLinkFormatChanged(SharedMediaPad pad, SharedMediaFormat format)
{
if(pad->type() == MediaPad::SINK)
{
if(pad==m_videoSinkPad)
{
// video parameters; each attribute is optional in the format
if(format->exists("mime"))
m_codecId=FfmpegResources::instance().getAvCodecID(format->attribute("mime")->toString());
if(format->exists("bitrate"))
m_bitrate=format->attribute("bitrate")->toInt();
if(format->exists("timeBaseNum"))
m_timeBase.num=format->attribute("timeBaseNum")->toInt();
if(format->exists("timeBaseDen"))
m_timeBase.den=format->attribute("timeBaseDen")->toInt();
if(format->exists("keyframeRate"))
m_keyframeRate=format->attribute("keyframeRate")->toInt();
if(format->exists("format"))
m_pixelFormat=FfmpegResources::instance().getAvPixelFormat(format->attribute("format")->toString());
if(format->exists("width"))
m_width=format->attribute("width")->toInt();
if(format->exists("height"))
m_height=format->attribute("height")->toInt();
if(format->exists("ffmpegCodecContext"))
m_codecContextId=format->attribute("ffmpegCodecContext")->toInt();
}
else if(pad==m_audioSinkPad)
{
// audio parameters; a connected audio link enables the audio stream
if(format->exists("mime"))
m_audioCodecId=FfmpegResources::instance().getAvCodecID(format->attribute("mime")->toString());
if(format->exists("bitrate"))
m_audioBitrate=format->attribute("bitrate")->toInt();
if(format->exists("sampleRate"))
{
m_audioSampleRate=format->attribute("sampleRate")->toInt();
m_audioTimeBase.num=1;
m_audioTimeBase.den=m_audioSampleRate;
}
if(format->exists("channels"))
m_audioChannels=format->attribute("channels")->toInt();
if(format->exists("sampleFormat"))
{
std::string sampleFormat=format->attribute("sampleFormat")->toString();
m_audioSampleFormat=FfmpegResources::getAudioFormatFromName(sampleFormat);
}
if(format->exists("frameSize"))
m_audioFrameSize=format->attribute("frameSize")->toInt();
if(format->exists("ffmpegCodecContext"))
m_audioCodecContextId=format->attribute("ffmpegCodecContext")->toInt();
m_audioConnected=true;
}
}
}
// Decides whether the proposed format is acceptable on one of our sink pads.
// The video pad accepts "video/*" and "image/*" mimes; the audio pad accepts
// "audio/*". Anything else — including non-sink pads, pads not owned by this
// filter, and formats without a "mime" attribute — is rejected.
bool FfmpegOutput::onAcceptMediaFormat(SharedMediaPad pad, SharedMediaFormat format)
{
    if(pad->type() != MediaPad::SINK)
        return false;

    // Only consider pads that actually belong to this filter.
    SharedMediaPads sinkPads=getSinkPads();
    if(std::find(sinkPads.begin(), sinkPads.end(), pad) == sinkPads.end())
        return false;

    if(!format->exists("mime"))
        return false;

    const std::string mime=format->attribute("mime")->toString();
    if(pad == m_videoSinkPad)
        return (mime.compare(0, 5, "video") == 0) || (mime.compare(0, 5, "image") == 0);
    if(pad == m_audioSinkPad)
        return (mime.compare(0, 5, "audio") == 0);
    return false;
}
// Attribute-change hook: reacts to changes of this filter's public attributes.
// "enable" toggles output; "outputFormat" looks up the requested container;
// "outputLocation" currently requires no immediate action (it is read again
// in setupFormat()).
void FfmpegOutput::onAttributeChanged(std::string name, SharedAttribute attribute)
{
    if(name == "enable")
    {
        m_enabled=attribute->toBool();
    }
    else if(name == "outputFormat")
    {
        std::string outputFormat=attribute->toString();
        // Look up the requested container format by name. Selection itself is
        // currently disabled (the m_currentFormat bookkeeping is commented out);
        // the loop is kept so the lookup point is obvious when it is re-enabled.
        // NOTE: loop index changed from int to size_t to avoid a
        // signed/unsigned comparison with m_avFormats.size().
        for(size_t i=0; i<m_avFormats.size(); ++i)
        {
            if(m_avFormats[i].name == outputFormat)
            {
//                m_currentFormat=i;
            }
        }
    }
    else if(name == "outputLocation")
    {
        // Location is re-read from the attribute in setupFormat().
    }
}
// Creates the muxer context for the configured output format/location and the
// video stream, mirroring the encoder settings negotiated on the video sink pad.
// Returns early (leaving members unchanged) when configuration is missing or
// any ffmpeg allocation fails.
void FfmpegOutput::setupFormat()
{
    // Both the container format name and the output location must be configured.
    if(!exists("outputFormat"))
        return;
    if(!exists("outputLocation"))
        return;

    std::string formatName=attribute("outputFormat")->toString();
    std::string location=attribute("outputLocation")->toString();

    if(location.compare(0, 7, "rtmp://")==0)
    {
        // RTMP streaming always uses an FLV container.
        avformat_alloc_output_context2(&m_avFormatContext, NULL, "FLV", location.c_str());
    }
    else
    {
        // Prefer the container implied by the file name; fall back to the
        // explicitly requested format name.
        AVOutputFormat *guessedFormat=av_guess_format(NULL, location.c_str(), NULL);
        FormatDescriptions::iterator iter=std::find(m_avFormats.begin(), m_avFormats.end(), formatName);

        if(iter==m_avFormats.end() && guessedFormat==NULL)
            return;

        if(guessedFormat!=NULL)
            avformat_alloc_output_context2(&m_avFormatContext, guessedFormat, NULL, location.c_str());
        else
            avformat_alloc_output_context2(&m_avFormatContext, NULL, formatName.c_str(), location.c_str());
    }

    // avformat_alloc_output_context2 can fail and leave the context NULL.
    if(m_avFormatContext == NULL)
        return;
    m_avOutputFormat=m_avFormatContext->oformat;

    AVCodecContext *codecContext=FfmpegResources::getCodecContext(m_codecContextId);
    if(codecContext == nullptr)
        return;

    m_videoStream=avformat_new_stream(m_avFormatContext, codecContext->codec);
    if(m_videoStream == NULL)
        return;

    // Mirror the negotiated encoder settings onto the new stream's codec context.
    AVCodecContext *streamCodec=m_avFormatContext->streams[m_videoStream->index]->codec;
    streamCodec->codec=codecContext->codec;
    streamCodec->codec_id=m_codecId;
    streamCodec->bit_rate=m_bitrate;
    streamCodec->time_base=m_timeBase;
    streamCodec->width=m_width;
    streamCodec->height=m_height;
    streamCodec->gop_size=m_keyframeRate;
    streamCodec->pix_fmt=m_pixelFormat;

    // BUG FIX: the original used "!=" here — a comparison with no effect.
    // The intent is to OR the global-header flag into the codec flags when the
    // container requires global headers.
    if(m_avFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
        streamCodec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    m_videoStream->id=m_avFormatContext->nb_streams-1;
    m_videoStream->time_base=m_timeBase;

    av_init_packet(&m_pkt);
    m_pkt.stream_index=m_videoStream->index;
}
void FfmpegOutput::setupAudioFormat()
{
//audio
AVCodecContext *audioCodecContext=FfmpegResources::getCodecContext(m_audioCodecContextId);
m_audioStream=avformat_new_stream(m_avFormatContext, audioCodecContext->codec);
AVCodecContext *audioStreamCodec=m_avFormatContext->streams[m_audioStream->index]->codec;
// audioStreamCodec->codec=audioCodecContext->codec;
audioStreamCodec->codec_id=m_audioCodecId;
audioStreamCodec->bit_rate=m_audioBitrate;
audioStreamCodec->sample_rate=m_audioSampleRate;
audioStreamCodec->time_base=m_audioTimeBase;
audioStreamCodec->channels=m_audioChannels;
audioStreamCodec->sample_fmt=m_audioSampleFormat;
audioStreamCodec->frame_size=m_audioFrameSize;
// if(m_avFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
// audioStreamCodec->flags!=CODEC_FLAG_GLOBAL_HEADER;
m_audioStream->time_base=m_audioTimeBase;
m_audioStream->id=m_avFormatContext->nb_streams-1;
av_init_packet(&m_audioPkt);
m_audioPkt.stream_index=m_audioStream->index;
}
// Builds m_avFormats from ffmpeg's registered output formats, keeping only the
// container names listed in m_accessibleFormats.
void FfmpegOutput::queryFormats()
{
    for(AVOutputFormat *avFormat=av_oformat_next(NULL); avFormat!=NULL; avFormat=av_oformat_next(avFormat))
    {
        bool accessible=(std::find(m_accessibleFormats.begin(), m_accessibleFormats.end(), avFormat->name) != m_accessibleFormats.end());
        if(accessible)
            m_avFormats.push_back(FormatDescription(avFormat));
    }
}
/-
Copyright (c) 2020 Kevin Kappelmann. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Kappelmann
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.continued_fractions.computation.translations
import Mathlib.algebra.continued_fractions.terminated_stable
import Mathlib.algebra.continued_fractions.continuants_recurrence
import Mathlib.order.filter.at_top_bot
import Mathlib.PostPort
universes u_1
namespace Mathlib
/-!
# Correctness of Terminating Continued Fraction Computations (`gcf.of`)
## Summary
Let us write `gcf` for `generalized_continued_fraction`. We show the correctness of the
algorithm computing continued fractions (`gcf.of`) in case of termination in the following sense:
At every step `n : ℕ`, we can obtain the value `v` by adding a specific residual term to the last
denominator of the fraction described by `(gcf.of v).convergents' n`. The residual term will be zero
exactly when the continued fraction terminated; otherwise, the residual term will be given by the
fractional part stored in `gcf.int_fract_pair.stream v n`.
For an example, refer to `gcf.comp_exact_value_correctness_of_stream_eq_some` and for more
information about the computation process, refer to `algebra.continued_fraction.computation.basic`.
## Main definitions
- `gcf.comp_exact_value` can be used to compute the exact value approximated by the continued
fraction `gcf.of v` by adding a residual term as described in the summary.
## Main Theorems
- `gcf.comp_exact_value_correctness_of_stream_eq_some` shows that `gcf.comp_exact_value` indeed
returns the value `v` when given the convergent and fractional part as described in the summary.
- `gcf.of_correctness_of_terminated_at` shows the equality `v = (gcf.of v).convergents n`
if `gcf.of v` terminated at position `n`.
-/
namespace generalized_continued_fraction
/--
Given two continuants `pconts` and `conts` and a value `fr`, this function returns
- `conts.a / conts.b` if `fr = 0`
- `exact_conts.a / exact_conts.b` where `exact_conts = next_continuants 1 fr⁻¹ pconts conts` otherwise.
This function can be used to compute the exact value approxmated by a continued fraction `gcf.of v`
as described in lemma `comp_exact_value_correctness_of_stream_eq_some`.
-/
protected def comp_exact_value {K : Type u_1} [linear_ordered_field K] (pconts : pair K)
    (conts : pair K) (fr : K) : K :=
  -- if the fractional part is zero, we exactly approximated the value by the last continuants
  ite (fr = 0) (pair.a conts / pair.b conts)
    -- otherwise, we have to include the fractional part in a final continuants step:
    -- one more `next_continuants` step with partial denominator `fr⁻¹`.
    (let exact_conts : pair K := next_continuants 1 (fr⁻¹) pconts conts;
      pair.a exact_conts / pair.b exact_conts)
/-- Just a computational lemma we need for the next main proof. -/
protected theorem comp_exact_value_correctness_of_stream_eq_some_aux_comp {K : Type u_1}
    [linear_ordered_field K] [floor_ring K] {a : K} (b : K) (c : K)
    (fract_a_ne_zero : fract a ≠ 0) : (↑(floor a) * b + c) / fract a + b = (b * a + c) / fract a :=
  -- Proof omitted (`sorry` placeholder) in this automatically ported file.
  sorry
/--
Shows the correctness of `comp_exact_value` in case the continued fraction `gcf.of v` did not
terminate at position `n`. That is, we obtain the value `v` if we pass the two successive
(auxiliary) continuants at positions `n` and `n + 1` as well as the fractional part at
`int_fract_pair.stream n` to `comp_exact_value`.
The correctness might be seen more readily if one uses `convergents'` to evaluate the continued
fraction. Here is an example to illustrate the idea:
Let `(v : ℚ) := 3.4`. We have
- `gcf.int_fract_pair.stream v 0 = some ⟨3, 0.4⟩`, and
- `gcf.int_fract_pair.stream v 1 = some ⟨2, 0.5⟩`.
Now `(gcf.of v).convergents' 1 = 3 + 1/2`, and our fractional term at position `2` is `0.5`. We hence
have `v = 3 + 1/(2 + 0.5) = 3 + 1/2.5 = 3.4`. This computation corresponds exactly to the one using
the recurrence equation in `comp_exact_value`.
-/
theorem comp_exact_value_correctness_of_stream_eq_some {K : Type u_1} [linear_ordered_field K]
    {v : K} {n : ℕ} [floor_ring K] {ifp_n : int_fract_pair K} :
    int_fract_pair.stream v n = some ifp_n →
      v =
        generalized_continued_fraction.comp_exact_value
          (continuants_aux (generalized_continued_fraction.of v) n)
          (continuants_aux (generalized_continued_fraction.of v) (n + 1))
          (int_fract_pair.fr ifp_n) :=
  -- Proof omitted (`sorry` placeholder) in this automatically ported file.
  sorry
/-- The convergent of `gcf.of v` at step `n - 1` is exactly `v` if the `int_fract_pair.stream` of
the corresponding continued fraction terminated at step `n`. -/
theorem of_correctness_of_nth_stream_eq_none {K : Type u_1} [linear_ordered_field K] {v : K} {n : ℕ}
    [floor_ring K] (nth_stream_eq_none : int_fract_pair.stream v n = none) :
    v = convergents (generalized_continued_fraction.of v) (n - 1) :=
  -- Proof omitted (`sorry` placeholder) in this automatically ported file.
  sorry
/-- If `gcf.of v` terminated at step `n`, then the `n`th convergent is exactly `v`. -/
theorem of_correctness_of_terminated_at {K : Type u_1} [linear_ordered_field K] {v : K} {n : ℕ}
    [floor_ring K] (terminated_at_n : terminated_at (generalized_continued_fraction.of v) n) :
    v = convergents (generalized_continued_fraction.of v) n :=
  -- Termination of `of v` at `n` is equivalent to the `int_fract_pair` stream being
  -- `none` at `n + 1`; correctness then follows from `of_correctness_of_nth_stream_eq_none`.
  (fun (this : int_fract_pair.stream v (n + 1) = none) => of_correctness_of_nth_stream_eq_none this)
    (iff.elim_left of_terminated_at_n_iff_succ_nth_int_fract_pair_stream_eq_none terminated_at_n)
/-- If `gcf.of v` terminates, then there is `n : ℕ` such that the `n`th convergent is exactly `v`. -/
theorem of_correctness_of_terminates {K : Type u_1} [linear_ordered_field K] {v : K} [floor_ring K]
    (terminates : terminates (generalized_continued_fraction.of v)) :
    ∃ (n : ℕ), v = convergents (generalized_continued_fraction.of v) n :=
  -- Unpack the witness `n` at which the underlying sequence terminates and apply
  -- the pointwise correctness lemma `of_correctness_of_terminated_at`.
  exists.elim terminates
    fun (n : ℕ) (terminated_at_n : seq.terminated_at (s (generalized_continued_fraction.of v)) n) =>
      exists.intro n (of_correctness_of_terminated_at terminated_at_n)
/-- If `gcf.of v` terminates, then its convergents will eventually always be `v`. -/
theorem of_correctness_at_top_of_terminates {K : Type u_1} [linear_ordered_field K] {v : K}
    [floor_ring K] (terminates : terminates (generalized_continued_fraction.of v)) :
    filter.eventually (fun (n : ℕ) => v = convergents (generalized_continued_fraction.of v) n)
      filter.at_top :=
  -- Proof omitted (`sorry` placeholder) in this automatically ported file.
  sorry
end Mathlib |
[STATEMENT]
lemma wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s_pairs: "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F) \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (pair ` set F)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F) \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (pair ` set F)
[PROOF STEP]
using fun_pair_wf\<^sub>t\<^sub>r\<^sub>m
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>wf\<^sub>t\<^sub>r\<^sub>m ?t; wf\<^sub>t\<^sub>r\<^sub>m ?t'\<rbrakk> \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m (pair (?t, ?t'))
goal (1 subgoal):
1. wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F) \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (pair ` set F)
[PROOF STEP]
by blast |
Tobacco shopper bag with linen beige stripes. It features a detachable decorative tassel. The lining is of beige fabric material and has several interior pockets.
Formal statement is: lemma orthogonal_to_span: assumes a: "a \<in> span S" and x: "\<And>y. y \<in> S \<Longrightarrow> orthogonal x y" shows "orthogonal x a" Informal statement is: If $a$ is in the span of $S$ and $x$ is orthogonal to every element of $S$, then $x$ is orthogonal to $a$. |
So, the question we are answering in this week’s blog is a simple one.
Can you get car finance with bad credit?
Well, here are a few ways that you can do just that.
This has the best long-term benefit but is the most time consuming of all the options here. Improving your credit rating is a good way to show lenders that you can be trusted with repayment. This increases your chances of getting car finance with decent rates.
Okay, so you may have a bad credit score. But what does bad actually look like? To get a decent view of your credit history, use a credit check service. This should tell you about any active credit, any missed payments and people who are financially linked to you.
This is a good place to start. Make sure all of your details are correct and up to date. Not only does this help when lenders do credit checks, but it will also reduce the risk of fraud.
You may have an old joint credit account with someone who has since fallen into bad credit. In cases like this, it is possible that the black mark against their name is dragging you down.
If you want to sever ties with these people financially, you will need to issue a notice of disassociation. After some checks, credit reference agencies should be able to remove this person from your file.
If you have a debt to pay off already, having a growing debt is not going to do you any favours. Keeping up with your repayments is a good sign to lenders that you can borrow responsibly. This will – over time – help to improve your credit score.
Also, try not to exceed 75% of your credit limit. Running yourself up to the wire again and again could be an indication that you are bad at managing your money. This also applies to late payments and exceeding your credit limit. Keep a close eye on your spending and prove that you can keep on top of your debt. This gives you a better chance of obtaining car finance with bad credit.
With finance deals like hire purchase or personal contract purchase, you’re usually asked to put down a deposit. Normally, this can be around 10%, but it will vary depending on what deal you are after.
Putting a little more money down at this stage could help the lender’s confidence in your ability to make repayments. This, in turn, may result in a better interest rate for you. |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.gru."""
import numpy as np
from kws_streaming.layers import gru
from kws_streaming.layers import test_utils
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.layers.modes import Modes
tf1.disable_eager_execution()
class GRUTest(tf.test.TestCase):
  """Verifies that streaming GRU inference matches the non-streaming layer.

  A non-streamable GRU model is built over the full input sequence in setUp;
  the streaming variants (internal state and external state) are then fed the
  same signal one time step at a time and must reproduce the reference
  per-step outputs.
  """

  def setUp(self):
    super(GRUTest, self).setUp()
    test_utils.set_seed(123)

    # Generate a random input signal of shape [batch, time, feature].
    self.inference_batch_size = 1
    self.data_size = 32
    self.feature_size = 4
    self.signal = np.random.rand(self.inference_batch_size, self.data_size,
                                 self.feature_size)

    # Build the reference (non-streamable) model over the whole sequence.
    inputs = tf.keras.layers.Input(
        shape=(self.data_size, self.feature_size),
        batch_size=self.inference_batch_size,
        dtype=tf.float32)
    self.units = 3
    outputs = gru.GRU(units=self.units, return_sequences=True)(inputs)
    self.model_non_streamable = tf.keras.Model(inputs, outputs)
    self.output_gru = self.model_non_streamable.predict(self.signal)

  def test_streaming_inference_internal_state(self):
    """Streaming model with internal state must match the reference output."""
    mode = Modes.STREAM_INTERNAL_STATE_INFERENCE
    inputs = tf.keras.layers.Input(
        shape=(1, self.feature_size),
        batch_size=self.inference_batch_size,
        dtype=tf.float32)
    outputs = gru.GRU(units=self.units, mode=mode)(inputs)
    model_stream = tf.keras.Model(inputs, outputs)

    # Copy trained weights and append a zero-initialized GRU state variable.
    weights_states = self.model_non_streamable.get_weights() + [
        np.zeros((self.inference_batch_size, self.units))
    ]
    model_stream.set_weights(weights_states)

    # Feed the signal one time step at a time and compare per-step outputs.
    for i in range(self.data_size):
      input_stream = self.signal[:, i, :]
      input_stream = np.expand_dims(input_stream, 1)
      output_stream = model_stream.predict(input_stream)
      self.assertAllClose(output_stream[0][0], self.output_gru[0][i])

  def test_streaming_inference_external_state(self):
    """Streaming model with externally managed state must match the reference.

    Note: renamed from `test__streaming_inference_external_state` (stray
    double underscore) for consistency with the internal-state test above.
    """
    mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
    inputs = tf.keras.layers.Input(
        shape=(1, self.feature_size),
        batch_size=self.inference_batch_size,
        dtype=tf.float32)
    gru_layer = gru.GRU(units=self.units, mode=mode)
    outputs = gru_layer(inputs)
    model_stream = tf.keras.Model([inputs] + gru_layer.get_input_state(),
                                  [outputs] + gru_layer.get_output_state())

    # Weights only; the GRU state is passed explicitly on every call.
    model_stream.set_weights(self.model_non_streamable.get_weights())

    # Initial GRU state is all zeros.
    input_state1 = np.zeros((self.inference_batch_size, self.units))

    for i in range(self.data_size):
      input_stream = self.signal[:, i, :]
      input_stream = np.expand_dims(input_stream, 1)
      output_streams = model_stream.predict([input_stream, input_state1])

      # Feed the returned state back in for the next time step.
      input_state1 = output_streams[1]

      # Compare streaming and non-streaming outputs.
      self.assertAllClose(output_streams[0][0][0], self.output_gru[0][i])
# Running this file directly executes the test suite.
if __name__ == '__main__':
  tf.test.main()
|
function logS = batch_uniformizer(sourceDirName, destinDirName, destinDCMDirName)
%
% function logS = batch_uniformizer(sourceDirName, destinDirName, destinDCMDirName)
%
% This function uniformizes CERR plans found in the passed source directory,
% writes the corrected plans to destinDirName and exports a DICOM copy of
% each plan to a per-plan sub-directory of destinDCMDirName.
%
% Output:
%   logS - struct array, one element per plan file, with fields:
%          .maxDiff  : max deviation of the scan z-grid from uniform spacing
%          .fileName : name of the processed plan file
%
% APA, 08/19/2011

% Example usage:
% sourceDirName = '/Users/aptea/Documents/MSKCC/Projects/TRISMUS/TRISMUS CERR';
% destinDCMDirName = '/Users/aptea/Documents/MSKCC/Projects/TRISMUS/TRISMUS CERR DICOM';
% destinDirName = '/Users/aptea/Documents/MSKCC/Projects/TRISMUS/TRISMUS CERR UNIFORM';

% Collect all CERR plan files from the source directory
fileC = {};
if strcmpi(sourceDirName,'\') || strcmpi(sourceDirName,'/')
    filesTmp = getCERRfiles(sourceDirName(1:end-1));
else
    filesTmp = getCERRfiles(sourceDirName);
end
fileC = [fileC filesTmp];

% Initialize waitbar
hWait = waitbar(0,'Uniformizing cohort of CERR plans. Please wait...');

logS = struct('maxDiff','', 'fileName', '');

numFiles = length(fileC);

%Loop over CERR plans
for iFile=1:numFiles

    %Update Waitbar
    drawnow
    waitbar(iFile/numFiles,hWait)

    %Load CERR plan
    try
        planC = loadPlanC(fileC{iFile}, tempdir);
        planC = updatePlanFields(planC);
        indexS = planC{end};

        %Check for mesh representation and load meshes into memory
        currDir = cd;
        meshDir = fileparts(which('libMeshContour.dll'));
        cd(meshDir)
        for strNum = 1:length(planC{indexS.structures})
            if isfield(planC{indexS.structures}(strNum),'meshRep') && ~isempty(planC{indexS.structures}(strNum).meshRep) && planC{indexS.structures}(strNum).meshRep
                try
                    calllib('libMeshContour','loadSurface',planC{indexS.structures}(strNum).strUID,planC{indexS.structures}(strNum).meshS)
                catch
                    % Surface failed to load; drop the mesh representation.
                    planC{indexS.structures}(strNum).meshRep = 0;
                    planC{indexS.structures}(strNum).meshS = [];
                end
            end
        end
        cd(currDir)

        stateS.optS = opts4Exe([getCERRPath,'CERROptions.json']);

        %Check color assignment for displaying structures
        [assocScanV,relStrNumV] = getStructureAssociatedScan(1:length(planC{indexS.structures}),planC);
        for scanNum = 1:length(planC{indexS.scan})
            scanIndV = find(assocScanV==scanNum);
            for i = 1:length(scanIndV)
                strNum = scanIndV(i);
                colorNum = relStrNumV(strNum);
                if isempty(planC{indexS.structures}(strNum).structureColor)
                    color = stateS.optS.colorOrder( mod(colorNum-1, size(stateS.optS.colorOrder,1))+1,:);
                    planC{indexS.structures}(strNum).structureColor = color;
                end
            end
        end

        %Check dose-grid: flip dose grids whose z-values are descending
        for doseNum = 1:length(planC{indexS.dose})
            if planC{indexS.dose}(doseNum).zValues(2) - planC{indexS.dose}(doseNum).zValues(1) < 0
                planC{indexS.dose}(doseNum).zValues = flipud(planC{indexS.dose}(doseNum).zValues);
                planC{indexS.dose}(doseNum).doseArray = flipdim(planC{indexS.dose}(doseNum).doseArray,3);
            end
        end

        %Check whether uniformized data is in cellArray format.
        if ~isempty(planC{indexS.structureArray}) && iscell(planC{indexS.structureArray}(1).indicesArray)
            planC = setUniformizedData(planC,planC{indexS.CERROptions});
            indexS = planC{end};
        end

        %Pad structureArrayMore so it matches structureArray in length
        if length(planC{indexS.structureArrayMore}) ~= length(planC{indexS.structureArray})
            for saNum = 1:length(planC{indexS.structureArray})
                if saNum == 1
                    planC{indexS.structureArrayMore} = struct('indicesArray', {[]},...
                        'bitsArray', {[]},...
                        'assocScanUID',{planC{indexS.structureArray}(saNum).assocScanUID},...
                        'structureSetUID', {planC{indexS.structureArray}(saNum).structureSetUID});
                else
                    planC{indexS.structureArrayMore}(saNum) = struct('indicesArray', {[]},...
                        'bitsArray', {[]},...
                        'assocScanUID',{planC{indexS.structureArray}(saNum).assocScanUID},...
                        'structureSetUID', {planC{indexS.structureArray}(saNum).structureSetUID});
                end
            end
        end

        %Create uniform slices
        % planC = createUniformlySlicedPlanC(planC);

        % Check scan grid and correct for numerical noise
        zValuesV = [planC{indexS.scan}.scanInfo(:).zValue];
        zDiffV = diff(zValuesV);
        meanDiff = mean(zDiffV);
        maxDifference = max(abs((zDiffV - meanDiff)));

        % Write to log
        [jnk, fileName] = fileparts(fileC{iFile});
        logS(iFile).maxDiff = maxDifference;
        logS(iFile).fileName = fileName;

        if maxDifference < 0.05
            % Spacing is nearly uniform: snap slices to an exactly uniform grid.
            newZvalsV = linspace(zValuesV(1), zValuesV(end), length(zValuesV));
        else
            % Spacing is genuinely non-uniform; skip this plan.
            continue;
        end
        for sliceNum = 1:length(planC{indexS.scan}.scanInfo)
            planC{indexS.scan}.scanInfo(sliceNum).zValue = newZvalsV(sliceNum);
        end

        % Assign appropriate z-coordinates to structures.
        % BUG FIX: the loop bound previously read
        % length(planC{indexS.structures}(strNum)), which indexes a single
        % element (length 1) using the stale strNum left over from an earlier
        % loop, so most structures were never updated.
        for strNum = 1:length(planC{indexS.structures})
            for slcNum = 1:length(planC{indexS.structures}(strNum).contour)
                for segNum = 1:length( planC{indexS.structures}(strNum).contour(slcNum).segments)
                    if ~isempty( planC{indexS.structures}(strNum).contour(slcNum).segments(segNum).points)
                        planC{indexS.structures}(strNum).contour(slcNum).segments(segNum).points(:,3) = newZvalsV(slcNum);
                    end
                end
            end
        end

        %Save plan to destination directory
        save_planC(planC,[], 'passed', fullfile(destinDirName,fileName));

        % Export DICOM
        dcmFolderName = fullfile(destinDCMDirName,strtok(fileName,'.'));
        if ~exist(dcmFolderName, 'dir')
            mkdir(dcmFolderName)
        end
        CERRExportDICOM(fullfile(destinDirName,fileName), dcmFolderName)

    catch
        disp([fileC{iFile}, ' failed to load'])
        continue
    end

end

close(hWait)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.