Datasets: AI4M
Column: text (string, lengths 0–3.34M)
/** * @file * @copyright defined in LICENSE.txt */ #pragma once #include <boost/asio/ssl/context.hpp> namespace fc { //Add the platform's trusted root CAs to the ssl context void add_platform_root_cas_to_context(boost::asio::ssl::context& ctx); }
/* histogram/gsl_histogram2d.h * * Copyright (C) 1996, 1997, 1998, 1999, 2000 Brian Gough * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef __GSL_HISTOGRAM2D_H__ #define __GSL_HISTOGRAM2D_H__ #include <stdlib.h> #include <stdio.h> #include <gsl/gsl_types.h> #undef __BEGIN_DECLS #undef __END_DECLS #ifdef __cplusplus # define __BEGIN_DECLS extern "C" { # define __END_DECLS } #else # define __BEGIN_DECLS /* empty */ # define __END_DECLS /* empty */ #endif __BEGIN_DECLS typedef struct { size_t nx, ny ; double * xrange ; double * yrange ; double * bin ; } gsl_histogram2d ; typedef struct { size_t nx, ny ; double * xrange ; double * yrange ; double * sum ; } gsl_histogram2d_pdf ; GSL_EXPORT gsl_histogram2d * gsl_histogram2d_alloc (const size_t nx, const size_t ny); GSL_EXPORT gsl_histogram2d * gsl_histogram2d_calloc (const size_t nx, const size_t ny); GSL_EXPORT gsl_histogram2d * gsl_histogram2d_calloc_uniform (const size_t nx, const size_t ny, const double xmin, const double xmax, const double ymin, const double ymax); GSL_EXPORT void gsl_histogram2d_free (gsl_histogram2d * h); GSL_EXPORT int gsl_histogram2d_increment (gsl_histogram2d * h, double x, double y); GSL_EXPORT int gsl_histogram2d_accumulate (gsl_histogram2d * h, double x, double y, double weight); GSL_EXPORT int gsl_histogram2d_find (const gsl_histogram2d * h, const double x, const double y, size_t * i, size_t * j); GSL_EXPORT double gsl_histogram2d_get (const gsl_histogram2d * h, const size_t i, const size_t j); GSL_EXPORT int gsl_histogram2d_get_xrange (const gsl_histogram2d * h, const size_t i, double * xlower, double * xupper); GSL_EXPORT int gsl_histogram2d_get_yrange (const gsl_histogram2d * h, const size_t j, double * ylower, double * yupper); GSL_EXPORT double gsl_histogram2d_xmax (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_xmin (const gsl_histogram2d * h); GSL_EXPORT size_t gsl_histogram2d_nx (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_ymax (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_ymin (const gsl_histogram2d * h); GSL_EXPORT size_t gsl_histogram2d_ny (const gsl_histogram2d * h); GSL_EXPORT void gsl_histogram2d_reset (gsl_histogram2d * h); GSL_EXPORT gsl_histogram2d * gsl_histogram2d_calloc_range(size_t nx, size_t ny, double *xrange, double *yrange); GSL_EXPORT int gsl_histogram2d_set_ranges_uniform (gsl_histogram2d * h, double xmin, double xmax, double ymin, double ymax); GSL_EXPORT int gsl_histogram2d_set_ranges (gsl_histogram2d * h, const double xrange[], size_t xsize, const double yrange[], size_t ysize); GSL_EXPORT int gsl_histogram2d_memcpy(gsl_histogram2d *dest, const gsl_histogram2d *source); GSL_EXPORT gsl_histogram2d * gsl_histogram2d_clone(const gsl_histogram2d * source); GSL_EXPORT double gsl_histogram2d_max_val(const gsl_histogram2d *h); GSL_EXPORT void gsl_histogram2d_max_bin (const gsl_histogram2d 
*h, size_t *i, size_t *j); GSL_EXPORT double gsl_histogram2d_min_val(const gsl_histogram2d *h); GSL_EXPORT void gsl_histogram2d_min_bin (const gsl_histogram2d *h, size_t *i, size_t *j); GSL_EXPORT double gsl_histogram2d_xmean (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_ymean (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_xsigma (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_ysigma (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_cov (const gsl_histogram2d * h); GSL_EXPORT double gsl_histogram2d_sum (const gsl_histogram2d *h); GSL_EXPORT int gsl_histogram2d_equal_bins_p(const gsl_histogram2d *h1, const gsl_histogram2d *h2) ; GSL_EXPORT int gsl_histogram2d_add(gsl_histogram2d *h1, const gsl_histogram2d *h2); GSL_EXPORT int gsl_histogram2d_sub(gsl_histogram2d *h1, const gsl_histogram2d *h2); GSL_EXPORT int gsl_histogram2d_mul(gsl_histogram2d *h1, const gsl_histogram2d *h2); GSL_EXPORT int gsl_histogram2d_div(gsl_histogram2d *h1, const gsl_histogram2d *h2); GSL_EXPORT int gsl_histogram2d_scale(gsl_histogram2d *h, double scale); GSL_EXPORT int gsl_histogram2d_shift(gsl_histogram2d *h, double shift); GSL_EXPORT int gsl_histogram2d_fwrite (FILE * stream, const gsl_histogram2d * h) ; GSL_EXPORT int gsl_histogram2d_fread (FILE * stream, gsl_histogram2d * h); GSL_EXPORT int gsl_histogram2d_fprintf (FILE * stream, const gsl_histogram2d * h, const char * range_format, const char * bin_format); GSL_EXPORT int gsl_histogram2d_fscanf (FILE * stream, gsl_histogram2d * h); GSL_EXPORT gsl_histogram2d_pdf * gsl_histogram2d_pdf_alloc (const size_t nx, const size_t ny); GSL_EXPORT int gsl_histogram2d_pdf_init (gsl_histogram2d_pdf * p, const gsl_histogram2d * h); GSL_EXPORT void gsl_histogram2d_pdf_free (gsl_histogram2d_pdf * p); GSL_EXPORT int gsl_histogram2d_pdf_sample (const gsl_histogram2d_pdf * p, double r1, double r2, double * x, double * y); __END_DECLS #endif /* __GSL_HISTOGRAM2D_H__ */
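The header above only declares the 2D histogram API. As a quick orientation, here is a minimal usage sketch in C built solely from the functions declared in it; the bin counts and sample coordinates are made-up illustration values and error checking is omitted.

#include <stdio.h>
#include <gsl/gsl_histogram2d.h>

int main(void)
{
  /* allocate a 10 x 10 bin histogram (ranges are set below) */
  gsl_histogram2d *h = gsl_histogram2d_alloc(10, 10);

  /* uniform bin edges over [0,1] x [0,1]; this also resets all bins to zero */
  gsl_histogram2d_set_ranges_uniform(h, 0.0, 1.0, 0.0, 1.0);

  /* add one count at (0.3, 0.7) and a weighted count at (0.8, 0.2) */
  gsl_histogram2d_increment(h, 0.3, 0.7);
  gsl_histogram2d_accumulate(h, 0.8, 0.2, 2.5);

  /* look up the bin containing (0.3, 0.7) and read it back */
  size_t i, j;
  gsl_histogram2d_find(h, 0.3, 0.7, &i, &j);
  printf("bin (%zu,%zu) = %g, total = %g\n",
         i, j, gsl_histogram2d_get(h, i, j), gsl_histogram2d_sum(h));

  gsl_histogram2d_free(h);
  return 0;
}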
data Vect : Nat -> Type -> Type where Nil : Vect Z a (::) : a -> (1 xs : Vect k a) -> Vect (S k) a partial append : (1 _ : Vect n a) -> Vect m a -> Vect (n + m) a append (x :: zs@(y :: ws)) ys = ?foo -- zs usable, y+ws not cappend : (1 _ : Vect n a) -> Vect m a -> Vect (plus n m) a cappend xs ys = case xs of Nil => ys x :: zs => ?bar -- zs usable, xs not cappend2 : (1 _ : Vect n a) -> Vect m a -> Vect (plus n m) a cappend2 xs ys = case xs of Nil => ys x :: zs => let ts = zs in ?baz -- ts usable, xs+zs not
From cap_machine Require Export logrel. From iris.proofmode Require Import tactics. From iris.program_logic Require Import weakestpre adequacy lifting. From stdpp Require Import base. From cap_machine Require Import ftlr_base_binary. From cap_machine Require Export rules_UnSeal. From cap_machine.ftlr_binary Require Import interp_weakening. From cap_machine.rules_binary Require Import rules_binary_base. Section fundamental. Context {Σ:gFunctors} {memg:memG Σ} {regg:regG Σ} {nainv: logrel_na_invs Σ} {cfgsg: cfgSG Σ} `{MachineParameters}. Notation D := ((prodO (leibnizO Word) (leibnizO Word)) -n> iPropO Σ). Notation R := ((prodO (leibnizO Reg) (leibnizO Reg)) -n> iPropO Σ). Implicit Types ww : (prodO (leibnizO Word) (leibnizO Word)). Implicit Types w : (leibnizO Word). Implicit Types interp : (D). Lemma unseal_case (r : prodO (leibnizO Reg) (leibnizO Reg)) (p : Perm) (b e a : Addr) (w w' : Word) (dst : RegName) (src1 src2 : RegName) (P : D): ftlr_instr r p b e a w w' (UnSeal dst src1 src2) P. Proof. intros Hp Hsome HisCorrect Hbae Hi. iIntros "#IH #Hspec #Hinv #Hreg #Hinva #Hread Hsmap Hown Hs Ha Ha' HP Hcls HPC Hmap". rewrite delete_insert_delete. iDestruct ((big_sepM_delete _ _ PC) with "[HPC Hmap]") as "Hmap /="; [apply lookup_insert|rewrite delete_insert_delete;iFrame|]. simpl. iApply (wp_UnSeal with "[$Ha $Hmap]"); eauto. { eapply lookup_insert. } { rewrite /subseteq /map_subseteq /set_subseteq_instance. intros rr _. apply elem_of_dom. apply lookup_insert_is_Some'; eauto. destruct Hsome with rr; eauto. } iIntros "!>" (regs' retv). iDestruct 1 as (HSpec) "[Ha Hmap]". (* we assert that w = w' *) iAssert (⌜w = w'⌝)%I as %Heqw. { iDestruct "Hread" as "[Hread _]". iSpecialize ("Hread" with "HP"). by iApply interp_eq. } destruct r as [r1 r2]. simpl in *. iDestruct (interp_reg_eq r1 r2 (WCap p b e a) with "[]") as %Heq;[iSplit;auto|]. rewrite -!Heq. destruct HSpec; cycle 1. - (* In case of failure, we do not necessarily get a contradiction, but the proof is trivial *) iApply wp_pure_step_later; auto. iMod ("Hcls" with "[Ha Ha' HP]"); [iExists w,w'; iFrame|iModIntro]. iNext; iIntros "_". iApply wp_value; auto. iIntros; discriminate. - destruct (decide (src1 = PC)) as [->|Hnesp]; simplify_map_eq. specialize (Hsome src1) as (_ & (vr2 & Hsr1')). iAssert (interp(_,vr2)) as "HFalse". { iApply ("Hreg" $! src1); eauto. } rewrite !fixpoint_interp1_eq /=. done. Qed. End fundamental.
/* multifit_nlinear/trust.c * * Copyright (C) 2016 Patrick Alken * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <config.h> #include <gsl/gsl_math.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_linalg.h> #include <gsl/gsl_multifit_nlinear.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_permutation.h> #include "common.c" #include "nielsen.c" /* * This module contains a high level driver for a general trust * region nonlinear least squares solver. This container handles * the computation of all of the quantities relevant to all trust * region methods, including: * * residual vector: f_k = f(x_k) * Jacobian matrix: J_k = J(x_k) * gradient vector: g_k = J_k^T f_k * scaling matrix: D_k */ typedef struct { size_t n; /* number of observations */ size_t p; /* number of parameters */ double delta; /* trust region radius */ double mu; /* LM parameter */ long nu; /* for updating LM parameter */ gsl_vector *diag; /* D = diag(J^T J) */ gsl_vector *x_trial; /* trial parameter vector */ gsl_vector *f_trial; /* trial function vector */ gsl_vector *workp; /* workspace, length p */ gsl_vector *workn; /* workspace, length n */ void *trs_state; /* workspace for trust region subproblem */ void *solver_state; /* workspace for linear least squares solver */ double avratio; /* current |a| / |v| */ /* tunable parameters */ gsl_multifit_nlinear_parameters params; } trust_state_t; static void * trust_alloc (const gsl_multifit_nlinear_parameters * params, const size_t n, const size_t p); static void trust_free(void *vstate); static int trust_init(void *vstate, const gsl_vector * swts, gsl_multifit_nlinear_fdf *fdf, const gsl_vector *x, gsl_vector *f, gsl_matrix *J, gsl_vector *g); static int trust_iterate(void *vstate, const gsl_vector *swts, gsl_multifit_nlinear_fdf *fdf, gsl_vector *x, gsl_vector *f, gsl_matrix *J, gsl_vector *g, gsl_vector *dx); static int trust_rcond(double *rcond, void *vstate); static double trust_avratio(void *vstate); static void trust_trial_step(const gsl_vector * x, const gsl_vector * dx, gsl_vector * x_trial); static double trust_calc_rho(const gsl_vector * f, const gsl_vector * f_trial, const gsl_vector * g, const gsl_matrix * J, const gsl_vector * dx, trust_state_t * state); static int trust_eval_step(const gsl_vector * f, const gsl_vector * f_trial, const gsl_vector * g, const gsl_matrix * J, const gsl_vector * dx, double * rho, trust_state_t * state); static double trust_scaled_norm(const gsl_vector *D, const gsl_vector *a); static void * trust_alloc (const gsl_multifit_nlinear_parameters * params, const size_t n, const size_t p) { trust_state_t *state; state = calloc(1, sizeof(trust_state_t)); if (state == NULL) { GSL_ERROR_NULL ("failed to allocate lm state", GSL_ENOMEM); } state->diag = gsl_vector_alloc(p); if (state->diag == NULL) { GSL_ERROR_NULL ("failed to allocate 
space for diag", GSL_ENOMEM); } state->workp = gsl_vector_alloc(p); if (state->workp == NULL) { GSL_ERROR_NULL ("failed to allocate space for workp", GSL_ENOMEM); } state->workn = gsl_vector_alloc(n); if (state->workn == NULL) { GSL_ERROR_NULL ("failed to allocate space for workn", GSL_ENOMEM); } state->x_trial = gsl_vector_alloc(p); if (state->x_trial == NULL) { GSL_ERROR_NULL ("failed to allocate space for x_trial", GSL_ENOMEM); } state->f_trial = gsl_vector_alloc(n); if (state->f_trial == NULL) { GSL_ERROR_NULL ("failed to allocate space for f_trial", GSL_ENOMEM); } state->trs_state = (params->trs->alloc)(params, n, p); if (state->trs_state == NULL) { GSL_ERROR_NULL ("failed to allocate space for trs state", GSL_ENOMEM); } state->solver_state = (params->solver->alloc)(n, p); if (state->solver_state == NULL) { GSL_ERROR_NULL ("failed to allocate space for solver state", GSL_ENOMEM); } state->n = n; state->p = p; state->delta = 0.0; state->params = *params; return state; } static void trust_free(void *vstate) { trust_state_t *state = (trust_state_t *) vstate; const gsl_multifit_nlinear_parameters *params = &(state->params); if (state->diag) gsl_vector_free(state->diag); if (state->workp) gsl_vector_free(state->workp); if (state->workn) gsl_vector_free(state->workn); if (state->x_trial) gsl_vector_free(state->x_trial); if (state->f_trial) gsl_vector_free(state->f_trial); if (state->trs_state) (params->trs->free)(state->trs_state); if (state->solver_state) (params->solver->free)(state->solver_state); free(state); } /* trust_init() Initialize trust region solver Inputs: vstate - workspace swts - sqrt(W) vector fdf - user callback functions x - initial parameter values f - (output) f(x) vector J - (output) J(x) matrix g - (output) J(x)' f(x) vector Return: success/error */ static int trust_init(void *vstate, const gsl_vector *swts, gsl_multifit_nlinear_fdf *fdf, const gsl_vector *x, gsl_vector *f, gsl_matrix *J, gsl_vector *g) { int status; trust_state_t *state = (trust_state_t *) vstate; const gsl_multifit_nlinear_parameters *params = &(state->params); double Dx; /* evaluate function and Jacobian at x and apply weight transform */ status = gsl_multifit_nlinear_eval_f(fdf, x, swts, f); if (status) return status; status = gsl_multifit_nlinear_eval_df(x, f, swts, params->h_df, params->fdtype, fdf, J, state->workn); if (status) return status; /* compute g = J^T f */ gsl_blas_dgemv(CblasTrans, 1.0, J, f, 0.0, g); /* initialize diagonal scaling matrix D */ (params->scale->init)(J, state->diag); /* compute initial trust region radius */ Dx = trust_scaled_norm(state->diag, x); state->delta = 0.3 * GSL_MAX(1.0, Dx); /* initialize LM parameter */ status = nielsen_init(J, state->diag, &(state->mu), &(state->nu)); if (status) return status; /* initialize trust region method solver */ { gsl_multifit_nlinear_trust_state trust_state; trust_state.x = x; trust_state.f = f; trust_state.g = g; trust_state.J = J; trust_state.diag = state->diag; trust_state.sqrt_wts = swts; trust_state.mu = &(state->mu); trust_state.params = params; trust_state.solver_state = state->solver_state; trust_state.fdf = fdf; trust_state.avratio = &(state->avratio); status = (params->trs->init)(&trust_state, state->trs_state); if (status) return status; } /* set default parameters */ state->avratio = 0.0; return GSL_SUCCESS; } /* trust_iterate() This function performs 1 iteration of the trust region algorithm. It calls a user-specified method for computing the next step (LM or dogleg), then tests if the computed step is acceptable. 
Args: vstate - trust workspace swts - data weights (NULL if unweighted) fdf - function and Jacobian pointers x - on input, current parameter vector on output, new parameter vector x + dx f - on input, f(x) on output, f(x + dx) J - on input, J(x) on output, J(x + dx) g - on input, g(x) = J(x)' f(x) on output, g(x + dx) = J(x + dx)' f(x + dx) dx - (output only) parameter step vector Return: 1) GSL_SUCCESS if we found a step which reduces the cost function 2) GSL_ENOPROG if 15 successive attempts were to made to find a good step without success 3) If a scaling matrix D is used, inputs and outputs are set to the unscaled quantities (ie: J and g) */ static int trust_iterate(void *vstate, const gsl_vector *swts, gsl_multifit_nlinear_fdf *fdf, gsl_vector *x, gsl_vector *f, gsl_matrix *J, gsl_vector *g, gsl_vector *dx) { int status; trust_state_t *state = (trust_state_t *) vstate; const gsl_multifit_nlinear_parameters *params = &(state->params); const gsl_multifit_nlinear_trs *trs = params->trs; gsl_multifit_nlinear_trust_state trust_state; gsl_vector *x_trial = state->x_trial; /* trial x + dx */ gsl_vector *f_trial = state->f_trial; /* trial f(x + dx) */ gsl_vector *diag = state->diag; /* diag(D) */ double rho; /* ratio actual_reduction/predicted_reduction */ int foundstep = 0; /* found step dx */ int bad_steps = 0; /* consecutive rejected steps */ /* store all state parameters needed by low level methods */ trust_state.x = x; trust_state.f = f; trust_state.g = g; trust_state.J = J; trust_state.diag = state->diag; trust_state.sqrt_wts = swts; trust_state.mu = &(state->mu); trust_state.params = params; trust_state.solver_state = state->solver_state; trust_state.fdf = fdf; trust_state.avratio = &(state->avratio); /* initialize trust region subproblem with this Jacobian */ status = (trs->preloop)(&trust_state, state->trs_state); if (status) return status; /* loop until we find an acceptable step dx */ while (!foundstep) { /* calculate new step */ status = (trs->step)(&trust_state, state->delta, dx, state->trs_state); /* occasionally the iterative methods (ie: CG Steihaug) can fail to find a step, * so in this case skip rho calculation and count it as a rejected step */ if (status == GSL_SUCCESS) { /* compute x_trial = x + dx */ trust_trial_step(x, dx, x_trial); /* compute f_trial = f(x + dx) */ status = gsl_multifit_nlinear_eval_f(fdf, x_trial, swts, f_trial); if (status) return status; /* check if step should be accepted or rejected */ status = trust_eval_step(f, f_trial, g, J, dx, &rho, state); if (status == GSL_SUCCESS) foundstep = 1; } else { /* an iterative TRS method failed to find a step vector */ rho = -1.0; } /* * update trust region radius: if rho is large, * then the quadratic model is a good approximation * to the objective function, enlarge trust region. * If rho is small (or negative), the model function * is a poor approximation so decrease trust region. This * can happen even if the step is accepted. 
*/ if (rho > 0.75) state->delta *= params->factor_up; else if (rho < 0.25) state->delta /= params->factor_down; if (foundstep) { /* step was accepted */ /* compute J <- J(x + dx) */ status = gsl_multifit_nlinear_eval_df(x_trial, f_trial, swts, params->h_df, params->fdtype, fdf, J, state->workn); if (status) return status; /* update x <- x + dx */ gsl_vector_memcpy(x, x_trial); /* update f <- f(x + dx) */ gsl_vector_memcpy(f, f_trial); /* compute new g = J^T f */ gsl_blas_dgemv(CblasTrans, 1.0, J, f, 0.0, g); /* update scaling matrix D */ (params->scale->update)(J, diag); /* step accepted, decrease LM parameter */ status = nielsen_accept(rho, &(state->mu), &(state->nu)); if (status) return status; bad_steps = 0; } else { /* step rejected, increase LM parameter */ status = nielsen_reject(&(state->mu), &(state->nu)); if (status) return status; if (++bad_steps > 15) { /* if more than 15 consecutive rejected steps, report no progress */ return GSL_ENOPROG; } } } return GSL_SUCCESS; } /* trust_iterate() */ static int trust_rcond(double *rcond, void *vstate) { int status; trust_state_t *state = (trust_state_t *) vstate; const gsl_multifit_nlinear_parameters *params = &(state->params); status = (params->solver->rcond)(rcond, state->solver_state); return status; } static double trust_avratio(void *vstate) { trust_state_t *state = (trust_state_t *) vstate; return state->avratio; } /* compute x_trial = x + dx */ static void trust_trial_step(const gsl_vector * x, const gsl_vector * dx, gsl_vector * x_trial) { size_t i, N = x->size; for (i = 0; i < N; i++) { double dxi = gsl_vector_get (dx, i); double xi = gsl_vector_get (x, i); gsl_vector_set (x_trial, i, xi + dxi); } } /* trust_calc_rho() Calculate ratio of actual reduction to predicted reduction. rho = actual_reduction / predicted_reduction actual_reduction = 1 - ( ||f+|| / ||f|| )^2 predicted_reduction = -2 g^T dx / ||f||^2 - ( ||J*dx|| / ||f|| )^2 = -2 fhat . 
beta - ||beta||^2 where: beta = J*dx / ||f|| Inputs: f - f(x) f_trial - f(x + dx) g - gradient J^T f J - Jacobian dx - proposed step, size p state - workspace Return: rho = actual_reduction / predicted_reduction If actual_reduction is < 0, return rho = -1 */ static double trust_calc_rho(const gsl_vector * f, const gsl_vector * f_trial, const gsl_vector * g, const gsl_matrix * J, const gsl_vector * dx, trust_state_t * state) { int status; const gsl_multifit_nlinear_parameters *params = &(state->params); const gsl_multifit_nlinear_trs *trs = params->trs; const double normf = gsl_blas_dnrm2(f); const double normf_trial = gsl_blas_dnrm2(f_trial); gsl_multifit_nlinear_trust_state trust_state; double rho; double actual_reduction; double pred_reduction; double u; /* if ||f(x+dx)|| > ||f(x)|| reject step immediately */ if (normf_trial >= normf) return -1.0; trust_state.x = NULL; trust_state.f = f; trust_state.g = g; trust_state.J = J; trust_state.diag = state->diag; trust_state.sqrt_wts = NULL; trust_state.mu = &(state->mu); trust_state.params = params; trust_state.solver_state = state->solver_state; trust_state.fdf = NULL; trust_state.avratio = &(state->avratio); /* compute numerator of rho (actual reduction) */ u = normf_trial / normf; actual_reduction = 1.0 - u*u; /* * compute denominator of rho (predicted reduction); this is calculated * inside each trust region subproblem, since it depends on the local * model used, which can vary according to each TRS */ status = (trs->preduction)(&trust_state, dx, &pred_reduction, state->trs_state); if (status) return -1.0; if (pred_reduction > 0.0) rho = actual_reduction / pred_reduction; else rho = -1.0; return rho; } /* trust_eval_step() Evaluate proposed step to determine if it should be accepted or rejected */ static int trust_eval_step(const gsl_vector * f, const gsl_vector * f_trial, const gsl_vector * g, const gsl_matrix * J, const gsl_vector * dx, double * rho, trust_state_t * state) { int status = GSL_SUCCESS; const gsl_multifit_nlinear_parameters *params = &(state->params); if (params->trs == gsl_multifit_nlinear_trs_lmaccel) { /* reject step if acceleration is too large compared to velocity */ if (state->avratio > params->avmax) status = GSL_FAILURE; } /* compute rho */ *rho = trust_calc_rho(f, f_trial, g, J, dx, state); if (*rho <= 0.0) status = GSL_FAILURE; return status; } /* compute || diag(D) a || */ static double trust_scaled_norm(const gsl_vector *D, const gsl_vector *a) { const size_t n = a->size; double e2 = 0.0; size_t i; for (i = 0; i < n; ++i) { double Di = gsl_vector_get(D, i); double ai = gsl_vector_get(a, i); double u = Di * ai; e2 += u * u; } return sqrt (e2); } static const gsl_multifit_nlinear_type trust_type = { "trust-region", trust_alloc, trust_init, trust_iterate, trust_rcond, trust_avratio, trust_free }; const gsl_multifit_nlinear_type *gsl_multifit_nlinear_trust = &trust_type;
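The trust.c driver above is not called directly; it is registered as gsl_multifit_nlinear_trust and exercised through the public gsl_multifit_nlinear interface. Below is a minimal caller sketch, assuming the standard GSL 2.x API (alloc/init/driver/position); it fits a straight line a*t + b to three hard-coded points, which are illustrative values only, and leaves the Jacobian to finite differences.

#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_multifit_nlinear.h>

/* residuals f_i = a*t_i + b - y_i for a tiny hard-coded data set */
static int
line_f(const gsl_vector *x, void *params, gsl_vector *f)
{
  static const double t[3] = { 0.0, 1.0, 2.0 };
  static const double y[3] = { 1.1, 2.9, 5.2 };
  double a = gsl_vector_get(x, 0);
  double b = gsl_vector_get(x, 1);
  size_t i;

  (void) params;
  for (i = 0; i < 3; ++i)
    gsl_vector_set(f, i, a * t[i] + b - y[i]);

  return GSL_SUCCESS;
}

int main(void)
{
  const size_t n = 3, p = 2;
  gsl_multifit_nlinear_parameters fdf_params =
    gsl_multifit_nlinear_default_parameters();
  const gsl_multifit_nlinear_type *T = gsl_multifit_nlinear_trust;
  gsl_multifit_nlinear_workspace *w =
    gsl_multifit_nlinear_alloc(T, &fdf_params, n, p);
  gsl_multifit_nlinear_fdf fdf;
  gsl_vector *x0 = gsl_vector_alloc(p);
  int info;

  fdf.f = line_f;
  fdf.df = NULL;    /* finite-difference Jacobian */
  fdf.fvv = NULL;   /* no geodesic acceleration */
  fdf.n = n;
  fdf.p = p;
  fdf.params = NULL;

  gsl_vector_set(x0, 0, 0.0);   /* initial guess a = 0 */
  gsl_vector_set(x0, 1, 0.0);   /* initial guess b = 0 */

  gsl_multifit_nlinear_init(x0, &fdf, w);
  gsl_multifit_nlinear_driver(50, 1e-8, 1e-8, 1e-8, NULL, NULL, &info, w);

  printf("a = %g, b = %g\n",
         gsl_vector_get(gsl_multifit_nlinear_position(w), 0),
         gsl_vector_get(gsl_multifit_nlinear_position(w), 1));

  gsl_vector_free(x0);
  gsl_multifit_nlinear_free(w);
  return 0;
}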
## Choose images from scene category import numpy as np import pandas as pd d = {} lst=[] data=pd.read_csv("E:\\fyp data\\ADEK-20\\uniqueLabels.txt",sep=" ") for point in data.values: (key,val)=point[0],point[1] d[key]=val category_data = pd.read_csv('E:\\fyp data\\ADEK-20\\scene_category.txt', sep=" ") arr=[] for point in category_data.values: if d[point[1]]==1 or d[point[1]]==2: arr.append(point[0]) print(len(arr)) with open("E:\\fyp data\\ADEK-20\\result1.txt","w+") as file: file.write('\n'.join(arr))
module Language.Reflection import public Language.Reflection.TT import public Language.Reflection.TTImp %default total ||| Elaboration scripts ||| Where types/terms are returned, binders will have unique, if not ||| necessarily human readabe, names export data Elab : Type -> Type where Pure : a -> Elab a Bind : Elab a -> (a -> Elab b) -> Elab b Fail : FC -> String -> Elab a LogMsg : String -> Nat -> String -> Elab () LogTerm : String -> Nat -> String -> TTImp -> Elab () -- Elaborate a TTImp term to a concrete value Check : TTImp -> Elab expected -- Quote a concrete expression back to a TTImp Quote : (0 _ : val) -> Elab TTImp -- Elaborate under a lambda Lambda : (0 x : Type) -> {0 ty : x -> Type} -> ((val : x) -> Elab (ty val)) -> Elab ((val : x) -> (ty val)) -- Get the current goal type, if known -- (it might need to be inferred from the solution) Goal : Elab (Maybe TTImp) -- Get the names of local variables in scope LocalVars : Elab (List Name) -- Generate a new unique name, based on the given string GenSym : String -> Elab Name -- Put a name in the current namespace InCurrentNS : Name -> Elab Name -- Get the types of every name which matches. -- There may be ambiguities - returns a list of fully explicit names -- and their types. If there's no results, the name is undefined. GetType : Name -> Elab (List (Name, TTImp)) -- Get the type of a local variable GetLocalType : Name -> Elab TTImp -- Get the constructors of a data type. The name must be fully resolved. GetCons : Name -> Elab (List Name) -- Check a group of top level declarations Declare : List Decl -> Elab () export Functor Elab where map f e = Bind e $ Pure . f export Applicative Elab where pure = Pure f <*> a = Bind f (<$> a) export Monad Elab where (>>=) = Bind ||| Report an error in elaboration export fail : String -> Elab a fail = Fail EmptyFC export failAt : FC -> String -> Elab a failAt = Fail ||| Write a log message, if the log level is >= the given level export logMsg : String -> Nat -> String -> Elab () logMsg = LogMsg ||| Write a log message and a rendered term, if the log level is >= the given level export logTerm : String -> Nat -> String -> TTImp -> Elab () logTerm = LogTerm ||| Log the current goal type, if the log level is >= the given level export logGoal : String -> Nat -> String -> Elab () logGoal str n msg = do g <- Goal case g of Nothing => pure () Just t => logTerm str n msg t ||| Check that some TTImp syntax has the expected type ||| Returns the type checked value export check : TTImp -> Elab expected check = Check ||| Return TTImp syntax of a given value export quote : (0 _ : val) -> Elab TTImp quote = Quote ||| Build a lambda expression export lambda : (0 x : Type) -> {0 ty : x -> Type} -> ((val : x) -> Elab (ty val)) -> Elab ((val : x) -> (ty val)) lambda = Lambda ||| Get the goal type of the current elaboration export goal : Elab (Maybe TTImp) goal = Goal ||| Get the names of the local variables in scope export localVars : Elab (List Name) localVars = LocalVars ||| Generate a new unique name export genSym : String -> Elab Name genSym = GenSym ||| Given a name, return the name decorated with the current namespace export inCurrentNS : Name -> Elab Name inCurrentNS = InCurrentNS ||| Given a possibly ambiguous name, get all the matching names and their types export getType : Name -> Elab (List (Name, TTImp)) getType = GetType ||| Get the type of a local variable export getLocalType : Name -> Elab TTImp getLocalType = GetLocalType ||| Get the constructors of a fully qualified data type name export getCons : 
Name -> Elab (List Name) getCons = GetCons ||| Make some top level declarations export declare : List Decl -> Elab () declare = Declare
The centre of the two fleets was divided by two separate squadrons of the British line: the forward division under admirals Benjamin Caldwell and George Bowyer and the rear under Lord Howe. While Howe in Queen Charlotte was engaging the French closely, his subordinates in the forward division were less active. Instead of moving in on their opposite numbers directly, the forward division sedately closed with the French in line ahead formation, engaging in a long-distance duel which did not prevent their opponents from harassing the embattled Defence just ahead of them. Of all the ships in this squadron only HMS Invincible, under Thomas Pakenham, ranged close to the French lines. Invincible was badly damaged by her lone charge but managed to engage the larger Juste. HMS Barfleur under Bowyer did later enter the action, but Bowyer was not present, having lost a leg in the opening exchanges.
-- --------------------------------------------------------------------- -- Exercise 1. Perform the following actions: -- 1. Import the ring theory. -- 2. Create the namespace my_ring -- 3. Declare R as a variable ranging over rings. -- 4. Declare a as a variable over R. -- ---------------------------------------------------------------------- import algebra.ring -- 1 namespace my_ring -- 2 variables {R : Type*} [ring R] -- 3 variables (a : R) -- 4 -- --------------------------------------------------------------------- -- Exercise 2. Prove that -- 1 + 1 = 2 -- ---------------------------------------------------------------------- lemma one_add_one_eq_two : 1 + 1 = (2 : R) := by refl -- --------------------------------------------------------------------- -- Exercise 3. Prove that -- 2 * a = a + a -- ---------------------------------------------------------------------- theorem two_mul : 2 * a = a + a := calc 2 * a = (1 + 1) * a : by rw one_add_one_eq_two ... = 1 * a + 1 * a : by rw add_mul ... = a + a : by rw one_mul end my_ring
using DiffEqBiological using Latexify using Test @reaction_func hill2(x, v, k) = v*x^2/(k^2 + x^2) rn = @reaction_network MyRnType begin hill2(y, v_x, k_x), 0 --> x p_y, 0 --> y (d_x, d_y), (x, y) --> 0 (r_b, r_u), x ↔ y end v_x k_x p_y d_x d_y r_b r_u @test latexify(rn; env=:chem) == raw"\begin{align} \require{mhchem} \ce{ \varnothing &->[\frac{v_{x} \cdot y^{2}}{k_{x}^{2} + y^{2}}] x}\\ \ce{ \varnothing &->[p_{y}] y}\\ \ce{ x &->[d_{x}] \varnothing}\\ \ce{ y &->[d_{y}] \varnothing}\\ \ce{ x &<=>[r_{b}][r_{u}] y} \end{align} " @test latexify(rn; env=:chem, expand=false) == raw"\begin{align} \require{mhchem} \ce{ \varnothing &->[\mathrm{hill2}\left( y, v_{x}, k_{x} \right)] x}\\ \ce{ \varnothing &->[p_{y}] y}\\ \ce{ x &->[d_{x}] \varnothing}\\ \ce{ y &->[d_{y}] \varnothing}\\ \ce{ x &<=>[r_{b}][r_{u}] y} \end{align} " @test md(rn; env=:chem) == raw"\begin{align} \require{mhchem} \ce{ \varnothing &->[\frac{v_{x} \cdot y^{2}}{k_{x}^{2} + y^{2}}] x}\\ \ce{ \varnothing &->[p_{y}] y}\\ \ce{ x &->[d_{x}] \varnothing}\\ \ce{ y &->[d_{y}] \varnothing}\\ \ce{ x &<=>[r_{b}][r_{u}] y} \end{align} " @test md(rn; env=:chem, expand=false, mathjax=false, starred=true, double_linebreak=true) == raw"\begin{align*} \ce{ \varnothing &->[\mathrm{hill2}\left( y, v_{x}, k_{x} \right)] x}\\\\ \ce{ \varnothing &->[p_{y}] y}\\\\ \ce{ x &->[d_{x}] \varnothing}\\\\ \ce{ y &->[d_{y}] \varnothing}\\\\ \ce{ x &<=>[r_{b}][r_{u}] y} \end{align*} " ode = @reaction_network InducedDegradation begin (d_F, d_Ff, d_R), (F, Ff, R) --> 0 # degradations (p_F, Ff), 0 --> (F, R) # productions (r_b * i, r_u), F ↔ Ff # bindin/unbinding end i p_F d_F r_b r_u d_Ff d_R # @Latexify.generate_test md(ode; env=:chem) @test md(ode; env=:chem) == raw"\begin{align} \require{mhchem} \ce{ F &->[d_{F}] \varnothing}\\ \ce{ Ff &->[d_{Ff}] \varnothing}\\ \ce{ R &->[d_{R}] \varnothing}\\ \ce{ \varnothing &->[p_{F}] F}\\ \ce{ \varnothing &->[Ff] R}\\ \ce{ F &<=>[r_{b} \cdot i][r_{u}] Ff} \end{align} " # @test_throws MethodError latexify(rn; env=:arrow, bad_kwarg="should error")
% Options for packages loaded elsewhere \PassOptionsToPackage{unicode}{hyperref} \PassOptionsToPackage{hyphens}{url} % \documentclass[ ]{article} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provide euro and other symbols \else % if luatex or xetex \usepackage{unicode-math} \defaultfontfeatures{Scale=MatchLowercase} \defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1} \fi % Use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} \IfFileExists{microtype.sty}{% use microtype if available \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \makeatletter \@ifundefined{KOMAClassName}{% if non-KOMA class \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt}} }{% if KOMA class \KOMAoptions{parskip=half}} \makeatother \usepackage{xcolor} \IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available \IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}} \hypersetup{ hidelinks, pdfcreator={LaTeX via pandoc}} \urlstyle{same} % disable monospaced font for URLs \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} % Set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \date{} \begin{document} \hypertarget{user-documentation}{% \section{User Documentation}\label{user-documentation}} \hypertarget{capability-summary}{% \subsection{Capability Summary}\label{capability-summary}} Our web application has these capabilities: \begin{itemize} \tightlist \item create and schedule NFT drops \item reservation and minting of NFTs after it dropped \item allow users to buy and view NFTs in their own collection \item direct interaction with the blockchain, retrieving and displaying up-to-date NFT-Drop and Blockchain ABI information \item minted NFTs usable even without the webservice \item announcement creation, editing and deletion \item User management (signup, login/logout, password change, password reset, add to verified partners team, add to admins team) \item Wallet connection \end{itemize} \hypertarget{unregistered-user}{% \subsection{Unregistered User}\label{unregistered-user}} An unregistered user has the least abilities and can take a look at NFT-Drops and at the announcements but has no profile. At the moment, potentially everyone is able to connect their wallet to the application and reserve a buying position on an NFT Drop, even unregistered users. \hypertarget{sign-up}{% \subsubsection{sign up}\label{sign-up}} One can create a new account on the signup page. Enter a username, email and password, and create a new account. 
\begin{figure} \centering \includegraphics{images/Sign_up_page.png} \caption{Sign up} \end{figure}\newpage The email address is minimally checked for a certain syntactic format. It must be unique and cannot be shared by multiple accounts. The code in this project won't use your email address for any unsolicited purposes or marketing except for security purposes which are explicitly initiated by users. The password must contain at least 6 characters. There is no second password check field; it wouldn't help much if you could just copy and paste your password into both fields. The form will help you with a hint if something didn't work. Same for the login page. \begin{figure} \centering \includegraphics{images/invalid_login.png} \caption{invalid login} \end{figure}\newpage This image shows an example of an invalid login password. \hypertarget{email-confirmation}{% \paragraph{email confirmation}\label{email-confirmation}} After creating an account, an email is automatically sent to the email address specified at sign up. The address is verified by clicking on the link in the received email. The email (in German) will look something like this: \begin{figure} \centering \includegraphics{images/account_confirmation_email.png} \caption{Confirm mail} \end{figure}\newpage \hypertarget{log-in}{% \subsubsection{Log In}\label{log-in}} They can log in to their account by accessing the login page. The application header features a login button in the top right (second from left). \begin{figure} \centering \includegraphics{images/header.png} \caption{Header} \end{figure} On the login page, they need to enter their email and password. \begin{figure} \centering \includegraphics{images/login_page.png} \caption{Login} \end{figure}\newpage The lower-right corner will redirect you to the signup page in case you notice that you don't actually have an account ;-). \hypertarget{reset-password}{% \paragraph{Reset Password}\label{reset-password}} If you think you forgot your password, you can send a password-reset request to the email address that is linked to the account. To access this page, click on \texttt{Forget\ password?} in the lower-left corner of the login page. \begin{figure} \centering \includegraphics{images/password_reset_request.png} \caption{Request Password reset} \end{figure}\newpage The email contains a link which allows a one-time password recovery. (German in our case.) \begin{figure} \centering \includegraphics{images/reset_password_mail.png} \caption{Password reset email} \end{figure}\newpage While you can use as many different password reset links as you like until they expire after a finite time, each link can reset the password only once. \begin{figure} \centering \includegraphics{images/password_reset.png} \caption{Password Reset} \end{figure}\newpage \hypertarget{nft-drops}{% \subsubsection{NFT Drops}\label{nft-drops}} NFT Drops are collections of random NFTs which have a countdown. During the countdown you can reserve a ``buying position'' that you can use after the countdown to buy them. The reservation has no binding character but is required for being able to buy them later. Buyers receive a random NFT from that collection that they potentially can sell to others or use for whatever legal purpose. \textbf{Be aware that buying NFTs will not automatically transfer legal or ownership rights of any real or metaverse space or estate to you!
As of 2022, the meaning of NFTs is subject to community conventions and software application logic and transfers no legal rights.} Users can view the NFTs of an NFT Drop by clicking on an entry in the NFT Drop container on the landing page. \begin{figure} \centering \includegraphics{images/NFT_Drop_container.png} \caption{NFT Drop container} \end{figure}\newpage This leads to the NFT Drop page. An example NFT Drop (for Nürnburg NFTs) looks like this: \begin{figure} \centering \includegraphics{images/NFT_Drop_page.png} \caption{NFT Drop page} \end{figure} It shows example NFTs as cards. \begin{figure} \centering \includegraphics{images/NFTs.png} \caption{NFTs} \end{figure}\newpage We also designed an individual NFT page with specific information, which you can see in the \texttt{/assets/README.md}, but we didn't implement it because other things were more important. If you haven't connected your wallet so far, you can do so now if you want to buy some. You can only reserve up to 5\% of the total number of NFTs. Not a bug, it's a feature. \hypertarget{buying-nfts}{% \paragraph{Buying NFTs}\label{buying-nfts}} \textbf{CAUTION! Although anyone is offered the option to buy NFTs of stale NFT Drops, not having registered a buying position during the countdown will make the transaction fail and waste your money!!} \textbf{Also, don't wait too long before buying them, because Drop creators can set an arbitrary time limit on how long your buying position remains valid after the countdown has reached zero!} Select the number of NFTs that you have reserved to buy earlier. \begin{figure} \centering \includegraphics{images/NFT_drop_dashboard.png} \caption{amount of NFTs} \end{figure}\newpage When clicking on \texttt{Buy\ NFTs!} on a stale NFT Drop, MetaMask will open where you can confirm the transaction. If you are sure you have a valid buying position and entered a valid number of NFTs to buy, \emph{skip the red warning about the transaction being expected to fail}. \begin{figure} \centering \includegraphics{images/metamask_confirmation.png} \caption{skip metamask warning} \end{figure}\newpage Before going ahead you probably want to adapt your gas fees for the transaction. Click on the blue \texttt{edit} link that appeared after dismissing the warning. Then you'll see this intermediate gas preview: \begin{figure} \centering \includegraphics{images/suggested_gas.png} \caption{suggested gas} \end{figure}\newpage Ignore the yellow warning on the gas preview screen. The default gas estimation is quite exaggerated. Adapt your gas fees in MetaMask: \begin{figure} \centering \includegraphics{images/edit_gas.png} \caption{change gas} \end{figure}\newpage There are three options. \begin{itemize} \item gas limit: The gas amount estimates how much effort (work) is needed to execute and verify the transaction. For buying NFTs from us, \(40 000\) is a good upper estimate for the transaction. The more NFTs you buy, the more work might be required to execute the Smart Contract. If you buy a huge number of NFTs, say \(10 000\), this would require more gas than a purchase of just \(100\), and you should maybe estimate with a higher gas limit than \(40000\). You should not be too stingy here. If you estimate too low, the transaction will fail and you have wasted your money. But if you estimate too high, you will only pay for the gas that was actually used. \item priority fee: This is like a ``salary'' for the blockchain workers (``miners''), which you are ready to pay for verifying your transaction.
This fee is paid per single gas unit of work. Of course, a more miserly payment gives workers less incentive, particularly if you want to burden them with a higher gas limit, and they will let your transaction wait longer before they pick it up, if they pick it up at all. A value of \(2.5\) has been shown to provide very good incentives and will finish your transaction within 10 seconds. \item max fee: This is the upper limit for priority fee + base fee that you are willing to pay AT MOST for each unit of gas. It caps the amount of work in a second way. The base fee is a fixed cost which is negligible (at least for the Kovan testnet). \end{itemize} When your account can cover the chosen gas fees and you are satisfied, you can save the settings. Then confirm the transaction or reject it. \begin{figure} \centering \includegraphics{images/confirm_gas.png} \caption{confirm gas} \end{figure}\newpage \hypertarget{view-announcements}{% \subsubsection{View Announcements}\label{view-announcements}} Announcements are displayed on the landing page and the FAQ page. \begin{figure} \centering \includegraphics{images/announcements_sidebar.png} \caption{announcement sidebar} \end{figure}\newpage They can click on the announcement to be redirected to the dedicated announcement page. \begin{figure} \centering \includegraphics{images/announcement_page.png} \caption{announcement page} \end{figure}\newpage \hypertarget{faq-page}{% \subsubsection{FAQ page}\label{faq-page}} On the FAQ page users can get answers to frequently asked questions. A link is available to the right of the project logo, at the very left of the application header. The FAQ page lists some important questions for newbies. Clicking on a question will expand an answer underneath it, and the question is emphasized in green. The answer can be collapsed back to normal by clicking an expanded question again. \begin{figure} \centering \includegraphics{images/faq.png} \caption{FAQ} \end{figure}\newpage The \texttt{all} button will collapse all questions if and only if all questions are expanded. Otherwise it will expand all. The \texttt{multi-mode} button toggles the expansion mode. By default, only one question is expanded at a time. With \texttt{multi-mode}, you can expand multiple. \hypertarget{regular-users}{% \subsection{Regular Users}\label{regular-users}} If you sign up with a new account, you will be a regular user. There is also an email confirmation mechanism which is unfortunately not required at the moment to use your account. A regular user can do a subset of what Partners or Admins can do. In addition to what unregistered users can do, they can view their NFT collection and can be promoted to become Partners or Admins. In potential future development, additional features for registered users and limitations for unregistered users are possible. One vision was to incorporate a communication platform for NFT enthusiasts around certain suppliers (or artists) to offer an all-in-one solution without multiple accounts. \hypertarget{user-profile}{% \subsubsection{User Profile}\label{user-profile}} In the profile, users can see basic information about their account. \begin{figure} \centering \includegraphics{images/user_profile.png} \caption{Profile} \end{figure}\newpage The profile picture doesn't work?! At the time of writing, the profile image uses a static image and cannot be changed. There is a PR (announcement images) which contains a reusable EditableImage Component for (untested) uploading, updating and showing the image. At the bottom of the page, you can end the user session with the red \texttt{logout} button.
There is currently no simple GUI way implemented to delete user accounts. \hypertarget{re-request-email-confirmation}{% \paragraph{Re-request email confirmation}\label{re-request-email-confirmation}} This feature exists because email confirmation is not necessary yet for using the account. Improved user management should remove this feature later. \begin{figure} \centering \includegraphics{images/resend_email_confirm.png} \caption{resend email confirmation} \end{figure}\newpage An email confirmation can be re-requested by clicking on the ``resent email verification'' link at the top of the profile page. It is not displayed if you are already verified. Instead, you will see a green checkmark in the password section. \hypertarget{profiles-nft-collection}{% \paragraph{Profile's NFT Collection}\label{profiles-nft-collection}} You can access your collection either by clicking on \texttt{My\ Collection} on the right side of the application header (when logged in) \begin{figure} \centering \includegraphics{images/admin_header.png} \caption{Admin Headerbar} \end{figure} or by clicking on the button \texttt{NFT\ Collection} under the image in your profile (see profile image above). If you didn't connect your wallet to the application recently, then you need to click on the blue \texttt{Connect\ MetaMask\ Wallet} button under the profile's statistics. \begin{figure} \centering \includegraphics{images/NFT_Collection.png} \caption{NFT Collection} \end{figure}\newpage If it can load your NFTs successfully, it will show an NFT-card container like the one used on the NFT Drop page. \hypertarget{password-change}{% \paragraph{Password Change}\label{password-change}} They can change their password within the profile page. \begin{figure} \centering \includegraphics{images/profile_change_password.png} \caption{Password change} \end{figure} In the \texttt{Change\ Password} section, enter your old and a new password. Repeat your new password for confirmation. Click on \texttt{save\ new\ password} below the text fields to activate the change. If it doesn't work, it will give you helpful error messages about what didn't work. \begin{figure} \centering \includegraphics{images/invalid_password_change.png} \caption{Invalid Password Change} \end{figure}\newpage The image shows an example where the user didn't provide the correct old password. \hypertarget{connect-crypto-wallet-with-account}{% \paragraph{Connect Crypto Wallet with account}\label{connect-crypto-wallet-with-account}} They can connect their ETH wallet by accessing the profile. Currently, only MetaMask is supported. To this end, you need to click on the connect wallet button \begin{figure} \centering \includegraphics{images/profile_connect_wallet.png} \caption{connect meta mask} \end{figure} and confirm that you want to connect your wallet in MetaMask. \begin{figure} \centering \includegraphics{images/metamask_wallet_connection.png} \caption{connect meta mask2} \end{figure}\newpage \begin{figure} \centering \includegraphics{images/metamask_wallet_connection2.png} \caption{connect meta mask3} \end{figure}\newpage You can always trust us. \hypertarget{partner-features}{% \subsection{Partner Features}\label{partner-features}} Only Partners are able to successfully create NFT Drops. For this ability, they need to be registered on the blockchain, but unlike Admins they might not have the same administrative power.
\hypertarget{nft-drop-creation}{% \subsubsection{NFT Drop Creation}\label{nft-drop-creation}} On their profile page, they have an additional button which allows them to access the drop creation page. \begin{figure} \centering \includegraphics{images/profile_partner_section.png} \caption{Partner profile section} \end{figure}\newpage The Drop Creation Page is tailored to make the whole process fairly easy: non-technical users can create a drop with just clicks and copy-pasting of URLs. \begin{figure} \centering \includegraphics{images/nft_drop_creation_page.png} \caption{NFT Drop Creation Page} \end{figure}\newpage You mainly need to set the NFT (Drop) name and price, a countdown, a valid buying timespan (after the drop countdown) and of course the NFT images. If you are done, click on the green \texttt{confirm\ entered\ data} button at the bottom of the page to arrive at the confirmation where you check all your details for correctness. \begin{figure} \centering \includegraphics{images/nft_drop_creation_confirmation.png} \caption{NFT Drop creation confirmation} \end{figure}\newpage You can go back to the editing form by clicking on the rather hidden white cross in the upper right corner. You have to scroll up to the top to see it. If you are fine with the displayed settings, click on the blue \texttt{connect\ Wallet} button on the left and afterwards on the green \texttt{blockchain} button, which opens up MetaMask to confirm the actual transaction which will create the NFT Drop. \begin{figure} \centering \includegraphics{images/drop_creation_confirm_question.png} \caption{NFT Drop confirmation question} \end{figure}\newpage Any change to the blockchain like the NFT Drop creation is a transaction for which you need to pay gas fees. \textbf{You can estimate with a gas limit of \(50000\).} \hypertarget{admin-features}{% \subsection{Admin Features}\label{admin-features}} Regular Admins (which aren't also verified Partners) cannot create drops but can manage users and, most importantly, create announcements ;-). \hypertarget{admin-area-51}{% \subsubsection{Admin Area 51}\label{admin-area-51}} They can access a transcendental area by clicking the Admin button on the right side of the Admin's application header. \begin{figure} \centering \includegraphics{images/admin_header.png} \caption{Admin Header} \end{figure} Users that try to access that URL without Admin rights won't see anything. Mysterious! This is not just made up, here it is: \begin{figure} \centering \includegraphics{images/admin_area.png} \caption{Admin Area} \end{figure}\newpage The Admin Area allows for inviting or removing users from certain teams. Only Admins with Partner status can fiddle around with the blockchain. \hypertarget{general}{% \subsubsection{General}\label{general}} \hypertarget{creation-deletion-and-modification-of-announcements}{% \paragraph{Creation, deletion and modification of Announcements}\label{creation-deletion-and-modification-of-announcements}} As an admin, you can create announcements. Announcements are news messages which can be used to notify visitors of the site about upcoming or interesting changes. All other users can view announcements, e.g.~on the landing page. For admins, extra buttons are displayed. \begin{figure} \centering \includegraphics{images/announcements_sidebar_admin.png} \caption{Admin announcement view} \end{figure}\newpage Clicking ``Delete'' will instantly kill the announcement without a question.
``Edit'' will lead you to the regular announcement page, which however is enhanced with extra functionality for Admins so that announcements can be edited. You can reach this page either by clicking on a link in the announcement sidebar when it is embedded into a page or by using the \texttt{create\ and\ edit\ announcements} button which you can see in the above image of the Admin area. \begin{figure} \centering \includegraphics{images/announcement_edit.png} \caption{Admin Announcement page} \end{figure}\newpage You can either create a brand-new announcement by filling out the text fields in the announcement editor at the top of the page or you can edit existing announcements to update or delete them. \begin{figure} \centering \includegraphics{images/create_new_announcement.png} \caption{Admin new announcement} \end{figure}\newpage \hypertarget{backend---team-database-management}{% \subsubsection{Backend - Team Database Management}\label{backend---team-database-management}} This section contains database operations with regard to the user teams. \hypertarget{inviting-and-editing-admins-team}{% \paragraph{Inviting and editing Admins Team}\label{inviting-and-editing-admins-team}} Admins can change Admin team associations by entering the email address of the person to which an operation should be applied. Then click on the \texttt{SEARCH\ USER} button emphasized in blue. \begin{figure} \centering \includegraphics{images/adding_admins.png} \caption{Add Admins} \end{figure} It will search for the email address in the database and, based on the result, it will offer to add the email address when it is not found, or to remove it when it is found. The email address acts as a stand-in for the account that is associated with it. \begin{figure} \centering \includegraphics{images/invite_new_admin.png} \caption{admin invitation} \end{figure}\newpage Before you can apply the operation, you first need to check the checkbox for confirmation to prevent accidental clicks on the button. For an invitation, it will send an email to the provided email address, notifying the person about the invitation, which they can accept. You can also remove Admins, including yourself! \begin{figure} \centering \includegraphics{images/remove_existing_admin.png} \caption{admin removal} \end{figure}\newpage Let this be a lesson and don't invite people as Admins who might want to grab power and kick you from your Admin status! \hypertarget{inviting-and-editing-partners-team}{% \paragraph{Inviting and editing Partners Team}\label{inviting-and-editing-partners-team}} As a verified Partner and Admin, you can invite new Partners. There is no more powerful creature out there in ``NFT The World'' :-). Admins without Partner status cannot add new Partners to the database or remove existing ones. \begin{figure} \centering \includegraphics{images/admins_arent_partners.png} \caption{Only Partners can manage Partners} \end{figure}\newpage Otherwise, as a Partner, you can add new Partners in the same way as new Admins can be added. \begin{figure} \centering \includegraphics{images/add_partners.png} \caption{Adding Partners} \end{figure}\newpage \hypertarget{contract---smart-contract-interaction}{% \subsubsection{Contract - Smart Contract interaction}\label{contract---smart-contract-interaction}} In this section, special blockchain transactions can be initiated which append live blockchain data. For security, the Partners association is saved directly on the blockchain.
\hypertarget{creating-an-nft-drop}{% \paragraph{Creating an NFT Drop}\label{creating-an-nft-drop}} This is basically just a convenient shortcut for the additional profile button which can be used by Partners with Admin status. Read more about NFT Drop creation in the Partners Capabilities section. \begin{figure} \centering \includegraphics{images/drop_creation_admin_button.png} \caption{NFT Drop Creation Admin Button} \end{figure} \hypertarget{adding-partnersadmins}{% \paragraph{Adding Partners/Admins}\label{adding-partnersadmins}} Well, actually every Admin can add Partners and thus themselves as well. \begin{figure} \centering \includegraphics{images/Add_partner.png} \caption{Add a partner} \end{figure}\newpage Select which team should be modified and, below that, the operation to apply to a (potential) team member. Then enter the hexadecimal address of the wallet that should obtain Partner status. A registered user logged in with that wallet will be entitled to the capabilities of verified Partners! Click on the emphasized blue button saying \texttt{ADD\ USER\ TO\ CONTRACT\ PARTNER\ TEAM} to conduct the operation. Keep in mind that this is a blockchain transaction and requires paying a gas fee. MetaMask will open and prompt you for confirmation. You can estimate the gas limit with \(30000\). \end{document}
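As a back-of-the-envelope check of the gas-fee guidance in the buying section above: assuming the MetaMask priority fee of 2.5 is given in gwei (the document does not state the unit) and neglecting the base fee, which the document calls negligible, the worst-case cost of an NFT purchase with the suggested gas limit works out to

\[ \text{max cost} \;\le\; \text{gas limit} \times \text{max fee per gas} \;\approx\; 40\,000 \times 2.5\ \text{gwei} \;=\; 100\,000\ \text{gwei} \;=\; 10^{-4}\ \text{ETH}. \]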
/* stable/stable_pdf.c * * Wrapper functions for GSL routines for random sample generation of * alpha-stable random variables. * * Copyright (C) 2013. Javier Royuela del Val * Federico Simmross Wattenberg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 3 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; If not, see <http://www.gnu.org/licenses/>. * * * Javier Royuela del Val. * E.T.S.I. Telecomunicación * Universidad de Valladolid * Paseo de Belén 15, 47002 Valladolid, Spain. * [email protected] */ #include "stable.h" #include <gsl/gsl_randist.h> void stable_rnd_seed(StableDist * dist, unsigned long int s) { gsl_rng_set(dist->gslrand, s); } inline double stable_rnd_point(StableDist *dist) { return dist->mu_1 + gsl_ran_levy_skew(dist->gslrand, dist->sigma, dist->alpha, dist->beta); } void stable_rnd(StableDist *dist, double *rnd, unsigned int n) { //double *rnd; int i; //rnd = (double*)malloc(n*sizeof(double)); if (rnd == NULL) { perror("stable_rnd: NULL output pointer"); return; } for(i=0;i<n;i++) { rnd[i]=stable_rnd_point(dist); } return; }
[STATEMENT] lemma R\<^sub>C_of_simp: assumes rr: "secure_refinement \<R>\<^sub>A \<R> P" shows "(\<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> R\<^sub>C_of \<R>\<^sub>A \<R> P = ((\<exists>c\<^sub>1\<^sub>A c\<^sub>2\<^sub>A. (\<langle>c\<^sub>1\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>1\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C) \<in> \<R> \<and> (\<langle>c\<^sub>2\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>2\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> \<R> \<and> (\<langle>c\<^sub>1\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>1\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>2\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>2\<^sub>C\<rangle>\<^sub>A) \<in> \<R>\<^sub>A) \<and> conc.low_mds_eq mds\<^sub>C mem\<^sub>1\<^sub>C mem\<^sub>2\<^sub>C \<and> (\<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> P)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ((\<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> R\<^sub>C_of \<R>\<^sub>A \<R> P) = ((\<exists>c\<^sub>1\<^sub>A c\<^sub>2\<^sub>A. (\<langle>c\<^sub>1\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>1\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C) \<in> \<R> \<and> (\<langle>c\<^sub>2\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>2\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> \<R> \<and> (\<langle>c\<^sub>1\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>1\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>2\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>2\<^sub>C\<rangle>\<^sub>A) \<in> \<R>\<^sub>A) \<and> conc.low_mds_eq mds\<^sub>C mem\<^sub>1\<^sub>C mem\<^sub>2\<^sub>C \<and> (\<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> P) [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: secure_refinement \<R>\<^sub>A \<R> P goal (1 subgoal): 1. ((\<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> R\<^sub>C_of \<R>\<^sub>A \<R> P) = ((\<exists>c\<^sub>1\<^sub>A c\<^sub>2\<^sub>A. 
(\<langle>c\<^sub>1\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>1\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C) \<in> \<R> \<and> (\<langle>c\<^sub>2\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>2\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> \<R> \<and> (\<langle>c\<^sub>1\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>1\<^sub>C\<rangle>\<^sub>A, \<langle>c\<^sub>2\<^sub>A, mds\<^sub>A_of mds\<^sub>C, mem\<^sub>A_of mem\<^sub>2\<^sub>C\<rangle>\<^sub>A) \<in> \<R>\<^sub>A) \<and> conc.low_mds_eq mds\<^sub>C mem\<^sub>1\<^sub>C mem\<^sub>2\<^sub>C \<and> (\<langle>c\<^sub>1\<^sub>C, mds\<^sub>C, mem\<^sub>1\<^sub>C\<rangle>\<^sub>C, \<langle>c\<^sub>2\<^sub>C, mds\<^sub>C, mem\<^sub>2\<^sub>C\<rangle>\<^sub>C) \<in> P) [PROOF STEP] by(blast dest: R\<^sub>C_ofD intro: R\<^sub>C_ofI)
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj15synthconj2 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (mult (plus (mult lv0 lv1) lv0) lv2) (plus (mult lv1 (mult lv0 lv2)) (mult lv0 lv2))). Admitted. QuickChick conj15synthconj2.
module Data.Bounded %default total public export interface Ord b => MinBound b where ||| The lower bound for the type minBound : b public export interface Ord b => MaxBound b where ||| The upper bound for the type maxBound : b public export %inline MinBound Bits8 where minBound = 0x0 public export %inline MaxBound Bits8 where maxBound = 0xff public export %inline MinBound Bits16 where minBound = 0x0 public export %inline MaxBound Bits16 where maxBound = 0xffff public export %inline MinBound Bits32 where minBound = 0x0 public export %inline MaxBound Bits32 where maxBound = 0xffffffff public export %inline MinBound Bits64 where minBound = 0x0 public export %inline MaxBound Bits64 where maxBound = 0xffffffffffffffff public export %inline MinBound Int8 where minBound = (- 0x80) public export %inline MaxBound Int8 where maxBound = 0x7f public export %inline MinBound Int16 where minBound = (- 0x8000) public export %inline MaxBound Int16 where maxBound = 0x7fff public export %inline MinBound Int32 where minBound = (- 0x80000000) public export %inline MaxBound Int32 where maxBound = 0x7fffffff public export %inline MinBound Int64 where minBound = (- 0x8000000000000000) public export %inline MaxBound Int64 where maxBound = 0x7fffffffffffffff public export %inline MinBound Int where minBound = (- 0x8000000000000000) public export %inline MaxBound Int where maxBound = 0x7fffffffffffffff
/- Copyright (c) 2017 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro Implementation of floating-point numbers (experimental). -/ import data.rat data.semiquot def int.shift2 (a b : ℕ) : ℤ → ℕ × ℕ | (int.of_nat e) := (a.shiftl e, b) | -[1+ e] := (a, b.shiftl e.succ) namespace fp inductive rmode | NE -- round to nearest even class float_cfg := (prec emax : ℕ) (prec_pos : prec > 0) (prec_max : prec ≤ emax) variable [C : float_cfg] include C def prec := C.prec def emax := C.emax def emin : ℤ := 1 - C.emax def valid_finite (e : ℤ) (m : ℕ) : Prop := emin ≤ e + prec - 1 ∧ e + prec - 1 ≤ emax ∧ e = max (e + m.size - prec) emin instance dec_valid_finite (e m) : decidable (valid_finite e m) := by unfold valid_finite; apply_instance inductive float | inf : bool → float | nan : float | finite : bool → Π e m, valid_finite e m → float def float.is_finite : float → bool | (float.finite s e m f) := tt | _ := ff def to_rat : Π (f : float), f.is_finite → ℚ | (float.finite s e m f) _ := let (n, d) := int.shift2 m 1 e, r := rat.mk_nat n d in if s then -r else r theorem float.zero.valid : valid_finite emin 0 := ⟨begin rw add_sub_assoc, apply le_add_of_nonneg_right, apply sub_nonneg_of_le, apply int.coe_nat_le_coe_nat_of_le, exact C.prec_pos end, by simpa [emin] using show (prec : ℤ) ≤ emax + float_cfg.emax, from le_trans (int.coe_nat_le.2 C.prec_max) (le_add_of_nonneg_left (int.coe_zero_le _)), by rw max_eq_right; simp⟩ def float.zero (s : bool) : float := float.finite s emin 0 float.zero.valid protected def float.sign' : float → semiquot bool | (float.inf s) := pure s | float.nan := ⊤ | (float.finite s e m f) := pure s protected def float.sign : float → bool | (float.inf s) := s | float.nan := ff | (float.finite s e m f) := s protected def float.is_zero : float → bool | (float.finite s e 0 f) := tt | _ := ff protected def float.neg : float → float | (float.inf s) := float.inf (bnot s) | float.nan := float.nan | (float.finite s e m f) := float.finite (bnot s) e m f def div_nat_lt_two_pow (n d : ℕ) : ℤ → bool | (int.of_nat e) := n < d.shiftl e | -[1+ e] := n.shiftl e.succ < d -- TODO(Mario): Prove these and drop 'meta' meta def of_pos_rat_dn (n : ℕ+) (d : ℕ+) : float × bool := begin let e₁ : ℤ := n.1.size - d.1.size - prec, cases h₁ : int.shift2 d.1 n.1 (e₁ + prec) with d₁ n₁, let e₂ := if n₁ < d₁ then e₁ - 1 else e₁, let e₃ := max e₂ emin, cases h₂ : int.shift2 d.1 n.1 (e₃ + prec) with d₂ n₂, let r := rat.mk_nat n₂ d₂, let m := r.floor, refine (float.finite ff e₃ (int.to_nat m) _, r.denom = 1), { exact undefined } end meta def next_up_pos (e m) (v : valid_finite e m) : float := let m' := m.succ in if ss : m'.size = m.size then float.finite ff e m' (by unfold valid_finite at *; rw ss; exact v) else if h : e = emax then float.inf ff else float.finite ff e.succ (nat.div2 m') undefined meta def next_dn_pos (e m) (v : valid_finite e m) : float := match m with | 0 := next_up_pos _ _ float.zero.valid | nat.succ m' := if ss : m'.size = m.size then float.finite ff e m' (by unfold valid_finite at *; rw ss; exact v) else if h : e = emin then float.finite ff emin m' undefined else float.finite ff e.pred (bit1 m') undefined end meta def next_up : float → float | (float.finite ff e m f) := next_up_pos e m f | (float.finite tt e m f) := float.neg $ next_dn_pos e m f | f := f meta def next_dn : float → float | (float.finite ff e m f) := next_dn_pos e m f | (float.finite tt e m f) := float.neg $ next_up_pos e m f | f := f meta def of_rat_up : ℚ → 
float | ⟨0, _, _, _⟩ := float.zero ff | ⟨nat.succ n, d, h, _⟩ := let (f, exact) := of_pos_rat_dn n.succ_pnat ⟨d, h⟩ in if exact then f else next_up f | ⟨-[1+n], d, h, _⟩ := float.neg (of_pos_rat_dn n.succ_pnat ⟨d, h⟩).1 meta def of_rat_dn (r : ℚ) : float := float.neg $ of_rat_up (-r) meta def of_rat : rmode → ℚ → float | rmode.NE r := let low := of_rat_dn r, high := of_rat_up r in if hf : high.is_finite then if r = to_rat _ hf then high else if lf : low.is_finite then if r - to_rat _ lf > to_rat _ hf - r then high else if r - to_rat _ lf < to_rat _ hf - r then low else match low, lf with float.finite s e m f, _ := if 2 ∣ m then low else high end else float.inf tt else float.inf ff namespace float instance : has_neg float := ⟨float.neg⟩ meta def add (mode : rmode) : float → float → float | nan _ := nan | _ nan := nan | (inf tt) (inf ff) := nan | (inf ff) (inf tt) := nan | (inf s₁) _ := inf s₁ | _ (inf s₂) := inf s₂ | (finite s₁ e₁ m₁ v₁) (finite s₂ e₂ m₂ v₂) := let f₁ := finite s₁ e₁ m₁ v₁, f₂ := finite s₂ e₂ m₂ v₂ in of_rat mode (to_rat f₁ rfl + to_rat f₂ rfl) meta instance : has_add float := ⟨float.add rmode.NE⟩ meta def sub (mode : rmode) (f1 f2 : float) : float := add mode f1 (-f2) meta instance : has_sub float := ⟨float.sub rmode.NE⟩ meta def mul (mode : rmode) : float → float → float | nan _ := nan | _ nan := nan | (inf s₁) f₂ := if f₂.is_zero then nan else inf (bxor s₁ f₂.sign) | f₁ (inf s₂) := if f₁.is_zero then nan else inf (bxor f₁.sign s₂) | (finite s₁ e₁ m₁ v₁) (finite s₂ e₂ m₂ v₂) := let f₁ := finite s₁ e₁ m₁ v₁, f₂ := finite s₂ e₂ m₂ v₂ in of_rat mode (to_rat f₁ rfl * to_rat f₂ rfl) meta def div (mode : rmode) : float → float → float | nan _ := nan | _ nan := nan | (inf s₁) (inf s₂) := nan | (inf s₁) f₂ := inf (bxor s₁ f₂.sign) | f₁ (inf s₂) := zero (bxor f₁.sign s₂) | (finite s₁ e₁ m₁ v₁) (finite s₂ e₂ m₂ v₂) := let f₁ := finite s₁ e₁ m₁ v₁, f₂ := finite s₂ e₂ m₂ v₂ in if f₂.is_zero then inf (bxor s₁ s₂) else of_rat mode (to_rat f₁ rfl / to_rat f₂ rfl) end float end fp
function[image, oriImSize] = e2s2_prepareImage(net, image, maxImageSize) % [image, oriImSize] = e2s2_prepareImage(net, image, maxImageSize) % % Resize the image and subtract the mean image. % % Copyright by Holger Caesar, 2015 % Resize image oriImSize = size(image); resizeFactor = maxImageSize / max(oriImSize(1:2)); targetSize = ceil(oriImSize(1:2) .* resizeFactor); % ceil corresponds to Matlab's imresize behavior image = imresize(image, resizeFactor); assert(size(image, 1) == targetSize(1) && size(image, 2) == targetSize(2)); if numel(net.meta.normalization.averageImage) == 3, % Subtract fixed number from each channel image(:, :, 1) = image(:, :, 1) - net.meta.normalization.averageImage(1); image(:, :, 2) = image(:, :, 2) - net.meta.normalization.averageImage(2); image(:, :, 3) = image(:, :, 3) - net.meta.normalization.averageImage(3); else % Resize averageImage and subtract it from each image % Note: This cannot be done on the gpu as Matlab's gpu-compatible % imresize function can only resize by a constant factor and the image % might not be square. averageImage = net.meta.normalization.averageImage ./ 255; averageImage = imresize(averageImage, targetSize); image = image - averageImage; end;
State Before: l : Type ?u.203967 m : Type ?u.203970 n : Type ?u.203973 o : Type u_1 p : Type ?u.203979 q : Type ?u.203982 m' : o → Type u_2 n' : o → Type u_3 p' : o → Type ?u.203997 R : Type ?u.204000 S : Type ?u.204003 α : Type u_4 β : Type ?u.204009 inst✝² : DecidableEq o inst✝¹ : Zero α inst✝ : Zero β ⊢ blockDiagonal' 0 = 0 State After: case a.h l : Type ?u.203967 m : Type ?u.203970 n : Type ?u.203973 o : Type u_1 p : Type ?u.203979 q : Type ?u.203982 m' : o → Type u_2 n' : o → Type u_3 p' : o → Type ?u.203997 R : Type ?u.204000 S : Type ?u.204003 α : Type u_4 β : Type ?u.204009 inst✝² : DecidableEq o inst✝¹ : Zero α inst✝ : Zero β i✝ : (i : o) × m' i x✝ : (i : o) × n' i ⊢ blockDiagonal' 0 i✝ x✝ = OfNat.ofNat 0 i✝ x✝ Tactic: ext State Before: case a.h l : Type ?u.203967 m : Type ?u.203970 n : Type ?u.203973 o : Type u_1 p : Type ?u.203979 q : Type ?u.203982 m' : o → Type u_2 n' : o → Type u_3 p' : o → Type ?u.203997 R : Type ?u.204000 S : Type ?u.204003 α : Type u_4 β : Type ?u.204009 inst✝² : DecidableEq o inst✝¹ : Zero α inst✝ : Zero β i✝ : (i : o) × m' i x✝ : (i : o) × n' i ⊢ blockDiagonal' 0 i✝ x✝ = OfNat.ofNat 0 i✝ x✝ State After: no goals Tactic: simp [blockDiagonal'_apply]
module Hedgehog import public Control.Monad.Either import public Control.Monad.Writer import public Hedgehog.Internal.Gen as Hedgehog import public Hedgehog.Internal.Property as Hedgehog import public Hedgehog.Internal.Range as Hedgehog import public Hedgehog.Internal.Runner as Hedgehog import public Hedgehog.Internal.Seed as Hedgehog import public Hedgehog.Internal.Shrink as Hedgehog import public Hedgehog.Internal.Util as Hedgehog
[STATEMENT] lemma (in flowgraph) ntr_mon_increasing_s: "(c,ee,c')\<in>ntr fg \<Longrightarrow> mon_c fg c \<subseteq> mon_c fg c'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (c, ee, c') \<in> ntr fg \<Longrightarrow> mon_c fg c \<subseteq> mon_c fg c' [PROOF STEP] by (erule gtrE) (auto dest: ntrs_mon_increasing_s simp add: mon_c_unconc)
Lois Maxwell as Miss Moneypenny: M's secretary.
c c c moved to indom.f c c
(* ##################################################### ### PLEASE DO NOT DISTRIBUTE SOLUTIONS PUBLICLY ### ##################################################### *) Require Import Coq.Strings.Ascii. Require Import Coq.Lists.List. Import ListNotations. Open Scope char_scope. Definition rule := (ascii * list ascii) % type. Structure grammar := { grammar_vars : list ascii; grammar_terminals : list ascii; grammar_rules : list rule; grammar_start : ascii; }. Definition g1 := {| grammar_vars := ["C"]; grammar_terminals := ["{"; "}"]; grammar_start := "C"; grammar_rules := [ ("C", ["{"; "C"; "}"]); ("C", ["C"; "C"]); ("C", []) ]; |}. Inductive Yield (G:grammar) : list ascii -> list ascii -> Prop := | yield_def: forall u v w A w1 w2, In (A, w) (grammar_rules G) -> w1 = u ++ [A] ++ v -> w2 = u ++ w ++ v -> Yield G w1 w2. Inductive Derivation (G:grammar): list (list ascii) -> Prop := | derivation_nil: Derivation G [[grammar_start G]] | derivation_cons: forall u v ws, Derivation G (u :: ws) -> Yield G u v -> Derivation G (v :: u :: ws). Inductive Accept (G:grammar) : list ascii -> Prop := | accept_def: forall w ws, Derivation G (w::ws) -> Forall (fun c => List.In c (grammar_terminals G)) w -> Accept G w.
Require Import Coq.Lists.List. Set Implicit Arguments. (* Functional Dependent Types *) (******************************) (* In addition to defining types inductively, we can also define types * using definitions and fixpoints. A simple example is n-tuples. *) Section tuple. Variable T : Type. Fixpoint tuple (n : nat) : Type := match n with | 0 => unit | S n => T * tuple n end%type. Check @fst. Definition tuple_hd {a} : tuple (S a) -> T := @fst _ _. Print tuple_hd. Definition tuple_tl {a} : tuple (S a) -> tuple a := @snd _ _. Definition grabtype n:Type := match n with O => unit | S n => T end. Lemma lastL: forall (n: nat), tuple n -> grabtype n. Proof. induction n. - simpl; trivial. - simpl. destruct n. + intro H; destruct H; assumption. + simpl in IHn. intro. apply IHn. destruct X. destruct t0. split. exact t0. exact t1. Defined. Fixpoint lastF (n: nat): tuple n -> grabtype n:= match n as x return (tuple x -> grabtype x) with | O => fun t => t | S m => fun t (* tuple S m *) => (match m as n1 return ((tuple n1 -> grabtype n1) -> T * tuple n1 -> T) with | 0 => fun _ H => let (t, _) := H in t | S n1 => fun IHn0 X => IHn0 (let (_,t0) := X in let (t1,t2) := t0 in (t1,t2)) end) (lastF m) t end. Print lastL. Print lastF. Lemma last_eq: forall (n:nat)(t:tuple n), lastL n t = lastF n t. Proof. intros. reflexivity. Qed. Lemma last_eqf: lastL = lastF. Proof. intros. reflexivity. Qed. Definition lastOfNonempty (n:nat)(t:tuple (S n)):T := lastL (S n) t. Variable a b c: T. Definition f: tuple 1 := (a,tt). Definition g: tuple 2 := (b, f). Definition h: tuple 3 := (c, g). Eval compute in (lastOfNonempty h). End tuple.
lemma bounded_translation_minus: fixes S :: "'a::real_normed_vector set" shows "bounded S \<Longrightarrow> bounded ((\<lambda>x. x - a) ` S)"
(* Title: HOL/Auth/n_mutualEx_lemma_inv__3_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_mutualEx Protocol Case Study*} theory n_mutualEx_lemma_inv__3_on_rules imports n_mutualEx_lemma_on_inv__3 begin section{*All lemmas on causal relation between inv__3*} lemma lemma_inv__3_on_rules: assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv0 p__Inv1. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv0~=p__Inv1\<and>f=inv__3 p__Inv0 p__Inv1)" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> i. i\<le>N\<and>r=n_Try i)\<or> (\<exists> i. i\<le>N\<and>r=n_Crit i)\<or> (\<exists> i. i\<le>N\<and>r=n_Exit i)\<or> (\<exists> i. i\<le>N\<and>r=n_Idle i)" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_Try i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_TryVsinv__3) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_CritVsinv__3) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_Exit i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_ExitVsinv__3) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_Idle i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_IdleVsinv__3) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
""" Classifier evaluation within ARMORY Scenario Contributor: MITRE Corporation """ import numpy as np from PIL import ImageOps, Image try: from tensorflow import set_random_seed, ConfigProto, Session from tensorflow.keras.backend import set_session except ImportError: from tensorflow.compat.v1 import ( set_random_seed, ConfigProto, Session, disable_v2_behavior, ) from tensorflow.compat.v1.keras.backend import set_session disable_v2_behavior() from armory.scenarios.poison import Poison def gtsrb_scenario_preprocessing(batch): img_size = 48 img_out = [] quantization = 255.0 for im in batch: img_eq = ImageOps.equalize(Image.fromarray(im)) width, height = img_eq.size min_side = min(img_eq.size) center = width // 2, height // 2 left = center[0] - min_side // 2 top = center[1] - min_side // 2 right = center[0] + min_side // 2 bottom = center[1] + min_side // 2 img_eq = img_eq.crop((left, top, right, bottom)) img_eq = np.array(img_eq.resize([img_size, img_size])) / quantization img_out.append(img_eq) return np.array(img_out, dtype=np.float32) class GTSRB(Poison): def set_dataset_kwargs(self): super().set_dataset_kwargs() self.dataset_kwargs["preprocessing_fn"] = gtsrb_scenario_preprocessing def set_random_seed_tensorflow(self): # TODO: Handle automatically if not self.config["sysconfig"].get("use_gpu"): conf = ConfigProto(intra_op_parallelism_threads=1) set_session(Session(config=conf)) set_random_seed(self.seed) def set_random_seed(self): super().set_random_seed() self.set_random_seed_tensorflow()
Require Import Lists.List Lists.ListSet Arith. Definition D := nat. Definition D_eq_dec := eq_nat_dec. Inductive DFA : set D -> set D -> D -> set (D*D*D) -> set D -> Type := | aDFA : forall (Q:set D) (Sigma:set D) (q0:D) (delta:set (D*D*D)) (F:set D), set_In q0 Q -> (forall qi a qj, set_In (qi,a,qj) delta -> (set_In qi Q /\ set_In a Sigma /\ set_In qj Q)) -> (forall qi a qj qk, set_In (qi,a,qj) delta -> set_In (qi,a,qk) delta -> qj = qk) -> (forall qf, set_In qf F -> set_In qf Q) -> (forall qi s, set_In qi Q -> set_In s Sigma -> { qj | set_In (qi, s, qj) delta }) -> DFA Q Sigma q0 delta F. Hint Constructors DFA. Definition Binary := set_add D_eq_dec 0 (set_add D_eq_dec 1 (@empty_set D)). Inductive DFA_step Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F) : D -> D -> D -> Prop := | aDFA_step : forall qi a qj, set_In (qi, a, qj) delta -> DFA_step Q Sigma q0 delta F d qi a qj. Hint Constructors DFA_step. Theorem DFA_step_fun: forall Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F), forall qi a qj qk, DFA_step Q Sigma q0 delta F d qi a qj -> DFA_step Q Sigma q0 delta F d qi a qk -> qj = qk. Proof. intros. rename H into DSj. rename H0 into DSk. destruct DSj as [qi a qj INj]. destruct DSk as [qi a qk INk]. destruct d as [Q Sigma q0 delta F INq0 INdelta FUNdelta INF delta_dec]. eapply FUNdelta. apply INj. apply INk. Qed. Theorem DFA_step_dec: forall Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F), forall qi a, set_In qi Q -> set_In a Sigma -> { qj | DFA_step Q Sigma q0 delta F d qi a qj /\ set_In qj Q }. Proof. intros. rename H into INqi. rename H0 into INa. destruct d as [Q Sigma q0 delta F INq0 INdelta FUNdelta INF delta_dec]. destruct (delta_dec qi a INqi INa) as [qj INqj]. exists qj. split; auto. apply INdelta in INqj. destruct INqj as [INqi2 INqj]; destruct INqj; auto. Qed. Inductive DFA_steps Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F) : D -> list D -> D -> Prop := | aDFA_steps_mt : forall qi, DFA_steps Q Sigma q0 delta F d qi nil qi | aDFA_steps_cons : forall qi a qj aa qk, DFA_step Q Sigma q0 delta F d qi a qj -> DFA_steps Q Sigma q0 delta F d qj aa qk -> DFA_steps Q Sigma q0 delta F d qi (cons a aa) qk. Hint Constructors DFA_steps. Theorem DFA_steps_fun: forall Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F), forall qi w qj qk, DFA_steps Q Sigma q0 delta F d qi w qj -> DFA_steps Q Sigma q0 delta F d qi w qk -> qj = qk. Proof. intros Q Sigma q0 delta F d. intros qi w. generalize w qi. clear w qi. induction w as [|a aa]; intros qi qj qk DS1 DS2. - inversion DS1. subst. inversion DS2. subst. auto. - inversion DS1. subst. inversion DS2. subst. rewrite (DFA_step_fun Q Sigma q0 delta F d qi a qj0 qj1 H2 H3) in *. apply (IHaa qj1); auto. Qed. Definition all_In (D:Set) (s:set D) (l:list D) := forall (a:D), In a l -> set_In a s. Hint Unfold all_In. Lemma all_In_eq : forall D Sigma a aa, all_In D Sigma (a :: aa) -> set_In a Sigma. Proof. intros D Sigma a aa AI. apply AI. apply in_eq. Qed. Hint Resolve all_In_eq. Lemma all_In_cons : forall D Sigma a aa, all_In D Sigma (a :: aa) -> all_In D Sigma aa. Proof. intros D Sigma a aa AI. intros x INx. apply AI. apply in_cons. auto. Qed. Hint Resolve all_In_cons. Theorem DFA_steps_dec: forall Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F), forall qi aa, set_In qi Q -> all_In D Sigma aa -> { qj | DFA_steps Q Sigma q0 delta F d qi aa qj /\ set_In qj Q }. Proof. intros. generalize aa qi H H0. clear aa qi H H0. induction aa as [|a aa]; intros qi INqi INaa. exists qi. auto. destruct (DFA_step_dec Q Sigma q0 delta F d qi a) as [qj [DS INqj]]; auto. eauto. 
destruct d as [Q Sigma q0 delta F INq0 INdelta FUNdelta INF delta_dec]. destruct (IHaa qj INqj) as [qk [DSS INqk]]. eauto. exists qk. split; auto. econstructor. apply DS. apply DSS. Qed. Inductive DFA_member Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F) : list D -> Prop := | aDFA_mem : forall w qa, set_In qa F -> DFA_steps Q Sigma q0 delta F d q0 w qa -> DFA_member Q Sigma q0 delta F d w. Hint Constructors DFA_member. Theorem DFA_member_dec: forall Q Sigma q0 delta F (d:DFA Q Sigma q0 delta F), forall w, all_In D Sigma w -> { DFA_member Q Sigma q0 delta F d w } + { ~ DFA_member Q Sigma q0 delta F d w }. Proof. intros. rename H into Aw. destruct (DFA_steps_dec Q Sigma q0 delta F d q0 w) as [qa [DSS INqj]]; auto. destruct d as [Q Sigma q0 delta F INq0 INdelta FUNdelta INF delta_dec]. auto. destruct (set_In_dec D_eq_dec qa F) as [INa | NINa]. left. econstructor. apply INa. auto. right. intros DM. destruct DM as [w qa2 NINa2 DSS2]. rewrite (DFA_steps_fun Q Sigma q0 delta F d q0 w qa qa2 DSS DSS2) in *. apply NINa. auto. Qed.
[GOAL] E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x y : E hs : Bounded s ht : Bounded t ⊢ Bounded (s * t) [PROOFSTEP] obtain ⟨Rs, hRs⟩ : ∃ R, ∀ x ∈ s, ‖x‖ ≤ R := hs.exists_norm_le' [GOAL] case intro E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x y : E hs : Bounded s ht : Bounded t Rs : ℝ hRs : ∀ (x : E), x ∈ s → ‖x‖ ≤ Rs ⊢ Bounded (s * t) [PROOFSTEP] obtain ⟨Rt, hRt⟩ : ∃ R, ∀ x ∈ t, ‖x‖ ≤ R := ht.exists_norm_le' [GOAL] case intro.intro E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x y : E hs : Bounded s ht : Bounded t Rs : ℝ hRs : ∀ (x : E), x ∈ s → ‖x‖ ≤ Rs Rt : ℝ hRt : ∀ (x : E), x ∈ t → ‖x‖ ≤ Rt ⊢ Bounded (s * t) [PROOFSTEP] refine' bounded_iff_forall_norm_le'.2 ⟨Rs + Rt, _⟩ [GOAL] case intro.intro E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x y : E hs : Bounded s ht : Bounded t Rs : ℝ hRs : ∀ (x : E), x ∈ s → ‖x‖ ≤ Rs Rt : ℝ hRt : ∀ (x : E), x ∈ t → ‖x‖ ≤ Rt ⊢ ∀ (x : E), x ∈ s * t → ‖x‖ ≤ Rs + Rt [PROOFSTEP] rintro z ⟨x, y, hx, hy, rfl⟩ [GOAL] case intro.intro.intro.intro.intro.intro E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x✝ y✝ : E hs : Bounded s ht : Bounded t Rs : ℝ hRs : ∀ (x : E), x ∈ s → ‖x‖ ≤ Rs Rt : ℝ hRt : ∀ (x : E), x ∈ t → ‖x‖ ≤ Rt x y : E hx : x ∈ s hy : y ∈ t ⊢ ‖(fun x x_1 => x * x_1) x y‖ ≤ Rs + Rt [PROOFSTEP] exact norm_mul_le_of_le (hRs x hx) (hRt y hy) [GOAL] E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x y : E ⊢ Bounded s → Bounded s⁻¹ [PROOFSTEP] simp_rw [bounded_iff_forall_norm_le', ← image_inv, ball_image_iff, norm_inv'] [GOAL] E : Type u_1 inst✝ : SeminormedGroup E ε δ : ℝ s t : Set E x y : E ⊢ (∃ C, ∀ (x : E), x ∈ s → ‖x‖ ≤ C) → ∃ C, ∀ (x : E), x ∈ s → ‖x‖ ≤ C [PROOFSTEP] exact id [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s✝ t : Set E x✝ y x : E s : Set E ⊢ infEdist x⁻¹ s⁻¹ = infEdist x s [PROOFSTEP] rw [← image_inv, infEdist_image isometry_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s✝ t : Set E x✝ y x : E s : Set E ⊢ infEdist x⁻¹ s = infEdist x s⁻¹ [PROOFSTEP] rw [← infEdist_inv_inv, inv_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ (thickening δ s)⁻¹ = thickening δ s⁻¹ [PROOFSTEP] simp_rw [thickening, ← infEdist_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x | EMetric.infEdist x s < ENNReal.ofReal δ}⁻¹ = {x | EMetric.infEdist x⁻¹ s < ENNReal.ofReal δ} [PROOFSTEP] rfl [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ (cthickening δ s)⁻¹ = cthickening δ s⁻¹ [PROOFSTEP] simp_rw [cthickening, ← infEdist_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x | EMetric.infEdist x s ≤ ENNReal.ofReal δ}⁻¹ = {x | EMetric.infEdist x⁻¹ s ≤ ENNReal.ofReal δ} [PROOFSTEP] rfl [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} * ball y δ = ball (x * y) δ [PROOFSTEP] simp only [preimage_mul_ball, image_mul_left, singleton_mul, div_inv_eq_mul, mul_comm y x] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} / ball y δ = ball (x / y) δ [PROOFSTEP] simp_rw [div_eq_mul_inv, inv_ball, singleton_mul_ball] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball x δ * {y} = ball (x * y) δ [PROOFSTEP] rw [mul_comm, singleton_mul_ball, mul_comm y] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball x δ / {y} = ball (x / y) δ [PROOFSTEP] simp_rw [div_eq_mul_inv, inv_singleton, ball_mul_singleton] 
[GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} * ball 1 δ = ball x δ [PROOFSTEP] simp [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} / ball 1 δ = ball x δ [PROOFSTEP] rw [singleton_div_ball, div_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball 1 δ * {x} = ball x δ [PROOFSTEP] simp [ball_mul_singleton] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball 1 δ / {x} = ball x⁻¹ δ [PROOFSTEP] rw [ball_div_singleton, one_div] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ x • ball 1 δ = ball x δ [PROOFSTEP] rw [smul_ball, smul_eq_mul, mul_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} * closedBall y δ = closedBall (x * y) δ [PROOFSTEP] simp_rw [singleton_mul, ← smul_eq_mul, image_smul, smul_closedBall] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} / closedBall y δ = closedBall (x / y) δ [PROOFSTEP] simp_rw [div_eq_mul_inv, inv_closedBall, singleton_mul_closedBall] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ closedBall x δ * {y} = closedBall (x * y) δ [PROOFSTEP] simp [mul_comm _ { y }, mul_comm y] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ closedBall x δ / {y} = closedBall (x / y) δ [PROOFSTEP] simp [div_eq_mul_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} * closedBall 1 δ = closedBall x δ [PROOFSTEP] simp [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ {x} / closedBall 1 δ = closedBall x δ [PROOFSTEP] rw [singleton_div_closedBall, div_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ closedBall 1 δ * {x} = closedBall x δ [PROOFSTEP] simp [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ closedBall 1 δ / {x} = closedBall x⁻¹ δ [PROOFSTEP] simp [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ x • closedBall 1 δ = closedBall x δ [PROOFSTEP] simp [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ s * ball 1 δ = thickening δ s [PROOFSTEP] rw [thickening_eq_biUnion_ball] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ s * ball 1 δ = ⋃ (x : E) (_ : x ∈ s), ball x δ [PROOFSTEP] convert iUnion₂_mul (fun x (_ : x ∈ s) => { x }) (ball (1 : E) δ) [GOAL] case h.e'_2.h.e'_5 E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ s = ⋃ (i : E) (_ : i ∈ s), {i} case h.e'_3.h.e'_3.h.f E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y x✝¹ : E x✝ : x✝¹ ∈ s ⊢ ball x✝¹ δ = {x✝¹} * ball 1 δ [PROOFSTEP] exact s.biUnion_of_singleton.symm [GOAL] case h.e'_3.h.e'_3.h.f E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y x✝¹ : E x✝ : x✝¹ ∈ s ⊢ ball x✝¹ δ = {x✝¹} * ball 1 δ [PROOFSTEP] ext x [GOAL] case h.e'_3.h.e'_3.h.f.h E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x✝² y x✝¹ : E x✝ : x✝¹ ∈ s x : E ⊢ x ∈ ball x✝¹ δ ↔ x ∈ {x✝¹} * ball 1 δ [PROOFSTEP] simp_rw [singleton_mul_ball, mul_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ s / ball 1 δ = thickening δ s [PROOFSTEP] simp [div_eq_mul_inv, mul_ball_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball 1 δ * s = thickening δ s [PROOFSTEP] rw [mul_comm, mul_ball_one] [GOAL] E : 
Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball 1 δ / s = thickening δ s⁻¹ [PROOFSTEP] simp [div_eq_mul_inv, ball_mul_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ s * ball x δ = x • thickening δ s [PROOFSTEP] rw [← smul_ball_one, mul_smul_comm, mul_ball_one] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ s / ball x δ = x⁻¹ • thickening δ s [PROOFSTEP] simp [div_eq_mul_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball x δ * s = x • thickening δ s [PROOFSTEP] rw [mul_comm, mul_ball] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E ⊢ ball x δ / s = x • thickening δ s⁻¹ [PROOFSTEP] simp [div_eq_mul_inv] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E hs : IsCompact s hδ : 0 ≤ δ ⊢ s * closedBall 1 δ = cthickening δ s [PROOFSTEP] rw [hs.cthickening_eq_biUnion_closedBall hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E hs : IsCompact s hδ : 0 ≤ δ ⊢ s * closedBall 1 δ = ⋃ (x : E) (_ : x ∈ s), closedBall x δ [PROOFSTEP] ext x [GOAL] case h E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x✝ y : E hs : IsCompact s hδ : 0 ≤ δ x : E ⊢ x ∈ s * closedBall 1 δ ↔ x ∈ ⋃ (x : E) (_ : x ∈ s), closedBall x δ [PROOFSTEP] simp only [mem_mul, dist_eq_norm_div, exists_prop, mem_iUnion, mem_closedBall, exists_and_left, mem_closedBall_one_iff, ← eq_div_iff_mul_eq'', div_one, exists_eq_right] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E hs : IsCompact s hδ : 0 ≤ δ ⊢ s / closedBall 1 δ = cthickening δ s [PROOFSTEP] simp [div_eq_mul_inv, hs.mul_closedBall_one hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E hs : IsCompact s hδ : 0 ≤ δ ⊢ closedBall 1 δ * s = cthickening δ s [PROOFSTEP] rw [mul_comm, hs.mul_closedBall_one hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x y : E hs : IsCompact s hδ : 0 ≤ δ ⊢ closedBall 1 δ / s = cthickening δ s⁻¹ [PROOFSTEP] simp [div_eq_mul_inv, mul_comm, hs.inv.mul_closedBall_one hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x✝ y : E hs : IsCompact s hδ : 0 ≤ δ x : E ⊢ s * closedBall x δ = x • cthickening δ s [PROOFSTEP] rw [← smul_closedBall_one, mul_smul_comm, hs.mul_closedBall_one hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x✝ y : E hs : IsCompact s hδ : 0 ≤ δ x : E ⊢ s / closedBall x δ = x⁻¹ • cthickening δ s [PROOFSTEP] simp [div_eq_mul_inv, mul_comm, hs.mul_closedBall hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x✝ y : E hs : IsCompact s hδ : 0 ≤ δ x : E ⊢ closedBall x δ * s = x • cthickening δ s [PROOFSTEP] rw [mul_comm, hs.mul_closedBall hδ] [GOAL] E : Type u_1 inst✝ : SeminormedCommGroup E ε δ : ℝ s t : Set E x✝ y : E hs : IsCompact s hδ : 0 ≤ δ x : E ⊢ closedBall x δ * s = x • cthickening δ s [PROOFSTEP] simp [div_eq_mul_inv, hs.closedBall_mul hδ]
(*<*) theory ex1_9 imports Main begin (*>*) text {* Read the chapter about total recursive functions in the ``Tutorial on Isabelle/HOL'' (@{text fun}, Chapter 3.5). *} text {* In this exercise you will define a function @{text Zip} that merges two lists by interleaving. Examples: @{text "Zip [a1, a2, a3] [b1, b2, b3] = [a1, b1, a2, b2, a3, b3]"} and @{text "Zip [a1] [b1, b2, b3] = [a1, b1, b2, b3]"}. Use three different approaches to define @{text Zip}: \begin{enumerate} \item by primitive recursion on the first list, \item by primitive recursion on the second list, \item by total recursion (using @{text fun}). \end{enumerate} *} primrec zip1 :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where "zip1 [] x =x" |"zip1 (x#xs) ys = (case ys of [] \<Rightarrow> x#xs | y#ys \<Rightarrow> (x#y#(zip1 xs ys)))" primrec zip2 :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where " zip2 x [] = x" | "zip2 x (y#ys) = (case x of [] \<Rightarrow> y#ys|x#xs \<Rightarrow>(x#y#(zip2 xs ys)))" fun zipr :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where "zipr [] ys = ys" | "zipr xs [] = xs" | "zipr (x#xs) (y#ys) = x # y # zipr xs ys" text {* Show that all three versions of @{text Zip} are equivalent. *} lemma "zip1 x y = zip2 x y" apply(induct x arbitrary :y) apply (case_tac y) apply auto apply(case_tac y) apply auto done lemma "zip1 x y = zipr x y" apply (induct x arbitrary:y) apply (case_tac y) apply auto apply (case_tac y) apply auto done lemma "zip2 x y = zipr x y" apply (induct x arbitrary:y) apply (case_tac y) apply auto apply (case_tac y) apply auto done text {* Show that @{text zipr} distributes over @{text append}. *} lemma "\<lbrakk>length p = length u; length q = length v\<rbrakk> \<Longrightarrow> zipr (p@q) (u@v) = zipr p u @ zipr q v" apply(induct p arbitrary:q u v) apply auto apply(case_tac u) apply auto done text {* {\bf Note:} For @{text fun}, the order of your equations is relevant. If equations overlap, they will be disambiguated before they are added to the logic. You can have a look at these equations using @{text "thm zipr.simps"}. *} (*<*) end (*>*)
=== Tropical Storm Six ===
// Copyright (c) 2007-2013 Hartmut Kaiser // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #if !defined(HPX_LCOS_BARRIER_JUN_23_2008_0530PM) #define HPX_LCOS_BARRIER_JUN_23_2008_0530PM #include <hpx/lcos/local/spinlock.hpp> #include <hpx/util/scoped_unlock.hpp> #include <hpx/util/stringstream.hpp> #include <hpx/runtime/threads/thread_helpers.hpp> #include <boost/intrusive/slist.hpp> /////////////////////////////////////////////////////////////////////////////// namespace hpx { namespace lcos { namespace local { /// A barrier can be used to synchronize a specific number of threads, /// blocking all of the entering threads until all of the threads have /// entered the barrier. /// /// \note A \a barrier is not a LCO in the sense that it has no global id /// and it can't be triggered using the action (parcel) mechanism. /// It is just a low level synchronization primitive allowing to /// synchronize a given number of \a threads. class barrier { private: typedef lcos::local::spinlock mutex_type; // define data structures needed for intrusive slist container used for // the queues struct barrier_queue_entry { typedef boost::intrusive::slist_member_hook< boost::intrusive::link_mode<boost::intrusive::normal_link> > hook_type; barrier_queue_entry(threads::thread_id_type id) : id_(id) {} threads::thread_id_type id_; hook_type slist_hook_; }; typedef boost::intrusive::member_hook< barrier_queue_entry, barrier_queue_entry::hook_type, &barrier_queue_entry::slist_hook_ > slist_option_type; typedef boost::intrusive::slist< barrier_queue_entry, slist_option_type, boost::intrusive::cache_last<true>, boost::intrusive::constant_time_size<false> > queue_type; struct reset_queue_entry { reset_queue_entry(barrier_queue_entry& e, queue_type& q) : e_(e), q_(q), last_(q.last()) {} ~reset_queue_entry() { if (e_.id_) q_.erase(last_); // remove entry from queue } barrier_queue_entry& e_; queue_type& q_; queue_type::const_iterator last_; }; public: barrier(std::size_t number_of_threads) : number_of_threads_(number_of_threads) {} ~barrier() { if (!queue_.empty()) { LERR_(fatal) << "~barrier: thread_queue is not empty, aborting threads"; mutex_type::scoped_lock l(mtx_); while (!queue_.empty()) { threads::thread_id_type id = queue_.front().id_; queue_.front().id_ = 0; queue_.pop_front(); // we know that the id is actually the pointer to the thread threads::thread_data_base* thrd = static_cast<threads::thread_data_base*>(id); LERR_(fatal) << "~barrier: pending thread: " << get_thread_state_name(thrd->get_state()) << "(" << id << "): " << thrd->get_description(); // forcefully abort thread, do not throw error_code ec(lightweight); threads::set_thread_state(id, threads::pending, threads::wait_abort, threads::thread_priority_default, ec); if (ec) { LERR_(fatal) << "~barrier: could not abort thread" << get_thread_state_name(thrd->get_state()) << "(" << id << "): " << thrd->get_description(); } } } } /// The function \a wait will block the number of entering \a threads /// (as given by the constructor parameter \a number_of_threads), /// releasing all waiting threads as soon as the last \a thread /// entered this function. 
void wait() { threads::thread_self& self = threads::get_self(); mutex_type::scoped_lock l(mtx_); if (queue_.size() < number_of_threads_-1) { barrier_queue_entry e(self.get_thread_id()); queue_.push_back(e); reset_queue_entry r(e, queue_); { util::scoped_unlock<mutex_type::scoped_lock> ul(l); this_thread::suspend(threads::suspended, "barrier::wait"); } } else { // swap the list queue_type queue; queue.swap(queue_); l.unlock(); // release the threads while (!queue.empty()) { threads::thread_id_type id = queue.front().id_; if (HPX_UNLIKELY(!id)) { HPX_THROW_EXCEPTION(null_thread_id, "barrier::wait", "NULL thread id encountered"); } queue.front().id_ = 0; queue.pop_front(); threads::set_thread_lco_description(id); threads::set_thread_state(id, threads::pending); } } } private: std::size_t const number_of_threads_; mutable mutex_type mtx_; queue_type queue_; }; }}} #endif
/- Copyright (c) 2018 Chris Hughes. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Chris Hughes -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.algebra.big_operators.basic import Mathlib.PostPort namespace Mathlib namespace nat /-- Euler's totient function. This counts the number of positive integers less than `n` which are coprime with `n`. -/ def totient (n : ℕ) : ℕ := finset.card (finset.filter (coprime n) (finset.range n)) @[simp] theorem totient_zero : totient 0 = 0 := rfl theorem totient_le (n : ℕ) : totient n ≤ n := trans_rel_left LessEq (finset.card_le_of_subset (finset.filter_subset (coprime n) (finset.range n))) (finset.card_range n) theorem totient_pos {n : ℕ} : 0 < n → 0 < totient n := sorry theorem sum_totient (n : ℕ) : (finset.sum (finset.filter (fun (_x : ℕ) => _x ∣ n) (finset.range (Nat.succ n))) fun (m : ℕ) => totient m) = n := sorry
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: gga_exc *) $include "attenuation.mpl" $define gga_x_b88_params $include "gga_x_b88.mpl" (* Eq. 5 - Note that their K_s = 2*X_FACTOR_C*b88_f Note that there is a misprint sqrt(mu) -> sqrt(pi) *) lcgau_arg := (rs, z, xs) -> sqrt(2*X_FACTOR_C*b88_f(xs)/Pi)/(6*n_spin(rs, z)^(1/3)): (* Eq. 4 + Eq. 8 *) lcgau_f := (rs, z, xs) -> b88_f(xs) * ( + attenuation_erf(p_a_hyb_omega_0_*lcgau_arg(rs, z, xs)) + p_a_hyb_coeff_2_*attenuation_gau(p_a_hyb_omega_2_*lcgau_arg(rs, z, xs)) + p_a_hyb_coeff_3_*attenuation_gau(p_a_hyb_omega_3_*lcgau_arg(rs, z, xs)) ): f := (rs, zeta, xt, xs0, xs1) -> gga_exchange_nsp(lcgau_f, rs, zeta, xs0, xs1):
From Test Require Import tactic. Section FOFProblem. Variable Universe : Set. Variable UniverseElement : Universe. Variable wd_ : Universe -> Universe -> Prop. Variable col_ : Universe -> Universe -> Universe -> Prop. Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)). Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)). Variable col_triv_3 : (forall A B : Universe, col_ A B B). Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)). Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)). Theorem pipo_6 : (forall O E Eprime U A B C : Universe, ((wd_ U O /\ (wd_ U E /\ (wd_ O E /\ (wd_ O Eprime /\ (wd_ E Eprime /\ (wd_ O U /\ (wd_ O Eprime /\ (wd_ U Eprime /\ (wd_ A O /\ (wd_ B O /\ (col_ O E U /\ (col_ O E A /\ (col_ O E B /\ col_ O E C))))))))))))) -> col_ O U C)). Proof. time tac. Qed. End FOFProblem.
header "Stack Machine and Compilation" theory ASM imports AExp begin subsection "Stack Machine" text_raw{*\snip{ASMinstrdef}{0}{1}{% *} datatype instr = LOADI val | LOAD vname | ADD text_raw{*}%endsnip*} text_raw{*\snip{ASMstackdef}{1}{2}{% *} type_synonym stack = "val list" text_raw{*}%endsnip*} abbreviation "hd2 xs == hd(tl xs)" abbreviation "tl2 xs == tl(tl xs)" text{* \noindent Abbreviations are transparent: they are unfolded after parsing and folded back again before printing. Internally, they do not exist.*} text_raw{*\snip{ASMexeconedef}{0}{1}{% *} fun exec1 :: "instr \<Rightarrow> state \<Rightarrow> stack \<Rightarrow> stack" where "exec1 (LOADI n) _ stk = n # stk" | "exec1 (LOAD x) s stk = s(x) # stk" | "exec1 ADD _ stk = (hd2 stk + hd stk) # tl2 stk" text_raw{*}%endsnip*} text_raw{*\snip{ASMexecdef}{1}{2}{% *} fun exec :: "instr list \<Rightarrow> state \<Rightarrow> stack \<Rightarrow> stack" where "exec [] _ stk = stk" | "exec (i#is) s stk = exec is s (exec1 i s stk)" text_raw{*}%endsnip*} value "exec [LOADI 5, LOAD ''y'', ADD] <''x'' := 42, ''y'' := 43> [50]" lemma exec_append[simp]: "exec (is1@is2) s stk = exec is2 s (exec is1 s stk)" apply(induction is1 arbitrary: stk) apply (auto) done subsection "Compilation" text_raw{*\snip{ASMcompdef}{0}{2}{% *} fun comp :: "aexp \<Rightarrow> instr list" where "comp (N n) = [LOADI n]" | "comp (V x) = [LOAD x]" | "comp (Plus e\<^sub>1 e\<^sub>2) = comp e\<^sub>1 @ comp e\<^sub>2 @ [ADD]" text_raw{*}%endsnip*} value "comp (Plus (Plus (V ''x'') (N 1)) (V ''z''))" theorem exec_comp: "exec (comp a) s stk = aval a s # stk" apply(induction a arbitrary: stk) apply (auto) done end
Latest Simple House - for many people, home design is an extraordinarily daunting process. Knowing where to start, not to mention deciding on something unique or a theme for the whole house, is something many of us leave to the experts. However, we have made things easy; our rooms website lets you view the most popular photos of the day. It also acts as an overview of the available categories for home ideas and home decor, and even shows where experts in the USA are located. Are your needs very specific? Do you only need ideas for particular rooms? Well, that's OK.
" He was beyond the age of fifty , he was more than fifty , and standing upright he measured about eight feet . His face had a golden tan , arched brows , a pair of bright eyes , a regular head form , a square mouth , a pair of protruding ears , and under his chin there were three locks of beard , a grizzled beard . On his head he wore a sky @-@ blue satin scarf , and he was dressed in a stately sky @-@ blue satin coat with a silken girdle , a pair of wide black trousers without crotch and satin boots with thin soles " .
SCC Symphony – A community-based endeavor of professionals and amateurs joining together in a light-hearted orchestra. BIO-hotels announce their first film award. Nassereith/Munich, Germany, April 24, 2012. Wedding in green? From frequent flyer to cyclist? Finally found the dream job at the coolest eco-fashion label? Or a bio-revolution in your parents' business? Under the slogan "it's different!", the BIO-hotels are launching a film competition in 2012. Participants in the "bio.clip. 012" campaign are called upon, as actors, directors, observers, commentators and filmmakers, to portray the many facets of the "otherness" of the growing organic scene. The BIO-hotels are looking for films that show the sustainable thinking, actions and lives of entrepreneurs and private individuals. The total prizes are worth 15,000 euros. The campaign ends on October 31, 2012 with the BIO-hotels film festival, where the three most exciting, weirdest and most unconventional stories will be awarded prizes. Agriculture, energy, holidays, mobility or living – the clips should inspire and show that new and sustainable ways are possible in all walks of life and all sectors. To take part, nothing more than a mobile phone or hobby camera is needed. The submitted films must be self-filmed and no longer than three minutes. Entries are possible in all categories and genres: whether as a children's film, music video, short film, animation or documentary. Each participant may submit up to three clips to the BIO-hotels by email or post. Win hotel vouchers and cash worth 15,000 euros: the film award is interesting for hobby, junior and professional filmmakers alike, because in addition to hotel vouchers for a holiday in one of the more than 75 BIO-hotels houses, the association also sponsors cash prizes. A hotel voucher worth 5,000 euros, as well as an additional 5,000 euros in cash, awaits the first-prize winner.
\name{dataImportClean} \alias{dataImportClean} \title{Data importation and refining} \description{The function will load all the data BED files from a local directory, retrieve the file names and save them to a list "ChIPSeqSamples", and transform the BED files to GRanges objects and save them to a GRangesList "samplesInBED". Finally, these objects are saved as RDS files to the current working directory.} \usage{dataImportClean(loc)} \arguments{\item{loc}{ the directory path to the data files}} \value{\item{samplesInBED.rds}{A GRanges List object of all GRanges data files} \item{ChIPSeqSamples.rds}{List of sample names as data files}} \note{ The function does not return any values, rather saves the aforementioned objects as files.} \author{Shaurya Jauhari} \examples{dataImportClean(loc)}
theory CoCallAnalysisBinds imports CoCallAnalysisSig AEnv "AList-Utils-HOLCF" "Arity-Nominal" "CoCallGraph-Nominal" begin context CoCallAnalysis begin definition ccBind :: "var \<Rightarrow> exp \<Rightarrow> ((AEnv \<times> CoCalls) \<rightarrow> CoCalls)" where "ccBind v e = (\<Lambda> (ae, G). if (v--v\<notin>G) \<or> \<not> isVal e then cc_restr (fv e) (fup\<cdot>(ccExp e)\<cdot>(ae v)) else ccSquare (fv e))" (* paper has: \<or> ae v = up\<cdot>0, but that is not monotone! But should give the same result. *) lemma ccBind_eq: "ccBind v e\<cdot>(ae, G) = (if v--v\<notin>G \<or> \<not> isVal e then \<G>\<^sup>\<bottom>\<^bsub>ae v\<^esub> e G|` fv e else (fv e)\<^sup>2)" unfolding ccBind_def apply (rule cfun_beta_Pair) apply (rule cont_if_else_above) apply simp apply simp apply (auto dest: subsetD[OF ccField_cc_restr])[1] (* Abstraction broken! Fix this. *) apply (case_tac p, auto, transfer, auto)[1] apply (rule adm_subst[OF cont_snd]) apply (rule admI, thin_tac "chain _", transfer, auto) done lemma ccBind_strict[simp]: "ccBind v e \<cdot> \<bottom> = \<bottom>" by (auto simp add: inst_prod_pcpo ccBind_eq simp del: Pair_strict) lemma ccField_ccBind: "ccField (ccBind v e\<cdot>(ae,G)) \<subseteq> fv e" by (auto simp add: ccBind_eq dest: subsetD[OF ccField_cc_restr]) definition ccBinds :: "heap \<Rightarrow> ((AEnv \<times> CoCalls) \<rightarrow> CoCalls)" where "ccBinds \<Gamma> = (\<Lambda> i. (\<Squnion>v\<mapsto>e\<in>map_of \<Gamma>. ccBind v e\<cdot>i))" lemma ccBinds_eq: "ccBinds \<Gamma>\<cdot>i = (\<Squnion>v\<mapsto>e\<in>map_of \<Gamma>. ccBind v e\<cdot>i)" unfolding ccBinds_def by simp lemma ccBinds_strict[simp]: "ccBinds \<Gamma>\<cdot>\<bottom>=\<bottom>" unfolding ccBinds_eq by (cases "\<Gamma> = []") simp_all lemma ccBinds_strict'[simp]: "ccBinds \<Gamma>\<cdot>(\<bottom>,\<bottom>)=\<bottom>" by (metis CoCallAnalysis.ccBinds_strict Pair_bottom_iff) lemma ccBinds_reorder1: assumes "map_of \<Gamma> v = Some e" shows "ccBinds \<Gamma> = ccBind v e \<squnion> ccBinds (delete v \<Gamma>)" proof- from assms have "map_of \<Gamma> = map_of ((v,e) # delete v \<Gamma>)" by (metis map_of_delete_insert) thus ?thesis by (auto intro: cfun_eqI simp add: ccBinds_eq delete_set_none) qed lemma ccBinds_Nil[simp]: "ccBinds [] = \<bottom>" unfolding ccBinds_def by simp lemma ccBinds_Cons[simp]: "ccBinds ((x,e)#\<Gamma>) = ccBind x e \<squnion> ccBinds (delete x \<Gamma>)" by (subst ccBinds_reorder1[where v = x and e = e]) auto lemma ccBind_below_ccBinds: "map_of \<Gamma> x = Some e \<Longrightarrow> ccBind x e\<cdot>ae \<sqsubseteq> (ccBinds \<Gamma>\<cdot>ae)" by (auto simp add: ccBinds_eq) lemma ccField_ccBinds: "ccField (ccBinds \<Gamma>\<cdot>(ae,G)) \<subseteq> fv \<Gamma>" by (auto simp add: ccBinds_eq dest: subsetD[OF ccField_ccBind] intro: subsetD[OF map_of_Some_fv_subset]) definition ccBindsExtra :: "heap \<Rightarrow> ((AEnv \<times> CoCalls) \<rightarrow> CoCalls)" where "ccBindsExtra \<Gamma> = (\<Lambda> i. snd i \<squnion> ccBinds \<Gamma> \<cdot> i \<squnion> (\<Squnion>x\<mapsto>e\<in>map_of \<Gamma>. ccProd (fv e) (ccNeighbors x (snd i))))" lemma ccBindsExtra_simp: "ccBindsExtra \<Gamma> \<cdot> i =snd i \<squnion> ccBinds \<Gamma> \<cdot> i \<squnion> (\<Squnion>x\<mapsto>e\<in>map_of \<Gamma>. ccProd (fv e) (ccNeighbors x (snd i)))" unfolding ccBindsExtra_def by simp lemma ccBindsExtra_eq: "ccBindsExtra \<Gamma>\<cdot>(ae,G) = G \<squnion> ccBinds \<Gamma>\<cdot>(ae,G) \<squnion> (\<Squnion>x\<mapsto>e\<in>map_of \<Gamma>. 
fv e G\<times> ccNeighbors x G)" unfolding ccBindsExtra_def by simp lemma ccBindsExtra_strict[simp]: "ccBindsExtra \<Gamma> \<cdot> \<bottom> = \<bottom>" by (auto simp add: ccBindsExtra_simp inst_prod_pcpo simp del: Pair_strict) lemma ccField_ccBindsExtra: "ccField (ccBindsExtra \<Gamma>\<cdot>(ae,G)) \<subseteq> fv \<Gamma> \<union> ccField G" by (auto simp add: ccBindsExtra_simp elem_to_ccField dest!: subsetD[OF ccField_ccBinds] subsetD[OF ccField_ccProd_subset] map_of_Some_fv_subset) end lemma ccBind_eqvt[eqvt]: "\<pi> \<bullet> (CoCallAnalysis.ccBind cccExp x e) = CoCallAnalysis.ccBind (\<pi> \<bullet> cccExp) (\<pi> \<bullet> x) (\<pi> \<bullet> e)" proof- { fix \<pi> ae G have "\<pi> \<bullet> ((CoCallAnalysis.ccBind cccExp x e) \<cdot> (ae,G)) = CoCallAnalysis.ccBind (\<pi> \<bullet> cccExp) (\<pi> \<bullet> x) (\<pi> \<bullet> e) \<cdot> (\<pi> \<bullet> ae, \<pi> \<bullet> G)" unfolding CoCallAnalysis.ccBind_eq by perm_simp (simp add: Abs_cfun_eqvt) } thus ?thesis by (auto intro: cfun_eqvtI) qed lemma ccBinds_eqvt[eqvt]: "\<pi> \<bullet> (CoCallAnalysis.ccBinds cccExp \<Gamma>) = CoCallAnalysis.ccBinds (\<pi> \<bullet> cccExp) (\<pi> \<bullet> \<Gamma>)" apply (rule cfun_eqvtI) unfolding CoCallAnalysis.ccBinds_eq apply (perm_simp) apply rule done lemma ccBindsExtra_eqvt[eqvt]: "\<pi> \<bullet> (CoCallAnalysis.ccBindsExtra cccExp \<Gamma>) = CoCallAnalysis.ccBindsExtra (\<pi> \<bullet> cccExp) (\<pi> \<bullet> \<Gamma>)" by (rule cfun_eqvtI) (simp add: CoCallAnalysis.ccBindsExtra_def) lemma ccBind_cong[fundef_cong]: "cccexp1 e = cccexp2 e \<Longrightarrow> CoCallAnalysis.ccBind cccexp1 x e = CoCallAnalysis.ccBind cccexp2 x e " apply (rule cfun_eqI) apply (case_tac xa) apply (auto simp add: CoCallAnalysis.ccBind_eq) done lemma ccBinds_cong[fundef_cong]: "\<lbrakk> (\<And> e. e \<in> snd ` set heap2 \<Longrightarrow> cccexp1 e = cccexp2 e); heap1 = heap2 \<rbrakk> \<Longrightarrow> CoCallAnalysis.ccBinds cccexp1 heap1 = CoCallAnalysis.ccBinds cccexp2 heap2" apply (rule cfun_eqI) unfolding CoCallAnalysis.ccBinds_eq apply (rule arg_cong[OF mapCollect_cong]) apply (rule arg_cong[OF ccBind_cong]) apply auto by (metis imageI map_of_SomeD snd_conv) lemma ccBindsExtra_cong[fundef_cong]: "\<lbrakk> (\<And> e. e \<in> snd ` set heap2 \<Longrightarrow> cccexp1 e = cccexp2 e); heap1 = heap2 \<rbrakk> \<Longrightarrow> CoCallAnalysis.ccBindsExtra cccexp1 heap1 = CoCallAnalysis.ccBindsExtra cccexp2 heap2" apply (rule cfun_eqI) unfolding CoCallAnalysis.ccBindsExtra_simp apply (rule arg_cong2[OF ccBinds_cong mapCollect_cong]) apply simp+ done end
close all; clear all; clc; rng('default'); % Create the directory for storing images [status_code,message,message_id] = mkdir('bin'); % Signal space N = 1000; % Number of measurements M = 200; % Sparsity levels Ks = 4:120; % Number of dictionaries to be created num_dict_trials = 100; % Number of signals to be created for each dictionary num_signal_trials = 20; % Number of trials for each K num_trials = num_dict_trials * num_signal_trials; omp_success_rates_with_k = zeros(numel(Ks), 1); omp_average_iterations_with_k = zeros(numel(Ks), 1); omp_maximum_iterations_with_k = zeros(numel(Ks), 1); Ls = [2, 4, 6, 8] num_ls = numel(Ls) gomp_success_rates_with_k = zeros(numel(Ks), num_ls); gomp_average_iterations_with_k = zeros(numel(Ks), num_ls); gomp_maximum_iterations_with_k = zeros(numel(Ks), num_ls); for K=Ks % Trial number nt = 0; omp_num_successes = 0; omp_num_iterations = 0; omp_max_iterations = 0; gomp_num_successes = zeros(num_ls, 1); gomp_num_iterations = zeros(num_ls, 1); gomp_max_iterations = zeros(num_ls, 1); for ndt=1:num_dict_trials % Sensing matrix Phi = spx.dict.simple.gaussian_dict(M, N); for nst=1:num_signal_trials nt = nt + 1; % Construct the signal generator. gen = spx.data.synthetic.SparseSignalGenerator(N, K); % Generate bi-uniform signals x = gen.gaussian; % Measurement vectors y = Phi.apply(x); % OMP solver instance solver = spx.pursuit.single.OrthogonalMatchingPursuit(Phi, K); % Solve the sparse recovery problem omp_result = solver.solve(y); % Solution vector z = omp_result.z; omp_stats = spx.commons.sparse.recovery_performance(Phi, K, y, x, z); omp_num_iterations = omp_num_iterations + omp_result.iterations; if omp_max_iterations < omp_result.iterations omp_max_iterations = omp_result.iterations; end omp_num_successes = omp_num_successes + omp_stats.success; fprintf('K=%d, Trial: %d, OMP: %s, ', ... K, nt, spx.io.true_false_short(omp_stats.success)); for nl=1:num_ls L = Ls(nl); % GOMP solver instance solver = spx.pursuit.single.GOMP(Phi, K); % Set the number of atoms to be selected in each iteration solver.L = L; % Solve the sparse recovery problem gomp_result = solver.solve(y); % Solution vector z = gomp_result.z; gomp_stats = spx.commons.sparse.recovery_performance(Phi, K, y, x, z); gomp_num_iterations(nl) = gomp_num_iterations(nl) + gomp_result.iterations; if gomp_max_iterations(nl) < gomp_result.iterations gomp_max_iterations(nl) = gomp_result.iterations; end gomp_num_successes(nl) = gomp_num_successes(nl) + gomp_stats.success; fprintf(' GOMP-%d:%s', ... L, spx.io.true_false_short(gomp_stats.success)); end fprintf('\n') end end omp_success_rate = omp_num_successes / num_trials; omp_average_iterations = omp_num_iterations / num_trials; omp_success_rates_with_k(K) = omp_success_rate; omp_average_iterations_with_k (K) = omp_average_iterations; omp_maximum_iterations_with_k(K) = omp_max_iterations; for nl=1:num_ls gomp_success_rate = gomp_num_successes(nl) / num_trials; gomp_average_iterations = gomp_num_iterations(nl) / num_trials; gomp_success_rates_with_k(K, nl) = gomp_success_rate; gomp_average_iterations_with_k (K, nl) = gomp_average_iterations; gomp_maximum_iterations_with_k(K, nl) = gomp_max_iterations(nl); end end save('bin/omp_vs_gomp_comparison.mat');
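The phase-transition data written by the script above could be inspected afterwards with a short Python sketch like the one below. The file path, the saved variable names, and the default (non-v7.3) MAT format are assumptions carried over from the script, not guarantees.

```python
import matplotlib.pyplot as plt
from scipy.io import loadmat

# Load the workspace written by save('bin/omp_vs_gomp_comparison.mat');
# variable names and the default (non-v7.3) MAT format are assumptions.
data = loadmat('bin/omp_vs_gomp_comparison.mat')

Ks = data['Ks'].ravel().astype(int)
omp_rates = data['omp_success_rates_with_k'].ravel()
gomp_rates = data['gomp_success_rates_with_k']
Ls = data['Ls'].ravel().astype(int)

# The MATLAB script stores the rate for sparsity K at (1-based) index K, hence K-1 here.
plt.plot(Ks, omp_rates[Ks - 1], label='OMP')
for col, L in enumerate(Ls):
    plt.plot(Ks, gomp_rates[Ks - 1, col], label='GOMP, L=%d' % L)
plt.xlabel('Sparsity level K')
plt.ylabel('Empirical recovery success rate')
plt.legend()
plt.show()
```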
/* * Created by Anton Zhigaylo <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of the MIT License */ #pragma once #include <map> #include <tuple> #include <string> #include <vector> #include <utility> #include <iostream> #include <boost/optional.hpp> #include <boost/property_tree/json_parser.hpp> #include <boost/date_time/posix_time/posix_time.hpp> #include <boost/date_time/posix_time/posix_time_io.hpp> namespace Parsers { class CCsvPrserImpl final { public: struct router_item_t { typedef std::pair <std::uint16_t, std::string> mapp_item_t; bool topic_sub; uint32_t number; std::string mqtt_topic; std::vector<mapp_item_t> mapping; }; typedef std::pair <std::string, router_item_t> gwt_item_t; explicit CCsvPrserImpl(const std::string& csv_prj_path, const std::string& table_path); ~CCsvPrserImpl(); void parseCsvProject(); private: typedef std::pair <std::string, uint32_t> header_item_t; typedef std::tuple<std::string, std::string, std::string, std::string> gtw_item_tuple_t; CCsvPrserImpl(const CCsvPrserImpl&) = delete; CCsvPrserImpl& operator=(const CCsvPrserImpl&) = delete; bool createGtwFile(const std::string &gtw_file, const std::vector<gtw_item_tuple_t>& gtw_vector); bool createValueNode(const std::string& value_str, boost::property_tree::ptree& value_node); bool prepareHeaderMap(const std::string &csv_file, std::map <std::string, uint32_t>& header_map); bool prepareGtwVector(const std::string &csv_file, std::vector<gtw_item_tuple_t>& gtw_vector); boost::optional<uint32_t> getColumByName(const std::string &header, const std::string &col_name); boost::optional<std::string> getColumValueByNum(uint32_t colum_num, const std::string &data_str); std::string getDate_str(); std::string m_table_path; std::string m_csv_prj_path; std::map <std::string, uint32_t> m_header_map; std::vector<gtw_item_tuple_t> m_gtw_vector; }; } //namespace Parsers
open import Prelude module Implicits.Resolution.GenericFinite.Algorithm.Completeness where open import Induction.WellFounded open import Induction.Nat open import Data.Fin.Substitution open import Data.Nat.Base using (_<′_) open import Data.Maybe as Maybe open import Data.Nat hiding (_<_) open import Data.Nat.Properties open import Relation.Binary using (module DecTotalOrder) open DecTotalOrder decTotalOrder using () renaming (refl to ≤-refl; trans to ≤-trans) open import Data.Unit open import Implicits.Syntax open import Implicits.Syntax.Type.Unification open import Implicits.Syntax.Type.Unification.Lemmas as mgu hiding (complete) open import Implicits.Substitutions open import Implicits.Substitutions.Lemmas open import Implicits.Resolution.GenericFinite.Resolution open import Implicits.Resolution.GenericFinite.Algorithm open import Implicits.Resolution.GenericFinite.TerminationCondition open import Implicits.Resolution.Termination open import Extensions.Bool as Bool private module M = MetaTypeMetaSubst module ResolutionComplete (cond : TerminationCondition) where open TerminationCondition cond open ResolutionAlgorithm cond open ResolutionRules cond open Lemmas lemx : ∀ {ν m} {Δ : ICtx ν} {Φ} r c τ (u : M.MetaSub MetaType ν m zero) → Δ , Φ ⊢ from-meta (r M./ u M.↑tp) tp[/tp c ] ↓ τ → Δ , Φ ⊢ from-meta (open-meta r M./ (to-meta c ∷ u)) ↓ τ lemx r c τ u p = {!!} mutual match'-complete : ∀ {ν m} (Δ : ICtx ν) (Φ : TCtx) τ (r : MetaType m ν) → (Φ↓ : T-Acc Φ) → (m↓ : m<-Acc r) → (∃ λ u → Δ , Φ ⊢ from-meta (r M./ u) ↓ τ) → Is-just (match' Δ Φ τ r Φ↓ m↓) match'-complete Δ Φ τ (a ⇒ b) f (acc g) (proj₁ , i-iabs x y b↓τ) with match' Δ Φ τ b f (g _ (b-m<-a⇒b a b)) | match'-complete Δ Φ τ b f (g _ (b-m<-a⇒b a b)) (, b↓τ) match'-complete Δ Φ τ (a ⇒ b) f (acc g) (u , i-iabs x y b↓τ) | nothing | () match'-complete Δ Φ τ (a ⇒ b) f (acc g) (u , i-iabs x₁ y b↓τ) | just u' | just px with (step Φ Δ (from-meta (a M./ u)) (from-meta (b M./ u)) τ) <? 
Φ match'-complete Δ Φ τ (a ⇒ b) f (acc g) (u , i-iabs x₁ y b↓τ) | just u' | just px | yes p = {!!} match'-complete Δ Φ τ (a ⇒ b) f (acc g) (u , i-iabs x₁ y b↓τ) | just u' | just px | no ¬p = {!¬p!} -- match'-complete Δ Φ τ (a ⇒ b) (acc f) (acc g) | just u | yes Φ< -- with resolve' Δ (step Φ Δ (from-meta (a M./ u)) (from-meta (b M./ u)) τ) -- (from-meta (a M./ u)) (f _ Φ<) -- match'-complete Δ Φ τ (a ⇒ b) (acc f) (acc g) | just u | yes Φ< | true = just u -- match'-complete Δ Φ τ (a ⇒ b) (acc f) (acc g) | just u | yes Φ< | false = nothing -- match'-complete Δ Φ τ (a ⇒ b) (acc f) (acc g) | just u | no φ> = nothing match'-complete Δ Φ τ (∀' r) Φ↓ (acc m↓) (u , i-tabs b open-r↓τ) with match' Δ Φ τ (open-meta r) Φ↓ (m↓ _ (open-meta-a-m<-∀'a r)) | match'-complete Δ Φ τ (open-meta r) Φ↓ (m↓ _ (open-meta-a-m<-∀'a r)) (, lemx r b τ u open-r↓τ) match'-complete Δ Φ τ (∀' r) Φ↓ (acc m↓) (u , i-tabs b open-r↓τ) | just x | just px = just tt match'-complete Δ Φ τ (∀' r) Φ↓ (acc m↓) (u , i-tabs b open-r↓τ) | nothing | () match'-complete Δ Φ .(tvar x) (simpl (tvar x)) Φ↓ m↓ (u , i-simp .(tvar x)) = mgu.complete (simpl (tvar x)) (tvar x) u refl match'-complete Δ Φ τ (simpl (mvar x)) Φ↓ m↓ (u , proj₂) = let (u' , u'-uni) = mvar-unifiable x τ in mgu.complete (simpl (mvar x)) τ u' u'-uni match'-complete Δ Φ ._ (simpl (a →' b)) Φ↓ m↓ (u , i-simp ._) = mgu.complete (simpl (a →' b)) _ u refl match'-complete Δ Φ .(tc x) (simpl (tc x)) Φ↓ m↓ (u , ResolutionRules.i-simp .(tc x)) = mgu.complete (simpl (tc x)) _ u refl match-complete : ∀ {ν} (Δ : ICtx ν) (Φ : TCtx) → (τ : SimpleType ν) → (r : Type ν) → (Φ↓ : T-Acc Φ) → Δ , Φ ⊢ r ↓ τ → Is-true (match Δ Φ τ r Φ↓) match-complete Δ Φ τ r Φ↓ p with match' Δ Φ τ (to-meta {zero} r) Φ↓ (m<-well-founded _) | match'-complete Δ Φ τ (to-meta {zero} r) Φ↓ (m<-well-founded _) ([] , subst (λ z → Δ , Φ ⊢ z ↓ τ) (sym $ from-to-meta-/-vanishes) p) match-complete Δ Φ τ r Φ↓ p | just x | just px = true tt match-complete Δ Φ τ r Φ↓ p | nothing | () match1st-complete : ∀ {ν} (Δ : ICtx ν) (Φ : TCtx) (ρs : ICtx ν) → (τ : SimpleType ν) → (Φ↓ : T-Acc Φ) → (∃ λ r → r List.∈ ρs × Δ , Φ ⊢ r ↓ τ) → Is-true (match1st Δ Φ ρs τ Φ↓) match1st-complete Δ Φ List.[] τ Φ↓ (_ , () , _) match1st-complete Δ Φ (x List.∷ ρs) τ Φ↓ (.x , here refl , r↓τ) with match Δ Φ τ x Φ↓ | match-complete Δ Φ τ x Φ↓ r↓τ match1st-complete Δ Φ (x List.∷ ρs) τ Φ↓ (.x , here refl , r↓τ) | true | true _ = true tt match1st-complete Δ Φ (x List.∷ ρs) τ Φ↓ (.x , here refl , r↓τ) | false | () match1st-complete Δ Φ (x List.∷ ρs) τ Φ↓ (proj₁ , there p , r↓τ) with match Δ Φ τ x Φ↓ match1st-complete Δ Φ (x List.∷ ρs) τ Φ↓ (r , there r∈ρs , r↓τ) | true = true tt match1st-complete Δ Φ (x List.∷ ρs) τ Φ↓ (r , there r∈ρs , r↓τ) | false = match1st-complete Δ Φ ρs τ Φ↓ (r , r∈ρs , r↓τ) complete' : ∀ {ν} (Δ : ICtx ν) Φ {r} → (Φ↓ : T-Acc Φ) → Δ , Φ ⊢ᵣ r → Is-true (resolve' Δ Φ r Φ↓) complete' Δ Φ Φ↓ (r-simp x∈Δ x↓τ) = match1st-complete Δ Φ Δ _ Φ↓ (_ , x∈Δ , x↓τ) complete' Δ Φ Φ↓ (r-iabs ρ₁ p) = complete' (ρ₁ List.∷ Δ) Φ Φ↓ p complete' Δ Φ Φ↓ (r-tabs p) = complete' (ictx-weaken Δ) Φ Φ↓ p complete : ∀ {ν} (Δ : ICtx ν) Φ {r} → (Φ↓ : T-Acc Φ) → Δ , Φ ⊢ᵣ r → Is-true (resolve Δ Φ r) complete Δ Φ Φ↓ p = complete' Δ Φ (wf-< _) p
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__107.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_flash_nodata_cub Protocol Case Study*} theory n_flash_nodata_cub_lemma_on_inv__107 imports n_flash_nodata_cub_base begin section{*All lemmas on causal relation between inv__107 and some rule r*} lemma n_PI_Remote_GetVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_PI_Remote_GetXVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_NakVsinv__107: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Nak__part__0Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Nak__part__1Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Nak__part__2Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Get__part__0Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Get__part__1Vsinv__107: assumes a1: "(\<exists> src. 
src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Put_HeadVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_PutVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Put_DirtyVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_Get_NakVsinv__107: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_Get_PutVsinv__107: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_Nak__part__0Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_Nak__part__1Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_Nak__part__2Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_GetX__part__0Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_GetX__part__1Vsinv__107: assumes a1: "(\<exists> src. 
src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_1Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_2Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_3Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_4Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_5Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_6Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__107: assumes a1: "(\<exists> src. 
src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_HomeVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8Vsinv__107: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__107: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_10_HomeVsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_10Vsinv__107: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_11Vsinv__107: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_NakVsinv__107: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_PutXVsinv__107: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_PutVsinv__107: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_PutXVsinv__107: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_FAckVsinv__107: assumes a1: "(r=n_NI_FAck )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__107 p__Inv4" apply fastforce done have "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const true)) s))" have "?P3 s" apply (cut_tac a1 a2 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Cmd'')) (Const SHWB_FAck))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const true))) s))" have "?P2 s" proof(cut_tac a1 a2 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_PutX_HomeVsinv__107: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX__part__0Vsinv__107: assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_WbVsinv__107: assumes a1: "r=n_NI_Wb " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_3Vsinv__107: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_1Vsinv__107: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_GetX__part__1Vsinv__107: assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_GetX__part__0Vsinv__107: assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Remote_ReplaceVsinv__107: assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_ReplaceVsinv__107: assumes a1: "r=n_PI_Local_Replace " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_existsVsinv__107: assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Remote_PutXVsinv__107: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_Get_Put_HomeVsinv__107: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvVsinv__107: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_PutXVsinv__107: assumes a1: "r=n_PI_Local_PutX " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_Get_PutVsinv__107: assumes a1: "r=n_PI_Local_Get_Put " and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_ShWbVsinv__107: assumes a1: "r=n_NI_ShWb N " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__107: assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_ReplaceVsinv__107: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_GetX_Nak_HomeVsinv__107: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_PutXAcksDoneVsinv__107: assumes a1: "r=n_NI_Local_PutXAcksDone " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX__part__1Vsinv__107: assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_Get_Nak_HomeVsinv__107: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_exists_HomeVsinv__107: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Replace_HomeVsinv__107: assumes a1: "r=n_NI_Replace_Home " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_PutVsinv__107: assumes a1: "r=n_NI_Local_Put " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Nak_ClearVsinv__107: assumes a1: "r=n_NI_Nak_Clear " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_Get_GetVsinv__107: assumes a1: "r=n_PI_Local_Get_Get " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Nak_HomeVsinv__107: assumes a1: "r=n_NI_Nak_Home " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_2Vsinv__107: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__107: assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__107 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
```python import sympy as sp sp.init_printing() from cymbol import Cymbol ``` ```python E_T = Cymbol(r'E_{\mathrm{T}}', codename='E_T_', real=True, nonnegative=True) S_T = Cymbol(r'S_{\mathrm{T}}', codename='S_T_', real=True, nonnegative=True) r_T = Cymbol(r'r_{\mathrm{T}}', codename='r_T_', real=True, nonnegative=True) c_T = Cymbol(r'c_{\mathrm{T}}', codename='c_T_', real=True, nonnegative=True) E_N = Cymbol(r'E_{\mathrm{N}}', codename='E_N_', real=True, nonnegative=True) S_N = Cymbol(r'S_{\mathrm{N}}', codename='S_N_', real=True, nonnegative=True) r_N = Cymbol(r'r_{\mathrm{N}}', codename='r_N_', real=True, nonnegative=True) c_N = Cymbol(r'c_{\mathrm{N}}', codename='c_N_', real=True, nonnegative=True) eta = Cymbol(r'\eta', codename='eta_', real=True, nonnegative=True) r = Cymbol(r'r', codename='r_', real=True, nonnegative=True) ``` ```python omega_T = Cymbol(r'\omega_{\mathrm{T}}', codename='omega_T_', real=True, nonnegative=True) omega_N = Cymbol(r'\omega_{\mathrm{N}}', codename='omega_N_', real=True, nonnegative=True) ``` ```python Y_T = Cymbol(r'Y_{\mathrm{T}}', codename='Y_T_', real=True, nonnegative=True) Y_N = Cymbol(r'Y_{\mathrm{N}}', codename='Y_N_', real=True, nonnegative=True) ``` ```python S_NT = sp.sqrt(S_N*S_T) c_NT = sp.sqrt(c_N*c_T) ``` ```python omega_NT = 1 - sp.sqrt((1-omega_N)*(1-omega_T)) ``` ```python phi_N = (1 - omega_N)**c_N * S_N / (r+1) * (Y_N / S_N)**(r+1) phi_N ``` ```python phi_T = (1 - omega_T)**c_T * S_T / (r+1) * (Y_T / S_T)**(r+1) phi_T ``` ```python phi_NT = (1 - omega_NT)**c_NT * S_NT / (r+1) * ((Y_N + Y_T)/(S_NT))**(r+1) phi_NT ``` ```python phi = (1 - eta)*(phi_N + phi_T) + eta*phi_NT ``` ```python dot_omega_N = phi.diff(Y_N) dot_omega_T = phi.diff(Y_T) ``` ```python sp.simplify(dot_omega_N.subs(eta, 0)) ``` ```python sp.simplify(dot_omega_T.subs(eta, 0)) ``` ```python sp.simplify(dot_omega_N.subs(eta, 1)) ``` ```python sp.simplify(dot_omega_T.subs(eta, 1)) ``` ```python ```
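A minimal numerical-evaluation sketch (added here; it assumes the symbols and the expressions `dot_omega_N` / `dot_omega_T` defined in the cells above, and that `Cymbol` behaves like an ordinary `sympy` symbol):

```python
# Turn the symbolic damage rates into plain numerical functions with lambdify.
import sympy as sp

args = (Y_N, Y_T, omega_N, omega_T, S_N, S_T, c_N, c_T, r, eta)
get_dot_omega_N = sp.lambdify(args, dot_omega_N, 'numpy')
get_dot_omega_T = sp.lambdify(args, dot_omega_T, 'numpy')

# Evaluate at some trial values of the thermodynamic forces and material parameters.
print(get_dot_omega_N(0.5, 0.2, 0.1, 0.1, 10.0, 10.0, 1.0, 1.0, 2.0, 0.5))
print(get_dot_omega_T(0.5, 0.2, 0.1, 0.1, 10.0, 10.0, 1.0, 1.0, 2.0, 0.5))
```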
lemma interior_surjective_linear_image: fixes f :: "'a::euclidean_space \<Rightarrow> 'a::euclidean_space" assumes "linear f" "surj f" shows "interior(f ` S) = f ` (interior S)"
Formal statement is: lemma filterlim_at_top_mult_at_top: assumes f: "LIM x F. f x :> at_top" and g: "LIM x F. g x :> at_top" shows "LIM x F. (f x * g x :: real) :> at_top" Informal statement is: If $f(x)$ and $g(x)$ both tend to infinity along the filter $F$, then their product $f(x)g(x)$ also tends to infinity along $F$.
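A brief informal justification (added here, not part of the original statement pair): given any bound $c \ge 0$, eventually along $F$ both $f(x) \ge 1$ and $g(x) \ge c$ hold, so eventually $f(x)\,g(x) \ge c$; since $c$ was arbitrary, $f(x)g(x)$ tends to infinity along $F$.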
[STATEMENT] lemma make_tr_nth: "j < n \<Longrightarrow> make_tr f n i ! j = f (i + j)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. j < n \<Longrightarrow> make_tr f n i ! j = f (i + j) [PROOF STEP] by (induct n arbitrary: i j) (auto simp add: nth_Cons')
""" """ import numpy as np import pandas as pd import matplotlib.pyplot as plt # download the data from given URL and with given columns url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data' columns = ['symbolying','normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'drive-wheels', 'engine-location' ,'wheel-base', 'length', 'width', 'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size', 'fuel-system' ,'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price'] #loading the dataset using pandas and replacing "?" with NA values raw_data = pd.read_csv(url,names=columns, na_values="?") #We ignore 'symboling' column raw_data.pop("symbolying") # drop all rows with missing values dataset = raw_data.copy() dataset =dataset.dropna() #Now we assign a one-hot encoding for eatch categorical feature dataset = pd.get_dummies(dataset,columns=["num-of-cylinders","num-of-doors","make","fuel-type","aspiration","body-style","drive-wheels" ,"engine-location","engine-type","fuel-system"], prefix=["num-of-cylinders","num-of-doors","make","fuel-type","aspiration","body-style","drive-wheels" ,"engine-location","engine-type","fuel-system"],prefix_sep='_') # We set 80% of the available data for training and the rest for testing train_dataset = dataset.sample(frac = 0.8, random_state=0) test_dataset = dataset.drop(train_dataset.index) train_features = train_dataset.copy() test_features = test_dataset.copy() train_labels = train_features.pop('normalized-losses') test_labels = test_features.pop('normalized-losses') train_features = train_features.to_numpy() test_features = test_features.to_numpy() train_labels = train_labels.to_numpy() test_labels = test_labels.to_numpy() #since our target variable is always positive, we can log scale it and exponentiate after prediction. it works well with regression models. log_labels = np.log(train_labels) #first we get the identity matrix identity_size = train_features.shape[1] identity_matrix= np.zeros((identity_size, identity_size)) np.fill_diagonal(identity_matrix, 1) # we set a regularization parameter labmda to 1 lamb = 1 xTx = train_features.T.dot(train_features) + lamb * identity_matrix xTx_inv = np.linalg.inv(xTx) xTx_inv_xT = xTx_inv.dot(train_features.T) theta = xTx_inv_xT.dot(log_labels) #since we took the log of the lables, we must exponentiate after each prediction prediction = np.exp(test_features.dot(theta)) mse = (np.square(prediction - test_labels)).mean() percentage = np.mean(np.abs(prediction - test_labels)/(test_labels)) print("mean squared error is {} and the percentage is {}".format(mse,percentage)) #Our normal equation model achieved a mean squared error of 279.0 (16.67 RMSE) and a percentage Error of 11.75% plt.plot(test_labels) plt.plot(prediction) plt.show()
\chapter{Lagrange's Equations of the Second Kind}\label{c2} \section{Generalised coordinates}\label{c2s1} In the previous chapter we observed that in a system of $N$ particles and $r$ constraints, the $3N$ virtual displacements $\delta x^{(n)}_i$ are not independent. One can, however, choose a set of $f = 3N - r$ of them that are independent. $f$ is called the \emph{number of degrees of freedom} of the system. Consider a system of two particles connected by a rigid rod. The two particles each need three Cartesian coordinates. But the constraint makes only five of them independent. It is not clear which five to choose. Although the lack of independence of virtual displacements in all coordinates is taken into account by Lagrange's equations of the first kind, one still has to describe the system in terms of $6$ coordinates. This situation gets worse in the case of a rigid body. If it is made up of $N$ particles then we need $3N$ coordinates to describe its position when only $6$ of them are independent. Generalised coordinates are any $f$ numbers that suffice to describe the position of a system of $f$ degrees of freedom. For example, a rigid body with $N$ particles can be described by three Cartesian coordinates of its centre of mass and three Euler angles. We shall denote the $f$ generalised coordinates by $\{q_1, \ldots, q_f\}$. Let us therefore consider a system of $N$ particles and $f$ degrees of freedom and let $q_1, \ldots, q_f$ be its generalised coordinates. Then the Cartesian coordinates of its particles can be written as \begin{eqnarray} x_i &=& x_i(q_1, \ldots, q_f, t) \\ y_i &=& y_i(q_1, \ldots, q_f, t) \\ z_i &=& z_i(q_1, \ldots, q_f, t). \end{eqnarray} Note that the Cartesian coordinates of a particle are allowed to depend on \emph{all} generalised coordinates. Unlike their Cartesian counterparts, the generalised coordinates are not tied to a single particle. Further, note that the Cartesian coordinates may depend on time as well. From these equations we have \begin{eqnarray} dx_i &=& \pd{x_i}{q_j}dq_j + \pd{x_i}{t}dt \\ dy_i &=& \pd{y_i}{q_j}dq_j + \pd{y_i}{t}dt \\ dz_i &=& \pd{z_i}{q_j}dq_j + \pd{z_i}{t}dt \end{eqnarray} We have used the summation convention in these three equations. Although the functional forms of $x_i, y_i, z_i$ are arbitrary, the differentials $dx_i, dy_i, dz_i$ are linear functions of $dq_j$ and $dt$. The three Cartesian velocities are \begin{eqnarray} \dot{x}_i &=& \pd{x_i}{q_j}\dot{q}_j + \pd{x_i}{t} \label{c2s1e7} \\ \dot{y}_i &=& \pd{y_i}{q_j}\dot{q}_j + \pd{y_i}{t} \label{c2s1e8} \\ \dot{z}_i &=& \pd{z_i}{q_j}\dot{q}_j + \pd{z_i}{t} \label{c2s1e9} \end{eqnarray} Note that the symbol $\dot{x}_i$ denotes the total time derivative of $x_i$ and it differs from $\partial x_i/\partial t$. The generalised coordinates, however, do not depend explicitly on time. The kinetic energy of the system is \begin{eqnarray} T &=& \frac{1}{2}m_i(\dot{x}_i^2 + \dot{y}_i^2 + \dot{z}_i^2) \nonumber \\ &=& \frac{1}{2}m_i\left(\pd{x_i}{q_j}\dot{q}_j + \pd{x_i}{t}\right) \left(\pd{x_i}{q_k}\dot{q}_k + \pd{x_i}{t}\right) + \nonumber \\ & & \frac{1}{2}m_i\left(\pd{y_i}{q_j}\dot{q}_j + \pd{y_i}{t}\right) \left(\pd{y_i}{q_k}\dot{q}_k + \pd{y_i}{t}\right) + \nonumber \\ & & \frac{1}{2}m_i\left(\pd{z_i}{q_j}\dot{q}_j + \pd{z_i}{t}\right) \left(\pd{z_i}{q_k}\dot{q}_k + \pd{z_i}{t}\right) \label{c1s1e10} \end{eqnarray} We now compute the derivative of $T$ with respect to the generalised velocity $\dot{q}_l$.
\begin{eqnarray} \pd{T}{\dot{q}_l} &=& m_i\left(\pd{x_i}{q_j}\dot{q}_j + \pd{x_i}{t}\right)\pd{x_i}{q_l} + m_i\left(\pd{y_i}{q_j}\dot{q}_j + \pd{y_i}{t}\right)\pd{y_i}{q_l} + \nonumber \\ & & m_i\left(\pd{z_i}{q_j}\dot{q}_j + \pd{z_i}{t}\right)\pd{z_i}{q_l} \label{c2s1e11} \end{eqnarray} Using equations \eqref{c2s1e7}, \eqref{c2s1e8} and \eqref{c2s1e9} we get \begin{equation}\label{c2s1e12} \pd{T}{\dot{q}_l} = m_i\left(\dot{x}_i\pd{x_i}{q_l} + \dot{y}_i\pd{y_i}{q_l} + \dot{z}_i\pd{z_i}{q_l}\right). \end{equation} We now take the total time derivative of this equation. \begin{eqnarray} \frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) &=& m_i\left(\ddot{x}_i\pd{x_i}{q_l} + \dot{x}_i\frac{d}{dt} \left(\pd{x_i}{q_l}\right)\right) + m_i\left(\ddot{y}_i\pd{y_i}{q_l} + \dot{y}_i\frac{d}{dt} \left(\pd{y_i}{q_l}\right)\right) + \nonumber \\ & & m_i\left(\ddot{z}_i\pd{z_i}{q_l} + \dot{z}_i\frac{d}{dt} \left(\pd{z_i}{q_l}\right)\right) \end{eqnarray} Evaluating the total time derivatives on the right hand side, \begin{eqnarray} \frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) &=& m_i\left(\ddot{x}_i\pd{x_i}{q_l} + \dot{x}_i\dot{q}_k\frac{\partial^2 x_i}{\partial q_k\partial q_l} + \dot{x}_i\frac{\partial}{\partial q_l}\pd{x_i}{t}\right) + \nonumber \\ & & m_i\left(\ddot{y}_i\pd{y_i}{q_l} + \dot{y}_i\dot{q}_k\frac{\partial^2 y_i}{\partial q_k\partial q_l} + \dot{y}_i\frac{\partial}{\partial q_l}\pd{y_i}{t}\right) + \nonumber \\ & & m_i\left(\ddot{z}_i\pd{z_i}{q_l} + \dot{z}_i\dot{q}_k\frac{\partial^2 z_i}{\partial q_k\partial q_l} + \dot{z}_i\frac{\partial}{\partial q_l}\pd{z_i}{t}\right) \label{c2s1e14} \end{eqnarray} We now observe that \[ \pd{\dot{q}_k}{q_l} = \frac{d}{dt}\left(\pd{q_k}{q_l}\right) = \frac{d}{dt}\delta_{kl} = 0 \] so that we can write \begin{eqnarray} \frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) &=& m_i\left(\ddot{x}_i\pd{x_i}{q_l} + \dot{x}_i\frac{\partial}{\partial q_l} \left(\dot{q}_k\pd{x_i}{q_k} + \pd{x_i}{t}\right)\right) + \nonumber \\ & & m_i\left(\ddot{y}_i\pd{y_i}{q_l} + \dot{y}_i\frac{\partial}{\partial q_l} \left(\dot{q}_k\pd{y_i}{q_k} + \pd{y_i}{t}\right)\right) + \nonumber \\ & & m_i\left(\ddot{z}_i\pd{z_i}{q_l} + \dot{z}_i\frac{\partial}{\partial q_l} \left(\dot{q}_k\pd{z_i}{q_k} + \pd{z_i}{t}\right)\right) \label{c2s1e15} \end{eqnarray} Using equations \eqref{c2s1e7}, \eqref{c2s1e8} and \eqref{c2s1e9} we get \[ \frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) = m_i\left[\left(\ddot{x}_i\pd{x_i}{q_l} + \dot{x}_i\pd{\dot{x}_i}{q_l}\right) + \left(\ddot{y}_i\pd{y_i}{q_l} + \dot{y}_i\pd{\dot{y}_i}{q_l}\right) + \left(\ddot{z}_i\pd{z_i}{q_l} + \dot{z}_i\pd{\dot{z}_i}{q_l}\right)\right].
\] Rearranging it \[ \frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) = \pd{T}{q_l} + m_i\left(\ddot{x}_i\pd{x_i}{q_l} + \ddot{y}_i\pd{y_i}{q_l} + \ddot{z}_i\pd{z_i}{q_l}\right) \] or \begin{equation}\label{c2s1e16} \frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) - \pd{T}{q_l} = m_i\left(\ddot{x}_i\pd{x_i}{q_l} + \ddot{y}_i\pd{y_i}{q_l} + \ddot{z}_i\pd{z_i}{q_l}\right) \end{equation} Multiplying both sides by $\delta q_l$, \begin{equation}\label{c2s1e17} \left(\frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) - \pd{T}{q_l}\right)\delta q_l = m_i\left(\ddot{x}_i\delta x_i + \ddot{y}_i\delta y_i + \ddot{z}_i\delta z_i\right) \end{equation} Comparing with equation \eqref{c1s2e13} we conclude that \begin{equation} \left(\frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) - \pd{T}{q_l}\right)\delta q_l = X_i\delta x_i + Y_i\delta y_i + Z_i \delta z_i, \end{equation} where $(X_i, Y_i, Z_i)$ are the components of the external force on the $i$th particle. If this force is conservative we can write it in terms of a potential $V$ as $X_i = -\pd{V}{x_i}$, $Y_i = -\pd{V}{y_i}$, $Z_i = -\pd{V}{z_i}$, so that \begin{eqnarray} \left(\frac{d}{dt}\left(\pd{T}{\dot{q}_l}\right) - \pd{T}{q_l}\right)\delta q_l &=& -\pd{V}{x_i}\delta x_i - \pd{V}{y_i}\delta y_i - \pd{V}{z_i} \delta z_i \nonumber \\ &=& -\pd{V}{x_i}\pd{x_i}{q_l}\delta q_l - \pd{V}{y_i}\pd{y_i}{q_l}\delta q_l - \pd{V}{z_i} \pd{z_i}{q_l}\delta q_l \nonumber \\ &=& -\pd{V}{q_l}\delta q_l \label{c1s1e20} \end{eqnarray} Note that the last step is just the chain rule: the sum over all Cartesian coordinates of all particles gives $\partial V/\partial q_l$, not $3\partial V/\partial q_l$, because $V$ depends on all of $x_1, \ldots, z_N$ and each of these is a function of all the generalised coordinates. Since the $\delta q_l$ are independent, the coefficient of each must vanish. If $V$ \emph{does not} depend on the generalised velocities we can therefore write \eqref{c1s1e20} as \begin{equation}\label{c2s1e21} \frac{d}{dt}\pd{L}{\dot{q}_l} - \pd{L}{q_l} = 0, \end{equation} where the function \begin{equation}\label{c1s1e22} L(q_1, \ldots, q_f, \dot{q}_1, \ldots, \dot{q}_f) = T - V \end{equation} is the Lagrangian of the system and equations \eqref{c2s1e21} are called Lagrange's equations of the second kind. A few remarks about \eqref{c2s1e21}: \begin{itemize} \item Often the phrase `second kind' is omitted and the equations are simply called Lagrange's equations. \item These equations have so far been derived only for conservative forces. \item They do not involve the forces of constraint, nor do they require Cartesian coordinates. \item They are valid in all frames of reference. However, $T$ depends on the generalised velocities and will change across frames of reference. Therefore, $T$ should always be calculated with respect to an inertial frame of reference \cite[p. 31]{akr}. \end{itemize} Problems. \begin{enumerate} \item A uniform rod leans against a wall \cite[Problem 1, chapter 4]{akr}. Let $l$ be its length. Its motion is confined to a plane. Its unconstrained motion has three degrees of freedom, the position of its centre of mass and its orientation. In the present problem, the $x$ coordinate of its end touching the wall is constrained to be $0$ and the $y$ coordinate of its end touching the floor is constrained to be $0$. Therefore, one generalised coordinate suffices to describe the motion. We choose $\theta$, the (smaller) angle made by the rod with the $x$ axis. If the rod is in the first quadrant then it makes an angle $\pi -\theta$ with the positive $x$ axis. The centre of the rod is at the point $(0, l/2)$ when $\theta = \pi/2$ and at $(l/2, 0)$ when $\theta = 0$. It traces the arc of a circle of radius $l/2$ as the rod slides down. The moment of inertia of the rod about its centre of mass is $ml^2/12$.
Therefore, the rotational kinetic energy is \[ T_r = \frac{I}{2}\dot{\theta}^2 = \frac{m}{24}l^2\dot{\theta}^2 \] The translational kinetic energy of its centre of mass is \[ T_c = \frac{m}{2}v^2 = \frac{m}{8}l^2\dot{\theta}^2 \] The total kinetic energy is \[ T = T_r + T_c = \frac{ml^2}{6}\dot{\theta}^2. \] We will now find the potential energy. If $dm$ is the mass element of the rod at a height $y$ then \[ dV = gy\, dm \] If $\lambda$ is the linear mass density of the rod, $dm = \lambda\, ds = m\,ds/l$. Thus, \[ dV = \frac{mg}{l} y\,ds = \frac{mg}{l}y \sqrt{(dx)^2 + (dy)^2} = \frac{mg}{l}y\,dy\sqrt{1 + \left(\frac{dx}{dy}\right)^2}. \] Now the equation of the straight line representing the rod is $y = y_0 - x\tan\theta$ so that \[ \frac{dx}{dy} = -\cot\theta \] and, since only its square enters, \[ dV = \frac{mg}{l}\csc\theta\, y\,dy \] or \[ V = \int_0^{l\sin\theta} \frac{mg}{l}\csc\theta\, y\,dy = \frac{mgl}{2}\sin\theta. \] Therefore, the Lagrangian is \[ L = \frac{ml^2}{6}\dot{\theta}^2 - \frac{mgl}{2}\sin\theta \] and the equation of motion is \[ \frac{ml^2}{3}\ddot{\theta} + \frac{mgl}{2}\cos\theta = 0 \Rightarrow \frac{2l}{3}\ddot{\theta} + g\cos\theta = 0 \Rightarrow \ddot{\theta} = -\frac{3g}{2l}\cos\theta. \] \item An old model of the He atom consisted of a fixed nucleus and two electrons on the opposite ends of a diameter of a circle centred at the nucleus \cite[Problem 3, chapter 4]{akr}. Without the constraints, the motion of the two electrons in a plane would have required four coordinates. Their constraints are: \begin{itemize} \item They are always at the opposite ends of a diameter. This means that we can consider them to be the ends of a rigid, weightless rod. \item Their centre of mass is fixed at the nucleus. This fixes the two coordinates of their centre of mass. \end{itemize} This leaves the system with just one degree of freedom. If $\theta$ is the angle made by one electron with the positive $x$ axis, the other electron makes an angle $\theta + \pi$. If $m$ is the mass of the electrons, their kinetic energy is \[ T = \frac{m}{2}r^2\dot{\theta}^2 + \frac{m}{2}r^2\dot{\theta}^2 = mr^2\dot{\theta}^2. \] Their potential energy is \[ V = -\frac{2e^2}{r} - \frac{2e^2}{r} = -\frac{4e^2}{r} \] (the electron--electron repulsion $e^2/2r$ is constant, since $r$ is fixed, and may be omitted) so that the Lagrangian is \[ L = mr^2\dot{\theta}^2 + \frac{4e^2}{r}. \] The equation of motion is $\ddot{\theta} = 0$, that is $\theta = \alpha t + \beta$, where $\alpha$ and $\beta$ are constants fixed by the initial conditions. \end{enumerate}
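As a further quick illustration of \eqref{c2s1e21} (an aside added here, not one of the problems in \cite{akr}), consider a plane pendulum of length $l$ and mass $m$ with the single generalised coordinate $\theta$ measured from the downward vertical. Then \[ x = l\sin\theta, \qquad y = -l\cos\theta, \qquad T = \frac{m}{2}l^2\dot{\theta}^2, \qquad V = -mgl\cos\theta, \] so that $L = \frac{m}{2}l^2\dot{\theta}^2 + mgl\cos\theta$ and \eqref{c2s1e21} gives \[ ml^2\ddot{\theta} + mgl\sin\theta = 0 \quad\Longrightarrow\quad \ddot{\theta} = -\frac{g}{l}\sin\theta, \] the familiar pendulum equation.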
lemma continuous_on_components_open_eq: fixes S :: "'a::real_normed_vector set" shows "open S \<Longrightarrow> (continuous_on S f \<longleftrightarrow> (\<forall>c \<in> components S. continuous_on c f))"
If $x$ is a real number greater than $1$, then the sequence $x^n$ tends to infinity.
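A one-line justification (added for completeness): by Bernoulli's inequality, for $x > 1$ and every natural number $n$, $x^n = (1 + (x-1))^n \ge 1 + n(x-1)$, and the right-hand side tends to infinity with $n$.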
[STATEMENT] lemma L_10_5_\<tau>_is_cat_cone[cat_cs_intros]: assumes "\<KK> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>" and "\<TT> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA>" and "c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr>" and \<upsilon>'_def: "\<upsilon>' = ntcf_arrow \<upsilon>" and \<upsilon>: "\<upsilon> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>C\<^sub>F \<KK> \<mapsto>\<^sub>C\<^sub>F Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>C\<^sub>F \<TT> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha>" and a: "a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>" shows "L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>C\<^sub>F c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] let ?H_\<CC> = \<open>\<lambda>c. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-)\<close> [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] let ?H_\<AA> = \<open>\<lambda>a. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-)\<close> [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] interpret \<KK>: is_functor \<alpha> \<BB> \<CC> \<KK> [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<KK> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC> [PROOF STEP] by (rule assms(1)) [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] interpret \<TT>: is_functor \<alpha> \<BB> \<AA> \<TT> [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<TT> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] by (rule assms(2)) [PROOF STATE] proof (state) goal (1 subgoal): 1. 
L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] from assms(3) [PROOF STATE] proof (chain) picking this: c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> [PROOF STEP] interpret c\<KK>: category \<alpha> \<open>c \<down>\<^sub>C\<^sub>F \<KK>\<close> [PROOF STATE] proof (prove) using this: c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> goal (1 subgoal): 1. category \<alpha> (c \<down>\<^sub>C\<^sub>F \<KK>) [PROOF STEP] by (cs_concl cs_shallow cs_intro: cat_comma_cs_intros) [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] from assms(3) [PROOF STATE] proof (chain) picking this: c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> [PROOF STEP] interpret \<Pi>c: is_functor \<alpha> \<open>c \<down>\<^sub>C\<^sub>F \<KK>\<close> \<BB> \<open>c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>\<close> [PROOF STATE] proof (prove) using this: c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> goal (1 subgoal): 1. c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB> [PROOF STEP] by ( cs_concl cs_shallow cs_simp: cat_comma_cs_simps cs_intro: cat_cs_intros cat_comma_cs_intros ) [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] interpret \<upsilon>: is_ntcf \<alpha> \<BB> \<open>cat_Set \<alpha>\<close> \<open>?H_\<CC> c \<circ>\<^sub>C\<^sub>F \<KK>\<close> \<open>?H_\<AA> a \<circ>\<^sub>C\<^sub>F \<TT>\<close> \<upsilon> [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<upsilon> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK> \<mapsto>\<^sub>C\<^sub>F Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] by (rule \<upsilon>) [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] proof(intro is_cat_coneI is_ntcfI') [PROOF STATE] proof (state) goal (14 subgoals): 1. \<Z> \<alpha> 2. vfsequence (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) 3. 
vcard (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) = 5\<^sub>\<nat> 4. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 5. \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 6. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDom\<rparr> = dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) 7. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTCod\<rparr> = \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> 8. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGDom\<rparr> = c \<down>\<^sub>C\<^sub>F \<KK> 9. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGCod\<rparr> = \<AA> 10. vsv (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) A total of 14 subgoals... [PROOF STEP] show "vfsequence (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vfsequence (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) [PROOF STEP] unfolding L_10_5_\<tau>_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. vfsequence [\<lambda>bf\<in>\<^sub>\<circ>c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr>. \<upsilon>'\<lparr>NTMap\<rparr>\<lparr>bf\<lparr>1\<^sub>\<nat>\<rparr>\<rparr>\<lparr>ArrVal\<rparr>\<lparr>bf\<lparr>2\<^sub>\<nat>\<rparr>\<rparr>, dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) (\<TT>\<lparr>HomCod\<rparr>) a (\<TT>\<lparr>HomCod\<rparr>\<lparr>CId\<rparr>\<lparr>a\<rparr>), \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>, c \<down>\<^sub>C\<^sub>F \<KK>, \<TT>\<lparr>HomCod\<rparr>]\<^sub>\<circ> [PROOF STEP] by simp [PROOF STATE] proof (state) this: vfsequence (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) goal (13 subgoals): 1. \<Z> \<alpha> 2. vcard (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) = 5\<^sub>\<nat> 3. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 4. \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 5. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDom\<rparr> = dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) 6. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTCod\<rparr> = \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> 7. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGDom\<rparr> = c \<down>\<^sub>C\<^sub>F \<KK> 8. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGCod\<rparr> = \<AA> 9. vsv (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) 10. \<D>\<^sub>\<circ> (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) = c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> A total of 13 subgoals... [PROOF STEP] show "vcard (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) = 5\<^sub>\<nat>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vcard (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) = 5\<^sub>\<nat> [PROOF STEP] unfolding L_10_5_\<tau>_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
vcard [\<lambda>bf\<in>\<^sub>\<circ>c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr>. \<upsilon>'\<lparr>NTMap\<rparr>\<lparr>bf\<lparr>1\<^sub>\<nat>\<rparr>\<rparr>\<lparr>ArrVal\<rparr>\<lparr>bf\<lparr>2\<^sub>\<nat>\<rparr>\<rparr>, dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) (\<TT>\<lparr>HomCod\<rparr>) a (\<TT>\<lparr>HomCod\<rparr>\<lparr>CId\<rparr>\<lparr>a\<rparr>), \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>, c \<down>\<^sub>C\<^sub>F \<KK>, \<TT>\<lparr>HomCod\<rparr>]\<^sub>\<circ> = 5\<^sub>\<nat> [PROOF STEP] by (simp add: nat_omega_simps) [PROOF STATE] proof (state) this: vcard (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a) = 5\<^sub>\<nat> goal (12 subgoals): 1. \<Z> \<alpha> 2. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 3. \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 4. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDom\<rparr> = dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) 5. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTCod\<rparr> = \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> 6. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGDom\<rparr> = c \<down>\<^sub>C\<^sub>F \<KK> 7. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGCod\<rparr> = \<AA> 8. vsv (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) 9. \<D>\<^sub>\<circ> (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) = c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> 10. \<And>aa. aa \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> \<Longrightarrow> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>aa\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>aa\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>aa\<rparr> A total of 12 subgoals... [PROOF STEP] from a [PROOF STATE] proof (chain) picking this: a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> [PROOF STEP] interpret cf_const: is_functor \<alpha> \<open>c \<down>\<^sub>C\<^sub>F \<KK>\<close> \<AA> \<open>cf_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a\<close> [PROOF STATE] proof (prove) using this: a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> goal (1 subgoal): 1. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> [PROOF STEP] by (cs_concl cs_intro: cat_cs_intros) [PROOF STATE] proof (state) goal (12 subgoals): 1. \<Z> \<alpha> 2. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 3. \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 4. 
L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDom\<rparr> = dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) 5. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTCod\<rparr> = \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> 6. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGDom\<rparr> = c \<down>\<^sub>C\<^sub>F \<KK> 7. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGCod\<rparr> = \<AA> 8. vsv (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) 9. \<D>\<^sub>\<circ> (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) = c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> 10. \<And>aa. aa \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> \<Longrightarrow> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>aa\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>aa\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>aa\<rparr> A total of 12 subgoals... [PROOF STEP] show "L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : cf_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>C\<^sub>F c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr>" if "bf \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr>" for bf [PROOF STATE] proof (prove) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> [PROOF STEP] from that assms(3) [PROOF STATE] proof (chain) picking this: bf \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> [PROOF STEP] obtain b f where bf_def: "bf = [0, b, f]\<^sub>\<circ>" and b: "b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>" and f: "f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>" [PROOF STATE] proof (prove) using this: bf \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> goal (1 subgoal): 1. (\<And>b f. 
\<lbrakk>bf = [[]\<^sub>\<circ>, b, f]\<^sub>\<circ>; b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>; f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: bf = [[]\<^sub>\<circ>, b, f]\<^sub>\<circ> b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr> f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> [PROOF STEP] from \<upsilon>.ntcf_NTMap_is_arr[OF b] a b assms(3) f [PROOF STATE] proof (chain) picking this: \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr> c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] have "\<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : Hom \<CC> c (\<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> Hom \<AA> a (\<TT>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>)" [PROOF STATE] proof (prove) using this: \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr> c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : Hom \<CC> c (\<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> Hom \<AA> a (\<TT>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) [PROOF STEP] by ( cs_prems cs_shallow cs_simp: cat_cs_simps cat_op_simps cs_intro: cat_cs_intros cat_op_intros ) [PROOF STATE] proof (state) this: \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : Hom \<CC> c (\<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> Hom \<AA> a (\<TT>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) goal (1 subgoal): 1. 
L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> [PROOF STEP] with that b f [PROOF STATE] proof (chain) picking this: bf \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr> f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : Hom \<CC> c (\<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> Hom \<AA> a (\<TT>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) [PROOF STEP] show "L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : cf_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>C\<^sub>F c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr>" [PROOF STATE] proof (prove) using this: bf \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr> f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : Hom \<CC> c (\<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> Hom \<AA> a (\<TT>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> [PROOF STEP] unfolding bf_def \<upsilon>'_def [PROOF STATE] proof (prove) using this: [[]\<^sub>\<circ>, b, f]\<^sub>\<circ> \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr> f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> : Hom \<CC> c (\<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> Hom \<AA> a (\<TT>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>) goal (1 subgoal): 1. 
L_10_5_\<tau> \<TT> \<KK> c (ntcf_arrow \<upsilon>) a\<lparr>NTMap\<rparr> \<lparr>[]\<^sub>\<circ>, b, f\<rparr>\<^sub>\<bullet> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr> \<lparr>[]\<^sub>\<circ>, b, f\<rparr>\<^sub>\<bullet> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr> \<lparr>[]\<^sub>\<circ>, b, f\<rparr>\<^sub>\<bullet> [PROOF STEP] by ( cs_concl cs_simp: cat_cs_simps cat_Kan_cs_simps cat_comma_cs_simps cat_FUNCT_cs_simps cs_intro: cat_cs_intros cat_comma_cs_intros ) [PROOF STATE] proof (state) this: L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>bf\<rparr> goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: ?bf \<in>\<^sub>\<circ> c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> \<Longrightarrow> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>?bf\<rparr> : dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ObjMap\<rparr>\<lparr>?bf\<rparr> \<mapsto>\<^bsub>\<AA>\<^esub> (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ObjMap\<rparr>\<lparr>?bf\<rparr> goal (11 subgoals): 1. \<Z> \<alpha> 2. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 3. \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 4. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDom\<rparr> = dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) 5. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTCod\<rparr> = \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> 6. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGDom\<rparr> = c \<down>\<^sub>C\<^sub>F \<KK> 7. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGCod\<rparr> = \<AA> 8. vsv (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) 9. \<D>\<^sub>\<circ> (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) = c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> 10. \<And>aa b f. f : aa \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> b \<Longrightarrow> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>aa\<rparr> A total of 11 subgoals... 
[PROOF STEP] show "L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> cf_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>C\<^sub>F c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr>" if "F : A \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> B" for A B F [PROOF STATE] proof (prove) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> [PROOF STEP] from \<KK>.is_functor_axioms that assms(3) [PROOF STATE] proof (chain) picking this: \<KK> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC> F : A \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> B c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> [PROOF STEP] obtain a' f a'' f' g where F_def: "F = [[0, a', f]\<^sub>\<circ>, [0, a'', f']\<^sub>\<circ>, [0, g]\<^sub>\<circ>]\<^sub>\<circ>" and A_def: "A = [0, a', f]\<^sub>\<circ>" and B_def: "B = [0, a'', f']\<^sub>\<circ>" and g: "g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a''" and f: "f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr>" and f': "f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr>" and f'_def: "\<KK>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f = f'" [PROOF STATE] proof (prove) using this: \<KK> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC> F : A \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> B c \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> goal (1 subgoal): 1. (\<And>a' f a'' f' g. 
\<lbrakk>F = [[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>]\<^sub>\<circ>; A = [[]\<^sub>\<circ>, a', f]\<^sub>\<circ>; B = [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>; g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a''; f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr>; f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr>; \<KK>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f = f'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: F = [[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>]\<^sub>\<circ> A = [[]\<^sub>\<circ>, a', f]\<^sub>\<circ> B = [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> \<KK>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f = f' goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> [PROOF STEP] from \<upsilon>.ntcf_Comp_commute[OF g] [PROOF STATE] proof (chain) picking this: \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> = (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr> [PROOF STEP] have "(\<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (?H_\<CC> c \<circ>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> = ((?H_\<AA> a \<circ>\<^sub>C\<^sub>F \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr>" [PROOF STATE] proof (prove) using this: \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> = (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr> goal (1 subgoal): 1. 
(\<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> = ((Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> [PROOF STEP] by simp [PROOF STATE] proof (state) this: (\<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> = ((Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> [PROOF STEP] from this a g f f' \<KK>.HomCod.category_axioms \<TT>.HomCod.category_axioms [PROOF STATE] proof (chain) picking this: (\<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> = ((Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> category \<alpha> \<CC> category \<alpha> \<AA> [PROOF STEP] have [cat_cs_simps]: "\<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr>\<lparr>ArrVal\<rparr>\<lparr>\<KK>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f\<rparr> = \<TT>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>\<lparr>ArrVal\<rparr>\<lparr>f\<rparr>" [PROOF STATE] proof (prove) using this: (\<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(c,-) \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<KK>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> = ((Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<AA>(a,-) 
\<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<TT>)\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>)\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> category \<alpha> \<CC> category \<alpha> \<AA> goal (1 subgoal): 1. \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr>\<lparr>ArrVal\<rparr>\<lparr>\<KK>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f\<rparr> = \<TT>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> [PROOF STEP] by (*slow*) ( cs_prems cs_simp: cat_cs_simps cat_op_simps cs_intro: cat_cs_intros cat_prod_cs_intros cat_op_intros ) [PROOF STATE] proof (state) this: \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a''\<rparr>\<lparr>ArrVal\<rparr>\<lparr>\<KK>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f\<rparr> = \<TT>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> \<upsilon>\<lparr>NTMap\<rparr>\<lparr>a'\<rparr>\<lparr>ArrVal\<rparr>\<lparr>f\<rparr> goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> [PROOF STEP] from that a g f f' \<KK>.HomCod.category_axioms \<TT>.HomCod.category_axioms [PROOF STATE] proof (chain) picking this: F : A \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> B a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> category \<alpha> \<CC> category \<alpha> \<AA> [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: F : A \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> B a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> category \<alpha> \<CC> category \<alpha> \<AA> goal (1 subgoal): 1. 
L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> [PROOF STEP] unfolding F_def A_def B_def \<upsilon>'_def [PROOF STATE] proof (prove) using this: [[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>]\<^sub>\<circ> : [[]\<^sub>\<circ>, a', f]\<^sub>\<circ> \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ> a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> category \<alpha> \<CC> category \<alpha> \<AA> goal (1 subgoal): 1. L_10_5_\<tau> \<TT> \<KK> c (ntcf_arrow \<upsilon>) a\<lparr>NTMap\<rparr> \<lparr>[]\<^sub>\<circ>, a'', f'\<rparr>\<^sub>\<bullet> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr> \<lparr>[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>\<rparr>\<^sub>\<bullet> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr> \<lparr>[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>\<rparr>\<^sub>\<bullet> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c (ntcf_arrow \<upsilon>) a\<lparr>NTMap\<rparr> \<lparr>[]\<^sub>\<circ>, a', f\<rparr>\<^sub>\<bullet> [PROOF STEP] (*slow*) [PROOF STATE] proof (prove) using this: [[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>]\<^sub>\<circ> : [[]\<^sub>\<circ>, a', f]\<^sub>\<circ> \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ> a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> g : a' \<mapsto>\<^bsub>\<BB>\<^esub> a'' f : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> f' : c \<mapsto>\<^bsub>\<CC>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a''\<rparr> category \<alpha> \<CC> category \<alpha> \<AA> goal (1 subgoal): 1. 
L_10_5_\<tau> \<TT> \<KK> c (ntcf_arrow \<upsilon>) a\<lparr>NTMap\<rparr> \<lparr>[]\<^sub>\<circ>, a'', f'\<rparr>\<^sub>\<bullet> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr> \<lparr>[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>\<rparr>\<^sub>\<bullet> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr> \<lparr>[[]\<^sub>\<circ>, a', f]\<^sub>\<circ>, [[]\<^sub>\<circ>, a'', f']\<^sub>\<circ>, [[]\<^sub>\<circ>, g]\<^sub>\<circ>\<rparr>\<^sub>\<bullet> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c (ntcf_arrow \<upsilon>) a\<lparr>NTMap\<rparr> \<lparr>[]\<^sub>\<circ>, a', f\<rparr>\<^sub>\<bullet> [PROOF STEP] by ( cs_concl cs_simp: f'_def[symmetric] cat_cs_simps cat_Kan_cs_simps cat_comma_cs_simps cat_FUNCT_cs_simps cat_op_simps cs_intro: cat_cs_intros cat_op_intros ) [PROOF STATE] proof (state) this: L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>A\<rparr> goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: ?F : ?A \<mapsto>\<^bsub>c \<down>\<^sub>C\<^sub>F \<KK>\<^esub> ?B \<Longrightarrow> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>?B\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>)\<lparr>ArrMap\<rparr>\<lparr>?F\<rparr> = (\<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK>)\<lparr>ArrMap\<rparr>\<lparr>?F\<rparr> \<circ>\<^sub>A\<^bsub>\<AA>\<^esub> L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>\<lparr>?A\<rparr> goal (10 subgoals): 1. \<Z> \<alpha> 2. dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 3. \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> 4. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDom\<rparr> = dghm_const (c \<down>\<^sub>C\<^sub>F \<KK>) \<AA> a (\<AA>\<lparr>CId\<rparr>\<lparr>a\<rparr>) 5. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTCod\<rparr> = \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> 6. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGDom\<rparr> = c \<down>\<^sub>C\<^sub>F \<KK> 7. L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTDGCod\<rparr> = \<AA> 8. vsv (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) 9. \<D>\<^sub>\<circ> (L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a\<lparr>NTMap\<rparr>) = c \<down>\<^sub>C\<^sub>F \<KK>\<lparr>Obj\<rparr> 10. 
a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr> [PROOF STEP] qed ( use assms in \<open> cs_concl cs_simp: cat_cs_simps cat_Kan_cs_simps cs_intro: cat_cs_intros cat_Kan_cs_intros a \<close> )+ [PROOF STATE] proof (state) this: L_10_5_\<tau> \<TT> \<KK> c \<upsilon>' a : a <\<^sub>C\<^sub>F\<^sub>.\<^sub>c\<^sub>o\<^sub>n\<^sub>e \<TT> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M c \<^sub>O\<Sqinter>\<^sub>C\<^sub>F \<KK> : c \<down>\<^sub>C\<^sub>F \<KK> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA> goal: No subgoals! [PROOF STEP] qed
[STATEMENT] lemma f_in_found_fds: "ftype_in_fds_f P ctx fds f = ty_opt_bot_opt (Some ty_f) \<longrightarrow> f \<in> case_fd (\<lambda>cl f. f) ` set fds" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ftype_in_fds_f P ctx fds f = ty_opt_bot_opt (Some ty_f) \<longrightarrow> f \<in> case_fd (\<lambda>cl f. f) ` set fds [PROOF STEP] by (induct fds, auto split: fd.splits)
import Mathlib.Tactic.ClearExcept -- Most basic test example (_delete_this : Nat) (dont_delete_this : Int) : Nat := by clear * - dont_delete_this fail_if_success assumption exact dont_delete_this.toNat -- Confirms that clearExcept does not delete class instances example [dont_delete_this : Inhabited Nat] (dont_delete_this2 : Prop) : Inhabited Nat := by clear * - dont_delete_this2 assumption -- Confirms that clearExcept can clear hypotheses even when they have dependencies example (delete_this : Nat) (_delete_this2 : delete_this = delete_this) (dont_delete_this : Int) : Nat := by clear * - dont_delete_this fail_if_success assumption exact dont_delete_this.toNat -- Confirms that clearExcept does not clear hypotheses when they have dependencies that should not be cleared example (dont_delete_this : Nat) (dont_delete_this2 : dont_delete_this = dont_delete_this) : Nat := by clear * - dont_delete_this2 exact dont_delete_this -- Confirms that clearExcept can preserve multiple identifiers example (_delete_this : Nat) (dont_delete_this : Int) (dont_delete_this2 : Int) : Nat := by clear * - dont_delete_this dont_delete_this2 fail_if_success assumption exact dont_delete_this.toNat + dont_delete_this2.toNat
[STATEMENT] lemma hfsynth_hnr_from_hfI: assumes "\<forall>x xi. P x \<and> hfsynth_ID_R (fst R) x \<longrightarrow> hn_refine (emp * hn_ctxt (fst R) x xi) (f$xi) (emp * hn_ctxt (snd R) x xi) S (g$x)" shows "(f,g) \<in> [P]\<^sub>a R \<rightarrow> S" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (f, g) \<in> [P]\<^sub>a R \<rightarrow> S [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: \<forall>x xi. P x \<and> hfsynth_ID_R (fst R) x \<longrightarrow> hn_refine (emp * hn_ctxt (fst R) x xi) (f $ xi) (emp * hn_ctxt (snd R) x xi) S (g $ x) goal (1 subgoal): 1. (f, g) \<in> [P]\<^sub>a R \<rightarrow> S [PROOF STEP] unfolding hfref_def [PROOF STATE] proof (prove) using this: \<forall>x xi. P x \<and> hfsynth_ID_R (fst R) x \<longrightarrow> hn_refine (emp * hn_ctxt (fst R) x xi) (f $ xi) (emp * hn_ctxt (snd R) x xi) S (g $ x) goal (1 subgoal): 1. (f, g) \<in> {(f, g). \<forall>c a. P a \<longrightarrow> hn_refine (fst R a c) (f c) (snd R a c) S (g a)} [PROOF STEP] by (auto simp: hn_ctxt_def)
(** Syntax of the simply typed lambda calculus as a multisorted signature. Written by: Anders Mörtberg, 2017 version for simplified notion of HSS by Ralph Matthes (2022, 2023) the file is identical to the homonymous file in the parent directory, except for importing files from the present directory *) Require Import UniMath.Foundations.PartD. Require Import UniMath.Foundations.Sets. Require Import UniMath.MoreFoundations.Tactics. Require Import UniMath.Combinatorics.Lists. Require Import UniMath.CategoryTheory.Core.Categories. Require Import UniMath.CategoryTheory.Core.Functors. Require Import UniMath.CategoryTheory.FunctorCategory. Require Import UniMath.CategoryTheory.categories.HSET.Core. Require Import UniMath.CategoryTheory.categories.HSET.Colimits. Require Import UniMath.CategoryTheory.categories.HSET.Limits. Require Import UniMath.CategoryTheory.categories.HSET.Slice. Require Import UniMath.CategoryTheory.limits.initial. Require Import UniMath.CategoryTheory.limits.binproducts. Require Import UniMath.CategoryTheory.limits.bincoproducts. Require Import UniMath.CategoryTheory.limits.coproducts. Require Import UniMath.CategoryTheory.FunctorAlgebras. Require Import UniMath.CategoryTheory.Monads.Monads. Require Import UniMath.CategoryTheory.slicecat. Require Import UniMath.SubstitutionSystems.Signatures. Require Import UniMath.SubstitutionSystems.SimplifiedHSS.LiftingInitial_alt. Require Import UniMath.SubstitutionSystems.Notation. Local Open Scope subsys. Require Import UniMath.SubstitutionSystems.MonadsMultiSorted. Require Import UniMath.SubstitutionSystems.MultiSorted. Require Import UniMath.SubstitutionSystems.SimplifiedHSS.MultiSortedMonadConstruction. Local Open Scope cat. (** * The simply typed lambda calculus from a multisorted binding signature *) Section Lam. Variable (sort : hSet) (arr : sort → sort → sort). (** A lot of notations, upstream? *) Local Infix "::" := (@cons _). Local Notation "[]" := (@nil _) (at level 0, format "[]"). Local Notation "C / X" := (slice_cat C X). Local Notation "a + b" := (setcoprod a b) : set. Local Definition HSET_over_sort : category. Proof. exists (HSET / sort). now apply has_homsets_slice_precat. Defined. Let HSET_over_sort2 := [HSET/sort,HSET_over_sort]. Local Lemma BinProducts_HSET_over_sort2 : BinProducts HSET_over_sort2. Proof. apply BinProducts_functor_precat, BinProducts_slice_precat, PullbacksHSET. Defined. Local Lemma Coproducts_HSET_over_sort2 : Coproducts ((sort × sort) + (sort × sort))%set HSET_over_sort2. Proof. apply Coproducts_functor_precat, Coproducts_slice_precat, CoproductsHSET. apply setproperty. Defined. (** The signature of the simply typed lambda calculus *) Definition STLC_Sig : MultiSortedSig sort. Proof. use make_MultiSortedSig. - apply ((sort × sort) + (sort × sort))%set. (* todo: fix this once level of × is fixed *) - intros H; induction H as [st|st]; induction st as [s t]. + exact ((([],,arr s t) :: ([],,s) :: nil),,t). + exact (((cons s [],,t) :: []),,arr s t). Defined. (** The signature with strength for the simply typed lambda calculus *) Definition STLC_Signature : Signature (HSET / sort) _ _:= MultiSortedSigToSignature sort STLC_Sig. Let Id_H := Id_H _ (BinCoproducts_HSET_slice sort). Definition STLC_Functor : functor HSET_over_sort2 HSET_over_sort2 := Id_H STLC_Signature. Lemma STLC_Functor_Initial : Initial (FunctorAlg STLC_Functor). Proof. apply SignatureInitialAlgebraSetSort. apply is_omega_cocont_MultiSortedSigToSignature. apply slice_precat_colims_of_shape, ColimsHSET_of_shape. Defined. 
Definition STLC_Monad : Monad (HSET / sort) := MultiSortedSigToMonad sort STLC_Sig. (** Extract the constructors of the stlc from the initial algebra *) Definition STLC : HSET_over_sort2 := alg_carrier _ (InitialObject STLC_Functor_Initial). Let STLC_mor : HSET_over_sort2⟦STLC_Functor STLC,STLC⟧ := alg_map _ (InitialObject STLC_Functor_Initial). Let STLC_alg : algebra_ob STLC_Functor := InitialObject STLC_Functor_Initial. Local Lemma BP : BinProducts [HSET_over_sort,HSET]. Proof. apply BinProducts_functor_precat, BinProductsHSET. Defined. Local Notation "'1'" := (functor_identity HSET_over_sort). Local Notation "x ⊗ y" := (BinProductObject _ (BP x y)). (** The variables *) Definition var_map : HSET_over_sort2⟦1,STLC⟧ := BinCoproductIn1 (BinCoproducts_functor_precat _ _ _ _ _) · STLC_mor. (** The source of the application constructor *) Definition app_source (s t : sort) (X : HSET_over_sort2) : HSET_over_sort2 := ((X ∙ proj_functor sort (arr s t)) ⊗ (X ∙ proj_functor sort s)) ∙ hat_functor sort t. (** The application constructor *) Definition app_map (s t : sort) : HSET_over_sort2⟦app_source s t STLC,STLC⟧ := (CoproductIn _ _ (Coproducts_functor_precat _ _ _ _ _) (ii1 (s,, t))) · (BinCoproductIn2 (BinCoproducts_functor_precat _ _ _ _ _)) · STLC_mor. (** The source of the lambda constructor *) Definition lam_source (s t : sort) (X : HSET_over_sort2) : HSET_over_sort2 := (sorted_option_functor sort s ∙ X ∙ proj_functor sort t) ∙ hat_functor sort (arr s t). Definition lam_map (s t : sort) : HSET_over_sort2⟦lam_source s t STLC,STLC⟧ := (CoproductIn _ _ (Coproducts_functor_precat _ _ _ _ _) (ii2 (s,,t))) · BinCoproductIn2 (BinCoproducts_functor_precat _ _ _ _ _) · STLC_mor. Definition make_STLC_Algebra X (fvar : HSET_over_sort2⟦1,X⟧) (fapp : ∏ s t, HSET_over_sort2⟦app_source s t X,X⟧) (flam : ∏ s t, HSET_over_sort2⟦lam_source s t X,X⟧) : algebra_ob STLC_Functor. Proof. apply (tpair _ X). use (BinCoproductArrow _ fvar). use CoproductArrow. intro b; induction b as [st|st]; induction st as [s t]. - apply (fapp s t). - apply (flam s t). Defined. (** The recursor for the stlc *) Definition foldr_map X (fvar : HSET_over_sort2⟦1,X⟧) (fapp : ∏ s t, HSET_over_sort2⟦app_source s t X,X⟧) (flam : ∏ s t, HSET_over_sort2⟦lam_source s t X,X⟧) : algebra_mor _ STLC_alg (make_STLC_Algebra X fvar fapp flam). Proof. apply (InitialArrow STLC_Functor_Initial (make_STLC_Algebra X fvar fapp flam)). Defined. (** The equation for variables *) Lemma foldr_var X (fvar : HSET_over_sort2⟦1,X⟧) (fapp : ∏ s t, HSET_over_sort2⟦app_source s t X,X⟧) (flam : ∏ s t, HSET_over_sort2⟦lam_source s t X,X⟧) : var_map · foldr_map X fvar fapp flam = fvar. Proof. assert (F := maponpaths (λ x, BinCoproductIn1 (BinCoproducts_functor_precat _ _ _ _ _) · x) (algebra_mor_commutes _ _ _ (foldr_map X fvar fapp flam))). rewrite assoc in F. eapply pathscomp0; [apply F|]. rewrite assoc. eapply pathscomp0; [eapply cancel_postcomposition, BinCoproductOfArrowsIn1|]. rewrite <- assoc. eapply pathscomp0; [eapply maponpaths, BinCoproductIn1Commutes|]. apply id_left. Defined. (* TODO: how to define the equations for app and lam? *) End Lam.
{-# OPTIONS --without-K --safe #-} module Categories.Category.Instance.Globe where open import Level using (Level; zero) open import Relation.Binary using (IsEquivalence; module IsEquivalence) open import Relation.Binary.PropositionalEquality using (isEquivalence) open import Data.Nat using (ℕ; zero; suc; _<_; _≤_; z≤n; s≤s) open import Categories.Category data GlobeHom : (m n : ℕ) → Set where I : ∀ {place : ℕ} → GlobeHom place place σ : ∀ {n m : ℕ} → GlobeHom (suc n) m → GlobeHom n m τ : ∀ {n m : ℕ} → GlobeHom (suc n) m → GlobeHom n m data GlobeEq : {m n : ℕ} → GlobeHom m n → GlobeHom m n → Set where both-I : ∀ {m} → GlobeEq {m} {m} I I both-σ : ∀ {m n x y} → GlobeEq {m} {n} (σ x) (σ y) both-τ : ∀ {m n x y} → GlobeEq {m} {n} (τ x) (τ y) GlobeEquiv : ∀ {m n} → IsEquivalence (GlobeEq {m} {n}) GlobeEquiv = record { refl = refl; sym = sym; trans = trans } where refl : ∀ {m n} {x : GlobeHom m n} → GlobeEq x x refl {x = I} = both-I refl {x = σ y} = both-σ refl {x = τ y} = both-τ sym : ∀ {m n} {x y : GlobeHom m n} → GlobeEq x y → GlobeEq y x sym both-I = both-I sym both-σ = both-σ sym both-τ = both-τ trans : ∀ {m n} {x y z : GlobeHom m n} → GlobeEq x y → GlobeEq y z → GlobeEq x z trans both-I y∼z = y∼z trans both-σ both-σ = both-σ trans both-τ both-τ = both-τ infixl 7 _⊚_ _⊚_ : ∀ {l m n} → GlobeHom m n → GlobeHom l m → GlobeHom l n x ⊚ I = x x ⊚ σ y = σ (x ⊚ y) x ⊚ τ y = τ (x ⊚ y) Globe : Category Level.zero Level.zero Level.zero Globe = record { Obj = ℕ ; _⇒_ = GlobeHom ; _≈_ = GlobeEq ; id = I ; _∘_ = _⊚_ ; assoc = λ {_ _ _ _ f g h} → assoc {f = f} {g} {h} ; sym-assoc = λ {_ _ _ _ f g h} → GlobeEquiv.sym (assoc {f = f} {g} {h}) ; identityˡ = identityˡ ; identityʳ = identityʳ ; identity² = identity² ; equiv = GlobeEquiv ; ∘-resp-≈ = ∘-resp-≡ } where module GlobeEquiv {m n} = IsEquivalence (GlobeEquiv {m} {n}) assoc : ∀ {A B C D} {f : GlobeHom A B} {g : GlobeHom B C} {h : GlobeHom C D} → GlobeEq ((h ⊚ g) ⊚ f) (h ⊚ (g ⊚ f)) assoc {f = I} = refl where open IsEquivalence GlobeEquiv assoc {f = σ y} = both-σ assoc {f = τ y} = both-τ identityˡ : ∀ {A B} {f : GlobeHom A B} → GlobeEq (I ⊚ f) f identityˡ {f = I} = both-I identityˡ {f = σ y} = both-σ identityˡ {f = τ y} = both-τ identityʳ : ∀ {A B} {f : GlobeHom A B} → GlobeEq (f ⊚ I) f identityʳ = IsEquivalence.refl GlobeEquiv identity² : {m : ℕ} → GlobeEq {m} (I ⊚ I) I identity² = both-I ∘-resp-≡ : ∀ {A B C} {f h : GlobeHom B C} {g i : GlobeHom A B} → GlobeEq f h → GlobeEq g i → GlobeEq (f ⊚ g) (h ⊚ i) ∘-resp-≡ f∼h both-I = f∼h ∘-resp-≡ f∼h both-σ = both-σ ∘-resp-≡ f∼h both-τ = both-τ
Formal statement is: proposition\<^marker>\<open>tag important\<close> starlike_negligible_bounded_gmeasurable: fixes S :: "'a :: euclidean_space set" assumes S: "S \<in> sets lebesgue" and "bounded S" and eq1: "\<And>c x. \<lbrakk>(c *\<^sub>R x) \<in> S; 0 \<le> c; x \<in> S\<rbrakk> \<Longrightarrow> c = 1" shows "S \<in> null_sets lebesgue" Informal statement is: If $S$ is a bounded Lebesgue measurable set such that for all $c \geq 0$ and $x \in S$, if $cx \in S$ then $c = 1$, then $S$ is a Lebesgue null set.
{-# OPTIONS --cubical --no-import-sorts --safe #-} module Cubical.Categories.Equivalence.Properties where open import Cubical.Foundations.Prelude open import Cubical.Data.Sigma open import Cubical.Categories.Category open import Cubical.Categories.Functor open import Cubical.Categories.NaturalTransformation open import Cubical.Categories.Morphism open import Cubical.Categories.Equivalence.Base open import Cubical.HITs.PropositionalTruncation.Base open Precategory open Functor open NatIso open CatIso open NatTrans open isEquivalence private variable ℓC ℓC' ℓD ℓD' : Level -- Equivalence implies Full, Faithul, and Essentially Surjective module _ {C : Precategory ℓC ℓC'} {D : Precategory ℓD ℓD'} where symEquiv : ∀ {F : Functor C D} → (e : isEquivalence F) → isEquivalence (e .invFunc) symEquiv {F} record { invFunc = G ; η = η ; ε = ε } = record { invFunc = F ; η = symNatIso ε ; ε = symNatIso η } isEquiv→Faithful : ∀ {F : Functor C D} → isEquivalence F → isFaithful F isEquiv→Faithful {F} record { invFunc = G ; η = η ; ε = _ } c c' f g p = f ≡⟨ sqRL η ⟩ cIso .mor ⋆⟨ C ⟩ G ⟪ F ⟪ f ⟫ ⟫ ⋆⟨ C ⟩ c'Iso .inv ≡⟨ cong (λ v → cIso .mor ⋆⟨ C ⟩ (G ⟪ v ⟫) ⋆⟨ C ⟩ c'Iso .inv) p ⟩ cIso .mor ⋆⟨ C ⟩ G ⟪ F ⟪ g ⟫ ⟫ ⋆⟨ C ⟩ c'Iso .inv ≡⟨ sym (sqRL η) ⟩ g ∎ where -- isomorphism between c and GFc cIso = isIso→CatIso (η .nIso c) -- isomorphism between c' and GFc' c'Iso = isIso→CatIso (η .nIso c') module _ {C : Precategory ℓC ℓC'} {D : Precategory ℓD ℓD'} where isEquiv→Full : ∀ {F : Functor C D} → isEquivalence F → isFull F isEquiv→Full {F} eq@record { invFunc = G ; η = η ; ε = _ } c c' g = ∣ h , isEquiv→Faithful (symEquiv eq) _ _ _ _ GFh≡Gg ∣ -- apply faithfulness of G where -- isomorphism between c and GFc cIso = isIso→CatIso (η .nIso c) -- isomorphism between c' and GFc' c'Iso = isIso→CatIso (η .nIso c') -- reverses cIso⁻ = symCatIso cIso c'Iso⁻ = symCatIso c'Iso h = cIso .mor ⋆⟨ C ⟩ G ⟪ g ⟫ ⋆⟨ C ⟩ c'Iso .inv -- we show that both `G ⟪ g ⟫` and `G ⟪ F ⟪ h ⟫ ⟫` -- are equal to the same thing -- namely : cIso .inv ⋆⟨ C ⟩ h ⋆⟨ C ⟩ c'Iso .mor Gg≡ηhη : G ⟪ g ⟫ ≡ cIso .inv ⋆⟨ C ⟩ h ⋆⟨ C ⟩ c'Iso .mor Gg≡ηhη = invMoveL cAreInv move-c' ∙ sym (C .⋆Assoc _ _ _) where cAreInv : areInv (cIso .mor) (cIso .inv) cAreInv = CatIso→areInv cIso c'AreInv : areInv (c'Iso .mor) (c'Iso .inv) c'AreInv = CatIso→areInv c'Iso move-c' : cIso .mor ⋆⟨ C ⟩ G ⟪ g ⟫ ≡ h ⋆⟨ C ⟩ c'Iso .mor move-c' = invMoveR (symAreInv c'AreInv) refl GFh≡Gg : G ⟪ F ⟪ h ⟫ ⟫ ≡ G ⟪ g ⟫ GFh≡Gg = G ⟪ F ⟪ h ⟫ ⟫ ≡⟨ sqLR η ⟩ cIso .inv ⋆⟨ C ⟩ h ⋆⟨ C ⟩ c'Iso .mor ≡⟨ sym Gg≡ηhη ⟩ G ⟪ g ⟫ ∎ isEquiv→Surj : ∀ {F : Functor C D} → isEquivalence F → isEssentiallySurj F isEquiv→Surj isE d = (isE .invFunc ⟅ d ⟆) , isIso→CatIso ((isE .ε .nIso) d)
/- Copyright (c) 2022 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies -/ import order.upper_lower.basic import topology.sets.closeds /-! # Clopen upper sets > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. In this file we define the type of clopen upper sets. -/ open set topological_space variables {α β : Type*} [topological_space α] [has_le α] [topological_space β] [has_le β] /-! ### Compact open sets -/ /-- The type of clopen upper sets of a topological space. -/ structure clopen_upper_set (α : Type*) [topological_space α] [has_le α] extends clopens α := (upper' : is_upper_set carrier) namespace clopen_upper_set instance : set_like (clopen_upper_set α) α := { coe := λ s, s.carrier, coe_injective' := λ s t h, by { obtain ⟨⟨_, _⟩, _⟩ := s, obtain ⟨⟨_, _⟩, _⟩ := t, congr' } } lemma upper (s : clopen_upper_set α) : is_upper_set (s : set α) := s.upper' lemma clopen (s : clopen_upper_set α) : is_clopen (s : set α) := s.clopen' /-- Reinterpret a upper clopen as an upper set. -/ @[simps] def to_upper_set (s : clopen_upper_set α) : upper_set α := ⟨s, s.upper⟩ @[ext] protected lemma ext {s t : clopen_upper_set α} (h : (s : set α) = t) : s = t := set_like.ext' h @[simp] lemma coe_mk (s : clopens α) (h) : (mk s h : set α) = s := rfl instance : has_sup (clopen_upper_set α) := ⟨λ s t, ⟨s.to_clopens ⊔ t.to_clopens, s.upper.union t.upper⟩⟩ instance : has_inf (clopen_upper_set α) := ⟨λ s t, ⟨s.to_clopens ⊓ t.to_clopens, s.upper.inter t.upper⟩⟩ instance : has_top (clopen_upper_set α) := ⟨⟨⊤, is_upper_set_univ⟩⟩ instance : has_bot (clopen_upper_set α) := ⟨⟨⊥, is_upper_set_empty⟩⟩ instance : lattice (clopen_upper_set α) := set_like.coe_injective.lattice _ (λ _ _, rfl) (λ _ _, rfl) instance : bounded_order (clopen_upper_set α) := bounded_order.lift (coe : _ → set α) (λ _ _, id) rfl rfl @[simp] lemma coe_sup (s t : clopen_upper_set α) : (↑(s ⊔ t) : set α) = s ∪ t := rfl @[simp] lemma coe_inf (s t : clopen_upper_set α) : (↑(s ⊓ t) : set α) = s ∩ t := rfl @[simp] lemma coe_top : (↑(⊤ : clopen_upper_set α) : set α) = univ := rfl @[simp] instance : inhabited (clopen_upper_set α) := ⟨⊥⟩ end clopen_upper_set
(* Title: HOL/Old_Number_Theory/Factorization.thy Author: Thomas Marthedal Rasmussen Copyright 2000 University of Cambridge *) section {* Fundamental Theorem of Arithmetic (unique factorization into primes) *} theory Factorization imports Primes "~~/src/HOL/Library/Permutation" begin subsection {* Definitions *} definition primel :: "nat list => bool" where "primel xs = (\<forall>p \<in> set xs. prime p)" primrec nondec :: "nat list => bool" where "nondec [] = True" | "nondec (x # xs) = (case xs of [] => True | y # ys => x \<le> y \<and> nondec xs)" primrec prod :: "nat list => nat" where "prod [] = Suc 0" | "prod (x # xs) = x * prod xs" primrec oinsert :: "nat => nat list => nat list" where "oinsert x [] = [x]" | "oinsert x (y # ys) = (if x \<le> y then x # y # ys else y # oinsert x ys)" primrec sort :: "nat list => nat list" where "sort [] = []" | "sort (x # xs) = oinsert x (sort xs)" subsection {* Arithmetic *} lemma one_less_m: "(m::nat) \<noteq> m * k ==> m \<noteq> Suc 0 ==> Suc 0 < m" apply (cases m) apply auto done lemma one_less_k: "(m::nat) \<noteq> m * k ==> Suc 0 < m * k ==> Suc 0 < k" apply (cases k) apply auto done lemma mult_left_cancel: "(0::nat) < k ==> k * n = k * m ==> n = m" apply auto done lemma mn_eq_m_one: "(0::nat) < m ==> m * n = m ==> n = Suc 0" apply (cases n) apply auto done lemma prod_mn_less_k: "(0::nat) < n ==> 0 < k ==> Suc 0 < m ==> m * n = k ==> n < k" apply (induct m) apply auto done subsection {* Prime list and product *} lemma prod_append: "prod (xs @ ys) = prod xs * prod ys" apply (induct xs) apply (simp_all add: mult.assoc) done lemma prod_xy_prod: "prod (x # xs) = prod (y # ys) ==> x * prod xs = y * prod ys" apply auto done lemma primel_append: "primel (xs @ ys) = (primel xs \<and> primel ys)" apply (unfold primel_def) apply auto done lemma prime_primel: "prime n ==> primel [n] \<and> prod [n] = n" apply (unfold primel_def) apply auto done lemma prime_nd_one: "prime p ==> \<not> p dvd Suc 0" apply (unfold prime_def dvd_def) apply auto done lemma hd_dvd_prod: "prod (x # xs) = prod ys ==> x dvd (prod ys)" by (metis dvd_mult_left dvd_refl prod.simps(2)) lemma primel_tl: "primel (x # xs) ==> primel xs" apply (unfold primel_def) apply auto done lemma primel_hd_tl: "(primel (x # xs)) = (prime x \<and> primel xs)" apply (unfold primel_def) apply auto done lemma primes_eq: "prime p ==> prime q ==> p dvd q ==> p = q" apply (unfold prime_def) apply auto done lemma primel_one_empty: "primel xs ==> prod xs = Suc 0 ==> xs = []" apply (cases xs) apply (simp_all add: primel_def prime_def) done lemma prime_g_one: "prime p ==> Suc 0 < p" apply (unfold prime_def) apply auto done lemma prime_g_zero: "prime p ==> 0 < p" apply (unfold prime_def) apply auto done lemma primel_nempty_g_one: "primel xs \<Longrightarrow> xs \<noteq> [] \<Longrightarrow> Suc 0 < prod xs" apply (induct xs) apply simp apply (fastforce simp: primel_def prime_def elim: one_less_mult) done lemma primel_prod_gz: "primel xs ==> 0 < prod xs" apply (induct xs) apply (auto simp: primel_def prime_def) done subsection {* Sorting *} lemma nondec_oinsert: "nondec xs \<Longrightarrow> nondec (oinsert x xs)" apply (induct xs) apply simp apply (case_tac xs) apply (simp_all cong del: list.case_cong_weak) done lemma nondec_sort: "nondec (sort xs)" apply (induct xs) apply simp_all apply (erule nondec_oinsert) done lemma x_less_y_oinsert: "x \<le> y ==> l = y # ys ==> x # l = oinsert x l" apply simp_all done lemma nondec_sort_eq [rule_format]: "nondec xs \<longrightarrow> xs = sort xs" apply (induct xs) apply safe 
apply simp_all apply (case_tac xs) apply simp_all apply (case_tac xs) apply simp apply (rule_tac y = aa and ys = list in x_less_y_oinsert) apply simp_all done lemma oinsert_x_y: "oinsert x (oinsert y l) = oinsert y (oinsert x l)" apply (induct l) apply auto done subsection {* Permutation *} lemma perm_primel [rule_format]: "xs <~~> ys ==> primel xs --> primel ys" apply (unfold primel_def) apply (induct set: perm) apply simp apply simp apply (simp (no_asm)) apply blast apply blast done lemma perm_prod: "xs <~~> ys ==> prod xs = prod ys" apply (induct set: perm) apply (simp_all add: ac_simps) done lemma perm_subst_oinsert: "xs <~~> ys ==> oinsert a xs <~~> oinsert a ys" apply (induct set: perm) apply auto done lemma perm_oinsert: "x # xs <~~> oinsert x xs" apply (induct xs) apply auto done lemma perm_sort: "xs <~~> sort xs" apply (induct xs) apply (auto intro: perm_oinsert elim: perm_subst_oinsert) done lemma perm_sort_eq: "xs <~~> ys ==> sort xs = sort ys" apply (induct set: perm) apply (simp_all add: oinsert_x_y) done subsection {* Existence *} lemma ex_nondec_lemma: "primel xs ==> \<exists>ys. primel ys \<and> nondec ys \<and> prod ys = prod xs" apply (blast intro: nondec_sort perm_prod perm_primel perm_sort perm_sym) done lemma not_prime_ex_mk: "Suc 0 < n \<and> \<not> prime n ==> \<exists>m k. Suc 0 < m \<and> Suc 0 < k \<and> m < n \<and> k < n \<and> n = m * k" apply (unfold prime_def dvd_def) apply (auto intro: n_less_m_mult_n n_less_n_mult_m one_less_m one_less_k) done lemma split_primel: "primel xs \<Longrightarrow> primel ys \<Longrightarrow> \<exists>l. primel l \<and> prod l = prod xs * prod ys" apply (rule exI) apply safe apply (rule_tac [2] prod_append) apply (simp add: primel_append) done lemma factor_exists [rule_format]: "Suc 0 < n --> (\<exists>l. primel l \<and> prod l = n)" apply (induct n rule: nat_less_induct) apply (rule impI) apply (case_tac "prime n") apply (rule exI) apply (erule prime_primel) apply (cut_tac n = n in not_prime_ex_mk) apply (auto intro!: split_primel) done lemma nondec_factor_exists: "Suc 0 < n ==> \<exists>l. primel l \<and> nondec l \<and> prod l = n" apply (erule factor_exists [THEN exE]) apply (blast intro!: ex_nondec_lemma) done subsection {* Uniqueness *} lemma prime_dvd_mult_list [rule_format]: "prime p ==> p dvd (prod xs) --> (\<exists>m. m:set xs \<and> p dvd m)" apply (induct xs) apply (force simp add: prime_def) apply (force dest: prime_dvd_mult) done lemma hd_xs_dvd_prod: "primel (x # xs) ==> primel ys ==> prod (x # xs) = prod ys ==> \<exists>m. m \<in> set ys \<and> x dvd m" apply (rule prime_dvd_mult_list) apply (simp add: primel_hd_tl) apply (erule hd_dvd_prod) done lemma prime_dvd_eq: "primel (x # xs) ==> primel ys ==> m \<in> set ys ==> x dvd m ==> x = m" apply (rule primes_eq) apply (auto simp add: primel_def primel_hd_tl) done lemma hd_xs_eq_prod: "primel (x # xs) ==> primel ys ==> prod (x # xs) = prod ys ==> x \<in> set ys" apply (frule hd_xs_dvd_prod) apply auto apply (drule prime_dvd_eq) apply auto done lemma perm_primel_ex: "primel (x # xs) ==> primel ys ==> prod (x # xs) = prod ys ==> \<exists>l. 
ys <~~> (x # l)" apply (rule exI) apply (rule perm_remove) apply (erule hd_xs_eq_prod) apply simp_all done lemma primel_prod_less: "primel (x # xs) ==> primel ys ==> prod (x # xs) = prod ys ==> prod xs < prod ys" by (metis less_asym linorder_neqE_nat mult_less_cancel2 nat_0_less_mult_iff nat_less_le nat_mult_1 prime_def primel_hd_tl primel_prod_gz prod.simps(2)) lemma prod_one_empty: "primel xs ==> p * prod xs = p ==> prime p ==> xs = []" apply (auto intro: primel_one_empty simp add: prime_def) done lemma uniq_ex_aux: "\<forall>m. m < prod ys --> (\<forall>xs ys. primel xs \<and> primel ys \<and> prod xs = prod ys \<and> prod xs = m --> xs <~~> ys) ==> primel list ==> primel x ==> prod list = prod x ==> prod x < prod ys ==> x <~~> list" apply simp done lemma factor_unique [rule_format]: "\<forall>xs ys. primel xs \<and> primel ys \<and> prod xs = prod ys \<and> prod xs = n --> xs <~~> ys" apply (induct n rule: nat_less_induct) apply safe apply (case_tac xs) apply (force intro: primel_one_empty) apply (rule perm_primel_ex [THEN exE]) apply simp_all apply (rule perm.trans [THEN perm_sym]) apply assumption apply (rule perm.Cons) apply (case_tac "x = []") apply (metis perm_prod perm_refl prime_primel primel_hd_tl primel_tl prod_one_empty) apply (metis nat_0_less_mult_iff nat_mult_eq_cancel1 perm_primel perm_prod primel_prod_gz primel_prod_less primel_tl prod.simps(2)) done lemma perm_nondec_unique: "xs <~~> ys ==> nondec xs ==> nondec ys ==> xs = ys" by (metis nondec_sort_eq perm_sort_eq) theorem unique_prime_factorization [rule_format]: "\<forall>n. Suc 0 < n --> (\<exists>!l. primel l \<and> nondec l \<and> prod l = n)" by (metis factor_unique nondec_factor_exists perm_nondec_unique) end
theory sort_HSortPermutes imports Main "$HIPSTER_HOME/IsaHipster" begin datatype 'a list = Nil2 | Cons2 "'a" "'a list" datatype Nat = Z | S "Nat" datatype 'a Heap = Node "'a Heap" "'a" "'a Heap" | Nil2 fun toHeap2 :: "int list => (int Heap) list" where "toHeap2 (Nil2) = Nil2" | "toHeap2 (Cons2 y z) = Cons2 (Node (Nil2) y (Nil2)) (toHeap2 z)" fun hmerge :: "int Heap => int Heap => int Heap" where "hmerge (Node z x2 x3) (Node x4 x5 x6) = (if x2 <= x5 then Node (hmerge x3 (Node x4 x5 x6)) x2 z else Node (hmerge (Node z x2 x3) x6) x5 x4)" | "hmerge (Node z x2 x3) (Nil2) = Node z x2 x3" | "hmerge (Nil2) y = y" fun hpairwise :: "(int Heap) list => (int Heap) list" where "hpairwise (Nil2) = Nil2" | "hpairwise (Cons2 q (Nil2)) = Cons2 q (Nil2)" | "hpairwise (Cons2 q (Cons2 q2 qs)) = Cons2 (hmerge q q2) (hpairwise qs)" fun hmerging :: "(int Heap) list => int Heap" where "hmerging (Nil2) = Nil2" | "hmerging (Cons2 q (Nil2)) = q" | "hmerging (Cons2 q (Cons2 z x2)) = hmerging (hpairwise (Cons2 q (Cons2 z x2)))" fun toHeap :: "int list => int Heap" where "toHeap x = hmerging (toHeap2 x)" fun toList :: "int Heap => int list" where "toList (Node q y q2) = Cons2 y (toList (hmerge q q2))" | "toList (Nil2) = Nil2" fun dot :: "('b => 'c) => ('a => 'b) => 'a => 'c" where "dot x y z = x (y z)" fun hsort :: "int list => int list" where "hsort x = dot (% (y :: int Heap) => toList y) (% (z :: int list) => toHeap z) x" fun count :: "int => int list => Nat" where "count x (Nil2) = Z" | "count x (Cons2 z xs) = (if x = z then S (count x xs) else count x xs)" (*hipster toHeap2 hmerge hpairwise hmerging toHeap toList dot hsort count *) theorem x0 : "!! (x :: int) (y :: int list) . (count x (hsort y)) = (count x y)" by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>) end
\chapter*{Abstract} Human-robot interaction (HRI) has been a topic of both science fiction and academic speculation even before any robots existed. HRI research focuses on building intuitive and easy communication with the robot through speech, gestures and facial expressions. The use of hand gestures provides a better solution than conventional human-machine interfaces. Furthermore, translations of hand gestures can help in accomplishing the ease and naturalness desired for HRI. This has motivated very active research concerned with computer vision-based analysis and interpretation of hand gestures. In this thesis, we aim to implement hand gesture recognition for robots by modeling, training, classifying and recognizing gestures based on computer vision algorithms and machine learning techniques. Gestures are modeled based on skeletal points, and the features are extracted with the NiTE framework using a depth camera. In order to recognize gestures, we propose to learn and classify hand gestures with the help of an Adaptive Naive Bayes Classifier using the Gesture Recognition Toolkit. Furthermore, we aim to build a dashboard that can visualize the interaction between all essential parts of the system. Finally, we attempt to integrate all these functionalities into a system that interacts with the humanoid robot NAO. As a result, on the one hand, gestures will be used to command the robot to execute certain actions and, on the other hand, gestures will be translated and spoken out by the robot. \subsection*{Keywords} Human-Robot Interaction, HRI, Hand Gesture Recognition, Humanoid Robot, NAO, Skeletal Points Tracking, NiTE, Depth Camera, Asus Xtion Pro Live, Machine Learning, Adaptive Naive Bayes Classifier, ANBC, Gesture Recognition Toolkit, GRT
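The classification step sketched in the abstract above can be illustrated with a small stand-alone example. The following C++ sketch trains a plain Gaussian naive Bayes model (the core idea behind an Adaptive Naive Bayes Classifier) on labelled skeletal-point feature vectors and predicts a gesture label for a new frame. The class names, feature layout and toy data are assumptions made for illustration only; this is not the thesis implementation and it does not reproduce the Gesture Recognition Toolkit API.

// Illustrative sketch only: a plain Gaussian naive Bayes classifier over flattened
// skeletal-point feature vectors. Class ids, feature layout and all names are
// hypothetical; this is neither the thesis code nor the GRT ANBC implementation.
#include <cmath>
#include <cstddef>
#include <iostream>
#include <limits>
#include <map>
#include <vector>

struct Sample {
    int label;              // gesture class id (hypothetical: 0 = "wave", 1 = "point")
    std::vector<double> x;  // flattened skeletal-point features from the depth camera
};

class NaiveBayesGestureClassifier {
public:
    void train(const std::vector<Sample>& data) {
        std::map<int, std::vector<const Sample*>> byClass;
        for (const auto& s : data) byClass[s.label].push_back(&s);
        for (const auto& entry : byClass) {
            const auto& samples = entry.second;
            const std::size_t d = samples.front()->x.size();
            ClassModel m;
            m.prior = static_cast<double>(samples.size()) / data.size();
            m.mean.assign(d, 0.0);
            m.var.assign(d, 0.0);
            for (const Sample* s : samples)                     // per-dimension mean
                for (std::size_t i = 0; i < d; ++i) m.mean[i] += s->x[i] / samples.size();
            for (const Sample* s : samples)                     // per-dimension variance
                for (std::size_t i = 0; i < d; ++i) {
                    const double diff = s->x[i] - m.mean[i];
                    m.var[i] += diff * diff / samples.size();
                }
            for (double& v : m.var) v += 1e-6;                  // floor keeps the log-density finite
            models_[entry.first] = m;
        }
    }

    // Returns the class whose log-posterior is largest for the given feature vector.
    int predict(const std::vector<double>& x) const {
        int best = -1;
        double bestScore = -std::numeric_limits<double>::infinity();
        for (const auto& entry : models_) {
            const ClassModel& m = entry.second;
            double score = std::log(m.prior);
            for (std::size_t i = 0; i < x.size(); ++i) {
                const double diff = x[i] - m.mean[i];
                score += -0.5 * std::log(2.0 * 3.141592653589793 * m.var[i])
                         - diff * diff / (2.0 * m.var[i]);
            }
            if (score > bestScore) { bestScore = score; best = entry.first; }
        }
        return best;
    }

private:
    struct ClassModel { double prior = 0.0; std::vector<double> mean, var; };
    std::map<int, ClassModel> models_;
};

int main() {
    // Toy 2-D data standing in for real skeletal features (which would cover many joints per frame).
    std::vector<Sample> data = {
        {0, {0.10, 0.20}}, {0, {0.00, 0.30}}, {1, {1.00, 1.10}}, {1, {0.90, 1.20}}};
    NaiveBayesGestureClassifier clf;
    clf.train(data);
    std::cout << "predicted gesture class: " << clf.predict({0.95, 1.15}) << "\n";  // expected: 1
    return 0;
}

In the pipeline described by the abstract, the feature vectors would come from the NiTE skeletal tracker and the predicted label would then be mapped to a NAO action or a spoken phrase; those integration details are outside this sketch.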
Formal statement is: lemma convex_epigraph_convex: "convex S \<Longrightarrow> convex_on S f \<longleftrightarrow> convex(epigraph S f)" Informal statement is: If $S$ is convex, then $f$ is convex on $S$ if and only if the epigraph of $f$ over $S$ is convex.
\chapter{Useful numbers} The table below gives some useful physical values for parameters often used in modelling. \begin{center} \begin{longtable}{lll} \hline Definition & Symbol & Value\\ \hline \endfirsthead % \multicolumn{2}{c}{{\tablename} -- Continued} \\[0.5ex] \hline Definition & Symbol & Value and units\\ \hline \endhead %This is the footer for all pages except the last page of the table... \\[0.5ex] \multicolumn{2}{l}{{Continued on Next Page\ldots}} \\ \endfoot %This is the footer for the last page of the table... \hline \endlastfoot % Radius of Earth (at equator) & $R_E^{eq}$ & $\m[6.3781\times 10^6]$\\ Radius of Earth (at pole) & $R_E^{p}$ & $\m[6.3568\times 10^6]$\\ Radius of Earth (average value) & $R_E^{av}$ & $\m[6.371\times 10^6]$\\ Mass of Earth & $M_E$ & $\kg[5.9742\times 10^{24}]$\\ Mass of Moon & $M_M$ & $\kg[7.36\times 10^{22}]$\\ Mass of Sun & $M_S$ & $\kg[1.98892\times 10^{30}]$\\ Earth's rotation rate (based on sidereal day) & $\Omega$ & $\rads[7.2921\times 10^{-5}]$\\ \end{longtable} \end{center}
State Before: R : Type u a b : R m n✝ : ℕ inst✝ : Semiring R p✝ q p : R[X] n i : ℕ ⊢ coeff (erase n p) i = if i = n then 0 else coeff p i State After: case ofFinsupp R : Type u a b : R m n✝ : ℕ inst✝ : Semiring R p q : R[X] n i : ℕ toFinsupp✝ : AddMonoidAlgebra R ℕ ⊢ coeff (erase n { toFinsupp := toFinsupp✝ }) i = if i = n then 0 else coeff { toFinsupp := toFinsupp✝ } i Tactic: rcases p with ⟨⟩ State Before: case ofFinsupp R : Type u a b : R m n✝ : ℕ inst✝ : Semiring R p q : R[X] n i : ℕ toFinsupp✝ : AddMonoidAlgebra R ℕ ⊢ coeff (erase n { toFinsupp := toFinsupp✝ }) i = if i = n then 0 else coeff { toFinsupp := toFinsupp✝ } i State After: case ofFinsupp R : Type u a b : R m n✝ : ℕ inst✝ : Semiring R p q : R[X] n i : ℕ toFinsupp✝ : AddMonoidAlgebra R ℕ ⊢ ↑(Finsupp.erase n toFinsupp✝) i = if i = n then 0 else ↑toFinsupp✝ i Tactic: simp only [erase_def, coeff] State Before: case ofFinsupp R : Type u a b : R m n✝ : ℕ inst✝ : Semiring R p q : R[X] n i : ℕ toFinsupp✝ : AddMonoidAlgebra R ℕ ⊢ ↑(Finsupp.erase n toFinsupp✝) i = if i = n then 0 else ↑toFinsupp✝ i State After: no goals Tactic: exact ite_congr rfl (fun _ => rfl) (fun _ => rfl)
State Before: α : Type u_1 inst✝ : LinearOrderedAddCommGroup α hα : Archimedean α p : α hp : 0 < p a✝ b✝ c : α n : ℤ a b : α m : ℤ ⊢ toIcoMod hp (m • p + a) b = m • p + toIcoMod hp a b State After: no goals Tactic: rw [add_comm, toIcoMod_add_zsmul', add_comm]
lemma (in order_topology) decreasing_tendsto: assumes bdd: "eventually (\<lambda>n. l \<le> f n) F" and en: "\<And>x. l < x \<Longrightarrow> eventually (\<lambda>n. f n < x) F" shows "(f \<longlongrightarrow> l) F"
mutual data MyBool = MyFalse | MyTrue even : Nat -> MyBool even (S k) = odd k even Z = MyTrue odd : Nat -> MyBool odd (S k) = even k odd Z = MyFalse eodd : Nat -> (Bool, Bool) eodd num = (isEven num, isOdd num) where mutual isEven : Nat -> Bool isEven (S k) = isOdd k isEven Z = True isOdd : Nat -> Bool isOdd (S k) = isEven k isOdd Z = False data Box : Type -> Type where MkBox : a -> Box a mutual Functor Box where map f b = do b' <- b pure (f b') Applicative Box where (<*>) f a = do f' <- f a' <- a pure (f' a') pure = MkBox Monad Box where (>>=) (MkBox val) k = k val boxy : Box Integer boxy = map (*2) (MkBox 20)
\documentclass[letterpaper,10pt]{article} \usepackage[utf8]{inputenc} \usepackage{xifthen} \usepackage[T1]{fontenc} \usepackage[dvipsnames]{xcolor} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{titlesec} \usepackage[margin=1in]{geometry} \usepackage{longtable} \usepackage{titling} \author{Matthew Critchlow} \date{\today} \renewcommand{\maketitle}{ \par{\centering{\Huge \textsc{\theauthor}}\par} {\footnotesize\hfill{}\color{lightgray}(Last updated \thedate.)}} %Setting the font I want: \renewcommand{\familydefault}{\sfdefault} \renewcommand{\sfdefault}{ppl} \newcommand{\entry}[4]{ \ifthenelse{\isempty{#3}} {\slimentry{#1}{#2}}{ \begin{minipage}[t]{.15\textwidth} \hfill \textsc{#1} \end{minipage} \hfill\vline\hfill \begin{minipage}[t]{.80\textwidth} {\bf#2}---\textit{#3}. \footnotesize{#4} \end{minipage}\\ \vspace{.25cm} }} \newcommand{\slimentry}[2]{ \begin{minipage}[t]{.15\textwidth} \hfill \textsc{#1} \end{minipage} \hfill\vline\hfill \begin{minipage}[t]{.80\textwidth} #2 \end{minipage}\\ \vspace{.25cm} } %Institution macros, because laziness \newcommand{\uci}{University of California, Irvine} \newcommand{\ucsd}{University of California, San Diego} \newcommand{\ucsb}{University of California, Santa Barbara} \newcommand{\usd}{University of San Diego} %Section spacing and format: \titleformat{\section}{\Large\scshape\raggedright}{}{1em}{}[\titlerule] \titlespacing{\section}{0pt}{3pt}{7pt} \titleformat{\subsection}{\large\sc\centering}{}{0em}{\underline}%[\rule{3cm}{.2pt}] \titlespacing{\subsection}{0pt}{7pt}{7pt} \setlength{\parindent}{0in} \setlength{\parindent}{0in} \begin{document} \maketitle \section{Basic Info} \vspace{.25cm} \begin{minipage}[t]{.5\linewidth} \begin{tabular}{rp{.75\linewidth}} \textsc{Email:} & \href{mailto:[email protected]}{[email protected]}\\ \textsc{www:}&\href{https://critchlow.xyz}{critchlow.xyz} \end{tabular} \end{minipage} \begin{minipage}[t]{.5\linewidth} \begin{tabular}{rl} \textsc{Github:} & \href{http://github.com/mcritchlow}{mcritchlow}\\ \textsc{Public Key:}&\href{http://pgp.mit.edu/pks/lookup?op=get&search=0x5B86D84AB60CAB15}{5B86D84AB60CAB15} \end{tabular} \end{minipage} \vspace{.25cm} \section{Experience} \entry{2017}{Lead Application Developer}{\ucsd}{ Collaborate directly with the Lead DevOps engineer to introduce several changes to our configuration management, continuous integration and delivery strategies through the use of Ansible, Jenkins, and possibly orchestrated containers. Lead open source community engagement and technical initiatives that are of value to {\ucsd} with the Samvera, Blacklight, and Fedora projects. Work with entire development team to successfully deliver a new Digital Asset Management System based on Fedora and Samvera. } \entry{2013--2017}{Manager of Development and Web Services}{\ucsd}{ In this position I was able to successfully advocate and oversee the team's transition to adopting an open source Library repository framework, then called Hydra. This transition included an entirely new workflow and set of technologies, which I introduced successfully and the entire Library organization realized substantial value from. This included the transition to bi-weekly development iterations (Sprints), requiring comprehensive test coverage for our software applications, implementing a continuous integration workflow and a chatops deployment strategy. } \entry{2009--2013}{Web Technical Manager}{\ucsd}{ Served as manager and technical expert for the Web Services team.
Collaborated with a key Library stakeholder to introduce Agile principles for the first time in a Library project. This was done for the creation of the Library mobile website, which was very well received. Other projects included a complete overhaul of the Library Public Website, the creation of a new Library Intranet, and several other critical projects such as the San Diego Technology Archive.} \entry{2006--2009}{Programmer/Analyst}{\ucsd}{ In this position I wrote an Audio Reserves application that students used to access playlists of content that was required listening for their course. This was a high-demand application on campus. The software was written in Java and JSP, but a triplestore was used instead of a traditional relational database. This made the CRUD interactions for the application more complex, but allowed for very robust metadata. I also wrote the initial editing support for the Digital Asset Management System triplestore repository that interacted with Apache Jena to manipulate the RDF graphs for our digital objects. } \entry{2004--2006}{Programmer/Analyst}{\uci}{ I worked in the Registrar's Office software development team. I was responsible for writing software that students used to look up time-sensitive information such as their grades and course schedules. The software was predominantly written in Java using the Apache Struts framework. I also learned a lot of Perl on the job, as the lead developer at the time thought it was the greatest thing man has ever invented.} \section{Workshops} \entry{2017}{\href{https://ucsdlib.github.io/git-novice/}{``Software Carpentry: Version Control with Git''}}{Samvera Connect}{Taught the Version Control with Git course of the Software Carpentry curriculum. Rewritten completely with Alex Dunn and Chrissy Rissmeyer from UC Santa Barbara to cater to an audience of metadata librarians.} \entry{2017}{\href{https://ucsdlib.github.io/git-novice/}{``Software Carpentry: Version Control with Git''}}{{\ucsb}}{Taught the Version Control with Git course of the Software Carpentry curriculum. Rewritten completely with Alex Dunn and Chrissy Rissmeyer from UC Santa Barbara to cater to an audience of metadata librarians.} \entry{2016}{\href{https://ucsdlib.github.io/workshops/posts/upcoming/swc/software-carpentry-sio/}{``Software Carpentry''}}{Scripps Institution of Oceanography, {\ucsd}}{Taught the Version Control with Git course of the Software Carpentry curriculum. Substantially rewritten by me, for a grad-student focus after initial feedback. TA for The Unix Shell and Programming with Python courses.} \entry{2016}{\href{https://ucsdlib.github.io/workshops/posts/library-carpentry/past/library-carpentry/}{``Library Carpentry''}}{The Library, {\ucsd}}{Taught the Version Control with Git course of the Library Carpentry curriculum. TA for Unix Shell and Open Refine courses.} \entry{2016}{\href{https://ucsdlib.github.io/workshops/posts/posts/software-carpentry/}{``Software Carpentry''}}{Biomedical Library, {\ucsd}}{Taught the Version Control with Git course of the Software Carpentry curriculum. TA for The Unix Shell and Programming with Python courses.} \section{Presentations} \entry{2017}{``Code Readability and Productivity Tips''}{Campus LISA Conference, {\ucsd}}{Paper presenting common software development strategies and best practices.
This covered topics such as refactoring, using good names, patterns developers should be able to recognize and name, as well as productivity tips in both Ruby and .NET languages.} \entry{2016}{``Open Source at UC San Diego''}{Campus LISA Conference, {\ucsd}}{Paper presenting how the UC San Diego Library has been able to participate in open source development, and the challenges presented by working with upstream projects using the Apache 2 license, and how we worked around it.} \entry{2015}{``Co-working Space: Online Tools for Collaboration''}{Campus LISA Conference, {\ucsd}}{Paper presenting current tools that can be used for online collaboration and discussion of how they were integrated into our workflows to make life more efficient. Covered technologies like Slack, Github, and Confluence.} \entry{2014}{\href{https://www.slideshare.net/mattcritchlow/sca2014-ucsdfinal}{``Programmer and Archivist Collaboration''}}{Society of California Archivists}{Forum-format presentation detailing how I worked closely with the Digital Library Project Manager, Cristela Garcia-Spitz, to come up with innovative ways of tracking progress of the Digital Library project as well as development Sprints} \entry{2014}{\href{https://www.slideshare.net/mattcritchlow/the-evolution-of-the-uc-san-diego-library-dams}{``The Evolution of the UC San Diego Library DAMS''}}{Digital Initiatives Symposium, {\usd}}{Paper presenting an overview of the {\ucsd} DAMS, the history behind it, and the use cases that drove the creation of a new system to support them} \entry{2014}{\href{http://www.slideshare.net/mattcritchlow/software-development-process-36844544}{``Software Development Process''}}{Campus LISA Conference, {\ucsd}}{Paper presenting the test-driven development process and related tooling I helped implement} \entry{2014}{\href{https://www.slideshare.net/mattcritchlow/uc-san-diego-campus-lisa-2014-source-code}{``Source Code Management''}}{Campus LISA Conference, {\ucsd}}{Paper presenting the migration to using git, GitHub, and the git flow pattern for local development and release management that I helped implement.} \entry{2013}{\href{http://www.slideshare.net/mattcritchlow/c4-l-alltehmetadatas2013final}{``ALL TEH METADATAS Re-revisited''}}{Code4Lib Conference}{Paper presenting a comprehensive ontology modeling project I was a primary contributor to at {\ucsd}} \entry{2013}{\href{http://www.slideshare.net/mattcritchlow/ucsd-library-hot-topics-webinars-part-2-final}{``Metadata and Repository Services for Research Data Collections''}}{Duraspace Hot Topics Webinar Series}{Part of a three-part webinar series in which I spoke about our Digital Asset Management System and related metadata modeling to support Research Data Collections at {\ucsd}} \section{Publications} \entry{2010}{\href{http://journal.code4lib.org/articles/4642}{``Using an Agile-based Approach to Develop A Library Mobile Website''} }{Matt Critchlow, Lia Friedman, Daniel Suchy}{Code4Lib Journal} \entry{2003}{``An Environment for Managing Evolving Product Line Architectures'' }{Ping Chen, Matt Critchlow, Akash Garg, Chris Van der Westhuizen, and André van der Hoek}{In Proceedings of the International Conference on Software Maintenance, Amsterdam, Netherlands, September 2003} \entry{2003}{``Differencing and Merging within an Evolving Product Line Architecture'' }{Ping Chen, Matt Critchlow, Akash Garg, Chris Van der Westhuizen, and André van der Hoek}{In Proceedings of the Fifth International Workshop on Product Family Engineering, Siena, Italy, November 2003}
\section{Institutions} \entry{2011--2013}{Professional Certificate in Project Management}{\ucsd}{} \entry{1999--2004}{B.S. in Computer Science}{\uci}{} \section{Technical} \entry{Languages}{Ruby, Java, PHP, Python, JavaScript, Bash, CSS, HTML.}{}{} \entry{Markup}{Markdown, YAML, HAML, {\LaTeX}}{}{} \section{Hobbies} \entry{Open Source}{\href{https://solus-project.com/}{Solus (GNU/Linux Operating System)}, {\href{https://github.com/samvera}{Samvera}}}{}{} \entry{Other}{Cooking, Playing Guitar, Travel, Surfing}{}{} \end{document}
/************************************************************************** ** Copyright (C) 2015 Christian Manning ** ** This software may be modified and distributed under the terms ** of the MIT license. See the LICENSE file for details. **************************************************************************/ #ifndef LASTFMPP_TEST_DATA_DIR #error "LASTFMPP_TEST_DATA_DIR define needed" #endif #include "catch.hpp" #include <fstream> #include <boost/filesystem.hpp> #include <boost/filesystem/fstream.hpp> #include <boost/range/size.hpp> #include <jbson/json_reader.hpp> #include <lastfmpp/venue.hpp> #include <lastfmpp/detail/transform.hpp> #include <lastfmpp/detail/deserialise_venue.hpp> TEST_CASE("venues_deserialise") { boost::filesystem::path test_dir{LASTFMPP_TEST_DATA_DIR}; SECTION("get_info") { boost::filesystem::ifstream is{test_dir / "venue_search.json"}; std::string venue_json; REQUIRE(std::getline(is, venue_json, static_cast<char>(EOF))); auto doc = jbson::read_json(venue_json); REQUIRE(boost::size(doc) == 1); std::vector<lastfmpp::venue> venues; REQUIRE_NOTHROW( venues = lastfmpp::transform_select<std::vector<lastfmpp::venue>>("results.venuematches.venue.*")(doc)); REQUIRE(venues.size() == 50); CHECK(venues.front().location().city() == "Moscow"); CHECK(venues.front().location().street() == u8"Ленинградский проспект 31, стр. 4 (м. Динамо)"); } }
# By Hank in SDSU # source("split.training.testing.r") ## ********************************** # train_test_file: store the split index result # classes: all the classes # classn: total number of classes # n_in_each_class: number in each class # y: class labels # trainrates: split rate for training split.training.testing <- function(train_test_file, classes, classn, n_in_each_class, y, trainrates) { if (file.exists(train_test_file)) { cat("The train_test_file", train_test_file, "exists ...\n"); load(train_test_file); } else { # sample.ns <- vector(mode="integer", length=classn); # n_in_each_class train.index <- vector(mode="integer"); test.index <- vector(mode="integer"); n_in_each_class_train <- vector(mode="integer"); n_in_each_class_test <- vector(mode="integer"); for (i in 1:classn) { indexi <- which (y==classes[i]); ## randomly selecting 80% training & 20% testing n_in_each_class_train[i] <- as.integer(n_in_each_class[i]*trainrates); n_in_each_class_test[i] <- n_in_each_class[i] - n_in_each_class_train[i]; train.index.i <- sample.int(n_in_each_class[i], n_in_each_class_train[i]); test.index.i <- setdiff(1:n_in_each_class[i], train.index.i); train.index <- c(train.index, indexi[train.index.i]); test.index <- c(test.index, indexi[test.index.i]); } class_ns_train <- n_in_each_class_train; class_ns_test <- n_in_each_class_test; save(file=train_test_file, n_in_each_class, train.index, test.index, class_ns_train, class_ns_test, n_in_each_class_train, n_in_each_class_test); } # save(file=train_test_file, class_ns, train.index, test.index, class_ns_train, class_ns_test); list("train.index"=train.index, "test.index"=test.index, "n_in_each_class_train"=class_ns_train, "n_in_each_class_test"=class_ns_test) }
/*********************************************************************** * created: 11/6/2011 * author: Martin Preisler *************************************************************************/ /*************************************************************************** * Copyright (C) 2004 - 2011 Paul D Turner & The CEGUI Development Team * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "CEGUI/Window.h" #include "CEGUI/WindowManager.h" #include <boost/test/unit_test.hpp> /* * Used to bring some Windows up for testing * * This is for exception safety, no matter what happens in the tests, * its destructor will be called */ struct LayoutSetupFixture { LayoutSetupFixture() { d_root = CEGUI::WindowManager::getSingleton().createWindow("DefaultWindow"); d_root->setPosition(CEGUI::UVector2(CEGUI::UDim(0, 0), CEGUI::UDim(0, 0))); d_root->setSize(CEGUI::USize(CEGUI::UDim(1, 0), CEGUI::UDim(1, 0))); d_insideRoot = CEGUI::WindowManager::getSingleton().createWindow("DefaultWindow"); d_insideRoot->setPosition(CEGUI::UVector2(CEGUI::UDim(0, 100), CEGUI::UDim(0, 50))); d_insideRoot->setSize(CEGUI::USize(CEGUI::UDim(0.5f, 0), CEGUI::UDim(0.5f, 0))); d_root->addChild(d_insideRoot); d_insideInsideRoot = CEGUI::WindowManager::getSingleton().createWindow("DefaultWindow"); d_insideInsideRoot->setPosition(CEGUI::UVector2(CEGUI::UDim(0, 100), CEGUI::UDim(0, 50))); d_insideInsideRoot->setSize(CEGUI::USize(CEGUI::UDim(0.5f, 0), CEGUI::UDim(0.5f, 0))); d_insideRoot->addChild(d_insideInsideRoot); CEGUI::System::getSingleton().getDefaultGUIContext().setRootWindow(d_root); CEGUI::System::getSingleton().notifyDisplaySizeChanged(CEGUI::Sizef(800, 600)); } ~LayoutSetupFixture() { CEGUI::System::getSingleton().getDefaultGUIContext().setRootWindow(nullptr); CEGUI::WindowManager::getSingleton().destroyWindow(d_root); } CEGUI::Window* d_root; CEGUI::Window* d_insideRoot; CEGUI::Window* d_insideInsideRoot; }; BOOST_FIXTURE_TEST_SUITE(Window, LayoutSetupFixture) BOOST_AUTO_TEST_CASE(Defaults) { /* * We check these to ensure we don't change them between releases, as people very likely depend on them! 
*/ BOOST_CHECK(!d_root->isActive()); BOOST_CHECK(!d_root->isDisabled()); BOOST_CHECK(!d_root->isEffectiveDisabled()); BOOST_CHECK(d_root->isVisible()); BOOST_CHECK(d_root->isEffectiveVisible()); } BOOST_AUTO_TEST_CASE(PropertyInheritance) { /* * Alpha and Disabled state should get "inherited"/propagated to children via effective alpha/disabled */ d_root->setAlpha(0.5f); d_insideRoot->setAlpha(0.5f); BOOST_CHECK_EQUAL(d_insideInsideRoot->getEffectiveAlpha(), 0.25f); d_root->setAlpha(1.0f); d_insideRoot->setAlpha(1.0f); d_root->setDisabled(true); BOOST_CHECK_EQUAL(d_insideRoot->isEffectiveDisabled(), true); BOOST_CHECK_EQUAL(d_insideInsideRoot->isEffectiveDisabled(), true); d_root->setDisabled(false); BOOST_CHECK_EQUAL(d_insideRoot->isEffectiveDisabled(), false); BOOST_CHECK_EQUAL(d_insideInsideRoot->isEffectiveDisabled(), false); } BOOST_AUTO_TEST_CASE(UnifiedDimensions) { /* * Basic relative UDim tests */ BOOST_CHECK_EQUAL(d_insideInsideRoot->getPixelSize(), CEGUI::Sizef(200, 150)); BOOST_CHECK_EQUAL(d_insideInsideRoot->getUnclippedOuterRect().get(), CEGUI::Rectf(200, 100, 400, 250)); } BOOST_AUTO_TEST_CASE(HitTesting) { /* * We know where the windows are so lets check whether CEGUI reports correct hits for them */ BOOST_CHECK(d_insideInsideRoot->isHit(glm::vec2(300, 150))); d_insideInsideRoot->setDisabled(true); BOOST_CHECK(!d_insideInsideRoot->isHit(glm::vec2(300, 150), false)); BOOST_CHECK(d_insideInsideRoot->isHit(glm::vec2(300, 150), true)); d_insideInsideRoot->setDisabled(false); d_root->setDisabled(true); BOOST_CHECK(!d_insideInsideRoot->isHit(glm::vec2(300, 150), false)); BOOST_CHECK(d_insideInsideRoot->isHit(glm::vec2(300, 150), true)); d_root->setDisabled(false); } BOOST_AUTO_TEST_CASE(Hierarchy) { CEGUI::Window* child = d_insideInsideRoot->createChild("DefaultWindow"); BOOST_CHECK(d_insideInsideRoot->isChild(child)); d_insideInsideRoot->destroyChild(child); child = d_insideInsideRoot->createChild("DefaultWindow"); child->setDestroyedByParent(false); d_insideInsideRoot->removeChild(child); BOOST_CHECK(!d_insideInsideRoot->isChild(child)); CEGUI::WindowManager::getSingleton().destroyWindow(child); } BOOST_AUTO_TEST_CASE(RecursiveSearch) { int previousID[3]; previousID[0] = d_root->getID(); previousID[1] = d_insideRoot->getID(); previousID[2] = d_insideInsideRoot->getID(); d_root->setID(2); d_insideRoot->setID(3); d_insideInsideRoot->setID(5); BOOST_CHECK_EQUAL(d_insideInsideRoot, d_root->getChildRecursive(5)); // finding the correct window BOOST_CHECK_EQUAL(d_insideRoot, d_root->getChildRecursive(3)); BOOST_CHECK_EQUAL(d_insideInsideRoot, d_insideRoot->getChildRecursive(5)); BOOST_CHECK(0 == d_root->getChildRecursive(10)); // not finding these BOOST_CHECK(0 == d_root->getChildRecursive(6)); BOOST_CHECK(0 == d_insideRoot->getChildRecursive(10)); d_root->setID(previousID[0]); d_insideRoot->setID(previousID[1]); d_insideInsideRoot->setID(previousID[2]); } BOOST_AUTO_TEST_SUITE_END()
(* Title: HOL/HOLCF/IOA/Storage/Impl.thy Author: Olaf Müller *) section \<open>The implementation of a memory\<close> theory Impl imports IOA.IOA Action begin definition impl_sig :: "action signature" where "impl_sig = (\<Union>l.{Free l} \<union> {New}, \<Union>l.{Loc l}, {})" definition impl_trans :: "(action, nat * bool)transition set" where "impl_trans = {tr. let s = fst(tr); k = fst s; b = snd s; t = snd(snd(tr)); k' = fst t; b' = snd t in case fst(snd(tr)) of New \<Rightarrow> k' = k \<and> b' | Loc l \<Rightarrow> b \<and> l= k \<and> k'= (Suc k) \<and> \<not>b' | Free l \<Rightarrow> k'=k \<and> b'=b}" definition impl_ioa :: "(action, nat * bool)ioa" where "impl_ioa = (impl_sig, {(0,False)}, impl_trans,{},{})" lemma in_impl_asig: "New \<in> actions(impl_sig) \<and> Loc l \<in> actions(impl_sig) \<and> Free l \<in> actions(impl_sig) " by (simp add: impl_sig_def actions_def asig_projections) end
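As a plain-language cross-check (not part of the theory, and only one reading of the definition), the transition relation impl_trans can be mirrored by a small Python sketch over states (k, b), where k is the next location to hand out and b records a pending New request:

# Hypothetical Python sketch mirroring impl_trans; states are pairs (k, b).
def step(state, action):
    k, b = state
    kind, loc = action                 # ("New", None), ("Loc", l) or ("Free", l)
    if kind == "New":                  # New: k unchanged, request flag set
        return (k, True)
    if kind == "Loc":                  # Loc l: enabled only when b holds and l = k
        return (k + 1, False) if (b and loc == k) else None
    if kind == "Free":                 # Free l: no effect on the implementation state
        return (k, b)
    raise ValueError(f"unknown action: {kind}")

# Starting from the initial state (0, False) of impl_ioa:
s = (0, False)
s = step(s, ("New", None))             # (0, True)
s = step(s, ("Loc", 0))                # (1, False)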
Industry 4.0 may add between US$28.5 billion and $62.1 billion to Việt Nam’s gross domestic product (GDP) by 2030, according to a report by the Central Institute for Economic Management (CIEM). The report was presented at CIEM’s conference held in Hà Nội on Tuesday to discuss international experiences and recommendations for Việt Nam in developing a national Industry 4.0 strategy. Studying different scenarios in which Việt Nam implements only economic reforms or implements both economic reforms and an Industry 4.0 strategy, CIEM found that by 2030 Industry 4.0 would raise annual GDP growth by 7 per cent in the worst-case scenario and by as much as 16 per cent in the best-case scenario. In addition, GDP per capita would increase by $315-640 by 2030, thanks to improvements in labour productivity and employment, according to CIEM. Nguyễn Đình Cung, CIEM’s director, said that Việt Nam had a national goal of industrialisation and of renovating its growth model, adding that science and technology were the most important factors for economic growth. Đặng Quang Vinh, deputy head of CIEM’s Business Environment and Competitiveness Department, said that new industries emerging from Industry 4.0 would be the major drivers of growth in Việt Nam, changing and enhancing the competitiveness of other industries such as manufacturing and processing, trade, retail, agriculture, finance and banking, and insurance. However, a majority of firms in Việt Nam were small and medium-sized enterprises and did not devote adequate attention to the application of new technologies, Vinh said. The State had a tight budget, and financing was often difficult to access, he added. Vinh said that besides efforts to improve the business climate, it was important to create space for technology research and development, complete legal frameworks for new industries and enhance the enforcement of regulations related to intellectual property to encourage innovation. Đinh Quang Trung, Deputy Director of the Ministry of Information and Communications’ Science and Technology Department, said that focus would be placed on the development of strategic IT products together with enhancing security. Prime Minister Nguyễn Xuân Phúc in July asked the Ministry of Planning and Investment to implement three focuses for Industry 4.0: the development of a national Industry 4.0 strategy, expected to be reported to the Government at the end of this year; the development of a project for the foundation of a national innovation centre, expected to start construction at the end of this year; and the building of human resources for the Industry 4.0 strategy.
%kkapnmexport 'Flexible PNM export' % This MatLab function was automatically generated by a converter (KhorosToMatLab) from the Khoros kapnmexport.pane file % % Parameters: % InputFile: i 'Input ', required: 'First Input data object' % OutputFile: o 'Output', required: 'Resulting output data object' % % Example: o = kkapnmexport(i, {'i','';'o',''}) % % Khoros helpfile follows below: % % PROGRAM % kapnmexport - Flexible PNM export % % DESCRIPTION % This kroutine exports the input object (specified with the [-i] parameter) in PNM format (presently only binary PPM is supported). The output file (specified with the [-o] parameter) will serve as the final name for the output object if there is only one frame in the input object, if there are more than one frames (i.e. if d and/or t > 1) the output file [-o] will be a basename, and a separate file will be created for each frame. % If there is mask in the image, masked pixels will be substituted by a color (specified by the parameter [-maskcolor] for exporting. % % % % EXAMPLES % All examples for the Annotate toolbox are listed on the $ANNOTATE/manual/index.html on-line manual. % % "SEE ALSO" % kman page for ka_color_decodecolor or on-line manual for information on the color specification schemes. % % RESTRICTIONS % If there are more than one frames in the input file, the output file name will be used as a basename, and it will not be available for subsequent operators in a cantata workspace. % % REFERENCES % All references for the Annotate toolbox are listed on the $ANNOTATE/manual/index.html on-line manual. % % COPYRIGHT % Annotate Toolbox v1.0 Copyright (C) 1997, Rafael Santos. % function varargout = kkapnmexport(varargin) if nargin ==0 Inputs={};arglist={'',''}; elseif nargin ==1 Inputs=varargin{1};arglist={'',''}; elseif nargin ==2 Inputs=varargin{1}; arglist=varargin{2}; else error('Usage: [out1,..] = kkapnmexport(Inputs,arglist).'); end if size(arglist,2)~=2 error('arglist must be of form {''ParameterTag1'',value1;''ParameterTag2'',value2}') end narglist={'i', '__input';'o', '__output'}; maxval={0,0}; minval={0,0}; istoggle=[0,0]; was_set=istoggle * 0; paramtype={'InputFile','OutputFile'}; % identify the input arrays and assign them to the arguments as stated by the user if ~iscell(Inputs) Inputs = {Inputs}; end NumReqOutputs=1; nextinput=1; nextoutput=1; for ii=1:size(arglist,1) wasmatched=0; for jj=1:size(narglist,1) if strcmp(arglist{ii,1},narglist{jj,1}) % a given argument was matched to the possible arguments wasmatched = 1; was_set(jj) = 1; if strcmp(narglist{jj,2}, '__input') if (nextinput > length(Inputs)) error(['Input ' narglist{jj,1} ' has no corresponding input!']); end narglist{jj,2} = 'OK_in'; nextinput = nextinput + 1; elseif strcmp(narglist{jj,2}, '__output') if (nextoutput > nargout) error(['Output nr. 
' narglist{jj,1} ' is not present in the assignment list of outputs !']); end if (isempty(arglist{ii,2})) narglist{jj,2} = 'OK_out'; else narglist{jj,2} = arglist{ii,2}; end nextoutput = nextoutput + 1; if (minval{jj} == 0) NumReqOutputs = NumReqOutputs - 1; end elseif isstr(arglist{ii,2}) narglist{jj,2} = arglist{ii,2}; else if strcmp(paramtype{jj}, 'Integer') & (round(arglist{ii,2}) ~= arglist{ii,2}) error(['Argument ' arglist{ii,1} ' is of integer type but non-integer number ' arglist{ii,2} ' was supplied']); end if (minval{jj} ~= 0 | maxval{jj} ~= 0) if (minval{jj} == 1 & maxval{jj} == 1 & arglist{ii,2} < 0) error(['Argument ' arglist{ii,1} ' must be bigger or equal to zero!']); elseif (minval{jj} == -1 & maxval{jj} == -1 & arglist{ii,2} > 0) error(['Argument ' arglist{ii,1} ' must be smaller or equal to zero!']); elseif (minval{jj} == 2 & maxval{jj} == 2 & arglist{ii,2} <= 0) error(['Argument ' arglist{ii,1} ' must be bigger than zero!']); elseif (minval{jj} == -2 & maxval{jj} == -2 & arglist{ii,2} >= 0) error(['Argument ' arglist{ii,1} ' must be smaller than zero!']); elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} < minval{jj}) error(['Argument ' arglist{ii,1} ' must be bigger than ' num2str(minval{jj})]); elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} > maxval{jj}) error(['Argument ' arglist{ii,1} ' must be smaller than ' num2str(maxval{jj})]); end end end if ~strcmp(narglist{jj,2},'OK_out') & ~strcmp(narglist{jj,2},'OK_in') narglist{jj,2} = arglist{ii,2}; end end end if (wasmatched == 0 & ~strcmp(arglist{ii,1},'')) error(['Argument ' arglist{ii,1} ' is not a valid argument for this function']); end end % match the remaining inputs/outputs to the unused arguments and test for missing required inputs for jj=1:size(narglist,1) if strcmp(paramtype{jj}, 'Toggle') if (narglist{jj,2} ==0) narglist{jj,1} = ''; end; narglist{jj,2} = ''; end; if ~strcmp(narglist{jj,2},'__input') && ~strcmp(narglist{jj,2},'__output') && istoggle(jj) && ~ was_set(jj) narglist{jj,1} = ''; narglist{jj,2} = ''; end; if strcmp(narglist{jj,2}, '__input') if (minval{jj} == 0) % meaning this input is required if (nextinput > size(Inputs)) error(['Required input ' narglist{jj,1} ' has no corresponding input in the list!']); else narglist{jj,2} = 'OK_in'; nextinput = nextinput + 1; end else % this is an optional input if (nextinput <= length(Inputs)) narglist{jj,2} = 'OK_in'; nextinput = nextinput + 1; else narglist{jj,1} = ''; narglist{jj,2} = ''; end; end; else if strcmp(narglist{jj,2}, '__output') if (minval{jj} == 0) % this is a required output if (nextoutput > nargout & nargout > 1) error(['Required output ' narglist{jj,1} ' is not stated in the assignment list!']); else narglist{jj,2} = 'OK_out'; nextoutput = nextoutput + 1; NumReqOutputs = NumReqOutputs-1; end else % this is an optional output if (nargout - nextoutput >= NumReqOutputs) narglist{jj,2} = 'OK_out'; nextoutput = nextoutput + 1; else narglist{jj,1} = ''; narglist{jj,2} = ''; end; end end end end if nargout varargout = cell(1,nargout); else varargout = cell(1,1); end global KhorosRoot if exist('KhorosRoot') && ~isempty(KhorosRoot) w=['"' KhorosRoot]; else if ispc w='"C:\Program Files\dip\khorosBin\'; else [s,w] = system('which cantata'); w=['"' w(1:end-8)]; end end [varargout{:}]=callKhoros([w 'kapnmexport" '],Inputs,narglist);
module consoleExamples.passwordCheckSimple where open import ConsoleLib open import Data.Bool.Base open import Data.Bool open import Data.String renaming (_==_ to _==str_) open import SizedIO.Base main : ConsoleProg main = run (GetLine >>= λ s → if s ==str "passwd" then WriteString "Success" else WriteString "Error")
= = = World War I = = =
Journey to the Center of the Moon was announced for the PC at E3 2005. The Adventure Company collaborated with developer Kheops Studio for the release. Benoît Hozjan, the co-founder of Kheops Studio, became Managing Director of the game, while Alexis Lang became the Lead Game Designer.
module Toolkit.Data.DVect.Elem import Data.Vect import Toolkit.Data.DVect import public Toolkit.Decidable.Equality.Indexed %default total public export data Elem : (iTy : Type) -> (elemTy : iTy -> Type) -> forall i, is . (e : elemTy i) -> (es : DVect iTy elemTy l is) -> Type where H : Elem iTy eTy x (y::xs) T : (later : Elem iTy eTy x xs) -> Elem iTy eTy x (x'::xs) -- [ EOF ]
import numpy as np dt = 0.001 if 0: # -- MNIST neurons = [12544, 12544, 2000] synapses = [313600, 5017600, 6272000, 20000] rates = [4.6, 15.6, 4.2] presentation_times = [0.2, 0.1, 0.06] elif 0: # -- CIFAR-10 neurons = [36864, 9216, 2304, 1152] synapses = [2764800, 14745600, 1327104, 663552, 11520] rates = [173.3, 99.0, 9.7, 7.2] presentation_times = [0.2, 0.15, 0.08, 0.06] elif 1: # -- Imagenet neurons = [193600, 139968, 64896, 43264, 43264, 4096, 4096] synapses = [70276800, 223948800, 112140288, 149520384, 99680256, 37748736, 16777216, 4096000] rates = [178.1, 48.8, 26.6, 30.6, 35.6, 19.1, 10.7] # rates = [1000.0, 178.1, 48.8, 26.6, 30.6, 35.6, 19.1, 10.7] presentation_times = [0.2, 0.08, 0.06] neurons = np.array(neurons, dtype=float) synapses = np.array(synapses, dtype=float) rates = np.array(rates, dtype=float) average_rate = (rates * neurons).sum() / neurons.sum() # --- compute flops on standard hardware flops_update = 1 flops_synop = 2 # flops = flops_synop * synapses.sum() + flops_update * neurons.sum() flops0 = flops_synop * synapses[0] flops = flops0 + flops_synop * synapses[1:].sum() + flops_update * neurons.sum() # --- compute energy on neuromorphic hardware flopjoules_update = 0.25 flopjoules_synop = 0.08 # synops = (synapses * rates).sum() synops = (synapses[1:] * rates).sum() updates = neurons.sum() / dt print("Average rate: %s" % (average_rate,)) print("Synops/s = %s, updates/s = %s" % (synops, updates)) for pt in presentation_times: energy = flops0 + (flopjoules_synop*synops + flopjoules_update*updates) * pt print("pt %d ms: flops = %0.2e, energy = %0.2e, efficiency = %0.2f" % ( 1000*pt, flops, energy, (flops / energy)))
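For convenience, the same calculation can be packaged as a function; this is only a re-arrangement of the script above (the function name and keyword defaults are ours, the constants mirror the in-script values):

import numpy as np

def spiking_efficiency(synapses, neurons, rates, dt, pt,
                       flops_synop=2.0, flops_update=1.0,
                       fj_synop=0.08, fj_update=0.25):
    """Return (flops, energy proxy, flops/energy) for one presentation time pt."""
    synapses = np.asarray(synapses, dtype=float)
    neurons = np.asarray(neurons, dtype=float)
    rates = np.asarray(rates, dtype=float)
    flops0 = flops_synop * synapses[0]                    # first-layer synapse term, kept separate as in the script
    flops = flops0 + flops_synop * synapses[1:].sum() + flops_update * neurons.sum()
    synops = (synapses[1:] * rates).sum()                 # synaptic events per second
    updates = neurons.sum() / dt                          # neuron updates per second
    energy = flops0 + (fj_synop * synops + fj_update * updates) * pt
    return flops, energy, flops / energy

# e.g. spiking_efficiency(synapses, neurons, rates, dt, 0.06) should reproduce the
# numbers printed for the last presentation time of the selected configuration.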
%[ P, inls ] = ht_simple_ransac_p3p( u, X, rthr, maxiter ) %u: 3 x n image points %X: 3 x n 3D points %rthr: inlier threshold %maxiter: default=1000 function [ P, inls ] = ht_simple_ransac_p3p( u, X, rthr, max_iter ) if nargin < 4 max_iter = 1000; end %initialization u = bsxfun(@rdivide, u, sqrt(sum(u.^2, 1))); Npts = size(u, 2); rthr = cos(rthr); max_inlsnum = 3; no_iter = 0; P = []; inls = false(1, Npts); %ransac while no_iter < max_iter no_iter = no_iter + 1; idx = randperm(Npts, 3); P_cand = P3PSolver([u(:, idx); X(:, idx)]); [inls_cand, inls_cand_num] = calculate_inls_angular(P_cand, u, X, rthr); if length(P_cand) > 1 [inls_cand_num, inls_cand_idx] = max(inls_cand_num); inls_cand = inls_cand{inls_cand_idx}; P_cand = P_cand{inls_cand_idx}; else inls_cand_num = inls_cand_num(1); inls_cand = inls_cand{1}; P_cand = P_cand{1}; end if inls_cand_num >= max_inlsnum max_inlsnum = inls_cand_num; P = P_cand; inls = inls_cand; max_iter = min([max_iter, nsamples(max_inlsnum, Npts, 3, 0.95)]); end end end function [SampleCnt, q] = nsamples(ni, ptNum, pf, conf) q = prod (((ni-pf+1) : ni) ./ ((ptNum-pf+1) : ptNum)); if q < eps SampleCnt = Inf; else % SampleCnt = log(1 - conf) / log(1 - q); if q > conf SampleCnt = 1; else SampleCnt = log(1 - conf) / log(1 - q); end end end function [inls, inls_num] = calculate_inls_angular(Pcand, u, X, rthr) inls = cell(1, length(Pcand)); inls_num = zeros(1, length(Pcand)); for ii = 1:1:length(Pcand) X_reproj = Pcand{ii} * [X; ones(1, size(X, 2))]; X_reproj = bsxfun(@rdivide, X_reproj, sqrt(sum(X_reproj.^2, 1))); res = sum(u .* X_reproj, 1); inls{ii} = res > rthr; inls_num(ii) = sum(inls{ii}); end end
# Copyright (c) 2021, Eric Sabo
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

include("linearcode.jl")

abstract type AbstractHammingCode <: AbstractLinearCode end

function construct_ham_matrix(r::Int, q::Int)
    # Code length of the q-ary Hamming code with r parity checks: (q^r - 1) / (q - 1).
    ncols = Int(floor((q^r - 1) / (q - 1)))

    M = Matrix{Int}(undef, r, ncols)
    for i in 1:ncols
        # Column i is i written with r base-q digits, most significant digit first.
        # (The string/parse round-trip through decimal assumes q <= 10.)
        M[:, i] = reverse(digits(parse(Int, string(i, base = q)), pad = r), dims = 1)
    end

    return M
end
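As a cross-language sanity check (an illustrative sketch, not part of the package; the helper name ham_columns is ours), the loop above amounts to writing each column index in base q. For r = 3, q = 2 this reproduces the parity-check matrix of the binary Hamming(7, 4) code:

# Illustrative Python equivalent of the column construction above
# (works for any q, not just q <= 10).
def ham_columns(r, q):
    ncols = (q**r - 1) // (q - 1)
    cols = []
    for i in range(1, ncols + 1):
        digs = []
        x = i
        for _ in range(r):             # r least-significant base-q digits of i
            digs.append(x % q)
            x //= q
        cols.append(digs[::-1])        # most significant digit first, as in the Julia code
    return [[cols[j][row] for j in range(ncols)] for row in range(r)]

# ham_columns(3, 2) ->
# [[0, 0, 0, 1, 1, 1, 1],
#  [0, 1, 1, 0, 0, 1, 1],
#  [1, 0, 1, 0, 1, 0, 1]]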
-- -- Created by Dependently-Typed Lambda Calculus on 2019-05-15 -- imports -- Author: ice1000 -- {-# OPTIONS --without-K --safe #-} import Relation.Binary.PropositionalEquality open import Relation.Binary.PropositionalEquality import Relation.Binary.PropositionalEquality using () import Relation.Binary.PropositionalEquality using (sym) hiding (cong) import Relation.Binary.PropositionalEquality renaming (sym to symBla)
lemma islimpt_approachable_le: "x islimpt S \<longleftrightarrow> (\<forall>e>0. \<exists>x'\<in> S. x' \<noteq> x \<and> dist x' x \<le> e)" for x :: "'a::metric_space"
Another relationship Nicole pursued was with Aden Jefferies, her longtime closest friend. Aden had a strong fanbase from his previous relationship with Belle Taylor, which left the audience divided over his relationship with Nicole. Nicole has also been featured in various other romantic storylines, such as a brief fling with Liam Murphy; James said that he was compatible with Nicole because he had "the edge she was after". She also dated Trey Palmer, and the pair became involved in a sex tape storyline; many newspapers reported on the plot because it "echoed" co-star Lewis' real-life sex tape scandal. Producer Welsh once stated he believed Nicole was destined to come "full circle", and Nicole began behaving erratically and wildly once more due to her failed romances and the death of her friend Belle. She also had an affair with an older male character, Sid Walker. James liked the fact that Nicole had so many romances because she got to kiss many of her co-stars.
\name{show-HeatmapList-method} \alias{show,HeatmapList-method} \title{ Draw a list of heatmaps with default parameters } \description{ Draw a list of heatmaps with default parameters } \usage{ \S4method{show}{HeatmapList}(object) } \arguments{ \item{object}{a \code{\link{HeatmapList-class}} object.} } \details{ Actually it calls \code{\link{draw,HeatmapList-method}}, but only with default parameters. If users want to customize the heatmap, they can pass parameters directly to \code{\link{draw,HeatmapList-method}}. } \value{ This function returns no value. } \examples{ # There is no example NULL }
function [x_sigma, x_gam] = gaussian_para_esti(x) % x = x(:); gam = 0.2:0.001:10; r_gam = (gamma(1./gam).*gamma(3./gam))./((gamma(2./gam)).^2); x_mu = mean(x); x_sigma_sq = mean((x - x_mu).^2); x_sigma = sqrt(x_sigma_sq); E_x = mean(abs(x - x_mu)); rho_x = x_sigma_sq/E_x^2; [x_diff, x_ind] = min(abs(rho_x - r_gam)); x_gam = gam(x_ind);
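What the routine above does, in words: it matches the sample ratio sigma^2 / E[|x - mu|]^2 against r(gam) = Gamma(1/gam) * Gamma(3/gam) / Gamma(2/gam)^2 over a grid of gam values, which is the standard moment relation of a generalized Gaussian, and returns the best-matching shape parameter together with the sample standard deviation. A rough NumPy/SciPy sketch of the same procedure (the function name is ours):

import numpy as np
from scipy.special import gamma as G

def ggd_parameters(x, gam_grid=np.arange(0.2, 10.001, 0.001)):
    """Moment-matching estimate of (sigma, gamma) for a generalized Gaussian sample."""
    x = np.asarray(x, dtype=float).ravel()
    r_gam = G(1.0 / gam_grid) * G(3.0 / gam_grid) / G(2.0 / gam_grid) ** 2
    mu = x.mean()
    sigma_sq = np.mean((x - mu) ** 2)
    rho = sigma_sq / np.mean(np.abs(x - mu)) ** 2
    idx = np.argmin(np.abs(rho - r_gam))
    return np.sqrt(sigma_sq), gam_grid[idx]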
(* Title: Jinja/Compiler/TypeComp.thy Author: Tobias Nipkow Copyright TUM 2003 *) section \<open>Preservation of Well-Typedness\<close> theory TypeComp imports Compiler "../BV/BVSpec" begin (*<*) declare nth_append[simp] (*>*) locale TC0 = fixes P :: "J\<^sub>1_prog" and mxl :: nat begin definition "ty E e = (THE T. P,E \<turnstile>\<^sub>1 e :: T)" definition "ty\<^sub>l E A' = map (\<lambda>i. if i \<in> A' \<and> i < size E then OK(E!i) else Err) [0..<mxl]" definition "ty\<^sub>i' ST E A = (case A of None \<Rightarrow> None | \<lfloor>A'\<rfloor> \<Rightarrow> Some(ST, ty\<^sub>l E A'))" definition "after E A ST e = ty\<^sub>i' (ty E e # ST) E (A \<squnion> \<A> e)" end lemma (in TC0) ty_def2 [simp]: "P,E \<turnstile>\<^sub>1 e :: T \<Longrightarrow> ty E e = T" (*<*) apply (unfold ty_def) apply(blast intro: the_equality WT\<^sub>1_unique) done (*>*) lemma (in TC0) [simp]: "ty\<^sub>i' ST E None = None" (*<*)by (simp add: ty\<^sub>i'_def)(*>*) lemma (in TC0) ty\<^sub>l_app_diff[simp]: "ty\<^sub>l (E@[T]) (A - {size E}) = ty\<^sub>l E A" (*<*)by(auto simp add:ty\<^sub>l_def hyperset_defs)(*>*) lemma (in TC0) ty\<^sub>i'_app_diff[simp]: "ty\<^sub>i' ST (E @ [T]) (A \<ominus> size E) = ty\<^sub>i' ST E A" (*<*)by(auto simp add:ty\<^sub>i'_def hyperset_defs)(*>*) lemma (in TC0) ty\<^sub>l_antimono: "A \<subseteq> A' \<Longrightarrow> P \<turnstile> ty\<^sub>l E A' [\<le>\<^sub>\<top>] ty\<^sub>l E A" (*<*)by(auto simp:ty\<^sub>l_def list_all2_conv_all_nth)(*>*) lemma (in TC0) ty\<^sub>i'_antimono: "A \<subseteq> A' \<Longrightarrow> P \<turnstile> ty\<^sub>i' ST E \<lfloor>A'\<rfloor> \<le>' ty\<^sub>i' ST E \<lfloor>A\<rfloor>" (*<*)by(auto simp:ty\<^sub>i'_def ty\<^sub>l_def list_all2_conv_all_nth)(*>*) lemma (in TC0) ty\<^sub>l_env_antimono: "P \<turnstile> ty\<^sub>l (E@[T]) A [\<le>\<^sub>\<top>] ty\<^sub>l E A" (*<*)by(auto simp:ty\<^sub>l_def list_all2_conv_all_nth)(*>*) lemma (in TC0) ty\<^sub>i'_env_antimono: "P \<turnstile> ty\<^sub>i' ST (E@[T]) A \<le>' ty\<^sub>i' ST E A" (*<*)by(auto simp:ty\<^sub>i'_def ty\<^sub>l_def list_all2_conv_all_nth)(*>*) lemma (in TC0) ty\<^sub>i'_incr: "P \<turnstile> ty\<^sub>i' ST (E @ [T]) \<lfloor>insert (size E) A\<rfloor> \<le>' ty\<^sub>i' ST E \<lfloor>A\<rfloor>" (*<*)by(auto simp:ty\<^sub>i'_def ty\<^sub>l_def list_all2_conv_all_nth)(*>*) lemma (in TC0) ty\<^sub>l_incr: "P \<turnstile> ty\<^sub>l (E @ [T]) (insert (size E) A) [\<le>\<^sub>\<top>] ty\<^sub>l E A" (*<*)by(auto simp: hyperset_defs ty\<^sub>l_def list_all2_conv_all_nth)(*>*) lemma (in TC0) ty\<^sub>l_in_types: "set E \<subseteq> types P \<Longrightarrow> ty\<^sub>l E A \<in> list mxl (err (types P))" (*<*)by(auto simp add:ty\<^sub>l_def intro!:listI dest!: nth_mem)(*>*) locale TC1 = TC0 begin primrec compT :: "ty list \<Rightarrow> nat hyperset \<Rightarrow> ty list \<Rightarrow> expr\<^sub>1 \<Rightarrow> ty\<^sub>i' list" and compTs :: "ty list \<Rightarrow> nat hyperset \<Rightarrow> ty list \<Rightarrow> expr\<^sub>1 list \<Rightarrow> ty\<^sub>i' list" where "compT E A ST (new C) = []" | "compT E A ST (Cast C e) = compT E A ST e @ [after E A ST e]" | "compT E A ST (Val v) = []" | "compT E A ST (e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2) = (let ST\<^sub>1 = ty E e\<^sub>1#ST; A\<^sub>1 = A \<squnion> \<A> e\<^sub>1 in compT E A ST e\<^sub>1 @ [after E A ST e\<^sub>1] @ compT E A\<^sub>1 ST\<^sub>1 e\<^sub>2 @ [after E A\<^sub>1 ST\<^sub>1 e\<^sub>2])" | "compT E A ST (Var i) = []" | "compT E A ST (i := e) = compT E A ST e @ [after E A ST e, ty\<^sub>i' ST E 
(A \<squnion> \<A> e \<squnion> \<lfloor>{i}\<rfloor>)]" | "compT E A ST (e\<bullet>F{D}) = compT E A ST e @ [after E A ST e]" | "compT E A ST (e\<^sub>1\<bullet>F{D} := e\<^sub>2) = (let ST\<^sub>1 = ty E e\<^sub>1#ST; A\<^sub>1 = A \<squnion> \<A> e\<^sub>1; A\<^sub>2 = A\<^sub>1 \<squnion> \<A> e\<^sub>2 in compT E A ST e\<^sub>1 @ [after E A ST e\<^sub>1] @ compT E A\<^sub>1 ST\<^sub>1 e\<^sub>2 @ [after E A\<^sub>1 ST\<^sub>1 e\<^sub>2] @ [ty\<^sub>i' ST E A\<^sub>2])" | "compT E A ST {i:T; e} = compT (E@[T]) (A\<ominus>i) ST e" | "compT E A ST (e\<^sub>1;;e\<^sub>2) = (let A\<^sub>1 = A \<squnion> \<A> e\<^sub>1 in compT E A ST e\<^sub>1 @ [after E A ST e\<^sub>1, ty\<^sub>i' ST E A\<^sub>1] @ compT E A\<^sub>1 ST e\<^sub>2)" | "compT E A ST (if (e) e\<^sub>1 else e\<^sub>2) = (let A\<^sub>0 = A \<squnion> \<A> e; \<tau> = ty\<^sub>i' ST E A\<^sub>0 in compT E A ST e @ [after E A ST e, \<tau>] @ compT E A\<^sub>0 ST e\<^sub>1 @ [after E A\<^sub>0 ST e\<^sub>1, \<tau>] @ compT E A\<^sub>0 ST e\<^sub>2)" | "compT E A ST (while (e) c) = (let A\<^sub>0 = A \<squnion> \<A> e; A\<^sub>1 = A\<^sub>0 \<squnion> \<A> c; \<tau> = ty\<^sub>i' ST E A\<^sub>0 in compT E A ST e @ [after E A ST e, \<tau>] @ compT E A\<^sub>0 ST c @ [after E A\<^sub>0 ST c, ty\<^sub>i' ST E A\<^sub>1, ty\<^sub>i' ST E A\<^sub>0])" | "compT E A ST (throw e) = compT E A ST e @ [after E A ST e]" | "compT E A ST (e\<bullet>M(es)) = compT E A ST e @ [after E A ST e] @ compTs E (A \<squnion> \<A> e) (ty E e # ST) es" | "compT E A ST (try e\<^sub>1 catch(C i) e\<^sub>2) = compT E A ST e\<^sub>1 @ [after E A ST e\<^sub>1] @ [ty\<^sub>i' (Class C#ST) E A, ty\<^sub>i' ST (E@[Class C]) (A \<squnion> \<lfloor>{i}\<rfloor>)] @ compT (E@[Class C]) (A \<squnion> \<lfloor>{i}\<rfloor>) ST e\<^sub>2" | "compTs E A ST [] = []" | "compTs E A ST (e#es) = compT E A ST e @ [after E A ST e] @ compTs E (A \<squnion> (\<A> e)) (ty E e # ST) es" definition compT\<^sub>a :: "ty list \<Rightarrow> nat hyperset \<Rightarrow> ty list \<Rightarrow> expr\<^sub>1 \<Rightarrow> ty\<^sub>i' list" where "compT\<^sub>a E A ST e = compT E A ST e @ [after E A ST e]" end lemma compE\<^sub>2_not_Nil[simp]: "compE\<^sub>2 e \<noteq> []" (*<*)by(induct e) auto(*>*) lemma (in TC1) compT_sizes[simp]: shows "\<And>E A ST. size(compT E A ST e) = size(compE\<^sub>2 e) - 1" and "\<And>E A ST. size(compTs E A ST es) = size(compEs\<^sub>2 es)" (*<*) apply(induct e and es rule: compE\<^sub>2.induct compEs\<^sub>2.induct) apply(auto split:bop.splits nat_diff_split) done (*>*) lemma (in TC1) [simp]: "\<And>ST E. \<lfloor>\<tau>\<rfloor> \<notin> set (compT E None ST e)" and [simp]: "\<And>ST E. \<lfloor>\<tau>\<rfloor> \<notin> set (compTs E None ST es)" (*<*)by(induct e and es rule: compT.induct compTs.induct) (simp_all add:after_def)(*>*) lemma (in TC0) pair_eq_ty\<^sub>i'_conv: "(\<lfloor>(ST, LT)\<rfloor> = ty\<^sub>i' ST\<^sub>0 E A) = (case A of None \<Rightarrow> False | Some A \<Rightarrow> (ST = ST\<^sub>0 \<and> LT = ty\<^sub>l E A))" (*<*)by(simp add:ty\<^sub>i'_def)(*>*) lemma (in TC0) pair_conv_ty\<^sub>i': "\<lfloor>(ST, ty\<^sub>l E A)\<rfloor> = ty\<^sub>i' ST E \<lfloor>A\<rfloor>" (*<*)by(simp add:ty\<^sub>i'_def)(*>*) (*<*) declare (in TC0) ty\<^sub>i'_antimono [intro!] after_def[simp] pair_conv_ty\<^sub>i'[simp] pair_eq_ty\<^sub>i'_conv[simp] (*>*) lemma (in TC1) compT_LT_prefix: "\<And>E A ST\<^sub>0. 
\<lbrakk> \<lfloor>(ST,LT)\<rfloor> \<in> set(compT E A ST\<^sub>0 e); \<B> e (size E) \<rbrakk> \<Longrightarrow> P \<turnstile> \<lfloor>(ST,LT)\<rfloor> \<le>' ty\<^sub>i' ST E A" and "\<And>E A ST\<^sub>0. \<lbrakk> \<lfloor>(ST,LT)\<rfloor> \<in> set(compTs E A ST\<^sub>0 es); \<B>s es (size E) \<rbrakk> \<Longrightarrow> P \<turnstile> \<lfloor>(ST,LT)\<rfloor> \<le>' ty\<^sub>i' ST E A" (*<*) proof(induct e and es rule: compT.induct compTs.induct) case FAss thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans) next case BinOp thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans split:bop.splits) next case Seq thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans) next case While thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans) next case Cond thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans) next case Block thus ?case by(force simp add:hyperset_defs ty\<^sub>i'_def simp del:pair_conv_ty\<^sub>i' elim!:sup_state_opt_trans) next case Call thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans) next case Cons_exp thus ?case by(fastforce simp:hyperset_defs elim!:sup_state_opt_trans) next case TryCatch thus ?case by(fastforce simp:hyperset_defs intro!:(* ty\<^sub>i'_env_antimono *) ty\<^sub>i'_incr elim!:sup_state_opt_trans) qed (auto simp:hyperset_defs) declare (in TC0) ty\<^sub>i'_antimono [rule del] after_def[simp del] pair_conv_ty\<^sub>i'[simp del] pair_eq_ty\<^sub>i'_conv[simp del] (*>*) lemma (in TC0) after_in_states: "\<lbrakk> wf_prog p P; P,E \<turnstile>\<^sub>1 e :: T; set E \<subseteq> types P; set ST \<subseteq> types P; size ST + max_stack e \<le> mxs \<rbrakk> \<Longrightarrow> OK (after E A ST e) \<in> states P mxs mxl" (*<*) apply(subgoal_tac "size ST + 1 \<le> mxs") apply(simp add: after_def ty\<^sub>i'_def JVM_states_unfold ty\<^sub>l_in_types) apply(blast intro!:listI WT\<^sub>1_is_type) using max_stack1[of e] apply simp done (*>*) lemma (in TC0) OK_ty\<^sub>i'_in_statesI[simp]: "\<lbrakk> set E \<subseteq> types P; set ST \<subseteq> types P; size ST \<le> mxs \<rbrakk> \<Longrightarrow> OK (ty\<^sub>i' ST E A) \<in> states P mxs mxl" (*<*) apply(simp add:ty\<^sub>i'_def JVM_states_unfold ty\<^sub>l_in_types) apply(blast intro!:listI) done (*>*) lemma is_class_type_aux: "is_class P C \<Longrightarrow> is_type P (Class C)" (*<*)by(simp)(*>*) (*<*) declare is_type_simps[simp del] subsetI[rule del] (*>*) theorem (in TC1) compT_states: assumes wf: "wf_prog p P" shows "\<And>E T A ST. \<lbrakk> P,E \<turnstile>\<^sub>1 e :: T; set E \<subseteq> types P; set ST \<subseteq> types P; size ST + max_stack e \<le> mxs; size E + max_vars e \<le> mxl \<rbrakk> \<Longrightarrow> OK ` set(compT E A ST e) \<subseteq> states P mxs mxl" (*<*)(is "\<And>E T A ST. PROP ?P e E T A ST")(*>*) and "\<And>E Ts A ST. \<lbrakk> P,E \<turnstile>\<^sub>1 es[::]Ts; set E \<subseteq> types P; set ST \<subseteq> types P; size ST + max_stacks es \<le> mxs; size E + max_varss es \<le> mxl \<rbrakk> \<Longrightarrow> OK ` set(compTs E A ST es) \<subseteq> states P mxs mxl" (*<*)(is "\<And>E Ts A ST. 
PROP ?Ps es E Ts A ST") proof(induct e and es rule: compT.induct compTs.induct) case new thus ?case by(simp) next case (Cast C e) thus ?case by (auto simp:after_in_states[OF wf]) next case Val thus ?case by(simp) next case Var thus ?case by(simp) next case LAss thus ?case by(auto simp:after_in_states[OF wf]) next case FAcc thus ?case by(auto simp:after_in_states[OF wf]) next case FAss thus ?case by(auto simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) next case Seq thus ?case by(auto simp:image_Un after_in_states[OF wf]) next case BinOp thus ?case by(auto simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) next case Cond thus ?case by(force simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) next case While thus ?case by(auto simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) next case Block thus ?case by(auto) next case (TryCatch e\<^sub>1 C i e\<^sub>2) moreover have "size ST + 1 \<le> mxs" using TryCatch.prems max_stack1[of e\<^sub>1] by auto ultimately show ?case by(auto simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf] is_class_type_aux) next case Nil_exp thus ?case by simp next case Cons_exp thus ?case by(auto simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) next case throw thus ?case by(auto simp: WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) next case Call thus ?case by(auto simp:image_Un WT\<^sub>1_is_type[OF wf] after_in_states[OF wf]) qed declare is_type_simps[simp] subsetI[intro!] (*>*) definition shift :: "nat \<Rightarrow> ex_table \<Rightarrow> ex_table" where "shift n xt \<equiv> map (\<lambda>(from,to,C,handler,depth). (from+n,to+n,C,handler+n,depth)) xt" lemma [simp]: "shift n [] = []" (*<*)by(simp add:shift_def)(*>*) lemma [simp]: "shift n (xt\<^sub>1 @ xt\<^sub>2) = shift n xt\<^sub>1 @ shift n xt\<^sub>2" (*<*)by(simp add:shift_def)(*>*) lemma [simp]: "shift m (shift n xt) = shift (m+n) xt" (*<*)by(induct xt)(auto simp:shift_def)(*>*) lemma [simp]: "pcs (shift n xt) = {pc+n|pc. pc \<in> pcs xt}" (*<*) apply(auto simp:shift_def pcs_def) apply(rule_tac x = "x-n" in exI) apply (force split:nat_diff_split) done (*>*) lemma shift_compxE\<^sub>2: shows "\<And>pc pc' d. shift pc (compxE\<^sub>2 e pc' d) = compxE\<^sub>2 e (pc' + pc) d" and "\<And>pc pc' d. shift pc (compxEs\<^sub>2 es pc' d) = compxEs\<^sub>2 es (pc' + pc) d" (*<*) apply(induct e and es rule: compxE\<^sub>2.induct compxEs\<^sub>2.induct) apply(auto simp:shift_def ac_simps) done (*>*) lemma compxE\<^sub>2_size_convs[simp]: shows "n \<noteq> 0 \<Longrightarrow> compxE\<^sub>2 e n d = shift n (compxE\<^sub>2 e 0 d)" and "n \<noteq> 0 \<Longrightarrow> compxEs\<^sub>2 es n d = shift n (compxEs\<^sub>2 es 0 d)" (*<*)by(simp_all add:shift_compxE\<^sub>2)(*>*) locale TC2 = TC1 + fixes T\<^sub>r :: ty and mxs :: pc begin definition wt_instrs :: "instr list \<Rightarrow> ex_table \<Rightarrow> ty\<^sub>i' list \<Rightarrow> bool" ("(\<turnstile> _, _ /[::]/ _)" [0,0,51] 50) where "\<turnstile> is,xt [::] \<tau>s \<longleftrightarrow> size is < size \<tau>s \<and> pcs xt \<subseteq> {0..<size is} \<and> (\<forall>pc< size is. 
P,T\<^sub>r,mxs,size \<tau>s,xt \<turnstile> is!pc,pc :: \<tau>s)" end notation TC2.wt_instrs ("(_,_,_ \<turnstile>/ _, _ /[::]/ _)" [50,50,50,50,50,51] 50) (*<*) lemmas (in TC2) wt_defs = wt_instrs_def wt_instr_def app_def eff_def norm_eff_def (*>*) lemma (in TC2) [simp]: "\<tau>s \<noteq> [] \<Longrightarrow> \<turnstile> [],[] [::] \<tau>s" (*<*) by (simp add: wt_defs) (*>*) lemma [simp]: "eff i P pc et None = []" (*<*)by (simp add: Effect.eff_def)(*>*) (*<*) declare split_comp_eq[simp del] (*>*) lemma wt_instr_appR: "\<lbrakk> P,T,m,mpc,xt \<turnstile> is!pc,pc :: \<tau>s; pc < size is; size is < size \<tau>s; mpc \<le> size \<tau>s; mpc \<le> mpc' \<rbrakk> \<Longrightarrow> P,T,m,mpc',xt \<turnstile> is!pc,pc :: \<tau>s@\<tau>s'" (*<*)by (fastforce simp:wt_instr_def app_def)(*>*) lemma relevant_entries_shift [simp]: "relevant_entries P i (pc+n) (shift n xt) = shift n (relevant_entries P i pc xt)" (*<*) apply (induct xt) apply (unfold relevant_entries_def shift_def) apply simp apply (auto simp add: is_relevant_entry_def) done (*>*) lemma [simp]: "xcpt_eff i P (pc+n) \<tau> (shift n xt) = map (\<lambda>(pc,\<tau>). (pc + n, \<tau>)) (xcpt_eff i P pc \<tau> xt)" (*<*) apply(simp add: xcpt_eff_def) apply(cases \<tau>) apply(auto simp add: shift_def) done (*>*) lemma [simp]: "app\<^sub>i (i, P, pc, m, T, \<tau>) \<Longrightarrow> eff i P (pc+n) (shift n xt) (Some \<tau>) = map (\<lambda>(pc,\<tau>). (pc+n,\<tau>)) (eff i P pc xt (Some \<tau>))" (*<*) apply(simp add:eff_def norm_eff_def) apply(cases "i",auto) done (*>*) lemma [simp]: "xcpt_app i P (pc+n) mxs (shift n xt) \<tau> = xcpt_app i P pc mxs xt \<tau>" (*<*)by (simp add: xcpt_app_def) (auto simp add: shift_def)(*>*) lemma wt_instr_appL: "\<lbrakk> P,T,m,mpc,xt \<turnstile> i,pc :: \<tau>s; pc < size \<tau>s; mpc \<le> size \<tau>s \<rbrakk> \<Longrightarrow> P,T,m,mpc + size \<tau>s',shift (size \<tau>s') xt \<turnstile> i,pc+size \<tau>s' :: \<tau>s'@\<tau>s" (*<*) apply(auto simp:wt_instr_def app_def) prefer 2 apply(fast) prefer 2 apply(fast) apply(cases "i",auto) done (*>*) lemma wt_instr_Cons: "\<lbrakk> P,T,m,mpc - 1,[] \<turnstile> i,pc - 1 :: \<tau>s; 0 < pc; 0 < mpc; pc < size \<tau>s + 1; mpc \<le> size \<tau>s + 1 \<rbrakk> \<Longrightarrow> P,T,m,mpc,[] \<turnstile> i,pc :: \<tau>#\<tau>s" (*<*) apply(drule wt_instr_appL[where \<tau>s' = "[\<tau>]"]) apply arith apply arith apply (simp split:nat_diff_split_asm) done (*>*) lemma wt_instr_append: "\<lbrakk> P,T,m,mpc - size \<tau>s',[] \<turnstile> i,pc - size \<tau>s' :: \<tau>s; size \<tau>s' \<le> pc; size \<tau>s' \<le> mpc; pc < size \<tau>s + size \<tau>s'; mpc \<le> size \<tau>s + size \<tau>s' \<rbrakk> \<Longrightarrow> P,T,m,mpc,[] \<turnstile> i,pc :: \<tau>s'@\<tau>s" (*<*) apply(drule wt_instr_appL[where \<tau>s' = \<tau>s']) apply arith apply arith apply (simp split:nat_diff_split_asm) done (*>*) lemma xcpt_app_pcs: "pc \<notin> pcs xt \<Longrightarrow> xcpt_app i P pc mxs xt \<tau>" (*<*) by (auto simp add: xcpt_app_def relevant_entries_def is_relevant_entry_def pcs_def) (*>*) lemma xcpt_eff_pcs: "pc \<notin> pcs xt \<Longrightarrow> xcpt_eff i P pc \<tau> xt = []" (*<*) by (cases \<tau>) (auto simp add: is_relevant_entry_def xcpt_eff_def relevant_entries_def pcs_def intro!: filter_False) (*>*) lemma pcs_shift: "pc < n \<Longrightarrow> pc \<notin> pcs (shift n xt)" (*<*)by (auto simp add: shift_def pcs_def)(*>*) lemma wt_instr_appRx: "\<lbrakk> P,T,m,mpc,xt \<turnstile> is!pc,pc :: \<tau>s; pc < size is; size is < size \<tau>s; mpc \<le> size \<tau>s 
\<rbrakk> \<Longrightarrow> P,T,m,mpc,xt @ shift (size is) xt' \<turnstile> is!pc,pc :: \<tau>s" (*<*)by (auto simp:wt_instr_def eff_def app_def xcpt_app_pcs xcpt_eff_pcs)(*>*) lemma wt_instr_appLx: "\<lbrakk> P,T,m,mpc,xt \<turnstile> i,pc :: \<tau>s; pc \<notin> pcs xt' \<rbrakk> \<Longrightarrow> P,T,m,mpc,xt'@xt \<turnstile> i,pc :: \<tau>s" (*<*)by (auto simp:wt_instr_def app_def eff_def xcpt_app_pcs xcpt_eff_pcs)(*>*) lemma (in TC2) wt_instrs_extR: "\<turnstile> is,xt [::] \<tau>s \<Longrightarrow> \<turnstile> is,xt [::] \<tau>s @ \<tau>s'" (*<*)by(auto simp add:wt_instrs_def wt_instr_appR)(*>*) lemma (in TC2) wt_instrs_ext: "\<lbrakk> \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2; \<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>s\<^sub>2; size \<tau>s\<^sub>1 = size is\<^sub>1 \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1@is\<^sub>2, xt\<^sub>1 @ shift (size is\<^sub>1) xt\<^sub>2 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2" (*<*) apply(clarsimp simp:wt_instrs_def) apply(rule conjI, fastforce) apply(rule conjI, fastforce) apply clarsimp apply(rule conjI, fastforce simp:wt_instr_appRx) apply clarsimp apply(erule_tac x = "pc - size is\<^sub>1" in allE)+ apply(thin_tac "P \<longrightarrow> Q" for P Q) apply(erule impE, arith) apply(drule_tac \<tau>s' = "\<tau>s\<^sub>1" in wt_instr_appL) apply arith apply simp apply(fastforce simp add:add.commute intro!: wt_instr_appLx) done (*>*) corollary (in TC2) wt_instrs_ext2: "\<lbrakk> \<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>s\<^sub>2; \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2; size \<tau>s\<^sub>1 = size is\<^sub>1 \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1@is\<^sub>2, xt\<^sub>1 @ shift (size is\<^sub>1) xt\<^sub>2 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2" (*<*)by(rule wt_instrs_ext)(*>*) corollary (in TC2) wt_instrs_ext_prefix [trans]: "\<lbrakk> \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2; \<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>s\<^sub>3; size \<tau>s\<^sub>1 = size is\<^sub>1; prefix \<tau>s\<^sub>3 \<tau>s\<^sub>2 \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1@is\<^sub>2, xt\<^sub>1 @ shift (size is\<^sub>1) xt\<^sub>2 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2" (*<*)by(bestsimp simp:prefix_def elim: wt_instrs_ext dest:wt_instrs_extR)(*>*) corollary (in TC2) wt_instrs_app: assumes is\<^sub>1: "\<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>s\<^sub>1@[\<tau>]" assumes is\<^sub>2: "\<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>#\<tau>s\<^sub>2" assumes s: "size \<tau>s\<^sub>1 = size is\<^sub>1" shows "\<turnstile> is\<^sub>1@is\<^sub>2, xt\<^sub>1@shift (size is\<^sub>1) xt\<^sub>2 [::] \<tau>s\<^sub>1@\<tau>#\<tau>s\<^sub>2" (*<*) proof - from is\<^sub>1 have "\<turnstile> is\<^sub>1,xt\<^sub>1 [::] (\<tau>s\<^sub>1@[\<tau>])@\<tau>s\<^sub>2" by (rule wt_instrs_extR) hence "\<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>s\<^sub>1@\<tau>#\<tau>s\<^sub>2" by simp from this is\<^sub>2 s show ?thesis by (rule wt_instrs_ext) qed (*>*) corollary (in TC2) wt_instrs_app_last[trans]: "\<lbrakk> \<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>#\<tau>s\<^sub>2; \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>s\<^sub>1; last \<tau>s\<^sub>1 = \<tau>; size \<tau>s\<^sub>1 = size is\<^sub>1+1 \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1@is\<^sub>2, xt\<^sub>1@shift (size is\<^sub>1) xt\<^sub>2 [::] \<tau>s\<^sub>1@\<tau>s\<^sub>2" (*<*) apply(cases \<tau>s\<^sub>1 rule:rev_cases) apply simp apply(simp add:wt_instrs_app) done (*>*) corollary (in TC2) 
wt_instrs_append_last[trans]: "\<lbrakk> \<turnstile> is,xt [::] \<tau>s; P,T\<^sub>r,mxs,mpc,[] \<turnstile> i,pc :: \<tau>s; pc = size is; mpc = size \<tau>s; size is + 1 < size \<tau>s \<rbrakk> \<Longrightarrow> \<turnstile> is@[i],xt [::] \<tau>s" (*<*) apply(clarsimp simp add:wt_instrs_def) apply(rule conjI, fastforce) apply(fastforce intro!:wt_instr_appLx[where xt = "[]",simplified] dest!:less_antisym) done (*>*) corollary (in TC2) wt_instrs_app2: "\<lbrakk> \<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>'#\<tau>s\<^sub>2; \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>#\<tau>s\<^sub>1@[\<tau>']; xt' = xt\<^sub>1 @ shift (size is\<^sub>1) xt\<^sub>2; size \<tau>s\<^sub>1+1 = size is\<^sub>1 \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1@is\<^sub>2,xt' [::] \<tau>#\<tau>s\<^sub>1@\<tau>'#\<tau>s\<^sub>2" (*<*)using wt_instrs_app[where ?\<tau>s\<^sub>1.0 = "\<tau> # \<tau>s\<^sub>1"] by simp (*>*) corollary (in TC2) wt_instrs_app2_simp[trans,simp]: "\<lbrakk> \<turnstile> is\<^sub>2,xt\<^sub>2 [::] \<tau>'#\<tau>s\<^sub>2; \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau>#\<tau>s\<^sub>1@[\<tau>']; size \<tau>s\<^sub>1+1 = size is\<^sub>1 \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1@is\<^sub>2, xt\<^sub>1@shift (size is\<^sub>1) xt\<^sub>2 [::] \<tau>#\<tau>s\<^sub>1@\<tau>'#\<tau>s\<^sub>2" (*<*)using wt_instrs_app[where ?\<tau>s\<^sub>1.0 = "\<tau> # \<tau>s\<^sub>1"] by simp(*>*) corollary (in TC2) wt_instrs_Cons[simp]: "\<lbrakk> \<tau>s \<noteq> []; \<turnstile> [i],[] [::] [\<tau>,\<tau>']; \<turnstile> is,xt [::] \<tau>'#\<tau>s \<rbrakk> \<Longrightarrow> \<turnstile> i#is,shift 1 xt [::] \<tau>#\<tau>'#\<tau>s" (*<*) using wt_instrs_app2[where ?is\<^sub>1.0 = "[i]" and ?\<tau>s\<^sub>1.0 = "[]" and ?is\<^sub>2.0 = "is" and ?xt\<^sub>1.0 = "[]"] by simp corollary (in TC2) wt_instrs_Cons2[trans]: assumes \<tau>s: "\<turnstile> is,xt [::] \<tau>s" assumes i: "P,T\<^sub>r,mxs,mpc,[] \<turnstile> i,0 :: \<tau>#\<tau>s" assumes mpc: "mpc = size \<tau>s + 1" shows "\<turnstile> i#is,shift 1 xt [::] \<tau>#\<tau>s" (*<*) proof - from \<tau>s have "\<tau>s \<noteq> []" by (auto simp: wt_instrs_def) with mpc i have "\<turnstile> [i],[] [::] [\<tau>]@\<tau>s" by (simp add: wt_instrs_def) with \<tau>s show ?thesis by (fastforce dest: wt_instrs_ext) qed (*>*) lemma (in TC2) wt_instrs_last_incr[trans]: "\<lbrakk> \<turnstile> is,xt [::] \<tau>s@[\<tau>]; P \<turnstile> \<tau> \<le>' \<tau>' \<rbrakk> \<Longrightarrow> \<turnstile> is,xt [::] \<tau>s@[\<tau>']" (*<*) apply(clarsimp simp add:wt_instrs_def wt_instr_def) apply(rule conjI) apply(fastforce) apply(clarsimp) apply(rename_tac pc' tau') apply(erule allE, erule (1) impE) apply(clarsimp) apply(drule (1) bspec) apply(clarsimp) apply(subgoal_tac "pc' = size \<tau>s") prefer 2 apply(clarsimp simp:app_def) apply(drule (1) bspec) apply(clarsimp) apply(auto elim!:sup_state_opt_trans) done (*>*) lemma [iff]: "xcpt_app i P pc mxs [] \<tau>" (*<*)by (simp add: xcpt_app_def relevant_entries_def)(*>*) lemma [simp]: "xcpt_eff i P pc \<tau> [] = []" (*<*)by (simp add: xcpt_eff_def relevant_entries_def)(*>*) lemma (in TC2) wt_New: "\<lbrakk> is_class P C; size ST < mxs \<rbrakk> \<Longrightarrow> \<turnstile> [New C],[] [::] [ty\<^sub>i' ST E A, ty\<^sub>i' (Class C#ST) E A]" (*<*)by(simp add:wt_defs ty\<^sub>i'_def)(*>*) lemma (in TC2) wt_Cast: "is_class P C \<Longrightarrow> \<turnstile> [Checkcast C],[] [::] [ty\<^sub>i' (Class D # ST) E A, ty\<^sub>i' (Class C # ST) E A]" (*<*)by(simp add: ty\<^sub>i'_def wt_defs)(*>*) lemma (in TC2) 
wt_Push: "\<lbrakk> size ST < mxs; typeof v = Some T \<rbrakk> \<Longrightarrow> \<turnstile> [Push v],[] [::] [ty\<^sub>i' ST E A, ty\<^sub>i' (T#ST) E A]" (*<*)by(simp add: ty\<^sub>i'_def wt_defs)(*>*) lemma (in TC2) wt_Pop: "\<turnstile> [Pop],[] [::] (ty\<^sub>i' (T#ST) E A # ty\<^sub>i' ST E A # \<tau>s)" (*<*)by(simp add: ty\<^sub>i'_def wt_defs)(*>*) lemma (in TC2) wt_CmpEq: "\<lbrakk> P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<or> P \<turnstile> T\<^sub>2 \<le> T\<^sub>1\<rbrakk> \<Longrightarrow> \<turnstile> [CmpEq],[] [::] [ty\<^sub>i' (T\<^sub>2 # T\<^sub>1 # ST) E A, ty\<^sub>i' (Boolean # ST) E A]" (*<*) by(auto simp:ty\<^sub>i'_def wt_defs elim!: refTE not_refTE) (*>*) lemma (in TC2) wt_IAdd: "\<turnstile> [IAdd],[] [::] [ty\<^sub>i' (Integer#Integer#ST) E A, ty\<^sub>i' (Integer#ST) E A]" (*<*)by(simp add:ty\<^sub>i'_def wt_defs)(*>*) lemma (in TC2) wt_Load: "\<lbrakk> size ST < mxs; size E \<le> mxl; i \<in>\<in> A; i < size E \<rbrakk> \<Longrightarrow> \<turnstile> [Load i],[] [::] [ty\<^sub>i' ST E A, ty\<^sub>i' (E!i # ST) E A]" (*<*)by(auto simp add:ty\<^sub>i'_def wt_defs ty\<^sub>l_def hyperset_defs)(*>*) lemma (in TC2) wt_Store: "\<lbrakk> P \<turnstile> T \<le> E!i; i < size E; size E \<le> mxl \<rbrakk> \<Longrightarrow> \<turnstile> [Store i],[] [::] [ty\<^sub>i' (T#ST) E A, ty\<^sub>i' ST E (\<lfloor>{i}\<rfloor> \<squnion> A)]" (*<*) by(auto simp:hyperset_defs nth_list_update ty\<^sub>i'_def wt_defs ty\<^sub>l_def intro:list_all2_all_nthI) (*>*) lemma (in TC2) wt_Get: "\<lbrakk> P \<turnstile> C sees F:T in D \<rbrakk> \<Longrightarrow> \<turnstile> [Getfield F D],[] [::] [ty\<^sub>i' (Class C # ST) E A, ty\<^sub>i' (T # ST) E A]" (*<*)by(auto simp: ty\<^sub>i'_def wt_defs dest: sees_field_idemp sees_field_decl_above)(*>*) lemma (in TC2) wt_Put: "\<lbrakk> P \<turnstile> C sees F:T in D; P \<turnstile> T' \<le> T \<rbrakk> \<Longrightarrow> \<turnstile> [Putfield F D],[] [::] [ty\<^sub>i' (T' # Class C # ST) E A, ty\<^sub>i' ST E A]" (*<*)by(auto intro: sees_field_idemp sees_field_decl_above simp: ty\<^sub>i'_def wt_defs)(*>*) lemma (in TC2) wt_Throw: "\<turnstile> [Throw],[] [::] [ty\<^sub>i' (Class C # ST) E A, \<tau>']" (*<*)by(auto simp: ty\<^sub>i'_def wt_defs)(*>*) lemma (in TC2) wt_IfFalse: "\<lbrakk> 2 \<le> i; nat i < size \<tau>s + 2; P \<turnstile> ty\<^sub>i' ST E A \<le>' \<tau>s ! nat(i - 2) \<rbrakk> \<Longrightarrow> \<turnstile> [IfFalse i],[] [::] ty\<^sub>i' (Boolean # ST) E A # ty\<^sub>i' ST E A # \<tau>s" (*<*) by(simp add: ty\<^sub>i'_def wt_defs eval_nat_numeral nat_diff_distrib) (*>*) lemma wt_Goto: "\<lbrakk> 0 \<le> int pc + i; nat (int pc + i) < size \<tau>s; size \<tau>s \<le> mpc; P \<turnstile> \<tau>s!pc \<le>' \<tau>s ! 
nat (int pc + i) \<rbrakk> \<Longrightarrow> P,T,mxs,mpc,[] \<turnstile> Goto i,pc :: \<tau>s" (*<*)by(clarsimp simp add: TC2.wt_defs)(*>*) lemma (in TC2) wt_Invoke: "\<lbrakk> size es = size Ts'; P \<turnstile> C sees M: Ts\<rightarrow>T = m in D; P \<turnstile> Ts' [\<le>] Ts \<rbrakk> \<Longrightarrow> \<turnstile> [Invoke M (size es)],[] [::] [ty\<^sub>i' (rev Ts' @ Class C # ST) E A, ty\<^sub>i' (T#ST) E A]" (*<*)by(fastforce simp add: ty\<^sub>i'_def wt_defs)(*>*) corollary (in TC2) wt_instrs_app3[simp]: "\<lbrakk> \<turnstile> is\<^sub>2,[] [::] (\<tau>' # \<tau>s\<^sub>2); \<turnstile> is\<^sub>1,xt\<^sub>1 [::] \<tau> # \<tau>s\<^sub>1 @ [\<tau>']; size \<tau>s\<^sub>1+1 = size is\<^sub>1\<rbrakk> \<Longrightarrow> \<turnstile> (is\<^sub>1 @ is\<^sub>2),xt\<^sub>1 [::] \<tau> # \<tau>s\<^sub>1 @ \<tau>' # \<tau>s\<^sub>2" (*<*)using wt_instrs_app2[where ?xt\<^sub>2.0 = "[]"] by (simp add:shift_def)(*>*) corollary (in TC2) wt_instrs_Cons3[simp]: "\<lbrakk> \<tau>s \<noteq> []; \<turnstile> [i],[] [::] [\<tau>,\<tau>']; \<turnstile> is,[] [::] \<tau>'#\<tau>s \<rbrakk> \<Longrightarrow> \<turnstile> (i # is),[] [::] \<tau> # \<tau>' # \<tau>s" (*<*) using wt_instrs_Cons[where ?xt = "[]"] by (simp add:shift_def) (*<*) declare nth_append[simp del] declare [[simproc del: list_to_set_comprehension]] (*>*) lemma (in TC2) wt_instrs_xapp[trans]: "\<lbrakk> \<turnstile> is\<^sub>1 @ is\<^sub>2, xt [::] \<tau>s\<^sub>1 @ ty\<^sub>i' (Class C # ST) E A # \<tau>s\<^sub>2; \<forall>\<tau> \<in> set \<tau>s\<^sub>1. \<forall>ST' LT'. \<tau> = Some(ST',LT') \<longrightarrow> size ST \<le> size ST' \<and> P \<turnstile> Some (drop (size ST' - size ST) ST',LT') \<le>' ty\<^sub>i' ST E A; size is\<^sub>1 = size \<tau>s\<^sub>1; is_class P C; size ST < mxs \<rbrakk> \<Longrightarrow> \<turnstile> is\<^sub>1 @ is\<^sub>2, xt @ [(0,size is\<^sub>1 - 1,C,size is\<^sub>1,size ST)] [::] \<tau>s\<^sub>1 @ ty\<^sub>i' (Class C # ST) E A # \<tau>s\<^sub>2" (*<*) apply(simp add:wt_instrs_def) apply(rule conjI) apply(clarsimp) apply arith apply clarsimp apply(erule allE, erule (1) impE) apply(clarsimp simp add: wt_instr_def app_def eff_def) apply(rule conjI) apply (thin_tac "\<forall>x\<in> A \<union> B. P x" for A B P) apply (thin_tac "\<forall>x\<in> A \<union> B. P x" for A B P) apply (clarsimp simp add: xcpt_app_def relevant_entries_def) apply (simp add: nth_append is_relevant_entry_def split!: if_splits) apply (drule_tac x="\<tau>s\<^sub>1!pc" in bspec) apply (blast intro: nth_mem) apply fastforce apply (rule conjI) apply clarsimp apply (erule disjE, blast) apply (erule disjE, blast) apply (clarsimp simp add: xcpt_eff_def relevant_entries_def split: if_split_asm) apply clarsimp apply (erule disjE, blast) apply (erule disjE, blast) apply (clarsimp simp add: xcpt_eff_def relevant_entries_def split: if_split_asm) apply (simp add: nth_append is_relevant_entry_def split: if_split_asm) apply (drule_tac x = "\<tau>s\<^sub>1!pc" in bspec) apply (blast intro: nth_mem) apply (fastforce simp add: ty\<^sub>i'_def) done declare [[simproc add: list_to_set_comprehension]] declare nth_append[simp] (*>*) lemma drop_Cons_Suc: "\<And>xs. 
drop n xs = y#ys \<Longrightarrow> drop (Suc n) xs = ys" apply (induct n) apply simp apply (simp add: drop_Suc) done lemma drop_mess: "\<lbrakk>Suc (length xs\<^sub>0) \<le> length xs; drop (length xs - Suc (length xs\<^sub>0)) xs = x # xs\<^sub>0\<rbrakk> \<Longrightarrow> drop (length xs - length xs\<^sub>0) xs = xs\<^sub>0" apply (cases xs) apply simp apply (simp add: Suc_diff_le) apply (case_tac "length list - length xs\<^sub>0") apply simp apply (simp add: drop_Cons_Suc) done (*<*) declare (in TC0) after_def[simp] pair_eq_ty\<^sub>i'_conv[simp] (*>*) lemma (in TC1) compT_ST_prefix: "\<And>E A ST\<^sub>0. \<lfloor>(ST,LT)\<rfloor> \<in> set(compT E A ST\<^sub>0 e) \<Longrightarrow> size ST\<^sub>0 \<le> size ST \<and> drop (size ST - size ST\<^sub>0) ST = ST\<^sub>0" and "\<And>E A ST\<^sub>0. \<lfloor>(ST,LT)\<rfloor> \<in> set(compTs E A ST\<^sub>0 es) \<Longrightarrow> size ST\<^sub>0 \<le> size ST \<and> drop (size ST - size ST\<^sub>0) ST = ST\<^sub>0" (*<*) proof(induct e and es rule: compT.induct compTs.induct) case (FAss e\<^sub>1 F D e\<^sub>2) moreover { let ?ST\<^sub>0 = "ty E e\<^sub>1 # ST\<^sub>0" fix A assume "\<lfloor>(ST, LT)\<rfloor> \<in> set (compT E A ?ST\<^sub>0 e\<^sub>2)" with FAss have "length ?ST\<^sub>0 \<le> length ST \<and> drop (size ST - size ?ST\<^sub>0) ST = ?ST\<^sub>0" by blast hence ?case by (clarsimp simp add: drop_mess) } ultimately show ?case by auto next case TryCatch thus ?case by auto next case Block thus ?case by auto next case Seq thus ?case by auto next case While thus ?case by auto next case Cond thus ?case by auto next case (Call e M es) moreover { let ?ST\<^sub>0 = "ty E e # ST\<^sub>0" fix A assume "\<lfloor>(ST, LT)\<rfloor> \<in> set (compTs E A ?ST\<^sub>0 es)" with Call have "length ?ST\<^sub>0 \<le> length ST \<and> drop (size ST - size ?ST\<^sub>0) ST = ?ST\<^sub>0" by blast hence ?case by (clarsimp simp add: drop_mess) } ultimately show ?case by auto next case (Cons_exp e es) moreover { let ?ST\<^sub>0 = "ty E e # ST\<^sub>0" fix A assume "\<lfloor>(ST, LT)\<rfloor> \<in> set (compTs E A ?ST\<^sub>0 es)" with Cons_exp have "length ?ST\<^sub>0 \<le> length ST \<and> drop (size ST - size ?ST\<^sub>0) ST = ?ST\<^sub>0" by blast hence ?case by (clarsimp simp add: drop_mess) } ultimately show ?case by auto next case (BinOp e\<^sub>1 bop e\<^sub>2) moreover { let ?ST\<^sub>0 = "ty E e\<^sub>1 # ST\<^sub>0" fix A assume "\<lfloor>(ST, LT)\<rfloor> \<in> set (compT E A ?ST\<^sub>0 e\<^sub>2)" with BinOp have "length ?ST\<^sub>0 \<le> length ST \<and> drop (size ST - size ?ST\<^sub>0) ST = ?ST\<^sub>0" by blast hence ?case by (clarsimp simp add: drop_mess) } ultimately show ?case by auto next case new thus ?case by auto next case Val thus ?case by auto next case Cast thus ?case by auto next case Var thus ?case by auto next case LAss thus ?case by auto next case throw thus ?case by auto next case FAcc thus ?case by auto next case Nil_exp thus ?case by auto qed declare (in TC0) after_def[simp del] pair_eq_ty\<^sub>i'_conv[simp del] (*>*) (* FIXME *) lemma fun_of_simp [simp]: "fun_of S x y = ((x,y) \<in> S)" (*<*) by (simp add: fun_of_def)(*>*) theorem (in TC2) compT_wt_instrs: "\<And>E T A ST. \<lbrakk> P,E \<turnstile>\<^sub>1 e :: T; \<D> e A; \<B> e (size E); size ST + max_stack e \<le> mxs; size E + max_vars e \<le> mxl \<rbrakk> \<Longrightarrow> \<turnstile> compE\<^sub>2 e, compxE\<^sub>2 e 0 (size ST) [::] ty\<^sub>i' ST E A # compT E A ST e @ [after E A ST e]" (*<*)(is "\<And>E T A ST. PROP ?P e E T A ST")(*>*) and "\<And>E Ts A ST. 
\<lbrakk> P,E \<turnstile>\<^sub>1 es[::]Ts; \<D>s es A; \<B>s es (size E); size ST + max_stacks es \<le> mxs; size E + max_varss es \<le> mxl \<rbrakk> \<Longrightarrow> let \<tau>s = ty\<^sub>i' ST E A # compTs E A ST es in \<turnstile> compEs\<^sub>2 es,compxEs\<^sub>2 es 0 (size ST) [::] \<tau>s \<and> last \<tau>s = ty\<^sub>i' (rev Ts @ ST) E (A \<squnion> \<A>s es)" (*<*) (is "\<And>E Ts A ST. PROP ?Ps es E Ts A ST") proof(induct e and es rule: compxE\<^sub>2.induct compxEs\<^sub>2.induct) case (TryCatch e\<^sub>1 C i e\<^sub>2) hence [simp]: "i = size E" by simp have wt\<^sub>1: "P,E \<turnstile>\<^sub>1 e\<^sub>1 :: T" and wt\<^sub>2: "P,E@[Class C] \<turnstile>\<^sub>1 e\<^sub>2 :: T" and "class": "is_class P C" using TryCatch by auto let ?A\<^sub>1 = "A \<squnion> \<A> e\<^sub>1" let ?A\<^sub>i = "A \<squnion> \<lfloor>{i}\<rfloor>" let ?E\<^sub>i = "E @ [Class C]" let ?\<tau> = "ty\<^sub>i' ST E A" let ?\<tau>s\<^sub>1 = "compT E A ST e\<^sub>1" let ?\<tau>\<^sub>1 = "ty\<^sub>i' (T#ST) E ?A\<^sub>1" let ?\<tau>\<^sub>2 = "ty\<^sub>i' (Class C#ST) E A" let ?\<tau>\<^sub>3 = "ty\<^sub>i' ST ?E\<^sub>i ?A\<^sub>i" let ?\<tau>s\<^sub>2 = "compT ?E\<^sub>i ?A\<^sub>i ST e\<^sub>2" let ?\<tau>\<^sub>2' = "ty\<^sub>i' (T#ST) ?E\<^sub>i (?A\<^sub>i \<squnion> \<A> e\<^sub>2)" let ?\<tau>' = "ty\<^sub>i' (T#ST) E (A \<squnion> \<A> e\<^sub>1 \<sqinter> (\<A> e\<^sub>2 \<ominus> i))" let ?go = "Goto (int(size(compE\<^sub>2 e\<^sub>2)) + 2)" have "PROP ?P e\<^sub>2 ?E\<^sub>i T ?A\<^sub>i ST" by fact hence "\<turnstile> compE\<^sub>2 e\<^sub>2,compxE\<^sub>2 e\<^sub>2 0 (size ST) [::] (?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2) @ [?\<tau>\<^sub>2']" using TryCatch.prems by(auto simp:after_def) also have "?A\<^sub>i \<squnion> \<A> e\<^sub>2 = (A \<squnion> \<A> e\<^sub>2) \<squnion> \<lfloor>{size E}\<rfloor>" by(fastforce simp:hyperset_defs) also have "P \<turnstile> ty\<^sub>i' (T#ST) ?E\<^sub>i \<dots> \<le>' ty\<^sub>i' (T#ST) E (A \<squnion> \<A> e\<^sub>2)" by(simp add:hyperset_defs ty\<^sub>l_incr ty\<^sub>i'_def) also have "P \<turnstile> \<dots> \<le>' ty\<^sub>i' (T#ST) E (A \<squnion> \<A> e\<^sub>1 \<sqinter> (\<A> e\<^sub>2 \<ominus> i))" by(auto intro!: ty\<^sub>l_antimono simp:hyperset_defs ty\<^sub>i'_def) also have "(?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2) @ [?\<tau>'] = ?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2 @ [?\<tau>']" by simp also have "\<turnstile> [Store i],[] [::] ?\<tau>\<^sub>2 # [] @ [?\<tau>\<^sub>3]" using TryCatch.prems by(auto simp:nth_list_update wt_defs ty\<^sub>i'_def ty\<^sub>l_def list_all2_conv_all_nth hyperset_defs) also have "[] @ (?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2 @ [?\<tau>']) = (?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2 @ [?\<tau>'])" by simp also have "P,T\<^sub>r,mxs,size(compE\<^sub>2 e\<^sub>2)+3,[] \<turnstile> ?go,0 :: ?\<tau>\<^sub>1#?\<tau>\<^sub>2#?\<tau>\<^sub>3#?\<tau>s\<^sub>2 @ [?\<tau>']" by (auto simp: hyperset_defs ty\<^sub>i'_def wt_defs nth_Cons nat_add_distrib fun_of_def intro: ty\<^sub>l_antimono list_all2_refl split:nat.split) also have "\<turnstile> compE\<^sub>2 e\<^sub>1,compxE\<^sub>2 e\<^sub>1 0 (size ST) [::] ?\<tau> # ?\<tau>s\<^sub>1 @ [?\<tau>\<^sub>1]" using TryCatch by(auto simp:after_def) also have "?\<tau> # ?\<tau>s\<^sub>1 @ ?\<tau>\<^sub>1 # ?\<tau>\<^sub>2 # ?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2 @ [?\<tau>'] = (?\<tau> # ?\<tau>s\<^sub>1 @ [?\<tau>\<^sub>1]) @ ?\<tau>\<^sub>2 # ?\<tau>\<^sub>3 # ?\<tau>s\<^sub>2 @ [?\<tau>']" by simp also have "compE\<^sub>2 e\<^sub>1 @ ?go # [Store i] @ compE\<^sub>2 e\<^sub>2 = (compE\<^sub>2 
e\<^sub>1 @ [?go]) @ (Store i # compE\<^sub>2 e\<^sub>2)" by simp also let "?Q \<tau>" = "\<forall>ST' LT'. \<tau> = \<lfloor>(ST', LT')\<rfloor> \<longrightarrow> size ST \<le> size ST' \<and> P \<turnstile> Some (drop (size ST' - size ST) ST',LT') \<le>' ty\<^sub>i' ST E A" { have "?Q (ty\<^sub>i' ST E A)" by (clarsimp simp add: ty\<^sub>i'_def) moreover have "?Q (ty\<^sub>i' (T # ST) E ?A\<^sub>1)" by (fastforce simp add: ty\<^sub>i'_def hyperset_defs intro!: ty\<^sub>l_antimono) moreover have "\<And>\<tau>. \<tau> \<in> set (compT E A ST e\<^sub>1) \<Longrightarrow> ?Q \<tau>" using TryCatch.prems by clarsimp (frule compT_ST_prefix, fastforce dest!: compT_LT_prefix simp add: ty\<^sub>i'_def) ultimately have "\<forall>\<tau>\<in>set (ty\<^sub>i' ST E A # compT E A ST e\<^sub>1 @ [ty\<^sub>i' (T # ST) E ?A\<^sub>1]). ?Q \<tau>" by auto } also from TryCatch.prems max_stack1[of e\<^sub>1] have "size ST + 1 \<le> mxs" by auto ultimately show ?case using wt\<^sub>1 wt\<^sub>2 TryCatch.prems "class" by (simp add:after_def) next case new thus ?case by(auto simp add:after_def wt_New) next case (BinOp e\<^sub>1 bop e\<^sub>2) let ?op = "case bop of Eq \<Rightarrow> [CmpEq] | Add \<Rightarrow> [IAdd]" have T: "P,E \<turnstile>\<^sub>1 e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2 :: T" by fact then obtain T\<^sub>1 T\<^sub>2 where T\<^sub>1: "P,E \<turnstile>\<^sub>1 e\<^sub>1 :: T\<^sub>1" and T\<^sub>2: "P,E \<turnstile>\<^sub>1 e\<^sub>2 :: T\<^sub>2" and bopT: "case bop of Eq \<Rightarrow> (P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<or> P \<turnstile> T\<^sub>2 \<le> T\<^sub>1) \<and> T = Boolean | Add \<Rightarrow> T\<^sub>1 = Integer \<and> T\<^sub>2 = Integer \<and> T = Integer" by auto let ?A\<^sub>1 = "A \<squnion> \<A> e\<^sub>1" let ?A\<^sub>2 = "?A\<^sub>1 \<squnion> \<A> e\<^sub>2" let ?\<tau> = "ty\<^sub>i' ST E A" let ?\<tau>s\<^sub>1 = "compT E A ST e\<^sub>1" let ?\<tau>\<^sub>1 = "ty\<^sub>i' (T\<^sub>1#ST) E ?A\<^sub>1" let ?\<tau>s\<^sub>2 = "compT E ?A\<^sub>1 (T\<^sub>1#ST) e\<^sub>2" let ?\<tau>\<^sub>2 = "ty\<^sub>i' (T\<^sub>2#T\<^sub>1#ST) E ?A\<^sub>2" let ?\<tau>' = "ty\<^sub>i' (T#ST) E ?A\<^sub>2" from bopT have "\<turnstile> ?op,[] [::] [?\<tau>\<^sub>2,?\<tau>']" by (cases bop) (auto simp add: wt_CmpEq wt_IAdd) also have "PROP ?P e\<^sub>2 E T\<^sub>2 ?A\<^sub>1 (T\<^sub>1#ST)" by fact with BinOp.prems T\<^sub>2 have "\<turnstile> compE\<^sub>2 e\<^sub>2, compxE\<^sub>2 e\<^sub>2 0 (size (T\<^sub>1#ST)) [::] ?\<tau>\<^sub>1#?\<tau>s\<^sub>2@[?\<tau>\<^sub>2]" by (auto simp: after_def) also from BinOp T\<^sub>1 have "\<turnstile> compE\<^sub>2 e\<^sub>1, compxE\<^sub>2 e\<^sub>1 0 (size ST) [::] ?\<tau>#?\<tau>s\<^sub>1@[?\<tau>\<^sub>1]" by (auto simp: after_def) finally show ?case using T T\<^sub>1 T\<^sub>2 by (simp add: after_def hyperUn_assoc) next case (Cons_exp e es) have "P,E \<turnstile>\<^sub>1 e # es [::] Ts" by fact then obtain T\<^sub>e Ts' where T\<^sub>e: "P,E \<turnstile>\<^sub>1 e :: T\<^sub>e" and Ts': "P,E \<turnstile>\<^sub>1 es [::] Ts'" and Ts: "Ts = T\<^sub>e#Ts'" by auto let ?A\<^sub>e = "A \<squnion> \<A> e" let ?\<tau> = "ty\<^sub>i' ST E A" let ?\<tau>s\<^sub>e = "compT E A ST e" let ?\<tau>\<^sub>e = "ty\<^sub>i' (T\<^sub>e#ST) E ?A\<^sub>e" let ?\<tau>s' = "compTs E ?A\<^sub>e (T\<^sub>e#ST) es" let ?\<tau>s = "?\<tau> # ?\<tau>s\<^sub>e @ (?\<tau>\<^sub>e # ?\<tau>s')" have Ps: "PROP ?Ps es E Ts' ?A\<^sub>e (T\<^sub>e#ST)" by fact with Cons_exp.prems T\<^sub>e Ts' have "\<turnstile> compEs\<^sub>2 es, compxEs\<^sub>2 es 0 
(size (T\<^sub>e#ST)) [::] ?\<tau>\<^sub>e#?\<tau>s'" by (simp add: after_def) also from Cons_exp T\<^sub>e have "\<turnstile> compE\<^sub>2 e, compxE\<^sub>2 e 0 (size ST) [::] ?\<tau>#?\<tau>s\<^sub>e@[?\<tau>\<^sub>e]" by (auto simp: after_def) moreover from Ps Cons_exp.prems T\<^sub>e Ts' Ts have "last ?\<tau>s = ty\<^sub>i' (rev Ts@ST) E (?A\<^sub>e \<squnion> \<A>s es)" by simp ultimately show ?case using T\<^sub>e by (simp add: after_def hyperUn_assoc) next case (FAss e\<^sub>1 F D e\<^sub>2) hence Void: "P,E \<turnstile>\<^sub>1 e\<^sub>1\<bullet>F{D} := e\<^sub>2 :: Void" by auto then obtain C T T' where C: "P,E \<turnstile>\<^sub>1 e\<^sub>1 :: Class C" and sees: "P \<turnstile> C sees F:T in D" and T': "P,E \<turnstile>\<^sub>1 e\<^sub>2 :: T'" and T'_T: "P \<turnstile> T' \<le> T" by auto let ?A\<^sub>1 = "A \<squnion> \<A> e\<^sub>1" let ?A\<^sub>2 = "?A\<^sub>1 \<squnion> \<A> e\<^sub>2" let ?\<tau> = "ty\<^sub>i' ST E A" let ?\<tau>s\<^sub>1 = "compT E A ST e\<^sub>1" let ?\<tau>\<^sub>1 = "ty\<^sub>i' (Class C#ST) E ?A\<^sub>1" let ?\<tau>s\<^sub>2 = "compT E ?A\<^sub>1 (Class C#ST) e\<^sub>2" let ?\<tau>\<^sub>2 = "ty\<^sub>i' (T'#Class C#ST) E ?A\<^sub>2" let ?\<tau>\<^sub>3 = "ty\<^sub>i' ST E ?A\<^sub>2" let ?\<tau>' = "ty\<^sub>i' (Void#ST) E ?A\<^sub>2" from FAss.prems sees T'_T have "\<turnstile> [Putfield F D,Push Unit],[] [::] [?\<tau>\<^sub>2,?\<tau>\<^sub>3,?\<tau>']" by (fastforce simp add: wt_Push wt_Put) also have "PROP ?P e\<^sub>2 E T' ?A\<^sub>1 (Class C#ST)" by fact with FAss.prems T' have "\<turnstile> compE\<^sub>2 e\<^sub>2, compxE\<^sub>2 e\<^sub>2 0 (size ST+1) [::] ?\<tau>\<^sub>1#?\<tau>s\<^sub>2@[?\<tau>\<^sub>2]" by (auto simp add: after_def hyperUn_assoc) also from FAss C have "\<turnstile> compE\<^sub>2 e\<^sub>1, compxE\<^sub>2 e\<^sub>1 0 (size ST) [::] ?\<tau>#?\<tau>s\<^sub>1@[?\<tau>\<^sub>1]" by (auto simp add: after_def) finally show ?case using Void C T' by (simp add: after_def hyperUn_assoc) next case Val thus ?case by(auto simp:after_def wt_Push) next case Cast thus ?case by (auto simp:after_def wt_Cast) next case (Block i T\<^sub>i e) let ?\<tau>s = "ty\<^sub>i' ST E A # compT (E @ [T\<^sub>i]) (A\<ominus>i) ST e" have IH: "PROP ?P e (E@[T\<^sub>i]) T (A\<ominus>i) ST" by fact hence "\<turnstile> compE\<^sub>2 e, compxE\<^sub>2 e 0 (size ST) [::] ?\<tau>s @ [ty\<^sub>i' (T#ST) (E@[T\<^sub>i]) (A\<ominus>(size E) \<squnion> \<A> e)]" using Block.prems by (auto simp add: after_def) also have "P \<turnstile> ty\<^sub>i' (T # ST) (E@[T\<^sub>i]) (A \<ominus> size E \<squnion> \<A> e) \<le>' ty\<^sub>i' (T # ST) (E@[T\<^sub>i]) ((A \<squnion> \<A> e) \<ominus> size E)" by(auto simp add:hyperset_defs intro: ty\<^sub>i'_antimono) also have "\<dots> = ty\<^sub>i' (T # ST) E (A \<squnion> \<A> e)" by simp also have "P \<turnstile> \<dots> \<le>' ty\<^sub>i' (T # ST) E (A \<squnion> (\<A> e \<ominus> i))" by(auto simp add:hyperset_defs intro: ty\<^sub>i'_antimono) finally show ?case using Block.prems by(simp add: after_def) next case Var thus ?case by(auto simp:after_def wt_Load) next case FAcc thus ?case by(auto simp:after_def wt_Get) next case (LAss i e) thus ?case using max_stack1[of e] by(auto simp: hyper_insert_comm after_def wt_Store wt_Push) next case Nil_exp thus ?case by auto next case throw thus ?case by(auto simp add: after_def wt_Throw) next case (While e c) obtain Tc where wte: "P,E \<turnstile>\<^sub>1 e :: Boolean" and wtc: "P,E \<turnstile>\<^sub>1 c :: Tc" and [simp]: "T = Void" using While by auto have [simp]: "ty E (while (e) 
c) = Void" using While by simp let ?A\<^sub>0 = "A \<squnion> \<A> e" let ?A\<^sub>1 = "?A\<^sub>0 \<squnion> \<A> c" let ?\<tau> = "ty\<^sub>i' ST E A" let ?\<tau>s\<^sub>e = "compT E A ST e" let ?\<tau>\<^sub>e = "ty\<^sub>i' (Boolean#ST) E ?A\<^sub>0" let ?\<tau>\<^sub>1 = "ty\<^sub>i' ST E ?A\<^sub>0" let ?\<tau>s\<^sub>c = "compT E ?A\<^sub>0 ST c" let ?\<tau>\<^sub>c = "ty\<^sub>i' (Tc#ST) E ?A\<^sub>1" let ?\<tau>\<^sub>2 = "ty\<^sub>i' ST E ?A\<^sub>1" let ?\<tau>' = "ty\<^sub>i' (Void#ST) E ?A\<^sub>0" let ?\<tau>s = "(?\<tau> # ?\<tau>s\<^sub>e @ [?\<tau>\<^sub>e]) @ ?\<tau>\<^sub>1 # ?\<tau>s\<^sub>c @ [?\<tau>\<^sub>c, ?\<tau>\<^sub>2, ?\<tau>\<^sub>1, ?\<tau>']" have "\<turnstile> [],[] [::] [] @ ?\<tau>s" by(simp add:wt_instrs_def) also have "PROP ?P e E Boolean A ST" by fact hence "\<turnstile> compE\<^sub>2 e,compxE\<^sub>2 e 0 (size ST) [::] ?\<tau> # ?\<tau>s\<^sub>e @ [?\<tau>\<^sub>e]" using While.prems by (auto simp:after_def) also have "[] @ ?\<tau>s = (?\<tau> # ?\<tau>s\<^sub>e) @ ?\<tau>\<^sub>e # ?\<tau>\<^sub>1 # ?\<tau>s\<^sub>c @ [?\<tau>\<^sub>c,?\<tau>\<^sub>2,?\<tau>\<^sub>1,?\<tau>']" by simp also let ?n\<^sub>e = "size(compE\<^sub>2 e)" let ?n\<^sub>c = "size(compE\<^sub>2 c)" let ?if = "IfFalse (int ?n\<^sub>c + 3)" have "\<turnstile> [?if],[] [::] ?\<tau>\<^sub>e # ?\<tau>\<^sub>1 # ?\<tau>s\<^sub>c @ [?\<tau>\<^sub>c, ?\<tau>\<^sub>2, ?\<tau>\<^sub>1, ?\<tau>']" by(simp add: wt_instr_Cons wt_instr_append wt_IfFalse nat_add_distrib split: nat_diff_split) also have "(?\<tau> # ?\<tau>s\<^sub>e) @ (?\<tau>\<^sub>e # ?\<tau>\<^sub>1 # ?\<tau>s\<^sub>c @ [?\<tau>\<^sub>c, ?\<tau>\<^sub>2, ?\<tau>\<^sub>1, ?\<tau>']) = ?\<tau>s" by simp also have "PROP ?P c E Tc ?A\<^sub>0 ST" by fact hence "\<turnstile> compE\<^sub>2 c,compxE\<^sub>2 c 0 (size ST) [::] ?\<tau>\<^sub>1 # ?\<tau>s\<^sub>c @ [?\<tau>\<^sub>c]" using While.prems wtc by (auto simp:after_def) also have "?\<tau>s = (?\<tau> # ?\<tau>s\<^sub>e @ [?\<tau>\<^sub>e,?\<tau>\<^sub>1] @ ?\<tau>s\<^sub>c) @ [?\<tau>\<^sub>c,?\<tau>\<^sub>2,?\<tau>\<^sub>1,?\<tau>']" by simp also have "\<turnstile> [Pop],[] [::] [?\<tau>\<^sub>c, ?\<tau>\<^sub>2]" by(simp add:wt_Pop) also have "(?\<tau> # ?\<tau>s\<^sub>e @ [?\<tau>\<^sub>e,?\<tau>\<^sub>1] @ ?\<tau>s\<^sub>c) @ [?\<tau>\<^sub>c,?\<tau>\<^sub>2,?\<tau>\<^sub>1,?\<tau>'] = ?\<tau>s" by simp also let ?go = "Goto (-int(?n\<^sub>c+?n\<^sub>e+2))" have "P \<turnstile> ?\<tau>\<^sub>2 \<le>' ?\<tau>" by(fastforce intro: ty\<^sub>i'_antimono simp: hyperset_defs) hence "P,T\<^sub>r,mxs,size ?\<tau>s,[] \<turnstile> ?go,?n\<^sub>e+?n\<^sub>c+2 :: ?\<tau>s" by(simp add: wt_Goto split: nat_diff_split) also have "?\<tau>s = (?\<tau> # ?\<tau>s\<^sub>e @ [?\<tau>\<^sub>e,?\<tau>\<^sub>1] @ ?\<tau>s\<^sub>c @ [?\<tau>\<^sub>c, ?\<tau>\<^sub>2]) @ [?\<tau>\<^sub>1, ?\<tau>']" by simp also have "\<turnstile> [Push Unit],[] [::] [?\<tau>\<^sub>1,?\<tau>']" using While.prems max_stack1[of c] by(auto simp add:wt_Push) finally show ?case using wtc wte by (simp add:after_def) next case (Cond e e\<^sub>1 e\<^sub>2) obtain T\<^sub>1 T\<^sub>2 where wte: "P,E \<turnstile>\<^sub>1 e :: Boolean" and wt\<^sub>1: "P,E \<turnstile>\<^sub>1 e\<^sub>1 :: T\<^sub>1" and wt\<^sub>2: "P,E \<turnstile>\<^sub>1 e\<^sub>2 :: T\<^sub>2" and sub\<^sub>1: "P \<turnstile> T\<^sub>1 \<le> T" and sub\<^sub>2: "P \<turnstile> T\<^sub>2 \<le> T" using Cond by auto have [simp]: "ty E (if (e) e\<^sub>1 else e\<^sub>2) = T" using Cond by simp let ?A\<^sub>0 = "A \<squnion> \<A> e" let ?A\<^sub>2 = 
"?A\<^sub>0 \<squnion> \<A> e\<^sub>2" let ?A\<^sub>1 = "?A\<^sub>0 \<squnion> \<A> e\<^sub>1" let ?A' = "?A\<^sub>0 \<squnion> \<A> e\<^sub>1 \<sqinter> \<A> e\<^sub>2" let ?\<tau>\<^sub>2 = "ty\<^sub>i' ST E ?A\<^sub>0" let ?\<tau>' = "ty\<^sub>i' (T#ST) E ?A'" let ?\<tau>s\<^sub>2 = "compT E ?A\<^sub>0 ST e\<^sub>2" have "PROP ?P e\<^sub>2 E T\<^sub>2 ?A\<^sub>0 ST" by fact hence "\<turnstile> compE\<^sub>2 e\<^sub>2, compxE\<^sub>2 e\<^sub>2 0 (size ST) [::] (?\<tau>\<^sub>2#?\<tau>s\<^sub>2) @ [ty\<^sub>i' (T\<^sub>2#ST) E ?A\<^sub>2]" using Cond.prems wt\<^sub>2 by(auto simp add:after_def) also have "P \<turnstile> ty\<^sub>i' (T\<^sub>2#ST) E ?A\<^sub>2 \<le>' ?\<tau>'" using sub\<^sub>2 by(auto simp add: hyperset_defs ty\<^sub>i'_def intro!: ty\<^sub>l_antimono) also let ?\<tau>\<^sub>3 = "ty\<^sub>i' (T\<^sub>1 # ST) E ?A\<^sub>1" let ?g\<^sub>2 = "Goto(int (size (compE\<^sub>2 e\<^sub>2) + 1))" from sub\<^sub>1 have "P,T\<^sub>r,mxs,size(compE\<^sub>2 e\<^sub>2)+2,[] \<turnstile> ?g\<^sub>2,0 :: ?\<tau>\<^sub>3#(?\<tau>\<^sub>2#?\<tau>s\<^sub>2)@[?\<tau>']" by(auto simp: hyperset_defs wt_defs nth_Cons ty\<^sub>i'_def split:nat.split intro!: ty\<^sub>l_antimono) also let ?\<tau>s\<^sub>1 = "compT E ?A\<^sub>0 ST e\<^sub>1" have "PROP ?P e\<^sub>1 E T\<^sub>1 ?A\<^sub>0 ST" by fact hence "\<turnstile> compE\<^sub>2 e\<^sub>1,compxE\<^sub>2 e\<^sub>1 0 (size ST) [::] ?\<tau>\<^sub>2 # ?\<tau>s\<^sub>1 @ [?\<tau>\<^sub>3]" using Cond.prems wt\<^sub>1 by(auto simp add:after_def) also let ?\<tau>s\<^sub>1\<^sub>2 = "?\<tau>\<^sub>2 # ?\<tau>s\<^sub>1 @ ?\<tau>\<^sub>3 # (?\<tau>\<^sub>2 # ?\<tau>s\<^sub>2) @ [?\<tau>']" let ?\<tau>\<^sub>1 = "ty\<^sub>i' (Boolean#ST) E ?A\<^sub>0" let ?g\<^sub>1 = "IfFalse(int (size (compE\<^sub>2 e\<^sub>1) + 2))" let ?code = "compE\<^sub>2 e\<^sub>1 @ ?g\<^sub>2 # compE\<^sub>2 e\<^sub>2" have "\<turnstile> [?g\<^sub>1],[] [::] [?\<tau>\<^sub>1] @ ?\<tau>s\<^sub>1\<^sub>2" by(simp add: wt_IfFalse nat_add_distrib split:nat_diff_split) also (wt_instrs_ext2) have "[?\<tau>\<^sub>1] @ ?\<tau>s\<^sub>1\<^sub>2 = ?\<tau>\<^sub>1 # ?\<tau>s\<^sub>1\<^sub>2" by simp also let ?\<tau> = "ty\<^sub>i' ST E A" have "PROP ?P e E Boolean A ST" by fact hence "\<turnstile> compE\<^sub>2 e, compxE\<^sub>2 e 0 (size ST) [::] ?\<tau> # compT E A ST e @ [?\<tau>\<^sub>1]" using Cond.prems wte by(auto simp add:after_def) finally show ?case using wte wt\<^sub>1 wt\<^sub>2 by(simp add:after_def hyperUn_assoc) next case (Call e M es) obtain C D Ts m Ts' where C: "P,E \<turnstile>\<^sub>1 e :: Class C" and "method": "P \<turnstile> C sees M:Ts \<rightarrow> T = m in D" and wtes: "P,E \<turnstile>\<^sub>1 es [::] Ts'" and subs: "P \<turnstile> Ts' [\<le>] Ts" using Call.prems by auto from wtes have same_size: "size es = size Ts'" by(rule WTs\<^sub>1_same_size) let ?A\<^sub>0 = "A \<squnion> \<A> e" let ?A\<^sub>1 = "?A\<^sub>0 \<squnion> \<A>s es" let ?\<tau> = "ty\<^sub>i' ST E A" let ?\<tau>s\<^sub>e = "compT E A ST e" let ?\<tau>\<^sub>e = "ty\<^sub>i' (Class C # ST) E ?A\<^sub>0" let ?\<tau>s\<^sub>e\<^sub>s = "compTs E ?A\<^sub>0 (Class C # ST) es" let ?\<tau>\<^sub>1 = "ty\<^sub>i' (rev Ts' @ Class C # ST) E ?A\<^sub>1" let ?\<tau>' = "ty\<^sub>i' (T # ST) E ?A\<^sub>1" have "\<turnstile> [Invoke M (size es)],[] [::] [?\<tau>\<^sub>1,?\<tau>']" by(rule wt_Invoke[OF same_size "method" subs]) also have "PROP ?Ps es E Ts' ?A\<^sub>0 (Class C # ST)" by fact hence "\<turnstile> compEs\<^sub>2 es,compxEs\<^sub>2 es 0 (size ST+1) [::] ?\<tau>\<^sub>e # ?\<tau>s\<^sub>e\<^sub>s" 
"last (?\<tau>\<^sub>e # ?\<tau>s\<^sub>e\<^sub>s) = ?\<tau>\<^sub>1" using Call.prems wtes by(auto simp add:after_def) also have "(?\<tau>\<^sub>e # ?\<tau>s\<^sub>e\<^sub>s) @ [?\<tau>'] = ?\<tau>\<^sub>e # ?\<tau>s\<^sub>e\<^sub>s @ [?\<tau>']" by simp also have "\<turnstile> compE\<^sub>2 e,compxE\<^sub>2 e 0 (size ST) [::] ?\<tau> # ?\<tau>s\<^sub>e @ [?\<tau>\<^sub>e]" using Call C by(auto simp add:after_def) finally show ?case using Call.prems C by(simp add:after_def hyperUn_assoc) next case Seq thus ?case by(auto simp:after_def) (fastforce simp:wt_Push wt_Pop hyperUn_assoc intro:wt_instrs_app2 wt_instrs_Cons) qed (*>*) lemma [simp]: "types (compP f P) = types P" (*<*)by auto(*>*) lemma [simp]: "states (compP f P) mxs mxl = states P mxs mxl" (*<*)by (simp add: JVM_states_unfold)(*>*) lemma [simp]: "app\<^sub>i (i, compP f P, pc, mpc, T, \<tau>) = app\<^sub>i (i, P, pc, mpc, T, \<tau>)" (*<*) apply (cases \<tau>) apply (cases i) apply auto apply (fastforce dest!: sees_method_compPD) apply (force dest: sees_method_compP) done (*>*) lemma [simp]: "is_relevant_entry (compP f P) i = is_relevant_entry P i" (*<*) apply (rule ext)+ apply (unfold is_relevant_entry_def) apply (cases i) apply auto done (*>*) lemma [simp]: "relevant_entries (compP f P) i pc xt = relevant_entries P i pc xt" (*<*) by (simp add: relevant_entries_def)(*>*) lemma [simp]: "app i (compP f P) mpc T pc mxl xt \<tau> = app i P mpc T pc mxl xt \<tau>" (*<*) apply (simp add: app_def xcpt_app_def eff_def xcpt_eff_def norm_eff_def) apply (fastforce simp add: image_def) done (*>*) lemma [simp]: "app i P mpc T pc mxl xt \<tau> \<Longrightarrow> eff i (compP f P) pc xt \<tau> = eff i P pc xt \<tau>" (*<*) apply (clarsimp simp add: eff_def norm_eff_def xcpt_eff_def app_def) apply (cases i) apply auto done (*>*) lemma [simp]: "subtype (compP f P) = subtype P" (*<*) apply (rule ext)+ apply (simp) done (*>*) lemma [simp]: "compP f P \<turnstile> \<tau> \<le>' \<tau>' = P \<turnstile> \<tau> \<le>' \<tau>'" (*<*) by (simp add: sup_state_opt_def sup_state_def sup_ty_opt_def)(*>*) lemma [simp]: "compP f P,T,mpc,mxl,xt \<turnstile> i,pc :: \<tau>s = P,T,mpc,mxl,xt \<turnstile> i,pc :: \<tau>s" (*<*)by (simp add: wt_instr_def cong: conj_cong)(*>*) declare TC1.compT_sizes[simp] TC0.ty_def2[simp] context TC2 begin lemma compT_method: fixes e and A and C and Ts and mxl\<^sub>0 defines [simp]: "E \<equiv> Class C # Ts" and [simp]: "A \<equiv> \<lfloor>{..size Ts}\<rfloor>" and [simp]: "A' \<equiv> A \<squnion> \<A> e" and [simp]: "mxl\<^sub>0 \<equiv> max_vars e" assumes mxs: "max_stack e = mxs" and mxl: "Suc (length Ts + max_vars e) = mxl" assumes assm: "wf_prog p P" "P,E \<turnstile>\<^sub>1 e :: T" "\<D> e A" "\<B> e (size E)" "set E \<subseteq> types P" "P \<turnstile> T \<le> T\<^sub>r" shows "wt_method (compP\<^sub>2 P) C Ts T\<^sub>r mxs mxl\<^sub>0 (compE\<^sub>2 e @ [Return]) (compxE\<^sub>2 e 0 0) (ty\<^sub>i' [] E A # compT\<^sub>a E A [] e)" (*<*) using assms apply (simp add: wt_method_def compT\<^sub>a_def after_def mxl) apply (rule conjI) apply (simp add: check_types_def OK_ty\<^sub>i'_in_statesI) apply (rule conjI) apply (drule (1) WT\<^sub>1_is_type) apply simp apply (insert max_stack1 [of e]) apply (rule OK_ty\<^sub>i'_in_statesI) apply (simp_all add: mxs)[3] apply (erule compT_states(1)) apply assumption apply (simp_all add: mxs mxl)[4] apply (rule conjI) apply (auto simp add: wt_start_def ty\<^sub>i'_def ty\<^sub>l_def list_all2_conv_all_nth nth_Cons mxl split: nat.split dest: less_antisym)[1] apply (frule (1) 
TC2.compT_wt_instrs [of P _ _ _ _ "[]" "max_stack e" "Suc (length Ts + max_vars e)" T\<^sub>r]) apply simp_all apply (clarsimp simp: after_def) apply hypsubst_thin apply (rule conjI) apply (clarsimp simp: wt_instrs_def after_def mxl mxs) apply clarsimp apply (drule (1) less_antisym) apply (clarsimp simp: wt_defs xcpt_app_pcs xcpt_eff_pcs ty\<^sub>i'_def) done (*>*) end definition compTP :: "J\<^sub>1_prog \<Rightarrow> ty\<^sub>P" where "compTP P C M = ( let (D,Ts,T,e) = method P C M; E = Class C # Ts; A = \<lfloor>{..size Ts}\<rfloor>; mxl = 1 + size Ts + max_vars e in (TC0.ty\<^sub>i' mxl [] E A # TC1.compT\<^sub>a P mxl E A [] e))" theorem wt_compP\<^sub>2: "wf_J\<^sub>1_prog P \<Longrightarrow> wf_jvm_prog (compP\<^sub>2 P)" (*<*) apply (simp add: wf_jvm_prog_def wf_jvm_prog_phi_def) apply(rule_tac x = "compTP P" in exI) apply (rule wf_prog_compPI) prefer 2 apply assumption apply (clarsimp simp add: wf_mdecl_def) apply (simp add: compTP_def) apply (rule TC2.compT_method [simplified]) apply (rule refl) apply (rule refl) apply assumption apply assumption apply assumption apply assumption apply (drule (1) sees_wf_mdecl) apply (simp add: wf_mdecl_def) apply (blast intro: sees_method_is_class) apply assumption done (*>*) theorem wt_J2JVM: "wf_J_prog P \<Longrightarrow> wf_jvm_prog (J2JVM P)" (*<*) apply(simp only:o_def J2JVM_def) apply(blast intro:wt_compP\<^sub>2 compP\<^sub>1_pres_wf) done end
[STATEMENT] lemma strip_comb_empty: "snd (strip_comb t) = [] \<Longrightarrow> fst (strip_comb t) = t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. snd (strip_comb t) = [] \<Longrightarrow> fst (strip_comb t) = t [PROOF STEP] by (induction t rule: strip_comb_induct) (auto split: prod.splits)
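/- A hedged Lean 3 rendition of the Isabelle lemma above, for illustration only: the
   inductive type `tm`, the function `strip_comb` and the lemma name are hypothetical
   stand-ins, not the original development's definitions. `strip_comb` peels all
   applications off a term, returning the head together with the stripped arguments;
   the lemma says that when no arguments come off, the head is the term itself. -/
import data.list.basic

inductive tm
| var : ℕ → tm
| app : tm → tm → tm

open tm

-- strip the spine of applications, collecting the arguments left to right
def strip_comb : tm → tm × list tm
| (var n)   := (var n, [])
| (app f x) := ((strip_comb f).1, (strip_comb f).2 ++ [x])

-- if stripping produced no arguments, the term was already its own head
lemma strip_comb_empty : ∀ t : tm, (strip_comb t).2 = [] → (strip_comb t).1 = t
| (var n)   _ := rfl
| (app f x) h :=
  begin
    -- after unfolding, the stripped argument list ends in `x`, so it cannot be `[]`
    simp only [strip_comb] at h,
    exact absurd (list.append_eq_nil.mp h).2 (list.cons_ne_nil x []),
  end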
import game.basic import list.modify.basic import tactic.fin_cases open list namespace game /--Two games that are equal except one entry in one sublist of components differs by at most d-/ structure modify {n : ℕ} (G1 G2 : game n) (d : ℤ) := (j : fin n) -- kind of the component they differ in (there are n kinds of components) (hj : ∀ i : fin n, i ≠ j → G1.f i = G2.f i) /- if we are NOT looking at the kind of component they differ in, the lists of such components in both games are the same-/ (hl : list.modify (G1.f j) (G2.f j) d) /- if we are looking at the kind of component they differ in, the lists of such components in both games differ in one component by at most d-/ /--For two games that are equal except one pair of components of the same type differs by at most d, both games have the same number of components of each type-/ lemma eq_list_lengths_of_modify {n : ℕ} {G1 G2 : game n} {d : ℤ} (h : modify G1 G2 d): ∀ (i : fin n), length (G1.f i) = length (G2.f i) := begin intro i, /- either the games do not differ in a component of kind i, or they do-/ by_cases p1 : i ≠ h.j, /- if they do not differ in a component of kind i, then by h.hj the lists are the same, hence their lengths as well-/ {rw h.hj i p1}, /- if they do differ in a component of kind i, then by h.hl the lists are modified lists, hence their lengths are the same by a lemma in list.modify.basic-/ {push_neg at p1, rw p1, exact eq_size_of_modify_list h.hl}, end end game open game /--if G1 is a version of G2 modified in one component by at most d, then G2 is a version of G1 modified in one component by at most d-/ def modify.symm {G1 G2 : game 2} {d : ℤ} (m : modify G1 G2 d) : modify G2 G1 d := { j := m.j,-- they still differ in the same components hj := begin intros i h, rw eq_comm, exact m.hj i h, end, --follows from symmetry of equality in m.hj hl := begin apply list.modify.symm, exact m.hl, end, -- follows from symmetry of list.modify } /--games modified in one component have the same size-/ theorem eq_size_of_modify {G1 G2 : game 2} {d : ℤ} (h : game.modify G1 G2 d) : G1.size2 = G2.size2 := begin cases h with j hj hl, --split h into its fields unfold size2, -- size Gn = length (Gn.f 0) + length (Gn.f 1) for n = 1, 2 fin_cases j, -- split into cases corresponding to possible values of j in fin 2 /- in both cases we prove the number of chains is the same (h0) and the number of loops is the same (h1) (or the lists are the same, which implies the above) then clearly the sizes of the games are the same-/ -- j = ⟨0,_⟩ { have h0 := eq_size_of_modify_list hl, -- length (G1.f ⟨0, _⟩) = length (G2.f ⟨0, _⟩) change length (G1.f 0) = length (G2.f 0) at h0, have h1 : G1.f 1 = G2.f 1, exact hj ⟨1, dec_trivial⟩ dec_trivial, -- as we do not modify a loop, by hj rw [h0, h1], }, -- j = ⟨1,_⟩ { have h0 : G1.f 0 = G2.f 0, exact hj ⟨0, dec_trivial⟩ dec_trivial, -- as we do not modify a chain, by hj have h1 := eq_size_of_modify_list hl, -- length (G1.f ⟨1, _⟩) = length (G2.f ⟨1, _⟩) change length (G1.f 1) = length (G2.f 1) at h1, rw [h0, h1], } end /--If you remove some component that is not the modified one from both modified games, you get a modified game-/ def game.remove_of_modify {G1 G2 : game 2} {d : ℤ } (p : modify G1 G2 d) (j : fin 2) (i1 : fin (G1.f j).length) (i2 : fin (G2.f j).length) (hi : i1.val = i2.val)(h : j ≠ p.j ∨ (j = p.j ∧ i1.val ≠ p.hl.n)): modify (G1.remove j i1) (G2.remove j i2) d := begin split, -- split goal into its fields -- FIELD j swap 3, {exact p.j}, -- they still differ in the same kind of component 
-- FIELD hj {intros x x_neq, -- say we are looking at a kind x of component, that is not p.j unfold game.remove, show dite (x = j) (λ (h : x = j), remove_nth (G1.f j) (i1.val)) (λ (h : ¬x = j), G1.f x) = dite (x = j) (λ (h : x = j), remove_nth (G2.f j) (i2.val)) (λ (h : ¬x = j), G2.f x), split_ifs with ite, -- either x = j or ¬ (x = j) /- in both cases by h.hj, G1.f x and G2.f x are the same because of x_neq-/ -- if-case (named ite) : x = j {rw hi, -- i1.val and i2.val are the same rw ite at x_neq, rw p.hj j x_neq}, -- else-case (named ite) : ¬ (x = j) { exact p.hj x x_neq,}, }, -- FIELD hl { unfold game.remove, -- in the following, list.modify is meant show modify (dite (p.j = j) (λ (h : p.j = j), remove_nth (G1.f j) (i1.val)) (λ (h : ¬p.j = j), G1.f (p.j))) (dite (p.j = j) (λ (h : p.j = j), remove_nth (G2.f j) (i2.val)) (λ (h : ¬p.j = j), G2.f (p.j))) d, split_ifs with ite, -- either p.j = j or ¬ (p.j = j) -- if-case (named ite) : p.j = j {rw ← hi, rw eq_comm at ite, simp only [ite], have pj2 : j = p.j ∧ i1.val ≠ p.hl.n, --this is true because of h and ite {finish}, /- hence i1.val ≠ p.hl.n, which together with p.hl is all we need to be able to use list.modify_remove_nth (see list.modify.basic)-/ exact list.modify_remove_nth p.hl i1.val pj2.right,}, -- else-case (named ite) : ¬ (p.j = j) {exact p.hl}, -- this is exactly what p.hl says }, end /--game.remove_of_modify with roles of G1 and G2 switched-/ def game.remove_of_modify_symm{G1 G2 : game 2} {d : ℤ } (p : modify G1 G2 d) (j : fin 2) (i1 : fin (G1.f j).length) (i2 : fin (G2.f j).length) (hi : i1.val = i2.val)(h : j ≠ p.j ∨ (j = p.j ∧ i1.val ≠ p.hl.n)): modify (G2.remove j i2) (G1.remove j i1) d := by exact modify.symm (game.remove_of_modify p j i1 i2 hi h)
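/- A minimal, self-contained illustration of the counting fact behind
   `eq_list_lengths_of_modify` above: changing a single entry of a list never changes
   its length. The names `update_demo` and `length_update_demo` are hypothetical,
   introduced only for this sketch; the repository instead uses the `list.modify`
   relation, which additionally bounds the change of the single entry by `d`. -/

-- a toy single-entry update, standing in for "the two lists agree except at one index"
def update_demo : list ℤ → ℕ → ℤ → list ℤ
| []       _       _ := []
| (a :: l) 0       x := x :: l
| (a :: l) (i + 1) x := a :: update_demo l i x

-- replacing one entry preserves the length, mirroring the role of `eq_size_of_modify_list`
lemma length_update_demo :
  ∀ (l : list ℤ) (i : ℕ) (x : ℤ), (update_demo l i x).length = l.length
| []       _       _ := rfl
| (a :: l) 0       x := rfl
| (a :: l) (i + 1) x := congr_arg nat.succ (length_update_demo l i x)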
/- Copyright (c) 2021 Kyle Miller. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kyle Miller -/ import combinatorics.simple_graph.basic import combinatorics.simple_graph.subgraph import data.list /-! # Graph connectivity In a simple graph, * A *walk* is a finite sequence of adjacent vertices, and can be thought of equally well as a sequence of directed edges. * A *trail* is a walk whose edges each appear no more than once. * A *path* is a trail whose vertices appear no more than once. * A *cycle* is a nonempty trail whose first and last vertices are the same and whose vertices except for the first appear no more than once. **Warning:** graph theorists mean something different by "path" than do homotopy theorists. A "walk" in graph theory is a "path" in homotopy theory. Another warning: some graph theorists use "path" and "simple path" for "walk" and "path." Some definitions and theorems have inspiration from multigraph counterparts in [Chou1994]. ## Main definitions * `simple_graph.walk` (with accompanying pattern definitions `simple_graph.walk.nil'` and `simple_graph.walk.cons'`) * `simple_graph.walk.is_trail`, `simple_graph.walk.is_path`, and `simple_graph.walk.is_cycle`. * `simple_graph.path` * `simple_graph.walk.map` and `simple_graph.path.map` for the induced map on walks, given an (injective) graph homomorphism. * `simple_graph.reachable` for the relation of whether there exists a walk between a given pair of vertices * `simple_graph.preconnected` and `simple_graph.connected` are predicates on simple graphs for whether every vertex can be reached from every other, and in the latter case, whether the vertex type is nonempty. * `simple_graph.subgraph.connected` gives subgraphs the connectivity predicate via `simple_graph.subgraph.coe`. * `simple_graph.connected_component` is the type of connected components of a given graph. ## Tags walks, trails, paths, circuits, cycles -/ open function universes u v namespace simple_graph variables {V : Type u} {V' : Type v} (G : simple_graph V) (G' : simple_graph V') /-- A walk is a sequence of adjacent vertices. For vertices `u v : V`, the type `walk u v` consists of all walks starting at `u` and ending at `v`. We say that a walk *visits* the vertices it contains. The set of vertices a walk visits is `simple_graph.walk.support`. See `simple_graph.walk.nil'` and `simple_graph.walk.cons'` for patterns that can be useful in definitions since they make the vertices explicit. -/ @[derive decidable_eq] inductive walk : V → V → Type u | nil {u : V} : walk u u | cons {u v w: V} (h : G.adj u v) (p : walk v w) : walk u w attribute [refl] walk.nil instance walk.inhabited (v : V) : inhabited (G.walk v v) := ⟨by refl⟩ namespace walk variables {G} /-- Pattern to get `walk.nil` with the vertex as an explicit argument. -/ @[pattern] abbreviation nil' (u : V) : G.walk u u := walk.nil /-- Pattern to get `walk.cons` with the vertices as explicit arguments. -/ @[pattern] abbreviation cons' (u v w : V) (h : G.adj u v) (p : G.walk v w) : G.walk u w := walk.cons h p lemma exists_eq_cons_of_ne : Π {u v : V} (hne : u ≠ v) (p : G.walk u v), ∃ (w : V) (h : G.adj u w) (p' : G.walk w v), p = cons h p' | _ _ hne nil := (hne rfl).elim | _ _ _ (cons h p') := ⟨_, h, p', rfl⟩ /-- The length of a walk is the number of edges/darts along it. -/ def length : Π {u v : V}, G.walk u v → ℕ | _ _ nil := 0 | _ _ (cons _ q) := q.length.succ /-- The concatenation of two compatible walks. 
-/ @[trans] def append : Π {u v w : V}, G.walk u v → G.walk v w → G.walk u w | _ _ _ nil q := q | _ _ _ (cons h p) q := cons h (p.append q) /-- The concatenation of the reverse of the first walk with the second walk. -/ protected def reverse_aux : Π {u v w : V}, G.walk u v → G.walk u w → G.walk v w | _ _ _ nil q := q | _ _ _ (cons h p) q := reverse_aux p (cons (G.symm h) q) /-- The walk in reverse. -/ @[symm] def reverse {u v : V} (w : G.walk u v) : G.walk v u := w.reverse_aux nil /-- Get the `n`th vertex from a walk, where `n` is generally expected to be between `0` and `p.length`, inclusive. If `n` is greater than or equal to `p.length`, the result is the path's endpoint. -/ def get_vert : Π {u v : V} (p : G.walk u v) (n : ℕ), V | u v nil _ := u | u v (cons _ _) 0 := u | u v (cons _ q) (n+1) := q.get_vert n @[simp] lemma get_vert_zero {u v} (w : G.walk u v) : w.get_vert 0 = u := by { cases w; refl } lemma get_vert_of_length_le {u v} (w : G.walk u v) {i : ℕ} (hi : w.length ≤ i) : w.get_vert i = v := begin induction w with _ x y z hxy wyz IH generalizing i, { refl }, { cases i, { cases hi, }, { exact IH (nat.succ_le_succ_iff.1 hi) } } end @[simp] lemma get_vert_length {u v} (w : G.walk u v) : w.get_vert w.length = v := w.get_vert_of_length_le rfl.le lemma adj_get_vert_succ {u v} (w : G.walk u v) {i : ℕ} (hi : i < w.length) : G.adj (w.get_vert i) (w.get_vert (i+1)) := begin induction w with _ x y z hxy wyz IH generalizing i, { cases hi, }, { cases i, { simp [get_vert, hxy] }, { exact IH (nat.succ_lt_succ_iff.1 hi) } }, end @[simp] lemma cons_append {u v w x : V} (h : G.adj u v) (p : G.walk v w) (q : G.walk w x) : (cons h p).append q = cons h (p.append q) := rfl @[simp] lemma cons_nil_append {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h nil).append p = cons h p := rfl @[simp] lemma append_nil : Π {u v : V} (p : G.walk u v), p.append nil = p | _ _ nil := rfl | _ _ (cons h p) := by rw [cons_append, append_nil] @[simp] lemma nil_append {u v : V} (p : G.walk u v) : nil.append p = p := rfl lemma append_assoc : Π {u v w x : V} (p : G.walk u v) (q : G.walk v w) (r : G.walk w x), p.append (q.append r) = (p.append q).append r | _ _ _ _ nil _ _ := rfl | _ _ _ _ (cons h p') q r := by { dunfold append, rw append_assoc, } @[simp] lemma reverse_nil {u : V} : (nil : G.walk u u).reverse = nil := rfl lemma reverse_singleton {u v : V} (h : G.adj u v) : (cons h nil).reverse = cons (G.symm h) nil := rfl @[simp] lemma cons_reverse_aux {u v w x : V} (p : G.walk u v) (q : G.walk w x) (h : G.adj w u) : (cons h p).reverse_aux q = p.reverse_aux (cons (G.symm h) q) := rfl @[simp] protected lemma append_reverse_aux : Π {u v w x : V} (p : G.walk u v) (q : G.walk v w) (r : G.walk u x), (p.append q).reverse_aux r = q.reverse_aux (p.reverse_aux r) | _ _ _ _ nil _ _ := rfl | _ _ _ _ (cons h p') q r := append_reverse_aux p' q (cons (G.symm h) r) @[simp] protected lemma reverse_aux_append : Π {u v w x : V} (p : G.walk u v) (q : G.walk u w) (r : G.walk w x), (p.reverse_aux q).append r = p.reverse_aux (q.append r) | _ _ _ _ nil _ _ := rfl | _ _ _ _ (cons h p') q r := by simp [reverse_aux_append p' (cons (G.symm h) q) r] protected lemma reverse_aux_eq_reverse_append {u v w : V} (p : G.walk u v) (q : G.walk u w) : p.reverse_aux q = p.reverse.append q := by simp [reverse] @[simp] lemma reverse_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).reverse = p.reverse.append (cons (G.symm h) nil) := by simp [reverse] @[simp] lemma reverse_append {u v w : V} (p : G.walk u v) (q : G.walk v w) : (p.append q).reverse = 
q.reverse.append p.reverse := by simp [reverse] @[simp] lemma reverse_reverse : Π {u v : V} (p : G.walk u v), p.reverse.reverse = p | _ _ nil := rfl | _ _ (cons h p) := by simp [reverse_reverse] @[simp] lemma length_nil {u : V} : (nil : G.walk u u).length = 0 := rfl @[simp] lemma length_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).length = p.length + 1 := rfl @[simp] lemma length_append : Π {u v w : V} (p : G.walk u v) (q : G.walk v w), (p.append q).length = p.length + q.length | _ _ _ nil _ := by simp | _ _ _ (cons _ _) _ := by simp [length_append, add_left_comm, add_comm] @[simp] protected lemma length_reverse_aux : Π {u v w : V} (p : G.walk u v) (q : G.walk u w), (p.reverse_aux q).length = p.length + q.length | _ _ _ nil _ := by simp! | _ _ _ (cons _ _) _ := by simp [length_reverse_aux, nat.add_succ, nat.succ_add] @[simp] lemma length_reverse {u v : V} (p : G.walk u v) : p.reverse.length = p.length := by simp [reverse] lemma eq_of_length_eq_zero : Π {u v : V} {p : G.walk u v}, p.length = 0 → u = v | _ _ nil _ := rfl @[simp] lemma exists_length_eq_zero_iff {u v : V} : (∃ (p : G.walk u v), p.length = 0) ↔ u = v := begin split, { rintro ⟨p, hp⟩, exact eq_of_length_eq_zero hp, }, { rintro rfl, exact ⟨nil, rfl⟩, }, end @[simp] lemma length_eq_zero_iff {u : V} {p : G.walk u u} : p.length = 0 ↔ p = nil := by cases p; simp /-- The `support` of a walk is the list of vertices it visits in order. -/ def support : Π {u v : V}, G.walk u v → list V | u v nil := [u] | u v (cons h p) := u :: p.support /-- The `darts` of a walk is the list of darts it visits in order. -/ def darts : Π {u v : V}, G.walk u v → list G.dart | u v nil := [] | u v (cons h p) := ⟨(u, _), h⟩ :: p.darts /-- The `edges` of a walk is the list of edges it visits in order. This is defined to be the list of edges underlying `simple_graph.walk.darts`. 
-/ def edges {u v : V} (p : G.walk u v) : list (sym2 V) := p.darts.map dart.edge @[simp] lemma support_nil {u : V} : (nil : G.walk u u).support = [u] := rfl @[simp] lemma support_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).support = u :: p.support := rfl lemma support_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) : (p.append p').support = p.support ++ p'.support.tail := by induction p; cases p'; simp [*] @[simp] lemma support_reverse {u v : V} (p : G.walk u v) : p.reverse.support = p.support.reverse := by induction p; simp [support_append, *] lemma support_ne_nil {u v : V} (p : G.walk u v) : p.support ≠ [] := by cases p; simp lemma tail_support_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) : (p.append p').support.tail = p.support.tail ++ p'.support.tail := by rw [support_append, list.tail_append_of_ne_nil _ _ (support_ne_nil _)] lemma support_eq_cons {u v : V} (p : G.walk u v) : p.support = u :: p.support.tail := by cases p; simp @[simp] lemma start_mem_support {u v : V} (p : G.walk u v) : u ∈ p.support := by cases p; simp @[simp] lemma end_mem_support {u v : V} (p : G.walk u v) : v ∈ p.support := by induction p; simp [*] lemma mem_support_iff {u v w : V} (p : G.walk u v) : w ∈ p.support ↔ w = u ∨ w ∈ p.support.tail := by cases p; simp lemma mem_support_nil_iff {u v : V} : u ∈ (nil : G.walk v v).support ↔ u = v := by simp @[simp] lemma mem_tail_support_append_iff {t u v w : V} (p : G.walk u v) (p' : G.walk v w) : t ∈ (p.append p').support.tail ↔ t ∈ p.support.tail ∨ t ∈ p'.support.tail := by rw [tail_support_append, list.mem_append] @[simp] lemma end_mem_tail_support_of_ne {u v : V} (h : u ≠ v) (p : G.walk u v) : v ∈ p.support.tail := by { obtain ⟨_, _, _, rfl⟩ := exists_eq_cons_of_ne h p, simp } @[simp] lemma mem_support_append_iff {t u v w : V} (p : G.walk u v) (p' : G.walk v w) : t ∈ (p.append p').support ↔ t ∈ p.support ∨ t ∈ p'.support := begin simp only [mem_support_iff, mem_tail_support_append_iff], by_cases h : t = v; by_cases h' : t = u; subst_vars; try { have := ne.symm h' }; simp [*], end lemma coe_support {u v : V} (p : G.walk u v) : (p.support : multiset V) = {u} + p.support.tail := by cases p; refl lemma coe_support_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) : ((p.append p').support : multiset V) = {u} + p.support.tail + p'.support.tail := by rw [support_append, ←multiset.coe_add, coe_support] lemma coe_support_append' [decidable_eq V] {u v w : V} (p : G.walk u v) (p' : G.walk v w) : ((p.append p').support : multiset V) = p.support + p'.support - {v} := begin rw [support_append, ←multiset.coe_add], simp only [coe_support], rw add_comm {v}, simp only [← add_assoc, add_tsub_cancel_right], end lemma chain_adj_support : Π {u v w : V} (h : G.adj u v) (p : G.walk v w), list.chain G.adj u p.support | _ _ _ h nil := list.chain.cons h list.chain.nil | _ _ _ h (cons h' p) := list.chain.cons h (chain_adj_support h' p) lemma chain'_adj_support : Π {u v : V} (p : G.walk u v), list.chain' G.adj p.support | _ _ nil := list.chain.nil | _ _ (cons h p) := chain_adj_support h p lemma chain_dart_adj_darts : Π {d : G.dart} {v w : V} (h : d.snd = v) (p : G.walk v w), list.chain G.dart_adj d p.darts | _ _ _ h nil := list.chain.nil | _ _ _ h (cons h' p) := list.chain.cons h (chain_dart_adj_darts (by exact rfl) p) lemma chain'_dart_adj_darts : Π {u v : V} (p : G.walk u v), list.chain' G.dart_adj p.darts | _ _ nil := trivial | _ _ (cons h p) := chain_dart_adj_darts rfl p /-- Every edge in a walk's edge list is an edge of the graph. 
It is written in this form (rather than using `⊆`) to avoid unsightly coercions. -/ lemma edges_subset_edge_set : Π {u v : V} (p : G.walk u v) ⦃e : sym2 V⦄ (h : e ∈ p.edges), e ∈ G.edge_set | _ _ (cons h' p') e h := by rcases h with ⟨rfl, h⟩; solve_by_elim @[simp] lemma darts_nil {u : V} : (nil : G.walk u u).darts = [] := rfl @[simp] lemma darts_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).darts = ⟨(u, v), h⟩ :: p.darts := rfl @[simp] lemma darts_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) : (p.append p').darts = p.darts ++ p'.darts := by induction p; simp [*] @[simp] lemma darts_reverse {u v : V} (p : G.walk u v) : p.reverse.darts = (p.darts.map dart.symm).reverse := by induction p; simp [*, sym2.eq_swap] lemma cons_map_snd_darts {u v : V} (p : G.walk u v) : u :: p.darts.map dart.snd = p.support := by induction p; simp! [*] lemma map_snd_darts {u v : V} (p : G.walk u v) : p.darts.map dart.snd = p.support.tail := by simpa using congr_arg list.tail (cons_map_snd_darts p) lemma map_fst_darts_append {u v : V} (p : G.walk u v) : p.darts.map dart.fst ++ [v] = p.support := by induction p; simp! [*] lemma map_fst_darts {u v : V} (p : G.walk u v) : p.darts.map dart.fst = p.support.init := by simpa! using congr_arg list.init (map_fst_darts_append p) @[simp] lemma edges_nil {u : V} : (nil : G.walk u u).edges = [] := rfl @[simp] lemma edges_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).edges = ⟦(u, v)⟧ :: p.edges := rfl @[simp] lemma edges_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) : (p.append p').edges = p.edges ++ p'.edges := by simp [edges] @[simp] lemma edges_reverse {u v : V} (p : G.walk u v) : p.reverse.edges = p.edges.reverse := by simp [edges] @[simp] lemma length_support {u v : V} (p : G.walk u v) : p.support.length = p.length + 1 := by induction p; simp * @[simp] lemma length_darts {u v : V} (p : G.walk u v) : p.darts.length = p.length := by induction p; simp * @[simp] lemma length_edges {u v : V} (p : G.walk u v) : p.edges.length = p.length := by simp [edges] lemma dart_fst_mem_support_of_mem_darts : Π {u v : V} (p : G.walk u v) {d : G.dart}, d ∈ p.darts → d.fst ∈ p.support | u v (cons h p') d hd := begin simp only [support_cons, darts_cons, list.mem_cons_iff] at hd ⊢, rcases hd with (rfl|hd), { exact or.inl rfl, }, { exact or.inr (dart_fst_mem_support_of_mem_darts _ hd), }, end lemma dart_snd_mem_support_of_mem_darts : Π {u v : V} (p : G.walk u v) {d : G.dart}, d ∈ p.darts → d.snd ∈ p.support | u v (cons h p') d hd := begin simp only [support_cons, darts_cons, list.mem_cons_iff] at hd ⊢, rcases hd with (rfl|hd), { simp }, { exact or.inr (dart_snd_mem_support_of_mem_darts _ hd), }, end lemma mem_support_of_mem_edges {t u v w : V} (p : G.walk v w) (he : ⟦(t, u)⟧ ∈ p.edges) : t ∈ p.support := begin obtain ⟨d, hd, he⟩ := list.mem_map.mp he, rw dart_edge_eq_mk_iff' at he, rcases he with ⟨rfl, rfl⟩ | ⟨rfl, rfl⟩, { exact dart_fst_mem_support_of_mem_darts _ hd, }, { exact dart_snd_mem_support_of_mem_darts _ hd, }, end lemma darts_nodup_of_support_nodup {u v : V} {p : G.walk u v} (h : p.support.nodup) : p.darts.nodup := begin induction p, { simp, }, { simp only [darts_cons, support_cons, list.nodup_cons] at h ⊢, refine ⟨λ h', h.1 (dart_fst_mem_support_of_mem_darts p_p h'), p_ih h.2⟩, } end lemma edges_nodup_of_support_nodup {u v : V} {p : G.walk u v} (h : p.support.nodup) : p.edges.nodup := begin induction p, { simp, }, { simp only [edges_cons, support_cons, list.nodup_cons] at h ⊢, exact ⟨λ h', h.1 (mem_support_of_mem_edges p_p h'), p_ih 
h.2⟩, } end /-! ### Trails, paths, circuits, cycles -/ /-- A *trail* is a walk with no repeating edges. -/ structure is_trail {u v : V} (p : G.walk u v) : Prop := (edges_nodup : p.edges.nodup) /-- A *path* is a walk with no repeating vertices. Use `simple_graph.walk.is_path.mk'` for a simpler constructor. -/ structure is_path {u v : V} (p : G.walk u v) extends to_trail : is_trail p : Prop := (support_nodup : p.support.nodup) /-- A *circuit* at `u : V` is a nonempty trail beginning and ending at `u`. -/ structure is_circuit {u : V} (p : G.walk u u) extends to_trail : is_trail p : Prop := (ne_nil : p ≠ nil) /-- A *cycle* at `u : V` is a circuit at `u` whose only repeating vertex is `u` (which appears exactly twice). -/ structure is_cycle {u : V} (p : G.walk u u) extends to_circuit : is_circuit p : Prop := (support_nodup : p.support.tail.nodup) lemma is_trail_def {u v : V} (p : G.walk u v) : p.is_trail ↔ p.edges.nodup := ⟨is_trail.edges_nodup, λ h, ⟨h⟩⟩ lemma is_path.mk' {u v : V} {p : G.walk u v} (h : p.support.nodup) : is_path p := ⟨⟨edges_nodup_of_support_nodup h⟩, h⟩ lemma is_path_def {u v : V} (p : G.walk u v) : p.is_path ↔ p.support.nodup := ⟨is_path.support_nodup, is_path.mk'⟩ lemma is_cycle_def {u : V} (p : G.walk u u) : p.is_cycle ↔ is_trail p ∧ p ≠ nil ∧ p.support.tail.nodup := iff.intro (λ h, ⟨h.1.1, h.1.2, h.2⟩) (λ h, ⟨⟨h.1, h.2.1⟩, h.2.2⟩) @[simp] lemma is_trail.nil {u : V} : (nil : G.walk u u).is_trail := ⟨by simp [edges]⟩ lemma is_trail.of_cons {u v w : V} {h : G.adj u v} {p : G.walk v w} : (cons h p).is_trail → p.is_trail := by simp [is_trail_def] @[simp] lemma cons_is_trail_iff {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).is_trail ↔ p.is_trail ∧ ⟦(u, v)⟧ ∉ p.edges := by simp [is_trail_def, and_comm] lemma is_trail.reverse {u v : V} (p : G.walk u v) (h : p.is_trail) : p.reverse.is_trail := by simpa [is_trail_def] using h @[simp] lemma reverse_is_trail_iff {u v : V} (p : G.walk u v) : p.reverse.is_trail ↔ p.is_trail := by split; { intro h, convert h.reverse _, try { rw reverse_reverse } } lemma is_trail.of_append_left {u v w : V} {p : G.walk u v} {q : G.walk v w} (h : (p.append q).is_trail) : p.is_trail := by { rw [is_trail_def, edges_append, list.nodup_append] at h, exact ⟨h.1⟩ } lemma is_trail.of_append_right {u v w : V} {p : G.walk u v} {q : G.walk v w} (h : (p.append q).is_trail) : q.is_trail := by { rw [is_trail_def, edges_append, list.nodup_append] at h, exact ⟨h.2.1⟩ } lemma is_trail.count_edges_le_one [decidable_eq V] {u v : V} {p : G.walk u v} (h : p.is_trail) (e : sym2 V) : p.edges.count e ≤ 1 := list.nodup_iff_count_le_one.mp h.edges_nodup e lemma is_trail.count_edges_eq_one [decidable_eq V] {u v : V} {p : G.walk u v} (h : p.is_trail) {e : sym2 V} (he : e ∈ p.edges) : p.edges.count e = 1 := list.count_eq_one_of_mem h.edges_nodup he @[simp] lemma is_path.nil {u : V} : (nil : G.walk u u).is_path := by { fsplit; simp } lemma is_path.of_cons {u v w : V} {h : G.adj u v} {p : G.walk v w} : (cons h p).is_path → p.is_path := by simp [is_path_def] @[simp] lemma cons_is_path_iff {u v w : V} (h : G.adj u v) (p : G.walk v w) : (cons h p).is_path ↔ p.is_path ∧ u ∉ p.support := by split; simp [is_path_def] { contextual := tt } lemma is_path.reverse {u v : V} {p : G.walk u v} (h : p.is_path) : p.reverse.is_path := by simpa [is_path_def] using h @[simp] lemma is_path_reverse_iff {u v : V} (p : G.walk u v) : p.reverse.is_path ↔ p.is_path := by split; intro h; convert h.reverse; simp lemma is_path.of_append_left {u v w : V} {p : G.walk u v} {q : G.walk v w} : (p.append 
q).is_path → p.is_path := by { simp only [is_path_def, support_append], exact list.nodup.of_append_left } lemma is_path.of_append_right {u v w : V} {p : G.walk u v} {q : G.walk v w} (h : (p.append q).is_path) : q.is_path := begin rw ←is_path_reverse_iff at h ⊢, rw reverse_append at h, apply h.of_append_left, end /-! ### Walk decompositions -/ section walk_decomp variables [decidable_eq V] /-- Given a vertex in the support of a path, give the path up until (and including) that vertex. -/ def take_until : Π {v w : V} (p : G.walk v w) (u : V) (h : u ∈ p.support), G.walk v u | v w nil u h := by rw mem_support_nil_iff.mp h | v w (cons r p) u h := if hx : v = u then by subst u else cons r (take_until p _ $ h.cases_on (λ h', (hx h'.symm).elim) id) /-- Given a vertex in the support of a path, give the path from (and including) that vertex to the end. In other words, drop vertices from the front of a path until (and not including) that vertex. -/ def drop_until : Π {v w : V} (p : G.walk v w) (u : V) (h : u ∈ p.support), G.walk u w | v w nil u h := by rw mem_support_nil_iff.mp h | v w (cons r p) u h := if hx : v = u then by { subst u, exact cons r p } else drop_until p _ $ h.cases_on (λ h', (hx h'.symm).elim) id /-- The `take_until` and `drop_until` functions split a walk into two pieces. The lemma `count_support_take_until_eq_one` specifies where this split occurs. -/ @[simp] lemma take_spec {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.take_until u h).append (p.drop_until u h) = p := begin induction p, { rw mem_support_nil_iff at h, subst u, refl, }, { obtain (rfl|h) := h, { simp! }, { simp! only, split_ifs with h'; subst_vars; simp [*], } }, end @[simp] lemma count_support_take_until_eq_one {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.take_until u h).support.count u = 1 := begin induction p, { rw mem_support_nil_iff at h, subst u, simp!, }, { obtain (rfl|h) := h, { simp! }, { simp! only, split_ifs with h'; rw eq_comm at h'; subst_vars; simp! [*, list.count_cons], } }, end lemma count_edges_take_until_le_one {u v w : V} (p : G.walk v w) (h : u ∈ p.support) (x : V) : (p.take_until u h).edges.count ⟦(u, x)⟧ ≤ 1 := begin induction p with u' u' v' w' ha p' ih, { rw mem_support_nil_iff at h, subst u, simp!, }, { obtain (rfl|h) := h, { simp!, }, { simp! only, split_ifs with h', { subst h', simp, }, { rw [edges_cons, list.count_cons], split_ifs with h'', { rw sym2.eq_iff at h'', obtain (⟨rfl,rfl⟩|⟨rfl,rfl⟩) := h'', { exact (h' rfl).elim }, { cases p'; simp! 
} }, { apply ih, } } } }, end lemma support_take_until_subset {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.take_until u h).support ⊆ p.support := λ x hx, by { rw [← take_spec p h, mem_support_append_iff], exact or.inl hx } lemma support_drop_until_subset {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.drop_until u h).support ⊆ p.support := λ x hx, by { rw [← take_spec p h, mem_support_append_iff], exact or.inr hx } lemma darts_take_until_subset {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.take_until u h).darts ⊆ p.darts := λ x hx, by { rw [← take_spec p h, darts_append, list.mem_append], exact or.inl hx } lemma darts_drop_until_subset {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.drop_until u h).darts ⊆ p.darts := λ x hx, by { rw [← take_spec p h, darts_append, list.mem_append], exact or.inr hx } lemma edges_take_until_subset {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.take_until u h).edges ⊆ p.edges := list.map_subset _ (p.darts_take_until_subset h) lemma edges_drop_until_subset {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.drop_until u h).edges ⊆ p.edges := list.map_subset _ (p.darts_drop_until_subset h) lemma length_take_until_le {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.take_until u h).length ≤ p.length := begin have := congr_arg walk.length (p.take_spec h), rw [length_append] at this, exact nat.le.intro this, end lemma length_drop_until_le {u v w : V} (p : G.walk v w) (h : u ∈ p.support) : (p.drop_until u h).length ≤ p.length := begin have := congr_arg walk.length (p.take_spec h), rw [length_append, add_comm] at this, exact nat.le.intro this, end protected lemma is_trail.take_until {u v w : V} {p : G.walk v w} (hc : p.is_trail) (h : u ∈ p.support) : (p.take_until u h).is_trail := is_trail.of_append_left (by rwa ← take_spec _ h at hc) protected lemma is_trail.drop_until {u v w : V} {p : G.walk v w} (hc : p.is_trail) (h : u ∈ p.support) : (p.drop_until u h).is_trail := is_trail.of_append_right (by rwa ← take_spec _ h at hc) protected lemma is_path.take_until {u v w : V} {p : G.walk v w} (hc : p.is_path) (h : u ∈ p.support) : (p.take_until u h).is_path := is_path.of_append_left (by rwa ← take_spec _ h at hc) protected lemma is_path.drop_until {u v w : V} (p : G.walk v w) (hc : p.is_path) (h : u ∈ p.support) : (p.drop_until u h).is_path := is_path.of_append_right (by rwa ← take_spec _ h at hc) /-- Rotate a loop walk such that it is centered at the given vertex. 
-/ def rotate {u v : V} (c : G.walk v v) (h : u ∈ c.support) : G.walk u u := (c.drop_until u h).append (c.take_until u h) @[simp] lemma support_rotate {u v : V} (c : G.walk v v) (h : u ∈ c.support) : (c.rotate h).support.tail ~r c.support.tail := begin simp only [rotate, tail_support_append], apply list.is_rotated.trans list.is_rotated_append, rw [←tail_support_append, take_spec], end lemma rotate_darts {u v : V} (c : G.walk v v) (h : u ∈ c.support) : (c.rotate h).darts ~r c.darts := begin simp only [rotate, darts_append], apply list.is_rotated.trans list.is_rotated_append, rw [←darts_append, take_spec], end lemma rotate_edges {u v : V} (c : G.walk v v) (h : u ∈ c.support) : (c.rotate h).edges ~r c.edges := (rotate_darts c h).map _ protected lemma is_trail.rotate {u v : V} {c : G.walk v v} (hc : c.is_trail) (h : u ∈ c.support) : (c.rotate h).is_trail := begin rw [is_trail_def, (c.rotate_edges h).perm.nodup_iff], exact hc.edges_nodup, end protected lemma is_circuit.rotate {u v : V} {c : G.walk v v} (hc : c.is_circuit) (h : u ∈ c.support) : (c.rotate h).is_circuit := begin refine ⟨hc.to_trail.rotate _, _⟩, cases c, { exact (hc.ne_nil rfl).elim, }, { intro hn, have hn' := congr_arg length hn, rw [rotate, length_append, add_comm, ← length_append, take_spec] at hn', simpa using hn', }, end protected lemma is_cycle.rotate {u v : V} {c : G.walk v v} (hc : c.is_cycle) (h : u ∈ c.support) : (c.rotate h).is_cycle := begin refine ⟨hc.to_circuit.rotate _, _⟩, rw list.is_rotated.nodup_iff (support_rotate _ _), exact hc.support_nodup, end end walk_decomp end walk /-! ### Walks to paths -/ /-- The type for paths between two vertices. -/ abbreviation path (u v : V) := {p : G.walk u v // p.is_path} namespace walk variables {G} [decidable_eq V] /-- Given a walk, produces a walk from it by bypassing subwalks between repeated vertices. The result is a path, as shown in `simple_graph.walk.bypass_is_path`. This is packaged up in `simple_graph.walk.to_path`. -/ def bypass : Π {u v : V}, G.walk u v → G.walk u v | u v nil := nil | u v (cons ha p) := let p' := p.bypass in if hs : u ∈ p'.support then p'.drop_until u hs else cons ha p' lemma bypass_is_path {u v : V} (p : G.walk u v) : p.bypass.is_path := begin induction p, { simp!, }, { simp only [bypass], split_ifs, { apply is_path.drop_until, assumption, }, { simp [*, cons_is_path_iff], } }, end lemma length_bypass_le {u v : V} (p : G.walk u v) : p.bypass.length ≤ p.length := begin induction p, { refl }, { simp only [bypass], split_ifs, { transitivity, apply length_drop_until_le, rw [length_cons], exact le_add_right p_ih, }, { rw [length_cons, length_cons], exact add_le_add_right p_ih 1, } }, end /-- Given a walk, produces a path with the same endpoints using `simple_graph.walk.bypass`. -/ def to_path {u v : V} (p : G.walk u v) : G.path u v := ⟨p.bypass, p.bypass_is_path⟩ lemma support_bypass_subset {u v : V} (p : G.walk u v) : p.bypass.support ⊆ p.support := begin induction p, { simp!, }, { simp! only, split_ifs, { apply list.subset.trans (support_drop_until_subset _ _), apply list.subset_cons_of_subset, assumption, }, { rw support_cons, apply list.cons_subset_cons, assumption, }, }, end lemma support_to_path_subset {u v : V} (p : G.walk u v) : (p.to_path : G.walk u v).support ⊆ p.support := support_bypass_subset _ lemma darts_bypass_subset {u v : V} (p : G.walk u v) : p.bypass.darts ⊆ p.darts := begin induction p, { simp!, }, { simp! 
only, split_ifs, { apply list.subset.trans (darts_drop_until_subset _ _), apply list.subset_cons_of_subset _ p_ih, }, { rw darts_cons, exact list.cons_subset_cons _ p_ih, }, }, end lemma edges_bypass_subset {u v : V} (p : G.walk u v) : p.bypass.edges ⊆ p.edges := list.map_subset _ p.darts_bypass_subset lemma darts_to_path_subset {u v : V} (p : G.walk u v) : (p.to_path : G.walk u v).darts ⊆ p.darts := darts_bypass_subset _ lemma edges_to_path_subset {u v : V} (p : G.walk u v) : (p.to_path : G.walk u v).edges ⊆ p.edges := edges_bypass_subset _ end walk /-! ## Mapping paths -/ namespace walk variables {G G'} /-- Given a graph homomorphism, map walks to walks. -/ protected def map (f : G →g G') : Π {u v : V}, G.walk u v → G'.walk (f u) (f v) | _ _ nil := nil | _ _ (cons h p) := cons (f.map_adj h) (map p) variables (f : G →g G') {u v : V} (p : G.walk u v) @[simp] lemma map_nil : (nil : G.walk u u).map f = nil := rfl @[simp] lemma map_cons {w : V} (h : G.adj w u) : (cons h p).map f = cons (f.map_adj h) (p.map f) := rfl @[simp] lemma length_map : (p.map f).length = p.length := by induction p; simp [*] lemma map_append {u v w : V} (p : G.walk u v) (q : G.walk v w) : (p.append q).map f = (p.map f).append (q.map f) := by induction p; simp [*] @[simp] lemma reverse_map : (p.map f).reverse = p.reverse.map f := by induction p; simp [map_append, *] @[simp] lemma support_map : (p.map f).support = p.support.map f := by induction p; simp [*] @[simp] lemma darts_map : (p.map f).darts = p.darts.map f.map_dart := by induction p; simp [*] @[simp] lemma edges_map : (p.map f).edges = p.edges.map (sym2.map f) := by induction p; simp [*] variables {p f} lemma map_is_path_of_injective (hinj : function.injective f) (hp : p.is_path) : (p.map f).is_path := begin induction p with w u v w huv hvw ih, { simp, }, { rw walk.cons_is_path_iff at hp, simp [ih hp.1], intros x hx hf, cases hinj hf, exact hp.2 hx, }, end protected lemma is_path.of_map {f : G →g G'} (hp : (p.map f).is_path) : p.is_path := begin induction p with w u v w huv hvw ih, { simp }, { rw [map_cons, walk.cons_is_path_iff, support_map] at hp, rw walk.cons_is_path_iff, cases hp with hp1 hp2, refine ⟨ih hp1, _⟩, contrapose! hp2, exact list.mem_map_of_mem f hp2, } end lemma map_is_path_iff_of_injective (hinj : function.injective f) : (p.map f).is_path ↔ p.is_path := ⟨is_path.of_map, map_is_path_of_injective hinj⟩ variables (p f) lemma map_injective_of_injective {f : G →g G'} (hinj : function.injective f) (u v : V) : function.injective (walk.map f : G.walk u v → G'.walk (f u) (f v)) := begin intros p p' h, induction p with _ _ _ _ _ _ ih generalizing p', { cases p', { refl }, simpa using h, }, { induction p', { simpa using h, }, { simp only [map_cons] at h, cases hinj h.1, simp only [eq_self_iff_true, heq_iff_eq, true_and], apply ih, simpa using h.2, } }, end end walk namespace path variables {G G'} /-- Given an injective graph homomorphism, map paths to paths. -/ @[simps] protected def map (f : G →g G') (hinj : function.injective f) {u v : V} (p : G.path u v) : G'.path (f u) (f v) := ⟨walk.map f p, walk.map_is_path_of_injective hinj p.2⟩ lemma map_injective {f : G →g G'} (hinj : function.injective f) (u v : V) : function.injective (path.map f hinj : G.path u v → G'.path (f u) (f v)) := begin rintros ⟨p, hp⟩ ⟨p', hp'⟩ h, simp only [path.map, subtype.coe_mk] at h, simp [walk.map_injective_of_injective hinj u v h], end /-- Given a graph embedding, map paths to paths. 
-/ @[simps] protected def map_embedding (f : G ↪g G') {u v : V} (p : G.path u v) : G'.path (f u) (f v) := path.map f.to_hom f.injective p lemma map_embedding_injective (f : G ↪g G') (u v : V) : function.injective (path.map_embedding f : G.path u v → G'.path (f u) (f v)) := map_injective f.injective u v end path /-! ## Deleting edges -/ namespace walk variables {G} /-- Given a walk that avoids a set of edges, produce a walk in the graph with those edges deleted. -/ @[simp] def to_delete_edges (s : set (sym2 V)) : Π {v w : V} (p : G.walk v w) (hp : ∀ e, e ∈ p.edges → ¬ e ∈ s), (G.delete_edges s).walk v w | _ _ nil _ := nil | _ _ (cons' u v w huv p) hp := cons ((G.delete_edges_adj _ _ _).mpr ⟨huv, hp ⟦(u, v)⟧ (by simp)⟩) (p.to_delete_edges (λ e he, hp e (by simp [he]))) /-- Given a walk that avoids an edge, create a walk in the subgraph with that edge deleted. This is an abbreviation for `simple_graph.walk.to_delete_edges`. -/ abbreviation to_delete_edge {v w : V} (e : sym2 V) (p : G.walk v w) (hp : e ∉ p.edges) : (G.delete_edges {e}).walk v w := p.to_delete_edges {e} (λ e', by { contrapose!, simp [hp] { contextual := tt } }) @[simp] lemma map_to_delete_edges_eq (s : set (sym2 V)) {v w : V} {p : G.walk v w} (hp) : walk.map (hom.map_spanning_subgraphs (G.delete_edges_le s)) (p.to_delete_edges s hp) = p := by induction p; simp [*] lemma is_path.to_delete_edges (s : set (sym2 V)) {v w : V} {p : G.walk v w} (h : p.is_path) (hp) : (p.to_delete_edges s hp).is_path := by { rw ← map_to_delete_edges_eq s hp at h, exact h.of_map } end walk /-! ## `reachable` and `connected` -/ /-- Two vertices are *reachable* if there is a walk between them. This is equivalent to `relation.refl_trans_gen` of `G.adj`. See `simple_graph.reachable_iff_refl_trans_gen`. -/ def reachable (u v : V) : Prop := nonempty (G.walk u v) variables {G} lemma reachable_iff_nonempty_univ {u v : V} : G.reachable u v ↔ (set.univ : set (G.walk u v)).nonempty := set.nonempty_iff_univ_nonempty protected lemma reachable.elim {p : Prop} {u v : V} (h : G.reachable u v) (hp : G.walk u v → p) : p := nonempty.elim h hp protected lemma reachable.elim_path {p : Prop} {u v : V} (h : G.reachable u v) (hp : G.path u v → p) : p := begin classical, exact h.elim (λ q, hp q.to_path), end @[refl] protected lemma reachable.refl (u : V) : G.reachable u u := by { fsplit, refl } protected lemma reachable.rfl {u : V} : G.reachable u u := reachable.refl _ @[symm] protected lemma reachable.symm {u v : V} (huv : G.reachable u v) : G.reachable v u := huv.elim (λ p, ⟨p.reverse⟩) @[trans] protected lemma reachable.trans {u v w : V} (huv : G.reachable u v) (hvw : G.reachable v w) : G.reachable u w := huv.elim (λ puv, hvw.elim (λ pvw, ⟨puv.append pvw⟩)) lemma reachable_iff_refl_trans_gen (u v : V) : G.reachable u v ↔ relation.refl_trans_gen G.adj u v := begin split, { rintro ⟨h⟩, induction h, { refl, }, { exact (relation.refl_trans_gen.single h_h).trans h_ih, }, }, { intro h, induction h with _ _ _ ha hr, { refl, }, { exact reachable.trans hr ⟨walk.cons ha walk.nil⟩, }, }, end variables (G) lemma reachable_is_equivalence : equivalence G.reachable := mk_equivalence _ (@reachable.refl _ G) (@reachable.symm _ G) (@reachable.trans _ G) /-- The equivalence relation on vertices given by `simple_graph.reachable`. -/ def reachable_setoid : setoid V := setoid.mk _ G.reachable_is_equivalence /-- A graph is preconnected if every pair of vertices is reachable from one another. 
-/ def preconnected : Prop := ∀ (u v : V), G.reachable u v lemma preconnected.map {G : simple_graph V} {H : simple_graph V'} (f : G →g H) (hf : surjective f) (hG : G.preconnected) : H.preconnected := hf.forall₂.2 $ λ a b, (hG _ _).map $ walk.map _ lemma iso.preconnected_iff {G : simple_graph V} {H : simple_graph V'} (e : G ≃g H) : G.preconnected ↔ H.preconnected := ⟨preconnected.map e.to_hom e.to_equiv.surjective, preconnected.map e.symm.to_hom e.symm.to_equiv.surjective⟩ /-- A graph is connected if it's preconnected and contains at least one vertex. This follows the convention observed by mathlib that something is connected iff it has exactly one connected component. There is a `has_coe_to_fun` instance so that `h u v` can be used instead of `h.preconnected u v`. -/ @[protect_proj] structure connected : Prop := (preconnected : G.preconnected) [nonempty : nonempty V] instance : has_coe_to_fun G.connected (λ _, Π (u v : V), G.reachable u v) := ⟨λ h, h.preconnected⟩ lemma connected.map {G : simple_graph V} {H : simple_graph V'} (f : G →g H) (hf : surjective f) (hG : G.connected) : H.connected := by { haveI := hG.nonempty.map f, exact ⟨hG.preconnected.map f hf⟩ } lemma iso.connected_iff {G : simple_graph V} {H : simple_graph V'} (e : G ≃g H) : G.connected ↔ H.connected := ⟨connected.map e.to_hom e.to_equiv.surjective, connected.map e.symm.to_hom e.symm.to_equiv.surjective⟩ /-- The quotient of `V` by the `simple_graph.reachable` relation gives the connected components of a graph. -/ def connected_component := quot G.reachable /-- Gives the connected component containing a particular vertex. -/ def connected_component_mk (v : V) : G.connected_component := quot.mk G.reachable v instance connected_component.inhabited [inhabited V] : inhabited G.connected_component := ⟨G.connected_component_mk default⟩ section connected_component variables {G} @[elab_as_eliminator] protected lemma connected_component.ind {β : G.connected_component → Prop} (h : ∀ (v : V), β (G.connected_component_mk v)) (c : G.connected_component) : β c := quot.ind h c @[elab_as_eliminator] protected lemma connected_component.ind₂ {β : G.connected_component → G.connected_component → Prop} (h : ∀ (v w : V), β (G.connected_component_mk v) (G.connected_component_mk w)) (c d : G.connected_component) : β c d := quot.induction_on₂ c d h protected lemma connected_component.sound {v w : V} : G.reachable v w → G.connected_component_mk v = G.connected_component_mk w := quot.sound protected lemma connected_component.exact {v w : V} : G.connected_component_mk v = G.connected_component_mk w → G.reachable v w := @quotient.exact _ G.reachable_setoid _ _ @[simp] protected lemma connected_component.eq {v w : V} : G.connected_component_mk v = G.connected_component_mk w ↔ G.reachable v w := @quotient.eq _ G.reachable_setoid _ _ /-- The `connected_component` specialization of `quot.lift`. Provides the stronger assumption that the vertices are connected by a path. 
-/ protected def connected_component.lift {β : Sort*} (f : V → β) (h : ∀ (v w : V) (p : G.walk v w), p.is_path → f v = f w) : G.connected_component → β := quot.lift f (λ v w (h' : G.reachable v w), h'.elim_path (λ hp, h v w hp hp.2)) @[simp] protected lemma connected_component.lift_mk {β : Sort*} {f : V → β} {h : ∀ (v w : V) (p : G.walk v w), p.is_path → f v = f w} {v : V} : connected_component.lift f h (G.connected_component_mk v) = f v := rfl protected lemma connected_component.«exists» {p : G.connected_component → Prop} : (∃ (c : G.connected_component), p c) ↔ ∃ v, p (G.connected_component_mk v) := (surjective_quot_mk G.reachable).exists protected lemma connected_component.«forall» {p : G.connected_component → Prop} : (∀ (c : G.connected_component), p c) ↔ ∀ v, p (G.connected_component_mk v) := (surjective_quot_mk G.reachable).forall lemma preconnected.subsingleton_connected_component (h : G.preconnected) : subsingleton G.connected_component := ⟨connected_component.ind₂ (λ v w, connected_component.sound (h v w))⟩ end connected_component variables {G} /-- A subgraph is connected if it is connected as a simple graph. -/ abbreviation subgraph.connected (H : G.subgraph) : Prop := H.coe.connected lemma preconnected.set_univ_walk_nonempty (hconn : G.preconnected) (u v : V) : (set.univ : set (G.walk u v)).nonempty := by { rw ← set.nonempty_iff_univ_nonempty, exact hconn u v } lemma connected.set_univ_walk_nonempty (hconn : G.connected) (u v : V) : (set.univ : set (G.walk u v)).nonempty := hconn.preconnected.set_univ_walk_nonempty u v /-! ### Walks of a given length -/ section walk_counting lemma set_walk_self_length_zero_eq (u : V) : {p : G.walk u u | p.length = 0} = {walk.nil} := by { ext p, simp } lemma set_walk_length_zero_eq_of_ne {u v : V} (h : u ≠ v) : {p : G.walk u v | p.length = 0} = ∅ := begin ext p, simp only [set.mem_set_of_eq, set.mem_empty_eq, iff_false], exact λ h', absurd (walk.eq_of_length_eq_zero h') h, end lemma set_walk_length_succ_eq (u v : V) (n : ℕ) : {p : G.walk u v | p.length = n.succ} = ⋃ (w : V) (h : G.adj u w), walk.cons h '' {p' : G.walk w v | p'.length = n} := begin ext p, cases p with _ _ w _ huw pwv, { simp [eq_comm], }, { simp only [nat.succ_eq_add_one, set.mem_set_of_eq, walk.length_cons, add_left_inj, set.mem_Union, set.mem_image, exists_prop], split, { rintro rfl, exact ⟨w, huw, pwv, rfl, rfl, heq.rfl⟩, }, { rintro ⟨w, huw, pwv, rfl, rfl, rfl⟩, refl, } }, end variables (G) [fintype V] [decidable_rel G.adj] [decidable_eq V] /-- The `finset` of length-`n` walks from `u` to `v`. This is used to give `{p : G.walk u v | p.length = n}` a `fintype` instance, and it can also be useful as a recursive description of this set when `V` is finite. See `simple_graph.coe_finset_walk_length_eq` for the relationship between this `finset` and the set of length-`n` walks. 
-/ def finset_walk_length : Π (n : ℕ) (u v : V), finset (G.walk u v) | 0 u v := if h : u = v then by { subst u, exact {walk.nil} } else ∅ | (n+1) u v := finset.univ.bUnion (λ (w : V), if h : G.adj u w then (finset_walk_length n w v).map ⟨λ p, walk.cons h p, λ p q, by simp⟩ else ∅) lemma coe_finset_walk_length_eq (n : ℕ) (u v : V) : (G.finset_walk_length n u v : set (G.walk u v)) = {p : G.walk u v | p.length = n} := begin induction n with n ih generalizing u v, { obtain rfl | huv := eq_or_ne u v; simp [finset_walk_length, set_walk_length_zero_eq_of_ne, *], }, { simp only [finset_walk_length, set_walk_length_succ_eq, finset.coe_bUnion, finset.mem_coe, finset.mem_univ, set.Union_true], ext p, simp only [set.mem_Union, finset.mem_coe, set.mem_image, set.mem_set_of_eq], congr' 2, ext w, simp only [set.ext_iff, finset.mem_coe, set.mem_set_of_eq] at ih, split_ifs with huw; simp [huw, ih], }, end variables {G} lemma walk.length_eq_of_mem_finset_walk_length {n : ℕ} {u v : V} (p : G.walk u v) : p ∈ G.finset_walk_length n u v → p.length = n := (set.ext_iff.mp (G.coe_finset_walk_length_eq n u v) p).mp variables (G) instance fintype_set_walk_length (u v : V) (n : ℕ) : fintype {p : G.walk u v | p.length = n} := fintype.subtype (G.finset_walk_length n u v) $ λ p, by rw [←finset.mem_coe, coe_finset_walk_length_eq] lemma set_walk_length_to_finset_eq (n : ℕ) (u v : V) : {p : G.walk u v | p.length = n}.to_finset = G.finset_walk_length n u v := by { ext p, simp [←coe_finset_walk_length_eq] } /- See `simple_graph.adj_matrix_pow_apply_eq_card_walk` for the cardinality in terms of the `n`th power of the adjacency matrix. -/ lemma card_set_walk_length_eq (u v : V) (n : ℕ) : fintype.card {p : G.walk u v | p.length = n} = (G.finset_walk_length n u v).card := fintype.card_of_subtype (G.finset_walk_length n u v) $ λ p, by rw [←finset.mem_coe, coe_finset_walk_length_eq] end walk_counting end simple_graph
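A minimal usage sketch for the reachability API declared above (not part of the original mathlib file): it only invokes declarations defined in the preceding rows and assumes they compile and are in scope.

-- Hedged sketch: `simple_graph.reachable.refl` and `simple_graph.connected_component.sound`
-- are the protected lemmas declared above, so they are referenced by their full names.
example {V : Type*} (G : simple_graph V) (u : V) : G.reachable u u :=
simple_graph.reachable.refl u

example {V : Type*} (G : simple_graph V) {u v : V} (h : G.reachable u v) :
  G.connected_component_mk u = G.connected_component_mk v :=
simple_graph.connected_component.sound h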
""" """ struct RestrictedTriangulation{Dc,Dp,G} <: Triangulation{Dc,Dp} oldtrian::G cell_to_oldcell::Vector{Int} oldcell_to_cell::Vector{Int} void_to_oldcell::Vector{Int} function RestrictedTriangulation( oldtrian::Triangulation{Dc,Dp}, cell_to_oldcell::Vector{Int}, oldcell_to_cell::Vector{Int}, void_to_oldcell::Vector{Int}) where {Dc,Dp} new{Dc,Dp,typeof(oldtrian)}( oldtrian,cell_to_oldcell,oldcell_to_cell,void_to_oldcell) end end function RestrictedTriangulation( oldtrian::Triangulation{Dc,Dp},cell_to_oldcell::Vector{Int}) where {Dc,Dp} n_oldcells = num_cells(oldtrian) oldcell_to_cell = fill(Int(UNSET),n_oldcells) oldcell_to_cell[cell_to_oldcell] .= 1:length(cell_to_oldcell) void_to_oldcell = findall(oldcell_to_cell .== UNSET) oldcell_to_cell[void_to_oldcell] .= -(1:length(void_to_oldcell)) RestrictedTriangulation(oldtrian,cell_to_oldcell,oldcell_to_cell,void_to_oldcell) end function RestrictedTriangulation( oldtrian::Triangulation{Dc,Dp},oldcell_to_mask::Vector{Bool}) where {Dc,Dp} cell_to_oldcell = findall(oldcell_to_mask) RestrictedTriangulation(oldtrian,cell_to_oldcell) end function get_reffes(trian::RestrictedTriangulation) get_reffes(trian.oldtrian) end function get_cell_type(trian::RestrictedTriangulation) reindex(get_cell_type(trian.oldtrian),trian.cell_to_oldcell) end function get_cell_coordinates(trian::RestrictedTriangulation) reindex(get_cell_coordinates(trian.oldtrian),trian.cell_to_oldcell) end function restrict(f::AbstractArray,trian::RestrictedTriangulation) reindex(f,trian) end function get_cell_id(trian::RestrictedTriangulation) trian.cell_to_oldcell end function get_cell_map(trian::RestrictedTriangulation) cell_map = get_cell_map(trian.oldtrian) reindex(cell_map,trian.cell_to_oldcell) end
State Before: R : Type u_1 inst✝ : CommRing R n : ℕ P : Ideal R hP : Ideal.IsPrime P q : R[X] c : (R ⧸ P)[X] hq : map (mk P) q = c * X ^ n hc0 : degree c = 0 ⊢ ↑n = degree (map (mk P) q) State After: no goals Tactic: rw [hq, degree_mul, hc0, zero_add, degree_pow, degree_X, nsmul_one, Nat.cast_withBot]
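An explanatory gloss on the record above (added, not part of the original trace): the single `rw` closes the goal because it spells out the degree computation

  degree (map (mk P) q)
      = degree (c * X ^ n)          -- hq
      = degree c + degree (X ^ n)   -- degree_mul
      = 0 + degree (X ^ n)          -- hc0
      = degree (X ^ n)              -- zero_add
      = n • degree X                -- degree_pow
      = n • 1                       -- degree_X
      = ↑n                          -- nsmul_one

with `Nat.cast_withBot` reconciling the coercion of `n` into `WithBot ℕ`, so the goal `↑n = degree (map (mk P) q)` closes by reflexivity.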
program msis_driver !! will write to stdout if "-" specified, so we avoid printing to console unless file output is used use, intrinsic:: iso_fortran_env, only: sp=>real32, stderr=>error_unit, stdout=>output_unit, stdin=>input_unit implicit none (type, external) integer, parameter :: mass=48 integer :: iyd,sec,lz, i real(sp) :: f107a,f107,ap(7),stl,apday,ap3 real(sp) :: d(9),t(2) real(sp), allocatable :: glat(:),glon(:),alt(:) integer :: u character(256) :: buf character(:), allocatable :: infile,outfile external :: meters, gtd7 !> read in msis inputs if (command_argument_count() < 2) error stop 'msis_setup: must specify input and output filenames' call get_command_argument(1,buf) infile = trim(buf) if (infile == "-") then call get_stdin(iyd,sec,f107a,f107,apday,ap3,lz, glat, glon, alt) else call get_file_input(infile,iyd,sec,f107a,f107,apday,ap3,lz, glat, glon, alt) endif !> Run MSIS ap(1:7)=apday ap(2)=ap3 !> switch to mksa units call meters(.true.) !> output file call get_command_argument(2, buf) outfile = trim(buf) if (outfile == '-') then u = stdout else open(newunit=u,file=outfile,status='replace',form='unformatted',access='stream', action='write') endif !> call to msis routine do i=1,lz stl = sec/3600. + glon(i)/15. call gtd7(iyd, real(sec, sp),alt(i),glat(i),glon(i),stl,f107a,f107,ap,mass,d,t) if (outfile == '-') then write(u,'(F9.2, 9ES15.6, F9.2)') alt(i),d(1:9),t(2) else write(u) alt(i),d(1:9),t(2) endif end do if (outfile == '-') stop close(u) inquire(file=outfile, size=i) print *,'msis_setup: wrote ',i,' bytes to ',outfile if (i==0) error stop 'msis_setup failed to write file' contains subroutine get_stdin(iyd,sec,f107a,f107,apday,ap3,lz, glat, glon, alt) integer, intent(out) :: iyd,sec,lz real(sp), intent(out) :: f107a,f107,apday,ap3 real(sp), intent(out), allocatable :: glat(:),glon(:),alt(:) integer :: u u = stdin read(u, *) iyd read(u, *) sec read(u, *) f107a, f107, apday, ap3 read(u, *) lz call check_lz(lz) allocate(glat(lz),glon(lz),alt(lz)) read(u,*) glat read(u,*) glon read(u,*) alt end subroutine get_stdin subroutine get_file_input(filename,iyd,sec,f107a,f107,apday,ap3,lz, glat, glon, alt) character(*), intent(in) :: filename integer, intent(out) :: iyd,sec,lz real(sp), intent(out) :: f107a,f107,apday,ap3 real(sp), intent(out), allocatable :: glat(:),glon(:),alt(:) character(256) :: buf integer :: u open(newunit=u,file=infile, status='old',form='unformatted',access='stream', action='read') !! use binary to reduce file size and read times read(u) iyd read(u) sec read(u) f107a read(u) f107 read(u) apday read(u) ap3 read(u) lz call check_lz(lz) allocate(glat(lz),glon(lz),alt(lz)) read(u) glat,glon,alt close(u) end subroutine get_file_input subroutine check_lz(lz) integer, intent(in) :: lz character(256) :: buf integer :: i if (lz<1) error stop 'lz must be positive' call get_command_argument(3, buf, status=i) if (i==0) then read(buf,*) i if (i /= lz) then write(stderr,*) 'expected ',i,' grid points but read ',lz error stop endif endif end subroutine check_lz end program
/* @copyright Louis Dionne 2015 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ #include <boost/hana/ext/std/array.hpp> #include <boost/hana/tuple.hpp> #include <laws/base.hpp> #include <laws/searchable.hpp> #include <array> using namespace boost::hana; using test::ct_eq; int main() { auto eq_arrays = make<Tuple>( std::array<ct_eq<0>, 0>{} , std::array<ct_eq<0>, 1>{} , std::array<ct_eq<0>, 2>{} , std::array<ct_eq<0>, 3>{} , std::array<ct_eq<0>, 4>{} ); auto eq_keys = make<Tuple>(ct_eq<0>{}); ////////////////////////////////////////////////////////////////////////// // Searchable ////////////////////////////////////////////////////////////////////////// test::TestSearchable<ext::std::Array>{eq_arrays, eq_keys}; }
lemma prime_elem_not_unit: "prime_elem p \<Longrightarrow> \<not>p dvd 1"
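For illustration only, a hedged Lean 3 rendering of this Isabelle statement (the row itself carries no proof, and none is supplied for it here); `prime.not_unit` and `is_unit_of_dvd_one` are assumed mathlib lemmas, not part of the source row.

-- Hypothetical analogue: a prime element is not a unit, and dividing 1 means being a unit.
example {α : Type*} [comm_monoid_with_zero α] {p : α} (hp : prime p) : ¬ p ∣ 1 :=
λ h, hp.not_unit (is_unit_of_dvd_one h)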