Formal statement is: lemma Chain3: assumes At0: "At x0 y0 z0 0" and AtSuc: "\<And>x y z n. At x y z n \<Longrightarrow> \<exists>x' y' z'. At x' y' z' (Suc n) \<and> Follows x' y' z' x y z" obtains f g h where "f 0 = x0" "g 0 = y0" "h 0 = z0" "\<And>n. At (f n) (g n) (h n) n" "\<And>n. Follows (f(Suc n)) (g(Suc n)) (h(Suc n)) (f n) (g n) (h n)" Informal statement is: Suppose that we have a predicate $At(x,y,z,n)$ that holds for some $x_0, y_0, z_0$ and $n=0$, and that for any $x,y,z,n$ such that $At(x,y,z,n)$ holds, there exists $x',y',z'$ such that $At(x',y',z',n+1)$ holds and $Follows(x',y',z',x,y,z)$ holds. Then there exist functions $f,g,h$ such that $f(0) = x_0$, $g(0) = y_0$, $h(0) = z_0$, $At(f(n),g(n),h(n),n)$ holds for all $n$, and $Follows(f(n+1),g(n+1),h(n+1),f(n),g(n),h(n))$ holds for all $n$. |
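The informal statement is a dependent-choice style recursion. Below is a minimal Python sketch of the same construction (hypothetical names; `step` plays the role of the AtSuc assumption, returning a triple satisfying At(..., n+1)):

# Illustrative sketch only, under the assumptions of the lemma above.
def build_chains(x0, y0, z0, step, n_max):
    f, g, h = [x0], [y0], [z0]
    for n in range(n_max):
        x, y, z = step(f[n], g[n], h[n], n)  # choose the next triple
        f.append(x); g.append(y); h.append(z)
    return f, g, h
|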
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
theory EmptyFailLib
imports
"Monad_WP/NonDetMonad"
HaskellLib_H
begin
(* Collect generic empty_fail lemmas here. Naming convention is empty_fail_NAME.
Unless there is a good reason, they should all be [intro!, simp] *)
lemma empty_fail_when [simp, intro!]:
"(P \<Longrightarrow> empty_fail x) \<Longrightarrow> empty_fail (when P x)"
unfolding when_def by simp
lemma empty_fail_bindD1:
"empty_fail (a >>= b) \<Longrightarrow> empty_fail a"
unfolding empty_fail_def bind_def
apply (clarsimp simp: split_def image_image)
apply (drule_tac x = s in spec)
apply simp
done
lemma empty_fail_liftM [simp, intro!]:
"empty_fail (liftM f m) = empty_fail m"
unfolding liftM_def
apply (rule iffI)
apply (erule empty_fail_bindD1)
apply (erule empty_fail_bind)
apply simp
done
lemma empty_fail_assert [simp, intro!]:
"empty_fail (assert P)"
unfolding empty_fail_def assert_def
by (simp add: return_def fail_def)
lemma empty_fail_unless [intro!, simp]:
"empty_fail f \<Longrightarrow> empty_fail (unless P f)"
by (simp add: unless_def)
lemma empty_fail_stateAssert [intro!, simp]:
"empty_fail (stateAssert P l)"
by (simp add: stateAssert_def empty_fail_def get_def assert_def
return_def fail_def bind_def)
end
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''periodicfeatures - Waqas Bhatti ([email protected]) - Oct 2017
License: MIT. See the LICENSE file for more details.
This contains functions that calculate various light curve features using
information about periods and fits to phased light curves.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
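# Example usage (hypothetical parent name): set_logger_parent('myapp') binds
# this module's LOGGER to the 'myapp.<LOGMOD>' child logger, so messages
# propagate to handlers configured on 'myapp'.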
def LOGDEBUG(message):
if LOGGER:
LOGGER.debug(message)
elif DEBUG:
print('[%s - DBUG] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGINFO(message):
if LOGGER:
LOGGER.info(message)
else:
print('[%s - INFO] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGERROR(message):
if LOGGER:
LOGGER.error(message)
else:
print('[%s - ERR!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGWARNING(message):
if LOGGER:
LOGGER.warning(message)
else:
print('[%s - WRN!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGEXCEPTION(message):
if LOGGER:
LOGGER.exception(message)
else:
print(
'[%s - EXC!] %s\nexception was: %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message, format_exc()
)
)
#############
## IMPORTS ##
#############
from time import time as unixtime
from itertools import combinations
import numpy as np
from scipy.signal import argrelmin, argrelmax
###################
## LOCAL IMPORTS ##
###################
from .. import lcmath
from ..varbase import lcfit
from ..lcmodels import sinusoidal, eclipses, transits
from ..periodbase.zgls import specwindow_lsp
from .varfeatures import lightcurve_ptp_measures
###################################
## FEATURE CALCULATION FUNCTIONS ##
###################################
def lcfit_features(times, mags, errs, period,
fourierorder=5,
# these are depth, duration, ingress duration
transitparams=[-0.01,0.1,0.1],
# these are depth, duration, depth ratio, secphase
ebparams=[-0.2,0.3,0.7,0.5],
sigclip=10.0,
magsarefluxes=False,
verbose=True):
'''
This calculates various features related to fitting models to light curves.
- calculates R_ij and phi_ij ratios for Fourier fit amplitudes and phases
- calculates the redchisq for fourier, EB, and planet transit fits
- calculates the redchisq for an EB fit using 2 x period
'''
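# Illustration (added note, not original functionality): for Fourier
# amplitudes A_1..A_k and phases phi_1..phi_k, the pairwise features
# computed below are amplitude ratios R_ij = A_j/A_i and phase differences
# phi_ij, taken over itertools.combinations of the fit coefficients;
# e.g. for k = 3 the pairs (1,2), (1,3), (2,3) yield R_21, R_31, R_32.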
# get the finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# get nonzero errors
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# get the MAD of the unphased light curve
lightcurve_median = np.median(fmags)
lightcurve_mad = np.median(np.abs(fmags - lightcurve_median))
#
# fourier fit
#
# we fit a Fourier series to the light curve using the best period and
# extract the amplitudes and phases up to the 8th order to fit the LC. the
# various ratios of the amplitudes A_ij and the differences in the phases
# phi_ij are also used as periodic variability features
# do the fit
ffit = lcfit.fourier_fit_magseries(ftimes, fmags, ferrs, period,
fourierorder=fourierorder,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
# get the coeffs and redchisq
fourier_fitcoeffs = ffit['fitinfo']['finalparams']
fourier_chisq = ffit['fitchisq']
fourier_redchisq = ffit['fitredchisq']
if fourier_fitcoeffs is not None:
fourier_modelmags, _, _, fpmags, _ = sinusoidal.fourier_sinusoidal_func(
[period,
ffit['fitinfo']['fitepoch'],
ffit['fitinfo']['finalparams'][:fourierorder],
ffit['fitinfo']['finalparams'][fourierorder:]],
ftimes,
fmags,
ferrs
)
fourier_residuals = fourier_modelmags - fpmags
fourier_residual_median = np.median(fourier_residuals)
fourier_residual_mad = np.median(np.abs(fourier_residuals -
fourier_residual_median))
# break them out into amps and phases
famplitudes = fourier_fitcoeffs[:fourierorder]
fphases = fourier_fitcoeffs[fourierorder:]
famp_combos = combinations(famplitudes,2)
famp_cinds = combinations(range(len(famplitudes)),2)
fpha_combos = combinations(fphases,2)
fpha_cinds = combinations(range(len(fphases)),2)
else:
LOGERROR('LC fit to sinusoidal series model failed, '
'using initial params')
initfourieramps = [0.6] + [0.2]*(fourierorder - 1)
initfourierphas = [0.1] + [0.1]*(fourierorder - 1)
fourier_modelmags, _, _, fpmags, _ = sinusoidal.fourier_sinusoidal_func(
[period,
ffit['fitinfo']['fitepoch'],
initfourieramps,
initfourierphas],
ftimes,
fmags,
ferrs
)
fourier_residuals = fourier_modelmags - fpmags
fourier_residual_median = np.median(fourier_residuals)
fourier_residual_mad = np.median(np.abs(fourier_residuals -
fourier_residual_median))
# break them out into amps and phases
famplitudes = initfourieramps
fphases = initfourierphas
famp_combos = combinations(famplitudes,2)
famp_cinds = combinations(range(len(famplitudes)),2)
fpha_combos = combinations(fphases,2)
fpha_cinds = combinations(range(len(fphases)),2)
fampratios = {}
fphadiffs = {}
# get the ratios for all fourier coeff combinations
for ampi, ampc, phai, phac in zip(famp_cinds,
famp_combos,
fpha_cinds,
fpha_combos):
ampratind = 'R_%s%s' % (ampi[1]+1, ampi[0]+1)
# this is R_ij
amprat = ampc[1]/ampc[0]
phadiffind = 'phi_%s%s' % (phai[1]+1, phai[0]+1)
# this is phi_ij
phadiff = phac[1] - phai[0]*phac[0]
fampratios[ampratind] = amprat
fphadiffs[phadiffind] = phadiff
# update the outdict for the Fourier fit results
outdict = {
'fourier_ampratios':fampratios,
'fourier_phadiffs':fphadiffs,
'fourier_fitparams':fourier_fitcoeffs,
'fourier_redchisq':fourier_redchisq,
'fourier_chisq':fourier_chisq,
'fourier_residual_median':fourier_residual_median,
'fourier_residual_mad':fourier_residual_mad,
'fourier_residual_mad_over_lcmad':fourier_residual_mad/lightcurve_mad
}
# EB and planet fits will find the epoch automatically
planetfitparams = [period,
None,
transitparams[0],
transitparams[1],
transitparams[2]]
ebfitparams = [period,
None,
ebparams[0],
ebparams[1],
ebparams[2],
ebparams[3]]
# do the planet and EB fit with this period
planet_fit = lcfit.traptransit_fit_magseries(ftimes, fmags, ferrs,
planetfitparams,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
planetfit_finalparams = planet_fit['fitinfo']['finalparams']
planetfit_chisq = planet_fit['fitchisq']
planetfit_redchisq = planet_fit['fitredchisq']
if planetfit_finalparams is not None:
planet_modelmags, _, _, ppmags, _ = transits.trapezoid_transit_func(
planetfit_finalparams,
ftimes,
fmags,
ferrs
)
else:
LOGERROR('LC fit to transit planet model failed, using initial params')
planet_modelmags, _, _, ppmags, _ = transits.trapezoid_transit_func(
planetfitparams,
ftimes,
fmags,
ferrs
)
planet_residuals = planet_modelmags - ppmags
planet_residual_median = np.median(planet_residuals)
planet_residual_mad = np.median(np.abs(planet_residuals -
planet_residual_median))
eb_fit = lcfit.gaussianeb_fit_magseries(ftimes, fmags, ferrs,
ebfitparams,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
ebfit_finalparams = eb_fit['fitinfo']['finalparams']
ebfit_chisq = eb_fit['fitchisq']
ebfit_redchisq = eb_fit['fitredchisq']
if ebfit_finalparams is not None:
eb_modelmags, _, _, ebpmags, _ = eclipses.invgauss_eclipses_func(
ebfit_finalparams,
ftimes,
fmags,
ferrs
)
else:
LOGERROR('LC fit to EB model failed, using initial params')
eb_modelmags, _, _, ebpmags, _ = eclipses.invgauss_eclipses_func(
ebfitparams,
ftimes,
fmags,
ferrs
)
eb_residuals = eb_modelmags - ebpmags
eb_residual_median = np.median(eb_residuals)
eb_residual_mad = np.median(np.abs(eb_residuals - eb_residual_median))
# do the EB fit with 2 x period
ebfitparams[0] = ebfitparams[0]*2.0
eb_fitx2 = lcfit.gaussianeb_fit_magseries(ftimes, fmags, ferrs,
ebfitparams,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
ebfitx2_finalparams = eb_fitx2['fitinfo']['finalparams']
ebfitx2_chisq = eb_fitx2['fitchisq']
ebfitx2_redchisq = eb_fitx2['fitredchisq']
if ebfitx2_finalparams is not None:
ebx2_modelmags, _, _, ebx2pmags, _ = eclipses.invgauss_eclipses_func(
ebfitx2_finalparams,
ftimes,
fmags,
ferrs
)
else:
LOGERROR('LC fit to EB model with 2xP failed, using initial params')
ebx2_modelmags, _, _, ebx2pmags, _ = eclipses.invgauss_eclipses_func(
ebfitparams,
ftimes,
fmags,
ferrs
)
ebx2_residuals = ebx2_modelmags - ebx2pmags
ebx2_residual_median = np.median(ebx2_residuals)
ebx2_residual_mad = np.median(np.abs(ebx2_residuals -
ebx2_residual_median))
# update the outdict
outdict.update({
'planet_fitparams':planetfit_finalparams,
'planet_chisq':planetfit_chisq,
'planet_redchisq':planetfit_redchisq,
'planet_residual_median':planet_residual_median,
'planet_residual_mad':planet_residual_mad,
'planet_residual_mad_over_lcmad':(
planet_residual_mad/lightcurve_mad
),
'eb_fitparams':ebfit_finalparams,
'eb_chisq':ebfit_chisq,
'eb_redchisq':ebfit_redchisq,
'eb_residual_median':eb_residual_median,
'eb_residual_mad':eb_residual_mad,
'eb_residual_mad_over_lcmad':(
eb_residual_mad/lightcurve_mad
),
'ebx2_fitparams':ebfitx2_finalparams,
'ebx2_chisq':ebfitx2_chisq,
'ebx2_redchisq':ebfitx2_redchisq,
'ebx2_residual_median':ebx2_residual_median,
'ebx2_residual_mad':ebx2_residual_mad,
'ebx2_residual_mad_over_lcmad':(
ebx2_residual_mad/lightcurve_mad
),
})
return outdict
def periodogram_features(pgramlist, times, mags, errs,
sigclip=10.0,
pdiff_threshold=1.0e-4,
sidereal_threshold=1.0e-4,
sampling_peak_multiplier=5.0,
sampling_startp=None,
sampling_endp=None,
verbose=True):
'''This calculates various periodogram features (for each periodogram).
pgramlist is a list of dicts returned by any of the periodfinding methods in
astrobase.periodbase. This can also be obtained from the resulting pickle
from the lcproc.run_pf function. It might be a good idea to make pgramlist
a list of periodogram lists, one per magnitude column, to test periodic
variability across all magnitude columns (e.g. period diffs between EPD and
TFA mags).
times, mags, errs are from the object's light curve. These are used to
recalculate the sampling L-S periodogram if one is not present in
pgramlist. If it's present, these can all be set to None.
sigclip is the sigclip to apply to the light curve.
pdiff_threshold is the max diff between periods to consider them the same.
sidereal_threshold is the max diff between any of the periods and the
sidereal day periods to consider them the same.
sampling_peak_multiplier is the minimum ratio of a period's normalized
periodogram peak to the normalized sampling periodogram peak at the same
period required to accept the period as possibly real.
sampling_startp and sampling_endp are provided if the pgramlist doesn't have
a spectral window LSP and this must be obtained from the times, mags, errs
directly by running periodbase.specwindow_lsp.
'''
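# Sketch of the comparison performed below (a restatement, not new
# functionality): both the periodogram and the sampling (spectral window)
# periodogram are normalized by their peak-to-peak range, and a best period
# bp passes only if
#   normalized_pgram_peak(bp) / normalized_sampling_peak(bp)
# exceeds sampling_peak_multiplier.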
# run the sampling peak periodogram if necessary
pfmethodlist = [pgram['method'] for pgram in pgramlist]
if 'win' not in pfmethodlist:
# get the finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# get nonzero errors
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
sampling_lsp = specwindow_lsp(ftimes, fmags, ferrs,
startp=sampling_startp,
endp=sampling_endp,
sigclip=sigclip,
verbose=verbose)
else:
sampling_lsp = pgramlist[pfmethodlist.index('win')]
# get the normalized sampling periodogram peaks
normalized_sampling_lspvals = (
sampling_lsp['lspvals']/(np.nanmax(sampling_lsp['lspvals']) -
np.nanmin(sampling_lsp['lspvals']))
)
normalized_sampling_periods = sampling_lsp['periods']
# go through the periodograms and calculate normalized peak height of best
# periods over the normalized peak height of the sampling periodogram at the
# same periods
for pfm, pgram in zip(pfmethodlist, pgramlist):
if pfm == 'pdm':
best_peak_sampling_ratios = []
close_to_sidereal_flag = []
periods = pgram['periods']
peaks = pgram['lspvals']
normalized_peaks = (1.0 - peaks)/(np.nanmax(1.0 - peaks) -
np.nanmin(1.0 - peaks))
# get the best period normalized peaks
if pgram['nbestperiods'] is None:
LOGERROR('no period results for method: %s' % pfm)
continue
for bp in pgram['nbestperiods']:
if np.isfinite(bp):
#
# first, get the normalized peak ratio
#
thisp_norm_pgrampeak = normalized_peaks[periods == bp]
thisp_sampling_pgramind = (
np.abs(normalized_sampling_periods -
bp) < pdiff_threshold
)
thisp_sampling_peaks = normalized_sampling_lspvals[
thisp_sampling_pgramind
]
if thisp_sampling_peaks.size > 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/np.mean(thisp_sampling_peaks)
)
elif thisp_sampling_peaks.size == 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/thisp_sampling_peaks
)
else:
LOGERROR('sampling periodogram is not defined '
'at period %.5f, '
'skipping calculation of ratio' % bp)
thisp_sampling_ratio = np.nan
best_peak_sampling_ratios.append(thisp_sampling_ratio)
#
# next, see if the best periods are close to a sidereal day
# or any multiples thereof
#
# use the absolute fractional difference so only periods actually close
# to the sidereal values get flagged
sidereal_a_ratio = np.abs(bp - 1.0027379)/bp
sidereal_b_ratio = np.abs(bp - 0.9972696)/bp
if ((sidereal_a_ratio < sidereal_threshold) or
(sidereal_b_ratio < sidereal_threshold)):
close_to_sidereal_flag.append(True)
else:
close_to_sidereal_flag.append(False)
else:
LOGERROR('period is nan')
best_peak_sampling_ratios.append(np.nan)
close_to_sidereal_flag.append(False)
# update the pgram with these
pgram['nbestpeakratios'] = best_peak_sampling_ratios
pgram['siderealflags'] = close_to_sidereal_flag
elif pfm != 'win':
best_peak_sampling_ratios = []
close_to_sidereal_flag = []
periods = pgram['periods']
peaks = pgram['lspvals']
normalized_peaks = peaks/(np.nanmax(peaks) - np.nanmin(peaks))
# get the best period normalized peaks
if pgram['nbestperiods'] is None:
LOGERROR('no period results for method: %s' % pfm)
continue
#
# first, get the best period normalized peaks
#
for bp in pgram['nbestperiods']:
if np.isfinite(bp):
thisp_norm_pgrampeak = normalized_peaks[periods == bp]
thisp_sampling_pgramind = (
np.abs(normalized_sampling_periods -
bp) < pdiff_threshold
)
thisp_sampling_peaks = normalized_sampling_lspvals[
thisp_sampling_pgramind
]
if thisp_sampling_peaks.size > 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/np.mean(thisp_sampling_peaks)
)
elif thisp_sampling_peaks.size == 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/thisp_sampling_peaks
)
else:
LOGERROR('sampling periodogram is not defined '
'at period %.5f, '
'skipping calculation of ratio' % bp)
thisp_sampling_ratio = np.nan
best_peak_sampling_ratios.append(thisp_sampling_ratio)
#
# next, see if the best periods are close to a sidereal day
# or any multiples thereof
#
# use the absolute fractional difference so only periods actually close
# to the sidereal values get flagged
sidereal_a_ratio = np.abs(bp - 1.0027379)/bp
sidereal_b_ratio = np.abs(bp - 0.9972696)/bp
if ((sidereal_a_ratio < sidereal_threshold) or
(sidereal_b_ratio < sidereal_threshold)):
close_to_sidereal_flag.append(True)
else:
close_to_sidereal_flag.append(False)
else:
LOGERROR('period is nan')
best_peak_sampling_ratios.append(np.nan)
close_to_sidereal_flag.append(False)
# update the pgram with these
pgram['nbestpeakratios'] = best_peak_sampling_ratios
pgram['siderealflags'] = close_to_sidereal_flag
#
# done with calculations, get the features we need
#
# get the best periods across all the period finding methods
all_bestperiods = np.concatenate(
[x['nbestperiods']
for x in pgramlist if
(x['method'] != 'win' and x['nbestperiods'] is not None)]
)
all_bestperiod_diffs = np.array(
[abs(a-b) for a,b in combinations(all_bestperiods,2)]
)
all_sampling_ratios = np.concatenate(
[x['nbestpeakratios']
for x in pgramlist if
(x['method'] != 'win' and x['nbestperiods'] is not None)]
)
all_sidereal_flags = np.concatenate(
[x['siderealflags']
for x in pgramlist if
(x['method'] != 'win' and x['nbestperiods'] is not None)]
)
# bestperiods_n_abovesampling - number of top period estimates with peaks
# that are at least sampling_peak_multiplier x
# sampling peak height at the same period
bestperiods_n_abovesampling = (
all_sampling_ratios[all_sampling_ratios >
sampling_peak_multiplier]
).size
# bestperiods_n_sidereal - number of top period estimates that are
# consistent with a 1 day period (1.0027379 and
# 0.9972696 actually, for sidereal day period)
bestperiods_n_sidereal = all_sidereal_flags.sum()
# bestperiods_diffn_threshold - the number of cross-wise period diffs from
# all period finders that fall below the
# pdiff_threshold
bestperiods_diffn_threshold = (
all_bestperiod_diffs[all_bestperiod_diffs < pdiff_threshold]
).size
resdict = {
'bestperiods_n_abovesampling':bestperiods_n_abovesampling,
'bestperiods_n_sidereal':bestperiods_n_sidereal,
'bestperiods_diffn_threshold':bestperiods_diffn_threshold
}
return resdict
def phasedlc_features(times,
mags,
errs,
period,
nbrtimes=None,
nbrmags=None,
nbrerrs=None):
'''This calculates various phased LC features for the object.
If nbrtimes, nbrmags, and nbrerrs are all not None, they should be ndarrays
with the times, mags, and errs of this object's closest neighbor (close
within some small multiple of the telescope FWHM, to check for blending).
In that case, extra features based on the neighbor's phased LC will also be
calculated.
'''
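# Note on the folding used below: lcmath.phase_magseries_with_errs performs
# the usual phase fold, roughly phase = ((times - epoch) / period) % 1.0,
# with epoch taken as ftimes.min().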
# get the finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# get nonzero errors
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# only operate on LC if enough points
if ftimes.size > 49:
# get the MAD of the unphased light curve
lightcurve_median = np.median(fmags)
lightcurve_mad = np.median(np.abs(fmags - lightcurve_median))
# get p2p for raw lightcurve
p2p_unphasedlc = lightcurve_ptp_measures(ftimes, fmags, ferrs)
inveta_unphasedlc = 1.0/p2p_unphasedlc['eta_normal']
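# eta is assumed here to be the von Neumann ratio (mean squared successive
# difference over the variance); 1/eta is used since phase-coherent light
# curves have small eta and hence large 1/eta.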
# phase the light curve with the given period, assume epoch is
# times.min()
phasedlc = lcmath.phase_magseries_with_errs(ftimes, fmags, ferrs,
period, ftimes.min(),
wrap=False)
phase = phasedlc['phase']
pmags = phasedlc['mags']
perrs = phasedlc['errs']
# get ptp measures for best period
ptp_bestperiod = lightcurve_ptp_measures(phase,pmags,perrs)
# phase the light curve with the given periodx2, assume epoch is
# times.min()
phasedlc = lcmath.phase_magseries_with_errs(ftimes, fmags, ferrs,
period*2.0, ftimes.min(),
wrap=False)
phasex2 = phasedlc['phase']
pmagsx2 = phasedlc['mags']
perrsx2 = phasedlc['errs']
# get ptp measures for best periodx2
ptp_bestperiodx2 = lightcurve_ptp_measures(phasex2,pmagsx2,perrsx2)
# eta_phasedlc_bestperiod - calculate eta for the phased LC with best
# period
inveta_bestperiod = 1.0/ptp_bestperiod['eta_normal']
# eta_phasedlc_bestperiodx2 - calculate eta for the phased LC with best
# period x 2
inveta_bestperiodx2 = 1.0/ptp_bestperiodx2['eta_normal']
# eta_phased_ratio_eta_raw - eta for best period phased LC / eta for raw
# LC
inveta_ratio_phased_unphased = inveta_bestperiod/inveta_unphasedlc
# eta_phasedx2_ratio_eta_raw - eta for best periodx2 phased LC/eta for
# raw LC
inveta_ratio_phasedx2_unphased = inveta_bestperiodx2/inveta_unphasedlc
# freq_model_max_delta_mags - absval of magdiff btw model phased LC
# maxima using period x 2. look at points
# more than 10 points away for maxima
phasedx2_maxval_ind = argrelmax(pmagsx2, order=10)
if phasedx2_maxval_ind[0].size > 1:
phasedx2_magdiff_maxval = (
np.max(np.abs(np.diff(pmagsx2[phasedx2_maxval_ind[0]])))
)
else:
phasedx2_magdiff_maxval = np.nan
# freq_model_min_delta_mags - absval of magdiff btw model phased LC
# minima using period x 2. look at points
# more than 10 points away for minima
phasedx2_minval_ind = argrelmin(pmagsx2, order=10)
if phasedx2_minval_ind[0].size > 1:
phasedx2_magdiff_minval = (
np.max(np.abs(np.diff(pmagsx2[phasedx2_minval_ind[0]])))
)
else:
phasedx2_magdiff_minval = np.nan
# p2p_scatter_pfold_over_mad - MAD of successive absolute mag diffs of
# the phased LC using best period divided
# by the MAD of the unphased LC
phased_magdiff = np.diff(pmags)
phased_magdiff_median = np.median(phased_magdiff)
phased_magdiff_mad = np.median(np.abs(phased_magdiff -
phased_magdiff_median))
phasedx2_magdiff = np.diff(pmagsx2)
phasedx2_magdiff_median = np.median(phasedx2_magdiff)
phasedx2_magdiff_mad = np.median(np.abs(phasedx2_magdiff -
phasedx2_magdiff_median))
phased_magdiffmad_unphased_mad_ratio = phased_magdiff_mad/lightcurve_mad
phasedx2_magdiffmad_unphased_mad_ratio = (
phasedx2_magdiff_mad/lightcurve_mad
)
# get the percentiles of the slopes of the adjacent mags for phasedx2
phasedx2_slopes = np.diff(pmagsx2)/np.diff(phasex2)
phasedx2_slope_percentiles = np.ravel(np.nanpercentile(phasedx2_slopes,
[10.0,90.0]))
phasedx2_slope_10percentile = phasedx2_slope_percentiles[0]
phasedx2_slope_90percentile = phasedx2_slope_percentiles[1]
# check if nbrtimes, _mags, _errs are available
if ((nbrtimes is not None) and
(nbrmags is not None) and
(nbrerrs is not None)):
# get the finite values
nfinind = (np.isfinite(nbrtimes) &
np.isfinite(nbrmags) &
np.isfinite(nbrerrs))
nftimes, nfmags, nferrs = (nbrtimes[nfinind],
nbrmags[nfinind],
nbrerrs[nfinind])
# get nonzero errors
nnzind = np.nonzero(nferrs)
nftimes, nfmags, nferrs = (nftimes[nnzind],
nfmags[nnzind],
nferrs[nnzind])
# only operate on LC if enough points
if nftimes.size > 49:
# get the phased light curve using the same period and epoch as
# the actual object
nphasedlc = lcmath.phase_magseries_with_errs(
nftimes, nfmags, nferrs,
period, ftimes.min(),
wrap=False
)
# normalize the object and neighbor phased mags
norm_pmags = pmags - np.median(pmags)
norm_npmags = nphasedlc['mags'] - np.median(nphasedlc['mags'])
# phase bin them both so we can compare LCs easily
phabinned_objectlc = lcmath.phase_bin_magseries(phase,
norm_pmags,
minbinelems=1)
phabinned_nbrlc = lcmath.phase_bin_magseries(nphasedlc['phase'],
norm_npmags,
minbinelems=1)
absdiffs = []
for pha, phamag in zip(phabinned_objectlc['binnedphases'],
phabinned_objectlc['binnedmags']):
try:
# get the matching phase from the neighbor phased LC
phadiffs = np.abs(pha - phabinned_nbrlc['binnedphases'])
minphadiffind = np.where(
(phadiffs < 1.0e-4) &
(phadiffs == np.min(phadiffs))
)
absmagdiff = np.abs(
phamag - phabinned_nbrlc['binnedmags'][
minphadiffind
]
)
if absmagdiff.size > 0:
absdiffs.append(absmagdiff.min())
except Exception:
continue
# sum of absdiff between the normalized to 0.0 phased LC of this
# object and that of the closest neighbor phased with the same
# period and epoch
if len(absdiffs) > 0:
sum_nbr_phasedlc_magdiff = sum(absdiffs)
else:
sum_nbr_phasedlc_magdiff = np.nan
else:
sum_nbr_phasedlc_magdiff = np.nan
else:
sum_nbr_phasedlc_magdiff = np.nan
return {
'inveta_unphasedlc':inveta_unphasedlc,
'inveta_bestperiod':inveta_bestperiod,
'inveta_bestperiodx2':inveta_bestperiodx2,
'inveta_ratio_phased_unphased':inveta_ratio_phased_unphased,
'inveta_ratio_phasedx2_unphased':inveta_ratio_phasedx2_unphased,
'phasedx2_magdiff_maxima':phasedx2_magdiff_maxval,
'phasedx2_magdiff_minima':phasedx2_magdiff_minval,
'phased_unphased_magdiff_mad_ratio':(
phased_magdiffmad_unphased_mad_ratio
),
'phasedx2_unphased_magdiff_mad_ratio':(
phasedx2_magdiffmad_unphased_mad_ratio
),
'phasedx2_slope_10percentile':phasedx2_slope_10percentile,
'phasedx2_slope_90percentile':phasedx2_slope_90percentile,
'sum_nbr_phasedlc_magdiff':sum_nbr_phasedlc_magdiff,
}
else:
return {
'inveta_unphasedlc':np.nan,
'inveta_bestperiod':np.nan,
'inveta_bestperiodx2':np.nan,
'inveta_ratio_phased_unphased':np.nan,
'inveta_ratio_phasedx2_unphased':np.nan,
'phasedx2_magdiff_maxima':np.nan,
'phasedx2_magdiff_minima':np.nan,
'phased_unphased_magdiff_mad_ratio':np.nan,
'phasedx2_unphased_magdiff_mad_ratio':np.nan,
'phasedx2_slope_10percentile':np.nan,
'phasedx2_slope_90percentile':np.nan,
'sum_nbr_phasedlc_magdiff':np.nan,
}
|
/* Author: Romain "Artefact2" Dal Maso <[email protected]> */
/* This program is free software. It comes without any warranty, to the
* extent permitted by applicable law. You can redistribute it and/or
* modify it under the terms of the Do What The Fuck You Want To Public
* License, Version 2, as published by Sam Hocevar. See
* http://sam.zoy.org/wtfpl/COPYING for more details. */
#define warn(...) do { \
char msg[256]; \
snprintf(msg, 256, __VA_ARGS__); \
fprintf(stderr, "%s(): %s\n", __func__, msg); \
} while(0)
#define fatal(...) do { \
warn(__VA_ARGS__); \
exit(1); \
} while(0)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
#include <gsl/gsl_sf_gamma.h>
#define MCTS (1 << 15)
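/* MCTS = 2^15 = 32768 Monte Carlo trials, used to build the empirical
 * distribution of the ECDF distance under the fair-die hypothesis. */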
void read_rolls(unsigned int, unsigned int* rcount, unsigned int* n);
unsigned int ecdf_distance(unsigned int, unsigned int*);
void ecdf_gen_mc_table(unsigned int sides, unsigned int n, unsigned int count, unsigned int*);
bool find_in_sorted_array(unsigned int val, unsigned int count, unsigned int* array, unsigned int* low, unsigned int* high);
double chisq_test(unsigned int sides, unsigned int n, unsigned int*);
void confidence_test(unsigned int sides, unsigned int n, unsigned int*);
int main(int argc, char** argv) {
if(argc != 2) {
fprintf(stderr, "Usage: %s <num-sides> < rolls.txt\n", argv[0]);
exit(2);
}
unsigned int sides = strtol(argv[1], NULL, 10);
if(sides < 2) {
fatal("must have at least 2 sides, got %d", sides);
}
unsigned int rcount[sides];
unsigned int n;
read_rolls(sides, rcount, &n);
if(n % sides) {
fatal("sample size (%d) not a multiple of sides (%d)", n, sides);
}
printf("SmpSize: n=%d\n", n);
unsigned int ecdf_table[MCTS];
ecdf_gen_mc_table(sides, n, MCTS, ecdf_table);
unsigned int dist = ecdf_distance(sides, rcount);
unsigned int lo, hi;
bool found;
found = find_in_sorted_array(dist, MCTS, ecdf_table, &lo, &hi);
printf("ECDF: p%s%5.4f\n", found ? "=" : "<", found ? (1.0 - (double)(lo + hi) / (double)(2 * MCTS)) : ((double)(MCTS - lo) / (double)MCTS));
printf("ChiSq: p=%5.4f\n", chisq_test(sides, n, rcount));
printf("ConfInt99:");
confidence_test(sides, n, rcount);
printf("\n");
}
void read_rolls(unsigned int sides, unsigned int* rcount, unsigned int* n) {
char buf[16];
unsigned int roll;
memset(rcount, 0, sides * sizeof(unsigned int));
*n = 0;
while(fgets(buf, 16, stdin) != NULL) {
if(buf[0] == '\n' || buf[0] == '\0') continue;
roll = strtol(buf, NULL, 10);
if(roll < 1 || roll > sides) {
warn("ignoring roll %d", roll);
continue;
}
++(rcount[roll - 1]);
++(*n);
}
}
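/* Kolmogorov-Smirnov-style statistic: the maximum absolute difference
 * between the observed and ideal (uniform) cumulative roll counts. */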
unsigned int ecdf_distance(unsigned int sides, unsigned int* rcount) {
unsigned int cumulative = 0, ideal_cumulative, ideal_step, i, distance, tdist;
for(i = 0; i < sides; ++i) {
cumulative += rcount[i];
}
ideal_cumulative = cumulative;
ideal_step = ideal_cumulative / sides;
distance = 0;
for(i = 0; i < sides; ++i) {
cumulative -= rcount[i];
ideal_cumulative -= ideal_step;
tdist = (cumulative >= ideal_cumulative) ? (cumulative - ideal_cumulative) : (ideal_cumulative - cumulative);
if(tdist > distance) distance = tdist;
}
return distance;
}
int cmpuint(const void* a, const void* b) {
unsigned int ua = *(const unsigned int*)a, ub = *(const unsigned int*)b;
/* avoid wraparound of unsigned subtraction */
return (ua > ub) - (ua < ub);
}
void ecdf_gen_mc_table(unsigned int sides, unsigned int n, unsigned int count, unsigned int* table) {
unsigned int rcount[sides];
unsigned int i, j;
for(i = 0; i < count; ++i) {
memset(rcount, 0, sides * sizeof(unsigned int));
for(j = 0; j < n; ++j) {
++(rcount[rand() % sides]);
}
table[i] = ecdf_distance(sides, rcount);
}
qsort(table, count, sizeof(unsigned int), cmpuint);
}
bool find_in_sorted_array(unsigned int val, unsigned int count, unsigned int* array, unsigned int* low, unsigned int* high) {
if(array[0] > val) {
*low = *high = 0;
return false;
} else if(array[count - 1] < val) {
*low = *high = count - 1;
return false;
}
unsigned int mid;
*low = 0;
*high = count - 1;
while(*high > *low) {
mid = *low + (*high - *low) / 2;
if(array[mid] == val) {
*low = *high = mid;
while(*low < count && array[*low] == val) --(*low); /* unsigned wrap at 0 terminates the scan */
++(*low);
while(*high < count && array[*high] == val) ++(*high);
--(*high);
return true;
} else if(array[mid] > val) {
*high = mid;
} else {
*low = mid;
}
}
return false;
}
double chisq_test(unsigned int sides, unsigned int n, unsigned int* rcount) {
unsigned int ideal = n / sides;
double chisq = 0.0;
for(unsigned int i = 0; i < sides; ++i) {
chisq += (rcount[i] - ideal) * (rcount[i] - ideal);
}
chisq /= (double)ideal;
return 1.0 - gsl_sf_gamma_inc_P((double)(sides - 1) / (double)2, chisq / (double)2);
}
void confidence_test(unsigned int sides, unsigned int n, unsigned int* rcount) {
double f, amp, ideal = 1.0 / (double)sides;
bool anomalies = false;
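/* 2.575 is approximately the two-sided 99% normal quantile; amp is the
 * half-width of a Wald confidence interval for each face's frequency. */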
for(unsigned int i = 0; i < sides; ++i) {
f = (double)rcount[i] / (double)n;
amp = 2.575 * sqrt(f * (1.0 - f) / (double)n);
if(ideal > f + amp) {
printf(" %d-", i+1);
anomalies = true;
} else if(ideal < f - amp) {
printf(" %d+", i+1);
anomalies = true;
}
}
if(!anomalies) {
printf(" OK");
}
}
|
module Main where
import Numeric.NLOPT
-- import Numeric.LinearAlgebra (dot)
import Numeric.LinearAlgebra (fromList, toList)
main :: IO ()
main = do
let -- objf x = x `dot` x + 22
-- objf x = sum (toList x)
objf x = let [x1, x2, x3] = toList x
in 3 * x1 * x1 + 2 * x1 * x2 + x1 * x3
+ 2.5 * x2 * x2 + 2 * x2 * x3 + 2 * x3 * x3
- 8 * x1 - 3 * x2 - 3 * x3
stop = ObjectiveRelativeTolerance 1e-9 :| []
-- algorithm = SBPLX objf [] Nothing
algorithm = NELDERMEAD objf [] Nothing
subproblem = LocalProblem 3 stop algorithm
x0 = fromList [0, 0, 0]
solU = minimizeLocal subproblem x0
print solU
-- define constraint function:
let -- constraintf x = sum (toList x) - 1.0
-- constraintf x = let [x1, x2] = toList x
-- in x1 * x1 + x2 * x2 - 2
c1f x = let [x1, _, x3] = toList x
in x1 + x3 - 3
c2f x = let [_, x2, x3] = toList x
in x2 + x3
-- define constraint object to pass to the algorithm:
let -- constraint = EqualityConstraint (Scalar constraintf) 1e-6
c1 = EqualityConstraint (Scalar c1f) 1e-6
c2 = EqualityConstraint (Scalar c2f) 1e-6
-- problem = AugLagProblem [constraint] [] (AUGLAG_EQ_LOCAL subproblem)
problem = AugLagProblem [c1, c2] [] (AUGLAG_EQ_LOCAL subproblem)
-- expected constrained optimum: x = (2, -1, 1)
solC = minimizeAugLag problem x0
print solC
putStrLn "-- Done."
|
[STATEMENT]
lemma (in Corps) Vr_has_poss_elem:"valuation K v \<Longrightarrow>
\<exists>x\<in>carrier (Vr K v) - {\<zero>\<^bsub>Vr K v\<^esub>}. 0 < v x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. valuation K v \<Longrightarrow> \<exists>x\<in>carrier (Vr K v) - {\<zero>\<^bsub>Vr K v\<^esub>}. 0 < v x
[PROOF STEP]
apply (frule val_Pg[of v], erule conjE,
frule Lv_pos[of v], drule sym,
subst Vr_0_f_0, assumption+)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; Pg K v \<in> carrier K - {\<zero>}; 0 < Lv K v; Lv K v = v (Pg K v)\<rbrakk> \<Longrightarrow> \<exists>x\<in>carrier (Vr K v) - {\<zero>}. 0 < v x
[PROOF STEP]
apply (frule aeq_ale[of "Lv K v" "v (Pg K v)"],
frule aless_le_trans[of "0" "Lv K v" "v (Pg K v)"], assumption+,
frule val_poss_mem_Vr[of v "Pg K v"],
simp, assumption, blast)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
Formal statement is: lemma local_lipschitz_continuous_on: assumes local_lipschitz: "local_lipschitz T X f" assumes "t \<in> T" shows "continuous_on X (f t)" Informal statement is: If $f$ is a local Lipschitz function, then for each $t \in T$, the function $f_t$ is continuous. |
/-
Copyright (c) 2023 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Kevin Buzzard
-/
import algebra.algebra.tower
import ring_theory.norm -- for norms
import ring_theory.trace -- for traces
/-
# Extensions of extensions
The problem with making every field a type and using `algebra` to
fix the embeddings from a smaller field to a bigger one, is that
when you have three or more extensions, you need to have a way of
saying that those maps are compatible.
On paper we might write "Let `E ⊆ F ⊆ K` be a tower of fields"
but in Lean we make each pair into an `algebra` structure
and now we want to somehow explain that the `algebra_map` from
`E` to `F` composed with the one from `F` to `K` equals the
one from `E` to `K`. We assert this compatibility with
the `is_scalar_tower E F K` typeclass. Here's the proof
that in the presence of this prop-valued hypothesis, the
diagram commutes.
-/
example (E F K : Type) [field E] [field F] [field K] [algebra E F] [algebra F K]
[algebra E K] [is_scalar_tower E F K] (e : E) :
algebra_map E K e = algebra_map F K (algebra_map E F e) := is_scalar_tower.algebra_map_apply E F K e
/-
For me, what is surprising is that the definition of `is_scalar_tower` is
not at all what one would expect. The idea is due to Kenny Lau (a former Imperial
undergraduate) in 2020; Eric Wieser wrote a paper on how the system works in 2021
https://arxiv.org/abs/2108.10700 (this is Eric on the Discord). Guess what the
definition is and then right click on `is_scalar_tower` and jump to definition
to find out the truth (which might surprise you).
Now we have three compatible field extensions we can ask how the basic constructions
such as degree, norm and trace behave.
-/
variables (E F K : Type) [field E] [field F] [field K] [algebra E F] [algebra F K]
[algebra E K] [is_scalar_tower E F K]
-- There is a mathematically correct tower law, involving cardinals:
example : module.rank E F * module.rank F K = module.rank E K := dim_mul_dim E F K
-- But this is a pain to use, because cardinals are not a particularly well-behaved
-- object. So let's put in a finite-dimensional hypothesis and use `finrank`.
open finite_dimensional
-- Tower law for dimensions, natural number case.
example [finite_dimensional E F] : finrank E F * finrank F K = finrank E K := finrank_mul_finrank E F K
/- Note that if K/F is infinite-dimensional then `finrank F K = 0` as does `finrank E K`.
The same argument should apply if F/E is infinite-dimensional; this seems to be a minor
glitch in mathlib!
-/
-- Tricky exercise: look at proof of `finrank_mul_finrank` in mathlib and see if you
-- can generalise it by removing the `[finite_dimensional E F]` condition in the case
-- where everything is a field.
example : finrank E F * finrank F K = finrank E K :=
begin
sorry,
end
-- trace of trace is trace in a tower
example [finite_dimensional E F] [finite_dimensional F K] (k : K) :
(algebra.trace E F) ((algebra.trace F K) k) = (algebra.trace E K) k := algebra.trace_trace k
-- I can't find the norm version though :-/
|
\documentclass[../main.tex]{subfiles}
\begin{document}
\subsection{Some section}
This is a reference \citep{wallace_immunopathology_2014}
\lipsum[1-1]
\begin{figure}[h]
\includegraphics[width = 1\textwidth]{Figures/Fig1.png}
\centering
\caption{\textit{Wow!}, nice dog}
\end{figure}
\end{document}
|
{-# LANGUAGE DeriveGeneric #-}
{-|
Module : ODMatrix.SmithDecomposition.SparseMat
Description : Sparse representation of a matrix.
-}
module ODMatrix.SmithDecomposition.SparseMat (
SparseMat(..)
, denseSparseMat
, sparseDenseMat
, fixIndexes
, makeAssoc
) where
import GHC.Generics (Generic)
--import Data.Aeson (ToJSON, FromJSON)
import Numeric.LinearAlgebra as L hiding (rows, cols)
import Numeric.LinearAlgebra.Data as LD (toList)
-- | Sparse representation of a matrix.
data SparseMat = SparseMat {
rows :: Int
, cols :: Int
, cells :: AssocMatrix
} deriving (Generic, Show)
--instance ToJSON SparseMat
--instance FromJSON SparseMat
-- | Transform a sparse matrix into a regular one.
denseSparseMat :: SparseMat -> Matrix Double
denseSparseMat m = assoc (rows m, cols m) 0 (fixIndexes $ cells m)
-- | Transform a regular matrix into a sparse one.
sparseDenseMat :: Matrix Double -> SparseMat
sparseDenseMat m = SparseMat r c cs
where (r, c) = size m
vals = toList . flatten $ m
ids = [(i,j) | i <- [0..r-1], j <- [0..c-1]]
cs = filter (\(_,v) -> v /= 0) $ zip ids vals
-- * Support functions
-- | Adjust the indexes of the association matrix from 1-based to 0-based.
-- This is needed by the linear algebra library.
fixIndexes :: (Num a) => [((a,a),b)] -> [((a,a),b)]
fixIndexes = map (\((a,b),v) -> ((a-1,b-1),v))
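-- For example: fixIndexes [((1,1),2.0),((2,3),5.0)] == [((0,0),2.0),((1,2),5.0)]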
-- | Given a constant value and a list of indexes, construct an association matrix containing only that value.
makeAssoc :: Double -> [(Int,Int)] -> AssocMatrix
makeAssoc v = fixIndexes . map (flip (,) v)
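-- For example: makeAssoc 1.0 [(1,1),(2,2)] == [((0,0),1.0),((1,1),1.0)]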
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
|
[STATEMENT]
lemma no_overlap_ConsI: "check_no_overlap2 \<gamma> (x#xs) \<Longrightarrow> check_no_overlap2 \<gamma> xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. check_no_overlap2 \<gamma> (x # xs) \<Longrightarrow> check_no_overlap2 \<gamma> xs
[PROOF STEP]
unfolding check_no_overlap2_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>a\<in>set (x # xs). \<forall>b\<in>set (x # xs). a \<noteq> b \<and> ofe_prio a = ofe_prio b \<longrightarrow> \<not> (\<exists>p\<in>UNIV. \<gamma> (ofe_fields a) p \<and> \<gamma> (ofe_fields b) p) \<Longrightarrow> \<forall>a\<in>set xs. \<forall>b\<in>set xs. a \<noteq> b \<and> ofe_prio a = ofe_prio b \<longrightarrow> \<not> (\<exists>p\<in>UNIV. \<gamma> (ofe_fields a) p \<and> \<gamma> (ofe_fields b) p)
[PROOF STEP]
by simp
|
open import Prelude
module Implicits.Oliveira.Deterministic.Decidable where
open import Data.Fin.Substitution
open import Implicits.Oliveira.Types
open import Implicits.Oliveira.Terms
open import Implicits.Oliveira.Contexts
open import Implicits.Oliveira.Substitutions
open import Implicits.Oliveira.Substitutions.Lemmas
open import Implicits.Oliveira.Deterministic.Resolution
open import Implicits.Oliveira.Types.Unification
open import Data.Star hiding (map)
MICtx : ℕ → ℕ → Set
MICtx m ν = List (MetaType m ν)
private
module M = MetaTypeMetaSubst
module T = MetaTypeTypeSubst
{-
{-# NO_TERMINATION_CHECK #-}
_,_amatch_ : ∀ {m ν} → (MetaType m ν) → (ρs : MICtx m ν) → (τ : SimpleType ν) → Maybe (ICtx ν)
(a ⇒ b) , ρs amatch τ = b , a List.∷ ρs amatch τ
∀' x , ρs amatch τ = open-meta x , (List.map meta-weaken ρs) amatch τ
(simpl x) , ρs amatch τ with mgu (simpl x) τ
(simpl x) , ρs amatch τ | just (u , proj₂) =
just $ List.map (λ r → from-meta $ substitute (asub u) r) ρs
(simpl x) , ρs amatch τ | nothing = nothing
_match_ : ∀ {ν} → (Type ν) → (SimpleType ν) → Maybe (ICtx ν)
r match a = (to-meta {zero} r) , List.[] amatch a
{-# NO_TERMINATION_CHECK #-}
lem-a6 : ∀ {m ν} {ρs ρs'} (a : MetaType m ν) (τ : SimpleType ν) → a , ρs amatch τ ≡ just ρs' →
(∃ λ (u : AList ν m zero) → (from-meta $ substitute (asub u) a) ◁ τ)
lem-a6 {ρs = ρs} (a ⇒ b) τ m
with _,_amatch_ b (a List.∷ ρs) τ | inspect (_,_amatch_ b (a List.∷ ρs)) τ
lem-a6 (a ⇒ b) τ refl | ._ | reveal[ eq ] with lem-a6 b τ eq
lem-a6 (a ⇒ b) τ refl | ._ | reveal[ eq ] | u , b/u◁τ = u , (m-iabs b/u◁τ)
lem-a6 {ρs = ρs} (∀' a) τ m
with open-meta a , (List.map meta-weaken ρs) amatch τ | inspect (_,_amatch_ (open-meta a) (List.map meta-weaken ρs)) τ
lem-a6 (∀' a) τ refl | ._ | reveal[ eq ] with lem-a6 (open-meta a) τ eq
lem-a6 (∀' a) τ refl | ._ | reveal[ eq ] | (t' // x ◅ u , b/u◁τ) = {!!} -- {!u!} , (m-tabs {!!})
lem-a6 (simpl x) τ m with mgu (simpl x) τ | inspect (mgu (simpl x)) τ
lem-a6 (simpl x) τ refl | just (u , proj₂) | reveal[ eq ] =
u , subst (λ u → u ◁ τ) (sym $ mgu-unifies (simpl x) τ {!!}) m-simp
lem-a6 (simpl x) τ () | nothing
_match1st_ : ∀ {ν} (Δ : ICtx ν) → (a : SimpleType ν) → Maybe (Type ν × ICtx ν)
List.[] match1st a = nothing
(x List.∷ Δ) match1st a with x match a
(x List.∷ Δ) match1st a | just z = just (x , z)
(x List.∷ Δ) match1st a | nothing = Δ match1st a
_⊢alg_ : ∀ {ν n} (K : Ktx ν n) → (a : Type ν) → Dec (K ⊢ᵣ a)
(Γ , Δ) ⊢alg simpl x with Δ match1st x
(Γ , Δ) ⊢alg simpl x | just (proj₁ , proj₂) = {!!}
(Γ , Δ) ⊢alg simpl x | nothing = {!!}
K ⊢alg (a ⇒ b) with (a ∷K K) ⊢alg b
K ⊢alg (a ⇒ b) | yes p = yes $ r-iabs a p
K ⊢alg (a ⇒ b) | no ¬p = no (λ{ (r-iabs .a x) → ¬p x })
K ⊢alg ∀' a with (ktx-weaken K) ⊢alg a
K ⊢alg ∀' a | yes p = yes (r-tabs p)
K ⊢alg ∀' a | no ¬p = no (λ{ (r-tabs p) → ¬p p })
-}
module relational where
data _⊢_amatch_↠_ {m ν} : (ρs : MICtx m ν) → MetaType m ν → SimpleType ν → (ICtx ν) → Set where
mtc-tabs : ∀ {r ρs ρs' a} → (List.map meta-weaken ρs) ⊢ (open-meta r) amatch a ↠ ρs' →
ρs ⊢ ∀' r amatch a ↠ ρs'
mtc-iabs : ∀ {ρs ρs' a b c} → (a List.∷ ρs) ⊢ b amatch c ↠ ρs' → ρs ⊢ a ⇒ b amatch c ↠ ρs'
mtc-simp : ∀ {ρs a b} → (u : Unifiable (simpl a) b) →
ρs ⊢ (simpl a) amatch b ↠ (List.map (λ r → from-meta $ r M./ (asub (proj₁ u))) ρs)
_⊢match_↠_ : ∀ {ν} → (Type ν) → (SimpleType ν) → ICtx ν → Set
r ⊢match a ↠ ρs = List.[] ⊢ (to-meta {zero} r) amatch a ↠ ρs
data _⊢match1st_↠_ {ν} : List (Type ν) → (a : SimpleType ν) → ICtx ν → Set where
m1-head : ∀ {rs ρs} {r : Type ν} {a} → r ⊢match a ↠ ρs → (r List.∷ rs) ⊢match1st a ↠ ρs
m1-tail : ∀ {rs ρs} {r : Type ν} {a} → ¬ r ⊢match a ↠ ρs → rs ⊢match1st a ↠ ρs →
(r List.∷ rs) ⊢match1st a ↠ ρs
{-}
gather : ∀ {ν} {ρs ρs' : ICtx ν} {a} → ρs ⊢match1st a ↠ ρs' → List (Type ν)
gather (m1-head x) = gather' x
where
gather' : ∀ {m ν} {ρs : MICtx m ν} {r a} → ρs ⊢ r amatch a ↠→ List (Type ν)
gather' (mtc-tabs x) = gather' x
gather' (mtc-iabs x) = gather' x
gather' {ρs = ρs} (mtc-simp {a = a} {b = b} u) =
(List.map (λ r → from-meta $ substitute (asub (proj₁ u)) r) ρs)
gather (m1-tail ¬x xs) = gather xs
-}
{-# NO_TERMINATION_CHECK #-}
amatch : ∀ {m ν} → (ρs : MICtx m ν) → (r : MetaType m ν) (a : SimpleType ν) →
Dec (∃ λ ρs' → ρs ⊢ r amatch a ↠ ρs')
amatch ρs (simpl x) a with mgu (simpl x) a | inspect (mgu (simpl x)) a
amatch ρs (simpl x) a | just mgu | _ = yes (, mtc-simp mgu)
amatch ρs (simpl x) a | nothing | reveal[ eq ] =
no (λ{ (._ , mtc-simp p) → (mgu-sound (simpl x) a eq) p})
amatch ρs (b ⇒ c) a with amatch (b List.∷ ρs) c a
amatch ρs (b ⇒ c) a | yes (_ , p) = yes $ , mtc-iabs p
amatch ρs (b ⇒ c) a | no ¬p = no (λ{ (._ , mtc-iabs x) → ¬p (, x)})
amatch ρs (∀' r) a with amatch (List.map meta-weaken ρs) (open-meta r) a
amatch ρs (∀' r) a | yes (_ , p) = yes $ , mtc-tabs p
amatch ρs (∀' r) a | no ¬p = no (λ{ (._ , mtc-tabs x) → ¬p (, x) })
match : ∀ {ν} → (r : Type ν) → (a : SimpleType ν) → Dec (∃ λ ρs → r ⊢match a ↠ ρs)
match r a = amatch List.[] (to-meta {zero} r) a
_match1st_ : ∀ {ν} (Δ : ICtx ν) → (a : SimpleType ν) → Dec (∃ λ ρs → Δ ⊢match1st a ↠ ρs)
List.[] match1st a = no (λ{ (_ , ()) })
(x List.∷ xs) match1st a with match x a
(x List.∷ xs) match1st a | yes (ρs , p) = yes (ρs , m1-head p)
(x List.∷ xs) match1st a | no ¬p with xs match1st a
(x List.∷ xs) match1st a | no ¬p | yes p = yes ? -- (, m1-tail ¬p p)
(x List.∷ xs) match1st a | no ¬p-head | no ¬p-tail =
no (λ{ (_ , m1-head p-head) → ¬p-head (, p-head) ; (_ , m1-tail _ p-tail) → ¬p-tail (, p-tail) })
module Lemmas where
lem-A3 : ∀ {ν n} (K : Ktx ν n) {a r} → proj₂ K ⟨ a ⟩= r → r List.∈ (proj₂ K)
lem-A3 f = proj₁ ∘ FirstLemmas.first⟶∈
lem-A6-1 : ∀ {m ν} → AList ν (suc m) zero → ASub ν (suc m) m × AList ν m zero
lem-A6-1 (t' // zero ◅ s) = t' // zero , s
lem-A6-1 {zero} (t' // suc () ◅ xs)
lem-A6-1 {suc m} (t' // x ◅ xs) with lem-A6-1 xs
lem-A6-1 {suc m} (t' // zero ◅ xs) | e , u = t' // zero , xs -- same case as above
lem-A6-1 {suc m} (t' // suc x ◅ xs) | e , u = asub-weaken e , t' // x ◅ u
lem-A6 : ∀ {m ν} {ρs ρs'} {a : MetaType m ν} {τ : SimpleType ν} → ρs ⊢ a amatch τ ↠ ρs' →
(∃ λ (u : AList ν m zero) → (from-meta $ a M./ (asub u)) ◁ τ)
lem-A6 {a = ∀' a} {τ = τ} (mtc-tabs x) with lem-A6 x
lem-A6 {a = ∀' a} {τ = τ} (mtc-tabs x) | u , p with lem-A6-1 u
lem-A6 {a = ∀' a} {τ = τ} (mtc-tabs x) | u , p | b , bs = bs , m-tabs {!p!}
where
-- q : from-meta (substitute (asub u) a []) ◁ τ
-- q = {!!}
lem-A6 (mtc-iabs x) with lem-A6 x
lem-A6 (mtc-iabs x) | u , p = u , (m-iabs p)
lem-A6 {τ = τ} (mtc-simp {a = a} (u , eq)) =
u , subst (flip _◁_ τ) (sym $ mgu-unifies (simpl a) τ (u , eq)) m-simp
lem-A6' : ∀ {ν} {ρs ρs' } {r : Type ν} {a} → r ◁ a → ρs ⊢ (to-meta {zero} r) amatch a ↠ ρs'
lem-A6' {a = a} m-simp = mtc-simp (mgu-id a)
lem-A6' (m-tabs r◁a) = mtc-tabs {!!}
lem-A6' (m-iabs r◁a) = mtc-iabs (lem-A6' r◁a)
-- p'haps counterintuitively the following proposition is NOT a theorem:
-- Δ⊢ᵣa⟶Δ≢Ø : ∀ {ν n} {K : Ktx ν n} {a} → K ⊢ᵣ a → ∃ λ b → b List.∈ (proj₂ K)
-- since [] ⊢ᵣ Nat ⇒ Nat through the r-iabs rule, but also:
-- [] ⊢ᵣ Nat ⇒ (Nat ⇒ Nat), etc; through recursion on r-iabs
lem-A7a : ∀ {ν} (Δ : ICtx ν) {a ρs} → Δ ⊢match1st a ↠ ρs → ∃ λ r → Δ ⟨ a ⟩= r
lem-A7a List.[] ()
lem-A7a (x List.∷ Δ) {a = a} (m1-head x₁) = , (l-head u Δ)
where
p = lem-A6 x₁
u = subst (λ u' → u' ◁ a) (alist-zero-vanishes (proj₁ p)) (proj₂ p)
lem-A7a (r List.∷ Δ) (m1-tail ¬pr y) =
, (l-tail (λ r◁a → ¬pr $ lem-A6' r◁a) (proj₂ $ lem-A7a Δ y))
open Lemmas
_⊢alg_ : ∀ {ν n} (K : Ktx ν n) → (a : Type ν) → Dec (K ⊢ᵣ a)
K ⊢alg simpl x with proj₂ K match1st x
K ⊢alg simpl x | yes p = yes (r-simp (proj₂ $ lem-A7a (proj₂ K) p) {!!})
K ⊢alg simpl x | no ¬p = no (λ{ (r-simp x₁ x₂) → {!!} })
K ⊢alg (a ⇒ b) with (a ∷K K) ⊢alg b
K ⊢alg (a ⇒ b) | yes p = yes $ r-iabs a p
K ⊢alg (a ⇒ b) | no ¬p = no (λ{ (r-iabs .a x) → ¬p x })
K ⊢alg ∀' a with (ktx-weaken K) ⊢alg a
K ⊢alg ∀' a | yes p = yes (r-tabs p)
K ⊢alg ∀' a | no ¬p = no (λ{ (r-tabs x) → ¬p x })
|
Require Import CoqlibC.
Set Implicit Arguments.
Class Linker (A: Type) := {
link: A -> A -> option A;
linkorder: A -> A -> Prop;
linkorder_refl: forall x, linkorder x x;
linkorder_trans: forall x y z, linkorder x y -> linkorder y z -> linkorder x z;
link_linkorder: forall x y z, link x y = Some z -> linkorder x z /\ linkorder y z
}.
Inductive link_res (A: Type): Type :=
| empty
| fail
| success: A -> link_res A.
Arguments empty [A].
Arguments fail [A].
Fixpoint link_list_aux X `{Linker X} (xs: list X): link_res X :=
match xs with
| [] => empty
| x0 :: tl =>
match link_list_aux tl with
| empty => success x0
| fail => fail
| success x1 =>
match link x0 x1 with
| Some x => success x
| None => fail
end
end
end.
Definition link_list X `{Linker X} (xs: list X): option X :=
match link_list_aux xs with
| empty => None (* Note that we are not giving semantics to empty programs. *)
| success x => Some x
| fail => None
end.
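(* Computed examples: link_list [x] = Some x, and
   link_list [x; y] = link x y, since link_list_aux folds the list
   from the tail towards the head. *)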
Lemma link_list_cons
X `{Linker X} hd tl restl res
(TL: link_list tl = Some restl)
(HD: link hd restl = Some res):
<<LINK: link_list (hd :: tl) = Some res>> /\ <<LINKORDER: Forall (fun x => linkorder x res) (hd :: tl)>>.
Proof.
split; red.
- unfold link_list in *. des_ifs; ss; des_ifs.
- eapply link_linkorder in HD. des. econs; auto. clear HD. unfold link_list in TL. des_ifs.
generalize dependent restl. generalize dependent res.
induction tl as [|h l]; auto. i. econs; unfold link_list_aux in *; des_ifs.
{ eapply link_linkorder in Heq1. des. eapply linkorder_trans; eauto. }
{ destruct l; auto. des_ifs. }
eapply IHl; eauto. eapply link_linkorder in Heq1. des. eapply linkorder_trans; eauto.
Qed.
Lemma link_list_linkorder
X `{Linker X} xs xs_res
(LINK: link_list xs = Some xs_res):
<<LINKORDER: Forall (fun x => linkorder x xs_res) xs>>.
Proof.
destruct xs as [| hd tl]; auto.
unfold link_list in LINK. des_ifs. unfold link_list_aux in Heq. des_ifs; fold link_list_aux in *.
{ destruct tl; ss. econs. apply linkorder_refl. econs. des_ifs. }
econs. { eapply link_linkorder in Heq1. des. auto. }
assert (link_list tl = Some x).
{ unfold link_list. rewrite Heq0. auto. }
exploit link_list_cons; eauto. i. des.
inv LINKORDER. auto.
Qed.
Lemma link_list_cons_inv
X `{Linker X} hd tl res
(LINK: link_list (hd :: tl) = Some res)
(LEN: tl <> []):
exists restl, <<TL: link_list tl = Some restl>> /\ <<HD: link hd restl = Some res>>.
Proof.
unfold link_list in LINK. des_ifs. unfold link_list_aux in Heq. des_ifs; fold link_list_aux in *.
{ destruct tl; ss. econs. des_ifs. }
exists x. split; auto. unfold link_list. rewrite Heq0. auto.
Unshelve. auto.
Qed.
|
module Mod_Refiners
use Mod_IsotropicAdaptiveRefiner
end module Mod_Refiners
|
open import Agda.Builtin.Nat
open import Agda.Builtin.Sigma
interleaved mutual
data Rec : Set
⟦_⟧ : Rec → Set
constructor `Nat : Rec
⟦ `Nat ⟧ = Nat
_ : Rec
_ = `Σ `Nat (λ _ → `Nat)
_ : Rec → Rec
_ = λ r → `Σ r (λ _ → `Nat)
constructor `Σ : (r : Rec) → (⟦ r ⟧ → Rec) → Rec
⟦ `Σ A B ⟧ = Σ ⟦ A ⟧ λ a → ⟦ B a ⟧
_+1-Nats : Nat → Rec
zero +1-Nats = `Nat
suc n +1-Nats = `Σ `Nat λ _ → n +1-Nats
Nats : Rec
Nats = `Σ `Nat _+1-Nats
[1] : ⟦ Nats ⟧
[1] = 0 , 1
[1⋯3] : ⟦ Nats ⟧
[1⋯3] = 2 , 1 , 2 , 3
|
import numpy as np
import torch
from models.components.mlp import MLPConsensus
from models.multiscale import MultiscaleModel
from models.multiscale import RecursiveMultiscaleModel
from subset_samplers import ExhaustiveSubsetSampler
class TestRecursiveMultiscaleModel:
n_classes = 20
n_mlps = 4
input_dim = 100
@classmethod
def setup_class(cls):
np.random.seed(42)
torch.random.manual_seed(42)
cls.mlps = [
MLPConsensus(cls.input_dim * i, 150, cls.n_classes, batch_norm=True)
for i in range(1, cls.n_mlps + 1)
]
for mlp in cls.mlps:
mlp.eval()
cls.priors = np.random.randn(cls.n_classes)
cls.priors /= cls.priors.sum()
def test_against_results_are_the_same_as_the_multiscale_model(self):
sampler = ExhaustiveSubsetSampler()
multiscale_model = MultiscaleModel(
self.mlps, softmax=False, sampler=sampler, save_intermediate=True
)
recursive_multiscale_model = RecursiveMultiscaleModel(
self.mlps,
sampler=sampler,
save_intermediate=True,
count_n_evaluations=False,
)
for n_video_frames in range(1, len(self.mlps) + 8):
example = torch.randn(n_video_frames, self.input_dim)
with torch.no_grad():
multiscale_model_results = multiscale_model(example.clone()).numpy()
with torch.no_grad():
recursive_multiscale_model_results = recursive_multiscale_model(
example.clone()
).numpy()
np.testing.assert_allclose(
recursive_multiscale_model_results,
multiscale_model_results,
err_msg=f"Failure comparing scores for a {n_video_frames} frame input",
rtol=1e-4,
)
|
!
! Distributed under the OSI-approved Apache License, Version 2.0. See
! accompanying file Copyright.txt for details.
!
! adios2_attribute_mod.f90 : ADIOS2 Fortran bindings for
! type(adios2_attribute) handler subroutines
! Created on: Dec 10, 2018
! Author: William F Godoy [email protected]
!
module adios2_attribute_mod
use adios2_parameters_mod
implicit none
contains
subroutine adios2_attribute_name(name, attribute, ierr)
character(len=:), allocatable, intent(out) :: name
type(adios2_attribute), intent(in) :: attribute
integer, intent(out) :: ierr
!local
integer :: length
if (allocated(name)) deallocate (name)
call adios2_attribute_name_length_f2c(length, attribute%f2c, ierr)
if (ierr == 0) then
allocate (character(length) :: name)
call adios2_attribute_name_f2c(name, attribute%f2c, ierr)
end if
end subroutine
subroutine adios2_attribute_check_type(attribute, adios2_type, hint, ierr)
type(adios2_attribute), intent(in):: attribute
integer, intent(in):: adios2_type
character*(*), intent(in):: hint
integer, intent(out):: ierr
if (attribute%type /= adios2_type) then
write (0, *) 'ERROR: adios2 attribute ', TRIM(attribute%name)//char(0), &
' type mismatch, in call to adios2_', TRIM(hint)//char(0)
ierr = adios2_error_invalid_argument
end if
end subroutine
end module
|
[STATEMENT]
lemma wt_lbv_wt_step:
assumes wf: "wf_prog wf_mb G"
assumes lbv: "wt_lbv G C pTs rT mxs mxl et cert ins"
assumes C: "is_class G C"
assumes pTs: "set pTs \<subseteq> types G"
defines [simp]: "mxr \<equiv> 1+length pTs+mxl"
shows "\<exists>ts \<in> list (size ins) (states G mxs mxr).
wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts
\<and> OK (Some ([],(OK (Class C))#((map OK pTs))@(replicate mxl Err))) <=_(JVMType.le G mxs mxr) ts!0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
let ?step = "exec G mxs rT et ins"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
let ?r = "JVMType.le G mxs mxr"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
let ?f = "JVMType.sup G mxs mxr"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
let ?A = "states G mxs mxr"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
have "semilat (JVMType.sl G mxs mxr)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. semilat (JVMType.sl G mxs mxr)
[PROOF STEP]
by (rule semilat_JVM_slI, rule wf_prog_ws_prog, rule wf)
[PROOF STATE]
proof (state)
this:
semilat (JVMType.sl G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
hence "semilat (?A, ?r, ?f)"
[PROOF STATE]
proof (prove)
using this:
semilat (JVMType.sl G mxs mxr)
goal (1 subgoal):
1. semilat (states G mxs mxr, JVMType.le G mxs mxr, JVMType.sup G mxs mxr)
[PROOF STEP]
by (unfold sl_triple_conv)
[PROOF STATE]
proof (state)
this:
semilat (states G mxs mxr, JVMType.le G mxs mxr, JVMType.sup G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
semilat (states G mxs mxr, JVMType.le G mxs mxr, JVMType.sup G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
have "top ?r Err"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Semilat.top (JVMType.le G mxs mxr) Err
[PROOF STEP]
by (simp add: JVM_le_unfold)
[PROOF STATE]
proof (state)
this:
Semilat.top (JVMType.le G mxs mxr) Err
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Semilat.top (JVMType.le G mxs mxr) Err
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
have "Err \<in> ?A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Err \<in> states G mxs mxr
[PROOF STEP]
by (simp add: JVM_states_unfold)
[PROOF STATE]
proof (state)
this:
Err \<in> states G mxs mxr
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Err \<in> states G mxs mxr
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
have "bottom ?r (OK None)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bottom (JVMType.le G mxs mxr) (OK None)
[PROOF STEP]
by (simp add: JVM_le_unfold bottom_def)
[PROOF STATE]
proof (state)
this:
bottom (JVMType.le G mxs mxr) (OK None)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
bottom (JVMType.le G mxs mxr) (OK None)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
have "OK None \<in> ?A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. OK None \<in> states G mxs mxr
[PROOF STEP]
by (simp add: JVM_states_unfold)
[PROOF STATE]
proof (state)
this:
OK None \<in> states G mxs mxr
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
OK None \<in> states G mxs mxr
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
from lbv
[PROOF STATE]
proof (chain)
picking this:
wt_lbv G C pTs rT mxs mxl et cert ins
[PROOF STEP]
have "bounded ?step (length ins)"
[PROOF STATE]
proof (prove)
using this:
wt_lbv G C pTs rT mxs mxl et cert ins
goal (1 subgoal):
1. bounded (exec G mxs rT et ins) (length ins)
[PROOF STEP]
by (clarsimp simp add: wt_lbv_def exec_def)
(intro bounded_lift check_bounded_is_bounded)
[PROOF STATE]
proof (state)
this:
bounded (exec G mxs rT et ins) (length ins)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
bounded (exec G mxs rT et ins) (length ins)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
from lbv
[PROOF STATE]
proof (chain)
picking this:
wt_lbv G C pTs rT mxs mxl et cert ins
[PROOF STEP]
have "cert_ok cert (length ins) Err (OK None) ?A"
[PROOF STATE]
proof (prove)
using this:
wt_lbv G C pTs rT mxs mxl et cert ins
goal (1 subgoal):
1. cert_ok cert (length ins) Err (OK None) (states G mxs mxr)
[PROOF STEP]
by (unfold wt_lbv_def) (auto dest: check_certD)
[PROOF STATE]
proof (state)
this:
cert_ok cert (length ins) Err (OK None) (states G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
cert_ok cert (length ins) Err (OK None) (states G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
from wf
[PROOF STATE]
proof (chain)
picking this:
wf_prog wf_mb G
[PROOF STEP]
have "pres_type ?step (length ins) ?A"
[PROOF STATE]
proof (prove)
using this:
wf_prog wf_mb G
goal (1 subgoal):
1. pres_type (exec G mxs rT et ins) (length ins) (states G mxs mxr)
[PROOF STEP]
by (rule exec_pres_type)
[PROOF STATE]
proof (state)
this:
pres_type (exec G mxs rT et ins) (length ins) (states G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
pres_type (exec G mxs rT et ins) (length ins) (states G mxs mxr)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
let ?start = "OK (Some ([],(OK (Class C))#(map OK pTs)@(replicate mxl Err)))"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
from lbv
[PROOF STATE]
proof (chain)
picking this:
wt_lbv G C pTs rT mxs mxl et cert ins
[PROOF STEP]
have "wtl_inst_list ins cert ?f ?r Err (OK None) ?step 0 ?start \<noteq> Err"
[PROOF STATE]
proof (prove)
using this:
wt_lbv G C pTs rT mxs mxl et cert ins
goal (1 subgoal):
1. wtl_inst_list ins cert (JVMType.sup G mxs mxr) (JVMType.le G mxs mxr) Err (OK None) (exec G mxs rT et ins) 0 (OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err))) \<noteq> Err
[PROOF STEP]
by (simp add: wt_lbv_def lbvjvm_def)
[PROOF STATE]
proof (state)
this:
wtl_inst_list ins cert (JVMType.sup G mxs mxr) (JVMType.le G mxs mxr) Err (OK None) (exec G mxs rT et ins) 0 (OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err))) \<noteq> Err
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
wtl_inst_list ins cert (JVMType.sup G mxs mxr) (JVMType.le G mxs mxr) Err (OK None) (exec G mxs rT et ins) 0 (OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err))) \<noteq> Err
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
from C pTs
[PROOF STATE]
proof (chain)
picking this:
is_class G C
set pTs \<subseteq> types G
[PROOF STEP]
have "?start \<in> ?A"
[PROOF STATE]
proof (prove)
using this:
is_class G C
set pTs \<subseteq> types G
goal (1 subgoal):
1. OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<in> states G mxs mxr
[PROOF STEP]
by (unfold JVM_states_unfold) (auto intro: list_appendI, force)
[PROOF STATE]
proof (state)
this:
OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<in> states G mxs mxr
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<in> states G mxs mxr
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
from lbv
[PROOF STATE]
proof (chain)
picking this:
wt_lbv G C pTs rT mxs mxl et cert ins
[PROOF STEP]
have "0 < length ins"
[PROOF STATE]
proof (prove)
using this:
wt_lbv G C pTs rT mxs mxl et cert ins
goal (1 subgoal):
1. 0 < length ins
[PROOF STEP]
by (simp add: wt_lbv_def)
[PROOF STATE]
proof (state)
this:
0 < length ins
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
semilat (states G mxs mxr, JVMType.le G mxs mxr, JVMType.sup G mxs mxr)
Semilat.top (JVMType.le G mxs mxr) Err
Err \<in> states G mxs mxr
bottom (JVMType.le G mxs mxr) (OK None)
OK None \<in> states G mxs mxr
bounded (exec G mxs rT et ins) (length ins)
cert_ok cert (length ins) Err (OK None) (states G mxs mxr)
pres_type (exec G mxs rT et ins) (length ins) (states G mxs mxr)
wtl_inst_list ins cert (JVMType.sup G mxs mxr) (JVMType.le G mxs mxr) Err (OK None) (exec G mxs rT et ins) 0 (OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err))) \<noteq> Err
OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<in> states G mxs mxr
0 < length ins
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
semilat (states G mxs mxr, JVMType.le G mxs mxr, JVMType.sup G mxs mxr)
Semilat.top (JVMType.le G mxs mxr) Err
Err \<in> states G mxs mxr
bottom (JVMType.le G mxs mxr) (OK None)
OK None \<in> states G mxs mxr
bounded (exec G mxs rT et ins) (length ins)
cert_ok cert (length ins) Err (OK None) (states G mxs mxr)
pres_type (exec G mxs rT et ins) (length ins) (states G mxs mxr)
wtl_inst_list ins cert (JVMType.sup G mxs mxr) (JVMType.le G mxs mxr) Err (OK None) (exec G mxs rT et ins) 0 (OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err))) \<noteq> Err
OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<in> states G mxs mxr
0 < length ins
goal (1 subgoal):
1. \<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
[PROOF STEP]
by (rule lbvs.wtl_sound_strong [OF lbvs.intro, OF lbv.intro lbvs_axioms.intro, OF Semilat.intro lbv_axioms.intro])
[PROOF STATE]
proof (state)
this:
\<exists>ts\<in>list (length ins) (states G mxs mxr). wt_step (JVMType.le G mxs mxr) Err (exec G mxs rT et ins) ts \<and> OK (Some ([], OK (Class C) # map OK pTs @ replicate mxl Err)) \<sqsubseteq>\<^bsub>JVMType.le G mxs mxr\<^esub> ts ! 0
goal:
No subgoals!
[PROOF STEP]
qed |
Require Import StlcIso.SpecAnnot.
Require Import CompilerFI.Compiler.
Require Import UValFI.UVal.
Require Import LogRelFI.LR.
Require Import BacktransFI.Emulate.
Require Import BacktransFI.InjectExtract.
Require Import BacktransFI.UpgradeDowngrade.
(* Back-translate a target-language context [Cu] into a source-language
   context: apply the injection into UVal to the hole, then wrap the result
   in the emulation of [Cu]. *)
Definition backtranslateCtx n τ Cu : F.PCtx := (F.eraseAnnot_pctx (F.pctxA_cat
(F.a_papp₂ τ (UValFI n (compfi_ty τ)) (injectA n τ) F.a_phole)
(emulate_pctx n Cu))).
Lemma backtranslateCtx_works {Cu m n d p τs τu ts tu} :
ValidTy τu →
dir_world_prec m n d p →
⟪ ia⊢ Cu : I.empty, compfi_ty τs → I.empty, τu ⟫ →
⟪ pempty ⊩ ts ⟦ d, n ⟧ tu : embed τs ⟫ →
⟪ pempty ⊩ (F.pctx_app ts (backtranslateCtx m τs Cu)) ⟦ d, n ⟧ I.pctx_app tu (eraseAnnot_pctx
Cu) : pEmulDV m p τu ⟫.
Proof.
intros vτu dwp tCu lr; destruct p; unfold backtranslateCtx;
rewrite F.eraseAnnot_pctx_cat, F.pctx_cat_app;
[change pempty with (toEmulDV m precise I.empty) | change pempty with (toEmulDV m imprecise I.empty)];
eapply emulate_pctx_works; eauto using dwp_precise, dwp_imprecise with tyvalid;
eapply inject_works_open; eauto using dwp_precise, dwp_imprecise with tyvalid.
Qed.
|
(* Every Isabelle constant has prefix I *)
(* just from IFOL.thy *)
(* Q : What's class "term"? *)
(* Should A be set or type or sth else? *)
Notation tip := Set.
(* TODO: Define "Pointed set" type...
Inductive PSet :=
| C (S:Set) (s:S) : PSet *)
Inductive o :=
(* Equality *)
| Ieq (A:tip) : A -> A -> o
(* Propositional logic *)
| IFalse : o
| Iconj : o -> o -> o
| Iimp : o -> o -> o
(*| IAll (A:tip) : A -> A -> o*)
.
Inductive prop :=
| ITrueprop : o -> prop
| Iimpl : prop -> prop -> prop
| Iall (A:tip) : (A->prop) -> prop
.
(* Isabelle's judgement == Coq's Coercion? *)
Coercion tp := ITrueprop.
(* TODO: add context! *)
Inductive Prf : prop -> Type :=
(* Metalogic *)
| A1 A B : Prf (Iimpl A (Iimpl B A))
| A2 A B C : Prf (
Iimpl (Iimpl A (Iimpl B C)) (Iimpl (Iimpl A B) (Iimpl A C))
)
| MP A B : Prf (Iimpl A B) -> Prf A -> Prf B
(* | assu P : Prf (Iimpl P P) *)
(* Object logic *)
(* Equality *)
| Irefl : forall (A:tip) (a:A),
Prf (Ieq A a a)
| Isubst : forall (A:tip) (a b:A) (P:A -> o),
Prf (Ieq A a b) -> Prf (P a) -> Prf (P b)
(* Propositional logic *)
| IimpI : forall (P Q : o),
Prf (Iimpl P Q) -> Prf (Iimp P Q)
| Imp : forall (P Q : o),
Prf (Iimp P Q) -> Prf P -> Prf Q
.
Inductive PrfCtx (G : prop -> Type): prop -> Type :=
| ctx p : PrfCtx G p
| ax p : Prf p -> PrfCtx G p
.
(*
Inductive WithMP (G : prop -> Type): prop -> Type :=
| Imp : forall (P Q : o),
WithMP (Iimp P Q) -> WithMP P -> WithMP Q
*)
Theorem Deduction (P Q:prop) (H : Prf P -> Prf Q)
: Prf (Iimpl P Q).
Proof.
Abort.
(*
Context (P:prop).
Check A2 P P P.*)
(* The following is useful when one does not use a context
   (as is possibly done in Isabelle): *)
Theorem assu P : Prf (Iimpl P P).
Proof.
refine (MP _ _ _ _).
refine (MP _ _ _ _).
refine (A2 P (Iimpl P P) P).
refine (A1 _ _).
refine (A1 _ _).
Defined.
Theorem dropL P Q : Prf Q -> Prf (Iimpl P Q).
Proof.
intro H.
refine (MP _ _ _ _).
refine (A1 _ _).
assumption.
Defined.
Theorem dropR P Q R : Prf (Iimpl P R)
-> Prf (Iimpl P (Iimpl Q R)).
Proof.
refine (MP _ _ _).
refine (MP _ _ _ _).
refine (A2 _ _ _).
refine (dropL _ _ _).
refine (A1 _ _).
Defined.
(* Simple theorems: *)
Definition ITrue : o := Iimp IFalse IFalse.
Theorem TrueI: Prf ITrue.
Proof.
unfold ITrue.
apply IimpI.
apply assu.
Defined.
(*--------------------------*)
(* What if "inductive extension"? *)
(*
Axiom elems : forall A:Type, A -> Type.
*)
Inductive bigunion {A} (f:A -> Type) : Type :=
| c : forall n : A, f n -> bigunion f
. (* exists *)
(*| z : f 0 -> bigunion
| s : forall n : nat, *)
Fixpoint q (n:nat) :=
|
councilors at the USC Annenberg School for Communication.
Arkansas governor and congressman who joined the board in 2006.
renewed attention to noncommercial broadcasting.
The elections came at CPB's board meeting in New Orleans Tuesday (Nov.
16). Ramer succeeds Ernie Wilson as chairman.
The newly elected chairman of the Corporation for Public Broadcasting talks with B&C’s John Eggerton about transforming noncommercial broadcasting into multiplatform, locally-focused public-service media. |
[STATEMENT]
lemma pF4: "Fr_1 \<F> \<Longrightarrow> Fr_2 \<F> \<Longrightarrow> Fr_4(\<F>) \<Longrightarrow> \<forall>A. \<F>(\<I> A) \<^bold>\<preceq> \<F> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Fr_1 \<F>; Fr_2 \<F>; Fr_4 \<F>\<rbrakk> \<Longrightarrow> \<forall>A. contains (\<F> A) (\<F> (\<I> A))
[PROOF STEP]
by (smt IDEMa_def IF2 IF4 Int_fr_def MONO_def PF1 PF6 PI4 diff_def monC pF1) |
module Postgres.DB.Wait
import Postgres.Utility
import Postgres.Data.Conn
import Postgres.DB.Core
%foreign helper "socket_wait"
prim__dbWait : Ptr PGconn -> PrimIO Int
||| Wait for something to happen on the given
||| connection.
|||
||| NOTE: Will call pgConsumeInput as a convenience
||| after it has finished waiting.
|||
||| Returns True if waiting succeeds and False
||| if waiting fails for some reason.
export
pgWait : Conn -> IO Bool
pgWait (MkConn conn) = do True <- (map intToBool (primIO $ prim__dbWait conn))
| False => pure False
pgConsumeInput (MkConn conn)
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2010 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
Inductive T : Set :=
| A : T
| B : T -> T.
Lemma lem1 : forall x y : T, {x = y} + {x <> y}.
decide equality.
Qed.
Lemma lem2 : forall x y : T, {x = y} + {x <> y}.
intros x y.
decide equality.
Qed.
Lemma lem4 : forall x y : T, {x = y} + {x <> y}.
intros x y.
compare x y; auto.
Qed.
|
Definition sig_extract (A:Set) (P:A -> Prop) (x:sig P) : A :=
match x with
| exist a Ha => a
end.
Theorem sig_extract_ok :
forall (A:Set) (P:A -> Prop) (y:sig P), P (sig_extract A P y).
Proof.
intros A P y; case y; simpl; trivial.
Qed.
Require Import ZArith.
Open Scope Z_scope.
Parameter
div_pair :
forall a b:Z,
0 < b ->
{p : Z * Z | a = fst p * b + snd p /\ 0 <= snd p < b}.
Definition div_pair' : forall a b:Z, 0 < b -> Z * Z.
intros a b Hb.
apply (sig_extract _ _ (div_pair a b Hb)).
Defined.
Theorem div_pair'_ok :
forall (a b:Z) (H:0 < b),
let p := div_pair' a b H in
a = fst p * b + snd p /\ 0 <= snd p < b.
intros a b H.
pattern (div_pair' a b H).
unfold div_pair'; apply sig_extract_ok.
Qed.
|
/-
Copyright (c) 2021 Jannis Limperg. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jannis Limperg
-/
import Aesop
set_option aesop.check.all true
inductive Even : Nat → Type
| zero : Even 0
| plusTwo : Even n → Even (n + 2)
attribute [aesop safe] Even
example : Even 6 := by
aesop
|
# Read a comma-separated string into an array.
slurp(s) = readcsv(IOBuffer(s))
# Convert a range string such as "3-7" into the range 3:7.
conv(s) = colon(map(x -> parse(Int, x), match(r"^(-?\d+)-(-?\d+)$", s).captures)...)
# Expand a mixture of numbers and range strings into one flat vector.
expand(s) = mapreduce(x -> isa(x, Number) ? Int(x) : conv(x), vcat, slurp(s))
|
% Chapter Template
\chapter{Background Information and Theory} % Main chapter title
\label{Chapter2} % Change X to a consecutive number; for referencing this chapter elsewhere, use \ref{ChapterX}
%----------------------------------------------------------------------------------------
% SECTION 1
%----------------------------------------------------------------------------------------
\section{Machine Learning}
% \begin{note}
% \begin{itemize}
% \item Una definición rápida
% \item Clasificación y regresión
% \item Cross-validation
% \item Qué son los datos de train y test, y por qué se hace esa partición
% \item Qué es el sobre-ajuste
% \end{itemize}
% \end{note}
\begin{pre-delivery}
% Machine Learning uses statistical and mathematical models to give answers to
% problems when there is no known formula of procedure to compute the answer.
Machine Learning uses statistical and mathematical models to give
computational answers, based on data, to problems for which there is no known
formula or procedure.
In the subfield of Supervised Learning, the objective is to predict a numerical
or categorical variable in response to some input data. The way of doing
it is to feed the model with lots of different examples for which we already
know the correct answer, and we expect the model to be able to predict
the correct answer for instances that it hasn't seen before. When it does,
we say that the model is able to generalize.
When a model is trained with some data, there is always a risk of overfitting
\cite{hawkins2004problem}.
For a model to overfit means that it adjusts very well to the data that it
has seen, but can't predict the correct answer to new, unseen data.
% This
% happens because it has not only from the relevant information, but also
% from the random noise that the data had, and so it can only memorise, but
% not generalize.
This happens because it has not only fitted the relevant information,
but also random noise present in the data sample, and thus it generalizes
poorly. In the extreme case of overfitting, the model tends to memorise the
data sample.
For this reason, when a Machine Learning algorithm is trained the data
is split in two subsets, a \textit{Training dataset} and a \textit{Testing
dataset}. The Training dataset will be used to train the model, while the
Testing dataset will be used only to check it. If a model has generalized
well, it will achieve a good accuracy score on both the training and the
testing dataset, but if it has overfitted it will show good results on the
training dataset and bad ones on the testing dataset.
Many models need some parameters to tune the behaviour of the algorithm. For
example, some of them are used to adjust how much a model will fit to the data.
We usually call these ``hyperparameters''. The correct value for them is not
straightforward, and it is normally chosen with a resampling process called
``cross-validation''\cite{geisser2017predictive}. This process consists of
splitting the training dataset
into several subsets and checking many possible values for the hyperparameters
in order to see which one achieves the highest accuracy with unseen data.
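As an illustration (a minimal sketch, assuming the scikit-learn library and a
generic dataset; none of these names come from this work), a train/test split
and a cross-validated hyperparameter search usually look like this in Python:
\begin{verbatim}
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
# Hold out a Testing dataset that the model never sees during training.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0)

# Cross-validate several candidate values of the hyperparameter C.
search = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, cv=5)
search.fit(X_train, y_train)

# A large gap between these two scores is a symptom of overfitting.
print(search.score(X_train, y_train), search.score(X_test, y_test))
\end{verbatim}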
\end{pre-delivery}
\section{Some currently used Machine Learning models}
% \begin{note}
% \section{Review de los principales modelos que existen}
% \end{note}
\subsection{Decision Tree}
\label{sec:dec-tree}
% \begin{note}
% \begin{itemize}
% \item No se basa en productos escalares
% \item Es extremadamente rápido
% \item Es más fácil de interpretar que otros modelos
% \item Es extremadamente inestable
% \item Cuando se hace un Random Forest, se randommiza un poco, de modo que
% árboles distintos entrenados con los mismos datos pueden ser destintos
% \item Es un modelo no lineal
% \end{itemize}
% \end{note}
\begin{pre-delivery}
Decision Tree\cite{breiman2017classification}\cite{lewis2000introduction}
is a predictive model which uses
the training data to build
a tree where each node splits the data in two sets according to some
feature, and the leaves contain the set of instances that belong to some class
(in classification problems) or that has a similar numerical response variable
(for regression problems).
To predict the answer to a new instance, it uses the features to ``decide''
the nodes to cross until it reaches a leaf. The response given is the
most prevalent class in the leaf for classification problems, or the mean
of the values of the instances in the leaf for regression problems.
To decide what feature to use to split a node in two subsets, it uses
the Gini impurity: it will pick the feature that minimises the sum of
the Gini impurity of the two child nodes. Given a node with instances
belonging to $k$ classes, if $p_i$ is the proportion of instances that
belong to class $i$, the Gini impurity of the node is
$1 - \sum_{i = 1}^k p_i^2$.
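As a minimal sketch (illustrative Python, not part of this work), the Gini
impurity of a node can be computed directly from its class proportions:
\begin{verbatim}
import numpy as np

def gini_impurity(labels):
    # 1 - sum_i p_i^2, where p_i is the proportion of class i in the node.
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return 1.0 - np.sum(p ** 2)

print(gini_impurity([0, 0, 1, 1]))  # 0.5: maximally impure for two classes
print(gini_impurity([1, 1, 1, 1]))  # 0.0: a pure node
\end{verbatim}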
Decision Trees have the advantage that the tree produced is easy to
interpret and very fast to build. The way to avoid
overfitting is to limit the growth of the tree.
These models are very unstable. This means that small differences in the
training data can produce very different Decision Trees. This property
is very useful to build an ensemble of estimators to produce better answers.
Random Forest is an algorithm that trains many Decision Trees with some sort
of randomization.
\end{pre-delivery}
\subsection{Logistic Regression}
\label{ssec:log-reg}
\begin{pre-delivery}
Logistic Regression\cite{cox1958regression} models the probability that an instance belongs to
a class, and predicts the class with a higher probability. To do so it
uses the \textit{logistic sigmoid function}\cite{han1995influence}, defined by:
\begin{equation}
\sigma(a) = \frac{1}{1 + exp(-a)}
\end{equation}
Once the vector $w \in \reals^d$ has been found, the predicted probability
that an instance $\vx \in \reals^d$ belongs to a class is
$y(\vx) = \sigma(w^\transp\vx)$.
% To find a suitable $w$ it solves an optimization problem of finding $w$
% that maximizes the likelihood that each of the instances belongs to the
% specified class
Given
$D = \{\bm{\chi}, \bm{t}\}$
, where
$\bm{\chi} = \{\bm{x}_1, \ldots, \bm{x}_n\}$, $\bm{x}_i \in \reals^d$, $\bm{t} \in \{0, 1\}^n$,
it tries to maximize a likelihood function that can be written
\begin{equation}
p(\bm{t} | w) = \prod_{i = 1}^n y_i^{\bm{t}_i} (1 - y_i)^{1 - \bm{t}_i}
\end{equation}
where $y_i = \sigma(w^\transp\vx_i + w_0)$.
% , the problem is to find $w \in \reals^d$ and $c \in \reals$ that minimizes
% \begin{equation}
% \frac{1}{2}w^\transp w + \sum_{i = 1}^n log\left( exp\left( -y_i(\vx_i^\transp w + c) \right) + 1\right)
% \end{equation}
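As an illustrative sketch (assuming plain gradient ascent; this is not the
solver used by any particular library), the logarithm of the likelihood above
can be maximised iteratively:
\begin{verbatim}
import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def fit_logistic(X, t, lr=0.1, iters=1000):
    # Prepend a column of ones so that w[0] plays the role of w_0.
    X = np.hstack([np.ones((len(X), 1)), X])
    w = np.zeros(X.shape[1])
    for _ in range(iters):
        y = sigmoid(X @ w)
        # X^T (t - y) is the gradient of the log-likelihood.
        w += lr * X.T @ (t - y) / len(X)
    return w
\end{verbatim}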
\end{pre-delivery}
\subsection{Support Vector Machines}
\label{ssec:svm}
% \begin{note}
% \begin{itemize}
% \item Inicialmente pensadas para clasificación en 2 clases
% \item Pero se puede más clases con \eng{one-vs-rest} y también hay
% formas de hacer regresión
% \item Se basa únicamente en el producto escalar de sus entradas
% \item Intenta separar los datos con un híper-plano
% \item Actualmente es poco eficiente usarlas porque su coste s cúbico
% con la cantidad de entradas.
% \item Las fórmulas que quiere optimizar
% \end{itemize}
% \end{note}
\begin{pre-delivery}
Support Vector Machine\cite{Cortes1995} (SVM) is a model that finds a hyperplane that
divides the data in two sets. In two-class classification problems, each
side of the hyperplane contains the instances of each of the classes.
It does so by converting the problem to an optimization one.
Given some data
$D = \{\bm{\chi}, \bm{y}\}$
, where
$\bm{\chi} = \{\bm{x}_1, \ldots, \bm{x}_n\}$, $\bm{x}_i \in \reals^d$, $\bm{y} \in \{-1, +1\}^n$
, the optimization problem consists of finding $\bm{\alpha} \in \reals^n$
that maximises
\begin{equation}
L = \sum_{i = 1}^n\alpha_i -\frac{1}{2}\sum_{i = 1}^n\sum_{j = 1}^n\alpha_i\alpha_jy_iy_j\vx_i^\transp\vx_j
\end{equation}
subject to
\begin{align}
0 \leq \alpha_i \leq C; \forall i\\
\sum_{i = 1}^n \alpha_iy_i = 0
\end{align}
% $C$ is an hyperparameter to tune the amount of penalization for miss-classified
% instances.
$C$ is a hyperparameter that tunes the amount of penalization for misclassified
instances or instances located within the margin zone.
If we compute
\begin{equation}
w = \sum_{i = 1}^n\alpha_iy_i\vx_i
\end{equation}
and
\begin{equation}
b = y_i - w\cdot\vx_i
\end{equation}
for any $i$ so that $\alpha_i \neq 0$, we can compute the class of $\vx_0$ with
\begin{equation}
sign(w\cdot\vx_0 + b)
\end{equation}
Note that this algorithm just uses the dot product of the input data, not the
data itself. This property allows us to use the Kernel Trick with them.
See \ref{sec:kern-trick}.
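As a sketch (illustrative Python; the coefficients $\alpha_i$ are assumed to
have been obtained already by some solver), this is how $w$, $b$ and the
predicted class are computed from the formulas above:
\begin{verbatim}
import numpy as np

def svm_predict(alpha, X, y, x0):
    # w = sum_i alpha_i * y_i * x_i
    w = (alpha * y) @ X
    # b = y_i - w . x_i for any i with alpha_i != 0 (a support vector)
    i = np.flatnonzero(alpha)[0]
    b = y[i] - w @ X[i]
    return np.sign(w @ x0 + b)
\end{verbatim}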
\end{pre-delivery}
\section{Ensemble Methods}
\label{sec:ens-meth}
% \subsection{Bagging}
% \begin{note}
% \begin{itemize}
% \item Bagging
% \begin{itemize}
% \item Inventado por Leo Breiman (referencia)
% \item Pretende reducir el sesgo
% \item Wikipedia dice que pretende reducir la varianza
% \item Es el boosting el que pretende reducir el sesgo
% \item Entrenamiento de los estimadores es independiente, se podría
% hacer en paralelo
% \item Actualmente casi solo se usa con DT, debido a su inestabilidad
% \end{itemize}
% \item Bootstrap
% \begin{itemize}
% \item Intenta solucionar el problema de que para bagging es bueno
% que los estimadores sean distintos
% \item Idealmente usaríamos un dataset distinto para cada estimador
% \item Consiste en hacer un resalmpling con repetición
% \item Si la cantidad de instancias del original es la misma que la de cada uno
% de los subconjuntos, se espera que la proporción de elementos úncos sea de
% $1 - \frac{1}{e} \approx 0.632$.
% \item Si el conjunto original tiene $n$ elementos, y tu haces un subconjunto
% de tamaño $r$, puedes esperar que la proporción de elementos del original que
% sí tienen presencia en el nuevo sea de $1 - e^{-\frac{r}{n}}$
% \end{itemize}
% \item Random Forest
% \end{itemize}
% \end{note}
\begin{pre-delivery}
Ensemble methods\cite{polikar2006ensemble} are a technique used in Machine
Learning to reduce the
overall accuracy error of a basic classification or regression model. The
idea is that a committee of models is expected to learn better than a single
one.
Some ensemble methods are focused on decreasing the error caused by the
variance of the model. One example is \textit{Bagging}\cite{breiman1996bagging}. Others are focused
on decreasing the bias error, like \textit{Boosting}\cite{freund1997decision}.
In Bagging, every model in the ensemble vote with equal weight. Thus, it is
important to promote the variance among each of the models, since not doing
it would be equivalent to training just one model. Ideally, one would train
each of the models with totally different datasets, with no correlation
among them. But in practice this is not always possible, because of a
limited number of instances to train. One alternative is to use a
technique called \textit{Bootstrap}\cite{efron1994introduction}. Bootstrap makes it
possible to generate many different versions of a dataset by resampling it.
Given a dataset $D$ of size $n$, Bootstrap generates $m$ new datasets
$D_i$ of size $n$ by sampling instances from $D$ uniformly and with
replacement. This means that some of the instances in $D$ may be repeated
in $D_i$, and others may not appear at all. With a large $n$, it is expected
that each dataset $D_i$ will contain $63.2 \% $ of the instances in $D$.
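That $63.2\%$ figure is easy to check numerically (illustrative Python, not
part of this work):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n = 100_000
sample = rng.integers(0, n, size=n)  # resampling with replacement
print(len(np.unique(sample)) / n)    # close to 1 - 1/e = 0.632
\end{verbatim}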
Theoretically Bagging could be used with any kind of method. However, for
most of them Bootstrap is not enough to decorrelate the estimators.
In practice, Bagging is mostly used with Decision Tree, given that this
method produces very different trees with a small variation in the data.
Random Forest\cite{Breiman2001} is an algorithm that trains many Decision Trees with a
Bagging. Instead of building the tree in a deterministic way, in each
split it chooses a random subset of features on which to perform the
separation. In addition, it lets the estimators overfit, since this has a
positive impact on reducing the overall variance of the Forest.
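For illustration (a minimal sketch, assuming the scikit-learn library),
training a Random Forest takes only a couple of lines:
\begin{verbatim}
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
# Each tree sees a Bootstrap resample and random feature subsets per split.
forest = RandomForestClassifier(n_estimators=100, bootstrap=True)
forest.fit(X, y)
\end{verbatim}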
\end{pre-delivery}
\section{The kernel trick}
\label{sec:kern-trick}
% \begin{note}
% \begin{itemize}
% \item Teorema de Bochner
% \item El kernel RBF
% \begin{itemize}
% \item Su fórmula es \ldots
% \item Equivalencia entre $\gamma$ y $\sigma$
% \item La noción de similitud que tiene
% \item \Hspace\ es de dimensionalidad infinita
% \item Permite ajustarse infinitamente a los datos, tuneando el
% híper-parámetro
% \item $\sigma$ más pequeño, más sobreajuste
% \item $\gamma$ más grande, más sobreajuste
% \end{itemize}
% \end{itemize}
% \end{note}
\begin{pre-delivery}
A Kernel\cite{bergman1970kernel} is a function that equals to the inner product of inputs mapped into
some Hilbert Space
\footnote{A Hilbert space is a generalization of the Euclidean Space which contains
the structure of an inner product that allows length and angle to be
measured.}
, i.e:
\begin{equation}
\kernel(x,y) = \phi(x)\cdot\phi(y)
\end{equation}
% A Hilbert space is just a generalization of the Euclidean Space which contains
% the structure of an inner product that allows length and angle to be measured.
They are interesting in Machine Learning because we don't need to know the
explicit function $\phi(\cdot)$. In fact, $\phi(\cdot)$ could map the data to
a Hilbert Space with infinite dimensions, and we could still compute
$\phi(\vx)\cdot\phi(\vy)$ through the kernel $\kernel$.
Support Vector Machines (explained in \ref{ssec:svm}) can benefit a lot from
Kernel Functions. SVMs solve an
optimization problem to maximise
\begin{equation}
L = \sum_{i = 1}^n\alpha_i -\frac{1}{2}\sum_{i = 1}^n\sum_{j = 1}^n\alpha_i\alpha_jy_iy_j\vx_i^\transp\vx_j
\end{equation}
in order to find a hyperplane that separates the data points in two classes.
But for some problems there may not exist such a hyperplane, and so it would
be necessary to map the data to a different feature space. If we did that, then
the function to maximise would be
\begin{equation}
L = \sum_{i = 1}^n\alpha_i -\frac{1}{2}\sum_{i = 1}^n\sum_{j = 1}^n\alpha_i\alpha_jy_iy_j\phi(\vx_i)^\transp\phi(\vx_j)
\end{equation}
As we said previously, SVMs don't work with the data points alone, but just with
their inner products. Thus, a Kernel could be used to define the optimization
problem as
\begin{equation}
L = \sum_{i = 1}^n\alpha_i -\frac{1}{2}\sum_{i = 1}^n\sum_{j = 1}^n\alpha_i\alpha_jy_iy_j\kernel(\vx_i, \vx_j)
\end{equation}
This approach has one big advantage:
% first, we don't need to explicitly
% compute $\phi(\vx)^\transp\phi(\vy)$, which could have a high cost if the
% new dimensionality was too big.
as long as the learning technique relies
only on the inner product of the input, the underlying mapping $\phi(\cdot)$
does not need to be explicitly calculated and can, in fact, be unknown\cite{burges1998tutorial}.
Kernel functions can be characterised with Mercer's condition
\cite{mercer1909functions}. It says that given a function $\kernel(\vx, \vy)$,
there exists a mapping $\phi(\cdot)$ so that
$\kernel(\vx, \vy) = \phi(\vx)\cdot\phi(\vy)$
if and only if, for any $g(\vx)$ such that $\int g(\vx)^2 \, d\vx$ is finite,
$\int\!\!\int \kernel(\vx, \vy)g(\vx)g(\vy) \, d\vx \, d\vy \geq 0$.
There are many known Kernels. One that is very popular is the Radial Basis
Function Kernel\cite{vert2004primer}, RBF. This kernel is defined as:
\begin{equation}
\kernel(\vx,\vy) = \semiRbf
\end{equation}
where $\gamma > 0$ is a free parameter. The value of this Kernel decreases with the
Euclidean distance between its arguments, so it can be interpreted as a measure
of similarity. The feature space of this kernel has an infinite number of
dimensions.
When a kernel is used with an SVM, the answer can be computed with
\begin{equation}
sign\left(\sum_{i = 1}^n \alpha_iy_i\kernel(\vx_i, \vx)\right)
\end{equation}
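A minimal sketch of the RBF kernel and of the kernelised decision function
above (illustrative Python; the $\alpha_i$ are assumed given):
\begin{verbatim}
import numpy as np

def rbf_kernel(x, y, gamma=1.0):
    return np.exp(-gamma * np.sum((x - y) ** 2))

def kernel_svm_predict(alpha, X, y, x0, gamma=1.0):
    s = sum(alpha[i] * y[i] * rbf_kernel(X[i], x0, gamma)
            for i in range(len(X)))
    return np.sign(s)
\end{verbatim}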
SVMs using the RBF kernel have a huge ability to fit to the data, and are able
to separate classes for very difficult problems. The problem is that solving
the optimization problem has a cost that is cubic with the number of
instances, which makes it impractical for large-scale problems.
\end{pre-delivery}
% \subsection{The RBF kernel}
\section{Random Fourier Features}
% \begin{note}
% \begin{itemize}
% \item Teorema de bochner
% \item Tiene que ser un shift invariant kernel
% \item Es más, tiene que ser un positive definite shift-invariant kernel
% \item Converge bounds for ability to approximate
% \item Instead, we propose to factor the kernel function itself
% \item La factorización no depende de los datos
% \item we propose explicitly mapping
% the data to a low-dimensional Euclidean inner product space using a randomized feature map z :
% Rd --> RD so that the inner product between a pair of transformed points approximates their kernel
% evaluation
% \item Puesto que los valores están entre -1 y 1, hay un teorema que asegura
% la convergencia exponencial hacia el kernel real
% \end{itemize}
% \end{note}
\begin{pre-delivery}
A kernel function
$\kernel(\vx, \vy)$ with $\vx, \vy \in \reals^d$
equals the inner product of inputs mapped with some function $\phi(\cdot)$,
so that
$\phi(\vx)^\transp\cdot\phi(\vy) = \kernel(\vx, \vy)$.
But $\phi(\cdot)$ could be a mapping to an infinitely-dimensional space, so
calculating $\phi(\vx)$ is not possible for some kernels.
Random Fourier Features\cite{rahimi2008random} provide a way to, given a
kernel $\kernel(\vx, \vy)$,
explicitly map the data to a
low-dimensional Euclidean inner product space using a randomized feature
map $z: \reals^d \mapsto \reals^D$ so that the inner product between a pair
of transformed points approximates their kernel evaluation, i.e:
\begin{equation}
\kernel(\vx, \vy) = \phi(\vx)^\transp\cdot\phi(\vy) \approx z(\vx)^\transp\cdot z(\vy)
\end{equation}
To approximate the RBF kernel, it uses Bochner's Theorem, which says:
\newtheorem{theorem}{Theorem}
\begin{theorem}
\cite{rudin1962fourier}
A continuous kernel $\kernel(x, y) = \kernel(x - y)$ on $\reals^D$ is
positive definite if and only if $k(\delta)$ is the
Fourier Transform of a non-negative measure.
\end{theorem}
% Since RBF is defined as $\kernel(\vx, \vy) = e^{-\gamma\norm{\vx - \vy}^2}$
Since it is known that RBF is shift-invariant and positive definite, then
its Fourier transform is a proper probability distribution, and so
\begin{equation}
\kernel(\vx - \vy) = \int_{\reals^D} p(w)e^{iw^\transp (\vx-\vy)} \, dw
= \int_{\reals^D} p(w)\cos\left(w^\transp (\vx-\vy)\right) \, dw
\end{equation}
A random feature can be obtained by picking $w \sim \{\mathcal{N}(0, 2\gamma)\}^d$
and $b \sim \mathcal{U}(0, 2\pi)$
and computing $\sqrt{2}\cos(w^\transp\vx + b)$. To generate a lower variance
approximation of $\phi(\vx)$ with $D$ features we can concatenate $D$ randomly
chosen features $(f_1, \ldots, f_D)$ into a column vector and normalize each
component by $\sqrt{D}$.
An exponentially fast convergence in $D$ of $z(\vx)^\transp z(\vy)$ to
$\kernel(\vx, \vy)$ is guaranteed.
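A sketch of the randomised feature map $z$ for the RBF kernel, following the
construction above (illustrative Python, not part of this work):
\begin{verbatim}
import numpy as np

def rff_map(X, D, gamma, seed=0):
    # z(x) = sqrt(2/D) * cos(W x + b), with W ~ N(0, 2*gamma) entrywise
    # and b ~ U(0, 2*pi).
    rng = np.random.default_rng(seed)
    W = rng.normal(0.0, np.sqrt(2.0 * gamma), size=(D, X.shape[1]))
    b = rng.uniform(0.0, 2.0 * np.pi, size=D)
    return np.sqrt(2.0 / D) * np.cos(X @ W.T + b)

# z(x) . z(y) approximates exp(-gamma * ||x - y||^2) as D grows.
\end{verbatim}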
\end{pre-delivery}
\section{\Nys}
\begin{pre-delivery}
The \Nys\cite{NIPS2000_1866} method is a general method for low-rank approximations of
kernels. It achieves this by subsampling the data on which the kernel
is evaluated.
In kernel methods the data can be represented in a kernel matrix $K$, where
$K_{i,j} = \kernel(\vx_i, \vx_j)$. The problem of these methods is their
high computational cost associated with the kernel matrix: with non-linear
kernels, the cost of training the model is cubic with the number of
instances, something unacceptable for large-scale problems.
The \Nys\ method consists of generating an approximation of the kernel matrix of
rank $q$, where $q$ can be a lot smaller than the number of instances, without
any significant decrease in the accuracy of the solution. This way, if there
are $n$ instances in a dataset, the complexity can be reduced from
$\mathcal{O}(n^3)$ to $\mathcal{O}(nq^2)$.
With \Nys, given a kernel $\kernel(\vx, \vy) = \phi(\vx)\cdot\phi(\vy)$, one can
construct a mapping $z: \reals^d \mapsto \reals^q$ so that
$z(\vx) \approx \phi(\vx)$. This function defines each component $j$ as
$z_j(\vy) = \frac{1}{q}\sum_{i = 1}^q \kernel(\vy, \vx_i)g_i(\vx_i)$,
where $\vx_1, \ldots, \vx_q$ are some chosen instances and
$g_i(\cdot)$ comes from a column from the Singular Value Decomposition
of the approximated kernel matrix.
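A sketch of the resulting feature map (illustrative Python; the $q$ landmark
instances are assumed to have been sampled from the data):
\begin{verbatim}
import numpy as np

def nystroem_map(X, landmarks, kernel):
    # Kernel matrix between the q chosen landmark instances.
    K_qq = np.array([[kernel(a, b) for b in landmarks] for a in landmarks])
    # Eigendecomposition of the small q x q matrix: O(q^3), not O(n^3).
    vals, vecs = np.linalg.eigh(K_qq)
    M = vecs / np.sqrt(np.maximum(vals, 1e-12))
    # Rows of the result are z(x); z(x) . z(y) approximates kernel(x, y).
    K_nq = np.array([[kernel(x, b) for b in landmarks] for x in X])
    return K_nq @ M
\end{verbatim}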
\end{pre-delivery}
|
lemma poly_DERIV [simp]: "DERIV (\<lambda>x. poly p x) x :> poly (pderiv p) x" |
function [data] = ft_denoise_ssp(cfg, data)
% FT_DENOISE_SSP projects out topographies based on ambient noise on
% Neuromag/Elekta/MEGIN systems. These topographies are estimated during maintenance
% visits from the engineers of MEGIN
%
% Use as
% [data] = ft_denoise_ssp(cfg, data)
% where data should come from FT_PREPROCESSING and the configuration
% should contain
% cfg.ssp = 'all' or a cell array of SSP names to apply (default = 'all')
% cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all')
% cfg.updatesens = 'no' or 'yes' (default = 'yes')
%
% To facilitate data-handling and distributed computing you can use
% cfg.inputfile = ...
% cfg.outputfile = ...
% If you specify one of these (or both) the input data will be read from a *.mat
% file on disk and/or the output data will be written to a *.mat file. These mat
% files should contain only a single variable, corresponding with the
% input/output structure.
%
% See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA
% Copyright (C) 2004-2022, Gianpaolo Demarchi, Lau
% Møller Andersen, Robert Oostenveld, Jan-Mathijs Schoffelen
%
% This file is part of FieldTrip, see http://www.fieldtriptoolbox.org
% for the documentation and details.
%
% FieldTrip is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% FieldTrip is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with FieldTrip. If not, see <http://www.gnu.org/licenses/>.
%
% $Id$
% these are used by the ft_preamble/ft_postamble function and scripts
ft_revision = '$Id$';
ft_nargin = nargin;
ft_nargout = nargout;
% do the general setup of the function
ft_defaults
ft_preamble init
ft_preamble debug
ft_preamble loadvar data
ft_preamble provenance data
% the ft_abort variable is set to true or false in ft_preamble_init
if ft_abort
return
end
% check if the input cfg is valid for this function
cfg = ft_checkconfig(cfg, 'forbidden', {'trial'}); % prevent accidental typos, see issue 1729
cfg = ft_checkconfig(cfg, 'required', {'ssp'});
% set the defaults
cfg.ssp = ft_getopt(cfg, 'ssp', 'all');
cfg.trials = ft_getopt(cfg, 'trials', 'all', 1);
cfg.updatesens = ft_getopt(cfg, 'updatesens', 'yes');
% store the original type of the input data
dtype = ft_datatype(data);
% check if the input data is valid for this function
% this will convert timelocked input data to a raw data representation if needed
data = ft_checkdata(data, 'datatype', 'raw', 'feedback', 'yes', 'hassampleinfo', 'yes');
% check whether it is neuromag data
if ~ft_senstype(data, 'neuromag')
ft_error('SSP vectors can only be applied to neuromag data');
end
% select trials of interest
tmpcfg = keepfields(cfg, {'trials', 'showcallinfo', 'trackcallinfo', 'trackusage', 'trackdatainfo', 'trackmeminfo', 'tracktimeinfo', 'checksize'});
data = ft_selectdata(tmpcfg, data);
% restore the provenance information
[cfg, data] = rollback_provenance(cfg, data);
% remember the original channel ordering
labelold = data.label;
% apply the balancing to the MEG data and to the gradiometer definition
current = data.grad.balance.current;
desired = cfg.ssp;
if ~strcmp(current, 'none')
% first undo/invert the previously applied balancing
try
current_montage = data.grad.balance.(current);
catch
ft_error('unknown balancing for input data');
end
fprintf('converting the data from "%s" to "none"\n', current);
data = ft_apply_montage(data, current_montage, 'keepunused', 'yes', 'inverse', 'yes');
if istrue(cfg.updatesens)
fprintf('converting the sensor description from "%s" to "none"\n', current);
data.grad = ft_apply_montage(data.grad, current_montage, 'keepunused', 'yes', 'inverse', 'yes');
data.grad.balance.current = 'none';
end
end % if current
if ~strcmp(desired, 'none')
% then apply the desired balancing
if strcmp(desired, 'all')
desireds = fieldnames(data.grad.balance);
else
desireds = cfg.ssp;
if ~iscell(desireds)
ft_error('cfg.ssp must be a cell array of projector names')
end
end
for desired_index = 1:length(desireds)
desired = desireds{desired_index};
if ~strcmp(desired, 'current')
try
desired_montage = data.grad.balance.(desired);
catch
ft_error('unknown balancing for input data');
end
fprintf('converting the data from "none" to "%s"\n', desired);
data = ft_apply_montage(data, desired_montage, 'keepunused', 'yes', 'balancename', desired);
if istrue(cfg.updatesens)
fprintf('converting the sensor description from "none" to "%s"\n', desired);
data.grad = ft_apply_montage(data.grad, desired_montage, 'keepunused', 'yes', 'balancename', desired);
end
end % if desired
end
end
% reorder the channels to stay close to the original ordering
[selold, selnew] = match_str(labelold, data.label);
if numel(selnew)==numel(labelold)
for i=1:numel(data.trial)
data.trial{i} = data.trial{i}(selnew,:);
end
data.label = data.label(selnew);
else
ft_warning('channel ordering might have changed');
end
% convert back to input type if necessary
switch dtype
case 'timelock'
data = ft_checkdata(data, 'datatype', 'timelock');
otherwise
% keep the output as it is
end
% do the general cleanup and bookkeeping at the end of the function
ft_postamble debug
ft_postamble previous data
ft_postamble provenance data
ft_postamble history data
ft_postamble savevar data
|
% pageify -- turn databases into one db with one observation and many pages
%
% ::
%
%
% db=pageify(pivot_date,varargin)
%
% Args:
%
% - **pivot_date** [char|serial date] : reference date, typically end of
% history.
%
% - **varargin** [struct|ts] : databases in time series format or in struct
%   format
%
% Returns:
% :
%
% - **db** [ts] : time series with one observation and many pages
%
% Note:
%
% - This routine is useful for preparing data for conditional forecasting
%
% Example:
%
% See also:
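% (hypothetical sketch following the signature above; db1 and db2 are
% assumed ts databases whose history ends at the pivot date)
% db = pageify('2019Q4', db1, db2);
%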
% |
THE NEW TERRITORIES 新界 - The New Territories spans 86% of Hong Kong’s land mass and is home to half its population. It was the last of the three main regions to fall under British rule in 1898. It offers a hearty serving of Hong Kong’s cultural and natural beauty with some of the best hiking trails, ancient walled villages, a wetland nature reserve (home to a number of endangered species) and over 200 islands. - We spent the majority of our childhood visits to Hong Kong staying with our Great Grandparents in a village in Tai Po, a district of The New Territories. They spoke Hakka, a Chinese dialect closer to Cantonese than Mandarin. My sister and I were only fluent in Chin-glish, so mum translated back and forth between us. They were kind and gentle souls. A life of hard graft and labour had shaped their bodies and how they moved, but they could still squat for days!
poured my whole heart into this one. a whole collection of photos of the best memories i've made with the best people. straight from the heart. each magazine is $15. venmo me @tokyodrift626 i can ship anywhere! or hand it to you! thank you for supporting my art, my heart thrives from your support.
Psychology says, always go with the choice that scares you the most, because that's the one that is going to help you grow. |
function drawboard(board)
% DRAWBOARD draws each element of BOARD as a gray patch using its xy vertices.
for i = 1:numel(board)
patch(board(i).xy(:,1), board(i).xy(:,2), [0.5 0.5 0.5]);
end
axis image off |
using Base: OneTo
using Base.Threads: @spawn
using Base: @sync
# Reduce A to band form with bandwidth `block_size` by applying blocked
# Householder reflectors from the left and the right.
function to_band!(A::AbstractMatrix{T}, block_size = 32) where {T}
# iterate in blocks.
m = size(A, 1)
blocks = ÷(m, block_size, RoundUp)
offset_col = 1
offset_row = 1 + block_size
τs = Vector{T}(undef, m)
for block = OneTo(blocks)
cols_to_band!(A, offset_row, offset_col, block_size, τs)
offset_row += block_size
offset_col += block_size
end
return A
end
function cols_to_band!(A, offset_row, offset_col, block_size, τs)
# The part in which we do a tall and skinny QR
A_qr = view(A, offset_row:size(A, 1), offset_col:min(size(A, 2), offset_col+block_size-1))
# The part to which we apply the tall and skinny QR from the left
A_left = view(A, offset_row:size(A, 1), offset_col+block_size:size(A, 2))
# The part to which we apply the tall and skinny QR from the right.
A_right = view(A, :, offset_row:size(A, 1))
#=@sync=# for block_start = 1:block_size:size(A_qr, 1)
let
block_end = min(block_start + block_size - 1, size(A_qr, 1))
range = block_start:block_end
A_qr′ = view(A_qr, range, :)
τs′ = view(τs, range)
#=@spawn=# block_qr!(A_qr′, τs′)
end
end
#=@sync=# for block_start = 1:block_size:size(A_qr, 1)
let
block_end = min(block_start + block_size - 1, size(A_qr, 1))
range = block_start:block_end
A_qr′ = view(A_qr, range, :)
A_left′ = view(A_left, range, :)
τs′ = view(τs, range)
#=@spawn=# apply_left!(A_qr′, A_left′, τs′)
end
end
#=@sync=# for block_start = 1:block_size:size(A_qr, 1)
let
block_end = min(block_start + block_size - 1, size(A_qr, 1))
range = block_start:block_end
A_qr′ = view(A_qr, range, :)
A_right′ = view(A_right, :, range)
τs′ = view(τs, range)
#=@spawn=# apply_right!(A_qr′, A_right′, τs′)
end
end
# Pair-wise reduce to single QR.
merged_block_size = block_size
while true
# If there's only one block left, stop.
nblocks = ÷(size(A_qr, 1), merged_block_size, RoundUp)
nblocks ≤ 1 && break
#=@sync=# for block_start = 1:2merged_block_size:size(A_qr, 1)
let
# If only a single block or less fits, stop.
block_start + merged_block_size - 1 ≥ size(A_qr, 1) && break
block_end = min(block_start + 2merged_block_size - 1, size(A_qr, 1))
range = block_start:block_end
A_qr′ = view(A_qr, range, :)
τs′ = view(τs, range)
#=@spawn=# merge_block_qr!(A_qr′, τs′, merged_block_size)
end
end
#=@sync=# for block_start = 1:2merged_block_size:size(A_qr, 1)
let
# If only a single block or less fits, stop.
block_start + merged_block_size - 1 ≥ size(A_qr, 1) && break
block_end = min(block_start + 2merged_block_size - 1, size(A_qr, 1))
range = block_start:block_end
A_qr′ = view(A_qr, range, :)
A_left′ = view(A_left, range, :)
τs′ = view(τs, range)
#=@spawn=# apply_left_with_gap!(A_qr′, A_left′, τs′, merged_block_size)
end
end
#=@sync=# for block_start = 1:2merged_block_size:size(A_qr, 1)
let
# If only a single block or less fits, stop.
block_start + merged_block_size - 1 ≥ size(A_qr, 1) && break
block_end = min(block_start + 2merged_block_size - 1, size(A_qr, 1))
range = block_start:block_end
A_qr′ = view(A_qr, range, :)
A_right′ = view(A_right, :, range)
τs′ = view(τs, range)
#=@spawn=# apply_right_with_gap!(A_qr′, A_right′, τs′, merged_block_size)
end
end
merged_block_size *= 2
end
A
end
function block_qr!(A, τs)
m, n = size(A)
steps = min(m, n) - 1
# Create a bunch of reflectors and save the norm value (τ)
@inbounds for k = OneTo(steps)
x = view(A, k:m, k)
τs[k] = reflector!(x)
reflectorApplyLeft!(view(A, k:m, k+1:n), τs[k], x)
end
return A
end
function apply_left!(qr, A, τs)
m, n = size(qr)
steps = min(m, n) - 1
@inbounds for k = OneTo(steps)
reflectorApplyLeft!(view(A, k:size(A, 1), :), τs[k], view(qr, k:m, k))
end
return A
end
function apply_right!(qr, A, τs)
m, n = size(qr)
steps = min(m, n) - 1
@inbounds for k = OneTo(steps)
reflectorApplyRight!(view(A, :, k:size(A, 2)), τs[k], view(qr, k:m, k))
end
return A
end
function merge_block_qr!(A, τs, v_block_size)
m, n = size(A)
# Create a bunch of reflectors and save the norm value (τ)
@inbounds for k = OneTo(n)
gap = v_block_size - k + 1
to = min(m, k + v_block_size)
x = view(A, k:to, k)
τs[k] = reflector!(x, gap)
reflectorApplyLeft!(view(A, k:to, k+1:n), τs[k], x, gap)
end
return A
end
function apply_left_with_gap!(qr, A, τs, v_block_size)
m, n = size(qr)
# Create a bunch of reflectors and save the norm value (τ)
@inbounds for k = OneTo(n)
gap = v_block_size - k + 1
to = min(m, k + v_block_size)
x = view(qr, k:to, k)
reflectorApplyLeft!(view(A, k:to, :), τs[k], x, gap)
end
return A
end
function apply_right_with_gap!(qr, A, τs, v_block_size)
m, n = size(qr)
# Create a bunch of reflectors and save the norm value (τ)
@inbounds for k = OneTo(n)
gap = v_block_size - k + 1
to = min(m, k + v_block_size)
x = view(qr, k:to, k)
reflectorApplyRight!(view(A, :, k:to), τs[k], x, gap)
qr[k+1:to, k] .= 0
end
return A
end
@inline function reflector!(x::AbstractVector, gap = 1)
@inbounds begin
n = length(x)
n == 0 && return zero(eltype(x))
ξ1 = x[1]
normu = abs2(ξ1)
for i = 1+gap:n
normu += abs2(x[i])
end
iszero(normu) && return zero(eltype(x))
normu = sqrt(normu)
ν = copysign(normu, real(ξ1))
ξ1 += ν
x[1] = -ν
for i = 1+gap:n
x[i] /= ξ1
end
ξ1/ν
end
end
@inline function reflectorApplyLeft!(A::AbstractMatrix, τ::Number, x::AbstractVector, gap = 1)
m, n = size(A)
m == 0 && return A
@inbounds for j = 1:n
# dot
vAj = A[1, j]
for i = 1+gap:m
vAj += x[i]'*A[i, j]
end
vAj = conj(τ)*vAj
# ger
A[1, j] -= vAj
for i = 1+gap:m
A[i, j] -= x[i]*vAj
end
end
return A
end
@inline function reflectorApplyRight!(A::AbstractMatrix, τ::Number, x::AbstractVector, gap = 1)
m, n = size(A)
n == 0 && return A
@inbounds for j = 1:m
# dot
vAj = A[j, 1]
for i = 1+gap:n
vAj += x[i]*A[j, i]
end
vAj = τ*vAj
# ger
A[j, 1] -= vAj
for i = 1+gap:n
A[j, i] -= x[i]'*vAj
end
end
return A
end
|
module FS
using ..DataMod, ..Helpers
using MultivariateStats, Statistics, StatsBase, StatsModels
include("./manualmodel.jl")
export ManualModel, predict
module Utils
using DataFrames, LinearAlgebra, StatsBase, StatsModels, ..DataMod, ..Helpers, ..ManualModelMod
function compose(y::Symbol, terms::Vector{Symbol}; intercept::Bool = true)::FormulaTerm
return term(y) ~ foldl(+, term.(intercept ? [1; terms] : terms))
end
function lt_featurename(f1::Symbol, f2::Symbol)
s1, s2 = string(f1), string(f2)
if s1[1] == s2[1]
return isless(parse(Int, s1[2:end]), parse(Int, s2[2:end]))
else
return isless(s2[1], s1[1])
end
end
function get_model(data::Data, sel_xmat::Matrix{Float64}, y_vec::Vector{Float64},
desel_i::Vector{Int}, predicted::Symbol, intercept::Bool)::RegressionModel
# Simply doing `sel_xmat \ y_vec` resulted in a rare error when
# sel_xmat is square. The \ operator interprets this incorrectly.
# Therefore, this line was extracted from the \ operator's code:
weights = qr(sel_xmat, ColumnNorm()) \ y_vec
weights = copy(weights)
for i in desel_i
insert!(weights, intercept ? i + 1 : i, 0.0)
end
return ManualModel(weights, intercept, predicted, data)
end
function get_models(data::Data, rhs::Vector{Symbol}, intercept::Bool = true)::Vector{RegressionModel}
desel_i = findall(x -> x ∉ rhs, data.xs)
n = nrow(data.df)
sort!(rhs, lt = lt_featurename)
sel_xmat = isempty(rhs) ? zeros(Float64, n, 0) : (data.xs == rhs ? data.xmat : Matrix(select(data.xdf, rhs)))::Matrix{Float64}
intercept && (sel_xmat = hcat(ones(Float64, n), sel_xmat))
y_vecs = Vector{Float64}[data.ydf[!, y] for y in data.ys]
return map(i -> get_model(data, sel_xmat, y_vecs[i], desel_i, data.ys[i], intercept), 1:length(y_vecs))
end
Base.iterate(s::Symbol) = (s, nothing)
Base.iterate(::Symbol, ::Any) = nothing
Base.length(s::Symbol) = 1
export compose, get_models
end
using .Utils
export compose, get_models
include("./fs-stepwise.jl")
include("./fs-lasso.jl")
include("./fs-inipg.jl")
include("./fs-importance.jl")
include("./fs-ga.jl")
include("./fs-l1l2reg.jl")
llsqreg(data::Data)::Vector{RegressionModel} = get_models(data, data.xs)
function ridgereg(data::Data, λs::Vector{Float64} = .5 .^ collect(-5:14))::Vector{RegressionModel}
ridges = [ridge(data.xmat, data.ymat, λ) for λ in λs]
c = 1:length(data.ys)
splits = [[ begin
y = r[:, i]
[y[end]; y[1:end-1]]
end for i in c] for r in ridges]
models = [[ManualModel(split[i], true, data.ys[i], data) for i in c] for split in splits]
bics = [mean([bic(model[i]) for i in c]) for model in models]
minind = argmin(bics)
return models[minind]
end
export llsqreg, ridgereg
end
using .FS
|
module Error.Data
import public Loc
import public Syntax.Tokens
public export
data ExpectType
= ExpectTkn Tkn
| ExpectKeyword String
| ExpectId
| ExpectInt
| ExpectStr
| Unknown
| TknEOF
public export
data BetweenType
= Parenthesis
| CurlyBrackets
| SquareBrackets
public export
data ReaderError
= NotClosed BetweenType
| Expected ExpectType Tkn
public export
data LexerError
= UnterminatedString
| UnexpectedChar
public export
data CapitalizedNameErr
= CapitalModule
| CapitalType
| CapitalADTName
| CapitalDataConstructor
public export
data ParserError
= InvalidTopLevel String
| InvalidIdName String
| InvalidPath String
| ExpectedDataField
| ExpectedLiteral
| ExpectedIdentifier
| ExpectedIdButGotPath
| ExpectedTypeDef
| ImpossibleParsingError
| NeedCapitalizedName CapitalizedNameErr
| NeedMinusculeName
| NotSupportEmptyList
| NotAValidPattern
| NotAValidType
| NotAValidRecordField
| InvalidExpr
| Unreachable
| NotAllCasesHaveCond
| ArrowWithOneArg
public export
data ErrorType
= LexicalError Range LexerError
| ReadingError Range ReaderError
| ParsingError Range ParserError
| EOF
| ExpectedEOF
| UnexpectedInternalError
public export
record ErrorData where
constructor MkErrorData
fileName : String
sourceInput : String
error : ErrorType
|
/**
* This file is part of the "libterminal" project
* Copyright (c) 2019-2020 Christian Parpart <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <terminal/GraphicsAttributes.h>
#include <terminal/Line.h>
#include <terminal/primitives.h>
#include <crispy/algorithm.h>
#include <crispy/assert.h>
#include <crispy/ring.h>
#include <unicode/convert.h>
#include <range/v3/algorithm/copy.hpp>
#include <range/v3/iterator/insert_iterators.hpp>
#include <range/v3/view/iota.hpp>
#include <gsl/span>
#include <gsl/span_ext>
#include <algorithm>
#include <array>
#include <sstream>
#include <string>
#include <string_view>
#include <utility>
namespace terminal
{
// {{{ Margin
struct Margin
{
struct Horizontal
{
ColumnOffset from;
        // TODO: call it begin and end and have end point to to+1 to avoid unnecessary +1's later
        ColumnOffset to;
[[nodiscard]] constexpr ColumnCount length() const noexcept
{
return unbox<ColumnCount>(to - from) + ColumnCount(1);
}
[[nodiscard]] constexpr bool contains(ColumnOffset _value) const noexcept
{
return from <= _value && _value <= to;
}
[[nodiscard]] constexpr bool operator==(Horizontal rhs) const noexcept
{
return from == rhs.from && to == rhs.to;
}
[[nodiscard]] constexpr bool operator!=(Horizontal rhs) const noexcept { return !(*this == rhs); }
};
struct Vertical
{
LineOffset from;
// TODO: call it begin and end and have end point to to+1 to avoid unnecessary +1's later
LineOffset to;
[[nodiscard]] constexpr LineCount length() const noexcept
{
return unbox<LineCount>(to - from) + LineCount(1);
}
[[nodiscard]] constexpr bool contains(LineOffset _value) const noexcept
{
return from <= _value && _value <= to;
}
[[nodiscard]] constexpr bool operator==(Vertical const& rhs) const noexcept
{
return from == rhs.from && to == rhs.to;
}
[[nodiscard]] constexpr bool operator!=(Vertical const& rhs) const noexcept
{
return !(*this == rhs);
}
};
Vertical vertical {}; // top-bottom
Horizontal horizontal {}; // left-right
};
constexpr bool operator==(Margin const& a, PageSize b) noexcept
{
return a.horizontal.from.value == 0 && a.horizontal.to.value + 1 == b.columns.value
&& a.vertical.from.value == 0 && a.vertical.to.value + 1 == b.lines.value;
}
constexpr bool operator!=(Margin const& a, PageSize b) noexcept
{
return !(a == b);
}
// }}}
template <typename Cell>
using Lines = crispy::ring<Line<Cell>>;
/**
 * Represents a logical grid line, i.e. a sequence of lines that were written without
* an explicit linefeed, triggering an auto-wrap.
*/
template <typename Cell>
struct LogicalLine
{
LineOffset top {};
LineOffset bottom {};
std::vector<std::reference_wrapper<Line<Cell>>> lines {};
[[nodiscard]] Line<Cell> joinWithRightTrimmed() const
{
// TODO: determine final line's column count and pass it to ctor.
typename Line<Cell>::Buffer output;
auto lineFlags = lines.front().get().flags();
for (Line<Cell> const& line: lines)
for (Cell const& cell: line.cells())
output.emplace_back(cell);
while (!output.empty() && output.back().empty())
output.pop_back();
return Line<Cell>(output, lineFlags);
}
[[nodiscard]] std::string text() const
{
std::string output;
for (auto const& line: lines)
output += line.get().toUtf8();
return output;
}
};
template <typename Cell>
bool operator==(LogicalLine<Cell> const& a, LogicalLine<Cell> const& b) noexcept
{
return a.top == b.top && a.bottom == b.bottom;
}
template <typename Cell>
bool operator!=(LogicalLine<Cell> const& a, LogicalLine<Cell> const& b) noexcept
{
return !(a == b);
}
template <typename Cell>
struct LogicalLines
{
LineOffset topMostLine;
LineOffset bottomMostLine;
std::reference_wrapper<Lines<Cell>> lines;
struct iterator // {{{
{
std::reference_wrapper<Lines<Cell>> lines;
LineOffset top;
LineOffset next; // index to next logical line's beginning
LineOffset bottom;
LogicalLine<Cell> current;
iterator(std::reference_wrapper<Lines<Cell>> _lines,
LineOffset _top,
LineOffset _next,
LineOffset _bottom):
lines { _lines }, top { _top }, next { _next }, bottom { _bottom }
{
Require(_top <= next);
Require(next <= _bottom + 1);
++*this;
}
LogicalLine<Cell> const& operator*() const noexcept { return current; }
        LogicalLine<Cell> const* operator->() const noexcept { return &current; }
iterator& operator++()
{
if (next == bottom + 1)
{
current.top = next;
current.bottom = next;
return *this;
}
Require(!lines.get()[unbox<int>(next)].wrapped());
current.top = LineOffset::cast_from(next);
current.lines.clear();
do
current.lines.emplace_back(lines.get()[unbox<int>(next++)]);
while (next <= bottom && lines.get()[unbox<int>(next)].wrapped());
current.bottom = LineOffset::cast_from(next - 1);
return *this;
}
iterator& operator--()
{
if (next == top - 1)
{
current.top = top - 1;
current.bottom = top - 1;
return *this;
}
auto const bottomMost = next - 1;
do
--next;
while (lines.get()[unbox<int>(next)].wrapped());
auto const topMost = next;
current.top = topMost;
current.bottom = bottomMost;
current.lines.clear();
for (auto i = topMost; i <= bottomMost; ++i)
current.lines.emplace_back(lines.get()[unbox<int>(i)]);
return *this;
}
        iterator operator++(int) // post-inc/dec must return the copy by value
        {
            auto c = *this;
            ++*this;
            return c;
        }
        iterator operator--(int)
        {
            auto c = *this;
            --*this;
            return c;
        }
bool operator==(iterator const& other) const noexcept { return current == other.current; }
bool operator!=(iterator const& other) const noexcept { return current != other.current; }
}; // }}}
iterator begin() const { return iterator(lines, topMostLine, topMostLine, bottomMostLine); }
iterator end() const { return iterator(lines, topMostLine, bottomMostLine + 1, bottomMostLine); }
};
template <typename Cell>
struct ReverseLogicalLines
{
LineOffset topMostLine;
LineOffset bottomMostLine;
std::reference_wrapper<Lines<Cell>> lines;
struct iterator // {{{
{
std::reference_wrapper<Lines<Cell>> lines;
LineOffset top;
LineOffset next; // index to next logical line's beginning
LineOffset bottom;
LogicalLine<Cell> current;
iterator(std::reference_wrapper<Lines<Cell>> _lines,
LineOffset _top,
LineOffset _next,
LineOffset _bottom):
lines { _lines }, top { _top }, next { _next }, bottom { _bottom }
{
Require(_top - 1 <= next);
Require(next <= _bottom);
++*this;
}
LogicalLine<Cell> const& operator*() const noexcept { return current; }
iterator& operator--()
{
if (next == bottom + 1)
{
current.top = bottom + 1;
current.bottom = bottom + 1;
return *this;
}
Require(!lines.get()[unbox<int>(next)].wrapped());
current.top = LineOffset::cast_from(next);
current.lines.clear();
do
current.lines.emplace_back(lines.get()[unbox<int>(next++)]);
while (next <= bottom && lines.get()[unbox<int>(next)].wrapped());
current.bottom = LineOffset::cast_from(next - 1);
return *this;
}
iterator& operator++()
{
if (next == top - 1)
{
current.top = next;
current.bottom = next;
return *this;
}
auto const bottomMost = next;
while (lines.get()[unbox<int>(next)].wrapped())
--next;
auto const topMost = next;
--next; // jump to next logical line's bottom line above the current logical one
current.top = topMost;
current.bottom = bottomMost;
current.lines.clear();
for (auto i = topMost; i <= bottomMost; ++i)
current.lines.emplace_back(lines.get()[unbox<int>(i)]);
return *this;
}
        iterator operator++(int) // post-inc/dec must return the copy by value
        {
            auto c = *this;
            ++*this;
            return c;
        }
        iterator operator--(int)
        {
            auto c = *this;
            --*this;
            return c;
        }
bool operator==(iterator const& other) const noexcept { return current == other.current; }
bool operator!=(iterator const& other) const noexcept { return current != other.current; }
}; // }}}
iterator begin() const { return iterator(lines, topMostLine, bottomMostLine, bottomMostLine); }
iterator end() const { return iterator(lines, topMostLine, topMostLine - 1, bottomMostLine); }
};
/**
* Manages the screen grid buffer (main screen + scrollback history).
*
* <h3>Future motivations</h3>
*
* <ul>
* <li>manages text reflow upon resize
* <li>manages underlying disk storage for very old scrollback history lines.
* </ul>
*
* <h3>Layout</h3>
*
* <pre>
* +0========================-3+ <-- scrollback top
* |1 -2|
* |2 Scrollback history -1|
* |3 0| <-- scrollback bottom
* +4-------------------------1+ <-- main page top
* |5 2|
* |6 main page area 3|
* |7 4| <-- main page bottom
* +---------------------------+
* ^ ^
* 1 pageSize.columns
* </pre>
*/
template <typename Cell>
class Grid
{
// TODO: Rename all "History" to "Scrollback"?
public:
Grid(PageSize _pageSize, bool _reflowOnResize, LineCount _maxHistoryLineCount);
Grid(): Grid(PageSize { LineCount(25), ColumnCount(80) }, false, LineCount(0)) {}
void reset();
// {{{ grid global properties
[[nodiscard]] LineCount maxHistoryLineCount() const noexcept { return maxHistoryLineCount_; }
void setMaxHistoryLineCount(LineCount _maxHistoryLineCount);
[[nodiscard]] LineCount totalLineCount() const noexcept { return maxHistoryLineCount_ + pageSize_.lines; }
[[nodiscard]] LineCount historyLineCount() const noexcept
{
return std::min(maxHistoryLineCount_, linesUsed_ - pageSize_.lines);
}
[[nodiscard]] bool reflowOnResize() const noexcept { return reflowOnResize_; }
void setReflowOnResize(bool _enabled) { reflowOnResize_ = _enabled; }
[[nodiscard]] PageSize pageSize() const noexcept { return pageSize_; }
/// Resizes the main page area of the grid and adapts the scrollback area's width accordingly.
///
/// @param _pageSize new size of the main page area
/// @param _currentCursorPos current cursor position
/// @param _wrapPending AutoWrap is on and a wrap is pending
///
/// @returns updated cursor position.
[[nodiscard]] CellLocation resize(PageSize _pageSize, CellLocation _currentCursorPos, bool _wrapPending);
// }}}
// {{{ Line API
/// @returns reference to Line at given relative offset @p _line.
Line<Cell>& lineAt(LineOffset _line) noexcept;
Line<Cell> const& lineAt(LineOffset _line) const noexcept;
gsl::span<Cell const> lineBuffer(LineOffset _line) const noexcept { return lineAt(_line).cells(); }
gsl::span<Cell const> lineBufferRightTrimmed(LineOffset _line) const noexcept;
[[nodiscard]] std::string lineText(LineOffset _line) const;
[[nodiscard]] std::string lineTextTrimmed(LineOffset _line) const;
[[nodiscard]] std::string lineText(Line<Cell> const& _line) const;
void setLineText(LineOffset _line, std::string_view _text);
// void resetLine(LineOffset _line, GraphicsAttributes _attribs) noexcept
// { lineAt(_line).reset(_attribs); }
[[nodiscard]] ColumnCount lineLength(LineOffset _line) const noexcept { return lineAt(_line).size(); }
[[nodiscard]] bool isLineBlank(LineOffset _line) const noexcept;
[[nodiscard]] bool isLineWrapped(LineOffset _line) const noexcept;
[[nodiscard]] int computeLogicalLineNumberFromBottom(LineCount _n) const noexcept;
[[nodiscard]] size_t zero_index() const noexcept { return lines_.zero_index(); }
// }}}
/// Gets a reference to the cell relative to screen origin (top left, 0:0).
[[nodiscard]] Cell& useCellAt(LineOffset _line, ColumnOffset _column) noexcept;
[[nodiscard]] Cell& at(LineOffset _line, ColumnOffset _column) noexcept;
[[nodiscard]] Cell const& at(LineOffset _line, ColumnOffset _column) const noexcept;
// page view API
gsl::span<Line<Cell>> pageAtScrollOffset(ScrollOffset _scrollOffset);
gsl::span<Line<Cell> const> pageAtScrollOffset(ScrollOffset _scrollOffset) const;
gsl::span<Line<Cell>> mainPage();
gsl::span<Line<Cell> const> mainPage() const;
LogicalLines<Cell> logicalLines()
{
return LogicalLines<Cell> { boxed_cast<LineOffset>(-historyLineCount()),
boxed_cast<LineOffset>(pageSize_.lines - 1),
lines_ };
}
ReverseLogicalLines<Cell> logicalLinesReverse()
{
return ReverseLogicalLines<Cell> { boxed_cast<LineOffset>(-historyLineCount()),
boxed_cast<LineOffset>(pageSize_.lines - 1),
lines_ };
}
// {{{ buffer manipulation
/// Completely deletes all scrollback lines.
void clearHistory();
/// Scrolls up by @p _n lines within the given margin.
///
/// @param _n number of lines to scroll up within the given margin.
/// @param _defaultAttributes SGR attributes the newly created grid cells will be initialized with.
/// @param _margin the margin coordinates to perform the scrolling action into.
LineCount scrollUp(LineCount _n, GraphicsAttributes _defaultAttributes, Margin _margin) noexcept;
/// Scrolls up main page by @p _n lines and re-initializes grid cells with @p _defaultAttributes.
LineCount scrollUp(LineCount _n, GraphicsAttributes _defaultAttributes = {}) noexcept;
/// Scrolls down by @p _n lines within the given margin.
///
/// @param _n number of lines to scroll down within the given margin.
/// @param _defaultAttributes SGR attributes the newly created grid cells will be initialized with.
/// @param _margin the margin coordinates to perform the scrolling action into.
void scrollDown(LineCount _n, GraphicsAttributes const& _defaultAttributes, Margin const& _margin);
// Scrolls the data within the margins to the left filling the new space on the right with empty cells.
void scrollLeft(GraphicsAttributes _defaultAttributes, Margin _margin) noexcept;
// }}}
// {{{ Rendering API
/// Renders the full screen by passing every grid cell to the callback.
template <typename RendererT>
void render(RendererT&& _render, ScrollOffset _scrollOffset = {}) const;
/// Takes text-screenshot of the main page.
[[nodiscard]] std::string renderMainPageText() const;
/// Renders the full grid's text characters.
///
/// Empty cells are represented as strings and lines split by LF.
[[nodiscard]] std::string renderAllText() const;
// }}}
[[nodiscard]] constexpr LineFlags defaultLineFlags() const noexcept;
[[nodiscard]] constexpr LineCount linesUsed() const noexcept;
void verifyState() const;
private:
CellLocation growLines(LineCount _newHeight, CellLocation _cursor);
void appendNewLines(LineCount _count, GraphicsAttributes _attr);
void clampHistory();
// {{{ buffer helpers
void resizeBuffers(PageSize _newSize)
{
auto const newTotalLineCount = historyLineCount() + _newSize.lines;
lines_.resize(unbox<size_t>(newTotalLineCount));
pageSize_ = _newSize;
}
void rezeroBuffers() noexcept { lines_.rezero(); }
void rotateBuffers(int offset) noexcept { lines_.rotate(offset); }
void rotateBuffersLeft(LineCount count) noexcept { lines_.rotate_left(unbox<size_t>(count)); }
void rotateBuffersRight(LineCount count) noexcept { lines_.rotate_right(unbox<size_t>(count)); }
// }}}
// private fields
//
PageSize pageSize_;
bool reflowOnResize_ = false;
LineCount maxHistoryLineCount_;
// Number of lines is at least the sum of maxHistoryLineCount_ + pageSize_.lines,
// because shrinking the page height does not necessarily
// have to resize the array (as optimization).
Lines<Cell> lines_;
// Number of lines used in the Lines buffer.
LineCount linesUsed_;
};
template <typename Cell>
std::ostream& dumpGrid(std::ostream& os, Grid<Cell> const& grid);
template <typename Cell>
std::string dumpGrid(Grid<Cell> const& grid);
// {{{ impl
template <typename Cell>
constexpr LineFlags Grid<Cell>::defaultLineFlags() const noexcept
{
return reflowOnResize_ ? LineFlags::Wrappable : LineFlags::None;
}
template <typename Cell>
constexpr LineCount Grid<Cell>::linesUsed() const noexcept
{
return linesUsed_;
}
template <typename Cell>
bool Grid<Cell>::isLineWrapped(LineOffset _line) const noexcept
{
return _line >= -boxed_cast<LineOffset>(historyLineCount())
&& boxed_cast<LineCount>(_line) < pageSize_.lines && lineAt(_line).wrapped();
}
template <typename Cell>
template <typename RendererT>
void Grid<Cell>::render(RendererT&& _render, ScrollOffset _scrollOffset) const
{
assert(!_scrollOffset || unbox<LineCount>(_scrollOffset) <= historyLineCount());
auto y = LineOffset(0);
for (int i = -*_scrollOffset, e = i + *pageSize_.lines; i != e; ++i, ++y)
{
auto x = ColumnOffset(0);
Line<Cell> const& line = lines_[i];
if (line.isTrivialBuffer())
_render.renderTrivialLine(line.trivialBuffer(), y);
else
{
_render.startLine(y);
for (Cell const& cell: line.cells())
_render.renderCell(cell, y, x++);
_render.endLine();
}
}
_render.finish();
}
// }}}
} // namespace terminal
// {{{ fmt formatter
namespace fmt
{
template <>
struct formatter<terminal::Margin::Horizontal>
{
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
return ctx.begin();
}
template <typename FormatContext>
auto format(const terminal::Margin::Horizontal range, FormatContext& ctx)
{
return fmt::format_to(ctx.out(), "{}..{}", range.from, range.to);
}
};
template <>
struct formatter<terminal::Margin::Vertical>
{
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
return ctx.begin();
}
template <typename FormatContext>
auto format(const terminal::Margin::Vertical range, FormatContext& ctx)
{
return fmt::format_to(ctx.out(), "{}..{}", range.from, range.to);
}
};
} // namespace fmt
// }}}
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/15 19:42
# @Author : Fangpf
# @FileName: client.py
import time
import cv2
import requests
from PIL import Image, ImageDraw, ImageFont
import numpy as np
REQUEST_URL = "https://smallflyfly.ngrok2.xiaomiqiu.cn/upload"
def predict(byte_file):
im = byte_file
param = {'file': im}
res = requests.post(REQUEST_URL, files=param)
res = res.json()
predictions = []
landmarks = []
if res['success']:
predictions = res['prediction']
landmarks = res['landmarks']
return predictions, landmarks
if __name__ == '__main__':
video_capture = cv2.VideoCapture(0)
while True:
_, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
_, jpeg = cv2.imencode('.jpg', small_frame)
rgb_small_frame = small_frame[:, :, ::-1]
tic = time.time()
dets, landmarks = predict(jpeg.tobytes())
print('net forward time: {:.4f}'.format(time.time() - tic))
for (det, landmark) in zip(dets, landmarks):
xmin, ymin, xmax, ymax, conf, ismasked = det
xmin = int(xmin * 4)
ymin = int(ymin * 4)
xmax = int(xmax * 4)
ymax = int(ymax * 4)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 255, 0), 1)
for (x, y) in landmark:
cv2.circle(frame, (int(x)*4, int(y)*4), 1, (255, 0, 255))
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(image)
fontStyle = ImageFont.truetype(
"font/FZY1JW.TTF", 20, encoding="utf-8"
)
draw.text((xmin, ymin-20), "佩戴口罩" if ismasked == 1 else "未佩戴口罩", (255, 0, 0), font=fontStyle)
        frame = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)  # PIL works in RGB; convert back to BGR for OpenCV display
cv2.imshow('im', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
Welcome to The Briars Nursery and Briars Cabin Nursery.
In both nurseries we aim to provide a welcoming, warm, friendly and safe learning environment in order to give our children the best start to their education.
Led by Teresa Stockham, we have a well-established team who work within the Early Years Foundation Stage to encourage the children to learn, through their play, the social, communicative and physical skills they need to develop into well-rounded, confident individuals who will continue on to school motivated and ready to learn.
Our caring and knowledgeable staff listen to and respect our children and encourage them to explore both the inside and outside environment, making use of all the resources available to them to ignite their curiosity and broaden their knowledge.
We understand that parents are a child’s first educator and know their child the best; we therefore work in partnership with parents and embrace the whole family.
For further information, please feel free to contact us and view the Prospectus. |
[STATEMENT]
lemma ET_HADelta:
" \<lbrakk> TS \<subseteq> ET ST; t \<in> TS \<rbrakk> \<Longrightarrow> t \<in> HADelta (HA ST)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>TS \<subseteq> ET ST; t \<in> TS\<rbrakk> \<Longrightarrow> t \<in> HADelta (HA ST)
[PROOF STEP]
apply (unfold HADelta_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>TS \<subseteq> ET ST; t \<in> TS\<rbrakk> \<Longrightarrow> t \<in> \<Union> (Delta ` SAs (HA ST))
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>TS \<subseteq> ET ST; t \<in> TS\<rbrakk> \<Longrightarrow> \<exists>x\<in>SAs (HA ST). t \<in> Delta x
[PROOF STEP]
apply (unfold ET_def EnabledTrans_def Image_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>TS \<subseteq> (\<Union>SA\<in>SAs (HA ST). {y. \<exists>x\<in>{SA}. (x, y) \<in> {y. \<exists>x\<in>{ST}. (x, y) \<in> {(ST, SA, T). SA \<in> SAs (HA ST) \<and> T \<in> Delta SA \<and> source T \<in> Conf ST \<and> (Conf ST, Events ST, Value ST) |= label T}}}); t \<in> TS\<rbrakk> \<Longrightarrow> \<exists>x\<in>SAs (HA ST). t \<in> Delta x
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
from scipy import io as sio
import numpy as np
import os
import dicom
import nibabel as nib
import datetime
from cafndl_fileio import *
from cafndl_utils import *
from cafndl_network import *
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
'''
convert dicom to nifti
$ mkdir DRF100_nifti
$ dicom2nifti DRF100 DRF100_nifti
'''
'''
dataset
'''
filename_checkpoint = '../ckpt/model_demo_mc.ckpt'
filename_init = ''
list_dataset_train = [
{
'inputs':['/data/enhaog/data_lowdose/GBM_Ex1496/DRF020_nifti/802_.nii.gz',
'/data/enhaog/data_lowdose/GBM_Ex1496/DRF100_nifti/803_.nii.gz'],
'gt':'/data/enhaog/data_lowdose/GBM_Ex1496/DRF001_nifti/800_.nii.gz'
}
]
num_dataset_train = len(list_dataset_train)
print('process {0} data description'.format(num_dataset_train))
'''
augmentation
'''
list_augments = []
num_augment_flipxy = 2
num_augment_flipx = 2
num_augment_flipy = 2
num_augment_shiftx = 1
num_augment_shifty = 1
for flipxy in range(num_augment_flipxy):
for flipx in range(num_augment_flipx):
for flipy in range(num_augment_flipy):
for shiftx in range(num_augment_shiftx):
for shifty in range(num_augment_shifty):
augment={'flipxy':flipxy,'flipx':flipx,'flipy':flipy,'shiftx':shiftx,'shifty':shifty}
list_augments.append(augment)
num_augment=len(list_augments)
print('will augment data with {0} augmentations'.format(num_augment))
'''
generate train data
'''
list_train_input = []
list_train_gt = []
for index_data in range(num_dataset_train):
# directory
list_data_train_input = []
for path_train_input in list_dataset_train[index_data]['inputs']:
# load data
data_train_input = prepare_data_from_nifti(path_train_input, list_augments)
list_data_train_input.append(data_train_input)
data_train_input = np.concatenate(list_data_train_input, axis=-1)
# get ground truth
path_train_gt = list_dataset_train[index_data]['gt']
data_train_gt = prepare_data_from_nifti(path_train_gt, list_augments)
# append
list_train_input.append(data_train_input)
list_train_gt.append(data_train_gt)
# generate and scale dataset
scale_data = 100.
data_train_input = scale_data * np.concatenate(list_train_input, axis = 0)
data_train_gt = scale_data * np.concatenate(list_train_gt, axis = 0)
data_train_residual = data_train_gt - data_train_input[:,:,:,0:1]
print('mean, min, max')
print(np.mean(data_train_input.flatten()),np.min(data_train_input.flatten()),np.max(data_train_input.flatten()))
print(np.mean(data_train_gt.flatten()),np.min(data_train_gt.flatten()),np.max(data_train_gt.flatten()))
print(np.mean(data_train_residual.flatten()),np.min(data_train_residual.flatten()),np.max(data_train_residual.flatten()))
print('generate train dataset with augmentation size {0},{1}'.format(
data_train_input.shape, data_train_gt.shape))
'''
setup parameters
'''
# related to model
num_poolings = 3
num_conv_per_pooling = 3
# related to training
lr_init = 0.001
num_epoch = 100
ratio_validation = 0.1
batch_size = 4
# default settings
num_channel_input = data_train_input.shape[-1]
num_channel_output = data_train_gt.shape[-1]
img_rows = data_train_input.shape[1]
img_cols = data_train_input.shape[2]  # width; shape[1] above is the height
keras_memory = 0.4
keras_backend = 'tf'
with_batch_norm = True
print('setup parameters')
'''
init model
'''
callback_checkpoint = ModelCheckpoint(filename_checkpoint,
monitor='val_loss',
save_best_only=True)
setKerasMemory(keras_memory)
model = deepEncoderDecoder(num_channel_input = num_channel_input,
num_channel_output = num_channel_output,
img_rows = img_rows,
img_cols = img_cols,
lr_init = lr_init,
num_poolings = num_poolings,
num_conv_per_pooling = num_conv_per_pooling,
with_bn = with_batch_norm, verbose=1)
print('train model:', filename_checkpoint)
print('parameter count:', model.count_params())
'''
train network
'''
try:
	model.load_weights(filename_init)
	print('model trains from loading ' + filename_init)
except Exception:
	# no (or unreadable) initial weights -> train from random initialization
	print('model trains from scratch')
model.optimizer = Adam(lr = lr_init)
t_start_train = datetime.datetime.now()
history = model.fit(data_train_input, data_train_residual,
batch_size = batch_size,
epochs = num_epoch,
verbose = 1,
shuffle = True,
callbacks = [callback_checkpoint],
validation_split = ratio_validation)
t_end_train = datetime.datetime.now()
print('finish training on data size {0} for {1} epochs using time {2}'.format(
data_train_input.shape, num_epoch, t_end_train - t_start_train))
'''
save training results
'''
# save train loss/val loss
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
path_figure = filename_checkpoint+'.png'
plt.savefig(path_figure)
# save history dictionary
import json
path_history = filename_checkpoint+'.json'
with open(path_history, 'w') as outfile:
json.dump(history.history, outfile)
|
{-# LANGUAGE Safe #-}
{-# OPTIONS_GHC -interactive-print=Text.Pretty.Simple.pPrint #-}
{-# LANGUAGE ConstrainedClassMethods #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveFoldable #-}
{-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveTraversable #-}
{-# LANGUAGE EmptyDataDecls #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FunctionalDependencies #-}
{-# LANGUAGE ImplicitParams #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE LiberalTypeSynonyms #-}
{-# LANGUAGE MagicHash #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE ParallelListComp #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE PolymorphicComponents #-}
{-# LANGUAGE PostfixOperators #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE UnicodeSyntax #-}
module L where
import Control.Applicative
import Control.Arrow
import Control.Monad
import Control.Monad.Cont
import Control.Monad.Identity
import Control.Monad.Reader
import Control.Monad.ST
import Control.Monad.State
import Control.Monad.Writer
import Data.Bits
import Data.Bool
import Data.Char
import Data.Complex
import Data.Dynamic
import Data.Either
import Data.Eq
import Data.Function
import Data.Functor
import Data.Int
import Data.List
import Data.Maybe
import Data.Monoid
import Data.Ord
import Data.Ratio
import Data.STRef
import Data.Tree
import Data.Tuple
import Data.Typeable
import Data.Word
import Numeric
import ShowFun
import System.Random
import Lambdabot.Plugin.Haskell.Eval.Trusted
import Text.PrettyPrint.HughesPJ hiding (empty)
import Text.Printf
import qualified Data.ByteString as BS
import qualified Data.ByteString.Char8 as BSC
import qualified Data.ByteString.Lazy as BSL
import qualified Data.ByteString.Lazy.Char8 as BSLC
import qualified Data.Foldable
import qualified Data.IntMap as IM
import qualified Data.IntSet as IS
import qualified Data.Map as M
import qualified Data.Sequence
import qualified Data.Set as S
import qualified Data.Traversable
{-# LINE 1 "<local>" #-}
|
Require Export D.
(** **** Problem #19 : 2 stars (beq_nat_true) *)
Theorem beq_nat_true : forall n m,
beq_nat n m = true -> n = m.
Proof.
intros n. induction n. intros. destruct m. reflexivity.
inversion H.
intros.
destruct m. inversion H.
Lemma nm_SnSm_eq : forall (n m:nat),
n=m -> S n=S m.
Proof. intros. rewrite ->H. reflexivity. Qed.
apply nm_SnSm_eq. apply IHn. inversion H. reflexivity.
Qed.
|
[STATEMENT]
lemma pointwise_less_Nil2 [simp]: "\<not> x \<lhd> Nil"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> x \<lhd> []
[PROOF STEP]
by (simp add: pointwise_less_def) |
% Overview
% KNI API Customer Documentation
% Neuronics AG
% Stephan Dünner 26. September 2008
\chapter{Overview}
The Katana Native Interface KNI is an open source software library for controlling the Katana robot. KNI is written in C++ and structured so that it can easily be ported to other languages and frameworks. The code is non-platform-specific and can be compiled under both Windows (with the MS Visual C++ Compiler) and Linux (with the GNU Compiler Toolchain).\newline
Since the KNI abstracts the underlying layers, applications can be written for the Katana without having to become involved in the details of the system. It takes just a few function calls to connect to and initialise the robot. The protocol for controlling the robot from the PC is abstracted in its entirety. The KNI features an implementation of robot kinematics and path-calculation routines for the synchronous control of all axes and for traversing paths in space with the end effector.\newline
The openness of the common sources also makes the KNI the ideal tool for research and training, since the entire implementation can be traced, as well as modified and adapted at will.
|
= = Scheduling and preparations = =
|
From Undecidability.Synthetic
Require Import Definitions Undecidability.
From Undecidability.FOL
Require Import Sets.FST
Semantics.Tarski.FullFacts
Semantics.Tarski.FullSoundness
Deduction.FullNDFacts
Sets.Models.FST_model.
From Undecidability.FOL.Undecidability.Reductions
Require Import PCPb_to_FST PCPb_to_FSTD ZF_to_FST.
From Undecidability.PCP
Require Import PCP PCP_undec Reductions.PCPb_iff_dPCPb.
Theorem undecidable_entailment_FSTeq :
undecidable entailment_FSTeq.
Proof.
apply (undecidability_from_reducibility PCPb_undec).
exists solvable'. intros B. split.
- intros HP % (PCP_FSTD (p:=intu)).
apply soundness in HP. intros I M rho H. apply HP, H.
- apply PCP_FST.
Qed.
Theorem undecidable_deduction_FST :
undecidable deduction_FST.
Proof.
apply (undecidability_from_reducibility PCPb_undec).
exists solvable'. intros B. split.
- intros HP % (PCP_FSTD (p:=intu)). apply HP.
- intros HP % soundness. apply PCP_FST.
intros I M rho H. apply HP. apply H.
Qed.
Theorem undecidable_entailment_FST :
undecidable entailment_FST.
Proof.
apply (undecidability_from_reducibility PCPb_undec).
exists solvable. intros B. apply PCPb_to_FST.PCP_FST.
apply FST_model.
Qed.
Theorem undecidable_entailment_FSTI :
undecidable entailment_FSTI.
Proof.
apply (undecidability_from_reducibility PCPb_undec).
exists solvable. intros B. rewrite PCPb_iff_dPCPb. split; intros H.
- destruct H as [s H]. intros M HM rho H1 H2. eapply PCP_FST1; eauto.
intros sigma psi Hp. apply H2. now constructor.
- destruct FSTI_model as (D & I & H1 & H2 & H3).
specialize (H D I (fun _ => @i_func _ _ _ _ eset Vector.nil) H1 H3).
apply PCP_FST2 in H as [s Hs]; trivial. now exists s.
intros sigma psi Hp. apply H3. now constructor.
Qed.
Theorem undecidable_deduction_FSTI :
undecidable deduction_FSTI.
Proof.
apply (undecidability_from_reducibility PCPb_undec).
exists solvable. intros B. split; intros H.
- exists FSTeq. split; try apply FSTeq_base. now apply PCPb_to_FSTD.PCP_FSTD.
- destruct FSTI_model as (D & I & H1 & H2 & H3).
rewrite PCPb_iff_dPCPb. unshelve eapply PCP_FST2; eauto.
+ exact (fun _ => @i_func _ _ _ _ eset Vector.nil).
+ intros sigma psi Hp. apply H3. now constructor.
+ apply (tsoundness H). intros psi [theta [<-|[<-|[<-|[<-|Hp]]]]|theta].
1-4: cbn; setoid_rewrite H1; congruence.
* apply H3. now constructor.
* apply H3. constructor 2.
Qed.
|
[STATEMENT]
lemma append_prs2 [quot_preserve]:
assumes q: "Quotient3 R1 Abs1 Rep1"
and r: "Quotient3 R2 Abs2 Rep2"
shows "((map Rep1 \<circ> Rep2) ---> (map Rep1 \<circ> Rep2) ---> (Abs2 \<circ> map Abs1)) (@) =
(Rep2 ---> Rep2 ---> Abs2) (@)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((map Rep1 \<circ> Rep2) ---> (map Rep1 \<circ> Rep2) ---> Abs2 \<circ> map Abs1) (@) = (Rep2 ---> Rep2 ---> Abs2) (@)
[PROOF STEP]
by (simp add: fun_eq_iff abs_o_rep[OF q] List.map.id) |
Kerry King – lead and rhythm guitar , backing vocals
|
! ${_warning_in_the_generated_file_not_to_edit}
! Template (mako) for generating Fortran 90 code to wrapped.
! MINPACK calling routine, wrapped by neqsys_wrapper.pyx
! NOTE: If the F77 version seems complex, look at:
! http://people.sc.fsu.edu/~jburkardt/f_src/minpack/minpack.f90
module neqsys
use iso_c_binding, only: c_double, c_int
implicit none
! Set problem specific values:
integer, parameter :: NX = ${NX} ! Number of values (>= NE)
integer, parameter :: NE = ${NE} ! Number of equations
integer, parameter :: NP = ${NPARAMS} ! Number of parameters
integer(c_int) :: NFEV, NJEV, NIT
public lm_solve, func, get_nfev, get_njev, get_ne, get_nx
contains
subroutine lm_solve(x, tol, info) bind(c)
real(c_double), intent(inout) :: x(NX+NP)
real(c_double), intent(in) :: tol ! relative error in sum of squares or in residuals
integer(c_int), intent(inout) :: info ! 0 improper input, 1, 2, 3 (success), others error
  integer, parameter :: lwa = (5*NX+NE)*2 ! TODO: the final factor of two seems to be needed to avoid a segfault?
integer, parameter :: ldfjac = NE ! leading dimension of jacobian
integer, parameter :: m = NE
integer, parameter :: n = NX
integer :: ipvt(NX), wa(lwa)
real(c_double) :: fvec(NE), fjac(NE, NX)
wa = 0
NFEV = 0
NJEV = 0
write(6,*) 'about to call lmder1!'
flush(6)
call lmder1(func, NE, NX, x, fvec, fjac, ldfjac, tol, info, ipvt, wa, lwa)
write(6,*) 'back from lmder1!'
flush(6)
end subroutine lm_solve
subroutine func(m, n, x, fvec, fjac, ldfjac, iflag) bind(c)
! Function
integer(c_int), intent(in) :: m, n, ldfjac, iflag
real(c_double), intent(in) :: x(NX+NP)
real(c_double), intent(out) :: fvec(m)
real(c_double), intent(out) :: fjac(m,n)
% for cse_token, cse_expr in max(func_cse_defs, jac_cse_defs, key=len):
real(c_double) :: ${cse_token}
% endfor
if (iflag == 1) then
! Function evaluation
NFEV = NFEV + 1
% for cse_token, cse_expr in func_cse_defs:
${cse_token} = ${cse_expr}
% endfor
% for i, expr in enumerate(func_new_code, 1):
fvec(${i}) = ${expr}
% endfor
elseif (iflag == 2) then
! Jacobian evaluation
NJEV = NJEV + 1
% for cse_token, cse_expr in jac_cse_defs:
${cse_token} = ${cse_expr}
% endfor
% for i, expr in enumerate(jac_new_code):
fjac(${(i // NX) + 1}, ${(i % NX) + 1}) = ${expr}
% endfor
end if
end subroutine func
subroutine get_nfev(nfev_) bind(c)
integer(c_int), intent(inout) :: nfev_
nfev_ = NFEV
end subroutine get_nfev
subroutine get_njev(njev_) bind(c)
integer(c_int), intent(inout) :: njev_
njev_ = NJEV
end subroutine get_njev
subroutine get_ne(ne_) bind(c)
! Returns the number of equations in the problem
integer(c_int), intent(inout) :: ne_
ne_ = NE
end subroutine get_ne
subroutine get_nx(nx_) bind(c)
  ! Returns the number of independent variables in the problem
  integer(c_int), intent(inout) :: nx_
  nx_ = NX
end subroutine get_nx
end module neqsys
|
Require Import oeuf.Common.
Require oeuf.StepLib.
Require Import Psatz.
Require Import oeuf.Utopia.
Require Import oeuf.Monads.
Require Export oeuf.HigherValue.
Require Import oeuf.AllValues.
Require Import oeuf.OpaqueOps.
Require Import oeuf.ListLemmas.
Inductive insn :=
| Arg (dst : nat)
| Self (dst : nat)
| Deref (dst : nat) (off : nat)
| Call (dst : nat)
| MkConstr (dst : nat) (tag : nat) (nargs : nat)
| Switch (dst : nat) (cases : list (list insn))
| MkClose (dst : nat) (f : function_name) (nfree : nat)
| OpaqueOp (dst : nat) (op : opaque_oper_name) (nargs : nat)
| Copy (dst : nat)
.
Definition env := list (list insn * nat).
(* Continuation-based step relation *)
Record frame := Frame {
arg : value;
self : value;
stack : list nat;
locals : list (nat * value)
}.
Definition push f l v :=
Frame (arg f) (self f) (l :: stack f) ((l, v) :: locals f).
Definition pop f n :=
Frame (arg f) (self f) (skipn n (stack f)) (locals f).
Definition pop_push f n l v :=
push (pop f n) l v.
Definition local f l := lookup (locals f) l.
Definition stack_local f idx :=
match nth_error (stack f) idx with
| Some l => local f l
| None => None
end.
Inductive cont :=
| Kret (code : list insn) (ret : nat) (dst : nat) (f : frame) (k : cont)
| Kstop (ret : nat).
Inductive state :=
| Run (i : list insn) (f : frame) (k : cont)
| Stop (v : value).
Inductive sstep (E : env) : state -> state -> Prop :=
| SArg : forall dst is f k,
sstep E (Run (Arg dst :: is) f k)
(Run is (push f dst (arg f)) k)
| SSelf : forall dst is f k,
sstep E (Run (Self dst :: is) f k)
(Run is (push f dst (self f)) k)
| SDerefinateConstr : forall dst off is f k tag args v,
stack_local f 0 = Some (Constr tag args) ->
nth_error args off = Some v ->
sstep E (Run (Deref dst off :: is) f k)
(Run is (pop_push f 1 dst v) k)
| SDerefinateClose : forall dst off is f k fname free v,
stack_local f 0 = Some (Close fname free) ->
nth_error free off = Some v ->
sstep E (Run (Deref dst off :: is) f k)
(Run is (pop_push f 1 dst v) k)
| SConstrDone : forall dst tag nargs is f k ls vs,
length (stack f) >= nargs ->
ls = rev (firstn nargs (stack f)) ->
Forall2 (fun l v => local f l = Some v) ls vs ->
sstep E (Run (MkConstr dst tag nargs :: is) f k)
(Run is (pop_push f nargs dst (Constr tag vs)) k)
| SCloseDone : forall dst fname nfree is f k ls vs,
length (stack f) >= nfree ->
ls = rev (firstn nfree (stack f)) ->
Forall2 (fun l v => local f l = Some v) ls vs ->
sstep E (Run (MkClose dst fname nfree :: is) f k)
(Run is (pop_push f nfree dst (Close fname vs)) k)
| SOpaqueOpDone : forall dst op nargs is f k ls vs v,
length (stack f) >= nargs ->
ls = rev (firstn nargs (stack f)) ->
Forall2 (fun l v => local f l = Some v) ls vs ->
opaque_oper_denote_higher op vs = Some v ->
sstep E (Run (OpaqueOp dst op nargs :: is) f k)
(Run is (pop_push f nargs dst v) k)
| SMakeCall : forall dst is f k fname free arg body ret,
stack_local f 0 = Some arg ->
stack_local f 1 = Some (Close fname free) ->
nth_error E fname = Some (body, ret) ->
sstep E (Run (Call dst :: is) f k)
(Run body (Frame arg (Close fname free) [] [])
(Kret is ret dst (pop f 2) k))
(* NB: `Switch` still has an implicit target of `Arg` *)
| SSwitchinate : forall dst cases is f k tag args case stk_vals,
arg f = Constr tag args ->
nth_error cases tag = Some case ->
Forall2 (fun l v => local f l = Some v) (stack f) stk_vals ->
sstep E (Run (Switch dst cases :: is) f k)
(Run (case ++ is) f k)
| SCopy : forall dst is f k v,
stack_local f 0 = Some v ->
sstep E (Run (Copy dst :: is) f k)
(Run is (pop_push f 1 dst v) k)
| SContRet : forall code f ret dst f' k v,
length (stack f) = 1 ->
local f ret = Some v ->
sstep E (Run [] f (Kret code ret dst f' k))
(Run code (push f' dst v) k)
| SContStop : forall ret f v,
length (stack f) = 1 ->
local f ret = Some v ->
sstep E (Run [] f (Kstop ret))
(Stop v)
.
Definition sstar BE := StepLib.sstar (sstep BE).
Definition SStarNil := @StepLib.SStarNil state.
Definition SStarCons := @StepLib.SStarCons state.
Definition splus BE := StepLib.splus (sstep BE).
Definition SPlusOne := @StepLib.SPlusOne state.
Definition SPlusCons := @StepLib.SPlusCons state.
Require Import oeuf.Metadata.
Require oeuf.Semantics.
Definition prog_type : Type := env * list metadata.
Definition val_level := VlHigher.
Definition valtype := value_type val_level.
Inductive is_callstate (prog : prog_type) : valtype -> valtype -> state -> Prop :=
| IsCallstate : forall fname free av body ret,
nth_error (fst prog) fname = Some (body, ret) ->
let fv := Close fname free in
HigherValue.public_value (snd prog) fv ->
HigherValue.public_value (snd prog) av ->
is_callstate prog fv av
(Run body
(Frame av fv [] [])
(Kstop ret)).
Inductive final_state (prog : prog_type) : state -> valtype -> Prop :=
| FinalState : forall v,
HigherValue.public_value (snd prog) v ->
final_state prog (Stop v) v.
Definition initial_env (prog : prog_type) : env := fst prog.
Definition semantics (prog : prog_type) : Semantics.semantics :=
@Semantics.Semantics_gen state env val_level
(is_callstate prog)
(sstep)
(final_state prog)
(initial_env prog).
(*
* Mutual recursion/induction schemes for expr
*)
Definition insn_rect_mut
(P : insn -> Type)
(Pl : list insn -> Type)
(Pll : list (list insn) -> Type)
(HArg : forall dst, P (Arg dst))
(HSelf : forall dst, P (Self dst))
(HDeref : forall dst off, P (Deref dst off))
(HCall : forall dst, P (Call dst))
(HConstr : forall dst tag nargs, P (MkConstr dst tag nargs))
(HSwitch : forall dst cases, Pll cases -> P (Switch dst cases))
(HClose : forall dst fname nfree, P (MkClose dst fname nfree))
(HOpaqueOp : forall dst op nargs, P (OpaqueOp dst op nargs))
(HCopy : forall dst, P (Copy dst))
(Hnil : Pl [])
(Hcons : forall i is, P i -> Pl is -> Pl (i :: is))
(Hnil2 : Pll [])
(Hcons2 : forall is iss, Pl is -> Pll iss -> Pll (is :: iss))
(i : insn) : P i :=
let fix go i :=
let fix go_list is :=
match is as is_ return Pl is_ with
| [] => Hnil
| i :: is => Hcons i is (go i) (go_list is)
end in
let fix go_list_list iss :=
match iss as iss_ return Pll iss_ with
| [] => Hnil2
| is :: iss => Hcons2 is iss (go_list is) (go_list_list iss)
end in
match i as i_ return P i_ with
| Arg dst => HArg dst
| Self dst => HSelf dst
| Deref dst off => HDeref dst off
| Call dst => HCall dst
| MkConstr dst tag nargs => HConstr dst tag nargs
| Switch dst cases => HSwitch dst cases (go_list_list cases)
| MkClose dst fname nfree => HClose dst fname nfree
| OpaqueOp dst op nargs => HOpaqueOp dst op nargs
| Copy dst => HCopy dst
end in go i.
(* Useful wrapper for `expr_rect_mut with (Pl := Forall P)` *)
Definition insn_ind' (P : insn -> Prop)
(HArg : forall dst, P (Arg dst))
(HSelf : forall dst, P (Self dst))
(HDeref : forall dst off, P (Deref dst off))
(HCall : forall dst, P (Call dst))
(HConstr : forall dst tag nargs, P (MkConstr dst tag nargs))
(HSwitch : forall dst cases, Forall (Forall P) cases -> P (Switch dst cases))
(HClose : forall dst fname nfree, P (MkClose dst fname nfree))
(HOpaqueOp : forall dst op nargs, P (OpaqueOp dst op nargs))
(HCopy : forall dst, P (Copy dst))
(i : insn) : P i :=
ltac:(refine (@insn_rect_mut P (Forall P) (Forall (Forall P))
HArg HSelf HDeref HCall HConstr HSwitch HClose HOpaqueOp HCopy _ _ _ _ i); eauto).
Definition insn_list_rect_mut
(P : insn -> Type)
(Pl : list insn -> Type)
(Pll : list (list insn) -> Type)
(HArg : forall dst, P (Arg dst))
(HSelf : forall dst, P (Self dst))
(HDeref : forall dst off, P (Deref dst off))
(HCall : forall dst, P (Call dst))
(HConstr : forall dst tag nargs, P (MkConstr dst tag nargs))
(HSwitch : forall dst cases, Pll cases -> P (Switch dst cases))
(HClose : forall dst fname nfree, P (MkClose dst fname nfree))
(HOpaqueOp : forall dst op nargs, P (OpaqueOp dst op nargs))
(HCopy : forall dst, P (Copy dst))
(Hnil : Pl [])
(Hcons : forall i is, P i -> Pl is -> Pl (i :: is))
(Hnil2 : Pll [])
(Hcons2 : forall is iss, Pl is -> Pll iss -> Pll (is :: iss))
(is : list insn) : Pl is :=
let go := insn_rect_mut P Pl Pll
HArg HSelf HDeref HCall HConstr HSwitch HClose HOpaqueOp
HCopy Hnil Hcons Hnil2 Hcons2 in
let fix go_list is :=
match is as is_ return Pl is_ with
| [] => Hnil
| i :: is => Hcons i is (go i) (go_list is)
end in go_list is.
Definition dest e :=
match e with
| Arg dst => dst
| Self dst => dst
| Deref dst _ => dst
| Call dst => dst
| MkConstr dst _ _ => dst
| Switch dst _ => dst
| MkClose dst _ _ => dst
| OpaqueOp dst _ _ => dst
| Copy dst => dst
end.
Definition pop_count e :=
match e with
| Arg _ => 0
| Self _ => 0
| Deref _ _ => 1
| Call _ => 2
| MkConstr _ _ nargs => nargs
| Switch _ _ => 0
| MkClose _ _ nfree => nfree
| OpaqueOp _ _ nargs => nargs
| Copy _ => 1
end.
|
import tactic.linarith
example (e b c a v0 v1 : ℚ) (h1 : v0 = 5*a) (h2 : v1 = 3*b) (h3 : v0 + v1 + c = 10) :
v0 + 5 + (v1 - 3) + (c - 2) = 10 :=
by linarith
example (ε : ℚ) (h1 : ε > 0) : ε / 2 + ε / 3 + ε / 7 < ε :=
by linarith
example (x y z : ℚ) (h1 : 2*x < 3*y) (h2 : -4*x + z/2 < 0)
(h3 : 12*y - z < 0) : false :=
by linarith
example (ε : ℚ) (h1 : ε > 0) : ε / 2 < ε :=
by linarith
example (ε : ℚ) (h1 : ε > 0) : ε / 3 + ε / 3 + ε / 3 = ε :=
by linarith
example (a b c : ℚ) (h2 : b + 2 > 3 + b) : false :=
by linarith {discharger := `[ring SOP]}
example (a b c : ℚ) (h2 : b + 2 > 3 + b) : false :=
by linarith
example (a b c : ℚ) (x y : ℤ) (h1 : x ≤ 3*y) (h2 : b + 2 > 3 + b) : false :=
by linarith {restrict_type := ℚ}
example (g v V c h : ℚ) (h1 : h = 0) (h2 : v = V) (h3 : V > 0) (h4 : g > 0)
(h5 : 0 ≤ c) (h6 : c < 1) :
v ≤ V :=
by linarith
example (x y z : ℚ) (h1 : 2*x + ((-3)*y) < 0) (h2 : (-4)*x + 2*z < 0)
(h3 : 12*y + (-4)* z < 0) (h4 : nat.prime 7) : false :=
by linarith
example (x y z : ℚ) (h1 : 2*1*x + (3)*(y*(-1)) < 0) (h2 : (-2)*x*2 < -(z + z))
(h3 : 12*y + (-4)* z < 0) (h4 : nat.prime 7) : false :=
by linarith
example (x y z : ℤ) (h1 : 2*x < 3*y) (h2 : -4*x + 2*z < 0)
(h3 : 12*y - 4* z < 0) : false :=
by linarith
example (x y z : ℤ) (h1 : 2*x < 3*y) (h2 : -4*x + 2*z < 0) (h3 : x*y < 5)
(h3 : 12*y - 4* z < 0) : false :=
by linarith
example (w x y z : ℤ) (h1 : 4*x + (-3)*y + 6*w ≤ 0) (h2 : (-1)*x < 0)
(h3 : y < 0) (h4 : w ≥ 0) (h5 : nat.prime x.nat_abs) : false :=
by linarith
example (a b c : ℚ) (h1 : a > 0) (h2 : b > 5) (h3 : c < -10)
(h4 : a + b - c < 3) : false :=
by linarith
example (a b c : ℚ) (h2 : b > 0) (h3 : b < 0) : false :=
by linarith
example (a b c : ℚ) (h2 : (2 : ℚ) > 3) : a + b - c ≥ 3 :=
by linarith {exfalso := ff}
example (x : ℚ) (hx : x > 0) (h : x.num < 0) : false :=
by linarith using [rat.num_pos_iff_pos.mpr hx]
example (x y z : ℚ) (hx : x ≤ 3*y) (h2 : y ≤ 2*z) (h3 : x ≥ 6*z) : x = 3*y :=
by linarith
example (x y z : ℕ) (hx : x ≤ 3*y) (h2 : y ≤ 2*z) (h3 : x ≥ 6*z) : x = 3*y :=
by linarith
example (h1 : (1 : ℕ) < 1) : false :=
by linarith
example (a b c : ℚ) (h2 : b > 0) (h3 : b < 0) : nat.prime 10 :=
by linarith
example (a b c : ℕ) : a + b ≥ a :=
by linarith |
{-# OPTIONS --no-unicode #-}
open import Agda.Builtin.Nat
pred : Nat -> Nat
pred = \ { n -> {!!} }
|
## Chapter 4
```python
import sys
from importlib import reload
sys.path.append("..")
```
```python
## INCLUDES
from sympy import *
init_printing(use_latex='mathjax')
s, w = symbols('s w', real=True)
import numpy as np
import matplotlib.pyplot as plt
import control
import Ch3.utilities as ch3_utils
reload(ch3_utils)
```
<module 'Ch3.utilities' from '../Ch3/utilities.py'>
```python
import Ch4.utilities as ch4_utils
reload(ch4_utils)
```
<module 'Ch4.utilities' from '../Ch4/utilities.py'>
### Problem 4.1
#### a)
The difference feedback rule can be used to compose the $\mathbf{\dot x} = A \mathbf{x} + B \mathbf{u}$ and $G$ blocks using a relationship from pg. 117, namely:
\begin{eqnarray}
H(s) = H_1(s)[I + H_2(s)H_1(s)]^{-1}.
\end{eqnarray}
$H_1(s) \equiv (sI-A)^{-1} B \triangleq \Phi(s) B$ comes from the state-space model and $H_2(s) \equiv G$. It follows that the single-loop feedback transfer function $H(s)$ is
\begin{eqnarray}
H'(s) = \Phi(s) B[I + G \Phi(s) B]^{-1}.
\end{eqnarray}
Finally, use the tandem rule $H(s) = H_2(s) H_1(s)$, with $H_2(s) \equiv C$ and $H_1(s) \equiv H'(s)$ above to get the final reduced transfer function from $\mathbf{u_0}(s)$ to $\mathbf{y}(s)$:
\begin{eqnarray}
H(s) = C \Phi(s) B[I + G \Phi(s) B]^{-1}.
\end{eqnarray}
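This reduction is easy to sanity-check numerically. The sketch below uses arbitrary example matrices (not taken from the problem) and compares the reduced formula against the equivalent closed-loop state-space model $\mathbf{\dot x} = (A - BG)\mathbf{x} + B\mathbf{u_0}$, $\mathbf{y} = C\mathbf{x}$:
```python
# Numeric sanity check of H(s) = C Phi(s) B [I + G Phi(s) B]^{-1}.
# The matrices below are arbitrary examples, not from the problem.
import numpy as np
rng = np.random.default_rng(0)
A_ = rng.standard_normal((3, 3))   # state matrix
B_ = rng.standard_normal((3, 2))   # input matrix
C_ = rng.standard_normal((2, 3))   # output matrix
G_ = rng.standard_normal((2, 3))   # feedback gain acting on the state
def H_reduced(s0):
    Phi = np.linalg.inv(s0 * np.eye(3) - A_)
    PB = Phi @ B_
    return C_ @ PB @ np.linalg.inv(np.eye(2) + G_ @ PB)
def H_closed_loop(s0):
    # the same loop written as xdot = (A - B G) x + B u0, y = C x
    return C_ @ np.linalg.inv(s0 * np.eye(3) - (A_ - B_ @ G_)) @ B_
for s0 in [1.0 + 2.0j, -0.5 + 0.1j]:
    assert np.allclose(H_reduced(s0), H_closed_loop(s0))
```
The two agree at every test frequency by the push-through identity $\Phi B\,[I + G \Phi B]^{-1} = (sI - A + BG)^{-1}B$.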
#### b)
For a square process (so that determinants are defined): if $z$ is a zero of the open-loop process $\Phi(s)B$, then $\det(\Phi(z)B) = 0$. It follows that $\det(C \Phi(z) B) = \det(C) \det(\Phi(z)B) = 0$ _regardless of the matrices_ $C$ and $G$, and the factor $[I + G \Phi(z) B]^{-1}$ is nonsingular wherever it exists, so it cannot cancel the zero. Therefore, $z$ must also be a zero of the closed-loop process.
A nicer approach to showing this equivalence via a generalized eigenvalue problem is shown [here](https://ocw.mit.edu/courses/aeronautics-and-astronautics/16-30-feedback-control-systems-fall-2010/lecture-notes/MIT16_30F10_lec08.pdf). Basically, it can be shown that the closed-loop system is the same as the open-loop system up to some nonsingular transformation:
\begin{eqnarray}
T = \begin{pmatrix}
I & 0 \\
-G & I
\end{pmatrix}.
\end{eqnarray}
From this fact, it is clear that if $z$ is a zero of the open-loop process (i.e. a value of $s$ at which the pencil in the generalized eigenvalue problem loses rank), then it will also be a zero of the closed-loop process: multiplying by the nonsingular $T$ changes neither the rank of the pencil nor its generalized eigenvalues.
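That argument can be spot-checked numerically (again with arbitrary example matrices): the transmission zeros are the finite generalized eigenvalues of the pencil $M - sN$ built from the state-space data, and closing the loop multiplies $M$ by exactly the nonsingular $T$ above, so the zeros cannot move.
```python
# Zeros as finite generalized eigenvalues of the pencil M - s N with
# M = [[A, B], [C, 0]] and N = [[I, 0], [0, 0]]; closing the loop
# (A -> A - B G) multiplies M by T = [[I, 0], [-G, I]].
# Arbitrary example matrices, not from the problem.
import numpy as np
from scipy import linalg
rng = np.random.default_rng(1)
n, m = 4, 2
A_ = rng.standard_normal((n, n))
B_ = rng.standard_normal((n, m))
C_ = rng.standard_normal((m, n))
G_ = rng.standard_normal((m, n))   # state-feedback gain
def tzeros(Acl):
    M = np.block([[Acl, B_], [C_, np.zeros((m, m))]])
    N = np.block([[np.eye(n), np.zeros((n, m))],
                  [np.zeros((m, n)), np.zeros((m, m))]])
    w = linalg.eigvals(M, N)
    return np.sort_complex(w[np.isfinite(w)])
print(tzeros(A_))            # open-loop zeros
print(tzeros(A_ - B_ @ G_))  # closed-loop zeros: identical up to rounding
```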
### Problem 4.2
#### a)
```python
g = symbols('g', real=True)
A = Matrix([[-1.5, 0.5, 0], [0.5, -1, 0.5], [0, 0.5, -1.5]])
Phi = ch3_utils.computeResolvent(A)
C = Matrix([[0,0,1]])
B = Matrix([[1],[0],[0]])
E = Matrix([[0],[0],[1]])
H = g*C*Phi*B
Iden = eye(1)
H1 = simplify(H * (Iden + H)**-1)[0]
print('Transfer function H1 (input to v3) is')
pprint(H1)
H2 = simplify(C*Phi*E)[0]
print('Transfer function H2 (disturbance to v3) is')
pprint(H2)
```
    Transfer function H1 (input to v3) is
    0.0833333333333333⋅g / (0.0833333333333333⋅g + 0.333333333333333⋅s^3 + 1.33333333333333⋅s^2 + 1.58333333333333⋅s + 0.5)
    Transfer function H2 (disturbance to v3) is
    1.0⋅(0.333333333333333⋅s^2 + 0.833333333333333⋅s + 0.416666666666667) / (0.333333333333333⋅s^3 + 1.33333333333333⋅s^2 + 1.58333333333333⋅s + 0.5)
#### b)
```python
# need to use Routh-Hurwitz here
n, d = fraction(H1)
pprint(d)
Dcoeffs = [0.0625, 0.25, 0.296875, 0.09375+0.015625*g]
dets = ch4_utils.hurwitz(Dcoeffs)
pprint(dets)
```
    0.0833333333333333⋅g + 0.333333333333333⋅s^3 + 1.33333333333333⋅s^2 + 1.58333333333333⋅s + 0.5
    [4.0, -0.25⋅g + 17.5, -0.0625⋅g^2 + 4.0⋅g + 26.25]
The Hurwitz conditions (see above) are:
\begin{eqnarray}
4 &>& 0 \\
17.5 - 0.25 g &>& 0 \\
-0.0625g^2 + 4g + 26.25 &>& 0
\end{eqnarray}
The first condition puts no constraints on $g$, the second requires $g < 70$, and the third requires a bit of work. The quadratic $-0.0625g^2 + 4g + 26.25$ factors as $-0.0625(g - 70)(g + 6)$, so its roots are $g = -6$ and $g = 70$, and the concave-down parabola is positive strictly between them. The third condition therefore subsumes the second, and the valid stability range is $-6 < g < 70$.
Intuitively this makes sense: a sufficiently negative gain turns the negative feedback into positive feedback, so the loop drives the error larger and larger, while an excessively large positive gain also pushes the closed-loop poles into the right half-plane.
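A quick numerical spot check of this range, reusing the denominator `d` extracted in the cell above (the test gains are arbitrary sample points):
```python
# Sweep the loop gain and test the closed-loop characteristic roots;
# expect instability just outside -6 < g < 70 and stability inside.
for g_val in [-7, -5, 1, 69, 71]:
    coeffs = [float(c) for c in poly(d.subs(g, g_val), s).all_coeffs()]
    stable = all(r.real < 0 for r in np.roots(coeffs))
    print('g = {:>3}: {}'.format(g_val, 'stable' if stable else 'unstable'))
```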
#### c)
The open loop transfer function is $C \Phi(s) B$. The [Python Control Systems Library](https://python-control.readthedocs.io/en/latest/index.html) has functions to do frequency domain plots.
```python
H = simplify(C * Phi * B)
n, d = fraction(H[0])
num = np.array([float(n)])
den = np.array([float(p) for p in poly(d).all_coeffs()])
G = control.TransferFunction(num, den)
control.root_locus(G, PrintGain=False)
plt.show()
```
#### d)
```python
plt.figure()
control.bode_plot(G)
plt.figure()
control.nyquist_plot(G)
plt.show()
```
#### e)
Following signal flow around the loop yields the following error equation:
\begin{eqnarray}
e(s) \bigl ( 1 + g C \Phi(s) B \bigr ) &=& u_r(s) - C \Phi(s) E v_0(s) \\
\implies e(s) &=& \frac{u_r(s) - C \Phi(s) E v_0(s)}{1 + g C \Phi(s) B}
\end{eqnarray}
The steady-state error is computed by the final-value theorem:
\begin{eqnarray}
e_{ss} &=& \lim_{s \to 0} s e(s) \\
&=& \lim_{s \to 0} \frac{s \bigl ( u_r(s) - C \Phi(s) E v_0(s) \bigr )}{1 + g C \Phi(s) B}
\end{eqnarray}
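For a concrete case, the final-value computation can be carried out directly in sympy. This is a sketch for a unit step reference $u_r(s) = 1/s$ with no disturbance, reusing the $C$, $\Phi$, $B$ and $g$ defined earlier in this problem:
```python
# Steady-state error to a unit step reference with v0 = 0.
# Since C Phi(0) B = 1/6 here, expect e_ss = 1/(1 + g/6) = 6/(g + 6).
H0 = simplify(C * Phi * B)[0]   # open-loop C Phi(s) B
e = (1 / s) / (1 + g * H0)      # error transform for u_r(s) = 1/s
e_ss = limit(s * e, s, 0)
pprint(simplify(e_ss))
```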
### Problem 4.3
#### a)
```python
h = symbols('h')
A = Matrix([[-1.5, 0.5, 0], [0.5, -1, 0.5], [0, 0.5, -1.5]])
Phi = ch3_utils.computeResolvent(A)
C = Matrix([[0,0,1]])
B = Matrix([[1],[0],[0]])
E = Matrix([[0],[0],[1]])
K = g + h/s
H = K*C*Phi*B
Iden = eye(1)
H1 = simplify(H * (Iden + H)**-1)[0]
#H2 = simplify(C*Phi*E)[0]
#print('Transfer function H2 (disturbance to v3) is')
#pprint(H2)
# need to use Routh-Hurwitz here
n, d = fraction(H1)
pprint(d)
Dcoeffs = [0.0625, 0.25, 0.296875, 0.09375+0.015625*g, 0.015625*h]
dets = ch4_utils.hurwitz(Dcoeffs)
pprint(dets)
```
    0.0833333333333333⋅g⋅s + 0.0833333333333333⋅h + 0.333333333333333⋅s^4 + 1.33333333333333⋅s^3 + 1.58333333333333⋅s^2 + 0.5⋅s
    [4.0, -0.25⋅g + 17.5, -0.0625⋅g^2 + 4.0⋅g - 4.0⋅h + 26.25, h⋅(-0.015625⋅g^2 + 1.0⋅g - 1.0⋅h + 6.5625)]
```python
def f(g, h):
c1 = (-0.25*g + 17.5 > 0)
c2 = (-0.0625*g**2 + 4.*g - 4.*h + 26.25 > 0)
    c3 = (h*(-0.015625*g**2 + g - h + 6.5625) > 0)  # matches the printed Hurwitz determinant
return int(c1 and c2 and c3)
vf = np.vectorize(f)
gs = np.linspace(-100, 100, 501)
hs = np.linspace(-100, 100, 501)
X = np.meshgrid(gs,hs)
out = vf(X[0], X[1])
plt.imshow(out, origin='lower', extent=(-100, 100, -100, 100))
plt.xlabel('g1')
plt.ylabel('g2')
plt.title('Stability Region (in yellow)')
plt.show()
```
The plot above shows a rough discretization of the stability region vs $g_1$ and $g_2$ using the Hurwitz criteria.
#### b)
```python
H = simplify(C * Phi * B)[0]
PInoGain = 1. + 1./s
A = np.array([[-1.5, 0.5, 0], [0.5, -1, 0.5], [0, 0.5, -1.5]])
C = np.array([[0,0,1]]).astype('float')
B = np.array([[1],[0],[0]]).astype('float')
D = np.zeros((1, 1))
plantSS = control.StateSpace(A, B, C, D)
plantTF = control.ss2tf(plantSS)
PItf = control.TransferFunction(np.array([1]), np.array([1])) + control.TransferFunction(np.array([1]), np.array([1, 0]))  # 1 + 1/s (coefficients are highest power first)
G = PItf * plantTF
control.root_locus(G, PrintGain=False)
plt.show()
```
#### c)
```python
plt.figure()
control.bode_plot(G)
plt.figure()
control.nyquist_plot(G)
plt.show()
```
### Problem 4.4
#### a)
\begin{eqnarray}
A &=& \begin{pmatrix}
-0.746 & 0.006 & 0.001 & 0.0369 \\
-12.9 & -0.746 & 0.387 & 0 \\
4.31 & 0.024 & -0.0174 & 0 \\
0 & 1 & 0 & 0 \\
\end{pmatrix}, \\
B &=& \begin{pmatrix}
0.0012 & 0.0092 \\
6.05 & 0.952 \\
-0.416 & -1.76 \\
0 & 0
\end{pmatrix}
\end{eqnarray}
#### b)
```python
A = np.array([[-.746, .006, .001, .0369], [-12.9, -.746, .387, 0], [4.31, .024, -.0174, 0], [0, 1, 0, 0]])
lam, v = np.linalg.eig(A)
pprint(lam)
```
[-1.31182557+0.j -0.14760023+0.62779697j -0.14760023-0.62779697j
0.09762603+0.j ]
The dutch roll mode is described by $s=-0.1477 \pm 0.6278j$. The roll subsidence mode is described by $s = -1.3118$. The spiral mode is described by $s = 0.0976$.
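Each mode's natural frequency and damping ratio follow directly from its eigenvalue via $\omega_n = |s|$ and $\zeta = -\mathrm{Re}(s)/|s|$; a small sketch using the `lam` array computed above:
```python
# Modal natural frequency and damping ratio from each eigenvalue.
for lam_i in lam:
    wn = abs(lam_i)             # natural frequency [rad/s]
    zeta = -lam_i.real / wn     # damping ratio (negative => unstable mode)
    print('s = {:.4f}{:+.4f}j -> wn = {:.4f} rad/s, zeta = {:+.4f}'.format(
        lam_i.real, lam_i.imag, wn, zeta))
```
For the dutch roll pair this gives $\omega_n \approx 0.645$ rad/s and $\zeta \approx 0.23$; the sign convention makes the unstable spiral mode show up with $\zeta < 0$.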
#### c)
```python
A = Matrix([[-.746, .006, .001, .0369], [-12.9, -.746, .387, 0], [4.31, .024, -.0174, 0], [0, 1, 0, 0]])
Phi = ch3_utils.computeResolvent(A)
# p(s) / deltaA(s)
C = Matrix([[0, 1, 0, 0]])
B = Matrix([[.0012],[6.05],[-.416],[0]])
H = simplify(C*Phi*B)[0]
print('Transfer function p(s) / deltaA(s)')
pprint(H)
n, d = fraction(H)
num = np.array([float(p) for p in poly(n).all_coeffs()])
den = np.array([float(p) for p in poly(d).all_coeffs()])
G = control.TransferFunction(num, den)
print(' -Zeros: {}'.format(G.zero()))
print(' -Poles: {}'.format(G.pole()))
print('')
# r(s) / deltaA(s)
C = Matrix([[0, 0, 1, 0]])
H = simplify(C*Phi*B)[0]
print('Transfer function r(s) / deltaA(s)')
pprint(H)
n, d = fraction(H)
num = np.array([float(p) for p in poly(n).all_coeffs()])
den = np.array([float(p) for p in poly(d).all_coeffs()])
G = control.TransferFunction(num, den)
print(' -Zeros: {}'.format(G.zero()))
print(' -Poles: {}'.format(G.pole()))
print('')
# p(s) / deltaR(s)
C = Matrix([[0, 1, 0, 0]])
B = Matrix([[.0092],[0.952],[-1.76],[0]])
H = simplify(C*Phi*B)[0]
print('Transfer function p(s) / deltaR(s)')
pprint(H)
n, d = fraction(H)
num = np.array([float(p) for p in poly(n).all_coeffs()])
den = np.array([float(p) for p in poly(d).all_coeffs()])
G = control.TransferFunction(num, den)
print(' -Zeros: {}'.format(G.zero()))
print(' -Poles: {}'.format(G.pole()))
print('')# r(s) / deltaR(s)
C = Matrix([[0, 0, 1, 0]])
H = simplify(C*Phi*B)[0]
print('Transfer function r(s) / deltaR(s)')
pprint(H)
n, d = fraction(H)
num = np.array([float(p) for p in poly(n).all_coeffs()])
den = np.array([float(p) for p in poly(d).all_coeffs()])
G = control.TransferFunction(num, den)
print(' -Zeros: {}'.format(G.zero()))
print(' -Poles: {}'.format(G.pole()))
print('')
```
    Transfer function p(s) / deltaA(s)
    s⋅(4.00821518484166⋅s^2 + 2.94295614151318⋅s - 0.0401122962766662) / (0.662514906585398⋅s^4 + 1.0⋅s^3 + 0.428169338810123⋅s^2 + 0.309525513714059⋅s - 0.0352892003445077)
     -Zeros: [-0.74761697  0.01338589  0.        ]
     -Poles: [-1.31182557+0.j -0.14760023+0.62779697j -0.14760023-0.62779697j  0.09762603+0.j]
    Transfer function r(s) / deltaA(s)
    (-0.297674418604651⋅s^4 - 0.303219749552773⋅s^3 + 0.0409134211091232⋅s^2 + 0.546444831600429⋅s - 0.0611879441152057) / (0.715563506261181⋅s^5 + 1.0⋅s^4 + 0.341593516994633⋅s^3 + 0.282561153974955⋅s^2 - 0.0755241173407943⋅s + 0.00426505300615385)
     -Zeros: [-1.0371158+0.93322642j -1.0371158-0.93322642j  0.94370276+0.j  0.1119+0.j]
     -Poles: [-1.31182557+0.j -0.14760023+0.62779697j -0.14760023-0.62779697j  0.1119+0.j  0.09762603+0.j]
    Transfer function p(s) / deltaR(s)
    s⋅(0.630714191069299⋅s^2 - 0.0483922088246985⋅s - 0.307325432092222) / (0.662514906585398⋅s^4 + 1.0⋅s^3 + 0.428169338810123⋅s^2 + 0.309525513714059⋅s - 0.0352892003445077)
     -Zeros: [ 0.73746065 -0.6607346   0.        ]
     -Poles: [-1.31182557+0.j -0.14760023+0.62779697j -0.14760023-0.62779697j  0.09762603+0.j]
    Transfer function r(s) / deltaR(s)
    (-1.25939177101968⋅s^4 - 1.69336386404293⋅s^3 - 0.544150312701252⋅s^2 - 0.407284411131306⋅s + 0.0549589120692665) / (0.715563506261181⋅s^5 + 1.0⋅s^4 + 0.341593516994633⋅s^3 + 0.282561153974955⋅s^2 - 0.0755241173407943⋅s + 0.00426505300615385)
     -Zeros: [-1.23046487+0.j -0.11301188+0.55151517j -0.11301188-0.55151517j  0.1119+0.j]
     -Poles: [-1.31182557+0.j -0.14760023+0.62779697j -0.14760023-0.62779697j  0.1119+0.j  0.09762603+0.j]
After computing the zeros and poles for each transfer function, it's clear that the pole-zero combinations for $p(s) / \delta_A(s)$ and $r(s) / \delta_R(s)$ are more amenable for control than the other pair; it requires less movement for pole-zero cancellation.
#### d)
I'd need to find an $s_0$ such that the rank of $H(s)$ drops when $s=s_0$. I can't think of a way to do this easily by hand, so I'll just sketch out an approach.
Consider the transfer matrix $H(s)$ whose $(i, j)$ entry is the transfer function from input $j$ to output $i$. Here $H(s)$ is $4 \times 2$, so it has rank 2 _at most_. I would try to find $s_0$ such that the two columns of $H(s_0)$ are linearly dependent; whenever that condition is met, $s_0$ is a transmission zero of the process.
I'd build the $4 \times 2$ matrix $H(s)$ via the following cell. I'd first factor out a common denominator and work with the resulting scaled matrix, row-reducing its transpose to reduced row echelon form and solving for the $s_0$ that turns one row into a linear combination of the others.
```python
numOutputs = 4
numInputs = 2
for i in range(numOutputs):
C = zeros(1,4)
C[i] = 1.
nc = []
dc = []
for j in range(numInputs):
if j == 0:
B = Matrix([[.0012],[6.05],[-.416],[0]])
else:
B = Matrix([[.0092],[0.952],[-1.76],[0]])
H = simplify(C*Phi*B)[0]
print('H({}, {})'.format(i,j))
pprint(H)
print('')
```
H(0, 0)
  (0.000568885787407855*s**8 + 0.0182408866274625*s**7 + 0.130251581209987*s**6 + 0.155153309249951*s**5 + 0.0558774649271001*s**4 + 0.0393957508638441*s**3 - 0.0114264117146682*s**2 + 0.000732296736037176*s - 5.80996009352629e-6)
  / (0.474071489506546*s**9 + 1.37807841284658*s**8 + 1.53269315035438*s**7 + 1.17844899420566*s**6 + 0.66305902052578*s**5 + 0.11872827480869*s**4 + 0.0473332659022544*s**3 - 0.0315218334642727*s**2 + 0.00398532843042886*s - 0.000150510310014108)
H(0, 1)
  (0.00436145770346022*s**8 + 0.0112982044779371*s**7 + 0.0235211273647575*s**6 + 0.012380325641365*s**5 - 0.00788862267262728*s**4 - 0.00047821206980725*s**3 - 0.0060548840266216*s**2 + 0.00131142935617842*s - 6.92911092915793e-5)
  / (the same 9th-order denominator as H(0, 0))
H(1, 0)
  identical to p(s) / deltaA(s) above
H(1, 1)
  identical to p(s) / deltaR(s) above
H(2, 0)
  identical to r(s) / deltaA(s) above
H(2, 1)
  identical to r(s) / deltaR(s) above
H(3, 0)
  (4.00821518484166*s**2 + 2.94295614151318*s - 0.0401122962766662)
  / (0.662514906585398*s**4 + 1.0*s**3 + 0.428169338810123*s**2 + 0.309525513714059*s - 0.0352892003445077)
H(3, 1)
  (0.630714191069299*s**2 - 0.0483922088246985*s - 0.307325432092222)
  / (0.662514906585398*s**4 + 1.0*s**3 + 0.428169338810123*s**2 + 0.309525513714059*s - 0.0352892003445077)
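As a numerical complement to the symbolic row-reduction idea (a sketch, not part of the original approach), one can scan candidate $s_0$ values and watch the smallest singular value of $H(s_0) = C(s_0 I - A)^{-1}B$, which dips toward zero near a transmission zero. The names `Afull`, `Bfull`, and `Cfull` below are hypothetical stand-ins for the full two-input system:
```python
import numpy as np

Afull = np.array([[-.746, .006, .001, .0369], [-12.9, -.746, .387, 0],
                  [4.31, .024, -.0174, 0], [0, 1, 0, 0]])
Bfull = np.array([[.0012, .0092], [6.05, .952], [-.416, -1.76], [0, 0]])
Cfull = np.eye(4)

def smin(s0):
    # smallest singular value of H(s0); a (near-)zero value flags rank loss
    H = Cfull @ np.linalg.solve(s0 * np.eye(4) - Afull, Bfull)
    return np.linalg.svd(H, compute_uv=False)[-1]

for s0 in np.linspace(-2.0, 2.0, 9):
    print(round(s0, 2), smin(s0 + 0j))
```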
### Problem 4.5
#### a)
```python
A = np.array([[0, -14, 0, 0], [0, -1, 1, 0], [0, -5, -.5, 0], [0, 0, 1, 0]])
B = np.array([[-1], [-.1], [-9], [0]])
C = np.eye(4)
D = np.zeros((4,1))
sys = control.StateSpace(A, B, C, D)
pprint(sys.pole())
```
[ 0. +0.j 0. +0.j -0.75+2.2220486j -0.75-2.2220486j]
The open-loop poles are $s=0$, with multiplicity 2, and a complex conjugate pair $s = -0.75 \pm 2.222 i$.
#### b)
```python
# from deltaE to theta
# NOTE: `Phi` and `B` here carry over from earlier notebook state; a
# self-contained run would recompute them for this problem's model
C = Matrix([[0, 0, 0, 1]])
H1 = simplify(C*Phi*B)
print('theta(s) / deltaE(s):')
pprint(H1)
print('')
# from deltaE to deltau
C = Matrix([[1, 0, 0, 0]])
H2 = simplify(C*Phi*B)
print('deltau(s) / deltaE(s):')
pprint(H2)
print('')
```
theta(s) / deltaE(s):
  (-0.0662514906585398*s**2 + 6.18832648734597*s - 2.60142774612429)
  / (0.662514906585398*s**4 + 1.0*s**3 + 0.428169338810123*s**2 + 0.309525513714059*s - 0.0352892003445077)
deltau(s) / deltaE(s):
  (-0.474071489506546*s**8 - 1.02897216797396*s**7 - 0.755030754202703*s**6 - 0.446291427009702*s**5 - 0.187783759714934*s**4 + 0.000198058901817677*s**3 - 0.0245042133414924*s**2 + 0.00633503699684281*s - 0.000363343030334783)
  / (0.474071489506546*s**9 + 1.37807841284658*s**8 + 1.53269315035438*s**7 + 1.17844899420566*s**6 + 0.66305902052578*s**5 + 0.11872827480869*s**4 + 0.0473332659022544*s**3 - 0.0315218334642727*s**2 + 0.00398532843042886*s - 0.000150510310014108)
### Problem 4.6
```python
K1, K2, K3 = symbols('K1 K2 K3')
Dcoeffs = [1.0, 36.32+3.04*K1, 182.4+110.4*K1, 554.5*K1 - 206.2*K2, -613.9*K1*K2] # typo in book, 36.62 vs. 36.32
dets = ch4_utils.hurwitz(Dcoeffs)
pprint(dets)
```
[3.04*K1 + 36.32,
 335.616*K1**2 + 4009.724*K1 + 206.2*K2 + 6624.768,
 613.9*K1*K2*(3.04*K1 + 36.32)**2 + (3.04*K1 + 36.32)*(110.4*K1 + 182.4)*(554.5*K1 - 206.2*K2) - (554.5*K1 - 206.2*K2)**2,
 K1*K2*(-3482911.457536*K1**3*K2 - 114246220.3008*K1**3 - 40738905.335296*K1**2*K2 - 1364940323.0162*K1**2 - 59765823.5307839*K1*K2 - 2255121044.1984*K1 + 26102070.316*K2**2 + 838604074.50624*K2)]
```python
def f(k1, k2):
c1 = 3.04*k1 + 36.32 > 0
c2 = 335.616*k1**2 + 4009.724*k1 + 206.2*k2 + 6624.768 > 0
c3 = 613.9*k1*k2*(3.04*k1 + 36.32)**2 + (3.04*k1 + 36.32)*(110.4*k1 + 182.4)*(554.5*k1 - 206.2*k2) - (554.5*k1-206.2*k2)**2 > 0
c4 = k1*k2*(-3482911.457536 * k1**3 * k2 - 114246220.3008 * k1**3 - 40738905.335296 * k1**2 * k2 - 1364940323.0162 * k1**2 - 59765823.5307839 * k1 * k2 - 2255121044.1984 * k1 + 26102070.316 * k2**2 + 838604074.50624 * k2) > 0
return int(c1 and c2 and c3 and c4)
vf = np.vectorize(f)
gs = np.linspace(-10, 50, 301)
hs = np.linspace(-10, 50, 301)
X = np.meshgrid(gs,hs)
out = vf(X[0], -X[1])
plt.imshow(out, origin='lower', extent=(-10, 50, -10, 50))
plt.xlabel('k1')
plt.ylabel('k2')
plt.title('Stability Region (in yellow)')
plt.show()
```
The stability region above is larger than the one predicted by the book: the stable range of $K_1$ appears to continue past $50$. The $K_2$ range looks about right, though, and the Hurwitz matrix (modulo the book's typos) matches the book's.
### Problem 4.7
#### a)
```python
A = np.array([[0, -.00156, -.0711, 0, 0], [0, -.1419, .0711, 0, 0], [0, -.00875, -1.102, 0, 0], [0, -.00128, -.1489, 0, -.0013], [0, .0605, .1489, 0, -.0591]])
B = np.array([[0, -.143, 0], [0, 0, 0], [.392, 0, 0], [0, .108, -.0592], [0, -.0486, 0]])
C = np.eye(5)
D = np.zeros((5,3))
E = np.array([[.2174, 0, 0], [-.074, .1434, 0], [-.036, 0, 0.1814], [0, 0, 0], [0, 0, 0]])
sys = control.StateSpace(A, B, C, D)
pprint(sys.pole())
```
[ 0. 0. -0.0591 -0.14254842 -1.10135158]
The open-loop poles are $s=0$ with multiplicity 2, $s=-0.0591$, $s=-0.1425$, and $s=-1.1014$. The nonzero poles are all real and negative, but the double pole at the origin makes the open-loop system only marginally stable, not asymptotically stable.
#### b)
```python
A = Matrix([[0, -.00156, -.0711, 0, 0], [0, -.1419, .0711, 0, 0], [0, -.00875, -1.102, 0, 0], [0, -.00128, -.1489, 0, -.0013], [0, .0605, .1489, 0, -.0591]])
B = Matrix([[0, -.143, 0], [0, 0, 0], [.392, 0, 0], [0, .108, -.0592], [0, -.0486, 0]])
Phi = ch3_utils.computeResolvent(A)
obs = Matrix([[1, 0, 0, 0, 0], [0, 0, 0, 1, 0]])
print('Observation Matrix:')
pprint(obs)
print('\n\n')
C = Matrix([[1, 0, 0, 0, 0]])
print('x1(s) / u1(s)')
B = Matrix([[0], [0], [.392], [0], [0]])
pprint(simplify(C*Phi*B)[0])
print('\n\n')
print('x1(s) / u2(s)')
B = Matrix([[-.143], [0], [0], [.108], [-.0486]])
pprint(simplify(C*Phi*B)[0])
print('\n\n')
print('x1(s) / u3(s)')
print('\n\n')
B = Matrix([[0], [0], [0], [-.0592], [0]])
pprint(simplify(C*Phi*B)[0])
print('\n\n\n\n')
C = Matrix([[0, 0, 0, 1, 0]])
print('x4(s) / u1(s)')
B = Matrix([[0], [0], [.392], [0], [0]])
pprint(simplify(C*Phi*B)[0])
print('x4(s) / u2(s)')
print('\n\n')
B = Matrix([[-.143], [0], [0], [.108], [-.0486]])
pprint(simplify(C*Phi*B)[0])
print('\n\n')
print('x4(s) / u3(s)')
print('\n\n')
B = Matrix([[0], [0], [0], [-.0592], [0]])
pprint(simplify(C*Phi*B)[0])
print('\n\n')
```
Observation Matrix:
⎡1 0 0 0 0⎤
⎢ ⎥
⎣0 0 0 1 0⎦
x1(s) / u1(s)
  -(3.46944695195361e-18*s**2 + 0.0224063027574564*s + 0.00321440819358469)
  / (s*(0.803923144947343*s**2 + 1.0*s + 0.126212657769917))
x1(s) / u2(s)
  -0.143/s
x1(s) / u3(s)
  0
x4(s) / u1(s)
  (3.46944695195361e-18*s**5 - 0.0229175860850446*s**4 - 0.0331574202740587*s**3 - 0.00958050451342888*s**2 - 0.000976496519761018*s - 3.11024064661291e-5)
  / (s*(0.392634182731949*s**5 + 1.0*s**4 + 0.788530385959402*s**3 + 0.196543270117005*s**2 + 0.0187406930713857*s + 0.000571942471073223))
x4(s) / u2(s)
  (0.0424044917350504*s**5 + 0.108024806627665*s**4 + 0.0852229956119204*s**3 + 0.0212728452292586*s**2 + 0.00203368368696928*s + 6.23812137003143e-5)
  / (s*(0.392634182731949*s**5 + 1.0*s**4 + 0.788530385959402*s**3 + 0.196543270117005*s**2 + 0.0187406930713857*s + 0.000571942471073223))
x4(s) / u3(s)
  -0.0592/s
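As a numeric cross-check (a sketch reusing the Problem 4.4 pattern, not part of the original solution), any of these entries can be pushed through `control.TransferFunction` to read off its poles, e.g. $x_4(s)/u_2(s)$:
```python
C = Matrix([[0, 0, 0, 1, 0]])
B = Matrix([[-.143], [0], [0], [.108], [-.0486]])
n, d = fraction(simplify(C*Phi*B)[0])
num = np.array([float(p) for p in poly(n).all_coeffs()])
den = np.array([float(p) for p in poly(d).all_coeffs()])
G = control.TransferFunction(num, den)
print(G.pole())
```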
### Problem 4.8
#### a)
```python
A = Matrix([[0, -.00156, -.0711, 0.143*K2, 0], [0, -.1419, .0711, 0, 0], [-.392*K1, -.00875, -1.102, 0, 0], [0, -.00128, -.1489, -.108*K2, -.0013], [0, .0605, .1489, .0486*K1, -.0591]])
B = Matrix([[0, -.143*K2, 0], [0, 0, 0], [.392*K1, 0, 0], [0, .108*K2, -.0592], [0, -.0486*K2, 0]])
C = Matrix([[1, 0, 0, 0, 0]])
res = ch3_utils.computeResolvent(A, imag=False, smplfy=False)
M = simplify(C*res*B)[0]
n, d = fraction(M)
denCoeffs = [-1.44471785472e-8,
-1.5602952830976e-9*K2 - 1.88246736470016e-8,
-3.33022512249417e-9 - 2.03306475387617e-9*K2 + 4.01747429984109e-10*K1,
2.01948391736579e-28 * K1**2 + 1.6407412184584e-10 * K1 * K2 + 8.0427452651872e-11 * K1 - 1.34047556235777e-10,
2.54400716081478e-14 * K1**2 + 3.32772044714127e-11 * K1 * K2 + 3.27064729083152e-12 * K1 - 1.44771360734639e-11 * K2,
3.64963267290489e-15 * K1**2 + 1.41111171520274e-12 * K1 * K2
]
```
```python
pprint(denCoeffs)
dets = ch4_utils.hurwitz(denCoeffs)
pprint(dets)
```
[-1.44471785472e-8,
 -1.5602952830976e-9*K2 - 1.88246736470016e-8,
 4.01747429984109e-10*K1 - 2.03306475387617e-9*K2 - 3.33022512249417e-9,
 2.01948391736579e-28*K1**2 + 1.6407412184584e-10*K1*K2 + 8.0427452651872e-11*K1 - 1.34047556235777e-10,
 2.54400716081478e-14*K1**2 + 3.32772044714127e-11*K1*K2 + 3.27064729083152e-12*K1 - 1.44771360734639e-11*K2,
 3.64963267290489e-15*K1**2 + 1.41111171520274e-12*K1*K2]
[0.108*K2 + 1.303,
 1.39783966174986e-20*K1**2 + 0.00835356183999998*K1*K2 - 0.03066684939*K1 + 0.015198192*K2**2 + 0.20825849682*K2 + 0.2910766115775,
 -1.95395571996097e-40*K1**4 - 2.75519646669034e-22*K1**3*K2 + 3.50855640447375e-22*K1**3 - 9.48494258384628e-5*K1**2*K2**2 + 0.000302242169068602*K1**2*K2 + 0.000173382884434965*K1**2 - 0.000145736739267888*K1*K2**3 - 0.00180939346433065*K1*K2**2 - 0.000540443895709508*K1*K2 - 0.0015206036345384*K1 - 1.16881863548097e-5*K2**3 - 0.000141015803891825*K2**2 + 0.000230988399112185*K2 + 0.00270074245513609,
 3.44072534803628e-46*K1**6 + 4.85163211474968e-28*K1**5*K2 - 7.16018487167673e-28*K1**5 + 1.67020583115162e-10*K1**4*K2**2 - 5.9085332429748e-10*K1**4*K2 - 8.93649489898064e-11*K1**4 + 2.18729994952404e-7*K1**3*K2**3 - 6.93934515719473e-7*K1**3*K2**2 - 3.84669791996951e-7*K1**3*K2 - 4.03299416938956e-8*K1**3 + 3.35685701936201e-7*K1**2*K2**4 + 4.20404894369815e-6*K1**2*K2**3 + 1.45274612586103e-6*K1**2*K2**2 + 2.36809806674641e-6*K1**2*K2 + 3.56438283685991e-7*K1**2 + 8.97832931274887e-8*K1*K2**4 + 1.7084513281062e-6*K1*K2**3 + 7.52054641074542e-6*K1*K2**2 - 1.24331582487075e-6*K1*K2 - 6.11411838323025e-7*K1 - 1.17124228622051e-8*K2**4 - 1.41308212865308e-7*K2**3 + 2.31467374367488e-7*K2**2 + 2.70634268792669e-6*K2,
 K1*(-8.6919280520133e-53*K1**7 - 1.22561474720175e-34*K1**6*K2 + 1.80879917600072e-34*K1**6 - 4.21925828073097e-17*K1**5*K2**2 + 1.49260811736038e-16*K1**5*K2 + 2.25752894643146e-17*K1**5 - 7.15689111377953e-14*K1**4*K2**3 + 2.33012011190331e-13*K1**4*K2**2 + 1.05903541137873e-13*K1**4*K2 + 1.01881119847389e-14*K1**4 - 2.14490038201434e-11*K1**3*K2**4 + 6.67172408219932e-11*K1**3*K2**3 + 3.72051925909986e-11*K1**3*K2**2 + 3.34095442670455e-12*K1**3*K2 - 9.00431043864033e-14*K1**3 - 3.27877187286534e-11*K1**2*K2**5 - 4.10648374864036e-10*K1**2*K2**4 - 1.42326911132661e-10*K1**2*K2**3 - 2.33201115749105e-10*K1**2*K2**2 - 3.45006182467008e-11*K1**2*K2 + 1.54454284236491e-13*K1**2 - 8.76948092997961e-12*K1*K2**5 - 1.66868079468044e-10*K1*K2**4 - 7.34525110712003e-10*K1*K2**3 + 1.21380984498162e-10*K1*K2**2 + 5.90352814140743e-11*K1*K2 + 1.14399756743293e-12*K2**5 + 1.38021187996769e-11*K2**4 - 2.2608312245195e-11*K2**3 - 2.64338940631891e-10*K2**2)]
```python
def f(k1, k2):
c1 = 0.108*k2 + 1.303 > 0
c2 = 1.398e-20*k1**2 + 8.3536e-3*k1*k2 -3.067e-2*k1 + 1.520e-2*k2**2 + .2083*k2 + .2911 > 0
c3 = -1.954e-40*k1**4 - 2.7552e-22*k1**3*k2 + 3.509e-22*k1**3 - 9.485e-5*k1**2*k2**2 + 3.022e-4*k1**2*k2 + 1.734e-4*k1**2 - 1.457e-4*k1*k2**3 - 1.809e-3*k1*k2**2 - 5.404e-4*k1*k2 - 1.521e-3*k1 - 1.169e-5*k2**3 - 1.4101e-4*k2**2 + 2.310e-4*k2 + 2.701e-3 > 0
c4 = 3.441e-46*k1**6 + 4.852e-28*k1**5*k2 - 7.160e-28*k1**5 + 1.6702e-10*k1**4*k2**2 - 5.909e-10*k1**4*k2 - 8.936e-11*k1**4 + 2.187e-7*k1**3*k2**3 - 6.939e-7*k1**3*k2**2 - 3.847e-7*k1**3*k2 - 4.033e-8*k1**3 + 3.357e-7*k1**2*k2**4 + 4.204e-6*k1**2*k2**3 + 1.453e-6*k1**2*k2**2 + 2.368e-6*k1**2*k2 + 3.564e-7*k1**2 + 8.978e-8*k1*k2**4 + 1.708e-6*k1*k2**3 + 7.521e-6*k1*k2**2 - 1.243e-6*k1*k2 - 6.114e-7*k1 - 1.171e-8*k2**4 - 1.413e-7*k2**3 + 2.3147e-7*k2**2 + 2.706e-6*k2 > 0
c5 = k1*(-8.692e-53*k1**7 - 1.226e-34*k1**6*k2 + 1.809e-34*k1**6 - 4.219e-17*k1**5*k2**2 + 1.493e-16*k1**5*k2 + 2.258e-17*k1**5 - 7.157e-14*k1**4*k2**3 + 2.330e-13*k1**4*k2**2 + 1.059e-13*k1**4*k2 + 1.0188e-14*k1**4 - 2.145e-11*k1**3*k2**4 + 6.672e-11*k1**3*k2**3 + 3.721e-11*k1**3*k2**2 + 3.341e-12*k1**3*k2 - 9.003e-14*k1**3 - 3.279e-11*k1**2*k2**5 - 4.106e-10*k1**2*k2**4 - 1.423e-10*k1**2*k2**3 - 2.332e-10*k1**2*k2**2 - 3.450e-11*k1**2*k2 + 1.545e-13*k1**2 - 8.769e-12*k1*k2**5 - 1.667e-10*k1*k2**4 - 7.345e-10*k1*k2**3 + 1.214e-10*k1*k2**2 + 5.904e-11*k1*k2 + 1.44e-12*k2**5 + 1.380e-11*k2**4 - 2.261e-11*k2**3 - 2.643e-10*k2**2) > 0
return int(c1 and c2 and c3 and c4 and c5)
vf = np.vectorize(f)
gs = np.linspace(-25, 5, 301)
hs = np.linspace(-25, 5, 301)
X = np.meshgrid(gs,hs)
out = vf(X[0], -X[1])
plt.imshow(out, origin='lower', extent=(-25, 5, -25, 5))  # extent matches the linspace bounds above
plt.xlabel('k1')
plt.ylabel('k2')
plt.title('Stability Region (in yellow)')
plt.show()
```
#### b) and c)
```python
A = Matrix([[0, -.00156, -.0711, 0, 0], [0, -.1419, .0711, 0, 0], [0, -.00875, -1.102, 0, 0], [0, -.00128, -.1489, 0, -.0013], [0, .0605, .1489, 0, -.0591]])
B = Matrix([[0, -.143, 0], [0, 0, 0], [.392, 0, 0], [0, .108, -.0592], [0, -.0486, 0]])
# compute resolvent with complex frequency
Phi = ch3_utils.computeResolvent(A, imag=True, smplfy=False)
C = Matrix([[40., 0, 0, 0, 0]])
B = Matrix([[0], [0], [.392], [0], [0]])
H11 = simplify(C*Phi*B)[0]
C = Matrix([[-40., 0, 0, 0, 0]])
B = Matrix([[-.143], [0], [0], [.108], [-.0486]])
H12 = simplify(C*Phi*B)[0]
C = Matrix([[0, 0, 0, 40, 0]])
B = Matrix([[0], [0], [.392], [0], [0]])
H21 = simplify(C*Phi*B)[0]
C = Matrix([[0, 0, 0, -40, 0]])
B = Matrix([[-.143], [0], [0], [.108], [-.0486]])
H22 = simplify(C*Phi*B)[0]
IplusH = Matrix([[1+H11, H12], [H21, 1+H22]])
# compute product of (I+H)* times (I+H) - (I+H)* is the conjugate transpose
product = Matrix([[simplify(conjugate(1+H11)*(1+H11)) + simplify(conjugate(H21)*H21), simplify(conjugate(1+H11)*H12) + simplify(conjugate(H21)*(1+H22))],
[simplify(conjugate(H12)*(1+H11)) + simplify(conjugate(1+H22)*H21), simplify(conjugate(H12)*H12) + simplify(conjugate(1+H22)*(1+H22))]])
# compute determinant of (sigma*I - product) in terms of w; its roots in
# sigma are the squared singular values of (I+H)
sigma = symbols('sigma', real=True)
charPoly = simplify(det(sigma*eye(2) - product))
pprint(charPoly)
```
(The characteristic polynomial that sympy pretty-prints here is an enormous nested rational expression in $w$, its conjugate $\bar{w}$, and $\sigma$; it spans dozens of wrapped lines and cannot be reproduced legibly, so it is omitted.)
I am not going to work through the remainder of this problem because the algebraic form above is very nasty. If I could reduce all of the terms, I would compute the squared singular values as the roots in $\sigma$ of the characteristic polynomial $\det(\sigma I - M)$, where $M = (I+H)^*(I+H)$, as a function of $\omega$. Also, a call to the MATLAB routine [`sigma`](https://www.mathworks.com/help/control/ref/sigma.html) would easily solve the problem given the transfer matrix $I + H$.
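If the symbolic reduction ever did go through, a numerical shortcut (a sketch, not part of the original solution) would be to lambdify the four entries and take the SVD of $I + H(j\omega)$ on a frequency grid; here `w` is assumed to be the real frequency symbol introduced by `computeResolvent(..., imag=True)`:
```python
from sympy import lambdify

# hypothetical: numeric callables for the symbolic entries computed above
fns = [lambdify(w, expr, 'numpy') for expr in (H11, H12, H21, H22)]
ws = np.logspace(-2, 2, 400)
sv = np.empty((len(ws), 2))
for k, wk in enumerate(ws):
    h11, h12, h21, h22 = (f(wk) for f in fns)
    M = np.eye(2) + np.array([[h11, h12], [h21, h22]])
    sv[k] = np.linalg.svd(M, compute_uv=False)  # singular values at this frequency
```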
### Problem 4.9
The characteristic equation comes from the denominator of $G(s)$, where
\begin{eqnarray}
G(s) = \frac{KH(s)}{1+KH(s)}
\end{eqnarray}
```python
K = symbols('K')
H = 100980 / (s*(s**3+140.2*s**2+10.449*s+100980))
G = K*H / (1+K*H)
pprint(simplify(G))
```
100980*K / (100980*K + s*(s**3 + 140.2*s**2 + 10.449*s + 100980))
```python
dets = ch4_utils.hurwitz([1., 140.2, 10.449,100980, 100980*K])
pprint(dets)
```
[140.2, -99515.0502, -1984866919.2*K - 10049029769.196, -K*(200431861500816.0*K + 1.01475102609341e+15)]
The Hurwitz criteria cannot be met for any $K$: the second determinant is $-99515.05$ independent of $K$, so the closed loop is unstable for every choice of gain.
### Problem 4.10
```python
num = -1115.*np.array([1., 0., -2228.])
#to get the denom, uncomment the next two lines
#d = expand((0.01*s+1.)*(s**2 + 3.33*s+248.))
#pprint(d)
den = np.array([.01, 1.0333, 5.81, 248.])
G = control.TransferFunction(num, den)
plt.figure()
control.nyquist_plot(G)
plt.figure()
control.bode_plot(G)
plt.show()
```
### Problem 4.11
Using the feedback law $u = K_1(a_{NC} - Z_\alpha \alpha - Z_\delta \delta) - K_2q$, the closed loop system is:
\begin{eqnarray}
A &=& \begin{pmatrix}
\frac{Z_\alpha}{V} & 1 & \frac{Z_\delta}{V} \\
M_\alpha & 0 & M_\delta \\
-\frac{K_1 Z_\alpha}{\tau} & -\frac{K_2}{\tau} & -\frac{K_1 Z_\delta}{\tau} \\
\end{pmatrix}, \\
B &=& \begin{pmatrix}
0 \\
0 \\
\frac{K_1}{\tau}
\end{pmatrix}, \\
C &=& \begin{pmatrix}
Z_\alpha & 0 & Z_\delta
\end{pmatrix}
\end{eqnarray}
```python
V = 1253.
Za = -4170.
Zd = -1115.
Ma = -248.
Md = -662.
tau = .01
A = Matrix([[-3.33, 1., -0.89], [-248., 0., -662.], [417000.*K1, -100.*K2, 111500.*K1]])
pprint(A)
B = Matrix([[0.], [0.], [100.*K1]])
C = Matrix([[-4170., 0., -1115.]])
Phi = ch3_utils.computeResolvent(A, imag=False, smplfy=False)
pprint(C*B)
H = simplify(C*Phi*B)[0]
pprint(H)
```
⎡ -3.33 1.0 -0.89 ⎤
⎢ ⎥
⎢ -248.0 0.0 -662.0 ⎥
⎢ ⎥
⎣417000.0⋅K₁ -100.0⋅K₂ 111500.0⋅K₁⎦
[-111500.0⋅K₁]
100.0*K1*(2484020.0*s*(s + 3.33) - 4170.0*s*(662.0*s + 1983.74) + 616036960.0)
/ ((248402000.0*K1 + 248.0*s)*(s*(s + 3.33) + 248.0) - (662.0*s + 1983.74)*(417000.0*K1*s + 24800.0*K2))
```python
d = expand((24802000.*K1 + 248.*s)*(s*(s+3.33) + 248.0) - (662.*s + 1983.74)*(417000.*K1*s+24800.*K2))
pprint(d)
```
-251252000.0*K1*s**2 - 744628920.0*K1*s + 6150896000.0*K1 - 16417600.0*K2*s - 49196752.0*K2 + 248.0*s**3 + 825.84*s**2 + 61504.0*s
```python
coeffs = [248., 825.84 - 251252000.*K1, 61504. - 744628920.*K1 - 16417600.*K2+61504., -49196752.*K2 + 6150896000.*K1]
dets = ch4_utils.hurwitz(coeffs)
pprint(dets)
```
[-1013112.90322581*K1 + 3.33,
 3041907931318.94*K1**2 + 67068074193.5484*K1*K2 - 537304444.772581*K1 - 22072.0*K2 + 1651.68,
 (24802000.0*K1 - 198374.0*K2)*(-24802000.0*K1 + 198374.0*K2 + (1013112.90322581*K1 - 3.33)*(3002535.96774194*K1 + 66200.0*K2 - 496.0))]
```python
def f(k1, k2):
c1 = -1013112.903*k1 + 3.33 > 0
c2 = 3041907931318.94*k1**2 + 67068074193.5484*k1*k2 - 537304444.773*k1 - 22072.*k2 + 1651.68 > 0
c3 = (24802000.*k1 - 198374.*k2)*(-24802000.*k1 + 198374.*k2 + (1013112.903*k1 - 3.33)*(3002535.968*k1 + 66200.*k2 - 496.)) > 0
return int(c1 and c2 and c3)
vf = np.vectorize(f)
gs = np.linspace(-100, 500, 501)
hs = np.linspace(-100, 500, 501)
X = np.meshgrid(gs,hs)
out = vf(X[0], -X[1])
plt.imshow(out, origin='lower', extent=(-100, 500, -100, 500))
plt.xlabel('k1')
plt.ylabel('k2')
plt.title('Stability Region (in yellow)')
plt.show()
```
#### b)
It's a little tough to tell from the plot, but it looks like the system cannot be stabilized when $K_2 = 0$, i.e., when the feedback uses $\alpha$ and $\delta$ alone. Only once $K_2$ is increased to around $50$ does a stable range of $K_1$ appear, and that range (i.e., the gain margin) widens as $K_2$ increases. These two facts make it easy to justify the additional cost of a rate gyro.
|
function [fhat,xhat,fcount,retcode] = sims_csminit(fcn,x0,f0,g0,badg,H0,varargin)
% [fhat,xhat,fcount,retcode] = csminit(fcn,x0,f0,g0,badg,H0,...
% P1,P2,P3,P4,P5,P6,P7,P8)
% retcodes: 0, normal step. 5, largest step still improves too fast.
% 4,2 back and forth adjustment of stepsize didn't finish. 3, smallest
% stepsize still improves too slow. 6, no improvement found. 1, zero
% gradient.
%---------------------
% Modified 7/22/96 to omit variable-length P list, for efficiency and compilation.
% Places where the number of P's need to be altered or the code could be returned to
% its old form are marked with ARGLIST comments.
%
% Fixed 7/17/93 to use inverse-hessian instead of hessian itself in bfgs
% update.
%
% Fixed 7/19/93 to flip eigenvalues of H to get better performance when
% it's not psd.
%
%tailstr = ')';
%for i=nargin-6:-1:1
% tailstr=[ ',P' num2str(i) tailstr];
%end
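% Example usage (a sketch, not part of the original file):
% fcn = @(x) (x-2)^2; x0 = 0;
% f0 = fcn(x0); g0 = 2*(x0-2); % analytic gradient available, so badg = 0
% [fhat,xhat,fcount,retcode] = sims_csminit(fcn,x0,f0,g0,0,1);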
%ANGLE = .03;
ANGLE = .005;
%THETA = .03;
THETA = .3; %(0<THETA<.5) THETA near .5 makes long line searches, possibly fewer iterations.
FCHANGE = 1000;
MINLAMB = 1e-9;
% fixed 7/15/94
% MINDX = .0001;
% MINDX = 1e-6;
MINDFAC = .01;
fcount=0;
lambda=1;
xhat=x0;
f=f0;
fhat=f0;
g = g0;
gnorm = norm(g);
%
if (gnorm < 1.e-12) & ~badg % put ~badg 8/4/94
retcode =1;
dxnorm=0;
% gradient convergence
else
% with badg true, we don't try to match rate of improvement to directional
% derivative. We're satisfied just to get some improvement in f.
%
%if(badg)
% dx = -g*FCHANGE/(gnorm*gnorm);
% dxnorm = norm(dx);
% if dxnorm > 1e12
% disp('Bad, small gradient problem.')
% dx = dx*FCHANGE/dxnorm;
% end
%else
% Gauss-Newton step;
%---------- Start of 7/19/93 mod ---------------
%[v d] = eig(H0);
%toc
%d=max(1e-10,abs(diag(d)));
%d=abs(diag(d));
%dx = -(v.*(ones(size(v,1),1)*d'))*(v'*g);
% toc
dx = -H0*g;
% toc
dxnorm = norm(dx);
if dxnorm > 1e12
dx = dx*FCHANGE/dxnorm;
end
dfhat = dx'*g0;
%end
%
%
if ~badg
% test for alignment of dx with gradient and fix if necessary
a = -dfhat/(gnorm*dxnorm);
if a<ANGLE
dx = dx - (ANGLE*dxnorm/gnorm+dfhat/(gnorm*gnorm))*g;
% suggested alternate code: ---------------------
dx = dx*dxnorm/norm(dx); % This keeps scale invariant to the angle correction
% ------------------------------------------------
dfhat = dx'*g;
% dxnorm = norm(dx); % this line unnecessary with modification that keeps scale invariant
end
end
%
% Have OK dx, now adjust length of step (lambda) until min and
% max improvement rate criteria are met.
done=0;
factor=3;
shrink=1;
lambdaMin=0;
lambdaMax=inf;
lambdaPeak=0;
fPeak=f0;
lambdahat=0;
while ~done
if size(x0,2)>1
dxtest=x0+dx'*lambda;
else
dxtest=x0+dx*lambda;
end
% home
f = feval(fcn,dxtest,varargin{:});
if f<fhat
fhat=f;
xhat=dxtest;
lambdahat = lambda;
end
fcount=fcount+1;
shrinkSignal = (~badg & (f0-f < max([-THETA*dfhat*lambda 0]))) | (badg & (f0-f) < 0) ;
growSignal = ~badg & ( (lambda > 0) & (f0-f > -(1-THETA)*dfhat*lambda) );
if shrinkSignal & ( (lambda>lambdaPeak) | (lambda<0) )
if (lambda>0) & ((~shrink) | (lambda/factor <= lambdaPeak))
shrink=1;
factor=factor^.6;
while lambda/factor <= lambdaPeak
factor=factor^.6;
end
%if (abs(lambda)*(factor-1)*dxnorm < MINDX) | (abs(lambda)*(factor-1) < MINLAMB)
if abs(factor-1)<MINDFAC
if abs(lambda)<4
retcode=2;
else
retcode=7;
end
done=1;
end
end
if (lambda<lambdaMax) & (lambda>lambdaPeak)
lambdaMax=lambda;
end
lambda=lambda/factor;
if abs(lambda) < MINLAMB
if (lambda > 0) & (f0 <= fhat)
% try going against gradient, which may be inaccurate
lambda = -lambda*factor^6;
else
if lambda < 0
retcode = 6;
else
retcode = 3;
end
done = 1;
end
end
elseif (growSignal & lambda>0) | (shrinkSignal & ((lambda <= lambdaPeak) & (lambda>0)))
if shrink
shrink=0;
factor = factor^.6;
%if ( abs(lambda)*(factor-1)*dxnorm< MINDX ) | ( abs(lambda)*(factor-1)< MINLAMB)
if abs(factor-1)<MINDFAC
if abs(lambda)<4
retcode=4;
else
retcode=7;
end
done=1;
end
end
if ( f<fPeak ) & (lambda>0)
fPeak=f;
lambdaPeak=lambda;
if lambdaMax<=lambdaPeak
lambdaMax=lambdaPeak*factor*factor;
end
end
lambda=lambda*factor;
if abs(lambda) > 1e20
retcode = 5;
done =1;
end
else
done=1;
if factor < 1.2
retcode=7;
else
retcode=0;
end
end
end
end
|
module Lib where
import Data.Complex
import qualified Data.Vector as V
import qualified Data.Vector.Storable as S
import qualified Data.Vector.Generic as G
import Codec.Picture
w :: Int
w = 500
h :: Int
h = 500
sz :: Float
sz = fromIntegral st / 2.5
st :: Int
st = 30
colors :: V.Vector PixelRGB8
colors = V.fromList [
PixelRGB8 22 160 133,
PixelRGB8 192 57 43,
PixelRGB8 44 62 80,
PixelRGB8 142 68 173 ]
roots :: [Complex Float]
roots = [
10 :+ 6,
(-2) :+ 4,
(-3) :+ (-9),
6 :+ (-3) ]
rootsV :: V.Vector (Complex Float)
rootsV = V.fromList roots
coefs :: [Complex Float]
coefs = coefsFromRoots rootsV
coefs' :: [Complex Float]
coefs' = deriveCoefs coefs
podd :: (Integral a, Num p) => a -> p
podd n
| odd n = -1
| otherwise = 1
binom :: Int -> Int -> [S.Vector Int]
binom n k
| k < 0 = []
| k > n = []
| n <= 0 = [G.empty]
| k == n = [G.enumFromN 1 n]
| otherwise = binom (n-1) k ++ ((`G.snoc` n) <$> binom (n-1) (k-1))
coefsFromRoots :: (Num a, S.Storable a, G.Vector v a) => v a -> [a]
coefsFromRoots roots =
let n = G.length roots
in [ podd (n-k) * sum [ G.product (G.map (\i -> roots G.! (i-1)) p) | p <- binom n (n-k) ] | k <- [0..n] ]
deriveCoefs :: Num b => [b] -> [b]
deriveCoefs coefs = (\(i, c) -> fromIntegral i * c) <$> zip [1..] (tail coefs)
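-- e.g. deriveCoefs [a, b, c] == [b, 2*c], the derivative of a + b*x + c*x^2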
evalPoly :: Num a => [a] -> a -> a
evalPoly [] _ = 0
evalPoly (c0:cs) z = c0 + z * evalPoly cs z
newton :: (RealFloat a, Num t, Eq t) => t -> [Complex a] -> [Complex a] -> Complex a -> Complex a
newton steps coefs coefs' x0 =
if steps == 0 || magnitude fx < 0.1 then x0
else newton (steps - 1) coefs coefs' (x0 - fx/fpx)
where
fx = evalPoly coefs x0
fpx = evalPoly coefs' x0
d :: RealFloat a => Complex a -> Complex a -> a
d c1 c2 = magnitude (c2 - c1)
pix :: Int -> Float -> Int -> Int -> PixelRGB8
pix st sz i j =
let z = realToFrac i / sz :+ realToFrac j / sz
zr = newton st coefs coefs' z
(_, idx) = minimum (zip [d zr r | r <- roots] [0..]) -- index of the nearest root
in colors V.! idx
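-- A possible driver (a sketch, not in the original module): render the
-- Newton-basin image with JuicyPixels' generateImage, assuming the fractal
-- should be centered by shifting pixel coordinates by half the image size.
render :: Image PixelRGB8
render = generateImage (\i j -> pix st sz (i - w `div` 2) (j - h `div` 2)) w h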
|
The Persuasive Power of Campaign Advertising offers a comprehensive overview of political advertisements and their changing role in the Internet age. Travis Ridout and Michael Franz examine how these ads function in various kinds of campaigns and how voters are influenced by them.
The authors particularly study where ads are placed, asserting that television advertising will still be relevant despite the growth of advertising on the Internet. The authors also explore the recent phenomenon of outrageous ads that "go viral" on the web, which often leads to their replaying as television news stories, generating additional attention.
The Persuasive Power of Campaign Advertising features the first analysis of the impact on voters of media coverage of political advertising and shows that televised political advertising continues to have widespread influence on the choices that voters make at the ballot box.
"The Persuasive Power of Campaign Advertising combines academic knowledge and the wisdom of experience in election campaigns. It is an important contribution to the fields of political communication and campaigns. I recommend this insightful analysis to political professionals, working journalists, communications specialists, as well as students of campaigning and lobbying. It is an excellent book for university students studying campaigns, political communication, and public relations."
"Travis Ridout and Michael Franz have developed a persuasive message. Using an impressive array of data, they show when ads matter and when they do not matter. As a result, The Persuasive Power of Campaign Advertising is a book all serious scholars of political communication should have on their shelves."
"The book offers a comprehensive overview of political advertising and its changing role in the Internet age, with an emphasis on television advertising. Marshalling an impressive amount of empirical evidence from recent elections, the authors examine precisely how ads function and influence voters in a variety of campaign environments.... The Persuasive Power of Campaign Advertising makes a compelling case that exposure to campaign advertising exerts widespread influence on the choices voters make at the ballot box. The authors' incisive analyses and interpretations leave little doubt that campaign ads help shape voter preferences.... This timely and well-written book is a fine example of social science that is highly relevant and readily applicable to practical politics.... A must-read for those involved in professional politics, especially political media and advertising specialists."
"For anyone interested in the role of ads in campaigns, this is a very valuable book. Summing Up: Recommended."
"Ridout and Franz have done scholars of political communication, behavior, and campaigns a great service in writing this book."
"The Persuasive Power of Campaign Advertising contributes significant insights into campaign advertising for students of political science, journalists, and anyone involved or interested in political campaigns. As a major source of information on candidates, television advertising has a strong effect on American democracy, and this work gives a basis for understanding how it works."
"(T)he book succeeds in establishing the ubiquity of ad impact... The authors have done a masterful job in laying out issues."
"(A) detailed look at the complex effects of political advertising. It is a comprehensive work that analyzes the effects of campaign advertising in Senate and Presidential races across several elections between 2000 and 2008.... The Persuasive Power of Campaign Advertising makes a strong case that political advertising is related to vote choice and candidate favorability. Even more, the book demonstrates that campaign context, advertising content, and receiver characteristics play a role in explaining when political advertising works.... (T)his book is a persuasive cautionary note against generalizing advertising effects found in one election to another and a productive starting point for those embarking on the difficult task of explaining why variability occurs."
Travis N. Ridout is Associate Professor of Political Science at Washington State University and a coauthor (with Michael M. Franz, Paul B. Freedman, and Kenneth M. Goldstein) of Campaign Advertising and American Democracy (Temple).
Michael M. Franz is Associate Professor of Government and Legal Studies at Bowdoin College, author of Choices and Changes: Interest Groups in the Electoral Process, and a coauthor (with Paul B. Freedman, Kenneth M. Goldstein, and Travis N. Ridout) of Campaign Advertising and American Democracy (Temple). |
#include <boost/asio/stream_socket_service.hpp>
|
```python
from sympy import *
init_printing()
x, m, b=symbols('x, m, b')
y=Function('y')
lin=latex(Eq(y(x), m*x+b))
lin
```
'y{\\left (x \\right )} = b + m x'
\begin{theorem}
fdjfgkj
\end{theorem}
markdown cell
{{lin}}
${{lin}}$
\begin{equation}
x {{lin}}
\end{equation}
The main paper on IPython is definitely \cite{PER-GRA:2007}. Other interesting references are certainly \cite{mckinney2012python, rossant2013learning}. Interestingly, a presentation of the IPython notebook has also been published recently in Nature \cite{shen2014interactive}.
# References
[<a id="cit-PER-GRA:2007" href="#call-PER-GRA:2007">1</a>] P\'erez Fernando and Granger Brian E., ``_IPython: a System for Interactive Scientific Computing_'', Computing in Science and Engineering, vol. 9, number 3, pp. 21--29, May 2007. [online](http://ipython.org)
[<a id="cit-mckinney2012python" href="#call-mckinney2012python">2</a>] Wes McKinney, ``_Python for data analysis: Data wrangling with Pandas, NumPy, and IPython_'', 2012.
[<a id="cit-rossant2013learning" href="#call-rossant2013learning">3</a>] Cyrille Rossant, ``_Learning IPython for interactive computing and data visualization_'', 2013.
[<a id="cit-shen2014interactive" href="#call-shen2014interactive">4</a>] Shen Helen, ``_Interactive notebooks: Sharing the code_'', Nature, vol. 515, number 7525, pp. 151--152, 2014.
|
module Markov where
import MusicData
import Utility
-- import Numeric.LinearAlgebra (Matrix, R, (><))
import Data.Map (Map)
import qualified Data.Map as Map (empty, insertWith, lookup)
import qualified Data.Map.Merge.Strict as Map' (merge, preserveMissing,
zipWithMatched)
import qualified Data.Map.Strict as Strict (Map)
import qualified Data.Map.Strict as Map' (difference, elems, empty,
fromList, insert, insertWith,
keys, lookup, member, toList)
import qualified Data.Maybe as Maybe (fromMaybe)
import qualified Data.Set as Set (fromList, size, toList)
-- |representation of bigrams and containing deterministic cadence sequences
type Bigram = (Cadence, Cadence)
-- |representation of counts for each trigram
type TransitionCounts = Strict.Map Bigram Double
-- -- |representations of the Markov transition matrix
-- type TransitionMatrix = Matrix R
-- |representation of markov transition matrix as key-value pairs
type MarkovMap = Map Cadence [(Cadence, Double)]
-- |mapping from list of events into list of existing preceding bigrams
bigrams :: [a] -> [(a, a)]
bigrams [] = []
bigrams (x:xs)
| length (x:xs) < 2 = []
| otherwise = bigram (x:xs) : bigrams xs
where bigram (a:b:_) = (a, b)
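-- e.g. bigrams [1,2,3,4] == [(1,2),(2,3),(3,4)]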
-- |lifted 'shortcut' `toCadence` which operates on a list of integer lists
toCadences :: (Integral a, Num a) => [[a]] -> [Cadence]
toCadences xs = toCadence <$> bigrams (flatTriad <$> xs)
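-- A sketch of intended use (hypothetical pitch-class input; `flatTriad` and
-- `Cadence` come from MusicData, whose internals are not shown here):
-- >>> toCadences [[0,4,7],[5,9,0],[7,11,2],[0,4,7]]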
-- |mapping from input data into all theoretically possible bigrams
pairs :: [Cadence] -> [Bigram]
pairs xs =
let cs = unique xs
in [ (x, y) | x <- cs, y <- cs ]
-- |mapping from input data into all possible trigrams with counts of zero
zeroCounts :: [Cadence] -> TransitionCounts
zeroCounts xs =
let mInsert acc key = Map'.insert key 0 acc
in foldl mInsert Map'.empty $ pairs xs
-- |mapping from input data to counts of all occurring transitions
cadenceCounts :: [Cadence] -> TransitionCounts
cadenceCounts xs =
let mInsert acc key = Map'.insertWith (+) key 1 acc
in foldl mInsert Map'.empty $ bigrams xs
-- |mapping from input to counts of cadences, including 'stationary' movements
transitionCounts :: [Cadence] -> TransitionCounts
transitionCounts xs = mergeMaps (foldl mInsert cadences (Map'.keys diff)) zeros
where diff = Map'.difference zeros cadences
zeros = zeroCounts xs
cadences = cadenceCounts xs
mergeMaps m1 m2 = Map'.merge Map'.preserveMissing Map'.preserveMissing
(Map'.zipWithMatched (\k x y -> x)) m1 m2
keys k = [ (fst k, nxt) | nxt <- unique xs ]
newKey k = (fst k, fst k)
member k = [ Map'.member ks cadences | ks <- keys k ]
mInsert acc key
| not (or (member key)) =
Map'.insert (newKey key) 1 acc
| otherwise =
Map'.insert key (Maybe.fromMaybe 0 $ Map'.lookup key cadences) acc
-- |helper function for probabilityList which generates probability sublists
transitionProbs :: [Cadence] -> [Double] -> [[Double]]
transitionProbs _ [] = []
transitionProbs xs ys =
let j = Set.size $ Set.fromList xs
xx = take j ys
recurse = drop j ys
in fmap (/ sum xx) xx : transitionProbs xs recurse
-- |mapping from list of Cadences into list of transitions with probabilities
probabilityMap :: [Cadence] -> Map Bigram Double
probabilityMap xs = Map'.fromList $ zip (Map'.keys $ zeroCounts xs) $ concat
. transitionProbs xs $ Map'.elems $ transitionCounts xs
-- -- |mapping from list of cadences into transition matrix
-- transitionMatrix :: [Cadence] -> TransitionMatrix
-- transitionMatrix xs =
-- let n = Set.size $ Set.fromList xs
-- in (n><n) $ Map'.elems $ probabilityMap xs :: Matrix R
-- |mapping from list of cadences into map with possible next probabilities
markovMap :: [Cadence] -> MarkovMap
markovMap xs = foldl mInsert Map.empty $ pairs xs
where mInsert acc key = Map.insertWith (++) (fst key) (pList key) acc
pList key = [(snd key, Maybe.fromMaybe 0 $ Map.lookup key pMap)]
pMap = probabilityMap xs
|
/* gsl_histogram_calloc_range.c
* Copyright (C) 2000 Simone Piccardi
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this library; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/***************************************************************
*
* File gsl_histogram_calloc_range.c:
* Routine to create a variable binning histogram providing
* an input range vector. Need GSL library and header.
* Do range check and allocate the histogram data.
*
* Author: S. Piccardi
* Jan. 2000
*
***************************************************************/
#include <config.h>
#include <stdlib.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_histogram.h>
gsl_histogram *
gsl_histogram_calloc_range (size_t n, double *range)
{
size_t i;
gsl_histogram *h;
/* check arguments */
if (n == 0)
{
GSL_ERROR_VAL ("histogram length n must be positive integer",
GSL_EDOM, 0);
}
/* check ranges */
for (i = 0; i < n; i++)
{
if (range[i] >= range[i + 1])
{
GSL_ERROR_VAL ("histogram bin extremes must be "
"in increasing order", GSL_EDOM, 0);
}
}
/* Allocate histogram */
h = (gsl_histogram *) malloc (sizeof (gsl_histogram));
if (h == 0)
{
GSL_ERROR_VAL ("failed to allocate space for histogram struct",
GSL_ENOMEM, 0);
}
h->range = (double *) malloc ((n + 1) * sizeof (double));
if (h->range == 0)
{
/* exception in constructor, avoid memory leak */
free (h);
GSL_ERROR_VAL ("failed to allocate space for histogram ranges",
GSL_ENOMEM, 0);
}
h->bin = (double *) malloc (n * sizeof (double));
if (h->bin == 0)
{
/* exception in constructor, avoid memory leak */
free (h->range);
free (h);
GSL_ERROR_VAL ("failed to allocate space for histogram bins",
GSL_ENOMEM, 0);
}
/* initialize ranges */
for (i = 0; i <= n; i++)
{
h->range[i] = range[i];
}
/* clear contents */
for (i = 0; i < n; i++)
{
h->bin[i] = 0;
}
h->n = n;
return h;
}
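/* Example usage (a sketch, not part of this file):
 *
 *   double r[5] = { 1.0, 10.0, 100.0, 1000.0, 10000.0 };
 *   gsl_histogram *h = gsl_histogram_calloc_range (4, r);
 *   ...
 *   gsl_histogram_free (h);
 */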
|
/*
VEGAS_TABLE Version 1. 5/27/16
report issues to Tim Waters,
[email protected]
**** Basic code description:
Input: An ascii table with entries
on a uniform grid of (x,y) values
and a (x,y) value for which an
interpolated value is sought
Output: The interpolated value
corresponding to that (x,y) pair
**** Detailed description:
>> Step 1. Instantiation:
The class constructor calls a method
to parse an ascii text file which contains
an (N+1)x(M+1) table:
The first row contains the M values of x.
The first column contains the N values of y.
The remainder of the table contains the NxM
values of z.
Here is a simple example of an input file:
4 1e0 1e1 1e2 1e3
1e4 1e-20 2e-20 3e-20 4e-20
1e5 5e-20 6e-20 7e-20 8e-20
1e6 1e-19 2e-19 3e-19 4e-19
1e7 5e-19 6e-19 7e-19 8e-19
1e8 1e-18 2e-18 3e-18 4e-18
In this example, N = 5 and M = 4.
M must be specified as the first element.
The remaining elements in row 1 are values
of the photoionization parameter, xi.
The left column below M are the temperature
values. The remaining elements are net heating
rates that are functions of T and xi.
By default, the input file is limited to size
1000 x 1000. To increase this size limit,
increase Nmax in the default constructor.
>> Step 2. Table initialization:
The file parsed above is read into a
table, but this is not the actual table
used. The actual table is made upon calling
an initialization function, and optionally
array arguments can be passed specifying the
min/max of both T and xi. This may be useful
when combining multiple overlapping tables
and will allow for flexibility if tables
generated from non-uniform grids are needed,
although a table-search algorithm has not
been implemented. The input table is then
reduced to a size that contains only the
values between these limits.
>> Step 3. Invocation:
The code returns the interpolated value
of whatever rate was in the input file.
The code uses the bicubic and bilinear
interpolation algorithms from the GSL
library. By default, bicubic is used.
Importantly, the code assumes that a
nearly uniform grid was produced via
x = j*(xmax-xmin)/(M-1) + xmin
y = i*(ymax-ymin)/(N-1) + ymin
This allows avoiding an expensive
table search for the lookup operation.
For a uniform grid generated from the
above formulas, the table indices
for a desired value of (x,y) are
i_x = (M-1)*(x - xmin)/(xmax - xmin)
i_y = (N-1)*(y - ymin)/(ymax - ymin)
Since xstar may slightly modify the
grid locations (x,y) originally
specified, our lookup operation
consists of first guessing that the
lookup value indices are (i_x,i_y)
and then allowing for some wiggle
room by searching 1 index left, right,
above, and below upon finding that our
guess was incorrect. The code will
terminate if this search fails.
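     A sketch of that guess-and-check logic
     (the real code lives in the private method
     determine_box_indices, declared below but
     whose body is not shown here):
       int i_x = (int)((M-1)*(x - x_min)/(x_max - x_min));
       if (x < xvals[i_x] || x > xvals[i_x+1]) {
         if (i_x > 0 && x < xvals[i_x]) i_x--;
         else if (i_x < M-2) i_x++;
         /* if still not bracketed, give up */
       }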
**** Example usages:
1. Bicubic interpolation on a table that was
generated from a log-log spaced grid
VEGAS_LUT hc_xstar("input_table.tab","bicubic");
hc_xstar.initialize_table();
hc_rate = hc_xstar.get_rate(T,xi);
1a. If lin-lin spacing was used instead, use
VEGAS_LUT hc_xstar("input_table.tab","bicubic",false,false);
1b. To apply a bounding box, use instead
const double T_bbox[2] = {1e5,1e7};
const double xi_bbox[2] = {1e1,5e2};
hc_xstar.initialize_table(T_bbox,xi_bbox);
*/
#ifndef H_VEGAS_TABLE
#define H_VEGAS_TABLE
#include <iostream> // std::cout, std::endl
#include <iomanip> // std::setprecision
#include <fstream> // std::ifstream
#include <string>
#include <vector>
#include <time.h>
#include <cmath>
#include <gsl/gsl_interp2d.h>
#include <gsl/gsl_spline2d.h>
class VEGAS_LUT
{
public:
//default constructor will parse the input file
//log-log spacing for (T,xi) is assumed by default
VEGAS_LUT( const std::string a="VEGAS_table.tab",
const std::string b="bicubic",
bool c=true, bool d=true):
filename(a),gsl_routine(b),logT(c),logxi(d)
{
// allocate a (Nmax x Nmax) table for reading in the file
int Nmax = 1000;
input_table = new double*[Nmax];
for (int i = 0; i < Nmax; i++)
input_table[i] = new double[Nmax];
xivals = new double[Nmax];
Tvals = new double[Nmax];
gsl_init();
parse_file();
}
~VEGAS_LUT() { free_memory(); }
//user must call the initialize_table method;
//bounding-box args are optional (NULL selects the full input table)
void initialize_table(const double* = NULL, const double* = NULL);
//user function that returns an interpolated value
double get_rate(double, double);
// actual table (T,xi) bounds:
// hydro code needs to know Temp bounds when doing implicit root finding
double y_min,y_max,x_min,x_max;
private:
bool logT,logxi; // determines log gridding
double* xlim; // input table xi bounds
double* ylim; // input table T bounds
const double* T_bounds; // user specified T bounds
const double* xi_bounds; // user specified xi bounds
double** input_table; // pointer to input table
double** table; // pointer to actual table
double* xivals; // xi-vals from input table
double* Tvals; // T-vals from input table
double* xvals; // xi-vals for actual table
double* yvals; // T-vals for actual table
unsigned int NT, // # rows of input table (# of Ts)
Nxi, // # cols of input table (# of xis)
N, // # rows of actual table (# of Ts)
M, // # cols of actual table (# of xis)
i_T, // T-offset between the input and actual tables
i_xi; // xi-offset between the input and actual tables
typedef std::vector<double> array_t; //dynamic array data type
std::string filename, gsl_routine; //input strings
//member functions for handling the input file
void parse_file();
bool read_value(std::ifstream&, const int, array_t&);
void free_input_table();
void free_memory();
//other member functions
void reduce_input_table();
void make_table(unsigned int, unsigned int);
void determine_box_indices(double, double, unsigned int&, unsigned int&);
//GSL variables and member functions
bool use_gsl_bicubic;
const gsl_interp2d_type *gsl_scheme;
typedef gsl_spline2d* spline_t;
spline_t **spline_table;
gsl_interp_accel *xacc, *yacc;
size_t nx,ny;
void gsl_init();
void make_spline_table();
};
void VEGAS_LUT::gsl_init()
{
if (gsl_routine == "bilinear")
{
gsl_scheme = gsl_interp2d_bilinear;
use_gsl_bicubic = false;
nx = 2;
ny = 2;
}
else if (gsl_routine == "bicubic")
{
gsl_scheme = gsl_interp2d_bicubic;
use_gsl_bicubic = true;
nx = 4;
ny = 4;
}
else
{
std::cout << "ERROR: GSL interpolation scheme " << gsl_routine
<< " not recognized. Choose from:\n"
<< "bilinear\n"
<< "bicubic\n"
<< std::endl;
exit(0);
}
}
bool VEGAS_LUT::read_value(std::ifstream& fin, const int j, array_t& input_data)
{
double value;
if (fin >> value)
{
input_data[j]=value;
return true;
}
else return false;
}
void VEGAS_LUT::parse_file()
{
std::ifstream fin(filename.c_str());
if (!fin)
{
std::cout << "File " << filename << " does not exist!" << std::endl;
exit(0);
}
bool file_is_open = false;
bool end_of_line = false;
// read in first value: Nxi
array_t val1(1);
file_is_open = read_value(fin,0,val1);
Nxi = (unsigned int) val1[0];
// read in remainder of first row: all xi-values
array_t this_row(Nxi+1);
int i=1,j;
while(!end_of_line)
{
file_is_open = read_value(fin,i,this_row);
xivals[i-1] = this_row[i];
if( i!=0 && i%Nxi == 0 )
end_of_line = true;
else i++; //next row element
}
// read in 1st element of 2nd row: the 1st T value
file_is_open = read_value(fin,0,this_row);
Tvals[0] = this_row[0];
// read in the rest of the file
end_of_line = false;
i = 1; j=0;
while(file_is_open)
{
file_is_open = read_value(fin,i,this_row);
if( i!=0 && i%Nxi == 0 )
{
end_of_line = true;
i=0; // j++; //reset column, increment row
}
else i++; //next row element
if(end_of_line)
{
// 1st element of this_row is T
Tvals[j] = this_row[0];
// remaining elements are rates
for (int ii=0; ii<Nxi; ii++)
//std::cout << "this_row[" << ii << "] = " << this_row[ii] << std::endl;
input_table[j][ii] = this_row[ii+1];
j++;
}
end_of_line = false;
}
NT = j; //number of temperature values
fin.close();
if (Nxi < 2 || NT < 2)
{
std::cout << "ERROR: Table dimensions must be at least 2x2!" << std::endl;
exit(0);
}
}
void VEGAS_LUT::make_table(unsigned int i0, unsigned int j0)
{
// allocate memory
table = new double*[N];
for (int j = 0; j < N; j++)
table[j] = new double[M];
// assign values
for (int j = 0; j < N; j++)
for (int i = 0; i < M; i++)
table[j][i] = input_table[j0+j][i0+i];
}
void VEGAS_LUT::make_spline_table()
{
// allocate memory for spline_table
spline_table = new spline_t*[N];
for (int j = 0; j < N; j++)
spline_table[j] = new spline_t[M];
/* Each location in our table is to be thought of as a 4-corner box.
* The corners have locations (xb[], yb[]) and function values (zb[])
* and this data is collected ahead of time so that GSL only needs to
* access it to carry out the interpolation. For the case of bicubic
* interpolation, GSL will also take numerical derivatives and store
* all that data, so a lot of expense is saved by collecting this at
* initialization. */
double *xb = new double[nx];
double *yb = new double[ny];
double *zb = new double[nx*ny];
/* Associations:
T = y(j), with j from (0,N-1)
xi = x(i), with i from (0,M-1)
*/
for (int j=1; j < (N-2); j++)
for (int i=1; i < (M-2); i++)
{
// allocate storage for interpolation data
gsl_spline2d *spline = gsl_spline2d_alloc(gsl_scheme, nx, ny);
// evaluate position data (xb[],yb[]) and store function values (zb[]) to spline
      for (int jb=0; jb < ny; jb++)
      for (int ib=0; ib < nx; ib++)
{
if (use_gsl_bicubic)
{
xb[ib] = xvals[i+ib-1];
yb[jb] = yvals[j+jb-1];
gsl_spline2d_set(spline, zb, ib, jb, table[j+jb-1][i+ib-1]);
}
else
{
xb[ib] = xvals[i+ib];
yb[jb] = yvals[j+jb];
gsl_spline2d_set(spline, zb, ib, jb, table[j+jb][i+ib]);
}
}
/* add position data
* In the case of bicubic, this also internally calculates
* derivative data and stores that as well. */
gsl_spline2d_init(spline, xb, yb, zb, nx, ny);
// store this interpolation data in our spline table
spline_table[j][i] = spline;
} //end loop
/* The above loop excludes all boxes on table edges, which always require
* doing bilinear interpolation, so add that data now */
int j_edges[2] = {0, N-2};
int i_edges[2] = {0, M-2};
int i,j;
for (int jj=0; jj < 2; jj++)
for (int i=0; i < (M-1); i++)
{
j = j_edges[jj];
gsl_spline2d *spline = gsl_spline2d_alloc(gsl_interp2d_bilinear, 2, 2);
// evaluate position data (xb[],yb[]) and store function values (zb[]) to spline
for (int jb=0; jb < 2; jb++)
for (int ib=0; ib < 2; ib++)
{
xb[ib] = xvals[i+ib];
yb[jb] = yvals[j+jb];
gsl_spline2d_set(spline, zb, ib, jb, table[j+jb][i+ib]);
}
/* add position data
* In the case of bicubic, this also internally calculates
* derivative data and stores that as well. */
gsl_spline2d_init(spline, xb, yb, zb, 2, 2);
// store this interpolation data in our spline table
//std::cout<< "(i,j) = (" << i << ", " << j << ")" << std::endl;
spline_table[j][i] = spline;
}
for (int j=0; j < (N-1); j++)
for (int ii=0; ii < 2; ii++)
{
i = i_edges[ii];
gsl_spline2d *spline = gsl_spline2d_alloc(gsl_interp2d_bilinear, 2, 2);
// evaluate position data (xb[],yb[]) and store function values (zb[]) to spline
for (int jb=0; jb < 2; jb++)
for (int ib=0; ib < 2; ib++)
{
xb[ib] = xvals[i+ib];
yb[jb] = yvals[j+jb];
gsl_spline2d_set(spline, zb, ib, jb, table[j+jb][i+ib]);
}
/* add position data
* In the case of bicubic, this also internally calculates
* derivative data and stores that as well. */
gsl_spline2d_init(spline, xb, yb, zb, 2, 2);
// store this interpolation data in our spline table
//std::cout<< "(i,j) = (" << i << ", " << j << ")" << std::endl;
spline_table[j][i] = spline;
}
// These I think are used when a lookup table must be searched
// but since we know the lookup locations there is no point
xacc = NULL; //gsl_interp_accel_alloc();
yacc = NULL; //gsl_interp_accel_alloc();
}
void VEGAS_LUT::free_memory()
{
for (int i = 0; i < N; i++)
{
delete[] table[i];
delete[] spline_table[i];
}
//gsl_interp_accel_free(xacc);
//gsl_interp_accel_free(yacc);
}
void VEGAS_LUT::free_input_table()
{
for (int i = 0; i < N; i++)
delete[] input_table[i];
}
void VEGAS_LUT::reduce_input_table()
{
double Tmin,Tmax,ximin,ximax;
Tmin = T_bounds[0];
Tmax = T_bounds[1];
ximin = xi_bounds[0];
ximax = xi_bounds[1];
if (Tmin < Tvals[0] || Tmax > Tvals[NT-1])
{
std::cout << "ERROR: Tmin or Tmax is not contained within file "
<< filename << "!"
<< std::endl;
exit(0);
}
if (ximin < xivals[0] || ximax > xivals[Nxi-1])
{
std::cout << "ERROR: x_min or x_max is not contained within file "
<< filename << "!"
<< std::endl;
exit(0);
}
// find the indices of the table corresponding to input min/max values
int i = 0;
  while (i < NT && Tvals[i] < Tmax)
i++;
y_max = Tvals[i]; //so y_max is slightly greater than Tmax
int ii = 0;
  while (ii < i && Tvals[ii] <= Tmin)
ii++;
y_min = Tvals[ii-1]; //so y_min is slightly less than Tmin
// assign private variables
i_T = ii-1; // start position in input table
N = i - ii + 2; // # of T-vals in desired table
i = 0;
  while (i < Nxi && xivals[i] < ximax)
i++;
x_max = xivals[i]; //so x_max is slightly greater than ximax
ii = 0;
  while (ii < i && xivals[ii] <= ximin)
ii++;
x_min = xivals[ii-1]; //so x_min is slightly less than ximin
// assign private variables
i_xi = ii-1; // start position in input table
M = i - ii + 2; // # of xi-vals in desired table
}
void VEGAS_LUT::initialize_table(const double * T_bbox = NULL, const double * xi_bbox = NULL)
{
int i0,j0;
if (T_bbox != NULL || xi_bbox != NULL)
{
T_bounds = T_bbox;
xi_bounds = xi_bbox;
    reduce_input_table(); // this sets i_xi, i_T, N, M, y_min, y_max, x_min, x_max
i0 = i_xi;
j0 = i_T;
}
else // the actual table will be the same size as the input table
{
y_min = Tvals[0];
y_max = Tvals[NT-1];
x_min = xivals[0];
x_max = xivals[Nxi-1];
N = NT;
M = Nxi;
i0 = 0;
j0 = 0;
}
// store the values of xi and T that will be used
  // the way they will be used (i.e. log or lin)
xvals = new double[M];
yvals = new double[N];
double x,y;
for (int i=0; i<M; i++)
{
if(logxi) x = log10(xivals[i0+i]);
else x = xivals[i0+i];
xvals[i] = x;
}
for (int j=0; j<N; j++)
{
if (logT) y = log10(Tvals[j0+j]);
else y = Tvals[j0+j];
yvals[j] = y;
}
// generate the actual table of H/C rates
make_table(i0,j0);
// alloc storage for max/min vals of T,xi
xlim = new double[2]; // xi limits
ylim = new double[2]; // T limits
// set any log gridding: use limits of input table
if (logT) {
ylim[0] = log10(Tvals[0]);
ylim[1] = log10(Tvals[NT-1]); }
else {
ylim[0] = Tvals[0];
ylim[1] = Tvals[NT-1]; }
if (logxi) {
xlim[0] = log10(xivals[0]);
xlim[1] = log10(xivals[Nxi-1]); }
else {
xlim[0] = xivals[0];
xlim[1] = xivals[Nxi-1]; }
// generate the spline table for GSL
make_spline_table();
// delete input_table
free_input_table();
}
void VEGAS_LUT::determine_box_indices(double x, double y, unsigned int &i_x, unsigned int &i_y)
{
unsigned int i,j;
bool correct_guess = false;
/* First guess the indices assuming the input table was
* generated from a uniform grid: the actual xi and T values
* that xstar produces data for may be slightly different
* as xstar's stopping criteria algorithms may slightly adjust
* the input values. On rare occasions this guess can be off
* by 1 index, so here we allow for that. */
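  /* (A minimal numeric illustration, with assumed values: for a table with
   * xlim = {0,4}, Nxi = 5, and i_xi = 0, an input x = 2.1 gives
   * i = (unsigned int)(4*2.1/4) = 2, i.e. the box [xvals[2], xvals[3]].) */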
i = (Nxi-1) * (x - xlim[0]) / (xlim[1] - xlim[0]) - i_xi;
j = (NT-1) * (y - ylim[0]) / (ylim[1] - ylim[0]) - i_T ;
/* now we check our guess in x and if needed, adjust one box left/right */
if (x > xvals[i] && x < xvals[i+1]) //then we guessed right
{
i_x = i;
correct_guess = true;
}
if (!correct_guess)
{
if (i==0 || i==(M-1))
{
if (i==0 && x < xvals[2])
i_x = 1;
else if (i==(M-1) && x > xvals[M-2])
i_x = M-2;
}
else if (x < xvals[i]) //then move to the left 1
i_x = i-1;
else if (x > xvals[i+1]) //then move to the right 1
i_x = i+1;
else // we give up
{
std::cout << "ERROR: search for the table xi-index failed!"
<< "\nDouble check inputs or try debugging "
<< "function determine_box_indices()"
<< std::endl;
exit(0);
}
}
/* now we check our guess in y and if needed, adjust one box up/down */
correct_guess = false; // reset
if (y > yvals[j] && y < yvals[j+1]) //then we guessed right
{
i_y = j;
correct_guess = true;
}
if (!correct_guess)
{
if (j==0 || j==(N-1))
{
if (j==0 && y < yvals[2])
i_y = 1;
else if (j==(N-1) && y > yvals[N-2])
i_y = N-2;
}
else if (y < yvals[j]) //then move down 1
i_y = j-1;
else if (y > yvals[j+1]) //then move up 1
i_y = j+1;
else // we give up
{
std::cout << "ERROR: search for the table T-index failed!"
<< "\nDouble check inputs or try debugging "
<< "function determine_box_indices()"
<< std::endl;
exit(0);
}
}
}
double VEGAS_LUT::get_rate(double y_val, double x_val)
{
unsigned int i_x,i_y;
double x,y;
/* terminate program if values exceed bounding box
if (y_val < y_min || y_val > y_max || x_val < x_min || x_val > x_max)
{
std::cout
<< "\nFATAL ERROR: (y_val,x_val) = (" << y_val << ", " << x_val << ") "
<< "lies outside of table's bounding box!\n"
<< ">> Bounding Box:\n"
<< "(y_min,y_max) = (" << y_min << ", " << y_max << ")\n"
<< "(x_min,x_max) = (" << x_min << ", " << x_max << ")\n"
<< std::endl;
exit(0);
}
*/
/* use boundary rates for values outside bounding box */
double eps = 1e-10;
if (y_val < y_min) y_val = y_min*(1. + eps);
if (y_val > y_max) y_val = y_max*(1. - eps);
if (x_val < x_min) x_val = x_min*(1. + eps);
if (x_val > x_max) x_val = x_max*(1. - eps);
// apply any log scaling
if (logT) y = log10(y_val);
else y = y_val;
if (logxi) x = log10(x_val);
else x = x_val;
determine_box_indices(x,y,i_x,i_y);
return gsl_spline2d_eval(spline_table[i_y][i_x], x, y, xacc, yacc);
}
#endif //H_VEGAS_TABLE
|
[STATEMENT]
lemma SubstTermP_subst [simp]:
"(SubstTermP v i t u)(j::=w) = SubstTermP (subst j w v) (subst j w i) (subst j w t) (subst j w u)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (SubstTermP v i t u)(j::=w) = SubstTermP (subst j w v) (subst j w i) (subst j w t) (subst j w u)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (SubstTermP v i t u)(j::=w) = SubstTermP (subst j w v) (subst j w i) (subst j w t) (subst j w u)
[PROOF STEP]
obtain s::name and k::name
where "atom s \<sharp> (v,i,t,u,w,j,k)" "atom k \<sharp> (v,i,t,u,w,j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>s k. \<lbrakk>atom s \<sharp> (v, i, t, u, w, j, k); atom k \<sharp> (v, i, t, u, w, j)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis obtain_fresh)
[PROOF STATE]
proof (state)
this:
atom s \<sharp> (v, i, t, u, w, j, k)
atom k \<sharp> (v, i, t, u, w, j)
goal (1 subgoal):
1. (SubstTermP v i t u)(j::=w) = SubstTermP (subst j w v) (subst j w i) (subst j w t) (subst j w u)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
atom s \<sharp> (v, i, t, u, w, j, k)
atom k \<sharp> (v, i, t, u, w, j)
goal (1 subgoal):
1. (SubstTermP v i t u)(j::=w) = SubstTermP (subst j w v) (subst j w i) (subst j w t) (subst j w u)
[PROOF STEP]
by (simp add: SubstTermP.simps [of s _ _ _ _ k])
[PROOF STATE]
proof (state)
this:
(SubstTermP v i t u)(j::=w) = SubstTermP (subst j w v) (subst j w i) (subst j w t) (subst j w u)
goal:
No subgoals!
[PROOF STEP]
qed |
(* Title: Map Function on Two Parallel Lists
Author: Anders Schlichtkrull <andschl at dtu.dk>, 2017
Maintainer: Anders Schlichtkrull <andschl at dtu.dk>
*)
section \<open>Map Function on Two Parallel Lists\<close>
theory Map2
imports Main
begin
text \<open>
This theory defines a map function that applies a (curried) binary function elementwise to two
parallel lists.
The definition is taken from @{url "https://www.isa-afp.org/browser_info/current/AFP/Jinja/Listn.html"}.
\<close>
abbreviation map2 :: "('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> 'a list \<Rightarrow> 'b list \<Rightarrow> 'c list" where
"map2 f xs ys \<equiv> map (case_prod f) (zip xs ys)"
lemma map2_empty_iff[simp]: "map2 f xs ys = [] \<longleftrightarrow> xs = [] \<or> ys = []"
by (metis Nil_is_map_conv list.exhaust list.simps(3) zip.simps(1) zip_Cons_Cons zip_Nil)
lemma image_map2: "length t = length s \<Longrightarrow> g ` set (map2 f t s) = set (map2 (\<lambda>a b. g (f a b)) t s)"
by auto
lemma map2_tl: "length t = length s \<Longrightarrow> map2 f (tl t) (tl s) = tl (map2 f t s)"
by (metis (no_types, lifting) hd_Cons_tl list.sel(3) map2_empty_iff map_tl tl_Nil zip_Cons_Cons)
lemma map_zip_assoc:
"map f (zip (zip xs ys) zs) = map (\<lambda>(x, y, z). f ((x, y), z)) (zip xs (zip ys zs))"
by (induct zs arbitrary: xs ys) (auto simp add: zip.simps(2) split: list.splits)
lemma set_map2_ex:
assumes "length t = length s"
shows "set (map2 f s t) = {x. \<exists>i < length t. x = f (s ! i) (t ! i)}"
proof (rule; rule)
fix x
assume "x \<in> set (map2 f s t)"
then obtain i where i_p: "i < length (map2 f s t) \<and> x = map2 f s t ! i"
by (metis in_set_conv_nth)
from i_p have "i < length t"
by auto
moreover from this i_p have "x = f (s ! i) (t ! i)"
using assms by auto
ultimately show "x \<in> {x. \<exists>i < length t. x = f (s ! i) (t ! i)}"
using assms by auto
next
fix x
assume "x \<in> {x. \<exists>i < length t. x = f (s ! i) (t ! i)}"
then obtain i where i_p: "i < length t \<and> x = f (s ! i) (t ! i)"
by auto
then have "i < length (map2 f s t)"
using assms by auto
moreover from i_p have "x = map2 f s t ! i"
using assms by auto
ultimately show "x \<in> set (map2 f s t)"
by (metis in_set_conv_nth)
qed
end
|
Require Import AutoSep Malloc.
(* Two definitions based on hiding functions inside a new datatype, to avoid confusing our reification tactics *)
Inductive fn := Fn (f : W -> W).
Definition app (f : fn) (x : W) := let (f) := f in f x.
(* What does it mean for a program counter to implement a mathematical function? *)
Definition goodMemo (f : fn) (pc : W) : HProp := fun s m =>
(ExX : settings * state, Cptr pc #0
/\ ExX : settings * smem, #0 (s, m)
/\ Al st : settings * state, AlX : settings * smem, AlX : settings * state,
(Ex vs, Cptr st#Rp #0
/\ ![ ^[locals ("rp" :: "x" :: nil) vs 0 st#Sp] * #1 * #2 ] st
/\ Al st' : state,
([| Regs st' Sp = st#Sp /\ Regs st' Rv = app f (sel vs "x") |]
/\ Ex vs', ![ ^[locals ("rp" :: "x" :: nil) vs' 0 st#Sp] * #1 * #2 ] (fst st, st'))
---> #0 (fst st, st'))
---> #3 st)%PropX.
Module Type MEMO.
Parameter memo : fn -> W -> HProp.
(* Arguments: mathematical function that is implemented, and pointer to private data *)
Axiom memo_fwd : forall f p,
memo f p ===> Ex pc, Ex lastIn, Ex lastOut, (p ==*> pc, lastIn, lastOut) * [| lastOut = app f lastIn |] * goodMemo f pc.
Axiom memo_bwd : forall f p,
(Ex pc, Ex lastIn, Ex lastOut, (p ==*> pc, lastIn, lastOut) * [| lastOut = app f lastIn|] * goodMemo f pc) ===> memo f p.
End MEMO.
Module Memo : MEMO.
Definition memo (f : fn) (p : W) : HProp :=
(Ex pc, Ex lastIn, Ex lastOut, (p ==*> pc, lastIn, lastOut) * [| lastOut = app f lastIn |] * goodMemo f pc)%Sep.
Theorem memo_fwd : forall (f : fn) p,
memo f p ===> Ex pc, Ex lastIn, Ex lastOut, (p ==*> pc, lastIn, lastOut) * [| lastOut = app f lastIn |] * goodMemo f pc.
unfold memo; sepLemma.
Qed.
Theorem memo_bwd : forall f p,
(Ex pc, Ex lastIn, Ex lastOut, (p ==*> pc, lastIn, lastOut) * [| lastOut = app f lastIn|] * goodMemo f pc) ===> memo f p.
unfold memo; sepLemma.
Qed.
End Memo.
Import Memo.
Export Memo.
Definition hints : TacPackage.
prepare memo_fwd memo_bwd.
Defined.
Definition initS : spec := SPEC("f", "in", "out") reserving 7
Al f,
PRE[V] goodMemo f (V "f") * [| V "out" = app f (V "in") |] * mallocHeap
POST[R] memo f R * mallocHeap.
Definition callS : spec := SPEC("m", "x") reserving 4
Al f,
PRE[V] memo f (V "m")
POST[R] [| R = app f (V "x") |] * memo f (V "m").
Definition memoizeM := bimport [[ "malloc"!"malloc" @ [mallocS], "malloc"!"free" @ [freeS] ]]
bmodule "memoize" {{
bfunction "init"("f", "in", "out", "r") [initS]
"r" <-- Call "malloc"!"malloc"(1)
[PRE[V, R] R =?> 3
POST[R'] [| R' = R |] * R ==*> V "f", V "in", V "out" ];;
"r" *<- "f";;
"r" <- "r" + 4;;
"r" *<- "in";;
"r" <- "r" + 4;;
"r" *<- "out";;
Return "r" - 8
end with bfunction "call"("m", "x", "tmp", "tmp2") [callS]
"tmp" <-* "m" + 4;;
If ("x" = "tmp") {
(* We're in luck! This call is cached. *)
"tmp" <-* "m" + 8;;
Return "tmp"
} else {
(* This is a different argument from last time. Call the function again. *)
"tmp" <-* "m";;
"tmp" <-- ICall "tmp"("x")
[Al f,
PRE[V, R] [| R = app f (V "x") |] * memo f (V "m")
POST[R'] [| R' = R |] * memo f (V "m") ];;
"tmp2" <- "m" + 4;;
"tmp2" *<- "x";;
"tmp2" <- "m" + 8;;
"tmp2" *<- "tmp";;
Return "tmp"
}
end
}}.
Hint Extern 1 (@eq W _ _) =>
match goal with
| [ |- context[app] ] => fail 1
| _ => words
end.
Hint Extern 1 (interp ?specs (?U ?x ?y)) =>
match goal with
| [ H : interp ?specs (?f (?x, ?y)) |- _ ] =>
equate U (fun x y => f (x, y)); exact H
end.
Lemma goodMemo_elim : forall specs f pc P st,
interp specs (![ goodMemo f pc * P ] st)
-> exists pre, specs pc = Some pre
/\ exists inv, interp specs (![ inv * P ] st)
/\ forall st fr rpre,
interp specs ((Ex vs, Cptr st#Rp (fun x => rpre x)
/\ ![ ^[locals ("rp" :: "x" :: nil) vs 0 st#Sp] * fr * inv ] st
/\ Al st' : state,
([| Regs st' Sp = st#Sp /\ Regs st' Rv = app f (sel vs "x") |]
/\ Ex vs', ![ ^[locals ("rp" :: "x" :: nil) vs' 0 st#Sp] * fr * inv ] (fst st, st'))
---> rpre (fst st, st'))
---> pre st)%PropX.
Local Opaque locals.
rewrite sepFormula_eq; repeat (propxFo; repeat (eauto; esplit)).
specialize (H4 (a, b) (fun a_b => fr (fst a_b) (snd a_b)) rpre).
Local Transparent locals lift.
repeat rewrite sepFormula_eq in *.
assumption.
Local Opaque locals lift.
Qed.
Lemma goodMemo_intro : forall specs pre inv f pc,
specs pc = Some pre
-> (forall (st : ST.settings * state) (fr : hpropB nil)
(rpre : settings * state -> propX W (settings * state) nil),
interp specs
((Ex vs : vals,
Cptr (st) # (Rp) (fun x : settings * state => rpre x) /\
![^[locals ("rp" :: "x" :: nil) vs 0 (st) # (Sp)] * fr * inv] st /\
(Al st' : state,
[| Regs st' Sp = (st) # (Sp) /\
Regs st' Rv = app f (sel vs "x")|] /\
(Ex vs' : vals,
![^[locals ("rp" :: "x" :: nil) vs' 0 (st) # (Sp)] * fr * inv]
(fst st, st')) ---> rpre (fst st, st')))%PropX ---> pre st))
-> himp specs inv (goodMemo f pc).
intros.
unfold goodMemo, himp; propxFo.
imply_simp unf.
imply_simp unf.
imply_simp unf.
eauto.
imply_simp unf.
imply_simp unf.
instantiate (1 := fun p => inv (fst p) (snd p)); apply Imply_refl.
apply Imply_I; apply interp_weaken.
propxFo.
eapply Imply_trans; [ | apply H0 ].
rewrite sepFormula_eq.
instantiate (1 := a1).
instantiate (1 := fun a b => a0 (a, b)).
apply Imply_refl.
Qed.
Lemma switchUp : forall specs P Q R,
himp specs P R
-> himp specs (P * Q)%Sep (Q * R)%Sep.
intros; etransitivity; [ apply himp_star_comm | ]; apply himp_star_frame; auto; reflexivity.
Qed.
Hint Extern 1 (himp _ _ _) =>
apply switchUp; eapply goodMemo_intro; eassumption.
(* Alternate VC post-processor that understands indirect function calls *)
Ltac post :=
PreAutoSep.post;
try ((* This appears to be an indirect function call.
* Put the appropriate marker predicate in [H], to trigger use of a lemma about the
* point-of-view shift from caller to callee. *)
icall ("x" :: nil);
(* Trigger symbolic execution early. *)
evaluate hints;
(* Move [goodMemo] to the front of its hypothesis and eliminate it. *)
match goal with
| [ H : interp _ _ |- _ ] =>
toFront ltac:(fun P => match P with goodMemo _ _ => idtac end) H;
apply goodMemo_elim in H; sep_firstorder
end;
(* Find and apply the hypothesis explaining the spec of the function pointer. *)
match goal with
| [ H : forall x : ST.settings * state, _ |- _ ] =>
eapply Imply_sound; [ apply H | ]
end).
(* Main tactic *)
Ltac sep := post; PreAutoSep.sep hints; auto.
Theorem memoizeMOk : moduleOk memoizeM.
vcgen; abstract sep.
Qed.
|
Require Export DMFP.Day13_induction.
(* ################################################################# *)
(** * Existential Quantification *)
(** Another important logical connective is _existential
quantification_. To say that there is some [x] of type [T] such
that some property [P] holds of [x], we write [exists x : T,
P]. As with [forall], the type annotation [: T] can be omitted if
Coq is able to infer from the context what the type of [x] should
be.
The notion of "existential" is a common one in mathematics, but
you may not be familiar with its precise meaning. It might be
easier to understand [exists] as the "dual" of [forall]. When we
say [forall x : T, P], we mean to say that:
- Given any possible value [v] of type [T],
- the proposition [P] holds when we replace [x] with [v].
    So, for example, when we say [forall n : nat, n + 0 = n], we mean
    that [0 + 0 = 0] (choosing [0] for [n]) and [1 + 0 = 1]
    (choosing [1] for [n]) and [47 + 0 = 47] (choosing [47] for [n]).
When we say that [exists x : T, P], we mean that
- There is at least one value [v] of type [T], such that
- the proposition [P] holds when we replace [x] with [v].
For example, [exists n : nat, S n = 48] means "there is (at least
one) natural number [n] such that the successor of [n] is 48". It
    turns out that [S n = 48] holds for exactly _one_ [n]: 47. But it
need not be so: the proposition [exists l : list nat, length l =
1] means that "there exists a list of naturals [l] such that
[length l] is 1". There are many such lists: [ [1] ], [ [1337] ],
and so on.
*)
(** PROP ::= EXPR1 = EXPR2
| forall x : TYPE, PROP
| PROP1 -> PROP2
| PROP1 /\ PROP2
| PROP1 \/ PROP2
| True
| ~ PROP
| False
| PROP1 <-> PROP2
| exists x : TYPE, PROP <---- NEW!
*)
(** To prove a statement of the form [exists x, P], we must show that
[P] holds for some specific choice of value for [x], known as the
_witness_ of the existential. This is done in two steps: First,
we explicitly tell Coq which witness [t] we have in mind by
invoking the tactic [exists t]. Then we prove that [P] holds after
all occurrences of [x] are replaced by [t]. *)
Lemma four_is_even : exists n : nat, 4 = n + n.
Proof.
(* We want to show 4 is even, so we need to find a number [n] such
that [n + n = 4]. If we try 2 we have [2 + 2 = 4], which seems
to work. *)
exists 2. reflexivity.
Qed.
(** Conversely, if we have an existential hypothesis [exists x, P] in
the context, we can destruct it to obtain a witness [x] and a
hypothesis stating that [P] holds of [x]. *)
Theorem exists_example_2 : forall n,
(exists m, n = 4 + m) ->
(exists o, n = 2 + o).
Proof.
(* WORKED IN CLASS *)
intros n [m Hm]. (* note implicit [destruct] here *)
exists (2 + m).
apply Hm. Qed.
(** **** Exercise: 1 star, standard, especially useful (dist_not_exists)
Prove that "[P] holds for all [x]" implies "there is no [x] for
which [P] does not hold." (Hint: [destruct H as [x E]] works on
existential assumptions!) *)
Theorem dist_not_exists : forall (X:Type) (P : X -> Prop),
(forall x, P x) -> ~ (exists x, ~ P x).
Proof.
intros X P H [x Hx].
apply Hx. apply H.
Qed.
(** [] *)
(** **** Exercise: 1 star, standard, especially useful (dist_not_forall)
Prove that "there exists an [x] for which [P] holds" implies "it
is not the case that for all [x] [P] does not hold." (Hint:
[destruct H as [x E]] works on existential assumptions!) *)
Theorem dist_not_forall : forall (X:Type) (P : X -> Prop),
(exists x, P x) -> ~ (forall x, ~ P x).
Proof.
unfold not.
intros X P [x Hx].
intros H. assert ( Hx' : P x -> False).
-apply H.
-apply Hx'. apply Hx.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (dist_exists_or)
Prove that existential quantification distributes over
disjunction. *)
Theorem dist_exists_or : forall (X:Type) (P Q : X -> Prop),
(exists x, P x \/ Q x) <-> (exists x, P x) \/ (exists x, Q x).
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Propositions and Booleans *)
(** We've seen two different ways of encoding logical facts in Coq:
with _booleans_ (of type [bool]), and with _propositions_ (of type
[Prop]).
For instance, to claim that a number [n] is even, we can say
either
- (1) that [evenb n] returns [true], or
- (2) that there exists some [k] such that [n = double k].
Indeed, these two notions of evenness are equivalent, as
can easily be shown with a couple of auxiliary lemmas.
Of course, it would be very strange if these two characterizations
of evenness did not describe the same set of natural numbers!
Fortunately, we can prove that they do... *)
(** We first need three helper lemmas. *)
(** **** Exercise: 2 stars, standard, optional (evenb_S)
One inconvenient aspect of our definition of [evenb n] is the
recursive call on [n - 2]. This makes proofs about [evenb n]
harder when done by induction on [n], since we may need an
induction hypothesis about [n - 2]. The following lemma gives an
alternative characterization of [evenb (S n)] that works better
with induction: *)
Theorem evenb_S : forall n : nat,
evenb (S n) = negb (evenb n).
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
Theorem evenb_double : forall k, evenb (double k) = true.
Proof.
intros k. induction k as [|k' IHk'].
- reflexivity.
- simpl. apply IHk'.
Qed.
(** **** Exercise: 3 stars, standard (evenb_double_conv) *)
Theorem evenb_double_conv : forall n,
exists k, n = if evenb n then double k
else S (double k).
Proof.
intros n. induction n as [| n' Hn'].
-exists 0. simpl. reflexivity.
-destruct Hn'. rewrite evenb_S. destruct evenb.
+simpl. exists x. rewrite H. reflexivity.
+simpl. exists (S x). simpl. rewrite H. reflexivity.
Qed.
(** [] *)
Theorem even_bool_prop : forall n,
evenb n = true <-> exists k, n = double k.
Proof.
intros n. split.
- intros H. destruct (evenb_double_conv n) as [k Hk].
rewrite Hk. rewrite H. exists k. reflexivity.
- intros [k Hk]. rewrite Hk. apply evenb_double.
Qed.
(** In view of this theorem, we say that the boolean
computation [evenb n] _reflects_ the logical proposition
[exists k, n = double k]. *)
(** However, even when the boolean and propositional formulations of a
claim are equivalent from a purely logical perspective, they need
not be equivalent _operationally_.
Equality provides an extreme example: knowing that [eqb n m =
true] is generally of little direct help in the middle of a proof
involving [n] and [m]; however, if we convert the statement to the
equivalent form [n = m], we can rewrite with it. *)
(** The case of even numbers is also interesting. Recall that,
when proving the backwards direction of [even_bool_prop] (i.e.,
[evenb_double], going from the propositional to the boolean
claim), we used a simple induction on [k]. On the other hand, the
converse (the [evenb_double_conv] exercise) required a clever
generalization, since we can't directly prove [(exists k, n =
double k) -> evenb n = true]. *)
(** For these examples, the propositional claims are more useful than
their boolean counterparts, but this is not always the case. For
instance, we cannot test whether a general proposition is true or
not in a function definition; as a consequence, the following code
fragment is rejected: *)
Fail Definition is_even_prime n :=
if n = 2 then true
else false.
(** Coq complains that [n = 2] has type [Prop], while it expects
an elements of [bool] (or some other inductive type with two
elements). The reason for this error message has to do with the
_computational_ nature of Coq's core language, which is designed
so that every function that it can express is computable and
total. One reason for this is to allow the extraction of
executable programs from Coq developments. As a consequence,
[Prop] in Coq does _not_ have a universal case analysis operation
telling whether any given proposition is true or false, since such
an operation would allow us to write non-computable functions.
Although general non-computable properties cannot be phrased as
boolean computations, it is worth noting that even many
_computable_ properties are easier to express using [Prop] than
[bool], since recursive function definitions are subject to
    significant restrictions in Coq. For instance, later we'll show
    how to define the property that two lists are permutations of
    each other using [Prop]. Doing the same with [bool] would
amount to the [is_permutation_of] function we wrote on day
6... which (as we'll see) is more complicated, harder to
understand, and harder to reason about than the [Prop] we'll
define.
Conversely, an important side benefit of stating facts using
booleans is enabling some proof automation through computation
with Coq terms, a technique known as _proof by reflection_.
Consider the following statement: *)
Example even_1000 : exists k, 1000 = double k.
(** The most direct proof of this fact is to give the value of [k]
explicitly. *)
Proof. exists 500. reflexivity. Qed.
(** On the other hand, the proof of the corresponding boolean
statement is even simpler: *)
Example even_1000' : evenb 1000 = true.
Proof. reflexivity. Qed.
(** What is interesting is that, since the two notions are equivalent,
we can use the boolean formulation to prove the other one without
mentioning the value 500 explicitly: *)
Example even_1000'' : exists k, 1000 = double k.
Proof. apply even_bool_prop. reflexivity. Qed.
(** Although we haven't gained much in terms of proof size in
this case, larger proofs can often be made considerably simpler by
the use of reflection. As an extreme example, the Coq proof of
the famous _4-color theorem_ uses reflection to reduce the
analysis of hundreds of different cases to a boolean computation.
We won't cover reflection in any real detail, but it serves as a
good example showing the complementary strengths of booleans and
general propositions. *)
(** As we go on to prove more interesting (and challenging!) things
about our programs, the various iff lemmas relating computation to
logic will surely come in handy. *)
(* ================================================================= *)
(** ** Classical vs. Constructive Logic *)
(** We have seen that it is not possible to test whether or not a
proposition [P] holds while defining a Coq function. You may be
surprised to learn that a similar restriction applies to _proofs_!
In other words, the following intuitive reasoning principle is not
derivable in Coq: *)
Definition excluded_middle := forall P : Prop,
P \/ ~ P.
(** To understand operationally why this is the case, recall
that, to prove a statement of the form [P \/ Q], we use the [left]
and [right] tactics, which effectively require knowing which side
of the disjunction holds. But the universally quantified [P] in
[excluded_middle] is an _arbitrary_ proposition, which we know
nothing about. We don't have enough information to choose which
of [left] or [right] to apply, just as Coq doesn't have enough
information to mechanically decide whether [P] holds or not inside
a function. *)
(** However, if we happen to know that [P] is reflected in some
boolean term [b], then knowing whether it holds or not is trivial:
we just have to check the value of [b]. *)
Theorem restricted_excluded_middle : forall P b,
(P <-> b = true) -> P \/ ~ P.
Proof.
(* Let a proposition P and boolean b be given.
Assume that P holds if and only if [b = true].
We must show [P \/ ~ P].
*)
intros P [] H.
(* We go by cases on [b].
If b is true, by our assumption we know P holds, so we have [P].
Otherwise, we likewise know by our assumption that P does not
hold, so we have [~P]. *)
- left. rewrite H. reflexivity.
- right. rewrite H. intros contra. discriminate contra.
Qed.
(** In particular, the excluded middle is valid for equations [n = m],
between natural numbers [n] and [m]. *)
Theorem restricted_excluded_middle_eq : forall (n m : nat),
(forall n m, n = m <-> eqb n m = true) -> (* we'll prove this on day 15 *)
n = m \/ n <> m.
Proof.
intros n m eqb_true_iff.
apply (restricted_excluded_middle (n = m) (eqb n m)).
apply eqb_true_iff.
Qed.
(** It may seem strange that the general excluded middle is not
available by default in Coq; after all, any given claim must be
either true or false. Nonetheless, there is an advantage in not
assuming the excluded middle: statements in Coq can make stronger
claims than the analogous statements in standard mathematics.
Notably, if there is a Coq proof of [exists x, P x], it is
possible to explicitly exhibit a value of [x] for which we can
prove [P x] -- in other words, every proof of existence is
necessarily _constructive_. *)
(** Logics like Coq's, which do not assume the excluded middle, are
referred to as _constructive logics_.
More conventional logical systems such as ZFC, in which the
excluded middle does hold for arbitrary propositions, are referred
to as _classical_. *)
(** The following example illustrates why assuming the excluded middle
may lead to non-constructive proofs:
_Claim_: There exist irrational numbers [a] and [b] such that [a ^
b] is rational.
_Proof_: It is not difficult to show that [sqrt 2] is irrational.
If [sqrt 2 ^ sqrt 2] is rational, it suffices to take [a = b =
sqrt 2] and we are done. Otherwise, [sqrt 2 ^ sqrt 2] is
irrational. In this case, we can take [a = sqrt 2 ^ sqrt 2] and
[b = sqrt 2], since [a ^ b = sqrt 2 ^ (sqrt 2 * sqrt 2) = sqrt 2 ^
2 = 2]. []
Do you see what happened here? We used the excluded middle to
consider separately the cases where [sqrt 2 ^ sqrt 2] is rational
and where it is not, without knowing which one actually holds!
Because of that, we wind up knowing that such [a] and [b] exist but
we cannot determine what their actual values are (at least, using
this line of argument).
As useful as constructive logic is, it does have its limitations:
There are many statements that can easily be proven in classical
logic but that have much more complicated constructive proofs, and
there are some that are known to have no constructive proof at all!
Fortunately, the excluded middle is known to be compatible with
Coq's logic, allowing us to add it safely as an axiom. However, we
will not need to do so in this book: the results that we cover can
be developed entirely within constructive logic at negligible extra
cost.
It takes some practice to understand which proof techniques must be
avoided in constructive reasoning, but arguments by contradiction,
in particular, are infamous for leading to non-constructive proofs.
Here's a typical example: suppose that we want to show that there
exists [x] with some property [P], i.e., such that [P x]. We start
by assuming that our conclusion is false; that is, [~ exists x, P
x]. From this premise, it is not hard to derive [forall x, ~ P x].
If we manage to show that this intermediate fact results in a
contradiction, we arrive at an existence proof without ever
exhibiting a value of [x] for which [P x] holds!
The technical flaw here, from a constructive standpoint, is that we
claimed to prove [exists x, P x] using a proof of [~ ~ (exists x, P
x)]. Allowing ourselves to remove double negations from arbitrary
statements is equivalent to assuming the excluded middle. Thus,
this line of reasoning cannot be encoded in Coq without assuming
additional axioms. *)
(** **** Exercise: 2 stars, standard (forall_exists)
In classical logic, [forall] and [exists] are _dual_: negating one
gets you to the other. We saw some of this duality with
[dist_not_exists] and [dist_not_forall].
Only one of the following lemmas is provable in constructive
logic. Which? Why isn't the other provable? *)
Lemma not_exists__forall_not : forall {X : Type} (P : X -> Prop),
(~ exists x, P x) -> forall x, ~ P x.
Proof.
intros X P H x H1. destruct H.
exists x. apply H1.
Qed.
Lemma not_forall__exists_not : forall {X : Type} (P : X -> Prop),
(~ forall x, P x) -> exists x, ~ P x.
Proof.
unfold not.
intros X P H. destruct H.
intros x.
Abort.
(** In addition to doing one of these proofs, please briefly explain
why the other doesn't work. *)
(* [not_forall__exists_not] is the one that does not work. To prove it we would have to produce a witness [x] with [~ P x], but the hypothesis [~ forall x, P x] gives us no way to construct one: the existential sits in the goal rather than in a hypothesis. After destructing the negation we are left proving [forall x, P x], and once we [intros x] we must show [P x] for an arbitrary [x], which is exactly what we cannot do. In [not_exists__forall_not], by contrast, the existential appears in a hypothesis, so [destruct] hands us the witness directly. *)
(** [] *)
(* 2021-10-04 14:37 *)
|
<center>
</center>
# Reading Files Python
Estimated time needed: **40** minutes
## Objectives
After completing this lab you will be able to:
- Read text files using Python libraries
<h2>Table of Contents</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ul>
<li><a href="download">Download Data</a></li>
<li><a href="read">Reading Text Files</a></li>
<li><a href="better">A Better Way to Open a File</a></li>
</ul>
</div>
<hr>
<h2 id="download">Download Data</h2>
```python
import urllib.request
url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt'
filename = 'Example1.txt'
urllib.request.urlretrieve(url, filename)
```
('Example1.txt', <http.client.HTTPMessage at 0x7fc204142c18>)
```python
# Download Example file
!wget -O /resources/data/Example1.txt https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt
```
--2021-01-29 11:39:14-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt
Resolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.196
Connecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.196|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 45 [text/plain]
Saving to: ‘/resources/data/Example1.txt’
/resources/data/Exa 100%[===================>] 45 --.-KB/s in 0s
2021-01-29 11:39:14 (11.6 MB/s) - ‘/resources/data/Example1.txt’ saved [45/45]
<hr>
<h2 id="read">Reading Text Files</h2>
One way to read or write a file in Python is to use the built-in <code>open</code> function. The <code>open</code> function provides a <b>File object</b> that contains the methods and attributes you need in order to read, save, and manipulate the file. In this notebook, we will only cover <b>.txt</b> files. The first parameter you need is the file path and the file name. An example is shown as follows:
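
```python
# Open Example1.txt in read mode (the same call pattern is used in the cells below)
file1 = open("Example1.txt", "r")
```
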
The mode argument is optional and the default value is <b>r</b>. In this notebook we only cover two modes:
<ul>
<li><b>r</b> Read mode for reading files </li>
<li><b>w</b> Write mode for writing files</li>
</ul>
For the next example, we will use the text file <b>Example1.txt</b>. The file is shown as follows:
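
This is line 1
This is line 2
This is line 3
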
We read the file:
```python
# Read the Example1.txt
example1 = "Example1.txt"
file1 = open(example1, "r")
```
We can view the attributes of the file.
The name of the file:
```python
# Print the path of file
file1.name
```
'Example1.txt'
The mode the file object is in:
```python
# Print the mode of file, either 'r' or 'w'
file1.mode
```
'r'
We can read the file and assign it to a variable:
```python
# Read the file
FileContent = file1.read()
FileContent
```
'This is line 1 \nThis is line 2\nThis is line 3'
The <b>\n</b> means that there is a new line.
We can print the file:
```python
# Print the file with '\n' as a new line
print(FileContent)
```
This is line 1
This is line 2
This is line 3
The file is of type string:
```python
# Type of file content
type(FileContent)
```
str
It is very important to close the file at the end. This frees up system resources and ensures consistent behavior across different Python versions.
```python
# Close file after finish
file1.close()
```
<hr>
<h2 id="better">A Better Way to Open a File</h2>
Using the <code>with</code> statement is better practice: it automatically closes the file even if the code encounters an exception. The code will run everything in the indented block and then close the file object.
```python
# Open file using with
with open(example1, "r") as file1:
FileContent = file1.read()
print(FileContent)
```
This is line 1
This is line 2
This is line 3
The file object is now closed; you can verify this by running the following cell:
```python
# Verify if the file is closed
file1.closed
```
True
We can see the info in the file:
```python
# See the content of file
print(FileContent)
```
This is line 1
This is line 2
This is line 3
The syntax is a little confusing as the file object is after the <code>as</code> statement. We also don’t explicitly close the file. Therefore we summarize the steps in a figure:
We don’t have to read the entire file. For example, we can read the first 4 characters by passing 4 as a parameter to the method **.read()**:
```python
# Read first four characters
with open(example1, "r") as file1:
print(file1.read(4))
```
This
Once the method <code>.read(4)</code> is called, the first 4 characters are read. If we call the method again, the next 4 characters are read. The output for the following cell will demonstrate the process for different inputs to the method <code>read()</code>:
```python
# Read certain amount of characters
with open(example1, "r") as file1:
print(file1.read(4))
print(file1.read(4))
print(file1.read(7))
print(file1.read(15))
```
This
is
line 1
This is line 2
The process is illustrated in the below figure, and each color represents the part of the file read after the method <code>read()</code> is called:
Here is an example using the same file, but instead we read 16, 5, and then 9 characters at a time:
```python
# Read certain amount of characters
with open(example1, "r") as file1:
print(file1.read(16))
print(file1.read(5))
print(file1.read(9))
```
This is line 1
This
is line 2
We can also read one line of the file at a time using the method <code>readline()</code>:
```python
# Read one line
with open(example1, "r") as file1:
print("first line: " + file1.readline())
```
first line: This is line 1
We can also pass an argument to <code>readline()</code> to specify the number of characters we want to read. However, unlike <code>read()</code>, <code>readline()</code> reads at most one line.
```python
with open(example1, "r") as file1:
print(file1.readline(20)) # does not read past the end of line
print(file1.read(20)) # Returns the next 20 chars
```
This is line 1
This is line 2
This
We can use a loop to iterate through each line:
```python
# Iterate through the lines
with open(example1,"r") as file1:
i = 0;
for line in file1:
print("Iteration", str(i), ": ", line)
i = i + 1
```
Iteration 0 : This is line 1
Iteration 1 : This is line 2
Iteration 2 : This is line 3
We can use the method <code>readlines()</code> to save the text file to a list:
```python
# Read all lines and save as a list
with open(example1, "r") as file1:
FileasList = file1.readlines()
```
Each element of the list corresponds to a line of text:
```python
# Print the first line
FileasList[0]
```
'This is line 1 \n'
```python
# Print the second line
FileasList[1]
```

    'This is line 2\n'

```python
# Print the third line
FileasList[2]
```
'This is line 3'
<hr>
<h2> Exercise </h2>
<h4>Weather Data</h4>
Your friend, a rising star in the field of meteorology, has called on you to write a script to perform some analysis on weather station data. Given below is a file "resources/ex4.csv", which contains some precipitation data for the month of June.
Each line in the file has the format Date,Precipitation (up to two decimal places). Note how the data is separated using ','. The first row of the file contains headers and should be ignored.
Your task is to complete the <code>getNAvg</code> function that computes a simple moving average over N days for the precipitation data, where N is a parameter. Your function should return a list of moving averages for the given data.
The formula for a k-day moving average over a series $n_{0}, n_{1}, n_{2}, n_{3}, \ldots, n_{m}$ is:
\begin{align}
M_{i} = M_{i-1} + \frac{n_{i} - n_{i-k}}{k}, \text{for i = k to m }
\\ \text{where $M_{i}$ is the moving average}
\end{align}
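For example (using made-up data rather than the values in the file): a 2-day moving average over the series 1, 2, 3, 4 starts from the plain average of the first two points, $M_{1} = (1+2)/2 = 1.5$, and the recurrence then gives $M_{2} = 1.5 + (3-1)/2 = 2.5$ and $M_{3} = 2.5 + (4-2)/2 = 3.5$, matching the window averages $(2+3)/2$ and $(3+4)/2$.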
The skeleton code has been provided below. Edit only the required function.
<details><summary>Click here for hints</summary>
```python
- Each line of the file has a '\n' char which should be removed
- The lines in the file are read as strings and need to be typecasted to floats
- For a k day moving average, The data points for the last k days must be known
```
</details>
```python
##Download the file
!wget https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%204/ex4.csv
```
--2021-01-29 12:15:18-- https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%204/ex4.csv
Resolving cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)... 169.63.118.104
Connecting to cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)|169.63.118.104|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 718 [text/csv]
Saving to: ‘ex4.csv’
ex4.csv 100%[===================>] 718 --.-KB/s in 0.001s
2021-01-29 12:15:18 (999 KB/s) - ‘ex4.csv’ saved [718/718]
```python
import matplotlib.pyplot as plt
statData ="ex4.csv"
def getNAvg(file,N):
row = 0 # keep track of rows
lastN = [] # keep track of last N points
mean = [0] # running avg
with open(file,"r") as rawData:
for line in rawData:
if (row == 0): # Ignore the headers
row = row + 1
continue
line = line.strip('\n')
lineData = float(line.split(',')[1])
if (row<=N):
lastN.append(lineData)
mean[0] = (lineData + mean[0]*(row-1))/row
else:
mean.append( mean[row - N -1]+ (lineData - lastN[0])/N)
lastN = lastN[1:]
lastN.append(lineData)
row = row +1
    return mean
def plotData(mean,N):
mean = [round(x,3) for x in mean]
plt.plot(mean,label=str(N) + ' day average')
plt.xlabel('Day')
    plt.ylabel('Precipitation')
plt.legend()
```
#### Once you have finished, you can you use the block below to plot your data
```python
plotData(getNAvg(statData,1),1)
plotData ([0 for x in range(1,5)]+ getNAvg(statData,5),5 )
plotData([0 for x in range(1,7)] + getNAvg(statData,7),7)
```
You can use the code below to verify your progress -
```python
avg5 =[4.18,4.78,4.34,4.72,5.48,5.84,6.84,6.76,6.74,5.46,4.18,2.74,2.52,2.02,2.16,2.82,2.92,4.36,4.74,5.12,5.34,6.4,6.56,6.1,5.74,5.62,4.26]
avg7 =[4.043,4.757,5.071,5.629,6.343,5.886,6.157,5.871,5.243,4.386,3.514,2.714,2.586,2.443,2.571,3.643,4.143,4.443,4.814,5.6,6.314,6.414,5.429,5.443,4.986]
def testMsg(passed):
if passed:
return 'Test Passed'
else :
return ' Test Failed'
print("getNAvg : ")
try:
sol5 = getNAvg(statData,5)
sol7 = getNAvg(statData,7)
if(len(sol5)==len( avg5) and (len(sol7)==len(avg7))):
err5 = sum([abs(avg5[index] - sol5[index])for index in range(len(avg5))])
err7 = sum([abs(avg7[index] - sol7[index])for index in range(len(avg7))])
print(testMsg((err5 < 1) and (err7 <1)))
else:
print(testMsg(False))
except NameError as e:
print('Error! Code: {c}, Message: {m}'.format(c = type(e).__name__, m = str(e)))
except:
print("An error occured. Recheck your function")
```
<details><summary>Click here for the solution</summary>
```python
import matplotlib.pyplot as plt
statData ="ex4.csv"
def getNAvg(file,N):
"""
    file - File containing all the raw weather station data
    N - The number of days to compute the moving average over
    Returns a list containing the moving average of all data points
"""
row = 0 # keep track of rows
lastN = [] # keep track of last N points
mean = [0] # running avg
with open(file,"r") as rawData:
for line in rawData:
if (row == 0): # Ignore the headers
row = row + 1
continue
line = line.strip('\n')
lineData = float(line.split(',')[1])
if (row<=N):
lastN.append(lineData)
mean[0] = (lineData + mean[0]*(row-1))/row
else:
mean.append( mean[row - N -1]+ (lineData - lastN[0])/N)
lastN = lastN[1:]
lastN.append(lineData)
row = row +1
return mean
def plotData(mean,N):
""" Plots running averages """
mean = [round(x,3) for x in mean]
plt.plot(mean,label=str(N) + ' day average')
plt.xlabel('Day')
    plt.ylabel('Precipitation')
plt.legend()
plotData(getNAvg(statData,1),1)
plotData ([0 for x in range(1,5)]+ getNAvg(statData,5),5 )
plotData([0 for x in range(1,7)] + getNAvg(statData,7),7)
```
</details>
<div class="alert alert-success">
Note: Files with sets of data separated using ',' or other characters are called '.csv' files.
They are a very common way to store data. Usually when dealing with them, an external library is used that does the nitpicky tasks for you. In fact, there are numerous libraries for statistical functions too. You will learn about such libraries later in the course.
</div>
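
As a small taste of such a library, here is a minimal sketch using Python's built-in <code>csv</code> module (not needed for the exercise above) to load the same data:

```python
# Minimal sketch: load the precipitation column with the standard-library csv module
import csv

with open("ex4.csv", "r") as f:
    reader = csv.reader(f)
    next(reader)                               # skip the header row
    data = [float(row[1]) for row in reader]   # precipitation values as floats
```
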
<hr>
<h2>The last exercise!</h2>
<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.
<hr>
## Author
<a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a>
## Other contributors
<a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a>
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ------------- | --------------------------------------------------------- |
| 2020-09-30 | 1.2 | Malika Singla | Weather Data dataset link added |
| 2020-09-30 | 1.1 | Arjun Swani | Added exericse "Weather Data" |
| 2020-09-30 | 1.0 | Arjun Swani | Added blurbs about closing files and read() vs readline() |
| 2020-08-26 | 0.2 | Lavanya | Moved lab to course repo in GitLab |
| | | | |
| | | | |
<hr/>
## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
```python
```
|
Lemma top_winning_strategy {p} {s : strategy top p}
: winning_strategy s <-> p = player_P.
Proof.
unfold winning_strategy.
setoid_rewrite top_play_won_by.
firstorder.
eapply H.
apply top_player_follows_strategy.
Unshelve.
exact trivial_play.
Qed. |
(* Title: HOL/Datatype_Examples/Lift_BNF.thy
Author: Dmitriy Traytel, ETH Zürich
Copyright 2015
Demonstration of the "lift_bnf" command.
*)
section \<open>Demonstration of the \textbf{lift_bnf} Command\<close>
theory Lift_BNF
imports Main
begin
typedef 'a nonempty_list = "{xs :: 'a list. xs \<noteq> []}"
by blast
lift_bnf (no_warn_wits) (neset: 'a) nonempty_list
for map: nemap rel: nerel
by simp_all
typedef ('a :: finite, 'b) fin_nonempty_list = "{(xs :: 'a set, ys :: 'b list). ys \<noteq> []}"
by blast
lift_bnf (dead 'a :: finite, 'b) fin_nonempty_list
by auto
datatype 'a tree = Leaf | Node 'a "'a tree nonempty_list"
record 'a point =
xCoord :: 'a
yCoord :: 'a
copy_bnf ('a, 's) point_ext
typedef 'a it = "UNIV :: 'a set"
by blast
copy_bnf (plugins del: size) 'a it
typedef ('a, 'b) T_prod = "UNIV :: ('a \<times> 'b) set"
by blast
copy_bnf ('a, 'b) T_prod
typedef ('a, 'b, 'c) T_func = "UNIV :: ('a \<Rightarrow> 'b * 'c) set"
by blast
copy_bnf ('a, 'b, 'c) T_func
typedef ('a, 'b) sum_copy = "UNIV :: ('a + 'b) set"
by blast
copy_bnf ('a, 'b) sum_copy
typedef ('a, 'b) T_sum = "{Inl x | x. True} :: ('a + 'b) set"
by blast
lift_bnf (no_warn_wits) ('a, 'b) T_sum [wits: "Inl :: 'a \<Rightarrow> 'a + 'b"]
by (auto simp: map_sum_def sum_set_defs split: sum.splits)
typedef ('key, 'value) alist = "{xs :: ('key \<times> 'value) list. (distinct \<circ> map fst) xs}"
morphisms impl_of Alist
proof
show "[] \<in> {xs. (distinct o map fst) xs}"
by simp
qed
lift_bnf (dead 'k, 'v) alist [wits: "Nil :: ('k \<times> 'v) list"]
by simp_all
typedef 'a myopt = "{X :: 'a set. finite X \<and> card X \<le> 1}" by (rule exI[of _ "{}"]) auto
lemma myopt_type_def: "type_definition
(\<lambda>X. if card (Rep_myopt X) = 1 then Some (the_elem (Rep_myopt X)) else None)
(\<lambda>x. Abs_myopt (case x of Some x \<Rightarrow> {x} | _ \<Rightarrow> {}))
(UNIV :: 'a option set)"
apply unfold_locales
apply (auto simp: Abs_myopt_inverse dest!: card_eq_SucD split: option.splits)
apply (metis Rep_myopt_inverse)
apply (metis One_nat_def Rep_myopt Rep_myopt_inverse Suc_le_mono card_0_eq le0 le_antisym mem_Collect_eq nat.exhaust)
done
copy_bnf 'a myopt via myopt_type_def
typedef ('k, 'v) fmap = "{M :: ('k \<rightharpoonup> 'v). finite (dom M)}"
by (rule exI[of _ Map.empty]) simp_all
lift_bnf (dead 'k, 'v) fmap [wits: "Map.empty :: 'k \<Rightarrow> 'v option"]
by auto
end
|
\documentclass{article}
\usepackage[utf8]{inputenc}
%\usepackage{graphicx}
\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
%\usepackage{graphicx}
%\usepackage{amssymb}
%\usepackage{epstopdf}
%\DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png}
%\usepackage{graphicx}
%\usepackage{xcolor}
\usepackage{AVGMacros}
%\usepackage{ImageMacros}
\input{ImageMacros.tex}
\title{Some Examples for the AROS Vision Group LaTeX Macros (AVGMacros.sty)}
\author{Andreas L. Teigen \and Rudolf Mester }
\date{October 2020}
\begin{document}
\maketitle
\section{Introduction}
\subsection{Person-specific comments / notes}
\verb"\note{This is an anonymous test note}"\\
yields:\\
\note{This is an anonymous test note}
\verb"\rmnote{This is a note by Rudolf}"\\
yields:\\
\rmnote{This is a note by Rudolf}
Similarly:
\asnote{This is a note by Annette}
\atnote{This is a note by Andreas}
\mynote{This is a note by Mauhing}
\pznote{This is a note by Peder}
\ahnote{This is a note by Axel}
\wknote{This is a note by William}
\subsection{Macros for marking up text parts for editing}
\bit
\item
\verb"\deletetext{This text should be deleted}"\\
yields\\
\deletetext{This text should be deleted}
\item
\verb"\replacetext{This text should be}{changed into this}"\\
yields\\
\replacetext{This text should be}{changed into this}
\item
\verb"\inserttext{This text should be inserted}"\\
yields:\\
\inserttext{This text should be inserted}
\item
\verb"\missingtext"\\
yields:\\
\missingtext \qquad \qquad(This symbol indicates that some text should be inserted here,
e.g.\ a missing word.)
\item
\verb"\markuptext{This text is highlighted}"\\
yields:\\
\markuptext{This text is highlighted}
\item
The macro \verb"infosource{...}" allows the mark the source of some information
in a text in a standardized way. Example:
\verb"infosource{NTNU home page}"\\
yields
\infosource{NTNU home page}
\eit
\section{Mathematical macros}
\subsection{Environments for equations in display style}
Writing \verb"\begin{equation} ... \end{equation}" etc.\ is often tedious.
Here are some shortcuts:
\bit
\item \verb"\begin{equation} ... \end{equation}" $ \quad \longrightarrow \quad$
\verb"\beq ... \eeq"
\item \verb"\begin{displaymath} ... \end{displaymath}"
$ \quad \longrightarrow \quad$
\verb"\bdm ... \edm"
\item \verb"\begin{itemize} ... \end{itemize}"
$ \quad \longrightarrow \quad$
\verb"\bit ... \eit"
\item \verb"\begin{enumerate} ... \end{enumerate}"
$ \quad \longrightarrow \quad$
\verb"\benum ... \eenum"
\eit
%\beq
%x = \frac{1+e^y}{1-e^y{+1}}\\
%x = \frac{1+e^y}{1-e^y{+1}}
%\eeq
%
%\bdm
%x = \frac{1+e^y}{1-e^y{+1}}\\
%x = \frac{1+e^y}{1-e^y{+1}}
%\edm
\bit
\item Equation arrays:\\
\begin{verbatim}
\bea x &=& \frac{1+e^y}{1-e^y{+1}}\\
\frac{1+e^y}{1-e^y{+1}} &=& x
\eea
\end{verbatim}
yields
\bea x &=& \frac{1+e^y}{1-e^y{+1}}\\
\frac{1+e^y}{1-e^y{+1}} &=& x
\eea
\eit
%\bit
%\item first
%\item second
%\item third
%\eit
%
%\benum
%\item first
%\item second
%\item third
%\eenum
\begin{table}[h]
\begin{tabular}{lll}
\verb"$a \yields b$" & $\quad \longrightarrow \quad$ & $a \yields b$ \\
\verb"$a \shallbe b$" & $\quad \longrightarrow \quad$ & $a \shallbe b$ \\
\verb"$a \definedas b$" & $\quad \longrightarrow \quad$ & $a \definedas b$ \\
\verb"$a \isapproximately b$" & $\quad \longrightarrow \quad$ & $a \isapproximately b$ \\
\verb"$\Prob{x\given y}$" & $\quad \longrightarrow \quad$ & $\Prob{x\given y}$ \\
\verb"$\Erw{X}$" & $\quad \longrightarrow \quad$ & $\Erw{X}$ \\
\verb"$\Var{X}$" & $\quad \longrightarrow \quad$ & $\Var{X}$ \\
\verb"$\Cov{X, Y}$" & $\quad \longrightarrow \quad$ & $\Cov{X, Y}$ \\
\verb"$\pdf{f(x)}$" & $\quad \longrightarrow \quad$ & $\pdf{f(x)}$ \\
\verb"$\Cor{X,Y}$" & $\quad \longrightarrow \quad$ & $\Cor{X,Y}$
\end{tabular}
\end{table}
\subsection{Vectors and Matrices}
\subsubsection{Vectors}
There are predefined macros for all small letters that denote a vector:\\
\verb"$\va = \vb + \vc$" $\quad \longrightarrow \quad \va = \vb + \vc$\\
Note that by redefinition of a \emph{single} macro, vector notation
can be changed from a vector symbol above the character
to a boldface character.
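For example, assuming all per-letter macros are routed through one common
helper macro (its internal name, \verb"\vect", is an assumption here, since
AVGMacros.sty itself is not shown), switching to boldface would amount to:
\begin{verbatim}
\renewcommand{\vect}[1]{\mathbf{#1}}  % boldface instead of \vec{#1}
\end{verbatim}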
The \verb"\Vector{...}" macro simplifies the task of typesetting column vectors
with explicit display of the vector elements:
\verb"$ \va = \Vector{1 \\ 2 \\ 3 \\4}$" yields
\bdm
\va = \Vector{1 \\ 2 \\ 3 \\4}
\edm
\subsubsection{Matrices}
As in most books, matrices are typeset, by convention, as boldface uppercase letters:\\
\verb"$\mat{X}$" $\quad \longrightarrow \quad \mat{X}$\\
However, this macro is rarely directly used.
There are predefined macros for all capital letters that denote a matrix:\\
\verb"$\MA = \MB^{-1} \cdot \MC$" $\quad \longrightarrow \quad \MA = \MB^{-1} \cdot \MC$
\subsubsection{Further macros on vectors and matrices}
\begin{table}[h]
\begin{tabular}{lll}
\verb"$\det{\MB}$" & $\quad \longrightarrow \quad$ & $\det{\MB}$ \\
\verb"$\trace{\MA}$" & $\quad \longrightarrow \quad$ & $\trace{\MA}$ \\
\verb"$\rank{\MA}$" & $\quad \longrightarrow \quad$ & $\rank{\MA}$ \\
\verb"$\diag{a_i}$" & $\quad \longrightarrow \quad$ & $\diag{a_i}$ \\
\verb"$\abs{a-b}$" & $\quad \longrightarrow \quad$ & $\abs{a-b}$ \\
\verb"$\norm{\MA-\MB}$" & $\quad \longrightarrow \quad$ & $\norm{\MA-\MB}$ \\
\verb"$\eigvec{\MA}$" & $\quad \longrightarrow \quad$ & $\eigvec{\MA}$
\end{tabular}
\end{table}
%$a \yields b$
%
%$a \shallbe b$
%
%$a \definedas b$
%
%$a \isapproximately b$
%$\e^x$
%
%$\ld x$
%
%$\si x$
%
%$\scha x$
%
%$\erf x$
%$\Prob{x\given y}$
%
%$\Erw{X}$
%
%$\Var{X}$
%
%$\Cov{X, Y}$
%\verb"X" & $\quad \longrightarrow \quad$ & X \\
%\verb"X" & $\quad \longrightarrow \quad$ & X \\
\subsubsection{Fractions}
The built-in LaTeX macro \verb"\frac" tends to decrease the font size
of the numerator and the denominator, which often impairs readability.
The macro \verb"\fracds{..}{..}" alleviates this problem:
\verb"$\fracds{1+e^y}{1-e^y{+1}}$" $\qquad \longrightarrow \qquad \fracds{1+e^y}{1-e^y{+1}}$
\clearpage
\section{Image macros}
\image{images/image1}{10cm}{This is image test 1}{img:image1}
\imagesc{images/image1}{10cm}{Small caption}{This is image test 2}{img:image2}
\imagenocap{images/image1}{10cm}
\imagenofloat{images/image1}{10cm}
\doubleimage{images/image1}{5cm}{This is image test 3}{img:image3}{images/image2}{5cm}{This is image test 4}{img:image4}
\newcommand{\testseven}{\dimage{images/image1}{5.5cm}{This is image test 7}{img:image7}}
\newcommand{\testeight}{\dimage{images/image2}{5.5cm}{This is image test 8}{img:image8}}
\doubleimageparts{\testseven}{\testeight}
\doubleimagenocap{images/image1}{5.5cm}{images/image2}{5.5cm}
\doubleimagenofloat{images/image1}{5.5cm}{images/image2}{5.5cm}
\doubleimageonecap{images/image1}{5.5cm}{images/image2}{5.5cm}{These are 2 images}{img:double1}
\doubleimageonecapsc{images/image1}{5.5cm}{images/image2}{5.5cm}{Short caption 10}{These are 2 images}{img:double2}
\ImageAndText[11cm]{images/image1}{5cm}{The image to the left shows the Eelume snake robot mid operation}{5cm}
\TextAndImage[11cm]{The image to the right shows the Eelume snake robot mid operation}{5cm}{images/image1}{5cm}
\end{document}
|
Require Import Coq.Reals.Reals.
Require Import ExtLib.Tactics.
Require Import Logic.Logic.
Require Import Logic.ProofRules.
Require Import Logic.Inductively.
Require Import Logic.EnabledLemmas.
Require Import Logic.Tactics.
Require Import Examples.System.
Set Implicit Arguments.
Set Strict Implicit.
Local Open Scope string_scope.
Section quadcopter.
Variable delta : R.
Hypothesis delta_gt_0 : (delta > 0)%R.
Variable gravity : R.
Variable angle_min : R.
Hypothesis angle_min_lt_0 : (angle_min < 0)%R.
Definition angle_max := (-angle_min)%R.
Local Open Scope HP_scope.
Definition small_angle : StateFormula :=
angle_min <= "pitch" <= angle_max
//\\ angle_min <= "roll" <= angle_max
//\\ 0 <= "A".
Definition W_quad : Evolution := fun st' =>
st' "x" = "vx" //\\ st' "y" = "vy" //\\
st' "z" = "vz"
//\\ st' "vx" = "A" * cos("pitch")*sin("roll")
//\\ st' "vy" = --"A" * sin("pitch")
//\\ st' "vz" = "A" * cos("pitch")*cos("roll") - gravity
//\\ st' "pitch" = 0 //\\ st' "roll" = 0 //\\
st' "A" = 0.
Lemma quadcopter_evolve_enabled :
|-- Enabled (World W_quad).
Admitted.
Definition Quadcopter (D : ActionFormula) :=
Sys (D //\\ next small_angle) W_quad delta.
Lemma Enabled_small_angle :
|-- Enabled (next small_angle).
Proof.
enable_ex_st. unfold angle_max. exists R0. exists R0.
exists R0. solve_linear.
Qed.
Theorem Quadcopter_refine :
forall D W I,
|-- TimedPreserves delta I (Sys D W delta) ->
|-- SysNeverStuck delta I (Sys D W delta) ->
D |-- next small_angle ->
W_quad |-- W ->
|-- TimedPreserves delta I (Quadcopter D) //\\
SysNeverStuck delta I (Quadcopter D).
Proof.
intros.
assert (D //\\ next small_angle -|- D)
by (split; charge_tauto).
charge_split.
{ unfold Quadcopter, TimedPreserves, Preserves in *.
rewrite H3. charge_intros. charge_apply H. rewrite H2.
charge_tauto. }
{ unfold Quadcopter, SysNeverStuck, Sys in *.
charge_intros. rewrite limplValid in H0. rewrite H0.
repeat (rewrite <- Enabled_and_push; [| intuition]).
charge_split; [ charge_assumption | ].
repeat rewrite <- Enabled_or. charge_cases.
{ rewrite H3. charge_tauto. }
{ charge_right. charge_clear.
apply quadcopter_evolve_enabled. } }
Qed.
Theorem Quadcopter_refine_SafeAndReactive :
forall D W I,
|-- SafeAndReactive delta I (Sys D W delta) ->
D |-- next small_angle ->
W_quad |-- W ->
|-- SafeAndReactive delta I (Quadcopter D).
Proof.
unfold SafeAndReactive. intros.
eapply Quadcopter_refine; eauto.
rewrite landL1 in H. eassumption. reflexivity.
rewrite landL2 in H. eassumption. reflexivity.
Qed.
Lemma SysDisjoin_Quadcopter' :
forall I1 I2 D1 D2,
Quadcopter
(Sys_D (SysDisjoin I1 (Quadcopter D1)
I2 (Quadcopter D2)))
-|- SysDisjoin I1 (Quadcopter D1) I2 (Quadcopter D2).
Proof.
unfold SysDisjoin, Quadcopter, Sys, Sys_D, Discr. intros.
split; charge_cases; try charge_tauto.
Qed.
Lemma SysDisjoin_Quadcopter :
forall I1 I2 D1 D2,
Quadcopter (SysDisjoin I1 (Sys D1 W_quad delta)
I2 (Sys D2 W_quad delta))
-|- SysDisjoin I1 (Quadcopter D1) I2 (Quadcopter D2).
Proof.
unfold SysDisjoin, Quadcopter, Sys, Sys_D, Discr. intros.
split; charge_cases; try charge_tauto.
Qed.
Require Import Coq.Classes.Morphisms.
Lemma Proper_Quadcopter_lequiv :
Proper (lequiv ==> lequiv) Quadcopter.
Proof.
morphism_intro. unfold Quadcopter. rewrite H.
reflexivity.
Qed.
Lemma Proper_Quadcopter_lentails :
Proper (lentails ==> lentails) Quadcopter.
Proof.
morphism_intro. unfold Quadcopter. rewrite H.
reflexivity.
Qed.
End quadcopter. |
/-
Copyright (c) 2020 Joseph Myers. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joseph Myers
-/
import algebra.invertible
import algebra.indicator_function
import linear_algebra.affine_space.affine_map
import linear_algebra.affine_space.affine_subspace
import linear_algebra.finsupp
import tactic.fin_cases
/-!
# Affine combinations of points
This file defines affine combinations of points.
## Main definitions
* `weighted_vsub_of_point` is a general weighted combination of
subtractions with an explicit base point, yielding a vector.
* `weighted_vsub` uses an arbitrary choice of base point and is intended
to be used when the sum of weights is 0, in which case the result is
independent of the choice of base point.
* `affine_combination` adds the weighted combination to the arbitrary
base point, yielding a point rather than a vector, and is intended
to be used when the sum of weights is 1, in which case the result is
independent of the choice of base point.
These definitions are for sums over a `finset`; versions for a
`fintype` may be obtained using `finset.univ`, while versions for a
`finsupp` may be obtained using `finsupp.support`.
## References
* https://en.wikipedia.org/wiki/Affine_space
-/
noncomputable theory
open_locale big_operators classical affine
namespace finset
lemma univ_fin2 : (univ : finset (fin 2)) = {0, 1} :=
by { ext x, fin_cases x; simp }
variables {k : Type*} {V : Type*} {P : Type*} [ring k] [add_comm_group V] [module k V]
variables [S : affine_space V P]
include S
variables {ι : Type*} (s : finset ι)
variables {ι₂ : Type*} (s₂ : finset ι₂)
/-- A weighted sum of the results of subtracting a base point from the
given points, as a linear map on the weights. The main cases of
interest are where the sum of the weights is 0, in which case the sum
is independent of the choice of base point, and where the sum of the
weights is 1, in which case the sum added to the base point is
independent of the choice of base point. -/
def weighted_vsub_of_point (p : ι → P) (b : P) : (ι → k) →ₗ[k] V :=
∑ i in s, (linear_map.proj i : (ι → k) →ₗ[k] k).smul_right (p i -ᵥ b)
@[simp] lemma weighted_vsub_of_point_apply (w : ι → k) (p : ι → P) (b : P) :
s.weighted_vsub_of_point p b w = ∑ i in s, w i • (p i -ᵥ b) :=
by simp [weighted_vsub_of_point, linear_map.sum_apply]
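-- Concrete unfolding (comment-only sketch): over a two-element `finset`,
-- the lemma above reduces `weighted_vsub_of_point` to
-- `w 0 • (p 0 -ᵥ b) + w 1 • (p 1 -ᵥ b)`.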
/-- Given a family of points, if we use a member of the family as a base point, the
`weighted_vsub_of_point` does not depend on the value of the weights at this point. -/
lemma weighted_vsub_of_point_eq_of_weights_eq
(p : ι → P) (j : ι) (w₁ w₂ : ι → k) (hw : ∀ i, i ≠ j → w₁ i = w₂ i) :
s.weighted_vsub_of_point p (p j) w₁ = s.weighted_vsub_of_point p (p j) w₂ :=
begin
simp only [finset.weighted_vsub_of_point_apply],
congr,
ext i,
cases eq_or_ne i j with h h,
{ simp [h], },
{ simp [hw i h], },
end
/-- The weighted sum is independent of the base point when the sum of
the weights is 0. -/
lemma weighted_vsub_of_point_eq_of_sum_eq_zero (w : ι → k) (p : ι → P) (h : ∑ i in s, w i = 0)
(b₁ b₂ : P) : s.weighted_vsub_of_point p b₁ w = s.weighted_vsub_of_point p b₂ w :=
begin
apply eq_of_sub_eq_zero,
rw [weighted_vsub_of_point_apply, weighted_vsub_of_point_apply, ←sum_sub_distrib],
conv_lhs
{ congr,
skip,
funext,
rw [←smul_sub, vsub_sub_vsub_cancel_left] },
rw [←sum_smul, h, zero_smul]
end
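-- Worked instance of the base-point independence: with weights `1` and `-1`
-- (which sum to 0), the sum is `(p 0 -ᵥ b) - (p 1 -ᵥ b) = p 0 -ᵥ p 1`,
-- which indeed does not mention `b`.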
/-- The weighted sum, added to the base point, is independent of the
base point when the sum of the weights is 1. -/
lemma weighted_vsub_of_point_vadd_eq_of_sum_eq_one (w : ι → k) (p : ι → P) (h : ∑ i in s, w i = 1)
(b₁ b₂ : P) :
s.weighted_vsub_of_point p b₁ w +ᵥ b₁ = s.weighted_vsub_of_point p b₂ w +ᵥ b₂ :=
begin
erw [weighted_vsub_of_point_apply, weighted_vsub_of_point_apply, ←@vsub_eq_zero_iff_eq V,
vadd_vsub_assoc, vsub_vadd_eq_vsub_sub, ←add_sub_assoc, add_comm, add_sub_assoc,
←sum_sub_distrib],
conv_lhs
{ congr,
skip,
congr,
skip,
funext,
rw [←smul_sub, vsub_sub_vsub_cancel_left] },
rw [←sum_smul, h, one_smul, vsub_add_vsub_cancel, vsub_self]
end
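-- Worked instance: with weights `1` and `0` (which sum to 1), the left-hand
-- side is `(p 0 -ᵥ b) +ᵥ b = p 0` for every base point `b`.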
/-- The weighted sum is unaffected by removing the base point, if
present, from the set of points. -/
@[simp] lemma weighted_vsub_of_point_erase (w : ι → k) (p : ι → P) (i : ι) :
(s.erase i).weighted_vsub_of_point p (p i) w = s.weighted_vsub_of_point p (p i) w :=
begin
rw [weighted_vsub_of_point_apply, weighted_vsub_of_point_apply],
apply sum_erase,
rw [vsub_self, smul_zero]
end
/-- The weighted sum is unaffected by adding the base point, whether
or not present, to the set of points. -/
@[simp] lemma weighted_vsub_of_point_insert [decidable_eq ι] (w : ι → k) (p : ι → P) (i : ι) :
(insert i s).weighted_vsub_of_point p (p i) w = s.weighted_vsub_of_point p (p i) w :=
begin
rw [weighted_vsub_of_point_apply, weighted_vsub_of_point_apply],
apply sum_insert_zero,
rw [vsub_self, smul_zero]
end
/-- The weighted sum is unaffected by changing the weights to the
corresponding indicator function and adding points to the set. -/
lemma weighted_vsub_of_point_indicator_subset (w : ι → k) (p : ι → P) (b : P) {s₁ s₂ : finset ι}
(h : s₁ ⊆ s₂) :
s₁.weighted_vsub_of_point p b w = s₂.weighted_vsub_of_point p b (set.indicator ↑s₁ w) :=
begin
rw [weighted_vsub_of_point_apply, weighted_vsub_of_point_apply],
exact set.sum_indicator_subset_of_eq_zero w (λ i wi, wi • (p i -ᵥ b : V)) h (λ i, zero_smul k _)
end
/-- A weighted sum, over the image of an embedding, equals a weighted
sum with the same points and weights over the original
`finset`. -/
lemma weighted_vsub_of_point_map (e : ι₂ ↪ ι) (w : ι → k) (p : ι → P) (b : P) :
(s₂.map e).weighted_vsub_of_point p b w = s₂.weighted_vsub_of_point (p ∘ e) b (w ∘ e) :=
begin
simp_rw [weighted_vsub_of_point_apply],
exact finset.sum_map _ _ _
end
/-- A weighted sum of the results of subtracting a default base point
from the given points, as a linear map on the weights. This is
intended to be used when the sum of the weights is 0; that condition
is specified as a hypothesis on those lemmas that require it. -/
def weighted_vsub (p : ι → P) : (ι → k) →ₗ[k] V :=
s.weighted_vsub_of_point p (classical.choice S.nonempty)
/-- Applying `weighted_vsub` with given weights. This is for the case
where a result involving a default base point is OK (for example, when
that base point will cancel out later); a more typical use case for
`weighted_vsub` would involve selecting a preferred base point with
`weighted_vsub_eq_weighted_vsub_of_point_of_sum_eq_zero` and then
using `weighted_vsub_of_point_apply`. -/
lemma weighted_vsub_apply (w : ι → k) (p : ι → P) :
s.weighted_vsub p w = ∑ i in s, w i • (p i -ᵥ (classical.choice S.nonempty)) :=
by simp [weighted_vsub, linear_map.sum_apply]
/-- `weighted_vsub` gives the sum of the results of subtracting any
base point, when the sum of the weights is 0. -/
lemma weighted_vsub_eq_weighted_vsub_of_point_of_sum_eq_zero (w : ι → k) (p : ι → P)
(h : ∑ i in s, w i = 0) (b : P) : s.weighted_vsub p w = s.weighted_vsub_of_point p b w :=
s.weighted_vsub_of_point_eq_of_sum_eq_zero w p h _ _
/-- The `weighted_vsub` for an empty set is 0. -/
@[simp] lemma weighted_vsub_empty (w : ι → k) (p : ι → P) :
(∅ : finset ι).weighted_vsub p w = (0:V) :=
by simp [weighted_vsub_apply]
/-- The weighted sum is unaffected by changing the weights to the
corresponding indicator function and adding points to the set. -/
lemma weighted_vsub_indicator_subset (w : ι → k) (p : ι → P) {s₁ s₂ : finset ι} (h : s₁ ⊆ s₂) :
s₁.weighted_vsub p w = s₂.weighted_vsub p (set.indicator ↑s₁ w) :=
weighted_vsub_of_point_indicator_subset _ _ _ h
/-- A weighted subtraction, over the image of an embedding, equals a
weighted subtraction with the same points and weights over the
original `finset`. -/
lemma weighted_vsub_map (e : ι₂ ↪ ι) (w : ι → k) (p : ι → P) :
(s₂.map e).weighted_vsub p w = s₂.weighted_vsub (p ∘ e) (w ∘ e) :=
s₂.weighted_vsub_of_point_map _ _ _ _
/-- A weighted sum of the results of subtracting a default base point
from the given points, added to that base point, as an affine map on
the weights. This is intended to be used when the sum of the weights
is 1, in which case it is an affine combination (barycenter) of the
points with the given weights; that condition is specified as a
hypothesis on those lemmas that require it. -/
def affine_combination (p : ι → P) : (ι → k) →ᵃ[k] P :=
{ to_fun := λ w,
s.weighted_vsub_of_point p (classical.choice S.nonempty) w +ᵥ (classical.choice S.nonempty),
linear := s.weighted_vsub p,
map_vadd' := λ w₁ w₂, by simp_rw [vadd_vadd, weighted_vsub, vadd_eq_add, linear_map.map_add] }
/-- The linear map corresponding to `affine_combination` is
`weighted_vsub`. -/
@[simp] lemma affine_combination_linear (p : ι → P) :
(s.affine_combination p : (ι → k) →ᵃ[k] P).linear = s.weighted_vsub p :=
rfl
/-- Applying `affine_combination` with given weights. This is for the
case where a result involving a default base point is OK (for example,
when that base point will cancel out later); a more typical use case
for `affine_combination` would involve selecting a preferred base
point with
`affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one` and
then using `weighted_vsub_of_point_apply`. -/
lemma affine_combination_apply (w : ι → k) (p : ι → P) :
s.affine_combination p w =
s.weighted_vsub_of_point p (classical.choice S.nonempty) w +ᵥ (classical.choice S.nonempty) :=
rfl
/-- `affine_combination` gives the sum with any base point, when the
sum of the weights is 1. -/
lemma affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one (w : ι → k) (p : ι → P)
(h : ∑ i in s, w i = 1) (b : P) :
s.affine_combination p w = s.weighted_vsub_of_point p b w +ᵥ b :=
s.weighted_vsub_of_point_vadd_eq_of_sum_eq_one w p h _ _
/-- Adding a `weighted_vsub` to an `affine_combination`. -/
lemma weighted_vsub_vadd_affine_combination (w₁ w₂ : ι → k) (p : ι → P) :
s.weighted_vsub p w₁ +ᵥ s.affine_combination p w₂ = s.affine_combination p (w₁ + w₂) :=
by rw [←vadd_eq_add, affine_map.map_vadd, affine_combination_linear]
/-- Subtracting two `affine_combination`s. -/
lemma affine_combination_vsub (w₁ w₂ : ι → k) (p : ι → P) :
s.affine_combination p w₁ -ᵥ s.affine_combination p w₂ = s.weighted_vsub p (w₁ - w₂) :=
by rw [←affine_map.linear_map_vsub, affine_combination_linear, vsub_eq_sub]
lemma attach_affine_combination_of_injective
(s : finset P) (w : P → k) (f : s → P) (hf : function.injective f) :
s.attach.affine_combination f (w ∘ f) = (image f univ).affine_combination id w :=
begin
simp only [affine_combination, weighted_vsub_of_point_apply, id.def, vadd_right_cancel_iff,
function.comp_app, affine_map.coe_mk],
let g₁ : s → V := λ i, w (f i) • (f i -ᵥ classical.choice S.nonempty),
let g₂ : P → V := λ i, w i • (i -ᵥ classical.choice S.nonempty),
change univ.sum g₁ = (image f univ).sum g₂,
have hgf : g₁ = g₂ ∘ f, { ext, simp, },
rw [hgf, sum_image],
exact λ _ _ _ _ hxy, hf hxy,
end
lemma attach_affine_combination_coe (s : finset P) (w : P → k) :
s.attach.affine_combination (coe : s → P) (w ∘ coe) = s.affine_combination id w :=
by rw [attach_affine_combination_of_injective s w (coe : s → P) subtype.coe_injective,
univ_eq_attach, attach_image_coe]
omit S
/-- Viewing a module as an affine space modelled on itself, affine combinations are just linear
combinations. -/
@[simp] lemma affine_combination_eq_linear_combination (s : finset ι) (p : ι → V) (w : ι → k)
(hw : ∑ i in s, w i = 1) :
s.affine_combination p w = ∑ i in s, w i • p i :=
by simp [s.affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one w p hw 0]
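-- e.g. for `s = {0, 1}` and weights `w = ![1/2, 1/2]` over a field, the
-- affine combination is the ordinary average `(p 0 + p 1) / 2`.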
include S
/-- An `affine_combination` equals a point if that point is in the set
and has weight 1 and the other points in the set have weight 0. -/
@[simp] lemma affine_combination_of_eq_one_of_eq_zero (w : ι → k) (p : ι → P) {i : ι}
  (his : i ∈ s) (hwi : w i = 1) (hw0 : ∀ i2 ∈ s, i2 ≠ i → w i2 = 0) :
  s.affine_combination p w = p i :=
-- The statement and proof here are reconstructed from the uses of this lemma
-- further below (the original text broke off after `@[simp]`); treat the
-- proof as a sketch.
begin
  have h1 : ∑ i in s, w i = 1 := hwi ▸ sum_eq_single i hw0 (λ h, false.elim (h his)),
  rw [s.affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one w p h1 (p i),
      weighted_vsub_of_point_apply],
  convert zero_vadd V (p i),
  refine sum_eq_zero (λ i2 hi2, _),
  by_cases h : i2 = i,
  { simp [h] },
  { simp [hw0 i2 hi2 h] }
end
/-- An affine combination is unaffected by changing the weights to the
corresponding indicator function and adding points to the set. -/
lemma affine_combination_indicator_subset (w : ι → k) (p : ι → P) {s₁ s₂ : finset ι}
(h : s₁ ⊆ s₂) :
s₁.affine_combination p w = s₂.affine_combination p (set.indicator ↑s₁ w) :=
by rw [affine_combination_apply, affine_combination_apply,
weighted_vsub_of_point_indicator_subset _ _ _ h]
/-- An affine combination, over the image of an embedding, equals an
affine combination with the same points and weights over the original
`finset`. -/
lemma affine_combination_map (e : ι₂ ↪ ι) (w : ι → k) (p : ι → P) :
(s₂.map e).affine_combination p w = s₂.affine_combination (p ∘ e) (w ∘ e) :=
by simp_rw [affine_combination_apply, weighted_vsub_of_point_map]
variables {V}
/-- Suppose an indexed family of points is given, along with a subset
of the index type. A vector can be expressed as
`weighted_vsub_of_point` using a `finset` lying within that subset and
with a given sum of weights if and only if it can be expressed as
`weighted_vsub_of_point` with that sum of weights for the
corresponding indexed family whose index type is the subtype
corresponding to that subset. -/
lemma eq_weighted_vsub_of_point_subset_iff_eq_weighted_vsub_of_point_subtype {v : V} {x : k}
{s : set ι} {p : ι → P} {b : P} :
(∃ (fs : finset ι) (hfs : ↑fs ⊆ s) (w : ι → k) (hw : ∑ i in fs, w i = x),
v = fs.weighted_vsub_of_point p b w) ↔
∃ (fs : finset s) (w : s → k) (hw : ∑ i in fs, w i = x),
v = fs.weighted_vsub_of_point (λ (i : s), p i) b w :=
begin
simp_rw weighted_vsub_of_point_apply,
split,
{ rintros ⟨fs, hfs, w, rfl, rfl⟩,
use [fs.subtype s, λ i, w i, sum_subtype_of_mem _ hfs, (sum_subtype_of_mem _ hfs).symm] },
{ rintros ⟨fs, w, rfl, rfl⟩,
refine ⟨fs.map (function.embedding.subtype _), map_subtype_subset _,
λ i, if h : i ∈ s then w ⟨i, h⟩ else 0, _, _⟩;
simp }
end
variables (k)
/-- Suppose an indexed family of points is given, along with a subset
of the index type. A vector can be expressed as `weighted_vsub` using
a `finset` lying within that subset and with sum of weights 0 if and
only if it can be expressed as `weighted_vsub` with sum of weights 0
for the corresponding indexed family whose index type is the subtype
corresponding to that subset. -/
lemma eq_weighted_vsub_subset_iff_eq_weighted_vsub_subtype {v : V} {s : set ι} {p : ι → P} :
(∃ (fs : finset ι) (hfs : ↑fs ⊆ s) (w : ι → k) (hw : ∑ i in fs, w i = 0),
v = fs.weighted_vsub p w) ↔
∃ (fs : finset s) (w : s → k) (hw : ∑ i in fs, w i = 0),
v = fs.weighted_vsub (λ (i : s), p i) w :=
eq_weighted_vsub_of_point_subset_iff_eq_weighted_vsub_of_point_subtype
variables (V)
/-- Suppose an indexed family of points is given, along with a subset
of the index type. A point can be expressed as an
`affine_combination` using a `finset` lying within that subset and
with sum of weights 1 if and only if it can be expressed an
`affine_combination` with sum of weights 1 for the corresponding
indexed family whose index type is the subtype corresponding to that
subset. -/
lemma eq_affine_combination_subset_iff_eq_affine_combination_subtype {p0 : P} {s : set ι}
{p : ι → P} :
(∃ (fs : finset ι) (hfs : ↑fs ⊆ s) (w : ι → k) (hw : ∑ i in fs, w i = 1),
p0 = fs.affine_combination p w) ↔
∃ (fs : finset s) (w : s → k) (hw : ∑ i in fs, w i = 1),
p0 = fs.affine_combination (λ (i : s), p i) w :=
begin
simp_rw [affine_combination_apply, eq_vadd_iff_vsub_eq],
exact eq_weighted_vsub_of_point_subset_iff_eq_weighted_vsub_of_point_subtype
end
variables {k V}
/-- Affine maps commute with affine combinations. -/
lemma map_affine_combination {V₂ P₂ : Type*} [add_comm_group V₂] [module k V₂] [affine_space V₂ P₂]
(p : ι → P) (w : ι → k) (hw : s.sum w = 1) (f : P →ᵃ[k] P₂) :
f (s.affine_combination p w) = s.affine_combination (f ∘ p) w :=
begin
have b := classical.choice (infer_instance : affine_space V P).nonempty,
have b₂ := classical.choice (infer_instance : affine_space V₂ P₂).nonempty,
rw [s.affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one w p hw b,
s.affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one w (f ∘ p) hw b₂,
← s.weighted_vsub_of_point_vadd_eq_of_sum_eq_one w (f ∘ p) hw (f b) b₂],
simp only [weighted_vsub_of_point_apply, ring_hom.id_apply, affine_map.map_vadd,
linear_map.map_smulₛₗ, affine_map.linear_map_vsub, linear_map.map_sum],
end
end finset
namespace finset
variables (k : Type*) {V : Type*} {P : Type*} [division_ring k] [add_comm_group V] [module k V]
variables [affine_space V P] {ι : Type*} (s : finset ι) {ι₂ : Type*} (s₂ : finset ι₂)
/-- The weights for the centroid of some points. -/
def centroid_weights : ι → k := function.const ι (card s : k) ⁻¹
/-- `centroid_weights` at any point. -/
@[simp] lemma centroid_weights_apply (i : ι) : s.centroid_weights k i = (card s : k) ⁻¹ :=
rfl
/-- `centroid_weights` equals a constant function. -/
lemma centroid_weights_eq_const :
s.centroid_weights k = function.const ι ((card s : k) ⁻¹) :=
rfl
variables {k}
/-- The weights in the centroid sum to 1, if the number of points,
converted to `k`, is not zero. -/
lemma sum_centroid_weights_eq_one_of_cast_card_ne_zero (h : (card s : k) ≠ 0) :
∑ i in s, s.centroid_weights k i = 1 :=
by simp [h]
variables (k)
/-- In the characteristic zero case, the weights in the centroid sum
to 1 if the number of points is not zero. -/
lemma sum_centroid_weights_eq_one_of_card_ne_zero [char_zero k] (h : card s ≠ 0) :
∑ i in s, s.centroid_weights k i = 1 :=
by simp [h]
/-- In the characteristic zero case, the weights in the centroid sum
to 1 if the set is nonempty. -/
lemma sum_centroid_weights_eq_one_of_nonempty [char_zero k] (h : s.nonempty) :
∑ i in s, s.centroid_weights k i = 1 :=
s.sum_centroid_weights_eq_one_of_card_ne_zero k (ne_of_gt (card_pos.2 h))
/-- In the characteristic zero case, the weights in the centroid sum
to 1 if the number of points is `n + 1`. -/
lemma sum_centroid_weights_eq_one_of_card_eq_add_one [char_zero k] {n : ℕ}
(h : card s = n + 1) : ∑ i in s, s.centroid_weights k i = 1 :=
s.sum_centroid_weights_eq_one_of_card_ne_zero k (h.symm ▸ nat.succ_ne_zero n)
include V
/-- The centroid of some points. Although defined for any `s`, this
is intended to be used in the case where the number of points,
converted to `k`, is not zero. -/
def centroid (p : ι → P) : P :=
s.affine_combination p (s.centroid_weights k)
/-- The definition of the centroid. -/
lemma centroid_def (p : ι → P) :
s.centroid k p = s.affine_combination p (s.centroid_weights k) :=
rfl
lemma centroid_univ (s : finset P) :
univ.centroid k (coe : s → P) = s.centroid k id :=
by { rw [centroid, centroid, ← s.attach_affine_combination_coe], congr, ext, simp, }
/-- The centroid of a single point. -/
@[simp] lemma centroid_singleton (p : ι → P) (i : ι) :
({i} : finset ι).centroid k p = p i :=
by simp [centroid_def, affine_combination_apply]
/-- The centroid of two points, expressed directly as adding a vector
to a point. -/
lemma centroid_insert_singleton [invertible (2 : k)] (p : ι → P) (i₁ i₂ : ι) :
({i₁, i₂} : finset ι).centroid k p = (2 ⁻¹ : k) • (p i₂ -ᵥ p i₁) +ᵥ p i₁ :=
begin
by_cases h : i₁ = i₂,
{ simp [h] },
{ have hc : (card ({i₁, i₂} : finset ι) : k) ≠ 0,
{ rw [card_insert_of_not_mem (not_mem_singleton.2 h), card_singleton],
norm_num,
exact nonzero_of_invertible _ },
rw [centroid_def,
affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one _ _ _
(sum_centroid_weights_eq_one_of_cast_card_ne_zero _ hc) (p i₁)],
simp [h],
norm_num }
end
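-- Interpretation: over `k = ℝ` this is exactly the midpoint formula, i.e.
-- the centroid of two distinct points is the point halfway along the
-- segment from `p i₁` to `p i₂`.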
/-- The centroid of two points indexed by `fin 2`, expressed directly
as adding a vector to the first point. -/
lemma centroid_insert_singleton_fin [invertible (2 : k)] (p : fin 2 → P) :
univ.centroid k p = (2 ⁻¹ : k) • (p 1 -ᵥ p 0) +ᵥ p 0 :=
begin
rw univ_fin2,
convert centroid_insert_singleton k p 0 1
end
/-- A centroid, over the image of an embedding, equals a centroid with
the same points and weights over the original `finset`. -/
lemma centroid_map (e : ι₂ ↪ ι) (p : ι → P) : (s₂.map e).centroid k p = s₂.centroid k (p ∘ e) :=
by simp [centroid_def, affine_combination_map, centroid_weights]
omit V
/-- `centroid_weights` gives the weights for the centroid as a
constant function, which is suitable when summing over the points
whose centroid is being taken. This function gives the weights in a
form suitable for summing over a larger set of points, as an indicator
function that is zero outside the set whose centroid is being taken.
In the case of a `fintype`, the sum may be over `univ`. -/
def centroid_weights_indicator : ι → k := set.indicator ↑s (s.centroid_weights k)
/-- The definition of `centroid_weights_indicator`. -/
lemma centroid_weights_indicator_def :
s.centroid_weights_indicator k = set.indicator ↑s (s.centroid_weights k) :=
rfl
/-- The sum of the weights for the centroid indexed by a `fintype`. -/
lemma sum_centroid_weights_indicator [fintype ι] :
∑ i, s.centroid_weights_indicator k i = ∑ i in s, s.centroid_weights k i :=
(set.sum_indicator_subset _ (subset_univ _)).symm
/-- In the characteristic zero case, the weights in the centroid
indexed by a `fintype` sum to 1 if the number of points is not
zero. -/
lemma sum_centroid_weights_indicator_eq_one_of_card_ne_zero [char_zero k] [fintype ι]
(h : card s ≠ 0) : ∑ i, s.centroid_weights_indicator k i = 1 :=
begin
rw sum_centroid_weights_indicator,
exact s.sum_centroid_weights_eq_one_of_card_ne_zero k h
end
/-- In the characteristic zero case, the weights in the centroid
indexed by a `fintype` sum to 1 if the set is nonempty. -/
lemma sum_centroid_weights_indicator_eq_one_of_nonempty [char_zero k] [fintype ι]
(h : s.nonempty) : ∑ i, s.centroid_weights_indicator k i = 1 :=
begin
rw sum_centroid_weights_indicator,
exact s.sum_centroid_weights_eq_one_of_nonempty k h
end
/-- In the characteristic zero case, the weights in the centroid
indexed by a `fintype` sum to 1 if the number of points is `n + 1`. -/
lemma sum_centroid_weights_indicator_eq_one_of_card_eq_add_one [char_zero k] [fintype ι] {n : ℕ}
(h : card s = n + 1) : ∑ i, s.centroid_weights_indicator k i = 1 :=
begin
rw sum_centroid_weights_indicator,
exact s.sum_centroid_weights_eq_one_of_card_eq_add_one k h
end
include V
/-- The centroid as an affine combination over a `fintype`. -/
lemma centroid_eq_affine_combination_fintype [fintype ι] (p : ι → P) :
s.centroid k p = univ.affine_combination p (s.centroid_weights_indicator k) :=
affine_combination_indicator_subset _ _ (subset_univ _)
/-- An indexed family of points that is injective on the given
`finset` has the same centroid as the image of that `finset`. This is
stated in terms of a set equal to the image to provide control of
definitional equality for the index type used for the centroid of the
image. -/
lemma centroid_eq_centroid_image_of_inj_on {p : ι → P} (hi : ∀ i j ∈ s, p i = p j → i = j)
{ps : set P} [fintype ps] (hps : ps = p '' ↑s) :
s.centroid k p = (univ : finset ps).centroid k (λ x, x) :=
begin
let f : p '' ↑s → ι := λ x, x.property.some,
have hf : ∀ x, f x ∈ s ∧ p (f x) = x := λ x, x.property.some_spec,
let f' : ps → ι := λ x, f ⟨x, hps ▸ x.property⟩,
have hf' : ∀ x, f' x ∈ s ∧ p (f' x) = x := λ x, hf ⟨x, hps ▸ x.property⟩,
have hf'i : function.injective f',
{ intros x y h,
rw [subtype.ext_iff, ←(hf' x).2, ←(hf' y).2, h] },
let f'e : ps ↪ ι := ⟨f', hf'i⟩,
have hu : finset.univ.map f'e = s,
{ ext x,
rw mem_map,
split,
{ rintros ⟨i, _, rfl⟩,
exact (hf' i).1 },
{ intro hx,
use [⟨p x, hps.symm ▸ set.mem_image_of_mem _ hx⟩, mem_univ _],
refine hi _ _ (hf' _).1 hx _,
rw (hf' _).2,
refl } },
rw [←hu, centroid_map],
congr' with x,
change p (f' x) = ↑x,
rw (hf' x).2
end
/-- Two indexed families of points that are injective on the given
`finset`s and with the same points in the image of those `finset`s
have the same centroid. -/
lemma centroid_eq_of_inj_on_of_image_eq {p : ι → P} (hi : ∀ i j ∈ s, p i = p j → i = j)
{p₂ : ι₂ → P} (hi₂ : ∀ i j ∈ s₂, p₂ i = p₂ j → i = j) (he : p '' ↑s = p₂ '' ↑s₂) :
s.centroid k p = s₂.centroid k p₂ :=
by rw [s.centroid_eq_centroid_image_of_inj_on k hi rfl,
s₂.centroid_eq_centroid_image_of_inj_on k hi₂ he]
end finset
section affine_space'
variables {k : Type*} {V : Type*} {P : Type*} [ring k] [add_comm_group V] [module k V]
[affine_space V P]
variables {ι : Type*}
include V
/-- A `weighted_vsub` with sum of weights 0 is in the `vector_span` of
an indexed family. -/
lemma weighted_vsub_mem_vector_span {s : finset ι} {w : ι → k}
(h : ∑ i in s, w i = 0) (p : ι → P) :
s.weighted_vsub p w ∈ vector_span k (set.range p) :=
begin
rcases is_empty_or_nonempty ι with hι|⟨⟨i0⟩⟩,
{ resetI, simp [finset.eq_empty_of_is_empty s] },
{ rw [vector_span_range_eq_span_range_vsub_right k p i0, ←set.image_univ,
finsupp.mem_span_image_iff_total,
finset.weighted_vsub_eq_weighted_vsub_of_point_of_sum_eq_zero s w p h (p i0),
finset.weighted_vsub_of_point_apply],
let w' := set.indicator ↑s w,
have hwx : ∀ i, w' i ≠ 0 → i ∈ s := λ i, set.mem_of_indicator_ne_zero,
use [finsupp.on_finset s w' hwx, set.subset_univ _],
rw [finsupp.total_apply, finsupp.on_finset_sum hwx],
{ apply finset.sum_congr rfl,
intros i hi,
simp [w', set.indicator_apply, if_pos hi] },
{ exact λ _, zero_smul k _ } },
end
/-- An `affine_combination` with sum of weights 1 is in the
`affine_span` of an indexed family, if the underlying ring is
nontrivial. -/
lemma affine_combination_mem_affine_span [nontrivial k] {s : finset ι} {w : ι → k}
(h : ∑ i in s, w i = 1) (p : ι → P) :
s.affine_combination p w ∈ affine_span k (set.range p) :=
begin
have hnz : ∑ i in s, w i ≠ 0 := h.symm ▸ one_ne_zero,
have hn : s.nonempty := finset.nonempty_of_sum_ne_zero hnz,
cases hn with i1 hi1,
let w1 : ι → k := function.update (function.const ι 0) i1 1,
have hw1 : ∑ i in s, w1 i = 1,
{ rw [finset.sum_update_of_mem hi1, finset.sum_const_zero, add_zero] },
have hw1s : s.affine_combination p w1 = p i1 :=
s.affine_combination_of_eq_one_of_eq_zero w1 p hi1 (function.update_same _ _ _)
(λ _ _ hne, function.update_noteq hne _ _),
have hv : s.affine_combination p w -ᵥ p i1 ∈ (affine_span k (set.range p)).direction,
{ rw [direction_affine_span, ←hw1s, finset.affine_combination_vsub],
apply weighted_vsub_mem_vector_span,
simp [pi.sub_apply, h, hw1] },
rw ←vsub_vadd (s.affine_combination p w) (p i1),
exact affine_subspace.vadd_mem_of_mem_direction hv (mem_affine_span k (set.mem_range_self _))
end
variables (k) {V}
/-- A vector is in the `vector_span` of an indexed family if and only
if it is a `weighted_vsub` with sum of weights 0. -/
lemma mem_vector_span_iff_eq_weighted_vsub {v : V} {p : ι → P} :
v ∈ vector_span k (set.range p) ↔
∃ (s : finset ι) (w : ι → k) (h : ∑ i in s, w i = 0), v = s.weighted_vsub p w :=
begin
split,
{ rcases is_empty_or_nonempty ι with hι|⟨⟨i0⟩⟩, swap,
{ rw [vector_span_range_eq_span_range_vsub_right k p i0, ←set.image_univ,
finsupp.mem_span_image_iff_total],
rintros ⟨l, hl, hv⟩,
use insert i0 l.support,
set w := (l : ι → k) -
function.update (function.const ι 0 : ι → k) i0 (∑ i in l.support, l i) with hwdef,
use w,
have hw : ∑ i in insert i0 l.support, w i = 0,
{ rw hwdef,
simp_rw [pi.sub_apply, finset.sum_sub_distrib,
finset.sum_update_of_mem (finset.mem_insert_self _ _), finset.sum_const_zero,
finset.sum_insert_of_eq_zero_if_not_mem finsupp.not_mem_support_iff.1,
add_zero, sub_self] },
use hw,
have hz : w i0 • (p i0 -ᵥ p i0 : V) = 0 := (vsub_self (p i0)).symm ▸ smul_zero _,
change (λ i, w i • (p i -ᵥ p i0 : V)) i0 = 0 at hz,
rw [finset.weighted_vsub_eq_weighted_vsub_of_point_of_sum_eq_zero _ w p hw (p i0),
finset.weighted_vsub_of_point_apply, ←hv, finsupp.total_apply,
finset.sum_insert_zero hz],
change ∑ i in l.support, l i • _ = _,
congr' with i,
by_cases h : i = i0,
{ simp [h] },
{ simp [hwdef, h] } },
{ resetI,
rw [set.range_eq_empty, vector_span_empty, submodule.mem_bot],
rintro rfl,
use [∅],
simp } },
{ rintros ⟨s, w, hw, rfl⟩,
exact weighted_vsub_mem_vector_span hw p }
end
variables {k}
/-- A point in the `affine_span` of an indexed family is an
`affine_combination` with sum of weights 1. See also
`eq_affine_combination_of_mem_affine_span_of_fintype`. -/
lemma eq_affine_combination_of_mem_affine_span {p1 : P} {p : ι → P}
(h : p1 ∈ affine_span k (set.range p)) :
∃ (s : finset ι) (w : ι → k) (hw : ∑ i in s, w i = 1), p1 = s.affine_combination p w :=
begin
have hn : ((affine_span k (set.range p)) : set P).nonempty := ⟨p1, h⟩,
rw [affine_span_nonempty, set.range_nonempty_iff_nonempty] at hn,
cases hn with i0,
have h0 : p i0 ∈ affine_span k (set.range p) := mem_affine_span k (set.mem_range_self i0),
have hd : p1 -ᵥ p i0 ∈ (affine_span k (set.range p)).direction :=
affine_subspace.vsub_mem_direction h h0,
rw [direction_affine_span, mem_vector_span_iff_eq_weighted_vsub] at hd,
rcases hd with ⟨s, w, h, hs⟩,
let s' := insert i0 s,
let w' := set.indicator ↑s w,
have h' : ∑ i in s', w' i = 0,
{ rw [←h, set.sum_indicator_subset _ (finset.subset_insert i0 s)] },
have hs' : s'.weighted_vsub p w' = p1 -ᵥ p i0,
{ rw hs,
exact (finset.weighted_vsub_indicator_subset _ _ (finset.subset_insert i0 s)).symm },
let w0 : ι → k := function.update (function.const ι 0) i0 1,
have hw0 : ∑ i in s', w0 i = 1,
{ rw [finset.sum_update_of_mem (finset.mem_insert_self _ _), finset.sum_const_zero, add_zero] },
have hw0s : s'.affine_combination p w0 = p i0 :=
s'.affine_combination_of_eq_one_of_eq_zero w0 p
(finset.mem_insert_self _ _)
(function.update_same _ _ _)
(λ _ _ hne, function.update_noteq hne _ _),
use [s', w0 + w'],
split,
{ simp [pi.add_apply, finset.sum_add_distrib, hw0, h'] },
{ rw [add_comm, ←finset.weighted_vsub_vadd_affine_combination, hw0s, hs', vsub_vadd] }
end
lemma eq_affine_combination_of_mem_affine_span_of_fintype [fintype ι] {p1 : P} {p : ι → P}
(h : p1 ∈ affine_span k (set.range p)) :
∃ (w : ι → k) (hw : ∑ i, w i = 1), p1 = finset.univ.affine_combination p w :=
begin
obtain ⟨s, w, hw, rfl⟩ := eq_affine_combination_of_mem_affine_span h,
refine ⟨(s : set ι).indicator w, _, finset.affine_combination_indicator_subset w p s.subset_univ⟩,
simp only [finset.mem_coe, set.indicator_apply, ← hw],
convert fintype.sum_extend_by_zero s w,
ext i,
congr,
end
variables (k V)
/-- A point is in the `affine_span` of an indexed family if and only
if it is an `affine_combination` with sum of weights 1, provided the
underlying ring is nontrivial. -/
lemma mem_affine_span_iff_eq_affine_combination [nontrivial k] {p1 : P} {p : ι → P} :
p1 ∈ affine_span k (set.range p) ↔
∃ (s : finset ι) (w : ι → k) (hw : ∑ i in s, w i = 1), p1 = s.affine_combination p w :=
begin
split,
{ exact eq_affine_combination_of_mem_affine_span },
{ rintros ⟨s, w, hw, rfl⟩,
exact affine_combination_mem_affine_span hw p }
end
/-- Given a family of points together with a chosen base point in that family, membership of the
affine span of this family corresponds to an identity in terms of `weighted_vsub_of_point`, with
weights that are not required to sum to 1. -/
lemma mem_affine_span_iff_eq_weighted_vsub_of_point_vadd
[nontrivial k] (p : ι → P) (j : ι) (q : P) :
q ∈ affine_span k (set.range p) ↔
∃ (s : finset ι) (w : ι → k), q = s.weighted_vsub_of_point p (p j) w +ᵥ (p j) :=
begin
split,
{ intros hq,
obtain ⟨s, w, hw, rfl⟩ := eq_affine_combination_of_mem_affine_span hq,
exact ⟨s, w, s.affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one w p hw (p j)⟩, },
{ rintros ⟨s, w, rfl⟩,
classical,
let w' : ι → k := function.update w j (1 - (s \ {j}).sum w),
have h₁ : (insert j s).sum w' = 1,
{ by_cases hj : j ∈ s,
{ simp [finset.sum_update_of_mem hj, finset.insert_eq_of_mem hj], },
{ simp [w', finset.sum_insert hj, finset.sum_update_of_not_mem hj, hj], }, },
have hww : ∀ i, i ≠ j → w i = w' i, { intros i hij, simp [w', hij], },
rw [s.weighted_vsub_of_point_eq_of_weights_eq p j w w' hww,
← s.weighted_vsub_of_point_insert w' p j,
← (insert j s).affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one w' p h₁ (p j)],
exact affine_combination_mem_affine_span h₁ p, },
end
variables {k V}
/-- Given a set of points, together with a chosen base point in this set, if we affinely transport
all other members of the set along the line joining them to this base point, the affine span is
unchanged. -/
lemma affine_span_eq_affine_span_line_map_units [nontrivial k]
{s : set P} {p : P} (hp : p ∈ s) (w : s → units k) :
affine_span k (set.range (λ (q : s), affine_map.line_map p ↑q (w q : k))) = affine_span k s :=
begin
have : s = set.range (coe : s → P), { simp, },
conv_rhs { rw this, },
apply le_antisymm;
intros q hq;
erw mem_affine_span_iff_eq_weighted_vsub_of_point_vadd k V _ (⟨p, hp⟩ : s) q at hq ⊢;
obtain ⟨t, μ, rfl⟩ := hq;
use t;
[use λ x, (μ x) * ↑(w x), use λ x, (μ x) * ↑(w x)⁻¹];
simp [smul_smul],
end
end affine_space'
section division_ring
variables {k : Type*} {V : Type*} {P : Type*} [division_ring k] [add_comm_group V] [module k V]
variables [affine_space V P] {ι : Type*}
include V
open set finset
/-- The centroid lies in the affine span if the number of points,
converted to `k`, is not zero. -/
lemma centroid_mem_affine_span_of_cast_card_ne_zero {s : finset ι} (p : ι → P)
(h : (card s : k) ≠ 0) : s.centroid k p ∈ affine_span k (range p) :=
affine_combination_mem_affine_span (s.sum_centroid_weights_eq_one_of_cast_card_ne_zero h) p
variables (k)
/-- In the characteristic zero case, the centroid lies in the affine
span if the number of points is not zero. -/
lemma centroid_mem_affine_span_of_card_ne_zero [char_zero k] {s : finset ι} (p : ι → P)
(h : card s ≠ 0) : s.centroid k p ∈ affine_span k (range p) :=
affine_combination_mem_affine_span (s.sum_centroid_weights_eq_one_of_card_ne_zero k h) p
/-- In the characteristic zero case, the centroid lies in the affine
span if the set is nonempty. -/
lemma centroid_mem_affine_span_of_nonempty [char_zero k] {s : finset ι} (p : ι → P)
(h : s.nonempty) : s.centroid k p ∈ affine_span k (range p) :=
affine_combination_mem_affine_span (s.sum_centroid_weights_eq_one_of_nonempty k h) p
/-- In the characteristic zero case, the centroid lies in the affine
span if the number of points is `n + 1`. -/
lemma centroid_mem_affine_span_of_card_eq_add_one [char_zero k] {s : finset ι} (p : ι → P)
{n : ℕ} (h : card s = n + 1) : s.centroid k p ∈ affine_span k (range p) :=
affine_combination_mem_affine_span (s.sum_centroid_weights_eq_one_of_card_eq_add_one k h) p
end division_ring
namespace affine_map
variables {k : Type*} {V : Type*} (P : Type*) [comm_ring k] [add_comm_group V] [module k V]
variables [affine_space V P] {ι : Type*} (s : finset ι)
include V
-- TODO: define `affine_map.proj`, `affine_map.fst`, `affine_map.snd`
/-- A weighted sum, as an affine map on the points involved. -/
def weighted_vsub_of_point (w : ι → k) : ((ι → P) × P) →ᵃ[k] V :=
{ to_fun := λ p, s.weighted_vsub_of_point p.fst p.snd w,
linear := ∑ i in s,
w i • ((linear_map.proj i).comp (linear_map.fst _ _ _) - linear_map.snd _ _ _),
map_vadd' := begin
rintros ⟨p, b⟩ ⟨v, b'⟩,
simp [linear_map.sum_apply, finset.weighted_vsub_of_point, vsub_vadd_eq_vsub_sub,
vadd_vsub_assoc, add_sub, ← sub_add_eq_add_sub, smul_add, finset.sum_add_distrib]
end }
end affine_map
|
[STATEMENT]
lemma list_slice_nth_nth: "
\<lbrakk> m < length xs div k; n < k \<rbrakk> \<Longrightarrow>
(list_slice xs k) ! m ! n = xs ! (m * k + n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>m < length xs div k; n < k\<rbrakk> \<Longrightarrow> list_slice xs k ! m ! n = xs ! (m * k + n)
[PROOF STEP]
apply (frule list_slice_nth_length[of m xs k])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>m < length xs div k; n < k; length (list_slice xs k ! m) = k\<rbrakk> \<Longrightarrow> list_slice xs k ! m ! n = xs ! (m * k + n)
[PROOF STEP]
apply (simp add: list_slice_nth)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
#!/usr/bin/env julia
##########################################################################
# run_SNR.jl
#
# Run all SNR simulations.
##########################################################################
include("run_SNR-ibc.jl")
include("run_SNR-indoors.jl")
include("run_SNR-randomlargescale.jl")
include("run_SNR-triangularmacro.jl")
include("run_SNR-triangularhetnet.jl")
|
module Apply
-- These are not Biapplicatives. Those are in Data.Biapplicative
import Bifunctor
infixl 4 <<$>>, <<&>>, <<.>>, <<., .>>, <<..>>
||| Primarily used to make the definitions of bilift2 and bilift3 pretty
|||
||| ```idris example
||| bimap const const <<$>> (1, 2) <<.>> (3, 4) == (1, 2)
||| ```
|||
export
(<<$>>) : (a -> b) -> a -> b
(<<$>>) = id
||| <<$>> with the arguments reversed
|||
||| ```idris example
||| (1, 2) <<&>> bimap const const <<.>> (3, 4) == (1, 2)
||| ```
|||
(<<&>>) : a -> (a -> b) -> b
(<<&>>) = flip id
||| Biapplys (not to be confused with Biapplicatives)
||| @p The action of the Biapply on pairs of objects
public export
interface Bifunctor p => Biapply (p : Type -> Type -> Type) where
||| Applys a Bifunctor of functions to another Bifunctor of the same type
|||
||| ````idris example
||| (reverse, (\x => x + 1)) <<.>> ("hello", 1) == ("olleh", 2)
||| ````
|||
(<<.>>) : p (a -> b) (c -> d) -> p a c -> p b d
||| Given two Bifunctors, sequences them leftwards
|||
||| ````idris example
||| ("hello", 1) <<. ("goodbye", 2) == ("hello", 1)
||| ````
|||
(<<.) : p a b -> p c d -> p a b
a <<. b = bimap const const <<$>> a <<.>> b
||| Given two Bifunctors, sequences them rightwards
|||
||| ````idris example
||| ("hello", 1) <<. ("goodbye", 2) == ("goodbye", 2)
||| ````
|||
(.>>) : p a b -> p c d -> p c d
a .>> b = bimap (const id) (const id) <<$>> a <<.>> b
||| Lifts a pair of binary functions into a Bifunctor
|||
||| ````idris example
||| bilift2 (++) (+) ("hello", 1) ("goodbye", 2) == ("hellogoodbye", 3)
||| ````
|||
bilift2 : Biapply p => (a -> b -> c) -> (d -> e -> f) -> p a d -> p b e -> p c f
bilift2 f g a b = bimap f g <<$>> a <<.>> b
||| Lifts a pair of ternary functions into a Bifunctor
|||
||| ````idris example
||| bilift3 (\x,y,z => x ++ (y ++ z)) (\x,y,z => x + (y + z))
||| ("hello", 1) ("goodbye", 2) ("hello again", 3) ==
||| ("hellogoodbyehello again", 6)
||| ````
|||
bilift3 : Biapply p => (a -> b -> c -> d) -> (e -> f -> g -> h)
-> p a e -> p b f -> p c g -> p d h
bilift3 f g a b c = bimap f g <<$>> a <<.>> b <<.>> c
||| Applies the second of two Bifunctors to the first
|||
||| ````idris example
||| ("hello", 1) <<..>> (reverse, (\x => x + 1)) == ("olleh", 2)
||| ````
|||
(<<..>>) : Biapply p => p a c -> p (a -> b) (c -> d) -> p b d
(<<..>>) = flip (<<.>>)
implementation Biapply Pair where
(f, g) <<.>> (a, b) = (f a, g b)
|
theory nat_acc_plus_comm
imports Main
"$HIPSTER_HOME/IsaHipster"
begin
datatype Nat = Z | S "Nat"
fun accplus :: "Nat => Nat => Nat" where
"accplus (Z) y = y"
| "accplus (S z) y = accplus z (S y)"
(*hipster accplus *)
theorem x0 :
"!! (x :: Nat) (y :: Nat) . (accplus x y) = (accplus y x)"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)
end
|
(*noq5.v
20180622Z
jpt4
Nock5 in Coq*)
Inductive noun :=
| atom : nat -> noun
| cell : noun -> noun -> noun.
Definition get_atom (a:noun) : nat :=
match a with
| atom a' => a'
| cell _ _ => 0
end.
Compute get_atom (atom 5).
Compute (cell (cell (atom 6) (atom 11)) (atom 10)).
Notation "'%' x" := (atom x)
(at level 51, right associativity).
Notation "'[' x y ']'" :=
(cell x y)
(at level 50, right associativity).
Check [%3 %4].
Check [[%3 [%10 %11]] %2].
Compute 4 + 5 + 6 + 8.
(* The remainder of the file was left unfinished and used names (`Noun`, `a`,
   `c`) that do not match the definitions above (`noun`, `atom`, `cell`).
   Below is a minimal runnable completion; the alias `nock := wut` is an
   assumption about the intended shape. *)
Inductive wut :=
  wt : noun -> wut.
Compute wt (atom 5).
Definition nock := wut.
Definition nock5 (n:nock) : noun :=
  match n with
  | wt (cell a' b') => (atom 0)
  | wt (atom n') => (atom 1)
  end.
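(* Sanity checks for the completion above (using the notations defined
   earlier in the file): *)
Compute nock5 (wt [%1 %2]). (* cell case: evaluates to (atom 0) *)
Compute nock5 (wt (%7)).    (* atom case: evaluates to (atom 1) *)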
|
\\ Test the FFI - importing dynamic libraries
IMPORT ./d.dylib
1 addFunction 2
IMPORTEND
: test ( -- )
100 99 addFunction .
; |
plus2 : Nat -> Nat
plus2 = S . S
okplus2Injective : (x, y : Nat) -> Equal (plus2 x) (plus2 y) -> Equal x y
okplus2Injective Z Z Refl = Refl
okplus2Injective (S n) (S n) Refl = Refl
okplus2Injective Z (S _) _ impossible
okplus2Injective (S _) Z _ impossible
badplus2Injective : (x, y : Nat) -> Equal (plus2 x) (plus2 y) -> Equal x y
badplus2Injective Z Z Refl = Refl
badplus2Injective Z (S _) _ impossible
badplus2Injective (S _) Z _ impossible
badplus2Injective (S n) (S n) Refl = Refl
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeOperators #-}
module Grenade.Layers.Internal.BLAS
( -- easy interface
matXVec
, outerV
, checkVectors
, unsafeMemCopyVectorFromTo
, memCopyVectorFromTo
, unsafeMemZero
, memZero
-- more complicated, but direct, function calls
, BlasTranspose (..)
, swapTranspose
, dgemmUnsafe
, dgemvUnsafe
, dgerUnsafe
) where
import Control.Monad
import Data.IORef
import Data.Proxy
import qualified Data.Vector.Storable as V
import Foreign (withForeignPtr)
import Foreign.C.Types
import Foreign.Ptr
import Foreign.Storable (sizeOf)
import GHC.IO.Handle.Text (memcpy)
import GHC.TypeLits
import qualified Numeric.LinearAlgebra as LA
import qualified Numeric.LinearAlgebra.Static as LAS
import System.IO.Unsafe (unsafePerformIO)
import Grenade.Layers.Internal.CUDA
import Grenade.Types
import Grenade.Utils.Vector
import Debug.Trace
#define USE_DGEMM_ONLY 0
-- | Computes vec2 <- mat * vec1 + beta * vec2.
matXVec :: BlasTranspose -> V.Vector RealNum -> V.Vector RealNum -> RealNum -> V.Vector RealNum -> IO (V.Vector RealNum)
matXVec trMat mat vec1 beta vec2 =
#if USE_DGEMM_ONLY
dgemmUnsafe trMat BlasNoTranspose (m, k) (ay, 1) 1.0 mat vec1 beta vec2
# else
dgemvUnsafe trMat (m, k) 1.0 mat vec1 beta vec2
#endif
where
ay = V.length vec1
ax = V.length vec2
(m, k) = swapTranspose trMat (ax, ay)
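-- Usage sketch (comment only; assumes RealNum ~ Double, i.e. USE_FLOAT off,
-- and column-major storage, as suggested by the LDA arguments passed to the
-- BLAS shims below):
--
--   mat  = V.fromList [1,3,2,4]   -- the 2x2 matrix [[1,2],[3,4]]
--   vec1 = V.fromList [1,1]
--   matXVec BlasNoTranspose mat vec1 0 (V.fromList [0,0])  -- yields [3,7]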
-- | Computes the outer product of two vectors: mat <- vec1 `outer` vec2
outerV :: V.Vector RealNum -> V.Vector RealNum -> IO (V.Vector RealNum)
outerV vec1 vec2 =
#if USE_DGEMM_ONLY
dgemmUnsafe BlasNoTranspose BlasNoTranspose (o, 1) (1, i) 1.0 vec1 vec2 0 (createVectorUnsafe (i * o)) -- beta = 0 initialises the matrix
#else
createVector (i * o) >>= memZero >>= dgerUnsafe (o, i) 1.0 vec1 vec2
#endif
where o = V.length vec1
i = V.length vec2
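-- Sketch: outerV [1,2] [3,4,5] describes the 2x3 matrix
--   [[3,4,5],[6,8,10]]
-- stored column-major as [3,6,4,8,5,10] (the storage order is an assumption
-- based on the LDA arguments used in the shims below).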
-- | Check two vectors if they are equal. For testing purposes.
checkVectors :: V.Vector RealNum -> V.Vector RealNum -> Bool
checkVectors v1 v2 = V.length v1 == V.length v2 && and (zipWith (==) (toStr v1) (toStr v2))
where
toStr :: V.Vector RealNum -> [String]
toStr v = map (show . round . (*10^5)) $ V.toList v
{-# INLINE checkVectors #-}
-- | Newtype holding CINT for Transpose values.
newtype CBLAS_TRANSPOSET =
CBLAS_TransposeT CInt
deriving (Eq, Show)
-- | Transpose values
data BlasTranspose
= BlasNoTranspose
| BlasTranspose
| BlasConjTranspose
| BlasConjNoTranspose
deriving (Eq, Show)
encodeTransposeIntBool :: BlasTranspose -> Int
encodeTransposeIntBool BlasNoTranspose = 0
encodeTransposeIntBool BlasTranspose = 1
encodeTransposeIntBool BlasConjTranspose = 1
encodeTransposeIntBool BlasConjNoTranspose = 0
{-# INLINE encodeTransposeIntBool #-}
swapTranspose :: BlasTranspose -> (Int, Int) -> (Int, Int)
swapTranspose BlasNoTranspose x = x
swapTranspose BlasTranspose (a, b) = (b, a)
swapTranspose BlasConjNoTranspose x = x
swapTranspose BlasConjTranspose (a, b) = (b, a)
{-# INLINE swapTranspose #-}
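-- e.g. swapTranspose BlasTranspose (2, 3) == (3, 2); the no-transpose
-- variants leave the dimension pair untouched.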
-- | Error text
mkDimText :: (Show a1, Show a2, Show a3, Show a4, Show a5, Show a6) => (a1, a2) -> (a3, a4) -> (a5, a6) -> String
mkDimText (ax, ay) (bx, by) (cx, cy) = "resulting dimensions: [" ++ show ax ++ "x" ++ show ay ++ "]*[" ++ show bx ++ "x" ++ show by ++ "]=[" ++ show cx ++ "x" ++ show cy ++ "]"
-- | Computes: C <- alpha*op( A )*op( B ) + beta*C, where op(X) may transpose the matrix X
--
-- dgemm, see http://www.netlib.org/lapack/explore-html/d1/d54/group__double__blas__level3_gaeda3cbd99c8fb834a60a6412878226e1.html for the documentation.
--
-- void cblas_dgemm (
-- const CBLAS_LAYOUT layout,
-- const CBLAS_TRANSPOSE TransA,
-- const CBLAS_TRANSPOSE TransB,
-- const int M,
-- const int N,
-- const int K,
-- const double alpha,
-- const double * A,
-- const int lda,
-- const double * B,
-- const int ldb,
-- const double beta,
-- double * C,
-- const int ldc
-- )
{-# NOINLINE dgemmUnsafe #-}
dgemmUnsafe :: BlasTranspose -- ^ Transpose Matrix A
-> BlasTranspose -- ^ Transpose Matrix B
-> (Int, Int) -- ^ Rows and cols of A on entry (not transposed)
-> (Int, Int) -- ^ Rows and Cols of B on entry (not transposed)
-> RealNum -- ^ Alpha
-> V.Vector RealNum -- ^ A
-> V.Vector RealNum -- ^ B
-> RealNum -- ^ Beta
-> V.Vector RealNum -- ^ C
-> IO (V.Vector RealNum) -- ^ Return new C
dgemmUnsafe trA trB (axIn, ayIn) (bxIn, byIn) alpha matrixA matrixB beta matrixC
| isBadGemm =
error $!
"bad dimension args to dgemmUnsafe: ax ay bx by cx cy: " ++
show [ax, ay, bx, by, ax, by] ++ " matrix C length: " ++ show (V.length matrixC) ++ "\n\t" ++ mkDimText (ax, ay) (bx, by) (ax, by)
| otherwise = do
V.unsafeWith matrixA $ \aPtr' ->
V.unsafeWith matrixB $ \bPtr' ->
V.unsafeWith matrixC $ \cPtr' -> do
#ifdef USE_FLOAT
sgemm_direct
#else
dgemm_direct
#endif
(encodeTransposeIntBool trA) -- transpose A
(encodeTransposeIntBool trB) -- transpose B
(fromIntegral ax) -- rows of C = rows of A transposed
(fromIntegral by) -- cols of C = cols of B transposed
(fromIntegral ay) -- k = cols of A transposed = rows of B transposed
alpha
aPtr'
(fromIntegral axIn) -- LDA
bPtr'
(fromIntegral bxIn) -- LDB
beta
cPtr'
(fromIntegral ax) -- LDC
return matrixC
where
(ax, ay) = swapTranspose trA (axIn, ayIn)
(bx, by) = swapTranspose trB (bxIn, byIn)
isBadGemm = minimum [ax, ay, bx, by] <= 0 || not (ax * by == V.length matrixC && ay == bx)
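-- Usage sketch (comment only; dimensions are given as on entry, i.e. before
-- any transposition): multiplying a 2x3 matrix A by a 3x2 matrix B into a
-- freshly zeroed 2x2 C with beta = 0:
--
--   dgemmUnsafe BlasNoTranspose BlasNoTranspose (2, 3) (3, 2)
--               1.0 a b 0.0 (V.replicate 4 0)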
-- | Computes: Y <- alpha*op( A )*X + beta*Y, where op(A) may transpose the matrix A
--
-- dgemv, see http://www.netlib.org/lapack/explore-html/d7/d15/group__double__blas__level2_gadd421a107a488d524859b4a64c1901a9.html#gadd421a107a488d524859b4a64c1901a9
--
-- void cblas_dgemv (
-- const CBLAS_LAYOUT layout,
-- const CBLAS_TRANSPOSE TransA,
-- const int M,
-- const int N,
-- const double alpha,
-- const double * A,
-- const int lda,
-- const double * X,
-- const int incX,
-- const double beta,
-- double * Y,
-- const int incY
-- )
{-# NOINLINE dgemvUnsafe #-}
dgemvUnsafe :: BlasTranspose -- ^ Transpose Matrix
-> (Int, Int) -- ^ rows and cols of A on entry (not transposed)
-> RealNum -- ^ Alpha
-> V.Vector RealNum -- ^ A
-> V.Vector RealNum -- ^ X
-> RealNum -- ^ Beta
-> V.Vector RealNum -- ^ C
-> IO (V.Vector RealNum) -- ^ Return new C
dgemvUnsafe trA (m, k) alpha matrixA vecX beta vecY
| ax /= V.length vecY || ay /= V.length vecX =
error $!
"bad dimension args to dgemvUnsafe: ax ay (length vecX) (length vecY): " ++
show [ax, ay, V.length vecX, V.length vecY] ++ " \n\t" ++ mkDimText (ax, ay) (V.length vecX, 1) (m, 1)
| otherwise = do
V.unsafeWith matrixA $ \aPtr' ->
V.unsafeWith vecX $ \xPtr' ->
V.unsafeWith vecY $ \yPtr' -> do
#ifdef USE_FLOAT
sgemv_direct
#else
dgemv_direct
#endif
(encodeTransposeIntBool trA) -- transpose A
(fromIntegral m)
(fromIntegral k)
alpha
aPtr'
(fromIntegral m)
xPtr'
1
beta
yPtr'
1
return vecY
where
(ax, ay) = swapTranspose trA (m, k)
-- | Computes: A <- alpha*X*Y^T + A
--
-- dger, see http://www.netlib.org/lapack/explore-html/d7/d15/group__double__blas__level2_ga458222e01b4d348e9b52b9343d52f828.html#ga458222e01b4d348e9b52b9343d52f828
--
-- void cblas_dger (
-- const CBLAS_LAYOUT layout,
-- const int M,
-- const int N,
-- const double alpha,
-- const double * X,
-- const int incX,
-- const double * Y,
-- const int incY,
-- double * A,
-- const int lda
-- )
{-# NOINLINE dgerUnsafe #-}
dgerUnsafe :: (Int, Int) -- ^ Dimensions of matrix A
-> RealNum -- ^ Alpha
-> V.Vector RealNum -- ^ X
-> V.Vector RealNum -- ^ C
-> V.Vector RealNum -- ^ A
-> IO (V.Vector RealNum) -- ^ Return new C
dgerUnsafe (ax, ay) alpha vecX vecY matrixA
| ax /= len || ay /= V.length vecY =
error $! "bad dimension args to dgerUnsafe: X Y ax ay: " ++ show [len, V.length vecY, ax, ay] ++ " \n\t" ++ mkDimText (ax, ay) (V.length vecX, 1) (V.length vecY, 1)
| otherwise = do
V.unsafeWith matrixA $ \aPtr' ->
V.unsafeWith vecX $ \xPtr' ->
V.unsafeWith vecY $ \yPtr' -> do
#ifdef USE_FLOAT
sger_direct
#else
dger_direct
#endif
(fromIntegral ax)
(fromIntegral ay)
alpha
xPtr'
1
yPtr'
1
aPtr'
(fromIntegral ax)
return matrixA
where
len = V.length vecX
-- | Matrix mult for general dense matrices
type BLASGemmFunFFI scale el
= Int -- transpose A: 1, not transpose A: 0
-> Int -- transpose B: 1, not transpose B: 0
-> CInt -- m
-> CInt -- n
-> CInt -- k
-> {- scal A * B -} scale -- alpha
-> {- Matrix A-} Ptr el -- A
-> CInt -- LDA
-> {- B -} Ptr el
-> CInt
-> scale -- beta
-> {- C -} Ptr el
-> CInt
-> IO ()
foreign import ccall unsafe "dgemm_direct" dgemm_direct :: BLASGemmFunFFI Double Double
foreign import ccall unsafe "sgemm_direct" sgemm_direct :: BLASGemmFunFFI Float Float
-- | Matrix-vector mult for general dense matrices
type BLASGemvFunFFI scale el
= Int -- transpose A: 1, not transpose A: 0
-> CInt -- m
-> CInt -- n
-> scale -- alpha
-> Ptr el -- Matrix A
-> CInt -- LDA
-> Ptr el
-> CInt
-> scale -- beta
-> Ptr el
-> CInt
-> IO ()
foreign import ccall unsafe "dgemv_direct" dgemv_direct :: BLASGemvFunFFI Double Double
foreign import ccall unsafe "sgemv_direct" sgemv_direct :: BLASGemvFunFFI Float Float
-- | Rank-1 update for general dense matrices (A <- alpha*X*Y^T + A)
type BlasGerxFunFFI scale el
= CInt -- m
-> CInt -- n
-> scale -- alpha
-> Ptr el -- X
-> CInt -- incX
-> Ptr el -- Y
-> CInt -- incY
-> Ptr el -- Matrix A
-> CInt -- LDA
-> IO ()
foreign import ccall unsafe "dger_direct" dger_direct :: BlasGerxFunFFI Double Double
foreign import ccall unsafe "sger_direct" sger_direct :: BlasGerxFunFFI Float Float
-- toRows :: Int -> V.Vector Double -> [V.Vector Double]
-- toRows m vec = LA.toRows . reshapeF m . LA.vector . V.toList $ vec
-- where reshapeF r = LA.tr' . LA.reshape r
-- vec1 :: LAS.R 10
-- vec1 = LAS.vector [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.34868882831497655, 0.0, 1.4026932193043212e-2]
-- -- dEdy:
-- -- mmCheck : [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-4.8711473950841355e-2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-1.9595481318788644e-3]
-- -- mm' : [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-0.24602759633121374,-1.2277801012906878e-2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-9.89711206989971e-3,-4.939070836308897e-4,0.0,0.0]
-- vec3 :: V.Vector Double
-- vec3 = (
-- V.fromList [0.0, 0.0, 0.0, 0.0, (-0.13969898085418284)])
-- vec2 :: LAS.R 5
-- vec2 = LAS.vector [0.0, 0.0, 0.0, 0.0, -0.13969898085418284]
-- res = vec1 `LAS.outer` vec2
-- test =
-- toRows 10 $
-- outerV (LAS.extract vec1) (LAS.extract vec2) (V.replicate (LAS.size vec1 * LAS.size vec2) 10)
|
\documentclass{article}
\usepackage{arxiv}
\input{_preamble}
\externaldocument{main}
\title{\textit{Supplementary Materials for}\\ Exploring High-Dimensional Biological Data with Sparse Contrastive Principal Component Analysis}
\author{
Philippe Boileau \\
Graduate Group in Biostatistics,\\
University of California, Berkeley\\
\texttt{philippe\[email protected]} \\
\And
Nima S.~Hejazi \\
Graduate Group in Biostatistics and\\
Center for Computational Biology,\\
University of California, Berkeley\\
\texttt{[email protected]} \\
\And
Sandrine Dudoit\\
Department of Statistics, \\
Division of Biostatistics, and\\
Center for Computational Biology,\\
University of California, Berkeley\\
\texttt{[email protected]} \\
}
\begin{document}\maketitle
\beginsupplement
\newpage
\section{Algorithm for scPCA}
\begin{algorithm}[!htbp]\label{algo1}
\dontprintsemicolon
\linesnotnumbered
\KwResult{Produces a sparse low-dimensional representation of the target data,
$\mathbf{X}_{n \times p}$, by contrasting the variation of
$\mathbf{X}_{n \times p}$ and some background data, $\mathbf{Y}_{m \times p}$,
while applying an $\ell_1$ penalty to the loadings generated by cPCA.}
\SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output}
\Input{
\begin{itemize}
\item[] target dataset: $\mathbf{X}$
\item[] background dataset: $\mathbf{Y}$
\item[] binary variable indicating whether to column-scale the data: \texttt{scale}
\item[] vector of possible contrastive parameters: $\gamma = (\gamma_1, \ldots, \gamma_s)$
\item[] vector of possible $\ell_1$ penalty parameters: $\lambda_1 = (\lambda_{1,1}, \ldots, \lambda_{1, d})$
\item[] number of sparse contrastive principal components to compute: $k$
\item[] clustering method: \texttt{cluster\_meth}
\item[] number of clusters: \texttt{ncluster}
\end{itemize}
}
\BlankLine
Center (and \texttt{scale} if so desired) the columns of $\mathbf{X}$, $\mathbf{Y}$ \;
Calculate the empirical covariance matrices:
$\mathbf{C_X}_{p \times p} \coloneqq \frac{1}{n}\mathbf{X}^\top\mathbf{X}, \;
\mathbf{C_Y}_{p \times p} \coloneqq \frac{1}{m}\mathbf{Y}^\top\mathbf{Y}$\;
\For{each $\gamma_i \in \gamma$}{
\For{each $\lambda_{1,j} \in \lambda_1$}{
Compute the contrastive covariance matrix $\mathbf{C}_{\gamma_i}=\mathbf{C_X} -
\gamma_i \mathbf{C_Y}$\;
Compute the positive-semidefinite approximation of $\mathbf{C}_{\gamma_i}$, $\widetilde{\mathbf{C}}_{\gamma_i}$\;
Apply SPCA to $\widetilde{\mathbf{C}}_{\gamma_i}$ for $k$ components with $\ell_1$ penalty $\lambda_{1, j}$\;
Generate a low-dimensional representation by projecting $\mathbf{X}_{n \times p}$ on the sparse loadings of SPCA\;
Normalize the low-dimensional representation produced to be on the unit hypercube\;
Cluster the normalized low-dimensional representation using \texttt{cluster\_meth} with \texttt{ncluster}\;
Compute and record the clustering strength criterion associated with
$(\gamma_i, \lambda_{1, j})$ \;
}
}
Identify the combination of hyperparameters maximizing the clustering strength
criterion: $\gamma^{\star},\: \lambda_1^{\star}$ \;
\Output{The low-dimensional representation of the target data given by
$(\gamma^{\star}, \lambda_1^{\star})$, an $n \times k$ matrix;
the $p \times k$ matrix of loadings given by
$(\gamma^{\star}, \lambda_1^{\star})$;
contrastive parameter $\gamma^{\star}$;
$\ell_1$ penalty parameter $\lambda_1^{\star}$
}
\caption{scPCA}
\end{algorithm}
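One standard choice for the positive-semidefinite approximation appearing in
the algorithm above (stated here for concreteness) is the projection of
$\mathbf{C}_{\gamma}$ onto the cone of positive-semidefinite matrices obtained
by truncating negative eigenvalues: given the eigendecomposition
$\mathbf{C}_{\gamma} = \sum_{i=1}^{p} \lambda_i \mathbf{v}_i \mathbf{v}_i^\top$,
set
\begin{equation*}
\widetilde{\mathbf{C}}_{\gamma} = \sum_{i=1}^{p} \max(\lambda_i, 0) \,
\mathbf{v}_i \mathbf{v}_i^\top.
\end{equation*}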
\newpage
\section{Algorithm for Cross-Validated scPCA}
\begin{algorithm}[!htbp]\label{algo2}
\dontprintsemicolon
%\setalgolined
\linesnotnumbered
\KwResult{Produces a sparse low-dimensional representation of the target data,
$\mathbf{X}_{n \times p}$, by contrasting the variation of
$\mathbf{X}_{n \times p}$ and some background data, $\mathbf{Y}_{m \times p}$,
while applying an $\ell_1$ penalty to the loadings generated by cPCA.}
\SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output}
\Input{
\begin{itemize}
\item[] target dataset: $\mathbf{X}$
\item[] background dataset: $\mathbf{Y}$
\item[] binary variable indicating whether to column-scale the data:
\texttt{scale}
\item[] vector of possible contrastive parameters:
$\gamma = (\gamma_1, \ldots, \gamma_s)$
\item[] vector of possible $\ell_1$ penalty parameters: $\lambda_1 = (\lambda_{1,1}, \ldots, \lambda_{1, d})$
\item[] number of sparse contrastive principal components to compute: $k$
\item[] clustering method: \texttt{cluster\_meth}
\item[] number of clusters: \texttt{ncluster}
\item[] number of cross-validation folds: $W$
\end{itemize}
}
\BlankLine
For $\mathbf{X}_{n \times p}$, randomly partition the index set $\{1, \ldots,
n\}$ into $W$ validation sets, $\mathcal{W}_1^x, \ldots, \mathcal{W}_W^x$, of (approximately) the
same size (i.e., $\bigcup_{w=1}^W \mathcal{W}_w^x = \{1, \ldots, n\}$;
$\mathcal{W}_w^x
\cap \mathcal{W}_{w'}^x = \emptyset, \quad \forall w,w' \in \{1, \ldots, W\}$).
Denote the corresponding training sets by $\mathcal{T}_w^x = \{1, \ldots, n\}
\setminus \mathcal{W}_w^x$. For $\mathbf{Y}_{m \times p}$, randomly partition the
index set $\{1, \ldots, m\}$ into $W$ validation sets, $\mathcal{W}_1^y, \ldots,
\mathcal{W}_W^y$, of (approximately) the same size (i.e., $\bigcup_{w=1}^W \mathcal{W}_w^y = \{1,
\ldots, m\}$; $\mathcal{W}_w^y \cap \mathcal{W}_{w'}^y = \emptyset, \quad
\forall
w,w' \in \{1, \ldots, W\}$). Denote the corresponding training sets by
$\mathcal{T}_w^y = \{1, \ldots, m\} \setminus \mathcal{W}_w^y$. Denote by
$\mathbf{X}_{\mathcal{T}_w^x}$ the $\lvert \mathcal{T}_w^x \rvert\
\times p$ submatrix of $\mathbf{X}$ for training set $\mathcal{T}_w^x$ and by
$\mathbf{Y}_{\mathcal{T}_w^y}$ the $\lvert \mathcal{T}_w^y \rvert
\times p$ submatrix of $\mathbf{Y}$ for training set $\mathcal{T}^y_w$. Define similarly
$\mathbf{X}_{\mathcal{W}_w^x}$ and $\mathbf{Y}_{\mathcal{W}_w^y}$ for the validation
sets. Note that $\mathbf{Y}_{\mathcal{W}_w^y}$ is defined explicitly solely to
avoid ambiguity; it plays no role in subsequent developments.\; \BlankLine
\For{each $w$ in $\{1, \ldots, W\}$}{
Center (and \texttt{scale} if so desired) the columns of
$\{\mathbf{X}_{\mathcal{T}_w^x}, \mathbf{Y}_{\mathcal{T}_w^y} \}$ and
$\{\mathbf{X}_{\mathcal{W}_w^x}, \mathbf{Y}_{\mathcal{W}_w^y} \}$\;
Compute the empirical covariance matrices:
$\mathbf{C_X}_{p \times p} \coloneqq \frac{1}{\lvert \mathcal{T}_w^x
\rvert}\mathbf{X_{\mathcal{T}_w^x}}^\top\mathbf{X_{\mathcal{T}_w^x}}, \;
\mathbf{C_Y}_{p \times p} \coloneqq \frac{1}{\lvert \mathcal{T}_w^y
\rvert}\mathbf{Y_{\mathcal{T}_w^y}}^\top\mathbf{Y_{\mathcal{T}_w^y}}$\;
\For{each $\gamma_i \in \gamma$}{
\For{each $\lambda_{1,j} \in \lambda_1$}{
Compute the contrastive covariance matrix $\mathbf{C}_{\gamma_i}=\mathbf{C_X} -
\gamma_i \mathbf{C_Y}$\;
Compute the positive-semidefinite approximation of $\mathbf{C}_{\gamma_i}$, $\widetilde{\mathbf{C}}_{\gamma_i}$\;
Apply SPCA to $\widetilde{\mathbf{C}}_{\gamma_i}$ for $k$ components with $\ell_1$ penalty $\lambda_{1, j}$\;
Generate a low-dimensional representation of the target validation set by
projecting $\mathbf{X}_{\mathcal{W}_w^x}$ on the sparse loadings of SPCA\;
Normalize the low-dimensional representation produced to be on the unit hypercube\;
Cluster the normalized low-dimensional representation using \texttt{cluster\_meth} with \texttt{ncluster}\;
Compute and record the clustering strength criterion associated with $(\gamma_i, \lambda_{1, j})$ \;
}
}
}
Identify the combination of hyperparameters maximizing the cross-validated
mean (across all folds $\{1, \ldots, W\}$) of the clustering strength
criterion: $\gamma^{\star},\: \lambda_1^{\star}$ \;
\Output{The low-dimensional representation of the target data given by
$(\gamma^{\star}, \lambda_1^{\star})$, an $n \times k$ matrix;
the $p \times k$ matrix of loadings given by $(\gamma^{\star},
\lambda_1^{\star})$; contrastive parameter $\gamma^{\star}$;
$\ell_1$ penalty parameter $\lambda_1^{\star}$
}
\caption{Cross-validated scPCA}
\end{algorithm}
\newpage
\section{Intuition for the Contrastive Parameter $\gamma$}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/cpca_example_plot}
\caption{
{\em Effect of contrastive parameter for cPCA.}
cPCA as implemented by \citet{Abid2018} was applied to a simulated dataset
of $n=400$ observations, split across 4 groups, with $p=30$ variables. The
first 10 variables are distributed as $N(0, 10)$ for all observations. Variables 11 through 20 are distributed as $N(0, 1)$ for Groups 1 and 2,
and as $N(3, 1)$
for Groups 3 and 4. Variables 21 through 30 are distributed as $N(-3, 1)$
for Groups 1 and 3, and as $N(0, 1)$ for Groups 2 and 4. cPCA also takes
as input a background dataset of $m=400$ observations, with $p=30$
variables, where the first 10 variables are distributed as $N(0, 10)$, the
following 10 as $N(0, 3)$, and the remaining 10 as $N(0, 1)$. The results
of cPCA are then presented for eight increasing values of the contrastive
parameter $\gamma$ from among 40 logarithmically spaced values between 0.1 and 100, selected using the semi-automated technique described by \citet{Abid2018}. For the smaller values of the contrastive parameter, the
noise contained in the first 10 variables of the target data dominates the
signal contained in variables 11 through 30. As the contrastive parameter
increases, the signal in the target data set is unmasked. However, once the
contrastive parameter value becomes larger than $\approx 20$, the
distinction between groups becomes increasingly poor; the variation
contained in the background data begins to dominate the variation contained
in the target data. A virtually identical dataset is presented in the
supplementary material of \citet{Abid2018}.}
\label{contrastive_par}
\end{figure}
\FloatBarrier
\newpage
\section{Simulated scRNA-seq Data}\label{sup_sim}
See Section 3.1 %\ref{sim_scRNA-seq}
for information on the simulation model and dataset. cPCA was applied to the dataset using the non-cross-validated hyperparameter tuning framework. The $\ell_1$ penalty parameter was set to 0, and the vector of possible contrastive parameters consisted of 40 logarithmically spaced values between 0.1 and 1,000. scPCA was applied in the same manner as cPCA, though the vector of potential $\ell_1$ penalty parameters consisted of 20 equidistant values between 0.05 and 1. The \texttt{Rtsne R} package was used to create the t-SNE embedding. Two initializations were generated: with and without an initial application of PCA to the simulated data. The embedding employing the initial PCA step retained the 50 leading principal components. For each initialization, the remaining parameters were set to their defaults, as done by \citet{Becht2019}, e.g., \texttt{perplexity = 30} and \texttt{max\_iter = 1000}. The \texttt{theta} parameter was set to \texttt{0} so that exact t-SNE was performed. The embedding produced using the PCA initialization was qualitatively better than that without; therefore, it is used in Figure \ref{fig:sim} of this manuscript. The \texttt{umap} \texttt{R} package was used to generate the UMAP embedding. As in the quantitative analysis of UMAP performed by \citet{Becht2019}, \texttt{min\_dist} was set to \texttt{0.02}, \texttt{n\_neighbors} was set to \texttt{30}, and the Euclidean distance was used as the metric. The SIMLR embedding was produced with \texttt{k} set to 10, as recommended by \citet{Wang2017}, and with the number of pre-specified clusters set to 2.
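For concreteness, the t-SNE and UMAP settings described above amount to calls of the following form in \texttt{R} (a sketch; \texttt{sim\_data} is a placeholder for the simulated target matrix):
\begin{verbatim}
library(Rtsne)
library(umap)

# exact t-SNE (theta = 0) with a 50-PC initialization
tsne_fit <- Rtsne(sim_data, pca = TRUE, initial_dims = 50,
                  perplexity = 30, max_iter = 1000, theta = 0)

# UMAP with the hyperparameters of Becht et al. (2019)
umap_fit <- umap(sim_data, n_neighbors = 30, min_dist = 0.02,
                 metric = "euclidean")
\end{verbatim}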
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/sim_de_genes}
\caption{{\em Simulated scRNA-seq data: Differential expression.}
The 98 differentially expressed genes in the simulated target dataset are ranked in decreasing order of their absolute level of differential expression between groups. In the \textit{Splatter} framework, genes are differentially expressed between groups by way of a group-specific multiplicative factor. Thus, the level of differential expression of any gene between two groups may be computed as the absolute value of the difference between each group's multiplicative factor. We find that all 20 of the genes with non-zero entries in scPCA's first loading vector, highlighted in green, are among the most differentially expressed.}
\label{fig:sim_de_genes}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/technical_effect_removal_sim}
\caption{{\em Simulated scRNA-seq data: Average silhouette width comparison.}
Methods are displayed in decreasing order of ability to remove unwanted technical variation, as measured by the average silhouette width. scPCA produces the densest biological clusters with the least amount of technical noise. The ZINB-WaVE method, when taking into account the batch effect, has a similar performance to scPCA with respect to the removal of unwanted effects, though the biological clusters it produces have lower average silhouette widths. Though cPCA produces denser biological clusters than ZINB-WaVE, it fails to completely remove the batch effect. The remaining methods are unable to disentangle the biological and technical effects.
}
\label{fig:ave_sil_widths}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/simulated_SIMLR.png}
\caption{
{\em Simulated scRNA-seq data: SIMLR.}
SIMLR's two-dimensional embedding produces dense clusters that nearly perfectly split cells into two biologically meaningful groups. However, upon close inspection -- and contrary to what the average silhouette widths suggest -- the batch effect is not removed from the biological groups. Because the reported average silhouette widths misrepresent the method's ability to remove unwanted variation in the data, its results were not included in Figure \ref{fig:ave_sil_widths}.}
\label{fig:simulated_SIMLR}
\end{figure}
\FloatBarrier
\section{Dengue Microarray Data}\label{sup_dengue}
See Section 3.2 %\ref{dengue_data}
for information on the data. cPCA and scPCA were fit using the non-cross-validated hyperparameter tuning framework. Both methods considered a vector of 40 logarithmically spaced values between 0.1 and 1,000 as potential contrastive parameters. scPCA also used a vector of 20 equidistant values between 0.05 and 1 as potential $\ell_1$ penalty parameters. As with the simulated data, two t-SNE embeddings were generated: one with an initial PCA step (retaining the first 50 principal components) and one without. Due to the small sample size, the \texttt{perplexity} parameter was set to 8. The remaining hyperparameters were set to their defaults, with the exception of \texttt{theta}, which was set to 0. Both embeddings were qualitatively identical, and so only the one that does not require the initial dimensionality reduction through PCA is presented in this manuscript. The qualitatively best UMAP embedding was found with the \texttt{n\_neighbors} parameter set to 15 and the \texttt{min\_dist} parameter set to 0.2. These parameter values are inspired by those used by \citet{Becht2019}, though the type of data considered is not identical.
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.5\textwidth]{figures/dengue_tsne}
\caption{
{\em Dengue microarray data: t-SNE.}
Similarly to UMAP, t-SNE almost completely separates the convalescent patients from those with some form of dengue. The two main clusters are further split into distinct sub-clusters, perhaps indicating the presence of a batch effect.
}
\label{fig:dengue_tsne}
\end{figure}
% latex table generated in R 3.6.1 by xtable 1.8-4 package
% Mon Oct 21 14:32:37 2019
\begin{longtable}{| l | p{4cm} | p{8cm} | l |}
\caption{{\em Dengue microarray data: Genes with non-zero weights in the first scPCA loading vector.}}
\label{tab:dengue_1} \\
\hline
& Gene Symbol & Gene Name & Weight \\
\hline
\endhead
1 & PRSS33 & protease, serine, 33 & -0.0059 \\
2 & PDZK1IP1 & PDZK1 interacting protein 1 & -0.0347 \\
3 & SDC1 & syndecan 1 & 0.2507 \\
4 & CAV1 & caveolin 1, caveolae protein, 22kDa & 0.0889 \\
5 & GGH & gamma-glutamyl hydrolase (conjugase, folylpolygammaglutamyl hydrolase) & 0.2318 \\
6 & PI3 & peptidase inhibitor 3, skin-derived & -0.0209 \\
7 & BUB1B & budding uninhibited by benzimidazoles 1 homolog beta (yeast) & 0.1242 \\
8 & ZWINT & ZW10 interactor & 0.3984 \\
9 & TUBB2A & tubulin, beta 2A & -0.0004 \\
10 & PTGS2 & prostaglandin-endoperoxide synthase 2 (prostaglandin G/H synthase and cyclooxygenase) & -0.0627 \\
11 & TTK & TTK protein kinase & 0.0201 \\
12 & ORM1 /// ORM2 & orosomucoid 1 /// orosomucoid 2 & -0.0055 \\
13 & CD38 & CD38 molecule & 0.0399 \\
14 & CHI3L1 & chitinase 3-like 1 (cartilage glycoprotein-39) & -0.0384 \\
15 & HLA-DQB1 & major histocompatibility complex, class II, DQ beta 1 & 0.0720 \\
16 & BUB1 & budding uninhibited by benzimidazoles 1 homolog (yeast) & 0.0853 \\
17 & CDK1 & cyclin-dependent kinase 1 & 0.2650 \\
18 & IGH@ /// IGHA1 /// IGHA2 /// IGHD /// IGHG1 /// IGHG3 /// IGHG4 /// IGHM /// IGHV4-31 /// LOC100290146 /// LOC100290528 & immunoglobulin heavy locus /// immunoglobulin heavy constant alpha 1 /// immunoglobulin heavy constant alpha 2 (A2m marker) /// immunoglobulin heavy constant delta /// immunoglobulin heavy constant gamma 1 (G1m marker) /// immunoglobulin heavy constant gamma 3 (G3m marker) /// immunoglobulin heavy constant gamma 4 (G4m marker) /// immunoglobulin heavy constant mu /// immunoglobulin heavy variable 4-31 /// hypothetical protein LOC100290146 /// similar to pre-B lymphocyte gene 2 & 0.0180 \\
19 & IGH@ /// IGHA1 /// IGHA2 /// IGHD /// IGHG1 /// IGHG3 /// IGHG4 /// IGHM /// IGHV3-23 /// LOC100126583 /// LOC100290146 /// LOC652128 & immunoglobulin heavy locus /// immunoglobulin heavy constant alpha 1 /// immunoglobulin heavy constant alpha 2 (A2m marker) /// immunoglobulin heavy constant delta /// immunoglobulin heavy constant gamma 1 (G1m marker) /// immunoglobulin heavy constant gamma 3 (G3m marker) /// immunoglobulin heavy constant gamma 4 (G4m marker) /// immunoglobulin heavy constant mu /// immunoglobulin heavy variable 3-23 /// hypothetical LOC100126583 /// hypothetical protein LOC100290146 /// similar to Ig heavy chain V-II region ARH-77 precursor & 0.1867 \\
20 & NOV & nephroblastoma overexpressed gene & -0.0619 \\
21 & SELENBP1 & selenium binding protein 1 & -0.1315 \\
22 & IGHA1 /// IGHG1 /// IGHM /// LOC100290293 & immunoglobulin heavy constant alpha 1 /// immunoglobulin heavy constant gamma 1 (G1m marker) /// immunoglobulin heavy constant mu /// similar to hCG2042717 & 0.0162 \\
23 & CEP55 & centrosomal protein 55kDa & 0.2863 \\
24 & PBK & PDZ binding kinase & 0.1358 \\
25 & SHCBP1 & SHC SH2-domain binding protein 1 & 0.2901 \\
26 & MGC29506 & plasma cell-induced ER protein 1 & 0.4012 \\
27 & CNTNAP3 & contactin associated protein-like 3 & -0.0494 \\
28 & JAZF1 & JAZF zinc finger 1 & -0.0441 \\
29 & KIAA1324 & KIAA1324 & -0.0962 \\
30 & CDCA2 & cell division cycle associated 2 & 0.3858 \\
31 & KLHL14 & kelch-like 14 (Drosophila) & 0.0801 \\
32 & CYAT1 & cyclosporin A transporter 1 & 0.1657 \\
33 & HLA-DRB1 /// HLA-DRB3 /// HLA-DRB4 /// HLA-DRB5 /// LOC100294036 & major histocompatibility complex, class II, DR beta 1 /// major histocompatibility complex, class II, DR beta 3 /// major histocompatibility complex, class II, DR beta 4 /// major histocompatibility complex, class II, DR beta 5 /// similar to HLA class II histocompatibility antigen, DRB1-7 beta chain & 0.0422 \\
34 & FLJ10357 & protein SOLO & -0.0966 \\
\hline
\end{longtable}
% latex table generated in R 3.6.1 by xtable 1.8-4 package
% Mon Oct 21 14:52:46 2019
\begin{longtable}{| l | p{4cm} | p{8cm} | l |}
\caption{{\em Dengue microarray data: Genes with non-zero weights in the second scPCA loading vector.}} \label{tab:dengue_2} \\
\hline
& Gene Symbol & Gene Name & Weight \\
\hline
\endhead
1 & PRSS33 & protease, serine, 33 & 0.1822 \\
2 & IFI27 & interferon, alpha-inducible protein 27 & -0.0147 \\
3 & PI3 & peptidase inhibitor 3, skin-derived & 0.1692 \\
4 & SLC2A5 & solute carrier family 2 (facilitated glucose/fructose transporter), member 5 & 0.0701 \\
5 & MYOM2 & myomesin (M-protein) 2, 165kDa & -0.0278 \\
6 & HLA-DRB4 & major histocompatibility complex, class II, DR beta 4 & -0.0620 \\
7 & IGH@ /// IGHA1 /// IGHD /// IGHG1 /// IGHG3 /// IGHG4 /// IGHM /// IGHV3-23 /// IGHV4-31 /// LOC100290146 /// LOC100290528 & immunoglobulin heavy locus /// immunoglobulin heavy constant alpha 1 /// immunoglobulin heavy constant delta /// immunoglobulin heavy constant gamma 1 (G1m marker) /// immunoglobulin heavy constant gamma 3 (G3m marker) /// immunoglobulin heavy constant gamma 4 (G4m marker) /// immunoglobulin heavy constant mu /// immunoglobulin heavy variable 3-23 /// immunoglobulin heavy variable 4-31 /// hypothetical protein LOC100290146 /// similar to pre-B lymphocyte gene 2 & 0.4987 \\
8 & IGKV3-20 & Immunoglobulin kappa variable 3-20 & 0.1623 \\
9 & RSAD2 & radical S-adenosyl methionine domain containing 2 & -0.2294 \\
10 & USP18 & ubiquitin specific peptidase 18 & -0.4599 \\
11 & SIGLEC1 & sialic acid binding Ig-like lectin 1, sialoadhesin & -0.2750 \\
12 & KCTD14 & potassium channel tetramerisation domain containing 14 & -0.3142 \\
13 & FAM118A & family with sequence similarity 118, member A & -0.1382 \\
14 & & & 0.0269 \\
15 & SLC16A14 & solute carrier family 16, member 14 (monocarboxylic acid transporter 14) & 0.3232 \\
16 & ANKRD22 & ankyrin repeat domain 22 & -0.2800 \\
17 & KLC3 & kinesin light chain 3 & 0.1021 \\
18 & SIGLEC1 & sialic acid binding Ig-like lectin 1, sialoadhesin & -0.0434 \\
\hline
\end{longtable}
\begin{table}
\caption{{\em Dengue microarray data: Gene set enrichment analysis.} The Broad Institute's online gene set enrichment analysis (GSEA) tool was used to identify the ten most significant gene sets based on GO biological processes \citep{Subramanian15545,Liberzon2011,Liberzon2015}.}
\small
\begin{tabular}{|p{5cm} | p{6cm} | p{1.5cm} | p{1.5cm} | p{1.5cm}|}
\hline
Gene Set Name & Description & Genes in Overlap & $p$-value & FDR $q$-value \\
\hline
GO\_DEFENSE\_RESPONSE & Reactions, triggered in response to the presence of a foreign body or the occurrence of an injury, which result in restriction of damage to the organism attacked or prevention/recovery from the infection caused by the attack. & 13 & 1.12 e-8 & 8.2 e-5 \\
GO\_RESPONSE\_TO\_CYTOKINE & Any process that results in a change in state or activity of a cell or an organism (in terms of movement, secretion, enzyme production, gene expression, etc.) as a result of a cytokine stimulus. & 10 & 3.15 e-7 & 1.16 e-3 \\
GO\_MITOTIC\_CELL\_CYCLE\_CHECKPOINT & A cell cycle checkpoint that ensures accurate chromosome replication and segregation by preventing progression through a mitotic cell cycle until conditions are suitable for the cell to proceed to the next stage. & 5 & 8.39 e-7 & 2.06 e-3 \\
GO\_CYTOKINE\_MEDIATED\_SIGNALING\_PATHWAY & A series of molecular signals initiated by the binding of a cytokine to a receptor on the surface of a cell, and ending with regulation of a downstream cellular process, e.g. transcription. & 8 & 1.38 e-6 & 2.4 e-3 \\
GO\_PROTEIN\_LOCALIZATION\_TO\_CHROMOSOME\newline\_CENTROMERIC\_REGION & Any process in which a protein is transported to, or maintained at, the centromeric region of a chromosome. & 3 & 1.63 e-6 & 2.4 e-3 \\
GO\_CELL\_CYCLE\_CHECKPOINT & A cell cycle process that controls cell cycle progression by monitoring the integrity of specific cell cycle events. A cell cycle checkpoint begins with detection of deficiencies or defects and ends with signal transduction. & 5 & 3.15 e-6 & 3.86 e-3 \\
GO\_POSITIVE\_REGULATION\_OF\_VASOCONSTRICTION & Any process that activates or increases the frequency, rate or extent of vasoconstriction. & 3 & 4.74 e-6 & 4.97 e-3 \\
GO\_NEGATIVE\_REGULATION\_OF\_METAPHASE\newline\_ANAPHASE\_TRANSITION\newline\_OF\_CELL\_CYCLE & Any process that stops, prevents or reduces the frequency, rate or extent of metaphase/anaphase transition of cell cycle. & 3 & 8.15 e-6 & 6.58 e-3 \\
GO\_MITOTIC\_CELL\_CYCLE & Progression through the phases of the mitotic cell cycle, the most common eukaryotic cell cycle, which canonically comprises four successive phases called G1, S, G2, and M and includes replication of the genome and the subsequent segregation of chromosomes into daughter cells. In some variant cell cycles nuclear replication or nuclear division may not be followed by cell division, or G1 and G2 phases may be absent. & 8 & 8.6 e-6 & 6.58 e-3 \\
GO\_INFLAMMATORY\_RESPONSE & The immediate defensive reaction (by vertebrate tissue) to infection or injury caused by chemical or physical agents. The process is characterized by local vasodilation, extravasation of plasma into intercellular spaces and accumulation of white blood cells and macrophages. & 7 & 9.3 e-6 & 6.58 e-3 \\
\hline
\end{tabular}
\label{tab:gsea_dengue}
\end{table}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/dengue_cpca_centers}
\caption{
{\em Dengue microarray data: cPCA.}
When varying the \textit{a priori} specified number of clusters for cPCA, all four embeddings are virtually identical, suggesting that cPCA is robust to misspecifications of the number of clusters and that optimal contrastive parameters were selected in each case.}
\label{fig:dengue_cpca_centers}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/dengue_scpca_centers}
\caption{
{\em Dengue microarray data: scPCA.}
When varying the \textit{a priori} specified number of clusters for scPCA, we find that the two-dimensional embeddings are sensitive to this choice. When scPCA is performed on this data with four and five clusters, the results resemble those produced by PCA.}
\label{fig:dengue_scpca_centers}
\end{figure}
\FloatBarrier
\newpage
\section{Leukemia Patient scRNA-seq Data}\label{sup_aml}
See Section 3.3 %\ref{leukemia_data}
for information on the data. cPCA and scPCA were fit to both patients' data using the non-cross-validated hyperparameter tuning framework. Both methods considered a vector of 40 logarithmically spaced values between 0.1 and 1,000 as potential contrastive parameters. scPCA considered 20 logarithmically spaced values between $10^{-9}$ and $1$ as potential $\ell_1$ penalty parameters. Two t-SNE embeddings were produced per patient: one with an initial dimension reduction step performed with PCA (retaining the 50 leading principal components) and one without. The remaining parameters for each embedding were set to their defaults, except for \texttt{theta}, which was set to 0. UMAP was performed with its default parameters, except for \texttt{n\_neighbors} and \texttt{min\_dist}, which were set to 30 and 0.02, respectively. These values match those used by \citet{Becht2019}. The SIMLR embeddings were produced with \texttt{k = 30}, as recommended by \citet{Wang2017} for datasets of this size. The number of pre-specified clusters was set to 2.
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/aml035_SIMLR.png}
\caption{
{\em AML Patient 035 scRNA-seq data: SIMLR.} SIMLR fails to produce an informative two-dimensional embedding of the patient's BMMCs. Although a number of dense clusters are formed in the center of the figure, the spread of observations around these clusters renders the visualization uninterpretable.}
\label{fig:SIMLR_aml_035}
\end{figure}
\begin{table}
\caption{{\em AML Patient 035 scRNA-seq data: Gene set enrichment analysis.}
The Broad Institute's online gene set enrichment analysis tool was used to identify the ten most significant gene sets based on GO biological processes \citep{Subramanian15545, Liberzon2011, Liberzon2015}.}
\small
\begin{tabular}{|p{5cm} | p{6cm} | p{1.5cm} | p{1.5cm} | p{1.5cm}|}
\hline
Gene Set Name & Description & Genes in Overlap & $p$-value & FDR $q$-value \\
\hline
GO\_ESTABLISHMENT\_OF\_PROTEIN\_LOCALIZATION\newline\_TO\_ENDOPLASMIC\_RETICULUM & The directed movement of a protein to a specific location in the endoplasmic reticulum. & 33 & 9.82 e-57 & 7.22 e-53 \\
GO\_COTRANSLATIONAL\_PROTEIN\_TARGETING\newline\_TO\_MEMBRANE & The targeting of proteins to a membrane that occurs during translation. The transport of most secretory proteins, particularly those with more than 100 amino acids, into the endoplasmic reticulum lumen occurs in this manner, as does the import of some proteins into mitochondria. & 32 & 2.09 e-56 & 7.66 e-53 \\
GO\_TRANSLATIONAL\_INITIATION & The process preceding formation of the peptide bond between the first two amino acids of a protein. This includes the formation of a complex of the ribosome, mRNA or circRNA, and an initiation complex that contains the first aminoacyl-tRNA. & 36 & 1 e-54 & 2.46 e-51 \\
GO\_NUCLEAR\_TRANSCRIBED\_MRNA\_CATABOLIC\newline\_PROCESS\_NONSENSE\_MEDIATED\_DECAY & The nonsense-mediated decay pathway for nuclear-transcribed mRNAs degrades mRNAs in which an amino-acid codon has changed to a nonsense codon; this prevents the translation of such mRNAs into truncated, and potentially harmful, proteins. & 32 & 4.32 e-54 & 7.94 e-51 \\
GO\_PROTEIN\_LOCALIZATION\_TO\_ENDOPLASMIC\newline\_RETICULUM & A process in which a protein is transported to, or maintained in, a location within the endoplasmic reticulum. & 33 & 1.45 e-53 & 2.13 e-50 \\
GO\_VIRAL\_GENE\_EXPRESSION & A process by which a viral gene is converted into a mature gene product or products (proteins or RNA). This includes viral transcription, processing to produce a mature RNA product, and viral translation. & 34 & 7.2 e-51 & 8.82 e-48 \\
GO\_PROTEIN\_TARGETING\_TO\_MEMBRANE & The process of directing proteins towards a membrane, usually using signals contained within the protein. & 34 & 2.71 e-50 & 2.85 e-47 \\
GO\_NUCLEAR\_TRANSCRIBED\_MRNA\_CATABOLIC\newline\_PROCESS & The chemical reactions and pathways resulting in the breakdown of nuclear-transcribed mRNAs in eukaryotic cells. & 33 & 1.22 e-47 & 1.12 e-44 \\
GO\_ESTABLISHMENT\_OF\_PROTEIN\_LOCALIZATION\newline\_TO\_MEMBRANE & The directed movement of a protein to a specific location in a membrane. & 36 & 5.88 e-46 & 4.8 e-43 \\
GO\_PROTEIN\_TARGETING & The process of targeting specific proteins to particular regions of the cell, typically membrane-bounded subcellular organelles. Usually requires an organelle specific protein sequence motif. & 37 & 2.61 e-43 & 1.92 e-40 \\
\hline
\end{tabular}
\label{tab:gsea_aml035}
\end{table}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/aml027_results}
\caption{
{\em AML Patient 027 scRNA-seq data.} Two-dimensional embeddings of the patient's BMMCs produced by PCA, ZINB-WaVE, t-SNE, UMAP, cPCA, and scPCA. cPCA and scPCA produce two-dimensional representations that distinguish between the pre- and post-transplant cells of Patient 027. Although cPCA's embedding contains denser clusters, scPCA's clusters are more distinct -- though they are oddly shaped. This is the result of sparsity: the scPCA embedding is produced with the count data of only five genes.}
\label{fig:comp_leuk_pat2}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/aml027_SIMLR.png}
\caption{
{\em AML Patient 027 scRNA-seq data: SIMLR.} As with AML Patient 035's data, SIMLR produces an uninterpretable representation of the patient's BMMCs.}
\label{fig:SIMLR_aml_027}
\end{figure}
\FloatBarrier
\section{Mouse Protein Expression Data}\label{sup_mice}
Down Syndrome, the leading genetic cause of intellectual disability \citep{Irving2008}, is the result of trisomy of all or part of the long arm of chromosome 21 \citep{Ahmed2015}. Recently, researchers have begun exploring the use of pharmacotherapies to mitigate these cognitive deficits using the Ts65Dn mouse model \citep{Ahmed2015,Higuera2015}. Though not a perfect model for the study of Down Syndrome, the Ts65Dn displays many relevant neurological phenotypic features, such as deficits in learning and memory \citep{Rueda2012}.
\citet{Ahmed2015} analyzed protein expression in the hippocampus and cortex of Ts65Dn and control mice after exposure to context fear conditioning and Memantine treatment. Memantine, a drug often prescribed to Alzheimer's patients, has been demonstrated to improve performance of the Ts65Dn in tasks that reflect cognitive abilities \citep{Ahmed2015}. The corresponding dataset was made available by Higuera et al. \citep{Higuera2015}. The data consist of normalized expression measures for 77 proteins from subcellular fractions of the cortex assayed from 38 control and 34 Ts65Dn mice. Each protein expression measurement was repeated 15 times (i.e., 15 technical replicates per mouse for each of the 77 proteins), though a small number of replicates contain missing protein expression measurements due to technical artifacts \citep{Higuera2015}. More details on the experimental design are provided in Figure \ref{fig:exp_design}.
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/mice_exp_design}
\caption{
{\em Mouse protein expression data: Experimental design.}
The control dataset comprises protein expression measurements for 15 technical replicates from each of 9 control mice subject to context fear conditioning and given a placebo (red leaf). The target dataset consists of protein expression measurements for 15 technical replicates from each of 9 control mice not subject to context fear conditioning and given a placebo (purple leaf) and 15 technical replicates from each of 9 trisomic mice not subject to context fear conditioning and given a placebo (green leaf).
\label{fig:exp_design}
\end{figure}
To demonstrate scPCA's capacity to capture biologically meaningful and interpretable variation in protein expression data, the technical replicates of the subset comprising 9 control and 9 Ts65Dn mice not subject to context fear conditioning and given a placebo were designated as the target dataset. The technical replicates of the subset of 9 control mice that were subject to context fear conditioning and given a placebo made up the background dataset, as the variation in their protein expression measurements is believed to be similar to that found in the control mice of the target dataset. The data are identical to those used by \citet{Abid2018} to demonstrate cPCA. PCA, t-SNE, UMAP, cPCA, and scPCA were applied to the target dataset (Figure~\ref{fig:comp_mice}A) to identify differences in protein expression between the control and trisomic mice not exposed to the context fear conditioning experiment. In addition to the target dataset, cPCA and scPCA took as input the column-centered background dataset and specified two clusters \textit{a priori}. cPCA and scPCA were fit using the non-cross-validated hyperparameter tuning framework. Both methods considered a vector of 40 equally spaced values between 0.1 and 1,000 as potential contrastive parameters. scPCA used a vector of 20 equidistant values between 0.05 and 1 as potential $\ell_1$ penalty parameters. t-SNE embeddings were produced with and without an initial dimensionality reduction step. The remaining variables were left at their defaults, with the exception of \texttt{theta}, which was set to 0. The embedding produced without an initial application of PCA was qualitatively best, and so it is presented in the manuscript and supplement. The UMAP embedding was generated with \texttt{n\_neighbors} set to 30 and with \texttt{min\_dist} set to 0.02. The remaining parameters were set to their defaults.
PCA proved incapable of distinguishing between the biological groups of interest. UMAP, cPCA, and scPCA successfully split the control and trisomic mice into virtually distinct clusters, though the number of clusters found by UMAP and cPCA in two dimensions did not match, even when varying the \textit{a priori} specified number of clusters in cPCA (Figure~\ref{fig:mice_cpca_centers}). Comparing the results of UMAP and scPCA, we find that they produce the same number of clusters, but their representations of the global structure are markedly different, even when varying the number of clusters specified \textit{a priori} in scPCA (Figure~\ref{fig:mice_scpca_centers}). The presence of distinct Ts65Dn clusters in UMAP's representation may correspond to technical noise that is diminished in cPCA's and scPCA's embeddings, or may arise from UMAP's inability to dependably capture global structure. We also remark that cPCA and scPCA produce very similar embeddings, up to a rotation; however, the first and second columns of scPCA's loading matrix contain merely 12 and 16 non-zero entries, respectively (Figure~\ref{fig:comp_mice}B). Also note that the separation of control and trisomic mice by scPCA occurs only in scPC2: the proteins with non-zero weights in the corresponding loading vector include AKT, APP, SOD1, and GSK3, each of which has been associated with Down Syndrome in human or mouse models \citep{Troca-Marn9445,Niceta2015,Gulesserian2001,Isacson2002}. The full list of proteins with non-zero weights in the first two loading vectors of scPCA is provided in Table \ref{tab:mouse_prot_1} and Table \ref{tab:mouse_prot_2}.
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/mice_results}
\caption{{\em Mouse protein expression data.} \textbf{A} All methods but PCA were capable of separating the control from the trisomic mice, though it is unclear why UMAP splits the Ts65Dn mice into two distinct groups. scPCA's low-dimensional representation of the protein expression data is markedly similar to that of cPCA, up to a rotation, despite relying on only a fraction of non-zero values in the loading matrix. On average, scPCA also produces the tightest clusters. Note: a small group of control mice were clustered with the trisomic mice in the UMAP, cPCA, and scPCA representation, potentially comprising a group of mislabeled mice. \textbf{B} scPCA's leading vectors of loadings are much sparser than those of cPCA, increasing the interpretability of findings and clarity of the visualization. The differing rotations of cPCA and scPCA, in addition to the drastically different weighting scheme of the proteins in their respective loading matrices, may indicate that the contrastive step performed by cPCA does not sufficiently dampen spurious sources of variation in the data.}
\label{fig:comp_mice}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.5\textwidth]{figures/mice_tsne}
\caption{
\textit{Mouse protein expression data: t-SNE.}
t-SNE produces almost linearly-separable clusters, though these clusters contain many fractured, spurious sub-clusters that do not relate to biological signal.
}
\label{fig:mice_tsne}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/mice_cpca_centers}
\caption{
\textit{Mouse protein expression data: cPCA.}
When varying the \textit{a priori} specified number of clusters for cPCA, we find that the two-dimensional embedding is once again robust to misspecifications.
}
\label{fig:mice_cpca_centers}
\end{figure}
\begin{figure}[!htbp]
\centering
\includegraphics[width=0.9\textwidth]{figures/mice_scpca_centers}
\caption{
\textit{Mouse protein expression data: scPCA.}
Unlike with the dengue microarray data, when varying the \textit{a priori} specified number of clusters for scPCA, we find that the two-dimensional embedding is robust to misspecifications. This may indicate that the sensitivity of the method to this tuning parameter is data-dependent.
}
\label{fig:mice_scpca_centers}
\end{figure}
\begin{longtable}{| l | p{4cm} | l |}
\caption{{\em Mouse protein expression data: Proteins with non-zero weights in the first scPCA loading vector.}} \label{tab:mouse_prot_1} \\
\hline
& Protein Symbol & Weight \\
\hline
1 & ELK & 0.0618 \\
2 & BRAF & -0.1001 \\
3 & RSK & -0.0927 \\
4 & SOD1 & 0.1800 \\
5 & S6 & 0.1281 \\
6 & AcetylH3K9 & 0.3992 \\
7 & RRP1 & 0.0606 \\
8 & Tau & 0.7320 \\
9 & CASP9 & -0.0795 \\
10 & PSD95 & -0.0329 \\
11 & Ubiquitin & -0.3674 \\
12 & H3AcK18 & 0.2958 \\
\hline
\end{longtable}
\newpage
\begin{longtable}{| l | p{4cm} | l |}
\caption{{\em Mouse protein expression data: Proteins with non-zero weights in the second scPCA loading vector.}}
\label{tab:mouse_prot_2} \\
\hline
& Protein Symbol & Weight \\
\hline
1 & ELK & -0.2236 \\
2 & AKT & -0.0999 \\
3 & APP & 0.3525 \\
4 & SOD1 & -0.2323 \\
5 & NUMB & 0.4690 \\
6 & P70S6 & 0.1554 \\
7 & GSK3B & 0.3574 \\
8 & PKCG & 0.3978 \\
9 & S6 & 0.3674 \\
10 & RRP1 & 0.0272 \\
11 & GluR4 & 0.0404 \\
12 & IL1B & -0.2664 \\
13 & P3525 & 0.0196 \\
14 & PSD95 & -0.1289 \\
15 & SNCA & -0.0783 \\
16 & H3AcK18 & 0.0169 \\
\hline
\end{longtable}
\newpage
\section{Cross-Validated cPCA and scPCA}\label{cv_algo_example}
The cross-validated (CV) versions of cPCA and scPCA (as implemented in Algorithm~\ref{algo2}) were applied to the datasets presented in the main paper and in the supplement. The hyperparameter grids used by each method are the same as those employed for their non-cross-validated counterparts. See Sections~\ref{sup_sim},~\ref{sup_dengue},~\ref{sup_aml}, and \ref{sup_mice} for details. Five-fold CV was applied to the simulated scRNA-seq data, mouse protein expression data, and AML patient scRNA-seq data (Figures~\ref{fig:cv_sim},~\ref{fig:cv_mice},~\ref{fig:cv_aml035}, and~\ref{fig:cv_aml027}), and 3-fold CV was applied to the dengue microarray gene expression data (Figure \ref{fig:cv_dengue}). A reduced number of folds was used on the latter dataset since it possesses far fewer observations than the others. For each dataset, the CV-cPCA and CV-scPCA embeddings closely resemble -- and are in some cases identical to -- their non-cross-validated counterparts.
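For readers implementing the partitioning step of Algorithm~\ref{algo2}, the fold construction amounts to a few lines of base \texttt{R} (a sketch; variable names are illustrative):
\begin{verbatim}
# split n target observations into W roughly equal validation folds
folds_x <- split(sample(seq_len(n)), rep(seq_len(W), length.out = n))
# the w-th training set is the complement of the w-th validation fold
train_x <- lapply(folds_x, function(fold) setdiff(seq_len(n), fold))
\end{verbatim}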
\begin{figure}
\centering
\includegraphics[width = \textwidth]{figures/sim_cv_results.png}
\caption{\em{CV-cPCA and CV-scPCA on simulated scRNA-seq data.}}
\label{fig:cv_sim}
\end{figure}
\begin{figure}
\centering
\includegraphics[width = \textwidth]{figures/mice_cv_results.png}
\caption{\em{CV-cPCA and CV-scPCA on mouse protein expression data.}}
\label{fig:cv_mice}
\end{figure}
\begin{figure}
\centering
\includegraphics[width = \textwidth]{figures/dengue_cv_results.png}
\caption{\em{CV-cPCA and CV-scPCA on dengue microarray data.}}
\label{fig:cv_dengue}
\end{figure}
\begin{figure}
\centering
\includegraphics[width = \textwidth]{figures/aml035_cv_results.png}
\caption{\em{CV-cPCA and CV-scPCA on AML Patient 035 scRNA-seq data.}}
\label{fig:cv_aml035}
\end{figure}
\begin{figure}
\centering
\includegraphics[width = \textwidth]{figures/aml027_cv_results.png}
\caption{\em{CV-cPCA and CV-scPCA on AML Patient 027 scRNA-seq data.}}
\label{fig:cv_aml027}
\end{figure}
\section{Running Time Comparison}\label{run_time}
The running times of PCA, t-SNE, UMAP, cPCA, scPCA, ZINB-WaVE, and SIMLR were recorded on the simulated scRNA-seq data from Section \ref{sim_scRNA-seq} and on the scRNA-seq data of AML Patient 035 from Section \ref{leukemia_data}.
If a method's software implementation offered the option to parallelize, four cores were used. The hyperparameters used by the methods on each dataset are identical to those described in their respective sections of the supplement, that is Sections~\ref{sup_sim} and \ref{sup_aml}. The \texttt{microbenchmark R} package was used to track the methods' running times. The comparison is presented in Figures~\ref{fig:rt_sim_sc_rna-seq} and \ref{fig:rt_aml035}.
We note that the iterative algorithm presented in Section 2 is computationally inefficient. However, the scPCA framework is not dependent on \citet{Zou2006}'s optimization procedure; other solutions to the SPCA criterion (Equation~\ref{eq:spca}) can be employed to sparsify the loadings of contrastive covariance matrices. Indeed, more efficient algorithms exist.
In particular, recent work by \citet{erichson2018sparse} provides a scalable algorithm by reformulating the SPCA criterion (Equation~\ref{eq:spca}) as a value function optimization problem. Instead of requiring an iterative routine to update $\mathbf{B}$ in the alternating algorithm presented in Section~\ref{method}, a single operator is used. This procedure can be sped up further through the use of randomized linear algebra methods to compute $\mathbf{A}$.
These sparsification methods were recently included in the \texttt{scPCA R} package, and the distributions of their running times are included in Figures~\ref{fig:rt_sim_sc_rna-seq} and \ref{fig:rt_aml035}. Using these recently developed methods to solve the SPCA step, the scPCA algorithm's running time is decreased by over an order of magnitude on both datasets. Its running time is similar to that of competing methods when using four cores. Given that the hyperparameter tuning framework is embarrassingly parallel, one can expect even faster computation times when more cores are employed.
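The timing harness itself is a direct application of \texttt{microbenchmark} (a sketch; \texttt{fit\_scpca()} is a placeholder for whichever method is being profiled):
\begin{verbatim}
library(microbenchmark)
microbenchmark(
  scPCA = fit_scpca(target, background),
  times = 5
)
\end{verbatim}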
\begin{figure}
\centering
\includegraphics[width = 0.9\textwidth]{figures/rt_analsysis_sim_data.png}
\caption{
{\em Running time comparison: Simulated scRNA-seq data.} Each method was applied to the data five times. Note that \textit{scPCA (var. proj.)} and \textit{scPCA (rand. var. proj.)} correspond to the scPCA algorithms relying on the SPCA procedures detailed in \citet{erichson2018sparse}'s recent work, the latter being the procedure that relies on randomized techniques. \textit{scPCA (iterative)} pertains to the scPCA method that uses the SPCA algorithm detailed in \citet{Zou2006}. The median running time of each method is reported. The most general of the dimensionality reduction methods, PCA, t-SNE, and UMAP, were at least an order of magnitude faster than all other methods. The remaining methods' running times were similar, with the exception of the much slower \textit{scPCA (iterative)}.
}
\label{fig:rt_sim_sc_rna-seq}
\end{figure}
\begin{figure}
\centering
\includegraphics[width = 0.9\textwidth]{figures/rt_analsysis_aml035.png}
\caption{
{\em Running time comparison: AML Patient 035 scRNA-seq data.} Each method was applied to the data three times. The median running time of each method is reported. On this larger dataset, the contrastive methods presented in the manuscript are more competitive with the general-purpose dimensionality reduction methods. Indeed, cPCA's running time is similar to that of UMAP, and the scPCA algorithm relying on randomized numerical methods for sparsification is faster than t-SNE.
}
\label{fig:rt_aml035}
\end{figure}
\newpage
\bibliographystyle{natbib}
\bibliography{appendix_ref}
\end{document} |
#input matrix is made by MakeHistograms.java
x<-read.table("ExperimentToConceptMatrix.txt")
x <- as.matrix(x)
#transpose
x <- t(x)
#remove experiments with zero variance (constant columns after the transpose)
s <- apply(x, 2, sd)
x <- x[, which(s != 0)]
#show the data
#map <- heatmap(x, keep.dendro = TRUE, scale = "none", margins=c(20,10))
map <- heatmap(cor(x), symm = TRUE, distfun = function(c) as.dist(1 - abs(c)), keep.dendro = TRUE, scale = "none", margins=c(10,10))
png("dendro.png", width=8840, height=1040)
par(mar=c(42,8,4,6))
plot(map$Colv)
dev.off()
c <- cor(x)
threshold <- 0.7
#keep experiments that correlate strongly (above the threshold) with at least one other experiment
where <- which(c > threshold & c != 1, arr.ind = TRUE)[, 2]
where <- unique(where)
length(where)
xx <- x[, where]
map <- heatmap(cor(xx), symm = TRUE, distfun = function(c) as.dist(1 - abs(c)), keep.dendro = TRUE, scale = "none", margins=c(10,10))
png("dendro.png", width=1340, height=5040)
#margins: bottom, left, top, right
par(mar=c(2,13,2,42))
plot(map$Colv, horiz=TRUE)
dev.off()
|
import set_theory.zfc set_theory.ordinal
universe u
attribute [elab_as_eliminator] well_founded.induction
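/-- Interpret a well-order as a pre-set: each point is sent (by well-founded
recursion) to the set of its predecessors, mirroring the von Neumann
construction of ordinals. -/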
def Well_order.to_pSet (w : Well_order.{u}) : pSet.{u} :=
pSet.mk w.1 $ well_founded.fix (@is_well_order.wf w.1 w.2 w.3) $ λ x ih,
pSet.mk { y | w.r y x } $ λ p, ih p.1 p.2
theorem Well_order.to_pSet.def (w : Well_order.{u}) (x : w.1) :
w.to_pSet.func x = pSet.mk { y | w.r y x } (λ p, w.to_pSet.func p.1) :=
well_founded.fix_eq _ _ x
theorem ordinal.to_Set.aux (v w : Well_order.{u}) (e : v.2 ≃o w.2) (x : v.1) :
(Well_order.to_pSet v).func x ≈ (Well_order.to_pSet w).func (e x) :=
show pSet.equiv
(well_founded.fix (@is_well_order.wf v.1 v.2 v.3)
(λ x ih, pSet.mk { y | v.r y x } $ λ p, ih p.1 p.2) x)
(well_founded.fix (@is_well_order.wf w.1 w.2 w.3)
(λ x ih, pSet.mk { y | w.r y x } $ λ p, ih p.1 p.2) (e x)),
from well_founded.induction (@is_well_order.wf v.1 v.2 v.3) x $ λ x ih,
by rw [well_founded.fix_eq, well_founded.fix_eq];
from ⟨λ ⟨y, hy⟩, ⟨⟨e y, (order_iso.ord e).1 hy⟩, ih y hy⟩,
λ ⟨y, hy⟩, ⟨⟨e.symm y, by simpa using (order_iso.ord e.symm).1 hy⟩,
by have := ih (e.symm y) (by simpa using (order_iso.ord e.symm).1 hy); rw [order_iso.apply_inverse_apply] at this; from this⟩⟩
def ordinal.to_Set (o : ordinal.{u}) : Set.{u} :=
quotient.lift_on o (λ w, ⟦Well_order.to_pSet w⟧) $ λ ⟨v1, v2, v3⟩ ⟨w1, w2, w3⟩ ⟨e⟩, quotient.sound
⟨λ x, ⟨e x, ordinal.to_Set.aux _ _ e x⟩,
λ x, ⟨e.symm x, by simpa using ordinal.to_Set.aux ⟨v1, v2, v3⟩ ⟨w1, w2, w3⟩ e (e.symm x)⟩⟩
def pSet.type.setoid (p : pSet.{u}) : setoid p.type :=
⟨λ i j, ⟦p.func i⟧ = ⟦p.func j⟧, λ i, rfl, λ i j, eq.symm, λ i j k, eq.trans⟩
local attribute [instance] pSet.type.setoid
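/-- The cardinality of a `Set`: the index type of an underlying pre-set,
quotiented by extensional equality of the indexed members. -/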
def Set.to_cardinal (s : Set.{u}) : cardinal.{u} :=
quotient.lift_on s (λ p, cardinal.mk $ quotient $ pSet.type.setoid p) $
λ ⟨p1, p2⟩ ⟨q1, q2⟩ H, quotient.sound $ nonempty.intro
{ to_fun := λ x, quotient.lift_on x (λ i, @@quotient.mk (pSet.type.setoid $ pSet.mk q1 q2) (classical.some (H.1 i))) $ λ i j H',
quotient.sound $
calc ⟦q2 (classical.some (H.1 i))⟧
= ⟦p2 i⟧ : eq.symm (quotient.sound $ classical.some_spec (H.1 i))
... = ⟦p2 j⟧ : H'
... = ⟦q2 (classical.some (H.1 j))⟧ : quotient.sound (classical.some_spec (H.1 j)),
inv_fun := λ x, quotient.lift_on x (λ i, @@quotient.mk (pSet.type.setoid $ pSet.mk p1 p2) $ classical.some $ H.2 i) $ λ i j H',
quotient.sound $
calc ⟦p2 (classical.some (H.2 i))⟧
= ⟦q2 i⟧ : quotient.sound (classical.some_spec (H.2 i))
... = ⟦q2 j⟧ : H'
... = ⟦p2 (classical.some (H.2 j))⟧ : eq.symm (quotient.sound $ classical.some_spec (H.2 j)),
left_inv := λ i, quotient.induction_on i $ λ i, quotient.sound $
calc ⟦p2 (classical.some (H.2 (classical.some (H.1 i))))⟧
= ⟦q2 (classical.some (H.1 i))⟧ : quotient.sound (classical.some_spec (H.2 _))
... = ⟦p2 i⟧ : eq.symm (quotient.sound $ classical.some_spec (H.1 i)),
right_inv := λ i, quotient.induction_on i $ λ i, quotient.sound $
calc ⟦q2 (classical.some (H.1 (classical.some (H.2 i))))⟧
= ⟦p2 (classical.some (H.2 i))⟧ : eq.symm (quotient.sound $ classical.some_spec (H.1 _))
... = ⟦q2 i⟧ : quotient.sound (classical.some_spec (H.2 i)) }
theorem Well_order.to_pSet.exact (w : Well_order.{u}) (x : w.1) :
∀ y, ⟦w.to_pSet.func x⟧ = ⟦w.to_pSet.func y⟧ → x = y :=
well_founded.induction (@is_well_order.wf w.1 w.2 w.3) x $ λ x ih y H,
begin
replace H := quotient.exact H,
rw [Well_order.to_pSet.def, Well_order.to_pSet.def] at H,
letI := w.3,
rcases is_trichotomous.trichotomous w.2 x y with h | h | h,
{ rcases H.2 ⟨x, h⟩ with ⟨⟨z, hzx⟩, h1⟩,
specialize ih z hzx x (quotient.sound h1),
exfalso,
subst ih,
exact is_irrefl.irrefl w.r _ hzx },
{ exact h },
{ rcases H.1 ⟨y, h⟩ with ⟨⟨z, hzy⟩, h1⟩,
specialize ih y h z (quotient.sound h1),
exfalso,
subst ih,
exact is_irrefl.irrefl w.r _ hzy }
end
example (c : cardinal.{u}) : c.ord.to_Set.to_cardinal = c :=
begin
apply quotient.induction_on c,
intro c,
have := cardinal.ord_eq c,
rcases this with ⟨r, wo, H⟩,
simp [H, ordinal.type, ordinal.to_Set],
rw [Set.mk, Set.to_cardinal, quotient.lift_on_beta],
apply quotient.sound,
split,
fapply equiv.mk,
{ fapply quotient.lift,
{ exact id },
{ intros x y H,
exact Well_order.to_pSet.exact _ _ _ H } },
{ exact quotient.mk },
{ intro x,
apply quotient.induction_on x,
intro x,
refl },
{ intro x, refl }
end
example : ordinal.to_Set 0 = ∅ :=
quotient.sound $ ⟨λ ⟨d⟩, match d with end, λ ⟨d⟩, match d with end⟩
theorem Well_order.to_pSet.sum (v w : Well_order.{u}) (x : v.1) :
(⟨v.1 ⊕ w.1, sum.lex v.2 w.2, by letI := v.3; letI := w.3; exactI sum.lex.is_well_order⟩ : Well_order).to_pSet.func (sum.inl x) ≈ v.to_pSet.func x :=
begin
cases v with v1 v2 v3,
cases w with w1 w2 w3,
resetI,
dsimp [Well_order.to_pSet, pSet.func],
rw [well_founded.fix_eq, well_founded.fix_eq],
apply well_founded.induction (is_well_order.wf v2) x,
intros x ih,
split,
{ intro z,
rcases z with ⟨z | z, hz | hz | hz⟩,
fapply exists.intro,
{ refine ⟨z, _⟩,
assumption },
dsimp,
rw [well_founded.fix_eq, well_founded.fix_eq],
exact ih z (by assumption) },
intro z,
cases z with z hz,
refine ⟨⟨sum.inl z, _⟩, _⟩,
{ constructor,
assumption },
dsimp,
rw [well_founded.fix_eq, well_founded.fix_eq],
exact ih z (by assumption)
end
example (o : ordinal.{u}) : o.succ.to_Set = insert o.to_Set o.to_Set :=
begin
apply quotient.induction_on o,
intro w,
cases w with w1 w2 w3, resetI,
apply quotient.sound,
dsimp [pSet.resp.f, Well_order.to_pSet, pSet.insert],
split,
{ intro s,
cases s with s s,
{ existsi (some s),
exact Well_order.to_pSet.sum (Well_order.mk w1 w2 w3)
⟨ulift unit, empty_relation, by apply_instance⟩ s },
existsi none,
dsimp,
rw [well_founded.fix_eq],
split,
{ intro s,
rcases s with ⟨s | s, hs | hs | hs⟩,
{ existsi s,
exact Well_order.to_pSet.sum (Well_order.mk w1 w2 w3)
⟨ulift unit, empty_relation, by apply_instance⟩ s },
exfalso,
assumption },
intro s,
refine ⟨⟨sum.inl s, _⟩, _⟩,
{ constructor },
exact Well_order.to_pSet.sum (Well_order.mk w1 w2 w3)
⟨ulift unit, empty_relation, by apply_instance⟩ s },
intro s,
cases s,
{ existsi (sum.inr (ulift.up unit.star)),
dsimp,
split,
{ intro s,
rcases s with ⟨s | s, hs | hs | hs⟩,
{ existsi s,
exact Well_order.to_pSet.sum (Well_order.mk w1 w2 w3)
⟨ulift unit, empty_relation, by apply_instance⟩ s },
exfalso,
assumption },
intro s,
refine ⟨⟨sum.inl s, _⟩, _⟩,
{ constructor },
exact Well_order.to_pSet.sum (Well_order.mk w1 w2 w3)
⟨ulift unit, empty_relation, by apply_instance⟩ s },
existsi (sum.inl s),
exact Well_order.to_pSet.sum (Well_order.mk w1 w2 w3)
⟨ulift unit, empty_relation, by apply_instance⟩ s
end
theorem Well_order.to_pSet.subrel (w : Well_order.{u}) (x : w.1) :
⟦w.to_pSet.func x⟧ = (@ordinal.typein w.1 w.2 w.3 x).to_Set :=
begin
letI := w.3,
apply quotient.sound,
rw [Well_order.to_pSet.def],
split;
{ intro y,
existsi y,
dsimp,
apply well_founded.induction (is_well_order.wf (subrel w.r _)) y,
intros y ih,
rw [Well_order.to_pSet.def, well_founded.fix_eq],
split,
{ intro z,
exact ⟨⟨⟨z.1, is_trans.trans _ _ _ z.2 y.2⟩, z.2⟩, ih ⟨_, _⟩ z.2⟩ },
intro z,
exact ⟨⟨z.1, z.2⟩, ih _ z.2⟩ }
end
/-
#check cardinal.omega.succ.ord.to_Set
def ordinal.to_Set.inj.aux1 (p : set.range Well_order.to_pSet.{u}) : Well_order.{u+1} :=
{ α := set.range p.1.func,
r := subrel pSet.mem.{u} _,
wo :=
{ wf := _ } }
theorem ordinal.to_Set.inj (p q : ordinal.{u}) (H : p.to_Set = q.to_Set) : p = q :=
begin
revert H,
apply quotient.induction_on₂ p q,
intros v w H,
replace H := quotient.exact H,
apply quotient.sound,
suffices : v.2 ≃o w.2,
{ cases v, cases w, constructor, exact this },
fapply order_iso.mk,
fapply equiv.mk,
{ intro x,
exact classical.some (H.1 x) },
{ intro y,
exact classical.some (H.2 y) },
{ intro x,
apply Well_order.to_pSet.exact,
have h1 : ⟦pSet.func (Well_order.to_pSet _) _⟧ = ⟦pSet.func (Well_order.to_pSet _) _⟧ :=
quotient.sound (classical.some_spec (H.2 (classical.some (H.1 x)))),
have h2 : ⟦pSet.func (Well_order.to_pSet _) _⟧ = ⟦pSet.func (Well_order.to_pSet _) _⟧ :=
quotient.sound (classical.some_spec (H.1 x)),
rw [h1, h2] },
{ intro y,
apply Well_order.to_pSet.exact,
have h1 : ⟦pSet.func (Well_order.to_pSet _) _⟧ = ⟦pSet.func (Well_order.to_pSet _) _⟧ :=
quotient.sound (classical.some_spec (H.1 (classical.some (H.2 y)))),
have h2 : ⟦pSet.func (Well_order.to_pSet _) _⟧ = ⟦pSet.func (Well_order.to_pSet _) _⟧ :=
quotient.sound (classical.some_spec (H.2 y)),
rw [← h1, ← h2] },
end
-/
example (v w : Well_order.{u}) (t : w.1) (H : v.to_pSet ≈ w.to_pSet.func t) :
⟦v⟧ = @ordinal.typein w.1 w.2 w.3 t :=
begin
apply quotient.sound,
letI := w.3,
suffices : v.2 ≃o subrel (w.r) {b : w.α | w.r b t},
{ cases v, constructor, exact this },
revert v,
apply well_founded.recursion (is_well_order.wf w.2) t,
  intros t ih v H,
  sorry
end
example (p q : ordinal.{u}) : p < q ↔ p.to_Set ∈ q.to_Set :=
begin
apply quotient.induction_on₂ p q,
intros v w,
letI := v.3,
letI := w.3,
have hv : v = {α := v.α, r := v.r, wo := by apply_instance},
{ cases v, congr },
have hw : w = {α := w.α, r := w.r, wo := by apply_instance},
{ cases w, congr },
split,
{ intro e,
replace e : nonempty (principal_seg v.2 w.2),
{ cases v, cases w, exact e },
cases e,
have := congr_arg ordinal.to_Set (ordinal.typein_top e),
existsi e.top,
suffices : (_ : pSet) ≈ _,
{ unfold has_equiv.equiv setoid.r at this,
exact this },
apply quotient.exact,
replace this := quotient.sound (quotient.exact this),
rw ← hv at this, rw ← this, symmetry,
exact Well_order.to_pSet.subrel _ e.top },
intro H,
cases H with t ht,
change Well_order.to_pSet v ≈ pSet.func (Well_order.to_pSet _) _ at ht,
suffices : principal_seg v.2 w.2,
{ cases v, cases w, constructor, exact this },
revert ht,
apply well_founded.recursion (is_well_order.wf w.2) t,
intros t ih H,
/- rw well_founded.fix_eq at hx,
fapply principal_seg.mk,
fapply order_embedding.mk,
fapply function.embedding.mk,
{ intro x,
exact (classical.some (hx.1 x)).1, },
{ intros x y h,
have h1 : ⟦pSet.func (Well_order.to_pSet _) _⟧ = ⟦pSet.func (Well_order.to_pSet _) _⟧ :=
quotient.sound (classical.some_spec (hx.1 x)),
have h2 : ⟦pSet.func (Well_order.to_pSet _) _⟧ = ⟦pSet.func (Well_order.to_pSet _) _⟧ :=
quotient.sound (classical.some_spec (hx.1 y)),
apply Well_order.to_pSet.exact,
rw [h1, h2, h] },
{ intros x y,
have : Well_order.to_pSet v ≈ pSet.func (Well_order.to_pSet _) _ := hx' },-/
  sorry
end
--TODO: a set is an ordinal iff it is transitive and linearly ordered by ∈
|
Formal statement is: lemma is_nth_power_nat_code [code]: "is_nth_power_nat n m = (if n = 0 then m = 1 else if m = 0 then n > 0 else if n = 1 then True else (\<exists>k\<in>{1..m}. k ^ n = m))" Informal statement is: The function is_nth_power_nat decides whether $m$ is an $n$th power by case analysis: if $n = 0$ it checks $m = 1$; if $m = 0$ it checks $n > 0$; if $n = 1$ it returns true; otherwise it searches for some $k \in \{1, \ldots, m\}$ with $k^n = m$. |
# Decision Lens API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ResourcePoolDeletedEvent Class
#'
#' @field id
#' @field name
#' @field field
#' @field portfolioId
#' @field portfolioPlan
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ResourcePoolDeletedEvent <- R6::R6Class(
'ResourcePoolDeletedEvent',
public = list(
`id` = NULL,
`name` = NULL,
`field` = NULL,
`portfolioId` = NULL,
`portfolioPlan` = NULL,
initialize = function(`id`, `name`, `field`, `portfolioId`, `portfolioPlan`){
if (!missing(`id`)) {
stopifnot(is.character(`id`), length(`id`) == 1)
self$`id` <- `id`
}
if (!missing(`name`)) {
stopifnot(is.character(`name`), length(`name`) == 1)
self$`name` <- `name`
}
if (!missing(`field`)) {
stopifnot(R6::is.R6(`field`))
self$`field` <- `field`
}
if (!missing(`portfolioId`)) {
stopifnot(is.character(`portfolioId`), length(`portfolioId`) == 1)
self$`portfolioId` <- `portfolioId`
}
if (!missing(`portfolioPlan`)) {
stopifnot(R6::is.R6(`portfolioPlan`))
self$`portfolioPlan` <- `portfolioPlan`
}
},
toJSON = function() {
ResourcePoolDeletedEventObject <- list()
if (!is.null(self$`id`)) {
ResourcePoolDeletedEventObject[['id']] <- self$`id`
}
if (!is.null(self$`name`)) {
ResourcePoolDeletedEventObject[['name']] <- self$`name`
}
if (!is.null(self$`field`)) {
ResourcePoolDeletedEventObject[['field']] <- self$`field`$toJSON()
}
if (!is.null(self$`portfolioId`)) {
ResourcePoolDeletedEventObject[['portfolioId']] <- self$`portfolioId`
}
if (!is.null(self$`portfolioPlan`)) {
ResourcePoolDeletedEventObject[['portfolioPlan']] <- self$`portfolioPlan`$toJSON()
}
ResourcePoolDeletedEventObject
},
fromJSON = function(ResourcePoolDeletedEventJson) {
ResourcePoolDeletedEventObject <- dlensFromJSON(ResourcePoolDeletedEventJson)
if (!is.null(ResourcePoolDeletedEventObject$`id`)) {
self$`id` <- ResourcePoolDeletedEventObject$`id`
}
if (!is.null(ResourcePoolDeletedEventObject$`name`)) {
self$`name` <- ResourcePoolDeletedEventObject$`name`
}
if (!is.null(ResourcePoolDeletedEventObject$`field`)) {
fieldObject <- Field$new()
fieldObject$fromJSON(jsonlite::toJSON(ResourcePoolDeletedEventObject$field, auto_unbox = TRUE))
self$`field` <- fieldObject
}
if (!is.null(ResourcePoolDeletedEventObject$`portfolioId`)) {
self$`portfolioId` <- ResourcePoolDeletedEventObject$`portfolioId`
}
if (!is.null(ResourcePoolDeletedEventObject$`portfolioPlan`)) {
portfolioPlanObject <- PortfolioPlan$new()
portfolioPlanObject$fromJSON(jsonlite::toJSON(ResourcePoolDeletedEventObject$portfolioPlan, auto_unbox = TRUE))
self$`portfolioPlan` <- portfolioPlanObject
}
},
toJSONString = function() {
sprintf(
        '{
           "id": "%s",
           "name": "%s",
           "field": %s,
           "portfolioId": "%s",
           "portfolioPlan": %s
        }',
        self$`id`,
        self$`name`,
        jsonlite::toJSON(self$`field`$toJSON(), auto_unbox = TRUE),
        self$`portfolioId`,
        jsonlite::toJSON(self$`portfolioPlan`$toJSON(), auto_unbox = TRUE)
)
},
fromJSONString = function(ResourcePoolDeletedEventJson) {
ResourcePoolDeletedEventObject <- dlensFromJSON(ResourcePoolDeletedEventJson)
self$`id` <- ResourcePoolDeletedEventObject$`id`
self$`name` <- ResourcePoolDeletedEventObject$`name`
FieldObject <- Field$new()
self$`field` <- FieldObject$fromJSON(jsonlite::toJSON(ResourcePoolDeletedEventObject$field, auto_unbox = TRUE))
self$`portfolioId` <- ResourcePoolDeletedEventObject$`portfolioId`
PortfolioPlanObject <- PortfolioPlan$new()
self$`portfolioPlan` <- PortfolioPlanObject$fromJSON(jsonlite::toJSON(ResourcePoolDeletedEventObject$portfolioPlan, auto_unbox = TRUE))
}
)
)
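
# A minimal usage sketch (hypothetical values; `field` and `portfolioPlan` are
# omitted here since they require R6 `Field`/`PortfolioPlan` instances):
#
#   evt <- ResourcePoolDeletedEvent$new(`id` = "rp-1", `name` = "Engineering",
#                                       `portfolioId` = "pf-9")
#   evt$toJSON()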
|
\documentclass[12pt]{article}
\usepackage[english]{babel}
\usepackage[utf8x]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{scribe}
\usepackage{listings}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=blue,
filecolor=magenta,
urlcolor=cyan,
}
\LectureNumber{5}
\LectureDate{DATE}
\LectureTitle{Stein Variational Gradient Descent as Gradient Flow}
\lstset{style=mystyle}
\begin{document}
\MakeScribeTop
%#############################################################
%#############################################################
%#############################################################
%#############################################################
Here is the Motivation
\section{Table of Contents}
Here's some more text.
% Here's a citation~\cite{Kar84a}.
\subsection{Here's a subsection}
\paragraph{Here's the note title} Here's some text for the note
\section{Math stuff}
%%%%%%%%%%% If you don't have citations then comment the lines below:
%
\bibliographystyle{abbrv} % if you need a bibliography
\bibliography{mybib} % assuming yours is named mybib.bib
%%%%%%%%%%% end of doc
\end{document} |
--
-- The Java KeyWord Lexer
--
%Options fp=CncKWLexer
%options package=CnCParser
%options template=KeywordTemplateF.gi
%Notice
/.
//
// This file is part of the CNC-C implementation and
// distributed under the Modified BSD License.
// See LICENSE for details.
//
// I AM A GENERATED FILE. PLEASE DO NOT CHANGE ME!!!
//
./
%End
%Include
KWLexerFoldedCaseMapF.gi
%End
%Export
T_ENV
T_UNSIGNED
T_STRUCT
%End
%Terminals
a b c d e f g h i j k l m
n o p q r s t u v w x y z
%End
%Start
KeyWord
%End
%Rules
KeyWord ::= e n v /.$setResult($_T_ENV);./
KeyWord ::= s t r u c t /.$setResult($_T_STRUCT);./
KeyWord ::= u n s i g n e d /.$setResult($_T_UNSIGNED);./
%End
|
||| Show sum of the numbers from input
module NumbersSum
import Data.String
sumNums : List (Maybe Integer) -> Maybe Integer
sumNums [] = Just 0
sumNums (x :: xs) = case (x, sumNums xs) of
(Just num1, Just num2) => Just(num1 + num2)
(_, _) => Nothing
main : IO ()
main = do
putStr "Enter numbers separated by a space: "
x <- getLine
case sumNums (map parseInteger (words x)) of
Just sum => putStrLn ("Sum is " ++ show sum)
Nothing => putStrLn "Invalid input" |
#' @inheritParams layer
#' @inheritParams geom_point
#' @inheritParams stat_density
#' @param scale if "area" (default), all violins have the same area (before trimming
#' the tails). If "count", areas are scaled proportionally to the number of
#' observations. If "width", all violins have the same maximum width.
#' @section Computed variables:
#' \describe{
#' \item{density}{density estimate}
#' \item{scaled}{density estimate, scaled to maximum of 1}
#' \item{count}{density * number of points - probably useless for violin plots}
#' \item{violinwidth}{density scaled for the violin plot, according to area, counts
#' or to a constant maximum width}
#' \item{n}{number of points}
#' \item{width}{width of violin bounding box}
#' }
#' @seealso [geom_violin()] for examples, and [stat_density()]
#' for examples with data along the x axis.
#' @export
#' @rdname geom_violin
stat_ydensity <- function(mapping = NULL, data = NULL,
geom = "violin", position = "dodge",
...,
bw = "nrd0",
adjust = 1,
kernel = "gaussian",
trim = TRUE,
scale = "area",
na.rm = FALSE,
orientation = NA,
show.legend = NA,
inherit.aes = TRUE) {
scale <- arg_match0(scale, c("area", "count", "width"))
layer(
data = data,
mapping = mapping,
stat = StatYdensity,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list2(
bw = bw,
adjust = adjust,
kernel = kernel,
trim = trim,
scale = scale,
na.rm = na.rm,
...
)
)
}
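
# A quick usage sketch (hypothetical; `mpg` ships with ggplot2, and
# geom_violin() uses this stat by default, so `scale` can be passed through):
#
#   ggplot(mpg, aes(class, hwy)) +
#     geom_violin(scale = "count", trim = FALSE)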
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatYdensity <- ggproto("StatYdensity", Stat,
required_aes = c("x", "y"),
non_missing_aes = "weight",
setup_params = function(data, params) {
params$flipped_aes <- has_flipped_aes(data, params, main_is_orthogonal = TRUE, group_has_equal = TRUE)
params
},
extra_params = c("na.rm", "orientation"),
compute_group = function(self, data, scales, width = NULL, bw = "nrd0", adjust = 1,
kernel = "gaussian", trim = TRUE, na.rm = FALSE, flipped_aes = FALSE) {
if (nrow(data) < 2) {
cli::cli_warn("Groups with fewer than two data points have been dropped.")
return(new_data_frame())
}
range <- range(data$y, na.rm = TRUE)
modifier <- if (trim) 0 else 3
bw <- calc_bw(data$y, bw)
dens <- compute_density(data$y, data$w, from = range[1] - modifier*bw, to = range[2] + modifier*bw,
bw = bw, adjust = adjust, kernel = kernel)
dens$y <- dens$x
dens$x <- mean(range(data$x))
# Compute width if x has multiple values
if (length(unique(data$x)) > 1) {
width <- diff(range(data$x)) * 0.9
}
dens$width <- width
dens
},
compute_panel = function(self, data, scales, width = NULL, bw = "nrd0", adjust = 1,
kernel = "gaussian", trim = TRUE, na.rm = FALSE,
scale = "area", flipped_aes = FALSE) {
data <- flip_data(data, flipped_aes)
data <- ggproto_parent(Stat, self)$compute_panel(
data, scales, width = width, bw = bw, adjust = adjust, kernel = kernel,
trim = trim, na.rm = na.rm
)
# choose how violins are scaled relative to each other
data$violinwidth <- switch(scale,
# area : keep the original densities but scale them to a max width of 1
# for plotting purposes only
area = data$density / max(data$density),
# count: use the original densities scaled to a maximum of 1 (as above)
# and then scale them according to the number of observations
count = data$density / max(data$density) * data$n / max(data$n),
# width: constant width (density scaled to a maximum of 1)
width = data$scaled
)
data$flipped_aes <- flipped_aes
flip_data(data, flipped_aes)
}
)
calc_bw <- function(x, bw) {
if (is.character(bw)) {
if (length(x) < 2) {
cli::cli_abort("{.arg x} must contain at least 2 elements to select a bandwidth automatically")
}
bw <- switch(
to_lower_ascii(bw),
nrd0 = stats::bw.nrd0(x),
nrd = stats::bw.nrd(x),
ucv = stats::bw.ucv(x),
bcv = stats::bw.bcv(x),
sj = ,
`sj-ste` = stats::bw.SJ(x, method = "ste"),
`sj-dpi` = stats::bw.SJ(x, method = "dpi"),
cli::cli_abort("{.var {bw}} is not a valid bandwidth rule")
)
}
bw
}
|
syntax "foo" : tactic
macro_rules | `(tactic| foo) => `(tactic| assumption)
macro_rules | `(tactic| foo) => `(tactic| apply Nat.pred_lt; assumption)
macro_rules | `(tactic| foo) => `(tactic| contradiction)
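
-- When several `macro_rules` alternatives apply, Lean tries the most recently
-- declared one first and backtracks on failure, so each example below is
-- closed by a different expansion of `foo`.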
example (i : Nat) (h : i - 1 < i) : i - 1 < i := by
foo
example (i : Nat) (h : i ≠ 0) : i - 1 < i := by
foo
example (i : Nat) (h : False) : i - 1 < i := by
foo
|
%default total
-- Let's try to replicate the dependent example with simple algebraic data types
data PetrolVehicle = Car' Nat | Bus' Nat
data PedalVehicle = Bike'
data Vehicle' = Petrol' PetrolVehicle | Pedal' PedalVehicle
wheels' : Vehicle' -> Nat
wheels' (Petrol' (Car' _)) = 4
wheels' (Petrol' (Bus' _)) = 4
wheels' (Pedal' _) = 2
refuel' : PetrolVehicle -> PetrolVehicle
refuel' (Car' k) = Car' 100
refuel' (Bus' k) = Bus' 200
data Power = Petrol | Pedal | Electric
data Vehicle : Power -> Type where
  Bicycle : Vehicle Pedal
Car : (fuel : Nat) -> Vehicle Petrol
Bus : (fuel : Nat) -> Vehicle Petrol
-- Extra
Unicycle : Vehicle Pedal
Motorcycle : (fuel : Nat) -> Vehicle Petrol
ElectricCar : (energy : Nat) -> Vehicle Electric
wheels : Vehicle _ -> Nat
wheels Bicycle = 2
wheels (Car fuel) = 4
wheels (Bus fuel) = 4
wheels Unicycle = 1
wheels (Motorcycle fuel) = 2
wheels (ElectricCar energy) = 4
refuel : Vehicle Petrol -> Vehicle Petrol
refuel (Car fuel) = Car 100
refuel (Bus fuel) = Bus 200
refuel (Motorcycle fuel) = Motorcycle 50
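
-- The payoff of indexing by Power: ill-typed calls such as `refuel Bicycle`
-- are rejected at compile time, since Bicycle : Vehicle Pedal while refuel
-- only accepts a Vehicle Petrol.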
|
[STATEMENT]
lemma filterlim_real_of_int_at_top [tendsto_intros]:
"filterlim real_of_int at_top at_top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. filterlim real_of_int at_top at_top
[PROOF STEP]
unfolding filterlim_at_top
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>Z. \<forall>\<^sub>F x in at_top. Z \<le> real_of_int x
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Z. \<forall>\<^sub>F x in at_top. Z \<le> real_of_int x
[PROOF STEP]
fix C :: real
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Z. \<forall>\<^sub>F x in at_top. Z \<le> real_of_int x
[PROOF STEP]
show "eventually (\<lambda>n. real_of_int n \<ge> C) at_top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F n in at_top. C \<le> real_of_int n
[PROOF STEP]
using eventually_ge_at_top[of "\<lceil>C\<rceil>"]
[PROOF STATE]
proof (prove)
using this:
eventually ((\<le>) \<lceil>C\<rceil>) at_top
goal (1 subgoal):
1. \<forall>\<^sub>F n in at_top. C \<le> real_of_int n
[PROOF STEP]
by eventually_elim linarith
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F n in at_top. C \<le> real_of_int n
goal:
No subgoals!
[PROOF STEP]
qed |
#include <boost/wave/cpplexer/re2clex/scanner.hpp>
|
import os.path as osp
import glob
import numpy as np
import neuron.ops as ops
from neuron.config import registry
from .dataset import SeqDataset
__all__ = ['TrackingNet']
@registry.register_module
class TrackingNet(SeqDataset):
r"""`TrackingNet <https://tracking-net.org/>`_ Datasets.
Publication:
``TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.``,
M. Muller, A. Bibi, S. Giancola, S. Al-Subaihi and B. Ghanem, ECCV 2018.
Args:
root_dir (string): Root directory of dataset where sequence
folders exist.
subset (string, optional): Specify ``train`` or ``test``
subset of TrackingNet.
"""
def __init__(self, root_dir=None, subset='test'):
assert subset in ['train', 'test'], 'Unknown subset.'
if root_dir is None:
root_dir = osp.expanduser('~/data/TrackingNet')
self.root_dir = root_dir
self.subset = subset
if subset == 'test':
subset_dirs = ['TEST']
elif subset == 'train':
subset_dirs = ['TRAIN_%d' % c for c in range(12)]
# initialize the dataset
super(TrackingNet, self).__init__(
name='TrackingNet_{}'.format(self.subset),
root_dir=self.root_dir,
subset_dirs=subset_dirs)
def _construct_seq_dict(self, root_dir, subset_dirs):
# image and annotation paths
anno_files = [glob.glob(osp.join(
root_dir, c, 'anno/*.txt')) for c in subset_dirs]
anno_files = sorted(sum(anno_files, []))
seq_dirs = [osp.join(
osp.dirname(osp.dirname(f)),
'frames', osp.basename(f)[:-4])
for f in anno_files]
seq_names = [osp.basename(d) for d in seq_dirs]
# construct seq_dict
seq_dict = {}
for s, seq_name in enumerate(seq_names):
if s % 100 == 0 or s + 1 == len(seq_names):
ops.sys_print('Processing sequence [%d/%d]: %s...' % (
s + 1, len(seq_names), seq_name))
img_files = glob.glob(
osp.join(seq_dirs[s], '*.jpg'))
img_files = sorted(
img_files,
key=lambda f: int(osp.basename(f)[:-4]))
anno = np.loadtxt(anno_files[s], delimiter=',')
if anno.ndim == 1:
anno = np.expand_dims(anno, axis=0)
anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1
# meta information
seq_len = len(img_files)
img0 = ops.read_image(img_files[0])
meta = {
'width': img0.shape[1],
'height': img0.shape[0],
'frame_num': seq_len,
'target_num': 1,
'total_instances': seq_len}
# update seq_dict
seq_dict[seq_name] = {
'img_files': img_files,
'target': {
'anno': anno,
'meta': meta}}
return seq_dict
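

if __name__ == '__main__':
    # Minimal usage sketch; assumes the TrackingNet TEST split is available
    # under ~/data/TrackingNet in the frames/anno layout parsed above, and
    # that the SeqDataset base class keeps the `name` it was constructed with.
    dataset = TrackingNet(subset='test')
    print(dataset.name)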
|
Over the past century, Leviton's electrical components have made them a household name. But did you know that they produce an impressive line of fiber optic products as well? From consumables and connectors to fiber enclosures and adapter plates, Leviton fiber optic components are designed for fast, easy terminations and high quality connections. Read on to learn more about Leviton fiber optics, available right here at CableOrganizer.com!
Before you can terminate optical fiber, you've got to get it squeaky clean. For just that reason, you'll find a wide variety of Leviton fiber optic consumables at CableOrganizer.com. The standard Leviton Consumables Kit comes complete with alcohol pads, polyester wipes, music wire, and lapping film. The Fast-Cure Kit is made up of all the adhesive, accelerator, needles and syringes needed for terminations with Leviton's Fast-Cure connectors. We even carry Leviton's Adhesive Accelerator kit, which contains primer and hardener for use with fiber optic connector adhesives.
Leviton Fan Out Kits are perfect for separating and protecting 250-micron fibers during fan out terminations, and make it easy to route them into 900-micron color-coded buffer tubes. Fan out kits are available in 24" and 36" lengths, for 6-fiber or 12-fiber configurations.
Whether you need to terminate single-mode or multimode fiber, Leviton's FastCAM connectors allow you to make fast, precise field terminations. Using a proven and revolutionary mechanical splice technology, FastCAM connectors eliminate the need for hand polishing, special tools, and even epoxy! After a cleaved fiber is inserted into a FastCAM connector, the installer simply releases the factory-installed wedge clip, and an instant low-loss connection is formed.
EIA/TIA compliant FastCAM connectors are available in LC, SC or ST styles. They're perfect for any indoor premise or fiber-to-the-subscriber installation, as well as applications that require a fast data network. To make terminations even easier, try Leviton's FastCAM Installation Kit, which includes a fiber stripper, cleaver, and two types of cleaning wipes: dry and alcohol-moistened.
Perfect for both indoor and outdoor applications, Fast-Cure Adhesive Style Connectors from Leviton make it easy to quickly terminate optical fiber for LANs, WANs, point-to-point systems and Cable TV distribution networks. Available for single-mode and multimode fiber, Fast-Cure connectors come in LC, SC and ST styles, and feature precision, pre-radiused zirconia ferrules. Leviton's Fast-Cure adhesive connectors comply with TIA/EIA standards, and their high cable-retention crimp makes them ideal for use with jacketed fiber.
Thread-Lock connectors are a convenient solution for easy field terminations on single-mode and multimode fiber. TIA/EIA compliant Thread-Lock connectors are both removable and reusable, and meet Telecommunications Industry standards for tensile strength and performance. Best suited for indoor use, these Leviton fiber optic connectors don't require crimp tools, epoxies or heat curing, and take about 2 minutes each to install. Thread-Lock connectors have a typical insertion loss of 0.20dB, and come in SC and ST varieties, with duplex clips available.
Ideal for protecting and organizing optical splice fibers, the Leviton DPS Optical Splice Enclosure is designed to complement any 19" or 23" rack mount application. The DPS Optical Splice Enclosure is constructed of rugged aluminum, and features front and rear hinged doors (with an optional key lock) for convenient access to fibers and splice trays. Available in 3U, 4U and 6U sizes, this Leviton enclosure is NEBS GR-63 compliant, and is perfect for use with legacy systems.
Leviton's OPT-X® 1000 1RU Fiber Optic Enclosure is ideal for the splicing, patching and cross-connect of single-mode or multimode fiber in rack mount applications. A natural fit for tight spaces, the OPT-X™ 1000 can accommodate up to 24 fibers, and features front and rear tilt-up lids for easy access. Equipped with strain relief at cable entry points, this Leviton fiber enclosure is available with or without adapters, and accepts up to 3 splice trays. The OPT-X™ 1000 is UL-listed, and meets or exceeds all TIA/EIA standards that apply to it.
The perfect partner for Leviton OPT-X™ 1000 Rack-Mount Enclosures! OPT-X® Adapter Plates are available in multiple adapter types, so installers can easily add in or change connector styles. Equipped with easy-to-use plastic plungers, these 16-gauge steel adapter plates allow for fast installation. No tools needed! TIA/EIA compliant Leviton OPT-X™ adapter plates can be configured with 6, 8, or 12 ports, and accommodate FC, LC, ST, MT-RJ and QuickPort adapters.
Require Import StLib.Main StLib.Kernels.
Require Import Psatz.
Require Import Utils.
Parameters N P : Z.
Axiom Ngt0 : N > 0.
Axiom Pgt0 : P > 0.
Module AmPutOpt <: (PROBLEM Z2).
Local Open Scope aexpr.
Definition space := 〚0, N-1〛×〚0, N*P-1〛.
Definition target := 〚0, N-1〛×〚0, N*P-1〛.
Definition dep c :=
match c with
| (t,s) =>
[(t+1,s); (t+1,s-1); (t+1,s+1)]
end.
End AmPutOpt.
Module P := Kern Z2 AmPutOpt.
Import P.
Definition my_comp0 :=
(For "t" From 0 To N-1 Do
For "x" From N*2*"id"+"t" To N*2*"id" + N*2-"t"-1 Do
Fire (N - 1 - "t", "x" : aexpr))%code.
Definition my_send :=
(If "to" =? "id" - 1 Then
For "t" From 0 To N-1 Do
Fire (N - 1 - "t", N*2*"id" + "t");;
For "t" From 1 To N-1 Do
Fire (N - "t", N*2*"id" + "t")
Else
If "to" =? "id" + 1 Then
For "t" From 0 To N-1 Do
Fire (N - 1 - "t", N*2*"id" + N*2-"t"-1);;
For "t" From 1 To N-1 Do
Fire (N - "t", N*2*"id" + N*2-"t"-1)
Else
Nop)%code.
Definition my_comp1_0 :=
(For "t" From 0 To N-1 Do
For "x" From N*2*"id" - "t" To N*2*"id" + "t" - 1 Do
Fire (N - 1 - "t":aexpr, "x":aexpr))%code.
Definition my_comp1_1 :=
(For "t" From 0 To N-1 Do
For "x" From N*2*"id" + N*2-"t" To N*2*"id" + N*2+"t"-1 Do
Fire (N - 1 - "t":aexpr, "x":aexpr))%code.
(** Merge all computation steps together. *)
Definition my_comp :=
(If "T" =? 0 Then
my_comp0
Else
If "T" =? 1 Then
my_comp1_0;; my_comp1_1
Else
Nop)%code.
(** And here is the kernel! *)
Definition my_code :=
makeKernel my_comp my_send.
(** Let's now prove the correctness of this kernel. *)
Theorem my_code_correct :
kcorrect my_code (P-1) 1.
Proof.
to_ctx Ngt0; to_ctx Pgt0.
synthesize trace; intros.
(** First goal is the verification conditions for the computation steps,
* which encapsulate the dependencies. *)
- decide T=0.
+ simplify sets with ceval; forward.
(** COMP0, first dependency (south). *)
decide i=0; [right; unfold space; forward; unfold fst, snd in *; omega|].
left; lhs. rhs; forward.
exists (i-1); forward. omega.
exists i0; forward; omega.
(** COMP0, second dependency (south-west). *)
decide i=0; [right; unfold space; forward; unfold fst, snd in *; omega|].
left; lhs. rhs; forward.
exists (i-1); forward. omega.
exists (i0-1); forward; omega.
(** COMP0, last dependency (south-east). *)
decide i=0; [right; unfold space; forward; unfold fst, snd in *; omega|].
left; lhs. rhs; forward.
exists (i-1); forward. omega.
exists (i0+1); forward; omega.
+ decide T=1.
simplify sets with ceval; forward.
(** COMP1, TL, first dependency (south). *)
assert (HT : N*2*id - i = i0 \/ i0 = N*2*id + i - 1
\/ N*2*id - i < i0 < N*2*id + i - 1)
by omega; destruct HT as [?|[?|?]].
(* Left edge of TL[id]. *)
decide id=0; [right; unfold space; forward; unfold fst, snd in *; omega|].
left; lhs; lhs; rhs; forward.
exists 0; forward.
exists (id-1); forward; try omega.
unfold sends_synth; simpl; forward.
destr_case (id =? id - 1 - 1).
destr_case (id =? id - 1 + 1).
forward.
simplify sets with ceval.
exists (i-1); forward; try omega.
lhs; forward; try omega; nia.
(* Right edge of TL[id]. *)
left. lhs; lhs; lhs; forward.
exists 0; forward.
unfold computes_synth; simpl; simplify sets with ceval; forward.
exists (i-1); forward; try omega.
exists i0; forward; omega.
(* Interior of TL[id]. *)
left; lhs; rhs; forward.
exists (i-1); forward; try omega.
exists i0; forward; omega.
(** COMP1, TL, second dependency (south-west). *)
destruct Z_le_gt_dec with i0 (N*2*id - i + 1).
(* The two left edges of TL[id]. *)
decide id=0; [right; unfold space; forward; unfold fst, snd in *; omega|].
left; lhs; lhs; rhs; forward.
exists 0; forward.
exists (id - 1); forward; try omega.
unfold sends_synth; simpl; forward.
destr_case (id=?id-1-1); destr_case (id=?id-1+1).
simplify sets with ceval; forward.
exists (i - 1); forward; try omega.
clear H11.
decide i0=(N*2*id - i).
rhs; forward.
exists i; forward; try omega. nia.
decide i0=(N*2*id - i + 1).
lhs; forward.
nia.
nia.
exfalso; omega.
(* The rest of TL[id]. *)
left. lhs; rhs; forward.
exists (i-1); forward; try omega.
exists (i0-1); forward; try omega.
(** COMP1, TL, last dependency (south-east). *)
destruct Z_le_gt_dec with (N*2*id + i - 2) i0.
(* The two right edges of TL[id]. *)
left; lhs; lhs; lhs; forward.
exists 0; forward.
unfold computes_synth; simpl; simplify sets with ceval; forward.
exists (i-1); forward; try omega.
exists (i0+1); forward; omega.
(* Interior of TL[id]. *)
left; lhs; rhs; forward.
exists (i-1); forward; try omega.
exists (i0+1); forward; omega.
(** COMP1, TR, first dependency (south). *)
assert (HT : N*2*id + N*2 - i = i0 \/ i0 = N*2*id + N*2 + i - 1
\/ N*2*id + N*2 - i < i0 < N*2*id + N*2 + i - 1)
by omega; destruct HT as [?|[?|?]].
(* Left edge of TR[id]. *)
left; lhs; lhs; lhs; lhs; forward.
exists 0; forward.
unfold computes_synth; simpl; simplify sets with ceval; forward.
exists (i-1); forward; try omega.
exists i0; forward; omega.
(* Right edge of TR[id]. *)
decide id=(P-1); [right; unfold space; forward; unfold fst, snd in *; nia|].
left; lhs; lhs; lhs; rhs; forward.
exists 0; forward.
exists (id+1); forward; try omega.
unfold sends_synth; simpl; forward.
destr_case (id =? id + 1 - 1).
forward.
exists (i-1); forward; try omega.
simplify sets with ceval.
lhs; forward. nia.
nia.
(* Interior of TR[id]. *)
left; lhs; rhs; forward.
exists (i-1); forward; try omega.
exists i0; forward; omega.
(** COMP1, TR, second dependency (south-west). *)
destruct Z_le_gt_dec with i0 (N*2*id + N*2 - i + 1).
(* Left edge of TR[id]. *)
left; lhs; lhs; lhs; lhs; forward.
exists 0; forward.
unfold computes_synth; simpl; simplify sets with ceval; forward.
exists (i-1); forward; try omega.
exists (i0-1); forward; try omega.
(* Interior of TR[id]. *)
left; lhs; rhs; forward.
exists (i-1); forward; try omega.
exists (i0-1); forward; try omega.
(** COMP1, TR, last dependency (south-east). *)
destruct Z_le_gt_dec with (N*2*id + N*2 + i - 2) i0.
(* Right edge of TR[id]. *)
decide id=(P-1); [right; unfold space; forward; unfold fst, snd in *; nia|].
left; lhs; lhs; lhs; rhs; forward.
exists 0; forward.
exists (id+1); forward; try omega.
unfold sends_synth; simpl; simplify sets with ceval.
destr_case (id =? id + 1 - 1); forward.
exists (i-1); forward; try omega.
clear H10; decide i0=(N*2*id + N*2 + i - 1).
rhs; forward.
exists i; forward; try omega. nia.
lhs; forward; nia.
(* Interior of TR[id]. *)
left; lhs; rhs; forward.
exists (i-1); forward; try omega.
exists (i0+1); forward; try omega.
omega.
(** Second goal is the verification conditions for the communication steps,
* which are trivial. *)
- destruct (to =? id - 1); forward.
destruct (to =? id + 1); forward.
(** Third goal is "conservation of knowledge": we are not allowed to send
* a value that we don't know. *)
- unfold af_synth, computes_synth, sends_synth in *; simpl in *.
simplify sets with ceval.
repeat intro; lhs.
destruct (to =? id - 1); forward.
(* Left border *)
exists 0; simpl; forward. (* Time step 0 *)
exists x0; forward.
exists (snd x); rewrite H10; simpl; forward'; omega.
exists 0; simpl; forward. (* Time step 0 *)
exists (x1 - 1); forward; try omega.
exists (snd x); rewrite H11; simpl; forward'; try omega.
repeat rhs; forward; omega.
destruct (to =? id + 1); simpl; forward.
(* Right border *)
exists 0; simpl; forward. (* Time step 0 *)
exists x0; forward.
exists (snd x); rewrite H10; simpl; forward'; try omega.
exists 0; simpl; forward. (* Time step 0 *)
exists (x1 - 1); forward; try omega.
exists (snd x); rewrite H11; simpl; forward'; try omega.
repeat rhs; forward; omega.
(** Last goal is completeness: we computed all the cells we were supposed
* to compute. *)
- unfold target, bf_synth, computes_synth, sends_synth; simpl.
repeat intro.
destruct x; forward; unfold fst, snd in *.
(** We have to prove that [x] belongs to some T[I], TL[I] or TR[I], for
* some "I". This "I" is obtained by take the quotient of x's abscissa
* by 2N. *)
pose (I := z0 / (N*2)).
assert (HNT : N*2 > 0) by omega.
to_ctx (Z_div_mod_spec z0 (N*2) HNT).
exists I; forward.
unfold I; nia.
unfold I; nia.
destruct seg_dec with (N*2*I + (N - 1 - z)) (N*2*I + N*2-(N - 1 - z)-1) z0 as [?|[?|?]];
lhs; forward.
(* Case T[I]. *)
exists 0; simpl; simplify sets with ceval;
forward; try omega. (* First computation step. *)
exists (N - 1 - z); forward; try omega.
exists z0; forward; try omega.
(* Case TL[I]. *)
exists 1; simpl; simplify sets with ceval;
forward; try omega. (* Second computation step. *)
lhs; forward. (* First sub-step *)
exists (N - 1 - z); forward; try omega.
exists z0; forward; try omega.
unfold I; omega.
(* Case TR[I] *)
exists 1; simpl; simplify sets with ceval;
forward; try omega. (* Second computation step. *)
rhs; forward. (* Second sub-step *)
exists (N - 1 - z); forward; try omega.
exists z0; forward; try omega.
unfold I; omega.
Qed. |
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cblas.h>
#include <omp.h>
#include "matrix.h"
/* L1 distance between two vectors of length N. */
double l1_distance(double *vec_1, double *vec_2, int N);

/* Naive ranking of the N nodes of `matrix`, written into `result`. */
void naive_ranking(double *matrix, double *result, int N);

/* Damped PageRank over the N x N matrix: iterate until successive rank
   vectors change by less than `tolerance` (presumably measured with
   l1_distance above) or `max_iter` iterations have run. */
void pagerank(double *matrix, double *result, int N,
              double tolerance, double damping_factor, int max_iter);
(* Author: Tobias Nipkow and Gerwin Klein *)
section "Compiler for IMP"
theory Chapter8_1 imports "~~/src/HOL/IMP/Big_Step" "~~/src/HOL/IMP/Star"
begin
subsection "List setup"
text {*
In the following, we use the length of lists as integers
instead of natural numbers. Instead of converting @{typ nat}
to @{typ int} explicitly, we tell Isabelle to coerce @{typ nat}
automatically when necessary.
*}
declare [[coercion_enabled]]
declare [[coercion "int :: nat \<Rightarrow> int"]]
text {*
Similarly, we will want to access the ith element of a list,
where @{term i} is an @{typ int}.
*}
fun inth :: "'a list \<Rightarrow> int \<Rightarrow> 'a" (infixl "!!" 100) where
"(x # xs) !! i = (if i = 0 then x else xs !! (i - 1))"
text {*
The only additional lemma we need about this function
is indexing over append:
*}
lemma inth_append [simp]:
"0 \<le> i \<Longrightarrow>
(xs @ ys) !! i = (if i < size xs then xs !! i else ys !! (i - size xs))"
by (induction xs arbitrary: i) (auto simp: algebra_simps)
text{* We hide coercion @{const int} applied to @{const length}: *}
abbreviation (output)
"isize xs == int (length xs)"
notation isize ("size")
subsection "Instructions and Stack Machine"
text_raw{*\snip{instrdef}{0}{1}{% *}
datatype instr =
LOADI int | LOAD vname | ADD | STORE vname |
JMP int | JMPLESS int | JMPGE int
text_raw{*}%endsnip*}
type_synonym stack = "val list"
type_synonym config = "int \<times> state \<times> stack"
abbreviation "hd2 xs == hd(tl xs)"
abbreviation "tl2 xs == tl(tl xs)"
fun iexec :: "instr \<Rightarrow> config \<Rightarrow> config" where
"iexec instr (i,s,stk) = (case instr of
LOADI n \<Rightarrow> (i+1,s, n#stk) |
LOAD x \<Rightarrow> (i+1,s, s x # stk) |
ADD \<Rightarrow> (i+1,s, (hd2 stk + hd stk) # tl2 stk) |
STORE x \<Rightarrow> (i+1,s(x := hd stk),tl stk) |
JMP n \<Rightarrow> (i+1+n,s,stk) |
JMPLESS n \<Rightarrow> (if hd2 stk < hd stk then i+1+n else i+1,s,tl2 stk) |
JMPGE n \<Rightarrow> (if hd2 stk >= hd stk then i+1+n else i+1,s,tl2 stk))"
definition
exec1 :: "instr list \<Rightarrow> config \<Rightarrow> config \<Rightarrow> bool"
("(_/ \<turnstile> (_ \<rightarrow>/ _))" [59,0,59] 60)
where
"P \<turnstile> c \<rightarrow> c' =
(\<exists>i s stk. c = (i,s,stk) \<and> c' = iexec(P!!i) (i,s,stk) \<and> 0 \<le> i \<and> i < size P)"
lemma exec1I [intro, code_pred_intro]:
"c' = iexec (P!!i) (i,s,stk) \<Longrightarrow> 0 \<le> i \<Longrightarrow> i < size P
\<Longrightarrow> P \<turnstile> (i,s,stk) \<rightarrow> c'"
by (simp add: exec1_def)
abbreviation
exec :: "instr list \<Rightarrow> config \<Rightarrow> config \<Rightarrow> bool" ("(_/ \<turnstile> (_ \<rightarrow>*/ _))" 50)
where
"exec P \<equiv> star (exec1 P)"
declare star.step[intro]
lemmas exec_induct = star.induct [of "exec1 P", split_format(complete)]
code_pred exec1 by (metis exec1_def)
values
"{(i,map t [''x'',''y''],stk) | i t stk.
[LOAD ''y'', STORE ''x''] \<turnstile>
(0, <''x'' := 3, ''y'' := 4>, []) \<rightarrow>* (i,t,stk)}"
subsection{* Verification infrastructure *}
text{* Below we need to argue about the execution of code that is embedded in
larger programs. For this purpose we show that execution is preserved by
appending code to the left or right of a program. *}
lemma iexec_shift [simp]:
"((n+i',s',stk') = iexec x (n+i,s,stk)) = ((i',s',stk') = iexec x (i,s,stk))"
by(auto split:instr.split)
lemma exec1_appendR: "P \<turnstile> c \<rightarrow> c' \<Longrightarrow> P@P' \<turnstile> c \<rightarrow> c'"
by (auto simp: exec1_def)
lemma exec_appendR: "P \<turnstile> c \<rightarrow>* c' \<Longrightarrow> P@P' \<turnstile> c \<rightarrow>* c'"
by (induction rule: star.induct) (fastforce intro: exec1_appendR)+
lemma exec1_appendL:
fixes i i' :: int
shows
"P \<turnstile> (i,s,stk) \<rightarrow> (i',s',stk') \<Longrightarrow>
P' @ P \<turnstile> (size(P')+i,s,stk) \<rightarrow> (size(P')+i',s',stk')"
unfolding exec1_def
by (auto simp del: iexec.simps)
lemma exec_appendL:
fixes i i' :: int
shows
"P \<turnstile> (i,s,stk) \<rightarrow>* (i',s',stk') \<Longrightarrow>
P' @ P \<turnstile> (size(P')+i,s,stk) \<rightarrow>* (size(P')+i',s',stk')"
by (induction rule: exec_induct) (blast intro!: exec1_appendL)+
text{* Now we specialise the above lemmas to enable automatic proofs of
@{prop "P \<turnstile> c \<rightarrow>* c'"} where @{text P} is a mixture of concrete instructions and
pieces of code whose execution we already know (by induction), combined
by @{text "@"} and @{text "#"}. Backward jumps are not supported.
The details should be skipped on a first reading.
If we have just executed the first instruction of the program, drop it: *}
lemma exec_Cons_1 [intro]:
"P \<turnstile> (0,s,stk) \<rightarrow>* (j,t,stk') \<Longrightarrow>
instr#P \<turnstile> (1,s,stk) \<rightarrow>* (1+j,t,stk')"
by (drule exec_appendL[where P'="[instr]"]) simp
lemma exec_appendL_if[intro]:
fixes i i' j :: int
shows
"size P' <= i
\<Longrightarrow> P \<turnstile> (i - size P',s,stk) \<rightarrow>* (j,s',stk')
\<Longrightarrow> i' = size P' + j
\<Longrightarrow> P' @ P \<turnstile> (i,s,stk) \<rightarrow>* (i',s',stk')"
by (drule exec_appendL[where P'=P']) simp
text{* Split the execution of a compound program up into the excution of its
parts: *}
lemma exec_append_trans[intro]:
fixes i' i'' j'' :: int
shows
"P \<turnstile> (0,s,stk) \<rightarrow>* (i',s',stk') \<Longrightarrow>
size P \<le> i' \<Longrightarrow>
P' \<turnstile> (i' - size P,s',stk') \<rightarrow>* (i'',s'',stk'') \<Longrightarrow>
j'' = size P + i''
\<Longrightarrow>
P @ P' \<turnstile> (0,s,stk) \<rightarrow>* (j'',s'',stk'')"
by(metis star_trans[OF exec_appendR exec_appendL_if])
declare Let_def[simp]
subsection "Compilation"
fun acomp :: "aexp \<Rightarrow> instr list" where
"acomp (N n) = [LOADI n]" |
"acomp (V x) = [LOAD x]" |
"acomp (Plus a1 a2) = acomp a1 @ acomp a2 @ [ADD]"
lemma acomp_correct[intro]:
"acomp a \<turnstile> (0,s,stk) \<rightarrow>* (size(acomp a),s,aval a s#stk)"
by (induction a arbitrary: stk) fastforce+
fun bcomp :: "bexp \<Rightarrow> bool \<Rightarrow> int \<Rightarrow> instr list" where
"bcomp (Bc v) f n = (if v=f then [JMP n] else [])" |
"bcomp (Not b) f n = bcomp b (\<not>f) n" |
"bcomp (And b1 b2) f n =
(let cb2 = bcomp b2 f n;
m = if f then size cb2 else (size cb2::int)+n;
cb1 = bcomp b1 False m
in cb1 @ cb2)" |
"bcomp (Less a1 a2) f n =
acomp a1 @ acomp a2 @ (if f then [JMPLESS n] else [JMPGE n])"
value
"bcomp (And (Less (V ''x'') (V ''y'')) (Not(Less (V ''u'') (V ''v''))))
False 3"
lemma bcomp_correct[intro]:
fixes n :: int
shows
"0 \<le> n \<Longrightarrow>
bcomp b f n \<turnstile>
(0,s,stk) \<rightarrow>* (size(bcomp b f n) + (if f = bval b s then n else 0),s,stk)"
proof(induction b arbitrary: f n)
case Not
from Not(1)[where f="~f"] Not(2) show ?case by fastforce
next
case (And b1 b2)
from And(1)[of "if f then size(bcomp b2 f n) else size(bcomp b2 f n) + n"
"False"]
And(2)[of n f] And(3)
show ?case by fastforce
qed fastforce+
fun ccomp :: "com \<Rightarrow> instr list" where
"ccomp SKIP = []" |
"ccomp (x ::= a) = acomp a @ [STORE x]" |
"ccomp (c\<^sub>1;;c\<^sub>2) = ccomp c\<^sub>1 @ ccomp c\<^sub>2" |
"ccomp (IF b THEN c\<^sub>1 ELSE c\<^sub>2) =
(if c\<^sub>2 = SKIP then
(let cc\<^sub>1 = ccomp c\<^sub>1; cb = bcomp b False (size cc\<^sub>1) in cb @ cc\<^sub>1)
else (let cc\<^sub>1 = ccomp c\<^sub>1; cc\<^sub>2 = ccomp c\<^sub>2; cb = bcomp b False (size cc\<^sub>1 + 1)
in cb @ cc\<^sub>1 @ JMP (size cc\<^sub>2) # cc\<^sub>2))" |
"ccomp (WHILE b DO c) =
(let cc = ccomp c; cb = bcomp b False (size cc + 1)
in cb @ cc @ [JMP (-(size cb + size cc + 1))])"
value "ccomp
(IF Less (V ''u'') (N 1) THEN ''u'' ::= Plus (V ''u'') (N 1)
ELSE ''v'' ::= V ''u'')"
value "ccomp (WHILE Less (V ''u'') (N 1) DO (''u'' ::= Plus (V ''u'') (N 1)))"
subsection "Preservation of semantics"
lemma ccomp_bigstep:
"(c,s) \<Rightarrow> t \<Longrightarrow> ccomp c \<turnstile> (0,s,stk) \<rightarrow>* (size(ccomp c),t,stk)"
proof(induction arbitrary: stk rule: big_step_induct)
case (Assign x a s)
show ?case by (fastforce simp:fun_upd_def cong: if_cong)
next
case (Seq c1 s1 s2 c2 s3)
let ?cc1 = "ccomp c1" let ?cc2 = "ccomp c2"
have "?cc1 @ ?cc2 \<turnstile> (0,s1,stk) \<rightarrow>* (size ?cc1,s2,stk)"
using Seq.IH(1) by fastforce
moreover
have "?cc1 @ ?cc2 \<turnstile> (size ?cc1,s2,stk) \<rightarrow>* (size(?cc1 @ ?cc2),s3,stk)"
using Seq.IH(2) by fastforce
ultimately show ?case by simp (blast intro: star_trans)
next
case (WhileTrue b s1 c s2 s3)
let ?cc = "ccomp c"
let ?cb = "bcomp b False (size ?cc + 1)"
let ?cw = "ccomp(WHILE b DO c)"
have "?cw \<turnstile> (0,s1,stk) \<rightarrow>* (size ?cb,s1,stk)"
using `bval b s1` by fastforce
moreover
have "?cw \<turnstile> (size ?cb,s1,stk) \<rightarrow>* (size ?cb + size ?cc,s2,stk)"
using WhileTrue.IH(1) by fastforce
moreover
have "?cw \<turnstile> (size ?cb + size ?cc,s2,stk) \<rightarrow>* (0,s2,stk)"
by fastforce
moreover
have "?cw \<turnstile> (0,s2,stk) \<rightarrow>* (size ?cw,s3,stk)" by(rule WhileTrue.IH(2))
ultimately show ?case by(blast intro: star_trans)
qed fastforce+
end
|
"""
UniformMesh(start, stop, length)
1D uniform mesh data.
length : number of points
length-1 : number of cells
To remove the last point, set endpoint=false
"""
struct UniformMesh
start :: Float64
stop :: Float64
length :: Int
step :: Float64
points :: Vector{Float64}
endpoint :: Bool
function UniformMesh(start, stop, length::Int; endpoint=true)
if (endpoint)
points = range(start, stop=stop, length=length)
else
points = range(start, stop=stop, length=length+1)[1:end-1]
end
step = points[2]-points[1]
new( start, stop, length, step, points, endpoint)
end
end
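
# Usage sketch: a periodic grid on [0, 2π) keeps `length` points by dropping
# the duplicated endpoint, so `step` comes out as (stop - start) / length.
mesh = UniformMesh(0.0, 2π, 64; endpoint = false)
@assert length(mesh.points) == 64 && mesh.step ≈ 2π / 64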
|
theory Inits
imports Main "~~/src/HOL/Library/Sublist"
begin
fun inits where
"inits [] = [[]]"
| "inits (i#is) = [] # map (op # i) (inits is)"
lemma inits_Snoc[simp]:
"inits (is@[i]) = inits is @ [is@[i]]"
by (induction "is") auto
lemma inits_eq_Snoc:
"inits is = xs @ [x] \<longleftrightarrow> (is = [] \<and> xs = [] \<or> (\<exists> i is'. is = is'@[i] \<and> xs = inits is')) \<and> x = is"
by (cases "is" rule: rev_cases) auto
lemma in_set_inits[simp]: "is' \<in> set (inits is) \<longleftrightarrow> prefixeq is' is"
by (induction "is'" arbitrary: "is"; rename_tac "is", case_tac "is"; auto)
lemma prefixeq_snocD: "prefixeq (xs@[x]) ys \<Longrightarrow> prefix xs ys"
by (simp add: prefixI' prefix_order.dual_order.strict_trans1)
lemma prefixeq_butlast: "prefixeq (butlast xs) xs"
by (metis append_butlast_last_id butlast.simps(1) prefixI' prefix_order.eq_iff prefix_order.less_imp_le)
lemma prefixeq_app_Cons_elim:
assumes "prefixeq (xs@[y]) (z#zs)"
obtains "xs = []" and "y = z"
| xs' where "xs = z#xs'" and "prefixeq (xs'@[y]) zs"
using assms by (cases xs) auto
lemma prefixeq_app_Cons_simp:
"prefixeq (xs@[y]) (z#zs) \<longleftrightarrow> xs = [] \<and> y = z \<or> xs = z#tl xs \<and> prefixeq (tl xs@[y]) zs"
by (cases xs) auto
end |