/*
GENETIC - A simple genetic algorithm.
Copyright 2014, Javier Burguete Tolosa.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY Javier Burguete Tolosa ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL Javier Burguete Tolosa OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file genetic.c
* \brief Source file to define genetic algorithm main function.
* \author Javier Burguete Tolosa.
* \copyright Copyright 2014 Javier Burguete Tolosa. All rights reserved.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <gsl/gsl_rng.h>
#include <glib.h>
#if HAVE_MPI
#include <mpi.h>
#endif
#include "bits.h"
#include "entity.h"
#include "population.h"
#include "reproduction.h"
#include "selection.h"
#include "evolution.h"
#include "genetic.h"
#define DEBUG_GENETIC 0 ///< Macro to debug the genetic functions.
static Population genetic_population[1];
///< Population of the genetic algorithm.
static double (*genetic_simulation) (Entity *);
///< Pointer to the function to perform a simulation.
/**
* Function to get a variable encoded in the genome of an entity.
*
* \return Variable value.
*/
double
genetic_get_variable (Entity * entity, ///< Entity struct.
GeneticVariable * variable) ///< Variable data.
{
double x;
#if DEBUG_GENETIC
fprintf (stderr, "get variable: start\n");
#endif
x = variable->minimum
+ bit_get_value (entity->genome, variable->location, variable->nbits)
* (variable->maximum - variable->minimum)
/ ((unsigned long long int) 1L << variable->nbits);
#if DEBUG_GENETIC
fprintf (stderr, "get variable: value=%lg\n", x);
fprintf (stderr, "get variable: end\n");
#endif
return x;
}
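/*
 * Worked example of the decoding above: with minimum = 0.0, maximum = 1.0
 * and nbits = 4, a genome slice whose bits encode 10 decodes to
 * 0.0 + 10 * (1.0 - 0.0) / 16 = 0.625; dividing by 1 << nbits means the
 * decoded value can equal the minimum but only approach the maximum.
 */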
/**
* Function to apply the evolution of a population.
*/
static inline void
genetic_evolution (Population * population, ///< Population
gsl_rng * rng) ///< GSL random numbers generator.
{
#if DEBUG_GENETIC
fprintf (stderr, "genetic_evolution: start\n");
#endif
evolution_mutation (population, rng);
evolution_reproduction (population, rng);
evolution_adaptation (population, rng);
#if DEBUG_GENETIC
fprintf (stderr, "genetic_evolution: end\n");
#endif
}
/**
* Function to perform the simulations on a thread.
*/
static void
genetic_simulation_thread (GeneticThreadData * data) ///< Thread data.
{
unsigned int i;
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_thread: start\n");
fprintf (stderr, "genetic_simulation_thread: nmin=%u nmax=%u\n",
data->nmin, data->nmax);
#endif
for (i = data->nmin; i < data->nmax && !genetic_population->stop; ++i)
{
genetic_population->objective[i]
= genetic_simulation (genetic_population->entity + i);
if (genetic_population->objective[i] < genetic_population->threshold)
{
g_mutex_lock (mutex);
genetic_population->stop = 1;
g_mutex_unlock (mutex);
break;
}
}
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_thread: end\n");
#endif
}
/**
* Function to perform the simulations on the master task.
*/
static void
genetic_simulation_master (unsigned int nsurvival)
///< Number of survival entities already simulated.
{
unsigned int j, nsimulate, nmin, nmax;
GThread *thread[nthreads];
GeneticThreadData thread_data[nthreads];
Population *population;
#if HAVE_MPI
unsigned int i;
unsigned int n[ntasks + 1];
unsigned int stop[ntasks];
char *genome_array;
MPI_Status mpi_status;
#endif
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_master: start\n");
#endif
population = genetic_population;
nmax = population->nentities;
nsimulate = nmax - nsurvival;
nmin = nsurvival;
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_master: nmax=%u nmin=%u nsimulate=%u\n",
nmax, nmin, nsimulate);
#endif
#if HAVE_MPI
nmax = nmin + nsimulate / ntasks;
// Send genome information to the slaves
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_master: nmax=%u nmin=%u nsimulate=%u\n",
nmax, nmin, nsimulate);
#endif
genome_array = (char *) g_malloc (nsimulate * population->genome_nbytes);
for (j = 0; j < nsimulate; ++j)
memcpy (genome_array + j * population->genome_nbytes,
population->entity[nmin + j].genome, population->genome_nbytes);
n[0] = 0;
for (i = 0; (int) ++i < ntasks;)
{
n[i] = i * nsimulate / ntasks;
j = (i + 1) * nsimulate / ntasks - n[i];
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_master: send %u bytes on %u to task %u\n",
j * population->genome_nbytes, nsurvival + n[i], i);
#endif
MPI_Send (genome_array + n[i] * population->genome_nbytes,
j * population->genome_nbytes, MPI_CHAR, i, 1, MPI_COMM_WORLD);
}
n[i] = nsimulate;
#if DEBUG_GENETIC
for (j = 0; j <= ntasks; ++j)
fprintf (stderr, "genetic_simulation_master: n[%u]=%u\n", j, n[j]);
#endif
g_free (genome_array);
nsimulate = nmax - nmin;
#endif
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_master: nsimulate=%u\n", nsimulate);
fprintf (stderr, "genetic_simulation_master: performing simulations\n");
#endif
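// Partition the entities among the threads: thread j simulates indices
// [nmin + j * nsimulate / nthreads, nmin + (j + 1) * nsimulate / nthreads),
// so the work is split as evenly as integer division allows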
thread_data[0].nmin = nmin;
for (j = 0; ++j < nthreads;)
thread_data[j - 1].nmax = thread_data[j].nmin
= nmin + j * nsimulate / nthreads;
thread_data[j - 1].nmax = nmax;
for (j = 0; j < nthreads; ++j)
thread[j] = g_thread_new
(NULL, (GThreadFunc) (void (*)(void)) genetic_simulation_thread,
thread_data + j);
for (j = 0; j < nthreads; ++j)
g_thread_join (thread[j]);
#if HAVE_MPI
// Receive objective function results from the slaves
for (j = 0; (int) ++j < ntasks;)
{
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_master: "
"receive %u reals from task %u on %u\n",
n[j + 1] - n[j], j, nsurvival + n[j]);
#endif
MPI_Recv (population->objective + nsurvival + n[j], n[j + 1] - n[j],
MPI_DOUBLE, j, 1, MPI_COMM_WORLD, &mpi_status);
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_master: receive one integer from task %u\n",
j);
#endif
MPI_Recv (stop + j, 1, MPI_UNSIGNED, j, 1, MPI_COMM_WORLD, &mpi_status);
if (stop[j])
genetic_population->stop = 1;
}
// Sending stop instruction to the slaves
for (j = 0; (int) ++j < ntasks;)
{
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_master: sending one integer to task %u\n",
j);
#endif
MPI_Send (&genetic_population->stop, 1, MPI_UNSIGNED, j, 1,
MPI_COMM_WORLD);
}
#endif
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_master: end\n");
#endif
}
#if HAVE_MPI
/**
* Function to perform the simulations on a slave task.
*/
static void
genetic_simulation_slave (unsigned int nsurvival,
///< Number of survival entities already simulated.
int rank) ///< Number of task.
{
unsigned int j, nsimulate, nmin, nmax, stop;
GThread *thread[nthreads];
GeneticThreadData thread_data[nthreads];
Population *population;
char *genome_array;
MPI_Status mpi_status;
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_slave: rank=%d start\n", rank);
#endif
population = genetic_population;
nmax = population->nentities;
nsimulate = nmax - nsurvival;
nmin = nsurvival;
nmax = nmin + (rank + 1) * nsimulate / ntasks;
nmin += rank * nsimulate / ntasks;
// Receive genome information from the master
nsimulate = nmax - nmin;
genome_array = (char *) g_malloc (nsimulate * population->genome_nbytes);
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_slave: rank=%d receive %u bytes from master\n",
rank, nsimulate * population->genome_nbytes);
#endif
MPI_Recv (genome_array, nsimulate * population->genome_nbytes, MPI_CHAR, 0,
1, MPI_COMM_WORLD, &mpi_status);
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_slave: rank=%d nmin=%u nsimulate=%u\n",
rank, nmin, nsimulate);
#endif
for (j = 0; j < nsimulate; ++j)
memcpy (population->entity[nmin + j].genome,
genome_array + j * population->genome_nbytes,
population->genome_nbytes);
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_slave: rank=%d freeing\n", rank);
#endif
g_free (genome_array);
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_slave: rank=%d performing simulations\n",
rank);
#endif
thread_data[0].nmin = nmin;
for (j = 0; ++j < nthreads;)
thread_data[j - 1].nmax = thread_data[j].nmin
= nmin + j * nsimulate / nthreads;
thread_data[j - 1].nmax = nmax;
for (j = 0; j < nthreads; ++j)
thread[j] = g_thread_new
(NULL, (GThreadFunc) (void (*)(void)) genetic_simulation_thread,
thread_data + j);
for (j = 0; j < nthreads; ++j)
g_thread_join (thread[j]);
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_slave: rank=%d send %u reals on %u to master\n",
rank, nsimulate, nmin);
#endif
// Send the objective function results to the master
MPI_Send (population->objective + nmin, nsimulate, MPI_DOUBLE, 0, 1,
MPI_COMM_WORLD);
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_slave: rank=%d send one integer to master\n",
rank);
#endif
// Send the stop variable to the master
MPI_Send (&genetic_population->stop, 1, MPI_UNSIGNED, 0, 1, MPI_COMM_WORLD);
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_simulation_slave: "
"rank=%d receive one integer from master\n", rank);
#endif
// Receive the stop variable from the master
MPI_Recv (&stop, 1, MPI_UNSIGNED, 0, 1, MPI_COMM_WORLD, &mpi_status);
if (stop)
genetic_population->stop = 1;
#if DEBUG_GENETIC
fprintf (stderr, "genetic_simulation_slave: rank=%d end\n", rank);
#endif
}
#endif
/**
* Function to create the data of the genetic algorithm.
*
* \return 1 on success, 0 on error.
*/
int
genetic_new (unsigned int nvariables, ///< Number of variables.
GeneticVariable * variable, ///< Array of variables data.
unsigned int nentities,
///< Number of entities in each generation.
double mutation_ratio, ///< Mutation ratio.
double reproduction_ratio, ///< Reproduction ratio.
double adaptation_ratio, ///< Adaptation ratio.
double threshold) ///< Threshold to finish the simulations.
{
unsigned int i, genome_nbits, nprocesses;
#if DEBUG_GENETIC
fprintf (stderr, "genetic_new: start\n");
#endif
// Checking variables number
if (!nvariables)
{
fprintf (stderr, "ERROR: no variables\n");
return 0;
}
// Checking variable bits number
for (i = genome_nbits = 0; i < nvariables; ++i)
{
if (!variable[i].nbits || variable[i].nbits > 32)
{
fprintf (stderr, "ERROR: bad bits number in variable %u\n", i + 1);
return 0;
}
variable[i].location = genome_nbits;
genome_nbits += variable[i].nbits;
}
// Checking processes number
nprocesses = ntasks * nthreads;
if (!nprocesses)
{
fprintf (stderr, "ERROR: no processes\n");
return 0;
}
// Init the population
#if DEBUG_GENETIC
fprintf (stderr, "genetic_new: init the population\n");
#endif
if (!population_new (genetic_population, variable, nvariables, genome_nbits,
nentities, mutation_ratio, reproduction_ratio,
adaptation_ratio, threshold))
return 0;
#if DEBUG_GENETIC
fprintf (stderr, "genetic_new: end\n");
#endif
return 1;
}
/**
* Function to perform the genetic algorithm.
*
* \return 1 on success, 0 on error.
*/
int
genetic_algorithm (unsigned int nvariables, ///< Number of variables.
GeneticVariable * variable, ///< Array of variables data.
unsigned int nentities,
///< Number of entities in each generation.
unsigned int ngenerations, ///< Number of generations.
double mutation_ratio, ///< Mutation ratio.
double reproduction_ratio, ///< Reproduction ratio.
double adaptation_ratio, ///< Adaptation ratio.
const gsl_rng_type * type_random,
///< Type of GSL random numbers generator algorithm.
unsigned long random_seed,
///< Seed of the GSL random numbers generator.
unsigned int type_reproduction,
///< Type of reproduction algorithm.
unsigned int type_selection_mutation,
///< Type of mutation selection algorithm.
unsigned int type_selection_reproduction,
///< Type of reproduction selection algorithm.
unsigned int type_selection_adaptation,
///< Type of adaptation selection algorithm.
double threshold,
///< Threshold to finish the simulations.
double (*simulate_entity) (Entity *),
///< Pointer to the function to perform a simulation of an entity.
char **best_genome, ///< Best genome.
double **best_variables, ///< Best array of variables.
double *best_objective)
///< Best objective function value.
{
unsigned int i;
double *bv;
gsl_rng *rng;
Entity *best_entity;
#if HAVE_MPI
int rank;
#endif
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: start\n");
#endif
// Init the data
if (!genetic_new (nvariables, variable, nentities,
mutation_ratio, reproduction_ratio, adaptation_ratio,
threshold))
return 0;
// Init the evaluation function
genetic_simulation = simulate_entity;
// Get the rank
#if HAVE_MPI
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
#endif
// Variables to init only by master task
#if HAVE_MPI
if (rank == 0)
{
#endif
// Init the GSL random numbers generator
rng = gsl_rng_alloc (type_random);
if (random_seed)
gsl_rng_set (rng, random_seed);
// Init genomes
population_init_genomes (genetic_population, rng);
// Init selection, mutation and reproduction algorithms
reproduction_init (type_reproduction);
selection_init (type_selection_mutation, type_selection_reproduction,
type_selection_adaptation);
#if HAVE_MPI
}
if (rank == 0)
{
#endif
// First simulation of all entities
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: first simulation of all entities\n");
#endif
genetic_simulation_master (0);
// Sorting by objective function results
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_algorithm: sorting by objective function results\n");
#endif
evolution_sort (genetic_population);
// Population generations
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: ngenerations=%u\n", ngenerations);
#endif
for (i = 1; i < ngenerations && !genetic_population->stop; ++i)
{
// Evolution
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: evolution of the population\n");
#endif
genetic_evolution (genetic_population, rng);
// Simulation of the new entities
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_algorithm: simulation of the new entities\n");
#endif
genetic_simulation_master (genetic_population->nsurvival);
// Sorting by objective function results
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_algorithm: sorting by objective function results\n");
#endif
evolution_sort (genetic_population);
}
// Saving the best
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: saving the best\n");
#endif
best_entity = genetic_population->entity;
*best_objective = genetic_population->objective[0];
*best_genome = (char *) g_malloc (genetic_population->genome_nbytes);
memcpy (*best_genome, best_entity->genome,
genetic_population->genome_nbytes);
*best_variables = bv = (double *) g_malloc (nvariables * sizeof (double));
for (i = 0; i < nvariables; ++i)
bv[i] = genetic_get_variable (best_entity, variable + i);
gsl_rng_free (rng);
#if HAVE_MPI
}
else
{
// First simulation of all entities
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: first simulation of all entities\n");
#endif
genetic_simulation_slave (0, rank);
// Population generations
for (i = 1; i < ngenerations && !genetic_population->stop; ++i)
{
// Simulation of the new entities
#if DEBUG_GENETIC
fprintf (stderr,
"genetic_algorithm: simulation of the new entities\n");
#endif
genetic_simulation_slave (genetic_population->nsurvival, rank);
}
}
#endif
// Freeing memory
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: freeing memory\n");
#endif
population_free (genetic_population);
#if DEBUG_GENETIC
fprintf (stderr, "genetic_algorithm: rank=%d\n", rank);
fprintf (stderr, "genetic_algorithm: end\n");
#endif
return 1;
}
/**
* Function to perform the genetic algorithm with default random and evolution
* algorithms.
*
* \return 1 on success, 0 on error.
*/
int
genetic_algorithm_default (unsigned int nvariables,
///< Number of variables.
GeneticVariable * variable,
///< Array of variables data.
unsigned int nentities,
///< Number of entities in each generation.
unsigned int ngenerations,
///< Number of generations.
double mutation_ratio, ///< Mutation ratio.
double reproduction_ratio, ///< Reproduction ratio.
double adaptation_ratio, ///< Adaptation ratio.
unsigned long random_seed,
///< Seed of the GSL random numbers generator.
double threshold,
///< Threshold to finish the simulations.
double (*simulate_entity) (Entity *),
///< Pointer to the function to perform a simulation of an entity.
char **best_genome, ///< Best genome.
double **best_variables,
///< Best array of variables.
double *best_objective)
///< Best objective function value.
{
return genetic_algorithm (nvariables,
variable,
nentities,
ngenerations,
mutation_ratio,
reproduction_ratio,
adaptation_ratio,
gsl_rng_mt19937,
random_seed,
0,
0,
0,
0,
threshold,
simulate_entity,
best_genome, best_variables, best_objective);
}
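A minimal, hypothetical driver for the interface above. The objective function, variable ranges, population sizes and seed are illustrative assumptions, as is the assumption that "genetic.h" pulls in the Entity and GeneticVariable declarations and that the library's ntasks/nthreads globals are initialized by its setup code:
#include <stdio.h>
#include "genetic.h"
static GeneticVariable vars[2];
/* Illustrative objective: the sphere function, minimal at the origin. */
static double
sphere (Entity * entity)
{
  double x = genetic_get_variable (entity, vars);
  double y = genetic_get_variable (entity, vars + 1);
  return x * x + y * y;
}
int
main ()
{
  char *best_genome;
  double *best_variables, best_objective;
  unsigned int i;
  for (i = 0; i < 2; ++i)
    {
      vars[i].minimum = -5.;
      vars[i].maximum = 5.;
      vars[i].nbits = 16;
    }
  if (!genetic_algorithm_default (2, vars, 100, 50, 0.2, 0.6, 0.1, 7007l,
                                  1e-6, sphere, &best_genome,
                                  &best_variables, &best_objective))
    return 1;
  printf ("best objective=%lg at (%lg, %lg)\n", best_objective,
          best_variables[0], best_variables[1]);
  return 0;
}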
|
function SeamVector=removalMap(X,lines)
% REMOVALMAP takes a given image and finds the ordered set of (vertical)
% seams that are removed from an image and returns them in an array, where
% the Nth column in the array corresponds to the Nth seam to be removed.
%
% Author: Danny Luong
% http://danluong.com
%
% Last updated: 12/20/07
[rows cols dim]=size(X);
E=findEnergy(X); %Finds the gradient image
for i=1:min(lines,cols-1)
%find "energy map" image used for seam calculation given the gradient image
S=findSeamImg(E);
%find seam vector given input "energy map" seam calculation image
SeamVector(:,i)=findSeam(S);
%remove seam from image
X=SeamCut(X,SeamVector(:,i));
E=SeamCut(E,SeamVector(:,i));
%updates size of image
[rows cols dim]=size(X);
end
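% Example call (illustrative; assumes findEnergy, findSeamImg, findSeam and
% SeamCut are on the MATLAB path):
%   X = imread('peppers.png');
%   SeamVector = removalMap(X, 50); % order in which 50 vertical seams leave X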
|
lemma continuous_discrete_range_constant:
  fixes f :: "'a::topological_space \<Rightarrow> 'b::real_normed_algebra_1"
  assumes S: "connected S"
    and "continuous_on S f"
    and "\<And>x. x \<in> S \<Longrightarrow> \<exists>e>0. \<forall>y. y \<in> S \<and> f y \<noteq> f x \<longrightarrow> e \<le> norm (f y - f x)"
  shows "f constant_on S"
|
[STATEMENT]
lemma Cong_class_memb_Cong_rep:
assumes "is_Cong_class \<T>" and "t \<in> \<T>"
shows "Cong t (Cong_class_rep \<T>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<approx> Cong_class_rep \<T>
[PROOF STEP]
using assms Cong_class_membs_are_Cong rep_in_Cong_class
[PROOF STATE]
proof (prove)
using this:
is_Cong_class \<T>
t \<in> \<T>
\<lbrakk>is_Cong_class ?\<T>; ?t \<in> ?\<T>; ?t' \<in> ?\<T>\<rbrakk> \<Longrightarrow> ?t \<approx> ?t'
is_Cong_class ?\<T> \<Longrightarrow> Cong_class_rep ?\<T> \<in> ?\<T>
goal (1 subgoal):
1. t \<approx> Cong_class_rep \<T>
[PROOF STEP]
by simp |
import sys
import unicodedata
import re
from pathlib import Path
import pickle
import pkg_resources
import numpy as np
from nltk import ngrams
from abydos.phonetic import Soundex, DoubleMetaphone
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
class EthnicClassifier():
def __init__(self):
super().__init__()
self.dmp_encoder = DoubleMetaphone()
self.soundex_encoder = Soundex()
self.eth2idx = {}
self.idx2eth = {}
self.vectorizer = CountVectorizer(token_pattern=r'\S+', max_features=25000)
self.clf = LogisticRegression(random_state=10, max_iter=1000)
self.padding = False
self.ch_feat = True
self.ng_feat = True
self.sd_feat = True
self.mp_feat = True
def config_options(self, config: str):
for opt in config.split(":"):
    if (opt == 'pad') or (opt == 'all'):
        self.padding = True
    if (opt == 'ng') or (opt == 'all'):
        self.ng_feat = True
    if (opt == 'ch') or (opt == 'all'):
        self.ch_feat = True
if (opt == 'sd') or (opt == 'all'):
self.sd_feat = True
if (opt == 'mp') or (opt == 'all'):
self.mp_feat = True
def _extract_ngram_feats(self, units):
feats = []
n_units = len(units)
for i, u in enumerate(units):
is_lastname = (i == (n_units-1))
feats += self._token_ngram_feats(u, is_lastname)
return feats
def _extract_stat_feats(self, units):
feats = []
return feats
def _extract_metaphone_feats(self, units):
feats = []
ngram_feats = []
n_units = len(units)
for i, u in enumerate(units):
is_lastname = (i == (n_units-1))
(a,b) = self.dmp_encoder.encode(u)
feats += ["M0|" + a]
ngram_feats += self._token_ngram_feats(a, is_lastname)
if b != "":
feats += ["M0|" + b]
ngram_feats += self._token_ngram_feats(b, is_lastname)
return feats + [("M|" + f) for f in ngram_feats]
def _extract_soundex_feats(self, units):
feats = []
for u in units:
f = self.soundex_encoder.encode(u)
feats += ["S|" + f]
return feats
# character features: the characters left after stripping ASCII capitals and separators (e.g. accented letters)
def _extract_char_feats(self, name):
feats = []
chars = re.sub(r"[A-Z_ .-]+", "", name)
for c in chars:
feats += ["C|" + c]
return feats
def _token_ngram_feats(self, text, is_lastname):
ngram_feats = []
if is_lastname:
text = "+" + text + "+"
else:
text = "$" + text + "$"
for n in [2,3,4]:
ngram_feats += self._word2ngrams(text, n)
return ngram_feats
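# e.g. _token_ngram_feats("NG", is_lastname=True) pads the token to "+NG+"
# and yields ['+N', 'NG', 'G+', '+NG', 'NG+', '+NG+']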
def _word2ngrams(self, text, n=3):
return [text[i:i+n] for i in range(len(text)-n+1)]
def _deaccent(self, text):
norm = unicodedata.normalize("NFD", text)
result = ''.join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def extract_feat_str(self, name: str):
upname = name.upper()
ascii_name = self._deaccent(upname)
ascii_name = re.sub(r"[^A-Z -_']", "", ascii_name)
ascii_name = re.sub(r"[-_']", " ", ascii_name)
ascii_name = re.sub(r"^\\s+", "", ascii_name)
units = ascii_name.split(" ")
feats = []
# chfeats, ngramfeats, mpfeats, sdfeats
if self.ng_feat:
ngramfeats = self._extract_ngram_feats(units)
feats += ngramfeats
if self.mp_feat:
mpfeats = self._extract_metaphone_feats(units)
feats += mpfeats
if self.sd_feat:
sdfeats = self._extract_soundex_feats(units)
feats += sdfeats
if self.ch_feat:
chfeats = self._extract_char_feats(upname)
feats += chfeats
return ' '.join(feats)
# names: list of personal names
# ethnics: list of ethnicity class labels
def fit(self, names: list, ethnics: list):
print('extracting features:')
X = [self.extract_feat_str(n) for n in names]
print('[DONE]')
print('vectorize features:')
self.vectorizer = CountVectorizer(token_pattern=r'\S+', max_features=25000)
self.vectorizer.fit(X)
X_vector = self.vectorizer.transform(X)
self.eth2idx = {k: v for v, k in enumerate(set(ethnics))}
self.idx2eth = {v:k for k, v in self.eth2idx.items()}
y_vector = [self.eth2idx[y] for y in ethnics]
print('[DONE]')
print('fitting model:')
self.clf = LogisticRegression(random_state=10, max_iter=1000)
self.clf.fit(X_vector, y_vector)
print('[DONE]')
y_hat = self.clf.predict(X_vector)
correct_count = np.sum((y_hat == y_vector))
total_count = len(y_vector)
return float(correct_count) / float(total_count)
def classify_names(self, names: list):
n_names = len(names)
ethnics = [None] * n_names
for i, n in enumerate(names):
n = n.replace(' ', '_')
f = self.extract_feat_str(n)
X_vector = self.vectorizer.transform([f])
y_hat = self.clf.predict(X_vector)[0]
ethnics[i] = self.idx2eth[y_hat]
return ethnics
def classify_names_with_scores(self, names: list):
n_names = len(names)
ethnics = [None] * n_names
scores = [None] * n_names
for i, n in enumerate(names):
f = self.extract_feat_str(n)
X_vector = self.vectorizer.transform([f])
probs = self.clf.predict_proba(X_vector)[0]
y_hat = np.argmax(probs)
ethnics[i] = self.idx2eth[y_hat]
scores[i] = probs[y_hat]
return ethnics, scores
def ethnicity_classes(self):
return sorted(self.eth2idx, key=self.eth2idx.get)
# save 3 files for the model: '<filename>_meta.pk', '<filename>_vec.pk', '<filename>_model.pk'
def save_model(self, filename):
meta_file = filename + '_meta.pk'
vec_file = filename + '_vec.pk'
model_file = filename + '_model.pk'
pickle.dump({
'eth2idx': self.eth2idx,
'idx2eth': self.idx2eth,
}, open(meta_file, 'wb'))
pickle.dump(self.vectorizer, open(vec_file, 'wb'))
pickle.dump(self.clf, open(model_file, 'wb'))
@classmethod
def load_pretrained_model(cls, filename=None):
if (filename is None):
filename = 'ethnicseer'
meta_file = pkg_resources.resource_filename(__name__, filename + '_meta.pk')
vec_file = pkg_resources.resource_filename(__name__, filename + '_vec.pk')
model_file = pkg_resources.resource_filename(__name__, filename + '_model.pk')
ec = EthnicClassifier()
ec.vectorizer = pickle.load(open(vec_file, 'rb'))
ec.clf = pickle.load(open(model_file, 'rb'))
meta = pickle.load(open(meta_file, 'rb'))
ec.eth2idx = meta['eth2idx']
ec.idx2eth = meta['idx2eth']
return ec
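# Minimal usage sketch (the names, labels and file prefix below are
# illustrative, and a training set this small is far too little for a real
# model):
#
#   clf = EthnicClassifier()
#   train_acc = clf.fit(['John Smith', 'Wei Zhang', 'Giovanni Rossi'],
#                       ['eng', 'chi', 'ita'])
#   print(clf.classify_names(['Anna Rossi']))
#   clf.save_model('my_ethnicseer')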
|
This statement is so true! How many colors, patterns are you wearing today! That many! Great job!!!!
LOOK UP IN THE SKY!!!!
Looking good Liz! Wow! Tattoo is great!
Thanks Elisabeta!!!! Wow! Your fangs are ultra white!
Categories: Astonish, Blogging Blogger, Bonkers Away | Tags: 5/10/2018, Astonish, Astrid, clothes, different colors, Elisabeta, Elizabeth, fangs, Hiccup, patterns, Toothless, Wow! | Permalink.
I feel kind of willy-nilly today. How about you?
Trying to decide what to do? What to do? What to do?
Let’s see what do I have to do?
Feed Milo. Milo is a Domestic Short Hair Mix but he thinks he's a true Egyptian cat related to Bast.
Guess what – I’m not willy-nilly anymore. BLOGS it is!
Categories: Blogging Blogger, Bonkers Away, Daily Post comment, Willy-Nilly | Tags: 8/27/2017, blogs, Cat, Clean, clothes, cook, dust, Milo, what to do?, Willy-Nilly | Permalink. |
// Boost.Geometry (aka GGL, Generic Geometry Library)
// Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.
// Copyright (c) 2008-2012 Bruno Lalande, Paris, France.
// Copyright (c) 2009-2012 Mateusz Loskot, London, UK.
// Parts of Boost.Geometry are redesigned from Geodan's Geographic Library
// (geolib/GGL), copyright (c) 1995-2010 Geodan, Amsterdam, the Netherlands.
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_VIEWS_DETAIL_RANGE_TYPE_HPP
#define BOOST_GEOMETRY_VIEWS_DETAIL_RANGE_TYPE_HPP
#include <boost/range/value_type.hpp>
#include <boost/geometry/core/ring_type.hpp>
#include <boost/geometry/core/static_assert.hpp>
#include <boost/geometry/core/tag.hpp>
#include <boost/geometry/core/tags.hpp>
#include <boost/geometry/views/box_view.hpp>
namespace boost { namespace geometry
{
#ifndef DOXYGEN_NO_DISPATCH
namespace dispatch
{
template <typename Geometry,
typename Tag = typename tag<Geometry>::type>
struct range_type
{
BOOST_GEOMETRY_STATIC_ASSERT_FALSE(
"Not or not yet implemented for this Geometry type.",
Geometry, Tag);
};
template <typename Geometry>
struct range_type<Geometry, ring_tag>
{
typedef Geometry type;
};
template <typename Geometry>
struct range_type<Geometry, linestring_tag>
{
typedef Geometry type;
};
template <typename Geometry>
struct range_type<Geometry, polygon_tag>
{
typedef typename ring_type<Geometry>::type type;
};
template <typename Geometry>
struct range_type<Geometry, box_tag>
{
typedef box_view<Geometry> type;
};
// multi-point acts itself as a range
template <typename Geometry>
struct range_type<Geometry, multi_point_tag>
{
typedef Geometry type;
};
template <typename Geometry>
struct range_type<Geometry, multi_linestring_tag>
{
typedef typename boost::range_value<Geometry>::type type;
};
template <typename Geometry>
struct range_type<Geometry, multi_polygon_tag>
{
// Call its single-version
typedef typename dispatch::range_type
<
typename boost::range_value<Geometry>::type
>::type type;
};
} // namespace dispatch
#endif // DOXYGEN_NO_DISPATCH
// Will probably be replaced by the more generic "view_as", therefore in detail
namespace detail
{
/*!
\brief Meta-function defining a type which is a boost-range.
\details
- For linestrings and rings, it defines the type itself.
- For polygons it defines the ring type.
- For multi-points, it defines the type itself
- For multi-polygons and multi-linestrings, it defines the single-version
(so in the end the linestring and ring-type-of-multi-polygon)
\ingroup iterators
*/
template <typename Geometry>
struct range_type
{
typedef typename dispatch::range_type
<
Geometry
>::type type;
};
}
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_VIEWS_DETAIL_RANGE_TYPE_HPP
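A small compile-time check of the metafunction above (an illustrative sketch; the point and polygon types are the standard Boost.Geometry models, and the expected result follows from the polygon specialization of dispatch::range_type):
#include <type_traits>
#include <boost/geometry/geometries/point_xy.hpp>
#include <boost/geometry/geometries/polygon.hpp>
namespace bg = boost::geometry;
using point = bg::model::d2::point_xy<double>;
using polygon = bg::model::polygon<point>;
// For polygons, range_type yields the ring type.
static_assert(std::is_same<bg::detail::range_type<polygon>::type,
                           bg::ring_type<polygon>::type>::value,
              "for polygons, range_type yields the ring type");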
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj3synthconj6 : forall (lv0 : natural), (@eq natural (lv0) lv0).
Admitted.
QuickChick conj3synthconj6.
|
State Before: ι : Type ?u.1120
α : Type u_1
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Icc a b = ∅ ↔ ¬a ≤ b
State After: no goals
Tactic: rw [← coe_eq_empty, coe_Icc, Set.Icc_eq_empty_iff]
|
module Test03
import Automa
import Automa.RegExp: @re_str
using Test
@testset "Test03" begin
re = Automa.RegExp
header = re"[ -~]*"
newline = re"\r?\n"
sequence = re.rep(re.cat(re"[A-Za-z]*", newline))
fasta = re.rep(re.cat('>', header, newline, sequence))
machine = Automa.compile(fasta)
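# The grammar above accepts zero or more records, each a '>' header line
# followed by zero or more (possibly empty) sequence lines; both "\n" and
# "\r\n" line endings are allowed.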
for generator in (:table, :inline, :goto), checkbounds in (true, false), clean in (true, false)
ctx = Automa.CodeGenContext(generator=generator, checkbounds=checkbounds, clean=clean)
init_code = Automa.generate_init_code(ctx, machine)
exec_code = Automa.generate_exec_code(ctx, machine)
validate = @eval function (data)
$(init_code)
p_end = p_eof = lastindex(data)
$(exec_code)
return cs == 0
end
@test validate(b"") == true
@test validate(b">\naa\n") == true
@test validate(b">seq1\n") == true
@test validate(b">seq1\na\n") == true
@test validate(b">seq1\nac\ngt\n") == true
@test validate(b">seq1\r\nacgt\r\n") == true
@test validate(b">seq1\nac\n>seq2\ngt\n") == true
@test validate(b"a") == false
@test validate(b">") == false
@test validate(b">seq1\na") == false
@test validate(b">seq1\nac\ngt") == false
end
end
end
|
## Copyright (c) 2018-2021, Carnegie Mellon University
## See LICENSE for details
#F FDataNT(<datavar>, <nt>)
#F
Class(FDataNT, Function, rec(
__call__ := (self, datavar, nt) >> WithBases(self, rec(
var := datavar,
operations := PrintOps,
nt := nt,
domain := self >> Rows(self.nt)
)),
rChildren := self >> [ self.var, self.nt, self._domain, self._range],
rSetChild := rSetChildFields("var", "nt", "_domain", "_range"),
from_rChildren := (self, rch) >> ObjId(self)(rch[1], rch[2]).setDomain(rch[3]).setRange(rch[4]),
print := self >> Print(self.name,"(",self.var,", ",self.nt,")"),
# these are not yet right
at := (self, n) >> When(IsInt(n) and IsValue(self.ofs) and IsBound(self.var.value),
self.var.value.v[n + self.ofs.v + 1],
nth(self.var, n + self.ofs)),
tolist := self >> List([0..EvalScalar(self.len-1)], i -> nth(self.var, self.ofs+i)),
lambda := self >> let(x := Ind(self.domain()), Lambda(x, nth(self.var, self.ofs+x))),
# up to here
domain := self >> Rows(self.nt),
range := self >> When(self._range=false, self.var.t.t, self._range),
inline := true,
free := self >> self.ofs.free()
));
# These need to be implemented properly -- for now they are just markers in TDAG
Class(TColMaj, TPtr);
|
/*
* Copyright (c) 2016-2021 lymastee, All rights reserved.
* Contact: [email protected]
*
* This file is part of the gslib project.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#ifndef rendersys_5fadb7a4_2c63_4b3a_8029_14043d2405e6_h
#define rendersys_5fadb7a4_2c63_4b3a_8029_14043d2405e6_h
#include <ariel/config.h>
#include <ariel/type.h>
#include <gslib/std.h>
#include <ariel/image.h>
__ariel_begin__
enum shader_type
{
st_vertex_shader,
st_pixel_shader,
st_geometry_shader,
st_hull_shader,
st_domain_shader,
st_tessellation_shader,
st_compute_shader,
/* more.. */
};
enum sampler_state_filter
{
ssf_point,
ssf_linear,
ssf_anisotropic,
};
struct render_device_info
{
uint vendor_id;
};
class __gs_novtable rendersys abstract
{
public:
typedef unordered_map<string, string> configs;
typedef render_vertex_buffer vertex_buffer;
typedef render_index_buffer index_buffer;
typedef render_constant_buffer constant_buffer;
typedef render_texture1d texture1d;
typedef render_texture2d texture2d;
typedef render_texture3d texture3d;
typedef render_sampler_state sampler_state;
typedef unordered_map<void*, rendersys*> rsys_map;
config_select_type(select_render_platform, vertex_format_desc);
public:
rendersys();
virtual ~rendersys() {}
virtual bool setup(uint hwnd, const configs& cfg) = 0;
virtual void destroy() = 0;
virtual void setup_pipeline_state() = 0;
virtual render_blob* compile_shader_from_file(const gchar* file, const gchar* entry, const gchar* sm, render_include* inc) = 0;
virtual render_blob* compile_shader_from_memory(const char* src, int len, const gchar* name, const gchar* entry, const gchar* sm, render_include* inc) = 0;
virtual vertex_shader* create_vertex_shader(const void* ptr, size_t len) = 0;
virtual pixel_shader* create_pixel_shader(const void* ptr, size_t len) = 0;
virtual compute_shader* create_compute_shader(const void* ptr, size_t len) = 0;
virtual geometry_shader* create_geometry_shader(const void* ptr, size_t len) = 0;
virtual hull_shader* create_hull_shader(const void* ptr, size_t len) = 0;
virtual domain_shader* create_domain_shader(const void* ptr, size_t len) = 0;
virtual vertex_format* create_vertex_format(const void* ptr, size_t len, vertex_format_desc desc[], uint n) = 0;
virtual vertex_buffer* create_vertex_buffer(uint stride, uint count, bool read, bool write, uint usage, const void* ptr = 0) = 0;
virtual index_buffer* create_index_buffer(uint count, bool read, bool write, uint usage, const void* ptr = 0) = 0;
virtual constant_buffer* create_constant_buffer(uint stride, bool read, bool write, const void* ptr = 0) = 0;
virtual shader_resource_view* create_shader_resource_view(render_resource* res) = 0; /* texture view in GL */
virtual depth_stencil_view* create_depth_stencil_view(render_resource* res) = 0;
virtual unordered_access_view* create_unordered_access_view(render_resource* res) = 0;
virtual sampler_state* create_sampler_state(sampler_state_filter filter) = 0;
virtual texture2d* create_texture2d(const image& img, uint mips, uint usage, uint bindflags, uint cpuflags, uint miscflags) = 0;
virtual texture2d* create_texture2d(int width, int height, uint format, uint mips, uint usage, uint bindflags, uint cpuflags, uint miscflags) = 0;
virtual void load_with_mips(texture2d* tex, const image& img) = 0;
virtual void update_buffer(void* buf, int size, const void* ptr) = 0;
virtual void set_vertex_format(vertex_format* vfmt) = 0;
virtual void set_vertex_buffer(vertex_buffer* vb, uint stride, uint offset) = 0;
virtual void set_index_buffer(index_buffer* ib, uint offset) = 0;
virtual void begin_render() = 0;
virtual void end_render() = 0;
virtual void set_render_option(render_option opt, uint val) = 0;
virtual void set_vertex_shader(vertex_shader* vs) = 0;
virtual void set_pixel_shader(pixel_shader* ps) = 0;
virtual void set_geometry_shader(geometry_shader* gs) = 0;
virtual void set_viewport(const viewport& vp) = 0;
virtual void set_constant_buffer(uint slot, constant_buffer* cb, shader_type st) = 0;
virtual void set_sampler_state(uint slot, sampler_state* sstate, shader_type st) = 0;
virtual void set_shader_resource(uint slot, shader_resource_view* srv, shader_type st) = 0;
virtual void draw(uint count, uint start) = 0;
virtual void draw_indexed(uint count, uint start, int base = 0) = 0;
virtual void capture_screen(image& img, const rectf& rc, int buff_id) = 0; /* before present, buff_id = 0; after present, buff_id = 1 */
virtual void enable_alpha_blend(bool b) = 0;
virtual void enable_depth(bool b) = 0;
public:
static bool is_vsync_enabled(const configs& cfg);
static bool is_full_screen(const configs& cfg);
static bool is_MSAA_enabled(const configs& cfg);
static uint get_MSAA_sampler_count(const configs& cfg);
static void register_dev_index_service(void* dev, rendersys* rsys);
static void unregister_dev_index_service(void* dev);
static rendersys* find_by_dev(void* dev); /* we could usually find the rendersys by its device ptr */
protected:
render_device_info _device_info;
float _bkcr[4];
public:
const render_device_info& get_device_info() const { return _device_info; }
void set_background_color(const color& cr);
private:
static rsys_map _dev_indexing;
};
extern void release_vertex_buffer(render_vertex_buffer* buf);
extern void release_index_buffer(render_index_buffer* buf);
extern void release_constant_buffer(render_constant_buffer* buf);
extern void release_texture2d(render_texture2d* tex);
template<class res_class>
render_resource* convert_to_resource(res_class*);
template<class _pack>
inline uint pack_cb_size()
{
uint s = sizeof(_pack);
uint m = s % 16;
return !m ? s : s + (16 - m);
}
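/* Worked example: a 20-byte uniform block rounds up to 32 (20 % 16 == 4,
   so 20 + 12); a 32-byte block is returned unchanged. Constant buffers are
   allocated in 16-byte registers, hence the rounding. */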
__ariel_end__
#endif
|
#include <ctime>
#include <unistd.h>
#include <iostream>
#include <string>
#include <sstream>
#include <set>
#include <epoxy/gl.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#include "api.hpp"
#include "demo.h"
#include <shadertoy/utils/log.hpp>
#define TEST_NO_GLFW
#include "test.hpp"
using namespace std;
using shadertoy::gl::gl_call;
namespace fs = boost::filesystem;
namespace u = shadertoy::utils;
struct my_context : public example_ctx
{
shadertoy::gl::query fps_query;
GLuint64 last_query_value;
int last_query_count;
int frame_count;
double last_clock;
my_context()
: example_ctx(),
fps_query(GL_TIMESTAMP),
last_query_value(0),
last_query_count(0),
frame_count(0),
last_clock(0.0)
{
}
};
std::unique_ptr<my_context> ctx;
void shadertoy_resize(int width, int height)
{
// Reallocate textures
ctx->render_size = shadertoy::rsize(width, height);
ctx->context.allocate_textures(ctx->chain);
}
double now()
{
struct timespec tp;
clock_gettime(CLOCK_MONOTONIC, &tp);
return tp.tv_sec + (tp.tv_nsec / 1e9);
}
void shadertoy_render_frame()
{
// Update uniforms
// iTime and iFrame
ctx->chain.set_uniform("iFrame", ctx->frame_count);
ctx->chain.set_uniform("iTime", static_cast<float>(now() - ctx->last_clock));
// No measurement of GL_TIMESTAMP yet, add it
if (ctx->last_query_value == 0)
{
ctx->fps_query.query_counter(GL_TIMESTAMP);
}
GLint available = 0;
ctx->fps_query.get_object_iv(GL_QUERY_RESULT_AVAILABLE, &available);
if (available)
{
// The time stamp is available
GLuint64 currentTime;
ctx->fps_query.get_object_ui64v(GL_QUERY_RESULT, &currentTime);
float timeDelta = (1e-9 * (currentTime - ctx->last_query_value)) / (double)(ctx->frame_count - ctx->last_query_count);
ctx->chain.set_uniform("iTimeDelta", timeDelta);
ctx->chain.set_uniform("iFrameRate", 1.0f / timeDelta);
ctx->last_query_value = currentTime;
ctx->last_query_count = ctx->frame_count;
ctx->fps_query.query_counter(GL_TIMESTAMP);
}
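// timeDelta above is the GPU time per frame, averaged over the frames
// rendered since the previous timestamp was read back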
// iDate
boost::posix_time::ptime dt = boost::posix_time::microsec_clock::local_time();
ctx->chain.set_uniform("iDate", glm::vec4(dt.date().year() - 1,
dt.date().month(),
dt.date().day(),
dt.time_of_day().total_nanoseconds() / 1e9f));
// End update uniforms
// Render to texture
ctx->context.render(ctx->chain);
// Update framecount
ctx->frame_count++;
if (libshadertoy_test_exit())
exit(0);
}
int shadertoy_load(const char *shader_id, const char *shader_api_key)
{
int code = 0;
// Options
string shaderId(shader_id), shaderApiKey(shader_api_key);
// Fetch shader code
ctx->chain = shadertoy::swap_chain();
code = load_remote(ctx->context, ctx->chain, ctx->render_size, shaderId, shaderApiKey);
if (code != 0)
return code;
// Add screen_member
ctx->chain.emplace_back<shadertoy::members::screen_member>(shadertoy::make_size_ref(ctx->render_size));
try
{
// Initialize chain
ctx->context.init(ctx->chain);
u::log::shadertoy()->info("Initialized rendering swap chain");
// Reset ctx
ctx->frame_count = 0;
ctx->last_clock = now();
ctx->last_query_value = 0;
ctx->last_query_count = 0;
ctx->chain.set_uniform("iTimeDelta", 1.0f / 30.0f);
ctx->chain.set_uniform("iFrameRate", 30.0f);
}
catch (shadertoy::gl::shader_compilation_error &sce)
{
std::cerr << "Failed to compile shader: " << sce.log();
code = 2;
}
catch (shadertoy::shadertoy_error &err)
{
std::cerr << "Error: "
<< err.what();
code = 2;
}
if (code != 0)
{
ctx->chain = shadertoy::swap_chain();
}
return code;
}
int shadertoy_init(const char *api_key, const char *query, const char *sort, int width, int height)
{
shadertoy::rsize size(width, height);
curl_global_init(CURL_GLOBAL_DEFAULT);
CURL *curl = curl_easy_init();
if (!curl)
{
u::log::shadertoy()->error("Failed to initialize curl.");
curl_global_cleanup();
return 1;
}
// Build search query url
std::stringstream ss;
ss << "https://www.shadertoy.com/api/v1/shaders/query/"
<< query
<< "?sort="
<< sort
<< "&key="
<< api_key;
// Get returned json
Json::Value sr = json_get(curl, ss.str());
// Create context
ctx = std::make_unique<my_context>();
ctx->render_size = size;
// Iterate shaders
bool foundShader = false;
srand(time(NULL) * getpid());
std::set<std::string> testedShaders;
for (; testedShaders.size() < sr["Results"].size(); )
{
std::string shaderId = sr["Results"][rand() % sr["Results"].size()].asString();
if (testedShaders.count(shaderId) == 0)
{
int code = shadertoy_load(shaderId.c_str(), api_key);
if (code == 0)
{
foundShader = true;
break;
}
testedShaders.insert(shaderId);
}
}
if (!foundShader)
{
int code = shadertoy_load("XsyGRW", api_key);
if (code != 0)
{
abort();
}
}
// Free curl memory
curl_easy_cleanup(curl);
return 0;
}
void shadertoy_free()
{
ctx.reset();
}
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.category_theory.natural_isomorphism
import Mathlib.category_theory.eq_to_hom
import Mathlib.data.sigma.basic
import Mathlib.category_theory.pi.basic
import Mathlib.PostPort
universes w₁ v₁ u₁ l u₂ v₂ w₂ w₃
namespace Mathlib
/-!
# Disjoint union of categories
We define the category structure on a sigma-type (disjoint union) of categories.
-/
namespace category_theory
namespace sigma
/--
The type of morphisms of a disjoint union of categories: for `X : C i` and `Y : C j`, a morphism
`(i, X) ⟶ (j, Y)` if `i = j` is just a morphism `X ⟶ Y`, and if `i ≠ j` there are no such morphisms.
-/
inductive sigma_hom {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] :
(sigma fun (i : I) => C i) → (sigma fun (i : I) => C i) → Type (max w₁ v₁ u₁)
where
| mk : {i : I} → {X Y : C i} → (X ⟶ Y) → sigma_hom (sigma.mk i X) (sigma.mk i Y)
namespace sigma_hom
/-- The identity morphism on an object. -/
def id {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] (X : sigma fun (i : I) => C i) :
sigma_hom X X :=
sorry
protected instance inhabited {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
(X : sigma fun (i : I) => C i) : Inhabited (sigma_hom X X) :=
{ default := id X }
/-- Composition of sigma homomorphisms. -/
def comp {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {X : sigma fun (i : I) => C i}
{Y : sigma fun (i : I) => C i} {Z : sigma fun (i : I) => C i} :
sigma_hom X Y → sigma_hom Y Z → sigma_hom X Z :=
sorry
protected instance sigma.category_theory.category_struct {I : Type w₁} {C : I → Type u₁}
[(i : I) → category (C i)] : category_struct (sigma fun (i : I) => C i) :=
category_struct.mk id fun (X Y Z : sigma fun (i : I) => C i) (f : X ⟶ Y) (g : Y ⟶ Z) => comp f g
@[simp] theorem comp_def {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] (i : I)
(X : C i) (Y : C i) (Z : C i) (f : X ⟶ Y) (g : Y ⟶ Z) : comp (mk f) (mk g) = mk (f ≫ g) :=
rfl
theorem assoc {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
(X : sigma fun (i : I) => C i) (Y : sigma fun (i : I) => C i) (Z : sigma fun (i : I) => C i)
(W : sigma fun (i : I) => C i) (f : X ⟶ Y) (g : Y ⟶ Z) (h : Z ⟶ W) : (f ≫ g) ≫ h = f ≫ g ≫ h :=
sorry
theorem id_comp {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
(X : sigma fun (i : I) => C i) (Y : sigma fun (i : I) => C i) (f : X ⟶ Y) : 𝟙 ≫ f = f :=
sorry
theorem comp_id {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
(X : sigma fun (i : I) => C i) (Y : sigma fun (i : I) => C i) (f : X ⟶ Y) : f ≫ 𝟙 = f :=
sorry
end sigma_hom
protected instance sigma {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] :
category (sigma fun (i : I) => C i) :=
category.mk
/-- The inclusion functor into the disjoint union of categories. -/
@[simp] theorem incl_map {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] (i : I)
(X : C i) (Y : C i) : ∀ (ᾰ : X ⟶ Y), functor.map (incl i) ᾰ = sigma_hom.mk ᾰ :=
fun (ᾰ : X ⟶ Y) => Eq.refl (functor.map (incl i) ᾰ)
@[simp] theorem incl_obj {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {i : I}
(X : C i) : functor.obj (incl i) X = sigma.mk i X :=
rfl
protected instance incl.category_theory.full {I : Type w₁} {C : I → Type u₁}
[(i : I) → category (C i)] (i : I) : full (incl i) :=
full.mk fun (X Y : C i) (_x : functor.obj (incl i) X ⟶ functor.obj (incl i) Y) => sorry
protected instance incl.category_theory.faithful {I : Type w₁} {C : I → Type u₁}
[(i : I) → category (C i)] (i : I) : faithful (incl i) :=
faithful.mk
/--
To build a natural transformation over the sigma category, it suffices to specify it restricted to
each subcategory.
-/
def nat_trans {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂} [category D]
{F : (sigma fun (i : I) => C i) ⥤ D} {G : (sigma fun (i : I) => C i) ⥤ D}
(h : (i : I) → incl i ⋙ F ⟶ incl i ⋙ G) : F ⟶ G :=
nat_trans.mk fun (_x : sigma fun (i : I) => C i) => sorry
@[simp] theorem nat_trans_app {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
{D : Type u₂} [category D] {F : (sigma fun (i : I) => C i) ⥤ D}
{G : (sigma fun (i : I) => C i) ⥤ D} (h : (i : I) → incl i ⋙ F ⟶ incl i ⋙ G) (i : I) (X : C i) :
nat_trans.app (nat_trans h) (sigma.mk i X) = nat_trans.app (h i) X :=
rfl
/-- (Implementation). An auxiliary definition to build the functor `desc`. -/
def desc_map {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂} [category D]
(F : (i : I) → C i ⥤ D) (X : sigma fun (i : I) => C i) (Y : sigma fun (i : I) => C i) :
(X ⟶ Y) →
(functor.obj (F (sigma.fst X)) (sigma.snd X) ⟶
functor.obj (F (sigma.fst Y)) (sigma.snd Y)) :=
sorry
/--
Given a collection of functors `F i : C i ⥤ D`, we can produce a functor `(Σ i, C i) ⥤ D`.
The produced functor `desc F` satisfies: `incl i ⋙ desc F ≅ F i`, i.e. restricted to just the
subcategory `C i`, `desc F` agrees with `F i`, and it is unique (up to natural isomorphism) with
this property.
This witnesses that the sigma-type is the coproduct in Cat.
-/
@[simp] theorem desc_obj {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂}
[category D] (F : (i : I) → C i ⥤ D) (X : sigma fun (i : I) => C i) :
functor.obj (desc F) X = functor.obj (F (sigma.fst X)) (sigma.snd X) :=
Eq.refl (functor.obj (desc F) X)
@[simp] theorem desc_map_mk {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂}
[category D] (F : (i : I) → C i ⥤ D) {i : I} (X : C i) (Y : C i) (f : X ⟶ Y) :
functor.map (desc F) (sigma_hom.mk f) = functor.map (F i) f :=
rfl
/--
This shows that when `desc F` is restricted to just the subcategory `C i`, `desc F` agrees with
`F i`.
-/
-- We hand-generate the simp lemmas about this since they come out cleaner.
def incl_desc {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂} [category D]
(F : (i : I) → C i ⥤ D) (i : I) : incl i ⋙ desc F ≅ F i :=
nat_iso.of_components (fun (X : C i) => iso.refl (functor.obj (incl i ⋙ desc F) X)) sorry
@[simp] theorem incl_desc_hom_app {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
{D : Type u₂} [category D] (F : (i : I) → C i ⥤ D) (i : I) (X : C i) :
nat_trans.app (iso.hom (incl_desc F i)) X = 𝟙 :=
rfl
@[simp] theorem incl_desc_inv_app {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
{D : Type u₂} [category D] (F : (i : I) → C i ⥤ D) (i : I) (X : C i) :
nat_trans.app (iso.inv (incl_desc F i)) X = 𝟙 :=
rfl
/--
If `q` when restricted to each subcategory `C i` agrees with `F i`, then `q` is isomorphic to
`desc F`.
-/
def desc_uniq {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂} [category D]
(F : (i : I) → C i ⥤ D) (q : (sigma fun (i : I) => C i) ⥤ D) (h : (i : I) → incl i ⋙ q ≅ F i) :
q ≅ desc F :=
nat_iso.of_components (fun (_x : sigma fun (i : I) => C i) => sorry) sorry
@[simp] theorem desc_uniq_hom_app {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
{D : Type u₂} [category D] (F : (i : I) → C i ⥤ D) (q : (sigma fun (i : I) => C i) ⥤ D)
(h : (i : I) → incl i ⋙ q ≅ F i) (i : I) (X : C i) :
nat_trans.app (iso.hom (desc_uniq F q h)) (sigma.mk i X) = nat_trans.app (iso.hom (h i)) X :=
rfl
@[simp] theorem desc_uniq_inv_app {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)]
{D : Type u₂} [category D] (F : (i : I) → C i ⥤ D) (q : (sigma fun (i : I) => C i) ⥤ D)
(h : (i : I) → incl i ⋙ q ≅ F i) (i : I) (X : C i) :
nat_trans.app (iso.inv (desc_uniq F q h)) (sigma.mk i X) = nat_trans.app (iso.inv (h i)) X :=
rfl
/--
If `q₁` and `q₂` when restricted to each subcategory `C i` agree, then `q₁` and `q₂` are isomorphic.
-/
@[simp] theorem nat_iso_inv {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : Type u₂}
[category D] {q₁ : (sigma fun (i : I) => C i) ⥤ D} {q₂ : (sigma fun (i : I) => C i) ⥤ D}
(h : (i : I) → incl i ⋙ q₁ ≅ incl i ⋙ q₂) :
iso.inv (nat_iso h) = nat_trans fun (i : I) => iso.inv (h i) :=
Eq.refl (iso.inv (nat_iso h))
/-- A function `J → I` induces a functor `Σ j, C (g j) ⥤ Σ i, C i`. -/
def map {I : Type w₁} (C : I → Type u₁) [(i : I) → category (C i)] {J : Type w₂} (g : J → I) :
(sigma fun (j : J) => C (g j)) ⥤ sigma fun (i : I) => C i :=
desc fun (j : J) => incl (g j)
@[simp] theorem map_obj {I : Type w₁} (C : I → Type u₁) [(i : I) → category (C i)] {J : Type w₂}
(g : J → I) (j : J) (X : C (g j)) : functor.obj (map C g) (sigma.mk j X) = sigma.mk (g j) X :=
rfl
@[simp] theorem map_map {I : Type w₁} (C : I → Type u₁) [(i : I) → category (C i)] {J : Type w₂}
(g : J → I) {j : J} {X : C (g j)} {Y : C (g j)} (f : X ⟶ Y) :
functor.map (map C g) (sigma_hom.mk f) = sigma_hom.mk f :=
rfl
/--
The functor `sigma.map C g` restricted to the subcategory `C j` acts as the inclusion of `g j`.
-/
@[simp] theorem incl_comp_map_hom_app {I : Type w₁} (C : I → Type u₁) [(i : I) → category (C i)]
{J : Type w₂} (g : J → I) (j : J) (X : C (g j)) :
nat_trans.app (iso.hom (incl_comp_map C g j)) X = 𝟙 :=
Eq.refl 𝟙
/-- The functor `sigma.map` applied to the identity function is just the identity functor. -/
@[simp] theorem map_id_hom_app (I : Type w₁) (C : I → Type u₁) [(i : I) → category (C i)]
(_x : sigma fun (i : I) => (fun (i : I) => (fun (i : I) => C (id i)) i) i) :
nat_trans.app (iso.hom (map_id I C)) _x =
nat_trans._match_1
(fun (i : I) =>
iso.hom
(nat_iso.of_components (fun (X : C i) => iso.refl (sigma.mk i X))
(map_id._proof_1 I C i)))
_x :=
sorry
/-- The functor `sigma.map` applied to a composition is a composition of functors. -/
@[simp] theorem map_comp_hom_app {I : Type w₁} (C : I → Type u₁) [(i : I) → category (C i)]
{J : Type w₂} {K : Type w₃} (f : K → J) (g : J → I)
(X : sigma fun (i : K) => (fun (j : K) => function.comp C g (f j)) i) :
nat_trans.app (iso.hom (map_comp C f g)) X =
iso.hom
(desc_uniq._match_1 (fun (j : K) => incl (g (f j))) (map (C ∘ g) f ⋙ map C g)
(fun (k : K) =>
iso_whisker_right (incl_comp_map (C ∘ g) f k) (map C g) ≪≫ incl_comp_map C g (f k))
X) :=
sorry
namespace functor
/--
Assemble an `I`-indexed family of functors into a functor between the sigma types.
-/
def sigma {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : I → Type u₁}
[(i : I) → category (D i)] (F : (i : I) → C i ⥤ D i) :
(sigma fun (i : I) => C i) ⥤ sigma fun (i : I) => D i :=
desc fun (i : I) => F i ⋙ incl i
end functor
namespace nat_trans
/--
Assemble an `I`-indexed family of natural transformations into a single natural transformation.
-/
def sigma {I : Type w₁} {C : I → Type u₁} [(i : I) → category (C i)] {D : I → Type u₁}
[(i : I) → category (D i)] {F : (i : I) → C i ⥤ D i} {G : (i : I) → C i ⥤ D i}
(α : (i : I) → F i ⟶ G i) : functor.sigma F ⟶ functor.sigma G :=
nat_trans.mk
fun (f : sigma fun (i : I) => C i) =>
sigma_hom.mk (nat_trans.app (α (sigma.fst f)) (sigma.snd f))
end Mathlib |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 14:06:09 2020
@author: u6265553
"""
'''This script generates the inverse transformation functions required
for moving-lens shift-scale calibration. They align the synthetic images
generated by Blender to a reference image so that image stacking can
produce a fully in-focus image. The script calculates the shift and
scaling required to align the images and builds the corresponding
transformation matrices.'''
import glob
import numpy as np
import cv2
from skimage import restoration
from scipy.signal import convolve2d as conv2
from scipy import misc, optimize, special
from matplotlib import pylab as plt
pixelsize=4e-6
#focal length of the lens in meter
f_lens=65e-3
#focal length of the lens in pixel
f_lens_pixel=f_lens/pixelsize
forward_translation=-0.021
backward_translation=0.019
average_camera_distance=0.15
max_camera_distance=average_camera_distance+backward_translation
f_camera=average_camera_distance*f_lens/(average_camera_distance-f_lens)
#focal length of the camera in pixel
f_camera_pixel=f_camera/pixelsize
num_of_img=64
#linear displacement of the camera
del_d=(backward_translation-forward_translation)/num_of_img #in meter
del_d_pixel=del_d/pixelsize #in pixel
d_pixel=max_camera_distance/pixelsize
d_list=np.zeros([num_of_img])
for i in range(num_of_img):
d_list[i]=d_pixel-i*del_d_pixel
d_ref_pixel=d_list[63]
scaling=np.zeros([num_of_img])
shift_x=np.zeros([num_of_img])
shift_y=np.zeros([num_of_img])
H=np.zeros([num_of_img,3,3])
for i in range(num_of_img):
#depth of the target calibration target
d_target_pixel=d_list[i]
scaling[i]=d_ref_pixel/d_target_pixel
shift_x[i]=(1-scaling[i])*(1080/2)
shift_y[i]=(1-scaling[i])*(720/2)
h=np.array([[scaling[i],0,shift_x[i]],[0,scaling[i],shift_y[i]],[0,0,1]])
H[i]=(np.linalg.inv(h))
H_file = 'F:/Arif/moving_lens/blender_images/Hs.npz'
homographies = []
np.savez(H_file, homographies=H)
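# Applying one of the stored inverse transforms to align image i to the
# reference frame (illustrative; the image path is hypothetical):
#
#   img = cv2.imread('F:/Arif/moving_lens/blender_images/img_000.png')
#   aligned = cv2.warpPerspective(img, H[0], (img.shape[1], img.shape[0]))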
|
> module SequentialDecisionProblems.applications.Main
> import Data.Vect
> -- import Data.Fin
> -- import Data.List
> -- import Data.List.Quantifiers
> import Data.So
> import Effects
> import Effect.Exception
> import Effect.StdIO
> import SequentialDecisionProblems.CoreTheory
> import SequentialDecisionProblems.FullTheory
> import SequentialDecisionProblems.TabBackwardsInduction
> import SequentialDecisionProblems.Utils
> import SequentialDecisionProblems.NonDeterministicDefaults
> import SequentialDecisionProblems.CoreTheoryOptDefaults
> import SequentialDecisionProblems.FullTheoryOptDefaults
> import SequentialDecisionProblems.TabBackwardsInductionOptDefaults
> import BoundedNat.BoundedNat
> -- import FastSimpleProb.SimpleProb
> -- import FastSimpleProb.BasicOperations
> -- import FastSimpleProb.BasicProperties
> -- import FastSimpleProb.MonadicOperations
> -- import FastSimpleProb.MonadicProperties
> -- import FastSimpleProb.Measures
> -- import FastSimpleProb.MeasuresProperties
> -- import FastSimpleProb.Operations
> -- import Sigma.Sigma
> import Double.Predicates
> -- import Double.Postulates
> -- import Double.Operations
> -- import Double.Properties
> import NonNegDouble.NonNegDouble
> -- import NonNegDouble.Constants
> import NonNegDouble.BasicOperations
> import NonNegDouble.Operations
> import NonNegDouble.Properties
> import NonNegDouble.Predicates
> import NonNegDouble.LTEProperties
> -- import Finite.Predicates
> -- import Finite.Operations
> -- import Finite.Properties
> -- import Decidable.Predicates
> -- import Decidable.Properties
> -- import LocalEffect.Exception
> -- import LocalEffect.StdIO
> -- import Fin.Operations
> -- import List.Operations
> -- import Unit.Properties
> -- %default total
> %auto_implicits off
> -- %logging 5
* States
> data Tag = V | X | O
> Board : Type
> Board = Vect 9 Tag
> validBoard : Nat -> Board -> Bool
> data ValidBoard : Nat -> Type where
> MkValidBoard : (t : Nat) -> (b : Board) -> validBoard t b = True -> ValidBoard t
> SequentialDecisionProblems.CoreTheory.State = ValidBoard
* Controls
> Move : Type
> Move = Maybe (LTB 9) -- Nothing covers the case where the game is over before 5 rounds
> validMove : (t : Nat) -> ValidBoard t -> Move -> Bool
> data ValidMove : (t : Nat) -> ValidBoard t -> Type where
> MkValidMove : (t : Nat) -> (b : ValidBoard t) -> (m : Move) -> validMove t b m = True -> ValidMove t b
> SequentialDecisionProblems.CoreTheory.Ctrl = ValidMove
* Transition function
> possible : (t : Nat) -> (b : ValidBoard t) -> (m : ValidMove t b) -> List (ValidBoard (S t))
> SequentialDecisionProblems.CoreTheory.nexts t x y = possible t x y
* |Val| and |LTE|:
> SequentialDecisionProblems.CoreTheory.Val =
> NonNegDouble.NonNegDouble
> SequentialDecisionProblems.CoreTheory.plus =
> NonNegDouble.Operations.plus
> SequentialDecisionProblems.CoreTheory.zero =
> fromInteger 0
> SequentialDecisionProblems.CoreTheory.LTE =
> NonNegDouble.Predicates.LTE
> SequentialDecisionProblems.FullTheory.reflexiveLTE =
> NonNegDouble.LTEProperties.reflexiveLTE
> SequentialDecisionProblems.FullTheory.transitiveLTE =
> NonNegDouble.LTEProperties.transitiveLTE
> SequentialDecisionProblems.FullTheory.monotonePlusLTE =
> NonNegDouble.LTEProperties.monotonePlusLTE
> SequentialDecisionProblems.CoreTheoryOptDefaults.totalPreorderLTE =
> NonNegDouble.LTEProperties.totalPreorderLTE
* Reward function
> won : (t : Nat) -> (b : ValidBoard t) -> Bool
> lost : (t : Nat) -> (b : ValidBoard t) -> Bool
> SequentialDecisionProblems.CoreTheory.reward t x y x' =
> if won (S t) x'
> then cast 2.0
> else if lost (S t) x'
> then cast 0.0
> else cast 1.0
> {-
* Completing the problem specification
To be able to apply the verified, generic backwards induction algorithm
of |CoreTheory| to compute optimal policies for our problem, we have to
explain how the decision maker accounts for uncertainties on rewards
induced by uncertainties in the transition function. We first assume
that the decision maker measures uncertain rewards by their expected
value:
> SequentialDecisionProblems.CoreTheory.meas = expectedValue -- worst -- expectedValue
> SequentialDecisionProblems.FullTheory.measMon = monotoneExpectedValue -- monotoneWorst -- monotoneExpectedValue
Further on, we have to implement the notions of viability and
reachability. We start by positing that all states are viable for any
number of steps:
> -- Viable : (n : Nat) -> State t -> Type
> SequentialDecisionProblems.CoreTheory.Viable n x = Unit
From this definition, it trivially follows that all elements of an
arbitrary list of states are viable for an arbitrary number of steps:
> viableLemma : {t, n : Nat} -> (xs : List (State t)) -> All (Viable n) xs
> viableLemma Nil = Nil
> viableLemma {t} {n} (x :: xs) = () :: (viableLemma {t} {n} xs)
This fact and the (less trivial) result that simple probability
distributions are never empty, see |nonEmptyLemma| in
|MonadicProperties| in |SimpleProb|, allow us to show that the above
definition of |Viable| fulfills |viableSpec1|:
> -- viableSpec1 : (x : State t) -> Viable (S n) x -> GoodCtrl t x
> SequentialDecisionProblems.CoreTheory.viableSpec1 {t} {n} s v =
> MkSigma High (ne, av) where
> ne : SequentialDecisionProblems.CoreTheory.NotEmpty (nexts t s High)
> ne = nonEmptyLemma (nexts t s High)
> av : SequentialDecisionProblems.CoreTheory.All (Viable {t = S t} n) (nexts t s High)
> av = viableLemma {t = S t} (support (nexts t s High))
> SequentialDecisionProblems.Utils.finiteViable n x = finiteUnit
> SequentialDecisionProblems.Utils.decidableViable n x = decidableUnit
For reachability, we proceed in a similar way. We say that all states
are reachable
> -- Reachable : State t' -> Type
> SequentialDecisionProblems.CoreTheory.Reachable x' = Unit
which immediately implies |reachableSpec1|:
> -- reachableSpec1 : (x : State t) -> Reachable {t' = t} x -> (y : Ctrl t x) -> All (Reachable {t' = S t}) (nexts t x y)
> SequentialDecisionProblems.CoreTheory.reachableSpec1 {t} x r y = all (nexts t x y) where
> all : (sp : SimpleProb (State (S t))) -> SequentialDecisionProblems.CoreTheory.All (Reachable {t' = S t}) sp
> all sp = all' (support sp) where
> all' : (xs : List (State (S t))) -> Data.List.Quantifiers.All (Reachable {t' = S t}) xs
> all' Nil = Nil
> all' (x :: xs) = () :: (all' xs)
and decidability of |Reachable|:
> SequentialDecisionProblems.TabBackwardsInduction.decidableReachable x = decidableUnit
Finally, we have to show that controls are finite
> -- finiteCtrl : {t : Nat} -> (x : State t) -> Finite (Ctrl t x)
> SequentialDecisionProblems.Utils.finiteCtrl _ = finiteLowHigh
and, in order to use the fast, tail-recursive tabulated version of
backwards induction, that states are finite:
> SequentialDecisionProblems.TabBackwardsInduction.finiteState t =
> finiteTuple4 finiteFin finiteLowHigh finiteAvailableUnavailable finiteGoodBad
* Optimal policies, optimal decisions, ...
We can now apply the results of our |CoreTheory| and of the |FullTheory|
to compute verified optimal policies, possible state-control sequences,
etc. To this end, we need to be able to show the outcome of the decision
process. This means implementing functions to print states and controls:
> -- showState : {t : Nat} -> State t -> String
> SequentialDecisionProblems.Utils.showState {t} (e, High, Unavailable, Good) =
> "(" ++ show (finToNat e) ++ ",H,U,G)"
> SequentialDecisionProblems.Utils.showState {t} (e, High, Unavailable, Bad) =
> "(" ++ show (finToNat e) ++ ",H,U,B)"
> SequentialDecisionProblems.Utils.showState {t} (e, High, Available, Good) =
> "(" ++ show (finToNat e) ++ ",H,A,G)"
> SequentialDecisionProblems.Utils.showState {t} (e, High, Available, Bad) =
> "(" ++ show (finToNat e) ++ ",H,A,B)"
> SequentialDecisionProblems.Utils.showState {t} (e, Low, Unavailable, Good) =
> "(" ++ show (finToNat e) ++ ",L,U,G)"
> SequentialDecisionProblems.Utils.showState {t} (e, Low, Unavailable, Bad) =
> "(" ++ show (finToNat e) ++ ",L,U,B)"
> SequentialDecisionProblems.Utils.showState {t} (e, Low, Available, Good) =
> "(" ++ show (finToNat e) ++ ",L,A,G)"
> SequentialDecisionProblems.Utils.showState {t} (e, Low, Available, Bad) =
> "(" ++ show (finToNat e) ++ ",L,A,B)"
> -- showControl : {t : Nat} -> {x : State t} -> Ctrl t x -> String
> SequentialDecisionProblems.Utils.showCtrl {t} {x} Low = "L"
> SequentialDecisionProblems.Utils.showCtrl {t} {x} High = "H"
> -- ad-hoc trajectories computation
> adHocPossibleStateCtrlSeqs : {t, n : Nat} ->
> (ps : PolicySeq t n) ->
> (x : State t) ->
> SimpleProb (StateCtrlSeq t n)
> adHocPossibleStateCtrlSeqs {t} {n = Z} Nil x =
> FastSimpleProb.MonadicOperations.ret (Nil x)
> adHocPossibleStateCtrlSeqs {t} {n = S m} (p :: ps') x =
> {-
> FastSimpleProb.MonadicOperations.fmap ((MkSigma x y) ::) (FastSimpleProb.MonadicOperations.naivebind mx' f) where
> y : Ctrl t x
> y = ctrl (p x () ())
> mx' : SimpleProb (State (S t))
> mx' = nexts t x y
> f : State (S t) -> M (StateCtrlSeq (S t) m)
> f = adHocPossibleStateCtrlSeqs {n = m} ps'
> ---}
> --{-
> let y = ctrl (p x () ()) in
> let mx' = nexts t x y in
> let f = adHocPossibleStateCtrlSeqs {n = m} ps' in
> FastSimpleProb.MonadicOperations.fmap ((MkSigma x y) ::) (FastSimpleProb.MonadicOperations.naivebind mx' f)
> ---}
> constHigh : (t : Nat) -> (n : Nat) -> PolicySeq t n
> constHigh t Z = Nil
> constHigh t (S n) = p :: (constHigh (S t) n) where
> p : Policy t (S n)
> p x r v = MkSigma High (ne, av) where
> ne : SequentialDecisionProblems.CoreTheory.NotEmpty (nexts t x High)
> ne = nonEmptyLemma (nexts t x High)
> av : SequentialDecisionProblems.CoreTheory.All (Viable {t = S t} n) (nexts t x High)
> av = viableLemma {t = S t} (support (nexts t x High))
> ||| Constant low policy sequences
> constLow : (t : Nat) -> (n : Nat) -> PolicySeq t n
> constLow t Z = Nil
> constLow t (S n) = p :: (constLow (S t) n) where
> p : Policy t (S n)
> p x r v = MkSigma Low (ne, av) where
> ne : SequentialDecisionProblems.CoreTheory.NotEmpty (nexts t x Low)
> ne = nonEmptyLemma (nexts t x Low)
> av : SequentialDecisionProblems.CoreTheory.All (Viable {t = S t} n) (nexts t x Low)
> av = viableLemma {t = S t} (support (nexts t x Low))
> computation : { [STDIO] } Eff ()
> computation =
> do putStr ("enter number of steps:\n")
> nSteps <- getNat
> putStrLn "nSteps (number of decision steps):"
> putStrLn (" " ++ show nSteps)
>
> putStrLn "crE (crit. cumulated emissions threshold):"
> putStrLn (" " ++ show crE)
> putStrLn "crN (crit. number of decision steps):"
> putStrLn (" " ++ show crN)
>
> putStrLn "pS1 (prob. of staying in a good world, cumulated emissions below crE):"
> putStrLn (" " ++ show pS1)
> putStrLn "pS2 (prob. of staying in a good world, cumulated emissions above crE):"
> putStrLn (" " ++ show pS2)
>
> putStrLn "pA1 (prob. of eff. tech. becoming available, number of steps below crN):"
> putStrLn (" " ++ show pA1)
> putStrLn "pA2 (prob. of eff. tech. becoming available, number of steps above crN):"
> putStrLn (" " ++ show pA2)
>
> putStrLn "pLL (prob. of low emission policies, emissions low, low selected):"
> putStrLn (" " ++ show pLL)
> putStrLn "pLH (prob. of low emission policies, emissions high, low selected):"
> putStrLn (" " ++ show pLH)
> putStrLn "pHL (prob. of high emission policies, emissions low, high selected):"
> putStrLn (" " ++ show pHL)
> putStrLn "pHH (prob. of high emission policies, emissions high, high selected):"
> putStrLn (" " ++ show pHH)
>
> putStrLn "badOverGood (step benefits ratio: bad over good world):"
> putStrLn (" " ++ show badOverGood)
> putStrLn "lowOverGoodUnavailable (benefits ratio: low emissions over step, good world, eff. tech. unavailable):"
> putStrLn (" " ++ show lowOverGoodUnavailable)
> putStrLn "lowOverGoodAvailable (benefits ratio: low emissions over step, good world, eff. tech. available):"
> putStrLn (" " ++ show lowOverGoodAvailable)
> putStrLn "highOverGood (benefits ratio: High emissions over step, good world):"
> putStrLn (" " ++ show highOverGood)
>
> putStrLn "computing constHigh policies ..."
> constHigh_ps <- pure (constHigh Z nSteps)
>
> putStrLn "computing constHigh state-control sequences ..."
> constHigh_mxys <- pure (adHocPossibleStateCtrlSeqs constHigh_ps (FZ, High, Unavailable, Good))
> putStrLn "pairing constHigh state-control sequences with their values ..."
> constHigh_mxysv <- pure (possibleStateCtrlSeqsRewards' constHigh_mxys)
> -- putStrLn "constHigh state-control sequences and their values:"
> -- putStrLn (showlong constHigh_mxysv)
>
> putStrLn "computing (naively) the number of constHigh state-control sequences ..."
> constHigh_n <- pure (length (toList constHigh_mxysv))
> putStrLn "number of constHigh state-control sequences:"
> putStrLn (" " ++ show constHigh_n)
>
> putStrLn "computing (naively) the most probable constHigh state-control sequence ..."
> constHigh_xysv <- pure (naiveMostProbableProb constHigh_mxysv)
> putStrLn "most probable constHigh state-control sequence and its probability:"
> putStrLn (" " ++ show constHigh_xysv)
>
> putStrLn "sorting (naively) the constHigh state-control sequence ..."
> constHigh_xysvs <- pure (naiveSortToList constHigh_mxysv)
> putStrLn "most probable constHigh state-control sequences (first 3) and their probabilities:"
> putStrLn (showlong (take 3 constHigh_xysvs))
>
> putStrLn "measure of constHigh rewards:"
> putStrLn (" " ++ show (meas (SequentialDecisionProblems.CoreTheory.fmap snd constHigh_mxysv)))
>
> putStrLn "computing constLow policies ..."
> constLow_ps <- pure (constLow Z nSteps)
>
> putStrLn "computing constLow state-control sequences ..."
> constLow_mxys <- pure (adHocPossibleStateCtrlSeqs constLow_ps (FZ, High, Unavailable, Good))
> putStrLn "pairing constLow state-control sequences with their values ..."
> constLow_mxysv <- pure (possibleStateCtrlSeqsRewards' constLow_mxys)
>
> putStrLn "computing (naively) the number of constLow state-control sequences ..."
> constLow_n <- pure (length (toList constLow_mxysv))
> putStrLn "number of constLow state-control sequences:"
> putStrLn (" " ++ show constLow_n)
>
> putStrLn "computing (naively) the most probable constLow state-control sequence ..."
> constLow_xysv <- pure (naiveMostProbableProb constLow_mxysv)
> putStrLn "most probable constLow state-control sequence and its probability:"
> putStrLn (" " ++ show constLow_xysv)
>
> putStrLn "sorting (naively) the constLow state-control sequence ..."
> constLow_xysvs <- pure (naiveSortToList constLow_mxysv)
> putStrLn "most probable constLow state-control sequences (first 3) and their probabilities:"
> putStrLn (showlong (take 3 constLow_xysvs))
>
> putStrLn "measure of constLow rewards:"
> putStrLn (" " ++ show (meas (SequentialDecisionProblems.CoreTheory.fmap snd constLow_mxysv)))
>
> putStrLn "computing optimal policies ..."
> ps <- pure (tabTailRecursiveBackwardsInduction Z nSteps)
>
> putStrLn "computing possible state-control sequences ..."
> mxys <- pure (adHocPossibleStateCtrlSeqs ps (FZ, High, Unavailable, Good))
> putStrLn "pairing possible state-control sequences with their values ..."
> mxysv <- pure (possibleStateCtrlSeqsRewards' mxys)
> -- putStrLn "possible state-control sequences and their values:"
> -- putStrLn (showlong mxysv)
>
> putStrLn "computing (naively) the number of possible state-control sequences ..."
> n <- pure (length (toList mxysv))
> putStrLn "number of possible state-control sequences:"
> putStrLn (" " ++ show n)
>
> putStrLn "computing (naively) the most probable state-control sequence ..."
> xysv <- pure (naiveMostProbableProb mxysv)
> putStrLn "most probable state-control sequence and its probability:"
> putStrLn (" " ++ show xysv)
>
> putStrLn "sorting (naively) the possible state-control sequence ..."
> xysvs <- pure (naiveSortToList mxysv)
> putStrLn "most probable state-control sequences (first 3) and their probabilities:"
> putStrLn (showlong (take 3 xysvs))
>
> putStrLn "measure of possible rewards:"
> putStrLn (" " ++ show (meas (SequentialDecisionProblems.CoreTheory.fmap snd mxysv)))
> putStrLn "done!"
> main : IO ()
> main = run computation
> ---}
-- Local Variables:
-- idris-packages: ("effects")
-- End:
|
\documentclass[12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{float}
\usepackage{amsmath}
\usepackage[hmargin=3cm,vmargin=6.0cm]{geometry}
%\topmargin=0cm
\topmargin=-2cm
\addtolength{\textheight}{6.5cm}
\addtolength{\textwidth}{2.0cm}
%\setlength{\leftmargin}{-5cm}
\setlength{\oddsidemargin}{0.0cm}
\setlength{\evensidemargin}{0.0cm}
%misc libraries goes here
%\usepackage{fitch}
\begin{document}
\section*{Student Information }
%Write your full name and id number between the colon and newline
%Put one empty space character after colon and before newline
Full Name : Yavuz Selim YEŞİLYURT \\
Id Number : 2259166 \\
% Write your answers below the section tags
\section*{Answer 1}
\hspace{5mm} 1)\\
\begin{table}[H]
\small
\centering
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
$p$ & $q$ & $\neg q$ & $p\rightarrow q$ & $\neg q\wedge(p \rightarrow q)$ & $\neg p$ & $ (\neg q\wedge(p \rightarrow q)) \rightarrow \neg p$\\
\hline
T & T & F & T & F & F & T\\
T & F & T & F & F & F & T\\
F & T & F & T & F & T & T\\
F & F & T & T & T & T & T\\
\hline
\end{tabular}
\end{table}
2)\\
\begin{table}[H]
\small
\centering
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
$p$ & $q$ & $r$ & $p \vee q$ & $\neg p$ & $\neg p \vee r$ & $(p \vee q) \wedge (\neg p \vee r) $ & $q \vee r$ & $((p \vee q) \wedge (\neg p \vee r)) \rightarrow q \vee r$\\
\hline
T & T & T & T & F & T & T & T & T\\
T & T & F & T & F & F & F & T & T\\
T & F & F & T & F & F & F & F & T\\
F & T & T & T & T & T & T & T & T\\
F & T & F & T & T & T & T & T & T\\
F & F & T & F & T & T & F & T & T\\
T & F & T & T & F & T & T & T & T\\
F & F & F & F & T & T & F & F &T\\
\hline
\end{tabular}
\end{table}
\section*{Answer 2}
\begin{equation*}
\begin{split}
(p \rightarrow q) \vee (p \rightarrow r) &\equiv (\neg p \vee q) \vee (p \rightarrow r) \qquad \text{table 7, Equivalence 1}\\
&\equiv (\neg p \vee q) \vee (\neg p \vee r) \qquad \text{table 7, Equivalence 1}\\
&\equiv (q \vee r) \vee (\neg p \vee \neg p) \qquad \text{table 6, Associative Law}\\
&\equiv (q \vee r) \vee \neg p \qquad \text{table 6, Idempotent Law}\\
&\equiv \neg(q \vee r) \rightarrow \neg p \qquad \text{table 7, Equivalence 3}\\
&\equiv (\neg q \wedge \neg r) \rightarrow \neg p \qquad \text{table 6, De Morgan's Second Law}
\end{split}
\end{equation*}
\newpage
\section*{Answer 3}
\begin{enumerate}
\item
(a) All cats are friends with at least one dog.\\
(b) Some cats are friends with all dogs.\\
\item
(a) $\forall x \forall y ((Eats(x,y) \wedge Meal(y)) \rightarrow Customer(x)) $ \\
(b) $\exists x \exists y (Chef(x) \wedge Meal(y) \wedge \neg Cooks(x,y))$\\
(c) $\exists x \forall y \exists z(((Cooks(x,y) \wedge Chef(x)) \rightarrow Meal(y)) \rightarrow (Eats(z,y) \wedge Customer(z)))$\\
(d) $\forall x \exists y \exists z ((Chef(z) \wedge Chef(x) \wedge (x \neq z) \wedge Meal(y) \wedge \neg Cooks(x,y) \wedge Cooks(z,y)) \rightarrow Knows(x,z))$\\
\end{enumerate}
\section*{Answer 4}
$ \neg p$ and $ p \rightarrow q$ are given as premises and $ \neg q $ is given as the conclusion. Since $ \neg p$ is true, the left-hand side of $p \rightarrow q$ (that is, $p$) is false. In the rows where $p$ is false (the third and fourth rows of Table 1), $ p \rightarrow q$ is true regardless of $q$: its truth value does not depend on $q$, which can be either true or false. Therefore we cannot deduce that $ \neg q$ is true, so this argument cannot be a deduction rule in a sound deductive system.\\
\begin{table}[H]
\small
\caption{ Truth table for $p \rightarrow q$ }
\centering
\begin{tabular}{|c|c|c|}
\hline
$p$ & $q$ & $p\rightarrow q$\\
\hline
T & T & T \\
T & F & F \\
F & T & T \\
F & F & T \\
\hline
\end{tabular}
\end{table}
\section*{Answer 5}
\begin{table}[H]
\begin{enumerate}
\item $ p \implies q \hfill premise$
\item $ q \implies r \hfill premise$
\item $ r \implies p \hfill premise$\\
\begin{tabular}{|p{10cm}|}
\hline
\item $ q \hfill assumed$
\item $ r \hfill \implies e,2,4$
\item $ p \hfill \implies e,3,5$\\
\hline
\end{tabular}
\item $ q \implies p \hfill \implies i,4-6$
\item $ p \iff q \hfill \iff i,1,7$\\
\begin{tabular}{|p{10cm}|}
\hline
\item $ p \hfill assumed$
\item $ q \hfill \implies e,1,9$
\item $ r \hfill \implies e,2,10$\\
\hline
\end{tabular}
\item $ p \implies r \hfill \implies i,9-11$
\item $ p \iff r \hfill \iff i,3,12$
\item $ (p \iff q) \land (p \iff r) \hfill \land i,8,13$
\end{enumerate}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%
\section*{Answer 6}
\begin{table}[H]
\begin{enumerate}
\item $ \forall x (Q(x) \implies R(x)) \hfill premise$
\item $ \exists x (P(x) \implies Q(x)) \hfill premise$
\item $ \forall x (P(x)) \hfill premise$\\
\begin{tabular}{|p{10cm}|}
\hline
\item $ P(c) \implies Q(c) \hfill assumed $
\item $ P(c) \hfill \forall e,3$
\item $ Q(c) \hfill \implies e,4,5$
\item $ Q(c) \implies R(c) \hfill \forall e,1$
\item $ R(c) \hfill \implies e,6,7$
\item $ P(c) \land R(c) \hfill \land i,5,8$
\item $ \exists x (P(x) \land R(x)) \hfill \exists i,9$\\
\hline
\end{tabular}
\item $ \exists x (P(x) \land R(x)) \hfill \exists e,2,4-10$
\end{enumerate}
\end{table}
\end{document}
|
# Usage: julia <script> EXPECTED file1.jld2 file2.jld2 ...
# Reports every (noise, signal) combination in each results file that has
# fewer than EXPECTED runs stored.
using JLD2

expected = parse(Int, ARGS[1])

for input in ARGS[2:end]
    println(input)
    data = load(input)["results"]
    for noise in keys(data)
        for signal in keys(data[noise])
            shape = size(data[noise][signal])
            if shape[2] < expected
                println("$noise $signal ", shape)
            end
        end
    end
    println()
end
|
header {* \isachapter{Instantiating the Framework with a simple While-Language}
\isaheader{Commands} *}
theory Com imports Main begin
section {* Variables and Values *}
type_synonym vname = string -- "names for variables"
datatype val
= Bool bool -- "Boolean value"
| Intg int -- "integer value"
abbreviation "true == Bool True"
abbreviation "false == Bool False"
section {* Expressions and Commands*}
datatype bop = Eq | And | Less | Add | Sub -- "names of binary operations"
datatype expr
= Val val -- "value"
| Var vname -- "local variable"
| BinOp expr bop expr ("_ \<guillemotleft>_\<guillemotright> _" [80,0,81] 80) -- "binary operation"
fun binop :: "bop \<Rightarrow> val \<Rightarrow> val \<Rightarrow> val option"
where "binop Eq v\<^sub>1 v\<^sub>2 = Some(Bool(v\<^sub>1 = v\<^sub>2))"
| "binop And (Bool b\<^sub>1) (Bool b\<^sub>2) = Some(Bool(b\<^sub>1 \<and> b\<^sub>2))"
| "binop Less (Intg i\<^sub>1) (Intg i\<^sub>2) = Some(Bool(i\<^sub>1 < i\<^sub>2))"
| "binop Add (Intg i\<^sub>1) (Intg i\<^sub>2) = Some(Intg(i\<^sub>1 + i\<^sub>2))"
| "binop Sub (Intg i\<^sub>1) (Intg i\<^sub>2) = Some(Intg(i\<^sub>1 - i\<^sub>2))"
| "binop bop v\<^sub>1 v\<^sub>2 = None"
datatype cmd
= Skip
| LAss vname expr ("_:=_" [70,70] 70) -- "local assignment"
| Seq cmd cmd ("_;;/ _" [61,60] 60)
| Cond expr cmd cmd ("if '(_') _/ else _" [80,79,79] 70)
| While expr cmd ("while '(_') _" [80,79] 70)
fun num_inner_nodes :: "cmd \<Rightarrow> nat" ("#:_")
where "#:Skip = 1"
| "#:(V:=e) = 2" (* zusätzlicher Skip-Knoten *)
| "#:(c\<^sub>1;;c\<^sub>2) = #:c\<^sub>1 + #:c\<^sub>2"
| "#:(if (b) c\<^sub>1 else c\<^sub>2) = #:c\<^sub>1 + #:c\<^sub>2 + 1"
| "#:(while (b) c) = #:c + 2" (* zusätzlicher Skip-Knoten *)
lemma num_inner_nodes_gr_0:"#:c > 0"
by(induct c) auto
lemma [dest]:"#:c = 0 \<Longrightarrow> False"
by(induct c) auto
section {* The state *}
type_synonym state = "vname \<rightharpoonup> val"
fun "interpret" :: "expr \<Rightarrow> state \<Rightarrow> val option"
where Val: "interpret (Val v) s = Some v"
| Var: "interpret (Var V) s = s V"
| BinOp: "interpret (e\<^sub>1\<guillemotleft>bop\<guillemotright>e\<^sub>2) s =
(case interpret e\<^sub>1 s of None \<Rightarrow> None
| Some v\<^sub>1 \<Rightarrow> (case interpret e\<^sub>2 s of None \<Rightarrow> None
| Some v\<^sub>2 \<Rightarrow> (
case binop bop v\<^sub>1 v\<^sub>2 of None \<Rightarrow> None | Some v \<Rightarrow> Some v)))"
end
|
SUBROUTINE MTXMUL1(AMAT,BMAT,CMAT,NRA,NCA,NCB)
IMPLICIT REAL*8(A-H,O-Z)
C
C     FOR CONFORMABLE MATRICES AMAT, BMAT, AND CMAT, FORM THE PRODUCT
C
C CMAT = AMAT * BMAT
C
C VAR DIM TYPE I/O DESCRIPTION
C --- --- ---- --- -----------
C
C AMAT NRA,NCA R*8 I INPUT MATRIX.
C
C BMAT NCA,NCB R*8 I INPUT MATRIX.
C
C CMAT NRA,NCB R*8 O OUTPUT MATRIX. PRODUCT AMAT*BMAT.
C NOTE THAT IN THE CALLING PROGRAM,
C CMAT MAY NOT BE ONE OF AMAT AND BMAT.
C THAT IS, THE CALL
C CALL MTXMUL1(A,B,A,.....) IS INVALID.
C
C NRA 1 I*4 I NUMBER OF ROWS IN MATRICES AMAT AND
C CMAT.
C
C NCA 1 I*4 I NUMBER OF COLUMNS IN AMAT AND ROWS IN
C BMAT.
C
C NCB 1 I*4 I NUMBER OF COLUMNS IN MATRICES BMAT AND
C CMAT.
C
C
C***********************************************************************
C
C CODED BY C PETRUZZO. 6/82.
C MODIFIED............
C
C***********************************************************************
C
C
REAL*8 AMAT(NRA,NCA),BMAT(NCA,NCB),CMAT(NRA,NCB)
C
DO 100 IRA=1,NRA
DO 100 ICB=1,NCB
TEMP=0.D0
DO 200 ICA=1,NCA
200 TEMP=TEMP+AMAT(IRA,ICA)*BMAT(ICA,ICB)
100 CMAT(IRA,ICB)=TEMP
C
RETURN
END
|
// Copyright (c) 2011-2012 Thomas Heller
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef HPX_LCOS_FUTURE_DATAFLOW_BASE_HPP
#define HPX_LCOS_FUTURE_DATAFLOW_BASE_HPP
#include <hpx/components/dataflow/dataflow_base_fwd.hpp>
#include <hpx/components/dataflow/dataflow_base_void.hpp>
#include <hpx/components/dataflow/dataflow_base_impl.hpp>
#include <hpx/components/dataflow/dataflow_fwd.hpp>
#include <hpx/components/dataflow/stubs/dataflow.hpp>
#include <boost/intrusive_ptr.hpp>
namespace hpx { namespace lcos
{
template <typename Result, typename RemoteResult>
struct dataflow_base
{
typedef RemoteResult remote_result_type;
typedef Result result_type;
dataflow_base()
{}
explicit dataflow_base(lcos::future<naming::id_type> promise)
: impl(new detail::dataflow_base_impl(std::move(promise)))
{}
void connect(naming::id_type const & id) const
{
impl->connect(id);
}
future<Result> get_future() const
{
promise<Result, remote_result_type> p;
impl->connect(p.get_gid());
return p.get_future();
}
bool valid()
{
return impl && impl->get_gid();
}
private:
friend class boost::serialization::access;
boost::intrusive_ptr<detail::dataflow_base_impl> impl;
template <typename Archive>
void serialize(Archive & ar, unsigned)
{
ar & impl;
}
};
}}
#endif
|
<center>
<h1> INF285 - Computación Científica </h1>
<h2> Weighted Least Squares </h2>
<h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
<h2> Version: 1.00</h2>
</center>
<div id='toc' />
## Table of Contents
* [Introduction](#intro)
* [Weighted least-square](#wLeastSquare)
* [Example in explanation](#exampleExplanation)
* [Extension of "Initial Example" in jupyter notebook "07_08_Least_Squares"](#extensionInitialExample)
* [Acknowledgements](#acknowledgements)
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as spla
%matplotlib inline
# https://scikit-learn.org/stable/modules/classes.html#module-sklearn.datasets
from sklearn import datasets
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
M=8
```
<div id='intro' />
## Introduction
[Back to TOC](#toc)
This jupyter notebook presents the notion of weighted linear least-square problems.
It first presents the theoretical background, then a few small examples, and finally makes a connection with the "Initial Example" from the section "Overdetermined Linear Systems of Equations" in the notebook "07_08_Least_Squares".
We strongly suggest that the reader review that example first.
<div id='wLeastSquare' />
## Weighted least-square
[Back to TOC](#toc)
In this example we will consider giving a different weight to each term in the least-square problem.
Mathematically, this can be seen as multiplying the least-square problem by a diagonal matrix, say $W=\text{diag}(w_1,w_2,\dots,w_m)$, with $w_i>0$.
For instance, consider the following least-square example:
\begin{equation}
\underbrace{\begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
1 & x_3 \\
\vdots & \vdots \\
1 & x_m
\end{bmatrix}}_{\displaystyle{A}}
\underbrace{\begin{bmatrix}
a\\
b
\end{bmatrix}}_{\mathbf{x}}
=
\underbrace{\begin{bmatrix}
y_1 \\
y_2 \\
y_3 \\
\vdots\\
y_m
\end{bmatrix}}_{\displaystyle{\mathbf{b}}}.
\end{equation}
In this case, all the equations have the same weight.
Now, to give a different weight to each equation, we can do the following:
\begin{equation}
\underbrace{W\,A}_{\displaystyle{B}}\,\mathbf{x}=\underbrace{W\,\mathbf{b}}_{\displaystyle{\mathbf{c}}},
\end{equation}
where, as mentioned before, we could define $W$ as follows,
\begin{equation}
W=\text{diag}(w_1,w_2,\dots,w_m).
\end{equation}
So, when computing the quadratic error we obtain,
\begin{equation}
E=\left\|W\,\mathbf{b}- W\,A\,\mathbf{x}\right\|_2^2=\sum_{i=1}^m w_i^2\,(y_i-a-b\,x_i)^2.
\end{equation}
This indicates that when we use a higher value of $w_i$ for some $i$'s, these equations will have a higher impact on the minimization.
From the normal equations point of view, this slightly modified problem will satisfy the following normal equations,
\begin{equation}
B^*\,B\,\overline{\mathbf{x}}_w=B^*\,\mathbf{c}.
\end{equation}
Notice that we used the sub-index $w$ in $\overline{\mathbf{x}}_w$ to distinguish it from the unweighted least-square solution.
Thus, coming back to the original equations, we obtain,
\begin{align*}
B^*\,B\,\overline{\mathbf{x}}_w &=B^*\,\mathbf{c},\\
(W\,A)^*\,(W\,A)\,\overline{\mathbf{x}}_w &=(W\,A)^*\,W\,\mathbf{b},\\
A^*\,W^*\,W\,A\,\overline{\mathbf{x}}_w &=A^*\,W^*\,W\,\mathbf{b},\\
A^*\,W^2\,A\,\overline{\mathbf{x}}_w &=A^*\,W^2\,\mathbf{b},\\
\overline{\mathbf{x}}_w &=(A^*\,W^2\,A)^{-1}\,A^*\,W^2\,\mathbf{b}.\\
\end{align*}
So, if $W$ is the identity matrix or a non-zero multiple of it, i.e. $W=\alpha\,I$ with $\alpha\neq0$, the weighted least-square problem reduces to,
\begin{align*}
\overline{\mathbf{x}} &=(A^*\,\alpha^2\,I^2\,A)^{-1}\,A^*\,\alpha^2\,I^2\,\mathbf{b},\\
\overline{\mathbf{x}} &=(A^*\,A)^{-1}\,A^*\,\mathbf{b}.\\
\end{align*}
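A minimal numerical sketch of the identities above (the data is arbitrary and chosen only for illustration): we solve the weighted normal equations directly, compare against solving the modified system $B\,\mathbf{x}=\mathbf{c}$ with $B=W\,A$ and $\mathbf{c}=W\,\mathbf{b}$, and check that scaling all weights by the same non-zero $\alpha$ does not change the solution.
```python
import numpy as np

# Arbitrary small data set (m = 4 points) for a line fit y = a + b*x.
x = np.array([0.0, 1.0, 2.0, 3.0])
b = np.array([1.0, 3.0, 2.0, 5.0])
A = np.vstack([np.ones_like(x), x]).T

# Weights: emphasize the first equation.
W = np.diag([3.0, 1.0, 1.0, 1.0])

# Weighted solution: x_w = (A^T W^2 A)^{-1} A^T W^2 b.
x_w = np.linalg.solve(A.T @ W**2 @ A, A.T @ W**2 @ b)

# Same solution via least squares on B x = c with B = W A, c = W b.
x_w2 = np.linalg.lstsq(W @ A, W @ b, rcond=None)[0]
print(np.allclose(x_w, x_w2))   # True

# Scaling all weights by alpha != 0 leaves the solution unchanged.
aW = 7.0 * W
print(np.allclose(x_w, np.linalg.solve(A.T @ aW**2 @ A, A.T @ aW**2 @ b)))  # True
```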
It is important to point out that, from the least-square point of view, any algebraic modification we apply to an equation changes its weight relative to the other equations.
For instance, if we have the following problem,
\begin{equation}
\begin{bmatrix}
4 & 4\,x_1 \\
1 & x_2 \\
1 & x_3
\end{bmatrix}
\begin{bmatrix}
a\\
b
\end{bmatrix}
=
\begin{bmatrix}
4\,y_1 \\
y_2 \\
y_3
\end{bmatrix},
\end{equation}
it will have a different least-square solution if we simplify the factor $4$ in the first equation.
This means that solving the following problem will lead to a different least-square solution,
\begin{equation}
\begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
1 & x_3
\end{bmatrix}
\begin{bmatrix}
a\\
b
\end{bmatrix}
=
\begin{bmatrix}
y_1 \\
y_2 \\
y_3
\end{bmatrix}.
\end{equation}
The reason for this is that they generate different quadratic errors.
It is very important to highlight that this weight effect matters because we are dealing with an overdetermined linear system of equations.
In the case we deal with a square and non-singular linear system of equations, the weights do not change the solution; the analysis becomes,
\begin{align*}
B^*\,B\,\mathbf{x} &= B^*\,\mathbf{c},\\
(W\,A)^*\,(W\,A)\,\mathbf{x} &= (W\,A)^*\,W\,\mathbf{b},\\
A^*\,W^*\,W\,A\,\mathbf{x} &=A^*\,W^*\,W\,\mathbf{b},\\
A^*\,W^2\,A\,\mathbf{x} &=A^*\,W^2\,\mathbf{b},\\
\mathbf{x} &=(A^*\,W^2\,A)^{-1}\,A^*\,W^2\,\mathbf{b},\\
\mathbf{x} &=A^{-1}\,W^{-2}\,A^{-*}\,A^*\,W^2\,\mathbf{b},\\
\mathbf{x} &=A^{-1}\,W^{-2}\,\,W^2\,\mathbf{b},\\
\mathbf{x} &=A^{-1}\,\mathbf{b}.\\
\end{align*}
The key difference is in the sixth line.
In this case the inverse of each matrix in the parentheses exists, which was not true previously.
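A quick numerical check of the square case (again, the data is arbitrary and for illustration only): for a square, non-singular $A$, the weighted solution coincides with $A^{-1}\,\mathbf{b}$ regardless of the weights.
```python
import numpy as np

# Square, non-singular system: the weights cannot change the solution.
A = np.array([[2.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
W = np.diag([5.0, 0.5])

x_plain = np.linalg.solve(A, b)
x_w = np.linalg.solve(A.T @ W**2 @ A, A.T @ W**2 @ b)
print(np.allclose(x_plain, x_w))  # True
```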
<div id='exampleExplanation' />
### Example in explanation
[Back to TOC](#toc)
```python
x1,x2,x3 = 1,2,3
y1,y2,y3 = 5,-1,3
A1 = np.ones((3,2))
A1[:,1]=[x1,x2,x3]
b1=np.array([y1,y2,y3])
A1[0,:]*=4
b1[0]*=4
x1_bar=np.linalg.solve(A1.T @ A1,A1.T @ b1)
print('A1: ', A1)
print('b1: ', b1)
print('x1_bar: ', x1_bar)
A2 = np.ones((3,2))
A2[:,1]=[x1,x2,x3]
b2=np.array([y1,y2,y3])
x2_bar=np.linalg.solve(A2.T @ A2,A2.T @ b2)
print('A2: ', A2)
print('b2: ', b2)
print('x2_bar: ', x2_bar)
```
A1: [[4. 4.]
[1. 2.]
[1. 3.]]
b1: [20 -1 3]
x1_bar: [ 6.80246914 -1.92592593]
A2: [[1. 1.]
[1. 2.]
[1. 3.]]
b2: [ 5 -1 3]
x2_bar: [ 4.33333333 -1. ]
We clearly observe that the least square solutions are different!
This is consistent with the previous explanation.
<div id='extensionInitialExample' />
### Extension of "Initial Example" in jupyter notebook "07_08_Least_Squares"
[Back to TOC](#toc)
In this example we will approximate $m$ points assuming a linear relationship.
This means that we have the data points $(x_i,y_i)$ for $i\in\{1,2,\dots,m\}$ and consider the relationship $y=a_0+a_1\,x$.
The error that will be added follows a normal distribution, and we will use the following weight matrix,
\begin{equation}
W=\text{diag}(w,w,1,\dots,1).
\end{equation}
This means we give more weight to the first two equations; this choice is arbitrary and is only used to show what effect it has on the least-square problem.
The example only shows the approximation output since we already saw all the other components in the original example.
### Question to think about before modifying the weight $w$: What would you expect to happen to the approximation if you use a large value for $w$?
```python
def showWeightedOutput(w=1):
    # Number of points to be used
    m = 10
    # Relationship considered
    fv = np.vectorize(lambda x, a0, a1: a0+a1*x)
    # Coefficients considered
    a0, a1 = 1, 4
    np.random.seed(0)
    # Standard deviation for the error
    sigma = 5e-1
    # Error to be added
    e = np.random.normal(0,sigma,m)
    # Generating data points
    x = np.linspace(0,1,m)
    y = fv(x,a0,a1)+e
    # Build the data matrix
    A = np.ones((m,2))
    A[:,1] = x
    # Setting up the right hand side
    b = np.copy(y)
    A[:2,:]*=w
    b[:2]*=w
    # Building and solving the normal equations
    # A^T A x_bar = A^T b
    x_bar = np.linalg.solve(A.T @ A, A.T @ b)
    # Showing the comparison between the "original function" and the "least-square reconstructed approximation".
    # We added in red a "sample" of possible functions.
    # Notice that the colors used follow the description included in the classnotes.
    # This means to consider the following analogy:
    # blue: data points; this corresponds to the right-hand-side vector "b".
    # red: this corresponds to the sub-space generated by Ax, i.e. the span of the columns of A.
    # violet: this corresponds to the least-square solution found.
    plt.figure(figsize=(10,10))
    for i in range(100):
        plt.plot(x,fv(x,x_bar[0]+np.random.normal(0,1),x_bar[1]+np.random.normal(0,1)),'r-',linewidth=1,alpha=0.2)
    plt.plot(x,fv(x,a0,a1),'k-',linewidth=8,alpha=0.8)
    plt.plot(x,fv(x,x_bar[0],x_bar[1]),'--',color='darkviolet',linewidth=4)
    plt.plot(x,fv(x,x_bar[0],x_bar[1]),'r.',markersize=20)
    plt.plot(x,y,'b.',markersize=10)
    plt.grid(True)
    plt.xlabel(r'$x$')
    plt.ylabel(r'$y$')
    plt.show()

interact_manual(showWeightedOutput,w=(0.01,101,0.01))
```
interactive(children=(FloatSlider(value=1.0, description='w', max=101.0, min=0.01, step=0.01), Button(descript…
<function __main__.showWeightedOutput(w=1)>
<div id='acknowledgements' />
# Acknowledgements
[Back to TOC](#toc)
* _Material created by professor Claudio Torres_ (`[email protected]`) DI UTFSM. June 2021.- v1.0.
```python
```
|
REBOL [
Title: "Builds and Runs a single Red/System Tests"
File: %run-test.r
Author: "Peter W A Wood"
Version: 0.8.1
License: "BSD-3 - https://github.com/dockimbel/Red/blob/master/BSD-3-License.txt"
]
;; include quick-test.r
do %quick-test.r
;; set the base dir for the test source
qt/tests-dir: system/options/path
print ["system/options/path " system/options/path]
print ["qt/test-dir" qt/tests-dir]
print rejoin ["Quick-Test v" system/script/header/version]
print rejoin ["Running under REBOL " system/version]
;; get the name of the test file
src: system/script/args
either any [
(not find src ".r") and (not find src ".reds")
not src: to-file src
][
print "No valid test file supplied"
][
print ["run-test src " src]
either find src ".reds" [
;; compile & run reds pgm
either exe: qt/compile src [
qt/run exe
print qt/output
][
print "Compile Error!!!"
print qt/comp-output
]
][
either find read qt/tests-dir/:src "quick-unit-test.r" [
--run-unit-test src
][
;; copy and run rebol script
qt/run-script src
]
]
]
prin ""
|
#ifndef DAQI_REDIS_VALUE_HPP
#define DAQI_REDIS_VALUE_HPP
#include <string>
#include <stack>
#include <vector>
#include <utility>
#include <boost/variant.hpp>
namespace da4qi4
{
class RedisValue
{
public:
struct ErrorTag {};
RedisValue();
RedisValue(RedisValue&& other);
RedisValue(int64_t i);
RedisValue(const char* s);
RedisValue(const std::string& s);
RedisValue(std::vector<char> buf);
RedisValue(std::vector<char> buf, struct ErrorTag);
RedisValue(std::string const& custom_error, struct ErrorTag);
RedisValue(std::vector<RedisValue> array);
RedisValue(const RedisValue&) = default;
RedisValue& operator = (const RedisValue&) = default;
RedisValue& operator = (RedisValue&&) = default;
// Return the value as a std::string if
// type is a byte string; otherwise returns an empty std::string.
std::string ToString() const;
// Return the value as a std::vector<char> if
// type is a byte string; otherwise returns an empty std::vector<char>.
std::vector<char> ToByteArray() const;
// Return the value as an int64_t if
// type is an int; otherwise returns 0.
int64_t ToInt() const;
int ToInt32() const
{
return static_cast<int>(ToInt());
}
// Return the value as an array if type is an array;
// otherwise returns an empty array.
std::vector<RedisValue> ToArray() const;
// Return the string representation of the value. Useful
// for dumping the content of the value.
std::string Inspect() const;
// Return true if the value is not an error
bool IsOk() const;
// Return true if the value is an error
bool IsError() const;
// Return true if this is a null.
bool IsNull() const;
// Return true if type is an int
bool IsInt() const;
// Return true if type is an array
bool IsArray() const;
// Return true if type is a string/byte array. Alias for IsString();
bool IsByteArray() const;
// Return true if type is a string/byte array. Alias for IsByteArray().
bool IsString() const;
// Methods for increasing performance
// Throws: boost::bad_get if the type does not match
std::vector<char>& GetByteArray();
const std::vector<char>& GetByteArray() const;
std::vector<RedisValue>& GetArray();
const std::vector<RedisValue>& GetArray() const;
bool operator == (const RedisValue& rhs) const;
bool operator != (const RedisValue& rhs) const;
protected:
template<typename T>
T cast_to() const;
template<typename T>
bool type_eq() const;
private:
struct NullTag
{
inline bool operator == (const NullTag&) const
{
return true;
}
};
boost::variant<NullTag, int64_t, std::vector<char>, std::vector<RedisValue>> _value;
bool _error;
};
template<typename T>
T RedisValue::cast_to() const
{
return (_value.type() == typeid(T)) ? boost::get<T>(_value) : T();
}
template<typename T>
bool RedisValue::type_eq() const
{
return (_value.type() == typeid(T));
}
} // namespace da4qi4
#endif // DAQI_REDIS_VALUE_HPP
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
# NOTE: find a better place for these! What about a single standard drop tag rule?
NewRulesFor(TRC, rec(
TRC_tag := rec(
forTransposition := false,
applicable := (self, nt) >>
(nt.isTag(1, spiral.paradigms.smp.AParSMP) or not nt.hasTags())
# AVecReg is taken from a namespace that is NOT YET LOADED
# hence the fully qualified name
and not nt.hasTag(spiral.paradigms.vector.AVecReg)
and not nt.hasTag(spiral.paradigms.vector.AVecRegCx),
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> RC(c[1])
)
));
NewRulesFor(TDiag, rec(
TDiag_tag := rec(
forTransposition := false,
# YSV: Below limits applicability to the cases where diag size is divisible by vlen
# which is a safe thing to do. Because VectorCodegen, can't generated code
# for VDiags of size non-divisible by vlen. HOWEVER, if VDiag is propagate
# past any kind of VGath, this problem goes away. So having no restriction,
# will work MOST of the time, but not all the time.
#
# applicable := (self, nt) >> let(
# vtags := [spiral.paradigms.vector.AVecReg, spiral.paradigms.vector.AVecRegCx],
# dom := nt.params[1].domain(),
# not nt.hasAnyTag(vtags) or (dom mod nt.getAnyTag(vtags).v) = 0
# ),
apply := (t, C, Nonterms) -> let(
vtags := [spiral.paradigms.vector.AVecReg, spiral.paradigms.vector.AVecRegCx],
Cond(t.hasAnyTag(vtags),
spiral.paradigms.vector.sigmaspl.VDiag(t.params[1], t.getAnyTag(vtags).v),
Diag(t.params[1])
)
)
)
));
RulesFor(TRCDiag, rec(
TRCDiag_tag := rec(
forTransposition := false,
applicable := (self, nt) >> not nt.transposed,
rule := (P, C) -> RC(Diag(P[1])))
));
RulesFor(TId, rec(
TId_tag := rec(
forTransposition := false,
switch := false,
rule := (P, C) -> P[1])
));
NewRulesFor(TRaderMid, rec(
TRaderMid_tag := rec(
forTransposition := false,
apply := (t, C, Nonterms) -> t.raderMid(t.params[1], t.params[2], t.params[3])
)
));
NewRulesFor(TRDiag, rec(
TRDiag_RT_Diag := rec(
forTransposition := true,
apply := (t, C, Nonterms) -> t.terminate()
)
));
NewRulesFor(TCompose, rec(
TCompose_tag := rec(
forTransposition := false,
applicable := (self, nt) >> true,
children := nt -> [ List(nt.params[1], e -> e.withTags(nt.getTags())) ],
apply := (nt, c, cnt) -> Grp(Compose(c))
)
));
NewRulesFor(TCond, rec(
TCond_tag := rec(
forTransposition := false,
applicable := (self, nt) >> true,
children := nt -> [[
nt.params[2].withTags(nt.getTags()), nt.params[3].withTags(nt.getTags()) ]],
apply := (t, C, Nonterms) -> COND(t.params[1], C[1], C[2])
)
));
NewRulesFor(TGrp, rec(
TGrp_tag := rec(
forTransposition := false,
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> Grp(c[1])
)
));
NewRulesFor(TInplace, rec(
TInplace_tag := rec(
forTransposition := false,
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> Inplace(c[1])
)
));
NewRulesFor(TICompose, rec(
TICompose_tag := rec(
forTransposition := false,
children := nt -> [[ nt.params[3].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> ICompose(nt.params[1], nt.params[2], c[1])
)
));
########################################################################
# (A + B) rules
NewRulesFor(TDirectSum, rec(
# (A + B) terminate
A_dirsum_B := rec(
forTransposition := false,
children := (self, t) >> let( tags := t.getTags(),
[[ t.params[1].withTags(tags), t.params[2].setTags(tags) ]]
),
apply := (t, C, Nonterms) -> DirectSum(C)
#D children := (self, t) >> let (tags:=GetTags(t),
#D [[ AddTag(t.params[1], tags), SetTag(t.params[2], tags) ]]),
)
));
########################################################################
# (A x B) rules
NewRulesFor(TTensor, rec(
# (A x B) -> (A x I)(I x B)
AxI_IxB := rec(
info := "(A x B) -> (A x I)(I x B)",
forTransposition := false,
applicable := nt -> true,
inplace := false,
children := (self, nt) >> let(inp := When(self.inplace, TInplace, x->x),
[[ TCompose([
inp(TTensorI(nt.params[1], nt.params[2].dims()[1], AVec, AVec)),
TTensorI(nt.params[2], nt.params[1].dims()[2], APar, APar)
]).withTags(nt.getTags()) ]]),
apply := (nt, c, cnt) -> c[1],
#D isApplicable := P -> true,
#D allChildren := P -> [[TCompose([TTensorI(P[1], P[2].dims()[1], AVec, AVec), TTensorI(P[2], P[1].dims()[2], APar, APar)], P[3])]],
#D rule := (P, C) -> C[1]
),
# (A x B) -> (I x B)(A x I)
IxB_AxI := rec(
info := "(A x B) -> (I x B)(A x I)",
forTransposition := false,
applicable := nt -> true,
inplace := false,
children := (self, nt) >> let(inp := When(self.inplace, TInplace, x->x),
[[ TCompose([
inp(TTensorI(nt.params[2], nt.params[1].dims()[1], APar, APar)),
TTensorI(nt.params[1], nt.params[2].dims()[2], AVec, AVec)
]).withTags(nt.getTags()) ]]),
apply := (nt, c, cnt) -> c[1]
#D isApplicable := P -> true,
#D allChildren := P -> [[TCompose([TTensorI(P[2], P[1].dims()[1], APar, APar), TTensorI(P[1], P[2].dims()[2], AVec, AVec)], P[3])]],
#D rule := (P, C) -> C[1]
),
# (A x B) -> (L(B x I))(L(A x I))
L_BxI__L_AxI := rec(
info := "(A x B) -> (L(B x I))(L(A x I))",
forTransposition := false,
applicable := nt -> true,
children := nt -> [[ TCompose([
TTensorI(nt.params[2], nt.params[1].dims()[1], APar, AVec),
TTensorI(nt.params[1], nt.params[2].dims()[2], APar, AVec)
]).withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> c[1]
#D isApplicable := P -> true,
#D allChildren := P -> [[TCompose([TTensorI(P[2], P[1].dims()[1], APar, AVec), TTensorI(P[1], P[2].dims()[2], APar, AVec)], P[3])]],
#D rule := (P, C) -> C[1]
),
# (A x B) -> ((A x I)L)((B x I)L)
AxI_L__BxI_L := rec(
info := "(A x B) -> ((A x I)L)((B x I)L)",
forTransposition := false,
applicable := nt -> true,
children := nt -> [[ TCompose([
TTensorI(nt.params[1], nt.params[2].dims()[1], AVec, APar),
TTensorI(nt.params[2], nt.params[1].dims()[2], AVec, APar)
]).withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> c[1],
#D isApplicable := P -> true,
#D allChildren := P -> [[TCompose([TTensorI(P[1], P[2].dims()[1], AVec, APar), TTensorI(P[2], P[1].dims()[2], AVec, APar)], P[3])]],
#D rule := (P, C) -> C[1]
),
));
########################################################################
# rules for A x I, I x A, (A x I)L, (I x A)L
NewRulesFor(TTensorI, rec(
TTensorI_toGT := rec(
applicable := t -> true,
freedoms := t -> [], # no degrees of freedom
child := (t, fr) -> [ GT_TTensorI(t) ], # fr will be an empty list
apply := (t, C, Nonterms) -> C[1]
)
));
NewRulesFor(TTensorI, rec(
# base cases
# I x A
IxA_base := rec(
info := "IxA base",
forTransposition := false,
applicable := nt -> (not nt.hasTags() or nt.firstTag() = ANoTag) and IsParPar(nt.params),
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> When(nt.params[2] > 1,
Tensor(I(nt.params[2]), c[1]),
c[1]
)
#D isApplicable := (self, P) >> PUntagged(self.nonTerminal, P) and IsParPar(P),
#D allChildren := P -> [[P[1]]],
#D rule := (P, C) -> When(P[2]>1,Tensor(I(P[2]),C[1]),C[1])
),
# A x I
AxI_base := rec(
info := "AxI base",
forTransposition := false,
applicable := nt -> (not nt.hasTags() or nt.firstTag() = ANoTag) and IsVecVec(nt.params),
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> When( nt.params[2] > 1,
Tensor(c[1], I(nt.params[2])),
c[1]
),
#D isApplicable := (self, P) >> PUntagged(self.nonTerminal, P) and IsVecVec(P),
#D allChildren := P -> [[P[1]]],
#D rule := (P, C) -> When(P[2]>1,Tensor(C[1], I(P[2])),C[1])
),
# (I x A)L
IxA_L_base := rec(
info := "(IxA)L base",
forTransposition := false,
applicable := nt -> (not nt.hasTags() or nt.firstTag() = ANoTag) and IsParVec(nt.params),
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> Tensor(I(nt.params[2]), c[1]) * L(c[1].dims()[2] * nt.params[2], nt.params[2]),
#D isApplicable := (self, P) >> PUntagged(self.nonTerminal, P) and IsParVec(P),
#D allChildren := P -> [[P[1]]],
#D rule := (P, C) -> Tensor(I(P[2]), C[1])*L(C[1].dims()[2]*P[2], P[2])
),
# L(I x A)
L_IxA_base := rec(
info := "L(IxA) base",
forTransposition := false,
applicable := nt -> (not nt.hasTags() or nt.firstTag() = ANoTag) and IsVecPar(nt.params),
children := nt -> [[ nt.params[1].withTags(nt.getTags()) ]],
apply := (nt, c, cnt) -> L(c[1].dims()[1] * nt.params[2], c[1].dims()[1]) * Tensor(I(nt.params[2]), c[1])
#D isApplicable := (self, P) >> PUntagged(self.nonTerminal, P) and IsVecPar(P),
#D allChildren := P -> [[P[1]]],
#D rule := (P, C) -> L(C[1].dims()[1]*P[2], C[1].dims()[1]) * Tensor(I(P[2]), C[1])
),
# splitting rules ##############################################################
# (A _m x I_n)L_mn_m
AxI_L_split := rec(
info := "split (A_m x I_n) L^mn_m --> (L_mn/u_m x I_u) * (I_n/u x (A_m x I_u) * L_mu_m )",
forTransposition := false,
applicable := nt -> (nt.firstTag().kind() = AGenericTag) and IsVecPar(nt.params),
children := nt -> let(t := nt.getTags(), p := nt.params, d := p[1].dims(), mu := t[1].params[1], [
TTensorI(TL(d[1] * p[2]/mu, d[1],1,1), mu, AVec, AVec).withTags(t),
TTensorI(p[1], mu, AVec, APar).withTags(t)
]),
apply := (nt, c, cnt) -> let(t := nt.getTags(), n := nt.params[2], mu := t[1].params[1],
c[1] * Tensor(I(n/mu), c[2])
),
# Example
# =======
# t:=TTensorI(DFT(4, 1), 4, AVec, APar).withTags([ AGenericTag(2) ]);
# c:=AxI_L_split.children(t);
# res := AxI_L_split.apply(t,c,false);
switch:=false
),
# (I_n x A_rxs) L^ns_n
IxA_L_split := rec(
info := "split (I_n x A_rxs) L^ns_n",
forTransposition := false,
applicable := nt -> IsParVec(nt.params),
children := nt -> let(t := nt.getTags(), p := nt.params, d := p[1].dims(), [[
TTensorI(p[1], p[2], APar, APar).withTags(t),
TL(d[2]*p[2], p[2], 1, 1).withTags(t)
]]),
apply := (nt, c, cnt) -> c[1] * c[2],
#D isApplicable := P -> P[3].isPar and P[4].isVec,
#D allChildren := P -> let(pv:=P[5], d:=P[1].dims(), [[TTensorI(P[1], P[2], APar, APar, pv), TL(d[2]*P[2], P[2], 1, 1, pv)]]),
#D rule := (P, C) -> C[1] * C[2],
switch := false
),
# L^nr_n (A_rxs x I_n)
L_AxI_split := rec(
info := "split L^nr_n (A_rxs x I_n) ",
forTransposition := false,
applicable := nt -> IsParVec(nt.params),
children := nt -> let( t := nt.getTags(), p := nt.params, d := p[1].dims(), [[
TL(d[1] * p[2], p[2], 1, 1).withTags(t),
TTensorI(p[1], p[2], AVec, AVec).withTags(t)
]]),
apply := (nt, c, cnt) -> c[1] * c[2],
switch := false
#D isApplicable := P -> P[3].isPar and P[4].isVec,
#D allChildren := P -> let(pv:=P[5], d:=P[1].dims(), [[ TL(d[1]*P[2], P[2], 1, 1, pv), TTensorI(P[1], P[2], AVec, AVec, pv) ]]),
#D rule := (P, C) -> C[1] * C[2],
),
# L^nr_r (I_n x A_rxs)
L_IxA_split := rec(
info := "split L^nr_r (I_n x A_rxs)",
forTransposition := false,
applicable := nt -> IsVecPar(nt.params),
children := nt -> let( t := nt.getTags(), p := nt.params, d := p[1].dims(), [[
TL(d[1]*p[2], d[1], 1, 1).withTags(t),
TTensorI(p[1], p[2], APar, APar).withTags(t)
]]),
apply := (nt, c, cnt) -> c[1] * c[2],
#D isApplicable := P -> P[3].isVec and P[4].isPar,
#D allChildren := P -> let(pv:=P[5], d:=P[1].dims(), [[TL(d[1]*P[2], d[1], 1, 1, pv), TTensorI(P[1], P[2], APar, APar, pv)]]),
#D rule := (P, C) -> C[1] * C[2],
switch := false
),
# (A_rxs x I_n) L^nr_s
AxI_L_split := rec(
info := "split (A_rxs x I_n) L^nr_s ",
forTransposition := false,
applicable := nt -> IsVecPar(nt.params),
children := nt -> let( t := nt.getTags(), p := nt.params, d := p[1].dims(), [[
TTensorI(p[1], p[2], APar, APar).withTags(t),
TL(d[2]*p[2], d[2], 1, 1).withTags(t)
]]),
apply := (nt, c, cnt) -> c[1] * c[2],
#D isApplicable := P -> P[3].isVec and P[4].isPar,
#D allChildren := P -> let(pv:=P[5], d:=P[1].dims(), [[ TTensorI(P[1], P[2], APar, APar, pv), TL(d[2]*P[2], d[2], 1, 1, pv)]]),
#D rule := (P, C) -> C[1] * C[2],
switch := false
),
## vector recursion #############################################################
# (I x (I x A)L)L
IxA_L_vecrec := rec(
info := "(I x (I x A)L)L vector recursion",
forTransposition := false,
applicable := nt -> ObjId(nt.params[1]) = TTensorI and IsParVec(nt.params) and IsParVec(nt.params[1].params),
children := nt -> let(k := nt.params[2], m := nt.params[1].params[2], n := nt.params[1].params[1].dims(), [[
TL(k*m, k, 1, n[1]).withTags(nt.getTags()),
TTensorI(nt.params[1].params[1], nt.params[2], APar, AVec).withTags(nt.getTags()),
TL(m*n[2], m, 1, k).withTags(nt.getTags())
]]),
apply := (nt, c, cnt) -> let(m := nt.params[1].params[2],
c[1] * Tensor(I(m), c[2]) * c[3]
),
#D isApplicable := P -> P[1].name = "TTensorI" and P[3].isPar and P[4].isVec and P[1].params[3].isPar and P[1].params[4].isVec,
#D allChildren := P -> let(k:=P[2], m:=P[1].params[2], n:=P[1].params[1].dims(),
#D [[ TL(k*m, k, 1, n[1], P[5]), TTensorI(P[1].params[1], P[2], APar, AVec, P[5]), TL(m*n[2], m, 1, k, P[5])]]),
#D rule := (P, C) -> let(k:=P[2], m:=P[1].params[2], n:=P[1].params[1].dims(),
#D C[1] * Tensor(I(m), C[2]) * C[3]
#D ),
switch := false
),
# L(I x L(I x A))
L_IxA_vecrec := rec(
info := "L(I x L(I x A)) vector recursion",
forTransposition := false,
applicable := nt -> ObjId(nt.params[1]) = TTensorI and IsVecPar(nt.params) and IsVecPar(nt.params[1].params),
children := nt -> let( k := nt.params[2], m := nt.params[1].params[2], n := nt.params[1].params[1].dims(), [[
TL(m*n[1], n[1], 1, k).withTags(nt.getTags()),
TTensorI(nt.params[1].params[1], nt.params[2], AVec, APar).withTags(nt.getTags()),
TL(k*m, m, 1, n[2]).withTags(nt.getTags())
]]),
apply := (nt, c, cnt) -> let(m := nt.params[1].params[2],
c[1] * Tensor(I(m), c[2]) * c[3]
),
#D isApplicable := P -> P[1].name = "TTensorI" and P[3].isVec and P[4].isPar and P[1].params[3].isVec and P[1].params[4].isPar,
#D allChildren := P -> let(k:=P[2], m:=P[1].params[2], n:=P[1].params[1].dims(),
#D [[ TL(m*n[1], n[1], 1, k, P[5]), TTensorI(P[1].params[1], P[2], AVec, APar, P[5]), TL(k*m, m, 1, n[2], P[5])]]),
#D rule := (P, C) -> let(k:=P[2], m:=P[1].params[2], n:=P[1].params[1].dims(),
#D C[1] * Tensor(I(m), C[2]) * C[3]
#D ),
switch := false
)
));
########################################################################
# rules for L
#D isVec := P->Length(P[5]) > 0 and P[5][1].isVec;
NewRulesFor(TL, rec(
# TL(N,n,l,r,[]) -> I_l x L(N,n) x I_r
L_base := rec(
forTransposition := false,
applicable := nt -> nt.isTag(1, spiral.paradigms.smp.AParSMP) or not nt.hasTags(),
apply := (nt, c, cnt) -> let(
c1 := When(nt.params[3]=1, [], [I(nt.params[3])]),
c2 := When(nt.params[4]=1, [], [I(nt.params[4])]),
Tensor(Concat(c1, [ L(nt.params[1], nt.params[2]) ], c2))
)
),
# TL(N,n,l,r,[]) -> I_l x L(N,n) x I_r
L_func := rec(
forTransposition := false,
applicable := nt -> nt.isTag(1, spiral.paradigms.smp.AParSMP) or not nt.hasTags(),
apply := (nt, c, cnt) -> let(
c1 := When(nt.params[3]=1, [], [fId(nt.params[3])]),
c2 := When(nt.params[4]=1, [], [fId(nt.params[4])]),
Prm(fTensor(Concat(c1, [ L(nt.params[1], nt.params[2]) ], c2)))
)
),
# recursion rules
IxLxI_kmn_n := rec (
info := "I(l) x L(kmn, n) x I(r) -> (I_l x L(kn,n) x I(mr))(I(kl) x L(mn, n) x I(r))",
forTransposition := false,
applicable := nt -> Length(DivisorsIntDrop(nt.params[1]/nt.params[2])) > 0,
children := nt -> let(
N := nt.params[1], n := nt.params[2],
km := N/n, ml := DivisorsIntDrop(km),
l := nt.params[3], r := nt.params[4],
List(ml, m -> let( k := km/m, [
TL(k*n, n, l, r*m).withTags(nt.getTags()),
TL(m*n, n, k*l, r).withTags(nt.getTags())
]))
),
apply := (nt, c, cnt) -> let(
spl := c[1] * c[2],
When(nt.params[1] = nt.params[2]^2,
SymSPL(spl),
spl
)
),
#D isApplicable := P -> #isVec(P) and let(v:=P[5][1].v, (P[1]*P[2] >= v or P[1]*P[3] >= v) and
#D Length(DivisorsIntDrop(P[1]/P[2])) > 0,
#D allChildren := P -> let(N:=P[1], n:=P[2], km:=N/n, ml:=DivisorsIntDrop(km), l:=P[3], r:=P[4], vp:=P[5],
#D List(ml, m->let(k:=km/m, [TL(k*n, n, l, r*m, vp), TL(m*n,n, k*l, r, vp)])) ),
#D rule := (P, C) -> let(spl := C[1]*C[2], When(P[1]=P[2]^2, SymSPL(spl), spl)),
switch := false
),
IxLxI_kmn_km := rec (
info := "I(l) x L(kmn, km) x I(r) -> (I(kl) x L(mn,m) x I(r))(I(l) x L(kn, k) x I(r))",
forTransposition := false,
applicable := nt -> Length(DivisorsIntDrop(nt.params[2])) > 0,
children := nt -> let(
N := nt.params[1], km := nt.params[2],
n := N/km, ml := DivisorsIntDrop(km),
l := nt.params[3], r := nt.params[4],
List(ml, m->let(
k := km/m,
[
TL(m*n, m, k*l, r).withTags(nt.getTags()),
TL(k*n,k, l, m*r).withTags(nt.getTags())
]
))
),
apply := (nt, C, cnt) -> let(P := nt.params, spl := C[1]*C[2], When(P[1]=P[2]^2, SymSPL(spl), spl)),
#D isApplicable := P -> #isVec(P) and let(v:=P[5][1].v, (P[1]*P[2] >= v or P[1]*P[3] >= v) and
#D Length(DivisorsIntDrop(P[2])) > 0,
#D allChildren := P -> let(N:=P[1], km:=P[2], n:=N/km, ml:=DivisorsIntDrop(km), l:=P[3], r:=P[4], vp:=P[5],
#D List(ml, m->let(k:=km/m, [TL(m*n, m, k*l, r, vp), TL(k*n,k, l, m*r, vp)])) ),
#D rule := (P, C) -> let(spl := C[1]*C[2], When(P[1]=P[2]^2, SymSPL(spl), spl)),
switch := false
),
IxLxI_IxLxI_up := rec (
info := "I(l) x L(kmn, km) x I(r) -> (I(l) x L(kmn, k) x I(r))(I(l) x L(kmn, m) x I(r))",
forTransposition := false,
applicable := nt -> Length(DivisorPairs(nt.params[2])) > 0,
children := nt -> let(
N := nt.params[1], km := DivisorPairs(nt.params[2]),
l := nt.params[3], r := nt.params[4], t := nt.getTags(),
List(km, i->[TL(N, i[1], l, r).withTags(t), TL(N, i[2], l, r).withTags(t)])
),
apply := (nt, c, cnt) -> c[1] * c[2],
#D isApplicable := P -> Length(DivisorPairs(P[2])) > 0,
#D allChildren := P -> let(N:=P[1], km:=DivisorPairs(P[2]), l:=P[3], r:=P[4], vp:=P[5],
#D List(km, i->[TL(N, i[1], l, r, vp), TL(N, i[2], l, r, vp)])),
#D rule := (P, C) -> C[1]*C[2],
switch := false
),
IxLxI_IxLxI_down := rec (
info := "I(l) x L(kmn, k) x I(r) -> (I(l) x L(kmn, km) x I(r))(I(l) x L(kmn, kn) x I(r))",
forTransposition := false,
applicable := nt -> Length(DivisorPairs(nt.params[1]/nt.params[2])) > 0,
children := nt -> let(
N := nt.params[1], km := DivisorPairs(nt.params[1]/nt.params[2]),
l := nt.params[3], r := nt.params[4], t := nt.getTags(),
List(km, i->[TL(N, N/i[1], l, r).withTags(t), TL(N, N/i[2], l, r).withTags(t)])
),
apply := (nt, c, cnt) -> c[1] * c[2],
#D isApplicable := P -> Length(DivisorPairs(P[1]/P[2])) > 0,
#D allChildren := P -> let(N:=P[1], km:=DivisorPairs(P[1]/P[2]), l:=P[3], r:=P[4], vp:=P[5],
#D List(km, i->[TL(N, N/i[1], l, r, vp), TL(N, N/i[2], l, r, vp)])),
#D rule := (P, C) -> C[1]*C[2],
switch := false
),
IxLxI_loop1 := rec(
info := "I x L x I loop1",
forTransposition := false,
applicable := nt -> not nt.hasTags(),
apply := (nt, c, cnt) -> let(
m := nt.params[2], n := nt.params[1]/nt.params[2], j:=Ind(m), fid := fId(n), fbase := fBase(m,j),
gath := Gath(fTensor(fid, fbase)), scat := Scat(fTensor(fbase, fid)),
c0 := [ISum(j, m, scat*gath)],
c1 := When(nt.params[3]=1, [], [I(nt.params[3])]),
c2 := When(nt.params[4]=1, [], [I(nt.params[4])]),
Tensor(Concat(c1,c0,c2))
),
#D isApplicable := P -> Length(P[5]) = 0,
#D rule := (P, C) -> let(m:=P[2], n:=P[1]/P[2], j:=Ind(m), fid := fId(n), fbase := fBase(m,j),
#D gath := Gath(fTensor(fid, fbase)), scat := Scat(fTensor(fbase, fid)),
#D C0 := [ISum(j, m, scat*gath)], C1:=When(P[3]=1, [], [I(P[3])]), C2:=When(P[4]=1, [], [I(P[4])]), Tensor(Concat(C1, C0, C2))),
switch := false
),
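# Informal example (not part of the rule set): for TL(6, 2, 1, 1) (m = 2, n = 3, l = r = 1)
# this rule yields the summation
#   ISum(j, 2, Scat(fTensor(fBase(2, j), fId(3))) * Gath(fTensor(fId(3), fBase(2, j))))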
IxLxI_loop2 := rec(
info := "I x L x I loop2",
forTransposition := false,
applicable := nt -> not nt.hasTags(),
apply := (nt, c, cnt) -> let(
m := nt.params[2], n := nt.params[1]/nt.params[2], j:=Ind(m), fid := fId(n), fbase := fBase(m,j),
gath := Gath(fTensor(fbase, fid)), scat := Scat(fTensor(fid, fbase)),
c0 := [ISum(j, m, scat*gath)],
c1 := When(nt.params[3]=1, [], [I(nt.params[3])]),
c2 := When(nt.params[4]=1, [], [I(nt.params[4])]),
Tensor(Concat(c1,c0,c2))
),
#D isApplicable := P -> Length(P[5]) = 0,
#D rule := (P, C) -> let(m:=P[2], n:=P[1]/P[2], j:=Ind(n), fid := fId(m), fbase := fBase(n,j),
#D gath := Gath(fTensor(fbase, fid)), scat := Scat(fTensor(fid, fbase)),
#D C0 := [ISum(j, n, scat*gath)], C1:=When(P[3]=1, [], [I(P[3])]), C2:=When(P[4]=1, [], [I(P[4])]), Tensor(Concat(C1, C0, C2))),
switch := false
)
));
###################################################################
NewRulesFor(TICompose, rec(
TICompose_unroll := rec(
forTransposition := false,
applicable := nt -> true,
children := nt -> [[
TCompose(
List([0..nt.params[2]-1], i -> RulesStrengthReduce(SubstBottomUp(Copy(nt.params[3]), nt.params[1], e -> V(i))))
).withTags(nt.getTags())
]],
apply := (nt, c, cnt) -> c[1]
)
));
NewRulesFor(TDR, rec(
TDR_base := rec(
forTransposition := false,
applicable := nt -> true,
apply := (nt, c, cnt) -> DR(nt.params[1], nt.params[2])
#D isApplicable := True,
#D rule := (P, C) -> DR(P[1], P[2])
)
));
NewRulesFor(TGath, rec(
TGath_base := rec(
applicable := True,
apply := (t, C, nt) -> t.terminate()
)
));
NewRulesFor(TScat, rec(
TScat_base := rec(
applicable := True,
apply := (t, C, nt) -> t.terminate()
)
));
NewRulesFor(TConj, rec(
TConj_tag := rec(
applicable := True,
children := t -> [[ t.params[1].withTags(t.getTags()) ]],
apply := (t, C, nt) -> ConjLR(C[1], t.params[2], t.params[3])
),
TConj_perm := rec(
applicable := True,
_cvtPerm := (t,p, use_tl) -> Cond(
ObjId(p) = fId,
I(p.params[1]),
ObjId(p) = L and use_tl,
TL(p.params[1], p.params[2], 1, 1).withTags(t.getTags()),
# else
FormatPrm(p)
),
# one degree of freedom -- use TL (true) or use FormatPrm(L) (false)
freedoms := (self, t) >> [[ true, false ]],
child := (self, t, fr) >> [
self._cvtPerm(t, t.params[2], fr[1]),
t.params[1].withTags(t.getTags()),
self._cvtPerm(t, t.params[3], fr[1])
],
apply := (self, t, C, Nonterms) >> C[1]*C[2]*C[3]
),
TConj_cplx := rec(
applicable := t -> t.params[1] _is TRC,
_cvtPerm := (t, p) -> Cond(
ObjId(p) = fId,
I(p.params[1]),
ObjId(p) = L,
TL(p.params[1], p.params[2], 1, 1).withTags(t.getTags()),
# else
FormatPrm(p)
),
freedoms := (self, t) >> [],
child := (self, t, fr) >> [
self._cvtPerm(t, t.params[2]),
t.params[1].withTags(List(t.getTags(), t->Cond(t.kind()=spiral.paradigms.vector.AVecReg, spiral.paradigms.vector.AVecRegCx(t.isa.cplx()), t))),
self._cvtPerm(t, t.params[3])
],
apply := (self, t, C, Nonterms) >> C[1]*C[2]*C[3]
)
));
#########################################################################
NewRulesFor(TTensorInd, rec(
# base cases
# I x A
dsA_base := rec(
info := "IxA base",
forTransposition := false,
applicable := nt -> not nt.hasTags() and IsParPar(nt.params),
children := nt -> [[ nt.params[1], InfoNt(nt.params[2]) ]],
apply := (nt, c, cnt) -> IDirSum(cnt[2].params[1], c[1])
),
# A x I
L_dsA_L_base := rec(
info := "AxI base",
forTransposition := false,
applicable := nt -> not nt.hasTags() and IsVecVec(nt.params),
children := nt -> [[ nt.params[1], InfoNt(nt.params[2]) ]],
apply := (nt, c, cnt) ->
L(c[1].dims()[1] * nt.params[2].range, c[1].dims()[1]) *
IDirSum(cnt[2].params[1], c[1]) *
L(c[1].dims()[2] * nt.params[2].range, nt.params[2].range)
),
# (I x A)L
dsA_L_base := rec(
info := "(IxA)L base",
forTransposition := false,
applicable := nt -> not nt.hasTags() and IsParVec(nt.params),
children := nt -> [[ nt.params[1], InfoNt(nt.params[2]) ]],
apply := (nt, c, cnt) ->
IDirSum(cnt[2].params[1], c[1]) *
L(c[1].dims()[2] * nt.params[2].range, nt.params[2].range)
),
# L(I x A)
L_dsA_base := rec(
info := "L(IxA) base",
forTransposition := false,
applicable := nt -> not nt.hasTags() and IsVecPar(nt.params),
children := nt -> [[ nt.params[1], InfoNt(nt.params[2]) ]],
apply := (nt, c, cnt) ->
L(c[1].dims()[1] * nt.params[2].range, c[1].dims()[1]) *
IDirSum(cnt[2].params[1], c[1])
)
));
|
As Wilde's work came to be read and performed again, it was The Importance of Being Earnest that received the most productions. By the time of its centenary the journalist Mark Lawson described it as "the second most known and quoted play in English after Hamlet."
|
==== 2006 ====
|
How many different numbers can be formed?
73. A cylindrical overhead tank of radius 2 m and height 7 m is to be filled from an underground tank of size 5.5 m x 4 m x 6 m. What portion of the underground tank is still filled with water after the overhead tank is filled completely?
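Possible solution (sketch): volume of the overhead tank = (22/7) x 2^2 x 7 = 88 m^3; volume of the underground tank = 5.5 x 4 x 6 = 132 m^3. Water left = 132 - 88 = 44 m^3, i.e. 44/132 = 1/3 of the underground tank.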
75. A and B walk around a circular park. They start at 8.00 a.m. from the same point in opposite directions. A and B walk at speeds of 2 rounds per hour and 3 rounds per hour respectively. How many times shall they cross each other after 8.00 a.m. and before 9.30 a.m.?
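Possible solution (sketch): walking in opposite directions, A and B together cover 2 + 3 = 5 rounds per hour, so they cross every 12 minutes: at 8.12, 8.24, 8.36, 8.48, 9.00, 9.12 and 9.24, i.e. 7 times.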
76. W can do 25% of a piece of work in 30 days, X can do 1/4 of the work in 10 days, Y can do 40% of the work in 40 days and Z can do 1/3 of the work in 13 days. Who will complete the work first?
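Possible solution (sketch): the full-work times are W: 30/0.25 = 120 days, X: 10 x 4 = 40 days, Y: 40/0.40 = 100 days, Z: 13 x 3 = 39 days, so Z completes the work first.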
77. The average monthly income of a person in a certain family of 5 is Rs. 10,000. What will be the average monthly income of a person in the same family if the income of one person is increased by Rs. 1,20,000 per year?
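Possible solution (sketch): the present total monthly income is 5 x 10,000 = Rs. 50,000. An increase of Rs. 1,20,000 per year is Rs. 10,000 per month, so the new average is 60,000 / 5 = Rs. 12,000.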
78. In a race, a competitor has to collect 6 apples which are kept in a straight line on a track, and a bucket is placed at the beginning of the track, which is the starting point. The condition is that the competitor can pick only one apple at a time, run back with it and drop it in the bucket. If he has to drop all the apples in the bucket, what total distance does he have to run if the bucket is 5 meters from the first apple and all other apples are placed 3 meters apart?
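Possible solution (sketch): the apples lie 5, 8, 11, 14, 17 and 20 meters from the bucket, and each apple needs a round trip, so the total distance is 2 x (5 + 8 + 11 + 14 + 17 + 20) = 2 x 75 = 150 meters.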
79. A round archery target of diameter 1 m is marked with four scoring regions from the centre outwards as red, blue, yellow and white. The radius of the red band is 0.20 m. The width of all the remaining bands is equal. If archers shoot arrows towards the target, what is the probability that an arrow falls in the red region of the target?
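Possible solution (sketch): the target radius is 0.5 m, so the three outer bands share 0.5 - 0.2 = 0.3 m, i.e. 0.1 m each. Assuming arrows land uniformly over the target, P(red) = (0.2/0.5)^2 = 4/25 = 0.16.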
80. A person allows a 10% discount for cash payment on the marked price of a toy and still makes a 10% gain. What is the cost price of the toy, which is marked Rs. 770?
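Possible solution (sketch): the selling price is 770 x 0.9 = Rs. 693; since this already includes a 10% gain, the cost price is 693 / 1.1 = Rs. 630.
|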
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Yury G. Kudryashov, Scott Morrison
-/
import algebra.big_operators.finsupp
import linear_algebra.finsupp
import algebra.non_unital_alg_hom
/-!
# Monoid algebras
When the domain of a `finsupp` has a multiplicative or additive structure, we can define
a convolution product. To mathematicians this structure is known as the "monoid algebra",
i.e. the finite formal linear combinations over a given semiring of elements of the monoid.
The "group ring" ℤ[G] or the "group algebra" k[G] are typical uses.
In fact the construction of the "monoid algebra" makes sense when `G` is not even a monoid, but
merely a magma, i.e., when `G` carries a multiplication which is not required to satisfy any
conditions at all. In this case the construction yields a not-necessarily-unital,
not-necessarily-associative algebra but it is still adjoint to the forgetful functor from such
algebras to magmas, and we prove this as `monoid_algebra.lift_magma`.
In this file we define `monoid_algebra k G := G →₀ k`, and `add_monoid_algebra k G`
in the same way, and then define the convolution product on these.
When the domain is additive, this is used to define polynomials:
```
polynomial α := add_monoid_algebra ℕ α
mv_polynomial σ α := add_monoid_algebra (σ →₀ ℕ) α
```
When the domain is multiplicative, e.g. a group, this will be used to define the group ring.
## Implementation note
Unfortunately because additive and multiplicative structures both appear in both cases,
it doesn't appear to be possible to make much use of `to_additive`, and we just settle for
saying everything twice.
Similarly, I attempted to just define
`add_monoid_algebra k G := monoid_algebra k (multiplicative G)`, but the definitional equality
`multiplicative G = G` leaks through everywhere, and seems impossible to use.
-/
noncomputable theory
open_locale classical big_operators
open finset finsupp
universes u₁ u₂ u₃
variables (k : Type u₁) (G : Type u₂)
/-! ### Multiplicative monoids -/
section
variables [semiring k]
/--
The monoid algebra over a semiring `k` generated by the monoid `G`.
It is the type of finite formal `k`-linear combinations of terms of `G`,
endowed with the convolution product.
-/
@[derive [inhabited, add_comm_monoid]]
def monoid_algebra : Type (max u₁ u₂) := G →₀ k
instance : has_coe_to_fun (monoid_algebra k G) (λ _, G → k) :=
finsupp.has_coe_to_fun
end
namespace monoid_algebra
variables {k G}
section has_mul
variables [semiring k] [has_mul G]
/-- The product of `f g : monoid_algebra k G` is the finitely supported function
whose value at `a` is the sum of `f x * g y` over all pairs `x, y`
such that `x * y = a`. (Think of the group ring of a group.) -/
instance : has_mul (monoid_algebra k G) :=
⟨λf g, f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ * a₂) (b₁ * b₂)⟩
lemma mul_def {f g : monoid_algebra k G} :
f * g = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ * a₂) (b₁ * b₂)) :=
rfl
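-- For example, on basis elements this product simply multiplies the group parts
-- and the coefficients: `single a₁ b₁ * single a₂ b₂ = single (a₁ * a₂) (b₁ * b₂)`;
-- this is proved as `single_mul_single` later in this file.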
instance : non_unital_non_assoc_semiring (monoid_algebra k G) :=
{ zero := 0,
mul := (*),
add := (+),
left_distrib := assume f g h, by simp only [mul_def, sum_add_index, mul_add, mul_zero,
single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_add],
right_distrib := assume f g h, by simp only [mul_def, sum_add_index, add_mul, zero_mul,
single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_zero,
sum_add],
zero_mul := assume f, by simp only [mul_def, sum_zero_index],
mul_zero := assume f, by simp only [mul_def, sum_zero_index, sum_zero],
.. finsupp.add_comm_monoid }
end has_mul
section semigroup
variables [semiring k] [semigroup G]
instance : non_unital_semiring (monoid_algebra k G) :=
{ zero := 0,
mul := (*),
add := (+),
mul_assoc := assume f g h, by simp only [mul_def, sum_sum_index, sum_zero_index, sum_add_index,
sum_single_index, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff,
add_mul, mul_add, add_assoc, mul_assoc, zero_mul, mul_zero, sum_zero, sum_add],
.. monoid_algebra.non_unital_non_assoc_semiring}
end semigroup
section has_one
variables [semiring k] [has_one G]
/-- The unit of the multiplication is `single 1 1`, i.e. the function
that is `1` at `1` and zero elsewhere. -/
instance : has_one (monoid_algebra k G) :=
⟨single 1 1⟩
lemma one_def : (1 : monoid_algebra k G) = single 1 1 :=
rfl
end has_one
section mul_one_class
variables [semiring k] [mul_one_class G]
instance : non_assoc_semiring (monoid_algebra k G) :=
{ one := 1,
mul := (*),
zero := 0,
add := (+),
one_mul := assume f, by simp only [mul_def, one_def, sum_single_index, zero_mul,
single_zero, sum_zero, zero_add, one_mul, sum_single],
mul_one := assume f, by simp only [mul_def, one_def, sum_single_index, mul_zero,
single_zero, sum_zero, add_zero, mul_one, sum_single],
..monoid_algebra.non_unital_non_assoc_semiring }
variables {R : Type*} [semiring R]
/-- A non-commutative version of `monoid_algebra.lift`: given an additive homomorphism `f : k →+ R`
and a multiplicative monoid homomorphism `g : G →* R`, returns the additive homomorphism from
`monoid_algebra k G` such that `lift_nc f g (single a b) = f b * g a`. If `f` is a ring homomorphism
and the range of either `f` or `g` is in center of `R`, then the result is a ring homomorphism. If
`R` is a `k`-algebra and `f = algebra_map k R`, then the result is an algebra homomorphism called
`monoid_algebra.lift`. -/
def lift_nc (f : k →+ R) (g : G →* R) : monoid_algebra k G →+ R :=
lift_add_hom (λ x : G, (add_monoid_hom.mul_right (g x)).comp f)
@[simp] lemma lift_nc_single (f : k →+ R) (g : G →* R) (a : G) (b : k) :
lift_nc f g (single a b) = f b * g a :=
lift_add_hom_apply_single _ _ _
@[simp] lemma lift_nc_one (f : k →+* R) (g : G →* R) : lift_nc (f : k →+ R) g 1 = 1 :=
by simp [one_def]
lemma lift_nc_mul (f : k →+* R) (g : G →* R)
(a b : monoid_algebra k G) (h_comm : ∀ {x y}, y ∈ a.support → commute (f (b x)) (g y)) :
lift_nc (f : k →+ R) g (a * b) = lift_nc (f : k →+ R) g a * lift_nc (f : k →+ R) g b :=
begin
conv_rhs { rw [← sum_single a, ← sum_single b] },
simp_rw [mul_def, (lift_nc _ g).map_finsupp_sum, lift_nc_single, finsupp.sum_mul,
finsupp.mul_sum],
refine finset.sum_congr rfl (λ y hy, finset.sum_congr rfl (λ x hx, _)),
simp [mul_assoc, (h_comm hy).left_comm]
end
end mul_one_class
/-! #### Semiring structure -/
section semiring
variables [semiring k] [monoid G]
instance : semiring (monoid_algebra k G) :=
{ one := 1,
mul := (*),
zero := 0,
add := (+),
.. monoid_algebra.non_unital_semiring,
.. monoid_algebra.non_assoc_semiring }
variables {R : Type*} [semiring R]
/-- `lift_nc` as a `ring_hom`, for when `f x` and `g y` commute -/
def lift_nc_ring_hom (f : k →+* R) (g : G →* R) (h_comm : ∀ x y, commute (f x) (g y)) :
monoid_algebra k G →+* R :=
{ to_fun := lift_nc (f : k →+ R) g,
map_one' := lift_nc_one _ _,
map_mul' := λ a b, lift_nc_mul _ _ _ _ $ λ _ _ _, h_comm _ _,
..(lift_nc (f : k →+ R) g)}
end semiring
instance [comm_semiring k] [comm_monoid G] : comm_semiring (monoid_algebra k G) :=
{ mul_comm := assume f g,
begin
simp only [mul_def, finsupp.sum, mul_comm],
rw [finset.sum_comm],
simp only [mul_comm]
end,
.. monoid_algebra.semiring }
instance [semiring k] [nontrivial k] [nonempty G] : nontrivial (monoid_algebra k G) :=
finsupp.nontrivial
/-! #### Derived instances -/
section derived_instances
instance [semiring k] [subsingleton k] : unique (monoid_algebra k G) :=
finsupp.unique_of_right
instance [ring k] : add_group (monoid_algebra k G) :=
finsupp.add_group
instance [ring k] [monoid G] : ring (monoid_algebra k G) :=
{ neg := has_neg.neg,
add_left_neg := add_left_neg,
.. monoid_algebra.semiring }
instance [comm_ring k] [comm_monoid G] : comm_ring (monoid_algebra k G) :=
{ mul_comm := mul_comm, .. monoid_algebra.ring}
variables {R S : Type*}
instance [monoid R] [semiring k] [distrib_mul_action R k] :
has_scalar R (monoid_algebra k G) :=
finsupp.has_scalar
instance [monoid R] [semiring k] [distrib_mul_action R k] :
distrib_mul_action R (monoid_algebra k G) :=
finsupp.distrib_mul_action G k
instance [semiring R] [semiring k] [module R k] :
module R (monoid_algebra k G) :=
finsupp.module G k
instance [monoid R] [semiring k] [distrib_mul_action R k] [has_faithful_scalar R k] [nonempty G] :
has_faithful_scalar R (monoid_algebra k G) :=
finsupp.has_faithful_scalar
instance [monoid R] [monoid S] [semiring k] [distrib_mul_action R k] [distrib_mul_action S k]
[has_scalar R S] [is_scalar_tower R S k] :
is_scalar_tower R S (monoid_algebra k G) :=
finsupp.is_scalar_tower G k
instance [monoid R] [monoid S] [semiring k] [distrib_mul_action R k] [distrib_mul_action S k]
[smul_comm_class R S k] :
smul_comm_class R S (monoid_algebra k G) :=
finsupp.smul_comm_class G k
instance comap_distrib_mul_action_self [group G] [semiring k] :
distrib_mul_action G (monoid_algebra k G) :=
finsupp.comap_distrib_mul_action_self
end derived_instances
section misc_theorems
variables [semiring k]
local attribute [reducible] monoid_algebra
lemma mul_apply [has_mul G] (f g : monoid_algebra k G) (x : G) :
(f * g) x = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, if a₁ * a₂ = x then b₁ * b₂ else 0) :=
begin
rw [mul_def],
simp only [finsupp.sum_apply, single_apply],
end
lemma mul_apply_antidiagonal [has_mul G] (f g : monoid_algebra k G) (x : G) (s : finset (G × G))
(hs : ∀ {p : G × G}, p ∈ s ↔ p.1 * p.2 = x) :
(f * g) x = ∑ p in s, (f p.1 * g p.2) :=
let F : G × G → k := λ p, if p.1 * p.2 = x then f p.1 * g p.2 else 0 in
calc (f * g) x = (∑ a₁ in f.support, ∑ a₂ in g.support, F (a₁, a₂)) :
mul_apply f g x
... = ∑ p in f.support.product g.support, F p : finset.sum_product.symm
... = ∑ p in (f.support.product g.support).filter (λ p : G × G, p.1 * p.2 = x), f p.1 * g p.2 :
(finset.sum_filter _ _).symm
... = ∑ p in s.filter (λ p : G × G, p.1 ∈ f.support ∧ p.2 ∈ g.support), f p.1 * g p.2 :
sum_congr (by { ext, simp only [mem_filter, mem_product, hs, and_comm] }) (λ _ _, rfl)
... = ∑ p in s, f p.1 * g p.2 : sum_subset (filter_subset _ _) $ λ p hps hp,
begin
simp only [mem_filter, mem_support_iff, not_and, not_not] at hp ⊢,
by_cases h1 : f p.1 = 0,
{ rw [h1, zero_mul] },
{ rw [hp hps h1, mul_zero] }
end
lemma support_mul [has_mul G] (a b : monoid_algebra k G) :
(a * b).support ⊆ a.support.bUnion (λa₁, b.support.bUnion $ λa₂, {a₁ * a₂}) :=
subset.trans support_sum $ bUnion_mono $ assume a₁ _,
subset.trans support_sum $ bUnion_mono $ assume a₂ _, support_single_subset
@[simp] lemma single_mul_single [has_mul G] {a₁ a₂ : G} {b₁ b₂ : k} :
(single a₁ b₁ : monoid_algebra k G) * single a₂ b₂ = single (a₁ * a₂) (b₁ * b₂) :=
(sum_single_index (by simp only [zero_mul, single_zero, sum_zero])).trans
(sum_single_index (by rw [mul_zero, single_zero]))
@[simp] lemma single_pow [monoid G] {a : G} {b : k} :
∀ n : ℕ, (single a b : monoid_algebra k G)^n = single (a^n) (b ^ n)
| 0 := by { simp only [pow_zero], refl }
| (n+1) := by simp only [pow_succ, single_pow n, single_mul_single]
section
/-- Like `finsupp.map_domain_add`, but for the convolutive multiplication we define in this file -/
lemma map_domain_mul {α : Type*} {β : Type*} {α₂ : Type*} [semiring β] [has_mul α] [has_mul α₂]
{x y : monoid_algebra β α} (f : mul_hom α α₂) :
(map_domain f (x * y : monoid_algebra β α) : monoid_algebra β α₂) =
(map_domain f x * map_domain f y : monoid_algebra β α₂) :=
begin
simp_rw [mul_def, map_domain_sum, map_domain_single, f.map_mul],
rw finsupp.sum_map_domain_index,
{ congr,
ext a b,
rw finsupp.sum_map_domain_index,
{ simp },
{ simp [mul_add] } },
{ simp },
{ simp [add_mul] }
end
variables (k G)
/-- The embedding of a magma into its magma algebra. -/
@[simps] def of_magma [has_mul G] : mul_hom G (monoid_algebra k G) :=
{ to_fun := λ a, single a 1,
map_mul' := λ a b, by simp only [mul_def, mul_one, sum_single_index, single_eq_zero, mul_zero], }
/-- The embedding of a unital magma into its magma algebra. -/
@[simps] def of [mul_one_class G] : G →* monoid_algebra k G :=
{ to_fun := λ a, single a 1,
map_one' := rfl,
.. of_magma k G }
end
lemma of_injective [mul_one_class G] [nontrivial k] : function.injective (of k G) :=
λ a b h, by simpa using (single_eq_single_iff _ _ _ _).mp h
lemma mul_single_apply_aux [has_mul G] (f : monoid_algebra k G) {r : k}
{x y z : G} (H : ∀ a, a * x = z ↔ a = y) :
(f * single x r) z = f y * r :=
have A : ∀ a₁ b₁, (single x r).sum (λ a₂ b₂, ite (a₁ * a₂ = z) (b₁ * b₂) 0) =
ite (a₁ * x = z) (b₁ * r) 0,
from λ a₁ b₁, sum_single_index $ by simp,
calc (f * single x r) z = sum f (λ a b, if (a = y) then (b * r) else 0) :
-- different `decidable` instances make it not trivial
by { simp only [mul_apply, A, H], congr, funext, split_ifs; refl }
... = if y ∈ f.support then f y * r else 0 : f.support.sum_ite_eq' _ _
... = f y * r : by split_ifs with h; simp at h; simp [h]
lemma mul_single_one_apply [mul_one_class G] (f : monoid_algebra k G) (r : k) (x : G) :
(f * single 1 r) x = f x * r :=
f.mul_single_apply_aux $ λ a, by rw [mul_one]
lemma support_mul_single [right_cancel_semigroup G]
(f : monoid_algebra k G) (r : k) (hr : ∀ y, y * r = 0 ↔ y = 0) (x : G) :
(f * single x r).support = f.support.map (mul_right_embedding x) :=
begin
ext y, simp only [mem_support_iff, mem_map, exists_prop, mul_right_embedding_apply],
by_cases H : ∃ a, a * x = y,
{ rcases H with ⟨a, rfl⟩,
rw [mul_single_apply_aux f (λ _, mul_left_inj x)],
simp [hr] },
{ push_neg at H,
simp [mul_apply, H] }
end
lemma single_mul_apply_aux [has_mul G] (f : monoid_algebra k G) {r : k} {x y z : G}
(H : ∀ a, x * a = y ↔ a = z) :
(single x r * f) y = r * f z :=
have f.sum (λ a b, ite (x * a = y) (0 * b) 0) = 0, by simp,
calc (single x r * f) y = sum f (λ a b, ite (x * a = y) (r * b) 0) :
(mul_apply _ _ _).trans $ sum_single_index this
... = f.sum (λ a b, ite (a = z) (r * b) 0) :
by { simp only [H], congr' with g s, split_ifs; refl }
... = if z ∈ f.support then (r * f z) else 0 : f.support.sum_ite_eq' _ _
... = _ : by split_ifs with h; simp at h; simp [h]
lemma single_one_mul_apply [mul_one_class G] (f : monoid_algebra k G) (r : k) (x : G) :
(single 1 r * f) x = r * f x :=
f.single_mul_apply_aux $ λ a, by rw [one_mul]
lemma support_single_mul [left_cancel_semigroup G]
(f : monoid_algebra k G) (r : k) (hr : ∀ y, r * y = 0 ↔ y = 0) (x : G) :
(single x r * f).support = f.support.map (mul_left_embedding x) :=
begin
ext y, simp only [mem_support_iff, mem_map, exists_prop, mul_left_embedding_apply],
by_cases H : ∃ a, x * a = y,
{ rcases H with ⟨a, rfl⟩,
rw [single_mul_apply_aux f (λ _, mul_right_inj x)],
simp [hr] },
{ push_neg at H,
simp [mul_apply, H] }
end
lemma lift_nc_smul [mul_one_class G] {R : Type*} [semiring R] (f : k →+* R) (g : G →* R) (c : k)
(φ : monoid_algebra k G) :
lift_nc (f : k →+ R) g (c • φ) = f c * lift_nc (f : k →+ R) g φ :=
begin
suffices : (lift_nc ↑f g).comp (smul_add_hom k (monoid_algebra k G) c) =
(add_monoid_hom.mul_left (f c)).comp (lift_nc ↑f g),
from add_monoid_hom.congr_fun this φ,
ext a b, simp [mul_assoc]
end
end misc_theorems
/-! #### Non-unital, non-associative algebra structure -/
section non_unital_non_assoc_algebra
variables {R : Type*} (k) [semiring R] [semiring k] [distrib_mul_action R k] [has_mul G]
instance is_scalar_tower_self [is_scalar_tower R k k] :
is_scalar_tower R (monoid_algebra k G) (monoid_algebra k G) :=
⟨λ t a b,
begin
ext m,
simp only [mul_apply, finsupp.smul_sum, smul_ite, smul_mul_assoc, sum_smul_index', zero_mul,
if_t_t, implies_true_iff, eq_self_iff_true, sum_zero, coe_smul, smul_eq_mul, pi.smul_apply,
smul_zero],
end⟩
/-- Note that if `k` is a `comm_semiring` then we have `smul_comm_class k k k` and so we can take
`R = k` in the below. In other words, if the coefficients are commutative amongst themselves, they
also commute with the algebra multiplication. -/
instance smul_comm_class_self [smul_comm_class R k k] :
smul_comm_class R (monoid_algebra k G) (monoid_algebra k G) :=
⟨λ t a b,
begin
ext m,
simp only [mul_apply, finsupp.sum, finset.smul_sum, smul_ite, mul_smul_comm, sum_smul_index',
implies_true_iff, eq_self_iff_true, coe_smul, ite_eq_right_iff, smul_eq_mul, pi.smul_apply,
mul_zero, smul_zero],
end⟩
instance smul_comm_class_symm_self [smul_comm_class k R k] :
smul_comm_class (monoid_algebra k G) R (monoid_algebra k G) :=
⟨λ t a b, by { haveI := smul_comm_class.symm k R k, rw ← smul_comm, } ⟩
variables {A : Type u₃} [non_unital_non_assoc_semiring A]
/-- A non_unital `k`-algebra homomorphism from `monoid_algebra k G` is uniquely defined by its
values on the functions `single a 1`. -/
lemma non_unital_alg_hom_ext [distrib_mul_action k A]
{φ₁ φ₂ : non_unital_alg_hom k (monoid_algebra k G) A}
(h : ∀ x, φ₁ (single x 1) = φ₂ (single x 1)) : φ₁ = φ₂ :=
non_unital_alg_hom.to_distrib_mul_action_hom_injective $
finsupp.distrib_mul_action_hom_ext' $
λ a, distrib_mul_action_hom.ext_ring (h a)
/-- See note [partially-applied ext lemmas]. -/
@[ext] lemma non_unital_alg_hom_ext' [distrib_mul_action k A]
{φ₁ φ₂ : non_unital_alg_hom k (monoid_algebra k G) A}
(h : φ₁.to_mul_hom.comp (of_magma k G) = φ₂.to_mul_hom.comp (of_magma k G)) : φ₁ = φ₂ :=
non_unital_alg_hom_ext k $ mul_hom.congr_fun h
/-- The functor `G ↦ monoid_algebra k G`, from the category of magmas to the category of non-unital,
non-associative algebras over `k` is adjoint to the forgetful functor in the other direction. -/
@[simps] def lift_magma [module k A] [is_scalar_tower k A A] [smul_comm_class k A A] :
mul_hom G A ≃ non_unital_alg_hom k (monoid_algebra k G) A :=
{ to_fun := λ f,
{ to_fun := λ a, a.sum (λ m t, t • f m),
map_smul' := λ t' a,
begin
rw [finsupp.smul_sum, sum_smul_index'],
{ simp_rw smul_assoc, },
{ intros m, exact zero_smul k (f m), },
end,
map_mul' := λ a₁ a₂,
begin
let g : G → k → A := λ m t, t • f m,
have h₁ : ∀ m, g m 0 = 0, { intros, exact zero_smul k (f m), },
have h₂ : ∀ m (t₁ t₂ : k), g m (t₁ + t₂) = g m t₁ + g m t₂, { intros, rw ← add_smul, },
simp_rw [finsupp.mul_sum, finsupp.sum_mul, smul_mul_smul, ← f.map_mul, mul_def,
sum_comm a₂ a₁, sum_sum_index h₁ h₂, sum_single_index (h₁ _)],
end,
.. lift_add_hom (λ x, (smul_add_hom k A).flip (f x)) },
inv_fun := λ F, F.to_mul_hom.comp (of_magma k G),
left_inv := λ f, by { ext m, simp only [non_unital_alg_hom.coe_mk, of_magma_apply,
non_unital_alg_hom.to_mul_hom_eq_coe, sum_single_index, function.comp_app, one_smul, zero_smul,
mul_hom.coe_comp, non_unital_alg_hom.coe_to_mul_hom], },
right_inv := λ F, by { ext m, simp only [non_unital_alg_hom.coe_mk, of_magma_apply,
non_unital_alg_hom.to_mul_hom_eq_coe, sum_single_index, function.comp_app, one_smul, zero_smul,
mul_hom.coe_comp, non_unital_alg_hom.coe_to_mul_hom], }, }
end non_unital_non_assoc_algebra
/-! #### Algebra structure -/
section algebra
local attribute [reducible] monoid_algebra
lemma single_one_comm [comm_semiring k] [mul_one_class G] (r : k) (f : monoid_algebra k G) :
single 1 r * f = f * single 1 r :=
by { ext, rw [single_one_mul_apply, mul_single_one_apply, mul_comm] }
/-- `finsupp.single 1` as a `ring_hom` -/
@[simps] def single_one_ring_hom [semiring k] [monoid G] : k →+* monoid_algebra k G :=
{ map_one' := rfl,
map_mul' := λ x y, by rw [single_add_hom, single_mul_single, one_mul],
..finsupp.single_add_hom 1}
/-- If two ring homomorphisms from `monoid_algebra k G` are equal on all `single a 1`
and `single 1 b`, then they are equal. -/
lemma ring_hom_ext {R} [semiring k] [monoid G] [semiring R]
{f g : monoid_algebra k G →+* R} (h₁ : ∀ b, f (single 1 b) = g (single 1 b))
(h_of : ∀ a, f (single a 1) = g (single a 1)) : f = g :=
ring_hom.coe_add_monoid_hom_injective $ add_hom_ext $ λ a b,
by rw [← one_mul a, ← mul_one b, ← single_mul_single, f.coe_add_monoid_hom,
g.coe_add_monoid_hom, f.map_mul, g.map_mul, h₁, h_of]
/-- If two ring homomorphisms from `monoid_algebra k G` are equal on all `single a 1`
and `single 1 b`, then they are equal.
See note [partially-applied ext lemmas]. -/
@[ext] lemma ring_hom_ext' {R} [semiring k] [monoid G] [semiring R]
{f g : monoid_algebra k G →+* R} (h₁ : f.comp single_one_ring_hom = g.comp single_one_ring_hom)
(h_of : (f : monoid_algebra k G →* R).comp (of k G) =
(g : monoid_algebra k G →* R).comp (of k G)) :
f = g :=
ring_hom_ext (ring_hom.congr_fun h₁) (monoid_hom.congr_fun h_of)
/--
The instance `algebra k (monoid_algebra A G)` whenever we have `algebra k A`.
In particular this provides the instance `algebra k (monoid_algebra k G)`.
-/
instance {A : Type*} [comm_semiring k] [semiring A] [algebra k A] [monoid G] :
algebra k (monoid_algebra A G) :=
{ smul_def' := λ r a, by { ext, simp [single_one_mul_apply, algebra.smul_def, pi.smul_apply], },
commutes' := λ r f, by { ext, simp [single_one_mul_apply, mul_single_one_apply,
algebra.commutes], },
..single_one_ring_hom.comp (algebra_map k A) }
/-- `finsupp.single 1` as an `alg_hom` -/
@[simps]
def single_one_alg_hom {A : Type*} [comm_semiring k] [semiring A] [algebra k A] [monoid G] :
A →ₐ[k] monoid_algebra A G :=
{ commutes' := λ r, by { ext, simp, refl, }, ..single_one_ring_hom}
@[simp] lemma coe_algebra_map {A : Type*} [comm_semiring k] [semiring A] [algebra k A] [monoid G] :
⇑(algebra_map k (monoid_algebra A G)) = single 1 ∘ (algebra_map k A) :=
rfl
lemma single_eq_algebra_map_mul_of [comm_semiring k] [monoid G] (a : G) (b : k) :
single a b = algebra_map k (monoid_algebra k G) b * of k G a :=
by simp
lemma single_algebra_map_eq_algebra_map_mul_of {A : Type*} [comm_semiring k] [semiring A]
[algebra k A] [monoid G] (a : G) (b : k) :
single a (algebra_map k A b) = algebra_map k (monoid_algebra A G) b * of A G a :=
by simp
lemma induction_on [semiring k] [monoid G] {p : monoid_algebra k G → Prop} (f : monoid_algebra k G)
(hM : ∀ g, p (of k G g)) (hadd : ∀ f g : monoid_algebra k G, p f → p g → p (f + g))
(hsmul : ∀ (r : k) f, p f → p (r • f)) : p f :=
begin
refine finsupp.induction_linear f _ (λ f g hf hg, hadd f g hf hg) (λ g r, _),
{ simpa using hsmul 0 (of k G 1) (hM 1) },
{ convert hsmul r (of k G g) (hM g),
simp only [mul_one, smul_single', of_apply] },
end
end algebra
section lift
variables {k G} [comm_semiring k] [monoid G]
variables {A : Type u₃} [semiring A] [algebra k A] {B : Type*} [semiring B] [algebra k B]
/-- `lift_nc_ring_hom` as an `alg_hom`, for when `f` is an `alg_hom` -/
def lift_nc_alg_hom (f : A →ₐ[k] B) (g : G →* B) (h_comm : ∀ x y, commute (f x) (g y)) :
monoid_algebra A G →ₐ[k] B :=
{ to_fun := lift_nc_ring_hom (f : A →+* B) g h_comm,
commutes' := by simp [lift_nc_ring_hom],
..(lift_nc_ring_hom (f : A →+* B) g h_comm)}
/-- A `k`-algebra homomorphism from `monoid_algebra k G` is uniquely defined by its
values on the functions `single a 1`. -/
lemma alg_hom_ext ⦃φ₁ φ₂ : monoid_algebra k G →ₐ[k] A⦄
(h : ∀ x, φ₁ (single x 1) = φ₂ (single x 1)) : φ₁ = φ₂ :=
alg_hom.to_linear_map_injective $ finsupp.lhom_ext' $ λ a, linear_map.ext_ring (h a)
/-- See note [partially-applied ext lemmas]. -/
@[ext] lemma alg_hom_ext' ⦃φ₁ φ₂ : monoid_algebra k G →ₐ[k] A⦄
(h : (φ₁ : monoid_algebra k G →* A).comp (of k G) =
(φ₂ : monoid_algebra k G →* A).comp (of k G)) : φ₁ = φ₂ :=
alg_hom_ext $ monoid_hom.congr_fun h
variables (k G A)
/-- Any monoid homomorphism `G →* A` can be lifted to an algebra homomorphism
`monoid_algebra k G →ₐ[k] A`. -/
def lift : (G →* A) ≃ (monoid_algebra k G →ₐ[k] A) :=
{ inv_fun := λ f, (f : monoid_algebra k G →* A).comp (of k G),
to_fun := λ F, lift_nc_alg_hom (algebra.of_id k A) F $ λ _ _, algebra.commutes _ _,
left_inv := λ f, by { ext, simp [lift_nc_alg_hom, lift_nc_ring_hom] },
right_inv := λ F, by { ext, simp [lift_nc_alg_hom, lift_nc_ring_hom] } }
variables {k G A}
lemma lift_apply' (F : G →* A) (f : monoid_algebra k G) :
lift k G A F f = f.sum (λ a b, (algebra_map k A b) * F a) := rfl
lemma lift_apply (F : G →* A) (f : monoid_algebra k G) :
lift k G A F f = f.sum (λ a b, b • F a) :=
by simp only [lift_apply', algebra.smul_def]
lemma lift_def (F : G →* A) :
⇑(lift k G A F) = lift_nc ((algebra_map k A : k →+* A) : k →+ A) F :=
rfl
@[simp] lemma lift_symm_apply (F : monoid_algebra k G →ₐ[k] A) (x : G) :
(lift k G A).symm F x = F (single x 1) := rfl
lemma lift_of (F : G →* A) (x) :
lift k G A F (of k G x) = F x :=
by rw [of_apply, ← lift_symm_apply, equiv.symm_apply_apply]
@[simp] lemma lift_single (F : G →* A) (a b) :
lift k G A F (single a b) = b • F a :=
by rw [lift_def, lift_nc_single, algebra.smul_def, ring_hom.coe_add_monoid_hom]
lemma lift_unique' (F : monoid_algebra k G →ₐ[k] A) :
F = lift k G A ((F : monoid_algebra k G →* A).comp (of k G)) :=
((lift k G A).apply_symm_apply F).symm
/-- Decomposition of a `k`-algebra homomorphism from `monoid_algebra k G` by
its values on `F (single a 1)`. -/
lemma lift_unique (F : monoid_algebra k G →ₐ[k] A) (f : monoid_algebra k G) :
F f = f.sum (λ a b, b • F (single a 1)) :=
by conv_lhs { rw lift_unique' F, simp [lift_apply] }
end lift
section
local attribute [reducible] monoid_algebra
variables (k)
/-- When `V` is a `k[G]`-module, multiplication by a group element `g` is a `k`-linear map. -/
def group_smul.linear_map [monoid G] [comm_semiring k]
(V : Type u₃) [add_comm_monoid V] [module k V] [module (monoid_algebra k G) V]
[is_scalar_tower k (monoid_algebra k G) V] (g : G) :
V →ₗ[k] V :=
{ to_fun := λ v, (single g (1 : k) • v : V),
map_add' := λ x y, smul_add (single g (1 : k)) x y,
map_smul' := λ c x, smul_algebra_smul_comm _ _ _ }
@[simp]
lemma group_smul.linear_map_apply [monoid G] [comm_semiring k]
(V : Type u₃) [add_comm_monoid V] [module k V] [module (monoid_algebra k G) V]
[is_scalar_tower k (monoid_algebra k G) V] (g : G) (v : V) :
(group_smul.linear_map k V g) v = (single g (1 : k) • v : V) :=
rfl
section
variables {k}
variables [monoid G] [comm_semiring k] {V W : Type u₃}
[add_comm_monoid V] [module k V] [module (monoid_algebra k G) V]
[is_scalar_tower k (monoid_algebra k G) V]
[add_comm_monoid W] [module k W] [module (monoid_algebra k G) W]
[is_scalar_tower k (monoid_algebra k G) W]
(f : V →ₗ[k] W)
(h : ∀ (g : G) (v : V), f (single g (1 : k) • v : V) = (single g (1 : k) • (f v) : W))
include h
/-- Build a `k[G]`-linear map from a `k`-linear map and evidence that it is `G`-equivariant. -/
def equivariant_of_linear_of_comm : V →ₗ[monoid_algebra k G] W :=
{ to_fun := f,
map_add' := λ v v', by simp,
map_smul' := λ c v,
begin
apply finsupp.induction c,
{ simp, },
{ intros g r c' nm nz w,
dsimp at *,
simp only [add_smul, f.map_add, w, add_left_inj, single_eq_algebra_map_mul_of, ← smul_smul],
erw [algebra_map_smul (monoid_algebra k G) r, algebra_map_smul (monoid_algebra k G) r,
f.map_smul, h g v, of_apply],
all_goals { apply_instance } }
end, }
@[simp]
lemma equivariant_of_linear_of_comm_apply (v : V) : (equivariant_of_linear_of_comm f h) v = f v :=
rfl
end
end
section
universe ui
variable {ι : Type ui}
local attribute [reducible] monoid_algebra
lemma prod_single [comm_semiring k] [comm_monoid G]
{s : finset ι} {a : ι → G} {b : ι → k} :
(∏ i in s, single (a i) (b i)) = single (∏ i in s, a i) (∏ i in s, b i) :=
finset.induction_on s rfl $ λ a s has ih, by rw [prod_insert has, ih,
single_mul_single, prod_insert has, prod_insert has]
end
section -- We now prove some additional statements that hold for group algebras.
variables [semiring k] [group G]
local attribute [reducible] monoid_algebra
@[simp]
lemma mul_single_apply (f : monoid_algebra k G) (r : k) (x y : G) :
(f * single x r) y = f (y * x⁻¹) * r :=
f.mul_single_apply_aux $ λ a, eq_mul_inv_iff_mul_eq.symm
@[simp]
lemma single_mul_apply (r : k) (x : G) (f : monoid_algebra k G) (y : G) :
(single x r * f) y = r * f (x⁻¹ * y) :=
f.single_mul_apply_aux $ λ z, eq_inv_mul_iff_mul_eq.symm
lemma mul_apply_left (f g : monoid_algebra k G) (x : G) :
(f * g) x = (f.sum $ λ a b, b * (g (a⁻¹ * x))) :=
calc (f * g) x = sum f (λ a b, (single a b * g) x) :
by rw [← finsupp.sum_apply, ← finsupp.sum_mul, f.sum_single]
... = _ : by simp only [single_mul_apply, finsupp.sum]
-- If we'd assumed `comm_semiring`, we could deduce this from `mul_apply_left`.
lemma mul_apply_right (f g : monoid_algebra k G) (x : G) :
(f * g) x = (g.sum $ λa b, (f (x * a⁻¹)) * b) :=
calc (f * g) x = sum g (λ a b, (f * single a b) x) :
by rw [← finsupp.sum_apply, ← finsupp.mul_sum, g.sum_single]
... = _ : by simp only [mul_single_apply, finsupp.sum]
end
section span
variables [semiring k] [mul_one_class G]
/-- An element of `monoid_algebra R M` is in the submodule generated by its support. -/
lemma mem_span_support (f : monoid_algebra k G) :
f ∈ submodule.span k (of k G '' (f.support : set G)) :=
by rw [of, monoid_hom.coe_mk, ← finsupp.supported_eq_span_single, finsupp.mem_supported]
end span
section opposite
open finsupp mul_opposite
variables [semiring k]
/-- The opposite of a `monoid_algebra R I` is ring equivalent to
the `monoid_algebra Rᵐᵒᵖ Iᵐᵒᵖ` over the opposite ring, taking elements to their opposite. -/
@[simps {simp_rhs := tt}] protected noncomputable def op_ring_equiv [monoid G] :
(monoid_algebra k G)ᵐᵒᵖ ≃+* monoid_algebra kᵐᵒᵖ Gᵐᵒᵖ :=
{ map_mul' := begin
dsimp only [add_equiv.to_fun_eq_coe, ←add_equiv.coe_to_add_monoid_hom],
rw add_monoid_hom.map_mul_iff,
ext i₁ r₁ i₂ r₂ : 6,
simp
end,
..op_add_equiv.symm.trans $ (finsupp.map_range.add_equiv (op_add_equiv : k ≃+ kᵐᵒᵖ)).trans $
finsupp.dom_congr op_equiv }
@[simp] lemma op_ring_equiv_single [monoid G] (r : k) (x : G) :
monoid_algebra.op_ring_equiv (op (single x r)) = single (op x) (op r) :=
by simp
@[simp] lemma op_ring_equiv_symm_single [monoid G] (r : kᵐᵒᵖ) (x : Gᵐᵒᵖ) :
monoid_algebra.op_ring_equiv.symm (single x r) = op (single x.unop r.unop) :=
by simp
end opposite
end monoid_algebra
/-! ### Additive monoids -/
section
variables [semiring k]
/--
The monoid algebra over a semiring `k` generated by the additive monoid `G`.
It is the type of finite formal `k`-linear combinations of terms of `G`,
endowed with the convolution product.
-/
@[derive [inhabited, add_comm_monoid]]
def add_monoid_algebra := G →₀ k
instance : has_coe_to_fun (add_monoid_algebra k G) (λ _, G → k) :=
finsupp.has_coe_to_fun
end
namespace add_monoid_algebra
variables {k G}
section has_mul
variables [semiring k] [has_add G]
/-- The product of `f g : add_monoid_algebra k G` is the finitely supported function
whose value at `a` is the sum of `f x * g y` over all pairs `x, y`
such that `x + y = a`. (Think of the product of multivariate
polynomials where `α` is the additive monoid of monomial exponents.) -/
instance : has_mul (add_monoid_algebra k G) :=
⟨λf g, f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ + a₂) (b₁ * b₂)⟩
lemma mul_def {f g : add_monoid_algebra k G} :
f * g = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ + a₂) (b₁ * b₂)) :=
rfl
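-- Concretely, in `add_monoid_algebra k ℕ` (i.e. polynomials, see the module docstring)
-- this gives e.g. `single 2 3 * single 5 7 = single 7 21`, mirroring
-- `(3 * X^2) * (7 * X^5) = 21 * X^7`; see `single_mul_single` below.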
instance : non_unital_non_assoc_semiring (add_monoid_algebra k G) :=
{ zero := 0,
mul := (*),
add := (+),
left_distrib := assume f g h, by simp only [mul_def, sum_add_index, mul_add, mul_zero,
single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_add],
right_distrib := assume f g h, by simp only [mul_def, sum_add_index, add_mul, mul_zero, zero_mul,
single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_zero,
sum_add],
zero_mul := assume f, by simp only [mul_def, sum_zero_index],
mul_zero := assume f, by simp only [mul_def, sum_zero_index, sum_zero],
nsmul := λ n f, n • f,
nsmul_zero' := by { intros, ext, simp [-nsmul_eq_mul, add_smul] },
nsmul_succ' := by { intros, ext, simp [-nsmul_eq_mul, nat.succ_eq_one_add, add_smul] },
.. finsupp.add_comm_monoid }
end has_mul
section has_one
variables [semiring k] [has_zero G]
/-- The unit of the multiplication is `single 0 1`, i.e. the function
that is `1` at `0` and zero elsewhere. -/
instance : has_one (add_monoid_algebra k G) :=
⟨single 0 1⟩
lemma one_def : (1 : add_monoid_algebra k G) = single 0 1 :=
rfl
end has_one
section semigroup
variables [semiring k] [add_semigroup G]
instance : non_unital_semiring (add_monoid_algebra k G) :=
{ zero := 0,
mul := (*),
add := (+),
mul_assoc := assume f g h, by simp only [mul_def, sum_sum_index, sum_zero_index, sum_add_index,
sum_single_index, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff,
add_mul, mul_add, add_assoc, mul_assoc, zero_mul, mul_zero, sum_zero, sum_add],
.. add_monoid_algebra.non_unital_non_assoc_semiring }
end semigroup
section mul_one_class
variables [semiring k] [add_zero_class G]
instance : non_assoc_semiring (add_monoid_algebra k G) :=
{ one := 1,
mul := (*),
zero := 0,
add := (+),
one_mul := assume f, by simp only [mul_def, one_def, sum_single_index, zero_mul,
single_zero, sum_zero, zero_add, one_mul, sum_single],
mul_one := assume f, by simp only [mul_def, one_def, sum_single_index, mul_zero,
single_zero, sum_zero, add_zero, mul_one, sum_single],
.. add_monoid_algebra.non_unital_non_assoc_semiring }
variables {R : Type*} [semiring R]
/-- A non-commutative version of `add_monoid_algebra.lift`: given an additive homomorphism `f : k →+
R` and a multiplicative monoid homomorphism `g : multiplicative G →* R`, returns the additive
homomorphism from `add_monoid_algebra k G` such that `lift_nc f g (single a b) = f b * g a`. If `f`
is a ring homomorphism and the range of either `f` or `g` is in center of `R`, then the result is a
ring homomorphism. If `R` is a `k`-algebra and `f = algebra_map k R`, then the result is an algebra
homomorphism called `add_monoid_algebra.lift`. -/
def lift_nc (f : k →+ R) (g : multiplicative G →* R) : add_monoid_algebra k G →+ R :=
lift_add_hom (λ x : G, (add_monoid_hom.mul_right (g $ multiplicative.of_add x)).comp f)
@[simp] lemma lift_nc_single (f : k →+ R) (g : multiplicative G →* R) (a : G) (b : k) :
lift_nc f g (single a b) = f b * g (multiplicative.of_add a) :=
lift_add_hom_apply_single _ _ _
@[simp] lemma lift_nc_one (f : k →+* R) (g : multiplicative G →* R) :
lift_nc (f : k →+ R) g 1 = 1 :=
@monoid_algebra.lift_nc_one k (multiplicative G) _ _ _ _ f g
lemma lift_nc_mul (f : k →+* R) (g : multiplicative G →* R) (a b : add_monoid_algebra k G)
(h_comm : ∀ {x y}, y ∈ a.support → commute (f (b x)) (g $ multiplicative.of_add y)) :
lift_nc (f : k →+ R) g (a * b) = lift_nc (f : k →+ R) g a * lift_nc (f : k →+ R) g b :=
@monoid_algebra.lift_nc_mul k (multiplicative G) _ _ _ _ f g a b @h_comm
end mul_one_class
/-! #### Semiring structure -/
section semiring
instance {R : Type*} [monoid R] [semiring k] [distrib_mul_action R k] :
has_scalar R (add_monoid_algebra k G) :=
finsupp.has_scalar
variables [semiring k] [add_monoid G]
instance : semiring (add_monoid_algebra k G) :=
{ one := 1,
mul := (*),
zero := 0,
add := (+),
.. add_monoid_algebra.non_unital_semiring,
.. add_monoid_algebra.non_assoc_semiring, }
variables {R : Type*} [semiring R]
/-- `lift_nc` as a `ring_hom`, for when `f x` and `g y` commute -/
def lift_nc_ring_hom (f : k →+* R) (g : multiplicative G →* R)
(h_comm : ∀ x y, commute (f x) (g y)) :
add_monoid_algebra k G →+* R :=
{ to_fun := lift_nc (f : k →+ R) g,
map_one' := lift_nc_one _ _,
map_mul' := λ a b, lift_nc_mul _ _ _ _ $ λ _ _ _, h_comm _ _,
..(lift_nc (f : k →+ R) g)}
end semiring
instance [comm_semiring k] [add_comm_monoid G] : comm_semiring (add_monoid_algebra k G) :=
{ mul_comm := @mul_comm (monoid_algebra k $ multiplicative G) _,
.. add_monoid_algebra.semiring }
instance [semiring k] [nontrivial k] [nonempty G] : nontrivial (add_monoid_algebra k G) :=
finsupp.nontrivial
/-! #### Derived instances -/
section derived_instances
instance [semiring k] [subsingleton k] : unique (add_monoid_algebra k G) :=
finsupp.unique_of_right
instance [ring k] : add_group (add_monoid_algebra k G) :=
finsupp.add_group
instance [ring k] [add_monoid G] : ring (add_monoid_algebra k G) :=
{ neg := has_neg.neg,
add_left_neg := add_left_neg,
sub := has_sub.sub,
sub_eq_add_neg := finsupp.add_group.sub_eq_add_neg,
.. add_monoid_algebra.semiring }
instance [comm_ring k] [add_comm_monoid G] : comm_ring (add_monoid_algebra k G) :=
{ mul_comm := mul_comm, .. add_monoid_algebra.ring}
variables {R S : Type*}
instance [monoid R] [semiring k] [distrib_mul_action R k] :
distrib_mul_action R (add_monoid_algebra k G) :=
finsupp.distrib_mul_action G k
instance [monoid R] [semiring k] [distrib_mul_action R k] [has_faithful_scalar R k] [nonempty G] :
has_faithful_scalar R (add_monoid_algebra k G) :=
finsupp.has_faithful_scalar
instance [semiring R] [semiring k] [module R k] : module R (add_monoid_algebra k G) :=
finsupp.module G k
instance [monoid R] [monoid S] [semiring k] [distrib_mul_action R k] [distrib_mul_action S k]
[has_scalar R S] [is_scalar_tower R S k] :
is_scalar_tower R S (add_monoid_algebra k G) :=
finsupp.is_scalar_tower G k
instance [monoid R] [monoid S] [semiring k] [distrib_mul_action R k] [distrib_mul_action S k]
[smul_comm_class R S k] :
smul_comm_class R S (add_monoid_algebra k G) :=
finsupp.smul_comm_class G k
/-! It is hard to state the equivalent of `distrib_mul_action G (add_monoid_algebra k G)`
because we've never discussed actions of additive groups. -/
end derived_instances
section misc_theorems
variables [semiring k]
lemma mul_apply [has_add G] (f g : add_monoid_algebra k G) (x : G) :
(f * g) x = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, if a₁ + a₂ = x then b₁ * b₂ else 0) :=
@monoid_algebra.mul_apply k (multiplicative G) _ _ _ _ _
lemma mul_apply_antidiagonal [has_add G] (f g : add_monoid_algebra k G) (x : G) (s : finset (G × G))
(hs : ∀ {p : G × G}, p ∈ s ↔ p.1 + p.2 = x) :
(f * g) x = ∑ p in s, (f p.1 * g p.2) :=
@monoid_algebra.mul_apply_antidiagonal k (multiplicative G) _ _ _ _ _ s @hs
lemma support_mul [has_add G] (a b : add_monoid_algebra k G) :
(a * b).support ⊆ a.support.bUnion (λa₁, b.support.bUnion $ λa₂, {a₁ + a₂}) :=
@monoid_algebra.support_mul k (multiplicative G) _ _ _ _
lemma single_mul_single [has_add G] {a₁ a₂ : G} {b₁ b₂ : k} :
(single a₁ b₁ * single a₂ b₂ : add_monoid_algebra k G) = single (a₁ + a₂) (b₁ * b₂) :=
@monoid_algebra.single_mul_single k (multiplicative G) _ _ _ _ _ _
-- This should be a `@[simp]` lemma, but the simp_nf linter times out if we add this.
-- Probably the correct fix is to make a `[add_]monoid_algebra.single` with the correct type,
-- instead of relying on `finsupp.single`.
lemma single_pow [add_monoid G] {a : G} {b : k} :
∀ n : ℕ, ((single a b)^n : add_monoid_algebra k G) = single (n • a) (b ^ n)
| 0 := by { simp only [pow_zero, zero_nsmul], refl }
| (n+1) :=
by rw [pow_succ, pow_succ, single_pow n, single_mul_single, add_comm, add_nsmul, one_nsmul]
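-- Informally: reading `single a b` in `add_monoid_algebra k ℕ` as the monomial
-- `b * X^a`, this lemma is the familiar `(b * X^a)^n = b^n * X^(n * a)`.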
/-- Like `finsupp.map_domain_add`, but for the convolutive multiplication we define in this file -/
lemma map_domain_mul {α : Type*} {β : Type*} {α₂ : Type*}
[semiring β] [has_add α] [has_add α₂]
{x y : add_monoid_algebra β α} (f : add_hom α α₂) :
(map_domain f (x * y : add_monoid_algebra β α) : add_monoid_algebra β α₂) =
(map_domain f x * map_domain f y : add_monoid_algebra β α₂) :=
begin
simp_rw [mul_def, map_domain_sum, map_domain_single, f.map_add],
rw finsupp.sum_map_domain_index,
{ congr,
ext a b,
rw finsupp.sum_map_domain_index,
{ simp },
{ simp [mul_add] } },
{ simp },
{ simp [add_mul] }
end
section
variables (k G)
/-- The embedding of an additive magma into its additive magma algebra. -/
@[simps] def of_magma [has_add G] : mul_hom (multiplicative G) (add_monoid_algebra k G) :=
{ to_fun := λ a, single a 1,
map_mul' := λ a b, by simpa only [mul_def, mul_one, sum_single_index, single_eq_zero, mul_zero], }
/-- Embedding of a magma with zero into its magma algebra. -/
def of [add_zero_class G] : multiplicative G →* add_monoid_algebra k G :=
{ to_fun := λ a, single a 1,
map_one' := rfl,
.. of_magma k G }
/-- Embedding of a magma with zero `G`, into its magma algebra, having `G` as source. -/
def of' : G → add_monoid_algebra k G := λ a, single a 1
end
@[simp] lemma of_apply [add_zero_class G] (a : multiplicative G) : of k G a = single a.to_add 1 :=
rfl
@[simp] lemma of'_apply (a : G) : of' k G a = single a 1 := rfl
lemma of'_eq_of [add_zero_class G] (a : G) : of' k G a = of k G a := rfl
lemma of_injective [nontrivial k] [add_zero_class G] : function.injective (of k G) :=
λ a b h, by simpa using (single_eq_single_iff _ _ _ _).mp h
lemma mul_single_apply_aux [has_add G] (f : add_monoid_algebra k G) (r : k)
(x y z : G) (H : ∀ a, a + x = z ↔ a = y) :
(f * single x r) z = f y * r :=
@monoid_algebra.mul_single_apply_aux k (multiplicative G) _ _ _ _ _ _ _ H
lemma mul_single_zero_apply [add_zero_class G] (f : add_monoid_algebra k G) (r : k) (x : G) :
(f * single 0 r) x = f x * r :=
f.mul_single_apply_aux r _ _ _ $ λ a, by rw [add_zero]
lemma single_mul_apply_aux [has_add G] (f : add_monoid_algebra k G) (r : k) (x y z : G)
(H : ∀ a, x + a = y ↔ a = z) :
(single x r * f : add_monoid_algebra k G) y = r * f z :=
@monoid_algebra.single_mul_apply_aux k (multiplicative G) _ _ _ _ _ _ _ H
lemma single_zero_mul_apply [add_zero_class G] (f : add_monoid_algebra k G) (r : k) (x : G) :
(single 0 r * f : add_monoid_algebra k G) x = r * f x :=
f.single_mul_apply_aux r _ _ _ $ λ a, by rw [zero_add]
lemma mul_single_apply [add_group G] (f : add_monoid_algebra k G) (r : k) (x y : G) :
(f * single x r) y = f (y - x) * r :=
(sub_eq_add_neg y x).symm ▸
@monoid_algebra.mul_single_apply k (multiplicative G) _ _ _ _ _ _
lemma single_mul_apply [add_group G] (r : k) (x : G) (f : add_monoid_algebra k G) (y : G) :
(single x r * f : add_monoid_algebra k G) y = r * f (- x + y) :=
@monoid_algebra.single_mul_apply k (multiplicative G) _ _ _ _ _ _
lemma support_mul_single [add_right_cancel_semigroup G]
(f : add_monoid_algebra k G) (r : k) (hr : ∀ y, y * r = 0 ↔ y = 0) (x : G) :
(f * single x r : add_monoid_algebra k G).support = f.support.map (add_right_embedding x) :=
@monoid_algebra.support_mul_single k (multiplicative G) _ _ _ _ hr _
lemma support_single_mul [add_left_cancel_semigroup G]
(f : add_monoid_algebra k G) (r : k) (hr : ∀ y, r * y = 0 ↔ y = 0) (x : G) :
(single x r * f : add_monoid_algebra k G).support = f.support.map (add_left_embedding x) :=
@monoid_algebra.support_single_mul k (multiplicative G) _ _ _ _ hr _
lemma lift_nc_smul {R : Type*} [add_zero_class G] [semiring R] (f : k →+* R)
(g : multiplicative G →* R) (c : k) (φ : add_monoid_algebra k G) :
lift_nc (f : k →+ R) g (c • φ) = f c * lift_nc (f : k →+ R) g φ :=
@monoid_algebra.lift_nc_smul k (multiplicative G) _ _ _ _ f g c φ
variables {k G}
lemma induction_on [add_monoid G] {p : add_monoid_algebra k G → Prop} (f : add_monoid_algebra k G)
(hM : ∀ g, p (of k G (multiplicative.of_add g)))
(hadd : ∀ f g : add_monoid_algebra k G, p f → p g → p (f + g))
(hsmul : ∀ (r : k) f, p f → p (r • f)) : p f :=
begin
refine finsupp.induction_linear f _ (λ f g hf hg, hadd f g hf hg) (λ g r, _),
{ simpa using hsmul 0 (of k G (multiplicative.of_add 0)) (hM 0) },
{ convert hsmul r (of k G (multiplicative.of_add g)) (hM g),
simp only [mul_one, to_add_of_add, smul_single', of_apply] },
end
end misc_theorems
section span
variables [semiring k]
/-- An element of `add_monoid_algebra R M` is in the submodule generated by its support. -/
lemma mem_span_support [add_zero_class G] (f : add_monoid_algebra k G) :
f ∈ submodule.span k (of k G '' (f.support : set G)) :=
by rw [of, monoid_hom.coe_mk, ← finsupp.supported_eq_span_single, finsupp.mem_supported]
/-- An element of `add_monoid_algebra R M` is in the submodule generated by its support, using
the unbundled inclusion `of'`. -/
lemma mem_span_support' (f : add_monoid_algebra k G) :
f ∈ submodule.span k (of' k G '' (f.support : set G)) :=
by rw [of', ← finsupp.supported_eq_span_single, finsupp.mem_supported]
end span
end add_monoid_algebra
/-!
#### Conversions between `add_monoid_algebra` and `monoid_algebra`
We have not defined `add_monoid_algebra k G = monoid_algebra k (multiplicative G)`
because historically this caused problems;
since the changes that made `nsmul` definitional, this would now be possible,
but for now we just construct the ring isomorphisms using `ring_equiv.refl _`.
-/
/-- The equivalence between `add_monoid_algebra` and `monoid_algebra` in terms of
`multiplicative` -/
protected def add_monoid_algebra.to_multiplicative [semiring k] [has_add G] :
add_monoid_algebra k G ≃+* monoid_algebra k (multiplicative G) :=
{ to_fun := equiv_map_domain multiplicative.of_add,
map_mul' := λ x y, begin
repeat {rw equiv_map_domain_eq_map_domain},
dsimp [multiplicative.of_add],
convert monoid_algebra.map_domain_mul (mul_hom.id (multiplicative G)),
end,
..finsupp.dom_congr multiplicative.of_add }
/-- The equivalence between `monoid_algebra` and `add_monoid_algebra` in terms of `additive` -/
protected def monoid_algebra.to_additive [semiring k] [has_mul G] :
monoid_algebra k G ≃+* add_monoid_algebra k (additive G) :=
{ to_fun := equiv_map_domain additive.of_mul,
map_mul' := λ x y, begin
repeat {rw equiv_map_domain_eq_map_domain},
dsimp [additive.of_mul],
convert monoid_algebra.map_domain_mul (mul_hom.id G),
end,
..finsupp.dom_congr additive.of_mul }
namespace add_monoid_algebra
variables {k G}
/-! #### Non-unital, non-associative algebra structure -/
section non_unital_non_assoc_algebra
variables {R : Type*} (k) [semiring R] [semiring k] [distrib_mul_action R k] [has_add G]
instance is_scalar_tower_self [is_scalar_tower R k k] :
is_scalar_tower R (add_monoid_algebra k G) (add_monoid_algebra k G) :=
@monoid_algebra.is_scalar_tower_self k (multiplicative G) R _ _ _ _ _
/-- Note that if `k` is a `comm_semiring` then we have `smul_comm_class k k k` and so we can take
`R = k` in the below. In other words, if the coefficients are commutative amongst themselves, they
also commute with the algebra multiplication. -/
instance smul_comm_class_self [smul_comm_class R k k] :
smul_comm_class R (add_monoid_algebra k G) (add_monoid_algebra k G) :=
@monoid_algebra.smul_comm_class_self k (multiplicative G) R _ _ _ _ _
instance smul_comm_class_symm_self [smul_comm_class k R k] :
smul_comm_class (add_monoid_algebra k G) R (add_monoid_algebra k G) :=
@monoid_algebra.smul_comm_class_symm_self k (multiplicative G) R _ _ _ _ _
variables {A : Type u₃} [non_unital_non_assoc_semiring A]
/-- A non_unital `k`-algebra homomorphism from `add_monoid_algebra k G` is uniquely defined by its
values on the functions `single a 1`. -/
lemma non_unital_alg_hom_ext [distrib_mul_action k A]
{φ₁ φ₂ : non_unital_alg_hom k (add_monoid_algebra k G) A}
(h : ∀ x, φ₁ (single x 1) = φ₂ (single x 1)) : φ₁ = φ₂ :=
@monoid_algebra.non_unital_alg_hom_ext k (multiplicative G) _ _ _ _ _ φ₁ φ₂ h
/-- See note [partially-applied ext lemmas]. -/
@[ext] lemma non_unital_alg_hom_ext' [distrib_mul_action k A]
{φ₁ φ₂ : non_unital_alg_hom k (add_monoid_algebra k G) A}
(h : φ₁.to_mul_hom.comp (of_magma k G) = φ₂.to_mul_hom.comp (of_magma k G)) : φ₁ = φ₂ :=
@monoid_algebra.non_unital_alg_hom_ext' k (multiplicative G) _ _ _ _ _ φ₁ φ₂ h
/-- The functor `G ↦ add_monoid_algebra k G`, from the category of magmas to the category of
non-unital, non-associative algebras over `k` is adjoint to the forgetful functor in the other
direction. -/
@[simps] def lift_magma [module k A] [is_scalar_tower k A A] [smul_comm_class k A A] :
mul_hom (multiplicative G) A ≃ non_unital_alg_hom k (add_monoid_algebra k G) A :=
{ to_fun := λ f, { to_fun := λ a, sum a (λ m t, t • f (multiplicative.of_add m)),
.. (monoid_algebra.lift_magma k f : _)},
inv_fun := λ F, F.to_mul_hom.comp (of_magma k G),
.. (monoid_algebra.lift_magma k : mul_hom (multiplicative G) A ≃ non_unital_alg_hom k _ A) }
end non_unital_non_assoc_algebra
/-! #### Algebra structure -/
section algebra
variables {R : Type*}
local attribute [reducible] add_monoid_algebra
/-- `finsupp.single 0` as a `ring_hom` -/
@[simps] def single_zero_ring_hom [semiring k] [add_monoid G] : k →+* add_monoid_algebra k G :=
{ map_one' := rfl,
map_mul' := λ x y, by rw [single_add_hom, single_mul_single, zero_add],
..finsupp.single_add_hom 0}
/-- If two ring homomorphisms from `add_monoid_algebra k G` are equal on all `single a 1`
and `single 0 b`, then they are equal. -/
lemma ring_hom_ext {R} [semiring k] [add_monoid G] [semiring R]
{f g : add_monoid_algebra k G →+* R} (h₀ : ∀ b, f (single 0 b) = g (single 0 b))
(h_of : ∀ a, f (single a 1) = g (single a 1)) : f = g :=
@monoid_algebra.ring_hom_ext k (multiplicative G) R _ _ _ _ _ h₀ h_of
/-- If two ring homomorphisms from `add_monoid_algebra k G` are equal on all `single a 1`
and `single 0 b`, then they are equal.
See note [partially-applied ext lemmas]. -/
@[ext] lemma ring_hom_ext' {R} [semiring k] [add_monoid G] [semiring R]
{f g : add_monoid_algebra k G →+* R}
(h₁ : f.comp single_zero_ring_hom = g.comp single_zero_ring_hom)
(h_of : (f : add_monoid_algebra k G →* R).comp (of k G) =
(g : add_monoid_algebra k G →* R).comp (of k G)) :
f = g :=
ring_hom_ext (ring_hom.congr_fun h₁) (monoid_hom.congr_fun h_of)
section opposite
open finsupp mul_opposite
variables [semiring k]
/-- The opposite of an `add_monoid_algebra R I` is ring equivalent to
the `add_monoid_algebra Rᵐᵒᵖ I` over the opposite ring, taking elements to their opposite. -/
@[simps {simp_rhs := tt}] protected noncomputable def op_ring_equiv [add_comm_monoid G] :
(add_monoid_algebra k G)ᵐᵒᵖ ≃+* add_monoid_algebra kᵐᵒᵖ G :=
{ map_mul' := begin
dsimp only [add_equiv.to_fun_eq_coe, ←add_equiv.coe_to_add_monoid_hom],
rw add_monoid_hom.map_mul_iff,
ext i r i' r' : 6,
dsimp,
simp only [map_range_single, single_mul_single, ←op_mul, add_comm]
end,
..mul_opposite.op_add_equiv.symm.trans
(finsupp.map_range.add_equiv (mul_opposite.op_add_equiv : k ≃+ kᵐᵒᵖ))}
@[simp] lemma op_ring_equiv_single [add_comm_monoid G] (r : k) (x : G) :
add_monoid_algebra.op_ring_equiv (op (single x r)) = single x (op r) :=
by simp
@[simp] lemma op_ring_equiv_symm_single [add_comm_monoid G] (r : kᵐᵒᵖ) (x : G) :
add_monoid_algebra.op_ring_equiv.symm (single x r) = op (single x r.unop) :=
by simp
end opposite
/--
The instance `algebra R (add_monoid_algebra k G)` whenever we have `algebra R k`.
In particular this provides the instance `algebra k (add_monoid_algebra k G)`.
-/
instance [comm_semiring R] [semiring k] [algebra R k] [add_monoid G] :
algebra R (add_monoid_algebra k G) :=
{ smul_def' := λ r a, by { ext, simp [single_zero_mul_apply, algebra.smul_def, pi.smul_apply], },
commutes' := λ r f, by { ext, simp [single_zero_mul_apply, mul_single_zero_apply,
algebra.commutes], },
..single_zero_ring_hom.comp (algebra_map R k) }
/-- `finsupp.single 0` as an `alg_hom` -/
@[simps] def single_zero_alg_hom [comm_semiring R] [semiring k] [algebra R k] [add_monoid G] :
k →ₐ[R] add_monoid_algebra k G :=
{ commutes' := λ r, by { ext, simp, refl, }, ..single_zero_ring_hom}
@[simp] lemma coe_algebra_map [comm_semiring R] [semiring k] [algebra R k] [add_monoid G] :
(algebra_map R (add_monoid_algebra k G) : R → add_monoid_algebra k G) =
single 0 ∘ (algebra_map R k) :=
rfl
end algebra
section lift
variables {k G} [comm_semiring k] [add_monoid G]
variables {A : Type u₃} [semiring A] [algebra k A] {B : Type*} [semiring B] [algebra k B]
/-- `lift_nc_ring_hom` as an `alg_hom`, for when `f` is an `alg_hom` -/
def lift_nc_alg_hom (f : A →ₐ[k] B) (g : multiplicative G →* B)
(h_comm : ∀ x y, commute (f x) (g y)) :
add_monoid_algebra A G →ₐ[k] B :=
{ to_fun := lift_nc_ring_hom (f : A →+* B) g h_comm,
commutes' := by simp [lift_nc_ring_hom],
..(lift_nc_ring_hom (f : A →+* B) g h_comm)}
/-- A `k`-algebra homomorphism from `add_monoid_algebra k G` is uniquely defined by its
values on the functions `single a 1`. -/
lemma alg_hom_ext ⦃φ₁ φ₂ : add_monoid_algebra k G →ₐ[k] A⦄
(h : ∀ x, φ₁ (single x 1) = φ₂ (single x 1)) : φ₁ = φ₂ :=
@monoid_algebra.alg_hom_ext k (multiplicative G) _ _ _ _ _ _ _ h
/-- See note [partially-applied ext lemmas]. -/
@[ext] lemma alg_hom_ext' ⦃φ₁ φ₂ : add_monoid_algebra k G →ₐ[k] A⦄
(h : (φ₁ : add_monoid_algebra k G →* A).comp (of k G) =
(φ₂ : add_monoid_algebra k G →* A).comp (of k G)) : φ₁ = φ₂ :=
alg_hom_ext $ monoid_hom.congr_fun h
variables (k G A)
/-- Any monoid homomorphism `multiplicative G →* A` can be lifted to an algebra homomorphism
`add_monoid_algebra k G →ₐ[k] A`. -/
def lift : (multiplicative G →* A) ≃ (add_monoid_algebra k G →ₐ[k] A) :=
{ inv_fun := λ f, (f : add_monoid_algebra k G →* A).comp (of k G),
to_fun := λ F,
{ to_fun := lift_nc_alg_hom (algebra.of_id k A) F $ λ _ _, algebra.commutes _ _,
.. @monoid_algebra.lift k (multiplicative G) _ _ A _ _ F},
.. @monoid_algebra.lift k (multiplicative G) _ _ A _ _ }
variables {k G A}
lemma lift_apply' (F : multiplicative G →* A) (f : add_monoid_algebra k G) :
lift k G A F f = f.sum (λ a b, (algebra_map k A b) * F (multiplicative.of_add a)) := rfl
lemma lift_apply (F : multiplicative G →* A) (f : add_monoid_algebra k G) :
lift k G A F f = f.sum (λ a b, b • F (multiplicative.of_add a)) :=
by simp only [lift_apply', algebra.smul_def]
lemma lift_def (F : multiplicative G →* A) :
⇑(lift k G A F) = lift_nc ((algebra_map k A : k →+* A) : k →+ A) F :=
rfl
@[simp] lemma lift_symm_apply (F : add_monoid_algebra k G →ₐ[k] A) (x : multiplicative G) :
(lift k G A).symm F x = F (single x.to_add 1) := rfl
lemma lift_of (F : multiplicative G →* A) (x : multiplicative G) :
lift k G A F (of k G x) = F x :=
by rw [of_apply, ← lift_symm_apply, equiv.symm_apply_apply]
@[simp] lemma lift_single (F : multiplicative G →* A) (a b) :
lift k G A F (single a b) = b • F (multiplicative.of_add a) :=
by rw [lift_def, lift_nc_single, algebra.smul_def, ring_hom.coe_add_monoid_hom]
lemma lift_unique' (F : add_monoid_algebra k G →ₐ[k] A) :
F = lift k G A ((F : add_monoid_algebra k G →* A).comp (of k G)) :=
((lift k G A).apply_symm_apply F).symm
/-- Decomposition of a `k`-algebra homomorphism from `add_monoid_algebra k G` by
its values on `F (single a 1)`. -/
lemma lift_unique (F : add_monoid_algebra k G →ₐ[k] A) (f : add_monoid_algebra k G) :
F f = f.sum (λ a b, b • F (single a 1)) :=
by conv_lhs { rw lift_unique' F, simp [lift_apply] }
lemma alg_hom_ext_iff {φ₁ φ₂ : add_monoid_algebra k G →ₐ[k] A} :
(∀ x, φ₁ (finsupp.single x 1) = φ₂ (finsupp.single x 1)) ↔ φ₁ = φ₂ :=
⟨λ h, alg_hom_ext h, by rintro rfl _; refl⟩
end lift
section
local attribute [reducible] add_monoid_algebra
universe ui
variable {ι : Type ui}
lemma prod_single [comm_semiring k] [add_comm_monoid G]
{s : finset ι} {a : ι → G} {b : ι → k} :
(∏ i in s, single (a i) (b i)) = single (∑ i in s, a i) (∏ i in s, b i) :=
finset.induction_on s rfl $ λ a s has ih, by rw [prod_insert has, ih,
single_mul_single, sum_insert has, prod_insert has]
end
end add_monoid_algebra
variables {R : Type*} [comm_semiring R] (k G)
/-- The algebra equivalence between `add_monoid_algebra` and `monoid_algebra` in terms of
`multiplicative`. -/
def add_monoid_algebra.to_multiplicative_alg_equiv [semiring k] [algebra R k] [add_monoid G] :
add_monoid_algebra k G ≃ₐ[R] monoid_algebra k (multiplicative G) :=
{ commutes' := λ r, by simp [add_monoid_algebra.to_multiplicative],
..add_monoid_algebra.to_multiplicative k G }
/-- The algebra equivalence between `monoid_algebra` and `add_monoid_algebra` in terms of
`additive`. -/
def monoid_algebra.to_additive_alg_equiv [semiring k] [algebra R k] [monoid G] :
monoid_algebra k G ≃ₐ[R] add_monoid_algebra k (additive G) :=
{ commutes' := λ r, by simp [monoid_algebra.to_additive],
..monoid_algebra.to_additive k G }
|
function msg = er_displayParfiles(view);
%
% msg = er_displayParfiles(view);
%
% Pop up a message box with the scan group currently assigned
% to the present scan (if any), and the parfiles for each
% scan in the group. Returns the message text.
%
%
% ras, 02/06.
if notDefined('view'), view = getSelectedInplane; end
mrGlobals;
dt = view.curDataType;
scan = view.curScan;
dtName = dataTYPES(dt).name;
if ~isfield(dataTYPES(dt).scanParams(scan), 'scanGroup') || ...
~isfield(dataTYPES(dt).scanParams(scan), 'parfile')
msg = 'Parfiles and/or scan group not assigned for this scan.';
myWarnDlg(msg);
return
end
if ~isempty(dataTYPES(dt).scanParams(scan).scanGroup)
[scans dt] = er_getScanGroup(view);
scanGroup = sprintf('%s scans %s', dataTYPES(dt).name, num2str(scans));
else
% just use cur scan / dt
scans = scan;
scanGroup = 'none';
end
msg = [sprintf('%s %s scan %i \n', view.name, dtName, scan) ...
sprintf('Scan Group: %s \n\n', scanGroup) ...
sprintf('Parfiles: \n')];
for s = scans
msg = [msg sprintf('%i: %s \n', s, dataTYPES(dt).scanParams(s).parfile)];
end
msgbox(msg, 'Event-Related Info');
return
|
[STATEMENT]
lemma [simp]: "pr_conv_3_to_2 (pr_conv_2_to_3 f) = f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pr_conv_3_to_2 (pr_conv_2_to_3 f) = f
[PROOF STEP]
by(simp add: pr_conv_3_to_2_def pr_conv_2_to_3_def) |
library(utils)
## designate packages to install/load
all_pkgs <- c('reticulate', 'png', 'RColorBrewer', 'MALDIquant', 'data.table', 'docxtractr', 'xlsx', 'cairoDevice')
## find packages that need to be installed
already_installed <- rownames( installed.packages() )
to_install <- setdiff(all_pkgs, already_installed)
if (length(to_install) > 0) {
install.packages(to_install, dependencies=TRUE)
}
## now load all packages
sapply(all_pkgs, library, character.only=TRUE, logical.return=TRUE)
##############################################################################################
# Functions used for data input
##############################################################################################
#Function used in ReadDataDump.
#Converts a time string ("h:m:s" or "m:s") to minutes
ConvertTime <- function(x){
vals <- strsplit(x,":")[[1]]
retval <- NA
if(length(vals)==3)
{
retval <- as.integer(vals[1])*60+as.integer(vals[2])+as.single(vals[3])/60
}
if(length(vals)==2)
{
retval <- as.integer(vals[1])+as.single(vals[2])/60
}
return(retval)
}
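## A minimal usage sketch for ConvertTime; the input strings are illustrative:
# ConvertTime("1:30:30")  # h:m:s -> 90.5 minutes
# ConvertTime("2:15")     # m:s   -> 2.25 minutes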
pharming_harvest <- function(main_dir="Y:/Mario Giacobassi/Manjus experiments", area_conversion=1.625, img_name_vec = NULL, image_question=T){
cat('#########################################\nPHARM HARVEST\n#########################################\n')
tryCatch(cat(readLines("Y:/Box Sync/procpharm/farm.txt"),sep='\n'), error=function(e) cat('\nPHARM ON PHARM ANIMAL\n'))
cat('\nI am your data harvester. In this function I will\nextract data from any video files you have, and package everything together\n')
#MAIN DIRECTORY SELECTION
if(is.null(main_dir)){
		cat('\nHOLD UP! Tell me where your Pharm is!\nThis is where each experiment is in a separate folder.\nExample "Z:/Lee Leavitt"\n')
main_dir<-scan(n=1,what="character")
}
#AREA CONVERSION
if(is.null(area_conversion)){
		cat('\nYou have NOT told me the area conversion. Please enter that now. For example:\n4x bin = 1.625\n4x nobin = 3.25\n10x bin = 3.25\n10x nobin = 6.5\n')
		area_conversion <- as.numeric(scan(n=1, what='character'))
}
#IMPORT RETICULATE
if( !library(reticulate, logical.return = T) ){
install.packages('reticulate');library(reticulate)
}
if( !library(png, logical.return = T) ){
install.packages('png');library(png)
}
#INITIALIZE EXPERIMENT names
setwd(main_dir)
cat('\nI have entered your Pharm,\n',main_dir, '\nselect each experiment I need to harvest.\n')
exp_dir<-select.list(list.dirs(), multiple=T)
(exp_dir_1 <- sub("./","",exp_dir))
(exp_dir_2 <- lapply(strsplit(exp_dir_1, '/'), function(x) x[length(x)] ))
(exp_dir_3 <- Reduce(c, exp_dir_2))
(exp_dir_4 <- lapply(strsplit(exp_dir_3,' '), function(x) x[1]))
(exp_dir_5 <- Reduce(c, exp_dir_4))
rd.names <- paste0('RD.', exp_dir_5)
cat('\nThese are the experiments I am going to process for you\n')
cat(rd.names, sep='\n')
total_start_time <- Sys.time()
for( i in 1:length(exp_dir) ){
setwd(exp_dir[i])
#Input the names of the files that CP created
cell_data_name <- list.files(pattern= '^cell.*[.]txt$')
#cell_data_name<-"celldatacells_filtered.txt"
################################################
#IMAGE IMPORT
################################################
#Names of the files you want loaded into the RD file
if( is.null(img_name_vec) ){
img_name_vec<-c(
"bf.gfp.tritc.start.png",
"gfp.tritc.start.ci.ltl.rs.png",
"tritc.start.ci.ltl.rs.png",
"gfp.start.ci.ltl.rs.png",
"bf.start.lab.png",
"fura2.png",
"fura2.divide.start.png",
"roi.img.png")
}
# Add images
img_list<-list()
for( j in 1:length(img_name_vec) ){
img_list[[ paste0("img",j) ]] <- tryCatch(png::readPNG(img_name_vec[j]), error=function(e)NULL)
}
if(image_question == T){
			cat('\nThese are the images I have attempted to load for you.\nIf any are NULL and you want to add different images, say yes to the\nnext question. You will be asked to select a png image for each location.\n\n')
cat(img_name_vec, sep='\n')
			str(img_list)
cat('\nDO YOU WANT DIFFERENT IMAGES[y,n]?\n')
img_reselect <- scan(n=1,what='character')
if( img_reselect=='y' ){
cat("\nAlright buddy I am going to give you options if you don't\nwant any image there just go ahead and press 0\n\n")
png_imgs <- list.files(pattern='png')
for( j in 1:8 ){
cat("\nWhat do you want for image ", j, '\n')
selection <- menu(png_imgs)
if(selection==0){
img_list[[paste0("img",j)]] <- NULL
}else{
img_list[[paste0("img",j)]] <- png::readPNG(png_imgs[selection])
}
cat('\nI have added ', png_imgs[selection],' to position ',j,'\n')
}
}
}
########################################################
#VIDEO PROCESSING
########################################################
c_dat_make<-file.info(cell_data_name)$mtime
video_data_name <- list.files(pattern="[vV]ideo.*[.]txt$")
video_dat_make <- file.info(video_data_name)$mtime
		#If video_data.txt was made before the cell data file, it is stale and needs to be remade;
		#i.e. if time_since_make is negative the video is re-extracted below
time_since_make <- video_dat_make - c_dat_make
if(length(video_data_name) < 1 ){
py_pharm <- import('python_pharmer')
video <- list.files(pattern="^[Vv]ideo.*nd2$")
if( length(video) > 1 ){
cat("\nSelect your video to process\n")
video_num <- menu(video)
video <- video[video_num]
}
			# Now read in video data
py_pharm$video_roi_extractor_faster(video)
}else{
if(time_since_make < 0){
py_pharm <- import('python_pharmer')
video <- list.files(pattern="^[Vv]ideo.*nd2$")
if( length(video) > 1 ){
cat("\nSelect your video to process\n")
video_num <- menu(video)
video <- video[video_num]
}
# Now read in video Data
py_pharm$video_roi_extractor_faster(video)
}
}
require(data.table)
f2_img <- fread("video_data.txt")
##DCAST Mean Trace
start_time<-Sys.time()
t.340 <- dcast(data = f2_img, ImageNumber ~ ObjectNumber, value.var = 'Intensity_MeanIntensity_f2_340')
t.380 <- dcast(data = f2_img, ImageNumber ~ ObjectNumber, value.var = 'Intensity_MeanIntensity_f2_380')
t.dat <- t.340/t.380
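		# t.dat now holds the fura-2 340/380 ratio: one row per frame, one column per ROI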
print(Sys.time()-start_time)
###################################################
#TIME INFO EXTRACTION
###################################################
		#Older versions of NIS Elements are not compatible with the nd2reader python package,
		#so the researcher may need to export the time information before starting this
		#function. The function senses whether the file is already present.
py_pharm <- import('python_pharmer')
		if( !exists('video') ){ video <- list.files(pattern="^[Vv]ideo.*nd2$") } # `video` is undefined when video_data.txt already existed
if( length(list.files(pattern = "^time.*txt$")) < 1 ){
py_pharm$time_info_gather( video )
}
time_info <- read.delim("time.info.txt", sep="\t")
time_min <- round(time_info[2]/1000/60, digits=3)
# Create row.names
cell.names <- paste("X.", colnames(t.340)[-1], sep="")
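		# gather the trace tables built above (t.340, t.380, t.dat) by name from the local environment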
traces <- ls(pattern = "^[t.]{2}.*[0-9a-z]{3}")
for( j in 1:length(traces) ){
traze <- get( traces[j] )
traze[,1] <- time_min[,1]
traze <- as.data.frame(traze)
colnames(traze) <- c("Time",cell.names)
row.names(traze)<-time_min[,1]
assign(traces[j], traze)
}
########################################################
# wr1 import
########################################################
wrdef <- "wr1.docx"
wrdef <- list.files(pattern = '^wr1')
wrdef_logic <- grep(".docx", wrdef, ignore.case=T, value=T)
# If it is a wr1.docx, continue
if( length(wrdef_logic) == 1 ){
require(docxtractr)
			wr <- docx.wr1.importer(wrdef_logic) # pass the .docx match, in case several wr1 files exist
w.dat <- MakeWr.docx(t.dat, wr)
## Check for duplicated rows
if(length(which(duplicated(row.names(t.dat))))>=1){
dup<-which(duplicated(row.names(t.dat)))
paste(dup)
t.dat<-t.dat[-dup,]
w.dat<-w.dat[-dup,]
}
}else{
tryCatch(
wr <- ReadResponseWindowFile(wrdef)
, error=function(e) print("YOU NEED A wr1.docx or wr1.csv")
)
#Wr<-length(wr[,1])#complete and revise this section
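			#shift each window start back 10 s (10/60 min), matching the docx importer's convention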
wr['at'] <- wr['at'] - (10/60)
w.dat <- MakeWr(t.dat,wr)
}
########################################################
#CELL DATA PROCESSING
		########################################################
c.dat<-read.delim(cell_data_name, header=T, sep="\t")
		#Now use the newly created roi_checker I built in python
roiToRemove <- py_pharm$roi_checker()
if(length(roiToRemove)>0){
cat("\nremoving ROI:", roiToRemove,'\n')
c.dat <- c.dat[-roiToRemove,]
}
		#These are the columns needed for analysis
c_dat_names<-c(
"ObjectNumber",
"AreaShape_Area",
"AreaShape_Center_X",
"AreaShape_Center_Y",
"AreaShape_FormFactor",
"AreaShape_Perimeter",
"Intensity_MeanIntensity_CGRP_start_ci",
"Intensity_MeanIntensity_CGRP_end_ci",
"Intensity_MeanIntensity_IB4_start_ci",
"Intensity_MeanIntensity_IB4_end_ci",
"Intensity_MeanIntensity_TRITC_end_ci",
"Intensity_MeanIntensity_BF_start",
"Intensity_MeanIntensity_BF_end",
"Intensity_MeanIntensity_DAPI_ci",
"Intensity_MeanIntensity_mcherry_start_ci",
"Intensity_MeanIntensity_mcherry_end_ci",
"Location_Center_X",
"Location_Center_Y")
c_dat_rn <- c(
"id",
"area",
"center.x.simplified",
"center.y.simplified",
"circularity",
"perimeter",
"mean.gfp.start",
"mean.gfp.end",
"mean.cy5.start",
"mean.cy5.end",
"mean.tritc.end",
"mean.bf.start",
"mean.bf.end",
"mean.dapi",
"mean.mcherry.start",
"mean.mcherry.end",
"center.x",
"center.y")
		# Find these columns in the original c.dat
(c_dat_names_update <- grep(paste0(c_dat_names, collapse="|"), names(c.dat), value=T, ignore.case=T) )
		# Find which column names remain
cdnp_val<-c()
for(j in 1:length(c_dat_names_update) ){
value <- grep(c_dat_names_update[j], c_dat_names, ignore.case=T)
if(length(value) > 0){
cdnp_val[j] <- grep(c_dat_names_update[j], c_dat_names, ignore.case=T)
}else{ cdnp_val[j] <- NA }
}
( cdnp_val <- cdnp_val[ !is.na(cdnp_val) ] )
# Update the renaming values
c_dat_rn_update <- c_dat_rn[cdnp_val]
# Create data subset to rename
c_dat_1 <- c.dat[ c_dat_names_update ]
		# Rename the columns in this new data.frame
names(c_dat_1)<- c_dat_rn_update
#put this newly renamed data frame at the start of the c.dat
c.dat<-cbind(c_dat_1, c.dat)
#From line 81
c.dat[,"id"] <- cell.names
row.names(c.dat) <- cell.names
#convert area
c.dat[,"area"]<-c.dat$area*area_conversion
# Initial and simple Data processing
tmp.rd <- list(t.dat=t.dat,w.dat=w.dat,c.dat=c.dat)
levs<-setdiff(unique(as.character(w.dat[,2])),"")
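		# processing parameters (roles assumed from how they are passed below): snr.lim = SNR threshold,
		# hab.lim = habituation limit, sm = smoothing width, ws = window size, blc = baseline-correction method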
snr.lim=5; hab.lim=.05; sm=2; ws=3; blc="SNIP"
pcp <- ProcConstPharm(tmp.rd,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp
tmp.rd <- list(t.dat=t.dat,t.340=t.340,t.380=t.380,
w.dat=w.dat,c.dat=c.dat, bin=bin, scp=scp, snr=pcp$snr, blc=pcp$blc, der=pcp$der)
tmp.rd <- TraceBrewer(tmp.rd)
tmp.rd <- c(tmp.rd, img_list)
rd.name <- rd.names[i]
f.name <- paste(rd.name,".Rdata",sep="")
assign(rd.name,tmp.rd)
save(list=rd.name,file=f.name)
		rm(f2_img)
rm(rd.name)
rm(tmp.rd)
rm(c.dat)
rm(cell.names)
setwd(main_dir)
gc()
alarm()
cat("\n#####################################################\nYour harvest has gone Successfully. Congratulations.\n#####################################################\n")
}
total_end_time <- Sys.time()
cat("\n#####################################################\nTotal Harvest took. ",total_end_time - total_start_time,"\n#####################################################\n")
}
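## A hedged usage sketch; the directory and conversion factor below are illustrative:
# pharming_harvest(main_dir='Z:/Lee Leavitt', area_conversion=1.625, image_question=FALSE)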
# readdatadump sam espinosa
ReadDataDump.se <- function(fname=NULL,wrdef=NULL, Wr=NULL, c.dat=NULL,img1=NULL,img2=NULL,img3=NULL,img4=NULL, img5=NULL, img6=NULL, img7=NULL, img8=NULL,rd.name=NULL,sep="\t"){
require(png)
require(zoom)
require(RColorBrewer)
require(MALDIquant)
tmp <- read.delim(fname,fileEncoding="UCS-2LE",sep=sep)
all.names <- names(tmp)
time.name <- grep("Time",all.names,value=T,ignore=T)[1]
if(time.name != "Time..ms."){warning(paste(time.name,"assumed to be in ms"))}
id.name <- grep("ID",all.names,value=T,ignore=T)[1]
if(id.name != "ID"){warning(paste(id.name,"assumed to be it ROI.ID"))}
ratio.name <- grep("Ratio",all.names,value=T,ignore=T)
	if(length(ratio.name)==0){stop("no ratio data")}
else{if(ratio.name != "Ratio.340.380"){warning(ratio.name,"assumed to be Ratio data")}}
x.names <- unique(tmp[,id.name])
x.tab <- table(tmp[,id.name])
	if(max(x.tab) != min(x.tab)){stop("all ids do not have the same number of data points")}
x.row <- max(x.tab)
t.dat <- matrix(tmp[,ratio.name],byrow=FALSE,nrow=x.row)
time.val <- tmp[tmp[,id.name]==x.names[1],time.name]
if(length(grep(":",time.val[1]))==0)
{
x <- as.single(time.val)
if(max(x) > 1000000)#in ms
{
x <- x/60000
}
else if(max(x) > 1500) #in seconds
{
x <- x/60
}
time.val <- x
}
else{time.val <- sapply(as.character(time.val),ConvertTime)}
t.dat <- cbind(time.val,t.dat) #note assumption of ms
t.dat <- as.data.frame(t.dat)
t.dat<- t.dat[unique(row.names(t.dat)),]
names(t.dat) <- c("Time",paste("X.",x.names,sep=""))
if(!is.null(c.dat)){
c.dat<-read.delim(file=c.dat,fileEncoding="UCS-2LE", sep=sep)
c.dat.names<-names(c.dat)
cx.name <- grep("Xpx",c.dat.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "CentreXpx"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Ypx",c.dat.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "CentreYpx"){warning(cy.name,"assumed to be Center Y data")}}
area.name <- grep("Area",c.dat.names,value=T,ignore=T)
	if(length(area.name)==0){stop("no Area data")}
else{if(area.name != "ROIArea"){warning(paste(area.name,"assumed to be Area"))}}
mean.gfp<-grep("MeanGreen",c.dat.names,value=T,ignore=T)
if(length(mean.gfp)==0){warning(paste("no gfp.1 data from c.dat"))}
else{if(mean.gfp!="MeanGFP"){warning(paste(mean.gfp, "assumed to be GFP.1"))}}
mean.tritc<-grep("MeanBlue",c.dat.names,value=T,ignore=T)
if(length(mean.tritc)==0){warning(paste("no tritc data from c.dat"))}
else{if(mean.tritc!="MeanTRITC"){warning(paste(mean.tritc, "assumed to be TRITC"))}}
cnames <- c(area.name, cx.name, cy.name, mean.gfp, mean.tritc)
# o.names <- setdiff(c.dat.names,c(time.name,id.name,area.name,ratio.name,cx.name,cy.name, mean.gfp, mean.tritc))
# if(length(o.names) > 0){warning(paste(o.names,"added to c.dat"));cnames <- c(cnames,o.names)}
c.dat <- c.dat[,cnames]
c.dat <- cbind(paste("X.",x.names,sep=""),c.dat)
c.dat <- data.frame(c.dat)
colnames(c.dat)[1:4] <- c("id","area","center.x", "center.y")
	# If gfp and tritc are not both present, decide what to rename:
	# 1st: if there is only tritc, name the 5th column mean.tritc
	# 2nd: if there is only gfp, name the 5th column mean.gfp
	# 3rd: if both are present, rename both the 5th and 6th columns
	if(length(mean.gfp)==0 & length(mean.tritc)==1){colnames(c.dat)[5]<-"mean.tritc"}
	if(length(mean.tritc)==0 & length(mean.gfp)==1){colnames(c.dat)[5]<-"mean.gfp"}
	if(length(mean.tritc)==1 & length(mean.gfp)==1){colnames(c.dat)[5:6]<-c("mean.gfp","mean.tritc")}
	row.names(c.dat) <- c.dat[,"id"]
	}
else{
area.name <- grep("Area",all.names,value=T,ignore=T)[1]
if(is.na(area.name)){stop("no ROI.Area data")}
else{if(area.name != "ROI.Area"){warning(paste(area.name,"assumed to be ROI.Area"))}}
cx.name <- grep("Center.X",all.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "Center.X"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Center.Y",all.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "Center.Y"){warning(cy.name,"assumed to be Center Y data")}}
cnames <- c(area.name,cx.name,cy.name)
c.dat <- tmp[match(x.names,tmp[,id.name]),cnames]
c.dat <- cbind(paste("X.",x.names,sep=""),c.dat)
c.dat <- data.frame(c.dat)
names(c.dat)[1:4] <- c("id","area","center.X","center.Y")
row.names(c.dat) <- c.dat[,"id"]
}
if(!is.null(wrdef))
{
wr <- ReadResponseWindowFile(wrdef)
Wr<-length(wr[,1])#complete and revise this section
if(length(colnames(wr))<2){w.dat<-WrMultiplex(t.dat,wr,n=Wr)}
else{w.dat <- MakeWr(t.dat,wr)}
}
else
{
WrCreate.rdd(t.dat, n=Wr)
wr <- ReadResponseWindowFile("wr1.csv")
w.dat <- MakeWr(t.dat,wr)
}
if(!is.null(img1)){img1<-png::readPNG(img1)}
if(!is.null(img2)){img2<-png::readPNG(img2)}
if(!is.null(img3)){img3<-png::readPNG(img3)}
if(!is.null(img4)){img4<-png::readPNG(img4)}
if(is.null(rd.name)){rd.name <- paste("RD",make.names(date()),sep="")}
if(length(which(duplicated(row.names(t.dat))))>=1){
dup<-which(duplicated(row.names(t.dat)))
paste(dup)
t.dat<-t.dat[-dup,]
w.dat<-w.dat[-dup,]
}
	tmp.rd <- list(t.dat=t.dat,w.dat=w.dat,c.dat=c.dat, img1=img1, img2=img2, img3=img3, img4=img4)
f.name <- paste(rd.name,".Rdata",sep="")
assign(rd.name,tmp.rd)
save(list=rd.name,file=f.name)
	return(paste(nrow(tmp.rd$c.dat),"traces read; saved to",f.name))
#save as RD file
}
# readdatadump Lee Leavitt
#ReadDataDump.lee <- function(fname=NULL,wrdef=NULL, Wr=NULL, c.dat=NULL,img1=NULL,img2=NULL,img3=NULL,img4=NULL,rd.name=NULL,sep="\t")
# fancy added for cell definer
ReadDataDump.lee <- function(rd.name=NULL,img1="bf.f2.png",img2="bf.f2.lab.png",img3="bf.png",img4=NULL,img5=NULL, img6=NULL, img7=NULL, img8=NULL, fancy=F,fname="Data (full).txt",wrdef="wr1.csv", Wr=NULL, c.dat="ROI Data.txt" ,sep="\t"){
require(png)
require(zoom)
require(RColorBrewer)
require(MALDIquant)
##################################################################################
# Video Data import
##################################################################################
if(length(fname)>1){
tmp1 <- read.delim(fname[1],fileEncoding="UCS-2LE",sep=sep)
tmp2 <- read.delim(fname[2],fileEncoding="UCS-2LE",sep=sep)
tmp<-rbind(tmp1, tmp2)
}else{
tmp <- read.delim(fname,fileEncoding="UCS-2LE",sep=sep)
}
all.names <- names(tmp)
time.name <- grep("Time",all.names,value=T,ignore=T)[1]
if(time.name != "Time..ms."){warning(paste(time.name,"assumed to be in ms"))}
id.name <- grep("ID",all.names,value=T,ignore=T)[1]
if(id.name != "ID"){warning(paste(id.name,"assumed to be it ROI.ID"))}
ratio.name <- grep("Ratio",all.names,value=T,ignore=T)
	if(length(ratio.name)==0){stop("no ratio data")}
else{if(ratio.name != "Ratio.340.380"){warning(ratio.name,"assumed to be Ratio data")}}
x.names <- unique(tmp[,id.name])
x.tab <- table(tmp[,id.name])
if(max(x.tab) != min(x.tab)){warning("all ids do not have the same number of data points")}
x.row <- max(x.tab)
t.dat <- matrix(tmp[,ratio.name],byrow=FALSE,nrow=x.row)
time.val <- tmp[tmp[,id.name]==x.names[1],time.name]
if(length(grep(":",time.val[1]))==0)
{
x <- as.single(time.val)
if(max(x) > 1000000)#in ms
{
x <- x/60000
}
else if(max(x) > 1500) #in seconds
{
x <- x/60
}
time.val <- x
}
else{time.val <- sapply(as.character(time.val),ConvertTime)}
t.dat <- cbind(time.val,t.dat) #note assumption of ms
t.dat <- as.data.frame(t.dat)
t.dat<- t.dat[unique(row.names(t.dat)),]
names(t.dat) <- c("Time",paste("X.",x.names,sep=""))
##################################################################################
# Cell Data import
##################################################################################
if(!is.null(c.dat)){
c.dat<-read.delim(file=c.dat,fileEncoding="UCS-2LE", sep=sep)
c.dat.names<-names(c.dat)
id.name <- grep("id",c.dat.names,value=T,ignore=T)
	if(length(id.name)==0){stop("no ID data")}
	else{if(id.name != "RoiID"){warning(id.name,"assumed to be ID data")}}
cx.name <- grep("Xpx",c.dat.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "CentreXpx"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Ypx",c.dat.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "CentreYpx"){warning(cy.name,"assumed to be Center Y data")}}
perimeter.name<-grep("perimeter", c.dat.names, value=T, ignore=T)
	if(length(perimeter.name)==0){stop("no Perimeter data")}
else{if(perimeter.name != "Perimeter"){warning(paste(perimeter.name,"assumed to be Perimeter"))}}
area.name <- grep("Area",c.dat.names,value=T,ignore=T)
	if(length(area.name)==0){stop("no Area data")}
else{if(area.name != "ROIArea"){warning(paste(area.name,"assumed to be Area"))}}
#mean.gfp<-grep("gfp.1",c.dat.names,value=T,ignore=T)
mean.gfp<-grep("GFP",c.dat.names,value=T,ignore=F)
if(length(mean.gfp)==0){mean.gfp<-grep("gfp",c.dat.names,value=T,ignore=T);warning(paste("no gfp.1 data from c.dat"))}
else{if(mean.gfp!="MeanGFP"){warning(paste(mean.gfp, "assumed to be GFP.1"))}}
mean.gfp.2<-grep("gfp.2",c.dat.names,value=T,ignore=T)
if(length(mean.gfp.2)==0){warning(paste("no gfp.2 data from c.dat"))}
else{if(mean.gfp.2!="MeanGFP"){warning(paste(mean.gfp.2, "assumed to be GFP.2"))}}
mean.tritc<-grep("TRITC",c.dat.names,value=T,ignore=F)
if(length(mean.tritc)==0){warning(paste("no tritc data from c.dat"))}
else{if(mean.tritc!="MeanTRITC"){warning(paste(mean.tritc, "assumed to be TRITC"))}}
mean.dapi<-grep("DAPI",c.dat.names,value=T,ignore=F)
if(length(mean.dapi)==0){warning(paste("no dapi data from c.dat"))}
else{if(mean.dapi!="MeanDAPI"){warning(paste(mean.dapi, "assumed to be DAPI"))}}
cnames <- c(id.name,area.name, perimeter.name, cx.name, cy.name, mean.gfp, mean.gfp.2, mean.tritc, mean.dapi)
# o.names <- setdiff(c.dat.names,c(time.name,id.name,area.name,ratio.name,cx.name,cy.name, mean.gfp, mean.tritc))
# if(length(o.names) > 0){warning(paste(o.names,"added to c.dat"));cnames <- c(cnames,o.names)}
	c.dat<-c.dat[cnames]#create c.dat with the specified columns from cnames
c.dat <- c.dat[order(c.dat[,id.name]),] # order rows by ROIid
c.dat[,id.name] <- paste("X.",c.dat[,id.name],sep="")#rename ROIid with a X.cell#
row.names(c.dat)<-c.dat[,id.name]# assign row.names the ROIid name
c.dat <- data.frame(c.dat)#convert to data frame
	colnames(c.dat)[1:5] <- c("id","area","perimeter","center.x", "center.y")#rename columns to these names
c.dat["circularity"]<-((c.dat$perimeter^2)/(4*pi*c.dat$area)) # create a circularity measurement
	## If the class of the column is a factor, the column is filled with "N/A",
	# so set it to NULL / remove it. If not, rename the column to the
	# standard name used downstream.
if(class(c.dat[,mean.gfp])=="factor"){c.dat[,mean.gfp]<-NULL
}else{
colnames(c.dat)[which(colnames(c.dat)==mean.gfp)]<-"mean.gfp"}
if(class(c.dat[,mean.gfp.2])=="factor"){c.dat[,mean.gfp.2]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.gfp.2)]<-"mean.gfp.2"}
if(class(c.dat[,mean.tritc])=="factor"){c.dat[,mean.tritc]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.tritc)]<-"mean.tritc"}
if(class(c.dat[,mean.dapi])=="factor"){c.dat[,mean.dapi]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.dapi)]<-"mean.dapi"}
}
else{
area.name <- grep("Area",all.names,value=T,ignore=T)[1]
if(is.na(area.name)){stop("no ROI.Area data")}
else{if(area.name != "ROI.Area"){warning(paste(area.name,"assumed to be ROI.Area"))}}
cx.name <- grep("Center.X",all.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "Center.X"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Center.Y",all.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "Center.Y"){warning(cy.name,"assumed to be Center Y data")}}
cnames <- c(area.name,cx.name,cy.name)
c.dat <- tmp[match(x.names,tmp[,id.name]),cnames]
c.dat <- cbind(paste("X.",x.names,sep=""),c.dat)
c.dat <- data.frame(c.dat)
names(c.dat)[1:4] <- c("id","area","center.x","center.y")
row.names(c.dat) <- c.dat[,"id"]
}
#####################################################
# Window Region Definition
#####################################################
if(!is.null(wrdef))
{
wr <- ReadResponseWindowFile(wrdef)
Wr<-length(wr[,1])#complete and revise this section
if(length(colnames(wr))<2){w.dat<-WrMultiplex(t.dat,wr,n=Wr)}
else{w.dat <- MakeWr(t.dat,wr)}
}
else
{
WrCreate.rdd(t.dat, n=Wr)
wr <- ReadResponseWindowFile("wr1.csv")
w.dat <- MakeWr(t.dat,wr)
}
tmp.rd <- list(t.dat=t.dat,w.dat=w.dat,c.dat=c.dat)
#####################################################
#Create Despiked data
#####################################################
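	#Mean3 and SpikeTrim2 are package helpers (behavior assumed): Mean3 gives a 3-point running
	#mean and SpikeTrim2 marks single-point spikes outside the thresholds as NA, so each pass
	#below replaces flagged spikes with the local mean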
wts <- tmp.rd$t.dat
for(i in 1:5) #run the despike 5 times.
{
wt.mn3 <- Mean3(wts)
wts <- SpikeTrim2(wts,1,-1)
		print(sum(is.na(wts))) #prints the number of points removed; it should be close to 0 after 5 loops
wts[is.na(wts)] <- wt.mn3[is.na(wts)]
}
tmp.rd$mp <- wts
# Initial Data processing
levs<-setdiff(unique(as.character(w.dat[,2])),"")
snr.lim=4;hab.lim=.05;sm=3;ws=30;blc="SNIP"
pcp <- ProcConstPharm(tmp.rd$mp,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
bin<-pf.function(bin,levs)
tmp.rd$t.dat<-t.dat
tmp.rd$w.dat<-w.dat
tmp.rd$c.dat<-c.dat
tmp.rd$bin<-bin
tmp.rd$scp<-scp
tmp.rd$snr<-pcp$snr
tmp.rd$blc<-pcp$blc
tmp.rd$der<-pcp$der
# Add images
if(!is.null(img1)){tmp.rd$img1<-png::readPNG(img1)}
if(!is.null(img2)){tmp.rd$img2<-png::readPNG(img2)}
if(!is.null(img3)){tmp.rd$img3<-png::readPNG(img3)}
if(!is.null(img4)){tmp.rd$img4<-png::readPNG(img4)}
if(!is.null(img5)){tmp.rd$img5<-png::readPNG(img5)}
if(!is.null(img6)){tmp.rd$img6<-png::readPNG(img6)}
if(!is.null(img7)){tmp.rd$img7<-png::readPNG(img7)}
if(!is.null(img8)){tmp.rd$img8<-png::readPNG(img8)}
#####################################################
# Cell Label Scoring
#####################################################
	if(fancy==TRUE){tmp.rd<-cell.creator(tmp.rd)} # Create list of binary labeled neurons
else{tmp.rd$cells<-NULL}
if(is.null(rd.name)){rd.name <- paste("RD",make.names(date()),sep="")}
if(length(which(duplicated(row.names(t.dat))))>=1){
dup<-which(duplicated(row.names(t.dat)))
paste(dup)
t.dat<-t.dat[-dup,]
w.dat<-w.dat[-dup,]
}
f.name <- paste(rd.name,".Rdata",sep="")
assign(rd.name,tmp.rd)
save(list=rd.name,file=f.name)
	return(paste(nrow(tmp.rd$c.dat),"traces read; saved to",f.name))
#save as RD file
}
#HAS TRACEBREWER
ReadDataDump.lee.2 <- function(rd.name=NULL,img1=NULL,img2=NULL,img3=NULL,img4=NULL,img5=NULL, img6=NULL, img7=NULL, img8=NULL, fancy=F,fname="Data (full).txt",wrdef="wr1.docx", Wr=NULL, c.dat="ROI Data.txt" ,sep="\t")
{
require(png)
require(MALDIquant)
##################################################################################
# Video Data import
##################################################################################
if(length(fname)>1){
tmp1 <- read.delim(fname[1],fileEncoding="UCS-2LE",sep=sep)
tmp2 <- read.delim(fname[2],fileEncoding="UCS-2LE",sep=sep)
tmp<-rbind(tmp1, tmp2)
}else{
tmp <- read.delim(fname,fileEncoding="UCS-2LE",sep=sep)
}
all.names <- names(tmp)
time.name <- grep("Time",all.names,value=T,ignore=T)[1]
if(time.name != "Time..ms."){warning(paste(time.name,"assumed to be in ms"))}
id.name <- grep("ID",all.names,value=T,ignore=T)[1]
if(id.name != "ID"){warning(paste(id.name,"assumed to be it ROI.ID"))}
ratio.name <- grep("Ratio",all.names,value=T,ignore=T)
if(is.na(ratio.name)){stop("no ratio data")}else{if(ratio.name != "Ratio.340.380"){warning(ratio.name,"assumed to be Ratio data")}}
x.names <- unique(tmp[,id.name])
x.tab <- table(tmp[,id.name])
if(max(x.tab) != min(x.tab)){warning("all ids do not have the same number of data points")}
x.row <- max(x.tab)
t.dat <- matrix(tmp[,ratio.name],byrow=FALSE,nrow=x.row)
time.val <- tmp[tmp[,id.name]==x.names[1],time.name]
if(length(grep(":",time.val[1]))==0)
{
x <- as.single(time.val)
if(max(x) > 1000000)#in ms
{
x <- x/60000
}
else if(max(x) > 1500) #in seconds
{
x <- x/60
}
time.val <- x
}else{time.val <- sapply(as.character(time.val),ConvertTime)}
t.dat <- cbind(time.val,t.dat) #note assumption of ms
t.dat <- as.data.frame(t.dat)
t.dat<- t.dat[unique(row.names(t.dat)),]
names(t.dat) <- c("Time",paste("X.",x.names,sep=""))
##################################################################################
# Cell Data import
##################################################################################
if(!is.null(c.dat)){
c.dat<-read.delim(file=c.dat,fileEncoding="UCS-2LE", sep=sep)
c.dat.names<-names(c.dat)
id.name <- grep("id",c.dat.names,value=T,ignore=T)
if(is.na(id.name)){stop("no ID data")
}else{if(id.name != "RoiID"){warning(id.name,"assumed to be ID data")}}
cx.name <- grep("Xpx",c.dat.names,value=T,ignore=T)
if(is.na(cx.name)){stop("no Center X data")}else{if(cx.name != "CentreXpx"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Ypx",c.dat.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}else{if(cy.name != "CentreYpx"){warning(cy.name,"assumed to be Center Y data")}}
perimeter.name<-grep("perimeter", c.dat.names, value=T, ignore=T)
	if(length(perimeter.name)==0){stop("no Perimeter data")}else{if(perimeter.name != "Perimeter"){warning(paste(perimeter.name,"assumed to be Perimeter"))}}
area.name <- grep("Area",c.dat.names,value=T,ignore=T)
	if(length(area.name)==0){stop("no Area data")}else{if(area.name != "ROIArea"){warning(paste(area.name,"assumed to be Area"))}}
#mean.gfp<-grep("gfp.1",c.dat.names,value=T,ignore=T)
mean.gfp<-grep("GFP",c.dat.names,value=T,ignore=F)
if(length(mean.gfp)==0){mean.gfp<-grep("gfp",c.dat.names,value=T,ignore=T);warning(paste("no gfp.1 data from c.dat"))}else{if(mean.gfp!="MeanGFP"){warning(paste(mean.gfp, "assumed to be GFP.1"))}}
mean.gfp.2<-grep("gfp.2",c.dat.names,value=T,ignore=T)
if(length(mean.gfp.2)==0){warning(paste("no gfp.2 data from c.dat"))}else{if(mean.gfp.2!="MeanGFP"){warning(paste(mean.gfp.2, "assumed to be GFP.2"))}}
mean.tritc<-grep("TRITC",c.dat.names,value=T,ignore=F)
if(length(mean.tritc)==0){warning(paste("no tritc data from c.dat"))}else{if(mean.tritc!="MeanTRITC"){warning(paste(mean.tritc, "assumed to be TRITC"))}}
mean.cy5<-grep("TRITC",c.dat.names,value=T,ignore=F)
if(length(mean.cy5)==0){warning(paste("no cy5 data from c.dat"))}else{if(mean.cy5!="MeanTRITC"){warning(paste(mean.cy5, "assumed to be TRITC"))}}
mean.dapi<-grep("DAPI",c.dat.names,value=T,ignore=F)
if(length(mean.dapi)==0){warning(paste("no dapi data from c.dat"))}
else{if(mean.dapi!="MeanDAPI"){warning(paste(mean.dapi, "assumed to be DAPI"))}}
cnames <- c(id.name,area.name, perimeter.name, cx.name, cy.name, mean.gfp, mean.gfp.2, mean.tritc,mean.cy5, mean.dapi)
# o.names <- setdiff(c.dat.names,c(time.name,id.name,area.name,ratio.name,cx.name,cy.name, mean.gfp, mean.tritc))
# if(length(o.names) > 0){warning(paste(o.names,"added to c.dat"));cnames <- c(cnames,o.names)}
	c.dat<-c.dat[cnames]#create c.dat with the specified columns from cnames
c.dat <- c.dat[order(c.dat[,id.name]),] # order rows by ROIid
c.dat[,id.name] <- paste("X.",c.dat[,id.name],sep="")#rename ROIid with a X.cell#
row.names(c.dat)<-c.dat[,"RoiID"]# assign row.names the ROIid name
c.dat <- data.frame(c.dat)#convert to data frame
	colnames(c.dat)[1:5] <- c("id","area","perimeter","center.x", "center.y")#rename columns to these names
c.dat["circularity"]<-((c.dat$perimeter^2)/(4*pi*c.dat$area)) # create a circularity measurement
	## If the class of the column is a factor, the column is filled with "N/A",
	# so set it to NULL / remove it. If not, rename the column to the
	# standard name used downstream.
if(class(c.dat[,mean.gfp])=="factor"){c.dat[,mean.gfp]<-NULL
}else{
colnames(c.dat)[which(colnames(c.dat)==mean.gfp)]<-"mean.gfp"}
if(class(c.dat[,mean.gfp.2])=="factor"){c.dat[,mean.gfp.2]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.gfp.2)]<-"mean.gfp.2"}
if(class(c.dat[,mean.tritc])=="factor"){c.dat[,mean.tritc]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.tritc)]<-"mean.tritc"}
if(class(c.dat[,mean.cy5])=="factor"){c.dat[,mean.cy5]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.cy5)]<-"mean.cy5"}
if(class(c.dat[,mean.dapi])=="factor"){c.dat[,mean.dapi]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.dapi)]<-"mean.dapi"}
}
else{
area.name <- grep("Area",all.names,value=T,ignore=T)[1]
if(is.na(area.name)){stop("no ROI.Area data")}
else{if(area.name != "ROI.Area"){warning(paste(area.name,"assumed to be ROI.Area"))}}
cx.name <- grep("Center.X",all.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "Center.X"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Center.Y",all.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "Center.Y"){warning(cy.name,"assumed to be Center Y data")}}
cnames <- c(area.name,cx.name,cy.name)
c.dat <- tmp[match(x.names,tmp[,id.name]),cnames]
c.dat <- cbind(paste("X.",x.names,sep=""),c.dat)
c.dat <- data.frame(c.dat)
names(c.dat)[1:4] <- c("id","area","center.x","center.y")
row.names(c.dat) <- c.dat[,"id"]
}
#####################################################
# Window Region Definition
#####################################################
############ wr1 import
wrdef<-"wr1.docx"
require(docxtractr)
if(!is.null(wrdef)){
wr<-docx.wr1.importer(wrdef)
w.dat<-MakeWr.docx(t.dat,wr)
}
#if(!is.null(wrdef))
# {
## wr <- ReadResponseWindowFile(wrdef)
# Wr<-length(wr[,1])#complete and revise this section
# if(length(colnames(wr))<2){w.dat<-WrMultiplex(t.dat,wr,n=Wr)}
# else{w.dat <- MakeWr(t.dat,wr)}
# }
# else
# {
# WrCreate.rdd(t.dat, n=Wr)
# wr <- ReadResponseWindowFile("wr1.csv")
# w.dat <- MakeWr(t.dat,wr)
# }
tmp.rd <- list(t.dat=t.dat,w.dat=w.dat,c.dat=c.dat)
#####################################################
#Create Despiked data
#####################################################
wts <- tmp.rd$t.dat
for(i in 1:5) #run the despike 5 times.
{
wt.mn3 <- Mean3(wts)
wts <- SpikeTrim2(wts,1,-1)
		print(sum(is.na(wts))) #prints the number of points removed; it should be close to 0 after 5 loops
wts[is.na(wts)] <- wt.mn3[is.na(wts)]
}
tmp.rd$mp <- wts
# Initial Data processing
levs<-setdiff(unique(as.character(w.dat[,2])),"")
snr.lim=4;hab.lim=.05;sm=2;ws=20;blc="SNIP"
pcp <- ProcConstPharm(tmp.rd$mp,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
bin<-pf.function(bin,levs)
tmp.rd$t.dat<-t.dat
tmp.rd$w.dat<-w.dat
tmp.rd$c.dat<-c.dat
tmp.rd$bin<-bin
tmp.rd$scp<-scp
tmp.rd$snr<-pcp$snr
tmp.rd$blc<-pcp$blc
tmp.rd$der<-pcp$der
tmp.rd<-TraceBrewer(tmp.rd)
# Add images
if(!is.null(img1)){tmp.rd$img1<-png::readPNG(img1)}
if(!is.null(img2)){tmp.rd$img2<-png::readPNG(img2)}
if(!is.null(img3)){tmp.rd$img3<-png::readPNG(img3)}
if(!is.null(img4)){tmp.rd$img4<-png::readPNG(img4)}
if(!is.null(img5)){tmp.rd$img5<-png::readPNG(img5)}
if(!is.null(img6)){tmp.rd$img6<-png::readPNG(img6)}
if(!is.null(img7)){tmp.rd$img7<-png::readPNG(img7)}
if(!is.null(img8)){tmp.rd$img8<-png::readPNG(img8)}
#####################################################
# Cell Label Scoring
#####################################################
	if(fancy==TRUE){tmp.rd<-cell.creator(tmp.rd)} # Create list of binary labeled neurons
else{tmp.rd$cells<-NULL}
if(is.null(rd.name)){rd.name <- paste("RD",make.names(date()),sep="")}
if(length(which(duplicated(row.names(t.dat))))>=1){
dup<-which(duplicated(row.names(t.dat)))
paste(dup)
t.dat<-t.dat[-dup,]
w.dat<-w.dat[-dup,]
}
f.name <- paste(rd.name,".Rdata",sep="")
	assign(rd.name, tmp.rd, envir=.GlobalEnv) # expose the finished RD object in the global environment
	assign(rd.name, tmp.rd) # local copy so save() below can find the object by name
save(list=rd.name,file=f.name)
	return(paste(nrow(tmp.rd$c.dat),"traces read; saved to",f.name))
#save as RD file
}
# readdatadump Lee Leavitt 170209
#ReadDataDump.lee <- function(fname=NULL,wrdef=NULL, Wr=NULL, c.dat=NULL,img1=NULL,img2=NULL,img3=NULL,img4=NULL,rd.name=NULL,sep="\t")
# fancy added for cell definer
#this import now has a 340 and a 380 channel
ReadDataDump.microglia <- function(rd.name=NULL,img1="bf.f2.png",img2="bf.f2.lab.png",img3="bf.png",img4=NULL,img5=NULL, img6=NULL, img7=NULL, img8=NULL, fancy=F,fname="Data (full).txt",wrdef="wr1.docx", Wr=NULL, c.dat="ROI Data.txt" ,sep="\t"){
require(png)
require(RColorBrewer)
require(MALDIquant)
##################################################################################
# Video Data import
##################################################################################
if(length(fname)>1){
tmp1 <- read.delim(fname[1],fileEncoding="UCS-2LE",sep=sep)
tmp2 <- read.delim(fname[2],fileEncoding="UCS-2LE",sep=sep)
tmp<-rbind(tmp1, tmp2)
}else{
tmp <- read.delim(fname,fileEncoding="UCS-2LE",sep=sep)
}
all.names <- names(tmp)
time.name <- grep("Time",all.names,value=T,ignore=T)[1]
if(time.name != "Time..ms."){warning(paste(time.name,"assumed to be in ms"))}
id.name <- grep("ID",all.names,value=T,ignore=T)[1]
if(id.name != "ID"){warning(paste(id.name,"assumed to be it ROI.ID"))}
ratio.name <- grep("Ratio",all.names,value=T,ignore=T)
	if(length(ratio.name)==0){stop("no ratio data")}
else{if(ratio.name != "Ratio.340.380"){warning(ratio.name,"assumed to be Ratio data")}}
x.names <- unique(tmp[,id.name])
x.tab <- table(tmp[,id.name])
if(max(x.tab) != min(x.tab)){warning("all ids do not have the same number of data points")}
x.row <- max(x.tab)
t.dat <- matrix(tmp[,ratio.name],byrow=FALSE,nrow=x.row)
time.val <- tmp[tmp[,id.name]==x.names[1],time.name]
if(length(grep(":",time.val[1]))==0)
{
x <- as.single(time.val)
if(max(x) > 1000000)#in ms
{
x <- x/60000
}
else if(max(x) > 1500) #in seconds
{
x <- x/60
}
time.val <- x
}
else{time.val <- sapply(as.character(time.val),ConvertTime)}
t.dat <- cbind(time.val,t.dat) #note assumption of ms
t.dat <- as.data.frame(t.dat)
t.dat<- t.dat[unique(row.names(t.dat)),]
names(t.dat) <- c("Time",paste("X.",x.names,sep=""))
##################################################################################
# Cell Data import
##################################################################################
if(!is.null(c.dat)){
c.dat<-read.delim(file=c.dat,fileEncoding="UCS-2LE", sep=sep)
c.dat.names<-names(c.dat)
id.name <- grep("id",c.dat.names,value=T,ignore=T)
	if(length(id.name)==0){stop("no ID data")}
else{if(id.name != "RoiID"){warning(id.name,"assumed to be ID data")}}
cx.name <- grep("Xpx",c.dat.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "CentreXpx"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Ypx",c.dat.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "CentreYpx"){warning(cy.name,"assumed to be Center Y data")}}
perimeter.name<-grep("Perimeter", c.dat.names, value=T, ignore=T)
	if(length(perimeter.name)==0){stop("no Perimeter data")}
else{if(perimeter.name != "Perimeter"){warning(paste(perimeter.name,"assumed to be Perimeter"))}}
area.name <- grep("ROIArea",c.dat.names,value=T,ignore=T)
	if(length(area.name)==0){stop("no Area data")}
else{if(area.name != "ROIArea"){warning(paste(area.name,"assumed to be Area"))}}
#mean.gfp<-grep("gfp.1",c.dat.names,value=T,ignore=T)
mean.gfp.start<-grep("MeanGFP.start",c.dat.names,value=T,ignore=F)
if(length(mean.gfp.start)==0){mean.gfp.start<-grep("gfp",c.dat.names,value=T,ignore=T);warning(paste("no gfp.1 data from c.dat"))}
else{if(mean.gfp.start!="MeanGFP"){warning(paste(mean.gfp.start, "assumed to be GFP.1"))}}
mean.gfp.end<-grep("MeanGFP.end",c.dat.names,value=T,ignore=T)
if(length(mean.gfp.end)==0){warning(paste("no gfp.2 data from c.dat"))}
else{if(mean.gfp.end!="MeanGFP"){warning(paste(mean.gfp.end, "assumed to be GFP.2"))}}
mean.tritc.start<-grep("MeanTRITC.start",c.dat.names,value=T,ignore=F)
if(length(mean.tritc.start)==0){warning(paste("no tritc data from c.dat"))}
else{if(mean.tritc.start!="MeanTRITC"){warning(paste(mean.tritc.start, "assumed to be TRITC"))}}
mean.tritc.end<-grep("MeanTRITC.end",c.dat.names,value=T,ignore=F)
if(length(mean.tritc.end)==0){warning(paste("no tritc data from c.dat"))}
else{if(mean.tritc.end!="MeanTRITC"){warning(paste(mean.tritc.end, "assumed to be TRITC"))}}
mean.dapi<-grep("DAPI",c.dat.names,value=T,ignore=F)
if(length(mean.dapi)==0){warning(paste("no dapi data from c.dat"))}
else{if(mean.dapi!="MeanDAPI"){warning(paste(mean.dapi, "assumed to be DAPI"))}}
cnames <- c(id.name,area.name, perimeter.name, cx.name, cy.name, mean.gfp.start, mean.gfp.end, mean.tritc.start, mean.tritc.end, mean.dapi)
# o.names <- setdiff(c.dat.names,c(time.name,id.name,area.name,ratio.name,cx.name,cy.name, mean.gfp, mean.tritc))
# if(length(o.names) > 0){warning(paste(o.names,"added to c.dat"));cnames <- c(cnames,o.names)}
	c.dat<-c.dat[cnames]#create c.dat with the specified columns from cnames
c.dat <- c.dat[order(c.dat[,id.name]),] # order rows by ROIid
c.dat[,id.name] <- paste("X.",c.dat[,id.name],sep="")#rename ROIid with a X.cell#
row.names(c.dat)<-c.dat[,id.name]# assign row.names the ROIid name
c.dat <- data.frame(c.dat)#convert to data frame
	colnames(c.dat)[1:5] <- c("id","area","perimeter","center.x", "center.y")#rename columns to these names
c.dat["circularity"]<-((c.dat$perimeter^2)/(4*pi*c.dat$area)) # create a circularity measurement
	## If the class of the column is a factor, the column is filled with "N/A",
	# so set it to NULL / remove it. If not, rename the column to the
	# standard name used downstream.
if(class(c.dat[,mean.gfp.start])=="factor"){c.dat[,mean.gfp.start]<-NULL
}else{
colnames(c.dat)[which(colnames(c.dat)==mean.gfp.start)]<-"mean.gfp.start"}
if(class(c.dat[,mean.gfp.end])=="factor"){c.dat[,mean.gfp.end]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.gfp.end)]<-"mean.gfp.end"}
if(class(c.dat[,mean.tritc.start])=="factor"){c.dat[,mean.tritc.start]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.tritc.start)]<-"mean.tritc.start"}
if(class(c.dat[,mean.tritc.end])=="factor"){c.dat[,mean.tritc.end]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.tritc.end)]<-"mean.tritc.end"}
if(class(c.dat[,mean.dapi])=="factor"){c.dat[,mean.dapi]<-NULL
}else{colnames(c.dat)[which(colnames(c.dat)==mean.dapi)]<-"mean.dapi"}
}
else{
area.name <- grep("Area",all.names,value=T,ignore=T)[1]
if(is.na(area.name)){stop("no ROI.Area data")}
else{if(area.name != "ROI.Area"){warning(paste(area.name,"assumed to be ROI.Area"))}}
cx.name <- grep("Center.X",all.names,value=T,ignore=T)
	if(length(cx.name)==0){stop("no Center X data")}
else{if(cx.name != "Center.X"){warning(cx.name,"assumed to be Center X data")}}
cy.name <- grep("Center.Y",all.names,value=T,ignore=T)
	if(length(cy.name)==0){stop("no Center Y data")}
else{if(cy.name != "Center.Y"){warning(cy.name,"assumed to be Center Y data")}}
cnames <- c(area.name,cx.name,cy.name)
c.dat <- tmp[match(x.names,tmp[,id.name]),cnames]
c.dat <- cbind(paste("X.",x.names,sep=""),c.dat)
c.dat <- data.frame(c.dat)
names(c.dat)[1:4] <- c("id","area","center.x","center.y")
row.names(c.dat) <- c.dat[,"id"]
}
#####################################################
# Window Region Definition
#####################################################
########################################################
# wr1 import
########################################################
wrdef <- "wr1.docx"
wrdef <- list.files(pattern = '^wr1')
wrdef_logic <- grep(".docx", wrdef, ignore.case=T, value=T)
if( length(wrdef_logic) >= 1 ){
require(docxtractr)
		wr <- docx.wr1.importer(wrdef_logic[1]) # pass the .docx match, in case several wr1 files exist
w.dat <- MakeWr.docx(t.dat, wr)
## Check for duplicated rows
if(length(which(duplicated(row.names(t.dat))))>=1){
dup<-which(duplicated(row.names(t.dat)))
paste(dup)
t.dat<-t.dat[-dup,]
w.dat<-w.dat[-dup,]
}
}else{
wr <- ReadResponseWindowFile(wrdef)
Wr<-length(wr[,1])#complete and revise this section
w.dat <- MakeWr(t.dat,wr)
}
#####################################################
#Create Despiked data
#####################################################
	tmp.rd <- list(t.dat=t.dat, w.dat=w.dat, c.dat=c.dat) # assemble the RD list; the despiking below reads tmp.rd$t.dat (cf. ReadDataDump.lee)
	wts <- tmp.rd$t.dat
for(i in 1:5) #run the despike 5 times.
{
wt.mn3 <- Mean3(wts)
wts <- SpikeTrim2(wts,1,-1)
		print(sum(is.na(wts))) #prints the number of points removed; it should be close to 0 after 5 loops
wts[is.na(wts)] <- wt.mn3[is.na(wts)]
}
tmp.rd$mp <- wts
#170127
	# Take the despiked data, subtract the minimum value from the trace, then divide by the maximum value
# to create traces that are all on the same 0 to 1 scale
tmp.dat<-tmp.rd$mp
for(k in 1:length(colnames(tmp.rd$mp))){
tmp.dat[,k]<-tmp.rd$mp[,k]-min(tmp.rd$mp[,k])
tmp.dat[,k]<-tmp.dat[,k]/max(tmp.dat[,k])
}
tmp.dat[,1]<-tmp.rd$t.dat[,1]
tmp.rd$mp.1<-tmp.dat
# Initial Data processing
levs<-setdiff(unique(as.character(w.dat[,2])),"")
snr.lim=4;hab.lim=.05;sm=3;ws=30;blc="SNIP"
pcp <- ProcConstPharm(tmp.rd$mp,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
bin<-pf.function(bin,levs)
tmp.rd$t.dat<-t.dat
tmp.rd$w.dat<-w.dat
tmp.rd$c.dat<-c.dat
tmp.rd$bin<-bin
tmp.rd$scp<-scp
tmp.rd$snr<-pcp$snr
tmp.rd$blc<-pcp$blc
tmp.rd$der<-pcp$der
# Add images
if(!is.null(img1)){tmp.rd$img1<-png::readPNG(img1)}
if(!is.null(img2)){tmp.rd$img2<-png::readPNG(img2)}
if(!is.null(img3)){tmp.rd$img3<-png::readPNG(img3)}
if(!is.null(img4)){tmp.rd$img4<-png::readPNG(img4)}
if(!is.null(img5)){tmp.rd$img5<-png::readPNG(img5)}
if(!is.null(img6)){tmp.rd$img6<-png::readPNG(img6)}
if(!is.null(img7)){tmp.rd$img7<-png::readPNG(img7)}
if(!is.null(img8)){tmp.rd$img8<-png::readPNG(img8)}
#####################################################
# Cell Label Scoring
#####################################################
	if(fancy==TRUE){tmp.rd<-cell.creator(tmp.rd)} # Create list of binary labeled neurons
else{tmp.rd$cells<-NULL}
if(is.null(rd.name)){rd.name <- paste("RD",make.names(date()),sep="")}
if(length(which(duplicated(row.names(t.dat))))>=1){
dup<-which(duplicated(row.names(t.dat)))
paste(dup)
t.dat<-t.dat[-dup,]
w.dat<-w.dat[-dup,]
}
f.name <- paste(rd.name,".Rdata",sep="")
assign(rd.name,tmp.rd)
save(list=rd.name,file=f.name)
	return(paste(nrow(tmp.rd$c.dat),"traces read; saved to",f.name))
#save as RD file
}
# Creates a despiked trace from the t.dat trace.
# The updated RD list is assigned back to the calling object in the global environment.
mp.brewer<-function(tmp.rd){
dat.name<-deparse(substitute(tmp.rd))
#####################################################
#Create Despiked data
#####################################################
wts <- tmp.rd$t.dat
for(i in 1:5) #run the despike 5 times.
{
wt.mn3 <- Mean3(wts)
wts <- SpikeTrim2(wts,1,-1)
		print(sum(is.na(wts))) #prints the number of points removed; it should be close to 0 after 5 loops
wts[is.na(wts)] <- wt.mn3[is.na(wts)]
}
tmp.rd$mp <- wts
#170127
	# Take the despiked data, subtract the minimum value from the trace, then divide by the maximum value
# to create traces that are all on the same 0 to 1 scale
tmp.dat<-tmp.rd$mp
for(k in 1:length(colnames(tmp.rd$mp))){
tmp.dat[,k]<-tmp.rd$mp[,k]-min(tmp.rd$mp[,k])
tmp.dat[,k]<-tmp.dat[,k]/max(tmp.dat[,k])
}
tmp.dat[,1]<-tmp.rd$t.dat[,1]
tmp.rd$mp.1<-tmp.dat
#return(tmp.rd)
assign(dat.name, tmp.rd, envir=.GlobalEnv)
}
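## A hedged usage sketch; RD.experiment stands for any already-loaded RD object:
# mp.brewer(RD.experiment)  # adds $mp (despiked) and $mp.1 (0-to-1 scaled) and reassigns RD.experiment globally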
#Develop a cellular binary score and place the binary labels of the cells into a list called cells
cell.creator<-function(dat, score=F, subset.n=250){
	if(is.null(subset.n)){subset.n<-250}
	if(score){dat<-ROIreview(dat, subset.n=subset.n, pad=5)}
levs<-setdiff(unique(as.character(dat$w.dat$wr1)),"")
cells<-list()
neuron.response<-select.list(levs, title="What defines Neurons?", multiple=T)
neurons<-cellzand(dat$bin,neuron.response, 1)
drop<-cellzand(dat$bin, "drop", 1)
neurons<-setdiff(neurons,drop)
pf<-apply(dat$bin[,c("gfp.bin", "tritc.bin")],1,paste, collapse="")
dat$bin["lab.pf"]<-as.factor(pf)
#lab.groups<-unique(dat$bin$lab.pf)[-grep(pattern="NA",unique(dat$bin$lab.pf))]
lab.groups<-as.character(unique(dat$bin$lab.pf))
cells<-list()
for(i in lab.groups){
x.names<-row.names(dat$bin[which(dat$bin[,"lab.pf"]==i, arr.ind=T),])
cells[[i]]<-x.names
}
glia.response<-select.list(c(levs, "none"), title="What defines glia?", multiple=T)
if(glia.response!="none"){
drop<-cellzand(dat$bin, "drop", 1)
glia<-cellzand(dat$bin,glia.response, 1)
glia<-setdiff(glia,drop)
cells[["000"]]<-setdiff(glia, neurons)
}
else {cells[["000"]]<-setdiff(row.names(dat$c.dat), neurons)}
dat$cells<-cells
return(dat)
}
ReadResponseWindowFile <- function(fname){
dat <- read.csv(fname)
return(dat)
}
#The wr file should be a three-column table: treatment, at, duration
#IMPORT DOCX tables
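## An illustrative wr1.csv (treatment names and times are assumptions, not real data):
# treatment,at,duration
# K.40mM,5.02,0.50
# capsaicin.300nM,12.10,0.75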
docx.wr1.importer<-function(file.name='wr1.docx'){
	if( !library(docxtractr, logical.return=T) ){install.packages('docxtractr'); library(docxtractr)}
#read in docx
wr1<-read_docx(file.name)
#Extract each table
wr1<-docx_extract_all_tbls(wr1, guess_header=F)
	#the table we want is the third one
wr1<-Reduce(c,wr1[[3]])
	#split up each value based on a single space
wr1<-strsplit(wr1, ' ')
	#Now perform a test and pause if there is an error where a window
	#region has too little information
error<-0
print("There is an >1 <2 info error at")
for(i in 1:length(wr1)){
if(length(wr1[[i]])>1 & length(wr1[[i]])<3){
print(wr1[[i]])
error<-error+1
}
}
print("There is an >3 info error at")
for(i in 1:length(wr1)){
if(length(wr1[[i]])>3){
print(wr1[[i]])
error<-error+1
}
}
print(paste("You have a total of",error,"errors"))
if(error>0){
alarm()
print("Fix these Errors")
print("PRESS ANY KEY TO CONTINUE")
scan(n=1)
cat("These are your window region definitions
If you would like to make anymore changes do so now
")
for(i in 1:length(wr1)){
if(length(wr1[[i]])==3){
print(wr1[[i]])
}
}
print("PRESS ANY KEY TO CONTINUE")
scan(n=1)
wr1<-read_docx(file.name)
#Extract each table
wr1<-docx_extract_all_tbls(wr1, guess_header=F)
	#the table we want is the third one
wr1<-Reduce(c,wr1[[3]])
	#split up each value based on a single space
wr1<-strsplit(wr1, ' ')
}else{}
wr1.logic<-unlist(lapply(wr1, function(x) length(x)>1 ))
wr1.locations<-which(wr1.logic, arr.ind=T)
wr1<-wr1[wr1.locations]
#wr1<-Reduce(rbind,wr1)
wr1<-do.call(cbind, lapply(wr1, data.frame, stringsAsFactors=F))
row.names(wr1)<-c('at','treatment','duration')
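	#shift each window start back 10 s (10/60 min) so the window begins just before the recorded treatment time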
wr1['at',]<-as.numeric(wr1['at',])-(10/60)
return(wr1)
}
MakeWr.docx <- function(t.dat,wr1,padL=0,padR=0)
{
w.dat <- t.dat[,1:2]
names(w.dat)[2] <- "wr1"
w.dat["wr1"] <- ""
wr1["treatment",] <- make.names(wr1["treatment",],unique=T)
for(i in 1:ncol(wr1))
{
x1 <- which.min(abs(as.numeric(wr1["at",i])-t.dat[,"Time"]))
x2 <- which.min(abs(( as.numeric( wr1["at",i] ) + as.numeric( wr1["duration",i] ) )-t.dat[,"Time"]))
w.dat[max((x1-padL),1):min((x2+padR),nrow(t.dat)),"wr1"] <- as.character(wr1["treatment",i])
}
return(w.dat)
}
GetWr <- function(fname)
{
wr1 <- read.csv(fname)
return(wr1)
}
MakeWr <- function(t.dat,wr1,padL=0,padR=0)
{
w.dat <- t.dat[,1:2]
names(w.dat)[2] <- "wr1"
w.dat["wr1"] <- ""
wr1["treatment"] <- make.names(wr1[,"treatment"],unique=T)
for(i in 1:nrow(wr1))
{
x1 <- which.min(abs(wr1[i,"at"]-t.dat[,"Time"]))
x2 <- which.min(abs((wr1[i,"at"] + wr1[i,"duration"]) - t.dat[,"Time"]))
w.dat[max((x1-padL),1):min((x2+padR),nrow(t.dat)),"wr1"] <- as.character(wr1[i,"treatment"])
}
return(w.dat)
}
#fill forward for flevs in the window region.
FillWR <- function(wr1,flevs)
{
u.names <- unique(wr1)
wr2 <- NumBlanks(wr1)
u2.names <- unique(wr2)
b.names <- grep("blank",u2.names,value=T)
for(i in flevs)
{
for(j in 1:(length(u2.names)-1))
{
if(u2.names[j]==i & is.element(u2.names[j+1],b.names) )
{
wr1[wr2==u2.names[j+1]] <- i
}
}
}
return(wr1)
}
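## Illustrative call; 'K.40mM' is an assumed treatment label:
# wr1 <- FillWR(wr1, flevs=c('K.40mM'))  # each K.40mM window is extended through the blank region that follows it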
#Adjust the windows to maximize shift regions and peak regions.
#Try to minimize the false positive rate by growing/shrinking windows.
#Works reasonably well, but it only counts peaks; it does not account
#for shape aspects of the trace.
WrAdjust <- function(dat,pcp=NULL,wr=NULL,wr.levs=NULL,snrT=4,minT=10)
{
gtrfunc <- function(x,a){sum(x>a)}
if(is.null(wr)){wr <- dat$w.dat[,"wr1"]}
wr.new <- wr
wrb <- NumBlanks(wr)
wi <- 1:(length(wrb))
x.names <- names(dat$t.dat[,-1])
if(is.element("bin",names(dat)))
if(is.element("drop",names(dat$bin)))
{
x.names <- row.names(dat$bin[dat$bin[,"drop"]==0,])
}
if(is.null(wr.levs))
{
wr.levs <- unique(wr)
wr.levs <- wr.levs[wr.levs != ""]
}
if(is.null(pcp))
{
pcp <- ProcConstPharm(dat)
}
#OK expand/contract each window to give best false positive ratio.
#keep a min width.
hits <- apply(pcp$snr[,x.names],1,gtrfunc,a=snrT)
wrb.levs <- unique(wrb)
b.levs <- grep("blank",wrb.levs,value=T)
for(i in wr.levs[wr.levs != wrb.levs[length(wrb.levs)]])
{
i1 <- match(i,wrb.levs)
if(is.element(wrb.levs[i1+1],b.levs))
{
targs <- hits[wrb==i | wrb==wrb.levs[i1+1]]
tval <- NULL
endT <- length(targs)
lp <- 0
for(j in minT:(endT-1))
{
lp <- lp+1
#tval[lp] <- mean(targs[1:j])/((sum(targs[(j+1):endT])+1)/length(targs[(j+1):endT]))
tval[lp] <- 1/((sum(targs[(j+1):endT])+1)/length(targs[(j+1):endT]))
}
iopt <- match(i,wr)+which.max(tval)+(minT-1)
}
else
{iopt <- max(wi[wr==i])}
wr.new[wr==i] <- ""
wr.new[match(i,wr):iopt] <- i
}
return(wr.new)
}
WrCreate.rdd<-function(t.dat, n=NULL){
window.dat<-data.frame()
#dev.new(width=10,height=6)
x.names<-names(t.dat)[-1]
LinesSome(t.dat,m.names=x.names,lmain="",subset.n=15)
## Plot the total sum of all peaks
#t.sum<-apply(t.dat[-1], 1, sum)
#plot(t.dat[,1], t.sum, type="l", lwd=2)
for(i in 1:n){
dose<-locator(n=2, type="o", pch=15, col="red")
abline(v=c(dose$x[1],dose$x[2]), col="red", lwd=1)
dose.type<-scan(file="", what="character", n=1, quiet=T)
duration<-dose$x[2]-dose$x[1]
window.dat[i,1]<-dose.type
window.dat[i,2]<-dose$x[1]
window.dat[i,3]<-duration
window.dat<-print(window.dat)
names(window.dat)<-c("treatment", "at", "duration")
}
graphics.off()
write.csv(window.dat, file="wr1.csv", row.names=F)
}
# General window creation for an already created RD object without window data
WrCreate.1<-function(dat, n=14, cell=NULL){
window.dat<-data.frame()
if(is.null(cell)){cell<-"X.1"}
t.sum<-apply(dat$t.dat[-1], 1, sum)
dev.new(width=14,height=4)
ymax<-max(dat$t.dat[,cell])*1.05
ymin<-min(dat$t.dat[,cell])*.95
yrange<-ymax-ymin
ylim <- c(ymin,ymax)
xlim <- range(dat$t.dat[,1]) # use same xlim on all plots for better comparison
par(mar=c(6,4.5,3.5,11))
plot(dat$t.dat[,cell]~dat$t.dat[,1], main=cell,xlim=xlim,ylim=ylim,xlab="", ylab="",pch=16, lwd=1, cex=.5)
#axis(1, at=seq(0, length(dat$t.dat[,1]), 5),tick=TRUE )
for(i in 1:n){
dose<-locator(n=2, type="o", pch=15, col="red")
abline(v=c(dose$x[1],dose$x[2]), col="red", lwd=1)
dose.type<-scan(file="", what="character", n=1, quiet=T)
duration<-dose$x[2]-dose$x[1]
window.dat[i,1]<-dose.type
window.dat[i,2]<-dose$x[1]
window.dat[i,3]<-duration
window.dat<-print(window.dat)
names(window.dat)<-c("treatment", "at", "duration")
wr1<-window.dat
}
return(MakeWr(dat$t.dat,wr1,padL=0,padR=0))
}
WrMultiplex<-function(t.dat, wr, n=NULL){
w.dat<-t.dat[,1:2]
names(w.dat)[2]<-"wr1"
w.dat["wr1"]<-""
if(is.null(n)){n=length(wr[,1])}
library(cluster)
pamk<-pam(w.dat[,1], k=n)
wr[1] <- make.names(wr[,1],unique=T)
levs<-wr[,1]
w.dat[,"wr1"]<-levs[pamk$clustering]
return(w.dat)}
#Made a mistake in your window regions?
#If only the time sequence is wrong but all other info is correct,
#set complete=F; this lets you select just the windows that need repair.
#If the naming is off, you will need a complete repair (complete=T),
#and you will lose all information from RDView.
WindowRepair<-function(dat, complete=T){
tmp<-dat #first create a tmp to repair
tmp.rd<-dat # then create a tmp.rd to completely screwup for repairs
#Now do all of this to tmp.rd
wrdef<-"wr1.csv"
t.dat<-tmp.rd$t.dat
if(!is.null(wrdef)){
wr <- ReadResponseWindowFile(wrdef)
Wr<-length(wr[,1])#complete and revise this section
if(length(colnames(wr))<2){w.dat<-WrMultiplex(t.dat,wr,n=Wr)}
else{
wr['at'] <- wr['at'] - (10/60)
w.dat <- MakeWr(t.dat,wr)
}
tmp.rd$w.dat<-w.dat
}
levs<-setdiff(unique(as.character(tmp.rd$w.dat$wr1)),"")
#5 set the thresholds for scoring and run the automatic scoring
sm <- 2 #smooth window size set
ws <- 30 #window peak size
snr.lim <- 4 #signal to noise threshold
hab.lim <- .05 #height above baseline threshold
blc="SNIP"
pcp <- ProcConstPharm(tmp.rd,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
bin<-pf.function(bin,levs)
tmp.rd$bin<-bin
tmp.rd$blc<-pcp$blc
tmp.rd$snr<-pcp$snr
tmp.rd$der<-pcp$der
tmp.rd$bin<-bin
tmp.rd$scp<-scp
#Now surgically add the selected corrected data from tmp.rd to the tmp
#starting with the window region
tmp$w.dat$wr1<-tmp.rd$w.dat$wr1
#select the window region you want to repair
if(complete==T){
tmp$scp<-tmp.rd$scp
tmp$bin<-tmp.rd$bin
}else{
print("Select windows to repair")
windows.tp<-select.list(names(tmp.rd$bin), multiple=T)
if(length(windows.tp)>1){
#next add the binary information
for(i in windows.tp){
tmp$bin[i]<-tmp.rd$bin[i]
win.stats<-grep(i,names(tmp$scp), value=T)
tmp$scp[win.stats]<-tmp.rd$scp[win.stats]
}
}else{}
}
#now save back to the RD object
dat<-tmp
return(dat)
}
#Made a mistake in your window regions?
#If only the time sequence is wrong but all other info is correct,
#set complete=F; this lets you select just the windows that need repair.
#If the naming is off, you will need a complete repair (complete=T),
#and you will lose all information from RDView.
WindowRepair_docx<-function(dat, complete=F, trace_brew=F){
require(docxtractr)
tmp<-dat #first create a tmp to repair
tmp.rd<-dat # then create a tmp.rd to completely screwup for repairs
#Now do all of this to tmp.rd
wrdef<-"wr1.docx"
t.dat<-tmp.rd$t.dat
if(!is.null(wrdef)){
wr <- docx.wr1.importer(wrdef)
Wr<-length(wr[,1])#complete and revise this section
if(length(colnames(wr))<2){
w.dat<-WrMultiplex(t.dat,wr,n=Wr)
}else{
w.dat <- MakeWr.docx(t.dat,wr)
}
tmp.rd$w.dat<-w.dat
}
levs<-setdiff(unique(as.character(tmp.rd$w.dat$wr1)),"")
#5 set the thresholds for scoring and run the automatic scoring
sm <- 2 #smooth window size set
ws <- 30 #window peak size
snr.lim <- 4 #signal to noise threshold
hab.lim <- .05 #height above baseline threshold
blc="SNIP"
pcp <- ProcConstPharm(tmp.rd,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
bin<-pf.function(bin,levs)
tmp.rd$bin<-bin
tmp.rd$blc<-pcp$blc
tmp.rd$snr<-pcp$snr
tmp.rd$der<-pcp$der
tmp.rd$bin<-bin
tmp.rd$scp<-scp
#Now surgically add the selected corrected data from tmp.rd to the tmp
#starting with the window region
tmp$w.dat$wr1<-tmp.rd$w.dat$wr1
if( !is.element('t.norm', names(tmp.rd)) | trace_brew ){ #run TraceBrewer when t.norm is absent or explicitly requested
tmp.rd <- TraceBrewer(tmp.rd)
}
#select the window region you want to repair
if(complete==T){
tmp$scp<-tmp.rd$scp
tmp$bin<-tmp.rd$bin
}else{
print("Select windows to repair")
windows.tp<-select.list(names(tmp.rd$bin), multiple=T)
if(length(windows.tp)>1){
#next add the binary information
for(i in windows.tp){
tmp$bin[i]<-tmp.rd$bin[i]
win.stats<-grep(i,names(tmp$scp), value=T)
tmp$scp[win.stats]<-tmp.rd$scp[win.stats]
}
}else{}
}
#now save back to the RD object
dat<-tmp
return(dat)
}
#Something happened within your experiment and you need to delete a region of rows from the traces
#to recover the data and clean up the display.
#dat: the RD object
#cell: cell to display on the plot; if left empty, X.1 will be used
#complete: logical; if TRUE, all window regions will be rescored. If FALSE, window regions can be selected.
RegionDeleter<-function(dat, cell=NULL, complete=T){
if(is.null(cell)){cell<-"X.1"}
tmp<-dat #first create a tmp to repair
tmp.rd<-dat # then create a tmp.rd to completely screwup for repairs
#Now do all of this to tmp.rd
dev.new(width=16, height=4)
PeakFunc7(tmp.rd, cell)
x.del<-locator(n=2, type="o", col="red", pch=4, lwd=2)$x
rows.to.remove<-which(tmp.rd$t.dat[,1]>=x.del[1] & tmp.rd$t.dat[,1]<=x.del[2],arr.ind=T)
tmp.rd$t.dat<-dat$t.dat[-rows.to.remove,]
tmp.rd$w.dat<-dat$w.dat[-rows.to.remove,]
tmp.rd<-mp.brewer(tmp.rd)
levs<-setdiff(unique(as.character(tmp.rd$w.dat$wr1)),"")
#5 set the thresholds for scoring and run the automatic scoring
sm <- 2 #smooth window size set
ws <- 30 #window peak size
snr.lim <- 4 #signal to noise threshold
hab.lim <- .05 #height above baseline threshold
blc="SNIP"
pcp <- ProcConstPharm(tmp.rd,sm,ws,blc)
scp <- ScoreConstPharm(tmp.rd,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,tmp.rd$w.dat[,"wr1"])
bin <- bin[,levs]
bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
bin<-pf.function(bin,levs)
tmp.rd$bin<-bin
tmp.rd$blc<-pcp$blc
tmp.rd$snr<-pcp$snr
tmp.rd$der<-pcp$der
tmp.rd$bin<-bin
tmp.rd$scp<-scp
#Now surgically add the selected corrected data from tmp.rd to the tmp
#starting with the window region
#tmp$w.dat$wr1<-tmp.rd$w.dat$wr1
#select the window region you want to repair
if(complete==T){
tmp<-tmp.rd
}else{
tmp$t.dat<-tmp.rd$t.dat
tmp$w.dat<-tmp.rd$w.dat
tmp$blc<-tmp.rd$blc
tmp$snr<-tmp.rd$snr
tmp$mp<-tmp.rd$mp
tmp$mp.1<-tmp.rd$mp.1
print("Select windows to repair")
windows.tp<-select.list(names(tmp.rd$bin), multiple=T)
if(length(windows.tp)>1){
#next add the binary information
for(i in windows.tp){
tmp$bin[i]<-tmp.rd$bin[i]
win.stats<-grep(i,names(tmp$scp), value=T)
tmp$scp[win.stats]<-tmp.rd$scp[win.stats]
}
}else{}
}
#now save back to the RD object
dat<-tmp
return(dat)
}
#This is a function to rename mislabeled pulses
WindowRenamer<-function(dat){
dat.name<-deparse(substitute(dat))
pulsenames<-setdiff(unique(as.character(dat$w.dat$wr1)),"")
pulse<-select.list(pulsenames,title="Pulse To Rename", multiple=T)
pulze<-pulse
#bringtotop(-1)
print("#############These are your pulses###############")
print(pulsenames)
print("#############This is the pulse to rename:")
print(pulse)
print("#############Enter the new name")
pulse.rn<-scan(n=length(pulse),what="character")
pulse.rn<-make.names(pulse.rn, unique=T)
print(pulse.rn)
for(i in length(pulse):1){
#Rename Bin dataframe
colnames(dat$bin)[colnames(dat$bin)==pulse[i]] <- pulse.rn[i]
#Rename w.dat
dat$w.dat[which(dat$w.dat[,"wr1"]==pulse[i], arr.ind=T),"wr1"]<-pulse.rn[i]
#Rename SCP
pulze[i]<-paste(pulse[i],".", sep="")
scp.col.to.rn<-grep(pulze[i],colnames(dat$scp),fixed=T)
names(dat$scp)<-sub(pulze[i],paste(pulse.rn[i],".", sep=""), names(dat$scp))
}
#assign(dat.name,dat, envir=.GlobalEnv)
return(dat)
}
##############################################################################################
##############################################################################################
#take a defined window vector and
#number the contiguous blank regions ("")
NumBlanks <- function(x){
nw <- as.character(x)
xlen <- length(x)
bl.cnt <- 1
mi <- match("",nw)
while(!is.na(mi) & bl.cnt < 20)
{
mi2 <- mi+1
while((x[mi2] == "") & mi2 <= xlen){mi2 <- mi2+1}
nw[mi:(mi2-1)] <- paste("blank",bl.cnt,sep="")
bl.cnt <- bl.cnt+1
mi <- match("",nw)
}
return(as.factor(nw))
}
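#Self-contained example of NumBlanks on a toy window vector:
# wr.toy <- c("","","k40","k40","","","caps","")
# NumBlanks(wr.toy)
# #-> blank1 blank1 k40 k40 blank2 blank2 caps blank3 (as a factor)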
NumAny <- function(x,targ){
nw <- as.character(x)
xlen <- length(x)
bl.cnt <- 1
mi <- match(targ,nw)
while(!is.na(mi) & bl.cnt < 20)
{
mi2 <- mi+1
while((x[mi2] == targ) & mi2 <= xlen){mi2 <- mi2+1}
nw[mi:(mi2-1)] <- paste(targ,bl.cnt,sep="")
bl.cnt <- bl.cnt+1
mi <- match(targ,nw)
}
return(as.factor(nw))
}
#measure the variance about the smooth
SmoothVar <- function(x,shws=2){
s1 <- createMassSpectrum(1:length(x),x)
s3 <- smoothIntensity(s1, method="SavitzkyGolay", halfWindowSize=shws)
return(sd(x-intensity(s3)))
}
#########################################################################
#Trace Spike removal, smoothing, normalizing
##########################################################################
#adds an mp data.frame to the tmp RD object.
#RD object must have t.dat and w.dat[,"wr1"]
#this will have the data for the Despiked and smoothed traces.
#ulim and dlim define the limits of two point rise and fall.
#if they are not set the function will try to estimate them from
#the blank regions of the traces, RD must have blank regions (e.g. "")
#currently only the negative spikes are eliminated, i.e. points
#that show a large increase followed by a large decrease (or vice versa)
#smoothing is done using 13 points.
smoothfunc <- function(y,ts,pts=min(13,sum(!is.na(y)))){
yloe <- loess(y ~ ts,span=pts/length(y))
ymp <- predict(yloe,newdata=data.frame(ts=ts))
return(ymp)
}
DespikeSmooth <- function(tmp,ulim=NULL,dlim= NULL){
print("Starting Despike-smooth timing")
start_time <- Sys.time()
wt <- tmp$t.dat
ts <- tmp$t.dat[,1]
wtd <- wt[-1,] - wt[-nrow(wt),]
print(paste("step 1 time",Sys.time()-start_time))
wtd <- sweep(wtd[,-1],1,wtd[,1],'/')
print(paste("step 2 time", Sys.time()-start_time))
wtm <- wtd[-1,]*wtd[-nrow(wtd),]
print(paste("step 3 time", Sys.time()-start_time))
wrb <- NumBlanks(tmp$w.dat[,"wr1"])
print(paste("step 4 time", Sys.time()-start_time) )
if(is.null(ulim) | is.null(dlim))
{
qvals <- quantile(
as.vector(
as.matrix(
wtm[grep("blank",wrb[1:nrow(wtm)]),]
)
)
,probs=c(0,.001,.5,.999,1))
}
print(paste("step 5 time",Sys.time()-start_time))
if(is.null(dlim)){dlim <- qvals[2]}
if(is.null(ulim)){ulim <- qvals[4]}
wtm.z <- wtm[1,]
wtm.z[,] <- 0
wtm <- rbind(wtm.z,wtm,wtm.z)
wtrm <- wtm < dlim
x <- wt[,1]
wt <- wt[,-1]
wt[wtrm] <- NA
mp <- sapply(wt, smoothfunc, ts=x)
print(paste("step 6 time",Sys.time()-start_time))
tmp$mp <- tmp$t.dat
tmp$mp[,-1] <- mp
return(tmp)
}
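#Usage sketch (assumes an RD list named tmp with $t.dat and a $w.dat[,"wr1"]
#that contains blank ("") stretches for estimating the limits):
# tmp <- DespikeSmooth(tmp)                  #limits estimated from blank regions
# tmp <- DespikeSmooth(tmp, ulim=5, dlim=-5) #or supply explicit limits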
LinearBLextend <- function(x,y,intvl=3,plotit=F){
require(MASS)
#break into intervals of 3 minute.
time.tot <- max(x)-min(x)
gaps <- ceiling(time.tot/intvl)
gap.fac <- sort(rep(seq(1,gaps),length.out=length(x)))
gap.i <- tapply(y,gap.fac,which.min)
gap.i <- gap.i + match(unique(gap.fac),gap.fac)-1
mindat <- data.frame(x=x[gap.i],y=y[gap.i])
rlt <- rlm(y ~ x,data=mindat)
if(plotit)
{
plot(x,y)
points(x[gap.i],y[gap.i],pch=16,col="red")
abline(rlt,lty=2,col="blue",lwd=2)
#lines(x[gap.i],predict(xloe))
#points(x1,predict(rlt,newdata=data.frame(x=x1)),pch=16,col="blue",cex=2)
}
return(coefficients(rlt))
}
#add points to the end of the t.dat
PadTdat <- function(tmp,n=5){
xdat <- tmp$t.dat
r1 <- sapply(tmp$t.dat[,-1],LinearBLextend,x=tmp$t.dat[,1],plotit=F)
r1 <- data.frame(t(r1))
tmp$scp[row.names(r1),"rlm.a"] <- r1[,1]
tmp$scp[row.names(r1),"rlm.b"] <- r1[,2]
x <- xdat[,1]
dx <- x[-1]-x[-length(x)]
tmax <- max(x)
tseq <- seq(1,n)*median(dx)+tmax
r2 <- data.frame(t(r1[,"x"] %*% t(tseq) + r1[,1]))
names(r2) <- row.names(r1)
r2[,"Time"] <- tseq
xdat <- rbind(xdat,r2[,names(xdat)])
w1 <- tmp$w.dat[1:n,]
w1[,"Time"] <- tseq
for(i in names(w1)[sapply(w1,is.character)]){w1[,i] <- "epad"}
tmp$w.dat <- rbind(tmp$w.dat,w1)
row.names(tmp$w.dat)<-tmp$w.dat$Time #Lee's additions
tmp$t.dat <- xdat
row.names(tmp$t.dat)<-tmp$t.dat$Time #Lee's additions
tmp$bin['epad']<-0 #Lee's additions
return(tmp)
}
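#Usage sketch (assumes tmp already carries t.dat, w.dat and an scp table for
#the rlm.a/rlm.b annotations): extend each trace n points past the end along
#its robust baseline fit.
# tmp <- PadTdat(tmp, n=5)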
TraceNormal<-function(dat, t.type='blc'){
tmp.dat<-dat[[t.type]]
for(k in 2:ncol(tmp.dat)){ #skip the Time column; honor the requested trace type
tmp.dat[,k]<-dat[[t.type]][,k]-min(dat[[t.type]][,k])
tmp.dat[,k]<-tmp.dat[,k]/max(tmp.dat[,k])
}
tmp.dat[,1]<-dat$t.dat[,1]
dat$t.norm<-tmp.dat
return(dat)
}
TraceBrewer<-function(dat){
cat('
#The current flow of our trace cleaning protocol is as follows, and this is
#what the function automatically fills in for the RD list
#t.dat> Raw data t.dat
#t.dat.pad> End points added at the end NA
#t.dat.pad.ds.s> despike and smooth mp
#t.dat.pad.ds.s.n> normalize 0 to 1 t.norm
#t.dat.pad.ds.s.n.blc> Baseline Corrected blc
')
tmp.rd<-dat
start.time<-proc.time()
# Kevin has created a new way of creating cleaned-up traces.
# Add a few padding points to the end of the experiment, which returns the trace back to baseline
# This helps preserve the response shape
tmp.rd<-PadTdat(tmp.rd)
print(paste("Completed Padding at:",(proc.time()-start.time))[3])
# Kevin now uses this to despike and smooth the data
tmp.rd<-DespikeSmooth(tmp.rd)
print(paste("Completed Despiking at:",(proc.time()-start.time)[3]))
# Now what we need to do is provide the analysis with some type of normalized trace
tmp.rd<-TraceNormal(tmp.rd,'mp')
print(paste("Completed Normalizing at:",(proc.time()-start.time)[3]))
# Now do a baseline correction based on the trace padding, and the despike smooth function, and
# the normalized trace
pcp.tmp<-ProcConstPharm(tmp.rd$t.norm)
print(paste("Completed Baseline Correction at:",(proc.time()-start.time)[3]))
tmp.rd$blc<-pcp.tmp$blc
tmp.rd$snr<-pcp.tmp$snr
# Now perform trace statistics on the specified trace
tmp.rd$scp<-ScoreConstPharm.2(tmp.rd,'blc')
print(paste("Completed Window Statistics at:",(proc.time()-start.time)[3]))
return(tmp.rd)
}
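#Usage sketch for the full cleaning pipeline (assumes an RD list named tmp.rd
#with t.dat, w.dat and scp; no interactive input is required):
# tmp.rd <- TraceBrewer(tmp.rd)
# names(tmp.rd) #now includes mp, t.norm, blc, snr and a refreshed scp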
##############################################################################################
# Cornerstones of trace washing, peak detection, and binary scoring
##############################################################################################
#the first argument is the raw data
#the second argument is the halfwindow size for smoothing (shws)
#the third argument is the peak detection halfwindow size (phws)
#the last argument is the baseline correction method (TopHat = blue line SNIP = red line)
#Note that you should use the RoughReview function to determine the best values for
#arguments 2,3 and 4.
#returns a list with two dataframes: snr and blc.
#snr has the peaks detected for all cells, blc has the baseline corrected data for all cells.
SpikeTrim2 <- function(wt,ulim=NULL,dlim= NULL){
wtd <- wt[-1,]-wt[-nrow(wt),]
wtd <- sweep(wtd[,-1],1,wtd[,1],'/')
if(is.null(ulim) | is.null(dlim))
{
qvals <- quantile(as.vector(as.matrix(wtd)),probs=c(0,.01,.5,.99,1))
}
if(is.null(dlim)){dlim <- qvals[2]}
if(is.null(ulim)){ulim <- qvals[4]}
wt.up <- wtd > ulim
wt.dn <- wtd < dlim
wt.ud <- wt.up[-nrow(wt.up),] + wt.dn[-1,]
wt.du <- wt.up[-1,] + wt.dn[-nrow(wt.dn),]
wt.na <- wt[2:(nrow(wt)-1),-1]
wt.na[wt.ud==2] <- NA
wt.na[wt.du==2] <- NA
#sum(is.na(wt.na)) #debug: count of trimmed points
wt[2:(nrow(wt)-1),-1] <- wt.na
#impute missing using mean of flanking.
#consider replicating first and last columns and doing this all as a vector
return(wt)
}
#each point is replaced with the mean of the two neighboring points
Mean3 <- function(wt){
wt.mn <- (wt[-c(1,2),]+wt[-c(nrow(wt),(nrow(wt)-1)),])/2
wt[2:(nrow(wt)-1),] <- wt.mn
return(wt)
}
ProcConstPharm <- function(dat,shws=2,phws=20,bl.meth="SNIP"){
if(class(dat)=="data.frame"){(dat1<-dat)}else{dat1 <- dat$t.dat}
t.names <- names(dat1)[-1]#Time in first column
dat1.snr <- dat1 #peak calls stored as SNR
dat1.snr[,t.names] <- 0
dat1.bc <- dat1.snr #baseline corrected data
for(i in t.names)
{
p1 <- PeakFunc2(dat1,i,shws=shws,phws=phws,Plotit=F,bl.meth=bl.meth)
dat1.snr[match(mass(p1$peaks),dat1[,1]),i] <- snr(p1$peaks)
dat1.bc[i] <- intensity(p1$dat)
}
dat1.der<-dat1.bc[-1,]-dat1.bc[-nrow(dat1.bc),]
dat1.der <- sweep(dat1.der[,-1],1,dat1.der[,1],'/')
# dat1.crr <- allCRR(dat1,t.names,Plotit=F) #leave off advanced processing for now
return(list(snr=dat1.snr,blc=dat1.bc, der=dat1.der))
}
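#Usage sketch (assumes an RD list named rd; shws/phws/bl.meth as described above):
# pcp <- ProcConstPharm(rd, shws=2, phws=20, bl.meth="SNIP")
# pcp$snr #peak calls stored as signal-to-noise ratios
# pcp$blc #baseline-corrected traces
# pcp$der #first differences scaled by the time step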
#binary score for all cells for the regions of interest bScore
#argument 1 is the baseline corrected data
#argument 2 is the snr peak data
#argument 3 is the threshold for significance on the peaks
#argument 4 is the intensity above baseline threshold
#argument 5 indicates the regions of interest. (e.g. the response windows for which the cells will be scored)
#argument 6 indicates the response windows.
#argument 7 indicates the cells to score (if null all cells will be scored)
#returns the scoring for all cells subject to the above parameters.
#as well as the sum for the snr scores and the sd for the snr scores.
bScore <- function(blc,snr,snr.lim,blc.lim,levs,wr,cnames=NULL){
notzero <- function(x){as.integer(sum(x) > 0)}
if(is.null(cnames)){cnames <- names(blc)[-1]}
wr2 <- wr[is.element(wr,levs)]
b.snr <- snr[is.element(wr,levs),cnames]
b.blc <- blc[is.element(wr,levs),cnames]
b.call <- b.blc
b.call[,] <- 0
b.call[b.snr > snr.lim & b.blc > blc.lim] <- 1
b.score <- data.frame(tot=apply(b.snr,2,sum))
b.score["sd"] <- apply(b.snr,2,sd)
for(i in levs)
{
b.score[i] <- apply(b.call[wr2==i,],2,notzero)
}
return(b.score)
}
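#Usage sketch tying bScore to ProcConstPharm output (assumes rd and pcp as above):
# levs <- setdiff(unique(rd$w.dat$wr1), "")
# bin <- bScore(pcp$blc, pcp$snr, snr.lim=4, blc.lim=.05, levs, rd$w.dat[,"wr1"])
# bin[1:5, levs] #1/0 response calls per window for the first five cells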
# Binary scoring dependent upon ScoreConstPharm table values
# Best way to determine parameters is to look through trace click beforehand
# snr.min = minimum signal to noise value
# max.min = minimum above-baseline threshold
# tot.min = minimum area to consider
# wm.min = which.max minimum: earliest point within the window where the maximum value may occur
# wm.max = latest point to look for the maximum value
bscore2<-function(dat, levs.1=NULL, snr.min=2.8, max.min=.03, wm.min=0, wm.max=600){
scp<-dat$scp
levs<-setdiff(unique(as.character(dat$w.dat[,2])),"")
if(is.null(levs.1)){levs.1<-levs}
else{levs.1<-levs.1}
#dat2<-matrix(0, nrow=length(dat$c.dat[,1]), ncol=length(levs))
dat2<-dat$bin[levs]
#row.names(dat2)<-dat$c.dat[,1]
#colnames(dat2)<-levs
x.names<-dat$c.dat[,1]
for(j in x.names){
for(i in levs.1){
snr.name<-grep(paste(i,".snr", sep=""), names(dat$scp), value=T)
tot.name<-grep(paste(i,".tot", sep=""), names(dat$scp), value=T)
max.name<-grep(paste(i,".max", sep=""), names(dat$scp), value=T)
wm.name<-grep(paste(i,".wm", sep=""), names(dat$scp), value=T)
if(dat$scp[j,snr.name]>=snr.min &
dat$scp[j,max.name]>=max.min &
dat$scp[j,wm.name]>=wm.min &
dat$scp[j,wm.name]<=wm.max)
{dat2[j,i]<-1}
else{dat2[j,i]<-0}
}
}
return(dat2)}
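#Usage sketch (assumes rd$scp already holds the .snr/.max/.wm window statistics
#and rd$bin exists with one column per window level):
# bin2 <- bscore2(rd, snr.min=2.8, max.min=.03, wm.min=0, wm.max=600)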
# calculate a table of cell characteristics globally and
# within specific windows
# these specifics should include
# mean and sd, sum of in window peaks, sum of out of window peaks
# 1) some measure of dead cell
# 2) yes/no peak response for each window
# 3) peak height
# 4) max peak SNR
# 5) peak timing in window
# 6)
# variance of smoothed - raw in window
# define and number blank windows.
ScoreConstPharm <- function(dat,blc=NULL, snr=NULL, der=NULL, snr.lim=3,blc.lim=.03,shws=2)
{
t.dat<-dat$t.dat
if(is.null(blc)){blc<-dat$blc
}else{blc<-blc}
if(is.null(snr)){snr<-dat$snr
}else{snr<-snr}
if(is.null(der)){der<-dat$der
}else{der<-der}
wr<-dat$w.dat$wr1
gtfunc <- function(x,alph){sum(x > alph,na.rm=T)}
lt5func <- function(x,y)
{
ltfunc <- function(i){summary(lm(y[i:(i+5)] ~ x[i:(i+5)]))$coefficients[2,3]}
iseq <- 1:(length(x)-5)
res <- sapply(iseq,ltfunc)
return(range(res))
}
levs <- setdiff(unique(wr),"")
cnames <- names(t.dat)[-1]
res.tab <- data.frame(mean=apply(blc[,cnames],2,mean))
res.tab["sd"] <- apply(blc[,cnames],2,sd)
res.tab["snr.iws"] <- apply(snr[is.element(wr,levs),cnames],2,sum)
res.tab["snr.ows"] <- apply(snr[!is.element(wr,levs),cnames],2,sum)
res.tab["snr.iwc"] <- apply(snr[is.element(wr,levs),cnames],2,gtfunc,alph=snr.lim)
res.tab["snr.owc"] <- apply(snr[!is.element(wr,levs),cnames],2,gtfunc,alph=snr.lim)
dat.der<-der
for(i in cnames)
{
s1 <- createMassSpectrum(t.dat[,"Time"],t.dat[,i])
s3 <- smoothIntensity(s1, method="SavitzkyGolay", halfWindowSize=shws)
bl.th <- estimateBaseline(s3, method="TopHat")[,"intensity"]
bl.snp <- estimateBaseline(s3, method="SNIP")[,"intensity"]
eseq <- 1:ceiling((nrow(t.dat)/2))
lseq <- max(eseq):nrow(t.dat)
res.tab[i,"bl.diff"] <- mean(bl.th-bl.snp)
res.tab[i,"earl.bl.diff"] <- mean(bl.th[eseq]-bl.snp[eseq])
res.tab[i,"late.bl.diff"] <- mean(bl.th[lseq]-bl.snp[lseq])
}
for(i in levs)
{
res.tab[paste(i,".snr",sep="")] <- apply(snr[wr==i,cnames],2,max)
res.tab[paste(i,".tot",sep="")] <- apply(blc[wr==i,cnames],2,sum)
res.tab[paste(i,".max",sep="")] <- apply(blc[wr==i,cnames],2,max)
res.tab[paste(i,".ph.a.r",sep="")] <-res.tab[paste(i,".tot",sep="")]/res.tab[paste(i,".max",sep="")]
res.tab[paste(i,".wm",sep="")] <- apply(blc[wr==i,cnames],2,which.max)
## Derivative measures
#res.tab[paste(i,".der.tot",sep="")] <- apply(dat.der[wr==i,cnames],2,sum)
res.tab[paste(i,".der.tot",sep="")] <- apply(dat.der[wr==i,cnames],2,sum)
#res.tab[paste(i,".der.tot",sep="")] <- apply(na.omit(dat.der[wr==i,cnames]),2,function(x){sum(x[x>0])})
res.tab[paste(i,".der.max",sep="")] <- apply(na.omit(dat.der[wr==i,cnames]),2,max)
res.tab[paste(i,".der.min",sep="")] <- apply(na.omit(dat.der[wr==i,cnames]),2,min)
res.tab[paste(i,".der.wmax",sep="")] <- apply(na.omit(dat.der[wr==i,cnames]),2,which.max)#function(x){which.max(x[5:length(row.names(x))])})
res.tab[paste(i,".der.wmin",sep="")] <- apply(na.omit(dat.der[wr==i,cnames]),2,which.min)
# res.tab[c(paste(i,".dn5",sep=""),paste(i,".up5",sep=""))] <- t(apply(t.dat[wr==i,cnames],2,lt5func,x=t.dat[wr==i,1]))
# res.tab[paste(i,".dn5",sep="")] <- apply(blc[wr==i,cnames],2,dn5func)
}
return(res.tab)
}
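#Usage sketch (assumes rd plus the pcp list from ProcConstPharm, and that
#MALDIquant is loaded for the baseline estimates; returns one row per cell):
# scp <- ScoreConstPharm(rd, pcp$blc, pcp$snr, pcp$der, snr.lim=3, blc.lim=.03, shws=2)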
ScoreConstPharm.2 <- function(dat,t.type=NULL, snr=NULL, der=NULL, snr.lim=3,blc.lim=.03,shws=2){
require(MALDIquant)
t.dat<-dat$t.dat
if(is.null(t.type)){t.type<-'blc'
}else{t.type<-t.type}
if(is.null(snr)){snr<-dat$snr
}else{snr<-snr}
if(is.null(der)){der<-dat$der
}else{der<-der}
wr<-dat$w.dat$wr1
gtfunc <- function(x,alph){sum(x > alph,na.rm=T)}
lt5func <- function(x,y)
{
ltfunc <- function(i){summary(lm(y[i:(i+5)] ~ x[i:(i+5)]))$coefficients[2,3]}
iseq <- 1:(length(x)-5)
res <- sapply(iseq,ltfunc)
return(range(res))
}
levs <- setdiff(unique(wr),"")
cnames <- names(t.dat)[-1]
res.tab <- data.frame(mean=apply(dat[['blc']][,cnames],2,mean))
res.tab["sd"] <- apply(dat$blc[,cnames],2,sd)
res.tab["snr.iws"] <- apply(snr[is.element(wr,levs),cnames],2,sum)
res.tab["snr.ows"] <- apply(snr[!is.element(wr,levs),cnames],2,sum)
res.tab["snr.iwc"] <- apply(snr[is.element(wr,levs),cnames],2,gtfunc,alph=snr.lim)
res.tab["snr.owc"] <- apply(snr[!is.element(wr,levs),cnames],2,gtfunc,alph=snr.lim)
dat.der<-der
for(i in cnames)
{
s1 <- createMassSpectrum(t.dat[,"Time"],t.dat[,i])
s3 <- smoothIntensity(s1, method="SavitzkyGolay", halfWindowSize=shws)
bl.th <- estimateBaseline(s3, method="TopHat")[,"intensity"]
bl.snp <- estimateBaseline(s3, method="SNIP")[,"intensity"]
eseq <- 1:ceiling((nrow(t.dat)/2))
lseq <- max(eseq):nrow(t.dat)
res.tab[i,"bl.diff"] <- mean(bl.th-bl.snp)
res.tab[i,"earl.bl.diff"] <- mean(bl.th[eseq]-bl.snp[eseq])
res.tab[i,"late.bl.diff"] <- mean(bl.th[lseq]-bl.snp[lseq])
}
for(i in levs)
{
res.tab[paste(i,".snr",sep="")] <- apply(snr[wr==i,cnames],2,max)
res.tab[paste(i,".tot",sep="")] <- apply(dat[[t.type]][wr==i,cnames],2,sum)
res.tab[paste(i,".max",sep="")] <- apply(dat[[t.type]][wr==i,cnames],2,max)
res.tab[paste(i,".ph.a.r",sep="")] <-res.tab[paste(i,".tot",sep="")]/res.tab[paste(i,".max",sep="")]
res.tab[paste(i,".wm",sep="")] <- apply(dat[[t.type]][wr==i,cnames],2,which.max)
}
return(res.tab)
}
##############################################################################################
##############################################################################################
##############################################################################################
# Response Scoring
##############################################################################################
#should probably break this into ScoreMulti and ReviewMulti
#Score all RD...Rdata files in a given directory with review
#check for an existing bin file and just review that.
#add a "drop" column to the bin file
# Needs work on drop cells
ScoreMulti <- function(dir.name=NULL,snr.lim=4,hab.lim=.05,sm=3,ws=30,review=T){
if(is.null(dir.name)){dir.name <- getwd()}
setwd(dir.name)
f.names <- list.files(pattern="RD.*\\.Rdata$")
if(length(f.names) == 0){stop("no RD...Rdata files in given directory")}
rd.list <- sub("\\.Rdata*","",f.names)
RD.names <- rd.list #paste(rd.list,".b",sep="")
RD.f.names <- paste(RD.names,".Rdata",sep="")
sel.i <- menu(rd.list,title="Select Data to review")
while(sel.i != 0)
{
j <- sel.i
load(f.names[j])
i <- rd.list[j]
tmp <- get(i)
tlevs <- c(as.character(unique(tmp$w.dat[,"wr1"])[-1]),"drop")
if(is.null(tmp$bin))
{
tmp.pcp <- ProcConstPharm(tmp,sm,ws,"TopHat")
tmp.scp <- ScoreConstPharm(tmp,tmp.pcp$blc,tmp.pcp$snr,tmp.pcp$der,snr.lim,hab.lim,sm)
tmp.bin <- bScore(tmp.pcp$blc,tmp.pcp$snr,snr.lim,hab.lim,tlevs,tmp$w.dat[,"wr1"])
tmp.bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
}
else
{
tmp.pcp <- ProcConstPharm(tmp,sm,ws,"TopHat") #recompute pcp so snr/blc can be refreshed below
tmp.bin <- tmp$bin
tmp.scp <- tmp$scp
tmp.blc <- tmp$blc
}
if(review)
{
tmp.bin <- ScoreReview1(tmp$t.dat,tmp.bin[,tlevs],tmp$w.dat[,"wr1"])
tmp.bin <- ScoreReview0(tmp$t.dat,tmp.bin[,tlevs],tmp$w.dat[,"wr1"])
}
tmp$bin <- tmp.bin[,tlevs]
pf<-apply(tmp$bin[,tlevs],1,paste,collapse="")
pf.sum<-summary(as.factor(pf),maxsum=500)
pf.sum<-pf.sum[order(pf.sum,decreasing=T)]
pf.ord<-pf.sum
pf.ord[]<-seq(1,length(pf.sum))
tmp$c.dat["pf"]<-as.factor(pf)
tmp$c.dat["pf.sum"]<-pf.sum[pf]
tmp$c.dat["pf.ord"]<-pf.ord[pf]
tmp$c.dat<-cbind(tmp$c.dat, tmp$bin)
tmp$scp <- tmp.scp
tmp$snr<-tmp.pcp$snr
tmp$blc <- tmp.pcp$blc
assign(RD.names[j],tmp)
save(list=RD.names[j],file=RD.f.names[j])
print(paste("DONE REVIEWING ",RD.names[j]," CHANGES SAVED TO FILE.",sep=""))
sel.i <- menu(rd.list,title="Select Data to review")
}
return(RD.f.names)
}
ScoreSelect <- function(t.dat,snr=NULL,m.names,wr,levs=NULL,lmain=""){
sf <- .8
library(RColorBrewer)
m.names <- intersect(m.names,names(t.dat))
lwds <- 3
if(length(m.names) == 0)
{stop("no named traces exist in trace dataframe.")}
xseq <- t.dat[,1]
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
dev.new(width=14,height=8)
m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)]
hbc <- length(m.names)*sf+min(2,max(t.dat[,m.names]))
hb <- ceiling(hbc)
plot(xseq,t.dat[,m.names[1]],ylim=c(-sf,hbc),xlab="Time (min)",ylab="Ratio with shift",main=lmain,type="n", xaxt="n")
axis(1, at=seq(0, length(t.dat[,1]), 5))
if(length(wr) > 0)
{
if(is.null(levs)){levs <- setdiff(unique(wr),"")}
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="lightgrey")
text(xseq[match(levs,wr)],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=1)
}
x.sel <- NULL
xs <-rep(0,(length(m.names)+4))
ys <- seq(1,length(m.names))*sf+t.dat[1,m.names]
ys <- as.vector(c(ys,c(2*sf,sf,0,-sf)))
# xs[(length(xs)-2):length(xs)] <- c(0,5,10)
p.names <- c(m.names,"ALL","NONE","FINISH","DROP")
drop.i <- length(p.names)
done.n <- drop.i-1
none.i <- drop.i-2
all.i <- drop.i-3
p.cols <- c(cols,c("black","black","black","black"))
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lwds)
if(!is.null(snr))
{
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i*sf,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i*sf,pch=0,col=cols[i])
}
}
text(x=xs,y=ys,labels=p.names,pos=2,cex=.7,col=p.cols)
points(x=xs,y=ys,pch=16,col=p.cols)
click.i <- 1
while(click.i < done.n)
{
click.i <- identify(xs,ys,n=1,plot=F)
if(click.i < (length(m.names)+1) & click.i > 0)
{
i <- click.i
if(is.element(i,x.sel))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lwds)
x.sel <- setdiff(x.sel,i)
}
else
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col="black",lwd=lwds)
#lines(xseq,t.dat[,m.names[i]]+i*sf,col="white",lwd=2,lty=2)
x.sel <- union(x.sel,i)
}
}
if(click.i == none.i)
{
x.sel <- NULL
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lwds)
}
}
if(click.i == all.i)
{
x.sel <- seq(1,length(m.names))
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col="black",lwd=lwds)
}
}
}
return(list(cells=m.names[x.sel],click = p.names[click.i]))
}
##review binary scoring file and toggle 1/0
##names of binary scoring bin must be in wr
##NO NAs
ScoreReview1 <- function(tdat,bin,wr,maxt=20){
subD <- function(xdat)#trace dat with names NO TIME COL
{
s.x <- apply(xdat,2,sum)
s.names <- names(xdat)[order(s.x)]
sub.list <- list()
sub.i <- seq(1,ncol(xdat),by=(maxt+1))
if(length(sub.i) > 1)
{
for(i in 1:(length(sub.i)-1))
{
sub.list[[i]] <- s.names[sub.i[i]:(sub.i[i]+maxt)]
}
}
i <- length(sub.i)
sub.list[[i]] <- s.names[sub.i[i]:(ncol(xdat))]
return(sub.list)
}
b.levs <- names(bin)[names(bin) != "drop"]
drop <- rep(0,nrow(bin))
if(is.element("drop",names(bin))){drop <- bin[,"drop"]}
names(drop) <- row.names(bin)
for(i in b.levs)
{
lmain <- paste("Scored as 1 for ",i,sep="")
b.1 <- row.names(bin)[bin[,i]==1 & drop==0]
if(length(b.1) > 0)
{
if(length(b.1) < maxt){sub1 <- list(b.1)}else{sub1 <- subD(tdat[wr==i,b.1])}
for(x.names in sub1)
{
no.names <- NULL
dropit <- TRUE
while(dropit==TRUE & (length(x.names) > 0))
{
inp <- ScoreSelect(tdat,,x.names,wr,i,lmain)
no.names <- inp[["cells"]]
dropit <- (inp[["click"]]=="DROP")
if(dropit){drop[no.names] <- 1;x.names <- setdiff(x.names,no.names)}
dev.off()
}
if(length(no.names) > 0)
{
bin[no.names,i] <- 0
}
}
}
}
bin["drop"] <- drop
return(bin)
}
ScoreReview0 <- function(tdat,bin,wr,maxt=20){
subD <- function(xdat)#trace dat with names NO TIME COL
{
s.x <- apply(xdat,2,sum)
s.names <- names(xdat)[order(s.x)]
sub.list <- list()
sub.i <- seq(1,ncol(xdat),by=(maxt+1))
if(length(sub.i) > 1)
{
for(i in 1:(length(sub.i)-1))
{
sub.list[[i]] <- s.names[sub.i[i]:(sub.i[i]+maxt)]
}
}
i <- length(sub.i)
sub.list[[i]] <- s.names[sub.i[i]:(ncol(xdat))]
return(sub.list)
}
b.levs <- names(bin)[names(bin) != "drop"]
drop <- rep(0,nrow(bin))
if(is.element("drop",names(bin))){drop <- bin[,"drop"]}
names(drop) <- row.names(bin)
for(i in b.levs)
{
lmain <- paste("Scored as 0 for ",i,sep="")
b.1 <- row.names(bin)[bin[,i]==0 & drop==0]
if(length(b.1) > 0)
{
if(length(b.1) < maxt){sub1 <- list(b.1)}else{sub1 <- subD(tdat[wr==i,b.1])}
for(x.names in sub1)
{
no.names <- NULL
dropit <- TRUE
while(dropit==TRUE & (length(x.names)>0))
{
inp <- ScoreSelect(tdat,,x.names,wr,i,lmain)
no.names <- inp[["cells"]]
dropit <- (inp[["click"]]=="DROP")
if(dropit){drop[no.names] <- 1;x.names <- setdiff(x.names,no.names)}
dev.off()
}
if(length(no.names) > 0)
{
bin[no.names,i] <- 1
}
}
}
}
bin["drop"] <- drop
return(bin)
}
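#Usage sketch for the two interactive review passes (assumes rd and a bin
#table as produced by bScore; each pass opens a clickable trace window):
# bin <- ScoreReview1(rd$t.dat, bin, rd$w.dat[,"wr1"])
# bin <- ScoreReview0(rd$t.dat, bin, rd$w.dat[,"wr1"])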
#Now let's create the function to put premade groups into binary columns
col_binner<-function(dat,cells){
cell.names<-select.list(names(cells),multiple=T)
cells<-cells[cell.names]
for(i in 1:length(cell.names)){
dat$bin[cell.names[i]]<-0
dat$bin[ cells[[i]], cell.names[i] ]<-1
}
return(dat)
}
# Create Binary Classes of cells
# ADVANCED: any work done is automatically saved to the dat
# <- is no longer required
# dat is the RD input
# levs is an optional argument; if left blank the function will look at the bin data.frame column names
# to let you manually select the columns you want to combine
combinner<-function(dat, levs=NULL, bin_it=T){
tmp<-dat$bin
if(is.null(levs)){
levs<-select.list(names(dat$bin), multiple=T)
}else{}
newcolnames<-paste(levs,collapse="___")
pf<-apply(tmp[,levs],1,paste, collapse="")
pf.sum<-summary(as.factor(pf), maxsum=1500)
pf.sum<-pf.sum[order(pf.sum, decreasing=T)]
pf.ord<-pf.sum
pf.ord[]<-seq(1,length(pf.sum))
dat$scp[newcolnames]<-as.factor(pf)
dat$bin<-tmp
cat("We have added this barcode to the scp dataframes","\n")
cat(newcolnames,"\n")
cat(sort(summary(dat$scp[newcolnames], maxsum=1500), T), sep="\n" )
if(bin_it){
dat <- pf_summary(dat,,ncol(dat$scp))
}
return(dat)
}
# Create Binary Classes of cells
pf.function<-function(dat, levs){
tmp<-dat
pf<-apply(tmp[,levs],1,paste, collapse="")
pf.sum<-summary(as.factor(pf), maxsum=1500)
pf.sum<-pf.sum[order(pf.sum, decreasing=T)]
pf.ord<-pf.sum
pf.ord[]<-seq(1,length(pf.sum))
tmp["pf"]<-as.factor(pf)
tmp["pf.sum"]<-pf.sum[pf]
tmp["pf.ord"]<-pf.ord[pf]
return(tmp)
}
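#Toy example of pf.function barcoding (toy binary table, not experiment data):
# bin.toy <- data.frame(k40=c(1,1,0), caps=c(0,1,0))
# pf.function(bin.toy, c("k40","caps"))
# #adds pf ("10","11","00"), pf.sum (barcode counts) and pf.ord (barcode rank)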
bin_to_group<-function(dat){
bin<-dat$bin
cat("
Select the collumns you would like to collect the rows that are scored as 1's.\n")
cols_sel<-select.list(names(dat$bin), multiple=T)
cell_group<-list()
for(i in 1:length(cols_sel)){
cell_group[[ cols_sel[i] ]]<-row.names(which(dat$bin[ cols_sel[i] ]==1,arr.ind=T))
}
return(cell_group)
}
#This takes a pf and lets you create a binary table based on the barcode
#created in pf.function
pf_summary<-function(dat, response_classes = NULL, pf_col = NULL){
if(is.null(pf_col)){
pf_col <- menu( colnames(dat$scp) )
}else{ pf_col <- pf_col }
if(is.null(response_classes)){
response_classes <- unique(dat$scp[,pf_col])
}else{}
for(i in 1:length(response_classes)){
response.types<-row.names(
which(
dat$scp[pf_col] == as.character(response_classes[i])
, arr.ind=T)
)
dat$bin[ as.character(response_classes[i]) ]<-0
dat$bin[ response.types, as.character(response_classes[i]) ]<-1
}
cat("I Have added new rows to your bin dataframe based off of this \nresponse combination","\n\n")
cat(colnames(dat$scp)[pf_col], sep="\n")
cat(as.character(response_classes), sep='\n')
return(dat)
}
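#Usage sketch: expand a pf barcode column of scp back into 1/0 bin columns
#(pf_col is the column index in dat$scp that holds the barcode):
# dat <- pf_summary(dat, pf_col=ncol(dat$scp))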
census_viewer<-function(dat, census){
cat("This is a function where you select your census of interest and then a cell type of interest, use the table /n to reference your choices here")
sel.i<-1
while(sel.i!=0){
cells_to_view<-select.list(names(census))
cell_type_to_reference<-select.list(names(dat$cell_types))
cells_of_interest<-intersect(dat$cell_types[[cell_type_to_reference]],census[[cells_to_view]])
if(length(cells_of_interest)>1){
tcd(dat, cells_of_interest)
}else{}
cat("Would you like to look at t another cell in your census? enter 1, if not enter 0 \n")
sel.i<-scan(n=1)
}
}
##############################################################################################
##############################################################################################
#tmp is an RD object, x.names are the cell ids to investigate
#pad is the extra amount of image to select around the cell e.g. 1 = at cell boundaries, 1.05 = 5% extra
#stain.name is the stain to display ("tritc","gfp","dapi") anything else defaults to yellow ROI boundaries
#title1 will be the title of the grid selection window.
SelectGrid <- function(tmp,x.names,pad=1.05,stain.name="area",title1="SelectRed",window.h=7,window.w=7,l.col="red",roi.img=NULL){
imgs <- grep("img",names(tmp),value=T)
imgs.yes <- rep(F,length(imgs))
for(i in 1:length(imgs)){imgs.yes[i] <- length(dim(tmp[[imgs[i]]])) == 3}
imgs <- imgs[imgs.yes]
if(length(imgs) < 1){stop("no image data")}
imgs.yes <- rep(F,length(imgs))
for(i in 1:length(imgs)){imgs.yes[i] <- dim(tmp[[imgs[i]]])[3] == 3}
imgs <- imgs[imgs.yes]
if(length(imgs) < 1){stop("no image data")}
img.rgb <- data.frame(name=imgs)
img.rgb["r"] <- 0
img.rgb["g"] <- 0
img.rgb["b"] <- 0
for(j in 1:nrow(img.rgb))
{
img.rgb[j,"r"] <- as.numeric(mean(tmp[[imgs[j]]][,,1]))
img.rgb[j,"g"] <- as.numeric(mean(tmp[[imgs[j]]][,,2]))
img.rgb[j,"b"] <- as.numeric(mean(tmp[[imgs[j]]][,,3]))
}
img.rgb["rgb"]<-rowSums(img.rgb[,2:4])
#set the channel to use and subtract the others. red=1, green=2, blue=3
#also select the best image.
img.red <- imgs[which.max((img.rgb[,"r"]-img.rgb[,"g"]-img.rgb[,"b"])/img.rgb[,"rgb"])]
#This is the old way for finding the green image.
img.green <- imgs[which.max((img.rgb[,"g"]-(img.rgb[,"r"]-img.rgb[,"b"]))/img.rgb[,"rgb"])]
#Find the green image by finding the red-negative images first
#red.negative<-which(img.rgb['r']==min(img.rgb['r']))
#then find the row in the red-negative matrix where the green is maximized
#img.green<-img.rgb[which.max(img.rgb[red.negative,'g']), 1]
img.blue <- imgs[which.max((img.rgb[,"b"]-(img.rgb[,"r"]-img.rgb[,"g"]))/img.rgb[,"rgb"])]
#img.yellow <- imgs[which.max(img.rgb[,"r"]+img.rgb[,"g"]-img.rgb[,"b"])]
if(is.null(roi.img)){img.yellow<-"img7"
}else{img.yellow<-roi.img}
if(is.element(stain.name,c("tritc","gfp","dapi","mcherry","cy5","tritc.immuno")))
{
sn <- grep(stain.name,names(tmp$c.dat),ignore.case=T,value=T)[1]
print(sn)
if(is.null(sn)){stop("no stain value data")}
x.names <- x.names[order(tmp$c.dat[x.names,sn])]
if(stain.name=="tritc")
{
img.name <- imgs[which.max((img.rgb[,"r"]-img.rgb[,"g"]-img.rgb[,"b"])/img.rgb[,"rgb"])]
chn <- 1
}
if(stain.name=="mcherry")
{
img.name <- imgs[which.max((img.rgb[,"r"]-img.rgb[,"g"]-img.rgb[,"b"])/img.rgb[,"rgb"])]
chn <- 1
}
if(stain.name=="cy5")
{
img.name <- imgs[which.max((img.rgb[,"r"]-img.rgb[,"g"]-img.rgb[,"b"])/img.rgb[,"rgb"])]
chn <- 1
}
if(stain.name=="gfp")
{
img.name <- imgs[which.max((img.rgb[,"g"]-(img.rgb[,"r"]+img.rgb[,"g"]))/img.rgb[,"rgb"])]
chn <- 2
}
if(stain.name=="dapi")
{
img.name <- imgs[which.max((img.rgb[,"b"]-img.rgb[,"r"]-img.rgb[,"g"])/img.rgb[,"rgb"])]
chn <- 3
}
if(stain.name=="tritc.immuno")
{
img.name <- imgs[which.max((img.rgb[,"b"]-img.rgb[,"r"]-img.rgb[,"g"])/img.rgb[,"rgb"])]
chn <- 3
}
img <- tmp[[img.name]]
img.dat <- img[,,chn]
for(i in setdiff(c(1,2,3),chn)){gt.mat <- img.dat < img[,,i];img.dat[gt.mat] <- 0}
}else{
img.name <- img.yellow
if(is.null(img.name)){img.name <- imgs[which.max(img.rgb[,"b"]+img.rgb[,"r"]-img.rgb[,"g"])]}
sn <- intersect(c("area","circularity"),names(tmp$c.dat))[1]
x.names <- x.names[order(tmp$c.dat[x.names,sn])]
img <- tmp[[img.name]]
img.dat <- (img[,,1]+img[,,2])/2
med.r <- .99
med.b <- .99
if(sum(as.vector(img[,,1]) > med.r)==0){med.r <- quantile(as.vector(img[,,1]),probs=c(.95))[1]}
if(sum(as.vector(img[,,2]) > med.b)==0){med.b <- quantile(as.vector(img[,,2]),probs=c(.95))[1]}
img.dat[img[,,1] < med.r] <- 0
img.dat[img[,,2] < med.b] <- 0
#single.img <- tmp$img4
}
#set up two devices
graphics.off()
dev.new(height=window.h,width=window.w,canvas="black",title="SingleCell")
dev.single <- dev.cur()
op <- par(mar=c(0,0,0,0))
plot(c(0,1),c(0,1),xaxt="n",yaxt="n",type="n",ylab="",xlab="")
dev.new(height=window.w,width=window.h,canvas="black",title=title1)
dev.grid <- dev.cur()
op <- par(mar=c(0,0,0,0))
plot(c(0,1),c(0,1),xaxt="n",yaxt="n",type="n",ylab="",xlab="")
xn <- length(x.names)
num.grid <- xn+3
nr <- floor(sqrt(num.grid))
nc <- ceiling((num.grid)/nr)
mtx <- max(nr,nc)
dx <- seq(0,1,length.out=(mtx+1))[-1]
sl <- (dx[2]-dx[1])/2
dx <- dx-sl
all.x <- as.vector(matrix(rep(dx,mtx),byrow=F,ncol=mtx))
all.y <- as.vector(matrix(rep(dx,mtx),nrow=mtx,byrow=T))
zf<-(sqrt(tmp$c.dat[x.names,"area"])/pi)*pad
x <- tmp$c.dat[x.names,"center.x"]
y <- tmp$c.dat[x.names,"center.y"]
img.dimx<-dim(tmp$img1)[2]
img.dimy<-dim(tmp$img1)[1]
zf[zf > x] <- x[zf > x]
zf[zf > y] <- y[zf > y]
zf[x+zf > img.dimx] <- img.dimx-x[x+zf > img.dimx]
zf[y+zf > img.dimy] <- img.dimy-y[y+zf > img.dimy]
img.left<- x-zf
img.left[img.left < 1] <- 1
img.right<- x+zf
img.right[img.right > img.dimx] <- img.dimx
img.top<- y-zf
img.top[img.top < 1] <- 1
img.bottom<-y+zf
img.bottom[img.bottom > img.dimy] <- img.dimy
img.bottom[img.top>=img.bottom & img.top<img.dimy] <- img.top[img.top>=img.bottom] + 1
img.right[img.left>=img.right & img.left<img.dimx] <- img.left[img.left>=img.right] + 1
img.top[img.top == img.dimy] <- img.dimy-1
img.left[img.left == img.dimx] <- img.dimx-1
for(i in 1:xn)
{
xl <- all.x[i]-sl*.9
xr <- all.x[i]+sl*.9
xt <- all.y[i]-sl*.9
xb <- all.y[i]+sl*.9
#rasterImage(tmp$img1[img.bottom[i]:img.top[i],img.left[i]:img.right[i],],xl,xb,xr,xt)
rasterImage(img.dat[img.bottom[i]:img.top[i],img.left[i]:img.right[i]],xl,xb,xr,xt)
}
fg <- rep("black",length(all.x))
fg[1:xn] <- "grey"
cexr <- sl/.04
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexr)
text(all.x[xn+1],all.y[xn+1],"Done",col="white",cex= cexr)
text(all.x[xn+2],all.y[xn+2],"All",col="white",cex= cexr)
text(all.x[xn+3],all.y[xn+3],"None",col="white",cex= cexr)
#first click defines the split
all.sel <- rep(0,xn)
names(all.sel) <- x.names
not.done=TRUE
click1 <- locator(n=1)
dist <- sqrt((click1$x[[1]]-all.x)^2 + (click1$y[[1]]-all.y)^2)
sel.i <- which.min(dist)
if(sel.i == xn+1){not.done=FALSE;return(all.sel)}
if(sel.i == xn+2){all.sel[1:xn] <- 1;fg[1:xn] <- l.col}
if(sel.i == xn+3){all.sel[1:xn] <- 0;fg[1:xn] <- "grey"}
if(sel.i <= xn)
{
dev.set(which=dev.single)
rasterImage(tmp[[img.red]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],0,0,.5,.5,interpolate=F)
rasterImage(tmp[[img.green]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],0,.5,.5,1,interpolate=F)
rasterImage(tmp[[img.blue]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],.5,0,1,.5,interpolate=F)
rasterImage(tmp[[img.yellow]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],.5,.5,1,1,interpolate=F)
abline(h=.5,col="grey")
abline(v=.5,col="grey")
dev.set(which=dev.grid)
neg.i <- 1:max((sel.i-1),1)
all.sel[neg.i] <- 0
pos.i <- sel.i:xn
all.sel[pos.i] <- 1
fg[neg.i] <- "grey"
fg[pos.i] <- l.col
}
while(not.done)
{
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexr)
click1 <- locator(n=1)
dist <- sqrt((click1$x[[1]]-all.x)^2 + (click1$y[[1]]-all.y)^2)
sel.i <- which.min(dist)
if(sel.i == xn+1){not.done=FALSE;return(all.sel)}
if(sel.i == xn+2){all.sel[1:xn] <- 1;fg[1:xn] <- l.col}
if(sel.i == xn+3){all.sel[1:xn] <- 0;fg[1:xn] <- "grey"}
if(sel.i <= xn)
{
dev.set(which=dev.single)
rasterImage(tmp[[img.red]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],0,0,.5,.5,interpolate=F)
rasterImage(tmp[[img.green]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],0,.5,.5,1,interpolate=F)
rasterImage(tmp[[img.blue]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],.5,0,1,.5,interpolate=F)
rasterImage(tmp[[img.yellow]][img.bottom[sel.i]:img.top[sel.i],img.left[sel.i]:img.right[sel.i],],.5,.5,1,1,interpolate=F)
abline(h=.5,col="grey")
abline(v=.5,col="grey")
dev.set(which=dev.grid)
if(all.sel[sel.i] ==0)
{
all.sel[sel.i] <- 1
fg[sel.i] <- l.col
}
else
{
all.sel[sel.i] <- 0
fg[sel.i] <- "grey"
}
}
}
}
#three tests: Drop (confirm), Red (confirm) and Green (confirm)
#returns an RD object with the changes made to c.dat and bin
#tmp is an RD object with images, "tritc.mean" and "gfp.mean" in c.dat
#x.names is a list of specific cells to review
#pad is the expansion factor about the center of the cell.
#subset.n is number of cells to review at once instead of all at once.
ROIreview <- function(tmp,x.names=NULL,pad=2,wh=7,hh=7,subset.n=500, roi.img=NULL){
print(names(tmp$c.dat)[1:20])
choices<-select.list(
title="Score what?",
choices=c("CGRP.GFP", "IB4.TRITC", "IB4.CY5", "NF200.TRITC", "MCHERRY", "Drops"),
multiple=T)
print("how to display ROI")
if(is.null(roi.img)){roi.img<-image.selector(tmp)}else{roi.img<-roi.img}
dice <- function(x, n,min.n=10)
{
x.lst <- split(x, as.integer((seq_along(x) - 1) / n))
x.i <- length(x.lst)
if(length(x.lst[[x.i]]) < min.n & x.i > 1)
{
x.lst[[x.i-1]] <- c(x.lst[[x.i-1]],x.lst[[x.i]])
x.lst <- x.lst[1:(x.i-1)]
}
return(x.lst)
}
if(is.null(x.names)){x.names <- row.names(tmp$c.dat)}
x.names <- x.names[tmp$bin[x.names,"drop"]==0]
if(is.na(subset.n) | subset.n > length(x.names)){subset.n=length(x.names)}
subset.list <- dice(x.names,subset.n,subset.n/4)
for(x.names in subset.list)
{
#drop cells
if(length(grep("TRUE",choices=="Drops"))>0){
d.names <- SelectGrid(tmp,x.names,pad,"area","SelectDrops",window.h=hh,window.w=wh,roi.img=roi.img)
d1.names <- names(d.names[d.names==1])
if(length(d1.names) > 5)
{
d1.names <- SelectGrid(tmp,d1.names,pad,"area","ConfirmDrops",window.h=hh,window.w=wh,roi.img=roi.img)
d1.names <- names(d1.names)[d1.names==1]
if(length(d1.names) > 0){tmp$bin[d1.names,"drop"] <- 1;x.names <- setdiff(x.names,d1.names)}
}
}else{}
#Red Cells
if(length(grep("TRUE",choices=="IB4.TRITC"))>0){
r.names <- SelectGrid(tmp,x.names,pad,"tritc","SelectRed",window.h=hh,window.w=wh,roi.img=roi.img)
r1.names <- names(r.names[r.names==1])
q1 <- 1:floor(length(r1.names)*.25)
r2.names <- r1.names[q1]
if(length(r2.names) > 5)
{
r2.names <- SelectGrid(tmp,r2.names,pad*2,"tritc","ConfirmRed",window.h=hh,window.w=wh,roi.img=roi.img)
r.names[names(r2.names)] <- r2.names
}
tmp$bin[names(r.names),"tritc.bin"] <- r.names
}else{}
#Red Cells
if(length(grep("TRUE",choices=="IB4.CY5"))>0){
r.names <- SelectGrid(tmp,x.names,pad,"cy5","SelectRed",window.h=hh,window.w=wh,roi.img=roi.img)
r1.names <- names(r.names[r.names==1])
q1 <- 1:floor(length(r1.names)*.25)
r2.names <- r1.names[q1]
if(length(r2.names) > 5)
{
r2.names <- SelectGrid(tmp,r2.names,pad*2,"cy5","ConfirmRed",window.h=hh,window.w=wh,roi.img=roi.img)
r.names[names(r2.names)] <- r2.names
}
tmp$bin[names(r.names),"cy5.bin"] <- r.names
}else{}
#Green Cells
if(length(grep("TRUE",choices=="CGRP.GFP"))>0){
r.names <- SelectGrid(tmp,x.names,pad,"gfp","SelectGreen",window.h=hh,window.w=wh,l.col="green",roi.img=roi.img)
r1.names <- names(r.names[r.names==1])
q1 <- 1:floor(length(r1.names)*.25)
r2.names <- r1.names[q1]
if(length(r2.names) > 5)
{
r2.names <- SelectGrid(tmp,r2.names,pad*2,"gfp","ConfirmGreen",window.h=hh,window.w=wh,l.col="green",roi.img=roi.img)
r.names[names(r2.names)] <- r2.names
}
tmp$bin[names(r.names),"gfp.bin"] <- r.names
}else{}
#NF200
if(length(grep("TRUE",choices=="NF200.TRITC"))>0){
r.names <- SelectGrid(tmp,x.names,pad,"tritc.immuno","SelectBlue",window.h=hh,window.w=wh,l.col="blue",roi.img=roi.img)
r1.names <- names(r.names[r.names==1])
q1 <- 1:floor(length(r1.names)*.25)
r2.names <- r1.names[q1]
if(length(r2.names) > 5)
{
r2.names <- SelectGrid(tmp,r2.names,pad*2,"tritc.immuno","ConfirmBlue",window.h=hh,window.w=wh,l.col="blue",roi.img=roi.img)
r.names[names(r2.names)] <- r2.names
}
tmp$bin[names(r.names),"tritc.bin"] <- r.names
}else{}
#MCHERRY
if(length(grep("TRUE",choices=="MCHERRY"))>0){
r.names <- SelectGrid(tmp,x.names,pad,"mcherry","SelectRed",window.h=hh,window.w=wh,l.col="red",roi.img=roi.img)
r1.names <- names(r.names[r.names==1])
q1 <- 1:floor(length(r1.names)*.25)
r2.names <- r1.names[q1]
if(length(r2.names) > 5)
{
r2.names <- SelectGrid(tmp,r2.names,pad*2,"mcherry","ConfirmRed",window.h=hh,window.w=wh,l.col="red",roi.img=roi.img)
r.names[names(r2.names)] <- r2.names
}
tmp$bin[names(r.names),"mcherry.bin"] <- r.names
}else{}
}
graphics.off()
return(tmp)
}
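#Usage sketch (assumes an RD object named rd with img1..imgN image arrays,
#stain means in c.dat and a bin data.frame with a "drop" column; fully interactive):
# rd <- ROIreview(rd, pad=2, subset.n=500)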
#three tests: Drop (confirm), Red (confirm) and Green (confirm)
#returns an RD object with the changes made to c.dat and bin
#tmp is an RD object with images, "tritc.mean" and "gfp.mean" in c.dat
#x.names is a list of specific cells to review
#pad is the expansion factor about the center of the cell.
#subset.n is number of cells to review at once instead of all at once.
ROIreview2 <- function(tmp,x.names=NULL,pad=2,wh=7,hh=7,subset.n=NA){
dice <- function(x, n,min.n=10)
{
x.lst <- split(x, as.integer((seq_along(x) - 1) / n))
x.i <- length(x.lst)
if(length(x.lst[[x.i]]) < min.n & x.i > 1)
{
x.lst[[x.i-1]] <- c(x.lst[[x.i-1]],x.lst[[x.i]])
x.lst <- x.lst[1:(x.i-1)]
}
return(x.lst)
}
if(is.null(x.names)){x.names <- row.names(tmp$c.dat)}
x.names <- x.names[tmp$bin[x.names,"drop"]==0]
if(is.na(subset.n) | subset.n > length(x.names)){subset.n=length(x.names)}
subset.list <- dice(x.names,subset.n,subset.n/4)
for(x.names in subset.list)
{
#drop cells
d.names <- SelectGrid(tmp,x.names,pad,"area","SelectDrops",window.h=hh,window.w=wh)
d1.names <- names(d.names[d.names==1])
if(length(d1.names) > 5)
{
d1.names <- SelectGrid(tmp,d1.names,pad,"area","ConfirmDrops",window.h=hh,window.w=wh)
d1.names <- names(d1.names)[d1.names==1]
if(length(d1.names) > 0){tmp$bin[d1.names,"drop"] <- 1;x.names <- setdiff(x.names,d1.names)}
}
r.names <- SelectGrid(tmp,x.names,pad,"tritc","SelectRed",window.h=hh,window.w=wh)
r1.names <- names(r.names[r.names==1])
q1 <- 1:floor(length(r1.names)*.25)
r2.names <- r1.names[q1]
if(length(r2.names) > 5)
{
r2.names <- SelectGrid(tmp,r2.names,pad*2,"tritc","ConfirmRed",window.h=hh,window.w=wh)
r.names[names(r2.names)] <- r2.names
}
tmp$bin[names(r.names),"tritc.bin"] <- r.names
#r.names <- SelectGrid(tmp,x.names,pad,"gfp","SelectGreen",window.h=hh,window.w=wh,l.col="green")
#r1.names <- names(r.names[r.names==1])
#q1 <- 1:floor(length(r1.names)*.25)
#r2.names <- r1.names[q1]
#if(length(r2.names) > 5)
#{
# r2.names <- SelectGrid(tmp,r2.names,pad*2,"gfp","ConfirmGreen",window.h=hh,window.w=wh,l.col="green")
# r.names[names(r2.names)] <- r2.names
#}
#tmp$bin[names(r.names),"gfp.bin"] <- r.names
}
return(tmp)
}
##############################################################################################
# Drop Scoring
##############################################################################################
# Functions to allow for dropping of cells. Main function is DropTestMulti
# Drops based on spikey traces, out-of-window peaks, and baseline shifts
SpikyNorm <- function(xdat){
shapfunc <- function(x){shapiro.test(x)$p.value}
i1 <- seq(1,nrow(xdat))
s1 <- xdat[c(1,i1[-length(i1)]),] #shift 1 time interval forward
s2 <- xdat[c(i1[-1],i1[length(i1)]),] #shift 1 time interval back
s3 <- xdat-((s1+s2)/2)
s.x <- apply(abs(s3),2,shapfunc)
return(s.x)
}
DropPick <- function(tdat,bin,wr,maxt=10,s.x=NULL,lmain="Select Cells to drop"){
#order traces by spikey trait.
#allow drop selection until 0 selected.
#spikes are defined as single point deviations from previous and next.
subD <- function(s.x)#trace dat with names NO TIME COL
{
s.names <- names(s.x)[order(s.x)]
sub.list <- list()
sub.i <- seq(1,length(s.x),by=(maxt+1))
if(length(sub.i) > 1)
{
for(i in 1:(length(sub.i)-1))
{
sub.list[[i]] <- s.names[sub.i[i]:(sub.i[i]+maxt)]
}
}
i <- length(sub.i)
sub.list[[i]] <- s.names[sub.i[i]:(length(s.x))]
return(sub.list)
}
b.levs <- c("drop") #names(bin)[names(bin) != "drop"]
drop <- rep(0,nrow(bin))
if(is.element("drop",names(bin))){drop <- bin[,"drop"]}
names(drop) <- row.names(bin)
for(i in b.levs)
{
b.1 <- row.names(bin)[bin[,i]==0 & drop==0]
if(is.null(s.x)){s.x <- SpikyNorm(tdat[,-1])}
if(length(b.1) > 0)
{
s.x <-s.x[b.1]
if(length(b.1) < maxt){sub1 <- list(b.1)}else{sub1 <- subD(s.x)}
for(x.names in sub1)
{
no.names <- NULL
dropit <- TRUE
nd <- 0
while(dropit==TRUE & (length(x.names)>0))
{
inp <- ScoreSelect(tdat,,x.names,wr,,lmain)
no.names <- inp[["cells"]]
dropit <- (inp[["click"]]=="DROP")
if(dropit){drop[no.names] <- 1;x.names <- setdiff(x.names,no.names);nd=1}
dev.off()
}
if(length(no.names) > 0)
{
drop[no.names] <- 1
}
if(length(no.names)==0 & nd==0)
{break}
}
}
}
return(drop)
}
DropTestList <- function(tmp){
#tmp <- get(rd.name)
x1 <- DropPick(tmp$t.dat,tmp$bin,tmp$w.dat[,"wr1"],lmain="Select spikey traces to Drop") #defaults to spiky test
tmp$bin[,"drop"] <- x1
x1 <- DropPick(tmp$t.dat,tmp$bin,tmp$w.dat[,"wr1"],s.x= -apply(tmp$scp[,"snr.owc",drop=F],1,mean),lmain="Select out of window peaks to Drop")
tmp$bin[,"drop"] <- x1
x1 <- DropPick(tmp$t.dat,tmp$bin,tmp$w.dat[,"wr1"],s.x= -apply(tmp$scp[,"bl.diff",drop=F],1,mean),lmain="Select Baseline Drops")
tmp$bin[,"drop"] <- x1
if(sum(x1 > 0)) #check highest correlations with dropped cells.
{
d.names <- names(x1[x1==1])
ct <- cor(tmp$t.dat[,-1])
mn <- -apply(ct[,d.names],1,max)
x1 <- DropPick(tmp$t.dat,tmp$bin,tmp$w.dat[,"wr1"],s.x= mn,lmain="Correlated with other drops")
tmp$bin[,"drop"] <- x1
}
return(tmp)
}
DropTestMulti <- function(dir.name=NULL,snr.lim=4,hab.lim=.05,sm=3,ws=30,review=F){
if(is.null(dir.name)){dir.name <- getwd()}
setwd(dir.name)
f.names <- list.files(pattern="RD.*\\.Rdata$")
if(length(f.names) == 0){stop("no RD...Rdata files in given directory")}
rd.list <- sub("\\.Rdata*","",f.names)
RD.names <- rd.list #paste(rd.list,".b",sep="")
RD.f.names <- paste(RD.names,".Rdata",sep="")
sel.i <- menu(rd.list,title="Select Data to review")
while(sel.i != 0)
{
j <- sel.i
load(f.names[j])
i <- rd.list[j]
tmp <- get(i)
tlevs <- c(as.character(unique(tmp$w.dat[,"wr1"])[-1]),"drop")
if(is.null(tmp$bin))
{
tmp.pcp <- ProcConstPharm(tmp,sm,ws,"TopHat")
tmp.scp <- ScoreConstPharm(tmp,tmp.pcp$blc,tmp.pcp$snr,tmp.pcp$der,snr.lim,hab.lim,sm)
tmp.bin <- bScore(tmp.pcp$blc,tmp.pcp$snr,snr.lim,hab.lim,tlevs,tmp$w.dat[,"wr1"])
tmp.bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
}
else
{
tmp.pcp <- ProcConstPharm(tmp,sm,ws,"TopHat")
tmp.scp <- ScoreConstPharm(tmp,tmp.pcp$blc,tmp.pcp$snr,tmp.pcp$der,snr.lim,hab.lim,sm)
tmp.bin <- tmp$bin
tmp.scp <- tmp$scp
#tmp.blc <- tmp$blc
}
tmp$bin <- tmp.bin[,tlevs]
tmp$scp <- tmp.scp
#tmp$blc <- tmp.blc
tmp <- DropTestList(tmp)
if(review)
{
tmp.bin <- ScoreReview1(tmp$t.dat,tmp.bin[,tlevs],tmp$w.dat[,"wr1"])
tmp.bin <- ScoreReview0(tmp$t.dat,tmp.bin[,tlevs],tmp$w.dat[,"wr1"])
tmp$bin <- tmp.bin[,tlevs]
}
pf<-apply(tmp$bin[,tlevs],1,paste,collapse="")
pf.sum<-summary(as.factor(pf),maxsum=500)
pf.sum<-pf.sum[order(pf.sum,decreasing=T)]
pf.ord<-pf.sum
pf.ord[]<-seq(1,length(pf.sum))
tmp$c.dat["pf"]<-as.factor(pf)
tmp$c.dat["pf.sum"]<-pf.sum[pf]
tmp$c.dat["pf.ord"]<-pf.ord[pf]
tmp$scp <- tmp.scp
tmp$snr<-tmp.pcp$snr
tmp$blc <- tmp.pcp$blc
assign(RD.names[j],tmp)
save(list=RD.names[j],file=RD.f.names[j])
print(paste("DONE REVIEWING ",RD.names[j]," CHANGES SAVED TO FILE.",sep=""))
print(paste("Dropped Cells:", table(tmp$bin[,"drop"])[2]))
sel.i <- menu(rd.list,title="Select Data to review")
}
return(RD.f.names)
}
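#Usage sketch (hedged): assumes the working directory holds RD*.Rdata experiment
#files; review=T adds the ScoreReview passes after the drop tests.
if(FALSE){
	reviewed <- DropTestMulti(dir.name=getwd(), snr.lim=4, hab.lim=.05, review=T)
	print(reviewed)  #the .Rdata files that were rescored and saved
}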
##############################################################################################
##############################################################################################
##############################################################################################
# No Scoring, only processing
##############################################################################################
Trace.prep<-function(dir.name=NULL,snr.lim=4,hab.lim=.05,sm=3,ws=30,blc="SNIP"){
if(is.null(dir.name)){dir.name <- getwd()}
setwd(dir.name)
f.names <- list.files(pattern="RD.*\\.Rdata$")
if(length(f.names) == 0){stop("no RD...Rdata files in given directory")}
rd.list <- sub("\\.Rdata*","",f.names)
RD.names <- rd.list #paste(rd.list,".b",sep="")
RD.f.names <- paste(RD.names,".Rdata",sep="")
sel.i <- menu(rd.list,title="Select Data to review")
while(sel.i != 0)
{
j <- sel.i
load(f.names[j])
i <- rd.list[j]
tmp <- get(i)
tlevs<-c(setdiff(unique(as.character(tmp$w.dat[,2])),""),"drop")
tmp.pcp <- ProcConstPharm(tmp,sm,ws,blc)
tmp.scp <- ScoreConstPharm(tmp,tmp.pcp$blc,tmp.pcp$snr, tmp.pcp$der,snr.lim,hab.lim,sm)
tmp.bin <- bScore(tmp.pcp$blc,tmp.pcp$snr,snr.lim,hab.lim,tlevs,tmp$w.dat[,"wr1"])
tmp.bin["drop"] <- 0 #maybe try to generate some drop criteria from the scp file.
pf<-apply(tmp.bin[,tlevs],1,paste,collapse="")
pf.sum<-summary(as.factor(pf),maxsum=500)
pf.sum<-pf.sum[order(pf.sum,decreasing=T)]
pf.ord<-pf.sum
pf.ord[]<-seq(1,length(pf.sum))
tmp$c.dat["pf"]<-as.factor(pf)
tmp$c.dat["pf.sum"]<-pf.sum[pf]
tmp$c.dat["pf.ord"]<-pf.ord[pf]
tmp$bin<-tmp.bin
tmp$scp <- tmp.scp
tmp$snr<-tmp.pcp$snr
tmp$blc <- tmp.pcp$blc
tmp$der<-tmp.pcp$der
assign(RD.names[j],tmp)
save(list=RD.names[j],file=RD.f.names[j])
print(paste("DONE REVIEWING ",RD.names[j]," CHANGES SAVED TO FILE.",sep=""))
sel.i <- menu(rd.list,title="Select Data to review")
}
return(RD.f.names)
}
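#Usage sketch (hedged): processes every RD*.Rdata file in the directory without
#interactive scoring; blc picks the baseline-correction method passed to ProcConstPharm.
if(FALSE){
	prepped <- Trace.prep(dir.name=getwd(), sm=3, ws=30, blc="SNIP")
}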
#this is not complete
#condi is the indicator column for the conditional frequency table
#this is bad
#####TODO: add a selection step to include/exclude experiments
#####TODO: conditional expression tables.
SummarizeMulti <- function(dir.name=NULL,condi=1,recur=F){
if(is.null(dir.name)){stop("not a directory")}
setwd(dir.name)
f.names <- list.files(pattern=".*RD.*\\.Rdata$",recursive=recur,full.names=T)
f.names <- select.list(f.names,multiple=T,title="Select Experiments For Analysis")
if(length(f.names) == 0){stop("no RD...Rdata files in given directory")}
for(i in f.names){load(i)}
rd.list <- sub("\\.Rdata*","",basename(f.names))
RD.names <- ls(pat="^RD")
RD.names <- intersect(rd.list,RD.names)
if(!setequal(RD.names,rd.list)){stop("dataframes loaded do not match files listed in directory")}
RD.f.names <- paste(RD.names,".Rdata",sep="")
i <- rd.list[1]
tmp <- get(i)
if(sum(is.element(c("bin","scp"),names(tmp))) < 2){stop("Data frame has not been scored")}
	if(all(names(tmp$bin)[c(1,2)]==c("tot","sd"))) #all() gives a length-one condition; a 2-long vector in if() is an error in current R
	{tmp$bin <- tmp$bin[,-c(1,2)]}
freq.tab <- data.frame(mean=apply(tmp$bin[tmp$bin[,"drop"]==0,],2,mean))
kfreq.tab <- data.frame(mean=apply(tmp$bin[tmp$bin[,"drop"]==0 & tmp$bin[,condi]==1,],2,mean))
b.names <- row.names(freq.tab)[row.names(freq.tab) != "drop"]
q.names <- paste(b.names,".max",sep="")
resp.tab <- data.frame(mean=apply(tmp$scp[tmp$bin[,"drop"]==0,q.names],2,mean))
for(rn in row.names(resp.tab)){resp.tab[rn,"mean"] <- mean(tmp$scp[tmp$bin[,"drop"]==0 & tmp$bin[,sub("\\.max$","",rn)]==1,rn],na.rm=T)}
pf.tot <- data.frame(str = apply(tmp$bin[tmp$bin[,"drop"]==0,names(tmp$bin)!="drop"],1,paste,collapse=""))
pf.tot["exp"] <- i
for(j in 2:length(RD.names))
{
i <- rd.list[j]
tmp <- get(i)
		if(all(names(tmp$bin)[c(1,2)]==c("tot","sd")))
		{tmp$bin <- tmp$bin[,-c(1,2)]}
m1 <- apply(tmp$bin[tmp$bin[,"drop"]==0,],2,mean)
freq.tab[i] <- m1[row.names(freq.tab)]
m2 <- apply(tmp$bin[tmp$bin[,"drop"]==0 & tmp$bin[,condi]==1,],2,mean)
kfreq.tab[i] <- m2[row.names(kfreq.tab)]
resp.tab[i] <- NA
for(rn in intersect(row.names(resp.tab),names(tmp$scp))){resp.tab[rn,i] <- mean(tmp$scp[tmp$bin[,"drop"]==0 & tmp$bin[,sub("\\.max$","",rn)]==1,rn],na.rm=T)}
pf.tmp <- data.frame(str = apply(tmp$bin[tmp$bin[,"drop"]==0,names(tmp$bin)!="drop"],1,paste,collapse=""))
pf.tmp["exp"] <- i
pf.tot <- rbind(pf.tot,pf.tmp)
}
names(freq.tab)[1] <- rd.list[1]
names(kfreq.tab)[1] <- rd.list[1]
names(resp.tab)[1] <- rd.list[1]
pf.tab <- table(pf.tot[,1],pf.tot[,2])
return(list(freq.tab=freq.tab,kfreq.tab=kfreq.tab,resp.tab=resp.tab,pf.tab=pf.tab))
}
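#Usage sketch (hedged): condi indexes the bin column used for the conditional
#(kfreq) table; which column is meaningful depends on the experiment layout.
if(FALSE){
	summ <- SummarizeMulti(dir.name=getwd(), condi=1)
	summ$freq.tab  #per-experiment response frequencies
	summ$pf.tab    #response-pattern by experiment contingency table
}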
##############################################################################################
# Stacked traces Plotting
##############################################################################################
LinesSome <- function(t.dat,snr=NULL,m.names,wr=NULL,levs=NULL,lmain="",pdf.name=NULL,morder=NULL,subset.n=5,sf=.25,lw=2,bcex=.6){
library(cluster)
if(length(m.names) < subset.n)
{stop("group size lower than subset size")}
pam5 <- pam(t(t.dat[,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
if(!is.null(morder))
{
names(morder) <- m.names
morder <- morder[s.names]
}
pam5.tab <- table(pam5$clustering)
tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
LinesEvery(t.dat,snr,s.names,wr,levs,lmain,pdf.name,morder,rtag=tags,sf,lw,bcex)
return(pam5$clustering)
}
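#Usage sketch (hedged): clusters a large cell list down to subset.n medoid traces
#and plots only those; assumes `tmp.rd` is an RD list as elsewhere in this file.
if(FALSE){
	cells <- row.names(tmp.rd$c.dat)
	clus <- LinesSome(tmp.rd$t.dat, m.names=cells, wr=tmp.rd$w.dat[,"wr1"], subset.n=5)
	table(clus)  #how many cells sit behind each plotted medoid
}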
LinesEvery <- function(t.dat,snr=NULL,m.names,wr,levs=NULL,lmain="",pdf.name=NULL,morder=NULL,rtag=NULL,sf=.7,lw=3,bcex=1,p.ht=7,p.wd=10){
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
library(RColorBrewer)
if(length(m.names) > 0)
{
if(is.null(pdf.name))
{dev.new(width=14,height=8)}
else
{if(length(grep("\\.pdf",pdf.name))>0){pdf(pdf.name,width=p.wd,height=p.ht)}else{png(pdf.name,width=1200,height=600)}}#pdf(pdf.name,width=28,height=16)}
if(is.null(morder))
{
m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
morder <- m.pca$x[,1] * c(1,-1)[(sum(m.pca$rot[,1]) < 0)+1]
#m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)]
}
m.names <- m.names[order(morder)]
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
#cols <- rainbow(length(m.names),start=.55)
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
par(mar=c(4,1,4,1))
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)+1.5))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
if(!is.null(wr))
{
if(!is.null(levs))
{
#levs <- setdiff(unique(wr),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col=NA,border="darkgrey")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
text(cpx,rep(c(sf/2,sf),length=length(levs)),levs,pos=1,cex=bcex)#,offset=-offs
}
}
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf, cex=.5,col=cols[i],lty=1, lwd=lw)
points(xseq,t.dat[,m.names[i]]+i*sf,pch=15, cex=.5,col=cols[i])
if(!is.null(snr))
{
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
# pp3 <- dat$crr[,m.names[i]] > 0
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
# points(xseq[pp3],t.dat[pp3,m.names[i]]+i/10,pch=2,col=cols[i],cex=.5)
}
}
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names,cex=.8*bcex,col=cols,pos=2)
if(!is.null(rtag))
{
rtag <- rtag[order(morder)]
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag,cex=.8*bcex,col=cols,pos=4)
}
if(!is.null(pdf.name))
{dev.off()}
}
}
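#Usage sketch (hedged): stacked plot of a handful of traces with window-region
#boxes; levs must be passed explicitly if the region rectangles are wanted.
if(FALSE){
	cells <- row.names(tmp.rd$c.dat)
	LinesEvery(tmp.rd$t.dat, m.names=cells[1:10], wr=tmp.rd$w.dat[,"wr1"],
		levs=setdiff(unique(tmp.rd$w.dat[,"wr1"]),""), lmain="First 10 cells")
}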
#Simplified LinesEvery which only needs 2 entries; RD and m.names.
LinesEvery.2 <- function(dat,m.names, blc=FALSE, snr=NULL,lmain="",cols=NULL, levs=NULL,m.order=NULL,rtag=NULL,rtag2=NULL,rtag3=NULL, plot.new=TRUE,sf=.7,lw=.9,bcex=.8,p.ht=7,p.wd=10){
if(blc){t.dat<-dat$blc}
else{t.dat<-dat$t.dat}
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
if(plot.new){dev.new(width=10,height=6)}
library(RColorBrewer)
	## Tool for Sorting cells based on c.dat column name
if(length(m.names) > 0)
{
if(!is.null(m.order)){
tmp<-dat$c.dat[m.names,]
n.order<-tmp[order(tmp[,m.order]),]
m.names <- row.names(n.order)
}
else{
m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
morder <- m.pca$x[,1] * c(1,-1)[(sum(m.pca$rot[,1]) < 0)+1]
			#m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)] #redundant: same permutation as the morder sort below, and applying both scrambles the order
			m.names <- m.names[order(morder)]
}
	## Tool for color labeling
if(is.null(cols)){
#cols <- rainbow(length(m.names),start=.55)
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
## Tool for single color labeling
else {cols<-cols
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
#par(xpd=TRUE)
par(mar=c(4,2,4,3))
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
axis(2, 1.4, )
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names,cex=.8*bcex,col=cols,pos=2)
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
text(dat$t.dat[match(levs,wr),"Time"],rep(c(sf/2,sf),length=length(levs)),levs,pos=4,offset=0,cex=bcex)#,offset=-offs}
## Tool for adding line and point plot for graph
for(i in 1:length(m.names)){
lines(xseq,t.dat[,m.names[i]]+i*sf, lty=1,col=cols[i],lwd=lw)
points(xseq,t.dat[,m.names[i]]+i*sf,pch=16,col=cols[i],cex=.3)
if(!is.null(snr)){
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
}
}
}
	## Tool for adding cell data labeling to end of graph
	## (fallback chains: a later tag only overwrites an earlier one when its column exists)
	if(!is.null(dat$c.dat[m.names, "area"])){rtag<-"area";rtag <- round(dat$c.dat[m.names,rtag], digits=0)}
	else{rtag<-NULL}
	if(!is.null(dat$c.dat[m.names, "CGRP"])){rtag2<-"CGRP";rtag2 <- round(dat$c.dat[m.names,rtag2], digits=0)}
	else{rtag2<-NULL}
	if(!is.null(dat$c.dat[m.names, "mean.gfp"])){rtag2 <- round(dat$c.dat[m.names,"mean.gfp"], digits=0)}
	if(!is.null(dat$c.dat[m.names, "IB4"])){rtag3<-"IB4";rtag3 <- round(dat$c.dat[m.names,rtag3], digits=0)}
	else{rtag3<-NULL}
	if(!is.null(dat$c.dat[m.names, "mean.tritc"])){rtag3 <- round(dat$c.dat[m.names,"mean.tritc"], digits=0)}
if(!is.null(dat$c.dat[m.names, "mean.gfp.2"])){rtag4<-"mean.gfp.2";rtag4 <- round(dat$c.dat[m.names,rtag4], digits=0)}
else{rtag4<-NULL}
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag,cex=.9*bcex,col=cols,pos=4)
text(rep(max(xseq)*1.04,length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag2,cex=.9*bcex,col="darkgreen",pos=4)
text(rep(max(xseq)*1.08,length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag3,cex=.9*bcex,col="red",pos=4)
}
}
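#Usage sketch (hedged): the simplified interface only needs the RD list and cell
#names; m.order sorts the stack by any c.dat column such as "area".
if(FALSE){
	cells <- row.names(tmp.rd$c.dat)
	LinesEvery.2(tmp.rd, m.names=cells[1:10], blc=TRUE, m.order="area")
}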
# pic.plot=T plots images next to trace, unless more than 10 traces
# XY.plot shows cells in image
LinesEvery.3 <- function(dat,m.names, img=NULL,pic.plot=TRUE, XY.plot=TRUE, blc=T, snr=NULL,lmain="",cols=NULL, levs=NULL, levs.cols="grey90",m.order=NULL,rtag=NULL,rtag2=NULL,rtag3=NULL, plot.new=TRUE,sf=.7,lw=.9,bcex=.6,p.ht=7,p.wd=10){
if(blc){t.dat<-dat$blc}
else{t.dat<-dat$t.dat}
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
library(RColorBrewer)
	## Tool for Sorting cells based on c.dat column name
if(length(m.names) > 0)
{
if(!is.null(m.order)){
tmp<-dat$c.dat[m.names,]
n.order<-tmp[order(tmp[,m.order]),]
m.names <- row.names(n.order)
}
else{
m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
morder <- m.pca$x[,1] * c(1,-1)[(sum(m.pca$rot[,1]) < 0)+1]
			#m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)] #redundant with the morder sort below
			m.names <- m.names[order(morder)]
}
### Picture Plotting!
if(XY.plot==T){cell.zoom.2048(dat, cell=m.names,img=img, cols="white",zoom=F, plot.new=T)}
	## Tool for color labeling
if(is.null(cols)){
#cols <- rainbow(length(m.names),start=.55)
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
## Tool for single color labeling
else {cols<-cols
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
if(plot.new){dev.new(width=10,height=6)}
par(xpd=FALSE)
par(mar=c(4,2,4,5))
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
axis(2, 1.4, )
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names,cex=.8*bcex,col=cols,pos=2)
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col=levs.cols,border="black")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
par(xpd=TRUE)
text(dat$t.dat[match(levs,wr),"Time"],rep(c((sf*.7)/5,(sf*.7)),length=length(levs)),levs,pos=4,offset=0,cex=bcex)#,offset=-offs}
par(xpd=FALSE)
}
## Tool for adding line, point and picture to the plot
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
lines(xseq,ypos, lty=1,col=cols[i],lwd=lw)
points(xseq,ypos,pch=16,col=cols[i],cex=.3)
if(!is.null(snr)){
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
}
}
par(xpd=TRUE)
if(!is.null(dat$c.dat[m.names, "area"])){rtag<-"area";rtag <- round(dat$c.dat[m.names,rtag], digits=0)
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],paste(rtag),cex=.9*bcex,col=cols,pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.gfp"])){rtag2<-"mean.gfp";rtag2 <- round(dat$c.dat[m.names,rtag2], digits=0)
text(rep(max(xseq)*1.04,length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag2),cex=.9*bcex,col="springgreen3",pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.gfp.1"])){rtag2<-"mean.gfp.1";rtag2 <- round(dat$c.dat[m.names,rtag2], digits=0)
text(rep(max(xseq)*1.04,length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag2),cex=.9*bcex,col="springgreen3",pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.tritc"])){rtag3<-"mean.tritc";rtag3 <- round(dat$c.dat[m.names,rtag3], digits=0)
text(rep(max(xseq)*1.08,length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag3),cex=.9*bcex,col="red1",pos=4)}
if(is.null(img)){img<-dat$img.gtd}
if(pic.plot==TRUE & length(m.names)<5){
pic.pos<-list()
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
pic.pos[[i]]<-mean(ypos)}
for(i in 1:length(m.names)){
zf<-20
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
if(left<=0){left=0; right=2*zf}
right<-x+zf
if(right>=2048){left=2048-(2*zf);right=2048}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=0){top=0; bottom=2*zf}
bottom<-y+zf
if(bottom>=2048){top=2048-(2*zf);bottom=2048}
par(xpd=TRUE)
xleft<-max(dat$t.dat[,1])*1.05
xright<-max(dat$t.dat[,1])*1.13
ytop<-pic.pos[[i]]+(.06*hb)
ybottom<-pic.pos[[i]]-(.06*hb)
rasterImage(img[top:bottom,left:right,],xleft,ytop,xright,ybottom)
}
}
else{multi.pic.zoom(dat, m.names,img=img, plot.new=T)}
}
#return(pic.pos)
}
# LinesEvery with all inputs in a single window, except XY plot
LinesEvery.4 <- function(dat,m.names, img=NULL,pic.plot=TRUE, zf=NULL, t.type=FALSE, snr=NULL,lmain="",cols=NULL, levs=NULL, levs.cols="grey90",m.order=NULL,rtag=NULL,rtag2=NULL,rtag3=NULL,plot.new=T,sf=.7,lw=.9,bcex=.6,p.ht=7,p.wd=10){
require(png)
#if(blc){t.dat<-dat$blc}
	if(t.type){t.type<-menu(names(dat));t.dat<-dat[[t.type]]}# if t.type is TRUE, pick the trace data component from a menu; otherwise use blc
else{t.dat<-dat$blc}
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
library(RColorBrewer)
	## Tool for Sorting cells based on c.dat column name
if(length(m.names) > 0)
{
if(!is.null(m.order)){
tmp<-dat$c.dat[m.names,]
n.order<-tmp[order(tmp[,m.order]),]
m.names <- row.names(n.order)
}
else{
#m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
#morder <- m.pca$x[,1] * c(1,-1)[(sum(m.pca$rot[,1]) < 0)+1]
#m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)]
#m.names <- m.names[order(morder)]
m.names<-m.names
}
### Picture Plotting!
#if(XY.plot==T){cell.zoom.2048(dat, cell=m.names,img=img, cols="white",zoom=F, plot.new=T)}
	## Tool for color labeling
if(is.null(cols)){
#cols <- rainbow(length(m.names),start=.55)
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
## Tool for single color labeling
else {cols<-cols
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
if(plot.new){
if(length(m.names)>5){dev.new(width=16,height=6);layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(10,6), heights=c(6,6))}
else(dev.new(width=10,height=6))
}
else{
if(length(m.names)>5){layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(10,6), heights=c(6,6))}
}
par(xpd=FALSE,mar=c(4,2,4,5), bty="l")
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)))#-sf
bob<-dev.cur()
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
axis(2, 1.4, )
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names,cex=.8*bcex,col=cols,pos=2)
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col=levs.cols,border="black")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
par(xpd=TRUE)
text(dat$t.dat[match(levs,wr),"Time"],rep(c((sf*.7)/5,(sf*.7)),length=length(levs)),levs,pos=4,offset=0,cex=bcex)#,offset=-offs}
par(xpd=FALSE)
}
## Tool for adding line, point and picture to the plot
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
lines(xseq,ypos, lty=1,col=cols[i],lwd=lw)
points(xseq,ypos,pch=16,col=cols[i],cex=.3)
if(!is.null(snr)){
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
}
}
par(xpd=TRUE)
if(!is.null(dat$c.dat[m.names, "area"])){rtag<-"area";rtag <- round(dat$c.dat[m.names,rtag], digits=0)
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],paste(rtag),cex=.9*bcex,col=cols,pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.gfp"])){rtag2<-"mean.gfp.bin";rtag2 <- round(dat$bin[m.names,rtag2], digits=0)
text(rep(max(xseq)*1.04,length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag2),cex=.9*bcex,col="springgreen3",pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.gfp.1"])){rtag2<-"mean.gfp.1";rtag2 <- round(dat$c.dat[m.names,rtag2], digits=0)
text(rep(max(xseq)*1.04,length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag2),cex=.9*bcex,col="springgreen3",pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.tritc"])){rtag3<-"mean.tritc.bin";rtag3 <- round(dat$bin[m.names,rtag3], digits=0)
text(rep(max(xseq)*1.08,length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag3),cex=.9*bcex,col="red1",pos=4)}
if(is.null(img)){img<-dat[[select.list(grep("img",names(dat), value=T))]]}
if(pic.plot==TRUE & length(m.names)<=5){
pic.pos<-list()
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
pic.pos[[i]]<-mean(ypos)}
for(i in 1:length(m.names)){
#if(dat$bin[m.names[1],"mean.gfp.bin"]!=1 & dat$bin[m.names[1],"mean.tritc.bin"]!=1){img.p<-dat$img.gtd #if the cell is neither red or green, then make the img to plot img.gtd
#}else{img.p<-img}
img.p<-img
if(is.null(zf)){zf<-20}else{zf<-zf}
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
if(left<=0){left=0; right=2*zf}
right<-x+zf
if(right>=2048){left=2048-(2*zf);right=2048}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=0){top=0; bottom=2*zf}
bottom<-y+zf
if(bottom>=2048){top=2048-(2*zf);bottom=2048}
par(xpd=TRUE)
xleft<-max(dat$t.dat[,1])*1.05
xright<-max(dat$t.dat[,1])*1.13
ytop<-pic.pos[[i]]+(.06*hb)
ybottom<-pic.pos[[i]]-(.06*hb)
if(length(dim(img))>2){rasterImage(img.p[top:bottom,left:right,],xleft,ytop,xright,ybottom)
}else{rasterImage(img.p[top:bottom,left:right],xleft,ytop,xright,ybottom)}
}
}
else{
par(mar=c(0,0,0,0))
plot(0,0,xlim=c(0,6), ylim=c(0,6), xaxs="i",yaxs="i", xaxt='n', yaxt='n')
tmp.img<-multi.pic.zoom.2(dat, m.names,img=img)
		dev.set(bob) #switch focus back to the trace-plot device
rasterImage(tmp.img, 0,0,6,6)
}
}
#return(pic.pos)
}
# LinesEvery same as .4 but has image at beginning of trace and moves to pic plot at >10
LinesEvery.5.1 <- function(dat,m.names, img=dat$img1,pic.plot=TRUE, multi.pic=T,zf=NULL, t.type="mp", snr=NULL,lmain="",cols=NULL, levs=NULL, levs.cols="grey90", m.order=NULL,rtag=NULL,rtag2=NULL,rtag3=NULL,plot.new=T,sf=1,lw=2,bcex=.6,p.ht=7,p.wd=10, lns=T, pts=F){
require(png)
#if(blc){t.dat<-dat$blc}
if(class(t.type)=="character"){t.dat<-dat[[t.type]]}# if trace type is empty select the data, you would like your trace to be
else{t.type<-menu(names(dat));t.dat<-dat[[t.type]]}
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
library(RColorBrewer)
	## Tool for Sorting cells based on c.dat column name
if(length(m.names) > 0)
{
#if(is.null(pdf.name))
# {dev.new(width=14,height=8)}
#else
#{if(length(grep("\\.pdf",pdf.name))>0){pdf(pdf.name,width=p.wd,height=p.ht)}else{png(pdf.name,width=1200,height=600)}}
if(!is.null(m.order)){
tmp<-dat$c.dat[m.names,]
n.order<-tmp[order(tmp[,m.order]),]
m.names <- row.names(n.order)
}
else{
#m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
#morder <- m.pca$x[,1] * c(1,-1)[(sum(m.pca$rot[,1]) < 0)+1]
#m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)]
#m.names <- m.names[order(morder)]
m.names<-m.names
}
### Picture Plotting!
#if(XY.plot==T){cell.zoom.2048(dat, cell=m.names,img=img, cols="white",zoom=F, plot.new=T)}
	## Tool for color labeling
if(is.null(cols)){
#cols <- rainbow(length(m.names),start=.55)
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
## Tool for single color labeling
else {cols<-cols
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
if(multi.pic){
if(plot.new){
if(length(m.names)>10){dev.new(width=16,height=6);layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(10,6), heights=c(6,6))}
else(dev.new(width=10,height=6))
}
else{
if(length(m.names)>10){layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(10,6), heights=c(6,6))}
}
}else{dev.new(width=10,height=6)}
par(xpd=FALSE,mar=c(4,3,4,10), bty="l")
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)))#-sf
bob<-dev.cur()
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
axis(2, 1.4, )
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names, cex=.5,col=cols,pos=2)
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$w.dat[,1],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,1],as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col=levs.cols,border="black")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
par(xpd=TRUE)
text(dat$t.dat[match(levs,wr),"Time"],rep(c((sf*.7)*.5,(sf*.7),(sf*.7)/5),length=length(levs)),levs,pos=4,offset=0,cex=bcex*.8)#,offset=-offs}
par(xpd=FALSE)
}
## Tool for adding line, point and picture to the plot
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
if(lns){lines(xseq,ypos, lty=1,col=cols[i],lwd=lw)}
if(pts){points(xseq,ypos,pch=16,col=cols[i],cex=.3)}
if(!is.null(snr)){
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
}
}
par(xpd=TRUE)
if(!is.null(dat$c.dat[m.names, "area"])){rtag<-"area";rtag <- round(dat$c.dat[m.names,rtag], digits=0)
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],paste(rtag),cex=.9*bcex,col=cols,pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.gfp.start"])){rtag2<-"mean.gfp.start";rtag2 <- round(dat$c.dat[m.names,rtag2], digits=6)
text(rep(max(xseq)+xinch(.5),length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag2),cex=.6*bcex,col="darkgreen",pos=4)}
#if(!is.null(dat$c.dat[m.names, "mean.gfp.end"])){rtag2<-"mean.gfp.end";rtag2 <- round(dat$c.dat[m.names,rtag2], digits=6)
#text(rep(max(xseq)+xinch(1),length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag2),cex=.6*bcex,col="darkgreen",pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.tritc.start"])){rtag3<-"mean.tritc.start";rtag3 <- round(dat$c.dat[m.names,rtag3], digits=6)
text(rep(max(xseq)+xinch(1),length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag3),cex=.6*bcex,col=,pos=4)}
if(!is.null(dat$c.dat[m.names, "mean.tritc.end"])){rtag3<-"mean.tritc.end";rtag3 <- round(dat$c.dat[m.names,rtag3], digits=6)
text(rep(max(xseq)+xinch(1.5),length(m.names)),seq(1,length(m.names))*sf+(t.dat[nrow(t.dat),m.names]),paste(rtag3),cex=.6*bcex,col=,pos=4)}
if(is.null(img)){
img.p<-dat[[select.list(grep("img",names(dat), value=T))]]
if(is.null(img.p)){img.p<-dat$img1}
}else{img.p<-img}
if(is.null(zf)){zf<-20}else{zf<-zf}
#if(pic.plot==TRUE & length(m.names)<=10){
if(pic.plot==TRUE){
if(length(m.names)<=10){
pic.pos<-list()
for(i in 1:length(m.names)){
ypos<-t.dat[1,m.names[i]]+i*sf
pic.pos[[i]]<-ypos}
for(i in 1:length(m.names)){
#if(dat$bin[m.names[1],"mean.gfp.bin"]!=1 & dat$bin[m.names[1],"mean.tritc.bin"]!=1){img.p<-dat$img.gtd #if the cell is neither red or green, then make the img to plot img.gtd
#}else{img.p<-img}
#img.p<-img
img.dim<-dim(dat$img1)[1]
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
if(left<=0){left=0; right=2*zf}
right<-x+zf
if(right>=img.dim){left=img.dim-(2*zf);right=img.dim}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=0){top=0; bottom=2*zf}
bottom<-y+zf
if(bottom>=img.dim){top=img.dim-(2*zf);bottom=img.dim}
par(xpd=TRUE)
xleft<-min(dat$t.dat[,1])-xinch(1)
xright<-min(dat$t.dat[,1])-xinch(.5)
ytop<-pic.pos[[i]]+yinch(.25)
ybottom<-pic.pos[[i]]-yinch(.25)
tryCatch(rasterImage(img.p[top:bottom,left:right,],xleft,ybottom,xright,ytop),error=function(e) rasterImage(img.p[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
}
else{
par(mar=c(0,0,0,0))
plot(0,0,xlim=c(0,6), ylim=c(0,6), xaxs="i",yaxs="i", xaxt='n', yaxt='n')
tmp.img<-multi.pic.zoom.2(dat, m.names,img=img.p, labs=T, zf=zf, cols=cols)
			dev.set(bob) #switch focus back to the trace-plot device
rasterImage(tmp.img, 0,0,6,6)
}
}
}
#if(!is.null(pdf.name))
#{dev.off()}
#return(pic.pos)
}
# LinesEvery same as .4 but has image at beginning of trace and moves to pic plot at >10
#multi.pic does not work on this. Instead, if greater than 10, the traces are plotted in a portrait orientation
# Also window labels are rotated on axis and placed on the bottom of the plot
# I am also adding two more images to the left side of the plot
#171009 added underline. Helps to show irreversibility
#171031 added dat.n for the name of the experiment
LinesEvery.5 <- function(dat,m.names, img="img1",channel=NULL,pic.plot=TRUE,zf=NULL, t.type="mp.1", snr=NULL,lmain="",cols="black", levs=NULL, levs.cols="grey90", values=NULL,plot.new=T,sf=1,lw=1,bcex=1,p.ht=7,p.wd=10, lns=T, pts=F, underline=T,dat.n=NULL){
#require(RColorBrewer)
dat.name<-deparse(substitute(dat))
if(dat.name=="dat" | dat.name == "tmp.rd" | dat.name == "tmp_rd"){
dat.name<-dat.n
}else{
dat.name<-dat.name
}
#Trace Selector if t.type is empty. t.type must be character input
if(class(t.type)=="character"){
t.dat<-dat[[t.type]]# if trace type is empty select the data, you would like your trace to be
}else{
t.type<-menu(names(dat));t.dat<-dat[[t.type]]
}
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
#upper ylimit
hbc <- length(m.names)*sf+max(t.dat[,m.names])
#Selecting multiple images
if(is.null(img)){
img.l<-select.list(grep("img",names(dat), value=T), multiple=T)
}else{
img.l<-img
}
if(length(m.names) > 0){
#For pdf output
#if(is.null(pdf.name))
# {dev.new(width=14,height=8)}
#else
#{if(length(grep("\\.pdf",pdf.name))>0){pdf(pdf.name,width=p.wd,height=p.ht)}else{png(pdf.name,width=1200,height=600)}}
		## Tool for adding value tags displayed on the right side of trace
		#See line 3016 for where values come into play
		#values<-c("area", "mean.gfp.start", "mean.gfp.end", "mean.tritc.start", "mean.tritc.end")
if(is.null(values)){
values<-c("area")
}else{values<-values}
		## Tool for color labeling: named palettes, or a user vector recycled to length(m.names)
		## (an else-if chain; cols becomes a vector after the first branch, so re-testing it with == would error)
		if(cols[1]=="brew.pal"){
			require(RColorBrewer)
			cols <- brewer.pal(8,"Dark2")
			cols <- rep(cols,ceiling(length(m.names)/length(cols)))
			cols <- cols[1:length(m.names)]
		}else if(cols[1]=="rainbow"){
			cols <- rainbow(length(m.names),start=.7,end=.1)
		}else if(cols[1]=="topo"){
			cols <- topo.colors(length(m.names))
		}else{
			cols <- rep(cols,ceiling(length(m.names)/length(cols)))
			cols <- cols[1:length(m.names)]
		}
if(plot.new){
if(length(m.names)>10){dev.new(width=10+length(img)+(length(values)*.6),height=12)}
else(dev.new(width=10+length(img)+(length(values)*.6),height=8))
}
par(xpd=FALSE,mai=c(2,.5+(.5*length(img.l)), 1, 0.6*length(values)), bty="l")
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="",main=lmain,type="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)), ylab="")#-sf
#axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
#axis(2, 1.4, )
#Label cell names
text(rep(0,length(m.names))-xinch(.1),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names, cex=.5,col=cols,pos=3)
## Tool for adding window region labeling
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
}else{levs<-levs}
wr<-dat$w.dat$wr1
if(length(wr) > 0){
x1s <- tapply(dat$w.dat[,1],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,1],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[3],length(x1s))
y2s <- rep(par("usr")[4],length(x1s))
rect(x1s,y1s,x2s,y2s,col=levs.cols,border="black")
par(xpd=TRUE)
#text(x1s-xinch(.1),par("usr")[3]-yinch(1),levs,cex=.8*bcex, srt=90)
#dat$t.dat[match(levs,wr),"Time"]
levs.loc<-tapply(dat$w.dat[,"Time"],as.factor(wr),mean)[levs]
levs_cex <- nchar(levs)
levs_cex[ levs_cex <= 12*1.3 ] <- 1
levs_cex[ levs_cex > (12*1.3) ] <- 12/levs_cex[ levs_cex>(12*1.3) ]*1.3
text(levs.loc,par("usr")[3],levs,pos=3,offset=-4.3,cex=levs_cex, srt=90)
par(xpd=FALSE)
}
## Tool for adding line, point and picture to the plot
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
if(lns){lines(xseq,ypos, lty=1,col=cols[i],lwd=lw)}
if(pts){points(xseq,ypos,pch=16,col=cols[i],cex=.3)}
if(!is.null(snr)){
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
}
if(underline){abline(h=min(ypos), col="black")}else{}
}
par(xpd=TRUE)
## Tool for adding Value info on right side of trace
placement<-seq(0,length(values),.5)
digits<-c(0,rep(4,length(values)))
text(max(xseq)+xinch(placement[1:length(values)]), par("usr")[4]+yinch(.2), pos=4, values,cex=bcex*.75, srt=30)
for(i in 1:length(values)){
if(!is.null(dat$c.dat[m.names, values[i]])){
rtag<-values[i]
rtag <- round(dat$c.dat[m.names,rtag], digits=digits[i])
text(
rep(max(xseq)+xinch(placement[i]),length(m.names)),
seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],
paste(rtag),
cex=.65*bcex,
col=cols,
pos=4)
}
}
##Tool for adding images to the left side of the plot
if(is.null(zf)){
zf<-20
}else{zf<-zf}
pic.pos<-list()
for(i in 1:length(m.names)){
ypos<-t.dat[1,m.names[i]]+i*sf
pic.pos[[i]]<-ypos
}
xinchseq1<-seq(1,5,.5)
xinchseq2<-seq(.5,5,.5)
if(is.null(channel)){channel<-rep(list(c(1:3)),length(img.l))
}else{channel<-channel}
for(j in 1:length(img.l)){
for(i in 1:length(m.names)){
img.dim<-dim(dat$img1)[1]
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
if(left<=0){
left=0
right=2*zf
}
right<-x+zf
if(right>=img.dim){
left=img.dim-(2*zf)
right=img.dim
}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=0){
top=0
bottom=2*zf
}
bottom<-y+zf
if(bottom>=img.dim){
top=img.dim-(2*zf)
bottom=img.dim
}
par(xpd=TRUE)
xleft<-min(dat$t.dat[,1])-xinch(xinchseq1[j])
xright<-min(dat$t.dat[,1])-xinch(xinchseq2[j])
ytop<-pic.pos[[i]]+yinch(.25)
ybottom<-pic.pos[[i]]-yinch(.25)
tryCatch(
rasterImage(dat[[img.l[j]]][top:bottom,left:right,channel[[j]]],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(dat[[img.l[j]]][top:bottom,left:right],xleft,ybottom,xright,ytop)
)
}
}
}
tryCatch(
legend(x=par("usr")[2]-xinch(1.2), y=par("usr")[3]-yinch(1.6), xpd=TRUE, inset=c(0,-.14), bty="n", cex=.7, legend=dat.name),
error=function(e) NULL)
#if(!is.null(pdf.name))
#{dev.off()}
#return(pic.pos)
}
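#Usage sketch (hedged): img and t.type are component *names* inside the RD list,
#and values lists c.dat columns to print along the right margin.
if(FALSE){
	cells <- row.names(tmp.rd$c.dat)
	LinesEvery.5(tmp.rd, m.names=cells[1:10], img="img1", t.type="mp.1",
		values=c("area"), zf=20, dat.n="tmp.rd")
}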
#How to display single or multiple window regions as specified by you
#performs pam analysis around as many medoids as you want
#displays the information as a heat map with red representing the most populous group
# and white as the least populous
#legend
#xlim= Logical added to have option to group traces around window regions
#mediods= Logical. If true then group will be split into subset.n groups
LevsViewer <- function(dat,m.names=NULL, ylim=c(0,1.4), xlim=F, mediods=T, min.group=0,linez=F,subset.n=5,img=NULL, pic.plot=FALSE, t.type="mp.1",lmain="",cols=NULL, levs=NULL, levs.cols="grey90",plot.new=T,lw=.9,bcex=.8,opacity=3){
	# if t.type is empty, default to the raw t.dat component
if(is.null(t.type)){
t.type<-"t.dat"
t.dat<-dat[[t.type]]
}else{
t.dat<-dat[[t.type]]
}
## select the region to plot from window region
levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
	## This is how I determine the x region to plot
if(xlim){
plot.region<-select.list(levs, multiple=T)
}else{
plot.region<-levs
}
## What cells would you like to plot?
# if you do not have cells entered, then all cells will be selected.
if(is.null(m.names)){
m.names<-dat$c.dat$id
}else{
m.names<-m.names}
	## Open a new plotting window when plot.new is TRUE
if(plot.new){dev.new(height=5,width=5*length(plot.region))}
	## If you have decided to select window regions based on levs, then this code will select
	# the minimum x value across all selected levs regions, and the overall maximum from the selected levs regions
	## If not, the x range defaults to the minimum and maximum of the entire time frame
if(xlim){
x.min<-which(t.dat$Time==
min(tapply(t.dat[,"Time"], as.factor(dat$w.dat$wr1), min)[plot.region]))
x.max<-which(t.dat[,"Time"]==
max(tapply(t.dat[,"Time"], as.factor(dat$w.dat$wr1), max)[plot.region]))
}else{
x.min<-min(t.dat[,"Time"])
x.min<-which(t.dat[,"Time"]==x.min)
x.max<-max(t.dat[,"Time"])
x.max<-which(t.dat[,"Time"]==x.max)
}
## Y limits will be automatically decided if this input is NULL
# if not, the y limits can be specified via c()
if(is.null(ylim)){
ylim<-c(min(t.dat[,m.names]),max(t.dat[,m.names]))
}else{
ylim<-ylim
}
	## xseq is an argument input for the plotting function
	xseq<-t.dat[x.min:x.max,"Time"]
	## I'm not sure why I added this to the code. Perhaps I have had issues in the past
m.names <- intersect(m.names,names(t.dat))
## Make sure this is loaded for pretty colors, although we are currently using heat.colors
library(RColorBrewer)
###PLOTTING####
##parameters for plot
par(xpd=FALSE,mar=c(4,2,4,5), bty="l", bg="gray70")
plot(xseq,t.dat[x.min:x.max,m.names[1]],ylim=ylim,xlab="Time (min)",main=lmain,type="n")#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), .5))
axis(2, tick=T, )
abline(h=seq(0,max(ylim),.2), lty=3, lwd=.1 )
#abline(v=seq(
# floor(t.dat$Time[x.min]),
# ceiling(t.dat$Time[x.max]),.5),
# lty=3, lwd=.1
#)
## Add all lines to the plot with light opacity.
	# in the future this may need a type of calculation to allow for a relationship between
	# opacity and number of lines
if(linez){
for(i in 1:length(m.names)){
ypos<-t.dat[x.min:x.max,m.names[i]]
color<-rgb(1,1,1, opacity, maxColorValue=10)
lines(xseq,ypos, lty=1,col=color,lwd=lw)
#points(xseq,ypos,pch=16,col=color,cex=.3)
}
}
	##Tool for adding per-group trace averages using Partitioning Around Medoids (PAM)
if(mediods){
library(cluster)
if(subset.n>=(length(m.names)/2)){
subset.n<-floor(length(m.names)/4)
}else{subset.n<-subset.n}
pam5 <- pam(t(t.dat[x.min:x.max,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
pam5.tab <- table(pam5$clustering)
#Create Labels for the PAM groupings
#tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
group.means<-list()
group.names<-list()
## Create naming for PAM clusters
for(i in 1:subset.n){
x.names<-names(which(pam5$clustering==i, arr.ind=T))
#group.info<-paste(i,":",length(x.names), sep="")
group.info<-length(x.names)
group.names[[i]]<-x.names
names(group.names)[i]<-group.info
}
		#only keep groups with more than min.group traces
		#this takes some magic to happen
bg<-summary(group.names)
bg[,1]<-as.numeric(bg[,1])
		bg.big<-which(as.numeric(bg[,1])>min.group) #both branches of the old size test did exactly this
if(length(bg.big)>1){
bg.2<-bg[bg.big,]
bg.2<-bg.2[order(as.numeric(bg.2[,1]),decreasing=T),]
bg.2.names<-row.names(bg.2)
group.names<-group.names[bg.2.names]
}else{
bg.2<-bg
bg.2<-bg.2[order(as.numeric(bg.2[,1]),decreasing=T),]
bg.2.names<-row.names(bg.2)
group.names<-group.names[bg.2.names]
}
cols <-brewer.pal(8,"Dark2")
cols<-rainbow(length(group.names))
#cols<-rep(cols,length(group.names))
#cols <- rep(cols,ceiling(length(s.names)/length(cols)))
#cols <- cols[1:length(s.names)]
#cols<-heat.colors(length(group.names))
#cols<-rev(cols)
for(i in 1:length(group.names)){
if(length(group.names[[i]])>1){
lines(xinch(i*0)+xseq, yinch(i*.15)+apply(t.dat[x.min:x.max,group.names[[i]]],1,mean), col=cols[i], lwd=2)
}else{
lines(xinch(i*0)+xseq, yinch(i*.15)+t.dat[x.min:x.max,group.names[[i]]], col=cols[i], lwd=2)
}
}
legend("topright",legend=rev(names(group.names)),title="Cell Total", cex=.8,lty=1,lwd=2, bty="", col=rev(cols))
}
## Tool for adding window region labeling
wr<-dat$w.dat[x.min:x.max,2]
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[plot.region]
#abline(v=x1s, lwd=1.2)
par(xpd=TRUE)
text(x1s,rep(.9,length=length(plot.region)),plot.region,pos=4,offset=0,cex=bcex)#,offset=-offs}
par(xpd=FALSE)
}
if(is.null(img)){img<-dat$img2}
if( (pic.plot) & (length(m.names)>=5)){
dev.new()
par(mar=c(0,0,0,0))
plot(0,0,xlim=c(0,6), ylim=c(0,6), xaxs="i",yaxs="i", xaxt='n', yaxt='n')
multi.pic.zoom(dat, m.names,img=img)
}
return(group.names)
}
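#Usage sketch (hedged): with xlim=T you pick the window regions to cluster on;
#the returned list maps each PAM group label to its member cells.
if(FALSE){
	groups <- LevsViewer(tmp.rd, mediods=T, subset.n=5, xlim=T)
	sapply(groups, length)  #group sizes, largest first
}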
LinesSome.2 <- function(dat,m.names,snr=NULL,lmain="",pdf.name=NULL,morder=NULL,subset.n=5,sf=1,lw=3,bcex=1)
{
library(cluster)
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
levs<-unique(as.character(wr))[-1]
if(length(m.names) < subset.n)
{stop("group size lower than subset size")}
pam5 <- pam(t(t.dat[,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
if(!is.null(morder))
{
names(morder) <- m.names
morder <- morder[s.names]
}
pam5.tab <- table(pam5$clustering)
tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
LinesEvery(t.dat,snr,s.names,wr,levs,lmain,pdf.name,morder,rtag=tags,sf,lw,bcex)
return(pam5$clustering)
}
TraceSelect <- function(dat,m.names,blc=NULL,snr=NULL,wr=NULL,levs=NULL,lmain="",m.order=NULL,rtag=NULL,rtag2=NULL,rtag3=NULL){
if(!is.null(blc)){t.dat<-dat$blc}
else{t.dat<-dat$t.dat}
if(is.null(wr)){wr<-dat$w.dat[,2]}
sf <- .2
bcex<-1
library(RColorBrewer)
m.names <- intersect(m.names,names(t.dat))
lwds <- 3
if(length(m.names) > 0)
{
xseq <- t.dat[,1]
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
dev.new(width=14,height=8)
if(!is.null(m.order)){
(tmp<-dat$c.dat[m.names,])
(n.order<-tmp[order(tmp[,m.order]),])
(m.names <- row.names(n.order))
}
else{
m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)]
}
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
plot(xseq,t.dat[,m.names[1]],ylim=c(-sf,hbc),xlab="Time (min)",ylab="Ratio with shift",main=lmain,type="n", xaxt="n")
axis(1, at=seq(0, length(t.dat[,1]), 5))
if(length(wr) > 0)
{
if(is.null(levs)){levs <- setdiff(unique(wr),"")}
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="lightgrey")
text(xseq[match(levs,wr)],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=1)
}
x.sel <- NULL
xs <-c(rep(0,length(m.names)),c(.1,.1,.1))
ys <- seq(1,length(m.names))*sf+t.dat[1,m.names]
ys <- as.vector(c(ys,c(sf,0,-sf)))
# xs[(length(xs)-2):length(xs)] <- c(0,5,10)
p.names <- c(m.names,"ALL","NONE","FINISH")
done.n <- length(p.names)
none.i <- done.n-1
all.i <- none.i-1
p.cols <- c(cols,c("black","black","black"))
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lwds)
if(!is.null(snr))
{
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i*sf,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i*sf,pch=0,col=cols[i])
}
}
text(x=xs,y=ys,labels=p.names,pos=2,cex=.7,col=p.cols)
points(x=xs,y=ys,pch=16,col=p.cols)
if(is.null(rtag)){
if(!is.null(m.order)){
rtag <- dat$c.dat[m.names,m.order]
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag,cex=.8*bcex,col=cols,pos=4)
}}
else{
rtag <- round(dat$c.dat[m.names,rtag], digits=0)
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag,cex=.8*bcex,col=cols,pos=4)
}
if(!is.null(rtag2)){
(rtag2 <- round(dat$c.dat[m.names,rtag2], digits=0))
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag2,cex=.8*bcex,col=cols,pos=3)
}
if(!is.null(rtag3)){
rtag3 <- round(dat$c.dat[m.names,rtag3], digits=0)
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag3,cex=.8*bcex,col=cols,pos=1)
}
click.i <- 1
while(click.i != done.n)
{
click.i <- identify(xs,ys,n=1,plot=F)
if(click.i < (length(m.names)+1) & click.i > 0)
{
i <- click.i
if(is.element(i,x.sel))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lwds)
x.sel <- setdiff(x.sel,i)
}
else
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col="black",lwd=lwds)
#lines(xseq,t.dat[,m.names[i]]+i*sf,col="white",lwd=2,lty=2)
x.sel <- union(x.sel,i)
}
}
if(click.i == none.i)
{
x.sel <- NULL
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lwds)
}
}
if(click.i == all.i)
{
x.sel <- seq(1,length(m.names))
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col="black",lwd=lwds)
}
}
}
dev.off()
return(m.names[x.sel])
}
}
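#Usage sketch (hedged): click traces to toggle them, then FINISH; the function
#returns the names of the selected (blackened) traces.
if(FALSE){
	cells <- row.names(tmp.rd$c.dat)
	keepers <- TraceSelect(tmp.rd, m.names=cells[1:10], lmain="Click traces to keep")
}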
#TraceSelectLarge takes a large list of traces,
#subsets it, and passes each subset on to TraceSelect.
#Note: TraceSelect now reads the traces out of the full RD list itself,
#so dat here is the RD list (not a t.dat frame) and rtag is a c.dat column name.
TraceSelectLarge <- function(dat,snr=NULL,m.names,wr=NULL,levs=NULL,lmain="",subset.n=10,rtag=NULL){
	sel.names <- NULL
	s <- ceiling(length(m.names)/subset.n)
	for(i in 1:s)
	{
		x1 <- (i-1)*subset.n+1
		x2 <- min(length(m.names),x1+subset.n-1) #end of this chunk; -1 keeps chunks from overlapping
		x.sel <- TraceSelect(dat,m.names[x1:x2],snr=snr,wr=wr,levs=levs,lmain=lmain,rtag=rtag)
sel.names <- union(sel.names,x.sel)
}
return(sel.names)
}
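#Usage sketch (hedged): same selection flow as TraceSelect, chunked subset.n at
#a time; assumes the full RD list is passed as dat (see note above).
if(FALSE){
	keepers <- TraceSelectLarge(tmp.rd, m.names=row.names(tmp.rd$c.dat), subset.n=10)
}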
LinesStack <- function(dat,m.names,lmain="",levs=NULL, plot.new=TRUE,bcex=.7, sf=.2, subset.n=5){
if(plot.new){dev.new(width=10,height=6)}
if(length(m.names)>subset.n){
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
hbc <- subset.n*sf+max(t.dat[,m.names])
xseq <- t.dat[,1]
library(RColorBrewer)
par(mar=c(4,2,4,4))
hbc <- (subset.n*(.8*sf))+max(t.dat[,m.names])
#ylim <- c(-.1,2.5)
ylim<-c(-.1,hbc)
plot(xseq,t.dat[,m.names[1]],ylim=ylim,xlab="Time (min)",main=lmain,type="n", xaxt="n",xlim=c(min(xseq)-1.5,max(xseq)+25))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
text(dat$t.dat[match(levs,wr),"Time"],rep(c(-.05, abs(min(ylim))),length=length(levs)),levs,cex=bcex,offset=0, pos=4)#,offset=-offs}
}
blc<-dat$blc
## Tool for adding line and point plot for all lines
#matlines(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), lwd=.01)
#matpoints(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), pch=16, cex=.03)
#cols <- rainbow(length(m.names),start=.55)
library(cluster)
blc<-dat$blc
pam5 <- pam(t(blc[,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
pam5.tab <- table(pam5$clustering)
#tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
group.means<-list()
group.names<-list()
for(i in 1:subset.n){
x.names<-names(which(pam5$clustering==i, arr.ind=T))
group.names[[i]]<-x.names
group.means[i]<-paste(
round(mean(dat$c.dat[x.names, "area"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.gfp"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.gfp"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.tritc"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.tritc"]), digits=0), sep="")
# adding standard deviation,"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
}
tags <- paste(as.vector(pam5.tab),":",group.means)
info<-pam5$clustering
## Tool For adding color to selected Traces
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(s.names)/length(cols)))
cols <- cols[1:length(s.names)]
## Tool for adding labeling for single line within stacked traces
for(i in 1:length(s.names)){
lines(xseq, blc[,s.names[i]]+i*sf, col=cols[i], lwd=.2)
points(xseq, blc[,s.names[i]]+i*sf, col=cols[i], pch=16, cex=.02)
matlines(xseq, blc[,names(which(info==i, arr.ind=T))]+i*sf, col=rgb(0,0,0,50, maxColorValue=100), lwd=.01)
text(x=min(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i*sf, labels=s.names[i], col=cols[i], pos=2, cex=bcex)
text(x=max(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i*sf, labels=tags[i], col=cols[i], pos=4, cex=bcex)
}
#return(pam5$clustering)
return(group.names)
}
}
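#Usage sketch (hedged): PAM-clusters the cells into subset.n groups and stacks
#the medoid traces; the returned list holds the member names of each group.
if(FALSE){
	groups <- LinesStack(tmp.rd, m.names=row.names(tmp.rd$c.dat), subset.n=5)
}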
### Best Linesstack Yet
# stack traces according to # of groups defined
# uses pam clustering
# and bp.func2
LinesStack.2.1 <- function(dat,m.names=NULL,lmain="",levs=NULL, plot.new=TRUE,bcex=.7, sf=.1, subset.n=5, img=NULL, cols=NULL,bp.param=NULL){
graphics.off()
	if(is.null(img)){img<-"img1"} #LinesEvery.5 expects the name of the image component, not the array itself
if(is.null(m.names)){m.names<-dat$c.dat$id}
if(plot.new){dev.new(width=10,height=6)}
if(length(m.names)>subset.n){
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
hbc <-max(t.dat[,m.names])*subset.n *.643
xseq <- t.dat[,1]
library(RColorBrewer)
par(mar=c(4,2,4,4),bty="l")
#hbc <- (subset.n*(.8*sf))+max(t.dat[,m.names])
#ylim <- c(-.1,2.5)
#ylim<-c(-.1,hbc)
ylim<-c(0,subset.n+subset.n*sf)
plot(xseq,t.dat[,m.names[1]],ylim=ylim,xlab="Time (min)",main=lmain,type="n", xaxt="n",xlim=c(min(xseq)-1.5,max(xseq)+25))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
## Tool for adding window region labeling
if(length(wr) > 0){
levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
#x2s <- tapply(xseq,as.factor(wr),max)[levs]
#y1s <- rep(min(ylim)-.2,length(x1s))
#y2s <- rep(max(ylim)+.2,length(x1s))
#rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
}
abline(v=x1s)
text(dat$t.dat[match(levs,wr),"Time"]+.5,rep(c(-.05, abs(min(ylim)),abs(min(ylim))+.1),length=length(levs)),levs,cex=bcex,offset=0, pos=4)#,offset=-offs}
blc<-dat$blc
## Tool for adding line and point plot for all lines
#matlines(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), lwd=.01)
#matpoints(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), pch=16, cex=.03)
#cols <- rainbow(length(m.names),start=.55)
library(cluster)
blc<-dat$blc
pam5 <- pam(t(blc[,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
pam5.tab <- table(pam5$clustering)
#tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
group.means<-list()
group.names<-list()
for(i in 1:subset.n){
x.names<-names(which(pam5$clustering==i, arr.ind=T))
group.names[[i]]<-x.names
group.means[i]<-paste(
round(mean(dat$c.dat[x.names, "area"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.gfp"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.gfp"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.tritc"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.tritc"]), digits=0), sep="")
# adding standard deviation,"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
}
tags <- paste(as.vector(pam5.tab),":",group.means)
info<-pam5$clustering
## Tool For adding color to selected Traces
if(is.null(cols)){
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(s.names)/length(cols)))
cols <- cols[1:length(s.names)]
}else{cols<-rep(cols, length(m.names))}
## Tool for adding labeling for single line within stacked traces
par(xpd=T)
for(i in 1:length(s.names)){
if(length(group.names[[i]])>=2){
matlines(xseq, blc[,group.names[[i]]]+i+sf, col=rgb(0,0,0,20, maxColorValue=100), lwd=.01)
lines(xseq, apply(blc[,group.names[[i]]],1,mean)+i+sf, col=cols[i], lwd=.2)
points(xseq, apply(blc[,group.names[[i]]],1,mean)+i+sf, col=cols[i], pch=16, cex=.02)
text(x=min(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i+sf, labels=i, col=cols[i], pos=2, cex=bcex)
text(x=max(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i+sf, labels=tags[i], col=cols[i], pos=4, cex=bcex)
}else{
lines(xseq, blc[,group.names[[i]]]+i+sf, col=cols[i], lwd=.2)
points(xseq, blc[,group.names[[i]]]+i+sf, col=cols[i], pch=16, cex=.02)
text(x=min(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i+sf, labels=i, col=cols[i], pos=2, cex=bcex)
text(x=max(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i+sf, labels=tags[i], col=cols[i], pos=4, cex=bcex)
}
}
par(xpd=F)
# Tool for adding boxplot
par(xpd=T)
dev.current<-dev.cur()
if(is.null(bp.param)){
dat.select<-"c.dat"
bp.param<-c(
grep("area",names(dat$c.dat),value=T),
#tryCatch(grep("mean.gfp",names(dat$c.dat)),error=function(e) NULL),
grep("mean.gfp",names(dat$c.dat),value=T),
grep("mean.tritc",names(dat$c.dat),value=T))
cols<-c("blue", "darkgreen","red")
#}else{
# dat.select<-select.list(names(dat))
# bp.param<-as.character(select.list(names(dat[[dat.select]]), multiple=T))
# cols<-NULL
# }
}else{
dat.select<-"c.dat"
bp.param<-bp.param}
for(i in 1:length(s.names)){
xleft<-max(blc[,1])+xinch(.3)
xright<-xleft+xinch(1)*length(bp.param)
y<-(blc[nrow(t.dat),group.names[[i]]]+i+sf)
ybottom<- y-yinch(.5)
ytop<-y+yinch(.5)
bp.img<-bpfunc.3(dat,group.names[[i]],dat.select, bp.param, print.out=T, cols=cols, bcex=bcex)
dev.set(dev.current)
rasterImage(bp.img,xleft, ybottom, xright, ytop)
}
continue<-select.list(c("yes", "no"))
if(continue=="yes"){
while(i!=length(s.names)+1){
i<-scan(n=1)
if(i>length(s.names)| i==0){i<-length(s.names)+1
}else{LinesEvery.5(dat,sample(names(which(info==i, arr.ind=T)))[1:15], img=NULL, pic.plot=T, sf=.3, lmain=i,m.order="area")}
#multi.pic.zoom(dat, names(which(info==i, arr.ind=T)), img, plot.new=T)
}
}
}
else{LinesEvery.5(dat, m.names,img)}
#return(pam5$clustering)
return(group.names)
}
##170109
#interact: LOGICAL;
#TRUE select cell groups to work through and return list of groups of cells
#FALSE only plot out the groups, and don't return groups of cells
##region.group: Select a region to group the cells around. Brings up option to select the region to group around
#170403 bp logical: lets you choose whether to boxplot
#170508 Allows you to select the trace you would like to use for grouping with option:
#t.type: input character
#170605: Adding a drop function to this. It will automatically update the RD file. I need something to drop cells much faster
#
LinesStack.2<- function(dat,m.names=NULL,t.type=NULL,lmain="", interact=T, region.group=T,levs=NULL, plot.new=TRUE,bcex=.7, sf=1.1, subset.n=NULL, img=NULL,bp.param=NULL, bp=F, bp.pts=F){
#graphics.off()
#
if(is.null(img)){img<-dat$img1}
# If a list of cells is not input, then look at all cells
if(is.null(m.names)){
dropped.cells<-cellzand(dat$bin, "drop",1)
m.names<-setdiff(dat$c.dat$id, dropped.cells)
}else{
dropped.cells<-cellzand(dat$bin, "drop",1)
m.names<-setdiff(m.names, dropped.cells)
}
if(is.null(subset.n)){subset.n<-as.numeric(select.list(as.character(c(5,10,15,20,25,30))))}
	if(plot.new){
		dev.new(width=14,height=10) #both branches of the old size test opened the same window
	}
	linesstack.win<-dev.cur() #remember the trace window even when plot.new=F
if(length(m.names)>subset.n){
if(is.null(t.type)){t.dat<-dat$t.dat}
else{t.dat<-dat[[t.type]]}
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
hbc <-(max(t.dat[,m.names])+subset.n)*sf
#hbc <- (subset.n*(.8*sf))+max(t.dat[,m.names])
xseq <- t.dat[,1]
library(RColorBrewer)
par(mai=c(2,1,1,1))
ylim<-c(-.1,hbc)
#ylim <- c(-.1,2.5)
plot(xseq,
t.dat[,m.names[1]],
ylim=ylim,
xlab="",
ylab='',
main=lmain,
type="n",
xlim=c(min(xseq)-1.5,max(xseq)+10),
bty='n'
)#-sf
#axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- par('usr')[3]
y2s <- par('usr')[4]
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
levs.loc<-tapply(dat$w.dat[,"Time"],as.factor(wr),mean)[levs]
par(xpd=T)
text(levs.loc, par("usr")[3] - yinch(.5),levs,cex=bcex, srt=90)
}
## Tool for adding line and point plot for all lines
#matlines(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), lwd=.01)
#matpoints(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), pch=16, cex=.03)
#cols <- rainbow(length(m.names),start=.55)
library(cluster)
## To select data within the experiment to group around
if(region.group){
dev.new(width=15, height=10)
LinesEvery.5(dat, sample(dat$c.dat$id)[1:5], plot.new=F, lmain="Click to Select region to Groups Cells", t.type="t.dat", img="img1")
#LinesEvery.4(dat, sample(row.names(dat$c.dat)[1:5]), plot.new=F, lmain="Click to Select region to Groups Cells", img=dat$img1)
b.xseq<-locator(n=2, type="o", pch=15, col="red")$x
dev.off()
x.min<-which(abs(t.dat$Time-b.xseq[1])==min(abs(t.dat$Time-b.xseq[1])))
x.max<-which(abs(t.dat$Time-b.xseq[2])==min(abs(t.dat$Time-b.xseq[2])))
pam5 <- pam(t(t.dat[x.min:x.max,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
pam5.tab <- table(pam5$clustering)
#tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
group.means<-list()
group.names<-list()
for(i in 1:subset.n){
x.names<-names(which(pam5$clustering==i, arr.ind=T))
group.names[[i]]<-x.names
group.means[i]<-paste(
tryCatch(round(mean(dat$c.dat[x.names, "area"]), digits=0),error=function(e) NULL),
"\u00b1",
tryCatch(round(sd(dat$c.dat[x.names, "area"]), digits=1),error=function(e) NULL))#," : ",
#tryCatch(round(mean(dat$c.dat[x.names, "mean.gfp"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.gfp"]), digits=0),error=function(e) NULL)," : ",
#tryCatch(round(mean(dat$c.dat[x.names, "mean.tritc"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.tritc"]), digits=0),error=function(e) NULL), sep="")
#tryCatch(round(sd(dat$c.dat[x.names, "area"]), digits=0),"\u00b1",error=function(e) NULL)
}
}else{
library(cluster)
pam5 <- pam(t(t.dat[,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
pam5.tab <- table(pam5$clustering)
#tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
group.means<-list()
group.names<-list()
for(i in 1:subset.n){
x.names<-names(which(pam5$clustering==i, arr.ind=T))
group.names[[i]]<-x.names
group.means[i]<-paste(
tryCatch(round(mean(dat$c.dat[x.names, "area"]), digits=0),error=function(e) NULL),
"\u00b1",
tryCatch(round(sd(dat$c.dat[x.names, "area"]), digits=1),error=function(e) NULL))
#round(mean(dat$c.dat[x.names, "mean.gfp"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.gfp"]), digits=0)," : ",
#round(mean(dat$c.dat[x.names, "mean.tritc"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.tritc"]), digits=0)
#adding standard deviation
#"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0), sep="")
}
}
tags <- paste(as.vector(pam5.tab),":",group.means)
info<-pam5$clustering
## Tool For adding color to selected Traces
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(s.names)/length(cols)))
cols <- cols[1:length(s.names)]
## Tool for adding labeling for single line within stacked traces
par(xpd=T)
if(plot.new){dev.set(which=linesstack.win)} #linesstack.win only exists when plot.new=TRUE
for(i in 1:length(s.names)){
if(length(group.names[[i]])>=2){
matlines(xseq, t.dat[,group.names[[i]]]+i*sf, col=rgb(0,0,0,20, maxColorValue=100), lwd=.01) #offset matches the mean line below; was (x+i)*sf, which misaligned the grey traces
lines(xseq, apply(t.dat[,group.names[[i]]],1,mean)+i*sf, col=cols[i], lwd=.2)
points(xseq, apply(t.dat[,group.names[[i]]],1,mean)+i*sf, col=cols[i], pch=16, cex=.02)
text(x=min(t.dat[,1]), y=t.dat[1,s.names[i]]+i*sf, labels=i, col=cols[i], pos=2, cex=bcex)
text(x=max(t.dat[,1]), y=t.dat[nrow(dat$t.dat),s.names[i]]+i*sf, labels=tags[i], col=cols[i], pos=4, cex=bcex)
}else{
lines(xseq, t.dat[,group.names[[i]]]+i*sf, col=cols[i], lwd=.2)
points(xseq, t.dat[,group.names[[i]]]+i*sf, col=cols[i], pch=16, cex=.02)
text(x=min(t.dat[,1]), y=t.dat[1,s.names[i]]+i*sf, labels=i, col=cols[i], pos=2, cex=bcex)
text(x=max(t.dat[,1]), y=t.dat[nrow(dat$t.dat),s.names[i]]+i*sf, labels=tags[i], col=cols[i], pos=4, cex=bcex)
}
}
if(region.group){
text(mean(b.xseq),
par('usr')[4]+yinch(3),
"Cluster Region",
col='blue',
font=2,
cex=2
)
par(xpd=F)
abline(v=b.xseq, col="blue", lwd=2)
}
par(xpd=F)
#### Tool for adding boxplot
if(bp){
par(xpd=T)
dev.current<-dev.cur()
if(is.null(bp.param)){
#dat.select<-"c.dat"
#bp.param<-c(
#grep("area",names(dat$c.dat),value=T),
##tryCatch(grep("mean.gfp",names(dat$c.dat)),error=function(e) NULL),
#grep("mean.gfp",names(dat$c.dat),value=T),
#grep("mean.tritc",names(dat$c.dat),value=T))
#cols<-c("blue", "darkgreen","red")
#}else{
dat.select<-select.list(names(dat))
bp.param<-as.character(select.list(names(dat[[dat.select]]), multiple=T))
cols<-NULL
}else{
dat.select<-"c.dat"
bp.param<-bp.param
}
#for(i in 1:length(group.names)){
#if(length(group.names[[i]])>5){
# xleft<-max(t.dat[,1])+xinch(.3)
# xright<-xleft+xinch(1)*length(bp.param)
# y<-(apply(t.dat[nrow(t.dat),group.names[[i]]],1,mean)+i)*sf
# ybottom<- y-yinch(.5)
# ytop<-y+yinch(.5)
# bp.img<-bpfunc.3(dat,group.names[[i]],dat.select, bp.param, print.out=T, cols=cols, bcex=bcex)
# dev.set(dev.current)
# rasterImage(bp.img,xleft, ybottom, xright, ytop)
#}else{}
#170509 How to create a new window with these boxplots
dev.new(width=length(bp.param), height=subset.n)
bp.win<-dev.cur()
par(mfrow=c(subset.n,1))
group.names.rev<-rev(group.names)
for(i in 1:length(group.names.rev)){
par(mar=c(0,0,0,0))
plot(0,0)
dim<-par("usr")
xleft<-par("usr")[1]
xright<-par("usr")[2]
ybottom<- par("usr")[3]
ytop<-par("usr")[4]
bp.img<-bpfunc.3(dat,group.names.rev[[i]],dat.select, bp.param, print.out=T, cols=cols, bcex=bcex, bp.pts=bp.pts)
dev.set(bp.win)
rasterImage(bp.img,xleft, ybottom, xright, ytop)
text(xleft+xinch(.1), 0, subset.n-i+1, cex=2)
}
}
}
if(interact){
continue<-select.list(c("yes", "no"))
if(continue=="yes"){
while(i!=length(s.names)+1){
i<-scan(n=1)
if(length(i)==0 || i>length(s.names) || i==0){i<-length(s.names)+1} #guard the empty scan() return as well
else{
assesment.selection<-select.list(c("Trace.Click","LinesEvery","LinesStack", "drop"))
if(assesment.selection=="Trace.Click"){
Trace.Click.dev(dat,names(which(info==i, arr.ind=T)))
}
if(assesment.selection=="LinesEvery"){
number.to.display<-as.numeric(select.list(as.character(c(5,10,20))))
LinesEvery.5(dat,sample(names(which(info==i, arr.ind=T)))[1:min(number.to.display,sum(info==i))], img, pic.plot=T, lmain=i,m.order="area", plot.new=T, col="black")
}
if(assesment.selection=="LinesStack"){
LinesStack.2(dat,names(which(info==i, arr.ind=T)),bp=F,lmain=i, interact=T, region.group=T,levs=NULL, plot.new=TRUE,bcex=.7, img=dat$img1, t.type="mp.1")
}
if(assesment.selection=="drop"){
rd.namels2 <- as.character(substitute(dat))
dat$bin[names(which(info==i, arr.ind=T)), "drop"]<-1
assign(rd.namels2, dat, envir=.GlobalEnv)
print(paste("You Dropped Group",i))
}
}
}
}
#return(pam5$clustering)
}
#dev.off(which=linesstack.win)
return(group.names)
}
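## A minimal usage sketch for LinesStack.2 (an assumption-laden example, not part of the
## pipeline): it presumes this file is sourced and an RD experiment list named `dat` is
## loaded with t.dat/w.dat/c.dat/bin/img1 filled in, and that an "mp.1" trace exists.
## Wrapped in if(FALSE) so sourcing this file stays side-effect free.
if(FALSE){
groups<-LinesStack.2(dat, m.names=NULL, t.type="mp.1", subset.n=10,
interact=TRUE, region.group=TRUE, bp=FALSE)
#region.group=TRUE asks for two clicks to bound the clustering region;
#interact=TRUE then reads group numbers from the console (0 exits).
length(groups[[1]]) #groups[[i]] holds the cell ids in cluster i
}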
# Stacked traces.
# Input is a list of cells.
# Currently written for the 5 cell classes:
# +IB4+CGRP, +IB4, +CGRP, -/-, glia
LinesStack.3 <- function(dat,cells=NULL,lmain="",levs=NULL, plot.new=TRUE,bcex=.7, sf=.9, img=NULL, sample.num=NULL){
graphics.off()
if(is.null(img)){img<-dat$img1}
if(is.null(sample.num)){sample.num<-10}
if(is.null(cells)){
cells<-dat$cells
cells<-cells[c('000','00','01','10','11')]
}else{
cells.main<-dat$cells
cells.main<-cells.main[c('000','00','01','10','11')]
bob<-list()
for(i in 1:length(cells.main)){
x.names<-intersect(cells,cells.main[[i]])
bob[[i]]<-x.names
}
cells<-bob
names(cells)<-c('000','00','01','10','11')
}
#cells<-cells[c('000','00','01','10','11')]
if(plot.new){dev.new(width=10,height=6)}
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
#m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
library(RColorBrewer)
par(mar=c(4,2,4,4), bty="L")
#hbc <- (5*(.8*sf))+max(t.dat[,Reduce(c,stack(cells)[1])])
#hbc <- 5*sf+max(t.dat[,Reduce(c,stack(cells)[1])])
ylim <- c(.5,5.2)
#ylim<-c(-.1,hbc)
plot(xseq,t.dat[,cells[[1]][1]],ylim=ylim,xlab="Time (min)",main=lmain,type="n", xaxt="n",xlim=c(min(xseq), max(xseq)*1.5))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(min(ylim),length(x1s))*1.03-rep(min(ylim),length(x1s))
y2s <- rep(max(ylim),length(x1s))*1.03
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
text(dat$w.dat[match(levs,wr),"Time"],rep(c(.5,.6,.7),length=length(levs)),levs,cex=.6,offset=0, pos=4)#,offset=-offs}
}
blc<-dat$blc
## Tool for creating mean and st.dev calculation
library(cluster)
blc<-dat$blc
group.means<-list()
group.names<-list()
for(i in 1:length(cells)){
if(length(cells[[i]])>1){
x.names<-cells[[i]]
group.names[[i]]<-names(cells[i])
group.means[i]<-paste(
length(cells[[i]]),":",
round(mean(dat$c.dat[x.names, "area"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.gfp"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.gfp"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.tritc"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.tritc"]), digits=0), sep="")
#adding standard deviation,"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
}
else{
x.names<-cells[[i]]
group.names[[i]]<-names(cells[i])
group.means[i]<-paste(
length(cells[[i]]))
}}
## Tool For adding color to selected Traces
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,5)
cols <- cols[1:5]
cols<-c("mediumpurple1","goldenrod1", "firebrick1", "limegreen", "steelblue3")
## Tool for adding labeling for single line within stacked traces
for(i in 1:length(cells)){
if(length(cells[[i]])>1){
matlines(xseq, blc[,cells[[i]]]+i*sf, col=rgb(0,0,0,20, maxColorValue=100), lwd=.3)
lines(xseq, apply(blc[,cells[[i]]],1,mean)+i*sf, col=cols[i], lwd=1.2)
text(x=min(blc[,1]), y=blc[nrow(t.dat),cells[[i]]]+i*sf, labels=group.names[i], col=cols[i], pos=2, cex=bcex)
text(x=max(blc[,1]), y=blc[nrow(t.dat),cells[[i]]]+i*sf, labels=group.means[i], col="black", pos=4, cex=bcex)
}
else{
lines(xseq, blc[,cells[[i]]]+i*sf, col=rgb(0,0,0,20, maxColorValue=100), lwd=.3)
text(x=min(blc[,1]), y=blc[nrow(t.dat),cells[[i]]]+i*sf, labels=group.names[i], col=cols[i], pos=2, cex=bcex)
}}
## Tool for adding boxplot to plot
dev.current<-dev.cur()
for(i in 1:length(cells)){
xleft<-max(blc[,1])*1.05
xright<-xleft+xinch(2.74)
y<-(blc[nrow(t.dat),cells[[i]]]+i*sf)
ybottom<- y-.55
ytop<-ybottom+yinch(.85)
#dev.set(dev.list()[length(dev.list())])
bp.img<-bpfunc.2(dat,cells[[i]])
dev.set(dev.current)
rasterImage(bp.img,xleft, ybottom, xright, ytop)
}
continue<-select.list(c("yes", "no"))
if(continue=="yes"){
i<-1
while(i!=0){
i<-scan(n=1)
if(length(i)==0 || i==0){break} #exit cleanly before indexing cells[[0]]
cells.tp<-cells[[i]]
LinesEvery.4(dat,sample(cells.tp)[1:min(15,length(cells.tp))], img, pic.plot=T, sf=.3, lmain=i,m.order="area")
#multi.pic.zoom(dat, names(which(info==i, arr.ind=T)), img, plot.new=T)
}
#for(i in 1:length(cells)){
# if(length(cells[[i]])<20){
# LinesEvery.4(dat,cells[[i]], img, pic.plot=T, lmain=names(cells[i]), m.order="area", levs=levs, sf=.6)
# }
# else{
# # select the range of
# sample.num<-ceiling(sample.num/2)
# cells.n<-sort(c(ceiling(seq(1,length(cells[[i]]), length.out=5)),ceiling(seq(1,length(cells[[i]]), length.out=5))+1))
# cells.rs<-c.sort(dat$c.dat[cells[[i]],], "area")
# LinesEvery.4(dat, cells.rs[cells.n],img, lmain=names(cells[i]), m.order="area",levs=levs, sf=.4)}
# #multi.pic.zoom(dat, names(which(info==i, arr.ind=T)), img, plot.new=T)
#}
}
#else{LinesEvery.4(dat, m.names,img)}
#return(pam5$clustering)
#return(group.names)
#print(group.means)
}
LinesStack.4 <- function(dat,cells=NULL,lmain="",levs=NULL, plot.new=TRUE,bcex=.7, sf=.9, img=NULL, sample.num=NULL){
if(is.null(img)){img<-dat$img.gtd}
if(is.null(sample.num)){sample.num<-10}
if(is.null(cells)){cells<-dat$cells}
else{
cells.main<-dat$cells
cells.main<-cells.main[c('000','00','01','10','11')]
bob<-list()
for(i in 1:length(cells.main)){
x.names<-intersect(cells,cells.main[[i]])
bob[[i]]<-x.names
}
cells<-bob
names(cells)<-c('000','00','01','10','11')
}
#cells<-cells[c('000','00','01','10','11')]
if(plot.new){dev.new(width=10,height=6)}
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
#m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
library(RColorBrewer)
par(mar=c(4,2,4,4), bty="L")
#hbc <- (5*(.8*sf))+max(t.dat[,Reduce(c,stack(cells)[1])])
#hbc <- 5*sf+max(t.dat[,Reduce(c,stack(cells)[1])])
ylim <- c(.5,5.2)
#ylim<-c(-.1,hbc)
plot(xseq,t.dat[,cells[[1]][1]],ylim=ylim,xlab="Time (min)",main=lmain,type="n", xaxt="n",xlim=c(min(xseq), max(xseq)*1.5))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(min(ylim),length(x1s))*1.03-rep(min(ylim),length(x1s))
y2s <- rep(max(ylim),length(x1s))*1.03
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
text(dat$t.dat[match(levs,wr),"Time"],rep(c(.5,.6,.7),length=length(levs)),levs,cex=.6,offset=0, pos=4)#,offset=-offs}
}
blc<-dat$blc
## Tool for creating mean and st.dev calculation
library(cluster)
blc<-dat$blc
group.means<-list()
group.names<-list()
for(i in 1:length(cells)){
if(length(cells[[i]])>1){
x.names<-cells[[i]]
group.names[[i]]<-names(cells[i])
group.means[i]<-paste(
length(cells[[i]]),":",
round(mean(dat$c.dat[x.names, "area"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.gfp"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.gfp"]), digits=0)," : ",
round(mean(dat$c.dat[x.names, "mean.tritc"]), digits=0),"\u00b1",round(sd(dat$c.dat[x.names, "mean.tritc"]), digits=0), sep="")
#adding standard deviation,"\u00b1",round(sd(dat$c.dat[x.names, "area"]), digits=0)," : ",
}
else{
x.names<-cells[[i]]
group.names[[i]]<-names(cells[i])
group.means[i]<-paste(
length(cells[[i]]))
}}
## Tool For adding color to selected Traces
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,5)
cols <- cols[1:5]
cols<-c("mediumpurple1","goldenrod1", "firebrick1", "limegreen", "steelblue3")
## Tool for adding labeling for single line within stacked traces
for(i in 1:length(cells)){
if(length(cells[[i]])>1){
matlines(xseq, blc[,cells[[i]]]+i*sf, col=rgb(0,0,0,10, maxColorValue=100), lwd=.3)
lines(xseq, apply(blc[,cells[[i]]],1,mean)+i*sf, col=cols[i], lwd=1.2)
text(x=min(blc[,1]), y=blc[nrow(t.dat),cells[[i]]]+i*sf, labels=group.names[i], col=cols[i], pos=2, cex=bcex)
text(x=max(blc[,1]), y=blc[nrow(t.dat),cells[[i]]]+i*sf, labels=group.means[i], col="black", pos=4, cex=bcex)
}
else{
lines(xseq, blc[,cells[[i]]]+i*sf, col=rgb(0,0,0,80, maxColorValue=100), lwd=.3)
text(x=min(blc[,1]), y=blc[nrow(t.dat),cells[[i]]]+i*sf, labels=group.names[i], col=cols[i], pos=2, cex=bcex)
}}
## Tool for adding boxplot to plot
for(i in 1:length(cells)){
xleft<-max(blc[,1])*1.05
xright<-xleft+xinch(2.74)
y<-(blc[nrow(t.dat),cells[[i]]]+i*sf)
ybottom<- y-.55
ytop<-ybottom+yinch(.85)
#dev.set(dev.list()[length(dev.list())])
rasterImage(bpfunc.2(dat,cells[[i]]),xleft, ybottom, xright, ytop)
}
continue<-select.list(c("yes", "no"))
if(continue=="yes"){
for(i in 1:length(cells)){
if(length(cells[[i]])<20){
LinesEvery.4(dat,cells[[i]], img, pic.plot=T, lmain=names(cells[i]), m.order="area", levs=levs, sf=.6)
}
else{
# select the range of
sample.num<-ceiling(sample.num/2)
cells.n<-sort(c(ceiling(seq(1,length(cells[[i]]), length.out=5)),ceiling(seq(1,length(cells[[i]]), length.out=5))+1))
cells.rs<-c.sort(dat$c.dat[cells[[i]],], "area")
LinesEvery.4(dat, cells.rs[cells.n],img, lmain=names(cells[i]), m.order="area",levs=levs, sf=.4)}
#multi.pic.zoom(dat, names(which(info==i, arr.ind=T)), img, plot.new=T)
}
}
#else{LinesEvery.4(dat, m.names,img)}
#return(pam5$clustering)
print(group.means) #print before returning; the old order made this line unreachable
return(group.names)
}
bpfunc<-function(dat,n.names){
if(length(n.names)>4){
#par(width=12, height=4.5)
par(mfrow=c(2,3))
par(mar=c(2.5,2.5,2.5,2.5))
par(cex=.8)
dat.names<-names(dat$c.dat)
#lab.1<-grep("gfp.1",dat.names,ignore.case=T, value=T)
#lab.2<-grep("gfp.2",dat.names, ignore.case=T, value=T)
#lab.3<-grep("tritc",dat.names, ignore.case=T, value=T)
#lab.4<-grep("area",dat.names, ignore.case=T, value=T)
#if(dat$c.dat[n.names, lab.1]!="N/A"){lab.1<-lab.1}
#else{rm(lab.1)}
#if(dat$c.dat[n.names, lab.2]!="N/A"){}
#if(dat$c.dat[n.names, lab.3]!="N/A"){ }
#if(dat$c.dat[n.names, lab.4]!="N/A"){}
##Color intensity 1
boxplot(dat$c.dat[n.names,"mean.gfp"],main="GFP",bty="n",ylim=c(0,max(dat$c.dat["mean.gfp"])), col="springgreen4", outline=F)
text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.gfp"])), factor=10),
y=dat$c.dat[n.names,"mean.gfp"],
labels=as.character(dat$c.dat[n.names,"id"]))
#Color Intensity 2
boxplot(dat$c.dat[n.names,"mean.tritc"],main="IB4",ylim=c(0,max(dat$c.dat["mean.tritc"])), col="firebrick4", outline=F)
text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.tritc"])), factor=10),
y=dat$c.dat[n.names,"mean.tritc"],
labels=as.character(dat$c.dat[n.names,"id"]))
# area
boxplot(dat$c.dat[n.names,"area"],main="Area",ylim=c(0,max(dat$c.dat["area"])), col="lightslateblue", outline=F)
text(x=jitter(rep(1, length(dat$c.dat[n.names,"area"])), factor=10),
y=dat$c.dat[n.names,"area"],
labels=as.character(dat$c.dat[n.names,"id"]))
##Color intensity 1 log
boxplot(1+dat$c.dat[n.names,"mean.gfp"],main="GFP",bty="n", col="springgreen4", outline=T, log="y")
text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.gfp"])), factor=10),
y=1+dat$c.dat[n.names,"mean.gfp"],
labels=as.character(dat$c.dat[n.names,"id"]))
#Color Intensity 2 log
boxplot(1+dat$c.dat[n.names,"mean.tritc"],main="IB4", col="firebrick4", outline=T, log="y")
text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.tritc"])), factor=10),
y=1+dat$c.dat[n.names,"mean.tritc"],
labels=as.character(dat$c.dat[n.names,"id"]))
# area log
boxplot(1+dat$c.dat[n.names,"area"],main="Area", col="lightslateblue", outline=T, log="y")
text(x=jitter(rep(1, length(dat$c.dat[n.names,"area"])), factor=10),
y=1+dat$c.dat[n.names,"area"],
labels=as.character(dat$c.dat[n.names,"id"]))
dev.set(dev.list()[1])}
else{
par(mfrow=c(1,3))
par(mar=c(2,2,2,2))
stripchart(dat$c.dat[n.names,"mean.gfp"],main="GFP",ylim=c(0,max(dat$c.dat["mean.gfp"])),cex=2, col=c("green4"), outline=T, vertical=T, pch=".")
text(x=1,
y=dat$c.dat[n.names,"mean.gfp"],
labels=as.character(dat$c.dat[n.names,"id"]), col="green4")
stripchart(dat$c.dat[n.names,"mean.tritc"],main="IB4",ylim=c(0,max(dat$c.dat["mean.tritc"])), ,cex=2,col="red", outline=F, vertical=T, pch=".")
text(x=1,
y=dat$c.dat[n.names,"mean.tritc"],
labels=as.character(dat$c.dat[n.names,"id"]), col="red")
stripchart(dat$c.dat[n.names,"area"],main="Area",ylim=c(0,max(dat$c.dat["area"])), ,cex=2,col="lightslateblue", outline=F, vertical=T, pch=".")
text(x=1,
y=dat$c.dat[n.names,"area"],
labels=as.character(dat$c.dat[n.names,"id"]), col="lightslateblue")
dev.set(dev.list()[5])}
}
bpfunc.2<-function(dat,n.names, bp.pts=T){
require(png)
png('tmp.png', width=2.74, height=.85, units="in", res=200)
#dev.new(width=2.74, height=1)
if(length(n.names)>4){
par(mfrow=c(1,3),mar=c(1,3,2,0), bty="n",lwd=1, lty=1, cex.axis=.8, cex=.6)
dat.names<-names(dat$c.dat)
#lab.1<-grep("gfp.1",dat.names,ignore.case=T, value=T)
#lab.2<-grep("gfp.2",dat.names, ignore.case=T, value=T)
#lab.3<-grep("tritc",dat.names, ignore.case=T, value=T)
#lab.4<-grep("area",dat.names, ignore.case=T, value=T)
#if(dat$c.dat[n.names, lab.1]!="N/A"){lab.1<-lab.1}
#else{rm(lab.1)}
#if(dat$c.dat[n.names, lab.2]!="N/A"){}
#if(dat$c.dat[n.names, lab.3]!="N/A"){ }
#if(dat$c.dat[n.names, lab.4]!="N/A"){}
##Color intensity 1
boxplot(dat$c.dat[n.names,"mean.gfp"],main="GFP",
ylim=c(min(dat$c.dat["mean.gfp"]),max(dat$c.dat["mean.gfp"])), col="springgreen4", outline=F,yaxt="n", boxwex=.8, medlwd=.4,whisklty=1)
if(bp.pts==T){stripchart(dat$c.dat[n.names,"mean.gfp"], add=T, method="jitter", vertical=T, jitter=.2, pch=18, cex=.7)}
mtext(paste(round(mean(dat$c.dat[n.names, "mean.gfp"]), digits=3),"\u00b1",round(sd(dat$c.dat[n.names, "mean.gfp"]), digits=3)),1, cex=.5)
#text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.gfp"])), factor=10),
#y=dat$c.dat[n.names,"mean.gfp"],
#labels=as.character(dat$c.dat[n.names,"id"]), cex=.4)
axis(2, at=c(round(min(dat$c.dat["mean.gfp"]), digits=3),round(max(dat$c.dat["mean.gfp"]), digits=3)))#,labels=x, col.axis="red", las=2)
box("figure")
#Color Intensity 2
boxplot(dat$c.dat[n.names,"mean.tritc"],main="IB4",
ylim=c(min(dat$c.dat["mean.tritc"]),max(dat$c.dat["mean.tritc"])), col="red", outline=F, boxwex=.8, yaxt="n", medlwd=.4,whisklty=1)
if(bp.pts==T){stripchart(dat$c.dat[n.names,"mean.tritc"], add=T, method="jitter", vertical=T, jitter=.2, pch=18, cex=.7)}
#text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.tritc"])), factor=10),
#y=dat$c.dat[n.names,"mean.tritc"],
#labels=as.character(dat$c.dat[n.names,"id"]), cex=.4)
mtext(paste(round(mean(dat$c.dat[n.names, "mean.tritc"]), digits=3),"\u00b1",round(sd(dat$c.dat[n.names, "mean.tritc"]), digits=3)),1, cex=.5)
axis(2, at=c(round(min(dat$c.dat["mean.tritc"]), digits=3),round(max(dat$c.dat["mean.tritc"]), digits=3)))#,labels=x, col.axis="red", las=2)
box("figure")
# area
boxplot(dat$c.dat[n.names,"area"],main="Area",
ylim=c(min(dat$c.dat["area"]),max(dat$c.dat["area"])), col="lightslateblue", outline=F, boxwex=.8, yaxt="n", medlwd=.4,whisklty=1)
if(bp.pts==T){stripchart(dat$c.dat[n.names,"area"], add=T, method="jitter", vertical=T, jitter=.2, pch=18, cex=.7)}
#text(x=jitter(rep(1, length(dat$c.dat[n.names,"mean.tritc"])), factor=10),
#y=dat$c.dat[n.names,"mean.tritc"],
#labels=as.character(dat$c.dat[n.names,"id"]), cex=.4)
mtext(paste(round(mean(dat$c.dat[n.names, "area"]), digits=0),"\u00b1",round(sd(dat$c.dat[n.names, "mean.tritc"]), digits=0)),1, cex=.5)
axis(2, at=c(round(min(dat$c.dat["area"]), digits=0),round(max(dat$c.dat["area"]), digits=0)))#,labels=x, col.axis="red", las=2)
box("figure")
}
else{
par(mfrow=c(1,3))
par(mar=c(2,2,2,2))
stripchart(dat$c.dat[n.names,"mean.gfp"],main="GFP",ylim=c(0,max(dat$c.dat["mean.gfp"])),cex=1, col=c("green4"), outline=T, vertical=T, pch=".")
text(x=1,
y=dat$c.dat[n.names,"mean.gfp"],
labels=as.character(dat$c.dat[n.names,"id"]), col="green4", cex=.8)
box("figure")
stripchart(dat$c.dat[n.names,"mean.tritc"],main="IB4",ylim=c(0,max(dat$c.dat["mean.tritc"])),cex=1,col="red", outline=T, vertical=T, pch=".")
text(x=1,
y=dat$c.dat[n.names,"mean.tritc"],
labels=as.character(dat$c.dat[n.names,"id"]), col="red", cex=.8)
box("figure")
stripchart(dat$c.dat[n.names,"area"],main="Area",ylim=c(0,max(dat$c.dat["area"])),cex=1,col="lightslateblue", outline=T, vertical=T, pch=".")
text(x=1,
y=dat$c.dat[n.names,"area"],
labels=as.character(dat$c.dat[n.names,"id"]), col="lightslateblue", cex=.8)
box("figure")
}
dev.off()
tmp.png <- png::readPNG("tmp.png")
#dim(tmp.png) #debugging leftover; has no effect inside a function
unlink("tmp.png")
return(tmp.png)
}
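## bpfunc.2 above relies on a render-to-raster pattern: draw to a png() device, read the
## file back with png::readPNG(), then paste the array into another plot with
## rasterImage(). A self-contained sketch of just that pattern (synthetic data; no RD
## object needed), kept inside if(FALSE) so sourcing stays side-effect free:
if(FALSE){
require(png)
png("tmp.png", width=2, height=1, units="in", res=150)
par(mar=c(1,1,1,1))
boxplot(rnorm(50), horizontal=TRUE)
dev.off()
bp.img<-png::readPNG("tmp.png")
unlink("tmp.png")
plot(0:1, 0:1, type="n")
rasterImage(bp.img, .1, .1, .9, .5) #paste the rendered boxplot into the open plot
}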
bpfunc.3<-function(dat,n.names=NULL, dat.select=NULL, parameters=NULL,bp.pts=F, print.out=F, plot.new=T,bcex=NULL, ylim=NULL, cols=NULL){
if(class(dat)=="list"){
if(is.null(dat.select)){dat.select<-select.list(names(dat))}else{dat.select<-dat.select}
dat<-dat[[dat.select]]
}else{dat<-dat}
require(png)
#dev.new(width=2.74, height=1)
if(is.null(n.names)){n.names<-dat$c.dat$id}else{n.names<-n.names}
if(is.null(parameters)){parameters<-select.list(names(dat), multiple=T)
}else{parameters<-parameters}
if(length(parameters)>6){width=ceiling(sqrt(length(parameters)));height=ceiling(sqrt(length(parameters)))
}else{width=length(parameters);height=1}
if(print.out){png('tmp.png', width=width*1.5, height=height*1.5, units="in", res=200,type="cairo")
}else{if(plot.new){dev.new(width=width*1.5, height=height*1.5)}}
if(is.null(bcex)){bcex<-.8}else{bcex<-bcex}
if(is.null(cols)){cols<-"blue";cols<-(rep(cols, length(parameters)))}else{cols<-cols}
par(mfrow=c(height,width),mar=c(1,3,3,0), bty="n",lwd=1, lty=1, cex.axis=.8, cex=.6)
#loop through selected parameters, and applies it to selected dataframe for dat list
if(length(n.names)>4){
for(i in 1:length(parameters)){
if(is.null(ylim)){ylim.i<-c(min(dat[,parameters[i]]),max(dat[,parameters[i]]))
}else{ylim.i<-ylim}
main.name<-strsplit(parameters[i], "[.]")[[1]]
boxplot(dat[n.names,parameters[i]],main=paste(main.name, collapse=" "),
ylim=ylim.i, col=cols[i], outline=F,yaxt="n", boxwex=.8, medlwd=.4,whisklty=1, cex=bcex)
if(bp.pts==T){stripchart(dat[n.names,parameters[i]], add=T, method="jitter", vertical=T, jitter=.2, pch=18, cex=.5)
}else{
text(x=jitter(rep(1, length(dat[n.names,parameters[i]])), factor=10),
y=dat[n.names,parameters[i]],
labels=as.character(row.names(dat[n.names,])), cex=bcex,
col=rgb(0,0,0,4,maxColorValue=10))
}
mtext(paste(round(mean(dat[n.names, parameters[i]]), digits=3),"\u00b1",round(sd(dat[n.names, parameters[i]]), digits=3)),1, cex=bcex)
axis(2, at=c(min(ylim.i),max(ylim.i)), cex=bcex)#,labels=x, col.axis="red", las=2)
box("figure")
}
}else{
for(i in 1:length(parameters)){
if(is.null(ylim)){ylim.i<-c(min(dat[,parameters[i]]),max(dat[,parameters[i]]))
}else{ylim.i<-ylim}
main.name<-strsplit(parameters[i], "[.]")[[1]]
stripchart(dat[n.names,parameters[i]],main=paste(main.name, collapse=" "),ylim=ylim.i,cex=1, col=c("green4"), outline=T, vertical=T, pch=".")
text(x=1,
y=dat[n.names,parameters[i]],
labels=as.character(dat[n.names,"id"]), col=cols[i], cex=2)
box("figure")
}
}
if(print.out){
dev.off()
tmp.png <- png::readPNG("tmp.png")
#dim(tmp.png) #debugging leftover; has no effect inside a function
unlink("tmp.png")
return(tmp.png)
}
}
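## A hedged usage sketch for bpfunc.3: it assumes an RD list `dat` whose c.dat dataframe
## contains the illustrative columns named below. Kept inside if(FALSE):
if(FALSE){
#on-screen boxplots of two c.dat columns across all cells
bpfunc.3(dat, dat.select="c.dat", parameters=c("area","mean.gfp"), bp.pts=TRUE)
#or render to a raster array for rasterImage(), as LinesStack.2 does
bp.img<-bpfunc.3(dat, n.names=dat$c.dat$id, dat.select="c.dat",
parameters=c("area","mean.gfp"), print.out=TRUE)
}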
# New Boxplot Function
# dat : Experiment list RD.
# l.cells: Cells in a list format
# dat.name: Dataframe to pull data from
# col.name: column name(s) to pull data from for the boxplot
# jitter.f: factor of jitter to apply to the points
# pts: points to add to boxplot
# notchs: logical (T/F); toggles notches on the boxplots
#c("area","mean.gfp.start","mean.cy5.start")
boxplotlist<-function(dat,l.cells=NULL,dat.name="c.dat",col.name=NULL,jitter.f=.5,pts=T, notchs=F, bplog="y", sort=T){
#back up operation to fill with cells for the boxplotting
if(is.null(l.cells)){
l.cells<-dat$cell.types
}else{
l.cells<-l.cells
}
l.cells <- l.cells[lengths(l.cells)>0]
if(is.null(dat.name)){
dat.name<-"c.dat"
}else{
dat.name<-dat.name
}
if(is.null(col.name)){
col.name<-select.list(names(dat[[dat.name]]), multiple=T)
}else{
col.name<-col.name
}
#Create a blank list to fill with information
l.info<-list()
l.cell.types<-names(l.cells)
l.cell.types<-select.list(l.cell.types,multiple=T)
#First create a boxplot to get the median statistics
#but first use a for loop to gather the data needed
for(i in 1:length(l.cell.types)){
l.info[[ l.cell.types[i] ]]<-dat[[dat.name]][l.cells[[ l.cell.types[i] ]],col.name[1]]
}
#open a window
dev.new()
bp.stats<-boxplot(l.info)#plot the boxplot and assign it to an object ot gather stats
colnames(bp.stats$stats)<-bp.stats$names #rename the columns in the stats portion
#reorder the data based on the median measure and gather the cell names
if(sort==T){
l.cell.types<-colnames(bp.stats$stats)[order(bp.stats$stats[3,], decreasing=T)]
}
dev.off()#turn off window
#now create an empty list to refill with data
l.info<-list()
#once again regather the data
for(i in l.cell.types){
l.info[[i]]<-dat[[dat.name]][l.cells[[ i ]],c("id",col.name)]
}
# Now build a dataframe for a boxplot that can be interacted with by clicking
l.cell.types<-names(l.info)
bp.width<-vector()
for(i in 1:length(l.cell.types)){
l.info[[i]]["xplot"]<-jitter(rep(i,length(l.cells[[ l.cell.types[i] ]])),jitter.f)
l.info[[i]]["cell.type"]<-l.cell.types[i]
l.info[[i]]["cell.type.total"]<-length(l.cells[[ l.cell.types[i] ]])
l.info[[i]]["cell.type.total.cb"]<-paste(l.cell.types[i],":",length(l.cells[[ l.cell.types[i] ]]),sep=" ")
bp.width[i]<-length(l.cells[[ l.cell.types[i] ]])
}
#reduce the list into a dataframe
bp.df<-Reduce(rbind,l.info)
#Make the cell-type column an ordered factor for the boxplot below;
#this lets it be plotted in the ordering established above
bp.df$cell.type.total.cb<-ordered(bp.df$cell.type.total.cb,levels=unique(as.character(bp.df$cell.type.total.cb)))
#now Boxplot
dev.new(width=8, height=(3*length(col.name)))
par(mfrow=c(length(col.name),1), bty="l")
for(i in 1:length(col.name)){
boxplot(get(col.name[i])~cell.type.total.cb, data=bp.df, varwidth=T,las=2, lwd=1.5,lty=1, outline=T, log=bplog, notch=notchs,main=tools::toTitleCase(gsub("\\.", " ", col.name[i])))
if(pts){
text(bp.df[,"xplot"], bp.df[,col.name[i]], "*", cex=.5)
#text(bp.df[,"xplot"], bp.df[,col.name[i]], bp.df[,"id"], cex=.5)
}else{}
}
bp.sel<-select.list(col.name, title="Select a Bp")
windows(width=12, height=6,xpos=0, ypos=10)
bp.win<-dev.cur()
windows(width=14,height=4,xpos=0, ypos=540)
click.window<-dev.cur()
dev.set(bp.win)
par(mai=c(2,1,1,1), bty="l")
final.bp<-boxplot(get(bp.sel)~cell.type.total.cb, data=bp.df, varwidth=T,las=2, cex=.8, lwd=1.5,lty=1, outline=T, log=bplog, notch=notchs,main=tools::toTitleCase(gsub("\\.", " ", bp.sel)))
text(bp.df[,"xplot"], bp.df[,bp.sel], bp.df[,"id"], cex=.5, col=rgb(0,0,0,15,maxColorValue=100))
xreg<-par("usr")[1]
yreg<-par("usr")[2]
#points(xreg+xinch1)
i<-identify(bp.df[,"xplot"], bp.df[,bp.sel], labels=bp.df[,"id"], n=1)
ret.list <- NULL
while(length(i) > 0){
cell.i<-bp.df[i,"id"]
dev.set(click.window)
PeakFunc7(dat,cell.i,t.type="mp.1")
dev.set(bp.win)
#i<-identify(bp.df[,"xplot"], bp.df[,bp.sel], labels=bp.df[,"id"], n=1)
i<-identify(bp.df[,"xplot"], bp.df[,bp.sel],labels="", n=1)
}
return(list(l.cell.types=l.cell.types,final.bp=final.bp, bp.df=bp.df))
}
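## A hedged usage sketch for boxplotlist: it assumes an RD list `dat` with cell.types and
## c.dat, and the column names below are illustrative. The call is interactive: it asks
## which cell types to include, then lets you click points on the final boxplot to view
## each cell's trace (click stop/Esc to finish). Kept inside if(FALSE):
if(FALSE){
bp.out<-boxplotlist(dat, l.cells=dat$cell.types, dat.name="c.dat",
col.name=c("area","mean.gfp.start"), pts=TRUE, sort=TRUE)
str(bp.out$bp.df) #one row per cell, with the jittered x position used for clicking
}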
LinesStack.select <- function(dat,m.names,lmain="",levs=NULL, plot.new=TRUE,bcex=.8, sf=.2, subset.n=5){
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
else{levs<-levs}
m.names <- intersect(m.names,names(t.dat))
hbc <- subset.n*sf+max(t.dat[,m.names])
xseq <- t.dat[,1]
if(plot.new){dev.new(width=10,height=6)}
library(RColorBrewer)
par(mar=c(4,2,4,4))
#ylim <- c(-.1,1.4)
ylim<-c(-.1,hbc)
plot(xseq,t.dat[,m.names[1]],ylim=ylim,xlab="Time (min)",main=lmain,type="n", xaxt="n",xlim=c(min(xseq)-1.5,max(xseq)+1.5))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
text(dat$t.dat[match(levs,wr),"Time"],rep(c(abs(min(ylim)), abs(min(ylim*1.5))),length=length(levs)),levs,cex=bcex,offset=0, pos=4)#,offset=-offs}
}
blc<-dat$blc
## Tool for adding line and point plot for all lines
#matlines(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), lwd=.01)
#matpoints(xseq, blc[,m.names], col=rgb(0,0,0,3, maxColorValue=100), pch=16, cex=.03)
#cols <- rainbow(length(m.names),start=.55)
library(cluster)
blc<-dat$blc
pam5 <- pam(t(blc[,m.names]),k=subset.n)
s.names <- row.names(pam5$medoids)
pam5.tab <- table(pam5$clustering)
tags <- paste(paste("#",names(pam5.tab),sep=""),as.vector(pam5.tab),sep=":")
info<-pam5$clustering
## Tool For adding color to selected Traces
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(s.names)/length(cols)))
cols <- cols[1:length(s.names)]
## Tool for adding labeling for single line within stacked traces
for(i in 1:length(s.names)){
matlines(xseq, blc[,names(which(info==i, arr.ind=T))]+i*sf, col=rgb(0,0,0,10, maxColorValue=100), lwd=.01)
lines(xseq, blc[,s.names[i]]+i*sf, col=cols[i], lwd=.5)
points(xseq, blc[,s.names[i]]+i*sf, col=cols[i], pch=16, cex=.03)
text(x=min(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i*sf, labels=s.names[i], col=cols[i], pos=2, cex=bcex)
text(x=max(blc[,1]), y=blc[nrow(t.dat),s.names[i]]+i*sf, labels=tags[i], col=cols[i], pos=4, cex=bcex)
}
return(pam5$clustering)
}
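## The clustering step above is just pam() on transposed traces: each column of blc is one
## cell's trace, so t(blc[,m.names]) makes cells the rows and pam() groups cells by trace
## shape. A toy standalone sketch with synthetic traces (no RD object needed):
if(FALSE){
library(cluster)
traces<-sapply(1:20, function(i) sin(seq(0,6,.1)+i%%4)+rnorm(61, sd=.1))
colnames(traces)<-paste0("X.",1:20)
cl<-pam(t(traces), k=4) #rows are cells, so pam clusters cells
table(cl$clustering) #cluster sizes, as used for the trace tags
row.names(cl$medoids) #one representative cell per cluster
}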
Lines.Multi<-function(dat,n.names){
dev.new(width=2, height=2)
par(mar=c(0,0,0,0))
plot(0,0, pch=NA, xlim=c(0,2), ylim=c(0,2))
points(x=c(1,1), y=c(1.5,1), pch=15)
text(x=c(1,1), y=c(1.5,1), c("next", "off"), pos=2)
dev.new()
click.i<-0
i<-1
while(click.i!=2){
dev.set(dev.list()[2])
LinesEvery.2(dat,n.names[i:min(i+10, length(n.names))], m.order="area", plot.new=F) #guard against running past the end of n.names
dev.set(dev.list()[1])
click.i<-identify(x=c(1,1), y=c(1.5,1), n=1)
if(click.i==1){i<-i+10}
}
graphics.off()
}
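## Lines.Multi uses identify() on a tiny plot as a crude clickable menu: each point is a
## "button" and identify(n=1) returns the index of the point clicked. A standalone sketch
## of that pattern (requires an interactive graphics device):
if(FALSE){
dev.new(width=2, height=2)
par(mar=c(0,0,0,0))
plot(0,0, pch=NA, xlim=c(0,2), ylim=c(0,2))
points(x=c(1,1), y=c(1.5,1), pch=15)
text(x=c(1,1), y=c(1.5,1), c("next","off"), pos=2)
click.i<-identify(x=c(1,1), y=c(1.5,1), n=1) #1 = "next", 2 = "off"
}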
linesmean<-function(dat, x.names,t.type=NULL, ylim=NULL, bcex=NULL, cols=NULL,lmain=NULL, lines.all=T, pic.plot=F){
if(is.null(ylim)){ylim<-c(0,1.5)}else{ylim<-ylim}
if(is.null(bcex)){bcex<-.9}else{bcex<-bcex}
if(is.null(cols)){cols<-"red"}else{cols<-cols}
if(is.null(t.type)){t.type<-select.list(names(dat))
}else{t.type<-t.type}
dat.t<-dat[[t.type]]
dev.new(width=10,height=4)
x.mean<-apply(dat.t[,x.names],1,mean)
xseq<-dat$blc[,1]
plot(xseq, x.mean, col="white", lwd=.2, ylim=ylim,main=lmain)
levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
wr<-dat$w.dat$wr1
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(par("usr")[3],length(x1s))
y2s <- rep(par("usr")[4],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey90",border="black")
par(xpd=T)
text(dat$w.dat[match(levs,wr),"Time"],rep(c(par("usr")[3]-yinch(.4),par("usr")[3]-yinch(.65)),length=length(levs)),levs,cex=bcex,offset=0, pos=4)#,offset=-offs}
if(lines.all){
matlines(xseq, dat.t[,x.names], col=rgb(0,0,0,20, maxColorValue=100), lwd=.01)
}
lines(xseq, x.mean, col=cols, lwd=.2)
points(xseq, x.mean, col=cols, pch=16, cex=.02)
if(pic.plot){
cell.view()}
}
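## A hedged usage sketch for linesmean: it assumes an RD list `dat` with a blc trace
## dataframe, and the cell selection below is illustrative. Kept inside if(FALSE):
if(FALSE){
linesmean(dat, x.names=dat$c.dat$id[1:20], t.type="blc",
lmain="Mean of first 20 cells", lines.all=TRUE)
}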
PulseViewer<-function(dat, cell, window.min=NULL, select.trace="t.dat"){
if(class(select.trace)=="character"){
dat.select<-select.trace
dat.t<-dat[[dat.select]]
}
else{
dat.select<-menu(names(dat))
dat.t<-dat[[dat.select]]
}
window.region<-select.list(setdiff(unique(dat$w.dat$wr1),""))
if(is.null(window.min)){window.min=7
}else{window.min<-window.min}
#What is the time frame from w.dat?
window.time<-dat$w.dat[which(dat$w.dat$wr1==window.region),"Time"]
#What is the maximum window defined
window.max<-min(window.time)+window.min
#which rows fall within the window; compare against the window start, not window.min itself,
#and keep the dataframe rows so row.names() returns the row labels
window.region<-row.names(dat$w.dat[dat$w.dat$Time>=min(window.time) & dat$w.dat$Time<=window.max,])
plot(dat.t[window.region,"Time"], dat.t[window.region, cell])
}
#Display the analysis of a single trace
#dat is the trace dataframe with "Time" in the first column and cell trace intensities in subsequent columns
#i is the index column to be analyzed and displayed.
#shws is the smoothing half window size
#Plotit is a flag indicating whether the results should be plotted.
#wr is the response window factor
#SNR.lim is the signal to noise ratio limit for peak detection
#bl.meth is the method for baseline correction.
PeakFunc2 <- function(dat,i,shws=2,phws=20,Plotit=F,wr=NULL,SNR.lim=2,bl.meth="TopHat",lmain=NULL){
library("MALDIquant")
s1 <- createMassSpectrum(dat[,"Time"],dat[,i])
if(shws > 1)
s3 <- smoothIntensity(s1, method="SavitzkyGolay", halfWindowSize=shws)
else
s3 <- s1
if(Plotit)
{
bSnip <- estimateBaseline(s3, method="SNIP")
bTopHat <- estimateBaseline(s3, method="TopHat")
}
s4 <- removeBaseline(s3, method=bl.meth)
Baseline <- estimateBaseline(s3, method=bl.meth)
p <- detectPeaks(s4, method="MAD", halfWindowSize=phws, SNR=SNR.lim)
if(Plotit)
{
xlim <- range(mass(s1)) # use same xlim on all plots for better comparison
ylim <- c(-.1,1.4)
# ylim <- range(intensity(s1))
plot(s1, main=paste(lmain,i),xlim=xlim,ylim=ylim,xlab="Time (min)", xaxt="n")
axis(1, at=seq(0, length(dat[,1]), 5))
if(length(wr) > 0)
{
levs <- setdiff(unique(wr),"")
levs <- setdiff(levs,grep("blank",levs,value=T))
x1s <- tapply(dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
# cols <- rainbow(length(x1s))
rect(x1s,y1s,x2s,y2s,col="lightgrey")
# points(dat[,"Time"],as.integer(wr=="")*-1,pch=15,cex=.6)
## for(j in levs)
## {
## x1 <- mass(s3)[min(grep(j,wr))]
## x2 <- mass(s3)[max(grep(j,wr))]
## y1 <- min(ylim)-.2
## y2 <- max(ylim)+.2
## polygon(c(x1,x2,x2,x1),c(y1,y1,y2,y2),col="lightgrey",lwd=.1)
## }
text(dat[match(levs,wr),"Time"],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=.5)
}
lines(s3,lwd=3,col="cyan")
lines(s1)
lines(bSnip, lwd=2, col="red")
lines(bTopHat, lwd=2, col="blue")
lines(s4,lwd=2)
}
if((length(p) > 0)&Plotit)
{
points(p)
## label top 40 peaks
top40 <- intensity(p) %in% sort(intensity(p), decreasing=TRUE)[1:40]
labelPeaks(p, index=top40, underline=TRUE,labels=round(snr(p)[top40],2))
}
return(list(peaks=p,baseline=Baseline,dat=s4))
}
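## A hedged usage sketch for PeakFunc2: it assumes MALDIquant is installed, `t.dat` is a
## trace dataframe with Time in column 1, "X.1" names one of its cell columns, and
## `w.dat$wr1` holds the window-region factor. Kept inside if(FALSE):
if(FALSE){
res<-PeakFunc2(t.dat, "X.1", shws=2, phws=20, Plotit=TRUE,
wr=w.dat$wr1, SNR.lim=2, bl.meth="TopHat")
res$peaks #detected MassPeaks object
res$baseline #estimated baseline used for the correction
}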
PeakFunc3 <- function(dat,n.names,shws=2,phws=20,wr=NULL,SNR.lim=2,bl.meth="TopHat",lmain=NULL){
xlim <- range(dat$t.dat[,1]) # use same xlim on all plots for better comparison
ylim <- c(-.1,1.4)
# ylim <- range(intensity(s1))
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
plot(dat$t.dat[,n.names]~dat$t.dat[,1], main=paste(lmain,n.names),xlim=xlim,ylim=ylim,xlab="", xaxt="n",pch=16, lwd=1, cex=.5) #formula form; the old call passed (y,x) and swapped the axes relative to the lines() below
axis(1, at=seq(0, length(dat$t.dat[,1]), 5))
lines(dat$t.dat[,n.names]~dat$t.dat[,1])
points(dat$t.dat[,n.names]~dat$t.dat[,1], pch=16, cex=.4)
lines(dat$blc[,n.names]~dat$t.dat[,1], lwd=1, cex=.5)
points(dat$blc[,n.names]~dat$t.dat[,1], pch=16, cex=.4)
# Tool for labeling window regions
if(is.null(wr)){
wr<-dat$w.dat[,"wr1"]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$t.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$t.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
text(dat$t.dat[match(levs,wr),"Time"],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=.5)
}
# Tool for labeling the binary score
if(length(levs)>0){
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$t.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$t.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey69")
levs <- setdiff(unique(wr),"")
text(dat$t.dat[match(levs,wr),"Time"],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=.5)
}
# Tool for labeling cellular aspects, gfp.1, gfp.2, tritc, area
legend("topright", xpd=TRUE, inset=c(0,-.14), legend=c(
if(!is.null(dat$c.dat[n.names, "CGRP"])){paste("CGRP","",round(dat$c.dat[n.names,"CGRP"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp"])){paste("GFP","",round(dat$c.dat[n.names,"mean.gfp"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.2"])){paste("GFP.2","",round(dat$c.dat[n.names,"mean.gfp.2"],digits=0))},
if(!is.null(dat$c.dat[n.names, "IB4"])){paste("IB4","",round(dat$c.dat[n.names,"IB4"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.tritc"])){paste("IB4","",round(dat$c.dat[n.names, "mean.tritc"], digits=0))},
if(!is.null(dat$c.dat[n.names, "area"])){paste("area","", round(dat$c.dat[n.names, "area"], digits=0))})
,bty="n", cex=.8)
# Tool for labeling window region information
x.name<-n.names
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
levs.loc<-tapply(dat$t.dat[,"Time"],as.factor(wr),mean)[levs]
mtext(c("snr", "tot", "max", "wm"), side=1, at=-1, line=c(1.4, 2.1, 2.8, 3.5), cex=.6)
for(i in levs){
snr.name<-grep(paste(i,".snr", sep=""), names(dat$scp), value=T)
tot.name<-grep(paste(i,".tot", sep=""), names(dat$scp), value=T)
max.name<-grep(paste(i,".max", sep=""), names(dat$scp), value=T)
wm.name<-grep(paste(i,".wm", sep=""), names(dat$scp), value=T)
snr.val<-round(dat$scp[x.name, snr.name], digits=1)
tot.val<-round(dat$scp[x.name, tot.name], digits=2)
max.val<-round(dat$scp[x.name, max.name], digits=2)
wm.val<-round(dat$scp[x.name, wm.name], digits=1)
mtext(snr.val, side=1, at=levs.loc[i], line=1.4, cex=.6)
mtext(tot.val, side=1, at=levs.loc[i], line=2.1, cex=.6)
mtext(max.val, side=1, at=levs.loc[i], line=2.8, cex=.6)
mtext(wm.val, side=1, at=levs.loc[i], line=3.5, cex=.6)
}
}
PeakFunc4 <- function(dat,n.names,Plotit.maldi=T,Plotit.der=T,lmain=NULL){
par(mfrow=c(2,1))
if(Plotit.der)
{
ylim<-c(-1, 2)
plot(dat$der[,n.names]~dat$t.dat[-1,1], ylim=ylim,type="l",ylab=expression(paste(Delta," (340/380)/time")),xlab="",main=paste("Derivative",n.names), xaxt="n",pch=16, lwd=1, cex=.5)
# Tool for labeling window regions
wr<-dat$w.dat[,"wr1"]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$t.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$t.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
text(dat$t.dat[match(levs,wr),"Time"],rep(-1,length(levs)),levs,pos=4,offset=0,cex=.5)
# Tool for labeling the binary score
if(length(levs)>0){
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$t.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$t.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey69")
levs <- setdiff(unique(wr),"")
text(dat$t.dat[match(levs,wr),"Time"],rep(-1,length(levs)),levs,pos=4,offset=0,cex=.5)
}
# Tool for labeling window region information
x.name<-n.names
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
levs.loc<-tapply(dat$t.dat[,"Time"],as.factor(wr),mean)[levs]
mtext(c("tot", "max", "min", "wmax", "wmin"), side=1, at=-1, line=c(0.7,1.4, 2.1, 2.8, 3.5), cex=.6)
for(i in levs){
tot.name<-grep(paste(i,".der.tot", sep=""), names(dat$scp), value=T)
max.name<-grep(paste(i,".der.max", sep=""), names(dat$scp), value=T)
min.name<-grep(paste(i,".der.min", sep=""), names(dat$scp), value=T)
wmax.name<-grep(paste(i,".der.wmax", sep=""), names(dat$scp), value=T)
wmin.name<-grep(paste(i,".der.wmin", sep=""), names(dat$scp), value=T)
tot.val<-round(dat$scp[x.name, tot.name], digits=2)
max.val<-round(dat$scp[x.name, max.name], digits=2)
min.val<-round(dat$scp[x.name, min.name], digits=2)
wmax.val<-round(dat$scp[x.name, wmax.name], digits=2)
wmin.val<-round(dat$scp[x.name, wmin.name], digits=2)
mtext(tot.val, side=1, at=levs.loc[i], line=0.7, cex=.6)
mtext(max.val, side=1, at=levs.loc[i], line=1.4, cex=.6)
mtext(min.val, side=1, at=levs.loc[i], line=2.1, cex=.6)
mtext(wmax.val, side=1, at=levs.loc[i], line=2.8, cex=.6)
mtext(wmin.val, side=1, at=levs.loc[i], line=3.5, cex=.6)
}
lines(dat$der[,n.names]~dat$t.dat[-1,1], lwd=.01, col="black")
abline(h=0.5)
#axis(1, at=seq(0, length(dat$t.dat[,1]), 5))
}
if(Plotit.maldi)
{
xlim <- range(dat$t.dat[,1]) # use same xlim on all plots for better comparison
ylim <- c(0,1.4)
# ylim <- range(intensity(s1))
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
plot(dat$t.dat[,n.names]~dat$t.dat[,1], main=paste(lmain,n.names),xlim=xlim,ylim=ylim,xlab="", ylab="(340/380)", xaxt="n",pch=16, lwd=1, cex=.5)
axis(1, at=seq(0, length(dat$t.dat[,1]), 5))
# Tool for labeling window regions
wr<-dat$w.dat[,"wr1"]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$t.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$t.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
#text(dat$t.dat[match(levs,wr),"Time"],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=.5)
# Tool for labeling the binary score
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$t.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$t.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(min(ylim)-.2,length(x1s))
y2s <- rep(max(ylim)+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey69")
levs <- setdiff(unique(wr),"")
text(dat$t.dat[match(levs,wr),"Time"],rep(-1,length(levs)),levs,pos=4,offset=0,cex=.5)
# Tool for labeling cellular aspects, gfp.1, gfp.2, tritc, area
legend("topright", xpd=TRUE, inset=c(0,-.14), legend=c(
if(!is.null(dat$c.dat[n.names, "CGRP"])){paste("CGRP","",round(dat$c.dat[n.names,"CGRP"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp"])){paste("GFP","",round(dat$c.dat[n.names,"mean.gfp"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.1"])){paste("GFP.1","",round(dat$c.dat[n.names,"mean.gfp.1"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.2"])){paste("GFP.2","",round(dat$c.dat[n.names,"mean.gfp.2"],digits=0))},
if(!is.null(dat$c.dat[n.names, "IB4"])){paste("IB4","",round(dat$c.dat[n.names,"IB4"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.tritc"])){paste("IB4","",round(dat$c.dat[n.names, "mean.tritc"], digits=0))},
if(!is.null(dat$c.dat[n.names, "area"])){paste("area","", round(dat$c.dat[n.names, "area"], digits=0))})
,bty="n", cex=.8)
# Tool for labeling window region information
x.name<-n.names
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
levs.loc<-tapply(dat$t.dat[,"Time"],as.factor(wr),mean)[levs]
mtext(c("snr", "tot", "max", "wm"), side=1, at=-1, line=c(1.4, 2.1, 2.8, 3.5), cex=.6)
for(i in levs){
snr.name<-grep(paste(i,".snr", sep=""), names(dat$scp), value=T)
tot.name<-grep(paste(i,".tot", sep=""), names(dat$scp), value=T)
max.name<-grep(paste(i,".max", sep=""), names(dat$scp), value=T)
wm.name<-grep(paste(i,".wm", sep=""), names(dat$scp), value=T)
snr.val<-round(dat$scp[x.name, snr.name], digits=1)
tot.val<-round(dat$scp[x.name, tot.name], digits=2)
max.val<-round(dat$scp[x.name, max.name], digits=2)
wm.val<-round(dat$scp[x.name, wm.name], digits=1)
mtext(snr.val, side=1, at=levs.loc[i], line=1.4, cex=.6)
mtext(tot.val, side=1, at=levs.loc[i], line=2.1, cex=.6)
mtext(max.val, side=1, at=levs.loc[i], line=2.8, cex=.6)
mtext(wm.val, side=1, at=levs.loc[i], line=3.5, cex=.6)
}
lines(dat$t.dat[,n.names]~dat$t.dat[,1])
points(dat$t.dat[,n.names]~dat$t.dat[,1], pch=16, cex=.4)
lines(dat$blc[,n.names]~dat$t.dat[,1], lwd=1, cex=.5)
points(dat$blc[,n.names]~dat$t.dat[,1], pch=16, cex=.4)
#abline(h=.5)
}
# return(list(peaks=p,baseline=Baseline,dat=s4))
}
# Fixed y axis
# Photo addition
# Derivative plot
# win
PeakFunc5 <- function(dat,n.names,select.trace=F,Plotit.trace=T,Plotit.both=F, info=T,lmain=NULL, bcex=.7, ylim.max=1.6){
if(is.null(ylim.max)){ylim.max<-1.4}else{ylim.max<-ylim.max}
if(Plotit.trace){ylim <- c(-.1,ylim.max)}
if(Plotit.both){ylim <- c(-.5,ylim.max)}
par(xpd=FALSE)
if(select.trace==TRUE){
dat.select<-menu(names(dat))
dat.t<-dat[[dat.select]]
}
else{dat.t<-dat$t.dat}
xlim <- range(dat.t[,1]) # use same xlim on all plots for better comparison
# ylim <- range(intensity(s1))
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
par(mar=c(6,4.5,3.5,11))
plot(dat.t[,n.names]~dat.t[,1], main=paste(lmain,n.names),xlim=xlim,ylim=ylim,xlab="", ylab="",pch=16, lwd=1, cex=.5)
#axis(1, at=seq(0, length(dat.t[,1]), 5),tick=TRUE )
# Tool for labeling window regions
wr<-dat$w.dat[,"wr1"]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
#text(dat.t[match(levs,wr),"Time"],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=.5)
# Tool for labeling the binary score
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey69")
levs <- setdiff(unique(wr),"")
text(dat.t[match(levs,wr),"Time"],c(min(ylim), .1),levs,pos=4,offset=0,cex=bcex)
# Tool for labeling cellular aspects, gfp.1, gfp.2, tritc, area
legend(x=par("usr")[2]-xinch(.4), y=par("usr")[4]+yinch(.5), xpd=TRUE, inset=c(0,-.14), legend=c(
if(!is.null(dat$c.dat[n.names, "CGRP"])){paste("CGRP","",round(dat$c.dat[n.names,"CGRP"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp"])){paste("GFP","",round(dat$c.dat[n.names,"mean.gfp"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.1"])){paste("GFP.1","",round(dat$c.dat[n.names,"mean.gfp.1"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.2"])){paste("GFP.2","",round(dat$c.dat[n.names,"mean.gfp.2"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.dapi"])){paste("DAPI","",round(dat$c.dat[n.names,"mean.dapi"],digits=0))},
if(!is.null(dat$c.dat[n.names, "IB4"])){paste("IB4","",round(dat$c.dat[n.names,"IB4"],digits=0))},
if(!is.null(dat$c.dat[n.names, "mean.tritc"])){paste("IB4","",round(dat$c.dat[n.names, "mean.tritc"], digits=0))},
if(!is.null(dat$c.dat[n.names, "area"])){paste("area","", round(dat$c.dat[n.names, "area"], digits=0))},
if(!is.null(dat$c.dat[n.names, "ROI.Area"])){paste("area","", round(dat$c.dat[n.names, "ROI.Area"], digits=0))},
#if(!is.null(dat$c.dat[n.names, "perimeter"])){paste("perimeter","", round(dat$c.dat[n.names, "perimeter"], digits=0))},
if(!is.null(dat$c.dat[n.names, "circularity"])){paste("circularity","", round(dat$c.dat[n.names, "circularity"], digits=3))}
)
,bty="n", cex=.7)
#Adding binary scoring for labeling to plot
par(xpd=TRUE)
if(!is.null(dat$bin[n.names, "gfp.bin"])){text(y=1.9, x=max(dat.t[,1])*1.09, paste("mean.gfp :",dat$bin[n.names,"gfp.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "tritc.bin"])){text(y=1.9, x=max(dat.t[,1])*1.19, paste("IB4 :",dat$bin[n.names,"tritc.bin"]), cex=.7)}
# Tool for labeling window region information
if(info){
x.name<-n.names
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
levs.loc<-tapply(dat$w.dat[,"Time"],as.factor(wr),mean)[levs]
mtext(c("max","tot","snr"), side=1, at=-max(dat.t[,1])*.05, line=c(1.4, 2.1, 2.8), cex=.6)
for(i in levs){
max.name<-paste(i,".max", sep="")
max.val<-round(dat$scp[x.name, max.name], digits=3)
mtext(max.val, side=1, at=levs.loc[i], line=1.4, cex=.6)
tot.name<-paste(i,".tot", sep="")
tot.val<-round(dat$scp[x.name, tot.name], digits=3)
mtext(tot.val, side=1, at=levs.loc[i], line=2.1, cex=.6)
snr.name<-paste(i,".snr", sep="")
snr.val<-round(dat$scp[x.name, snr.name], digits=3)
mtext(snr.val, side=1, at=levs.loc[i], line=2.8, cex=.6)
}
}
par(xpd=FALSE)
if(Plotit.both){
if(!is.null(dat$der)){lines(dat$der[,n.names]~dat.t[-1,1], lwd=.01, col="paleturquoise4")}
abline(h=0)
lines(dat.t[,n.names]~dat.t[,1])
points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
}
if(Plotit.trace){
lines(dat.t[,n.names]~dat.t[,1])
points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
}
## Tool for adding rasterImages to plot
img.dim<-dim(dat$img1)[1]
zf<-20
x<-dat$c.dat[n.names,"center.x"]
left<-x-zf
right<-x+zf
if(left<=0){left=0; right=2*zf} #clamp after computing both edges; the old order let right<-x+zf overwrite the edge fix
if(right>=img.dim){left=img.dim-(2*zf);right=img.dim}
y<-dat$c.dat[n.names,"center.y"]
top<-y-zf
bottom<-y+zf
if(top<=0){top=0; bottom=2*zf}
if(bottom>=img.dim){top=img.dim-(2*zf);bottom=img.dim}
par(xpd=TRUE)
ymax<-par("usr")[4]
xmax<-par("usr")[2]
if(!is.null(dat$img1)){
img1<-dat$img1
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax
ybottom<-ymax-yinch(.8)
rasterImage(img1[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img2)){
img2<-dat$img2
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax
ybottom<-ymax-yinch(.8)
rasterImage(img2[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img3)){
img3<-dat$img3
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
rasterImage(img3[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img4)){
img4<-dat$img4
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
rasterImage(img4[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
}
# Y axis self adjusting; works with trac.click.3
#t.type added to select the trace to plot
#yvar: logical. If TRUE the y axis will vary
#ylim.max: sets the top y limit. Single value only
#zf added 170127
PeakFunc6 <- function(dat,n.names,t.type="t.dat",Plotit.trace=T,Plotit.both=F, info=T,lmain=NULL, bcex=.7, yvar=F, ylim.max=NULL, zf=40){
if(class(t.type)=="character"){
dat.select<-t.type
dat.t<-dat[[dat.select]]
}
else{
dat.select<-menu(names(dat))
dat.t<-dat[[dat.select]]
}
if(yvar){
ymax<-max(dat.t[,n.names])*1.05
ymin<-min(dat.t[,n.names])*.95
yrange<-ymax-ymin
}else{
if(is.null(ylim.max)){ylim.max<-1.4}else{ylim.max<-ylim.max}
if(Plotit.trace){ylim <- c(-.1,ylim.max)}
if(Plotit.both){ylim <- c(-.5,ylim.max)}
ymin<-min(ylim)
ymax<-max(ylim)
yrange<-ymax-ymin
}
if(Plotit.trace){ylim <- c(ymin,ymax)}
if(Plotit.both){ymin<- -.5;ylim <- c(ymin,ymax)}
par(xpd=FALSE)
xlim <- range(dat.t[,1]) # use same xlim on all plots for better comparison
# ylim <- range(intensity(s1))
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
par(mar=c(6,4.5,3.5,11))
plot(dat.t[,n.names]~dat.t[,1], main=paste(lmain,n.names),xlim=xlim,ylim=ylim,xlab="", ylab="",pch="", cex=.5)
#axis(1, at=seq(0, length(dat.t[,1]), 5),tick=TRUE )
# Tool for labeling window regions
wr<-dat$w.dat[,"wr1"]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
#text(dat.t[match(levs,wr),"Time"],rep(-.1,length(levs)),levs,pos=4,offset=0,cex=.5)
# Tool for labeling cellular aspects, gfp.1, gfp.2, tritc, area
legend(x=max(xlim)*.95, y=ymax+(.45*yrange), xpd=TRUE, inset=c(0,-.14), legend=c(
if(!is.null(dat$c.dat[n.names, "CGRP"])){paste("CGRP","",round(dat$c.dat[n.names,"CGRP"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp"])){paste("GFP","",round(dat$c.dat[n.names,"mean.gfp"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.1"])){paste("GFP.1","",round(dat$c.dat[n.names,"mean.gfp.1"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.2"])){paste("GFP.2","",round(dat$c.dat[n.names,"mean.gfp.2"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.dapi"])){paste("DAPI","",round(dat$c.dat[n.names,"mean.dapi"],digits=4))},
if(!is.null(dat$c.dat[n.names, "IB4"])){paste("IB4","",round(dat$c.dat[n.names,"IB4"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc"])){paste("IB4","",round(dat$c.dat[n.names, "mean.tritc"], digits=4))},
if(!is.null(dat$c.dat[n.names, "area"])){paste("area","", round(dat$c.dat[n.names, "area"], digits=4))},
if(!is.null(dat$c.dat[n.names, "ROI.Area"])){paste("area","", round(dat$c.dat[n.names, "ROI.Area"], digits=4))},
#if(!is.null(dat$c.dat[n.names, "perimeter"])){paste("perimeter","", round(dat$c.dat[n.names, "perimeter"], digits=0))},
if(!is.null(dat$c.dat[n.names, "circularity"])){paste("circularity","", round(dat$c.dat[n.names, "circularity"], digits=4))}
)
,bty="n", cex=.7)
#Adding binary scoring for labeling to plot
par(xpd=TRUE)
if(!is.null(dat$bin[n.names, "gfp.bin"])){text(y=ymax+(.25*yrange), x=max(dat.t[,1])*1.09, paste("mean.gfp :",dat$bin[n.names,"gfp.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "tritc.bin"])){text(y=ymax+(.25*yrange), x=max(dat.t[,1])*1.19, paste("IB4 :",dat$bin[n.names,"tritc.bin"]), cex=.7)}
# Tool for labeling window region information
if(info){
x.name<-n.names
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
levs.loc<-tapply(dat$w.dat[,"Time"],as.factor(wr),mean)[levs]
mtext(c("max","tot"), side=1, at=-max(dat.t[,1])*.05, line=c(1.4, 2.1), cex=.6)
for(i in levs){
max.name<-paste(i,".max", sep="")
max.val<-round(dat$scp[x.name, max.name], digits=3)
mtext(max.val, side=1, at=levs.loc[i], line=1.4, cex=.6)
tot.name<-paste(i,".tot", sep="")
tot.val<-round(dat$scp[x.name, tot.name], digits=3)
mtext(tot.val, side=1, at=levs.loc[i], line=2.1, cex=.6)
}
# Tool for labeling the binary score
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey69")
levs <- setdiff(unique(wr),"")
}
text(dat.t[match(levs,wr),"Time"],c(ymin, ymin+(yrange*.2)),levs,pos=4,offset=0,cex=bcex)
if(Plotit.both){
if(!is.null(dat$der)){lines(dat$der[,n.names]~dat.t[-1,1], lwd=.01, col="paleturquoise4")}
par(xpd=T)
abline(h=0)
lines(dat.t[,n.names]~dat.t[,1])
points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
par(xpd=F)
}
if(Plotit.trace){
par(xpd=T)
lines(dat.t[,n.names]~dat.t[,1])
points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
par(xpd=F)
}
## Tool for adding rasterImages to plot
###Finding the picture loaction of the cells
if(is.null(zf)){zf<-20
}else{zf<-zf}
img.dim<-dim(dat$img1)[1]
x<-dat$c.dat[n.names,"center.x"]
left<-x-zf
right<-x+zf
if(left<=0){left=0; right=2*zf} #clamp after computing both edges; the old order let right<-x+zf overwrite the edge fix
if(right>=img.dim){left=img.dim-(2*zf);right=img.dim}
y<-dat$c.dat[n.names,"center.y"]
top<-y-zf
bottom<-y+zf
if(top<=0){top=0; bottom=2*zf}
if(bottom>=img.dim){top=img.dim-(2*zf);bottom=img.dim}
par(xpd=TRUE)
### Where to plot pictures
#ymax<-max(dat.t[,n.names])*1.05
#ymin<-min(dat.t[,n.names])*.95
#yrange<-ymax-ymin
ymax<-par("usr")[4]
xmax<-par("usr")[2]
if(!is.null(dat$img1)){
img1<-dat$img1
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax+yinch(.8)
ybottom<-ymax
rasterImage(img1[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img2)){
img2<-dat$img2
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax+yinch(.8)
ybottom<-ymax
rasterImage(img2[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img3)){
img3<-dat$img3
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax
ybottom<-ymax-yinch(.8)
rasterImage(img3[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img4)){
img4<-dat$img4
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax
ybottom<-ymax-yinch(.8)
tryCatch(rasterImage(img4[top:bottom,left:right,],xleft,ybottom,xright,ytop),error=function(e) rasterImage(img4[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img5)){
img5<-dat$img5
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
rasterImage(img5[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img6)){
img6<-dat$img6
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
rasterImage(img6[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img7)){
img7<-dat$img7
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(1.6)
ybottom<-ymax-yinch(2.4)
rasterImage(img7[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
if(!is.null(dat$img8)){
img8<-dat$img8
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(1.6)
ybottom<-ymax-yinch(2.4)
rasterImage(img8[top:bottom,left:right,],xleft,ybottom,xright,ytop)
}
}
#This peak function allows multiple t.types to be plotted
#170515: added pts and lns (logical)
#added dat.n for insertion of the RD file name
PeakFunc7 <- function(dat,n.names,t.type="t.dat",Plotit.trace=T,Plotit.both=F, info=T,lmain=NULL, bcex=.7, yvar=T, ylim.max=NULL, zf=40, pts=T, lns=T, levs=NULL, underline=T, dat.n=""){
dat.name<-deparse(substitute(dat))
if(dat.name=="dat"){dat.name<-dat.n
}else{dat.name<-dat.name}
if(is.null(lmain)){
lmain=n.names
}else{lmain=lmain}
if(class(t.type)=="character")
{
dat.select<-t.type
dat.t<-dat[[dat.select]]
}else{
dat.select<-select.list(names(dat), multiple=T)
dat.t<-dat[[dat.select]]
}
if(yvar){
ymax<-max(dat.t[,n.names])*1.05
ymin<-min(dat.t[,n.names])*.95
yrange<-ymax-ymin
}else{
if(is.null(ylim.max)){ylim.max<-1.4}
if(Plotit.trace){ylim <- c(-.1,ylim.max)}
if(Plotit.both){ylim <- c(-.5,ylim.max)}
ymin<-min(ylim)
ymax<-max(ylim)
yrange<-ymax-ymin
}
if(Plotit.trace){ylim <- c(ymin,ymax)}
if(Plotit.both){ymin<- -.5;ylim <- c(ymin,ymax)}
par(xpd=FALSE)
xlim <- range(dat.t[,1]) # use same xlim on all plots for better comparison
# ylim <- range(intensity(s1))
if(is.null(levs)){levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
par(mar=c(9,6.2,3.5,13), bty="n")
plot(dat.t[,n.names]~dat.t[,1], main=lmain,xlim=xlim,ylim=ylim,xlab="", ylab="",pch="", cex=.5)
#axis(3,tick=TRUE, outer=F )
axis(1, at= seq(0, max(dat.t[,1]),10), tick=TRUE)
# Tool for labeling window regions
wr<-dat$w.dat[,"wr1"]
#levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
# Tool for labeling cellular aspects, gfp.1, gfp.2, tritc, area
legend(x=par("usr")[1]-xinch(1.45), y=par("usr")[3]-yinch(.25), xpd=TRUE, inset=c(0,-.14),bty="n", cex=.7, legend=c(
if(!is.null(dat$c.dat[n.names, "CGRP"])){paste("CGRP","",round(dat$c.dat[n.names,"CGRP"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp"])){paste("GFP","",round(dat$c.dat[n.names,"mean.gfp"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.1"])){paste("GFP.1","",round(dat$c.dat[n.names,"mean.gfp.1"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.2"])){paste("GFP.2","",round(dat$c.dat[n.names,"mean.gfp.2"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.start"])){paste("mean.gfp.start","",round(dat$c.dat[n.names,"mean.gfp.start"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.end"])){paste("mean.gfp.end","",round(dat$c.dat[n.names,"mean.gfp.end"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.immuno"])){paste("CGRP immunostain","",round(dat$c.dat[n.names,"mean.gfp.immuno"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.dapi"])){paste("DAPI","",round(dat$c.dat[n.names,"mean.dapi"],digits=4))},
if(!is.null(dat$c.dat[n.names, "IB4"])){paste("IB4","",round(dat$c.dat[n.names,"IB4"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc"])){paste("IB4","",round(dat$c.dat[n.names, "mean.tritc"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc.start"])){paste("IB4.start","",round(dat$c.dat[n.names, "mean.tritc.start"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc.end"])){paste("IB4.end","",round(dat$c.dat[n.names, "mean.tritc.end"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc.immuno"])){paste("NF200 immunostain","",round(dat$c.dat[n.names, "mean.tritc.immuno"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.cy5.start"])){paste("IB4.start","",round(dat$c.dat[n.names, "mean.cy5.start"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.cy5.end"])){paste("IB4.end","",round(dat$c.dat[n.names, "mean.cy5.end"], digits=4))},
if(!is.null(dat$c.dat[n.names, "area"])){paste("area","", round(dat$c.dat[n.names, "area"], digits=4))},
if(!is.null(dat$c.dat[n.names, "ROI.Area"])){paste("area","", round(dat$c.dat[n.names, "ROI.Area"], digits=4))},
#if(!is.null(dat$c.dat[n.names, "perimeter"])){paste("perimeter","", round(dat$c.dat[n.names, "perimeter"], digits=0))},
if(!is.null(dat$c.dat[n.names, "circularity"])){paste("circularity","", round(dat$c.dat[n.names, "circularity"], digits=4))}
)
)
legend(x=par("usr")[2]+xinch(.8), y=par("usr")[3]-yinch(.9), xpd=TRUE, inset=c(0,-.14), bty="n", cex=.7, legend=dat.name)
#Adding binary scoring for labeling to plot
par(xpd=TRUE)
if(!is.null(dat$bin[n.names, "gfp.bin"])){text(y=par("usr")[4]+yinch(.5), x=par("usr")[2]+xinch(1.8), paste("GFP:",dat$bin[n.names,"gfp.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "tritc.bin"])){text(y=par("usr")[4]+yinch(.25), x=par("usr")[2]+xinch(1.8), paste("IB4 :",dat$bin[n.names,"tritc.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "cy5.bin"])){text(y=par("usr")[4]+yinch(.25), x=par("usr")[2]+xinch(1.8), paste("IB4 :",dat$bin[n.names,"cy5.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "drop"])){text(y=par("usr")[4]+yinch(0), x=par("usr")[2]+xinch(1.8), paste("Drop :",dat$bin[n.names,"drop"]), cex=.7)}
# Tool for labeling window region information
levs.loc<-tapply(dat$w.dat[,"Time"],as.factor(wr),mean)[levs]
if(info){
x.name<-n.names
#levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
mtext(c("max","snr"), side=3, at=-max(dat.t[,1])*.05, line=c(0, .7), cex=.6)
for(i in 1:length(levs)){
max.name<-paste(levs[i],".max", sep="")
max.val<-round(dat$scp[x.name, max.name], digits=3)
mtext(max.val, side=3, at=levs.loc[ levs[i] ], line=0, cex=.6)
tot.name<-paste(levs[i],".snr", sep="")
tot.val<-round(dat$scp[x.name, tot.name], digits=3)
mtext(tot.val, side=3, at=levs.loc[ levs[i] ], line=.7, cex=.6)
}
# Tool for labeling the binary score
#levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs1<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs1]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs1]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey80")
#levs <- setdiff(unique(wr),"")
}
#text(dat.t[match(levs,wr),"Time"],c(ymin, ymin+(yrange*.2)),levs,pos=4,offset=0,cex=bcex)
#text(dat.t[match(levs,wr),"Time"],par("usr")[3],levs,pos=3,offset=-4.2,cex=bcex, srt=90)
levs_cex <- nchar(levs)
cexSize<- 12
levs_cex[ levs_cex <= cexSize*1.3 ] <- 1
levs_cex[ levs_cex > cexSize*1.3 ] <- cexSize/levs_cex[ levs_cex>cexSize*1.3 ]*1.3
text(levs.loc,par("usr")[3],levs,pos=3,offset=-4.3,cex=levs_cex, srt=90)
if(Plotit.both){
if(!is.null(dat$der)){lines(dat$der[,n.names]~dat.t[-1,1], lwd=.01, col="paleturquoise4")}
par(xpd=T)
abline(h=0)
if(lns){lines(dat.t[,n.names]~dat.t[,1])
}else{}
if(pts){points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
}else{}
par(xpd=F)
}
if(Plotit.trace){
par(xpd=T)
if(lns){lines(dat.t[,n.names]~dat.t[,1])
}else{}
if(pts){points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
}else{}
par(xpd=F)
}
##Tool for adding underline to plot
if(underline){
par(xpd=F)
abline(h=min(dat.t[,n.names]), col="black")
par(xpd=T)
}else{}
## Tool for adding rasterImages to plot
###Finding the picture location of the cells
if(!is.null(dat$img1)){
if(is.null(zf)){zf<-20}
img.dim<-dim(dat$img1)[1]
x<-dat$c.dat[n.names,"center.x"]
left<-x-zf
if(left<=0){left=0; right=2*zf}
right<-x+zf
if(right>=img.dim){left=img.dim-(2*zf);right=img.dim}
y<-dat$c.dat[n.names,"center.y"]
top<-y-zf
if(top<=0){top=0; bottom=2*zf}
bottom<-y+zf
if(bottom>=img.dim){top=img.dim-(2*zf);bottom=img.dim}
par(xpd=TRUE)
}
### Where to plot pictures
#ymax<-max(dat.t[,n.names])*1.05
#ymin<-min(dat.t[,n.names])*.95
#yrange<-ymax-ymin
ymax<-par("usr")[4]
xmax<-par("usr")[2]
if(!is.null(dat$img1)){
img1<-dat$img1
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax+yinch(.8)
ybottom<-ymax
tryCatch(
rasterImage(img1[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img1[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img2)){
img2<-dat$img2
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax+yinch(.8)
ybottom<-ymax
tryCatch(
rasterImage(img2[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img2[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img3)){
img3<-dat$img3
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax
ybottom<-ymax-yinch(.8)
tryCatch(
rasterImage(img3[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img3[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img4)){
img4<-dat$img4
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax
ybottom<-ymax-yinch(.8)
tryCatch(
rasterImage(img4[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img4[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img5)){
img5<-dat$img5
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
tryCatch(
rasterImage(img5[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img5[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img6)){
img6<-dat$img6
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
tryCatch(
rasterImage(img6[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img6[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img7)){
img7<-dat$img7
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(1.6)
ybottom<-ymax-yinch(2.4)
tryCatch(
rasterImage(img7[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img7[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img8)){
img8<-dat$img8
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(1.6)
ybottom<-ymax-yinch(2.4)
tryCatch(
rasterImage(img8[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img8[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
}
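#Hedged usage sketch for PeakFunc7. `RD.exp` and cell "X.1" are hypothetical
#placeholders; any RD list with t.dat/w.dat/c.dat/bin/scp (and optionally
#img1..img8) should work:
#PeakFunc7(RD.exp, "X.1")                                  # default t.dat trace
#PeakFunc7(RD.exp, "X.1", t.type="blc", yvar=F, ylim.max=1.6, zf=60)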
PeakFunc8 <- function(dat, n.names, t.type="t.dat", xlim=NULL, Plotit.trace=T,Plotit.both=F, info=T, lmain=NULL, bcex=.7, yvar=T, ylim=NULL, zf=40, pts=T, lns=T, levs=NULL, underline=T, dat.n="", lwd=1, img_plot=T){
#How to add the name of the experiment to the plot
dat.name<-deparse(substitute(dat))
if(dat.name=="dat"){dat.name<-dat.n}
#How to add a name to the plot automatically
if(is.null(lmain)){lmain<-n.names}
#Choose the trace to display on the plot
if(class(t.type)=="character")
{
dat.select<-t.type
dat.t<-dat[[dat.select]]
}else{
dat.select<-select.list(names(dat), multiple=T)
dat.t<-dat[[dat.select]]
}
#y limit plots
if(yvar){
ymax<-max(dat.t[,n.names])*1.05
ymin<-min(dat.t[,n.names])*.95
yrange<-ymax-ymin
if(is.null(ylim)){ylim<-c(ymin,ymax)} # otherwise plot() would auto-range even when yvar=T
}
#if(Plotit.trace){ylim <- c(ymin,ymax)}
#if(Plotit.both){ymin<- -.5;ylim <- c(ymin,ymax)}
par(xpd=FALSE)
#Tool to change the display of the xlimits
if(is.null(xlim)){xlim <- range(dat.t[,1])} # use same xlim on all plots for better comparison
# ylim <- range(intensity(s1))
if(is.null(levs)){levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
if(img_plot){
par(mar=c(9,6.2,3.5,13), bty="n")
}else{
par(mar=c(9,6.2,3.5,5), bty="n")
}
plot(dat.t[,n.names]~dat.t[,1], main=lmain,xlim=xlim,ylim=ylim,xlab="", ylab=expression(paste(Delta,"F/F")),pch="", cex=.5)
#axis(3,tick=TRUE, outer=F )
axis(1, at= seq(0, max(dat.t[,1]),10), tick=TRUE)
# Tool for labeling window regions
wr<-dat$w.dat[,"wr1"]
#levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey95")
# Tool for labeling cellular aspects, gfp.1, gfp.2, tritc, area
legend(x=par("usr")[1]-xinch(1.45), y=par("usr")[3]-yinch(.25), xpd=TRUE, inset=c(0,-.14),bty="n", cex=.7, legend=c(
if(!is.null(dat$c.dat[n.names, "CGRP"])){paste("CGRP","",round(dat$c.dat[n.names,"CGRP"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp"])){paste("GFP","",round(dat$c.dat[n.names,"mean.gfp"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.1"])){paste("GFP.1","",round(dat$c.dat[n.names,"mean.gfp.1"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.2"])){paste("GFP.2","",round(dat$c.dat[n.names,"mean.gfp.2"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.start"])){paste("mean.gfp.start","",round(dat$c.dat[n.names,"mean.gfp.start"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.end"])){paste("mean.gfp.end","",round(dat$c.dat[n.names,"mean.gfp.end"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.gfp.immuno"])){paste("CGRP immunostain","",round(dat$c.dat[n.names,"mean.gfp.immuno"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.dapi"])){paste("DAPI","",round(dat$c.dat[n.names,"mean.dapi"],digits=4))},
if(!is.null(dat$c.dat[n.names, "IB4"])){paste("IB4","",round(dat$c.dat[n.names,"IB4"],digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc"])){paste("IB4","",round(dat$c.dat[n.names, "mean.tritc"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc.start"])){paste("IB4.start","",round(dat$c.dat[n.names, "mean.tritc.start"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc.end"])){paste("IB4.end","",round(dat$c.dat[n.names, "mean.tritc.end"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.tritc.immuno"])){paste("NF200 immunostain","",round(dat$c.dat[n.names, "mean.tritc.immuno"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.cy5.start"])){paste("IB4.start","",round(dat$c.dat[n.names, "mean.cy5.start"], digits=4))},
if(!is.null(dat$c.dat[n.names, "mean.cy5.end"])){paste("IB4.end","",round(dat$c.dat[n.names, "mean.cy5.end"], digits=4))},
if(!is.null(dat$c.dat[n.names, "area"])){paste("area","", round(dat$c.dat[n.names, "area"], digits=4))},
if(!is.null(dat$c.dat[n.names, "ROI.Area"])){paste("area","", round(dat$c.dat[n.names, "ROI.Area"], digits=4))},
#if(!is.null(dat$c.dat[n.names, "perimeter"])){paste("perimeter","", round(dat$c.dat[n.names, "perimeter"], digits=0))},
if(!is.null(dat$c.dat[n.names, "circularity"])){paste("circularity","", round(dat$c.dat[n.names, "circularity"], digits=4))}
)
)
legend(x=par("usr")[2]+xinch(.8), y=par("usr")[3]-yinch(.9), xpd=TRUE, inset=c(0,-.14), bty="n", cex=.7, legend=dat.name)
#Adding binary scoring for labeling to plot
par(xpd=TRUE)
if(!is.null(dat$bin[n.names, "gfp.bin"])){text(y=par("usr")[4]+yinch(.5), x=par("usr")[2]+xinch(1.8), paste("GFP:",dat$bin[n.names,"gfp.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "tritc.bin"])){text(y=par("usr")[4]+yinch(.25), x=par("usr")[2]+xinch(1.8), paste("IB4 :",dat$bin[n.names,"tritc.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "cy5.bin"])){text(y=par("usr")[4]+yinch(.25), x=par("usr")[2]+xinch(1.8), paste("IB4 :",dat$bin[n.names,"cy5.bin"]), cex=.7)}
if(!is.null(dat$bin[n.names, "drop"])){text(y=par("usr")[4]+yinch(0), x=par("usr")[2]+xinch(1.8), paste("Drop :",dat$bin[n.names,"drop"]), cex=.7)}
# Tool for labeling window region information
levs.loc<-tapply(dat$w.dat[,"Time"],as.factor(wr),mean)[levs]
if(info){
x.name<-n.names
#levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])), "")
mtext(c("max","snr"), side=3, at=-max(dat.t[,1])*.05, line=c(0, .7), cex=.6)
for(i in 1:length(levs)){
max.name<-paste(levs[i],".max", sep="")
max.val<-round(dat$scp[x.name, max.name], digits=3)
mtext(max.val, side=3, at=levs.loc[ levs[i] ], line=0, cex=.6)
tot.name<-paste(levs[i],".snr", sep="")
tot.val<-round(dat$scp[x.name, tot.name], digits=3)
mtext(tot.val, side=3, at=levs.loc[ levs[i] ], line=.7, cex=.6)
}
# Tool for labeling the binary score
#levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
z<-t(dat$bin[n.names,levs])
zz<-z==1
zi<-attributes(zz)
zzz<-which(zz, arr.ind=T)
#levs<-zi$dimnames[[2]][zzz[,2]]
levs1<-unique(as.character(row.names(zzz)))
x1s <- tapply(dat$w.dat[,"Time"],as.factor(wr),min)[levs1]
x2s <- tapply(dat$w.dat[,"Time"],as.factor(wr),max)[levs1]
y1s <- rep(par("usr")[4],length(x1s))
y2s <- rep(par("usr")[3],length(x1s))
rect(x1s,y1s,x2s,y2s,col="grey80")
#levs <- setdiff(unique(wr),"")
}
#text(dat.t[match(levs,wr),"Time"],c(ymin, ymin+(yrange*.2)),levs,pos=4,offset=0,cex=bcex)
#text(dat.t[match(levs,wr),"Time"],par("usr")[3],levs,pos=3,offset=-4.2,cex=bcex, srt=90)
levs_cex <- nchar(levs)
levs_cex[ levs_cex<=12 ] <- 1
levs_cex[ levs_cex > 12 ] <- 12/levs_cex[ levs_cex>12 ]*1.3
text(levs.loc,par("usr")[3],levs,pos=3,offset=-4.2,cex=levs_cex, srt=90)
if(Plotit.both){
if(!is.null(dat$der)){lines(dat$der[,n.names]~dat.t[-1,1], lwd=.01, col="paleturquoise4")}
par(xpd=T)
abline(h=0)
if(lns){lines(dat.t[,n.names]~dat.t[,1], lwd=lwd)
}else{}
if(pts){points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
}else{}
par(xpd=F)
}
if(Plotit.trace){
par(xpd=F)
if(lns){lines(dat.t[,n.names]~dat.t[,1], lwd=lwd)
}else{}
if(pts){points(dat.t[,n.names]~dat.t[,1], pch=16, cex=.3)
}else{}
#par(xpd=F)
}
##Tool for adding underline to plot
if(underline){
par(xpd=F)
abline(h=min(dat.t[,n.names]), col="black")
par(xpd=T)
}else{}
## Tool for adding rasterImages to plot
######################################
#Adding the image plots to the traces
######################################
if(img_plot){
###Finding the picture location of the cells
if(!is.null(dat$img1)){
if(is.null(zf)){zf<-20}
img.dim<-dim(dat$img1)[1]
x<-dat$c.dat[n.names,"center.x"]
left<-x-zf
if(left<=0){left=0; right=2*zf}
right<-x+zf
if(right>=img.dim){left=img.dim-(2*zf);right=img.dim}
y<-dat$c.dat[n.names,"center.y"]
top<-y-zf
if(top<=0){top=0; bottom=2*zf}
bottom<-y+zf
if(bottom>=img.dim){top=img.dim-(2*zf);bottom=img.dim}
par(xpd=TRUE)
}
### Where to plot pictures
#ymax<-max(dat.t[,n.names])*1.05
#ymin<-min(dat.t[,n.names])*.95
#yrange<-ymax-ymin
ymax<-par("usr")[4]
xmax<-par("usr")[2]
if(!is.null(dat$img1)){
img1<-dat$img1
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax+yinch(.8)
ybottom<-ymax
tryCatch(
rasterImage(img1[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img1[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img2)){
img2<-dat$img2
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax+yinch(.8)
ybottom<-ymax
tryCatch(
rasterImage(img2[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img2[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img3)){
img3<-dat$img3
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax
ybottom<-ymax-yinch(.8)
tryCatch(
rasterImage(img3[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img3[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img4)){
img4<-dat$img4
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax
ybottom<-ymax-yinch(.8)
tryCatch(
rasterImage(img4[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img4[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img5)){
img5<-dat$img5
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
tryCatch(
rasterImage(img5[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img5[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img6)){
img6<-dat$img6
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(.8)
ybottom<-ymax-yinch(1.6)
tryCatch(
rasterImage(img6[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img6[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img7)){
img7<-dat$img7
xleft<-xmax
xright<-xmax+xinch(.8)
ytop<-ymax-yinch(1.6)
ybottom<-ymax-yinch(2.4)
tryCatch(
rasterImage(img7[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img7[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
if(!is.null(dat$img8)){
img8<-dat$img8
xleft<-xmax+xinch(.8)
xright<-xmax+xinch(1.6)
ytop<-ymax-yinch(1.6)
ybottom<-ymax-yinch(2.4)
tryCatch(
rasterImage(img8[top:bottom,left:right,],xleft,ybottom,xright,ytop),
error=function(e) rasterImage(img8[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
}
}
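#PeakFunc8 adds an xlim argument and an img_plot switch; a hedged example with
#the same hypothetical `RD.exp` object as above:
#PeakFunc8(RD.exp, "X.1", xlim=c(0,20), lwd=2, img_plot=F)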
# Use to sort cells based on features from bin and c.dat
c.sort<-function(dat,char=NULL){
tmp<-cbind(dat$c.dat, dat$bin)
bob<-row.names(tmp[order(tmp[,char], decreasing=T),])
return(bob)
}
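#Example: rank all cells by a c.dat/bin feature, e.g. descending area
#(`RD.exp` is a hypothetical RD object):
#sorted.cells <- c.sort(RD.exp, char="area")
#head(sorted.cells)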
# Click through a set of selected cells and create a stack plot
# Could use labeling improvements
Trace.Click.1<-function(dat, cells=NULL){
graphics.off()
dev.new(width=14,height=4)
dev.new(width=12,height=8)
if(is.null(cells)){cnames <- names(dat$t.dat[,-1])}
else{cnames<-cells}
lines.flag <- 0
cell.i <- 1
g.names<-NULL
click.i <- 1
#group.names<-NULL
linefunc <- function(dat,m.names,snr=NULL,lmain="",cols=NULL,m.order=NULL,rtag=NULL,rtag2=NULL,rtag3=NULL, sf=.25,lw=3,bcex=1,p.ht=7,p.wd=10)
{
t.dat<-dat$t.dat
wr<-dat$w.dat[,2]
levs<-unique(as.character(dat$w.dat[,2]))[-1]
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
library(RColorBrewer)
if(length(m.names) > 0)
{
if(!is.null(m.order)){
dat<-dat$c.dat[m.names,]
n.order<-dat[order(dat[,m.order]),]
m.names <- row.names(n.order)
}
#else{
#m.pca <- prcomp(t(t.dat[,m.names]),scale=F,center=T)
#morder <- m.pca$x[,1] * c(1,-1)[(sum(m.pca$rot[,1]) < 0)+1]
#m.names <- m.names[order(m.pca$x[,1],decreasing=sum(m.pca$rot[,1]) < 0)]
#um.names <- m.names[order(morder)]
#}
if(is.null(cols)){
#cols <- rainbow(length(m.names),start=.55)
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
else { cols<-cols
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
par(mar=c(4,1,4,1))
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)+1.5))#-sf
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
if(length(wr) > 0)
{
if(!is.null(levs))
{
#levs <- setdiff(unique(wr),"")
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col=NA,border="darkgrey")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
text(cpx,rep(c(sf/2,sf),length=length(levs)),levs,pos=1,cex=bcex)#,offset=-offs
}
}
for(i in 1:length(m.names))
{
lines(xseq,t.dat[,m.names[i]]+i*sf,col=cols[i],lwd=lw)
if(!is.null(snr))
{
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
# pp3 <- dat$crr[,m.names[i]] > 0
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
# points(xseq[pp3],t.dat[pp3,m.names[i]]+i/10,pch=2,col=cols[i],cex=.5)
}
}
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names,cex=.8*bcex,col=cols,pos=2)
if(is.null(rtag)){
if(!is.null(m.order)){
rtag <- dat$c.dat[m.names,m.order]
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag,cex=.8*bcex,col=cols,pos=4)
}}
else{
rtag <- dat$c.dat[m.names,rtag]
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag,cex=.8*bcex,col=cols,pos=4)
}
if(!is.null(rtag2)){
rtag2 <- dat$c.dat[m.names,rtag2]
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag2,cex=.8*bcex,col="green4",pos=3)
}
if(!is.null(rtag3)){
rtag3 <- dat$c.dat[m.names,rtag3]
text(rep(max(xseq),length(m.names)),seq(1,length(m.names))*sf+t.dat[nrow(t.dat),m.names],rtag3,cex=.8*bcex,col="Red",pos=1)
}
}
}
while(click.i!=4)
{
cell.pick <- cnames[cell.i]
dev.set(dev.list()[1])
p1 <- PeakFunc2(dat$mp,cell.pick,shws=2,phws=20,Plotit=T,wr=dat$w.dat$wr1,SNR.lim=2,bl.meth="SNIP")
p1.par<-par()
if(lines.flag==1){dev.set(dev.list()[2]);linefunc(dat, g.names);lines.flag <- 0}
if(lines.flag==0){dev.set(dev.list()[1])}
#title(sub=paste("Group ",group.i," n=",g.num," Cell ",cell.i,sep=""))
xs <- rep(dat$t.dat[50,"Time"],4)
points(x=xs,y=c(1.2,1.1,1.0,.9),pch=16)
text(x=xs,y=c(1.2,1.1,1.0,.9),labels=c("Cell +","Cell -","Stack", "off"),pos=2,cex=.5)
click.i <- identify(x=xs,y=c(1.2,1.1,1.0,.9),n=1,plot=F)
if(click.i==1)
{cell.i <- cell.i + 1;if(cell.i>length(cnames)){cell.i<-1}}
if(click.i==2)
{cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(cnames)}}
if(click.i==3)
{g.names<-union(g.names,cnames[cell.i]);lines.flag<-1}
if(click.i==4){graphics.off()}
}
print(g.names)
}
# Click through cells and zoom in on the cell of interest
Trace.Click.2<-function(dat, cells=NULL,img=NULL, plotit=T){
graphics.off()
dev.new(width=14,height=4)
dev.new(width=10,height=6)
dev.new(width=8, height=8)
if(is.null(cells)){cnames <- names(dat$t.dat[,-1])}
else{cnames<-cells}
lines.flag <- 0
cell.i <- 1
g.names<-NULL
click.i <- 1
#group.names<-NULL
while(click.i!=5)
{
cell.pick <- cnames[cell.i]
dev.set(dev.list()[1])
p1 <- PeakFunc5(dat,cell.pick,ylim.max=1.6)
p1.par<-par()
if(lines.flag==2){dev.set(dev.list()[3]);cell.veiw.2048(dat, img=img, cell=cell.pick, cells=cells,cols="red",plot.new=F,cell.name=T);lines.flag <- 0}
if(lines.flag==1){dev.set(dev.list()[2]);LinesEvery.2(dat,g.names,plot.new=FALSE);lines.flag <- 0}
if(lines.flag==0){dev.set(dev.list()[1])}
#title(sub=paste("Group ",group.i," n=",g.num," Cell ",cell.i,sep=""))
#xs <- -(rep(dat$t.dat[50,"Time"],5)*1.08)
xs<- rep(par("usr")[1]-yinch(.2), 5)
ys<-seq(par("usr")[4],by=-yinch(.5), length.out=5)
points(x=xs,y=ys,pch=16)
text(x=xs,y=ys,labels=c("Cell +","Cell -","Veiw","Stack","off"),pos=2,cex=.5)
## How many cells are you looking at
maxy<-par("usr")[4]
text(par("usr")[1], par("usr")[4]+yinch(.3),paste(cell.i, ":",length(cnames)))
click.i <- identify(x=xs,y=ys,n=1,plot=F)
if(click.i==1)
{cell.i <- cell.i + 1;if(cell.i>length(cnames)){cell.i<-1};lines.flag<-0}
if(click.i==2)
{cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(cnames)};lines.flag<-0}
if(click.i==3)
{lines.flag<-2}
if(click.i==4)
{g.names<-union(g.names,cnames[cell.i]);lines.flag<-1}
if(click.i==5){graphics.off()}
}
print(g.names)}
Trace.Click<-function(dat, cells=NULL,img=dat$img1, yvar=FALSE, t.type="t.dat", plot.new=F, info=T, pts=T, lns=T, bcex=1){
if(plot.new){graphics.off()}
dev.new(width=14,height=4)
click.window<-dev.cur()
dev.new(width=10,height=6)
lines.window<-dev.cur()
dev.new(width=8, height=8)
view.window<-dev.cur()
if(is.null(cells)){cnames <- names(dat$t.dat[,-1])}
else{cnames<-cells}
lines.flag <- 0
cell.i <- 1
g.names<-NULL
click.i <- 1
#group.names<-NULL
while(click.i!=9)
{
cell.pick <- cnames[cell.i]
#dev.set(dev.list()[1])
dev.set(which=click.window)
p1 <- PeakFunc7(dat,cell.pick, t.type=t.type,yvar=yvar, info=info, bcex=bcex, pts=pts, lns=lns)
p1.par<-par()
if(lines.flag==1){
#dev.set(dev.list()[2])
dev.set(which=lines.window)
LinesEvery.5(dat,g.names,plot.new=F, img=img, t.type=t.type, col="black")
lines.flag <- 0
}
if(lines.flag==2){
#dev.set(dev.list()[3])
dev.set(which=view.window)
cell.view(dat,cell=cell.pick, img=img,cols="red",plot.new=F,cell.name=T, zoom=FALSE)
lines.flag <- 0
}
if(lines.flag==0){
#dev.set(dev.list()[1])
dev.set(which=click.window)
}
#title(sub=paste("Group ",group.i," n=",g.num," Cell ",cell.i,sep=""))
xs<- rep(par("usr")[1]-xinch(.5), 9)
ys<-seq(par("usr")[4],by=-yinch(.2), length.out=9)
points(x=xs,y=ys,pch=16)
text(x=xs,y=ys,labels=c("Cell +","Cell -","Veiw","Stack","yvar","Select Trace","Points","Lines","off"),pos=2,cex=.5)
## How many cells are you looking at
text(par("usr")[1], par("usr")[4]+yinch(.3),paste(cell.i, ":",length(cnames)))
click.i <- identify(x=xs,y=ys,n=1,plot=F)
if(click.i==1)
{cell.i <- cell.i + 1;if(cell.i>length(cnames)){cell.i<-1};lines.flag<-0}
if(click.i==2)
{cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(cnames)};lines.flag<-0}
if(click.i==3)
{lines.flag<-2}
if(click.i==4)
{g.names<-union(g.names,cnames[cell.i]);lines.flag<-1}
if(click.i==5){
if(yvar){yvar<-FALSE}else{yvar<-TRUE}
}
if(click.i==6){
t.type<-select.list(names(dat))
}
if(click.i==7){
if(pts){pts<-FALSE}else{pts<-TRUE}
}
if(click.i==8){
if(lns){lns<-FALSE}else{lns<-TRUE}
}
if(click.i==9){
#graphics.off()
dev.off(which=click.window)
dev.off(which=lines.window)
dev.off(which=view.window)
}
}
print(g.names)
}
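#Hedged interactive example (hypothetical `RD.exp`): scroll with "Cell +"/"Cell -",
#"Stack" collects the current cell into g.names, "off" closes the windows and
#prints the collected names:
#Trace.Click(RD.exp, cells=c("X.1","X.2","X.3"))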
readkeygraph <- function(prompt){
getGraphicsEvent(prompt = prompt,
onMouseDown = NULL, onMouseMove = NULL,
onMouseUp = NULL, onKeybd = onKeybd,
consolePrompt = "uh")
Sys.sleep(0.01)
return(keyPressed)
}
onKeybd <- function(key){
keyPressed <<- key
}
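#Minimal sketch of the key-capture loop these two helpers enable (requires an
#interactive device that supports getGraphicsEvent, e.g. windows() or X11()):
#plot(1:10)
#repeat{
#    k <- readkeygraph("[press a key; q quits]")
#    cat("you pressed:", k, "\n")
#    if(k == "q") break
#}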
#170606 Added:
#up arrow: move up through the list specified in entry
#down arrow: move down through the list specified in entry
#c: add cells to g.names
#r: reset g.names
#1-0: add cells to g.names1 through g.names10
#shift+# removes the cell from that group
#s: stack g.names
#y: Zoom y-axis automatically
#t: brings up list of RD file. Select Trace
#o: order cells in a new way
#p: Toggles points on graph
##d: changes drop column to 1 automatically. Remember to save the RD file at the end of the experiment
##k: changes drop column to 0 automatically. Remember to save the RD file at the end of the experiment
#l: choose window region to display on stack trace plot
#i: Select image to display on multi view options
#I: image for stacked traces
#u: Underlines the Trace
#f: New trace fitting for potassium pulses
#F: New smoothing factor for imputer
#z: zoom factor to apply to the view of the cell on the right side of the trace, and to the view window
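#Hedged example call (hypothetical `RD.exp`); numeric cells are expanded to
#"X."-prefixed names, and the working groups are mirrored to the global BACKUP:
#tcd(RD.exp, cells=c(1,5,9), img=RD.exp$img1, zf=40)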
tcd<-function(dat, cells=NULL,img=dat$img1, l.img=c("img1"), yvar=FALSE, t.type="t.dat", plot.new=F, info=F, pts=T, lns=T, bcex=.5, levs=NULL, klevs=NULL, sft=NULL, underline=T, zf=20, lw=2, sf=1, dat.name=NULL, view_func_description=F, save_question = T, ps=9){
graphics.off()
print(environment())
if(is.null(dat.name)){
dat.name<-deparse(substitute(dat))
}else{dat.name<-dat.name}
if(view_func_description){
cat(
"
#############################################
Welcome to Trace.Click.dev
#############################################
This function returns a list of vectors of cell names
There are a few ways to input cell names into this program;
1)Character; ex. cells=c('X.1','X.2','X.3','X.4')
2)Numeric; ex. cells=c(1,2,3,4)
3)Character Lists; ex. active.cells[[1]]
Character lists/cell groups can be handled and displayed in a variety
of ways. Using Keyboard commands (CASE SENSITIVE);
1)s: Stack group of cells
2)v: View images of cells
3)P: Pick a group to scroll through with up and down arrows
UP ARROW: move up through the list specified in entry
DOWN ARROW: move down through the list specified in entry
o: reorders traces in the way specified.
4)r: Rename your group; use '.' instead of spaces, ex. 'cool.cellz'
5)R: Empty the specified group of all cells
UP ARROW: move up through the list specified in entry
DOWN ARROW: move down through the list specified in entry
########################
Stacked Traces Features
u: Add or remove line under trace
p: Add or remove points in single trace view
t: Select the type of trace to display (anything starting with a t or mp)
d: Remove most information on the single trace view
D: How much the traces are separated; must be greater than 0, ex. 0.2
i: Image/Images to display on left side of traces
V: 1.Choose Dataframe 2.Choose Values to display on right side of trace
####################
Viewing cell images
v: Select the group to view
I: Change the image
##############################
Making Groups
1,2,3,4,5,6,7,8,9,0,-,+: add cells to g.names1 through g.names12
shift+ (above value) removes the cell from that group
To clean up a group press P, select the group of interest,
press 'o' to sort the group in a specified way (ex. area),
and then use shift + whatever key the cells are stored under
ex('1,2,3,4,5,6,7,8,9,0,-,+')
q: Quits the program
c: add cells to g.names
s: stack g.names
#d: details for peakfunc
#D: LinesEvery separation
#f: New trace fitting for potassium pulses
#F: New smoothing factor for fit trace
#i: Select image to display on Stacked Traces
#I: image for Multiview
#l: choose window region to display on stack trace plot
#o: order all cells in a new way
#O: order cells in Stacked Traces and multiview
#p: Toggles points on graph
#P: Pick a group/cells to click through
#R: reset group specified
#r: rename group names
#s: stack selected Groups
#t: brings up list of RD file. Select Trace (anything starting with t or mp)
#u: Underlines the Trace
#v: Show where cells are located and give zoomed in view
#V: choose cell info to display on traces
#w: Change Line Width on plot
#x: score this cell as a drop
#y: Zoom y-axis automatically
#z: image zoom
")
}
dat.tmp<-dat
if(plot.new){graphics.off()}
if(is.null(sft)){sft<-7}
#windows(width=14,height=4,xpos=0, ypos=50)
Cairo(pointsize=ps, width=14, height=4)
click.window<-dev.cur()
#windows(width=10,height=6,xpos=0, ypos=450)
Cairo(pointsize=ps, width=10,height=6)
lines.window<-dev.cur()
dimx<-dim(img)[2]
dimy<-dim(img)[1]
haight<-10*dimy/dimx
#windows(width=haight*dimx/dimy, height=haight,xpos=1130, ypos=200)
Cairo(pointsize=ps, width=haight*dimx/dimy, height=haight)
view.window<-dev.cur()
#windows(width=8, height=8,xpos=1130, ypos=0)
Cairo(pointsize=ps, width=8, height=8)
multipic.window<-dev.cur()
#windows(width=12, height=2,xpos=0, ypos=550)
Cairo(pointsize=ps, width=12, height=2)
traceimpute.window<-dev.cur()
window.flag<-0
lines.flag <- 0
cell.i <- 1
p.names<-NULL
values<-"area"
lines.color='black'
#If no cell input, collect all cells
if(is.null(cells)){
cells<-dat$c.dat$id
cnames<-cells
g.names<-cnames
}
#If inputting a numeric vector, convert to character by prepending "X."
if(class(cells)=="numeric"){
cells<-paste("X.", cells, sep="")
cnames<-cells
g.names<-cnames
}
#If inputting a list, fill in the groups
if(class(cells)=="list"){
#Reduce g.names to combine all cells from the list into g.names
g.names<-Reduce(union,cells)
#initialize a list
gt.names<-list()
#Now fill in the list
if( !is.null( names(cells) ) ){
for(i in 1:length(cells)){
#Fill in the gt.names with the names of the cells
gt.names[[ names(cells)[i] ]]<-cells[[i]]
#assign(names(cells)[i],cells[[i]])
}
}else{
for(i in 1:length(cells)){
#Fill in the gt.names with the names of the cells
gt.names[[ paste0("g.names",i) ]]<-cells[[i]]
#assign(names(cells)[i],cells[[i]])
}
}
#if the length of the cell list is less than 12, fill in the remaining
#list entries with empty regions
if(length(gt.names)<12){
for(i in ( length(gt.names)+1 ):12){
#fill in with an NA
gt.names[[paste("g.names",i,sep="")]]<-NA
#remove the NA so each new entry starts as an empty group
gt.names<-lapply(gt.names, function(x) x[!is.na(x)])
}
}
cells<-dat$c.dat$id
cnames<-cells
#gt.names<-list(g.names1=g.names1, g.names2=g.names2, g.names3=g.names3, g.names4=g.names4, g.names5=g.names5, g.names6=g.names6, g.names7=g.names7, g.names8=g.names8,g.names9=g.names9, g.names10=g.names10, g.names11=g.names11, g.names12=g.names12, g.names=g.names)
}else{
cnames<-cells
g.names<-cnames
g.names1<-NA
g.names2<-NA
g.names3<-NA
g.names4<-NA
g.names5<-NA
g.names6<-NA
g.names7<-NA
g.names8<-NA
g.names9<-NA
g.names10<-NA
g.names11<-NA
g.names12<-NA
gt.names<-list(g.names1=g.names1, g.names2=g.names2, g.names3=g.names3, g.names4=g.names4, g.names5=g.names5, g.names6=g.names6, g.names7=g.names7, g.names8=g.names8,g.names9=g.names9, g.names10=g.names10, g.names11=g.names11, g.names12=g.names12, g.names=g.names)
gt.names<-lapply(gt.names, function(x) x[!is.na(x)])
cells<-cells
cnames<-cells
}
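#e.g. named groups can also be passed straight in and then picked with 'P'
#(hypothetical): tcd(RD.exp, cells=list(big=c("X.1","X.2"), small=c("X.9")))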
keyPressed <- "z"
#group.names<-NULL
if(is.null(levs)){levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
if(is.null(klevs)){klevs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
while(keyPressed!="q")
{
cell.pick <- cnames[cell.i]
dev.set(which=click.window)
p1 <- PeakFunc7(dat,cell.pick, t.type=t.type, yvar=yvar, info=info, bcex=bcex, pts=pts, lns=lns, levs=levs, underline=underline, dat.n=dat.name, zf=zf)
p1.par<-par()
##LinesEvery
if(lines.flag==1){
#if(length(p.names)<100){
if(length(p.names)>11){
dev.off(which=lines.window)
Cairo(pointsize=ps, width=10,height=12)
lines.window<-dev.cur()
}else{
dev.off(which=lines.window)
Cairo(pointsize=ps, width=10,height=7)
lines.window<-dev.cur()
}
dev.set(which=lines.window)
tryCatch(LinesEvery.5(dat,p.names,plot.new=F, img=l.img,lmain=paste(gsub("[$]","",p.namez), 'n=',length(p.names)), t.type=t.type, lw=lw, col=lines.color, lns=lns, levs=levs, bcex=1, underline=underline, dat.n=dat.name, zf=zf, sf=sf, values=values),error=function(e) print("You haven't stacked traces yet, yo."))
lines.flag <- 0
#}
}
if(lines.flag==2){
sample.to.display<-as.numeric(select.list(as.character(c(5,10,20,50,70,100)), title='Sample Number?'))
tryCatch(dev.off(which=lines.window.2), error=function(e) print("this windows hasn't been opened yet"))
if(sample.to.display > 20){
Cairo(pointsize=ps, width=10, height=12)
}else{
Cairo(pointsize=ps, width=10, height=7)
}
lines.window.2<-dev.cur()
dev.set(which=lines.window.2)
tryCatch(
LinesEvery.5(
dat,
sample(p.names)[1:sample.to.display],
plot.new=F,
lmain=paste("Sample",sample.to.display,"out of",length(p.names)),
img=l.img, lw=lw, t.type=t.type, col="black", lns=lns, levs=levs, bcex=1, underline=underline, dat.n=dat.name, zf=zf,sf=sf, values=values)
,error=function(e) print("You haven't stacked traces yet, yo."))
lines.flag <- 0
}
##Pulse Imputer
if(lines.flag==3){
#dev.off(which=traceimpute.window)
#windows(width=2*length(klevs),height=2,xpos=0, ypos=550)
#traceimpute.window<-dev.cur()
dev.set(which=traceimpute.window)
tryCatch(PulseImputer(dat,cell.pick,levs,sf=sf),error=function(e) print("PulseImputer failed for this cell."))
lines.flag <- 0
}
##Pic zoom
if(window.flag==1){
dev.set(which=view.window)
tryCatch(cell.view(dat,cell=p.names, img=img,cols="Yellow",plot.new=F,cell.name=T, lmain=paste(gsub("[$]","",p.namez)), zoom=FALSE),error=function(e) print("You haven't collected cells to view"))
dev.set(which=multipic.window)
tryCatch(multi.pic.zoom(dat,p.names,img, plot.new=F, zf=zf, labs=F) ,error=function(e) print("You haven't collected cells to view"))
window.flag <- 0
}
if(lines.flag==0){
#dev.set(dev.list()[1])
dev.set(which=click.window)
}
#title(sub=paste("Group ",group.i," n=",g.num," Cell ",cell.i,sep=""))
## How many cells are you looking at
text(par("usr")[1], par("usr")[4]+yinch(.5),paste(cell.i, ":",length(cnames)))
#click.i <- identify(x=xs,y=ys,n=1,plot=F)
keyPressed <- readkeygraph("[press any key to continue]")
if(keyPressed=="Up")
{cell.i <- cell.i + 1;if(cell.i>length(cnames)){cell.i<-1};lines.flag<-0}
if(keyPressed=="Down")
{cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(cnames)};lines.flag<-0}
#a: Assign: place the cell of interest into the correct cell class
if(keyPressed=='a'){
#need to see if the dat has a cell_types
cellToReassign <- cnames[cell.i]
cat('\nReassigning cell', cellToReassign,'\n')
cellTypeId <- grep('^cell',names(dat), value=T)
if(length(cellTypeId) > 0){
#get only cell types that we want to reassign
cellTypeNames <- names(dat[[cellTypeId]])
cellTypesToNotClean <- c('neurons', 'glia')
cellTypesToClean <- setdiff(cellTypeNames, cellTypesToNotClean)
#remove it from all groups
dat[[cellTypeId]][cellTypesToClean] <- lapply(dat[[cellTypeId]][cellTypesToClean], function(X) setdiff(X,cellToReassign))
###now that we have indicated that we would like to place this cell into a new group
#First let's find all groups we can assign to
if(exists("bringToTop")) bringToTop(-1) # Windows-only console focus; no-op elsewhere
cat('\nWhich cell class does this actually belong to?\n')
correctCellClass <- cellTypesToClean[menu(cellTypesToClean)]
print(dat[[cellTypeId]][[correctCellClass]])
dat[[cellTypeId]][[correctCellClass]] <- union(dat[[cellTypeId]][[correctCellClass]], cellToReassign)
print(dat[[cellTypeId]][correctCellClass])
assign(dat.name, dat, envir=.GlobalEnv)
}else{
cat('\nSorry You haven\'t defined cell types yet. Please do this first!\n')
}
}
#c: add cells to g.names
if(keyPressed=="c"){
g.names<-union(g.names,cnames[cell.i]);print(g.names)}
#C: Remove cells from g.names
if(keyPressed=="C"){
g.names<-setdiff(g.names,cnames[cell.i]);print(g.names)}
#d: details for peakfunc
if(keyPressed=="d"){
if(info){info=F}else{info=T}
lines.flag<-1
}
#D: LinesEvery separation
if(keyPressed=="D"){
##bringtotop(-1)
print("change the trace separation factor (sf) for LinesEvery")
print(paste("This is the current sf",sf))
sf<-scan(n=1)
if(sf==0){sf<-.001}
lines.flag<-1
}
# #f: New trace fitting for potassium pulses
# if(keyPressed=="f"){
# lines.flag<-3
# }
#f: Fixes the scoring (dormant sketch)
#This function will allow you to fix the region you click on
# if(keyPressed == 'f'){
#     #first find the middle of each levs region to correct the scoring
#     levsMiddle <- tapply(dat$w.dat[,1], as.factor(dat$w.dat$wr1), mean)[levs]
#     yLocation <- rep(par('usr')[4] + yinch(.5), length(levsMiddle))
#     par(xpd=T)
#     points(levsMiddle, yLocation, pch=7)
#     identify(levsMiddle, yLocation, n=1)
#     par(xpd=F)
# }
#F: New smoothing factor for fit trace
if(keyPressed=="F"){
print("Change the loess smoothing factor")
print(paste("This is the current smoothing",sft))
sft<-scan(n=1)
lines.flag<-3
}
#h: Change the hue/color of the traces
if(keyPressed=="h"){
lines.color<-select.list(c('rainbow','black','brew.pal','topo'))
if(lines.color==''){
lines.color<-'black'
}
lines.flag<-1
}
#i: Select image to display on Stacked Traces
if(keyPressed=="i"){
l.img<-image.selector(dat)
lines.flag<-1
}
#I: image for Multiview
if(keyPressed=="I"){
img<-dat[[image.selector(dat, multi=F)]]
#lines.flag<-1
window.flag<-1
}
#l: choose window region to display on stack trace plot
if(keyPressed=="l"){
#if(lns){lns<-FALSE}else{lns<-TRUE}
levs<-select.list(setdiff(unique(as.character(dat$w.dat[,"wr1"])),""), multiple=T)
if( (levs=="") || identical(levs,character(0)) ){levs<-NULL}#levs<-setdiff(unique(as.character(dat$w.dat$wr1)),"")}
lines.flag<-1
}
#m: Move groups to another group
if(keyPressed=="m"){
if(exists("bringToTop")) bringToTop(-1) # Windows-only console focus; no-op elsewhere
cat("
Select the Group you would like to move
")
gt_to_move<-select.list(names(gt.names), multiple=F)
print(paste("You Selected Group ",gt_to_move))
cat("
Select the Target group to replace
")
gt_to_replace<-select.list(names(gt.names), multiple=F)
print(paste("Group ",gt_to_replace, "was replaced by ", gt_to_move))
gt.names[[gt_to_replace]]<-gt.names[[gt_to_move]]
}
#o: order all cells in a new way
if(keyPressed=="o"){
toMatch<-c("c.dat","bin","scp")
order_dat<-grep(paste(toMatch,collapse="|"),names(dat),value=TRUE)
datfram<-select.list(order_dat,title="Where is the data?")
collumn<-select.list(names(dat[[datfram]]),title="Column to sort")
tryCatch(cnames<-c.sort.2(dat[[datfram]],cnames,collumn=collumn),error=function(e) print("Something went wrong try again"))
cell.i<-1
}
#O: order cells in Stacked Traces and multiview
if(keyPressed=="O"){
tryCatch(p.names<-c.sort.2(dat,p.names),error=function(e) print("You have not stacked traces yet."))
lines.flag<-1
window.flag<-1
}
#p: Toggles points on graph
if(keyPressed=="p"){
if(pts){pts<-FALSE}else{pts<-TRUE}
lines.flag<-1
}
#P: Pick a group/cells to click through
if(keyPressed=="P"){
##bringtotop(-1)
print("Pick a Group of cells or a single cell to observe \nIf you Click cancel, all cells will be returned")
selection<-select.list(c("group","cells"))
if(selection=="group"){
gt.to.click<-select.list(names(gt.names), multiple=F)
if( is.null(gt.names[[gt.to.click]]) | is.logical( gt.names[[gt.to.click]]) ){
##bringtotop(-1)
print("Nothing is in this Group")
}else{
cell.i<-1
print(gt.to.click)
cnames<-gt.names[[gt.to.click]]
tryCatch(
cnames<-c.sort.2(dat[[datfram]],cnames,collumn=collumn),
error=function(e) print("Something went wrong try again") )
print(cnames)
}
}
if(selection=="cells"){
cell.i<-1
cnames<-select.list(as.character(dat$c.dat$id), multiple=T)
tryCatch(cnames<-c.sort.2(dat[[datfram]],cnames,collumn=collumn),error=function(e) print("Something went wrong try again"))
}
if(selection==""){
cell.i<-1
cnames<-dat$c.dat$id
}
}
#R: reset group specified
if(keyPressed=="R"){
p.namez<-paste(select.list(names(gt.names)),sep="")
if(p.namez!=""){
print(p.namez)
gt.names[[p.namez]]<-NA
gt.names[[p.namez]]<-gt.names[[p.namez]][ !is.na(gt.names[[p.namez]]) ]
#gt.names[[p.namez]]<-lapply(gt.names[[p.namez]], function(x) x[!is.na(x)])
print(paste("You Emptied", p.namez))
}else{}
}
#r: rename group names
if(keyPressed=="r"){
#bringtotop(-1)
print("Select a group to rename")
gt.to.rename<-select.list(names(gt.names), multiple=F)
name.number<-which(names(gt.names)==gt.to.rename,arr.ind=T)
print("Type in the new name Cannot start with number, no spaces.")
tryCatch(names(gt.names)[name.number]<-scan(n=1, what='character'),error=function(e) print("You did not enter a name, so this group was not renamed"))
#assign(names(gt.names)[name.number],gt.names[[name.number]])
#lines.flag<-1
}
#s: stack selected groups
if(keyPressed=="s"){
p.namez<-paste(select.list(names(gt.names)),sep="")
print(p.namez)
p.names<-gt.names[[p.namez]]
#p.names<-get(ls(pattern=p.namez))
print(p.names)
lines.flag<-1
}
#S: Sample selected groups
if(keyPressed=="S"){
p.namez<-paste(select.list(names(gt.names)),sep="")
print(p.namez)
p.names<-gt.names[[p.namez]]
#p.names<-get(ls(pattern=p.namez))
print(p.names)
lines.flag<-2
}
#t: brings up list of RD file. Select Trace (anything starting with t or mp)
if(keyPressed=="t"){
toMatch<-c("t[.]","blc","snr","mp")
trace_dat<-grep(paste(toMatch,collapse="|"),names(dat),value=TRUE)
t.type1<-t.type
t.type<-select.list(trace_dat)
if(t.type==""){t.type<-t.type1}
lines.flag<-1
}
#u: Underlines the Trace
if(keyPressed=="u"){
if(underline){underline=F}else{underline=T}
lines.flag<-1
}
#v: Show where cells are located and give zoomed in view
if(keyPressed=="v"){
p.namez<-paste(select.list(names(gt.names)),sep="")
print(p.namez)
p.names<-gt.names[[p.namez]]
print(p.names)
window.flag<-1
}
#V: choose cell info to display on traces
if(keyPressed=="V"){
#if(lns){lns<-FALSE}else{lns<-TRUE}
values<-select.list(names(dat$c.dat), multiple=T)
lines.flag<-1
}
#w: Change Line Width on plot
if(keyPressed=="w"){
#bringtotop(-1)
print("change the line width (lw) for LinesEvery")
print(paste("This is the current lw",lw))
lw<-scan(n=1)
lines.flag<-1
}
#x: Drop cell
if(keyPressed=="x")
{
print(cnames[cell.i])
dat$bin[cnames[cell.i], "drop"]<-1
print(dat$bin[cnames[cell.i], "drop"])
print(paste("You Dropped Cell",cnames[cell.i]))
# now that you have dropped a cell, it needs to be removed from
# the cell types
cellTypeId <- grep('^cell',names(dat), value=T)
if(length(cellTypeId) > 0){
drops <- row.names(dat$bin[dat$bin$drop==1,])
print(drops)
dat[[cellTypeId]] <- lapply(dat[[cellTypeId]], function(X) setdiff(X,drops))
assign(dat.name,dat, envir=.GlobalEnv)
}else{assign(dat.name,dat, envir=.GlobalEnv)}
}
#X: undrop cell
if(keyPressed=="X")
{
print(cnames[cell.i])
dat$bin[cnames[cell.i], "drop"]<-0
print(dat$bin[cnames[cell.i], "drop"])
print(paste("You Dropped Cell",cnames[cell.i]))
}
#y: Zoom yaxis automatically
if(keyPressed=="y"){
if(yvar){yvar<-FALSE}else{yvar<-TRUE}
}
#z: image zoom
if(keyPressed=="z"){
if(exists("bringToTop")) bringToTop(-1) # Windows-only console focus; no-op elsewhere
print("change the zoom factor")
print(paste("This is the current zoom",zf))
zf<-scan(n=1)
lines.flag<-1
window.flag<-1
}
#if(keyPressed=="k")
#{
# dat$bin[cnames[cell.i], "drop"]<-0
# print(paste("You Dropped Cell",cnames[cell.i]))
#}
#F1: Simple bp.selector. Create the statistic labeled on the plot. The localize question
#allows you to click the boxplot to select a subset of cells to observe
if(keyPressed=="F1"){
#first open a new window
#after undergoing a logical test to see if it exists
if(length(ls(pattern='bp.selector.window'))==0){
dev.new(width=14, height=8)
#give this window a name
bp.selector.window<-dev.cur()
}else{}
#give the focus to the new window
dev.set(bp.selector.window)
#empty gt.names[[12]]
gt.names[[12]]<-NA
#remove the NA, which will be replaced with a logical(0)
gt.names[[12]]<-lapply(gt.names[[12]], function(x) x[!is.na(x)])
#do the function bp.selector to gather data
#bringtotop(-1)
cat("This function allows you to create statistics based on the statistic you select.
\n This Function finds a represention of peak amplification and or block
\n This function will take in what ever you are currently scrolling through
\n You have the option to localize your boxplot. This means, select cells
\n specifically based on where you click on the boxplot. Two clicks means you need
\n to specigy the lower range followed by the upper range.
\n One click will take everything greater than your click
\n The Other option that will arise is, would you like the save the stat.
\n If you do, the console will prompt you to enter a name. Ensure no spaces in the name
\n The next option will be whether you would like to make another statistic.")
cat(" \n Would you like to localize your boxplot? \n")
print("T=yes, F=no")
localize_log<-scan(n=1,what="character")
print(localize_log != "T")
if(localize_log != "T"){localize_log<-"F"}
print(cnames[cell.i])
dev.set(bp.selector.window)
gt.names[[12]]<-bp.selector(dat,cnames[cell.i],cnames,plot.new=F,dat.name=NULL,env=environment(),localize=localize_log)
#Now fill TCD with the cells just selected.
cnames<-gt.names[[12]]
cell.i<-1
lines.flag<-1
window.flag<-1
}
#F2: Advanced Statistic maker This function uses the function (After-Before)/(After+Before)
#this function allows you to save the stat. This will be added to the scp dataframe at the bottom.
#if you have created statistics, be sure to save your RD file before you close
if(keyPressed=="F2"){
#first open a new window
#after undergoing a logical test to see if it exists
if(length(ls(pattern='bp.selector.window'))==0){
dev.new(width=14, height=8)
#give this window a name
bp.selector.window<-dev.cur()
}else{}
#give the focus to the new window
dev.set(bp.selector.window)
#empty gt.names[[12]]
gt.names[[12]]<-NA
#remove the NA, which will be replaced with a logical(0)
gt.names[[12]]<-lapply(gt.names[[12]], function(x) x[!is.na(x)])
#do the function bp.selector to gather data
#bringtotop(-1)
cat("This function allows you to create statistics based on the statistic you select.
\n This Function finds a represention of peak amplification and or block
\n This function will take in what ever you are currently scrolling through
\n You have the option to localize your boxplot. This means, select cells
\n specifically based on where you click on the boxplot. Two clicks means you need
\n to specigy the lower range followed by the upper range.
\n One click will take everything greater than your click
\n The Other option that will arise is, would you like the save the stat.
\n If you do, the console will prompt you to enter a name. Ensure no spaces in the name
\n The next option will be whether you would like to make another statistic.")
cat(" \n Would you like to localize your boxplot? \n")
print("T=yes, F=no")
localize_log<-scan(n=1,what="character")
print(localize_log != "T")
if( length(localize_log) == 0 ){
localize_log<-"F"
}else{
if(localize_log != "T"){
localize_log<-"F"
}
}
print(cnames[cell.i])
dev.set(bp.selector.window)
gt.names[[12]]<-bp.selector.advanced(dat,cnames[cell.i],cnames,plot.new=F,dat.name=NULL,env=environment(),localize=localize_log)
#Now fill TCD with the cells just selected.
cnames<-gt.names[[12]]
cell.i<-1
lines.flag<-1
window.flag<-1
}
#F3: Plotting the Density plots. There are many options for this plot
if(keyPressed=="F3"){
if(length(ls(pattern="density_win"))==0){
dev.new(width=10,height=10)
density_win<-dev.cur()
}else{}
#bringtotop(-1)
cat("What dataframe wil contain your stat? \n")
dense_df_q<-select.list(names(dat))
cat("What attribute would you like to see the distribution? \n")
dense_df_att<-menu(names(dat[[dense_df_q]]))
statz<-dat[[dense_df_q]][dense_df_att]
#define the top xlim value
cat("Define Top xlim value \n")
cat("Enter 0 to allow default Max value \n")
xlim_top<-scan(n=1)
if(xlim_top==0){
xlim_top<-max(dat[[dense_df_q]][dense_df_att])
}
cat("Define bottom xlim value \n")
xlim_bottom<-scan(n=1)
if(xlim_bottom==0){
xlim_bottom<-min(dat[[dense_df_q]][dense_df_att])
}
dev.set(density_win)
density_ct_plotter(dat,g.names,cell_types=NULL, stat=statz,overlay=T, dense_sep=TRUE,plot_new=F,xlim_top=xlim_top,xlim_bottom=xlim_bottom,dat.name=dat.name)
lines.flag<-1
}
#F4: Utilizing Topview
if(keyPressed=="F4"){
p.namez<-paste(select.list(names(gt.names)),sep="")
p.names<-gt.names[[p.namez]]
aux_var<-c('area')
#What i need to do is selectively import gfp and tritc variables into the
#topview function
#this means search in the bin data frame for ib4 and gfp
add_vars <- grep('mcherry|cy5|gfp|drop', names(dat$bin),value=T)
aux_var<-c(aux_var, add_vars)
TopView(dat, p.names, 12, 6, dat_name=dat.name, aux.var=aux_var)
}
#F5: Census Viewer
if(keyPressed=="F5"){
cnames_orig <- cnames
cells_to_view <- census_viewer(dat)
if( all(is.na(cells_to_view)) ){ # handles both an NA and a list return
cnames <- cnames_orig
cat(
"There were no cells in that selection"
)
}else{
cell.i<-1
cnames <- cells_to_view$cells
gt.names[[12]] <- cells_to_view$cells
names(gt.names)[12] <- cells_to_view$name
p.namez <- cells_to_view$name
p.names <- gt.names[[12]]
lines.flag<-1
}
}
#F6: Bin column viewer
if(keyPressed=="F6"){
cnames_orig <- cnames
cat("Please select the column you would like to view\n")
cells_to_view <- cellzand_tcd(dat$bin)
if( all(is.na(cells_to_view)) ){ # handles both an NA and a list return
cnames <- cnames_orig
cat(
"There were no cells in that selection"
)
}else{
cell.i<-1
cnames <- cells_to_view$cells
gt.names[[12]] <- cells_to_view$cells
names(gt.names)[12] <- cells_to_view$name
if(length(cells_to_view$cells) > 20 ){
p.namez <- cells_to_view$name
p.names <- gt.names[[12]]
lines.flag<-1
}
}
}
#F7: Load cell Types into the groups to pick with 'P'
if(keyPressed=='F7') {
cellTypeId <- grep('^cell',names(dat), value=T)
if(length(cellTypeId)>0){
if(length(cellTypeId)>1){
if(exists("bringToTop")) bringToTop(-1)
cat('\n Select the cell type to load in \n')
cellTypeId <- select.list(cellTypeId, title="Select Cell Type")
}
if(exists("bringToTop")) bringToTop(-1)
cat("\nI have filled in your cell_types to choose by pressing 'P' ENJOY!\n")
gt.names <- list()
for(i in 1:length(dat[[cellTypeId]])){
#Fill in the gt.names with each cell type
gt.names[[ names(dat[[cellTypeId]][i]) ]]<-dat[[cellTypeId]][[i]]
}
}else{
				cat('\nSorry, you haven\'t defined cell types yet, so I can\'t fill it in for you.\n')
}
}
if(keyPressed=="1")
{gt.names[[1]]<-union(gt.names[[1]],cnames[cell.i]);print(gt.names[1])}
if(keyPressed=="!")
{gt.names[[1]]<-setdiff(gt.names[[1]],cnames[cell.i]);print(gt.names[1])}
if(keyPressed=="2")
{gt.names[[2]]<-union(gt.names[[2]],cnames[cell.i]);print(gt.names[2])}
if(keyPressed=="@")
{gt.names[[2]]<-setdiff(gt.names[[2]],cnames[cell.i]);print(gt.names[2])}
if(keyPressed=="3")
{gt.names[[3]]<-union(gt.names[[3]],cnames[cell.i]);print(gt.names[3])}
if(keyPressed=="#")
{gt.names[[3]]<-setdiff(gt.names[[3]],cnames[cell.i]);print(gt.names[3])}
if(keyPressed=="4")
{gt.names[[4]]<-union(gt.names[[4]],cnames[cell.i]);print(gt.names[4])}
if(keyPressed=="$")
{gt.names[[4]]<-setdiff(gt.names[[4]],cnames[cell.i]);print(gt.names[4])}
if(keyPressed=="5")
{gt.names[[5]]<-union(gt.names[[5]],cnames[cell.i]);print(gt.names[5])}
if(keyPressed=="%")
{gt.names[[5]]<-setdiff(gt.names[[5]],cnames[cell.i]);print(gt.names[5])}
if(keyPressed=="6")
{gt.names[[6]]<-union(gt.names[[6]],cnames[cell.i]);print(gt.names[6])}
if(keyPressed=="^")
{gt.names[[6]]<-setdiff(gt.names[[6]],cnames[cell.i]);print(gt.names[6])}
if(keyPressed=="7")
{gt.names[[7]]<-union(gt.names[[7]],cnames[cell.i]);print(gt.names[7])}
if(keyPressed=="&")
{gt.names[[7]]<-setdiff(gt.names[[7]],cnames[cell.i]);print(gt.names[7])}
if(keyPressed=="8")
{gt.names[[8]]<-union(gt.names[[8]],cnames[cell.i]);print(gt.names[8])}
if(keyPressed=="*")
{gt.names[[8]]<-setdiff(gt.names[[8]],cnames[cell.i]);print(gt.names[8])}
if(keyPressed=="9")
{gt.names[[9]]<-union(gt.names[[9]],cnames[cell.i]);print(gt.names[9])}
if(keyPressed=="(")
{gt.names[[9]]<-setdiff(gt.names[[9]],cnames[cell.i]);print(gt.names[9])}
if(keyPressed=="0")
{gt.names[[10]]<-union(gt.names[[10]],cnames[cell.i]);print(gt.names[10])}
if(keyPressed==")")
{gt.names[[10]]<-setdiff(gt.names[[10]],cnames[cell.i]);print(gt.names[10])}
if(keyPressed=="-")
{gt.names[[11]]<-union(gt.names[[11]],cnames[cell.i]);print(gt.names[11])}
if(keyPressed=="_")
{gt.names[[11]]<-setdiff(gt.names[[11]],cnames[cell.i]);print(gt.names[11])}
if(keyPressed=="=")
{gt.names[[12]]<-union(gt.names[[12]],cnames[cell.i]);print(gt.names[12])}
if(keyPressed=="+")
{gt.names[[12]]<-setdiff(gt.names[[12]],cnames[cell.i]);print(gt.names[12])}
BACKUP<<-gt.names
if(keyPressed=="q")
{
#graphics.off()
dev.off(which=click.window)
dev.off(which=lines.window)
			tryCatch(dev.off(which=lines.window.2), error=function(e) print("this window hasn't been opened yet"))
dev.off(which=view.window)
dev.off(which=multipic.window)
dev.off(which=traceimpute.window)
}
}
#rd.name <- as.character(substitute(dat))
#print(rd.name)
#assign(rd.name, dat, envir=.GlobalEnv)
#gt.names<-list(g.names1=g.names1, g.names2=g.names2, g.names3=g.names3, g.names4=g.names4, g.names5=g.names5, g.names6=g.names6, g.names7=g.names7, g.names8=g.names8,g.names9=g.names9, g.names10=g.names10, g.names11=g.names11, g.names12=g.names12, g.names=g.names)
BACKUP<<-gt.names
assign(dat.name,dat, envir=.GlobalEnv)
#bringtotop(-1)
if(save_question){
		print('Would you like to save your cell groups?')
selection<-select.list(c('no','yes'),title='Save Groups?')
if(selection=='yes'){
print("Write in your name")
save.names <- scan(n=1, what='character')
save_label <- save.names
			assign(save.names, gt.names, envir = .GlobalEnv)
			#also assign locally so save(list=...) can find the object by name
			assign(save.names, gt.names)
save(list = save.names ,file=paste(save_label,'.Rdata',sep=''))
gt.names<<-gt.names
}else{
gt.names<<-gt.names
return(gt.names)
}
}else{
gt.names<<-gt.names
return(gt.names)
}
#print(rd.name)
}
#create a trace.click that allows for scoring while clicking
Trace.Click.repair<-function(dat, cells=NULL,img=dat$img1, yvar=FALSE, t.type="t.dat", plot.new=F, info=T, bcex=1, save.bp=F,view.cells=F){
if(plot.new){graphics.off()}
dev.new(width=14,height=4)
click.window<-dev.cur()
dev.new(width=10,height=6)
lines.window<-dev.cur()
dev.new(width=8, height=8)
view.window<-dev.cur()
if(is.null(cells)){cnames <- names(dat$t.dat[,-1])}
else{cnames<-cells}
lines.flag <- 0
cell.i <- 1
g.names<-NULL
click.i <- 1
#group.names<-NULL
while(click.i!=7)
{
cell.pick <- cnames[cell.i]
#dev.set(dev.list()[1])
dev.set(which=click.window)
p1 <- PeakFunc6(dat,cell.pick, t.type=t.type,yvar=yvar, info=info, bcex=bcex)
p1.par<-par()
if(lines.flag==1){
#dev.set(dev.list()[2])
dev.set(which=lines.window)
LinesEvery.5(dat,g.names,plot.new=F, img=img, t.type=t.type, col="black")
lines.flag <- 0
}
if(lines.flag==2){
#dev.set(dev.list()[3])
dev.set(which=view.window)
cell.view(dat,cell=cell.pick, img=img,cols="red",plot.new=F,cell.name=T, zoom=FALSE)
lines.flag <- 0
}
if(lines.flag==0){
#dev.set(dev.list()[1])
dev.set(which=click.window)
}
#title(sub=paste("Group ",group.i," n=",g.num," Cell ",cell.i,sep=""))
xs<- rep(par("usr")[1]-xinch(.5), 7)
ys<-seq(par("usr")[4],by=-yinch(.2), length.out=7)
points(x=xs,y=ys,pch=16)
text(x=xs,y=ys,labels=c("Cell +","Cell -","Veiw","Stack","yvar","Select Trace","off"),pos=2,cex=.5)
## How many cells are you looking at
text(par("usr")[1], par("usr")[4]+yinch(.3),paste(cell.i, ":",length(cnames)))
click.i <- identify(x=xs,y=ys,n=1,plot=F)
if(click.i==1)
{cell.i <- cell.i + 1;if(cell.i>length(cnames)){cell.i<-1};lines.flag<-0}
if(click.i==2)
{cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(cnames)};lines.flag<-0}
if(click.i==3)
{lines.flag<-2}
if(click.i==4)
{g.names<-union(g.names,cnames[cell.i]);lines.flag<-1}
if(click.i==5){
if(yvar){yvar<-FALSE}else{yvar<-TRUE}
}
if(click.i==6){
t.type<-select.list(names(dat))
}
if(click.i==7){
#graphics.off()
dev.off(which=click.window)
dev.off(which=lines.window)
dev.off(which=view.window)}
}
print(g.names)
}
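#Example usage (a sketch; RD.ex is a hypothetical RD experiment list with
#t.dat, w.dat, c.dat, and img1 components already loaded in the workspace):
#Trace.Click.repair(RD.ex, cells=names(RD.ex$t.dat)[-1][1:20])
#the group built with the "Stack" button is printed on exit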
bp.selector<-function(dat,cell=NULL,cells=NULL,dat.name=NULL,plot.new=T,save.bp=F,view.cells=F, env=NULL, localize=T, bcex=1){
print(environment())
if(is.null(env)){
env<-.GlobalEnv
}else{env<-env}
if(is.null(dat.name)){
dat.name<-deparse(substitute(dat))
}else{dat.name<-dat.name}
#grab the RD name from the RD
if(is.null(dat.name)){
dat.name<-deparse(substitute(dat))
}else{dat.name<-dat.name}
#Make sure you have some type of cells
if(is.null(cells)){
cells<-dat$c.dat$id
}else{cells<-cells}
	#Choose a cell to display for selecting stats
if(is.null(cell)){
cell<-dat$c.dat[1,'id']
}else{cell<-cell}
###################################################################
#This region needs significant work to improve to all data aspects
###################################################################
	## Select either Area or Peak Height
type<-select.list(c("Peak Height", "Area"), multiple=F, title="Parameter?")
if(type=="Peak Height"){type<-".max"
}else{type<-".tot"}
#Find the window regions
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
#Find the middle region of the windows
levs.mean<-sort(tapply(dat$t.dat[,"Time"], as.factor(dat$w.dat$wr1), mean))
#clean up the levs
levs<-setdiff(names(levs.mean),"")
	#subset the window means to the cleaned levels
levs.mean<-levs.mean[levs]
	#regional assignment for window region labeling
#ys<-rep(1.05*(max(dat$t.dat[,"X.1"])), length(levs))
#Create a new plot
if(plot.new){
dev.new(width=14, height=8)
}else{}
#Define the layout of the window region
layout(matrix(c(1,1,2,3), 2, 2, byrow = TRUE))
par(bg="gray90")
#define the open window
peakfunc.window<-dev.cur()
	#plot the trace specified at the beginning
PeakFunc7(dat,cell, lmain=" ",bcex=bcex, info=F)
title(expression("RED"* phantom("/BLUE")), col.main="red")
title(expression(phantom("RED/")*"BLUE"),col.main="blue")
title(expression(phantom("RED")*"/"*phantom("BLUE")),col.main="black")
# add point to the plot to define buttons
ys<-rep(par("usr")[3],length(levs))
points(levs.mean, ys, pch=16, cex=2)
#label each point with levs text
#text(levs.mean,ys,labels=names(levs.mean),pos=c(1,3),cex=1, srt=90)
###Selecting Control Windows
#bringtotop(-1)
cat("Choose one or more window regions for the denominator in the equations,
Amplification-or-block = active.window / control.window
CLICK LARGE BLACK DOTS to select
Click stop in the top left.
"
)
#Select windows to define numerator
controlwindows <- identify(x=levs.mean,y=ys,labels="X",plot=T, col="red", cex=1.5)
#collect the names of what you have selected
controlwindows<- levs[controlwindows]
###Selecting Active Windows
#bringtotop(-1)
cat("Choose one or more window regions for the numerator in the equations,
Amplification-or-block = active.window / control.window
Click stop in the top left, and then STOP LOCATOR from the drop down
"
)
#change focus back to the peakwindow for active window selection
dev.set(peakfunc.window)
activewindows <- identify(x=levs.mean,y=ys,labels="X",plot=T, col="blue",cex=1.5)
activewindows<-levs[activewindows]
#now if there are multiple control windows selected,
if(length(controlwindows)>1){
		#create the name for scp column lookup
controlmax<-paste(controlwindows, type, sep="")
#add that name to scp, and do a row mean
controlmaxmean<-data.frame(rowMeans(dat$scp[controlmax]))
}else{
controlmax<-paste(controlwindows, type, sep="")
controlmaxmean<-dat$scp[controlmax]
}
#same as above!
if(length(activewindows)>1){
activemax<-paste(activewindows, type, sep="")
activemaxmean<-data.frame(rowMeans(dat$scp[activemax]))
}else{
activemax<-paste(activewindows, type, sep="")
activemaxmean<-dat$scp[activemax]
}
max_amp_mean<-activemaxmean/controlmaxmean
max_amp_mean[,2]<-seq(from=1,to=dim(max_amp_mean)[1],by=1)
max_amp_mean_cells<-data.frame(activemaxmean[cells,])/data.frame(controlmaxmean[cells,])
max_amp_mean_cells[,2]<-seq(from=1,to=dim(max_amp_mean_cells)[1],by=1)
row.names(max_amp_mean_cells)<-cells
# Calculate percent change and select for cells
print("Would you like to save this statistic to scp?")
save_stat_op<-select.list(c("yes","no"), title="Save Stat?")
if(save_stat_op=="yes"){
print("Enter the name of the statistic to be added to scp")
stat.name<-scan(n=1, what='character')
dat$scp[stat.name]<-max_amp_mean
assign(dat.name,dat, envir=env)
}
density_ct_plotter(dat, cells, NULL, max_amp_mean[1],xlim_top=3,xlim_bottom=0, overlay=T,dense_sep=F,plot_new=F)
#dev.new(width=15, height=5)
#par(mfrow=c(1,2), bty="l")
#hist(max_amp_mean_cells[,1], breaks=length(max_amp_mean_cells[,1])/2, xlim=c(0,2))
boxplot(max_amp_mean_cells[,1], outline=F, ylim=c(0,2),width=10, lty=1, lwd=2, main=paste(activewindows,"Amplification Cutoff"), ylab="Active.Max/Control.Max", horizontal=T)
text(
jitter(
rep(
1,
length(max_amp_mean_cells[,1])
),10
)~max_amp_mean_cells[,1],
labels=row.names(max_amp_mean_cells),
cex=.5,
col=rgb(1,1,1,4, maxColorValue=10)
)#,ylim=c(0,2.5), add=T, vertical=T, method="jitter", jitter=.2)
#170131 adding 2 point localization
if(localize=="T"){
selector<-select.list(c("one", "two"), title="Left side first!")
if(selector=="one"){loc<-locator(n=1, type="p", pch=15, col="red")}
if(selector=="two"){loc<-locator(n=2, type="p", pch=15, col="red")}
abline(v=loc$x,col="red")
if(length(loc$x)==1){
x.names<-row.names(which(max_amp_mean[1]>loc$x, arr.ind=T, useNames=T))
x.names<-row.names(max_amp_mean[order(max_amp_mean[,1],decreasing=T),])
}
if(length(loc$x)==2){
x.names<-which(max_amp_mean[1]>loc$x[1] & max_amp_mean[1]<loc$x[2], arr.ind=T,useNames=T)
x.names<-row.names(max_amp_mean[order(max_amp_mean[,1],decreasing=T),])
}
}else{
x.names<-row.names(max_amp_mean_cells[order(max_amp_mean_cells[,1],decreasing=T),])
print(x.names)
}
if(view.cells){
continue<-select.list(c("Yes", "No"), multiple=F, title="View Selected Cells?")
}else{continue<-"No"}
if(continue=="Yes"){
print(length(x.names))
#graphics.off()
real.cells<-tcd(dat, x.names,dat.name=dat.name)
return(real.cells)
}else{
return(x.names)
}
}
bp.selector.advanced<-function(dat,cell=NULL,cells=NULL,dat.name=NULL,plot.new=T,save.bp=F,view.cells=F, env=NULL, localize=T, bcex=1){
if(is.null(env)){
env<-.GlobalEnv
}else{env<-env}
if(is.null(dat.name)){
dat.name<-deparse(substitute(dat))
}else{dat.name<-dat.name}
#grab the RD name from the RD
if(is.null(dat.name)){
dat.name<-deparse(substitute(dat))
}else{dat.name<-dat.name}
#Make sure you have some type of cells
if(is.null(cells)){
cells<-dat$c.dat$id
}else{cells<-cells}
	#Choose a cell to display for selecting stats
if(is.null(cell)){
cell<-dat$c.dat[1,'id']
}else{cell<-cell}
###################################################################
#This region needs significant work to improve to all data aspects
###################################################################
	## Select either Area or Peak Height
type<-select.list(c("Peak Height", "Area"), multiple=F, title="Parameter?")
if(type=="Peak Height"){type<-".max"
}else{type<-".tot"}
#Find the window regions
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
#Find the middle region of the windows
levs.mean<-sort(tapply(dat$t.dat[,"Time"], as.factor(dat$w.dat$wr1), mean))
#clean up the levs
levs<-setdiff(names(levs.mean),"")
	#subset the window means to the cleaned levels
levs.mean<-levs.mean[levs]
	#regional assignment for window region labeling
#ys<-rep(1.05*(max(dat$t.dat[,"X.1"])), length(levs))
#Create a new plot
if(plot.new){
dev.new(width=14, height=8)
}else{}
#Define the layout of the window region
layout(matrix(c(1,1,2,3), 2, 2, byrow = TRUE))
par(bg="gray90")
#define the open window
peakfunc.window<-dev.cur()
	#plot the trace specified at the beginning
PeakFunc7(dat,cell, lmain=" ",bcex=bcex, info=F)
title("(After-Before)/(After+Before)")
# add point to the plot to define buttons
ys<-rep(par("usr")[3],length(levs))
points(levs.mean, ys, pch=16, cex=2)
#label each point with levs text
#text(levs.mean,ys,labels=names(levs.mean),pos=c(1,3),cex=1, srt=90)
continue<-"yes"
while(continue=="yes"){
###Selecting Control Windows
#bringtotop(-1)
cat("
Choose the Pulse Following the compound of interest.
This is the AFTER pulse
CLICK LARGE BLACK DOTS to select
You Only Get one shot.
		PRESS ENTER TO CONTINUE
"
)
scan(n=1)
#Select windows to define numerator
afterwindows <- identify(x=levs.mean,y=ys,labels="X",plot=T, col="red", cex=1.5,n=1)
#collect the names of what you have selected
afterwindows<- levs[afterwindows]
###Selecting Active Windows
#bringtotop(-1)
cat("
###############################################
Choose the Pulse Before the compound of interest.
This is the BEFORE pulse
You only get one click.
		PRESS ENTER TO CONTINUE
"
)
scan(n=1)
#change focus back to the peakwindow for active window selection
dev.set(peakfunc.window)
beforewindows <- identify(x=levs.mean,y=ys,labels="X",plot=T, col="blue",cex=1.5,n=1)
beforewindows<-levs[beforewindows]
		#Find the scp column to provide the best stat
aftermax<-paste(afterwindows, type, sep="")
aftermaxmean<-dat$scp[aftermax]
beforemax<-paste(beforewindows, type, sep="")
beforemaxmean<-dat$scp[beforemax]
max_amp_mean<-(aftermaxmean-beforemaxmean)/(aftermaxmean+beforemaxmean)
max_amp_mean[,2]<-seq(from=1,to=dim(max_amp_mean)[1],by=1)
max_amp_mean_cells<-(
( data.frame(aftermaxmean[cells,])-data.frame(beforemaxmean[cells,]) )/
( data.frame(aftermaxmean[cells,])+data.frame(beforemaxmean[cells,]) ) )
max_amp_mean_cells[,2]<-seq(from=1,to=dim(max_amp_mean_cells)[1],by=1)
row.names(max_amp_mean_cells)<-cells
# Calculate percent change and select for cells
cat("Would you like to save this statistic to scp? \n")
save_stat_op<-select.list(c("yes","no"), title="Save Stat?")
if(save_stat_op=="yes"){
cat("Enter the name of the statistic to be added to scp \n")
stat.name<-scan(n=1, what='character')
dat$scp[stat.name]<-max_amp_mean
assign(dat.name,dat, envir=env)
}
cat("
Make another stat?")
continue<-select.list(c("yes","no"))
}
density_ct_plotter(dat, cells, NULL, max_amp_mean[1],xlim_bottom=-1,xlim_top=1, overlay=T,dense_sep=F,plot_new=F)
#dev.new(width=15, height=5)
#par(mfrow=c(1,2), bty="l")
#hist(max_amp_mean_cells[,1], breaks=length(max_amp_mean_cells[,1])/2, xlim=c(0,2))
boxplot(max_amp_mean_cells[,1], outline=F, ylim=c(-1,1),width=10, lty=1, lwd=2, main="Amplification Cutoff", ylab="Active.Max/Control.Max", horizontal=T)
text(
jitter(
rep(
1,
length(max_amp_mean_cells[,1])
),10
)~max_amp_mean_cells[,1],
labels=row.names(max_amp_mean_cells),
cex=.5,
col=rgb(1,1,1,4, maxColorValue=10)
)#,ylim=c(0,2.5), add=T, vertical=T, method="jitter", jitter=.2)
#170131 adding 2 point localization
if(localize){
selector<-select.list(c("one", "two"), title="Left side first!")
if(selector=="one"){loc<-locator(n=1, type="p", pch=15, col="red")}
if(selector=="two"){loc<-locator(n=2, type="p", pch=15, col="red")}
abline(v=loc$x,col="red")
if(length(loc$x)==1){
#now we need to
#1.select cells based on the first click on the boxplot graphic
x.names<-row.names(which(max_amp_mean_cells[1]>loc$x, arr.ind=T, useNames=T))
			#now that we have found the cells which respond in these ways we will
#sort the dataframe based on these stats
new_max_amp_mean_cells<-max_amp_mean_cells[x.names,]
x.names<-row.names(new_max_amp_mean_cells[order(new_max_amp_mean_cells[1], decreasing=T),])
}
if(length(loc$x)==2){
x.names<-row.names(which(max_amp_mean_cells[1]>loc$x[1] & max_amp_mean_cells[1]<loc$x[2], arr.ind=T,useNames=T))
new_max_amp_mean_cells<-max_amp_mean_cells[x.names,]
x.names<-row.names(new_max_amp_mean_cells[order(new_max_amp_mean_cells[1], decreasing=T),])
}
}else{
x.names<-row.names(max_amp_mean_cells[order(max_amp_mean_cells[,1],decreasing=T),])
print(x.names)
}
if(view.cells){
continue<-select.list(c("Yes", "No"), multiple=F, title="View Selected Cells?")
}else{continue<-"No"}
if(continue=="Yes"){
print(length(x.names))
#graphics.off()
real.cells<-tcd(dat, x.names,dat.name=dat.name)
return(real.cells)
}else{
return(x.names)
}
}
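#Example usage (a sketch; RD.ex is a hypothetical RD experiment list): the
#interactive boxplot computes (after-before)/(after+before) for the two
#clicked pulses, e.g. peaks of 0.6 after and 0.2 before give (0.6-0.2)/(0.6+0.2)=0.5
#ranked <- bp.selector.advanced(RD.ex, cells=RD.ex$c.dat$id, plot.new=TRUE, localize=TRUE)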
#This function plots the stat (data.frame form) of all cells, and individual
#cell type densities.
#dat: RD data
#cells: Total group
#cell_types: How to separate the groups
#stat: premade statistic in data.frame format where row names are cell names
#xlim_top: the maximum xlim value to display
#xlim_bottom: the minimum xlim value to display
#overlay: will plot each cell type's density on top of the all-cells density plot
#dense_sep: will plot the densities on separate plots
#plot_new: will create a new window for this plot
#abline_loc: where to display the added reference line to help read the data
density_ct_plotter<-function(dat, cells, cell_types,stat=dat$c.dat["area"],xlim_top=NULL, xlim_bottom=NULL,overlay=T,dense_sep=T,plot_new=T,env=NULL,dat.name=NULL, abline_loc=0){
par(xpd=F)
if(is.null(dat.name)){
dat.name<-deparse(substitute(dat))
}else{dat.name<-dat.name}
if(plot_new & dense_sep){
dev.new(width=10,height=10)
density_window<-dev.cur()
#density_plot<-dev.cur()
}
if(plot_new & dense_sep==F){
dev.new(width=5,height=5)
density_window<-dev.cur()
#density_plot<-dev.cur()
}
#Now add a density plot per cell type to show the distribution of cell type effects
require(RColorBrewer)
color<-brewer.pal(8,"Dark2")
color<-rep(color,10)
#color<-sample(rainbow(length(cell_types),start=.2, end=.85))
all.cells.density<-density(stat[,1])
#Overlay plot used in bp.selector
if(is.null(cell_types)){
if(!is.null(dat$cell_types)){
cell_types<-dat$cell_types
}else{
overlay=F
dense_sep=F
}
#perform a logical test to determine whether to plot the cells
#selected_cell_types<-list()
#for(i in 1:length(cell_types)){
# print(paste(names(cell_types)[i],"=",length(cell_types[[i]])))
# if(length(cell_types[[i]])>10){
# selected_cell_types<-append(selected_cell_types,cell_types[i])
# }
#}
#cell_types<-selected_cell_types
}else{
#bringtotop(-1)
print("which Cell Types would you like to view on the plotter")
selected_cell_types<-select.list(names(cell_types), multiple=T)
cell_types<-cell_types[selected_cell_types]
}
if(dense_sep==T){
plot_sep<-ceiling(sqrt(length(cell_types)+1))
par(mfrow=c(plot_sep,plot_sep),mai=c(.25,.25,.25,.25))
}
if(is.null(xlim_top)){
xlim_top<-max(stat[,1])
}else{xlim_top<-xlim_top}
if(is.null(xlim_bottom)){
xlim_bottom<-min(stat[,1])
}else{xlim_bottom<-xlim_bottom}
density_window<-dev.cur()
xlim<-c(xlim_bottom,xlim_top)
dev.set(density_window)
plot(
all.cells.density,
xlim=xlim,
ylim=c(0,max(all.cells.density$y)*1.5),
pch="",lwd=3, col="black",
main=names(stat)
)
polygon(all.cells.density,col="red",lwd=1)
	#Provide density plots with lines overlay
if(overlay==T){
for(i in 1:length(cell_types)){
if(length(cell_types[[i]])>2){
cell_type_density<-density(stat[cell_types[[i]],])
lines(cell_type_density, col="black", lwd=5)
lines(cell_type_density, col=color[i], lwd=2)
}
}
legend("topleft",legend=names(cell_types), fill=color, cex=.6,box.col="Black")
}
if(dense_sep){
par(xpd=T)
for(i in 1:length(cell_types)){
if(length(cell_types[[i]])>2){
cell_type_density<-density(stat[cell_types[[i]],])
plot(
cell_type_density, col="black", lwd=5,
xlim=xlim,
ylim=c(0,max(all.cells.density$y)*1.5),
main=paste(names(cell_types[i])," n=",length(cell_types[[i]])),
bty="l"
)
abline(v=abline_loc,col="red")
lines(cell_type_density, col=color[i], lwd=2)
}else{
plot(0,0,pch="",main=paste(names(cell_types[i])," n=",length(cell_types[[i]])),bty="l")
}
}
plot(0,0,main=NULL,xlab=NULL,ylab=NULL,xaxt=NULL,yaxt=NULL,bty="n",pch="")
#legend("topleft",legend=names(cell_types), fill=color, cex=.8,bg="gray70")
text(0+xinch(.2),0,dat.name, cex=1.1)
}
}
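#Example usage (a sketch; RD.ex is a hypothetical RD experiment list whose
#cell_types component is a named list of cell-name vectors):
#density_ct_plotter(RD.ex, RD.ex$c.dat$id, RD.ex$cell_types,
#	stat=RD.ex$c.dat["area"], overlay=TRUE, dense_sep=TRUE, plot_new=TRUE)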
#Repairs score from levs only
# Uses PeakFunc6
bin.repair<-function(dat, n.names=NULL){
if(is.null(n.names)){n.names<-names(dat$t.dat[,-1])}
cell.i<-1
cell<-n.names[cell.i]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
levs.mean<-sort(tapply(dat$t.dat[,"Time"], as.factor(dat$w.dat$wr1), mean))
levs<-setdiff(names(levs.mean),"")
levs.mean<-levs.mean[levs]
xs <- c(levs.mean,rep(dat$t.dat[50,"Time"],4))
ys<-c(rep(1.4, length(levs.mean)),1.2, 1.1, 1.0, 0.9)
dev.new(width=14, height=5)
dev.set(dev.list()[1])
PeakFunc6(dat,cell, Plotit.both=F)
linesflag<-0
click.i<-0
while(click.i!=length(levs.mean)+4){
points(x=xs,y=ys,pch=16)
text(x=xs,y=c(rep(1.4, length(levs.mean)),1.2,1.1,1.0,0.9),labels=c(names(levs.mean),"Cell +","Cell -","drop","off"),pos=2,cex=.5)
click.i <- identify(x=xs,y=ys,n=1,plot=T)
cell<-n.names[cell.i]
if(click.i<=length(levs.mean)){
if(dat$bin[cell, levs[click.i]]==1){dat$bin[cell, levs[click.i]]=0;dat$bin[cell,"drop"]=0;linesflag<-0}
else{dat$bin[cell, levs[click.i]]=1;dat$bin[cell,"drop"]=0;linesflag<-0}
dev.set(dev.list()[1]);PeakFunc6(dat, cell, Plotit.both=F)
}
if(click.i==length(levs.mean)+1){cell.i <- cell.i + 1;if(cell.i>length(n.names)){cell.i<-1};linesflag<-1}
if(click.i==length(levs.mean)+2){cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(n.names)};linesflag<-1}
if(click.i==length(levs.mean)+3){dat$bin[cell, "drop"]=1;dev.set(dev.list()[1]);PeakFunc6(dat, cell, Plotit.both=F)} #dat$bin[cell,levs]=0;
if(linesflag==1){PeakFunc6(dat, n.names[cell.i], Plotit.both=F)}
}
graphics.off()
return(dat$bin)
}
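#Example usage (a sketch; RD.ex is a hypothetical RD experiment list):
#rescore the window-response bins for the first 10 traces by clicking
#RD.ex$bin <- bin.repair(RD.ex, n.names=names(RD.ex$t.dat[,-1])[1:10])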
### Repairs GFP and TRITC score from label bin
# uses peakfunc5
bin.repair.2<-function(dat, n.names=NULL){
if(is.null(n.names)){n.names<-names(dat$t.dat[,-1])}
cell.i<-1
cell<-n.names[cell.i]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
levs.mean<-sort(tapply(dat$t.dat[,"Time"], as.factor(dat$w.dat$wr1), mean))
levs<-setdiff(names(levs.mean),"")
levs.mean<-levs.mean[levs]
xs <- c(levs.mean,c(max(dat$t.dat[,1])*1.09, max(dat$t.dat[,1])*1.19),rep(max(dat$t.dat[,"Time"])-(max(dat$t.dat[,"Time"])*1.095),4))
ys<-c(rep(1.8, length(levs.mean)+2),1.2, 1.0, 0.8, 0.6)
dev.new(width=14, height=5)
dev.set(dev.list()[1])
PeakFunc5(dat,cell, Plotit.both=T)
linesflag<-0
click.i<-0
while(click.i!=length(levs.mean)+2+4){
points(x=xs,y=ys,pch=16)
text(x=xs,y=ys,labels=c(names(levs.mean),"mean.gfp", "tritc","Cell +","Cell -","drop","off"),pos=3,cex=.5)
click.i <- identify(x=xs,y=ys,n=1,plot=T)
cell<-n.names[cell.i]
if(click.i<=length(levs.mean)){
if(dat$bin[cell, levs[click.i]]==1){dat$bin[cell, levs[click.i]]=0;dat$bin[cell,"drop"]=0;linesflag<-0}
else{dat$bin[cell, levs[click.i]]=1;dat$bin[cell,"drop"]=0;linesflag<-0}
dev.set(dev.list()[1]);PeakFunc5(dat, cell, Plotit.both=T)
}
if(click.i==length(levs.mean)+1){
if(dat$bin[cell, "gfp.bin"]==1){dat$bin[cell, "gfp.bin"]=0;dat$bin[cell,"drop"]=0;linesflag<-0}
else{dat$bin[cell, "gfp.bin"]=1;dat$bin[cell,"drop"]=0;linesflag<-0}
dev.set(dev.list()[1]);PeakFunc5(dat, cell, Plotit.both=T)
}
if(click.i==length(levs.mean)+2){
if(dat$bin[cell, "tritc.bin"]==1){dat$bin[cell, "tritc.bin"]=0;dat$bin[cell,"drop"]=0;linesflag<-0}
else{dat$bin[cell, "tritc.bin"]=1;dat$bin[cell,"drop"]=0;linesflag<-0}
dev.set(dev.list()[1]);PeakFunc5(dat, cell, Plotit.both=T)
}
if(click.i==length(levs.mean)+3){cell.i <- cell.i + 1;if(cell.i>length(n.names)){cell.i<-1};linesflag<-1}
if(click.i==length(levs.mean)+4){cell.i <- cell.i - 1;if(cell.i<1){cell.i<-length(n.names)};linesflag<-1}
if(click.i==length(levs.mean)+5){dat$bin[cell, "drop"]=1;dev.set(dev.list()[1]);PeakFunc5(dat, cell, Plotit.both=T)} #dat$bin[cell,levs]=0;
if(linesflag==1){PeakFunc5(dat, n.names[cell.i], Plotit.both=T)}
}
graphics.off()
neuron.response<-select.list(levs, title="What defines Neurons?", multiple=T)
neurons<-cellz(dat$bin,neuron.response, 1)
drop<-cellz(dat$bin, "drop", 1)
neurons<-setdiff(neurons,drop)
pf<-apply(dat$bin[,c("gfp.bin", "tritc.bin")],1,paste, collapse="")
dat$bin["lab.pf"]<-as.factor(pf)
lab.groups<-unique(dat$bin$lab.pf)
cells<-list()
for(i in lab.groups){
x.names<-cellz(dat$bin[neurons,], "lab.pf", i)
cells[[i]]<-x.names
}
glia.response<-select.list(c(levs, "none"), title="What defines glia?", multiple=T)
if(glia.response!="none"){
drop<-cellz(dat$bin, "drop", 1)
glia<-cellz(dat$bin,glia.response, 1)
glia<-setdiff(glia,drop)
cells[["000"]]<-setdiff(glia, neurons)
}
else {cells[["000"]]<-setdiff(row.names(dat$c.dat), neurons)}
dat$cells<-cells
return(dat)
}
bin.rep.cells<-function(dat){
cells<-dat$cells
for(i in 1:length(cells)){
dat<-bin.repair.2(dat, cells[[i]])
}
return(dat)
}
# Creates binary scoring for labeling
# Input: RD list, and # of cells to observe for sampling
# Output: bin dataframe with added intensity scoring
label.bin<-function(dat, cells=10){
rand.names<-attributes(sample(dat$c.dat$id))$levels
n.names<-rand.names[1:cells]
cell.i<-1
dev.new(width=15, height=3)
yes.green<-vector()
no.green<-vector()
yes.red<-vector()
no.red<-vector()
for(i in 1:length(n.names)){
par(mfrow=c(1,5))
multi.pic.zoom(dat, n.names[i], dat$img1, plot.new=F)
multi.pic.zoom(dat, n.names[i], dat$img2, plot.new=F)
multi.pic.zoom(dat, n.names[i], dat$img3, plot.new=F)
multi.pic.zoom(dat, n.names[i], dat$img4, plot.new=F)
par(mar=c(0,0,0,0))
xloc<-c(2,2,2,2)
yloc<-c(3.5,2.5,1.5,0.5)
loc<-cbind(xloc, yloc)
plot(loc,xlim=c(0,4), pch=15, ylim=c(0,4), xaxt="n", yaxt="n", cex=1.5)
text(loc, c("+GFP","+TRITC", "+GFP & +TRITC","No Label") ,pos=4, cex=1.5)
click.i<-identify(loc, n=1, plot=T)
if(click.i==1){yes.green[i]<-dat$c.dat[n.names[i],"mean.gfp"];no.red[i]<-dat$c.dat[n.names[i],"mean.tritc"]}
if(click.i==2){yes.red[i]<-dat$c.dat[n.names[i],"mean.tritc"];no.green[i]<-dat$c.dat[n.names[i],"mean.gfp"]}
if(click.i==3){yes.red[i]<-dat$c.dat[n.names[i],"mean.tritc"];yes.green[i]<-dat$c.dat[n.names[i],"mean.gfp"]}
if(click.i==4){no.red[i]<-dat$c.dat[n.names[i],"mean.tritc"];no.green[i]<-dat$c.dat[n.names[i],"mean.gfp"]}
}
graphics.off()
if(length(yes.green)>=1){yes.green<-setdiff(yes.green,c("NA",NA))}
if(length(no.green)>=1){no.green<-setdiff(no.green,c("NA",NA))}
if(length(yes.red)>=1){yes.red<-setdiff(yes.red,c("NA",NA))}
if(length(no.red)>=1){no.red<-setdiff(no.red,c("NA",NA))}
dat$bin["gfp.bin"]<-0
dat$bin["tritc.bin"]<-0
if(length(yes.green)>=1){green.names<-row.names(dat$c.dat)[dat$c.dat$mean.gfp>min(yes.green)]}
if(length(yes.red)>=1){red.names<-row.names(dat$c.dat)[dat$c.dat$mean.tritc>min(yes.red)]}
if(length(yes.green)>=1){dat$bin[green.names,"gfp.bin"]<-1}
if(length(yes.red)>=1){dat$bin[red.names,"tritc.bin"]<-1}
print(paste("Green Cells : ",min(yes.green)))
print(paste("Red Cells : ",min(yes.red)))
print(paste("No label Green : ",max(no.green),"No label Red", max(no.red)))
pf<-apply(dat$bin[,c("gfp.bin", "tritc.bin")],1,paste, collapse="")
dat$bin["lab.pf"]<-as.factor(pf)
return(dat$bin)
}
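#Example usage (a sketch; RD.ex is a hypothetical RD experiment list with
#mean.gfp and mean.tritc columns in c.dat; a sample of cells is hand-scored
#and the minimum labeled intensity becomes the threshold for the experiment):
#RD.ex$bin <- label.bin(RD.ex, cells=20)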
##############################################################################################
# Cell Group Review
##############################################################################################
#Group summary
#generate pdfs with line graphs
#table of means and frequencies for all c.dat
#THIS MUST BE CLEANED UP 040314
GroupSummary <- function(dat,snr,c.dat,wr,levs,groups,pref="Group"){
g.levs <- unique(groups)
for(i in g.levs)
{
cnames <- names(groups[groups==i])
pdf.name <- paste(pref,i,".pdf",sep="")
lmain <- paste(pref,i,sep="")
LinesEvery(dat,snr,cnames,wr,levs,lmain,pdf.name)
dev.off()
}
res.tab <- data.frame(mean=apply(c.dat[names(groups),],2,mean))
res.tab["sd"] <- apply(c.dat[names(groups),],2,sd)
for(i in g.levs)
{
cnames <- names(groups[groups==i])
res.tab[paste(pref,i,".mean",sep="")] <- apply(c.dat[cnames,],2,mean)
res.tab[paste(pref,i,".sd",sep="")] <- apply(c.dat[cnames,],2,sd)
}
tab.name <- paste(pref,".table.csv",sep="")
write.csv(res.tab,file=tab.name)
#lines figure similar to boxplot
## tmp <- scale(c.dat[names(groups),],center=T,scale=T)
## tmp.mn <- data.frame(t(apply(tmp,2,function(x){tapply(x,as.factor(groups),mean)})))
## tmp.sd <- data.frame(t(apply(tmp,2,function(x){tapply(x,as.factor(groups),sd)})))
## tmp.se <- t(t(tmp.sd)/sqrt(summary(as.factor(groups))))
## ylim <- c(min(tmp.mn)-2,max(tmp.mn))
## miny <- min(ylim)+1
## dev.new()
## par(xaxt="n",mar=c(2,4,4,2))
## plot(seq(1,nrow(tmp.mn)),tmp.mn[,1],ylim=ylim,xlim=c(0,(nrow(tmp.mn)+1)),type="n",ylab="Normalized Mean +- SE",xaxt="n")
## cols <- rainbow(ncol(tmp.mn),start=.3)
## names(cols) <- names(tmp.mn)
## nudge <- 0
## ## for(i in names(tmp.mn))
## {
## xseq <- seq(1,nrow(tmp.mn))
## rect(nudge+seq(1,nrow(tmp.mn))-.05,tmp.mn[,i]-tmp.se[,i],nudge+seq(1,nrow(tmp.mn))+.05,tmp.mn[,i]+tmp.se[,i],col=cols[i],border=NA)
## points(nudge+seq(1,nrow(tmp.mn)),tmp.mn[,i],pch=16,col=cols[i],lwd=2,type="b")
## nudge <- nudge+.1
## }
## text(rep(nrow(tmp.mn),ncol(tmp.mn)),tmp.mn[nrow(tmp.mn),],paste(pref,names(tmp.mn),sep=""),cex=.8,col=cols,pos=4)
## text(seq(1,nrow(tmp.mn))+.25,miny,names(c.dat),srt=90,pos=3)
c.mn <- data.frame(t(apply(c.dat,2,function(x){tapply(x,as.factor(groups),mean)})))
c.sd <- data.frame(t(apply(c.dat,2,function(x){tapply(x,as.factor(groups),sd)})))
c.se <- t(t(c.sd)/sqrt(summary(as.factor(groups))))
return(list(mean=c.mn,sd=c.sd,se=c.se))
}
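#Example usage (a sketch; RD.ex is a hypothetical RD experiment list; groups
#must be a named vector mapping cell names to group labels, and the c.dat
#columns passed in must be numeric for the mean/sd tables):
#grps <- RD.ex$c.dat$pf; names(grps) <- row.names(RD.ex$c.dat)
#gs <- GroupSummary(RD.ex, RD.ex$snr, RD.ex$c.dat[c("area","mean.gfp")],
#	RD.ex$w.dat$wr1, levs=NULL, groups=grps, pref="pf")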
# Function plotting cell locations, barplots of labeled intensities, stacked traces, and
# single traces of all scored groups.
# Needs work on click functions, and recognition of NULL intensities from experiments
GroupReview.2 <- function(dat,bp.plot=T,shws=2,phws=20,wr.i=2,bl.meth="TopHat"){
library(cluster)
graphics.off()
#peakfunc window= dev.list()[1]
windows(width=8,height=4, xpos=0, ypos=0)
#linefunc window= dev.list()[2]
windows(width=8,height=5, xpos=0, ypos=360)
#bpfunc window= dev.list()[3]
windows(width=5,height=4, xpos=800, ypos=420)
#cell.locate window= dev.list[4]
windows(width=12,height=12, xpos=820, ypos=0)
#gui window= dev.list[5]
windows(width=2,height=2, xpos=1400, ypos=620)
	# Plotting all traces on top of each other
	# Could attempt something like a LinesEvery function
	# Should replace linesfunc with linesevery.2. If there are more than 15 cells
	# then I need to plot traces like tracechase. Needs window plotting.
	# shade windows according to scoring
	#Cell locate still needs to be able to move through images.
	# New data set will have 4-5 images
	# Also, this function needs to have all click features available, including click
	# cells for peakfunc selections
	# Create a table with binary groups as rows
	# column 1=total cells in group
	# column 2=group number
total.cell<-sort(summary(dat$c.dat[,"pf"]))
group.sum<-cbind(total.cell, seq(1,length(total.cell), by=1))
as.table(group.sum)
colnames(group.sum)<-c("c.tot", "g.num")
#make clust (which is the definition of clusters) be equal to the group numbers
#in group.sum
#clust<-group.sum[,"g.num"]
levs<-setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
pf<-apply(dat$bin[,levs],1,paste,collapse="")
pf.sum<-summary(as.factor(pf),maxsum=500)
pf.sum<-pf.sum[order(pf.sum,decreasing=T)]
pf.ord<-pf.sum
pf.ord[]<-seq(1,length(pf.sum))
dat$c.dat["pf"]<-as.factor(pf)
dat$c.dat["pf.sum"]<-pf.sum[pf]
dat$c.dat["pf.ord"]<-pf.ord[pf]
clust<-dat$c.dat[,"pf.ord"]
clust.name <- unique(clust)
levs<-setdiff(unique(as.character(dat$w.dat[,2])),"")
dev.set(dev.list()[5])
par(mar=c(0,0,0,0))
plot(2,2, pch=NA)
points(x=c(rep(1.75,5),rep(2.5,6)),y=c(2.5,2.25,2.0,1.75,1.5,2.5,2.25,2,1.75,1.5,1.25),pch=16)
text(x=c(rep(1.75,5),rep(2.5,6)),y=c(2.5,2.25,2.0,1.75,1.5,2.5,2.25,2,1.75,1.5,1.25),
labels=c("Group +","Group -","Cell +","Cell -","Done", "Image 1", "Image 2", "Image 3", "Zoom", "+ Pulse", "- Pulse"),pos=2,cex=.8)
img<-dat$img1
#an intiator of the linesfunc if lines.flag=1
lines.flag <- 1
#this is a list of all cell names
g.names <- names(dat$t.dat[,-1])
#highest group #
pam.k <- max(clust)
#initial group and cell to start analysis
group.i <- 1
cell.i <- 1
peak.i<-1
# define first click
click.i <- 1
while(click.i)
{
#initiate the single peak plot, but only if the group exists
g.num <- sum(clust==group.i)
if(g.num > 0)
{
#first group defined above, but can be further defined below
group.names <- g.names[clust==group.i]
#first cell is defined above, but can be further defined below
cell.pick <- group.names[cell.i]
# Intial setting for image changer
#move to next plot and start peakfunc2
#p1 <- PeakFunc2(dat,cell.pick,shws=shws,phws=phws,Plotit=T,wr=dat$w.dat[,wr.i],SNR.lim=2,bl.meth=bl.meth)
}
#start boxplot of color intensities
if(lines.flag==1){
dev.set(dev.list()[1]);PeakFunc5(dat, cell.pick)
dev.set(dev.list()[2]);if(length(group.names)>10){LinesStack(dat, group.names, plot.new=F)}else{LinesEvery.2(dat, group.names,plot.new=F)}
dev.set(dev.list()[3]);bpfunc(dat,group.names)
dev.set(dev.list()[4]);cell.zoom.2048(dat,img, group.names, plot.new=F);lines.flag <- 0
}
dev.set(dev.list()[5])
click.i <- identify(x=c(rep(1.75,5),rep(2.5,6)),y=c(2.5,2.25,2.0,1.75,1.5,2.5,2.25,2,1.75,1.5,1.25),n=1,plot=F)
# syntax for first click on peakfumc2. if click group+ group.i+1
if(click.i==1)
{group.i <- group.i + 1;if(group.i > pam.k){group.i <- 1};cell.i<-1;lines.flag <- 1}
if(click.i==2)
{group.i <- group.i - 1;if(group.i < 1){group.i <- pam.k};cell.i<-1;lines.flag <- 1}
if(click.i==3){
cell.i <- cell.i + 1
if(cell.i > g.num){cell.i <- 1}
dev.set(dev.list()[1]);PeakFunc5(dat, cell.pick)
}
if(click.i==4){
cell.i <- cell.i - 1
if(cell.i < 1){cell.i <- g.num}
dev.set(dev.list()[1]);PeakFunc5(dat, cell.pick)
}
if(click.i==5)
{graphics.off();stop()}
if(click.i==6){if(!is.null(dat$img1)){img<-dat$img1};lines.flag<-1}
if(click.i==7){if(!is.null(dat$img2)){img<-dat$img2};lines.flag<-1}
if(click.i==8){if(!is.null(dat$img3)){img<-dat$img3};dev.set(dev.list()[4]);lines.flag<-1}
if(click.i==9){}#cell.pick<-group.names[cell.i];cell.locate(cell.pick, zoom=5)}
if(click.i==10){peak.i<-peak.i+1;group.names<-row.names(dat$bin)[dat$bin[,levs[peak.i]]==1];lines.flag <- 1}
if(click.i==11){group.names<-p.names[[peak.i-1]]; cell.i<-1 ;lines.flag<-1}
}
dev.off()
}
##############################################################################################
# Trace Searching
##############################################################################################
#topdown parsing of all traces
TraceChase <- function(dat,blc=NULL,levs=NULL,x.names=NULL,scale=T){
library(cluster)
if(is.null(blc)){
if(is.element("blc",names(dat))){blc <- dat$blc}
else
{tmp.pcp <- ProcConstPharm(dat);blc <- tmp.pcp$blc}}
if(is.null(levs))
{
levs <- unique(dat$w.dat[,"wr1"])
levs <- select.list(levs,multiple=T,title="Select Regions for clustering")
}
dmat <- t(scale(blc[is.element(dat$w.dat[,"wr1"],levs),-1],scale=scale,center=scale))
a.names <- names(blc)[-1]
if(!is.null(x.names)){a.names <- intersect(x.names,names(blc))}
done=FALSE
while(!done)
{
if(length(a.names) < 21)
{
x.names <- TraceSelect(dat,a.names,dat$w.dat[,"wr1"],levs, "Final Select")
done=TRUE
}
else
{
#pam20 <- pam(dmat[a.names,],k=20)
clmb20 <- ClimbTree(dmat[a.names,],k=20)
lmain <- paste("Select Traces (all or none to end) n=",length(a.names))
#x.names <- SmashSelect(blc[c("Time",a.names)],pam20$clustering,row.names(pam20$medoids),dat$w.dat[,"wr1"],levs,lmain=lmain)
x.names <- SmashSelect(blc[c("Time",a.names)],clmb20,names(clmb20)[match(1:length(unique(clmb20)),clmb20)],dat$w.dat[,"wr1"],levs,lmain=lmain)
if(length(a.names)==length(x.names)){done = TRUE}
if(length(x.names)==0){done= TRUE}
a.names <- x.names
}
}
return(x.names)
}
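#Example usage (a sketch; RD.ex is a hypothetical RD experiment list; the
#user is prompted for window regions, then repeatedly whittles 20-group
#smashes down to a final trace selection):
#kept <- TraceChase(RD.ex, scale=TRUE)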
#given a set of traces (or trace segments)
#calculate the distances and group into K groups
#by height of tree cutting. One of the K groups will
#be a catch-all for all small groups
ClimbTree <- function(x,k=20){
tabstat <- function(x){return(list(mean=mean(x),length=length(x),median=median(x),sd=sd(x),gt5c=sum(x>5)))}
library(cluster)
d1 <- dist(x)
h1 <- hclust(d1)
q1 <- quantile(h1$height,probs=1:10/10)
clust <- cutree(h1,h=q1[5])
clust.tab <- table(clust)
clust.tab <- clust.tab[order(clust.tab,decreasing=T)]
new.num <- clust.tab
new.num[] <- seq(1,length(new.num))
clust[] <- new.num[as.character(clust)]
clust.tab <- table(clust)
if(length(clust.tab) > k)
{
in.grp <- names(clust.tab[1:(k-1)])
out.grp <- setdiff(names(clust.tab),in.grp)
clust[is.element(clust,out.grp)] <- k
}
return(clust)
# clust.stat <- data.frame(tabstat(clust.tab))
# for(i in 2:length(q1))
# {
# clust <- cutree(h1,h=q1[i])
# clust.tab <- table(clust)
# clust.stat[i,] <- tabstat(clust.tab)
# }
# return(clust.stat)
}
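#Example usage (a sketch on made-up data): cut the hierarchical tree at the
#median merge height and collapse everything past the 19 largest groups into group 20
#m <- matrix(rnorm(200*30), nrow=200)
#rownames(m) <- paste0("trace", 1:200)
#cl <- ClimbTree(m, k=20)
#table(cl)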
#SmashSelect: plot the cluster "smashes" and return the traces the user selects.
#all data in t.dat is ploted (1st col must be time)
#m.names are taken to be the medoids of the clusters
SmashSelect <- function(t.dat,clust,m.names,wr,levs=NULL,lmain=""){
rtag <- table(clust)
names(rtag) <- m.names[order(clust[m.names])]
sf <- 1
gcol <- rgb(10,10,10,alpha=120,max=255)
#gcol <- "grey"
x <- t.dat[,-1]
xm <- apply(x,2,max)
xn <- scale(x,center=F,scale=xm)
for(i in 1:nrow(xn)){xn[i,] <- xn[i,]+clust}
library(RColorBrewer)
lwds <- 2
xseq <- t.dat[,1]
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
dev.new(width=14,height=9)
op <- par(yaxt="n",bty="n",mar=c(4,0,2,1),cex=1)
plot(xseq,xn[,m.names[1]],ylim=c((min(xn)-2),max(xn)),xlab="Time (min)",ylab="Ratio with shift",main=lmain,type="n", xaxt="n")
axis(1, at=seq(0, length(t.dat[,1]), 5))
apply(xn,2,lines,x=xseq,col=gcol,lwd=2)
hbc <- 1
if(length(wr) > 0)
{
if(is.null(levs)){levs <- setdiff(unique(wr),"")}
x1s <- tapply(xseq,as.factor(wr),min)[levs]
x2s <- tapply(xseq,as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
#rect(x1s,y1s,x2s,y2s,col="lightgrey")
text(xseq[match(levs,wr)],rep(c(.2,-.2),length.out=length(levs)),levs,pos=4,offset=0,cex=1)
}
x.sel <- NULL
xs <-c(rep(0,length(m.names)),c(.1,.1,.1))
ys <- xn[1,m.names]
ys <- as.vector(c(ys,c(sf*.9,0,-sf*.9)))
# xs[(length(xs)-2):length(xs)] <- c(0,5,10)
p.names <- c(rep(" ",length(m.names)),"ALL","NONE","FINISH")
done.n <- length(p.names)
none.i <- done.n-1
all.i <- none.i-1
p.cols <- c(cols,c("black","black","black"))
for(i in 1:length(m.names))
{
#lines(xseq,xn[,m.names[i]],col="black",lwd=lwds*.5)
lines(xseq,xn[,m.names[i]],col=cols[i],lwd=lwds)
}
text(x=rep(max(xseq),length(m.names)),y=xn[nrow(xn),m.names],cex=.9,rtag,pos=4,col=p.cols)
text(x=xs,y=ys,labels=p.names,pos=2,cex=.7,col=p.cols)
points(x=xs,y=ys,pch=16,col=p.cols,cex=1.5)
click.i <- 1
while(click.i != done.n)
{
click.i <- identify(xs,ys,n=1,plot=F)
if(click.i < (length(m.names)+1) & click.i > 0)
{
i <- click.i
if(is.element(i,x.sel))
{
lines(xseq,xn[,m.names[i]],col=cols[i],lwd=lwds)
x.sel <- setdiff(x.sel,i)
}
else
{
lines(xseq,xn[,m.names[i]],col="black",lwd=lwds)
x.sel <- union(x.sel,i)
}
}
if(click.i == none.i)
{
x.sel <- NULL
for(i in 1:length(m.names))
{
lines(xseq,xn[,m.names[i]],col=cols[i],lwd=lwds)
}
}
if(click.i == all.i)
{
x.sel <- seq(1,length(m.names))
for(i in 1:length(m.names))
{
lines(xseq,xn[,m.names[i]],col="black",lwd=lwds)
}
}
}
c.sel <- clust[m.names[x.sel]]
x.ret <- names(clust[is.element(clust,c.sel)])
dev.off()
return(x.ret)
}
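#SmashSelect is normally driven by TraceChase rather than called directly:
#it receives the cluster assignment and one representative name per cluster,
#and clicking a representative selects its whole cluster.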
#this simply finds the traces in t.dat that are similar to targs
#note this is "complete" similarity other options may be
#"average" and "best"
GetCloser <- function(t.dat,targs,k=20){
x.names <- setdiff(names(t.dat),targs)
ct <- cor(t.dat[,x.names],t.dat[,targs])
x.max <- apply(ct,1,min)
y.names <- x.names[order(x.max,decreasing=T)[1:k]]
return(y.names)
}
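#Example usage (a sketch; pass traces only, without the Time column): cor()
#is taken against all target traces and the minimum correlation is ranked,
#so a trace must resemble every target ("complete" similarity) to score well
#nbrs <- GetCloser(RD.ex$t.dat[,-1], targs=c("X.1","X.12"), k=20)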
#this is a bit raw still
#Given a set of traces (t.dat) and a list of targets (targs)
#identify the 20 most similar traces using wr and the select levs.
#allow the user to select from those to add to the master list.
SimilarSelect <- function(t.dat,targs,wr,levs=NULL){
plot(t.dat[,1],t.dat[,targs[1]],type="n",ylim=c(min(t.dat[-1]),(length(targs)+50)*.2))
sf <- 0
for(i in targs){lines(t.dat[,1],t.dat[,i]+sf);sf<-sf+.2}
a.names <- setdiff(names(t.dat)[-1],targs)
rjct <- rep(0,length(a.names))
names(rjct) <- a.names
done=FALSE
tps <- seq(1:nrow(t.dat))
if(!is.null(levs)){tps <- tps[is.element(wr,levs)]}
while(!done)
{
if(sum(rjct==0) < 21)
{done=TRUE}
else
{
x.names <- GetCloser(t.dat[tps,c(a.names[rjct==0],targs)],targs)
rjct[x.names] <- 1
y.names <- TraceSelect(t.dat,,x.names,wr)
if(length(y.names)==0){done=TRUE}
else
{
targs <- c(targs,y.names)
for(i in y.names){lines(t.dat[,1],t.dat[,i]+sf);sf<-sf+.2}
}
}
}
return(targs)
#plot targs and allow user to
#paint region of interest if you can do this it makes a very good window adjust function.
#find matches within t.dat
#show matches in trace select allow user to choose.
#merge all selected and return that list.
}
##############################################################################################
# Interactive Image analysis
##############################################################################################
#Function to automatically rename images from the time I used a different naming scheme
ImageRenamer<-function(dat){
image.names<-grep("img",names(dat))
for(i in 1:length(image.names)){
names(dat)[image.names[i]]<-paste("img",i,sep="")
}
return(dat)
}
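#Example usage (a sketch): renumbers any list entries whose names contain
#"img" to the img1..imgN scheme used elsewhere in this file
#RD.ex <- ImageRenamer(RD.ex)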
#How to create a function to select and add images to the specified experiment files
#from the current working directory.
ImageFiller<-function(dat){
require(png)
potential.images<-list.files(pattern='png')
print(potential.images)
#bringtotop(-1)
print("######################")
print("These are the images you have the option of selecting")
print("Now select the images to fill in for image 1 to 8")
for(i in 1:8){
image.to.add<-select.list(list.files(pattern='png'),title=paste('img',i,sep=''))
if(image.to.add==""){dat[[paste('img',i,sep='')]]<-NULL
}else{
dat[[paste('img',i,sep='')]]<-png::readPNG(image.to.add)
}
}
return(dat)
}
ImageFillerv2 <- function(dat, img_name_vec){
if( is.null(img_name_vec) ){
img_name_vec<-c(
"bf.gfp.tritc.start.png",
"gfp.tritc.start.ci.ltl.rs.png",
"tritc.start.ci.ltl.rs.png",
"gfp.start.ci.ltl.rs.png",
"bf.start.lab.png",
"fura2.png",
"fura2.divide.start.png",
"roi.img.png")
image_question <- T
}else{
image_question <- F
}
# Add images
for( j in 1:length(img_name_vec) ){
dat[[ paste0("img",j) ]] <- tryCatch(png::readPNG(img_name_vec[j]), error=function(e)NULL)
}
if(image_question == T){
		cat('\nThese are the images I have attempted to load for you.\nIf any are NULL and you want different images, say yes to the\nnext question. You will be asked to select a png image for each location.\n\n')
		cat(img_name_vec, sep='\n')
cat('\nDO YOU WANT DIFFERENT IMAGES[y,n]?\n')
img_reselect <- scan(n=1,what='character')
if( img_reselect=='y' ){
cat("\nAlright buddy I am going to give you options if you don't\nwant any image there just go ahead and press 0\n\n")
png_imgs <- list.files(pattern='png')
for( j in 1:8 ){
cat("\nWhat do you want for image ", j, '\n')
selection <- menu(png_imgs)
				if(selection==0){
					dat[[paste0("img",j)]] <- NULL
					cat('\nI have left position ',j,' empty\n')
				}else{
					dat[[paste0("img",j)]] <- png::readPNG(png_imgs[selection])
					cat('\nI have added ', png_imgs[selection],' to position ',j,'\n')
				}
}
}
}
return(dat)
}
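#Example usage (a sketch; run from a directory containing the experiment's
#png exports; passing img_name_vec=NULL uses the default file-name list
#above and offers to reselect interactively):
#RD.ex <- ImageFillerv2(RD.ex, img_name_vec=NULL)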
# Function locates single cell or groups of cells on plot.
# Needs more optional assignments
cell.veiw.2048<-function(dat, img=NULL, cell=NULL, cells=NULL, cols=NULL,lmain="", bcex=.5, plot.new=T, cell.name=T){
if(plot.new){dev.new()}
require(png)
require(zoom)
par(mar=c(0,0,1,0))
cells.x<-dat$c.dat[cells,"center.x"]
cells.y<-dat$c.dat[cells,"center.y"]
cell.x<-dat$c.dat[cell,"center.x"]
cell.y<-dat$c.dat[cell,"center.y"]
if(is.null(img)){img<-dat$img1}
else{img<-img}
if(is.null(cols)){cols="white"}
else{cols=cols}
plot(0, 0, xlim=c(0,2048),ylim=c(2048,0), main=lmain,xaxs="i", yaxs="i", xlab="Pixels", ylab="Pixels")
rasterImage(img, 0, 2048, 2048, 0)
points(cell.x, cell.y, col=cols, pch=4, cex=1)
text(cell.x, cell.y, labels=cell, col=cols, pos=2, cex=1)
points(cells.x, cells.y, col="white", pch=4, cex=bcex)
text(cells.x, cells.y, labels=dat$c.dat[cells,1], col="white", pch=4, pos=2, cex=bcex)
}
cell.view<-function(dat, cell=NULL,img=NULL, zoom=TRUE, cols=NULL,lmain="", bcex=.8, labs=T, plot.new=T, cell.name=T){
if(plot.new){dev.new()}
require(png)
par(mar=c(0,0,1,0))
x<-dat$c.dat[,"center.x"]
y<-dat$c.dat[,"center.y"]
cell.x<-dat$c.dat[cell,"center.x"]
cell.y<-dat$c.dat[cell,"center.y"]
if(is.null(img)){img<-dat$img1}
else{img<-img}
if(is.null(cols)){cols="white"}
else{cols=cols}
img.dimy<-dim(img)[1]
img.dimx<-dim(img)[2]
plot(0, 0, xlim=c(0,img.dimx),ylim=c(img.dimy,0), main=lmain,xaxs="i", yaxs="i", xlab="Pixels", ylab="Pixels")
rasterImage(img, 0, img.dimy, img.dimx, 0)
if(labs){
if(!is.null(cell)){
points(cell.x, cell.y, col=cols, pch=0, cex=2)
text(cell.x, cell.y, labels=cell, col=cols, pos=2, cex=bcex)
}
else{
points(x, y, col=cols, pch=4, cex=2)
text(x, y, labels=dat$c.dat[,1], col=cols, pch=0, pos=2, cex=bcex)
}
}
if(zoom==TRUE & length(cell)>1){
cell.1<-row.names(dat$c.dat[order(dat$c.dat$center.x),])
cell<-intersect(cell,cell.1)
multi.pic.zoom(dat,cell,img)
}
}
cell.zoom.640.480<-function(dat, img=NULL, cell=NULL, zoom=NULL, cols=NULL, labs=T, plot.new=T, cell.name=T)
{
if(plot.new){dev.new()}
require(png)
require(zoom)
par(mar=c(0,0,0,0))
x<-dat$c.dat[,"center.x"]
y<-dat$c.dat[,"center.y"]
cell.x<-dat$c.dat[cell,"center.x"]
cell.y<-dat$c.dat[cell,"center.y"]
if(is.null(img)){img<-dat$img1}
else{img<-img}
if(is.null(cols)){cols="white"}
else{cols=cols}
plot(0, 0, xlim=c(0,640),ylim=c(480,0), xaxs="i", yaxs="i", xlab="Pixels", ylab="Pixels")
rasterImage(img, 0, 480, 640, 0)
if(labs){
if(!is.null(cell)){
points(cell.x, cell.y, col=cols )
text(cell.x, cell.y, labels=cell, col=cols, pos=2, cex=.8)
}
else{
points(x, y, col=cols)
text(x, y, labels=dat$c.dat[,1], col=cols, pos=2, cex=.5)
}}
if(!is.null(zoom)){
zoomplot.zoom(x=cell.x, y=cell.y, fact=zoom)
}
else{zm()}
}
XYtrace.640.480 <- function(dat, img=NULL, cols=NULL, labs=T){
x.coor<-grep("\\.x",names(dat$c.dat), value=T, ignore.case=T)
y.coor<-grep("\\.y",names(dat$c.dat), value=T, ignore.case=T)
area<-grep("area",names(dat$c.dat), value=T, ignore.case=T)
lab1<-grep("cgrp",names(dat$c.dat), value=T, ignore.case=T)
if(length(lab1)==0){lab1<-grep("gfp.1",names(dat$c.dat), value=T, ignore.case=T)}
lab1.1<-grep("cgrp",names(dat$c.dat), value=T, ignore.case=T)
if(length(lab1.1)==0){lab1.1<-grep("gfp.2",names(dat$c.dat), value=T, ignore.case=T)}
lab2<-grep("ib4",names(dat$c.dat), value=T, ignore.case=T)
if(length(lab2)==0){lab2<-grep("tritc",names(dat$c.dat), value=T, ignore.case=T)}
cell.coor<-dat$c.dat[,c(x.coor, y.coor)]
# select the names of the collumns containing coordinates
levs <- unique(dat$w.dat[,"wr1"])
levs<-setdiff(levs, "")
if(labs==TRUE){
if(is.null(cols)){cols="grey5"} else{cols=cols}}
pch=16
dev.new(height=4,width=12)
dev.new(width=10, height=8)
dev.new(height=8,width=12)
lmain<-"XY ROI"
dev.set(dev.list()[2])
par(mar=c(0,0,0,0))
plot(0, 0, xlim=c(0,640),ylim=c(480,0),xaxs="i", yaxs="i",col=cols,pch=".")
if(is.null(img)){img<-dat$img1}
if(!is.null(img)){rasterImage(img, 0, 480, 640, 0);points(cell.coor[,1],cell.coor[,2],col=cols,pch=0,cex=2.4)}
else{
points(cell.coor[,1],cell.coor[,2], col=cols, cex=dat$c.dat[,area]/200)
points(cell.coor[,1],cell.coor[,2],col=cols, pch=4)}
i <- identify(cell.coor[,1],cell.coor[,2],n=1,plot=F, col=NA, tolerance=0.05)
i.names<-row.names(dat$c.dat)[i]
while(length(i) > 0)
{ #selected name of cell
s.names <- row.names(dat$c.dat)[i]
dev.set(dev.list()[1])
PeakFunc2(dat,s.names,3,30,TRUE,,lmain=lmain)
dev.set(dev.list()[2])
# If a cell is selected, that has already been selected,
# then remove that cell from the list
if(length(intersect(i.names,s.names))==1){
i.names<-setdiff(i.names,s.names)
points(cell.coor[s.names,1],cell.coor[s.names,2],col="grey90",pch=0,cex=2.4)
points(cell.coor[i.names,1],cell.coor[i.names,2],col="red",pch=0,cex=2.4)}
		# If it hasn't been selected, then add it to the list
else{i.names<-union(i.names,s.names)
points(cell.coor[i.names,1],cell.coor[i.names,2],col="red",pch=0,cex=2.4)}
if(length(i.names)>=2){dev.set(dev.list()[3]);LinesEvery.2(dat,m.names=i.names, plot.new=F)}
dev.set(dev.list()[2])
i <- identify(cell.coor[,1],cell.coor[,2],labels=dat$c.dat[,1],n=1,plot=T, pch=0,col="grey90", tolerance=0.05)
}
dev.off()
graphics.off()
return(dat$c.dat[i.names,1])
}
# Function allows for selection and deselection of cells to build stacked traces
XYtrace <- function(dat, cell=NULL, img=NULL, cols=NULL, labs=F, y.var=T){
graphics.off()
dat.name<-deparse(substitute(dat))
x.coor<-grep("\\.x",names(dat$c.dat), value=T, ignore.case=T)
if(length(x.coor)>1){x.coor<-"center.x"}
y.coor<-grep("\\.y",names(dat$c.dat), value=T, ignore.case=T)
	if(length(y.coor)>1){y.coor<-"center.y"}
area<-grep("area",names(dat$c.dat), value=T, ignore.case=T)
lab1<-grep("cgrp",names(dat$c.dat), value=T, ignore.case=T)
if(length(lab1)==0){lab1<-grep("gfp.1",names(dat$c.dat), value=T, ignore.case=T)}
lab1.1<-grep("cgrp",names(dat$c.dat), value=T, ignore.case=T)
if(length(lab1.1)==0){lab1.1<-grep("gfp.2",names(dat$c.dat), value=T, ignore.case=T)}
lab2<-grep("ib4",names(dat$c.dat), value=T, ignore.case=T)
if(length(lab2)==0){lab2<-grep("tritc",names(dat$c.dat), value=T, ignore.case=T)}
if(is.null(cell)){cell<-row.names(dat$c.dat)}
else{cell<-cell}
cell.coor<-dat$c.dat[cell,c(x.coor, y.coor)]
# select the names of the collumns containing coordinates
levs <- unique(dat$w.dat[,"wr1"])
levs<-setdiff(levs, "")
if(labs==TRUE){
if(is.null(cols)){cols="orangered1"} else{cols=cols}}
pch=16
dev.new(height=4,width=12)
dev.new(width=8, height=8)
dev.new(height=8,width=12)
lmain<-"XY ROI"
if(is.null(img)){img<-dat$img1}
img.dim.x<-dim(img)[1]
img.dim.y<-dim(img)[2]
dev.set(dev.list()[2])
par(mar=c(0,0,0,0))
plot(0, 0, xlim=c(0,img.dim.x),ylim=c(img.dim.y,0),xaxs="i", yaxs="i",col=cols,pch=".")
if(!is.null(img)){rasterImage(img, 0, img.dim.y, img.dim.x, 0);points(cell.coor[,1],cell.coor[,2],col=cols,pch=0)}
else{
points(cell.coor[,1],cell.coor[,2], col=cols, cex=dat$c.dat[,area]/200)
points(cell.coor[,1],cell.coor[,2],col=cols, pch=4)}
i <- identify(cell.coor[,1],cell.coor[,2],n=1,plot=F, col=NA, tolerance=0.05)
i.names<-row.names(dat$c.dat[cell,])[i]
while(length(i) > 0)
{ #selected name of cell
s.names <- row.names(dat$c.dat[cell,])[i]
dev.set(dev.list()[1])
if(y.var){PeakFunc6(dat,s.names, Plotit.both=F)}
else{PeakFunc5(dat,s.names, Plotit.both=T)}
dev.set(dev.list()[2])
# If a cell is selected, that has already been selected,
# then remove that cell from the list
if(length(intersect(i.names,s.names))==1){
i.names<-setdiff(i.names,s.names)
points(cell.coor[s.names,1],cell.coor[s.names,2],col="gray70",pch=0,cex=2.4)
points(cell.coor[i.names,1],cell.coor[i.names,2],col="red",pch=0,cex=2.4)
}
		# If it hasn't been selected, then add it to the list
else{i.names<-union(i.names,s.names)
points(cell.coor[i.names,1],cell.coor[i.names,2],col="red",pch=0,cex=2.4)}
if(length(i.names)>=1){
dev.set(dev.list()[3])
LinesEvery.5(dat,m.names=i.names, plot.new=F,img="img1", cols="black", dat.n=dat.name)}
dev.set(dev.list()[2])
i <- identify(cell.coor[,1],cell.coor[,2],labels=dat$c.dat[cell,1],n=1,plot=T, pch=0,col="white", tolerance=0.05)
}
dev.off()
graphics.off()
return(row.names(dat$c.dat[i.names,]))
}
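#Example usage (a sketch; RD.ex is a hypothetical RD experiment list): click
#cells on the image to toggle them in and out of the stacked-trace window;
#the selected cell names are returned on exit
#sel <- XYtrace(RD.ex, img=RD.ex$img1, y.var=TRUE)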
XYtrace.2<-function(dat, cells=NULL, img=NULL, cols=NULL, zoom=T, labs=T, yvar=F, zf=40, t.type=NULL, sf=1,plot.labs=T, bcex=1){
dat.name<-deparse(substitute(dat))
print(class(cells))
if(is.null(t.type)){t.type<-select.list(names(dat),title="Select a Trace")}
#setup first windows for analysis and give each of them names
dev.new(width=8, height=8)
pic.window<-dev.cur()
#plot image in the window
if(is.null(cells)){cells<-dat$c.dat$id
}else{cells<-cells}
#if(is.null(img)){img<-dat$img1}
if(is.null(img)){
img.name<-image.selector(dat)
img<-dat[[img.name]]
}
	if(is.null(cols)){cols<-"white"}
img.dim.y<-dim(img)[1]
img.dim.x<-dim(img)[2]
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0, xlim=c(0,img.dim.x),ylim=c(img.dim.y,0),xaxs="i", yaxs="i",col=cols,pch=".")
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
if(zoom){
zoom<-select.list(c("Manual", "Regional"), title="Zoom? Cancel=NO")
if(zoom=="Manual"){
#Select regions to zoom on
print("select X region first, then Y Region")
x.sel<-locator(n=2, type="p", col="Red")$x
y.sel<-locator(n=2, type="p", col="Red")$y
rect(x.sel[1],y.sel[2],x.sel[2],y.sel[1], border="red")
			# before moving on, let's shrink the image down by a factor of 1/2 to have a preview image
			# to refer to
dev.new(width=4, height=4)
pic.window.2<-dev.cur()
par(mar=c(0,0,0,0))
plot(0, 0, xlim=c(0,img.dim.x),ylim=c(img.dim.y,0),xaxs="i", yaxs="i",col=cols,pch=".")
if(!is.null(img)){
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
}
rect(x.sel[1],y.sel[2],x.sel[2],y.sel[1], border="red")
			# now I need to close the window and open a new one with the same type of selection
x.size<-abs(x.sel[1]-x.sel[2])
y.size<-abs(y.sel[1]-y.sel[2])
			#if you want to maintain the same aspect ratio
#width vs height ratio
x.plot.size<-8*(x.size/img.dim.x)
y.plot.size<-8*(y.size/img.dim.y)
#if you want to double the aspect ratio
#width vs height ratio
x.plot.size<-16*(x.size/img.dim.x)
y.plot.size<-16*(y.size/img.dim.y)
#plot the new image
dev.off(which=pic.window)
dev.new(width=x.plot.size, height=y.plot.size)
pic.window<-dev.cur()
par(mar=c(0,0,0,0))
plot(0, 0, xlim=c(x.sel[1],x.sel[2]),ylim=c(y.sel[2],y.sel[1]),xaxs="i", yaxs="i",pch=".")
rasterImage(img[y.sel[1]:y.sel[2],x.sel[1]:x.sel[2], ], x.sel[1], y.sel[2], x.sel[2], y.sel[1])
}
if(zoom=="Regional"){
rect(0,img.dim.y/2, img.dim.x/2, 0, border="blue",lwd=3)
rect(img.dim.x/2, img.dim.y/2, img.dim.x, 0, border="red", lwd=3)
rect(0, img.dim.y, img.dim.x/2, img.dim.y/2, border="green", lwd=3)
rect(img.dim.x/2, img.dim.y, img.dim.x, img.dim.y/2, border="purple", lwd=3)
rect(img.dim.x*1/4, img.dim.y*3/4, img.dim.x*3/4, img.dim.y*1/4, border="navy", lwd=3)
rect(img.dim.x*6/16, img.dim.y*10/16, img.dim.x*10/16, img.dim.y*6/16, border="red", lwd=3)
text.place.x<-c(.02, .52, .02, .52, .27,.395)
text.place.x<-text.place.x*img.dim.x
text.place.y<-c(.02, .02, .52, .52, .27,.395)
text.place.y<-text.place.y*img.dim.y
#text.y<-img.dim.y*round(text.place$y/img.dim.y, digits=2)
#text.x<-img.dim.x*round(text.place$x/img.dim.x, digits=2)
text(text.place.x, text.place.y, c(1,2,3,4,5,6), col=c("blue", "red", "green", "purple","navy","red"), cex=3)
region.selection<-as.numeric(select.list(as.character(c(1,2,3,4,5,6))))
if(region.selection==1){
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0,
xlim=c(0, img.dim.x/2),
ylim=c(img.dim.y/2,0),xaxs="i", yaxs="i",col=cols,pch="."
)
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
}
if(region.selection==2){
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0,
xlim=c(img.dim.x/2, img.dim.x),
ylim=c(img.dim.y/2,0),xaxs="i", yaxs="i",col=cols,pch="."
)
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
}
if(region.selection==3){
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0,
xlim=c(0, img.dim.x/2),
ylim=c(img.dim.y/2,img.dim.y),xaxs="i", yaxs="i",col=cols,pch="."
)
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
}
if(region.selection==4){
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0,
xlim=c(img.dim.x/2, img.dim.x),
ylim=c(img.dim.y/2,img.dim.y),xaxs="i", yaxs="i",col=cols,pch="."
)
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
#rasterImage(
# img[img.dim.y/2:img.dim.y,img.dim.x/2:img.dim.x,],
# img.dim.x/2, img.dim.y, img.dim.x, img.dim.y/2)
}
if(region.selection==5){
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0,
xlim=c(img.dim.x*1/4, img.dim.x*3/4),
ylim=c(img.dim.y*3/4,img.dim.y*1/4),xaxs="i", yaxs="i",col=cols,pch="."
)
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
}
if(region.selection==6){
dev.set(which=pic.window)
par(mar=c(0,0,0,0))
plot(0, 0,
xlim=c(img.dim.x*6/16, img.dim.x*10/16),
ylim=c(img.dim.y*10/16,img.dim.y*6/16),xaxs="i", yaxs="i",col=cols,pch="."
)
rasterImage(img, 0, img.dim.y, img.dim.x, 0)
}
}
}
#Define the column names
x.coor<-grep("\\.x",names(dat$c.dat), value=T, ignore.case=T)
if(length(x.coor)>1){x.coor<-"center.x"}
y.coor<-grep("\\.y",names(dat$c.dat), value=T, ignore.case=T)
if(length(y.coor)>1){y.coor<-"center.y"}
area<-grep("area",names(dat$c.dat), value=T, ignore.case=T)
if(length(area)>1){area<-"area"}
#Interactive Plot
dev.new(height=4,width=12)
trace.window<-dev.cur()
dev.new(height=8,width=12)
lines.window<-dev.cur()
cell.coor<-dat$c.dat[cells,c(x.coor, y.coor)]
dev.set(which=pic.window)
if(labs){points(cell.coor[,1],cell.coor[,2],col="gold", pch=4, cex=.1)}
i <- identify(cell.coor[,1],cell.coor[,2],n=1,plot=F, col=NA, tolerance=0.1)
i.names<-row.names(dat$c.dat[cells,])[i]
while(length(i) > 0)
{ #selected name of cell
s.names <- row.names(dat$c.dat[cells,])[i]
dev.set(which=trace.window)
if(yvar){PeakFunc7(dat,s.names, yvar=T, bcex=bcex, zf=zf, t.type=t.type,dat.n=dat.name)}
else{PeakFunc7(dat,s.names, yvar=F, zf=zf, bcex=bcex, t.type=t.type,dat.n=dat.name)}
dev.set(which=pic.window)
# If a cell is selected, that has already been selected,
# then remove that cell from the list
if(length(intersect(i.names,s.names))==1){
i.names<-setdiff(i.names,s.names)
#points(cell.coor[s.names,1],cell.coor[s.names,2],col="gray70",pch=0,cex=2.4)
#points(cell.coor[i.names,1],cell.coor[i.names,2],col="red",pch=0,cex=2.4)
}
# If it hasn't been selected, then add it to the list
else{i.names<-union(i.names,s.names)
#points(cell.coor[i.names,1],cell.coor[i.names,2],col="red",pch=0,cex=2.4)
}
if(length(i.names)>=1){
dev.set(which=lines.window)
LinesEvery.5(dat,m.names=i.names, plot.new=F, img=c("img1", "img2", "img6","img7"), cols="black",sf=sf, t.type=t.type)}
dev.set(which=pic.window)
#i <- identify(cell.coor[,1],cell.coor[,2],n=1,plot=F,col="white", tolerance=0.05)
i <- identify(cell.coor[,1],cell.coor[,2],labels=dat$c.dat[cells,1],n=1,plot=T, pch=0,col="white", tolerance=0.05, cex=.5)
}
dev.off()
graphics.off()
return(row.names(dat$c.dat[i.names,]))
}
# View Individual cell picture
multi.pic.zoom<-function(dat, m.names, img, labs=T,plot.new=T, zf=20){
col.row<-ceiling(sqrt(length(m.names)))
if(plot.new)
{
dev.new()
par(mfrow=c(col.row, col.row))
par(mar=c(0,0,0,0))
}
else{
par(mfrow=c(col.row, col.row))
par(mar=c(0,0,0,0))
}
m.names<-rev(m.names)
for(i in 1:length(m.names)){
#use the x and y extents separately (dim(img) returns rows, cols, channels)
img.dim.y<-dim(img)[1]
img.dim.x<-dim(img)[2]
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
if(left<=20){left=0; right=zf}
right<-x+zf
if(right>=img.dim.x-zf){left=img.dim.x-zf;right=img.dim.x}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=20){top=0; bottom=zf}
bottom<-y+zf
if(bottom>=img.dim.y-zf){top=img.dim.y-zf;bottom=img.dim.y}
par(xpd=TRUE)
xleft<-0
xright<-20
ytop<-0
ybottom<-20
plot(c(xright, xleft), c(ytop, ybottom), ylim=c(20,0) ,xaxs="i", yaxs="i", axes=F)
rasterImage(img[top:bottom,left:right,],xleft,ybottom,xright,ytop)
text(4,1.5, m.names[i], col="white", cex=.8)
box(lty = 1, col = "white",lwd=2)
text(16.5, 2, labels=dat$c.dat[m.names[i], "area"], col="white")
if(labs){
points(x=10,y=10, type="p", pch=3, cex=2,col="white")
# the labels below overprint at the same coordinates; they assume whichever of these columns exists in c.dat
text(16.5, 2, labels=dat$c.dat[m.names[i], "ROI.Area"], col="white")
text(16.5, 3.5, labels=dat$c.dat[m.names[i], "mean.gfp.1"], col="green")
text(16.5, 3.5, labels=dat$c.dat[m.names[i], "mean.gfp"], col="green")
text(16.5, 3.5, labels=dat$c.dat[m.names[i], "CGRP"], col="green")
text(16.5, 5, labels=dat$c.dat[m.names[i], "mean.tritc"], col="red")
text(16.5, 5, labels=dat$c.dat[m.names[i], "IB4"], col="red")
text(16.5, 6.5, labels=dat$c.dat[m.names[i], "mean.dapi"], col="blue")
}
}
}
# View individual cell pictures; creates a png image
# must assign multi.pic.zoom.2 to a variable name
# For use in LinesEvery.4
multi.pic.zoom.2<-function(dat, m.names, img, labs=F, zf=NULL, cols=NULL){
if(is.null(cols)){cols<-rep("white", length(m.names))}else{cols<-cols}
col.row<-ceiling(sqrt(length(m.names)))
#png("tmp.png",width=6,height=6,units="in",res=72,bg="transparent", type="cairo")
#dev.new()
png('tmp.png', res=70)
par(mfrow=c(col.row, col.row))
par(mar=c(0,0,0,0))
#else{par(mar=c(0,0,0,0))}
m.names<-rev(m.names)
img.dim<-as.numeric(dim(img)[1])
for(i in 1:length(m.names)){
if(is.null(zf)){zf<-20}else{zf<-zf}
#zf<-20
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
right<-x+zf
if(left<=zf){left=0; right=zf}
if(right>=img.dim){left=img.dim-zf;right=img.dim
}else{right=right}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=zf){top=0; bottom=zf}
bottom<-y+zf
if(bottom>=img.dim-zf*2){top=img.dim-zf;bottom=img.dim}
par(xpd=TRUE)
xleft<-0
xright<-20
ytop<-0
ybottom<-20
plot(c(xright, xleft), c(ytop, ybottom), ylim=c(20,0) ,xaxs="i", yaxs="i", axes=F)
if(length(dim(img))>2){rasterImage(img[top:bottom,left:right,],xleft,ytop,xright,ybottom)
}else{rasterImage(img[top:bottom,left:right],xleft,ytop,xright,ybottom)}
points(x=10,y=10, type="p", pch=3, cex=2,col="white")
text(4,1.5, labels=m.names[i], col="white", cex=1.3)
#text(4,1.5, labels=m.names[i], col=cols[i], cex=1.2)
box(lty = 1, col = "white",lwd=2)
if(labs){
#label.names<-c("ROI.Area", "mean.gfp.1", "CGRP", "IB4")
label.names<-c("area","mean.gfp","mean.tritc", "mean.dapi")
label.y.location<-c(2,3.5,5,6.5)
label.cols<-c("white", "green", "red", "blue")
for(j in 1:length(label.names)){
text(16.5, label.y.location[j], labels=tryCatch(round(dat$c.dat[m.names[i],label.names[j]],digits=5),error=function(e) NULL), col=label.cols[j])
}
}
}
dev.off()
tmp.png <- png::readPNG("tmp.png")
unlink("tmp.png")
return(tmp.png)
}
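# Usage sketch (wrapped in if(FALSE) so it never runs on source): draw the
# returned montage raster into a fresh plot. `tmp.rd` and its img1 slot are
# hypothetical stand-ins for a real RD object.
if(FALSE){
mpz <- multi.pic.zoom.2(tmp.rd, c("X.1", "X.4"), tmp.rd$img1, labs=TRUE, zf=40)
plot(0:1, 0:1, type="n", axes=FALSE, ann=FALSE)
rasterImage(mpz, 0, 0, 1, 1)
}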
#multipiczoom
multi.pic.zoom.3<-function(dat, m.names, img, labs=T,plot.new=T, zf=20){
col.row<-ceiling(sqrt(length(m.names)))
if(plot.new){
dev.new()
par(mfrow=c(col.row, col.row))
par(mar=c(0,0,0,0))
}
else{par(mar=c(0,0,0,0))}
m.names<-rev(m.names)
for(i in 1:length(m.names)){
x<-dat$c.dat[m.names[i],"center.x"]
left<-x-zf
if(left<=20){left=0; right=zf}
right<-x+zf
if(right>=2048-zf){left=2048-zf;right=2048} # assumes a 2048x2048 image
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=20){top=0; bottom=zf}
bottom<-y+zf
if(bottom>=2048-zf){top=2048-zf;bottom=2048} # assumes a 2048x2048 image
par(xpd=TRUE)
xleft<-0
xright<-20
ytop<-0
ybottom<-20
plot(c(xright, xleft), c(ytop, ybottom), ylim=c(20,0) ,xaxs="i", yaxs="i", axes=F)
rasterImage(img[top:bottom,left:right,],xleft,ytop,xright,ybottom)
points(x=10,y=10, type="p", pch=3, cex=2,col="white")
box(lty = 1, col = "white",lwd=2)
if(labs){
text(4,1.5, labels=m.names[i], col="white", cex=1.2)
text(16.5, 2, labels=dat$c.dat[m.names[i], "area"], col="white")
text(16.5, 2, labels=dat$c.dat[m.names[i], "ROI.Area"], col="white")
text(16.5, 3.5, labels=dat$c.dat[m.names[i], "mean.gfp.1"], col="green")
text(16.5, 3.5, labels=dat$c.dat[m.names[i], "mean.gfp"], col="green")
text(16.5, 3.5, labels=dat$c.dat[m.names[i], "CGRP"], col="green")
text(16.5, 5, labels=dat$c.dat[m.names[i], "mean.tritc"], col="red")
text(16.5, 5, labels=dat$c.dat[m.names[i], "IB4"], col="red")
text(16.5, 6.5, labels=dat$c.dat[m.names[i], "mean.dapi"], col="blue")
}
}
}
image.selector<-function(tmp.rd, multi=T){
img.names<-grep(names(tmp.rd),pattern="img", value=T)
null.images<-vector()
for(i in 1:length(img.names)){null.images[i]<-!is.null(tmp.rd[[img.names[i]]])}
img.logical<-cbind(img.names,null.images)
real.imgs<-which(img.logical[,2]=="TRUE")
img.names<-img.logical[real.imgs, 1]
dev.new(width=ceiling(sqrt(length(img.names)))*4, height=ceiling(sqrt(length(img.names)))*4)
img.sel<-dev.cur()
par(mfrow=c(ceiling(sqrt(length(img.names))),ceiling(sqrt(length(img.names)))))
for(i in 1:length(img.names)){
par(mar=c(0,0,0,0))
img<-tmp.rd[[img.names[[i]]]]
img.dim.y<-dim(img)[1]
img.dim.x<-dim(img)[2]
top<-img.dim.y*.25
bottom<-img.dim.y*.75
left<-img.dim.x*.25
right<-img.dim.x*.75
plot(0, 0,
xlim=c(img.dim.x*.4, img.dim.x*.6),
ylim=c(img.dim.y*.4,img.dim.y*.6),xaxt="n", yaxt="n",pch="."
)
rasterImage(img[top:bottom,left:right,], 0, img.dim.y, img.dim.x, 0)
text(img.dim.x*.45,img.dim.y*.45,labels=paste(i), cex=2, col="white")
}
img<-select.list(img.names, title="Images", multiple=multi)
dev.off(img.sel)
return(img)
}
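# Usage sketch (not run on source): preview the center crop of every img slot
# in a hypothetical RD object `tmp.rd` and pick one or more by name.
if(FALSE){
chosen.imgs <- image.selector(tmp.rd, multi=TRUE)
}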
PointTrace <- function(lookat,png=F,col=rep("black",nrow(lookat)),pch=16,cex=1,lmain="PointTrace",ylim=c(-2,2),x.trt=NULL,y.trt=NULL,wr="wr1",t.names=NULL){
if(!is.null(x.trt)){lookat["x"] <- lookat[,x.trt]}
if(!is.null(y.trt)){lookat["y"] <- lookat[,y.trt]}
dev.new(height=4,width=14)
rr.dev <- dev.cur()
dev.new(height=4,width=4)
plot(lookat[,"x"],lookat[,"y"],col=col,pch=pch,cex=cex,main=lmain,xlab=x.trt,ylab=y.trt, ylim=ylim)
ret.list <- NULL
i <- identify(lookat[,"x"],lookat[,"y"],n=1,plot=F)
my.dev <- dev.cur()
while(length(i) > 0)
{
x.names <- lookat[i,"trace.id"]
#points(lookat[i,"x"],lookat[i,"y"],pch=8,cex=.5)
rn.i <- row.names(lookat)[i]
tmp <- get(lookat[i,"rd.name"])
levs <- unique(tmp$w.dat[,"wr1"])
lmain <- paste(i,lookat[i,"rd.name"])
#LinesEvery(tmp$t.dat,,x.names,tmp$w.dat[,"wr1"],levs,lmain=lmain)
dev.set(which=rr.dev)
PeakFunc5(tmp,x.names,lmain=lookat[i,"rd.name"])
if(!is.null(t.names)){mtext(paste(t.names,tmp$c.dat[x.names,t.names],collapse=":"))}
if(png==TRUE)
{
f.name <- paste(lookat[i,"rd.name"],lookat[i,"trace.id"],"png",sep=".")
png(f.name,height=600,width=1200)
PeakFunc2(tmp$t.dat,x.names,3,30,TRUE,tmp$w.dat[,wr],lmain=lookat[i,"rd.name"])
dev.off()
}
dev.set(which=my.dev)
if(is.element(rn.i,ret.list))
{points(lookat[i,"x"],lookat[i,"y"],col=col[i],pch=pch,cex=cex);ret.list <- setdiff(ret.list,rn.i)}
else
{points(lookat[i,"x"],lookat[i,"y"],col="red",pch=pch,cex=cex);ret.list <- union(rn.i,ret.list)}
i <- identify(lookat[,"x"],lookat[,"y"],n=1,plot=F)
}
return(ret.list)
}
PointTrace.2 <- function(lookat,png=F,col=rep("black",nrow(lookat)),pch=16,cex=1,lmain="PointTrace",x.trt=NULL,y.trt=NULL,wr="wr1",t.names=NULL){
graphics.off()
if(!is.null(x.trt)){lookat["x"] <- lookat[,x.trt]}else{lookat["x"] <- lookat[,select.list(names(lookat))]}
if(!is.null(y.trt)){lookat["y"] <- lookat[,y.trt]}else{lookat["y"] <- lookat[,select.list(names(lookat))]}
dev.new(height=4,width=14)
rr.dev <- dev.cur()
dev.new(height=4,width=4)
plot(lookat[,"x"],lookat[,"y"],pch=pch,cex=cex,main=lmain,xlab=x.trt,ylab=y.trt, col="white")
text(lookat[,"x"],lookat[,"y"],labels=lookat$trace.id)
ret.list <- NULL
i <- identify(lookat[,"x"],lookat[,"y"],n=1,plot=F)
my.dev <- dev.cur()
while(length(i) > 0)
{
x.names <- lookat[i,"trace.id"]
#points(lookat[i,"x"],lookat[i,"y"],pch=8,cex=.5)
rn.i <- row.names(lookat)[i]
tmp <- get(lookat[i,"rd.name"])
levs <- unique(tmp$w.dat[,"wr1"])
lmain <- paste(i,lookat[i,"rd.name"])
#LinesEvery(tmp$t.dat,,x.names,tmp$w.dat[,"wr1"],levs,lmain=lmain)
dev.set(which=rr.dev)
#PeakFunc5(tmp,x.names,lmain=lookat[i,"rd.name"])
rtpcr.multi.plotter(tmp,x.names,pdf=F,bcex=1, melt.plot=T, plot.new=F)
if(!is.null(t.names)){mtext(paste(t.names,tmp$c.dat[x.names,t.names],collapse=":"))}
if(png==TRUE)
{
f.name <- paste(lookat[i,"rd.name"],lookat[i,"trace.id"],"png",sep=".")
png(f.name,height=600,width=1200)
PeakFunc2(tmp$t.dat,x.names,3,30,TRUE,tmp$w.dat[,wr],lmain=lookat[i,"rd.name"])
dev.off()
}
dev.set(which=my.dev)
if(is.element(rn.i,ret.list))
{points(lookat[i,"x"],lookat[i,"y"],col=col[i],pch=pch,cex=cex);ret.list <- setdiff(ret.list,rn.i)}
else
{points(lookat[i,"x"],lookat[i,"y"],col="red",pch=pch,cex=cex);ret.list <- union(rn.i,ret.list)}
i <- identify(lookat[,"x"],lookat[,"y"],n=1,plot=F)
}
return(ret.list)
}
##############################################################################################
# Multi Experiment Analysis
##############################################################################################
#calculate means and sems for all cnames of dat
#divided by the levels of fac.name
#make a bargraph of these
MeanSemGraph <- function(dat,cnames,fac.name,t.cols=NULL,ylab=NULL,main.lab=NULL,x.labs=NULL,bt=.1,lgc="topleft",ylim=NULL){
semfunc <- function(x)
{
n <- sum(!is.na(x))
if(n < 3){return(NA)}
return(sd(x,na.rm=T)/sqrt(n))
}
x <- as.factor(dat[,fac.name])
x.levs <- levels(x)
if(1/length(x.levs) < bt){bt <- 1/length(x.levs)}
sem.levs <- paste(x.levs,"sem",sep=".")
x.res <- data.frame(apply(dat[x==x.levs[1],cnames,drop=F],2,mean,na.rm=T))
for(i in x.levs)
{
x.res[i] <- apply(dat[x==i,cnames,drop=F],2,mean,na.rm=T)
x.res[paste(i,"sem",sep=".")] <- apply(dat[x==i,cnames,drop=F],2,semfunc)
}
xlim <- c(1,length(cnames)+length(x.levs)*bt)
if(is.null(ylim)){ylim <- c(-.02,max(x.res[,x.levs]+x.res[,sem.levs]*2)*1.2)}
if(is.null(t.cols)){t.cols <- rainbow(length(x.levs));names(t.cols) <- x.levs}
plot(x.res[,x.levs[1]],xlim=xlim,ylim=ylim,type="n",xaxt="n",xlab="",ylab=ylab,main=main.lab)
for(i in 1:length(x.levs))
{
x1 <- seq(1,length(cnames))+(i-1)*bt
y1 <- x.res[,x.levs[i]]
rect(x1,rep(0,length(x1)),x1+bt,y1,col=t.cols[x.levs[i]])
}
for(i in 1:length(x.levs))
{
x1 <- seq(1,length(cnames))+(i-1)*bt+(bt)/2
y1 <- x.res[,x.levs[i]] + x.res[,sem.levs[i]]*2
y2 <- x.res[,x.levs[i]] - x.res[,sem.levs[i]]*2
arrows(x1,y2,x1,y1,angle=90,col="black",length=bt*.25,code=3)
}
if(is.null(x.labs)){x.labs <- row.names(x.res)}
text(seq(1,length(cnames)),rep(-.02,length(cnames)),x.labs,pos=4,cex=.8,offset=0)
legend(lgc,col=t.cols,names(t.cols),pch=15)
return(x.res[,-1])
}
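# Minimal sketch on synthetic data (not run on source): two measures split by
# a two-level factor; every name below is made up for illustration.
if(FALSE){
df <- data.frame(resp1=rnorm(20, 1), resp2=rnorm(20, 2),
geno=rep(c("wt", "ko"), each=10))
ms <- MeanSemGraph(df, cnames=c("resp1", "resp2"), fac.name="geno", ylab="response")
}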
cells.plotter<-function(dat, tmp.names, subset.n=5,multi=TRUE, pic=TRUE){
rd.names<-unique(dat$rd.name)
rd.list<-list()
for(i in 1:length(rd.names)){
x.names<-row.names(dat[tmp.names,])[dat[tmp.names,"rd.name"]==rd.names[i]]
x.names<-dat[x.names,"id"]
x.names<-setdiff(x.names, "NA")
x.name<-setdiff(x.names, NA)
rd.list[[i]]<-x.names
names(rd.list)[i]<-rd.names[i]
}
if(multi){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesStack.2(get(tmp), rd.list[[i]], names(rd.list[i]), subset.n=subset.n)
rm(tmp)
}
}
if(pic){
for(i in 1:length(rd.list)){
LinesEvery.3(get(names(rd.list)[i]), rd.list[[i]],img=get(names(rd.list)[i])$img1, lmain=names(rd.list[i]))
}
}
return(rd.list)
}
bg.plotter<-function(gid.bin, dat, subset.n=5,multi=TRUE, pic=TRUE){
tmp.names<-row.names(dat)[dat$gid.bin==gid.bin]
rd.names<-unique(dat$rd.name)
rd.list<-list()
for(i in 1:length(rd.names)){
x.names<-row.names(dat[tmp.names,])[dat[tmp.names,"rd.name"]==rd.names[i]]
x.names<-dat[x.names,"id"]
x.names<-setdiff(x.names, "NA")
x.name<-setdiff(x.names, NA)
rd.list[[i]]<-x.names
names(rd.list)[i]<-rd.names[i]
}
if(multi){
for(i in 1:length(rd.list)){
LinesStack.2(get(names(rd.list)[i]), rd.list[[i]], names(rd.list[i]), subset.n=subset.n)
}
}
if(pic){
for(i in 1:length(rd.list)){
LinesEvery.3(get(names(rd.list)[i]), rd.list[[i]],img=get(names(rd.list)[i])$img1, lmain=names(rd.list[i]))
}
}
return(rd.list)
}
pf.plotter<-function(dat,pf, subset.n=5,multi=TRUE, pic=TRUE){
tmp.names<-row.names(dat)[dat$pf==pf]
rd.names<-unique(dat$rd.name)
rd.list<-list()
for(i in 1:length(rd.names)){
x.names<-row.names(dat[tmp.names,])[dat[tmp.names,"rd.name"]==rd.names[i]]
x.names<-dat[x.names,"id"]
#x.names<-na.exclude(x.names)
rd.list[[i]]<-x.names
names(rd.list)[i]<-rd.names[i]
}
if(multi){
for(i in 1:length(rd.list)){
LinesStack.2(get(names(rd.list)[i]), rd.list[[i]], names(rd.list[i]), subset.n=subset.n)
}
}
if(pic){
for(i in 1:length(rd.list)){
LinesEvery.3(get(names(rd.list)[i]), rd.list[[i]],img=get(names(rd.list)[i])$img3, lmain=names(rd.list[i]))
}
}
return(rd.list)
}
#Updated with Linesevery3
levs.plotter<-function(dat,levs,levs.no, subset.n=5,multi=F, pic=T, click=F){
tmp.names<-row.names(dat)[dat[,levs]==1]
rd.names<-unique(dat$rd.name)
rd.list<-list()
for(i in 1:length(rd.names)){
x.names<-row.names(dat[tmp.names,])[dat[tmp.names,"rd.name"]==rd.names[i]]
x.names<-dat[x.names,"id"]
#x.names<-na.exclude(x.names)
rd.list[[i]]<-x.names
names(rd.list)[i]<-rd.names[i]
}
if(multi){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesStack(get(tmp), rd.list[[i]], names(rd.list[i]), subset.n=subset.n)
}
}
if(pic){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesEvery.3(get(tmp), rd.list[[i]],img=get(names(rd.list)[i])$img3, lmain=names(rd.list[i]), pic.plot=F, XY.plot=T)
}
}
if(click){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
Trace.Click.3(get(tmp), rd.list[[i]],img=get(names(rd.list)[i])$img3, lmain=names(rd.list[i]), pic.plot=F, XY.plot=T)
}
}
rm(list=names(rd.list))
return(rd.list)
}
all.plotter<-function(dat, subset.n=5,multi=F, pic=F, click=T){
tmp.names<-row.names(dat)
rd.names<-unique(dat$rd.name)
rd.list<-list()
for(i in 1:length(rd.names)){
x.names<-row.names(dat[tmp.names,])[dat[tmp.names,"rd.name"]==rd.names[i]]
x.names<-dat[x.names,"id"]
#x.names<-na.exclude(x.names)
rd.list[[i]]<-x.names
names(rd.list)[i]<-rd.names[i]
}
if(multi){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesStack(get(tmp), rd.list[[i]], names(rd.list[i]), subset.n=subset.n)
}
}
if(pic){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesEvery.3(get(tmp), rd.list[[i]],img=get(names(rd.list)[i])$img3, lmain=names(rd.list[i]), pic.plot=F, XY.plot=T)
}
}
selected.cells<-list()
if(click){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
selected.cells[[i]]<-Trace.Click.3(get(tmp), rd.list[[i]])
names(selected.cells)[i]<-rd.names[i]
}
}
rm(list=names(rd.list))
if(multi==T | pic==T){return(rd.list)}
if(click==T){return(selected.cells)}
}
noci.plotter<-function(dat,type, subset.n=5,multi=F, pic=T){
tmp.names<-row.names(dat)[dat$noci.type==type]
tmp.names<-setdiff(tmp.names, "NA")
rd.names<-unique(dat$rd.name)
rd.list<-list()
for(i in 1:length(rd.names)){
x.names<-row.names(dat[tmp.names,])[dat[tmp.names,"rd.name"]==rd.names[i]]
x.names<-dat[x.names,"id"]
x.names<-setdiff(x.names, c("NA",NA))
#x.names<-na.exclude(x.names)
rd.list[[i]]<-x.names
names(rd.list)[i]<-rd.names[i]
}
if(multi){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesStack.2(get(tmp), rd.list[[i]], names(rd.list[i]), subset.n=subset.n)
rm(tmp)
}
}
if(pic){
for(i in 1:length(rd.list)){
tmp<-load(paste(names(rd.list)[i],".rdata",sep=""))
LinesEvery.3(get(tmp), rd.list[[i]],img=get(names(rd.list)[i])$img3, lmain=names(rd.list[i]))
rm(tmp)
}
}
rm(list=names(rd.list))
return(rd.list)
}
### Function to select rows based on column parameters
# dat can be either a raw RD object or an RD dataframe
# ex dat -or- dat$bin
cellzand<-function(dat,collumn=NULL, parameter=1,cells=NULL){
bob<-list()
if(is.null(cells)){cells<-dat$c.dat$id}else{cells<-cells}
if(class(dat)=="list"){
dat.select<-select.list(names(dat), title="Select DataFrame")
dat<-dat[[dat.select]]
if(is.null(cells)){cells<-row.names(dat)}else{cells<-cells}
}else{
dat<-dat
if(is.null(cells)){cells<-row.names(dat)}else{cells<-cells}
}
if(is.null(collumn)){
collumn<-select.list(names(dat), multiple=T, title="Select Collumn")
}else(collumn<-collumn)
if(is.null(parameter)){
parameter<-1
}else(parameter<-parameter)
for(i in collumn){
bob[[i]]<-row.names(dat)[dat[,i]>=parameter]
}
bob<-Reduce(union, bob)
#bob<-intersect(bob, cells)
bob<-intersect(bob,cells)
return(bob)
}
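# Usage sketch (not run on source): union of cells scoring >= 1 in either of
# two bin columns; `tmp.rd` and the column names are assumptions.
if(FALSE){
responders <- cellzand(tmp.rd$bin, collumn=c("caps.300nM", "aitc.100uM"), parameter=1)
}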
### Function to select rows based on column parameters
# dat can be either a raw RD object or an RD dataframe
# ex dat -or- dat$bin
cellzand_tcd<-function(dat,collumn=NULL, parameter=1,cells=NULL){
cells_to_view <- list()
bob<-list()
if(is.null(cells)){cells<-dat$c.dat$id}else{cells<-cells}
if(class(dat)=="list"){
dat.select<-select.list(names(dat), title="Select DataFrame")
dat<-dat[[dat.select]]
if(is.null(cells)){
cells<-row.names(dat)}else{cells<-cells
}
}else{
dat<-dat
if(is.null(cells)){cells<-row.names(dat)}else{cells<-cells}
}
if(is.null(collumn)){
collumn<-select.list(names(dat), multiple=T, title="Select Collumn")
cells_to_view$name <- collumn
}else{collumn<-collumn}
if(is.null(parameter)){
parameter<-1
}else{parameter<-parameter}
for(i in collumn){
bob[[i]]<-row.names(dat)[dat[,i]>=parameter]
}
bob<-Reduce(union, bob)
bob<-intersect(bob,cells)
cells_to_view$cells <- bob
if( length(bob) == 0){
return(NA)
}else{
return(cells_to_view)
}
}
cellzor<-function(dat,collumn=NULL, parameter=1,cells=NULL){
bob<-list()
if(is.null(cells)){cells<-dat$c.dat$id}else{cells<-cells}
if(class(dat)=="list"){
dat.select<-select.list(names(dat), title="Select DataFrame")
dat<-dat[[dat.select]]
if(is.null(cells)){cells<-row.names(dat)}else{cells<-cells}
}else{
dat<-dat
if(is.null(cells)){cells<-row.names(dat)}else{cells<-cells}
}
if(is.null(collumn)){
collumn<-select.list(names(dat), multiple=T, title="Select Collumn")
}else(collumn<-collumn)
if(is.null(parameter)){
parameter<-1
}else(parameter<-parameter)
for(i in collumn){
bob[[i]]<-row.names(dat)[dat[,i]>=parameter]
}
bob<-Reduce(intersect, bob)
#bob<-intersect(bob, cells)
bob<-intersect(bob,cells)
return(bob)
}
cellz<-function(dat,collumn=NULL, parameter){
bob<-list()
if(class(dat)=="list"){
dat.select<-select.list(names(dat))
dat<-dat[[dat.select]]
}else{dat<-dat}
if(is.null(collumn)){
collumn<-select.list(names(dat), multiple=T)}
else(collumn<-collumn)
if(is.null(parameter)){
parameter<-1}
else(parameter<-parameter)
for(i in collumn){
bob[[i]]<-row.names(dat)[dat[,i]==parameter]
}
bob<-Reduce(union, bob)
#bob<-intersect(bob,cells)
return(bob)
}
# function to obtain sorted cell names based on
# column names from c.dat and bin
c.sort<-function(dat,char=NULL){
if(is.null(char)){char<-select.list(names(dat))}
sort.dir<-as.logical(select.list(c("TRUE", "FALSE"), title="Decreasing?")) #order() needs a logical, not the character from select.list
bob<-row.names(dat[order(dat[,char], decreasing=sort.dir),])
return(bob)
}
c.sort.2<-function(dat,cells=NULL,collumn=NULL){
if(class(dat)=="list"){
dat.selector<-select.list(intersect(names(dat), c("c.dat","bin", "scp")), title="Select DataFrame")
dat<-dat[[dat.selector]]
}else{dat<-dat}
if(is.null(collumn)){
collumn<-select.list(names(dat), title="Select Variable to Sort")
}else{collumn=collumn}
sort.dir<-as.logical(select.list(c("TRUE", "FALSE"), title="Decreasing?")) #order() needs a logical, not the character from select.list
bob<-row.names(dat[order(dat[,collumn], decreasing=sort.dir),])
if(!is.null(cells)){bob<-intersect(bob,cells)}
return(bob)
}
#create a list whose element names are taken from the input variable names
named.list<-function(...){
bob<-list(...)
names(bob)<-as.character(substitute((...)))[-1]
return(bob)
}
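# Usage sketch (not run on source): the element names come straight from the
# variable names passed in; `neurons` and `glia` are hypothetical cell groups.
if(FALSE){
neurons <- c("X.1", "X.2"); glia <- c("X.3")
ct <- named.list(neurons, glia)
names(ct) # "neurons" "glia"
}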
cell.ti<-function(dat, x.names, img=NULL){
graphics.off()
dev.new(width=15, height=5)
PeakFunc5(dat, x.names)
if(is.null(img)){img<-dat$img1}else{img<-img}
cell.view(dat,x.names,img)
multi.pic.zoom(dat, x.names, img, zf=80)
}
#given a list of file names collect and merge all bin scp and c.dat data
CollectMulti <- function(f.names,rd.names=NULL){
if(is.null(rd.names))
{
rd.names <- sub("\\.rdata$","",sub(".*\\/","",f.names),ignore.case=T)
for(i in f.names){load(i)}
}
b.names <- NULL
s.names <- NULL
cnames <- NULL
for(i in rd.names)
{
tmp <- get(i)
names(tmp$bin) <- make.names(names(tmp$bin))
names(tmp$scp) <- make.names(names(tmp$scp))
names(tmp$c.dat) <- make.names(names(tmp$c.dat))
b.names <- union(b.names,names(tmp$bin))
s.names <- union(s.names,names(tmp$scp))
cnames <- union(cnames,names(tmp$c.dat))
}
cnames <- setdiff(cnames,b.names)
s.names <- setdiff(s.names,b.names)
cnames <- setdiff(cnames,s.names)
tot.names <- c(b.names,s.names,cnames,"rd.name","trace.id")
ret.dat <- data.frame(matrix(rep(1,length(tot.names)),ncol=length(tot.names)))
names(ret.dat) <- tot.names
for(i in rd.names)
{
tmp <- get(i)
names(tmp$bin) <- make.names(names(tmp$bin))
names(tmp$scp) <- make.names(names(tmp$scp))
names(tmp$c.dat) <- make.names(names(tmp$c.dat))
ret.tmp <- data.frame(cbind(tmp$bin,tmp$scp,tmp$c.dat))
ret.tmp["rd.name"] <- i
ret.tmp["trace.id"] <- row.names(tmp$bin)
# ret.dat <- merge(ret.dat,ret.tmp)
i.names <- setdiff(tot.names,names(ret.tmp))
for(j in i.names)
{
ret.tmp[j] <- NA
}
ret.add <- ret.tmp[,tot.names]
ret.dat <- rbind(ret.dat,ret.add)
}
ret.dat <- ret.dat[-1,]
return(ret.dat)
}
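# Usage sketch (not run on source): pool bin/scp/c.dat across experiments;
# the .rdata file names and the RD objects inside them are hypothetical.
if(FALSE){
f.names <- c("RD.1.rdata", "RD.2.rdata")
all.dat <- CollectMulti(f.names)
table(all.dat$rd.name)
}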
census.brewer<-function(dat){
cell.types<-dat$cell.types
dev.new(width=10, height=5)
stacked.traces<-dev.cur()
LinesEvery.5.1(dat, sample(row.names(dat$c.dat)[1:5]), plot.new=F, lmain="WAZZZUPPPP", t.type="t.dat", img=dat$img1)
cat("HOWDY PARTNER How Many groups to census?\n")
bringToTop(-1)
group.number<-scan(n=1, what='numeric')
cat("\nEnter the names of your census groups seperated by '.'\n")
census.names<-scan(n=as.numeric(group.number), what='character')
dev.off(stacked.traces)
selected.cell.groups<-select.list(names(cell.types), title="Select groups to census", multiple=T)
cat("\nThese are the cells you have chosen\n")
print(selected.cell.groups)
census<-list()
for(i in 1:length(selected.cell.groups))
{
print(selected.cell.groups[i])
if(length(cell.types[[selected.cell.groups[i]]])>1){
census[[i]]<-tcd(dat, Reduce(union,cell.types[[selected.cell.groups[i]]]), save_question=F)
names(census[[i]])<-census.names
}else{
census[[i]]<-NA
}
}
print(names(census))
print(selected.cell.groups)
names(census)<-selected.cell.groups
dat$census<-census
dat <- census_to_table(dat)
return(dat)
}
census.brewer.2<-function(dat){
cell.types<-dat$cell.types
if(is.null(dat$census)){
dev.new(width=12, height=5)
stacked.traces<-dev.cur()
LinesEvery.5.1(dat, sample(row.names(dat$c.dat)[1:5]), plot.new=F, lmain="Reference Plot", t.type="t.dat", img=dat$img1)
print("How Many groups to census?")
group.number<-scan(n=1, what='numeric')
print("enter the names of your census groups seperated by '.' (6)")
census.names<-scan(n=as.numeric(group.number), what='character')
dev.off(stacked.traces)
}else{
census.names<-names(dat$census[[1]])[!is.na(names(dat$census[[1]]))]
}
selected.cell.groups<-select.list(names(cell.types), title="Select groups to census", multiple=T)
print("These are the cells you have chosen")
print(selected.cell.groups)
if(is.null(dat$census)){
dat$census<-list()
}
for(i in selected.cell.groups)
{
print(i)
if(length(cell.types[[i]])>1){
dat$census[[i]]<-tcd(dat, Reduce(union,cell.types[[i]]))
names(dat$census[[i]])<-census.names
}else{
dat$census[[i]]<-NULL
}
}
#names(dat$census)<-selected.cell.groups
#dat$census<-census
census.df<-dat$bin
census.df.cn<-names(dat$census[[1]])[!is.na(names(dat$census[[1]]))]#census data frame column names
for(a in 1:length(dat$census)){
for(b in 1:length(census.df.cn)){
census.df[dat$census[[a]][[b]],census.df.cn[b]]<-1
}
}
census.df[is.na(census.df)]<-0#convert all NA to 0
dat$bin<-census.df
return(dat)
}
census_to_table<-function(dat){
census.df<-dat$bin
i<-1
while( all(is.na(dat$census[[i]])) ){ #all() keeps the condition length-one when census[[i]] is a list
i<-i+1
}
(census.df.cn<-names(dat$census[[i]])[!is.na(names(dat$census[[i]]))]) #census data frame column names
for(i in 1:length(census.df.cn)){
census.df[census.df.cn[i]]<-0
}
for(a in 1:length(dat$census)){
if(!all(is.na(dat$census[[a]]))){ #skip census slots that are just NA
for(b in 1:length(census.df.cn)){
census.df[ dat$census[[a]][[b]],census.df.cn[b]]<-1
}
}
}
#census.df[is.na(census.df)]<-0#convert all NA to 0
dat$bin<-census.df
return(dat)
}
multi.plotter<-function(dat,cells,levs=NULL, values=NULL){
dat<-dat
tmp.rd<-dat
rd.name<-sub(".Rdata|.rdata", "", list.files(pattern="RD.1"))
if(is.null(levs)){levs<-select.list(names(dat$bin), multiple=TRUE)
}else{levs<-levs}
#if(is.null(values)){values<-select.list(names(dat$c.dat), multiple=T)
if(is.null(values)){values<-"area"
}else{values<-values}
#(img<-image.selector(dat))
img<-"img1"
channel<-list(c(1:3))
cell.length<-length(cells)
print(cell.length)
cseries1<-seq(1,2000,10)
cseries2<-seq(10,2000,10)
cell.groups<-max(which(cseries1-round(cell.length)<=0, arr.ind=T))
for(k in 1:cell.groups){
LinesEvery.5(tmp.rd,cells[cseries1[k]:cseries2[k]], plot.new=T, levs=levs, t.type="mp2", values=values, img=img, channel=channel, bcex=1.2, lmain=paste(rd.name,"LU.ide"))
}
}
TraceImpute.2 <- function(x,ts=seq(0,(length(x)-1))*.03,xspan=5/length(x),time.step=1/120,plotit=F, lmain=NULL){
if(is.null(lmain)){lmain="traceImpute.2"}else{lmain=lmain}
targ <- data.frame(ts=seq(min(ts),max(ts),by=time.step))
xloe <- loess(x ~ ts,span=xspan)
xp <- predict(xloe,newdata=targ)
cols<-rainbow(n=length(ts))
if(plotit)
{
plot(ts,x,pch=16,cex=1.2,xlab="time",ylab="Response", col="black", main=lmain)
points(targ[,1],xp,type="p", col="red", pch=16, cex=.8)
}
return(xp)
}
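# Sketch (not run on source): impute a noisy, coarsely sampled trace onto a
# regular 120 Hz grid and overlay the loess fit (red) on the raw points.
if(FALSE){
ts <- seq(0, 5, by=1/3)
x <- sin(ts) + rnorm(length(ts), sd=0.05)
xp <- TraceImpute.2(x, ts, plotit=TRUE)
}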
PulseImputer<-function(tmp,cell,pulse.names=NULL,plot.new=F,sf=8){
if(is.null(pulse.names)){pulse.names<-intersect(grep("^K",names(tmp$bin),ignore.case=T,value=T),tmp$w.dat[,"wr1"])
}else{pulse.names<-pulse.names}
if(plot.new){dev.new(width=1.5*length(pulse.names), height=2)}
par(mfrow=c(1,length(pulse.names)), mar=c(1,1,1,1))
for(i in 1:length(pulse.names)){
cell.pulse<-tmp$mp[tmp$w.dat[,"wr1"]==pulse.names[i],cell]
cell.time<-tmp$mp[tmp$w.dat[,"wr1"]==pulse.names[i],1]
alpha<-sf/length(cell.pulse)
TraceImpute.2(cell.pulse,cell.time,plotit=T,lmain=pulse.names[i], xspan=alpha)
}
}
#function to build a table with defined cell types, and selected columns
TableBrewer<-function(dat, ct.names=NULL, save=T, xlsx=T){
dat.name<-deparse(substitute(dat))
pulse<-select.list(names(dat$bin), multiple=T, title="select variables for table")
ct.sum<-data.frame()
if(is.null(ct.names)){
#F7: Load cell Types into the groups to pick with 'P'
cellTypeId <- grep('^cell', names(dat), value=T)
if(length(cellTypeId)>0){
if(length(cellTypeId)>1){
bringToTop(-1)
cat('\n Select the cell type to load in \n')
cellTypeId <- select.list(cellTypeId, title="Select Cell Type")
}
}
cell.type.names <- names(dat[[cellTypeId]])
cell.types <- dat[[cellTypeId]]
}else{
cell.type.names <- names(ct.names)
cell.types <- ct.names
}
for(z in 1:length(pulse)){
for(x in 1:length(cell.type.names)){
#first count the number of cells in the cell type group
ct.sum[as.character(dat.name),cell.type.names[x]]<-length(cell.types[[ cell.type.names[x] ]])
#sum the collumn with only the cell.types defined rows based on the current selected collumn
ct.sum[pulse[z],cell.type.names[x]]<-sum(dat$bin[cell.types[[ cell.type.names[x] ]],pulse[z]])
}
}
if(save){
print('Enter your file name without spaces')
save.names <- scan(n=1, what='character')
if(xlsx){
require(xlsx)
print(paste(save.names,'.xlsx',sep=''))
tryCatch(
write.xlsx(ct.sum, file=paste(save.names,'.xlsx',sep='')),
error=function(e) print("You Forgot to input cells.")
)
}else{
print(paste(save.names,'.csv',sep=''))
write.csv(ct.sum, file=paste(save.names,'.csv',sep=''))
}
}
return(ct.sum)
}
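# Usage sketch (not run on source): tally scored bin columns per cell-type
# group and write a csv; `RD.1` is a hypothetical RD object with cell types.
if(FALSE){
ct.sum <- TableBrewer(RD.1, save=TRUE, xlsx=FALSE)
}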
#########################################
##############################################################
#Function with 3 options: edit_ct, classify UL, classify thermos
#This follows Mario's scheme for classifying our cell types
Cell_Typer<-function(tmp.rd, edit_ct=T, UL_classify=T, thermos_classify=T){
dropped<-cellzand(tmp.rd$bin,"drop",1)
#selected bin and dropped
print("Select The response that coorespond to Neurons,
ex.
K+.40mM, and capsaicin.300nM")
neurons<-cellzand(tmp.rd$bin, , 1)
neurons<-setdiff(neurons, dropped)
greeR.Cells<-cellzand(tmp.rd$bin,"gfp.bin" ,1) #selected bin then gfp.bin
red.cells<-cellzand(tmp.rd$bin,"cy5.bin" ,1)
caG.Cells<-cellzand(tmp.rd$bin, grep("caps",names(tmp.rd$bin),ignore.case=T, value=T), 1)
aitc.cells<-cellzand(tmp.rd$bin, grep("aitc",names(tmp.rd$bin),ignore.case=T, value=T), 1)
menth.cells<-cellzand(tmp.rd$bin, grep("menth",names(tmp.rd$bin),ignore.case=T, value=T), 1)
menth.only<-setdiff(menth.cells, aitc.cells)
large.cells.330<-cellzand(tmp.rd$c.dat,"area" ,330)#selected c.dat then area
glia<-setdiff(tmp.rd$c.dat$id, neurons)
glia<-setdiff(glia, dropped)
peptidergic<-greeR.Cells
not.cgrp<-setdiff(neurons, greeR.Cells)
#Sort green cells first by capsaicin then aitc
G.C<-intersect(greeR.Cells, caG.Cells)
G.0<-setdiff(greeR.Cells, G.C)
G.C.A<-intersect(G.C, aitc.cells)
G.C<-setdiff(G.C, G.C.A)
G.A<-intersect(G.0, aitc.cells)
G.A<-setdiff(G.A, G.C.A)
G.0<-setdiff(G.0, G.A)
G.M<-intersect(G.0, menth.cells)
G.0<-setdiff(G.0, G.M)
#This gives us G.C, G.C.A, G.A, G.M, and G.0 (zero) under the green cells
#next we seperate red from unlabeled
nonpep<-intersect(not.cgrp, red.cells)
nonpep<-setdiff(nonpep, menth.only)
unlabeled<-setdiff(not.cgrp, nonpep)
#Chase down the red classes, the two that are pretty unambiguous are R.A, R.C and R.other
R.A<-intersect(nonpep, aitc.cells)
R.A<-setdiff(R.A, caG.Cells)
R.other<-setdiff(nonpep, R.A)
R.C<-intersect(R.other, caG.Cells)
R.C<-setdiff(R.C, aitc.cells)
R.other<-setdiff(R.other, R.C)
#This gives us our red groups: R.A, R.C and R.other
#Finally we chase down our unlabeled groups (unlabeled)
thermos<-menth.only
thermos<-intersect(thermos, neurons)
unlabeled<-setdiff(unlabeled, thermos)
UL<-intersect(large.cells.330,unlabeled)
UL<-intersect(UL,neurons)
UL<-setdiff(UL, caG.Cells)
UL<-setdiff(UL, aitc.cells)
US<-setdiff(unlabeled, UL)
US<-intersect(US,neurons)
US.A<-intersect(US, aitc.cells)
US.A<-setdiff(US.A, caG.Cells)
US.C<-intersect(US, caG.Cells)
US.C<-setdiff(US.C, US.A)
US.0<-setdiff(US, US.A)
US.0<-setdiff(US.0, US.C)
#review the autosorted cell classes. Remove the cells that are not part of each class and put into discard pile, press "1" to move cells to the discard pile
if(edit_ct){
cat(
"Review the autosorted cell classes. Remove the cells that are not part of each class
and put into discard pile, press '1' to move cells to the discard pile (button 1)
")
cat("Editing UL
Remove anything that responds to capsaicin, menthol or aitc.
Also remove cells labeled with IB4 or CGRP-GFP
")
UL.edit<-tcd(tmp.rd, c(UL))
discard<-UL.edit[[1]]
UL<-setdiff(UL, discard)
cat("Editing G.M
Remove cells that are not green or do not respond to menthol.
Also remove red cells and cells responding to AITC
")
G.M.edit<-tcd(tmp.rd, c(G.M))
discard1<-G.M.edit[[1]]
G.M<-setdiff(G.M, discard1)
discard<-union(discard, discard1)
cat("Editing G.0
Remove cells that respond to menthol, capsaicin or aitc,
cells that are not green and cells that are red.
")
G.0.edit<-tcd(tmp.rd, c(G.0))
discard1<-G.0.edit[[1]]
G.0<-setdiff(G.0, discard1)
discard<-union(discard, discard1)
cat("Editing G.A
Remove cells that respond to capsaicin or have a 'noisy' variable baseline,
or cells whose AITC response does not return to baseline"
)
G.A.edit<-tcd(tmp.rd, c(G.A))
discard1<-G.A.edit[[1]]
G.A<-setdiff(G.A, discard1)
discard<-union(discard, discard1)
cat("Editing G.C")
G.C.edit<-tcd(tmp.rd, c(G.C))
discard1<-G.C.edit[[1]]
G.C<-setdiff(G.C, discard1)
discard<-union(discard, discard1)
cat("Editing G.C.A")
G.C.A.edit<-tcd(tmp.rd, c(G.C.A))
discard1<-G.C.A.edit[[1]]
G.C.A<-setdiff(G.C.A, discard1)
discard<-union(discard, discard1)
cat("Editing R.A")
R.A.edit<-tcd(tmp.rd, c(R.A))
discard1<-R.A.edit[[1]]
R.A<-setdiff(R.A, discard1)
discard<-union(discard, discard1)
cat("Editing R.C
Remove cells that respond to aitc or are green
")
R.C.edit<-tcd(tmp.rd, c(R.C))
discard1<-R.C.edit[[1]]
R.C<-setdiff(R.C, discard1)
discard<-union(discard, discard1)
cat("Editing R.Other")
R.other.edit<-tcd(tmp.rd, c(R.other))
discard1<-R.other.edit[[1]]
R.other<-setdiff(R.other, discard1)
discard<-union(discard, discard1)
cat("Editing Thermosensors
Remove cells that either do not respond to menthol or
have an aitc response larger than the menthol response
")
thermos.edit<-tcd(tmp.rd, c(thermos))
discard1<-thermos.edit[[1]]
thermos<-setdiff(thermos, discard1)
discard<-union(discard, discard1)
cat("Editing US.A")
US.A.edit<-tcd(tmp.rd, c(US.A))
discard1<-US.A.edit[[1]]
US.A<-setdiff(US.A, discard1)
discard<-union(discard, discard1)
cat("Editing US.C")
US.C.edit<-tcd(tmp.rd, c(US.C))
discard1<-US.C.edit[[1]]
US.C<-setdiff(US.C, discard1)
discard<-union(discard, discard1)
cat("Editing US.0")
US.0.edit<-tcd(tmp.rd, c(US.0))
discard1<-US.0.edit[[1]]
US.0<-setdiff(US.0, discard1)
discard<-union(discard, discard1)
cat("
Sort the discard pile
1 UL
2 G.M
3 G.0
4 G.A
5 G.C
6 G.A.C
7 R.A
8 R.C
9 R.other
10 thermos
11 US.A
12 US.C
")
if(length(discard)>0){
hand.sort<-tcd(tmp.rd, c(discard))
#Union between hand sorted and edited autosort. Sort UL into 4 groups based on R3J response (1=prop, 2=jagged, 3=ide, 4=NE), Create a few thermos groups
UL<-union(UL, hand.sort[[1]])
G.M<-union(G.M, hand.sort[[2]])
G.0<-union(G.0, hand.sort[[3]])
G.A<-union(G.A, hand.sort[[4]])
G.C<-union(G.C, hand.sort[[5]])
G.C.A<-union(G.C.A, hand.sort[[6]])
R.A<-union(R.A, hand.sort[[7]])
R.C<-union(R.C, hand.sort[[8]])
R.other<-union(R.other, hand.sort[[9]])
thermos<-union(thermos, hand.sort[[10]])
US.A<-union(US.A, hand.sort[[11]])
US.C<-union(US.C, hand.sort[[12]])
}else{}
}else{}
if(UL_classify){
cat(" Sort the Unlabled Large into
1:Propriocepters
2:Jagged
3:IDE only
4:No Effect
")
UL.groups<-tcd(tmp.rd, c(UL))
UL.1<-UL.groups[[1]] #proprioceptor
UL.2<-UL.groups[[2]] #jagged
UL.3<-UL.groups[[3]] #IDE only
UL.4<-UL.groups[[4]] #no effect
}
if(thermos_classify){
thermos.groups<-tcd(tmp.rd, c(thermos))
thermos.high<-thermos.groups[[1]]
thermos.low<-thermos.groups[[2]]
thermos.C<-intersect(thermos, caG.Cells)
}
cell.types<-named.list(
neurons,
glia,
UL,
G.M,
G.0,
G.A,
G.C,
G.C.A,
R.A,
R.C,
R.other,
thermos,
US.A,
US.C,
US.0
)
if(UL_classify){
UL_ct<-named.list(
UL.1,
UL.2,
UL.3,
UL.4)
cell.types<-append(cell.types,UL_ct)
}else{}
if(thermos_classify){
thermos_ct<-named.list(
thermos.high,
thermos.low,
thermos.C)
cell.types<-append(cell.types,thermos_ct)
}else{}
tmp.rd$cell.types<-cell.types
return(tmp.rd)
}
##############################################################
#Function with 3 options. Edit_ct, classify UL , classify thermos
#This follows Marios scheme for classifying our cell types
#########################################
##############################################################
#Function with 3 options: edit_ct, classify UL, classify thermos
#This follows Marios scheme for classifying our cell types
#edit_ct=Logical, if true each cell class will be double checked
#UL_classify= If TRUE then classify large diameter cells
#GFP=logical, if TRUE then classify green cells
#cell_types=list input. This is mainly used if the large cell types have already been classified.
#if so then then the large cell types are passed straight to the cell_types
#181016 If the large diameter cells have been classified, then do not score again.
Cell_Typer_2<-function(tmp_rd, edit_ct=F, UL_classify=T, GFP=T, cell_types=NULL){
UL_classes_logic <- F #initialize so the flag exists even when no large classes are passed in
if(is.null(cell_types)){
large_cell_types_names <- NULL
}else{
#If your cell_types is not null do large celltyping
UL_classify <- T
#perform a test on your cell_types to see if there are large ones
#based on the names within cell_types
cell_types_names<-names(cell_types)
#find ones that have an L
large_cell_types_names<-grep("^L",cell_types_names,value=T)
#If you have any that have and L
if(length(large_cell_types_names)>1){
#Do not cell_type the large cells
UL_classify <- F
UL_ct<-cell_types[large_cell_types_names]
UL_classes_logic <- T
}
}
dropped<-cellzand(tmp_rd$bin,"drop",1)
#selected bin and dropped
cat("Select The response that coorespond to Neurons,
ex_
K+_40mM, and capsaicin_300nM
")
#identfy Neurons
neurons<-cellzand(tmp_rd$bin, , 1)
#Remove dropped cells from the neuron class
neurons<-setdiff(neurons, dropped)
#Identify green cells. Corrected with ROIReview
if(GFP){
green_cells<-cellzand(tmp_rd$bin,"gfp.bin" ,1) #selected bin then gfp_bin
}
#identify red cells
ib4_label <- grep("cy5|tritc", names(tmp_rd$bin), value=T)
red_cells<-cellzand(tmp_rd$bin,ib4_label ,1)
#define Unlabeled cells as not green or red labeling
if(GFP){
unlabeled<-setdiff(neurons, green_cells)
}else{unlabeled<-neurons}
unlabeled<-setdiff(unlabeled, red_cells)
#cells that respond to capsaicin
cap_cells<-cellzand(tmp_rd$bin,
grep("cap",names(tmp_rd$bin),ignore.case=T, value=T)[length(grep("cap",names(tmp_rd$bin),ignore.case=T, value=T))],
1)
#identify AITC responses
aitc_cells<-cellzand(tmp_rd$bin,
grep("aitc",names(tmp_rd$bin),ignore.case=T, value=T)[length(grep("aitc",names(tmp_rd$bin),ignore.case=T, value=T))],
1)
#Indentify Menthol Responses
menth_cells<-cellzand(tmp_rd$bin,
grep("men",names(tmp_rd$bin),ignore.case=T, value=T)[length(grep("men",names(tmp_rd$bin),ignore.case=T, value=T))],
1)
#Remove aitc responders to find trpm8 only cells
menth_only<-setdiff(menth_cells, aitc_cells)
#Find AITC and capsaicin
aitc_and_caps<-intersect(aitc_cells,cap_cells)
#define large cells as larger than 330 uM^2
large_cells_330<-cellzand(tmp_rd$c.dat,"area" ,330)
#define glia in a very weak way: anything that isn't a
#neuron is considered glia
glia<-setdiff(tmp_rd$c.dat$id, neurons)
glia<-setdiff(glia, dropped)
cell_types<-named.list(neurons, glia)
discard<-c()
####################
#GREEN Group
#Sort green cells first by capsaicin then aitc
if(GFP){
#G8 gfp+, menthol negative, capsaicin only
G8<-intersect(green_cells, cap_cells)
G8<-setdiff(G8, aitc_cells)
G8<-setdiff(G8, menth_cells)
#now clean the green group?
#G9 gfp+, menthol negative, aitc and capsaicin
#first discover cells that are positive for caps and aitc
#now intersect the green cells with a+ c+
G9<-intersect(green_cells, aitc_and_caps)
#G10 gfp+, AITC positive only
G10<-intersect(green_cells, aitc_cells)
#remove capsaicin from this group
G10<-setdiff(G10, cap_cells)
#G7 gfp+, Menthol + only
G7<-intersect(green_cells, menth_cells)
#remove aitc responders
G7<-setdiff(G7, aitc_cells)
#Create G7_capsaicin cells
G7_c<-intersect(G7,cap_cells)
#now create a group of green responding cells that are
#not classified by the previous green groups
#This groups contains miscored Menthol responses and the large cell groups
G_0<-setdiff(green_cells, c(G7,G8,G9,G10))
print(G_0)
}
########################################
#RED ONLY GROUP
########################################
#remove any green from red cells
if(GFP){
red_only<-setdiff(red_cells,green_cells)
}else{red_only<-red_cells}
#Chase down the red classes, the two that are pretty unambiguous are R_A, R_C and R_other
#R13 IB4 only,AITC only
R13<-intersect(red_only, aitc_cells)
#remove capsaisin responses from this group
R13<-setdiff(R13, cap_cells)
#R11 IB4 only, Capsaicin only
R11<-intersect(red_only, cap_cells)
#remove AITC from this group
R11<-setdiff(R11, aitc_cells)
#R12 IB4 only, Capsaicin and AITC
R12<-intersect(red_only, aitc_and_caps)
#R_0 Where the unclassified Red only cells are stored
R_0<-setdiff(red_only, c(R11,R12,R13))
#This gives us our red groups: R_A, R_C and R_other
#Finally we chase down our unlabeled groups (unlabeled)
#######################################
#Unlabeled Cell Types
#######################################
#N15 no-label, Menthol sensitive
#How do we find menthol responses larger or equal to the
#aitc response.
#1 find the cells that respond to menthol
#2 find cells taht respond to aitc
#3 Compare peak heights of these two responses.
#4 if the menthol response is at least 110% of the AITC response
#4a add to a new group
aitc_stat<-intersect(
grep(".max", names(tmp_rd$scp), ignore.case=T, value=T),
grep("aitc",names(tmp_rd$scp),ignore.case=T, value=T)
)
menth_stat<-intersect(
grep(".max", names(tmp_rd$scp), ignore.case=T, value=T),
grep("men",names(tmp_rd$scp),ignore.case=T, value=T)
)
menth_stat<-menth_stat[length(menth_stat)]
#find trpm8 and trpa1 containing neurons
menth_and_aitc_cells<-intersect(menth_cells, aitc_cells)
if(length(menth_and_aitc_cells) > 0 ){
menth_and_aitc_neurons<-intersect(neurons, menth_and_aitc_cells)
trpm8_trpa1<-c()
for(i in 1:length(menth_and_aitc_neurons)){
if(tmp_rd$scp[menth_and_aitc_neurons[i],menth_stat] >=
((tmp_rd$scp[menth_and_aitc_neurons[i],aitc_stat])*1.1)
){
trpm8_trpa1<-c(trpm8_trpa1,menth_and_aitc_neurons[i])
}
}
N15_a<-trpm8_trpa1
}else{
N15_a <- NULL
}
################################
#Unlabeled
################################
#Unlabeled smaller neurons responding to menthol and not AITC
N15<-menth_only
if(GFP){
N15<-setdiff(N15, G7)
}
#ensure these are neurons
N15<-intersect(N15, neurons)
#remove these cells from the unlabeled group
unlabeled<-setdiff(unlabeled, N15)
#N15_c Menthol capsaicin
N15_c<-intersect(N15, cap_cells)
#Now create an unlabeled large group of cells
UL<-intersect(large_cells_330,unlabeled)
#ensure they are neurons
UL<-intersect(UL,neurons)
#remove any capsaicin or aitc responders
UL<-setdiff(UL, c(cap_cells,aitc_cells))
#Create N14 and N16; US is a super category
US<-setdiff(unlabeled, UL)
US<-intersect(US,neurons)
#N14 unlabeled, AITC positive, capsaicin negative
N14<-intersect(US, aitc_cells)
N14<-setdiff(N14, cap_cells)
#N16 unlabeled, capsaicin positive
N16<-intersect(US, cap_cells)
N16<-setdiff(N16, aitc_cells)
#create a US_0 class where these additional values are stored
US_0<-setdiff(US, c(N14,N16))
#N14 is a miscellaneous class that stores additional unclassified cells
N14<-union(N14, R_0)
UC<- union(R_0, US_0)
#N14<-union(N14, US_0)
#######################################
#UL
#######################################
if(UL_classify){
cat(" Sort the Unlabled Large into
1:Propriocepters
2:Jagged
3:IDE only
4:No Effect
5:Discard
PRESS ANY KEY TO CONTINUE
")
scan(n=1)
if(length(UL) > 0){
UL_groups <- tcd(tmp_rd, c(UL), save_question=F)
L1<-UL_groups[[1]] #proprioceptor
L2<-UL_groups[[2]] #jagged
L3<-UL_groups[[3]] #IDE only
L4<-UL_groups[[4]] #no effect
if(edit_ct){discard<-union(discard, UL_groups[[5]])}
}else{
L1 <- NULL
L2 <- NULL
L3 <- NULL
L4 <- NULL
}
if(GFP){
cat(" Sort the Unlabled Large into
1:R3J IDE
2:no Effect
3:Discard
PRESS ANY KEY TO CONTINUE
")
scan(n=1)
if(length(G_0)>0){
G_0_sort<-tcd(tmp_rd, c(G_0), save_question=F)
L5<-G_0_sort[[1]]
L6<-G_0_sort[[2]]
if(edit_ct){discard<-union(discard,G_0_sort[[3]])}
}else{
L5 <- NULL
L6 <- NULL
}
}
}else{
UL<-large_cells_330
UL<-setdiff(UL, c(cap_cells, aitc_cells, menth_cells) )
#print(UL)
}
#review the autosorted cell classes. Remove the cells that are not part of each class and put into discard pile, press "1" to move cells to the discard pile
if(edit_ct){
cat(
"Review the autosorted cell classes_ Remove the cells that are not part of each class
and put into discard pile, press '1' to move cells to the discard pile (button 1)
")
if(GFP){
cat("G7: GFP+, Menthol Only")
G7.edit<-tcd(tmp_rd, c(G7))
discard1<-G7.edit[[1]]
G7<-setdiff(G7, discard1)
discard<-union(discard, discard1)
cat("G8 GFP+, Capsaicin Only")
G8.edit<-tcd(tmp_rd, c(G8))
discard1<-G8.edit[[1]]
G8<-setdiff(G8, discard1)
discard<-union(discard, discard1)
cat("G9 GFP+,AITC AND Capsaicin+")
G9.edit<-tcd(tmp_rd, c(G9))
discard1<-G9.edit[[1]]
G9<-setdiff(G9, discard1)
discard<-union(discard, discard1)
cat("G10 GFP+, AITC+ only")
G10.edit<-tcd(tmp_rd, c(G10))
discard1<-G10.edit[[1]]
G10<-setdiff(G10, discard1)
discard<-union(discard, discard1)
}
cat("R11 IB4+ Only, Capsaicin Only")
R11.edit<-tcd(tmp_rd, c(R11))
discard1<-R11.edit[[1]]
R11<-setdiff(R11, discard1)
discard<-union(discard, discard1)
cat("R12 IB4 only, Capsaicin and AITC only")
R12.edit<-tcd(tmp_rd, c(R12))
discard1<-R12.edit[[1]]
R12<-setdiff(R12, discard1)
discard<-union(discard, discard1)
cat("R13 IB4 only, AITC only")
R13.edit<-tcd(tmp_rd, c(R13))
discard1<-R13.edit[[1]]
R13<-setdiff(R13, discard1)
discard<-union(discard, discard1)
cat("N14 No-label, capsaicin only")
N14.edit<-tcd(tmp_rd, c(N14))
discard1<-N14.edit[[1]]
N14<-setdiff(N14, discard1)
discard<-union(discard, discard1)
cat("N15 No-label, Menthol")
N15.edit<-tcd(tmp_rd, c(N15))
discard1<-N15.edit[[1]]
N15<-setdiff(N15, discard1)
discard<-union(discard, discard1)
#Remove cells that either do not respond to menthol or have an aitc response larger than the menthol response
cat("N16 No-label, Capsaicin only")
N16.edit<-tcd(tmp_rd, c(N16))
discard1<-N16.edit[[1]]
N16<-setdiff(N16, discard1)
discard<-union(discard, discard1)
cat("N17 No-label, Menthol and AITC+ CONVINCING TRPM8 AND TRPA1")
N17.edit<-tcd(tmp.rd, N17)
discard1<-N17.edit[[1]]
N17<-setdiff(N17,discard1)
discard<-union(discard, discard1)
cat("
Sort the discard pile
#1 Large
#2 Large.green
#3 G7 (G.M)
#4 G8 (G.C)
#5 G9 (G.A.C)
#6 G10 (G.A)
#7 R11 (R.C)
#8 R12 (R.A.C)
#9 R13 (R.A)
#10 N14 (US)
#11 N15 (thermos)
#12 N16 (US.C)
#13 N15_a (trpm8 trpa1)
")
if(length(discard)>0){
hand_sort<-tcd(tmp_rd, c(discard))
#Union between hand sorted and edited autosort. Sort UL into 4 groups based on R3J response (1=prop, 2=jagged, 3=ide, 4=NE). Create a few thermos groups
Large.sort<-tcd(tmp_rd, hand_sort[[1]])
if(GFP){
Large.green.sort<-tcd(tmp_rd, hand_sort[[2]])
G7<-union(G7, hand_sort[[3]])
G8<-union(G8, hand_sort[[4]])
G9<-union(G9, hand_sort[[5]])
G10<-union(G10, hand_sort[[6]])
}
R11<-union(R11, hand_sort[[7]])
R12<-union(R12, hand_sort[[8]])
R13<-union(R13, hand_sort[[9]])
N14<-union(N14, hand_sort[[10]])
N15<-union(N15, hand_sort[[11]])
N16<-union(N16, hand_sort[[12]])
N15_a<-union(N15_a, hand_sort[[13]])
}else{}#discard option
}else{}#edit_ct
if(UL_classify){
UL_ct<-named.list(
L1,
L2,
L3,
L4
)
cell_types<-append(cell_types,UL_ct)
if(GFP){
UL_gfp_ct<-named.list(
L5,
L6
)
cell_types<-append(cell_types,UL_gfp_ct)
}else{
}
}else{
if(UL_classes_logic){
cell_types<-append(cell_types,UL_ct)
}else{
cell_types<-append(cell_types,named.list(UL))
}
}
if(GFP){
gfp_ct<-named.list(
G7,
G7_c,
G8,
G9,
G10
)
cell_types<-append(cell_types,gfp_ct)
}else{}
red_ul_ct<-named.list(
R11,
R12,
R13,
N14,
N15,
N15_c,
N15_a,
N16,
UC
)
cell_types<-append(cell_types,red_ul_ct)
tmp_rd$cell_types<-cell_types
for(i in 1:length(cell_types)){
print(
paste(
names(tmp_rd$cell_types)[i],
"=",
length( tmp_rd$cell_types[[i]] )
)
)
}
return(tmp_rd)
}
# Build an animated gif from a series of pdf or png files
gif_maker<-function(dense=200, fps=2, file.name=NULL, type='png'){
require(magick)
#select the reader for the input type
if(type=='pdf'){
reader <- get( paste0('image_read_', "pdf") )
}
if(type=='png'){
reader <- get('image_read')
}
#MAKE FILE NAME
if(is.null(file.name)){
cat("\nThis function will create a gif for either png's or pdfs.\nPlease Enter the name of the file you want to create.\nex. pdfs_in_gif.png\n")
file.name<-scan(n=1,what="character")
}
#ASKING AND ANSWERING QUESTIONS
cat("\nLets create a gif with this data, below are all",type,"s in your experiment \n")
cat(list.files(pattern=type),sep="\n")
pdf_imgs<-list.files(pattern=type)
cat("How many images would you like in your gif? \n")
imgs_for_gif<-scan(n=1)
#SELECT EACH IMAGE FOR THE GIF
pdfs_for_gif<-c()
for(i in 1:imgs_for_gif){
img_selection<-menu(list.files(pattern=type),title=paste("Select image ",i))
pdfs_for_gif[i]<-pdf_imgs[as.numeric(img_selection)]
cat("These are the selected images \n")
cat(pdfs_for_gif,sep="\n")
}
#BEGIN BUILDING THE GIF; THE FIRST FRAME GETS A RED BORDER
gif<-reader(pdfs_for_gif[1],density=dense)
gif<-image_border(gif,"red","10x10")
for(i in 2:length(pdfs_for_gif)){
gifz<-reader(pdfs_for_gif[i],density=dense)
gifz<-image_border(gifz,"black","10x10")
gif<-c(gif,gifz)
}
animation<-image_animate(gif,fps=fps)
image_write(animation,paste0(file.name,'.gif'))
}
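# Usage sketch (not run on source): stitch pngs from the working directory
# into a 2 fps gif; the prompts drive the image selection.
if(FALSE){
gif_maker(dense=150, fps=2, file.name="trace_summary", type="png")
}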
# Build an animated gif from a series of png files
gif_png_maker<-function(dense=200,fps=2,file.name=NULL){
require(magick)
if(is.null(file.name)){
cat("Write the name fo the file you would like the ned image to be \n")
file.name<-scan(n=1,what="character")
file.name <- paste0(file.name,'.gif')
}
cat("Lets create a gif with this data, below are all pngs in your experiment \n")
cat(list.files(pattern="[pP][nN][gG]"),sep="\n")
imgs<-list.files(pattern="[pP][nN][gG]")
cat("How many images would you like in your gif? \n")
imgs_to_add <-scan(n=1)
imgs_for_gif<-c()
for(i in 1:imgs_to_add){
img_selection <- menu(list.files(pattern="[pP][nN][gG]"),title=paste("Select image ",i))
imgs_for_gif[i] <- imgs[as.numeric(img_selection)]
cat("These are the selected images \n")
cat(imgs_for_gif,sep="\n")
}
#dense<-200
gif<-image_read(imgs_for_gif[1], density=dense)
gif<-image_border(gif, "red", "10x10")
for(i in 2:length(imgs_for_gif)){
gifz<-image_read(imgs_for_gif[i], density=dense)
gifz<-image_border(gifz,"black","10x10")
gif<-c(gif,gifz)
}
#fps=2
animation<-image_animate(gif,fps=fps)
image_write(animation,file.name)
}
Trace_select_grid<-function(dat, x.names, levs=select.list(names(dat$bin)), t.type="blc", preselect=T, l.col="red", window.w=10, window.h=10, title1="hi"){
x.names<-rev(x.names)
#Now create 3 extra spaces for buttons
xn <- length(x.names)
num.grid <- xn+4
#This is the number of grids for the rows
nr <- floor(sqrt(num.grid))
#this is the number of grids for the columns
nc <- ceiling((num.grid)/nr)
#this is the maximum value needed to acquire the matrix of interest
mtx <- max(nr,nc)
#this helps to find the center location of each cell
dx <- seq(0,1,length.out=(mtx+1))[-1]
#this defines the size between the cells
sl <- (dx[2]-dx[1])/2
#This relocates the cells to the far left
dx <- dx-sl
all.x <- as.vector(matrix(rep(dx,mtx),byrow=F,ncol=mtx))
all.y <- as.vector(matrix(rep(dx,mtx),nrow=mtx,byrow=T))
#Lee's trace image plotter
if(is.null(levs)){
levs<-setdiff(unique(dat$w.dat$wr1),"")
}else{levs<-levs}
levs_min<-min(as.numeric(row.names(which(dat$w.dat["wr1"]==levs,arr.ind=T))))
levs_max<-max(as.numeric(row.names(which(dat$w.dat["wr1"]==levs,arr.ind=T))))
levs_min<-which(row.names(dat$blc)==as.character(levs_min))
levs_max<-which(row.names(dat$blc)==as.character(levs_max))
peak_min<-min(dat[[t.type]][levs_min:levs_max,dat$c.dat$id])
peak_max<-max(dat[[t.type]][levs_min:levs_max,dat$c.dat$id])*1.4
#now loop through the data and create png plots of each region
png.name<-c()
start.time<-Sys.time()
for(i in 1:xn){
png.name[i]<-paste("tmp_png_",i,".png", sep="")
png(png.name[i], 40,40, res=20, bg="transparent")
par(bty="n",mai=c(0,0,0,0))
plot(dat[[t.type]][ levs_min:levs_max, x.names[i] ],type='l',lwd=2,xaxt='n',yaxt='n',col="white", ylim=c(-0.2,peak_max))
dev.off()
#print(i)
}
end_time<-Sys.time()
print(paste("Elapsed time saving:",end_time-start.time))
#now lets open up single view window
dev.new(width=14,height=4,title="SingleCell")
trace_view <- dev.cur()
#Open the grid window
dev.new(height=window.w,width=window.h,canvas="black",title=title1)
grid_view <- dev.cur()
op <- par(mar=c(0,0,0,0))
plot(c(0,1),c(0,1),xaxt="n",yaxt="n",type="n",ylab="",xlab="")
require(png)
start.time<-Sys.time()
for(i in 1:xn){
tmp_img<-png::readPNG(png.name[i])
dim(tmp_img)
xl <- all.x[i]-sl*.9
xr <- all.x[i]+sl*.9
xt <- all.y[i]-sl*.9
xb <- all.y[i]+sl*.9
dev.set(grid_view)
rasterImage(tmp_img,xl,xt,xr,xb)
unlink(png.name[i])
}
end.time<-Sys.time()
print(paste("Elapsed plot time", end.time-start.time))
cexr <- sl/.05
text(all.x[xn+1],all.y[xn+1],"Done",col="white",cex= cexr)
text(all.x[xn+2],all.y[xn+2],"All",col="white",cex= cexr)
text(all.x[xn+3],all.y[xn+3],"None",col="white",cex= cexr)
text(all.x[xn+4],all.y[xn+4],"Reset",col="white",cex= cexr)
fg <- rep("black",length(all.x))
all.sel <- rep(0, xn) #default scores so the !preselect path has something to toggle
names(all.sel) <- x.names
if(preselect){
all.sel <- dat$bin[x.names,levs]
names(all.sel) <- x.names
fg[1:xn]<-all.sel
fg[fg=="1"]<-"red"
fg[fg=="0"]<-"blue"
}else{
fg[1:xn]<-"blue"
}
#fg[1:xn] <- "blue"
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=3)
cexd<-4
#first click defines the split
#create a named squence, where all are scored as a 0
#name it
#fg<-all.sel
#fg[fg==1]="red"
#fg[fg==0] <- "blue"
#symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexr)
not.done=TRUE
#Click to define
if(!preselect){
click1 <- locator(n=1)
#this is how Kevin finds the click location
dist <- sqrt((click1$x[[1]]-all.x)^2 + (click1$y[[1]]-all.y)^2)
sel.i <- which.min(dist)
print(sel.i)
###Done
if(sel.i == xn+1){
not.done=FALSE
return(all.sel)
}
###All
if(sel.i == xn+2){
all.sel[1:xn] <- 1
fg[1:xn] <- l.col
}
###None
if(sel.i == xn+3){
all.sel[1:xn] <- 0
fg[1:xn] <- "blue"
}
###Reset
if(sel.i == xn+4){
#make everything score to a 0
all.sel[] <- 0
#now recolor them
fg[1:xn] <- "blue"
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexd)
#now click again
click1 <- locator(n=1)
#this isnhow kevin find the click location
dist <- sqrt((click1$x[[1]]-all.x)^2 + (click1$y[[1]]-all.y)^2)
sel.i <- which.min(dist)
dev.set(grid_view)
#now from 1 to the value selected
neg.i <- 1:max((sel.i-1),1)
#make everything above your selection 0
all.sel[neg.i] <- 0
#now from the selection to the end
pos.i <- sel.i:xn
#score as a 1
all.sel[pos.i] <- 1
#define the colors
fg[neg.i] <- "blue"
fg[pos.i] <- "red"
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexd)
}
if(sel.i <= xn){
#go to trace view
dev.set(trace_view)
#plot the trace
PeakFunc7(dat,x.names[sel.i], t.type="blc") #bcex is not defined in this function, so use the PeakFunc7 default
#go back to the grid
dev.set(grid_view)
#now from 1 to the value selected
neg.i <- 1:max((sel.i-1),1)
#make everything above your selection 0
all.sel[neg.i] <- 0
#now from selection to the start
pos.i <- sel.i:xn
#score as a 1
all.sel[pos.i] <- 1
#define the colors
fg[neg.i] <- "blue"
fg[pos.i] <- "red"
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexd)
}
}
while(not.done){
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexd)
click1 <- locator(n=1)
dist <- sqrt((click1$x[[1]]-all.x)^2 + (click1$y[[1]]-all.y)^2)
sel.i <- which.min(dist)
###Done
if(sel.i == xn+1){
not.done=FALSE
return(all.sel)
}
###All
if(sel.i == xn+2){
all.sel[1:xn] <- 1
fg[1:xn] <- l.col
}
###None
if(sel.i == xn+3){
all.sel[1:xn] <- 0
fg[1:xn] <- "blue"
}
###Reset
if(sel.i == xn+4){
#make everything score to a 0
all.sel[] <- 0
#now recolor them
fg[1:xn] <- "blue"
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexd)
#now click again
click1 <- locator(n=1)
#this is how we find the click location
dist <- sqrt((click1$x[[1]]-all.x)^2 + (click1$y[[1]]-all.y)^2)
sel.i <- which.min(dist)
print(sel.i)
dev.set(grid_view)
#now from 1 to the value selected
neg.i <- 1:max((sel.i-1),1)
#make everything above your selection 0
all.sel[neg.i] <- 0
#now from the selection to the end
pos.i <- sel.i:xn
#score as a 1
all.sel[pos.i] <- 1
#define the colors
fg[neg.i] <- "blue"
fg[pos.i] <- "red"
symbols(all.x,all.y,squares=rep(sl*1.9,length(all.x)),add=T,inches=F,fg=fg,lwd=cexd)
}
if(sel.i <= xn){
#go to trace view
dev.set(trace_view)
#plot the trace
PeakFunc7(dat,x.names[sel.i], t.type="blc",bcex=bcex)
#go back to the grid
dev.set(grid_view)
if(all.sel[sel.i] ==0)
{
all.sel[sel.i] <- 1
fg[sel.i] <- l.col
}else{
all.sel[sel.i] <- 0
fg[sel.i] <- "blue"
}
}
}
}
dice <- function(x, n,min.n=10){
x.lst <- split(x, as.integer((seq_along(x) - 1) / n))
x.i <- length(x.lst)
if(length(x.lst[[x.i]]) < min.n & x.i > 1)
{
x.lst[[x.i-1]] <- c(x.lst[[x.i-1]],x.lst[[x.i]])
x.lst <- x.lst[1:(x.i-1)]
}
return(x.lst)
}
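#Example (hypothetical input): dice(paste0("X.", 1:25), 10, min.n=6)
#splits the 25 names into chunks of 10, 10 and 5; the 5-name tail is
#shorter than min.n, so it is merged into the previous chunk,
#giving chunks of sizes 10 and 15.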
#################################
#Welcome to a new method to score cells
#################################
RDView_2<-function(dat, cells=NULL, levs=NULL){
dat.name<-deparse(substitute(dat))
cat(
"HOWDY partner, we R bout to score some rowdy responses \n
from your cells. Please select what we should score \n
and how we should initially sort this data. \n")
if(is.null(levs)){
levs<-setdiff(unique(dat$w.dat$wr1),"")
}
if(is.null(cells)){
cells<-dat$c.dat$id
}
cat(
"\nWhich window region would you like to score?\n \n What do you say?\n")
lev<-levs[menu(levs)]
#lev<-levs[26]
#how would you like to sort this variable?
cat("#############\nAnd how shall we sort? \n ############### \n")
sorted.cells<-c.sort.2(dat$scp[grep(lev, names(dat$scp),value=T)],cells)
#sorted.cells
subset.list<-dice(sorted.cells, 300, 300/4)
#subset.list
for(x.names in subset.list){
graphics.off()
scored.cells<-Trace_select_grid(dat,x.names, lev, t.type="blc", preselect=T)
dat$bin[names(which(scored.cells==1)),lev]=1
dat$bin[names(which(scored.cells==0)),lev]=0
cat("would you like to continue scoring?")
choice<-select.list(c("yes","no"))
if(choice=="yes"){
}else{
print("your dun")
break
}
}
assign(dat.name,dat, envir=.GlobalEnv)
}
osmo_correct<-function(vol_des_mL=50, osmo_original=281, osmo_desired=300){
osmo_no_glucose <- 270
gluc_original_M <- osmo_original - osmo_no_glucose
gluc_desired_M <- osmo_desired - osmo_no_glucose
glucose_fw <- 180
gluc_original_grams<-(gluc_original_M)/1000 * glucose_fw * (vol_des_mL)/1000
gluc_desired_grams<-(gluc_desired_M)/1000 * glucose_fw * (vol_des_mL)/1000
glucose_to_add <- gluc_desired_grams-gluc_original_grams
cat(paste("\nGood Day Sir, to correct your original osmolarity from",osmo_original,"to", osmo_desired, "please add, \n"))
cat(paste(glucose_to_add*1000, "mg D-glucose"))
cat("\nto your original solution.\n")
}
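#Example: osmo_correct(50, 281, 300) reports 171 mg of D-glucose,
#since (30 - 11) mM x 180 g/mol x 0.05 L = 0.171 g.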
#######################################################
#TOPVIEW DEVELOPED BY KEVIN CHASE
########################################################
LinesEvery.TV <- function(dat,m.names, img=dat$img1, pic.plot=TRUE, multi.pic=T, zf=NULL, t.type="mp", snr=NULL, lmain="", cols=NULL, levs=NULL, levs.cols="grey90", m.order=NULL,rtag=NULL, rtag2=NULL, rtag3=NULL, plot.new=F, sf=1, lw=2, bcex=.6, p.ht=7, p.wd=10, lns=T, pts=F){
require(png)
if(is.character(t.type)){t.dat<-dat[[t.type]]} #if a trace type was given, use it
else{t.type<-menu(names(dat));t.dat<-dat[[t.type]]} #otherwise ask which data to plot
wr<-dat$w.dat[,"wr1"]
if(is.null(levs)){levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")}
m.names <- intersect(m.names,names(t.dat))
xseq <- t.dat[,1]
hbc <- length(m.names)*sf+max(t.dat[,m.names])
hb <- ceiling(hbc)
library(RColorBrewer)
if(length(m.names) > 0)
{
if(!is.null(m.order))
{
tmp<-dat$c.dat[m.names,]
n.order<-tmp[order(tmp[,m.order]),]
m.names <- row.names(n.order)
}
### Picture Plotting!
#if(XY.plot==T){cell.zoom.2048(dat, cell=m.names,img=img, cols="white",zoom=F, plot.new=T)}
## Tool for color labeling
if(is.null(cols)){
cols <-brewer.pal(8,"Dark2")
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
## Tool for single color labeling
else {
cols <- rep(cols,ceiling(length(m.names)/length(cols)))
cols <- cols[1:length(m.names)]
}
if(multi.pic){
if(plot.new){
if(length(m.names)>10){dev.new(width=16,height=10);layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(10,6), heights=c(6,6))}
else(dev.new(width=12,height=8))
}
else{
if(length(m.names)>10){layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(10,6), heights=c(6,6))}
}
}else{dev.new(width=12,height=8)}
par(xpd=TRUE,mar=c(4,3,2,2), bty="l")
plot(xseq,t.dat[,m.names[1]],ylim=c(0,hbc),xlab="Time (min)",main=lmain,type="n", xaxt="n",yaxt="n",xlim=c(min(xseq)-1.5,max(xseq)))#-sf
bob<-dev.cur()
axis(1, at=seq(floor(min(t.dat[,1])),ceiling(max(t.dat[,1])), 1))
#axis(2, 1.4, )
text(rep(0,length(m.names)),seq(1,length(m.names))*sf+t.dat[1,m.names],m.names, cex=.5,col=cols,pos=2)
## Tool for adding window region labeling
if(length(wr) > 0){
#levs <- setdiff(unique(as.character(dat$w.dat[,"wr1"])),"")
x1s <- tapply(dat$w.dat[,1],as.factor(wr),min)[levs]
x2s <- tapply(dat$w.dat[,1],as.factor(wr),max)[levs]
y1s <- rep(-.3,length(x1s))
y2s <- rep(hbc+.2,length(x1s))
rect(x1s,y1s,x2s,y2s,col=levs.cols,border="black")
cpx <- xseq[match(levs,wr)+round(table(wr)[levs]/2,0)]
offs <- nchar(levs)*.5
# par(xpd=TRUE)
text(dat$t.dat[match(levs,wr),"Time"],rep(c((sf*.7)*.5,(sf*.7),(sf*.7)/5),length=length(levs)),levs,pos=4,offset=0,cex=bcex*.8)#,offset=-offs}
#par(xpd=FALSE)
}
## Tool for adding line, point and picture to the plot
for(i in 1:length(m.names)){
ypos<-t.dat[,m.names[i]]+i*sf
if(lns){lines(xseq,ypos, lty=1,col=cols[i],lwd=lw)}
if(pts){points(xseq,ypos,pch=16,col=cols[i],cex=.3)}
if(!is.null(snr)){
pp1 <- snr[,m.names[i]] > 0 & is.element(wr,levs)
pp2 <- snr[,m.names[i]] > 0 & !is.element(wr,levs)
points(xseq[pp1],t.dat[pp1,m.names[i]]+i/10,pch=1,col=cols[i])
points(xseq[pp2],t.dat[pp2,m.names[i]]+i/10,pch=0,col=cols[i])
}
}
if(is.null(img)){
img.p<-dat[[select.list(grep("img",names(dat), value=T))]]
if(is.null(img.p)){img.p<-dat$img1}
}else{img.p<-img}
if(is.null(zf)){zf<-20}else{zf<-zf}
#if(pic.plot==TRUE & length(m.names)<=10){
if(pic.plot==TRUE){
if(length(m.names)<=100){
pic.pos<-list()
for(i in 1:length(m.names)){
ypos<-t.dat[1,m.names[i]]+i*sf
pic.pos[[i]]<-ypos}
for(i in 1:length(m.names)){
#if(dat$bin[m.names[1],"mean.gfp.bin"]!=1 & dat$bin[m.names[1],"mean.tritc.bin"]!=1){img.p<-dat$img.gtd #if the cell is neither red or green, then make the img to plot img.gtd
#}else{img.p<-img}
#img.p<-img
img.dim<-dim(dat$img1)[1]
x<-dat$c.dat[m.names[i],"center.x"]
left <- x-zf
if(left<=0){left=0; right=2*zf}
right<- x+zf
if(right>=img.dim){left=img.dim-(2*zf);right=img.dim}
y<-dat$c.dat[m.names[i],"center.y"]
top<-y-zf
if(top<=0){top=0; bottom=2*zf}
bottom<-y+zf
if(bottom>=img.dim){top=img.dim-(2*zf);bottom=img.dim}
#par(xpd=TRUE)
xleft<-min(dat$t.dat[,1])-xinch(1)
xright<-min(dat$t.dat[,1])-xinch(.5)
ytop<-pic.pos[[i]]+yinch(.25)
ybottom<-pic.pos[[i]]-yinch(.25)
tryCatch(rasterImage(img.p[top:bottom,left:right,],xleft,ybottom,xright,ytop),error=function(e) rasterImage(img.p[top:bottom,left:right],xleft,ybottom,xright,ytop))
}
}
else{
par(mar=c(0,0,0,0))
plot(0,0,xlim=c(0,6), ylim=c(0,6), xaxs="i",yaxs="i", xaxt='n', yaxt='n')
tmp.img<-multi.pic.zoom.2(dat, m.names,img=img.p, labs=T, zf=zf, cols=cols)
dev.set(bob) # return focus to the main trace plot device
rasterImage(tmp.img, 0,0,6,6)
}
}
}
#if(!is.null(pdf.name))
#{dev.off()}
#return(pic.pos)
}
#matrix image
PlotHeatMat <- function(mat,wt=14,ht=10,new.dev=T,title="TOPVIEW", dat_name = NULL){
if(is.null(dat_name)){
dat_name<-""
}
mat <- mat-min(mat)
mat <- mat/max(mat)
if(new.dev)
{
dev.new(width=wt,height=ht,family="mono",canvas="black",title=title)
par(fg="darkgrey",col.axis="white",col.lab="grey",col.main="grey",mar=c(1,3,6,1))
plot(c(0,1),c(0,1),xaxt="n",yaxt="n",xlab="",ylab="",type="n")
}
rasterImage(mat,0,0,1,1,interpolate=F)
gx <- !grepl("gap",dimnames(mat)[[2]])
ux <- unique(dimnames(mat[,gx])[[2]])
gi <- match(ux,dimnames(mat)[[2]])/ncol(mat)
xi <- seq(0,(ncol(mat)-1))/(ncol(mat)-1)
xi.mat <- data.frame(min=tapply(xi,dimnames(mat)[[2]],min))
xi.mat[,"max"] <- tapply(xi,dimnames(mat)[[2]],max)
xi.mat[,"med"] <- (xi.mat[,"max"]+xi.mat[,"min"])/2
xi.lab <- row.names(xi.mat)
xi.lab[grep("gap",xi.lab)] <- ""
#xi.mat <- xi.mat[!grepl("gap",row.names(xi.mat)),]
axis(side=3,at=xi.mat[xi.lab != "","med"],labels=xi.lab[xi.lab != ""],las=2,col.axis="darkgrey")
axis(side=3,at=xi.mat[xi.lab=="","min"],labels=NA,tck=1,lwd=.2)
axis(side=3,at=xi.mat[xi.lab=="","max"],labels=NA,tck=1,lwd=.2)
yi <- seq(0,(nrow(mat)-1))/(nrow(mat)-1)
y.names <- dimnames(mat)[[1]]
y.names <- sub("^w[0987654321]*\\.","",y.names)
yi.mat <- data.frame(min=tapply(yi,y.names,min))
yi.mat[,"max"] <- tapply(yi,y.names,max)
yi.mat[,"med"] <- (yi.mat[,"min"]+yi.mat[,"max"])/2
yi.mat <- yi.mat[!grepl("blank",row.names(yi.mat)),]
axis(side=2,at=1-yi.mat[,"med"],labels=row.names(yi.mat),las=3,cex.axis=.5,col.axis="darkgrey")
par(xpd=T)
points(par('usr')[1], par('usr')[3], col='white', cex=6, pch = 16)
text(par('usr')[1], par('usr')[3], "END", col='black', cex=1, pch = 16)
text(par('usr')[2]-xinch(.5), par('usr')[3]-yinch(.1), dat_name, col='white', cex=.7 )
#dev.off()
}
#tmp is an RD object
#x.names defines the cells to display
#wt = device window width
#ht = device window height
#scale.var is the variable used to scale each cell. If not in the RD object defaults to a log scale transform then each row scale 0-1 min to max.
#aux.var is a list of auxillary variables to be displayed to the right of the traces. If there are missing values, no variation or variables not in the RD object they are not shown.
#img is the img name sent to LinesEvery.TV
#t.type is the trace type data sent to LinesEvery.TV
#title is the device window title.
#190508, Added a stat val. This sorts the traces based on the trace statistic you want it to
TopView <- function(tmp, x.names=NULL, wt=7, ht=4, scale.var="mean.sm.sd", aux.var=c("diameter","IB45.bin","gfp5.bin"), img="img1", t.type="blc", title="TOPVIEW", stat_val='max', dat_name=NULL){
if( is.null(dat_name) ){
dat_name <- deparse(substitute(tmp))
}
#vet the vars
#m.tot <- CollectMulti(rd.names=c(deparse(substitute(tmp))))
m.tot <- CollectMulti(rd.names=dat_name)
scale.var <- intersect(scale.var,names(m.tot))
aux.var <- intersect(aux.var,names(m.tot))
if(is.null(x.names)){
x.names <- row.names(tmp$bin)
}
blc.s <- as.matrix( t(tmp$blc[,x.names]) )
name2 <- make.names( tmp$w.dat[,"wr1"], unique=F)
name2[is.element(name2,c("X","epad"))] <- "gap"
dimnames(blc.s)[[2]] <- name2
if(length(scale.var)==1)
{
blc.s <- sweep(blc.s,1,m.tot[x.names,scale.var],'/')
}
else
{
blc.s <- log10(blc.s+1)
med <- apply(blc.s,1,min)
blc.s <- sweep(blc.s,1,med,'-')
blc.s[blc.s<0] <- 0
}
blc.s <- blc.s-min(blc.s)
blc.s <- blc.s/max(blc.s)
if(length(aux.var) > 0)
{
aux.mat <- NULL
n <- ceiling(nrow(tmp$w.dat)/50)
for(i in aux.var)
{
mat1 <- matrix(rep(m.tot[x.names,i],n),ncol=n)
dimnames(mat1)[[1]] <- x.names
dimnames(mat1)[[2]] <- rep(i,n)
aux.mat <- cbind(aux.mat,mat1)
}
aux.mat[is.na(aux.mat)]<-0
aux.min <- apply(aux.mat,2,min)
aux.mat <- sweep(aux.mat,2,aux.min,'-')
aux.max <- apply(aux.mat,2,max)
aux.mat <- sweep(aux.mat,2,aux.max,'/')
aux.mean <- apply(aux.mat,2,mean)
#aux.mat <- aux.mat[,is.na(aux.mean)]
blc.s <- cbind(blc.s,aux.mat)
}
name2 <- dimnames(blc.s)[[2]]
name2[!is.element(name2,names(m.tot))] <- "gap"
seqi <- seq(0,(ncol(blc.s)-1))/(ncol(blc.s)-1)
click.id <- data.frame(x=tapply(seqi,as.factor(dimnames(blc.s)[[2]]),median))
click.id[,"y"] <- 1
click.id <- click.id[intersect(row.names(click.id),names(m.tot)),]
click.id <- click.id[order(click.id$x),]
click.id.rn.torn<-row.names(click.id)[nrow(click.id)-length(aux.var)]
vals_to_click <- paste0(click.id.rn.torn,'.', stat_val)
click.vals <- m.tot[ x.names, row.names(click.id)]
PlotHeatMat(blc.s,wt=wt,ht=ht,title=title, dat_name=dat_name)
xy.click <- list(x=1,y=1)
while(xy.click$x > 0 | xy.click$y > 0)
{
xy.click <- locator(n=1,type="n")
if(xy.click$y > 1)
{
sort.trt <- row.names(click.id)[which.min(abs(xy.click$x-click.id[,"x"]))]
sval <- click.vals[x.names,sort.trt]
x.names <- x.names[order(sval)]
blc.s <- blc.s[x.names,]
PlotHeatMat(blc.s,new.dev=F,wt=wt,ht=ht, dat_name=dat_name)
}
if(xy.click$y < 1)
{
len1 <- length(x.names)-1
y.i <- abs(seq(0,len1)/len1 - (1-xy.click$y))
names(y.i) <- x.names
sort.i <- names(sort(y.i)[1:10])
di <- dev.cur()
if( length( ls(pattern="^lines_tv$") )<1 ){
dev.new(width=10, height=12)
lines_tv<-dev.cur()
}else{
dev.set(lines_tv)
}
LinesEvery.TV(tmp,sort.i,lw=3,levs.cols=grey(.95),img=tmp[[img]],t.type=t.type,m.order=seq(1,length(sort.i)),rtag="diameter",rtag2="gfp5.bin",rtag3="IB45.bin",zf=15,cols="black")
dev.set(di)
}
}
if( exists("di") ){ dev.off(di) }
if( exists("lines_tv") ){ dev.off(lines_tv) }
}
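#Hypothetical usage (assumes an RD object named RD.exp in the workspace):
#TopView(RD.exp, wt=7, ht=4, t.type="blc")
#Click above the heatmap to re-sort rows by a treatment; click a row to
#send its 10 nearest traces to a LinesEvery.TV window.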
census_viewer <- function(dat){
cat(
"This function essentially returns cells of a specified class\nfrom the census table
\n1. Select all cells from a specific cell class.
\n1a. If you click cancel, all cells will be returned.
\n2. bin >> column >> cells of that class scored as one.
\n3. Returns a vector of cell names, e.g. c(X.3, X.30)
"
)
(cell_list_name <- grep("^cell", names(dat), value=T))
(cell_types <- names( dat[[ cell_list_name ]] ))
(cell_type_name <- select.list( cell_types, title="Select the cell_type" ))
#Tool to return all cells if cancel is selected.
if(cell_type_name == ''){
cell_type <- dat$c.dat$id
}else{
(cell_type <-dat[[ cell_list_name ]] [[ cell_type_name ]])
}
(bin_col <- select.list(names(dat$bin), title="Select bin column"))
(cells <- cell_type[ dat$bin[cell_type , bin_col] == 1 ])
if( length(cells) == 0 ){
return(NA)
}else{
cells_to_view <- list()
cells_to_view[[ 'name' ]]<- paste0(cell_type_name,"__", bin_col)
cells_to_view[[ 'cells' ]] <- cells
return(cells_to_view)
}
}
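#Hypothetical usage:
#hits <- census_viewer(RD.exp)
#if( is.list(hits) ){ LinesEvery.TV(RD.exp, hits$cells, lmain=hits$name) }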
#Function to save the work and create a unique savehistory
saveRD <- function(dat){
cat("\nDO NOT CLOSE UNTIL I SAY YOU CAN!\nWait for the sound...")
flush.console()
if( exists("bringToTop") ){ bringToTop(-1) } #Windows-only; skipped on Mac
Sys.sleep(1)
#History Saver
experimentorsName <- strsplit(getwd(),'/')[[1]][2]
historyName <- paste(experimentorsName, Sys.time(), 'History.r')
historyName <- gsub(":", '_',historyName)
savehistory(historyName)
#Exp Saver
expName <- deparse(substitute(dat))
#expToSave <- get(expName, envir = .GlobalEnv)
assign(expName, dat)
save(list=expName, file=paste0(expName,".Rdata") )
alarm()
cat('\nYou can now close. Please consider cleaning up the file,\n',historyName,'\n')
}
cellTypeFixer<- function(dat){
dropped <- row.names(dat$bin[dat$bin$drop==1,])
dat$cell_types <- lapply(dat$cell_types, function(x) setdiff(x, dropped))
return(dat)
}
# MAKE SURE BOTH VIDEOS ARE IN SEPARATE FOLDERS,
# AND HAVE SEPARATE WR1's that match exactly what happened
# 1 Now that we know the videos are ok, run the cell profiler pipeline
# 2 Copy everything from the cell profiler pipeline into the 2 folders that contain the videos
# 3 Now do the pharming harvest and select both folders that contain the separate videos
# dat1Loc is the location of the first RD file e.g. "./1 video that yeeted itself/RD.1.Rdata"
# dat2Loc is the second RD file that needs to stitch on, e.g. "./2 R3J, TTAA, agonist vid/RD.2.Rdata"
# timeBuffer is the time separation in minutes between the first trace and the second trace
# newName is the name of the experiment to save
# Example of how to use
#traceSticher(
# './1 video that yeeted itself/RD.1.Rdata',
# './2 R3J, TTAA, agonist vid/RD.2.Rdata',
# 3,
# 'RD.bob')
traceSticher <- function(dat1Loc, dat2Loc, timeBuffer = 3, newName = NULL){
cat("
# MAKE SURE BOTH VIDEOS ARE IN SEPERATE FOLDERS,
# ALSO HAVE SEPERATE WR1's that match exactly what happened
# 1 Now tht we know the videos are ok we will simply do the cell profiler pipeline
# 2 we copy everything from the cell profiler pipeline into the 2 folders that contain the videos
# 3 Now do the pharming harvest and select both folders that contain the seperate videos
traceSticher(
'./1 video that yeeted itself/RD.1.Rdata',
'./2 R3J, TTAA, agonist vid/RD.2.Rdata',
3,
'RD.bob')
")
dat1 <- get(load(dat1Loc))
dat2 <- get(load(dat2Loc))
fName <- paste0(newName, ".Rdata")
assign(newName, dat1)
assign(newName, dat1, envir = .GlobalEnv)
save(list = newName ,file=fName )
#How long were your experiments separated?
#I think at least 7 min is mandatory
#time_buffer <- 3
#take the max time value in the t.dat of the first experiment
#and add the time buffer to it
time_to_change<-max(dat1$t.dat[1])+timeBuffer
#now increase every time value in the second experiment by the
#value above
changed_time <- dat2$t.dat[,1] + time_to_change
#Now we need to make unique names for the window regions
names_to_change<-setdiff(unique(dat2$w.dat[,"wr1"]),"")
new_names<-paste(names_to_change,"_2", sep="")
#dat2$w.dat["ignore"]<-0
for(i in 1:length(names_to_change)){
dat2$w.dat[ dat2$w.dat["wr1"]==names_to_change[i] ,2]<-new_names[i]
}
#Select the t.dat, blc and, w.dat
t_to_view<-c('t.dat', 't.340', 't.380', 'w.dat')
##now merge the datasets
for(i in 1:length(t_to_view)){
#we need to get rid of the epad
notEpadLogic <- dat1$w.dat$wr1 != "epad"
dat1[[ t_to_view[i] ]] <- dat1[[ t_to_view[i] ]][notEpadLogic, ]
#change the row names first
notEpadLogic <- dat2$w.dat$wr1 != "epad"
dat2[[ t_to_view[i] ]] <- dat2[[ t_to_view[i] ]][notEpadLogic, ]
row.names( dat2[[ t_to_view[i] ]] ) <- as.character(changed_time)
#change the first column value
dat2[[ t_to_view[i] ]][1]<-changed_time
#combine the experiments trace dataframes together
dat1[[ t_to_view[i] ]]<-rbind(dat1[[ t_to_view[i] ]], dat2[[ t_to_view[i] ]])
}
#And reprocess the data
levs<-setdiff(unique(as.character(dat1$w.dat[,2])),"")
snr.lim=5;hab.lim=.05;sm=2;ws=3;blc="SNIP"
pcp <- ProcConstPharm(dat1,sm,ws,blc)
scp <- ScoreConstPharm(dat1,pcp$blc,pcp$snr,pcp$der,snr.lim,hab.lim,sm)
bin <- bScore(pcp$blc,pcp$snr,snr.lim,hab.lim,levs,dat1$w.dat[,"wr1"])
dat1$scp<-scp
dat1$snr<-pcp$snr
dat1$blc<-pcp$blc
dat1$bin<-bin
#dat1 <- TraceBrewer(dat1)
fName <- paste0(newName, '.Rdata')
assign(newName, dat1)
assign(newName, dat1, envir = .GlobalEnv)
save(list = newName ,file=fName )
}
# now create table
barPlotter <- function(dat = NULL, cols = 'YlOrRd'){
cat("\nREAD ME\nWelcome to barPlotter to use me,\nbarPlotter(dat=RD.experiment, col = \'YlOrRd\')\nCustomize your colors, go to this webpage to get other names \nhttps://www.datanovia.com/en/wp-content/uploads/dn-tutorials/ggplot2/figures/0101-rcolorbrewer-palette-rcolorbrewer-palettes-1.png")
cat('\nSelect what you Want to be on your barPlot\n')
table <- TableBrewer(dat, ,F,F)
# for each row except the first (this is the number of cells in each cell type)
tableList <- list()
for(i in 2:dim(table)[1]){
tableList[[ row.names(table)[i] ]] <- table[c(1,i),,drop=F]
}
#tableList[['ATP']] <- read.csv('./atp.csv', row.names=1)
#tableList[['Ca-free ATP']] <- read.csv('./caFree.csv', row.names=1)
#tableList[['MRS-2365']] <- read.csv('./mrs2365.csv', row.names=1)
tablePercs <- Reduce(rbind,lapply(tableList, function(x) round(x[2,]/x[1,]*100, digits=2)))
tablePercsMut <- as.matrix(rev(tablePercs[nrow(tablePercs):1,]))
# BARPLOT
require(RColorBrewer)
cols <- rev(brewer.pal(5, cols))[length(tableList):1]
graphics.off()
dev.new(width=5, height=12)
par(mar=c(5,4,4,7))
bpDims <- barplot(
tablePercsMut,
beside=T,
horiz=T,
col=cols,
yaxt='n',
xlab='% Cell Class Responding',
xlim=c(0,100),
border=NA)
# LEGEND
par(xpd=T)
responses <- names(tableList)
legend(
par('usr')[2]+xinch(.2),
par('usr')[4]+yinch(.2),
responses,
fill=rev(cols),
border=NA,
bty='n',
horiz=F,
cex=.7)
# YLAB
yLocs <- apply(bpDims, 2, mean)
xLocs <- rep(par('usr')[1]-xinch(.5), length(yLocs))
tableLabs <- rev(names(tableList[[1]]))
par(xpd=T)
text(xLocs, yLocs, tableLabs)
# BARLABS
tablePercsMut <- tablePercsMut[nrow(tablePercsMut):1,,drop=F]
bpDims <- bpDims[nrow(bpDims):1,, drop=F]
for(i in 1:nrow(bpDims)){
xLocs <- tablePercsMut[i,] + xinch(.3)
yLocs <- bpDims[i,] +yinch(.02)
textToPlace <- paste(rev(tableList[[i]][2,]), '/', rev(tableList[[i]][1,]))
text(xLocs, yLocs, textToPlace, cex=.6)
}
}
|
McEvoy Olive Oil has a rich, robust flavor as a result of blending six Italian varietals: Frantoio, Leccino, Pendolino, Maurino, Leccio del Corno and Coratina. Frantoio and Leccino comprise 85% of the blend, giving the oil its green fruit and pungent characteristics, as well as a softer note. The remaining varieties give the oil balance and depth. |
(* Title: HOL/Auth/n_german_lemma_on_inv__51.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__51 imports n_german_base
begin
section{*All lemmas on causal relation between inv__51 and some rule r*}
lemma n_RecvInvAckVsinv__51:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i=p__Inv1)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__51:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__51:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__51:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntSVsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntS i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqEVsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntEVsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntE i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInvAckVsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInvAck i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__0Vsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__1Vsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__51:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__51 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy
from sklearn.model_selection import ParameterGrid
from sklearn.manifold import Isomap
import time
from tqdm import tqdm
import librosa
from librosa import cqt
from librosa.core import amplitude_to_db
from librosa.display import specshow
import os
import glob
```
```python
data_dir = '/Users/sripathisridhar/Desktop/SOL'
```
```python
file_paths= sorted(glob.glob(os.path.join(data_dir, '**', '*.wav')))
file_names= []
for file_path in file_paths:
    file_names.append(os.path.basename(file_path))
```
```python
hop_size= 512
q= 24
```
```python
import h5py
with h5py.File("TinySOL.h5", "r") as f:
    features_dict = {key: f[key][()] for key in f.keys()}
```
```python
grid = {
'Q': [24],
'k': [3],
'comp': ['log'],
'instr': ['Hp-ord'],
'dyn': ['all']
}
settings = list(ParameterGrid(grid))
for setting in settings:
    if setting["instr"] == 'all':
        setting['instr'] = ''
    if setting['dyn'] == 'all':
        setting['dyn'] = ''
```
```python
batch_str = []
CQT_OCTAVES = 7
features_keys = list(features_dict.keys())
for setting in settings:
    q = setting['Q']

    # Batch process and store in a folder
    batch_str = [setting['instr'], setting['dyn']]
    batch_features = []
    for feature_key in features_keys:
        # Get features that match setting
        if all(x in feature_key for x in batch_str):
            batch_features.append(features_dict[feature_key])
    batch_features = np.stack(batch_features, axis=1)

    # Isomap parameters
    hop_size = 512
    compression = 'log'
    features = amplitude_to_db(batch_features)
    n_neighbors = setting['k']
    n_dimensions = 3
    n_octaves = 3

    # Prune feature matrix
    bin_low = np.where((np.std(features, axis=1) / np.std(features)) > 0.1)[0][0] + q
    bin_high = bin_low + n_octaves*q
    X = features[bin_low:bin_high, :]

    # Z-score standardization improves contrast in the correlation matrix
    mus = np.mean(X, axis=1)
    sigmas = np.std(X, axis=1)
    X_std = (X - mus[:, np.newaxis]) / (1e-6 + sigmas[:, np.newaxis])  # 1e-6 to avoid runtime division by zero

    # Pearson correlation matrix
    rho_std = np.dot(X_std, X_std.T) / X_std.shape[1]

    # Isomap embedding
    isomap = Isomap(n_components=n_dimensions, n_neighbors=n_neighbors)
    coords = isomap.fit_transform(rho_std)

    # Get note value
    freqs = librosa.cqt_frequencies(q*CQT_OCTAVES, fmin=librosa.note_to_hz('C1'), bins_per_octave=q)  # librosa CQT default fmin is C1
    chroma_list = librosa.core.hz_to_note(freqs[bin_low:bin_high])

    notes = []
    reps = q//12
    for chroma in chroma_list:
        for i in range(reps):
            notes.append(chroma)
```
```python
curr_fig= plt.figure(figsize=(5.5, 2.75))
ax= curr_fig.add_subplot(121)
ax.axis('off')
import colorcet as cc
subsampled_color_ids = np.floor(np.linspace(0, 256, q, endpoint=False)).astype('int')
color_list= [cc.cyclic_mygbm_30_95_c78[i] for i in subsampled_color_ids]
# Plot embedding with color
for i in range(coords.shape[0]):
    plt.scatter(coords[i, 0], coords[i, 1], color=color_list[i % q], s=30.0)
plt.plot(coords[:, 0], coords[:, 1], color='black', linewidth=0.2)
# Plot Pearson correlation matrix
rho_frequencies = freqs[bin_low:bin_high]
freq_ticklabels = ['A2', 'A3', 'A4']
freq_ticks = librosa.core.note_to_hz(freq_ticklabels)
tick_bins = []
tick_labels= []
for i, freq_tick in enumerate(freq_ticks):
    tick_bin = np.argmin(np.abs(rho_frequencies - freq_tick))
    tick_bins.append(tick_bin)
    tick_labels.append(freq_ticklabels[i])
plt.figure(figsize=(2.5,2.5))
plt.imshow(np.abs(rho_std), cmap='magma_r')
plt.xticks(tick_bins)
plt.gca().set_xticklabels(freq_ticklabels)
# plt.xlabel('Log-frequency (octaves)')
plt.yticks(tick_bins)
plt.gca().set_yticklabels(freq_ticklabels)
# plt.ylabel('Log-frequency (octaves)')
plt.gca().invert_yaxis()
plt.clim(0, 1)
```
### Circle projection
```python
import circle_fit
import importlib
importlib.reload(circle_fit)
from circle_fit import circle_fit
A = np.transpose(coords[:,:-1])
x, r, circle_residual = circle_fit(A, verbose=True)
```
```python
import matplotlib
matplotlib.rc('font', family='serif')
fig, axes = plt.subplots()
plt.scatter(A[0,:],A[1,:])
plt.plot(x[0],x[1],'rx')
circle = plt.Circle(x, radius=r, fill=False, linestyle='-.')
axes.set_aspect(1)
axes.add_artist(circle)
# axes.set_ylim([-5,6])
# axes.set_xlim([-2,8])
plt.title('Circle fit: TinySOL all instr', pad=10.0)
plt.show()
print(np.sqrt(circle_residual)/72)  # residual normalized by the 72 = Q * n_octaves bins
```
```python
r
```
123.68845666079883
```python
def d_squared(a, b):
    # Takes two n-D tuples and returns the squared euclidean distance between them
    # Cast to array for computation
    # Cast first to tuple in case a or b are Sympy Point objects
    p_a = np.array(tuple(a), dtype='float')
    p_b = np.array(tuple(b), dtype='float')
    return np.sum(np.square(p_a - p_b))
```
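A quick sanity check of `d_squared` (note that it returns the squared distance, not the distance itself):

```python
# (0,0) to (3,4): squared euclidean distance is 3**2 + 4**2 = 25
assert d_squared((0, 0), (3, 4)) == 25.0
```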
```python
import sympy
from sympy.geometry import Circle, Point, Line
center = Point(x, evaluate=False)
c = Circle(center, r, evaluate=False)
l = Line(Point(coords[0,:-1]), center, evaluate=False)
points = [tuple(p) for p in l.points]
xy_prime = []
# TODO: rewrite in a more pythonic manner
for x, y in coords[:, :2]:
    intersections = c.intersection(Line(Point(x, y), center, evaluate=False))
    # Keep the intersection point on the near side of the circle
    if d_squared((x, y), intersections[0]) < d_squared((x, y), intersections[1]):
        xy_prime.append([float(p) for p in intersections[0]])
    else:
        xy_prime.append([float(p) for p in intersections[1]])
```
```python
fig, axes = plt.subplots()
plt.scatter(np.array(xy_prime)[:,0],np.array(xy_prime)[:,1], s=10,
label='projected points')
plt.scatter(A[0,:],A[1,:], s=0.5, label='isomap embedding points (2D)')
plt.plot(center[0],center[1],'rx')
circle = plt.Circle([float(p) for p in center], radius=r, fill=False,
linestyle='--', label='estimated circle fit')
axes.set_aspect(1)
axes.add_artist(circle)
plt.title('Projected points on circle', pad=10.0)
plt.legend(bbox_to_anchor=(1,1))
plt.show()
```
### Line projection
```python
z = np.arange(len(coords[:,2]))
z_fit = scipy.stats.linregress(z, coords[:,2])
print(z_fit.stderr)
```
0.0018336634903286483
```python
plt.figure()
plt.title('Line fit: TinySOL all instr')
plt.scatter(np.arange(len(coords[:,2])), coords[:,2])
plt.plot(z_fit.intercept + z_fit.slope*z, 'b')
```
```python
# New line coordinates
z_prime = [i * z_fit.slope + z_fit.intercept for i,_ in enumerate(coords[:,2])]
```
```python
coords_prime = np.append(np.array(xy_prime), np.expand_dims(np.array(z_prime), axis=1), axis=1)
coords_length = coords_prime.shape[0]
```
### Distance matrices
```python
# Projected helix self-distance matrix
D_proj = np.zeros((coords_length, coords_length))
for i in range(coords_length):
    for j in range(i, coords_length):
        D_proj[i][j] = d_squared(coords_prime[i, :], coords_prime[j, :])
```
```python
# Isomap embedding self-distance matrix
D_isomap = np.zeros((coords_length, coords_length)) # Projected points same no. as isomap
for i in range(coords_length):
    for j in range(i, coords_length):
        D_isomap[i][j] = d_squared(coords[i, :], coords[j, :])
```
```python
# Geodesic self-distance matrix
D_geodesic = isomap.dist_matrix_
# Convert to upper triangular sparse matrix
for i in range(coords_length):
    for j in range(i):
        D_geodesic[i, j] = 0
```
```python
## Centering matrix
def centered(A, Q=24, J=3):
    # Returns the double-centered distance matrix
    '''
    Inputs
    -----
    A - squared distance matrix
    Q - quality factor, 24 by default
    J - number of octaves, 3 by default

    Returns
    -----
    tau - MDS-style double-centered version of A
    '''
    coords_length = A.shape[0]
    H = np.zeros((coords_length, coords_length))
    const = 1/(Q*J)
    for i in range(coords_length):
        for j in range(coords_length):
            if j == i:
                H[i, j] = 1 - const
            else:
                H[i, j] = -const
    return -0.5 * np.matmul(np.matmul(H, A), H)
```
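A small sanity check of the double-centering, assuming the matrix side equals Q*J as `centered()` expects (here 24*3 = 72, with a hypothetical random matrix): rows and columns of the result should sum to (numerically) zero.

```python
D_test = np.random.rand(72, 72)  # hypothetical 72x72 squared-distance matrix
tau = centered(D_test, Q=24, J=3)
print(np.allclose(tau.sum(axis=0), 0), np.allclose(tau.sum(axis=1), 0))
```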
```python
def frobenius_distance(A, B):
    # Given two nxn matrices, return their 'Frobenius distance'
    return np.sqrt(np.sum(np.square(A - B)))
```
```python
loss_isomap = frobenius_distance(centered(D_geodesic), centered(D_isomap))/coords_length
loss_total = frobenius_distance(centered(D_geodesic), centered(D_proj))/coords_length
loss_proj = frobenius_distance(centered(D_isomap), centered(D_proj))/coords_length
```
```python
print(f"Isomap loss= {loss_isomap}")
print(f"Projection loss= {loss_proj}")
print(f"Total loss= {loss_total}")
```
Isomap loss= 117.47799081825059
Projection loss= 1.0433842670152964
Total loss= 116.82039800129996
```python
(loss_total) - (loss_isomap + loss_proj) < 0
```
True
```python
## Accumulates losses across runs with different settings;
## initialize the dict only if it does not exist yet
try:
    helicality_dict
except NameError:
    helicality_dict = {}
helicality_dict[setting['instr']] = [loss_isomap, loss_proj, loss_total]
```
```python
helicality_dict
```
{'TpC-ord': [14.780170171262531, 6.90431053444174, 13.687580004785238],
'Hp-ord': [117.47799081825059, 1.0433842670152964, 116.82039800129996],
'Fl-ord': [10.860110079953524, 7.292949241337281, 9.508129580612602]}
```python
import json
with open("SOL_instr.json", "w") as outfile:
    json.dump(helicality_dict, outfile)
```
```python
with open("SOL_instr.json") as infile:
    helicality_data = json.load(infile)
print(helicality_data)
```
{'TpC-ord': [14.780170171262531, 6.90431053444174, 13.687580004785238], 'Hp-ord': [117.47799081825059, 1.0433842670152964, 116.82039800129996], 'Fl-ord': [10.860110079953524, 7.292949241337281, 9.508129580612602]}
|
# Using the GUROBI solver for optimization problems
<div style="text-align: right"> Written by JunPyo Park </div>
* A short summary of how to use GUROBI, one of the solvers available for optimization problems.
* A note from experience: solving large-scale problems requires a license. Students can use a free academic license.
**Reference**
* [GUROBI Python Documentation](https://www.gurobi.com/documentation/9.1/quickstart_windows/cs_python.html#section:Python)
* [GUROBI Tutorial Slide](https://pages.gurobi.com/rs/181-ZYS-005/images/Videos-MIP-v18.pdf)
* [Introduction to Mathematical Optimization Modeling](https://colab.research.google.com/github/Gurobi/modeling-examples/blob/master/intro_to_modeling/introduction_to_modeling_gcl.ipynb)
---
## Installation
```
pip install gurobipy
```
## Simple MIP (Mixed Integer Linear Programming) Example
### Objective
$\text{Maximize}$ <br>
$x + y + 2 z$ <br>
### Constraints
$\text{subject to}$ <br>
$x + 2 y + 3 z <= 4$ <br>
$x + y >= 1$ <br>
$x, y, z$ $\text{binary}$ <br>
Since x, y, and z are binary (each takes the value 0 or 1) and both the objective and the constraints are linear, this problem is called a MIP.
### Full Code
Explanations of each part follow below.
```python
import gurobipy as gp
from gurobipy import GRB
```
```python
try:
    # Create a new model
    m = gp.Model("mip1")

    # Create variables
    x = m.addVar(vtype=GRB.BINARY, name="x")
    y = m.addVar(vtype=GRB.BINARY, name="y")
    z = m.addVar(vtype=GRB.BINARY, name="z")

    # Set objective
    m.setObjective(x + y + 2 * z, GRB.MAXIMIZE)

    # Add constraint: x + 2 y + 3 z <= 4
    # m.addConstr(x + 2 * y + 3 * z <= 4, "c0")
    constr = gp.LinExpr()
    constr += x
    constr += 2 * y
    constr += 3 * z
    m.addConstr(constr <= 4, "c0")

    # Add constraint: x + y >= 1
    m.addConstr(x + y >= 1, "c1")

    # Optimize model
    m.optimize()

    for v in m.getVars():
        print('%s %g' % (v.varName, v.x))

    print('Obj: %g' % m.objVal)

except gp.GurobiError as e:
    print('Error code ' + str(e.errno) + ': ' + str(e))

except AttributeError:
    print('Encountered an attribute error')
```
Gurobi Optimizer version 9.1.2 build v9.1.2rc0 (win64)
Thread count: 8 physical cores, 16 logical processors, using up to 16 threads
Optimize a model with 2 rows, 3 columns and 5 nonzeros
Model fingerprint: 0x98886187
Variable types: 0 continuous, 3 integer (3 binary)
Coefficient statistics:
Matrix range [1e+00, 3e+00]
Objective range [1e+00, 2e+00]
Bounds range [1e+00, 1e+00]
RHS range [1e+00, 4e+00]
Found heuristic solution: objective 2.0000000
Presolve removed 2 rows and 3 columns
Presolve time: 0.00s
Presolve: All rows and columns removed
Explored 0 nodes (0 simplex iterations) in 0.00 seconds
Thread count was 1 (of 16 available processors)
Solution count 2: 3 2
Optimal solution found (tolerance 1.00e-04)
Best objective 3.000000000000e+00, best bound 3.000000000000e+00, gap 0.0000%
x 1
y 0
z 1
Obj: 3
### Creating the Model (Optimization Problem)
```python
# Create a new model
m = gp.Model("mip1")
```
A model holds a single optimization problem. It consists of the variables, the constraints, and the attributes associated with the problem (variable bounds, variable types, objective coefficients, and so on).
### Adding Variables to the Model
```python
# Create binary variable
x = m.addVar(vtype=GRB.BINARY, name="x")
```
Variables are added to the model with the `model.addVar()` method. To add several variables at once over an array-like index set, use the `addVars()` method, as in the sketch below.
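A minimal hypothetical sketch of `addVars()` (the model `m2` and the name prefix `x` are illustrative only, not part of the running example):

```python
import gurobipy as gp
from gurobipy import GRB

m2 = gp.Model("addvars-demo")
# One call creates the binary variables x[0], x[1], x[2],
# returned as a tupledict keyed by index
xs = m2.addVars(3, vtype=GRB.BINARY, name="x")
```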
```python
help(m.addVar)
```
Help on method addVar in module gurobipy:
addVar(lb=0.0, ub=1e+100, obj=0.0, vtype='C', name='', column=None) method of gurobipy.Model instance
ROUTINE:
addVar(lb, ub, obj, vtype, name, column)
PURPOSE:
Add a variable to the model.
ARGUMENTS:
lb (float): Lower bound (default is zero)
ub (float): Upper bound (default is infinite)
obj (float): Objective coefficient (default is zero)
vtype (string): Variable type (default is GRB.CONTINUOUS)
name (string): Variable name (default is no name)
column (Column): Initial coefficients for column (default is None)
RETURN VALUE:
The created Var object.
EXAMPLE:
v = model.addVar(ub=2.0, name="NewVar")
### Setting the Objective
**Maximize** <br>
$x + y + 2 z$ <br>
```python
# Set objective
m.setObjective(x + y + 2 * z, GRB.MAXIMIZE)
```
For complicated expressions, the formula can be built up incrementally, as shown below:
```python
obj = gp.LinExpr()
obj += x
obj += y
obj += 2*z
m.setObjective(obj, GRB.MAXIMIZE)
```
### Adding Constraints
```python
# Add constraint: x + 2 y + 3 z <= 4
m.addConstr(x + 2 * y + 3 * z <= 4, "c0")
```
* Constraints are added with the `model.addConstr()` method
* The second argument (`"c0"`) is the constraint's name
* Complicated expressions can be built up incrementally, just as in the objective-setting section above
* Several constraints can be added at once with [addConstrs()](https://www.gurobi.com/documentation/9.1/refman/py_model_addconstrs.html), as in the sketch below
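A minimal hypothetical sketch of `addConstrs()`, reusing the `m2` and `xs` objects from the `addVars()` sketch above:

```python
# A generator expression yields one constraint per index;
# the constraints are named cap[0], cap[1], cap[2]
caps = m2.addConstrs((xs[i] + xs[(i + 1) % 3] <= 1 for i in range(3)), name="cap")
```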
### Optimizing the Model
```python
# Optimize model
m.optimize()
```
Optimization is carried out over the objective and constraints added so far.
### Inspecting the Results
```python
for v in m.getVars():
    print('%s %g' % (v.varName, v.x))
```
* The `varName` attribute gives access to the name assigned when the variable was created
* `v.x`: the optimal value of variable `v`
```python
print('Obj: %g' % m.objVal) # optimal value
```
---
## Consulting Company Problem
Working through this problem is a good way to get a feel for the modeling workflow.
### Problem Description
Consider a consulting company that has three open positions: Tester, Java Developer, and Architect. The three top candidates (resources) for the positions are: Carlos, Joe, and Monika. The consulting company administered competency tests to each candidate in order to assess their ability to perform each of the jobs. The results of these tests are called matching scores. Assume that only one candidate can be assigned to a job, and at most one job can be assigned to a candidate.
The problem is to determine an assignment of resources and jobs such that each job is fulfilled, each resource is assigned to at most one job, and the total matching scores of the assignments is maximized.
### Data
The list $R$ contains the names of the three resources: Carlos, Joe, and Monika.
The list $J$ contains the names of the job positions: Tester, Java Developer, and Architect.
$r \in R$: index and set of resources. The resource $r$ belongs to the set of resources $R$.
$j \in J$: index and set of jobs. The job $j$ belongs to the set of jobs $J$.
For each resource $r$ and job $j$, there is a corresponding matching score $s$. The matching score $s$ can only take values between 0 and 100. That is, $s_{r,j} \in [0, 100]$ for all resources $r \in R$ and jobs $j \in J$.
```python
# Resource and job sets
R = ['Carlos', 'Joe', 'Monika']
J = ['Tester', 'JavaDeveloper', 'Architect']
```
```python
help(gp.multidict)
```
Help on built-in function multidict in module gurobipy:
multidict(...)
ROUTINE:
multidict(data)
PURPOSE:
Split a single dictionary into multiple dictionaries.
ARGUMENTS:
data: A dictionary that maps each key to a list of 'n' values.
RETURN VALUE:
A list of the shared keys, followed by individual tupledicts.
EXAMPLE:
(keys, dict1, dict2) = multidict( {
'key1': [1, 2],
'key2': [1, 3],
'key3': [1, 4] } )
```python
# Matching score data
combinations, scores = gp.multidict({
('Carlos', 'Tester'): 53,
('Carlos', 'JavaDeveloper'): 27,
('Carlos', 'Architect'): 13,
('Joe', 'Tester'): 80,
('Joe', 'JavaDeveloper'): 47,
('Joe', 'Architect'): 67,
('Monika', 'Tester'): 53,
('Monika', 'JavaDeveloper'): 73,
('Monika', 'Architect'): 47
})
```
The following constructor creates an empty ``Model`` object “m”. We specify the model name by passing the string "RAP" as an argument. The ``Model`` object “m” holds a single optimization problem. It consists of a set of variables, a set of constraints, and the objective function.
```python
# Declare and initialize model
m = gp.Model('RAP')
```
### Decision Variables
To solve this assignment problem, we need to identify which resource is assigned to which job. We introduce a decision variable for each possible assignment of resources to jobs. Therefore, we have 9 decision variables.
To simplify the mathematical notation of the model formulation, we define the following indices for resources and jobs:
For example, $x_{2,1}$ is the decision variable associated with assigning the resource Joe to the job Tester. Therefore, decision variable $x_{r,j}$ equals 1 if resource $r \in R$ is assigned to job $j \in J$, and 0 otherwise.
The ``Model.addVars()`` method creates the decision variables for a ``Model`` object.
This method returns a Gurobi ``tupledict`` object that contains the newly created variables. We supply the ``combinations`` object as the first argument to specify the variable indices. The ``name`` keyword is used to specify a name for the newly created decision variables. By default, variables are assumed to be non-negative.
```python
combinations
```
<gurobi.tuplelist (9 tuples, 2 values each):
( Carlos , Tester )
( Carlos , JavaDeveloper )
( Carlos , Architect )
( Joe , Tester )
( Joe , JavaDeveloper )
( Joe , Architect )
( Monika , Tester )
( Monika , JavaDeveloper )
( Monika , Architect )
>
```python
# Create decision variables for the RAP model
x = m.addVars(combinations, name="assign")
```
### Job constraints
We now discuss the constraints associated with the jobs. These constraints need to ensure that each job is filled by exactly one resource.
The job constraint for the Tester position requires that resource 1 (Carlos), resource 2 (Joe), or resource 3 (Monika) is assigned to this job. This corresponds to the following constraint.
Constraint (Tester=1)
$$
x_{1,1} + x_{2,1} + x_{3,1} = 1
$$
Similarly, the constraints for the Java Developer and Architect positions can be defined as follows.
Constraint (Java Developer = 2)
$$
x_{1,2} + x_{2,2} + x_{3,2} = 1
$$
Constraint (Architect = 3)
$$
x_{1,3} + x_{2,3} + x_{3,3} = 1
$$
The job constraints are defined by the columns of the following table.
In general, the constraint for the job Tester can defined as follows.
$$
x_{1,1} + x_{2,1} + x_{3,1} = \sum_{r=1}^{3 } x_{r,1} = \sum_{r \in R} x_{r,1} = 1
$$
All of the job constraints can be defined in a similarly succinct manner. For each job $j \in J$, take the summation of the decision variables over all the resources. We can write the corresponding job constraint as follows.
$$
\sum_{r \in R} x_{r,j} = 1
$$
The ``Model.addConstrs()`` method of the Gurobi/Python API defines the job constraints of the ``Model`` object “m”. This method returns a Gurobi ``tupledict`` object that contains the job constraints.
The first argument of this method, "x.sum(‘*’, j)", is the sum method and defines the LHS of the jobs constraints as follows:
For each job $j$ in the set of jobs $J$, take the summation of the decision variables over all the resources. The $==$ defines an equality constraint, and the number "1" is the RHS of the constraints.
These constraints are saying that exactly one resource should be assigned to each job.
The second argument is the name of this type of constraints.
```python
# Create job constraints
jobs = m.addConstrs((x.sum('*',j) == 1 for j in J), name='job')
```
### Resource constraints
The constraints for the resources need to ensure that at most one job is assigned to each resource. That is, it is possible that not all the resources are assigned.
For example, we want a constraint that requires Carlos to be assigned to at most one of the jobs: either job 1 (Tester), job 2 (Java Developer ), or job 3 (Architect). We can write this constraint as follows.
Constraint (Carlos=1)
$$
x_{1, 1} + x_{1, 2} + x_{1, 3} \leq 1.
$$
This constraint is less or equal than 1 to allow the possibility that Carlos is not assigned to any job. Similarly, the constraints for the resources Joe and Monika can be defined as follows:
Constraint (Joe=2)
$$
x_{2, 1} + x_{2, 2} + x_{2, 3} \leq 1.
$$
Constraint (Monika=3)
$$
x_{3, 1} + x_{3, 2} + x_{3, 3} \leq 1.
$$
Observe that the resource constraints are defined by the rows of the following table.
The constraint for the resource Carlos can be defined as follows.
$$
x_{1, 1} + x_{1, 2} + x_{1, 3} = \sum_{j=1}^{3 } x_{1,j} = \sum_{j \in J} x_{1,j} \leq 1.
$$
Again, each of these constraints can be written in a succinct manner. For each resource $r \in R$, take the summation of the decision variables over all the jobs. We can write the corresponding resource constraint as follows.
$$
\sum_{j \in J} x_{r,j} \leq 1.
$$
The ``Model.addConstrs()`` method of the Gurobi/Python API defines the resource constraints of the ``Model`` object “m”.
The first argument of this method, "x.sum(r, ‘*’)", is the sum method and defines the LHS of the resource constraints as follows: For each resource $r$ in the set of resources $R$, take the summation of the decision variables over all the jobs.
The $<=$ defines a less or equal constraints, and the number “1” is the RHS of the constraints.
These constraints are saying that each resource can be assigned to at most 1 job.
The second argument is the name of this type of constraints.
```python
# Create resource constraints
resources = m.addConstrs((x.sum(r,'*') <= 1 for r in R), name='resource')
```
### Objective function
The objective function is to maximize the total matching score of the assignments that satisfy the job and resource constraints.
For the Tester job, the matching score is $53x_{1,1}$, if resource Carlos is assigned, or $80x_{2,1}$, if resource Joe is assigned, or $53x_{3,1}$, if resource Monika is assigned.
Consequently, the matching score for the Tester job is as follows, where only one term in this summation will be nonzero.
$$
53x_{1,1} + 80x_{2,1} + 53x_{3,1}.
$$
Similarly, the matching scores for the Java Developer and Architect jobs are defined as follows. The matching score for the Java Developer job is:
$$
27x_{1, 2} + 47x_{2, 2} + 73x_{3, 2}.
$$
The matching score for the Architect job is:
$$
13x_{1, 3} + 67x_{2, 3} + 47x_{3, 3}.
$$
The total matching score is the summation of each cell in the following table.
The goal is to maximize the total matching score of the assignments. Therefore, the objective function is defined as follows.
\begin{equation}
\text{Maximize} \quad (53x_{1,1} + 80x_{2,1} + 53x_{3,1}) \; +
\end{equation}
\begin{equation}
\quad (27x_{1, 2} + 47x_{2, 2} + 73x_{3, 2}) \; +
\end{equation}
\begin{equation}
\quad (13x_{1, 3} + 67x_{2, 3} + 47x_{3, 3}).
\end{equation}
Each term in parenthesis in the objective function can be expressed as follows.
\begin{equation}
(53x_{1,1} + 80x_{2,1} + 53x_{3,1}) = \sum_{r \in R} s_{r,1}x_{r,1}.
\end{equation}
\begin{equation}
(27x_{1, 2} + 47x_{2, 2} + 73x_{3, 2}) = \sum_{r \in R} s_{r,2}x_{r,2}.
\end{equation}
\begin{equation}
(13x_{1, 3} + 67x_{2, 3} + 47x_{3, 3}) = \sum_{r \in R} s_{r,3}x_{r,3}.
\end{equation}
Hence, the objective function can be concisely written as:
\begin{equation}
\text{Maximize} \quad \sum_{j \in J} \sum_{r \in R} s_{r,j}x_{r,j}.
\end{equation}
The ``Model.setObjective()`` method of the Gurobi/Python API defines the objective function of the ``Model`` object “m”. The objective expression is specified in the first argument of this method.
Notice that both the matching score parameters “scores” and the assignment decision variables “x” are defined over the “combinations” keys. Therefore, we use the method ``x.prod(scores)`` to obtain the summation of the elementwise multiplication of the “scores” matrix and the “x” variable matrix.
The second argument, ``GRB.MAXIMIZE``, is the optimization "sense." In this case, we want to *maximize* the total matching scores of all assignments.
```python
scores
```
{('Carlos', 'Tester'): 53,
('Carlos', 'JavaDeveloper'): 27,
('Carlos', 'Architect'): 13,
('Joe', 'Tester'): 80,
('Joe', 'JavaDeveloper'): 47,
('Joe', 'Architect'): 67,
('Monika', 'Tester'): 53,
('Monika', 'JavaDeveloper'): 73,
('Monika', 'Architect'): 47}
```python
x.prod(scores)
```
<gurobi.LinExpr: 53.0 assign[Carlos,Tester] + 27.0 assign[Carlos,JavaDeveloper] + 13.0 assign[Carlos,Architect] + 80.0 assign[Joe,Tester] + 47.0 assign[Joe,JavaDeveloper] + 67.0 assign[Joe,Architect] + 53.0 assign[Monika,Tester] + 73.0 assign[Monika,JavaDeveloper] + 47.0 assign[Monika,Architect]>
```python
# Objective: maximize total matching score of all assignments
m.setObjective(x.prod(scores), GRB.MAXIMIZE)
```
```python
# Save model for inspection
m.write('RAP.lp')
```
```python
m.display()
```
Maximize
<gurobi.LinExpr: 53.0 assign[Carlos,Tester] + 27.0 assign[Carlos,JavaDeveloper] + 13.0 assign[Carlos,Architect] + 80.0 assign[Joe,Tester] + 47.0 assign[Joe,JavaDeveloper] + 67.0 assign[Joe,Architect] + 53.0 assign[Monika,Tester] + 73.0 assign[Monika,JavaDeveloper] + 47.0 assign[Monika,Architect]>
Subject To
job[Tester] : <gurobi.LinExpr: assign[Carlos,Tester] + assign[Joe,Tester] + assign[Monika,Tester]> = 1.0
job[JavaDeveloper] : <gurobi.LinExpr: assign[Carlos,JavaDeveloper] + assign[Joe,JavaDeveloper] + assign[Monika,JavaDeveloper]> = 1.0
job[Architect] : <gurobi.LinExpr: assign[Carlos,Architect] + assign[Joe,Architect] + assign[Monika,Architect]> = 1.0
resource[Carlos] : <gurobi.LinExpr: assign[Carlos,Tester] + assign[Carlos,JavaDeveloper] + assign[Carlos,Architect]> <= 1.0
resource[Joe] : <gurobi.LinExpr: assign[Joe,Tester] + assign[Joe,JavaDeveloper] + assign[Joe,Architect]> <= 1.0
resource[Monika] : <gurobi.LinExpr: assign[Monika,Tester] + assign[Monika,JavaDeveloper] + assign[Monika,Architect]> <= 1.0
```python
# Run optimization engine
m.optimize()
```
Gurobi Optimizer version 9.1.2 build v9.1.2rc0 (win64)
Thread count: 8 physical cores, 16 logical processors, using up to 16 threads
Optimize a model with 6 rows, 9 columns and 18 nonzeros
Model fingerprint: 0xb343b6eb
Coefficient statistics:
Matrix range [1e+00, 1e+00]
Objective range [1e+01, 8e+01]
Bounds range [0e+00, 0e+00]
RHS range [1e+00, 1e+00]
Presolve time: 0.00s
Presolved: 6 rows, 9 columns, 18 nonzeros
Iteration Objective Primal Inf. Dual Inf. Time
0 4.6000000e+32 1.800000e+31 4.600000e+02 0s
5 1.9300000e+02 0.000000e+00 0.000000e+00 0s
Solved in 5 iterations and 0.01 seconds
Optimal objective 1.930000000e+02
```python
# Display optimal values of decision variables
for v in m.getVars():
if v.x > 1e-6:
print(v.varName, v.x)
# Display optimal total matching score
print('Total matching score: ', m.objVal)
```
assign[Carlos,Tester] 1.0
assign[Joe,Architect] 1.0
assign[Monika,JavaDeveloper] 1.0
Total matching score: 193.0
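As a follow-up, the optimal assignment can also be collected into a plain Python dictionary for downstream use. This is a minimal sketch, assuming the optimized ``Model`` object “m” and the variable ``tupledict`` “x” defined above; the 0.5 threshold guards against floating-point round-off in the binary variables.
```python
# Minimal sketch: keep only the decision variables set to 1 in the
# optimal solution (assumes "m" has been optimized and "x" is the
# tupledict of assignment variables from above).
assignment = {(r, j): v.x for (r, j), v in x.items() if v.x > 0.5}
print(assignment)
```
For the data above, this yields ``{('Carlos', 'Tester'): 1.0, ('Joe', 'Architect'): 1.0, ('Monika', 'JavaDeveloper'): 1.0}``, matching the printed solution.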
|
(* Title: HOL/Auth/n_german_lemma_on_inv__23.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
(*header{*The n_german Protocol Case Study*}*)
theory n_german_lemma_on_inv__23 imports n_german_base
begin
section{*All lemmas on causal relation between inv__23 and some rule r*}
lemma n_StoreVsinv__23:
assumes a1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i d where a1:"i\<le>N\<and>d\<le>N\<and>r=n_Store i d" apply fastforce done
from a2 obtain p__Inv0 p__Inv1 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i=p__Inv1)\<or>(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i=p__Inv1)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv0)) (Const false)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv0) ''State'')) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv1)) (Const false)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv1) ''State'')) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv0)) (Const false)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv0) ''State'')) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv1)) (Const false)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv1) ''State'')) (Const I)))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv0 p__Inv1 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i=p__Inv1)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''ExGntd'')) (Const true))) (neg (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Data'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const false)) (neg (eqn (IVar (Ident ''MemData'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i=p__Inv0)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv0) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''ExGntd'')) (Const true))) (neg (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv0) ''Data'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const false)) (neg (eqn (IVar (Ident ''MemData'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i=p__Inv1)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv1) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''ExGntd'')) (Const true))) (neg (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv1) ''Data'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const false)) (neg (eqn (IVar (Ident ''MemData'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv0 p__Inv1 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i=p__Inv1)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv0 p__Inv1 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i=p__Inv1)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntSVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntS i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqEVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntEVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntE i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInvAckVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInvAck i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__0Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__1Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv0 p__Inv1 p__Inv2. p__Inv0\<le>N\<and>p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv1\<and>p__Inv0~=p__Inv2\<and>p__Inv1~=p__Inv2\<and>f=inv__23 p__Inv0 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
Formal statement is: lemma cball_eq_atLeastAtMost: fixes a b::real shows "cball a b = {a - b .. a + b}" Informal statement is: The closed ball of radius $b$ centered at $a$ is equal to the closed interval $[a - b, a + b]$. |
(** In this lecture, we start to learn the first approach to describing program
specifications and/or program semantics. *)
(* ################################################################# *)
(** * Assertions *)
(** In order to talk about which properties a program does or should satisfy, we
first have to be able to talk about properties of program states (程序状态). *)
(**
To talk about specifications of programs, the
first thing we need is a way of making assertions
about properties that hold at particular
points during a program's execution — i.e.,
claims about the current state of the memory
when execution reaches that point.
--- << Software Foundation, Volume 2 >>
*)
(** Informally, an assertion (断言) is a proposition (命题) which describes a
particular property of program states. Using the following C function as an
example,
int fib(int n) {
int a0 = 0, a1 = 1, a2;
int i;
for (i = 0; i < n; ++ i) {
a2 = a0 + a1;
a0 = a1;
a1 = a2;
}
return a0;
}
In this C function, there are only 5 program variables, [a0], [a1], [a2], [i]
and [n]. A program state is determined by these program variables' values, and
the following are typical program assertions.
[ {[a0]} = 0 AND {[a1]} = 1 ]
[ {[a0]} < {[a1]} ]
[ EXISTS k, {[a0]} = fib(k) AND {[a1]} = fib(k+1) AND {[a2]} = fib(k + 2) ]
*)
(** In more general cases, a C program state contains program variables' values,
program variables' addresses, memory contents, etc. In the last lecture, we
saw an incorrect program which computes the sum of elements in a linked list. Here
is a correct program.
struct list {unsigned int head; struct list *tail;};
unsigned int sumlist (struct list * t) {
unsigned int s = 0;
while (t) {
s = s + (t->head);
t = t->tail;
}
return s;
}
And here is an assertion.
[ ( {[t]} |-> 0 ) AND ( {[t]} + 4 |-> NULL ) AND ( {[s]} = 0 ) ]
We introduce a new predicate in this assertion, [X |-> Y]. It means that the
value stored at address [X] is [Y].
*)
(* ================================================================= *)
(** ** What is a "proposition"? *)
(** As mentioned above, an assertion is a proposition which describes a property
of program states. And we have seen many assertions already. You may still ask:
what is a proposition, formally? *)
(** Mainly, it is a philosophical question. We have two answers to it. Answer 1:
a proposition is the sentence itself which describes the property. Answer 2: a
proposition is the meaning of the sentence. Math definitions of "proposition"
differ depending on which of these two answers is taken. For example, assertions
may be defined as syntax trees (sentences) or as sets of program states (meanings
of sentences).
Both approaches are accepted by mathematicians and computer scientists. In this
course, we will just say "propositions" when we do not need to distinguish these
two representations. *)
(* ================================================================= *)
(** ** Assertions vs. boolean functions *)
(** On one hand, assertions and boolean functions are different. *)
(** 1. Not all assertions can be represented as boolean functions. Here is an
example:
[ FORALL k, k < {[n]} OR (k is_prime) OR (fib(k) is_not_prime) ]
*)
(** 2. Not all boolean functions can be represented as assertions, since
boolean functions can have side effects. *)
(** 3. Assertions and boolean functions are categorically different. Assertions
describe properties, but boolean functions are mainly about computation. *)
(** On the other hand, there are some connections between them. Many dynamic
program analysis tools do use boolean functions to represent assertions. *)
(* ################################################################# *)
(** * Assertion equivalence and comparison *)
(** Given two assertions [P] and [Q], if every program state [m] which satisfies
[P] also satisfies [Q], we say that [P] is stronger than [Q], or written as
[P |-- Q]. If [P] is stronger than [Q] and [Q] is stronger than [P] at the same
time, we say that [P] and [Q] are equivalent to each other. We write
[P --||-- Q]. *)
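(** For example, [ {[a0]} = 0 AND {[a1]} = 1 |-- {[a0]} < {[a1]} ] holds, since
every program state satisfying the left hand side also satisfies the right hand
side. The reverse entailment does not hold, so these two assertions are not
equivalent. *)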
(* ################################################################# *)
(** * A formally defined toy language *)
(** Before we go on and introduce more advanced concepts, it is important that
we can make things really formal. Specifically, we will have a formal
programming language (but a simple one) and a formal assertion language. Since
it is the first time that we use Coq formal definitions in this course, we hide
those Coq code but only show some examples. *)
Require Import PL.Imp.
Import Assertion_S.
Import Concrete_Pretty_Printing.
(** We pack those definitions in another Coq file and we "import" it in Coq by
this line of code above. *)
(** The following instructions tell you how to do that on your
own laptop. You can also find this instruction from << Software Foundation >>
volume 1, Chapter 2, Induction (slightly different). *)
(** BEGINNING of instruction from << Software Foundation >>. *)
(** For the [Require Import] to work, Coq needs to be able to
find a compiled version of [Imp.v], called [Imp.vo], in a directory
associated with the prefix [PL]. This file is analogous to the [.class]
files compiled from [.java] source files and the [.o] files compiled from
[.c] files.
First create a file named [_CoqProject] containing the following line
(if you obtained the whole volume "Logical Foundations" as a single
archive, a [_CoqProject] should already exist and you can skip this step):
[-Q . PL]
This maps the current directory ("[.]", which contains [Imp.v],
[Induction.v], etc.) to the prefix (or "logical directory") "[PL]".
PG and CoqIDE read [_CoqProject] automatically, so they know where to
look for the file [Imp.vo] corresponding to the library [PL.Imp].
Once [_CoqProject] is thus created, there are various ways to build
[Imp.vo]:
- In Proof General: The compilation can be made to happen automatically
when you submit the [Require] line above to PG, by setting the emacs
variable [coq-compile-before-require] to [t].
- In CoqIDE: Open [Imp.v]; then, in the "Compile" menu, click
on "Compile Buffer".
- From the command line: Generate a [Makefile] using the [coq_makefile]
utility, that comes installed with Coq (if you obtained the whole
volume as a single archive, a [Makefile] should already exist
and you can skip this step):
[coq_makefile -f _CoqProject *.v -o Makefile]
Note: You should rerun that command whenever you add Coq files to or
remove them from the directory.
Then you can compile [Imp.v] by running [make] with the corresponding
[.vo] file as a target:
[make Imp.vo]
All files in the directory can be compiled by giving no arguments:
[make]
Under the hood, [make] uses the Coq compiler, [coqc]. You can also
run [coqc] directly:
[coqc -Q . PL Imp.v]
But [make] also calculates dependencies between source files to compile
them in the right order, so [make] should generally be preferred over
explicit [coqc].
If you have trouble (e.g., if you get complaints about missing
identifiers later in the file), it may be because the "load path"
for Coq is not set up correctly. The [Print LoadPath.] command
may be helpful in sorting out such issues.
In particular, if you see a message like
[Compiled library Foo makes inconsistent assumptions over
library Bar]
check whether you have multiple installations of Coq on your machine.
It may be that commands (like [coqc]) that you execute in a terminal
window are getting a different version of Coq than commands executed by
Proof General or CoqIDE.
- Another common reason is that the library [Bar] was modified and
recompiled without also recompiling [Foo] which depends on it. Recompile
[Foo], or everything if too many files are affected. (Using the third
solution above: [make clean; make].)
One more tip for CoqIDE users: If you see messages like [Error:
Unable to locate library Imp], a likely reason is
inconsistencies between compiling things _within CoqIDE_ vs _using
[coqc] from the command line_. This typically happens when there
are two incompatible versions of [coqc] installed on your
system (one associated with CoqIDE, and one associated with [coqc]
from the terminal). The workaround for this situation is
compiling using CoqIDE only (i.e. choosing "make" from the menu),
and avoiding using [coqc] directly at all. *)
(** END of instruction from << Software Foundation >>. *)
Module Playground_for_Program_Variables_and_Assertions.
(** This toy language only has one kind of program variable---variables of
integer type. And we can introduce some new program variables as below. *)
Local Instance a0: var := new_var().
Local Instance a1: var := new_var().
Local Instance a2: var := new_var().
(** And now, we can use assertions to talk about some properties. *)
Definition assert1: Assertion := {[a0]} = 0 AND {[a1]} = 1.
Definition assert2: Assertion := {[a0]} < {[a1]}.
(** Fibonacci numbers can be easily defined in Coq. But we do not bother to
define it here; we assume that such a function exists. *)
Hypothesis fib: Z -> Z.
(** Z denotes the integers in math. And this hypothesis says [fib] is a function from
integers to integers. We can use this function in Coq-defined Assertions as
well. *)
Definition assert3: Assertion :=
EXISTS k, {[a0]} = fib(k) AND {[a1]} = fib(k+1) AND {[a2]} = fib(k + 2).
End Playground_for_Program_Variables_and_Assertions.
(** To make things simple, we only allow two different kinds of expressions in
this toy language. Also, only limited arithmetic operators, logical operators
and programs commands are supported. Here is a brief illustration of its syntax.
a ::= Z
| var
| a + a
| a - a
| a * a
b ::= true
| false
| a == a
| a <= a
| ! b
| b && b
c ::= Skip
| var ::= a
| c ;; c
| If b Then c Else c EndIf
| While b Do c EndWhile
No function calls, no pointers, no memory model, and no break or continue commands are in
this language. Also, we assume that there is no bound on arithmetic results.
Although this language is simple, it is enough for us to write some interesting
programs. *)
Module Playground_for_Programs.
Local Instance A: var := new_var().
Local Instance B: var := new_var().
Local Instance TEMP: var := new_var().
Definition swap_two_int: com :=
TEMP ::= A;;
A ::= B;;
B ::= TEMP.
Definition decrease_to_zero: com :=
While ! (A <= 0) Do
A ::= A - 1
EndWhile.
Definition ABSOLUTE_VALUE: com :=
If A <= 0
Then B ::= 0 - A
Else B ::= A
EndIf.
End Playground_for_Programs.
(** One important property of this simple programming language is that it is
type-safe, i.e. there are no run-time errors. We intentionally omit "/"
and pointer operations to achieve this. This enables us to introduce new
concepts and theories in a concise way. But these theories can all be
generalized to complicated real programming languages, like C. *)
(* ################################################################# *)
(** * Pre/postconditions *)
(** Remark. Some material in this section and the next section is from <<
Software Foundation >> volume 2. *)
(** Next, we need a way of making formal claims about the behavior of commands.
In general, the behavior of a command is to transform one state to another, so
it is natural to express claims about commands in terms of assertions that are
true before and after the command executes:
- "If command [c] is started in a state satisfying assertion
[P], and if [c] eventually terminates in some final state,
then this final state will satisfy the assertion [Q]."
Such a claim is called a _Hoare Triple_ (霍尔三元组). The assertion [P] is
called the _precondition_ (前条件) of [c], while [Q] is the _postcondition_
(后条件). *)
(** This kind of claims about programs are widely used as specifications.
Computer scientists use the following notation to represent it.
{{ P }} c {{ Q }}
*)
(* ################################################################# *)
(** * Hoare triples as program semantics *)
(** Till now, we have learnt to use pre/postconditions to make formal claims
about programs. In other words, given a pair of precondition and postcondition,
we get a program specification. *)
(** Now, we turn to the other side. We will use Hoare triples to describe
program behavior. Formally speaking, we will use Hoare triples to define the
program semantics of our simple imperative programming language (指令式编程语言).
*)
(** Remark 1. We have not yet described how a program of [com] will execute! We
only have some intuition on it by the similarity between this simple language
and some other practical languages. Now we will do it formally for the first
time. *)
(** Remark 2. When we talk about a "program specification", we ask whether a
specific program satisfies that specification or not. When we talk about
"program semantics", we mean the semantics of some programming language,
which defines the behavior of specific programs. *)
(* ================================================================= *)
(** ** Sequence *)
(** The following axiom defines the behavior of sequential compositions. *)
Axiom hoare_seq : forall (P Q R: Assertion) (c1 c2: com),
{{P}} c1 {{Q}} ->
{{Q}} c2 {{R}} ->
{{P}} c1;;c2 {{R}}.
(** This axiom says, if the command [c1] takes any state where [P] holds to a
state where [Q] holds, and if [c2] takes any state where [Q] holds to one where
[R] holds, then doing [c1] followed by [c2] will take any state where [P] holds
to one where [R] holds.
*)
(** Remark. If we instantiate [P], [Q], [R] and [c1], [c2] with concrete
commands and assertions, this rule is only about the logical relation among
three concrete Hoare triples, or in other words, it only describes how the behavior
of two concrete programs [c1] and [c2] relates to their sequential combination.
But this rule is not about concrete programs and concrete assertions! It talks
about sequential combination in general. That's why we say that we are using
the relation among Hoare triples to define the semantics of this simple
programming language. *)
(* ================================================================= *)
(** ** Example: Swapping *)
(** We want to prove that the following program always swaps the values of
variables [X] and [Y]. Or, formally, for any [x] and [y],
{{ {[X]} = x AND {[Y]} = y }}
TEMP ::= X;;
X ::= Y;;
Y ::= TEMP
{{ {[X]} = y AND {[Y]} = x }}.
First, the following three triples are obviously true.
1. {{ {[X]} = x AND {[Y]} = y }}
TEMP ::= X
{{ {[Y]} = y AND {[TEMP]} = x }}
2. {{ {[Y]} = y AND {[TEMP]} = x }}
X ::= Y
{{ {[X]} = y AND {[TEMP]} = x }}
3. {{ {[X]} = y AND {[TEMP]} = x }}
Y ::= TEMP
{{ {[X]} = y AND {[Y]} = x }}.
Then, from 2 and 3, we know:
4. {{ {[Y]} = y AND {[TEMP]} = x }}
X ::= Y;;
Y ::= TEMP
{{ {[X]} = y AND {[Y]} = x }}.
In the end, from 1 and 4:
5. {{ {[X]} = x AND {[Y]} = y }}
TEMP ::= X;;
X ::= Y;;
Y ::= TEMP
{{ {[X]} = y AND {[Y]} = x }}.
*)
(* ================================================================= *)
(** ** Example: Swapping Using Addition and Subtraction *)
(** Here is a program that swaps the values of two variables using addition and
subtraction instead of by assigning to a temporary variable.
X ::= X + Y;;
Y ::= X - Y;;
X ::= X - Y
Again, we can prove it correct by three triples for assignments and [hoare_seq].
*)
(**
1. {{ {[X]} = x AND {[Y]} = y }}
X ::= X + Y
{{ {[X]} = x + y AND {[Y]} = y }}
2. {{ {[X]} = x + y AND {[Y]} = y }}
Y ::= X - Y
{{ {[X]} = x + y AND {[Y]} = x }}
3. {{ {[X]} = x + y AND {[Y]} = x }}
X ::= X - Y
{{ {[X]} = y AND {[Y]} = x }}.
*)
(* ================================================================= *)
(** ** Skip *)
(** Since [Skip] doesn't change the state, it preserves any assertion [P]. *)
Axiom hoare_skip : forall P,
{{P}} Skip {{P}}.
(* ================================================================= *)
(** ** Condition *)
(** What sort of rule do we want for describing the behavior of if-commands?
Certainly, if the same assertion [Q] holds after executing either of the
branches, then it holds after the whole conditional. So we might be tempted to
write: *)
Axiom hoare_if_first_try : forall P Q b c1 c2,
{{P}} c1 {{Q}} ->
{{P}} c2 {{Q}} ->
{{P}} If b Then c1 Else c2 EndIf {{Q}}.
(** But we can say something more precise. In the "then" branch, we know that
the boolean expression [b] evaluates to [true], and in the "else" branch, we
know it evaluates to [false]. Making this information available in the premises
of the rule forms a more complete definition of program semantics. Here is the
Coq formalization: *)
Axiom hoare_if : forall P Q b c1 c2,
{{ P AND {[b]} }} c1 {{ Q }} ->
{{ P AND NOT {[b]} }} c2 {{ Q }} ->
{{ P }} If b Then c1 Else c2 EndIf {{ Q }}.
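(** As a sketch of how [hoare_if] can be used, consider the [ABSOLUTE_VALUE]
program above. From the two premises
      {{ True AND {[A <= 0]} }} B ::= 0 - A {{ 0 <= {[B]} }}
      {{ True AND NOT {[A <= 0]} }} B ::= A {{ 0 <= {[B]} }}
the rule concludes
      {{ True }} If A <= 0 Then B ::= 0 - A Else B ::= A EndIf {{ 0 <= {[B]} }}.
The first premise holds because [A <= 0] makes [0 - A] nonnegative; the second
holds because [NOT (A <= 0)] means [A] is positive. *)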
(* Thu Mar 5 01:20:37 CST 2020 *)
|
[STATEMENT]
lemma lunit_naturality:
assumes "arr \<mu>"
shows "\<mu> \<cdot> \<l>[dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
let ?a = "src \<mu>" and ?b = "trg \<mu>"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
interpret Left: subcategory V \<open>left ?b\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subcategory (\<cdot>) (left (trg \<mu>))
[PROOF STEP]
using assms obj_trg left_hom_is_subcategory weak_unit_self_composable
[PROOF STATE]
proof (prove)
using this:
arr \<mu>
arr ?\<mu> \<Longrightarrow> obj (trg ?\<mu>)
arr ?\<mu> \<Longrightarrow> subcategory (\<cdot>) (left ?\<mu>)
weak_unit ?a \<Longrightarrow> ide ?a
weak_unit ?a \<Longrightarrow> ide (?a \<star> ?a)
weak_unit ?a \<Longrightarrow> ?a \<star> ?a \<noteq> null
goal (1 subgoal):
1. subcategory (\<cdot>) (left (trg \<mu>))
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
interpret Left: left_hom_with_unit V H \<a> \<open>\<i>[?b]\<close> ?b
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. left_hom_with_unit (\<cdot>) (\<star>) \<a> \<i>[trg \<mu>] (trg \<mu>)
[PROOF STEP]
using assms obj_is_weak_unit iso_unit\<^sub>P\<^sub>B\<^sub>U
[PROOF STATE]
proof (prove)
using this:
arr \<mu>
obj ?a \<Longrightarrow> weak_unit ?a
weak_unit ?a \<Longrightarrow> local.iso \<i>[?a]
goal (1 subgoal):
1. left_hom_with_unit (\<cdot>) (\<star>) \<a> \<i>[trg \<mu>] (trg \<mu>)
[PROOF STEP]
by (unfold_locales, auto)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
interpret Left.L: endofunctor \<open>Left ?b\<close> Left.L
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. endofunctor Left.comp Left.L
[PROOF STEP]
using assms endofunctor_H\<^sub>L [of ?b] weak_unit_self_composable obj_trg obj_is_weak_unit
[PROOF STATE]
proof (prove)
using this:
arr \<mu>
\<lbrakk>ide (trg \<mu>); trg \<mu> \<star> trg \<mu> \<noteq> null\<rbrakk> \<Longrightarrow> endofunctor Left.comp Left.L
weak_unit ?a \<Longrightarrow> ide ?a
weak_unit ?a \<Longrightarrow> ide (?a \<star> ?a)
weak_unit ?a \<Longrightarrow> ?a \<star> ?a \<noteq> null
arr ?\<mu> \<Longrightarrow> obj (trg ?\<mu>)
obj ?a \<Longrightarrow> weak_unit ?a
goal (1 subgoal):
1. endofunctor Left.comp Left.L
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
have 1: "Left.in_hom \<mu> (dom \<mu>) (cod \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>)
[PROOF STEP]
using assms Left.hom_char Left.arr_char\<^sub>S\<^sub>b\<^sub>C left_def composable_char\<^sub>P\<^sub>B\<^sub>H obj_trg
[PROOF STATE]
proof (prove)
using this:
arr \<mu>
Left.hom ?a ?b = hom ?a ?b \<inter> Collect (left (trg \<mu>))
Left.arr ?f = left (trg \<mu>) ?f
left ?\<tau> \<equiv> \<lambda>\<mu>. ?\<tau> \<star> \<mu> \<noteq> null
(?\<nu> \<star> ?\<mu> \<noteq> null) = (arr ?\<mu> \<and> arr ?\<nu> \<and> src ?\<nu> = trg ?\<mu>)
arr ?\<mu> \<Longrightarrow> obj (trg ?\<mu>)
goal (1 subgoal):
1. Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
have 2: "Left.in_hom \<l>[Left.dom \<mu>] (?b \<star> dom \<mu>) (dom \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Left.in_hom \<l>[Left.dom \<mu>] (trg \<mu> \<star> local.dom \<mu>) (local.dom \<mu>)
[PROOF STEP]
unfolding lunit_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Left.in_hom (left_hom_with_unit.lunit (\<cdot>) (\<star>) \<a> \<i>[trg (Left.dom \<mu>)] (trg (Left.dom \<mu>)) (Left.dom \<mu>)) (trg \<mu> \<star> local.dom \<mu>) (local.dom \<mu>)
[PROOF STEP]
using assms 1 Left.in_hom_char\<^sub>S\<^sub>b\<^sub>C trg_dom Left.lunit_char(1) H\<^sub>L_def
Left.arr_char\<^sub>S\<^sub>b\<^sub>C Left.dom_char\<^sub>S\<^sub>b\<^sub>C Left.ide_dom
[PROOF STATE]
proof (prove)
using this:
arr \<mu>
Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>)
Left.in_hom ?f ?a ?b = (Left.arr ?a \<and> Left.arr ?b \<and> Left.arr ?f \<and> \<guillemotleft>?f : ?a \<Rightarrow> ?b\<guillemotright>)
trg (local.dom ?\<mu>) = trg ?\<mu>
Left.ide ?f \<Longrightarrow> Left.in_hom (Left.lunit ?f) (Left.L ?f) ?f
H\<^sub>L ?g \<equiv> (\<star>) ?g
Left.arr ?f = left (trg \<mu>) ?f
Left.dom ?f = (if Left.arr ?f then local.dom ?f else null)
Left.arr ?f \<Longrightarrow> Left.ide (Left.dom ?f)
goal (1 subgoal):
1. Left.in_hom (left_hom_with_unit.lunit (\<cdot>) (\<star>) \<a> \<i>[trg (Left.dom \<mu>)] (trg (Left.dom \<mu>)) (Left.dom \<mu>)) (trg \<mu> \<star> local.dom \<mu>) (local.dom \<mu>)
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
Left.in_hom \<l>[Left.dom \<mu>] (trg \<mu> \<star> local.dom \<mu>) (local.dom \<mu>)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
have 3: "Left.in_hom \<l>[Left.cod \<mu>] (?b \<star> cod \<mu>) (cod \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Left.in_hom \<l>[Left.cod \<mu>] (trg \<mu> \<star> cod \<mu>) (cod \<mu>)
[PROOF STEP]
unfolding lunit_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Left.in_hom (left_hom_with_unit.lunit (\<cdot>) (\<star>) \<a> \<i>[trg (Left.cod \<mu>)] (trg (Left.cod \<mu>)) (Left.cod \<mu>)) (trg \<mu> \<star> cod \<mu>) (cod \<mu>)
[PROOF STEP]
using assms 1 Left.in_hom_char\<^sub>S\<^sub>b\<^sub>C trg_cod Left.lunit_char(1) H\<^sub>L_def
Left.cod_char\<^sub>S\<^sub>b\<^sub>C Left.ide_cod
[PROOF STATE]
proof (prove)
using this:
arr \<mu>
Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>)
Left.in_hom ?f ?a ?b = (Left.arr ?a \<and> Left.arr ?b \<and> Left.arr ?f \<and> \<guillemotleft>?f : ?a \<Rightarrow> ?b\<guillemotright>)
trg (cod ?\<mu>) = trg ?\<mu>
Left.ide ?f \<Longrightarrow> Left.in_hom (Left.lunit ?f) (Left.L ?f) ?f
H\<^sub>L ?g \<equiv> (\<star>) ?g
Left.cod ?f = (if Left.arr ?f then cod ?f else null)
Left.arr ?f \<Longrightarrow> Left.ide (Left.cod ?f)
goal (1 subgoal):
1. Left.in_hom (left_hom_with_unit.lunit (\<cdot>) (\<star>) \<a> \<i>[trg (Left.cod \<mu>)] (trg (Left.cod \<mu>)) (Left.cod \<mu>)) (trg \<mu> \<star> cod \<mu>) (cod \<mu>)
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
Left.in_hom \<l>[Left.cod \<mu>] (trg \<mu> \<star> cod \<mu>) (cod \<mu>)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
have 4: "Left.in_hom (Left.L \<mu>) (?b \<star> dom \<mu>) (?b \<star> cod \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Left.in_hom (Left.L \<mu>) (trg \<mu> \<star> local.dom \<mu>) (trg \<mu> \<star> cod \<mu>)
[PROOF STEP]
using 1 Left.L.preserves_hom [of \<mu> "dom \<mu>" "cod \<mu>"] H\<^sub>L_def
[PROOF STATE]
proof (prove)
using this:
Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>)
Left.in_hom \<mu> (local.dom \<mu>) (cod \<mu>) \<Longrightarrow> Left.in_hom (Left.L \<mu>) (Left.L (local.dom \<mu>)) (Left.L (cod \<mu>))
H\<^sub>L ?g \<equiv> (\<star>) ?g
goal (1 subgoal):
1. Left.in_hom (Left.L \<mu>) (trg \<mu> \<star> local.dom \<mu>) (trg \<mu> \<star> cod \<mu>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Left.in_hom (Left.L \<mu>) (trg \<mu> \<star> local.dom \<mu>) (trg \<mu> \<star> cod \<mu>)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
[PROOF STEP]
by (metis "1" "2" H\<^sub>L_def Left.comp_simp Left.in_homE Left.lunit_naturality
Left.seqI' lunit_def trg_cod trg_dom)
[PROOF STATE]
proof (state)
this:
\<mu> \<cdot> \<l>[local.dom \<mu>] = \<l>[cod \<mu>] \<cdot> (trg \<mu> \<star> \<mu>)
goal:
No subgoals!
[PROOF STEP]
qed |
If $S$ is a Lebesgue measurable set, then for every $\epsilon > 0$, there exists an open set $T$ such that $S \subseteq T$, $T - S$ is Lebesgue measurable, and $\mu(T - S) < \epsilon$. |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Robert Y. Lewis
Generalizes the Cauchy completion of (ℚ, abs) to the completion of a
commutative ring with absolute value.
-/
import data.real.cau_seq
namespace cau_seq.completion
open cau_seq
section
parameters {α : Type*} [discrete_linear_ordered_field α]
parameters {β : Type*} [comm_ring β] {abv : β → α} [is_absolute_value abv]
def Cauchy := @quotient (cau_seq _ abv) cau_seq.equiv
def mk : cau_seq _ abv → Cauchy := quotient.mk
@[simp] theorem mk_eq_mk (f) : @eq Cauchy ⟦f⟧ (mk f) := rfl
theorem mk_eq {f g} : mk f = mk g ↔ f ≈ g := quotient.eq
def of_rat (x : β) : Cauchy := mk (const abv x)
instance : has_zero Cauchy := ⟨of_rat 0⟩
instance : has_one Cauchy := ⟨of_rat 1⟩
instance : inhabited Cauchy := ⟨0⟩
theorem of_rat_zero : of_rat 0 = 0 := rfl
theorem of_rat_one : of_rat 1 = 1 := rfl
@[simp] theorem mk_eq_zero {f} : mk f = 0 ↔ lim_zero f :=
by have : mk f = 0 ↔ lim_zero (f - 0) := quotient.eq;
rwa sub_zero at this
instance : has_add Cauchy :=
⟨λ x y, quotient.lift_on₂ x y (λ f g, mk (f + g)) $
λ f₁ g₁ f₂ g₂ hf hg, quotient.sound $
by simpa [(≈), setoid.r] using add_lim_zero hf hg⟩
@[simp] theorem mk_add (f g : cau_seq β abv) : mk f + mk g = mk (f + g) := rfl
instance : has_neg Cauchy :=
⟨λ x, quotient.lift_on x (λ f, mk (-f)) $
λ f₁ f₂ hf, quotient.sound $
by simpa [(≈), setoid.r] using neg_lim_zero hf⟩
@[simp] theorem mk_neg (f : cau_seq β abv) : -mk f = mk (-f) := rfl
instance : has_mul Cauchy :=
⟨λ x y, quotient.lift_on₂ x y (λ f g, mk (f * g)) $
λ f₁ g₁ f₂ g₂ hf hg, quotient.sound $
by simpa [(≈), setoid.r, mul_add, mul_comm] using
add_lim_zero (mul_lim_zero g₁ hf) (mul_lim_zero f₂ hg)⟩
@[simp] theorem mk_mul (f g : cau_seq β abv) : mk f * mk g = mk (f * g) := rfl
theorem of_rat_add (x y : β) : of_rat (x + y) = of_rat x + of_rat y :=
congr_arg mk (const_add _ _)
theorem of_rat_neg (x : β) : of_rat (-x) = -of_rat x :=
congr_arg mk (const_neg _)
theorem of_rat_mul (x y : β) : of_rat (x * y) = of_rat x * of_rat y :=
congr_arg mk (const_mul _ _)
private lemma zero_def : 0 = mk 0 := rfl
private lemma one_def : 1 = mk 1 := rfl
instance : comm_ring Cauchy :=
by refine { neg := has_neg.neg,
add := (+), zero := 0, mul := (*), one := 1, .. };
{ repeat {refine λ a, quotient.induction_on a (λ _, _)},
simp [zero_def, one_def, mul_left_comm, mul_comm, mul_add] }
theorem of_rat_sub (x y : β) : of_rat (x - y) = of_rat x - of_rat y :=
congr_arg mk (const_sub _ _)
end
local attribute [instance] classical.prop_decidable
section
parameters {α : Type*} [discrete_linear_ordered_field α]
parameters {β : Type*} [discrete_field β] {abv : β → α} [is_absolute_value abv]
local notation `Cauchy` := @Cauchy _ _ _ _ abv _
noncomputable instance : has_inv Cauchy :=
⟨λ x, quotient.lift_on x
(λ f, mk $ if h : lim_zero f then 0 else inv f h) $
λ f g fg, begin
have := lim_zero_congr fg,
by_cases hf : lim_zero f,
{ simp [hf, this.1 hf, setoid.refl] },
{ have hg := mt this.2 hf, simp [hf, hg],
have If : mk (inv f hf) * mk f = 1 := mk_eq.2 (inv_mul_cancel hf),
have Ig : mk (inv g hg) * mk g = 1 := mk_eq.2 (inv_mul_cancel hg),
rw [mk_eq.2 fg, ← Ig] at If,
rw mul_comm at Ig,
rw [← mul_one (mk (inv f hf)), ← Ig, ← mul_assoc, If,
mul_assoc, Ig, mul_one] }
end⟩
@[simp] theorem inv_zero : (0 : Cauchy)⁻¹ = 0 :=
congr_arg mk $ by rw dif_pos; [refl, exact zero_lim_zero]
@[simp] theorem inv_mk {f} (hf) : (@mk α _ β _ abv _ f)⁻¹ = mk (inv f hf) :=
congr_arg mk $ by rw dif_neg
lemma cau_seq_zero_ne_one : ¬ (0 : cau_seq _ abv) ≈ 1 := λ h,
have lim_zero (1 - 0), from setoid.symm h,
have lim_zero 1, by simpa,
one_ne_zero $ const_lim_zero.1 this
lemma zero_ne_one : (0 : Cauchy) ≠ 1 :=
λ h, cau_seq_zero_ne_one $ mk_eq.1 h
protected theorem inv_mul_cancel {x : Cauchy} : x ≠ 0 → x⁻¹ * x = 1 :=
quotient.induction_on x $ λ f hf, begin
simp at hf, simp [hf],
exact quotient.sound (cau_seq.inv_mul_cancel hf)
end
noncomputable def discrete_field : discrete_field Cauchy :=
{ inv := has_inv.inv,
inv_mul_cancel := @cau_seq.completion.inv_mul_cancel,
mul_inv_cancel := λ x x0, by rw [mul_comm, cau_seq.completion.inv_mul_cancel x0],
zero_ne_one := zero_ne_one,
inv_zero := inv_zero,
has_decidable_eq := by apply_instance,
..cau_seq.completion.comm_ring }
local attribute [instance] discrete_field
theorem of_rat_inv (x : β) : of_rat (x⁻¹) = ((of_rat x)⁻¹ : Cauchy) :=
congr_arg mk $ by split_ifs with h; try {simp [const_lim_zero.1 h]}; refl
theorem of_rat_div (x y : β) : of_rat (x / y) = (of_rat x / of_rat y : Cauchy) :=
by simp only [div_eq_inv_mul, of_rat_inv, of_rat_mul]
end
end cau_seq.completion |
/*
* Copyright (c) 2014, Columbia University
* All rights reserved.
*
* This software was developed by Theofilos Petsios <[email protected]>
* at Columbia University, New York, NY, USA, in September 2014.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Columbia University nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <boost/algorithm/string.hpp>
#include <cctype>
#include <fstream>
#include <sstream>
#include <string>
#include <iostream>
#include <iterator>
#include <vector>
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/IRBuilder.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "Infoflow.h"
#include "Slice.h"
#include "SQLRand.h"
using std::set;
using namespace llvm;
using namespace deps;
namespace {
//FIXME need to handle constant assignments as well!
//What about environment variables?
static const struct CallTaintEntry bLstSourceSummaries[] = {
//FIXME check which args need to be tainted. For now we are tainting
//the variable part to see if it leads to a mysql query
// function tainted values tainted direct memory tainted root ptrs
{ "strcpy", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "strncpy", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "strcat", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "strncat", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "sprintf", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "snprintf",TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "memcpy", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "memmove", TAINTS_NOTHING, TAINTS_ARG_1, TAINTS_NOTHING },
{ "getenv", TAINTS_RETURN_VAL, TAINTS_ARG_1, TAINTS_NOTHING },
{ 0, TAINTS_NOTHING, TAINTS_NOTHING, TAINTS_NOTHING }
};
static const struct CallTaintEntry sanitizeSummaries[] = {
// function, tainted values, tainted direct memory, tainted root ptrs
{ "mysql_real_query", TAINTS_ARG_2, TAINTS_NOTHING, TAINTS_NOTHING },
{ "mysql_query", TAINTS_ARG_2, TAINTS_NOTHING, TAINTS_NOTHING },
{ "PQexec", TAINTS_ARG_2, TAINTS_NOTHING, TAINTS_NOTHING },
{ 0, TAINTS_NOTHING, TAINTS_NOTHING, TAINTS_NOTHING }
};
/* ****************************************************************************
* ============================================================================
* Taint Functions
* ============================================================================
* ****************************************************************************/
static const CallTaintEntry *
findEntryForFunction(const CallTaintEntry *Summaries,
const std::string &FuncName) {
unsigned Index;
for (Index = 0; Summaries[Index].Name; ++Index) {
if (Summaries[Index].Name == FuncName)
return &Summaries[Index];
}
// Return the default summary.
return &Summaries[Index];
}
void
SQLRandPass::taintForward(std::string srcKind,
CallInst *ci,
const CallTaintEntry *entry)
{
const CallTaintSummary *vSum = &(entry->ValueSummary);
const CallTaintSummary *dSum = &(entry->DirectPointerSummary);
const CallTaintSummary *rSum = &(entry->RootPointerSummary);
/* vsum */
if (vSum->TaintsReturnValue)
infoflow->setTainted(srcKind, *ci);
for (unsigned ArgIndex = 0; ArgIndex < vSum->NumArguments; ++ArgIndex) {
if (vSum->TaintsArgument[ArgIndex])
infoflow->setTainted(srcKind, *(ci->getOperand(ArgIndex)));
}
/* dsum */
if (dSum->TaintsReturnValue)
infoflow->setDirectPtrTainted(srcKind, *ci);
for (unsigned ArgIndex = 0; ArgIndex < dSum->NumArguments; ++ArgIndex) {
if (dSum->TaintsArgument[ArgIndex])
infoflow->setDirectPtrTainted(srcKind, *(ci->getOperand(ArgIndex)));
}
/* rsum */
if (rSum->TaintsReturnValue)
infoflow->setReachPtrTainted(srcKind, *ci);
for (unsigned ArgIndex = 0; ArgIndex < rSum->NumArguments; ++ArgIndex) {
if (rSum->TaintsArgument[ArgIndex])
infoflow->setReachPtrTainted(srcKind, *(ci->getOperand(ArgIndex)));
}
}
void
SQLRandPass::taintBackwards(std::string sinkKind,
CallInst *ci,
const CallTaintEntry *entry)
{
const CallTaintSummary *vSum = &(entry->ValueSummary);
const CallTaintSummary *dSum = &(entry->DirectPointerSummary);
const CallTaintSummary *rSum = &(entry->RootPointerSummary);
/* vsum */
if (vSum->TaintsReturnValue)
infoflow->setUntainted(sinkKind, *ci);
for (unsigned ArgIndex = 0; ArgIndex < vSum->NumArguments; ++ArgIndex) {
if (vSum->TaintsArgument[ArgIndex])
infoflow->setUntainted(sinkKind, *(ci->getOperand(ArgIndex)));
}
/* dsum */
if (dSum->TaintsReturnValue)
infoflow->setDirectPtrUntainted(sinkKind, *ci);
for (unsigned ArgIndex = 0; ArgIndex < dSum->NumArguments; ++ArgIndex) {
if (dSum->TaintsArgument[ArgIndex])
infoflow->setDirectPtrUntainted(sinkKind,
*(ci->getOperand(ArgIndex)));
}
/* rsum */
if (rSum->TaintsReturnValue)
infoflow->setReachPtrUntainted(sinkKind, *ci);
for (unsigned ArgIndex = 0; ArgIndex < rSum->NumArguments; ++ArgIndex) {
if (rSum->TaintsArgument[ArgIndex])
infoflow->setReachPtrUntainted(sinkKind,
*(ci->getOperand(ArgIndex)));
}
}
bool
SQLRandPass::checkBackwardTainted(Value &V, InfoflowSolution* soln, bool direct)
{
bool ret = (!soln->isTainted(V));
if (direct)
ret = ret || (!soln->isDirectPtrTainted(V));
return ret;
}
bool
SQLRandPass::checkForwardTainted(Value &V, InfoflowSolution* soln, bool direct)
{
bool ret = (soln->isTainted(V));
if (direct)
ret = ret || (soln->isDirectPtrTainted(V));
return ret;
}
/* ****************************************************************************
* ============================================================================
* Solution Functions
* ============================================================================
* ****************************************************************************/
InfoflowSolution *
SQLRandPass::getForwardSolFromEntry(std::string srcKind,
CallInst *ci,
const CallTaintEntry *entry)
{
//XXX Do not change order
taintForward(srcKind, ci, entry);
std::set<std::string> kinds;
kinds.insert(srcKind);
//This does forward analysis
InfoflowSolution *fsoln = infoflow->leastSolution(kinds, false, true);
return fsoln;
}
InfoflowSolution *
SQLRandPass::getBackwardsSol(std::string sinkKind, CallInst *ci)
{
//XXX Do not change order
infoflow->setUntainted(sinkKind, *ci);
std::set<std::string> kinds;
kinds.insert(sinkKind);
InfoflowSolution *fsoln = infoflow->greatestSolution(kinds, false);
return fsoln;
}
InfoflowSolution *
SQLRandPass::getBackwardsSolFromEntry(std::string sinkKind,
CallInst *ci,
const CallTaintEntry *entry)
{
//XXX Do not change order
taintBackwards(sinkKind, ci, entry);
std::set<std::string> kinds;
kinds.insert(sinkKind);
InfoflowSolution *fsoln = infoflow->greatestSolution(kinds, false);
return fsoln;
}
InfoflowSolution *
SQLRandPass::getForwardSolFromGlobal(std::string srcKind, Value *val)
{
infoflow->setTainted(srcKind, *val);
std::set<std::string> kinds;
kinds.insert(srcKind);
InfoflowSolution *fsoln = infoflow->leastSolution(kinds, false, true);
return fsoln;
}
/* ****************************************************************************
* ============================================================================
* Main Pass
* ============================================================================
* ****************************************************************************/
int
SQLRandPass::doInitialization(Module &M)
{
infoflow = &getAnalysis<Infoflow>();
dbg("Initialization");
int sqlType = getSQLType(M);
if (sqlType == 0) {
/* MySQL */
dbg("Found db: MySQL");
hashSQLKeywords(true);
} else if (sqlType == 1) {
/* PGSQL */
dbg("Found db: PostgreSQL");
hashSQLKeywords(false);
} else {
/* abort */
return -1;
}
unique_id = 0;
for (Module::global_iterator ii = M.global_begin();
ii != M.global_end();
++ii){
GlobalVariable *gv = ii;
if (gv->isConstant()) {
std::string name = gv->getName().str();
for (GlobalVariable::use_iterator U = gv->use_begin();
U != gv->use_end();
U++ ) {
User *user = dyn_cast<User>(*U);
Value *val = user->getOperand(0);
if (isa<ConstantExpr>(user) && (val != NULL)) {
InfoflowSolution *fsoln =
getForwardSolFromGlobal(name, val);
if (backwardsFromGlobal(M, fsoln, val) &&
gv->hasInitializer()) {
dbg("FOUND mysql from global Variable");
                    ConstantDataSequential *cds =
                        dyn_cast<ConstantDataSequential>(gv->getInitializer());
                    /* skip non-string initializers */
                    if (cds == NULL || !cds->isString())
                        continue;
                    std::string sanitized =
                        sanitizeString(cds->getAsString().str());
Constant *san =
ConstantDataArray::getString(M.getContext(),
sanitized, false);
if (san->getType() == gv->getInitializer()->getType()) {
gv->setInitializer(san);
}
}
}
}
}
}
return 0;
}
void
SQLRandPass::sanitizeLiteralsBackwards(Module &M, InfoflowSolution *sol)
{
for (Module::iterator mi = M.begin(); mi != M.end(); mi++) {
Function& F = *mi;
for (Function::iterator bi = F.begin(); bi != F.end(); bi++) {
BasicBlock& B = *bi;
for (BasicBlock::iterator ii = B.begin(); ii !=B.end(); ii++) {
if (CallInst* ci = dyn_cast<CallInst>(ii)) {
Function* f = ci->getCalledFunction();
if (!f)
continue;
for (size_t i = 0; i < ci->getNumArgOperands(); i++) {
if (isLiteral(ci->getArgOperand(i)) &&
checkBackwardTainted(*(ci->getArgOperand(i)),sol)) {
Value *s = sanitizeArgOp(M,
ci->getArgOperand(i));
ci->setArgOperand(i, s);
}
}
}
}
}
}
}
void
SQLRandPass::doFinalization(Module &M)
{
dbg("Removing checks");
for (Module::iterator mi = M.begin(); mi != M.end(); mi++) {
Function& F = *mi;
for (Function::iterator bi = F.begin(); bi != F.end(); bi++) {
BasicBlock& B = *bi;
for (BasicBlock::iterator ii = B.begin(); ii !=B.end(); ii++) {
if (CallInst* ci = dyn_cast<CallInst>(ii)) {
Function* f = ci->getCalledFunction();
if (!f)
continue;
const CallTaintEntry *entry =
findEntryForFunction(sanitizeSummaries, f->getName());
if (entry->Name) {
/* Update the arg if it is a ConstExpr */
if (isLiteral(ci->getArgOperand(1))) {
Value *s = sanitizeArgOp(M,
ci->getArgOperand(1));
ci->setArgOperand(1, s);
} else {
std::string sinkKind = getKindId("sql", &unique_id);
InfoflowSolution *soln = getBackwardsSol(sinkKind,
ci);
sanitizeLiteralsBackwards(M, soln);
}
/* Construct Function */
insertSQLCheckFunction(M,
"__sqlrand_" + f->getName().str(),
ci,
ii);
ii = B.begin();
}
}
}
}
}
}
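/*
 * Rewrite the string literal behind a ConstantExpr argument: look through
 * the expression to the underlying global, replace its initializer with
 * the keyword-randomized string, and hand back the (possibly updated)
 * operand.
 */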
Value *
SQLRandPass::sanitizeArgOp(Module &M, Value *op)
{
    ConstantExpr *constExpr = dyn_cast<ConstantExpr>(op);
    if (constExpr == NULL)
        return op;
    GlobalVariable *gv = dyn_cast<GlobalVariable>(constExpr->getOperand(0));
if (gv == NULL || !gv->hasInitializer())
return op;
std::string var_name = gv->getName().str();
ConstantDataSequential *cds =
dyn_cast<ConstantDataSequential>(gv->getInitializer());
if (cds != NULL && cds->isString()) {
std::string sanitized = sanitizeString(cds->getAsString().str());
Constant *san =
ConstantDataArray::getString(M.getContext(), sanitized, false);
if (san->getType() == gv->getInitializer()->getType()) {
dbgMsg(cds->getAsString().str() + " becomes :", sanitized);
gv->setInitializer(san);
} else {
san->getType()->dump();
errs() << "\t";
gv->getInitializer()->dump();
errs() << "\n";
}
}
constExpr->setOperand(0, gv);
return dyn_cast<Value>(constExpr);
}
bool
SQLRandPass::runOnModule(Module &M)
{
int ret = doInitialization(M);
/* If we did not find SQL abort */
if (ret == -1)
return false;
for (Module::iterator mi = M.begin(); mi != M.end(); mi++) {
Function& F = *mi;
for (Function::iterator bi = F.begin(); bi != F.end(); bi++) {
BasicBlock& B = *bi;
for (BasicBlock::iterator ii = B.begin(); ii !=B.end(); ii++) {
if (CallInst* ci = dyn_cast<CallInst>(ii)) {
Function* f = ci->getCalledFunction();
if (!f)
continue;
/* Check if function needs to be sanitized */
const CallTaintEntry *entry =
findEntryForFunction(bLstSourceSummaries, f->getName());
if (entry->Name) {
std::string srcKind = getKindId("src", &unique_id);
InfoflowSolution *fsoln =
getForwardSolFromEntry(srcKind, ci, entry);
if (backwardSlicingBlacklisting(M, fsoln, ci)) {
if (f->getName() != "getenv") {
/* If we found mysql sanitize */
for (size_t i = 0;
i < ci->getNumArgOperands();
i++) {
if (isLiteral(ci->getArgOperand(i))) {
Value *s = sanitizeArgOp(M,
ci->getArgOperand(i));
ci->setArgOperand(i, s);
}
}
} else {
dbg("getenv called");
if (isLiteral(ci)) {
dbg("Literal");
//Value *s = sanitizeArgOp(M, ci);
//ci = s;
}
}
}
}
}
}
}
}
doFinalization(M);
return false;
}
bool
SQLRandPass::backwardSlicingBlacklisting(Module &M,
InfoflowSolution* fsoln,
CallInst* srcCI)
{
for (Module::iterator mi = M.begin(); mi != M.end(); mi++) {
Function& F = *mi;
for (Function::iterator bi = F.begin(); bi != F.end(); bi++) {
BasicBlock& B = *bi;
for (BasicBlock::iterator ii = B.begin(); ii !=B.end(); ii++) {
if (CallInst* ci = dyn_cast<CallInst>(ii)) {
Function *f = ci->getCalledFunction();
if (!f)
continue;
const CallTaintEntry *entry =
findEntryForFunction(sanitizeSummaries, f->getName());
if (entry->Name) {
if (checkForwardTainted(*(ci->getOperand(1)), fsoln)) {
//this returns all sources that are tainted
std::string sinkKind = getKindId("sql", &unique_id);
InfoflowSolution *soln = getBackwardsSol(sinkKind,
ci);
//check if source is in our list
if (checkBackwardTainted(*srcCI, soln))
return true;
}
}
}
}
}
}
return false;
}
bool
SQLRandPass::backwardsFromGlobal(Module &M,
InfoflowSolution* fsoln,
Value* val)
{
for (Module::iterator mi = M.begin(); mi != M.end(); mi++) {
Function& F = *mi;
for (Function::iterator bi = F.begin(); bi != F.end(); bi++) {
BasicBlock& B = *bi;
for (BasicBlock::iterator ii = B.begin(); ii !=B.end(); ii++) {
if (CallInst* ci = dyn_cast<CallInst>(ii)) {
Function *f = ci->getCalledFunction();
if (!f)
continue;
const CallTaintEntry *entry =
findEntryForFunction(sanitizeSummaries, f->getName());
if (entry->Name) {
if (checkForwardTainted(*(ci->getOperand(1)), fsoln)) {
dbg("Found call from global (!)");
return true;
}
}
}
}
}
}
return false;
}
/* ****************************************************************************
* ============================================================================
* HELPER FUNCTIONS
* ============================================================================
* ****************************************************************************/
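/*
 * Replace a call to mysql_query()/mysql_real_query()/PQexec() with a call
 * to the matching __sqlrand_* wrapper, preserving the original arguments,
 * calling convention, tail-call flag, and attributes.
 */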
void
SQLRandPass::insertSQLCheckFunction(Module &M,
std::string name,
CallInst *ci,
BasicBlock::iterator ii)
{
Constant *fc = NULL;
/* Create Args */
std::vector<Value *> fargs;
if (name == "__sqlrand_mysql_real_query") {
fc = M.getOrInsertFunction(name,
/* type */ ci->getType(),
/* arg0 */ ci->getArgOperand(0)->getType(),
/* arg1 */ ci->getArgOperand(1)->getType(),
/* arg2 */ ci->getArgOperand(2)->getType(),
/* Linkage */ GlobalValue::ExternalLinkage,
(Type *)0);
/* Push argument to Args */
fargs.push_back(ci->getArgOperand(0));
fargs.push_back(ci->getArgOperand(1));
fargs.push_back(ci->getArgOperand(2));
} else if ((name == "__sqlrand_mysql_query") ||
(name == "__sqlrand_PQexec")) {
fc = M.getOrInsertFunction(name,
/* type */ ci->getType(),
/* arg0 */ ci->getArgOperand(0)->getType(),
/* arg1 */ ci->getArgOperand(1)->getType(),
/* Linkage */ GlobalValue::ExternalLinkage,
(Type *)0);
/* Push argument to Args */
fargs.push_back(ci->getArgOperand(0));
fargs.push_back(ci->getArgOperand(1));
}
ArrayRef<Value *> functionArguments(fargs);
CallInst *sqlCheck = CallInst::Create(fc, functionArguments, "");
sqlCheck->setCallingConv(ci->getCallingConv());
sqlCheck->setTailCall(ci->isTailCall());
sqlCheck->setAttributes(ci->getAttributes());
ReplaceInstWithInst(ci, sqlCheck);
}
int
SQLRandPass::getSQLType(Module &M)
{
for (Module::iterator mi = M.begin(); mi != M.end(); mi++) {
Function& F = *mi;
for (Function::iterator bi = F.begin(); bi != F.end(); bi++) {
BasicBlock& B = *bi;
for (BasicBlock::iterator ii = B.begin(); ii !=B.end(); ii++) {
if (CallInst* ci = dyn_cast<CallInst>(ii)) {
Function* f = ci->getCalledFunction();
if (!f)
continue;
if (StringRef(f->getName()).startswith("mysql_"))
return 0;
if (f->getName() == "PQexec")
return 1;
}
}
}
}
return -1;
}
/*
* Checks if @word is one of MySQL reserved keywords
*/
bool
SQLRandPass::isKeyword(std::string word)
{
    /* convert to uppercase first so as not to miss anything */
    std::string up_word = boost::to_upper_copy(word);
    return MYSQL_KEYWORDS.find(up_word) != MYSQL_KEYWORDS.end();
}
/*
* For now just add a padding
*/
std::string
SQLRandPass::pad(std::string word, std::string suffix)
{
return word + "_" + suffix;
}
// trim from start
std::string &
SQLRandPass::ltrim(std::string &s) {
s.erase(s.begin(),
std::find_if(s.begin(),
s.end(),
std::not1(std::ptr_fun<int, int>(std::isspace))
)
);
return s;
}
// trim from end
std::string &
SQLRandPass::rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(),
s.rend(),
std::not1(std::ptr_fun<int, int>(std::isspace))
).base(),
s.end());
return s;
}
/*
* Sanitize all possible keywords in the string. Leave the rest intact
*/
std::string
SQLRandPass::sanitizeString(std::string input)
{
std::string word, sanitized, sanitizedWord;
sanitized = "";
size_t i = 0;
    while (i < input.length()) {
        if (isalnum(input[i])) {
            word = "";
            while (i < input.length() &&
                   (isalnum(input[i]) || input[i] == '_')) {
                word += input[i];
                i++;
            }
            /* compare in uppercase so as not to miss anything */
            std::string up_word = boost::to_upper_copy(word);
            if (isKeyword(up_word)) {
                sanitizedWord = keyToHash[up_word];
                sanitized += sanitizedWord;
            } else {
                sanitized += word;
            }
        }
        /* copy the delimiter (if any) unchanged */
        if (i < input.length())
            sanitized += input[i++];
    }
return sanitized;
}
//FIXME
bool
SQLRandPass::isLiteral(Value *operand)
{
return (isa<ConstantExpr>(operand));
}
//FIXME
bool
SQLRandPass::isVariable(Value *operand)
{
return (isa<Instruction>(operand));
}
void
SQLRandPass::hashSQLKeywords(bool isMySQL)
{
std::string hash, key;
std::ofstream outfile;
std::ifstream infile;
if (isMySQL)
infile.open(MYSQL_MAPPING_FILE, std::ios::binary | std::ios::in);
else
infile.open(PGSQL_MAPPING_FILE, std::ios::binary | std::ios::in);
if (infile.is_open()) {
std::string line;
while (std::getline(infile, line)) {
std::istringstream iss(line);
iss >> hash;
iss >> key;
hashToKey[hash] = key;
keyToHash[key] = hash;
}
infile.close();
return;
}
/* If file not here, create it */
if (isMySQL)
outfile.open(MYSQL_MAPPING_FILE, std::ios::binary);
else
outfile.open(PGSQL_MAPPING_FILE, std::ios::binary);
if (outfile.is_open()) {
for (std::set<std::string>::iterator it=MYSQL_KEYWORDS.begin();
it!=MYSQL_KEYWORDS.end();
++it) {
do {
/* get a new hash until all hashes are unique */
hash = hashString(*it);
} while (hashToKey.count(hash) != 0);
hashToKey[hash] = *it;
keyToHash[*it] = hash;
/* write to file */
outfile << hash << " " << *it << "\n";
}
outfile.close();
} else {
dbg("Could not open mapping file");
exit(-1);
}
}
/*
* Get hashed string of @input with the same length
*/
std::string
SQLRandPass::hashString(std::string input)
{
    char s[MAX_CHAR];
    unsigned int len = input.size();
    /* clamp so we never write past the local buffer */
    if (len >= MAX_CHAR)
        len = MAX_CHAR - 1;
    /* FIXME This is not random!! rand() is never seeded */
    static const char alphanum[] =
        "0123456789"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "abcdefghijklmnopqrstuvwxyz";
    for (unsigned int i = 0; i < len; ++i) {
        s[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
    }
    s[len] = 0;
    return std::string(s);
}
/*
* Randomizes suffix as string from /dev/urandom
*/
void
SQLRandPass::randomizeSuffix()
{
char suffix[SUFFIX_LEN];
FILE *fp;
fp = fopen("/dev/urandom", "r");
if (fp == NULL)
exit(-1);
if (fread(&suffix, sizeof(char), SUFFIX_LEN, fp) != SUFFIX_LEN)
if (ferror(fp))
exit(-1);
fclose(fp);
    /* urandom bytes may contain embedded NULs, so copy raw bytes */
    memcpy(SUFFIX, suffix, SUFFIX_LEN);
}
std::string
SQLRandPass::getKindId(std::string name, uint64_t *unique_id)
{
std::stringstream SS;
SS << (*unique_id)++;
return name + SS.str();
}
void
SQLRandPass::dbgMsg(std::string a, std::string b)
{
llvm::errs() << "\n[SQLRand] DBG:" << a << b << "\n";
}
void
SQLRandPass::dbg(std::string s)
{
llvm::errs() << "\n[SQLRand] DBG:" << s << "\n";
}
} /* ------------------ namespace end ------------------ */
/* ****************************************************************************
* ============================================================================
* REGISTER PASS TO LLVM
* ============================================================================
* ****************************************************************************/
namespace {
/* ID for SQLRandPass */
char SQLRandPass::ID = 1;
static RegisterPass<SQLRandPass>
XX ("SQLRand", "Implements SQLRand Pass", true, true);
static void
initializeSQLRandPasses(PassRegistry &Registry) {
llvm::initializeAllocIdentifyPass(Registry);
llvm::initializePDTCachePass(Registry);
}
static void
registerSQLRandPasses(const PassManagerBuilder &, PassManagerBase &PM)
{
PM.add(llvm::createPromoteMemoryToRegisterPass());
PM.add(llvm::createPDTCachePass());
PM.add(new SQLRandPass());
}
class StaticInitializer {
public:
StaticInitializer() {
char* passend = getenv("__PASSEND__");
if (passend) {
errs() << "== EP_LoopOptimizerEnd ==\n";
RegisterStandardPasses
RegisterSQLRandPass(PassManagerBuilder::EP_LoopOptimizerEnd,
registerSQLRandPasses);
} else {
errs() << "== EP_ModuleOptimizerEarly ==\n";
RegisterStandardPasses
RegisterSQLRandPass(PassManagerBuilder::EP_ModuleOptimizerEarly,
registerSQLRandPasses);
}
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeSQLRandPasses(Registry);
}
};
static StaticInitializer InitializeEverything;
} /* ------------------ namespace end ------------------ */
|
Saprang stands 161 centimeters tall and, as of March 2007, weighed 52 kilograms.
|
Require Import Coq.Numbers.Natural.Peano.NPeano.
Require Import Coq.Arith.Plus.
Require Import Coq.Arith.Mult.
Require Import Coq.Arith.Wf_nat.
Lemma plus_red_l : forall a b p, a = b -> p + a = p + b.
auto.
Qed.
Lemma plus_red_r : forall a b p, a = b -> a + p = b + p.
auto.
Qed.
Lemma mult_red_l : forall a b p, a = b -> p * a = p * b.
auto.
Qed.
Lemma mult_red_r : forall a b p, a = b -> a * p = b * p.
auto.
Qed.
Proposition div_n0 : forall n, n / 0 = 0.
Proof.
auto.
Qed.
Lemma div_0 : forall n, 0 / n = 0.
Proof.
destruct n; auto.
Qed.
Proposition div_S : forall n m, m <> 0 -> (n + m) / m = S (n / m).
Proof.
intros n m.
case m.
now tauto.
intros m' NeqH.
unfold div.
assert (lem : forall x y z, divmod (x + (S z)) y 0 z = divmod (S x) y 0 0).
{
intros x y.
induction z.
+ rewrite plus_comm. now auto.
+
transitivity (divmod (S (x + S z)) y 0 (S z)).
assert(EqS : (x + S (S z)) = S (x + S z)).
{
transitivity (S (S z) + x).
now apply plus_comm.
transitivity (S (S z + x)).
now auto.
rewrite plus_comm.
now auto.
}
rewrite EqS.
now reflexivity.
transitivity (divmod (x + S z) y 0 z).
now auto.
rewrite IHz.
now auto.
}
rewrite lem.
simpl.
assert(lem2 : forall a b c d, fst (divmod a b (S c) d) = S (fst (divmod a b c d))).
{
induction a.
now auto.
intros b c.
destruct d.
+
simpl.
apply IHa.
+
simpl.
apply IHa.
}
apply lem2.
Qed.
Proposition div_le : forall n m, n < m -> n / m = 0.
Proof.
assert(forall a b c, a < c -> fst (divmod a b 0 c) = 0).
{
induction a.
now auto.
intros b c leac.
destruct c.
+
apply lt_n_0 in leac.
destruct leac.
+
simpl.
apply IHa.
apply lt_S_n.
auto.
}
intros n m lenm.
unfold div.
destruct m.
now auto.
unfold lt in lenm.
apply le_S_n in lenm.
apply le_lt_or_eq in lenm.
destruct lenm as [lenm | eqnm].
+
apply H.
apply lenm.
+
rewrite eqnm.
cut (forall a b, fst (divmod a b 0 a) = 0).
now auto.
induction a.
now auto.
simpl.
apply IHa.
Qed.
Proposition mod_plus : forall a b, b <> 0 -> modulo (a + b) b = modulo a b.
Proof.
intros a b neb0.
generalize (div_mod (a + b) b neb0); intro divapb.
rewrite (div_S _ _ neb0) in divapb.
rewrite mult_comm in divapb.
simpl in divapb.
rewrite <- plus_assoc in divapb.
rewrite (plus_comm a b) in divapb.
apply plus_reg_l in divapb.
rewrite (div_mod a _ neb0) in divapb at 1.
rewrite mult_comm in divapb.
apply plus_reg_l in divapb.
symmetry.
rewrite (plus_comm a b).
auto.
Qed.
Lemma mult_div_mod : forall a b r, b <> 0 -> r < b -> (a * b + r) / b = a.
Proof.
induction a.
+
simpl.
intros b r neb0 lerb.
apply div_le.
apply lerb.
+
intros b r neqb lerb.
simpl.
rewrite plus_assoc_reverse.
rewrite plus_comm.
rewrite (div_S _ _ neqb).
rewrite (IHa _ _ neqb lerb).
auto.
Qed.
Lemma mult_div : forall a b, b <> 0 -> a * b / b = a.
Proof.
intros a b neb0.
transitivity ((a*b+0) / b).
{
rewrite plus_comm.
simpl.
auto.
}
apply (mult_div_mod _ _ _ neb0).
unfold lt.
destruct b.
elimtype False.
apply neb0.
reflexivity.
apply le_n_S.
apply le_0_n.
Qed.
Lemma mult_div' : forall a b, b <> 0 -> b * a / b = a.
Proof.
intros a b neb.
rewrite mult_comm.
apply mult_div.
auto.
Qed.
Lemma mult_rem_div : forall a b r q, b <> 0 -> (a = q*b + r /\ r < b) -> q = a / b.
Proof.
intros a b r.
induction q.
+
intros neqb0 [Eqa lerb].
simpl in Eqa.
rewrite Eqa.
rewrite (div_le _ _ lerb).
auto.
+
intros neqb0 [Eqa lerb].
simpl in Eqa.
rewrite Eqa.
rewrite plus_assoc_reverse.
rewrite plus_comm.
rewrite (div_S _ _ neqb0).
rewrite (mult_div_mod _ _ _ neqb0 lerb).
auto.
Qed.
Lemma mult_rem_mod : forall a b r q, b <> 0 -> (a = q*b + r /\ r < b) -> modulo a b = r.
Proof.
intros a b r q neb0 [Eqa lerb].
assert(Eqq : q = a / b).
{
apply (mult_rem_div a b r q neb0).
split; assumption.
}
rewrite Eqq in Eqa.
generalize (div_mod a _ neb0); intro Eqa'.
rewrite mult_comm in Eqa'.
rewrite Eqa' in Eqa at 1.
apply plus_reg_l in Eqa.
apply Eqa.
Qed.
Proposition mod_le : forall a b, a < b -> modulo a b = a.
Proof.
intros a b leab.
destruct b.
now auto.
assert(neb : S b <> 0).
intro eqb0.
inversion eqb0.
symmetry.
rewrite (div_mod a _ neb) at 1.
rewrite (div_le _ _ leab).
cut (S b * 0 = 0).
intros EqH.
rewrite EqH.
now auto.
auto.
Qed.
Lemma mod_bound : forall a b, b <> 0 -> modulo a b < b.
Proof.
intros a b neb0.
apply mod_bound_pos.
apply le_0_n.
apply neq_0_lt.
intro eq0b.
apply neb0.
rewrite eq0b.
auto.
Qed.
Lemma mod_exists : forall a b c, b <> 0 -> modulo a b = c -> exists n, a = n * b + c.
Proof.
intros a b c beq Eqc.
generalize (div_mod a _ beq); intro eqa.
rewrite Eqc in eqa.
exists (a / b).
rewrite mult_comm.
apply eqa.
Qed.
Section pow3.
Definition pow2 m := m * m.
Definition mod3 n := modulo n 3.
Lemma ne03 : 3 <> 0.
Proof.
auto.
Qed.
Lemma mod3_le : forall n, mod3 n <= 2.
Proof.
intro n.
unfold mod3.
apply lt_n_Sm_le.
apply mod_bound.
apply ne03.
Qed.
Lemma mod3_dest : forall n, mod3 n = 0 \/ mod3 n = 1 \/ mod3 n = 2.
Proof.
intro n.
set (mod3 n) as m.
assert(lem3 : m <= 2).
apply mod3_le.
destruct m.
now auto.
destruct m.
now auto.
destruct m.
now auto.
repeat apply le_S_n in lem3.
apply le_Sn_0 in lem3.
destruct lem3.
Qed.
Lemma mod3_0 : forall n, mod3 n = 0 -> exists p, n = 3*p.
Proof.
intros n modH.
generalize (mod_exists _ _ _ ne03 modH); intro HH.
destruct HH as [p Eqn].
exists p.
rewrite Eqn.
transitivity (p*3).
now rewrite plus_comm; auto.
apply mult_comm.
Qed.
Lemma mod3_1 : forall n, mod3 n = 1 -> exists p, n = 3*p + 1.
Proof.
intros n modH.
generalize (mod_exists _ _ _ ne03 modH); intro HH.
destruct HH as [p Eqn].
exists p.
rewrite Eqn.
rewrite mult_comm.
auto.
Qed.
Lemma mod3_2 : forall n, mod3 n = 2 -> exists p, n = 3*p + 2.
Proof.
intros n modH.
generalize (mod_exists _ _ _ ne03 modH); intro HH.
destruct HH as [p Eqn].
exists p.
rewrite Eqn.
rewrite mult_comm.
auto.
Qed.
Lemma To_mod3_0 : forall n, mod3 (3 * n) = 0.
Proof.
intro n.
apply (mult_rem_mod _ _ _ n).
apply ne03.
split.
transitivity (n*3).
now apply mult_comm.
rewrite plus_comm.
auto.
unfold lt.
apply le_n_S.
apply le_0_n.
Qed.
Lemma To_mod3_1 : forall n, mod3 (3 * n + 1) = 1.
Proof.
intro n.
apply (mult_rem_mod _ _ _ n).
apply ne03.
split.
rewrite mult_comm.
auto.
unfold lt.
apply le_n_S.
apply le_n_S.
apply le_0_n.
Qed.
Lemma To_mod3_2 : forall n, mod3 (3 * n + 2) = 2.
Proof.
intro n.
apply (mult_rem_mod _ _ _ n).
apply ne03.
split.
rewrite mult_comm.
auto.
unfold lt.
apply le_n_S.
apply le_n_S.
apply le_n_S.
apply le_0_n.
Qed.
Lemma plus_reg_r : forall n m p : nat, n + p = m + p -> n = m.
Proof.
intros n m p eq.
rewrite (plus_comm n p) in eq.
rewrite (plus_comm m p) in eq.
apply plus_reg_l in eq.
auto.
Qed.
Lemma mult_plus_distr : forall a b c d, (a + b) * (c + d) = a * c + b * c + a * d + b * d.
Proof.
intros a b c d.
transitivity ((a+b) * c + (a+b) * d).
now apply mult_plus_distr_l.
transitivity ((a*c + b*c) + ((a+b) * d)).
{
apply plus_red_r.
apply mult_plus_distr_r.
}
transitivity ((a*c + b*c) + (a*d + b*d)).
{
apply plus_red_l.
apply mult_plus_distr_r.
}
rewrite plus_assoc.
auto.
Qed.
Lemma pow2_distr : forall a b, pow2 (a + b) = pow2 a + 2 * (a*b) + pow2 b.
Proof.
intros a b.
unfold pow2 at 1.
rewrite mult_plus_distr.
transitivity (pow2 a + b * a + a * b + pow2 b).
{
unfold pow2.
auto.
}
apply plus_red_r.
rewrite plus_assoc_reverse.
apply plus_red_l.
transitivity (a * b + a * b).
{
rewrite (mult_comm b a).
auto.
}
simpl.
apply plus_red_l.
rewrite plus_comm.
auto.
Qed.
Lemma pow23p0 : forall p, pow2 (3 * p) = 3 * (3 * pow2 p).
Proof.
intro p.
unfold pow2.
transitivity (3 * (p * (3 * p))).
now apply mult_assoc_reverse.
apply mult_red_l.
transitivity (p * 3 * p).
now apply mult_assoc.
transitivity (3 * p * p).
{
apply mult_red_r.
apply mult_comm.
}
apply mult_assoc_reverse.
Qed.
Lemma pow23p1 : forall p, pow2 (3*p+1) = 3 * (3 * pow2 p + 2 * p) + 1.
Proof.
intro p.
rewrite pow2_distr.
transitivity (pow2 (3 * p) + (2 * (3* p * 1)) + 1).
{
cut (pow2 1 = 1).
intro relH.
rewrite relH.
auto.
reflexivity.
}
apply plus_red_r.
transitivity (pow2 (3 * p) + 3 * (2 * p)).
{
apply plus_red_l.
rewrite mult_1_r.
transitivity (2 * 3 * p).
now apply mult_assoc.
transitivity (3 * 2 * p).
{
apply mult_red_r.
apply mult_comm.
}
apply mult_assoc_reverse.
}
rewrite pow23p0.
symmetry.
apply mult_plus_distr_l.
Qed.
Lemma pow23p2 : forall p, pow2 (3*p+2) = 3 * (3 * pow2 p + 4 * p + 1) + 1.
Proof.
intro p.
rewrite pow2_distr.
transitivity (pow2 (3 * p) + 2 * (3 * p * 2) + 3 * 1 + 1).
{
transitivity (pow2 (3 * p) + (2 * (3 * p * 2) + pow2 2)).
now apply plus_assoc_reverse.
transitivity (pow2 (3 * p) + (2 * (3 * p * 2) + (3 * 1 + 1))).
{
apply plus_red_l.
apply plus_red_l.
reflexivity.
}
rewrite (plus_assoc _ _ 1).
rewrite plus_assoc.
apply plus_red_r.
apply plus_assoc.
}
apply plus_red_r.
transitivity (pow2 (3 * p) + 3 * (4 * p) + 3 * 1).
{
apply plus_red_r.
apply plus_red_l.
transitivity (2 * (3 * 2 * p)).
{
apply mult_red_l.
transitivity (3 * (p * 2)).
now apply mult_assoc_reverse.
rewrite (mult_comm p 2).
apply mult_assoc.
}
transitivity (2 * (6 * p)).
{
apply mult_red_l.
apply mult_red_r.
reflexivity.
}
transitivity (2 * 6 * p).
now apply mult_assoc.
transitivity (12 * p).
{
apply mult_red_r.
reflexivity.
}
transitivity (3 * 4 * p).
{
apply mult_red_r.
reflexivity.
}
apply mult_assoc_reverse.
}
rewrite pow23p0.
rewrite <- (mult_plus_distr_l 3).
rewrite <- (mult_plus_distr_l 3).
reflexivity.
Qed.
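(* Problem 1: a square is congruent to 0 or 1 mod 3. The proof splits n by
   its residue mod 3 and applies pow23p0/pow23p1/pow23p2; note that both the
   3p+1 and 3p+2 cases land in residue 1, so residue 2 never occurs. *)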
Theorem Problem1 : forall n, mod3 (pow2 n) = 0 \/ mod3 (pow2 n) = 1.
Proof.
intro n.
destruct (mod3_dest n) as [case1 | [case2 | case3]].
+
left.
apply mod3_0 in case1.
destruct case1 as [p eqn].
rewrite eqn.
rewrite pow23p0.
apply To_mod3_0.
+
right.
apply mod3_1 in case2.
destruct case2 as [p eqn].
rewrite eqn.
rewrite pow23p1.
apply To_mod3_1.
+
right.
apply mod3_2 in case3.
destruct case3 as [p eqn].
rewrite eqn.
rewrite pow23p2.
apply To_mod3_1.
Qed.
Lemma mod3pow20_mod30 : forall a, mod3 (pow2 a) = 0 -> mod3 a = 0.
Proof.
intros a modH.
destruct (mod3_dest a) as [case1 | [case2 | case3]].
+ auto.
+
apply mod3_1 in case2.
destruct case2 as [p eqa].
rewrite eqa in modH.
rewrite pow23p1 in modH.
rewrite To_mod3_1 in modH.
discriminate.
+
apply mod3_2 in case3.
destruct case3 as [p eqa].
rewrite eqa in modH.
rewrite pow23p2 in modH.
rewrite To_mod3_1 in modH.
discriminate.
Qed.
Theorem Problem2 : forall a b c, pow2 a + pow2 b = 3 * pow2 c -> mod3 a = 0 /\ mod3 b = 0 /\ mod3 c = 0.
Proof.
intros a b c eq.
assert(Lemma1 : mod3 a = 0 /\ mod3 b = 0).
{
assert(pow2_lem : mod3 (pow2 a) = 0 /\ mod3 (pow2 b) = 0).
{
destruct (Problem1 a) as [eq00 | eq01] ; destruct (Problem1 b) as [eq10 | eq11].
+
split; auto.
+
apply mod3_0 in eq00.
destruct eq00 as [a' eqa].
apply mod3_1 in eq11.
destruct eq11 as [b' eqb].
rewrite eqa in eq.
rewrite eqb in eq.
rewrite plus_assoc in eq.
rewrite <- (mult_plus_distr_l 3) in eq.
assert(P1 : mod3 (3 * pow2 c) = 1).
{
rewrite <- eq.
apply To_mod3_1.
}
rewrite To_mod3_0 in P1.
discriminate P1.
+
apply mod3_1 in eq01.
destruct eq01 as [a' eqa].
apply mod3_0 in eq10.
destruct eq10 as [b' eqb].
rewrite eqa in eq.
rewrite eqb in eq.
rewrite plus_assoc_reverse in eq.
rewrite (plus_comm 1 (3 * b')) in eq.
rewrite (plus_assoc) in eq.
rewrite <- (mult_plus_distr_l 3) in eq.
assert(P1 : mod3 (3 * pow2 c) = 1).
{
rewrite <- eq.
apply To_mod3_1.
}
rewrite To_mod3_0 in P1.
discriminate P1.
+
apply mod3_1 in eq01.
destruct eq01 as [a' eqa].
apply mod3_1 in eq11.
destruct eq11 as [b' eqb].
rewrite eqa in eq.
rewrite eqb in eq.
assert(P1 : mod3 (3 * pow2 c) = 2).
{
rewrite <- eq.
cut (3 * a' + 1 + (3 * b' + 1) = 3 * (a' + b') + 2).
{
intro relH.
rewrite relH.
apply To_mod3_2.
}
rewrite plus_assoc.
transitivity (3 * a' + 3 * b' + 1 + 1).
{
apply plus_red_r.
transitivity (3 * a' + (1 + 3 * b')).
now apply plus_assoc_reverse.
rewrite (plus_comm 1 (3*b')).
apply plus_assoc.
}
rewrite <- (mult_plus_distr_l 3).
rewrite plus_assoc_reverse.
apply plus_red_l.
reflexivity.
}
rewrite To_mod3_0 in P1.
discriminate.
}
destruct pow2_lem as [mpa0 mpb0].
split; apply mod3pow20_mod30; assumption.
}
destruct Lemma1 as [mda0 mdb0].
assert(mdc0 : mod3 c = 0).
{
apply mod3_0 in mda0.
destruct mda0 as [a' eqa].
apply mod3_0 in mdb0.
destruct mdb0 as [b' eqb].
apply mod3pow20_mod30.
cut (pow2 c = 3 * (pow2 a' + pow2 b')).
{
intro relH.
rewrite relH.
apply To_mod3_0.
}
cut (3 * pow2 c = 3 * (3 * (pow2 a' + pow2 b'))).
{
intro relH.
transitivity (3 * pow2 c / 3).
{
symmetry.
apply mult_div'.
intro reH.
discriminate.
}
rewrite relH.
apply mult_div'.
intro neq.
discriminate.
}
rewrite <- eq.
rewrite eqa.
rewrite eqb.
rewrite pow23p0.
rewrite pow23p0.
rewrite <- mult_plus_distr_l.
apply mult_red_l.
symmetry.
apply mult_plus_distr_l.
}
split.
assumption.
split.
assumption.
assumption.
Qed.
(*
Lemma mod3_0_dest : forall n, mod3 n = 0 -> (n = 0 \/ (exists p r, n = 3 ^ p * r /\ mod3 r <> 0)).
Proof.
*)
Lemma mod3_0_dest : forall n, n = 0 \/ (exists p r, 3 * n = 3 ^ p * r /\ (mod3 r = 1 \/ mod3 r = 2)).
Proof.
apply (well_founded_induction lt_wf (fun n => n = 0 \/ (exists p r, 3 * n = 3^p*r /\ (mod3 r = 1 \/ mod3 r = 2)))).
intros n IndH.
destruct n.
now auto.
destruct (IndH _ (lt_n_Sn n)) as [Eqy0 | [p [r [Eqn [Eqr1 | Eqr2]]]]].
{
rewrite Eqy0.
right.
exists 1.
exists 1.
split.
now reflexivity.
left.
now reflexivity.
} {
right.
destruct p.
-
rewrite pow_0_r in Eqn.
rewrite mult_1_l in Eqn.
rewrite <- Eqn in Eqr1.
rewrite To_mod3_0 in Eqr1.
discriminate.
-
exists 1.
exists (3^p*r+1).
split.
+
rewrite mult_succ_r.
rewrite Eqn.
transitivity(3*(3^p*r)+3).
{
apply plus_red_r.
rewrite mult_assoc.
apply mult_red_r.
reflexivity.
}
transitivity(3*(3^p*r) + 3*1).
{
apply plus_red_l.
reflexivity.
}
rewrite <- mult_plus_distr_l.
apply mult_red_r.
reflexivity.
+
destruct p.
*
right.
rewrite pow_0_r.
rewrite mult_1_l.
apply mod3_1 in Eqr1.
destruct Eqr1 as [q Eqr].
rewrite Eqr.
rewrite plus_assoc_reverse.
assert(H112 : 1+1=2).
now reflexivity.
rewrite H112.
apply To_mod3_2.
*
cut(3^(S p) * r + 1 = 3*(3^p*r)+1).
{
intro relH.
rewrite relH.
left.
apply To_mod3_1.
}
apply plus_red_r.
rewrite mult_assoc.
apply mult_red_r.
reflexivity.
} {
right.
destruct p.
-
rewrite pow_0_r in Eqn.
rewrite mult_1_l in Eqn.
rewrite <- Eqn in Eqr2.
rewrite To_mod3_0 in Eqr2.
discriminate.
-
destruct p.
{
assert(Eqnr : n = r).
{
cut(3*n = 3*r).
{
intro Eq3n.
transitivity (3*n / 3).
{
rewrite (mult_div' _ _ ne03).
reflexivity.
}
rewrite Eq3n.
apply (mult_div' _ _ ne03).
}
rewrite Eqn.
apply mult_red_r.
reflexivity.
}
rewrite <- Eqnr in Eqr2.
clear Eqn.
clear Eqnr.
clear r.
assert(mod3nS : mod3 (S n) = 0).
{
apply mod3_2 in Eqr2.
destruct Eqr2 as [p Eqr2].
rewrite Eqr2.
cut(S(3*p+2) = 3*(p+1)).
{
intro relH.
rewrite relH.
apply To_mod3_0.
}
transitivity(3*p+2+1).
{
rewrite (plus_comm _ 1).
reflexivity.
}
rewrite mult_plus_distr_l.
rewrite plus_assoc_reverse.
reflexivity.
}
apply mod3_0 in mod3nS.
destruct mod3nS as [p EqSn].
assert(lepSn : p < S n).
{
rewrite EqSn.
rewrite <- mult_1_l at 1.
apply mult_lt_compat_r.
unfold lt.
apply le_n_S.
apply le_n_S.
now apply le_0_n.
cut (p <> 0).
{
intro nep0.
destruct p.
now discriminate.
unfold lt.
apply le_n_S.
apply le_0_n.
}
intro Eqp0.
rewrite Eqp0 in EqSn.
simpl in EqSn.
discriminate EqSn.
}
rewrite EqSn.
destruct (IndH _ lepSn) as [Eqp0 | [t [u [Eq3p [Equ1 | Equ2]]]]].
{
rewrite Eqp0 in EqSn.
simpl in EqSn.
discriminate EqSn.
} {
rewrite Eq3p.
exists (S t).
exists u.
split.
-
rewrite mult_assoc.
reflexivity.
-
auto.
} {
rewrite Eq3p.
exists (S t).
exists u.
split.
-
rewrite mult_assoc.
reflexivity.
-
auto.
}
}
{
exists 1.
exists (3^(S p) * r + 1).
split.
-
transitivity (3 * (n+1)).
{
apply mult_red_l.
rewrite plus_comm.
reflexivity.
}
rewrite mult_plus_distr_l.
rewrite Eqn.
transitivity(3*3^(S p) * r + 3*1).
{
apply plus_red_r.
apply mult_red_r.
reflexivity.
}
transitivity(3*(3^(S p)*r + 1)).
{
rewrite mult_plus_distr_l.
apply plus_red_r.
apply mult_assoc_reverse.
}
apply mult_red_r.
reflexivity.
-
left.
cut(3^(S p) * r + 1 = 3*(3^p*r)+1).
{
intro relH.
rewrite relH.
apply To_mod3_1.
}
apply plus_red_r.
rewrite mult_assoc.
apply mult_red_r.
reflexivity.
}
}
Qed.
Lemma IndLemma : forall (P : nat -> Prop), (forall n, P n -> exists n', n = 3 * n' /\ P n') -> forall n, P n -> n = 0.
Proof.
intros P IndH n Pn.
assert(eq3n : exists n', n = 3*n').
{
destruct(IndH _ Pn) as [n' [Eqn Pn']].
exists n'.
auto.
}
destruct eq3n as [n' Eqn].
destruct (mod3_0_dest n') as [Eqn0 | HH].
{
rewrite Eqn0 in Eqn.
simpl in Eqn.
auto.
}
destruct HH as [p [r [Eq3n' modr]]].
assert(PInd : forall b t, P(3^t*b) -> P b).
{
intro b.
induction t.
{
assert(relH : 3^0*b=b).
{
rewrite pow_0_r.
rewrite mult_1_l.
auto.
}
rewrite relH.
tauto.
} {
intro PH'.
apply IHt.
destruct (IndH _ PH') as [t' [Eqt' Pt']].
cut(t' = 3^t*b).
{
intro relH.
rewrite <- relH.
auto.
}
transitivity(3*t'/3).
{
rewrite mult_div'.
auto.
apply ne03.
}
rewrite <- Eqt'.
assert(Eq3t : 3^(S t) * b = 3*(3^t * b)).
{
rewrite mult_assoc.
apply mult_red_r.
reflexivity.
}
rewrite Eq3t.
apply mult_div'.
apply ne03.
}
}
rewrite Eqn in Pn.
rewrite Eq3n' in Pn.
apply PInd in Pn.
assert(modr0 : mod3 r = 0).
{
apply IndH in Pn.
destruct Pn as [r' [Eqr' Pr']].
rewrite Eqr'.
apply To_mod3_0.
}
rewrite modr0 in modr.
destruct modr; discriminate.
Qed.
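(* Problem 3: infinite descent. By Problem2, every solution (a, b, c) has all
   three components divisible by 3, so (a/3, b/3, c/3) is again a solution;
   IndLemma packages this self-reducibility into a = b = c = 0. *)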
Theorem Problem3 : forall a b c, pow2 a + pow2 b = 3 * pow2 c -> a = 0 /\ b = 0 /\ c = 0.
Proof.
set (fun a b c => pow2 a + pow2 b = 3 * pow2 c) as P.
cut(forall a b c, P a b c -> a=0 /\ b=0 /\ c=0).
{
tauto.
}
assert(Lem : forall x y z, P x y z -> exists x' y' z', (x = 3 * x' /\ y = 3 * y' /\ z = 3 * z') /\ P x' y' z').
(* assert(Lemma : forall x y z, pow2 x + pow2 y = 3 * pow2 z -> exists x' y' z', (x = 3 * x' /\ y= 3 * y' /\ z = 3 * z') /\ pow2 x' + pow2 y' = 3 * pow2 z').*)
{
unfold P.
intros x y z eq.
generalize (Problem2 _ _ _ eq); intro eq'.
destruct eq' as [mx [my mz]].
apply mod3_0 in mx.
apply mod3_0 in my.
apply mod3_0 in mz.
destruct mx as [x' eqx].
destruct my as [y' eqy].
destruct mz as [z' eqz].
exists x'.
exists y'.
exists z'.
split.
{
split.
assumption.
split.
assumption.
assumption.
}
rewrite eqx in eq.
rewrite eqy in eq.
rewrite eqz in eq.
rewrite pow23p0 in eq.
rewrite pow23p0 in eq.
rewrite pow23p0 in eq.
symmetry.
transitivity ((9 * (3 * pow2 z')) / 9).
{
symmetry.
apply mult_div'.
intro dis.
discriminate.
}
transitivity ((3 * (3 * (3 * pow2 z'))) / 9).
{
cut (9 * (3 * pow2 z') = 3 * (3 * (3 * pow2 z'))).
intro relH; rewrite relH; auto.
transitivity ((3 * 3) * (3 * pow2 z')).
{
apply mult_red_r.
reflexivity.
}
apply mult_assoc_reverse.
}
rewrite <- eq.
rewrite <- mult_plus_distr_l.
rewrite <- mult_plus_distr_l.
rewrite mult_assoc.
transitivity ((9 * (pow2 x' + pow2 y')) / 9).
{
cut (3*3=9).
intro relH; rewrite relH; auto.
reflexivity.
}
apply mult_div'.
intro dis.
discriminate.
}
set (fun a => exists b c, P a b c) as P1.
set (fun b => exists a c, P a b c) as P2.
set (fun c => exists a b, P a b c) as P3.
intros a b c Pabc.
assert(P1H : P1 a).
{
unfold P1.
exists b.
exists c.
now assumption.
}
assert(P2H : P2 b).
{
unfold P2.
exists a.
exists c.
now assumption.
}
assert(P3H : P3 c).
{
unfold P3.
exists a.
exists b.
now assumption.
}
split; try split.
+
generalize P1H.
apply IndLemma.
intros x P1x.
unfold P1 in P1x.
destruct P1x as [y [z Pxyz]].
apply Lem in Pxyz.
destruct Pxyz as [x' [y' [z' [[Eqx [Eqy Eqz]] Pxyz']]]].
exists x'.
split.
now apply Eqx.
exists y'.
exists z'.
now assumption.
+
generalize P2H.
apply IndLemma.
intros y P2y.
unfold P2 in P2y.
destruct P2y as [x [z Pxyz]].
apply Lem in Pxyz.
destruct Pxyz as [x' [y' [z' [[Eqx [Eqy Eqz]] Pxyz']]]].
exists y'.
split.
now apply Eqy.
exists x'.
exists z'.
now assumption.
+
generalize P3H.
apply IndLemma.
intros z P3z.
unfold P3 in P3z.
destruct P3z as [x [y Pxyz]].
apply Lem in Pxyz.
destruct Pxyz as [x' [y' [z' [[Eqx [Eqy Eqz]] Pxyz']]]].
exists z'.
split.
now apply Eqz.
exists x'.
exists y'.
now assumption.
Qed.
End pow3.
|
// The various memcached commands that we support.
#include "commands.h"
#include "connections.h"
#include "locking.h"
#include "protocol.h"
#include "server.h"
#include "threads.h"
#include "utils.h"
#include <assert.h>
#include <gsl/gsl_randist.h>
#include <stdio.h>
#include <stdlib.h>
static const char* default_key = "skeleton";
/* // process a memcached get(s) command. (we don't support CAS). This function */
/* // performs the request parsing and setup of backend RPC's. */
/* void process_get_command(conn *c, token_t *tokens, size_t ntokens, */
/* bool return_cas) { */
/* char *key; */
/* size_t nkey; */
/* int i = 0; */
/* item *it; */
/* token_t *key_token = &tokens[KEY_TOKEN]; */
/* char *suffix; */
/* worker_thread_t *t = c->thread; */
/* memcached_t *mc; */
/* */
/* assert(c != NULL); */
/* */
/* key = key_token->value; */
/* nkey = key_token->length; */
/* */
/* if (config.use_dist) { */
/* long size = config.dist_arg1 + gsl_ran_gaussian(config.r, config.dist_arg2); */
/* if (config.verbose > 1) { */
/* fprintf(stderr, "allocated blob: %ld\n", size); */
/* } */
/* c->mem_blob = malloc(sizeof(char) * size); */
/* } */
/* */
/* if(nkey > KEY_MAX_LENGTH) { */
/* error_response(c, "CLIENT_ERROR bad command line format"); */
/* return; */
/* } */
/* */
/* // lookup key-value. */
/* it = item_get(key, nkey); */
/* */
/* // hit. */
/* if (it) { */
/* if (i >= c->isize && !conn_expand_items(c)) { */
/* item_remove(it); */
/* error_response(c, "SERVER_ERROR out of memory writing get response"); */
/* return; */
/* } */
/* // add item to remembered list (i.e., we've taken ownership of them */
/* // through refcounting and later must release them once we've */
/* // written out the iov associated with them). */
/* item_update(it); */
/* *(c->ilist + i) = it; */
/* i++; */
/* } */
/* */
/* // make sure it's a single get */
/* key_token++; */
/* if (key_token->length != 0 || key_token->value != NULL) { */
/* error_response(c, "SERVER_ERROR only support single `get`"); */
/* return; */
/* } */
/* */
/* // update our rememberd reference set. */
/* c->icurr = c->ilist; */
/* c->ileft = i; */
/* */
/* // setup RPC calls. */
/* for (i = 0; i < t->memcache_used; i++) { */
/* mc = t->memcache[i]; */
/* if (!conn_add_msghdr(mc) != 0) { */
/* error_response(mc, "SERVER_ERROR out of memory preparing response"); */
/* return; */
/* } */
/* memcache_get(mc, c, default_key); */
/* } */
/* conn_set_state(c, conn_rpc_wait); */
/* } */
// complete the response to a get request.
void finish_get_command(conn *c) {
item *it;
int i;
// setup all items for writing out.
for (i = 0; i < c->ileft; i++) {
it = *(c->ilist + i);
if (it) {
// Construct the response. Each hit adds three elements to the
// outgoing data list:
// "VALUE <key> <flags> <data_length>\r\n"
// "<data>\r\n"
// The <data> element is stored on the connection item list, not on
// the iov list.
if (!conn_add_iov(c, "VALUE ", 6) ||
!conn_add_iov(c, ITEM_key(it), it->nkey) ||
!conn_add_iov(c, ITEM_suffix(it), it->nsuffix + it->nbytes)) {
item_remove(it);
error_response(c, "SERVER_ERROR out of memory writing get response");
return;
}
if (config.verbose > 1) {
fprintf(stderr, ">%d sending key %s\n", c->sfd, ITEM_key(it));
}
} else {
fprintf(stderr, "ERROR corrupted ilist!\n");
exit(1);
}
}
if (config.verbose > 1) {
fprintf(stderr, ">%d END\n", c->sfd);
}
    if (!conn_add_iov(c, "END\r\n", 5)) {
error_response(c, "SERVER_ERROR out of memory writing get response");
} else {
conn_set_state(c, conn_mwrite);
}
}
// process a memcached get(s) command. (we don't support CAS).
void process_get_command(conn *c, token_t *tokens, size_t ntokens,
bool return_cas) {
char *key;
size_t nkey;
int i = 0;
item *it;
token_t *key_token = &tokens[KEY_TOKEN];
assert(c != NULL);
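    // Synthetic workload knobs (our reading of the code, not documented
    // upstream): on the first get of a connection, optionally allocate a
    // dummy blob with Gaussian-distributed size, and optionally delay its
    // release based on a Gaussian round-trip-time model with a cutoff.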
if (config.alloc && c->mem_blob == NULL) {
long size = config.alloc_mean + gsl_ran_gaussian(c->thread->r, config.alloc_stddev);
size = size <= 0 ? 10 : size;
if (config.verbose > 0) {
fprintf(stderr, "allocated blob: %ld\n", size);
}
c->mem_blob = malloc(sizeof(char) * size);
c->mem_free_delay = 0;
if (config.rtt_delay) {
double r = config.rtt_mean + gsl_ran_gaussian(c->thread->r, config.rtt_stddev);
if (r >= config.rtt_cutoff) {
int wait = r / 100;
if (config.verbose > 0) {
fprintf(stderr, "delay: %d\n", wait);
}
c->mem_free_delay = wait;
conn_set_state(c, conn_mwrite);
}
}
}
// process the whole command line, (only part of it may be tokenized right now)
do {
// process all tokenized keys at this stage.
while(key_token->length != 0) {
key = key_token->value;
nkey = key_token->length;
if(nkey > KEY_MAX_LENGTH) {
error_response(c, "CLIENT_ERROR bad command line format");
return;
}
// lookup key-value.
it = item_get(key, nkey);
// hit.
if (it) {
if (i >= c->isize && !conn_expand_items(c)) {
item_remove(it);
break;
}
// Construct the response. Each hit adds three elements to the
// outgoing data list:
// "VALUE <key> <flags> <data_length>\r\n"
// "<data>\r\n"
// The <data> element is stored on the connection item list, not on
// the iov list.
                if (!conn_add_iov(c, "VALUE ", 6) ||
                    !conn_add_iov(c, ITEM_key(it), it->nkey) ||
                    !conn_add_iov(c, ITEM_suffix(it), it->nsuffix + it->nbytes)) {
item_remove(it);
break;
}
if (config.verbose > 1) {
fprintf(stderr, ">%d sending key %s\n", c->sfd, key);
}
// add item to remembered list (i.e., we've taken ownership of them
// through refcounting and later must release them once we've
// written out the iov associated with them).
item_update(it);
*(c->ilist + i) = it;
i++;
}
key_token++;
}
/*
* If the command string hasn't been fully processed, get the next set
* of tokens.
*/
if(key_token->value != NULL) {
ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS);
key_token = tokens;
}
} while(key_token->value != NULL);
c->icurr = c->ilist;
c->ileft = i;
if (config.verbose > 1) {
fprintf(stderr, ">%d END\n", c->sfd);
}
// If the loop was terminated because of out-of-memory, it is not reliable
// to add END\r\n to the buffer, because it might not end in \r\n. So we
// send SERVER_ERROR instead.
    if (key_token->value != NULL || !conn_add_iov(c, "END\r\n", 5)) {
error_response(c, "SERVER_ERROR out of memory writing get response");
} else {
conn_set_state(c, conn_mwrite);
}
}
// process a memcached set command.
void process_update_command(conn *c, token_t *tokens,
const size_t ntokens,
int comm, bool handle_cas) {
int vlen;
assert(c != NULL);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH ||
!safe_strtol(tokens[4].value, (int32_t *)&vlen)) {
error_response(c, "CLIENT_ERROR bad command line format");
return;
}
if (vlen < 0) {
error_response(c, "CLIENT_ERROR bad command line format");
return;
}
// setup value to be read
c->sbytes = vlen + 2; // for \r\n consumption.
conn_set_state(c, conn_read_value);
}
// process a memcached stat command.
void process_stat_command(conn *c, token_t *tokens, const size_t ntokens) {
mutex_lock(&c->stats->lock);
// just for debugging right now
fprintf(stderr, "STAT client_id %d\n", c->stats->client_id);
fprintf(stderr, "STAT total_connections %d\n", c->stats->total_connections);
fprintf(stderr, "STAT live_connections %d\n", c->stats->live_connections);
fprintf(stderr, "STAT requests %d\n", c->stats->requests);
mutex_unlock(&c->stats->lock);
conn_set_state(c, conn_new_cmd);
}
|
#include <rosbag/bag.h>
#include <rosbag/view.h>
#include <std_msgs/Int32.h>
#include <std_msgs/String.h>
#include <fstream>
#include <frc_msgs/MatchSpecificData.h>
#include <boost/foreach.hpp>
#define foreach BOOST_FOREACH
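// Small offline utility: dump /frcrobot/match_data messages from a bag file
// into temp_file.txt, flagging bags whose messages carry alliance data.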
int main(int argc, char **argv)
{
if(argc < 2)
{
std::cout << "need a file thanks \n";
return 1;
}
if(argc > 2)
{
std::cout << "why so many files ahhh \n";
return 1;
}
std::string bag_name = argv[1];
rosbag::Bag bag;
bag.open(bag_name, rosbag::bagmode::Read);
std::ofstream temp_file;
temp_file.open ("temp_file.txt");
std::vector<std::string> topics;
topics.push_back(std::string("/frcrobot/match_data"));
rosbag::View view(bag, rosbag::TopicQuery(topics));
foreach(rosbag::MessageInstance const m, view)
{
frc_msgs::MatchSpecificData::ConstPtr s = m.instantiate<frc_msgs::MatchSpecificData>();
if (s != NULL){
if (s->allianceData != "")
{
temp_file << "We have data! Keep this file!!" << std::endl;
}
temp_file << s->header << std::endl;
temp_file << s->matchTimeRemaining << std::endl;
temp_file << "allianceData: " << s->allianceData << std::endl;
temp_file << static_cast<int>(s->allianceColor) << std::endl;
temp_file << static_cast<int>(s->matchType) << std::endl;
temp_file << static_cast<int>(s->driverStationLocation) << std::endl;
temp_file << "matchNumber: " << static_cast<int>(s->matchNumber) << std::endl;
temp_file << static_cast<bool>(s->isEnabled) << std::endl;
temp_file << static_cast<bool>(s->isDisabled) << std::endl;
temp_file << static_cast<bool>(s->isAutonomous) << std::endl;
}
}
bag.close();
return 0;
}
|
"""
AbstractControlLogicCore.jl defines the AbstractControlLogicCore type and
methods
------------------------------------------------------------------------------
COPYRIGHT/LICENSE. This file is part of the XYZ package. It is subject to
the license terms in the LICENSE file found in the top-level directory of
this distribution. No part of the XYZ package, including this file, may be
copied, modified, propagated, or distributed except according to the terms
contained in the LICENSE file.
------------------------------------------------------------------------------
"""
# --- Exports
# ------ Types
export AbstractControlLogicCore
# ------ Functions
export wait_for_input_ready
export decode_control_signal, process_control_signal!
# --- Type definitions
"""
AbstractControlLogicCore
Supertype for control logic core types.
Interface
=========
wait_for_input_ready(logic_core::AbstractControlLogicCore,
input_channels::Vector{InputChannel})
decode_control_signal(::Type{<:AbstractControlLogicCore},
bytes::Vector{UInt8})
    process_control_signal!(logic_core::AbstractControlLogicCore, signal)
get_state(logic_core::AbstractControlLogicCore)
set_state!(logic_core::AbstractControlLogicCore, state::Dict)
Common Signals to Support
-------------------------
* Request for the output type. To support this signal, include the Node or
OutputChannel as a field in the concrete subtype.
"""
abstract type AbstractControlLogicCore end
# --- Method definitions
#
# Note: the following method definitions are no-op place holders to provide
# a central location for docstrings.
"""
wait_for_input_ready(logic_core::AbstractControlLogicCore,
input_channels::Vector{InputChannel})
Wait for input channels to be ready for processing.
"""
wait_for_input_ready(logic_core::AbstractControlLogicCore,
input_channels::Vector{InputChannel}) = nothing
"""
decode_control_signal(::Type{<:AbstractControlLogicCore},
bytes::Vector{UInt8})
Convert `bytes` to a signal that is understood by `process_control_signal!()`.
Throw an ArgumentError if `bytes` does not convert to a valid signal.
"""
decode_control_signal(::Type{<:AbstractControlLogicCore},
bytes::Vector{UInt8}) = nothing
"""
process_control_signal!(logic_core::AbstractControlLogicCore, signal)
Process the control `signal`, updating the data fields of `logic_core` if
necessary. Return an appropriate status response.
"""
process_control_signal!(logic_core::AbstractControlLogicCore, signal) = nothing
"""
get_state(logic_core::AbstractControlLogicCore)
Return a Dict containing sufficient information to reconstruct the operational
state of `logic_core`.
"""
get_state(logic_core::AbstractControlLogicCore) = nothing
"""
set_state!(logic_core::AbstractControlLogicCore, state::Dict)
Set the operational state of `logic_core` using values from `state`.
"""
set_state!(logic_core::AbstractControlLogicCore, state::Dict) = nothing
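# --- Example (illustrative only)
#
# A minimal sketch of a concrete subtype implementing the interface. The
# one-byte signal encoding and the `EchoLogicCore` name are assumptions made
# for illustration; they are not part of the package API.
#
#   struct EchoLogicCore <: AbstractControlLogicCore
#       state::Dict
#   end
#
#   function decode_control_signal(::Type{EchoLogicCore}, bytes::Vector{UInt8})
#       length(bytes) == 1 || throw(ArgumentError("expected a 1-byte signal"))
#       return bytes[1]
#   end
#
#   process_control_signal!(logic_core::EchoLogicCore, signal) = signal
#   get_state(logic_core::EchoLogicCore) = copy(logic_core.state)
#   set_state!(logic_core::EchoLogicCore, state::Dict) =
#       (empty!(logic_core.state); merge!(logic_core.state, state))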
|
Formal statement is: lemma const_in_iff: "c \<noteq> 0 \<Longrightarrow> (\<lambda>_. c) \<in> L F (f) \<longleftrightarrow> (\<lambda>_. 1) \<in> L F (f)" Informal statement is: If $c \neq 0$, then the constant function $\lambda x.\, c$ belongs to the Landau class $L\,F(f)$ if and only if the constant function $\lambda x.\, 1$ does.
rm(list=ls())
library(dplyr)
library(ggplot2)
library(tidyr)
library(ggpubr)
# set dataset
dataset <- "Singhal"
setwd(paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset, sep=''))
# Read in data
load(paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset,"/Calcs_",dataset,".RData", sep=''))
# Colors
color_S <- "orange"
color_TP <- "springgreen4"
color_AI <- "#2BB07FFF"
color_SA <- "#38598CFF"
color_SI <- "#C2DF23FF"
# you'll have to graph first, then reset this
maxloci <- 1200
ymax <- seq(0,maxloci,100)
#Streicher 350,50
#Singhal 1200,100
#quartz()
# Set title and input data
title.g <- paste(dataset," - Tox vs Sclero",sep="")
cc.g <- color_TP
df.g <- mLGL
xval.g <- mLGL$TvS
xlablab.g <- 'dGLS values'
# Get max min for graph to set x axis values
limit.g <- 5 + round(max(abs(min(xval.g)),abs(max(xval.g))),-1)
limit.g
tic.g <- seq(-limit.g,limit.g,10)
lines.g <- c(0.5,-0.5)
GL_hist <- ggplot(data=df.g, aes(x=xval.g)) +
geom_histogram(binwidth = limit.g*0.01, alpha=1, position="dodge", color=cc.g, fill=cc.g)+
theme_classic() +
theme(plot.title = element_text(hjust = 0.5, size=16),
axis.text = element_text(size=10, color="black"),
text = element_text(size=14),
legend.title = element_text(size = 12),
legend.text = element_text(size = 10)) +
labs(y="Number of Loci",x=xlablab.g) +
ggtitle(title.g) +
coord_cartesian(ylim=ymax, xlim = tic.g) +
scale_x_continuous(breaks = c(0,tic.g)) +
scale_y_continuous(breaks = ymax) +
# geom_vline(xintercept=lines.g,color=c("black"), linetype="dashed", size=0.5) +
geom_vline(xintercept=c(0),color=c("black"), linetype="dashed", size=0.2)
GL_hist
# Set title and input data
title <- ""
cc <- color_TP
df <- mLBF
xval <- mLBF$TvS
xlablab <- '2ln(BF) values'
# Get max min for graph to set x axis values
limit <- 10 + round(max(abs(min(xval)),abs(max(xval))),-1)
limit
tic <- seq(-limit,limit,20)
lines <- c(10,-10)
BF_hist <- ggplot(data=df, aes(x=xval)) +
geom_histogram(binwidth = limit*0.01, alpha=1, position="dodge", color=cc, fill=cc)+
theme_classic() +
theme(plot.title = element_text(hjust = 0.5, size=16),
axis.text = element_text(size=10, color="black"),
text = element_text(size=14),
legend.title = element_text(size = 12),
legend.text = element_text(size = 10)) +
labs(y="Number of Loci",x=xlablab) +
ggtitle(title) +
coord_cartesian(ylim=ymax, xlim = tic) +
scale_x_continuous(breaks = c(0,tic)) +
scale_y_continuous(breaks = ymax) +
#geom_vline(xintercept=lines,color=c("black"), linetype="dashed", size=0.5) +
geom_vline(xintercept=c(0),color=c("black"), linetype="dashed", size=0.2)
BF_hist
graph_combined <- ggarrange(GL_hist, BF_hist, ncol=1, nrow=2, align="v")
graph_combined
ggsave(paste(dataset,"_histo.pdf",sep=""), plot=graph_combined,width = 9, height = 7, units = "in", device = 'pdf',bg = "transparent")
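# Note: the two histogram blocks above are nearly identical. A sketch of a
# helper that could replace them (untested against these data; the argument
# names are ours):
# gl_bf_hist <- function(df, xval, xlab, cc, tics, title = "") {
#   ggplot(data = df, aes(x = xval)) +
#     geom_histogram(binwidth = max(tics) * 0.01, alpha = 1,
#                    position = "dodge", color = cc, fill = cc) +
#     theme_classic() +
#     labs(y = "Number of Loci", x = xlab) +
#     ggtitle(title) +
#     coord_cartesian(ylim = ymax, xlim = tics) +
#     scale_x_continuous(breaks = c(0, tics)) +
#     scale_y_continuous(breaks = ymax) +
#     geom_vline(xintercept = 0, color = "black", linetype = "dashed", size = 0.2)
# }
# e.g. GL_hist <- gl_bf_hist(mLGL, mLGL$TvS, 'dGLS values', color_TP, tic.g, title.g)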
|
If the image of the closure of a set $S$ under a function $f$ is bounded, then the image of $S$ under $f$ is bounded. |
data Vect : Nat -> Type -> Type where
Nil : Vect Z a
(::) : a -> Vect k a -> Vect (S k) a
%name Vect xs, ys, zs
zipWith : (a -> b -> c) -> Vect n a -> Vect n b -> Vect n c
zipWith f [] [] = []
zipWith f (x :: xs) (y :: ys) = f x y :: zipWith f xs ys
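-- Example: the length index makes mismatched vectors a type error.
-- zipWith (+) (1 :: 2 :: 3 :: Nil) (4 :: 5 :: 6 :: Nil)
-- ==> 5 :: 7 :: 9 :: Nil
-- zipWith (+) (1 :: Nil) (4 :: 5 :: Nil) -- rejected at compile time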
|
% Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
% http://www.apache.org/licenses/LICENSE-2.0
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\section{Upcoming Features}
\label{upcoming}
So far, we have just described the intended feature set of the first version of
AS4. In this section we present additional features that we want to introduce as
quickly as possible thereafter.
Note that we cannot guarantee that these or any other features will appear in
any particular form or order. Everything we have written from here on is to be
considered as somewhat speculative.
\subsection{The \code{Symbol} Class for String Interning}
Instances of \code{String} objects are not interned, neither explicitly nor
implicitly. If interning behavior is expected, the \code{Symbol} class must be
used.
\begin{verbatim}
public restricted class Symbol extends String {
/**
* "Intern" a given string.
*
* 1. If the identical string is already in the global intenring table,
* return it.
* 2. Otherwise, if a string of equal content is already in the table,
* return it.
* 3. Otherwise, enter a copy of the string typed as \code{Symbol}
* into the table, then return this resident unique representative
* of all strings of equal content.
*
* The cost of the initial copying can quickly be amortized
* when comparing symbols to each other by identity
* instead of by equality as with non-interned strings.
*
* We assume that string interning is predominantly going to be used
* for relatively short strings, which mitigates the initial interning cost.
*
* Explicit coercion of a value to \code{Symbol} (\code{value as Symbol})
* translates to a call to this method.
* If necessary, this is preceded by implicit coercion to \code{String}
* (\code{value as String as Symbol}).
*/
public static final function fromString(string :String) :Symbol;
override public function equal(other :*):bool
{
return this.identical(other);
}
...
}
\end{verbatim}
All string literals are already \code{Symbol} instances. Additional
\code{Symbol} instances can be produced from Strings by explicit coercion,
as shown below, or by direct calls to the \code{fromString} factory method above.
\begin{verbatim}
function world():Symbol { return "World"; }
let s1 :Symbol = "Hello"; // OK, because string literals are Symbols already
let s2 :Symbol = "Hello" + world(); // Error, concatenating computed symbols produces a String
let string1 :String = "Hello" + world();
let string2 :String = "Hello" + world();
print(string1 == string2); // true
print(string1 === string2); // false
let symbol1 :Symbol = string1 as Symbol;
let symbol2 :Symbol = string2 as Symbol;
print(symbol1 == symbol2); // true
print(symbol1 === symbol2); // true
\end{verbatim}
\code{Symbol} extends \code{String}, but \code{String} is otherwise not
extensible by user code without certain restrictions, which we explain in the
following section.
\subsection{Restricted Classes}
Generalizing the concept behind class \code{Symbol}, we introduce the keyword
\code{restricted} as class attribute. A {\em restricted} class can be extended
by subclasses, but those must not declare any additional instance fields.
Furthermore, every subclass of a restricted class must be a restricted class,
too.
Class \code{String} is a restricted class and class \code{Symbol} extends it
(see above).
Restricted classes can be used for encoding relevant invariants, which can thus
be statically guaranteed by the type checker. That a string is guaranteed to be
interned when typed as a \code{Symbol} is just one example. Other conceivable
(user-defined) examples are: strings that comply with a certain lexical grammar
(e.g. ``well-formed'' identifiers), lists that are assumed to be sorted,
acyclical graphs, etc. Of course, the user has to be careful that the guaranteed
invariants actually hold and cannot be broken by mutation of constituent
variables.
\subsection{Enumeration Types}
In AS3, finite sets of discrete, enumerated values are customarily represented
by string constants, which is a poor choice for various obvious reasons.
Alternatively, one can use integer numbers, but this is not ideal either. There
is a pattern that provides a more type-safe and comfortable encoding, but it
requires a lot of boiler plate code and strict adherence to certain protocol.
Example:
\begin{verbatim}
public final class RgbColor // AS3 enum pattern code:
{
    private var _ordinal:int;
    public function get ordinal():int
    {
        return _ordinal;
    }
    private var _name:String;
    public function get name():String
    {
        return _name;
    }
    public function RgbColor(ordinal:int, name:String)
    {
        _ordinal = ordinal;
        _name = name;
    }
    public static const RED:RgbColor = new RgbColor(0, "red");
    public static const GREEN:RgbColor = new RgbColor(1, "green");
    public static const BLUE:RgbColor = new RgbColor(2, "blue");
}
var myColor:RgbColor = someCondition() ? RgbColor.BLUE : someOtherColor();
...
switch (myColor.ordinal)
{
    case RgbColor.RED.ordinal:
        ...
        break;
    case RgbColor.GREEN.ordinal:
        ...
        break;
    default:
        trace(myColor.name);
        break;
}
\end{verbatim}
In AS4, this will soon be condensed as follows:
\begin{verbatim}
public enum RgbColor { // AS4 code:
    RED, GREEN, BLUE
}
var myColor :RgbColor = someCondition() ? RgbColor.BLUE : someOtherColor();
...
switch (myColor) {
case RED:
...
break;
case GREEN:
...
break;
default:
trace(myColor);
break;
}
\end{verbatim}
There are more elaborate patterns for enum constants that have extra
payload properties, and there are equally powerful syntactic enhancements in
languages such as Haxe, C\# , Java, and others. We will take further inspiration
from these and later extend AS4's enum capabilities accordingly as an extension
of the above.
\subsection{Abstract Classes and Methods}
The new keyword \code{abstract} is used as an attribute for both classes and
methods. An {\em abstract} class cannot be instantiated directly. Only its
non-abstract subclasses can.
An {\em abstract} method must not have a function body and it must not be
native. The first non-abstract subclass of its declaring class must override it
with a full method.
Constructor functions cannot be abstract.
A class that contains any abstract method must be declared as an abstract class.
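For concreteness, here is a sketch of the intended usage (the class and
method names are merely illustrative, not part of any planned API):

\begin{verbatim}
public abstract class Shape {
    // abstract methods must not have a body:
    public abstract function area():Number;

    public function describe():String {
        return "area = " + area();
    }
}

public class Circle extends Shape {
    private var _radius:Number;

    public function Circle(radius:Number) {
        _radius = radius;
    }

    // the first non-abstract subclass must override:
    override public function area():Number {
        return 3.14159265 * _radius * _radius;
    }
}

let c :Shape = new Circle(2.0); // OK: Circle is not abstract
let s :Shape = new Shape(); // Error: Shape cannot be instantiated directly
\end{verbatim}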
\subsection{Constructor, Method, and Function Overloading}
AS4 will allow several different constructors with different parameter lists in
the same class. And then, more generally, methods or functions with different
function signatures can also have the same name, under certain conditions (to be specified later). This is also known
as ad-hoc polymorphism, in that the compiler disambiguates direct calls to such
methods or functions by discerning function signatures at compile time.
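As a speculative sketch of what this could look like (the exact
disambiguation rules are not final):

\begin{verbatim}
public class Point {
    private var _x:Number;
    private var _y:Number;

    public function Point() { // zero-argument constructor
        _x = 0;
        _y = 0;
    }

    public function Point(x:Number, y:Number) { // overloaded constructor
        _x = x;
        _y = y;
    }
}

let origin :Point = new Point(); // resolved at compile time
let p :Point = new Point(3, 4); // picks the two-argument overload
\end{verbatim}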
\subsection{ActionScript Workers}
We intend to reintroduce workers and to allow them in every deployment mode in
AS4, not just in JIT-based deployments as in AS3. It is likely that we will
update the Worker API to better leverage AS4 language features.
\section{Future Features}
\label{future}
Last but not least, let us briefly look further ahead towards features that we
have not fully designed yet, but that we are particularly keen on introducing to AS4.
\begin{description}
\item[Immutable Data Structure Types]
\item[User-Defined Value Types]
\item[Generics (Parameteric Polymorphism)]
\item[Collection Library]
\item[Module System]
\item[Multi-Threading]
\item[Nested Transactions on Versioned Data Structures]
\item[Data Parallelism]
\end{description}
We will elaborate on these once we have completed designs in hand.
|
/-
Copyright © 2020 Nicolò Cavalleri. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Nicolò Cavalleri
! This file was ported from Lean 3 source module geometry.manifold.derivation_bundle
! leanprover-community/mathlib commit a0735864ba72769da4b378673d3dbe2453924fde
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Geometry.Manifold.Algebra.SmoothFunctions
import Mathbin.RingTheory.Derivation
/-!
# Derivation bundle
In this file we define the derivations at a point of a manifold on the algebra of smooth functions.
Moreover, we define the differential of a function in terms of derivations.
The content of this file is not meant to be regarded as an alternative definition to the current
tangent bundle but rather as a purely algebraic theory that provides a purely algebraic definition
of the Lie algebra for a Lie group.
-/
variable (𝕜 : Type _) [NontriviallyNormedField 𝕜] {E : Type _} [NormedAddCommGroup E]
[NormedSpace 𝕜 E] {H : Type _} [TopologicalSpace H] (I : ModelWithCorners 𝕜 E H) (M : Type _)
[TopologicalSpace M] [ChartedSpace H M] (n : ℕ∞)
open Manifold
-- the following two instances prevent poorly understood type class inference timeout problems
instance smoothFunctionsAlgebra : Algebra 𝕜 C^∞⟮I, M; 𝕜⟯ := by infer_instance
#align smooth_functions_algebra smoothFunctionsAlgebra
instance smooth_functions_tower : IsScalarTower 𝕜 C^∞⟮I, M; 𝕜⟯ C^∞⟮I, M; 𝕜⟯ := by infer_instance
#align smooth_functions_tower smooth_functions_tower
/-- Type synonym, introduced to put a different `has_smul` action on `C^n⟮I, M; 𝕜⟯`
which is defined as `f • r = f(x) * r`. -/
@[nolint unused_arguments]
def PointedSmoothMap (x : M) :=
C^n⟮I, M; 𝕜⟯
#align pointed_smooth_map PointedSmoothMap
-- mathport name: pointed_smooth_map
scoped[Derivation] notation "C^" n "⟮" I ", " M "; " 𝕜 "⟯⟨" x "⟩" => PointedSmoothMap 𝕜 I M n x
variable {𝕜 M}
namespace PointedSmoothMap
instance {x : M} : CoeFun C^∞⟮I, M; 𝕜⟯⟨x⟩ fun _ => M → 𝕜 :=
ContMdiffMap.hasCoeToFun
instance {x : M} : CommRing C^∞⟮I, M; 𝕜⟯⟨x⟩ :=
SmoothMap.commRing
instance {x : M} : Algebra 𝕜 C^∞⟮I, M; 𝕜⟯⟨x⟩ :=
SmoothMap.algebra
instance {x : M} : Inhabited C^∞⟮I, M; 𝕜⟯⟨x⟩ :=
⟨0⟩
instance {x : M} : Algebra C^∞⟮I, M; 𝕜⟯⟨x⟩ C^∞⟮I, M; 𝕜⟯ :=
Algebra.id C^∞⟮I, M; 𝕜⟯
instance {x : M} : IsScalarTower 𝕜 C^∞⟮I, M; 𝕜⟯⟨x⟩ C^∞⟮I, M; 𝕜⟯ :=
IsScalarTower.right
variable {I}
/-- `smooth_map.eval_ring_hom` gives rise to an algebra structure of `C^∞⟮I, M; 𝕜⟯` on `𝕜`. -/
instance evalAlgebra {x : M} : Algebra C^∞⟮I, M; 𝕜⟯⟨x⟩ 𝕜 :=
(SmoothMap.evalRingHom x : C^∞⟮I, M; 𝕜⟯⟨x⟩ →+* 𝕜).toAlgebra
#align pointed_smooth_map.eval_algebra PointedSmoothMap.evalAlgebra
/-- With the `eval_algebra` algebra structure evaluation is actually an algebra morphism. -/
def eval (x : M) : C^∞⟮I, M; 𝕜⟯ →ₐ[C^∞⟮I, M; 𝕜⟯⟨x⟩] 𝕜 :=
Algebra.ofId C^∞⟮I, M; 𝕜⟯⟨x⟩ 𝕜
#align pointed_smooth_map.eval PointedSmoothMap.eval
theorem smul_def (x : M) (f : C^∞⟮I, M; 𝕜⟯⟨x⟩) (k : 𝕜) : f • k = f x * k :=
rfl
#align pointed_smooth_map.smul_def PointedSmoothMap.smul_def
instance (x : M) : IsScalarTower 𝕜 C^∞⟮I, M; 𝕜⟯⟨x⟩ 𝕜
where smul_assoc k f h := by
simp only [smul_def, Algebra.id.smul_eq_mul, SmoothMap.coe_smul, Pi.smul_apply, mul_assoc]
end PointedSmoothMap
open Derivation
/-- The derivations at a point of a manifold. Some regard this as a possible definition of the
tangent space -/
@[reducible]
def PointDerivation (x : M) :=
Derivation 𝕜 C^∞⟮I, M; 𝕜⟯⟨x⟩ 𝕜
#align point_derivation PointDerivation
section
variable (I) {M} (X Y : Derivation 𝕜 C^∞⟮I, M; 𝕜⟯ C^∞⟮I, M; 𝕜⟯) (f g : C^∞⟮I, M; 𝕜⟯) (r : 𝕜)
/-- Evaluation at a point gives rise to a `C^∞⟮I, M; 𝕜⟯`-linear map between `C^∞⟮I, M; 𝕜⟯` and `𝕜`.
-/
def SmoothFunction.evalAt (x : M) : C^∞⟮I, M; 𝕜⟯ →ₗ[C^∞⟮I, M; 𝕜⟯⟨x⟩] 𝕜 :=
(PointedSmoothMap.eval x).toLinearMap
#align smooth_function.eval_at SmoothFunction.evalAt
namespace Derivation
variable {I}
/-- The evaluation at a point as a linear map. -/
def evalAt (x : M) : Derivation 𝕜 C^∞⟮I, M; 𝕜⟯ C^∞⟮I, M; 𝕜⟯ →ₗ[𝕜] PointDerivation I x :=
(SmoothFunction.evalAt I x).compDer
#align derivation.eval_at Derivation.evalAt
theorem evalAt_apply (x : M) : evalAt x X f = (X f) x :=
rfl
#align derivation.eval_at_apply Derivation.evalAt_apply
end Derivation
variable {I} {E' : Type _} [NormedAddCommGroup E'] [NormedSpace 𝕜 E'] {H' : Type _}
[TopologicalSpace H'] {I' : ModelWithCorners 𝕜 E' H'} {M' : Type _} [TopologicalSpace M']
[ChartedSpace H' M']
/-- The heterogeneous differential as a linear map. Instead of taking a function as an argument this
differential takes `h : f x = y`. It is particularly handy to deal with situations where the points
on which it has to be evaluated are equal but not definitionally equal. -/
def hfdifferential {f : C^∞⟮I, M; I', M'⟯} {x : M} {y : M'} (h : f x = y) :
PointDerivation I x →ₗ[𝕜] PointDerivation I' y
where
toFun v :=
Derivation.mk'
{ toFun := fun g => v (g.comp f)
map_add' := fun g g' => by rw [SmoothMap.add_comp, Derivation.map_add]
map_smul' := fun k g => by
simp only [SmoothMap.smul_comp, Derivation.map_smul, RingHom.id_apply] }
fun g g' => by
simp only [Derivation.leibniz, SmoothMap.mul_comp, LinearMap.coe_mk,
PointedSmoothMap.smul_def, ContMdiffMap.comp_apply, h]
map_smul' k v := rfl
map_add' v w := rfl
#align hfdifferential hfdifferential
/-- The homogeneous differential as a linear map. -/
def fdifferential (f : C^∞⟮I, M; I', M'⟯) (x : M) :
PointDerivation I x →ₗ[𝕜] PointDerivation I' (f x) :=
hfdifferential (rfl : f x = f x)
#align fdifferential fdifferential
-- mathport name: fdifferential
-- Standard notation for the differential. The abbreviation is `MId`.
scoped[Manifold] notation "𝒅" => fdifferential
-- mathport name: hfdifferential
-- Standard notation for the differential. The abbreviation is `MId`.
scoped[Manifold] notation "𝒅ₕ" => hfdifferential
@[simp]
theorem apply_fdifferential (f : C^∞⟮I, M; I', M'⟯) {x : M} (v : PointDerivation I x)
(g : C^∞⟮I', M'; 𝕜⟯) : 𝒅 f x v g = v (g.comp f) :=
rfl
#align apply_fdifferential apply_fdifferential
@[simp]
theorem apply_hfdifferential {f : C^∞⟮I, M; I', M'⟯} {x : M} {y : M'} (h : f x = y)
(v : PointDerivation I x) (g : C^∞⟮I', M'; 𝕜⟯) : 𝒅ₕ h v g = 𝒅 f x v g :=
rfl
#align apply_hfdifferential apply_hfdifferential
variable {E'' : Type _} [NormedAddCommGroup E''] [NormedSpace 𝕜 E''] {H'' : Type _}
[TopologicalSpace H''] {I'' : ModelWithCorners 𝕜 E'' H''} {M'' : Type _} [TopologicalSpace M'']
[ChartedSpace H'' M'']
@[simp]
theorem fdifferential_comp (g : C^∞⟮I', M'; I'', M''⟯) (f : C^∞⟮I, M; I', M'⟯) (x : M) :
𝒅 (g.comp f) x = (𝒅 g (f x)).comp (𝒅 f x) :=
rfl
#align fdifferential_comp fdifferential_comp
end
|
inductive Foo : Bool → Type where
| Z : Foo false
| O : Foo false → Foo true
| E : Foo true → Foo false
open Foo
def toNat : {b : Bool} → Foo b → Nat
| _, Z => 0
| _, O n => toNat n + 1
| _, E n => toNat n + 1
example : toNat (E (O Z)) = 2 :=
rfl
example : toNat Z = 0 :=
rfl
example (a : Foo false) : toNat (O a) = toNat a + 1 :=
rfl
example (a : Foo true) : toNat (E a) = toNat a + 1 :=
rfl
|
Formal statement is: lemma DERIV_add_const: "DERIV f x :> D \<Longrightarrow> DERIV (\<lambda>x. a + f x :: 'a::real_normed_field) x :> D" Informal statement is: If $f$ is differentiable at $x$, then $a + f$ is differentiable at $x$. |
theory Insertion_Sort
imports Sorting
begin
context begin
qualified primrec insert :: "'a::linorder \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"insert x [] = [x]" |
"insert x (y # ys) = (if x \<le> y then x # y # ys else y # insert x ys)"
qualified lemma insert_sorted: "sorted xs \<Longrightarrow> sorted (insert x xs)"
proof (induction xs rule: sorted.induct)
case empty
show ?case
by (auto intro: single)
next
case (single y)
show ?case
by (cases "x \<le> y") (auto intro: sorted.intros) (* if_splits *)
next
case (cons x\<^sub>1 x\<^sub>2 xs)
show ?case
proof (cases "x \<le> x\<^sub>1")
case True
hence "insert x (x\<^sub>1 # x\<^sub>2 # xs) = x # x\<^sub>1 # x\<^sub>2 # xs"
by simp
moreover have "sorted (x # x\<^sub>1 # x\<^sub>2 # xs)"
apply (rule sorted.intros)
apply fact
apply (rule sorted.intros)
apply fact+
done
ultimately show ?thesis
by simp
next
case False
hence "insert x (x\<^sub>1 # x\<^sub>2 # xs) = x\<^sub>1 # insert x (x\<^sub>2 # xs)"
by simp
moreover have "sorted (x\<^sub>1 # insert x (x\<^sub>2 # xs))"
apply (cases "x \<le> x\<^sub>2")
using cons apply (auto intro: sorted.intros)
apply (rule sorted.cons)
using False apply simp
apply assumption
done
ultimately show ?thesis
by simp
qed
qed
qualified lemma insert_permutation: "mset (insert x xs) = {#x#} + mset xs"
proof (induction xs)
case Nil
show ?case
by simp
next
case (Cons y ys)
show ?case
proof (cases "x \<le> y")
case True
thus ?thesis
by (simp add: union_commute)
next
case False
thus ?thesis
apply simp
apply (subst Cons)
by (simp add: union_assoc)
qed
qed
primrec insort :: "'a::linorder list \<Rightarrow> 'a list" where
"insort [] = []" |
"insort (x # xs) = insert x (insort xs)"
end
global_interpretation insertion_sort: sorting insort
proof
fix xs :: "'a::linorder list"
show "sorted (insort xs)"
proof (induction xs)
case Nil
show ?case
apply simp
apply (rule sorted.empty)
done
next
case (Cons y ys)
show ?case
apply simp
apply (rule Insertion_Sort.insert_sorted)
apply (rule Cons)
done
qed
show "mset (insort xs) = mset xs"
by (induction xs) (auto simp: Insertion_Sort.insert_permutation union_commute)
qed
export_code insort
checking Scala
end |
lemma open_Collect_less: fixes f g :: "'a::topological_space \<Rightarrow> 'b::linorder_topology" assumes f: "continuous_on UNIV f" and g: "continuous_on UNIV g" shows "open {x. f x < g x}" |
function findFirstSmaller(x::Float64,v::Array{Float64,1})
l = length(v)
if l == 129 # 8bit case
idx = 64 # starting index (l-1)/2
        n = 6   # number of bisection steps, log2((l-1)/2)
elseif l == 32769 # 16bit case
idx = 16384 # starting index (l-1)/2
n = 14
else
throw(BoundsError())
end
if x > v[end-1] # floatmax and infinity case
return l
end
# binary tree search
for i in 1:n
if x < v[idx]
idx -= 2^(n-i)
else
idx += 2^(n-i)
end
end
# split off the i = n+1 case
if x >= v[idx]
idx += 1
end
return idx
end
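# Illustrative usage (an assumption for this sketch: v is one of the sorted
# lookup tables the function expects, here the 8-bit case with 129 entries):
# v = collect(range(-1.0, 1.0, length=129))
# findFirstSmaller(0.0, v)    # index located by the binary tree search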
|
Call'd him soft names in many a musèd rhyme,
|
subroutine readINFRACT
use fractdata
implicit double precision(a-h,o-z)
open(unit=1,file='IN_FRACT',status='old')
read(1,90)icomm
read(1,*)Level
read(1,90)icomm
read(1,*)dtol
c dtol is not used in the fragmentation
c but in a later program
90 format(a80)
close(unit=1)
return
end
|
Should You Maintain, Restore, or Modify your Classic Car?
The new Lincoln Continental has got to be the best looking classic car that's being brought back. Suicide doors and everything!
Classic Cars stored in forest since WWII. |
```python
#@markdown Initial Configuration
from IPython.utils import io
from google.colab.data_table import DataTable
from IPython.display import display, display_svg
from IPython.display import Javascript
from IPython.display import Markdown, Latex
from IPython.display import Audio, Image
from IPython.display import IFrame, HTML
with io.capture_output() as capt:
# https://matplotlib-axes-aligner.readthedocs.io/en/latest/
!pip install mpl-axes-aligner
!pip install gradio
import gradio as gr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import patches
from mpl_toolkits.mplot3d import Axes3D
# https://matplotlib-axes-aligner.readthedocs.io/en/latest/
from mpl_axes_aligner import align
import random
from scipy import constants as const
from scipy import stats as st
from sympy import Point, Polygon
# Avoids scroll-in-the-scroll in the entire Notebook
# https://stackoverflow.com/a/66891328
def resize_colab_cell():
display(Javascript(
'google.colab.output.setIframeHeight(0, true, {maxHeight: 5000})'
))
get_ipython().events.register('pre_run_cell', resize_colab_cell)
def dLatex(self):
return display(Latex(self))
def dMarkdown(self):
return display(Markdown(self))
```
```python
G_dict = const.physical_constants['Newtonian constant of gravitation']
dMarkdown(f'G: {G_dict}')
def sphereGrav(x, z, R, rho):
    # Vertical gravity anomaly of a buried sphere (radius R, density contrast
    # rho) centred at depth z, observed at horizontal offset x; equivalent to
    # g = (4/3)*pi*G*rho*R**3*abs(z)/(x**2 + z**2)**(3/2)
    A = 4*np.pi*const.G*rho*R**3
    xz = (x/z)**2+1
    B = 3*z**2*xz**(3/2)
    g = A/B
    if np.isscalar(x):
        return g
    else:
        return np.c_[x, g]
xx = np.r_[-100:100:501j]
xe = 23
p_0 = {
'z': -20, #(m)
'R': 8, #(m)
'rho': 1000 #(kg/m^3)
}
p_1 = {
'z': -45, #(m)
'R': 12, #(m)
'rho': 600 #(kg/m^3)
}
columns = ['x','z','R','rho','g']
df = pd.DataFrame([p_0,p_1], columns=columns[:-1])
df['x'] = xe
display(df)
phi, theta = np.mgrid[0:1*np.pi:100j,
0:2*np.pi:100j]
sp = lambda R: R*np.array([np.sin(phi)*np.cos(theta),
np.sin(phi)*np.sin(theta),
np.cos(phi)
])
xyz = lambda x, z, R: [sum(_) for _ in zip(sp(R), (x, 0, z))]
fig = plt.figure(figsize=plt.figaspect(1))
ax = Axes3D(fig)
ax.plot_surface(*xyz(x=0, z=p_0['z'], R=p_0['R']),
color='tab:cyan', lw=0, alpha=2/3)
ax.plot_surface(*xyz(x=0, z=p_1['z'], R=p_1['R']),
color='tab:blue', lw=0, alpha=2/3)
ax.set_xlim(-30, +30)
ax.set_ylim(-30, +30)
ax.set_zlim(-60, 0)
ax.set_xlabel('x (m)')
ax.set_ylabel('y (m)')
ax.set_zlabel('z (m)')
plt.show()
```
```python
p_0b = p_0.copy()
p_0b['x'] = xe
S_0 = sphereGrav(**p_0b)
p_0b['g'] = S_0
table_0 = pd.DataFrame(p_0b,
columns=columns,
index=[0])
display(table_0)
fig, ax_z = plt.subplots()
ax_g = ax_z.twinx()
ax_z.axhline(c='k', lw=1, zorder=0)
ax_g.axvline(100, c='k', lw=1)
p = ax_g.plot(*sphereGrav(xx, **p_0).T)
c = p[-1].get_color()
circle_0 = plt.Circle((0, p_0['z']), p_0['R'],
fill=True, color=c)
ax_z.add_artist(circle_0)
ax_z.set_aspect(1)
ax_g.set_yticks(np.r_[0:4e-7:6j])
ax_z.set_yticks(np.r_[-60:0:7j])
ax_z.set_xlim(-100,100)
ax_z.set_xticks(np.r_[-92:92:9j])
align.yaxes(ax_g, 0, ax_z, 0, 1/2)
ax_z.set_xlabel('x (m)')
ax_z.set_ylabel('z (m)', y=1/4)
ax_g.set_ylabel('g (mGal)', y=3/4)
plt.show()
```
```python
p_1b = p_1.copy()
p_1b['x'] = xe
S_1 = sphereGrav(**p_1b)
p_1b['g'] = S_1
table_1 = pd.DataFrame(p_1b,
columns=columns,
index=[1])
display(table_1)
fig, ax_z = plt.subplots()
ax_g = ax_z.twinx()
ax_z.axhline(c='k', lw=1, zorder=0)
ax_g.axvline(xe, c='k', lw=1)
p = ax_g.plot(*sphereGrav(xx, **p_1).T)
c = p[-1].get_color()
circle_1 = plt.Circle((0, p_1['z']), p_1['R'],
fill=True, color=c)
ax_z.add_artist(circle_1)
ax_z.set_aspect(1)
ax_g.set_yticks(np.r_[0:4e-7:6j])
ax_z.set_yticks(np.r_[-60:0:7j])
ax_z.set_xlim(-100,100)
ax_z.set_xticks(np.r_[-92:92:9j])
align.yaxes(ax_g, 0, ax_z, 0, 1/2)
ax_z.set_xlabel('x (m)')
ax_z.set_ylabel('z (m)', y=1/4)
ax_g.set_ylabel('g (mGal)', y=3/4)
plt.show()
```
```python
table = pd.concat([table_0, table_1])
display(table)
err = abs(S_1-S_0)/(S_1+S_0)*2
dMarkdown(f'err={err:.6g}')
fig, ax_z = plt.subplots()
ax_g = ax_z.twinx()
ax_z.axhline(c='k', lw=1, zorder=0)
ax_g.axvline(xe, c='k', lw=1)
for p_i in (p_0, p_1):
p = ax_g.plot(*sphereGrav(xx, **p_i).T)
c = p[-1].get_color()
circle_i = plt.Circle((0, p_i['z']), p_i['R'],
fill=True, color=c)
ax_z.add_artist(circle_i)
ax_z.set_aspect(1)
ax_g.set_yticks(np.r_[0:4e-7:6j])
ax_z.set_yticks(np.r_[-60:0:7j])
ax_z.set_xlim(-100,100)
ax_z.set_xticks(np.r_[-92:92:9j])
align.yaxes(ax_g, 0, ax_z, 0, 1/2)
ax_z.set_xlabel('x (m)')
ax_z.set_ylabel('z (m)', y=1/4)
ax_g.set_ylabel('g (mGal)', y=3/4)
plt.show()
xxb = np.r_[22.5:23.5:11j]
fig, ax = plt.subplots()
ax.plot(*sphereGrav(xxb, **p_0).T)
ax.plot(*sphereGrav(xxb, **p_1).T)
ax.axvline(xe, c='k', lw=1)
ax.set_xlabel('x (m)')
ax.set_ylabel('g (mGal)')
plt.show()
```
```python
def fun_sphere(p_a, p_b):
fig, ax_z = plt.subplots()
ax_g = ax_z.twinx()
ax_z.axhline(c='k', lw=1, zorder=0)
for p_i in (p_a, p_b):
p = ax_g.plot(*sphereGrav(xx, **p_i).T)
c = p[-1].get_color()
circle_i = plt.Circle((0, p_i['z']), p_i['R'],
fill=True, color=c, alpha=1/2)
ax_z.add_artist(circle_i)
ax_z.set_aspect(1)
ax_g.set_yticks(np.r_[0:4e-7:6j])
ax_z.set_yticks(np.r_[-60:0:7j])
ax_z.set_xlim(-100,100)
ax_z.set_xticks(np.r_[-92:92:9j])
align.yaxes(ax_g, 0, ax_z, 0, 1/2)
ax_z.set_xlabel('x (m)')
ax_z.set_ylabel('z (m)', y=1/4)
ax_g.set_ylabel('g (mGal)', y=3/4)
fig.tight_layout(pad=0)
plt.close()
return fig
fun_sphere(p_0, p_1)
```
```python
my_fun = lambda z, R, rho: fun_sphere(
p_0.copy(),
{'z': z, 'R': R, 'rho': rho})
print(p_1)
my_fun(**p_1)
```
```python
iface = gr.Interface(
fn=my_fun,
inputs=[gr.inputs.Slider(-60, -10 , .1, default=p_0['z']),
gr.inputs.Slider(5, 15 , .1, default=p_0['R']),
gr.inputs.Slider(0, 2500 , 10, default=p_0['rho'])],
outputs='plot',
live=True,
allow_flagging=False,
allow_screenshot=False,
    # title='Theoretical Gravity',
    # description='Theoretical Gravity Values',
# article = """<p style='text-align: center'>
# <a href='https://en.wikipedia.org/wiki/Theoretical_gravity'>
# Wikipedia | Theoretical Gravity</a></p>""",
examples=[list(p_i.values()) for p_i in (p_1, p_0)],
theme='huggingface', # "default", "compact" or "huggingface"
layout='unaligned' # 'horizontal', 'unaligned', 'vertical'
)
with io.capture_output() as captured:
iface.launch(inline=True)
print(iface.share_url)
IFrame(src=iface.share_url, width=1200, height=1000)
```
<IPython.core.display.Javascript object>
https://47231.gradio.app
```python
def Talwani(Model, XZ, rho=600):
    # Talwani line-integral method: gravity anomaly of a 2D polygonal body
    # (vertices in Model, traversed as a closed polygon) at the points XZ.
    k = len(Model)
    lenXZ = len(XZ)
xietalist = [Model-XZ[i] for i in range(lenXZ)]
lenxieta = len(xietalist)
grav = np.empty(lenxieta)
for j in range(lenxieta):
xi = xietalist[j].T[0]
eta = xietalist[j].T[1]
sum = 0
for i in range(k):
A = (xi[i-1]*eta[i] - xi[i]*eta[i-1])/\
((xi[i]-xi[i-1])**2 + (eta[i]-eta[i-1])**2)
B1 = 0.5*(eta[i] - eta[i-1])*\
np.log((xi[i]**2 + eta[i]**2)/\
(xi[i-1]**2 + eta[i-1]**2))
B2 = (xi[i] - xi[i-1])*\
(np.arctan(xi[i]/eta[i])-\
np.arctan(xi[i-1]/eta[i-1]))
sum += A*(B1+B2)
grav[j] = sum
grav = 1e6*2*const.G*rho*grav
return np.c_[XZ[:,0], grav]
def draw_eye(s, q=(1,1), d=(0,0), N=50):
m = N//2
n = (N-m)
q = np.array(q)/2
gauss = st.norm(0, 1/s)
f = lambda x: (gauss.pdf(x)-gauss.pdf(1))/\
(gauss.pdf(0)-gauss.pdf(1))
ii = np.r_[-1:+1:(n+1)*1j]
jj = np.r_[+1:-1:(m+1)*1j]
top = np.c_[ii, +f(ii)][:-1]
bottom = np.c_[jj, -f(jj)][:-1]
eye = q*np.r_[top, bottom] + d
return eye
XZ = np.c_[-90:90:101j, 0:0:101j]
cosas = np.c_[1:5.7:5j,
40:10:5j,
10:40:5j,
-10:-70:5j,
600:1500:5j
]
fig, ax_z = plt.subplots()
ax_g = ax_z.twinx()
ax_z.axhline(c='k', lw=1, zorder=0)
for w, qx, qz, dz, rho in cosas[:3]:
eye = draw_eye(w, (qx, qz), (0, dz), 100)
grav = Talwani(eye, XZ, rho)
p = ax_g.plot(*grav.T)
c = p[-1].get_color()
eye_poly = patches.Polygon(
eye,
        fill=True,
color=c)
ax_z.add_patch(eye_poly)
ax_z.set_aspect(1)
ax_g.set_yticks(np.r_[0:1.6:6j])
ax_z.set_yticks(np.r_[-60:0:7j])
ax_z.set_xlim(-90,90)
ax_z.set_ylim(-100,0)
ax_z.set_xticks(np.r_[-90:90:9j])
align.yaxes(ax_g, 0, ax_z, 0, 1/2)
ax_z.set_xlabel('x (m)')
ax_z.set_ylabel('z (m)', y=1/4)
ax_g.set_ylabel('g (mGal)', y=3/4)
plt.show()
```
|
Require Import Nijn.Prelude.Checks.
Require Import Nijn.Prelude.Basics.Decidable.
Require Import Nijn.Prelude.Relations.WellfoundedRelation.
Declare Scope qr.
Open Scope qr.
Delimit Scope qr with qr.
(** * Quasi-orders *)
Record QuasiRel :=
{
carrier_qr :> Type ;
ge_qr : carrier_qr -> carrier_qr -> Prop
}.
Arguments ge_qr {_} _ _.
Notation "x >= y" := (ge_qr x y) : qr.
Class isQuasiRel ( X : QuasiRel ) :=
{
ge_qr_refl : forall (x : X),
x >= x;
ge_qr_trans : forall { x y z : X },
x >= y -> y >= z -> x >= z
}.
Close Scope qr.
(** * Compatible relations *)
Declare Scope compat.
Open Scope compat.
Delimit Scope compat with compat.
(** A compatible relation is a type equipped with two relations *)
Record CompatRel :=
{
carrier :> Type ;
gt : carrier -> carrier -> Prop ;
ge : carrier -> carrier -> Prop
}.
Arguments gt {_} _ _.
Arguments ge {_} _ _.
Notation "x > y" := (gt x y) : compat.
Notation "x >= y" := (ge x y) : compat.
(** These are the axioms that should hold for compatible relations. Note that we do not require the relations to be well-founded, but that condition is formulated separately. *)
Class isCompatRel (X : CompatRel) :=
{
gt_trans : forall {x y z : carrier X},
x > y -> y > z -> x > z ;
ge_trans : forall {x y z : X},
x >= y -> y >= z -> x >= z ;
ge_refl : forall (x : X),
x >= x ;
compat : forall {x y : X},
x > y -> x >= y ;
ge_gt : forall {x y z : X},
x >= y -> y > z -> x > z ;
gt_ge : forall {x y z : X},
x > y -> y >= z -> x > z
}.
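(** For illustration, a small example (a sketch, not used in the rest of this
    development): the natural numbers with the usual orders form a compatible
    relation; the [isCompatRel] obligations are the standard order lemmas
    on [nat]. *)
Example nat_CompatRel : CompatRel :=
  {|
    carrier := nat ;
    gt := fun n m => m < n ;
    ge := fun n m => m <= n
  |}.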
(** ** Lemmata for compatible relations *)
Proposition eq_gt
{X : CompatRel}
{x y z : X}
(p : x = y)
(q : y > z)
: x > z.
Proof.
induction p.
exact q.
Qed.
Proposition gt_eq
{X : CompatRel}
{x y z : X}
(p : x > y)
(q : y = z)
: x > z.
Proof.
induction q.
exact p.
Qed.
Proposition eq_ge
{X : CompatRel}
{x y z : X}
(p : x = y)
(q : y >= z)
: x >= z.
Proof.
induction p.
exact q.
Qed.
Proposition ge_eq
{X : CompatRel}
{x y z : X}
(p : x >= y)
(q : y = z)
: x >= z.
Proof.
induction q.
exact p.
Qed.
Proposition eq_to_ge
{X : CompatRel}
`{isCompatRel X}
{x y : X}
(p : x = y)
: x >= y.
Proof.
induction p.
apply ge_refl.
Qed.
(** ** Minimal elements in a compatible relation *)
Definition is_minimal_element
{X : CompatRel}
(x : X)
: Prop
:= forall (y : X), y >= x.
Record minimal_element (X : CompatRel) :=
make_min_el
{
min_el :> X ;
is_minimal : is_minimal_element min_el
}.
Definition is_strict_minimal_element
{X : CompatRel}
(x : X)
: Prop
:= forall (y : X), y <> x -> y > x.
Record strict_minimal_element (X : CompatRel) :=
make_strict_min_el
{
strict_min_el :> X ;
is_strict_minimal : is_strict_minimal_element strict_min_el
}.
Definition is_minimal_to_strict_minimal
{X : CompatRel}
`{decEq X}
`{isCompatRel X}
(x : strict_minimal_element X)
: minimal_element X.
Proof.
simple refine (make_min_el _ _ _).
- exact x.
- intros y.
destruct (dec_eq y x) as [ p | p ].
+ apply eq_to_ge.
exact p.
+ exact (compat (is_strict_minimal _ x y p)).
Defined.
|
\nocite{*}
\begin{appendix}
\section{Mathematical developments}
\subsection{Random positioning optimization}
This section presents the methods used for the random positioning of points that must respect certain properties.
A first approach would be to draw points freely and discard those that do not satisfy the required conditions.
It is more efficient to draw only points that already have the required properties.
\subsubsection{Continuous uniform distribution in a circle}\label{sec:circle-random-position}
A polar coordinate system \( (\varphi, r) \) is used in a disk of radius \( R_0 \) in which random positions are to be drawn.
The probability density of presence of a point in the disk is \( 1 / \left( \pi {R_0}^2 \right) \).
At each random draw, the probability of finding the dislocation in the surface element \( r dr d\varphi\) is then:
\begin{equation}
\frac{r dr d\varphi}{\pi {R_0}^2} =
\frac{2 r}{{R_0}^2} dr \frac{1}{2 \pi} d\varphi
\end{equation}
\bigskip
Let \( \phi \) and \( R \) be two random variables such that \( \phi(\omega) \in [0, 2\pi[ \) and \( R(\omega) \in [0, R_0] \). Let \( f_\phi \) and \( f_R \) be the probability density functions of \( \phi \) and \( R \), respectively.
\begin{equation}
f_\phi(\varphi) =
\frac{1}{2 \pi}
\end{equation}
\begin{equation}
f_R(r) =
\frac{2r}{{R_0}^2}
\end{equation}
\medskip
Let \( F_\phi \) and \( F_R \) be the cumulative distribution functions of \( \phi \) and \( R \), respectively.
\begin{equation}
F_\phi(\varphi) =
\mathbb{P}(\phi \leq \varphi) =
\int_0^\varphi f_\phi(t) dt = \frac{\varphi }{2 \pi}
\end{equation}
\begin{equation}
F_R(r) =
\mathbb{P}(R \leq r) =
\int_0^r f_R(t) dt = \left( \frac{r}{R_0} \right)^2
\end{equation}
\medskip
Let \( X_\phi \) and \( X_R \) be two new random variables defined using \( \phi \) and \( R \) such that \( X_\phi \) and \( X_R \) follow a uniform law \( U(0, 1) \).
\begin{equation}
X_\phi =
\frac{\phi}{2 \pi}
\end{equation}
\begin{equation}
X_R =
\left( \frac{R}{R_0} \right)^2
\end{equation}
\medskip
Since \( X_\phi \sim U(0, 1) \) and \( X_R \sim U(0, 1) \), the random position \( (\varphi, r)\) of a point on the disk can be chosen with the sampling \( ( x_\phi, x_R ) \) of two uniformly distributed random variables \( X_\phi \) and \( X_R \):
\begin{equation}\label{eq:circle-random-position-theta}
\varphi =
2 \pi x_\phi
\end{equation}
\begin{equation}\label{eq:circle-random-position-r}
r =
R_0 \sqrt{x_R}
\end{equation}
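\medskip
For illustration, a minimal Python sketch (not part of the lpa packages; the function name is hypothetical) implementing equations \eqref{eq:circle-random-position-theta} and \eqref{eq:circle-random-position-r}:
\begin{tcolorbox}[width=\linewidth, title=python]
\begin{verbatim}
import numpy as np

def draw_in_disk(n, r0, seed=None):
    """n uniform random positions in a disk of radius r0."""
    rng = np.random.default_rng(seed)
    phi = 2*np.pi*rng.random(n)    # varphi = 2 pi x_phi
    r = r0*np.sqrt(rng.random(n))  # r = R_0 sqrt(x_R)
    return r*np.cos(phi), r*np.sin(phi)
\end{verbatim}
\end{tcolorbox}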
\subsubsection{Drawing of points in cell walls}\label{sec:cell-random-position}
\bigfig{fig:cell-random-position}{insert/positions}{walls}{Division of the surface occupied by the walls}
To randomly select points in the cell borders, 4 virtual rectangles \( A, B, C, D \) are created in each cell. If the probability of presence in the cell is given by \( P = \gls{area}_{\text{cell}} / \gls{area}_{\gls{roi}} \), then the probability of presence in the rectangles is given by \( P_A = P_B = P_C = P_D = P / 4 \).
The position in the selected rectangle is then chosen by applying a uniform continuous law.
\subsection{Overlapping}\label{sec:overlapping}
\subsubsection{Circular segment area}
For the area shaded in \figref{fig:overlapping:circular-segment}, the formula is obtained by calculating the angle \( \arccos (d/r) \) between \( d \) and \( r \).
\begin{minipage}{0.5\linewidth}
\bigfig{fig:overlapping:circular-segment}{insert/overlapping}{circular_segment}{Circular segment area}
\end{minipage}%
\begin{minipage}{0.5\linewidth}
\begin{equation}\label{eq:overlapping:circular-segment}
\cirseg(r, d) =
r^2 \arccos\left( \frac{d}{r} \right) - d \sqrt{r^2 - d^2}
\end{equation}
\end{minipage}%
\subsubsection{Intersection area of two circles}\label{sec:circle-circle-intersection}
The area of intersection of two circles of radius \( r \) and \( R \) whose centers are spaced by a distance \( d \) as shown in \figref{fig:circle-circle-intersection} can be calculated using equation \eqref{eq:overlapping:circular-segment}. The expression is given by the function \( \circir (r, R, d) \) in equation \eqref{eq:circle-circle-intersection}.
\begin{align}
\gls{area}_0 (r, R, d) & =
\textstyle
\cirseg \left(r, \frac{d^2 + r^2 - R^2}{2 d} \right) + \cirseg \left( R, \frac{d^2 + R^2 - r^2}{2 d} \right)
\\[1mm]
\gls{area}_0 (r, R, d) & =
\textstyle
r^2 \arccos \left( \frac{d^2 + r^2 - R^2}{2 d r} \right)
+ R^2 \arccos \left( \frac{d^2 + R^2 - r^2}{2 d R} \right)
- \frac{\sqrt{(-d+r+R)(d+r-R)(d-r+R)(d+r+R)}}{2}
\end{align}
\begin{minipage}{0.5\linewidth}
\bigfig{fig:circle-circle-intersection}{insert/overlapping}{circle_circle}{Intersection of two circles}
\end{minipage}%
\begin{minipage}{0.5\linewidth}
\begin{equation}\label{eq:circle-circle-intersection}
\circir (r, R, d) =
\begin{cases}
0 & \text{for } r + R < d \\[2mm]
\pi r^2 & \text{for } d + r < R \\[2mm]
\pi R^2 & \text{for } d + R < r \\[2mm]
\gls{area}_0 (r, R, d) & \text{else}
\end{cases}
\end{equation}
\end{minipage}%
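\medskip
A direct Python transcription of \eqref{eq:circle-circle-intersection} (an illustrative sketch; the function names are hypothetical and the generic branch assumes \( d > 0 \)):
\begin{tcolorbox}[width=\linewidth, title=python]
\begin{verbatim}
import numpy as np

def cirseg(r, d):
    # circular segment area of the circular-segment equation
    return r**2*np.arccos(d/r) - d*np.sqrt(r**2 - d**2)

def circir(r, R, d):
    if r + R < d: return 0.0          # disjoint circles
    if d + r < R: return np.pi*r**2   # small circle inside the big one
    if d + R < r: return np.pi*R**2
    return (cirseg(r, (d**2 + r**2 - R**2)/(2*d))
            + cirseg(R, (d**2 + R**2 - r**2)/(2*d)))
\end{verbatim}
\end{tcolorbox}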
\subsubsection{Intersection area of a circle and a square}\label{sec:circle-square-intersection}
\bigfig{fig:overlapping:circle-square-examples}{insert/overlapping}{circle_square}{Some possibilities of overlapping of a square and a circle}
In order to calculate the intersection area \cirsqr \ between a circle of radius \( r \) and a square of side \( S \), one would have to distinguish many cases in which the circle extends beyond the right, left, and bottom edges of the square.
To simplify the case distinction, the study is first restricted to one quadrant of the circle.
The area of the quadrant lying outside the square is calculated; the contributions of the four quadrants are then summed and subtracted from the area of the circle to obtain the total intersection area.
\figref{fig:overlapping:circle-square-quadrants} shows the different possibilities considered in a circle quadrant.
\bigfig{fig:overlapping:circle-square-quadrants}{insert/overlapping}{quadrants}{Possibilities illustrated in the upper right circle quadrant}
\bigskip
The occurrence of the cases presented in \figref{fig:overlapping:circle-square-quadrants} is formally defined as a function of \( d_1 \), \( d_2 \) and \( r \) in equations \eqref{eq:quadrant-case-1}, \eqref{eq:quadrant-case-2} and \eqref{eq:quadrant-case-3}.
\begin{align}
C1 &\Longleftrightarrow \left( \neg C3 \right) \wedge \left( d_1 < r \right) \label{eq:quadrant-case-1} \\
C2 &\Longleftrightarrow \left( \neg C3 \right) \wedge \left( d_2 < r \right) \label{eq:quadrant-case-2} \\
C3 &\Longleftrightarrow \left( d_1^2 + d_2^2 < r^2 \right) \label{eq:quadrant-case-3}
\end{align}
\medskip
The areas grayed in \figref{fig:overlapping:circle-square-quadrants} are expressed in \eqref{eq:overlapping:area-12} and \eqref{eq:overlapping:area-3} using \( \cirseg(r, d) \) defined in equation \eqref{eq:overlapping:circular-segment}.
\begin{align}
\gls{area}_{1 \lor 2} (r, d) &= \frac{\cirseg(r, d)}{2} \label{eq:overlapping:area-12} \ \text{for } C1 \text{ or } C2 \\
\gls{area}_3 (r, d_1, d_2) &= \frac{\pi r^2}{4} - d_1 d_2 \label{eq:overlapping:area-3} \ \text{for } C3
\end{align}
\medskip
The area \( \gls{area}_Q (r, d_1, d_2) \) outside a quadrant is then given in equation \eqref{eq:outside-quadrant}.
\begin{equation}\label{eq:outside-quadrant}
\gls{area}_Q (r, d_1, d_2) =
\gls{indfun}_{C1} \gls{area}_{1 \lor 2} (r, d_1) +
\gls{indfun}_{C2} \gls{area}_{1 \lor 2} (r, d_2) +
\gls{indfun}_{C3} \gls{area}_{3} (r, d_1, d_2)
\end{equation}
\medskip
Finally, to obtain the area \( \cirsqr (r, S, x, y) \) of intersection between a circle of radius \( r \) centered in \( (x, y) \) and a square of side \( S \), the contribution of each quadrant is taken into account in equation \eqref{eq:circle-square-intersection}.
\begin{equation}\label{eq:circle-square-intersection}
\cirsqr (r, S, x, y) =
\pi r^2
- \gls{area}_Q (r, x, y)
- \gls{area}_Q (r, S - x, y)
- \gls{area}_Q (r, x, S - y)
- \gls{area}_Q (r, S - x, S - y)
\end{equation}
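\medskip
Continuing the sketch above and reusing \texttt{cirseg}, equations \eqref{eq:quadrant-case-1}--\eqref{eq:quadrant-case-3}, \eqref{eq:outside-quadrant} and \eqref{eq:circle-square-intersection} translate directly to:
\begin{tcolorbox}[width=\linewidth, title=python]
\begin{verbatim}
def area_Q(r, d1, d2):
    # area of one circle quadrant lying outside the square
    C3 = d1*d1 + d2*d2 < r*r
    C1 = (not C3) and (d1 < r)
    C2 = (not C3) and (d2 < r)
    a = 0.0
    if C1: a += cirseg(r, d1)/2
    if C2: a += cirseg(r, d2)/2
    if C3: a += np.pi*r*r/4 - d1*d2
    return a

def cirsqr(r, S, x, y):
    return (np.pi*r*r - area_Q(r, x, y) - area_Q(r, S - x, y)
            - area_Q(r, x, S - y) - area_Q(r, S - x, S - y))
\end{verbatim}
\end{tcolorbox}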
\subsubsection{Average intersection area of two circles}
The expected value of intersection of two circles of radius \( r \) and \( R \) whose centers are spaced by a random distance \( D \in [0, R] \) is given by the function \( \gls{expval} \left( \circir (r, R, D) \right) \) defined in equation \eqref{eq:mean-circle-circle-intersection}.
For a uniform distribution of points, \( \textstyle D = R \sqrt{X} \) with \( X \) following a uniform law \( U(0, 1) \) as explained in \ref{sec:circle-random-position}.
\begin{equation}\label{eq:mean-circle-circle-intersection}
\gls{expval} \left( \circir(r, R, D) \right) =
\int_{x=0}^1 \circir (r, R, R \sqrt{x}) dx
\end{equation}
\bigfig{fig:mean-circle-circle-intersection}{insert/overlapping}{mean_circle_circle}{Comparison of the analytical formulation \eqref{eq:mean-circle-circle-intersection} of \( \gls{expval} \left( \circir \right) \) with a simulation}
\subsubsection{Average intersection area of a circle and a square}
Let \( S \) be the side of a square and \( r \) the radius of a circle.
Let \( X \) and \( Y \) be two random variables following a uniform law \( U(0, S) \).
Here it is sought the analytical expression of \( \gls{expval} \left( \cirsqr (r, S, X, Y) \right) \).
\figref{fig:overlapping:application-areas} shows the regions of occurrence of the cases presented in \figref{fig:overlapping:circle-square-quadrants}.
When the center \( ( X, Y ) \) of the circle is chosen in a gray region of \figref{fig:overlapping:application-areas}, the corresponding area in \figref{fig:overlapping:circle-square-quadrants} is calculated.
The probability that the center of the circle falls in a given gray region is proportional to the area of that region, since the law is uniform.
But the area lying outside the square depends on the position.
It is therefore necessary to integrate these areas over the positions of occurrence.
\bigfig{fig:overlapping:application-areas}{insert/overlapping}{application_areas}{Case application areas for various radius values}
\medskip
The limits of integration are defined from \figref{fig:overlapping:application-areas}.
\begin{align}
\varphi_1 &=
\arccos \left( \frac{S}{\max(r, S)} \right) \\
\varphi_2 &=
\arctan \left( \frac{S}{\min(r, S)} \right) \\
\varphi_3 &=
\arcsin \left( \frac{S}{\max(r, S)} \right) \\
x_1 (\varphi) &=
\frac{\min(r, S)}{\cos{\varphi}} \\
x_2 (\varphi) &=
\frac{S}{\sin{\varphi}}
\end{align}
\medskip
The contributions for one quadrant of the different cases can then be expressed.
\begin{align}
\gls{expval} \left( \gls{area}_{1 \lor 2} \right) &=
\frac{1}{S^2} \left(
\int_{\varphi = \varphi_1}^{\varphi_2}
\int_{x=r}^{x_1(\varphi)}
\gls{area}_{1 \lor 2} (r, x \cos \varphi)
x dx d\varphi
+
\int_{\varphi = \varphi_2}^{\varphi_3}
\int_{x=r}^{x_2(\varphi)}
\gls{area}_{1 \lor 2} (r, x \cos \varphi)
x dx d\varphi
\right)
\\
\gls{expval} \left( \gls{area}_{3} \right) &=
\frac{1}{S^2} \left(
\int_{\varphi = \varphi_1}^{\varphi_3}
\int_{x=0}^{r}
\gls{area}_{3} \left( r, x \sin(\varphi), x \cos(\varphi) \right)
x dx d\varphi
+
2 \int_{y = 0}^{S}
\int_{x=0}^{\tan(\varphi_1)y}
\gls{area}_{3} \left( r, x, y \right)
dx dy
\right)
\end{align}
\medskip
Finally, for the mean intersection area, the contribution of each quadrant is taken into account.
\begin{equation}\label{eq:mean-circle-square-intersection}
\gls{expval} \left( \cirsqr (r, S, X, Y) \right) =
\begin{cases}
0 &\text{if } r = 0
\\
\pi r^2 - 4 \left(2 \gls{expval} \left( \gls{area}_{1 \lor 2} \right) +\gls{expval} \left( \gls{area}_{3} \right)\right) &\text{if } r \in \ ] 0, \sqrt{2} S [
\\
S^2 &\text{if } r \geq \sqrt{2} S
\end{cases}
\end{equation}
\bigfig{fig:mean-circle-square-intersection}{insert/overlapping}{mean_circle_square}{Comparison of the analytical formulation \eqref{eq:mean-circle-square-intersection} of \( \gls{expval} \left( \cirsqr \right) \) with a simulation}
\subsubsection{Conjecture}
The behavior of the expected value of the overlapping area becomes simple when it is integrated over a neighborhood radius \( r \in [r_0, +\infty[ \).
It seems that, for at least some functions \( f \), we have the relation given in equations \eqref{eq-conjecture-circle} and \eqref{eq-conjecture-square}.
\( \mathcal{D}_{\symcirclecircle f} \) and \( \mathcal{D}_{\symcirclesquare f} \) are multiplicative constants applied to the size of the region of interest.
They introduce an equivalent effective size for it.
\begin{align}
\int_{r=r_0}^{+\infty} f(r) \frac{d \gls{expval} \left( \circir (r, R, D) \right)}{dr} dr
\ \underset{\frac{r_0}{R_{\gls{roi}}} \rightarrow 0}{\sim} \
\int_{r=r_0}^{\mathcal{D}_{\symcirclecircle f} R} f(r) 2 \pi r dr
\label{eq-conjecture-circle} \\[2mm]
\int_{r=r_0}^{+\infty} f(r) \frac{d \gls{expval} \left( \cirsqr (r, S, X, Y) \right)}{dr} dr
\ \underset{\frac{r_0}{R_{\gls{roi}}} \rightarrow 0}{\sim} \
\int_{r=r_0}^{\mathcal{D}_{\symcirclesquare f} S} f(r) 2 \pi r dr
\label{eq-conjecture-square}
\end{align}
\section{Data}\label{sec:data}
The construction and analysis of the data produced for the study was automated.
Everything was done from a single directory called \textit{workspace}.
This repository contains the parameters and the scripts that will allow you to replicate the results obtained.
The installation of the three packages mentioned above is necessary for the execution of the scripts.
\bigskip
You can find the scripts and documentations here: \github{lpa-workspace}
\bigskip
You can also clone the project into your local directory with git:
\begin{tcolorbox}[width=\linewidth, title=shell]
\begin{verbatim}
$ cd <the-place-you-want-to-clone-the-repository>
$ git clone https://github.com/DunstanBecht/lpa-workspace.git
\end{verbatim}
\end{tcolorbox}
\bigskip
The following pages provide a summary of the data generated for each distribution studied.
\newpage
\begin{multicols}{2}
\input{load/tex/appendix}
\null
\end{multicols}
\section{Glossary}
\printnoidxglossaries
\newpage
\printbibliography[heading=bibintoc, heading=bibnumbered]
\end{appendix}
|
[STATEMENT]
lemma inline_Done [simp]: "inline (Done x) s = Done (x, s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.inline (Generative_Probabilistic_Value.Done x) s = Generative_Probabilistic_Value.Done (x, s)
[PROOF STEP]
by(rule gpv.expand)(simp add: inline_sel) |
[STATEMENT]
lemma finite_same_card_bij_on_ext_funcset:
assumes "finite A" "finite B" "card A = card B"
shows "\<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
finite A
finite B
card A = card B
[PROOF STEP]
obtain f' where f': "bij_betw f' A B"
[PROOF STATE]
proof (prove)
using this:
finite A
finite B
card A = card B
goal (1 subgoal):
1. (\<And>f'. bij_betw f' A B \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using finite_same_card_bij
[PROOF STATE]
proof (prove)
using this:
finite A
finite B
card A = card B
\<lbrakk>finite ?A; finite ?B; card ?A = card ?B\<rbrakk> \<Longrightarrow> \<exists>h. bij_betw h ?A ?B
goal (1 subgoal):
1. (\<And>f'. bij_betw f' A B \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
bij_betw f' A B
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
define f where "\<And>x. f x = (if x \<in> A then f' x else undefined)"
[PROOF STATE]
proof (state)
this:
f ?x = (if ?x \<in> A then f' ?x else undefined)
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
have "f \<in> A \<rightarrow>\<^sub>E B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<in> A \<rightarrow>\<^sub>E B
[PROOF STEP]
using f'
[PROOF STATE]
proof (prove)
using this:
bij_betw f' A B
goal (1 subgoal):
1. f \<in> A \<rightarrow>\<^sub>E B
[PROOF STEP]
unfolding f_def
[PROOF STATE]
proof (prove)
using this:
bij_betw f' A B
goal (1 subgoal):
1. (\<lambda>x. if x \<in> A then f' x else undefined) \<in> A \<rightarrow>\<^sub>E B
[PROOF STEP]
by (auto simp add: bij_betwE)
[PROOF STATE]
proof (state)
this:
f \<in> A \<rightarrow>\<^sub>E B
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
f \<in> A \<rightarrow>\<^sub>E B
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
have "bij_betw f A B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw f A B
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. bij_betw f A B
[PROOF STEP]
have "bij_betw f' A B \<longleftrightarrow> bij_betw f A B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw f' A B = bij_betw f A B
[PROOF STEP]
unfolding f_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw f' A B = bij_betw (\<lambda>x. if x \<in> A then f' x else undefined) A B
[PROOF STEP]
by (auto intro!: bij_betw_cong)
[PROOF STATE]
proof (state)
this:
bij_betw f' A B = bij_betw f A B
goal (1 subgoal):
1. bij_betw f A B
[PROOF STEP]
from this \<open>bij_betw f' A B\<close>
[PROOF STATE]
proof (chain)
picking this:
bij_betw f' A B = bij_betw f A B
bij_betw f' A B
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
bij_betw f' A B = bij_betw f A B
bij_betw f' A B
goal (1 subgoal):
1. bij_betw f A B
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
bij_betw f A B
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
bij_betw f A B
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
f \<in> A \<rightarrow>\<^sub>E B
bij_betw f A B
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
f \<in> A \<rightarrow>\<^sub>E B
bij_betw f A B
goal (1 subgoal):
1. \<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>f. f \<in> A \<rightarrow>\<^sub>E B \<and> bij_betw f A B
goal:
No subgoals!
[PROOF STEP]
qed |
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall A B C D X P : Universe, ((wd_ C D /\ (wd_ A B /\ (wd_ A X /\ (wd_ P A /\ (wd_ P B /\ (col_ A B X /\ (col_ C D X /\ (col_ P C D /\ (col_ A C D /\ col_ A P X))))))))) -> col_ P A B)).
Proof.
time tac.
Qed.
End FOFProblem.
|
classdef StiffnessTensor2VoigtConverterPS < FourthOrderTensor2VoigtConverterPS
properties
end
methods (Access = public)
function obj = StiffnessTensor2VoigtConverterPS(tensor)
obj.computeConversion(tensor)
end
end
methods (Access = protected)
function selectVoigtTensorClass(obj)
obj.voigtTensor = StiffnessPlaneStressVoigtTensor();
end
end
methods (Access = protected,Static)
function f = getVoigtFactor(iv,jv)
f = 1;
end
end
end
|
(** ** Sub(pre)categories
Authors: Benedikt Ahrens, Chris Kapulkin, Mike Shulman (January 2013)
Reorganized and expanded: Langston Barrett (@siddharthist) (March 2018)
*)
(** ** Contents :
- Subprecategories
- A sub-precategory forms a precategory ([carrier_of_sub_precategory])
- (Inclusion) functor from a sub-precategory to the ambient precategory
([sub_precategory_inclusion])
- Subcategories ([subcategory])
- Restriction of a functor to a subcategory
*)
Require Import UniMath.Foundations.Sets.
Require Import UniMath.MoreFoundations.PartA.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.CategoryTheory.Core.Isos.
Local Open Scope cat.
(** ** Definitions *)
(** A sub-precategory is specified through a predicate on objects
and a dependent predicate on morphisms
which is compatible with identity and composition
*)
Definition is_sub_precategory {C : category}
(C' : hsubtype C)
(Cmor' : ∏ a b : C, hsubtype (a --> b)) :=
(∏ a : C, C' a -> Cmor' _ _ (identity a)) ×
(∏ (a b c : C) (f: a --> b) (g : b --> c),
Cmor' _ _ f -> Cmor' _ _ g -> Cmor' _ _ (f · g)).
Definition sub_precategories (C : category) :=
total2 (fun C' : (hsubtype (ob C)) × (∏ a b:ob C, hsubtype (a --> b)) =>
is_sub_precategory (pr1 C') (pr2 C')).
(** We have a coercion [carrier] turning every predicate [P] on a type [A] into the
total space [ { a : A & P a} ].
For later, we define some projections with the appropriate type, also to
avoid confusion with the aforementioned coercion.
*)
Definition sub_precategory_predicate_objects {C : category}
(C': sub_precategories C):
hsubtype (ob C) := pr1 (pr1 C').
Definition sub_ob {C : category}(C': sub_precategories C): UU :=
(*carrier*) (sub_precategory_predicate_objects C').
Definition sub_precategory_predicate_morphisms {C : category}
(C':sub_precategories C) (a b : C) : hsubtype (a --> b) := pr2 (pr1 C') a b.
Definition sub_precategory_morphisms {C : category}(C':sub_precategories C)
(a b : C) : UU := sub_precategory_predicate_morphisms C' a b.
(** Projections for compatibility of the predicate with identity and
composition.
*)
Definition sub_precategory_id (C : category) (C':sub_precategories C) :
∏ a : ob C,
sub_precategory_predicate_objects C' a ->
sub_precategory_predicate_morphisms C' _ _ (identity a) :=
dirprod_pr1 (pr2 C').
Definition sub_precategory_comp (C : category) (C':sub_precategories C) :
∏ (a b c: ob C) (f: a --> b) (g : b --> c),
sub_precategory_predicate_morphisms C' _ _ f ->
sub_precategory_predicate_morphisms C' _ _ g ->
sub_precategory_predicate_morphisms C' _ _ (f · g) :=
dirprod_pr2 (pr2 C').
(** An object of a subprecategory is an object of the original precategory. *)
Definition precategory_object_from_sub_precategory_object (C:category)
(C':sub_precategories C) (a : sub_ob C') :
ob C := pr1 a.
Coercion precategory_object_from_sub_precategory_object :
sub_ob >-> ob.
(** A morphism of a subprecategory is also a morphism of the original precategory. *)
Definition precategory_morphism_from_sub_precategory_morphism (C:category)
(C':sub_precategories C) (a b : ob C)
(f : sub_precategory_morphisms C' a b) : a --> b := pr1 f .
Coercion precategory_morphism_from_sub_precategory_morphism :
sub_precategory_morphisms >-> precategory_morphisms.
(** *** A sub-precategory forms a precategory. *)
Definition sub_precategory_ob_mor (C : category)(C':sub_precategories C) :
precategory_ob_mor.
Proof.
exists (sub_ob C').
exact (λ a b, @sub_precategory_morphisms _ C' a b).
Defined.
(*
Coercion sub_precategory_ob_mor : sub_precategories >-> precategory_ob_mor.
*)
Definition sub_precategory_data (C : category)(C':sub_precategories C) :
precategory_data.
Proof.
exists (sub_precategory_ob_mor C C').
split.
intro c.
exists (identity (C:=C) (pr1 c)).
apply sub_precategory_id.
apply (pr2 c).
intros a b c f g.
exists (compose (pr1 f) (pr1 g)).
apply sub_precategory_comp.
apply (pr2 f). apply (pr2 g).
Defined.
(** A useful lemma for equality in the sub-precategory. *)
Lemma eq_in_sub_precategory (C : category)(C':sub_precategories C)
(a b : sub_ob C') (f g : sub_precategory_morphisms C' a b) :
pr1 f = pr1 g -> f = g.
Proof.
intro H.
apply (total2_paths_f H).
apply proofirrelevance.
apply pr2.
Qed.
(*
Lemma eq_in_sub_precategory2 (C : precategory)(C':sub_precategories C)
(a b : sub_ob C') (f g : a --> b)
(pf : sub_precategory_predicate_morphisms C' _ _ f)
(pg : sub_precategory_predicate_morphisms C' _ _ g):
f = g -> (tpair (λ f, sub_precategory_predicate_morphisms _ _ _ f) f pf) =
(tpair (λ f, sub_precategory_predicate_morphisms _ _ _ f) g pg).
Proof.
intro H.
apply (two_arg_paths_f H).
destruct H.
apply (two_arg_paths_f (idpath _ )).
*)
Definition is_precategory_sub_precategory (C : category)(C':sub_precategories C) :
is_precategory (sub_precategory_data C C').
Proof.
repeat split;
simpl; intros.
unfold sub_precategory_comp;
apply eq_in_sub_precategory; simpl;
apply id_left.
apply eq_in_sub_precategory. simpl.
apply id_right.
apply eq_in_sub_precategory.
cbn.
apply assoc.
apply eq_in_sub_precategory.
cbn.
apply assoc'.
Defined.
Definition carrier_of_sub_precategory (C : category)(C':sub_precategories C) :
precategory := tpair _ _ (is_precategory_sub_precategory C C').
Definition has_homsets_carrier_of_subcategory (C : category) (C' : sub_precategories C)
: has_homsets (carrier_of_sub_precategory C C').
Proof.
intros a b.
cbn.
apply (isofhleveltotal2 2).
- apply C.
- intro f.
apply hlevelntosn.
apply propproperty.
Qed.
Definition carrier_of_sub_category (C : category) (C' : sub_precategories C)
: category
:= make_category _ (has_homsets_carrier_of_subcategory C C').
Coercion carrier_of_sub_category : sub_precategories >-> category.
(** An object satisfying the predicate is an object of the subprecategory *)
Definition precategory_object_in_subcat {C : category} {C':sub_precategories C}
(a : ob C) (p : sub_precategory_predicate_objects C' a) :
ob C' := tpair _ a p.
(** A morphism satisfying the predicate is a morphism of the subprecategory *)
Definition precategory_morphisms_in_subcat {C : category} {C':sub_precategories C}
{a b : ob C'}(f : pr1 a --> pr1 b)
(p : sub_precategory_predicate_morphisms C' (pr1 a) (pr1 b) (f)) :
precategory_morphisms (C:=C') a b := tpair _ f p.
(** A (z-)isomorphism of a subprecategory is also a (z-)isomorphism of the original precategory. *)
Lemma is_z_iso_from_is_z_iso_in_subcategory (C:category) (C':sub_precategories C)
(a b : C') (f : C'⟦ a , b ⟧)
(H: is_z_isomorphism f)
: is_z_isomorphism
(precategory_morphism_from_sub_precategory_morphism _ _ _ _ f).
Proof.
induction H as (g,(gl,gr)).
induction g as (g_und,?).
use make_is_z_isomorphism.
+ exact g_und.
+ split.
- exact (maponpaths pr1 gl).
- exact (maponpaths pr1 gr).
Defined.
(** *** (Inclusion) functor from a sub-precategory to the ambient precategory *)
Definition sub_precategory_inclusion_data (C : category) (C':sub_precategories C):
functor_data C' C.
Proof.
exists (@pr1 _ _ ).
intros a b.
exact (@pr1 _ _ ).
Defined.
Definition is_functor_sub_precategory_inclusion (C : category)
(C':sub_precategories C) :
is_functor (sub_precategory_inclusion_data C C').
Proof.
split; simpl.
unfold functor_idax . intros. apply (idpath _ ).
unfold functor_compax . intros. apply (idpath _ ).
Qed.
Definition sub_precategory_inclusion (C : category) (C' : sub_precategories C) :
functor C' C := tpair _ _ (is_functor_sub_precategory_inclusion C C').
(** ** Subcategories *)
(** The hom-types of a subprecategory are sets if the hom-types of the original
category are. *)
Lemma is_set_sub_precategory_morphisms {C : category}
(C' : sub_precategories C) (a b : ob C) :
isaset (sub_precategory_morphisms C' a b).
Proof.
apply isofhlevel_hsubtype, C.
Defined.
Definition sub_precategory_morphisms_set {C : category}
(C':sub_precategories C) (a b : ob C) : hSet :=
tpair _ (sub_precategory_morphisms C' a b)
(is_set_sub_precategory_morphisms C' a b).
Definition subcategory (C : category) (C' : sub_precategories C) : category.
Proof.
use make_category.
- exact (carrier_of_sub_precategory C C').
- intros ? ?.
apply is_set_sub_precategory_morphisms.
Defined.
(** ** Restriction of a functor to a subcategory *)
Definition restrict_functor_to_sub_precategory {C D : category}
(C' : sub_precategories C) (F : functor C D) : functor C' D.
Proof.
use make_functor.
- use make_functor_data.
+ exact (F ∘ precategory_object_from_sub_precategory_object _ C')%functions.
+ intros ? ?.
apply (# F ∘ precategory_morphism_from_sub_precategory_morphism _ C' _ _)%functions.
- use make_dirprod.
+ intro; apply (functor_id F).
+ intros ? ? ? ? ?; apply (functor_comp F).
Defined.
|
function out = isdelta(f)
%ISDELTA Always returns false, since CLASSICFUNs don't have delta functions.
% Copyright 2017 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.
out = false;
end
|
State Before: α : Type u_2
β : Type ?u.18395
inst✝ : EDist α
x y : α
s✝ t : Set α
ι : Type u_1
o : Option ι
s : ι → Set α
⊢ einfsep (⋃ (i : ι) (_ : i ∈ o), s i) = ⨅ (i : ι) (_ : i ∈ o), einfsep (s i)
State After: no goals
Tactic: cases o <;> simp
#ifndef SIGSCANNERMEMORYDATA_HHHHH
#define SIGSCANNERMEMORYDATA_HHHHH
#include <vector>
#include <gsl/span>
#include <string>
namespace MPSig {
class SigScannerMemoryData {
gsl::span<const char> m_refData;
public:
explicit SigScannerMemoryData(gsl::span<const char> data);
bool IsInRange(std::intptr_t addr) const;
char* Deref(std::intptr_t addrWithOffset) const;
std::pair<char*, bool> DerefTry(std::intptr_t addrWithOffset) const;
gsl::span<const char> Get() const;
std::intptr_t GetOffset() const;
};
}
#endif
|
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
import itertools
plt.rcParams["figure.figsize"] = (15, 15)
%matplotlib inline
```
# Modeling the random walk
We assume the following:
1) $X$ and $Y$ are independent
2) Each jump of the fire over a given timestep $t$ has a velocity drawn independently from a common distribution
Our approach is to treat the spread of fire as a random walk on $x$ and $y$.
For each step of the random walk, we'll treat the velocity as a random variable from a distribution $N(\mu, \sigma^2)$.
Our input data is a set of trajectories based off of the given tweets.
Each trajectory gives us a set of jumps associated with the given fire of interest in our region.
```python
numtweets = 30
r = 50
variance = 5
binsize = 4
#x, y, and timestamp of each tweet
testx = np.linspace(0, r, numtweets) + np.random.normal(0,variance,numtweets)
testy = np.linspace(0, r, numtweets) + np.random.normal(0,variance,numtweets)
testt = np.arange(testx.shape[0])
bins = np.arange(min(testt), max(testt), binsize) #make some bins based on binsize
inds = np.digitize(testt, bins) #for each time in testt, assign it to a bin;
#the bin of element i in testt will be the bin for element i in testx and testy also
numbins = len(bins)
x_trajectories = []
y_trajectories = []
for i in range(numbins):
tempx = []
tempy = []
for j in range(len(testt)):
if inds[j] == i + 1: #since bin 0 is indexed as 1
tempx += [testx[j]] #if the jth element of testt is in bin i,
tempy += [testy[j]] #add testx[j] and testy[j] to their appropriate bins
x_trajectories += [tempx]
y_trajectories += [tempy]
combinatorial_x_trajectories = list(itertools.product(*x_trajectories))
combinatorial_y_trajectories = list(itertools.product(*y_trajectories))
x_distances = [np.diff(i) for i in combinatorial_x_trajectories]
y_distances = [np.diff(i) for i in combinatorial_y_trajectories]
x_distances = np.reshape(x_distances, -1)
y_distances = np.reshape(y_distances, -1)
vx = x_distances/binsize
vy = y_distances/binsize
```
```python
sns.scatterplot(testx, testy);
```
```python
def MLE_mu(data):
n = len(data)
return 1/n * np.sum(data)
def MLE_sigma2(data):
n = len(data)
mu_hat = MLE_mu(data)
return 1/n * np.sum((data - mu_hat)**2)
```
Since the behavior of fire spread is uncertain, we assume each trajectory represents an equally likely path of the fire.
Based off of this, each link $A\to B$ gives us a "representative" sample of the underlying distribution of velocities, both of x and y.
Therefore, the approach will be to calculate $v_x = d_x/t$ for each link of tweets per trajectory, then to use these to compute MLEs for the normal distribution governing the velocities of each hop.
Once we have the normal distribution for these hops, we can use this to predict the probability that the fire has reached some point $A$ by some time $T$.
There are two ways we can do this:
1) Set some timestep $t$ within the range of timesteps that we have seen in the data, and subdivide the desired segment into even chunks such that $\sum t_i = T$; the per-chunk normals then add up to the total displacement.
In this case, say we have $n$ chunks.
Then, the probability that a fire has spread at least to point $A$ is
$$
\begin{align}
P\left(\sum_{i=1}^n x_i \geq A \right) &= P\left(\sum_{i=1}^n tv_i \geq A \right) \\
&= P\left(\sum_{i=1}^n v_i \geq \frac{A}{t} \right)\\
&= P\left(N(n\mu, n\sigma^2)\geq \frac{A}{t} \right)\\
&= P\left(N(n\mu, n\sigma^2)\geq \frac{A}{T/n} \right)\\
&= P\left(N(n\mu, n\sigma^2)\geq \frac{nA}{T} \right)\\
&= P\left(N(\mu, \sigma^2/n)\geq \frac{A}{T} \right)
\end{align}
$$
(Dividing through by $n$: the mean of $n$ i.i.d. velocities has variance $\sigma^2/n$, so finer subdivision tightens the estimate.)
2) Find the average velocity required to traverse the whole path in one go and find that probability.
$$
\begin{align}
P(X \geq A) &= P\left(vT \geq A\right)\\
&= P\left(v \geq \frac{A}{T}\right)\\
&= P\left(N(\mu, \sigma^2) \geq \frac{A}{T}\right)\\
\end{align}
$$
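As a minimal sketch, both estimates reduce to a single hypothetical helper (not used by the pipeline below; `mu` and `sigma2` are the fitted MLEs):

```python
def p_reached(A, T, mu, sigma2, n=1):
    # P(fire has covered distance A by time T): n = 1 gives method 2;
    # n > 1 gives method 1, where the mean velocity has variance sigma2/n
    return norm(loc=mu, scale=np.sqrt(sigma2/n)).sf(A/T)
```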
Let's apply these ideas below.
First, calculate velocity components based off of the given data:
```python
# vx, vy
```
(array([ 2.52310822, -1.6544361 , 1.58150142, ..., 3.94943556,
0.20712346, 2.1478424 ]),
array([ 3.51277876, 3.83798799, -0.82903521, ..., 3.69953839,
1.46260731, 0.83939873]))
```python
# timesteps = np.diff(testt)
# vx = np.diff(testx)/timesteps
# vy = np.diff(testy)/timesteps
```
Get MLE estimates for $v_x$, $v_y$
```python
muhat_y, sigmahat_y = MLE_mu(vy), MLE_sigma2(vy)
muhat_x, sigmahat_x = MLE_mu(vx), MLE_sigma2(vx)
print("means, x and y: " , muhat_x, muhat_y)
print("variances, x and y: ", sigmahat_x, sigmahat_y)
```
means, x and y: 1.8084442148956452 1.670938735439651
variances, x and y: 3.3483606084057564 2.7596994513903934
```python
vy_dist = norm(loc = muhat_y, scale = np.sqrt(sigmahat_y))
vx_dist = norm(loc = muhat_x, scale = np.sqrt(sigmahat_x))
```
```python
predictx = [vx_dist.mean() * t for t in testt]
```
```python
simulated_y_trajectories = []
for i in range(40):
simulated_position = [0]
last = 0
for t in testt[1:]:
last += vy_dist.rvs()
simulated_position += [last]
plt.plot(testt, simulated_position)
simulated_y_trajectories += [simulated_position]
```
```python
simulated_x_trajectories = []
for i in range(40):
simulated_position = [0]
last = 0
for t in testt[1:]:
last += vx_dist.rvs()
simulated_position += [last]
plt.plot(testt, simulated_position)
simulated_x_trajectories += [simulated_position]
```
```python
import seaborn as sns
simulated_x_trajectories_plot = np.reshape(simulated_x_trajectories, -1)
simulated_y_trajectories_plot = np.reshape(simulated_y_trajectories, -1)
```
```python
plt.figure(figsize = (10, 10))
sns.kdeplot(simulated_x_trajectories_plot, simulated_y_trajectories_plot, shade = True)
for x_traj, y_traj in zip(simulated_x_trajectories, simulated_y_trajectories):
plt.plot(x_traj, y_traj)
plt.scatter(testx, testy, label = "test data", color = 'orange')
plt.legend();
```
```python
def rw_simulation(tweetinputlist, numbins = 5, plotstep = .1, minlat = None, maxlat = None, minlong = None, maxlong = None):
"""
Take in a 2D list, where each element has
Latitude at index 0
Longitude at index 1
Time of tweet at index 2
Also takes in the number of buckets that the time has been discretized into
Latitude is y, longitude is x
plotstep determines the mesh size for which we plot
Returns:
2D list, where each element is
Latitude at index 0
Longitude at index 1
Weight on (0,1) at index 2
"""
###############
### imports ###
###############
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import itertools
import seaborn as sns
######################
### Helper methods ###
######################
def MLE_mu(data):
n = len(data)
return 1/n * np.sum(data)
def MLE_sigma2(data):
n = len(data)
mu_hat = MLE_mu(data)
return 1/n * np.sum((data - mu_hat)**2)
#################
### Main body ###
#################
    #unpack the input (longitude = x, latitude = y)
    testt = np.array([i[2] for i in tweetinputlist])
    testx = np.array([i[0] for i in tweetinputlist])
    testy = np.array([i[1] for i in tweetinputlist])
    binsize = (max(testt) - min(testt)) / numbins #width of each time bucket
    bins = np.arange(min(testt), max(testt), binsize) #make some bins based on binsize
    inds = np.digitize(testt, bins) #for each time in testt, assign it to a bin;
    #the bin of element i in testt will be the bin for element i in testx and testy also
    numbins = len(bins)
x_trajectories = []
y_trajectories = []
for i in range(numbins):
tempx = []
tempy = []
for j in range(len(testt)):
if inds[j] == i + 1: #since bin 0 is indexed as 1
tempx += [testx[j]] #if the jth element of testt is in bin i,
tempy += [testy[j]] #add testx[j] and testy[j] to their appropriate bins
x_trajectories += [tempx]
y_trajectories += [tempy]
combinatorial_x_trajectories = list(itertools.product(*x_trajectories))
combinatorial_y_trajectories = list(itertools.product(*y_trajectories))
x_distances = [np.diff(i) for i in combinatorial_x_trajectories]
y_distances = [np.diff(i) for i in combinatorial_y_trajectories]
x_distances = np.reshape(x_distances, -1)
y_distances = np.reshape(y_distances, -1)
vx = x_distances/binsize
vy = y_distances/binsize
muhat_y, sigmahat_y = MLE_mu(vy), MLE_sigma2(vy)
muhat_x, sigmahat_x = MLE_mu(vx), MLE_sigma2(vx)
vy_dist = norm(loc = muhat_y, scale = np.sqrt(sigmahat_y))
vx_dist = norm(loc = muhat_x, scale = np.sqrt(sigmahat_x))
simulated_y_trajectories = []
for i in range(40):
simulated_position = [0]
last = 0
for t in testt[1:]:
last += vy_dist.rvs()
simulated_position += [last]
simulated_y_trajectories += [simulated_position]
simulated_x_trajectories = []
for i in range(40):
simulated_position = [0]
last = 0
for t in testt[1:]:
last += vx_dist.rvs()
simulated_position += [last]
simulated_x_trajectories += [simulated_position]
simulated_x_trajectories_plot = np.reshape(simulated_x_trajectories, -1)
simulated_y_trajectories_plot = np.reshape(simulated_y_trajectories, -1)
#################
### Viz block ###
#################
# plt.figure(figsize = (10, 10))
# sns.kdeplot(simulated_x_trajectories_plot, simulated_y_trajectories_plot, shade = True)
# for x_traj, y_traj in zip(simulated_x_trajectories, simulated_y_trajectories):
# plt.plot(x_traj, y_traj)
# plt.scatter(testx, testy, label = "test data", color = 'orange')
# plt.legend();
    import scipy.stats as stats
    #pair up the (x, y) samples: one row per simulated point
    rvs = np.column_stack([simulated_x_trajectories_plot,
                           simulated_y_trajectories_plot])
    kde = stats.gaussian_kde(rvs.T)
    # Regular grid to evaluate kde upon
    if minlat is None:
        x_flat = np.arange(rvs[:, 0].min(), rvs[:, 0].max(), plotstep)
        y_flat = np.arange(rvs[:, 1].min(), rvs[:, 1].max(), plotstep)
    else:
        x_flat = np.arange(minlong, maxlong, plotstep)
        y_flat = np.arange(minlat, maxlat, plotstep)
x,y = np.meshgrid(x_flat,y_flat)
grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)
z = np.array(kde(grid_coords.T))
return np.array([[x, y, z] for x, y, z in zip(grid_coords[:,0], grid_coords[:,1], z)])
```
```python
simtweets = [list(i) for i in zip(testx, testy, testt)]
```
```python
plotpoints = rw_simulation(simtweets, 5, 1)
```
```python
x, y, z = plotpoints[:, 0], plotpoints[:, 1], plotpoints[:, 2]
```
```python
import pandas as pd
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
%matplotlib inline
def plot_contour(x,y,z,resolution = 50,contour_method='linear'):
resolution = str(resolution)+'j'
X,Y = np.mgrid[min(x):max(x):complex(resolution), min(y):max(y):complex(resolution)]
points = [[a,b] for a,b in zip(x,y)]
Z = griddata(points, z, (X, Y), method=contour_method)
return X,Y,Z
X,Y,Z = plot_contour(x,y,z,resolution = 50,contour_method='linear')
with plt.style.context("seaborn-white"):
fig, ax = plt.subplots(figsize=(13,8))
ax.scatter(x,y, color="black", linewidth=1, edgecolor="ivory", s=50)
ax.contourf(X,Y,Z)
ax.scatter(testx, testy);
```
# What happens with multiple fires?
Inclusion-exclusion:
$$
\begin{align}
P(fire @ A @ t) &= P(fire 1 @ A @ t) + P(fire 2 @ A @ t) - P(fire 1 \cap fire 2 @ A @ t)\\
&= P(fire 1 @ A @ t) + P(fire 2 @ A @ t) - P(fire 1 @ A @ t)P(fire 2 @ A @ t)
\end{align}
$$
for time $t$ and point of interest $A$; the last step assumes the two fires are independent.
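A minimal sketch of how this combination could be applied to the output of `rw_simulation`, assuming both fires were evaluated on the same grid (e.g. by passing identical `minlat`/`maxlat`/`minlong`/`maxlong` bounds) and treating the returned weights as probabilities; `combine_fires` is a hypothetical helper, not part of the original notebook:
```python
import numpy as np

def combine_fires(points1, points2):
    # each input is an (N, 3) array of [lat, long, weight] rows on a shared grid,
    # as returned by rw_simulation
    z1, z2 = points1[:, 2], points2[:, 2]
    # P(f1 or f2) = P(f1) + P(f2) - P(f1)P(f2), assuming the fires are independent
    z = z1 + z2 - z1 * z2
    return np.column_stack([points1[:, 0], points1[:, 1], z])
```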
|
!!# PRINTING MODULE <<PRN_MoCshort>>
MODULE PRN_MoCshort
!!## PURPOSE
!! Printing routines for the Method of Characteristics with
!! short (intra-cell) interpolation.
USE ISO_varying_string !!((03-A-ISO_varying_string.f90))
USE USR_fdbk !!((08-C-USR_fdbk.f90))
USE PRN_Table !!((11-B-PRN_Table.f90))
USE FUN_STR !!((05-B-FUN_STR.f90))
USE KND_Mesh !!((05-B-KND_Mesh.f90))
USE TBX_Mesh,ONLY: & !!((15-B-TBX_Mesh.f90))
NUM_Cells,NUM_Faces,NUM_Verts,&
CellCentroid,DomainFaceCentroid,FaceCentroid,&
Vert,FaceNormal,SymmetryFace,&
MeshScale,SymmetryDirection,SymmetryPoint
USE KND_ScalarFluxes !!((02-A-KND_ScalarFluxes.f90))
USE FUN_Substitute !!((06-C-FUN_Substitute.f90))
USE KND_AngularFluxes !!((02-A-KND_AngularFluxes.f90))
USE FUN_VSTROPTION !!((16-B-FUN_VSTROPTION.f90))
USE FUN_NewFile !!((05-B-FUN_NewFile.f90))
USE FUN_Default !!((04-A-FUN_Default.f90))
USE SUB_CLEAR !!((04-A-SUB_CLEAR.f90))
USE FUN_qmcfull !!((76-C-FUN_qmcfull.f90))
USE SUB_SimpSurf !!((08-C-SUB_SimpSurf.f90))
!!## DEFAULT IMPLICIT
IMPLICIT NONE
!!## DEFAULT ACCESS
PRIVATE
!!## ACCESS
PUBLIC :: PRINT_InteriorCells
PUBLIC :: PRINT_IncomingDirections
PUBLIC :: PRINT_StreamingInterpolants
PUBLIC :: PRINT_SweepOrder
PUBLIC :: PRINT_CharacteristicInfo
PUBLIC :: PRINT_InterpInfo
PUBLIC :: PRINT_AFSymmetryCheck
PUBLIC :: PRINT_AFSymmetryCheckF
PUBLIC :: PRINT_Options_MCS
PUBLIC :: GNUPLOT_AngularFlux
PUBLIC :: GNUPLOT_Qfull2D
!!## MODULE PROCEDURES
CONTAINS
!!### GNUPLOT PLOT SUBROUTINE: <GNUPLOT_LO_ScalarFluxFandC>
SUBROUTINE GNUPLOT_LO_ScalarFluxFandC()
!!#### PURPOSE
!! Print out both low-order face-average and cell-average
!! scalar flux.
!!#### GLOBAL VARIABLES
USE VAR_ScalarFluxes !!((04-C-VAR_ScalarFluxes.f90))
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
!!#### LOCAL VARIABLES
INTEGER :: i,j,Unit
REAL(KIND_MSH) :: CC(2),FC(2)
!!--begin--
Unit = NewFile("LO_SFFandC")
DO i=1,NUM_Cells(Mesh)
CC = CellCentroid(Mesh,i)
WRITE(Unit,"(f12.7,1x,f12.7,1x,e21.13,1x)")&
CC(1),CC(2),ScalarFluxC(1,i)
END DO
DO j=1,NUM_Faces(Mesh)
FC = FaceCentroid(Mesh,j)
WRITE(Unit,"(f12.7,1x,f12.7,1x,e21.13,1x)")&
FC(1),FC(2),ScalarFluxF(1,j)
END DO
CLOSE( Unit )
!!--end--
END SUBROUTINE
!!### GNUPLOT SUBROUTINE: <GNUPLOT_Qfull2D>
SUBROUTINE GNUPLOT_Qfull2D()
!!#### EXTERNAL VARIABLES
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
!!#### LOCAL VARIABLES
INTEGER :: Unit
REAL(KIND_MSH) :: xseq(3),yseq(3)
REAL(KIND_MSH) :: DFC(2)
!!--begin--
!get y-extents from domain
DFC = DomainFaceCentroid(Mesh,1)
yseq(1) = DFC(2)
DFC = DomainFaceCentroid(Mesh,3)
yseq(3) = DFC(2)
!get x-extents from domain
DFC = DomainFaceCentroid(Mesh,4)
xseq(1) = DFC(1)
DFC = DomainFaceCentroid(Mesh,2)
xseq(3) = DFC(1)
!get 50 increments for each
xseq(2) = (xseq(3)-xseq(1))/50.d0
yseq(2) = (yseq(3)-yseq(1))/50.d0
!plot the simple surface
Unit = Newfile("Qfull2D")
CALL SimpSurf( Unit , Qfull2D , xseq , yseq )
CLOSE(Unit)
!!--end--
END SUBROUTINE
!!### GNUPLOT SUBROUTINE: <GNUPLOT_AngularFlux>
SUBROUTINE GNUPLOT_AngularFlux()
!!#### PURPOSE
!! Print out the angular flux by directions to files
!! <AF_m> for each direction <m>.
!!#### MODULES
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
USE VAR_AngularFluxes !!((04-C-VAR_AngularFluxes.f90))
!!#### LOCAL VARIABLES
INTEGER :: Unit,k,m,i,j
REAL(KIND_MSH) :: V(2)
!!--begin--
DO m=1,SIZE(AngularFluxV,3)
Unit = Newfile("AFV_"//TRIM(STR(m)))
DO k=1,NUM_Verts(Mesh)
V = Vert(Mesh,k)
WRITE(Unit,"(3e20.13)")V(1),V(2),AngularFluxV(1,k,m)
END DO
CLOSE(Unit)
IF( ASSOCIATED(AngularFluxF) )THEN
Unit = Newfile("AFF_"//TRIM(STR(m)))
DO j=1,NUM_Faces(Mesh)
V = FaceCentroid(Mesh,j)
WRITE(Unit,"(3e20.13)")V(1),V(2),AngularFluxF(1,j,m)
END DO
CLOSE(Unit)
END IF
IF( ASSOCIATED(AngularFluxC) )THEN
Unit = Newfile("AFC_"//TRIM(STR(m)))
DO i=1,NUM_Cells(Mesh)
V = CellCentroid(Mesh,i)
WRITE(Unit,"(3e20.13)")V(1),V(2),AngularFluxC(1,i,m)
END DO
CLOSE(Unit)
END IF
END DO
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_AFSymmetryCheckF>
SUBROUTINE PRINT_AFSymmetryCheckF(Unit,Pn_sym,tol,relerr)
!!#### PURPOSE
!! Print a check of symmetry of the angular
!! fluxes of a problem, about a given line.
USE VAR_AngularFluxes !!((04-C-VAR_AngularFluxes.f90))
USE VAR_DiscreteOrdinates,ONLY: Ordinates !!((47-B-VAR_DiscreteOrdinates.f90))
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
!!#### REQUIRED INPUT
INTEGER ,INTENT(IN) :: Unit
REAL(KIND_MSH),INTENT(IN) :: Pn_sym(3)
!!#### OPTIONAL OUTPUT
REAL(KIND_MSH),OPTIONAL,INTENT(OUT) :: relerr
REAL(KIND_MSH),OPTIONAL,INTENT(IN) :: tol
!!#### LOCAL VARIABLES
INTEGER :: Nr,Nj,Nm,g,j,m,r
INTEGER :: m1,m2,j1,j2
CHARACTER(32),ALLOCATABLE :: D(:,:)
REAL(KIND_MSH) :: max_reldiff,AF1,AF2,DAF,tol_,AVG,max_diff
!!--begin--
tol_ = Default(MeshScale(Mesh)*EPSILON(1._KIND_MSH),tol)
g = 1
Nj = SIZE(AngularFluxF,2)
Nm = SIZE(AngularFluxF,3)
Nr = (Nj)*(Nm)
ALLOCATE( D(Nr+1,7) )
CALL CLEAR(D)
r = 1
D(1,:) = (/"m1 ","m2 ","j1 ","j2 ","AF1","AF2","DAF"/)
max_diff=0.d0
max_reldiff = 0.d0
DO j=1,Nj
DO m=1,Nm
m1 = m
m2 = SymmetryDirection(Ordinates,Pn_sym,Ordinates(:,m1))
j1 = j
j2 = SymmetryFace(Mesh,Pn_sym,FaceCentroid(Mesh,j1),FaceNormal(Mesh,j1),tol=tol_)
r = r+1
D(r,1) = STR(m1)
D(r,2) = STR(m2)
D(r,3) = STR(j1)
D(r,4) = STR(j2)
AF1 = AngularFluxF(g,j1,m1)
D(r,5) = STR(AF1,"(Es16.8)")
IF( j2/=0 .AND. m2/=0 )THEN
AF2 = AngularFluxF(g,j2,m2)
DAF = AF1-AF2
AVG = 0.5_KIND_AngularFlux*(AF1+AF2)
max_diff=MAX(max_diff,ABS(DAF))
max_reldiff = MAX(max_reldiff,ABS(DAF)/AVG)
D(r,6) = STR(AF2,"(Es16.8)")
D(r,7) = STR(DAF,"(Es12.4)")
ELSE
D(r,6) = "No Symmetric Point"
D(r,7) = "N/A"
END IF
END DO
END DO
IF( Unit/=0 )THEN
CALL PRINT_Table(D,Unit=Unit)
WRITE(Unit,"(a)")" maximum relative difference: "//TRIM(STR(max_reldiff,"(Es12.4)"))
WRITE(Unit,"(a)")" maximum difference: "//TRIM(STR(max_diff,"(Es12.4)"))
END IF
IF( PRESENT(relerr) )THEN
relerr = max_reldiff
END IF
DEALLOCATE(D)
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_AFSymmetryCheck>
SUBROUTINE PRINT_AFSymmetryCheck(Unit,Pn_sym,tol,relerr)
!!#### PURPOSE
!! Print a check of symmetry of the angular
!! fluxes of a problem, about a given line.
USE VAR_AngularFluxes !!((04-C-VAR_AngularFluxes.f90))
USE VAR_DiscreteOrdinates,ONLY: Ordinates !!((47-B-VAR_DiscreteOrdinates.f90))
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
!!#### REQUIRED INPUT
INTEGER ,INTENT(IN) :: Unit
REAL(KIND_MSH),INTENT(IN) :: Pn_sym(3)
!!#### OPTIONAL OUTPUT
REAL(KIND_MSH),OPTIONAL,INTENT(OUT) :: relerr
REAL(KIND_MSH),OPTIONAL,INTENT(IN) :: tol
!!#### LOCAL VARIABLES
INTEGER :: Nr,Nk,Nm,g,k,m,r
INTEGER :: m1,m2,k1,k2
CHARACTER(32),ALLOCATABLE :: D(:,:)
REAL(KIND_MSH) :: max_reldiff,AF1,AF2,DAF,tol_,AVG,max_diff
!!--begin--
tol_ = Default(MeshScale(Mesh)*EPSILON(1._KIND_MSH),tol)
g = 1
Nk = SIZE(AngularFluxV,2)
Nm = SIZE(AngularFluxV,3)
Nr = (Nk)*(Nm)
ALLOCATE( D(Nr+1,7) )
CALL CLEAR(D)
r = 1
D(1,:) = (/"m1 ","m2 ","k1 ","k2 ","AF1","AF2","DAF"/)
max_reldiff = 0.d0
max_diff=0.d0
DO k=1,Nk
DO m=1,Nm
m1 = m
m2 = SymmetryDirection(Ordinates,Pn_sym,Ordinates(:,m1))
k1 = k
k2 = SymmetryPoint(Mesh,Pn_sym,Vert(Mesh,k1),tol=tol_)
r = r+1
D(r,1) = STR(m1)
D(r,2) = STR(m2)
D(r,3) = STR(k1)
D(r,4) = STR(k2)
AF1 = AngularFluxV(g,k1,m1)
D(r,5) = STR(AF1,"(Es16.8)")
IF( k2/=0 .AND. m2/=0 )THEN
AF2 = AngularFluxV(g,k2,m2)
DAF = AF1-AF2
AVG = 0.5_KIND_AngularFlux*(AF1+AF2)
max_diff=MAX(max_diff,ABS(DAF))
max_reldiff = MAX(max_reldiff,ABS(DAF)/AVG)
D(r,6) = STR(AF2,"(Es16.8)")
D(r,7) = STR(DAF,"(Es12.4)")
ELSE
D(r,6) = "No Symmetric Point"
D(r,7) = "N/A"
END IF
END DO
END DO
IF( Unit/=0 )THEN
CALL PRINT_Table(D,Unit=Unit)
WRITE(Unit,"(a)")" maximum relative difference: "//TRIM(STR(max_reldiff,"(Es12.4)"))
WRITE(Unit,"(a)")" maximum difference: "//TRIM(STR(max_diff,"(Es12.4)"))
END IF
IF( PRESENT(relerr) )THEN
relerr = max_reldiff
END IF
DEALLOCATE(D)
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_SweepOrder>
SUBROUTINE PRINT_SweepOrder( FdBk , Unit )
!!#### PURPOSE
!! Print the sweeping order.
!!#### MODULES
USE VAR_MoCshort ,ONLY: pThread,WithinCell !!((47-B-VAR_MoCshort.f90))
USE VAR_DiscreteOrdinates,ONLY: Ordinates !!((47-B-VAR_DiscreteOrdinates.f90))
!!#### OPTIONAL INPUT
INTEGER,OPTIONAL,INTENT(IN) :: Unit
!!#### OPTIONAL INPUT/OUTPUT
TYPE(TYPE_FdBk),OPTIONAL,INTENT(INOUT) :: FdBk
!!#### LOCAL VARIABLES
INTEGER :: Nk,Nm,Nc,Nr,o,m,r,c,kdig,mpdig
CHARACTER(64),ALLOCATABLE :: Tab(:,:)
TYPE(varying_string) :: fmtk,fmtmp
!!--begin--
Nk = SIZE(WithinCell,1)
Nm = SIZE(Ordinates,2)
Nr = Nk + 1
Nc = Nm + 1
ALLOCATE( Tab(Nr,Nc) )
CALL CLEAR(Tab)
!calculate number of digits
kdig = int(log10(real(abs(Nk))))+1
mpdig = int(log10(real(abs(Nm))))+1
fmtk="" ; fmtk = "(I"//TRIM(STR(kdig+1))//"."//TRIM(STR(kdig))//")"
fmtmp="" ; fmtmp = "(I"//TRIM(STR(mpdig+1))//"."//TRIM(STR(mpdig))//")"
!initialize row number
r = 1
!top left corner
Tab(r,1) = "o\m"
!set m from left to right
DO c=2,Nc
Tab(1,c) = STR(c-1,STR(fmtmp))
END DO
!set k from top to bottom
DO r=2,Nr
Tab(r,1) = STR(r-1,STR(fmtk))
END DO
fmtk=""
fmtmp=""
!table body
DO m=1,Nm
DO o=1,SIZE(pthread(m)%path(1)%order)
r = o+1
c = m+1
Tab(r,c) = STR( pthread(m)%path(1)%order(o) )
END DO
END DO
!print table
CALL PRINT_Table( Tab(1:r,:) , Unit=Unit )
DEALLOCATE( Tab )
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_IncomingDirections>
SUBROUTINE PRINT_IncomingDirections( FdBk , Unit )
!!#### PURPOSE
!! Print, for each vertex/direction combination, whether the
!! direction is incoming to the domain.
!!#### MODULES
USE VAR_MoCshort ,ONLY: WithinCell !!((47-B-VAR_MoCshort.f90))
!!#### OPTIONAL INPUT
INTEGER,OPTIONAL,INTENT(IN) :: Unit
!!#### OPTIONAL INPUT/OUTPUT
TYPE(TYPE_FdBk),OPTIONAL,INTENT(INOUT) :: FdBk
!!#### LOCAL VARIABLES
INTEGER :: Nk,Nm,Nc,Nr,k,m,r,c,kdig,mdig
CHARACTER(64),ALLOCATABLE :: Tab(:,:)
TYPE(varying_string) :: fmtk,fmtm
!!--begin--
Nk = SIZE(WithinCell,1)
Nm = SIZE(WithinCell,2)
Nr = Nk + 1
Nc = Nm + 1
ALLOCATE( Tab(Nr,Nc) )
CALL CLEAR(Tab)
!calculate number of digits
kdig = int(log10(real(abs(Nk))))+1
mdig = int(log10(real(abs(Nm))))+1
fmtk="" ; fmtk = "(I"//TRIM(STR(kdig+1))//"."//TRIM(STR(kdig))//")"
fmtm="" ; fmtm = "(I"//TRIM(STR(mdig+1))//"."//TRIM(STR(mdig))//")"
!initialize row number
r = 1
!top left corner
Tab(r,1) = "k\m"
!set m from left to right
DO c=2,Nc
Tab(1,c) = STR(c-1,STR(fmtm))
END DO
!set k from top to bottom
DO r=2,Nr
Tab(r,1) = STR(r-1,STR(fmtk))
END DO
fmtk=""
fmtm=""
!table body
DO k=1,Nk
DO m=1,Nm
r = k+1
c = m+1
Tab(r,c) = STR(WithinCell(k,m)<=0)
END DO
END DO
!print table
CALL PRINT_Table( Tab(1:r,:) , Unit=Unit )
DEALLOCATE( Tab )
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_InteriorCells>
SUBROUTINE PRINT_InteriorCells( FdBk , Unit )
!!#### PURPOSE
!! Output the interior cell for each vertex and direction.
!!#### MODULES
USE VAR_MoCshort ,ONLY: WithinCell !!((47-B-VAR_MoCshort.f90))
USE VAR_DiscreteOrdinates,ONLY: Ordinates !!((47-B-VAR_DiscreteOrdinates.f90))
!!#### OPTIONAL INPUT/OUTPUT
TYPE(TYPE_FdBk),OPTIONAL,INTENT(INOUT) :: FdBk
!!#### OPTIONAL INPUT
INTEGER,OPTIONAL,INTENT(IN) :: Unit
!!#### LOCAL VARIABLES
INTEGER :: Nk,Nm,Nc,Nr,k,m,r,c,kdig,mdig
CHARACTER(64),ALLOCATABLE :: Tab(:,:)
TYPE(varying_string) :: fmtk,fmtm
!!--begin--
Nk = SIZE(WithinCell,1)
Nm = SIZE(WithinCell,2)
Nr = Nk + 1
Nc = Nm + 1
ALLOCATE( Tab(Nr,Nc) )
CALL CLEAR(Tab)
!calculate number of digits
kdig = int(log10(real(abs(Nk))))+1
mdig = int(log10(real(abs(Nm))))+1
fmtk="" ; fmtk = "(I"//TRIM(STR(kdig+1))//"."//TRIM(STR(kdig))//")"
fmtm="" ; fmtm = "(I"//TRIM(STR(mdig+1))//"."//TRIM(STR(mdig))//")"
!initialize row number
r = 1
!top left corner
Tab(r,1) = "k\m"
!set m from left to right
DO c=2,Nc
Tab(1,c) = STR(c-1,STR(fmtm))
END DO
!set k from top to bottom
DO r=2,Nr
Tab(r,1) = STR(r-1,STR(fmtk))
END DO
fmtk=""
fmtm=""
!table body
DO k=1,Nk
DO m=1,Nm
r = k+1
c = m+1
Tab(r,c) = STR(WithinCell(k,m))
END DO
END DO
!print table
CALL PRINT_Table( Tab(1:r,:) , Unit=Unit )
DEALLOCATE( Tab )
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_StreamingInterpolants>
SUBROUTINE PRINT_StreamingInterpolants( FdBk , Unit )
!!#### PURPOSE
!! Output the streaming interpolants (vertices used in
!! the streaming interpolation) for each vertex
!! and direction.
!!#### MODULES
USE VAR_Mesh ,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
USE VAR_MoCshort,ONLY: InterpOrder,FrontPos,k_ !!((47-B-VAR_MoCshort.f90))
!!#### OPTIONAL INPUT/OUTPUT
TYPE(TYPE_FdBk),OPTIONAL,INTENT(INOUT) :: FdBk
!!#### REQUIRED INPUT
INTEGER,OPTIONAL,INTENT(IN) :: Unit
!!#### LOCAL VARIABLES
INTEGER :: k,m,r,s,Nr,Nc,Nk,Nm
CHARACTER(32),ALLOCATABLE :: Tab(:,:)
!!--begin--
Nc = 2+2*(InterpOrder+1)
Nk = SIZE(k_,2)
Nm = SIZE(k_,3)
Nr = Nk*Nm + 1
ALLOCATE( Tab(Nr,Nc) )
CALL CLEAR(Tab)
!initialize row number
r = 1
!header
Tab(r,1) = "k"
Tab(r,2) = "m"
DO s=1,InterpOrder+1
Tab(r,s+2 ) = "k"//TRIM(STR(s))
END DO
DO s=1,InterpOrder+1
Tab(r,s+3+InterpOrder) = "P"//TRIM(STR(s))
END DO
!table body
DO k=1,Nk
!loop through directions
DO m=1,Nm
!cycle if there is no interpolation for this vert/direction combo
IF( k_(1,k,m)==0 )CYCLE
!update the row number
r = r + 1
!assemble the table body
Tab(r,1) = STR(k)
Tab(r,2) = STR(m)
DO s=1,InterpOrder+1
Tab(r,s+2 ) = STR(k_(s,k,m))
END DO
DO s=1,InterpOrder+1
IF( k_(s,k,m)/=0 )THEN
Tab(r,s+3+InterpOrder) = STR(FrontPos(s,k,m),"(Es22.15)")
END IF
END DO
END DO
END DO
!print table
CALL PRINT_Table( Tab(1:r,:) , Unit=Unit )
DEALLOCATE( Tab )
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_InterpInfo>
SUBROUTINE PRINT_InterpInfo(fdbk,Unit)
!!#### PURPOSE
!! Print information about the interpolation.
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
USE VAR_DiscreteOrdinates,ONLY: Ordinates !!((47-B-VAR_DiscreteOrdinates.f90))
USE VAR_MoCshort,ONLY: pThread,k_,WithinCell,FrontPos !!((47-B-VAR_MoCshort.f90))
USE VAR_EnergyGroups,ONLY: Ng !!((47-B-VAR_EnergyGroups.f90))
INTEGER,OPTIONAL,INTENT(IN) :: Unit
TYPE(TYPE_fdbk),INTENT(INOUT),OPTIONAL :: fdbk
INTEGER :: Nm,Nk,Nr,Nc,r,m,o,k,i,k1,k2,k3,p
REAL(KIND_MSH) :: x1,x2,x3,f1,f2,f3,c12,c23,c31
CHARACTER(32),ALLOCATABLE :: Tab(:,:)
!!--begin--
Nm = SIZE(Ordinates,2)
Nk = NUM_Verts(Mesh)
Nr = Nk*Nm+1
Nc = 13
WRITE(*,*)Nm,Nk,Nr,Nc
ALLOCATE( Tab(Nr,Nc) )
CALL CLEAR(Tab)
Tab(1,:) = (/"o ","k ","m ","i ","k1","k2","k3","x1","x2","x3","f1","f2","f3"/)
r = 1
!enter main loop over polar angles
m_loop: DO m = 1,SIZE(pthread)
!proceed down each path
path_loop: DO p = 1,SIZE(pthread(m)%path)
!in the proper order of verts
order_loop: DO o = 1,SIZE(pthread(m)%path(p)%order)
!get the vert index
k = pthread(m)%path(p)%order(o)
!get the cell index
i = WithinCell(k,m)
r = r + 1
Tab(r,1) = STR(o,"(I)")
Tab(r,2) = STR(k,"(I)")
Tab(r,3) = STR(m,"(I)")
Tab(r,4) = STR(i,"(I)")
!cycle if there is no cell index
IF( i<=0 )CYCLE
Tab(r,4) = STR(r-1,"(I)")
!get the two vertices for the interpolation of the scalar flux
k1 = k_(1,k,m) ; Tab(r,5) = STR(k1)
k2 = k_(2,k,m) ; Tab(r,6) = STR(k2)
IF( SIZE(k_,1)>2 )THEN
k3 = k_(3,k,m) ; Tab(r,7) = STR(k3)
ELSE
k3 = 0
END IF
!get front positions
IF( ASSOCIATED(FrontPos) )THEN
x1 = FrontPos( 1 , k , m ) ; Tab(r,08) = STR(x1,"(Es22.15)")
x2 = FrontPos( 2 , k , m ) ; Tab(r,09) = STR(x2,"(Es22.15)")
IF( SIZE(k_,1)>2 )THEN
x3 = FrontPos( 3 , k , m ) ; Tab(r,10) = STR(x3,"(Es22.15)")
ELSE
x3 = 0.d0
END IF
!calculate streaming angular fluxes
IF( k1==0 )THEN
f1 = 0.d0
f2 = - x2/( x3 - x2 )
f3 = + x3/( x3 - x2 )
ELSE IF( k2==0 )THEN
f1 = - x1/( x3 - x1 )
f2 = 0.d0
f3 = + x3/( x3 - x1 )
ELSE IF( k3==0 )THEN
f1 = + x2/( x2 - x1 )
f2 = - x1/( x2 - x1 )
f3 = 0.d0
ELSE
!(all this comes from symbolic evaluation in Maple)
c12 = x1*x2
c23 = x2*x3
c31 = x3*x1
f1 = + c23/(-c12+c23-c31+x1**2)
f2 = - c31/(+c12+c23-c31-x2**2)
f3 = + c12/(+c12-c23-c31+x3**2)
END IF
Tab(r,11) = STR(f1,"(F)")
Tab(r,12) = STR(f2,"(F)")
Tab(r,13) = STR(f3,"(F)")
END IF
END DO order_loop
END DO path_loop
END DO m_loop
CALL PRINT_Table(Tab(1:r,:),Unit=Unit)
DEALLOCATE( Tab )
!!--end--
END SUBROUTINE
!!### PRINTING SUBROUTINE: <PRINT_CharacteristicInfo>
SUBROUTINE PRINT_CharacteristicInfo(fdbk,Unit)
!!#### PURPOSE
!! Print information about the characteristics.
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
USE VAR_EnergyGroups,ONLY: Ng !!((47-B-VAR_EnergyGroups.f90))
USE VAR_DiscreteOrdinates,ONLY: Ordinates !!((47-B-VAR_DiscreteOrdinates.f90))
USE VAR_MoCshort,ONLY: pthread,WithinCell,k_,SourceDist,StreamDist !!((47-B-VAR_MoCshort.f90))
INTEGER,OPTIONAL,INTENT(IN) :: Unit
TYPE(TYPE_fdbk),INTENT(INOUT),OPTIONAL :: fdbk
INTEGER :: Nm,Nk,Nr,Nc,r,k,m,k1,k2,k3,p,o,i
REAL(KIND_MSH) :: s1,s2,s3,s0
CHARACTER(32),ALLOCATABLE :: Tab(:,:)
!!--begin--
Nm = SIZE(Ordinates,2)
Nk = NUM_Verts(Mesh)
Nr = Nk*Nm+1
Nc = 9
ALLOCATE( Tab(Nr,Nc) )
CALL CLEAR(Tab)
Tab(1,:) = (/"k ","m ","k1","k2","k3","s1","s2","s3","s0"/)
r = 1
!enter main loop over polar angles
m_loop: DO m = 1,SIZE(pthread)
!proceed down each path
path_loop: DO p = 1,SIZE(pthread(m)%path)
!in the proper order of verts
order_loop: DO o = 1,SIZE(pthread(m)%path(p)%order)
!get the vert index
k = pthread(m)%path(p)%order(o)
i = WithinCell(k,m)
r = r + 1
Tab(r,1) = STR(k,"(I)")
Tab(r,2) = STR(m,"(I)")
!cycle if there is no cell index
IF( i<=0 )CYCLE
!get the two vertices for the interpolation of the scalar flux
k1 = k_(1,k,m) ; Tab(r,3) = STR(k1)
k2 = k_(2,k,m) ; Tab(r,4) = STR(k2)
IF( SIZE(k_,1)>2 )THEN
k3 = k_(3,k,m) ; Tab(r,5) = STR(k3)
ELSE
k3 = 0
END IF
!get streaming distances
IF( ASSOCIATED(StreamDist) )THEN
s1 = StreamDist( 1 , k , m ) ; Tab(r,6) = STR(s1,"(Es22.15)")
s2 = StreamDist( 2 , k , m ) ; Tab(r,7) = STR(s2,"(Es22.15)")
IF( SIZE(k_,1)>2 )THEN
s3 = StreamDist( 3 , k , m ) ; Tab(r,8) = STR(s3,"(Es22.15)")
ELSE
s3 = 0.d0
END IF
s0 = SourceDist(k,m) ; Tab(r,9) = STR(s0,"(Es22.15)")
END IF
END DO order_loop
END DO path_loop
END DO m_loop
CALL PRINT_Table(Tab(1:r,:),Unit=Unit)
DEALLOCATE( Tab )
!!--end--
END SUBROUTINE
SUBROUTINE PrintAngularFlux()
USE VAR_Mesh,ONLY: Mesh !!((46-B-VAR_Mesh.f90))
USE VAR_AngularFluxes !!((04-C-VAR_AngularFluxes.f90))
!!#### LOCAL VARIABLES
INTEGER :: Unit,k,m
REAL(KIND_MSH) :: V(2)
!!--begin--
DO m=1,SIZE(AngularFluxV,3)
Unit = Newfile("AF_"//TRIM(STR(m)))
DO k=1,NUM_Verts(Mesh)
V = Vert(Mesh,k)
WRITE(Unit,"(3e16.5)")V(1),V(2),AngularFluxV(1,k,m)
END DO
CLOSE(Unit)
END DO
!!--end--
END SUBROUTINE
SUBROUTINE PrintQTracks()
INTEGER :: Unit
!!--begin--
Unit = Newfile("Qtracks")
CALL SimpSurf( Unit , Qfull2D , (/0.d0,0.1d0,5.d0/) , (/0.d0,0.1d0,3.d0/) )
CLOSE(Unit)
!!--end--
END SUBROUTINE
SUBROUTINE PRINT_Options_MCS(Unit)
USE VAR_MoCshort !!((47-B-VAR_MoCshort.f90))
INTEGER,INTENT(IN) :: Unit
!!--begin--
WRITE(Unit,"(a)")" * [[MCS]] Options"
CALL PUT_line(Unit,VSTROPTION("InterpOrder",STR(InterpOrder)))
CALL PUT_line(Unit,VSTROPTION("SourceOrder",STR(SourceOrder)))
CALL PUT_line(Unit,VSTROPTION("OnlyGeometry",STR(OnlyGeometry)))
CALL PUT_line(Unit,VSTROPTION("InterpPlaneU",STR(InterpPlaneU)))
CALL PUT_line(Unit,VSTROPTION("Using_LinearSourceTest",STR(Using_LinearSourceTest)))
CALL PUT_line(Unit,VSTROPTION("Using_Monotonization",STR(Using_Monotonization)))
CALL PUT_line(Unit,VSTROPTION("Using_MonoLin",STR(Using_MonoLin)))
CALL PUT_line(Unit,VSTROPTION("Using_Splitting",STR(Using_Splitting)))
CALL PUT_line(Unit,VSTROPTION("Using_LogTransform",STR(Using_LogTransform)))
CALL PUT_line(Unit,VSTROPTION("Using_Jiggle",STR(Using_Jiggle)))
CALL PUT_line(Unit,VSTROPTION("Using_ExplodeFix",STR(Using_ExplodeFix)))
CALL PUT_line(Unit,VSTROPTION("Using_NoBacksies",STR(Using_NoBacksies)))
CALL PUT_line(Unit,VSTROPTION("Using_LongCharacteristics",STR(Using_LongCharacteristics)))
CALL PUT_line(Unit,VSTROPTION("Using_CachedLongCharacteristics",STR(Using_CachedLongCharacteristics)))
CALL PUT_line(Unit,VSTROPTION("Using_PackedCaching",STR(Using_PackedCaching)))
CALL PUT_line(Unit,VSTROPTION("Using_AFSymmetryCheck",STR(Using_AFSymmetryCheck)))
!!--end--
END SUBROUTINE
END MODULE
|
(* Author: R. Thiemann *)
subsection \<open>Perron-Frobenius theorem via Brouwer's fixpoint theorem.\<close>
theory Perron_Frobenius
imports
"HOL-Analysis.Brouwer_Fixpoint"
Perron_Frobenius_Aux
begin
text \<open>We follow the textbook proof of Serre \cite[Theorem 5.2.1]{SerreMatrices}.\<close>
context
fixes A :: "complex ^ 'n ^ 'n :: finite"
assumes rnnA: "real_non_neg_mat A"
begin
private abbreviation(input) sr where "sr \<equiv> spectral_radius A"
private definition max_v_ev :: "(complex^'n) \<times> complex" where
"max_v_ev = (SOME v_ev. eigen_vector A (fst v_ev) (snd v_ev)
\<and> norm (snd v_ev) = sr)"
private definition "max_v = (1 / norm1 (fst max_v_ev)) *\<^sub>R fst max_v_ev"
private definition "max_ev = snd max_v_ev"
private lemma max_v_ev:
"eigen_vector A max_v max_ev"
"norm max_ev = sr"
"norm1 max_v = 1"
proof -
obtain v ev where id: "max_v_ev = (v,ev)" by force
from spectral_radius_ev[of A] someI_ex[of "\<lambda> v_ev. eigen_vector A (fst v_ev) (snd v_ev)
\<and> norm (snd v_ev) = sr", folded max_v_ev_def, unfolded id]
have v: "eigen_vector A v ev" and ev: "norm ev = sr" by auto
from normalize_eigen_vector[OF v] ev
show "eigen_vector A max_v max_ev" "norm max_ev = sr" "norm1 max_v = 1"
unfolding max_v_def max_ev_def id by auto
qed
text \<open>In the definition of S, we use the linear norm instead of the
default euclidean norm which is defined via the type-class.
The reason is that S is not convex if one uses the euclidean norm.\<close>
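text \<open>As a small illustration of this point (a sketch added for exposition, not part of
the original development): for distinct non-negative unit vectors $v \neq w$ the Euclidean
midpoint satisfies $\|\tfrac{1}{2}(v+w)\|_2 < 1$, so the Euclidean unit sphere is not convex,
whereas the 1-norm is additive on non-negative vectors,
$\|\tfrac{1}{2}(v+w)\|_1 = \tfrac{1}{2}\|v\|_1 + \tfrac{1}{2}\|w\|_1 = 1$;
this is exactly what the convexity proof of S below exploits.\<close>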
private definition B :: "real ^ 'n ^ 'n" where "B \<equiv> \<chi> i j. Re (A $ i $ j)"
private definition S where "S = {v :: real ^ 'n . norm1 v = 1 \<and> (\<forall> i. v $ i \<ge> 0) \<and>
(\<forall> i. (B *v v) $ i \<ge> sr * (v $ i))}"
private definition f :: "real ^ 'n \<Rightarrow> real ^ 'n" where
"f v = (1 / norm1 (B *v v)) *\<^sub>R (B *v v)"
private lemma boundedS: "bounded S"
proof -
{
fix v :: "real ^ 'n"
from norm1_ge_norm[of v] have "norm1 v = 1 \<Longrightarrow> norm v \<le> 1" by auto
}
thus ?thesis
unfolding S_def bounded_iff
by (auto intro!: exI[of _ 1])
qed
private lemma compactS: "compact S"
using boundedS closedS
by (simp add: compact_eq_bounded_closed)
private lemmas rnn = real_non_neg_matD[OF rnnA]
lemma B_norm: "B $ i $ j = norm (A $ i $ j)"
using rnn[of i j]
by (cases "A $ i $ j", auto simp: B_def)
lemma mult_B_mono: assumes "\<And> i. v $ i \<ge> w $ i"
shows "(B *v v) $ i \<ge> (B *v w) $ i" unfolding matrix_vector_mult_def vec_lambda_beta
by (rule sum_mono, rule mult_left_mono[OF assms], unfold B_norm, auto)
private lemma non_emptyS: "S \<noteq> {}"
proof -
let ?v = "(\<chi> i. norm (max_v $ i)) :: real ^ 'n"
have "norm1 max_v = 1" by (rule max_v_ev(3))
hence nv: "norm1 ?v = 1" unfolding norm1_def by auto
{
fix i
have "sr * (?v $ i) = sr * norm (max_v $ i)" by auto
also have "\<dots> = (norm max_ev) * norm (max_v $ i)" using max_v_ev by auto
also have "\<dots> = norm ((max_ev *s max_v) $ i)" by (auto simp: norm_mult)
also have "max_ev *s max_v = A *v max_v" using max_v_ev(1)[unfolded eigen_vector_def] by auto
also have "norm ((A *v max_v) $ i) \<le> (B *v ?v) $ i"
unfolding matrix_vector_mult_def vec_lambda_beta
by (rule sum_norm_le, auto simp: norm_mult B_norm)
finally have "sr * (?v $ i) \<le> (B *v ?v) $ i" .
} note le = this
have "?v \<in> S" unfolding S_def using nv le by auto
thus ?thesis by blast
qed
private lemma convexS: "convex S"
proof (rule convexI)
fix v w a b
assume *: "v \<in> S" "w \<in> S" "0 \<le> a" "0 \<le> b" "a + b = (1 :: real)"
let ?lin = "a *\<^sub>R v + b *\<^sub>R w"
from * have 1: "norm1 v = 1" "norm1 w = 1" unfolding S_def by auto
have "norm1 ?lin = a * norm1 v + b * norm1 w"
unfolding norm1_def sum_distrib_left sum.distrib[symmetric]
proof (rule sum.cong)
fix i :: 'n
from * have "v $ i \<ge> 0" "w $ i \<ge> 0" unfolding S_def by auto
thus "norm (?lin $ i) = a * norm (v $ i) + b * norm (w $ i)"
using *(3-4) by auto
qed simp
also have "\<dots> = 1" using *(5) 1 by auto
finally have norm1: "norm1 ?lin = 1" .
{
fix i
from * have "0 \<le> v $ i" "sr * v $ i \<le> (B *v v) $ i" unfolding S_def by auto
with \<open>a \<ge> 0\<close> have a: "a * (sr * v $ i) \<le> a * (B *v v) $ i" by (intro mult_left_mono)
from * have "0 \<le> w $ i" "sr * w $ i \<le> (B *v w) $ i" unfolding S_def by auto
with \<open>b \<ge> 0\<close> have b: "b * (sr * w $ i) \<le> b * (B *v w) $ i" by (intro mult_left_mono)
from a b have "a * (sr * v $ i) + b * (sr * w $ i) \<le> a * (B *v v) $ i + b * (B *v w) $ i" by auto
} note le = this
have switch[simp]: "\<And> x y. x * a * y = a * x * y" "\<And> x y. x * b * y = b * x * y" by auto
have [simp]: "x \<in> {v,w} \<Longrightarrow> a * (r * x $h i) = r * (a * x $h i)" for a r i x by auto
show "a *\<^sub>R v + b *\<^sub>R w \<in> S" using * norm1 le unfolding S_def
by (auto simp: matrix_vect_scaleR matrix_vector_right_distrib ring_distribs)
qed
private abbreviation (input) r :: "real \<Rightarrow> complex" where
"r \<equiv> of_real"
private abbreviation rv :: "real ^'n \<Rightarrow> complex ^'n" where
"rv v \<equiv> \<chi> i. r (v $ i)"
private lemma rv_0: "(rv v = 0) = (v = 0)"
by (simp add: of_real_hom.map_vector_0 map_vector_def vec_eq_iff)
private lemma rv_mult: "A *v rv v = rv (B *v v)"
proof -
have "map_matrix r B = A"
using rnnA unfolding map_matrix_def B_def real_non_neg_mat_def map_vector_def elements_mat_h_def
by vector
thus ?thesis
using of_real_hom.matrix_vector_mult_hom[of B, where 'a = complex]
unfolding map_vector_def by auto
qed
context
assumes zero_no_ev: "\<And> v. v \<in> S \<Longrightarrow> A *v rv v \<noteq> 0"
begin
private lemma normB_S: assumes v: "v \<in> S"
shows "norm1 (B *v v) \<noteq> 0"
proof -
from zero_no_ev[OF v, unfolded rv_mult rv_0]
show ?thesis by auto
qed
private lemma image_f: "f ` S \<subseteq> S"
proof -
{
fix v
assume v: "v \<in> S"
hence norm: "norm1 v = 1" and ge: "\<And> i. v $ i \<ge> 0" "\<And> i. sr * v $ i \<le> (B *v v) $ i" unfolding S_def by auto
from normB_S[OF v] have normB: "norm1 (B *v v) > 0" using norm1_nonzero by auto
have fv: "f v = (1 / norm1 (B *v v)) *\<^sub>R (B *v v)" unfolding f_def by auto
from normB have Bv0: "B *v v \<noteq> 0" unfolding norm1_0_iff[symmetric] by linarith
have norm: "norm1 (f v) = 1" unfolding fv using normB Bv0 by simp
define c where "c = (1 / norm1 (B *v v))"
have c: "c > 0" unfolding c_def using normB by auto
{
fix i
have 1: "f v $ i \<ge> 0" unfolding fv c_def[symmetric] using c ge
by (auto simp: matrix_vector_mult_def sum_distrib_left B_norm intro!: sum_nonneg)
have id1: "\<And> i. (B *v f v) $ i = c * ((B *v (B *v v)) $ i)"
unfolding f_def c_def matrix_vect_scaleR by simp
have id3: "\<And> i. sr * f v $ i = c * ((B *v (sr *\<^sub>R v)) $ i)"
unfolding f_def c_def[symmetric] matrix_vect_scaleR by auto
have 2: "sr * f v $ i \<le> (B *v f v) $ i" unfolding id1 id3
unfolding real_mult_le_cancel_iff2[OF \<open>c > 0\<close>]
by (rule mult_B_mono, insert ge(2), auto)
note 1 2
}
with norm have "f v \<in> S" unfolding S_def by auto
}
thus ?thesis by blast
qed
private lemma cont_f: "continuous_on S f"
unfolding f_def[abs_def] continuous_on using normB_S
unfolding norm1_def
by (auto intro!: tendsto_eq_intros)
qualified lemma perron_frobenius_positive_ev:
"\<exists> v. eigen_vector A v (r sr) \<and> real_non_neg_vec v"
proof -
from brouwer[OF compactS convexS non_emptyS cont_f image_f]
obtain v where v: "v \<in> S" and fv: "f v = v" by auto
define ev where "ev = norm1 (B *v v)"
from normB_S[OF v] have "ev \<noteq> 0" unfolding ev_def by auto
with norm1_ge_0[of "B *v v", folded ev_def] have norm: "ev > 0" by auto
from arg_cong[OF fv[unfolded f_def], of "\<lambda> (w :: real ^ 'n). ev *\<^sub>R w"] norm
have ev: "B *v v = ev *s v" unfolding ev_def[symmetric] scalar_mult_eq_scaleR by simp
with v[unfolded S_def] have ge: "\<And> i. sr * v $ i \<le> ev * v $ i" by auto
have "A *v rv v = rv (B *v v)" unfolding rv_mult ..
also have "\<dots> = ev *s rv v" unfolding ev vec_eq_iff
by (simp add: scaleR_conv_of_real scaleR_vec_def)
finally have ev: "A *v rv v = ev *s rv v" .
from v have v0: "v \<noteq> 0" unfolding S_def by auto
hence "rv v \<noteq> 0" unfolding rv_0 .
with ev have ev: "eigen_vector A (rv v) ev" unfolding eigen_vector_def by auto
hence "eigen_value A ev" unfolding eigen_value_def by auto
from spectral_radius_max[OF this] have le: "norm (r ev) \<le> sr" .
from v0 obtain i where "v $ i \<noteq> 0" unfolding vec_eq_iff by auto
from v have "v $ i \<ge> 0" unfolding S_def by auto
with \<open>v $ i \<noteq> 0\<close> have "v $ i > 0" by auto
with ge[of i] have ge: "sr \<le> ev" by auto
with le have sr: "r sr = ev" by auto
from v have *: "real_non_neg_vec (rv v)" unfolding S_def real_non_neg_vec_def vec_elements_h_def by auto
show ?thesis unfolding sr
by (rule exI[of _ "rv v"], insert * ev norm, auto)
qed
end
qualified lemma perron_frobenius_both:
"\<exists> v. eigen_vector A v (r sr) \<and> real_non_neg_vec v"
proof (cases "\<forall> v \<in> S. A *v rv v \<noteq> 0")
case True
show ?thesis
by (rule Perron_Frobenius.perron_frobenius_positive_ev[OF rnnA], insert True, auto)
next
case False
then obtain v where v: "v \<in> S" and A0: "A *v rv v = 0" by auto
hence id: "A *v rv v = 0 *s rv v" and v0: "v \<noteq> 0" unfolding S_def by auto
from v0 have "rv v \<noteq> 0" unfolding rv_0 .
with id have ev: "eigen_vector A (rv v) 0" unfolding eigen_vector_def by auto
hence "eigen_value A 0" unfolding eigen_value_def ..
from spectral_radius_max[OF this] have 0: "0 \<le> sr" by auto
from v[unfolded S_def] have ge: "\<And> i. sr * v $ i \<le> (B *v v) $ i" by auto
from v[unfolded S_def] have rnn: "real_non_neg_vec (rv v)"
unfolding real_non_neg_vec_def vec_elements_h_def by auto
from v0 obtain i where "v $ i \<noteq> 0" unfolding vec_eq_iff by auto
from v have "v $ i \<ge> 0" unfolding S_def by auto
with \<open>v $ i \<noteq> 0\<close> have vi: "v $ i > 0" by auto
from rv_mult[of v, unfolded A0] have "rv (B *v v) = 0" by simp
hence "B *v v = 0" unfolding rv_0 .
from ge[of i, unfolded this] vi have ge: "sr \<le> 0" by (simp add: mult_le_0_iff)
with \<open>0 \<le> sr\<close> have "sr = 0" by auto
show ?thesis unfolding \<open>sr = 0\<close> using rnn ev by auto
qed
end
text \<open>Perron-Frobenius: The largest complex eigenvalue of a real-valued non-negative matrix
is a real one, and it has a real-valued non-negative eigenvector.\<close>
lemma perron_frobenius:
assumes "real_non_neg_mat A"
shows "\<exists>v. eigen_vector A v (of_real (spectral_radius A)) \<and> real_non_neg_vec v"
by (rule Perron_Frobenius.perron_frobenius_both[OF assms])
text \<open>And a version which ignores the eigenvector.\<close>
lemma perron_frobenius_eigen_value:
assumes "real_non_neg_mat A"
shows "eigen_value A (of_real (spectral_radius A))"
using perron_frobenius[OF assms] unfolding eigen_value_def by blast
end
|
After Thai 2K closed and the owners opened KetMoRee across the street, they eventually opened Red 88 Noodle Bar in the old location. Their http://www.abc.ca.gov/datport/LQSData.asp?ID=2030473271 liquor license was issued 20090916. They were mentioned in the Davis Enterprise http://docs.newsbank.com/openurl?ctx_ver=z39.88-2004&rft_id=info:sid/iw.newsbank.com:AWNB:DVEB&rft_val_format=info:ofi/fmt:kev:mtx:ctx&rft_dat=12B2DE1453208BA0&svc_dat=InfoWeb:aggregated5&req_dat=0FCEDEB95BD4D563 column Comings and Goings by Wendy Weitzel on 20091004.
Lunch and Dinner
Red88 Noodle Bar serves a variety of noodle soups, noodle dishes, rice plates, and appetizers during all hours of operation, even until 3AM Wednesday through Saturday. Prices start at $4 for a Crispy Tofu appetizer up to $9 for Duck Soup or Spicy Lamb over Rice. There are also rotating specials on the chalkboard outside and the neon sign behind the bar. Most of the vegetarian options listed are also vegan. Most items in an entree may be substituted to accommodate vegetarian and vegan options, as well as allergies. During the daily 5pm-7pm Happy Hour all appetizers are $1 off.
The Bar
On draft are $4 Coors Light, Blue Moon, & Boont Amber Ale pints, $14 for a pitcher. $4 wells, and from 10pm-12am they will do a double of one of the wells for $5 (only $1 more). There is also a full bar with a few options for a variety of different types of liquor. Red 88 also has their own cocktail menu with drinks not available anywhere else. During the daily happy hour of 5-7pm there is $1 off all specialty cocktails and $5 top shelf.
A Recent Addition
Red 88 has recently gotten several Singha Beer Towers; each holds 8 beers and is only $25, which is perfect for a group of people.
All day Wednesday (12PM-1:30AM) there are $5 FML cocktails.
Red88 offers stamp cards which grant a stamp for every $10 spent, whether it is on food or alcohol. On weekends free cover passes for KetMoRee's nightclub may also be given out for every $10 spent.
20091009 21:39:55 I saw Cocktails on the very tip-top of their menu posted on the window and ran away. Besides, it looked AND sounded kind of trendy and I'm not very trendy. Perhaps I'll be braver another day, especially once reviews come in that talk about 1) quality of food and 2) quantity of food for the price. Okay, y'all, get to dining! Users/RyanMikulovsky
20091011 12:50:44 We went there for dinner last night. I had the duck noodles, and my husband had a soup. Both were very good, and the prices were quite reasonable ($7-9). Service was friendly and fast, and the owner came over and gave us her recommendations. It does have a limited menu, which made it hard to understand why our server hadn't tried the food yet and couldn't help us choose between two of the dishes we were looking at. Users/KaraU
20091011 16:13:39 Went for lunch today with a small group of friends. There was only one other table being occupied in the entire restaurant, which made us wonder why it took so long for the waiter to come take our orders and for our food to come out (at least a 25 minute wait for the food). When we first saw the food, we were disappointed by the portion size, but in the end we were surprised that the food filled us up. The Thai iced tea is refillable and delicious, but I had to dilute it with water because it was way too sweet. I had the duck noodle with soup, which was pretty good but the soup was lukewarm and sweet, which I was not expecting. Service was a bit slow, but very attentive...but then again there were only two tables being used during the time. Prices are a little too high. I hope they expand the menu. All in all, it was a pleasant experience...not a good one, but certainly not bad. Users/BenLee
20091012 20:12:23 I had dinner here tonight and it was really good. I had Thai iced tea and the roast duck soup, both really good. The appetizers were $2 off with order of an entree, so we had the crab and cream cheese wontons which were also really good. I'd definitely go here again, but the menu is pretty limited. Users/NickSchmalenberger
20091016 16:24:47 Had their spicy pork noodles for lunch (not the soup) and was rather disappointed. It was a small clump of egg noodles with a sprinkle of chopped pork and other garnishes. It was a rather dry dish needing more seasoning. I'm willing to try their duck noodles from what I've read here but won't be ordering these pork noodles again. Users/GeorgeScheer
20091019 20:31:37 The food is actually pretty good, though a bit overpriced for what you get in my opinion. The Mayan calendar predicts the world will end in the year 2011.
Well if you want to sing out, sing out. Users/red88fan
Not that this has anything to do with this restaurant, but the Mayan calendar actually does not say the world will end in 2011, nor 2012.
20091021 14:23:02 First well-seasoned spicy food I've had in Davis. They don't simply overpower it with loads of one spicy ingredient such as jalapeno, and succeed in making a spicy dish without an overpowering single spice. Both of the spicy noodles they have on the menu are quite good (or at least, better than anywhere else in Davis for spicy food), and while I really like the spicy pork, I agree with GeorgeScheer that it is a tad dry and the pork doesn't have much flavor in itself. The noodles are quite delicious, and the peanut sauce is too. Service is slightly slow, but very friendly, and food is tasty for a reasonable price.
I'd recommend. Users/William42
20091027 19:42:01 Went there the other night and was quite impressed. As far as noodle dishes go, this place puts the other places to shame. Had the chicken chow mein and it was the best noodle dish I have ever had anywhere. I found the prices to be reasonable as well. Users/RossHanstad
20091029 18:54:21 Nothing special, tasted so-so. Got broccoli stems, and we wanted real broccoli! Super little portion and overpriced. The service was subpar. No lunch special, should've gone somewhere else. 2 stars out of 5. Users/emptyricepot
20100120 15:31 I am sorry that our server's abilities were not up to your standards. We are working on our lunch specials but have not yet finalized our menu. We do not currently serve American Broccoli and I understand how someone who is not familiar with Chinese Broccoli would mistake it for American Broccoli stems.
It is unfortunate that your experience with us was so negative. I would appreciate it if you gave us another chance. We have taken some steps to improve our service and keep our portions consistent with what we think is a fair value. Users/Red88
20091031 22:22:37 3 out of 5. The duck noodle soup was rather tasty and surprisingly filling despite the deceptively small bowl it was served in. This place has a lot of potential as a social lounge that also happens to serve noodle soup, but they need to find ways to get people through the door first and then keep them there.
I've always been a sucker for restaurants with good ambiance and half a dozen flatscreen TVs. My boyfriend and I decided to give this place a try after passing by it and getting a sample of their spicy (popcorn?) chicken appetizer. Aside from two parties finishing up and a couple of what seemed to be restaurant regulars having a conversation with staff at the bar, we were the only ones seated. The menu is limited, with only a handful of appetizers, entrees, and desserts to choose from (but many more drinks). We both ordered a duck noodle soup which, despite being only moderately warm upon arriving, was still pretty flavorful. My boyfriend approved of the 88 sauce, which he described as tangy and a little spicy. I'm allergic to the color green and chartreuse, so I'll have to take his word for it.
Serving sizes were adequate, despite the seemingly small bowls they were served in (tip to owners: try using bowls that look bigger so passersby will be impressed). However, if you're really starving for something, one bowl of noodles probably won't fill you up and you'll feel like you overpaid. The service was relatively slow compared to other noodle places like Noodle City: we were the only party ordering something from the kitchen and it still took about 15 minutes to get our food. Given the simplicity of preparing a noodle soup, I was a little put off by the delay.
Overall, I would consider coming back here if I ever have a hankering for duck noodle soup. However, I can't say I would strongly recommend this place to my friends (unless, of course, they're looking for duck noodle soup). Users/AbbYu
20091102 17:41:53 Red 88 Noodle Bar has a very small menu. 5 meat-filled appetizers, 3 vegetarian appetizers, 5 meat soups, 5 low-meat noodle dishes, and 3 vegetarian dishes (2 soups, 1 veggie chow mein). Cash only, ATM available in house. Okay for lunch or a simple dinner. Prices for appetizers are $6-8; soups & noodles are $7-9. Nice decor, but a lot of hard benches available ... easy to find a chair. No one seems to know they're open yet. Nice waiter who was also manning the bar. Free wifi.
The shrimp dumplings (tiny) are wonderful, the spicy pork noodle was completely not worth the effort due to the lack of meat & veg, the roasted duck noodle was great with added spicy sauce from the shrimp dumplings. I would try their soups next and the pork dumplings, but the shrimp ones were really good. Looks like a full bar.
KetMoRee is much better, but this is less expensive and fine for a quick bite. I picked up their takeout menu. If someone can tell me how to post a pdf of their menu, I'll do that.
Users/Deacon
20091103 17:49:40 I just went there today and was very pleased. I had the spicy noodles; all the spices they bring out to add to your dishes are quite good and have very different tastes. The service was very quick and we did not have to wait long for our food or for our refills of Thai iced tea. According to the Aggie they plan on expanding their menu as they become more popular. The fact that they only take cash is quite annoying (just as in Crepeville) but I am glad they stay open past 2am half of the week, which is a great idea, and I hope the small space can accommodate the amount of patrons they will probably have, especially since G St Pub is right across the street. Users/LolaTorney
20091104 22:12:45 Red 88 Noodle Bar is very disappointing.
1. They said that the spices that are given on the side are very spicy, but it's not at all. I don't think they even tried the spices, to begin with. It just looks very very red, but it has a roasted taste to it.
2. They give very little soup for their noodle soup dishes. It seems that the noodles sucked up the soup when they give it to you.
3. And the dish is not even hot, it's lukewarm.
4. I asked for salt & pepper, because it wasn't very flavorful, and they came back saying they didn't have any. How is that possible?
5. Another thing I do not understand is why they cannot substitute the noodles. I don't like udon noodles, so instead I asked for thin noodles and they said that's not possible. I mean, you have both noodles, it shouldn't be hard. I would understand if the restaurant was very busy and filled with customers, but it was only me and my friends, and another table of 4 people.
6. My friend ordered the wings for an appetizer and when the food came out, they didn't even cook the meat all the way. Had to ask to recook it.
7. I agree with the other reviews about the wait.
Please note: I usually do not complain about my food. I'm not a very picky eater, but this place made me seem like the pickiest eater ever. I gave this place several tries. I'm not just writing this review after one visit.
I worry about this restaurant. Users/anonymon
20091106 12:12:17 This place is a joke. Food is overpriced and underserved, not to mention the shady quality. Take the wonton they have for example; a small piece of shrimp/meat wrapped in wonton paper. I've had microwaved wontons that taste better than theirs. Users/Col
20091106 14:45:50 I've actually been to this restaurant on 3 occasions now. I have actually gone twice with anonymon. Here is my experience with Noodle 88.
Service
The one server with the goatee is very nice. He is very patient and takes his time to take your orders. He is very helpful; he is actually the best server Noodle 88 has. Aside from him, all the other servers have been very rude.
The one man who seems to be a shift leader from KetMoRee is very, very rude. He rushed us while we were ordering our food. He refused to allow us a simple noodle substitution that anonymon requested, which she was allowed the first time we went, but this server rudely told us that we cannot substitute things. He basically yelled at us and said that we should not be asking for substitutions or special requests.
If you want things like salt, pepper, or other staples I suggest you do not ask. The servers will tell you that they are not allowed to give you those kinds of things and they seem to get pretty annoyed when people ask for them.
Even when we have gone to this restaurant during slow times the food takes at least 20-25 minutes to be served.
Food
Noodle dishes come in a small bowl that can hardly fit anything. They overstuff the bowl with noodles, and give you hardly any broth. In terms of meats, they really short you. They give you like 5 or so little pieces of meat that do not satiate one's hunger.
The food overall is very bland. The chicken noodle soup lacks any flavor. Some of the other dishes we have tried were also very, very bland (Thai boat noodle soup, curry chicken).
The spices they serve are not really spicy. The servers tell you they are really spicy, but you can put tons of chili into your soup and it still tastes very bland.
Pricing
Pricing is reasonable for the basic items like the chicken noodle soup. Any of the items around the $9 range are extremely small portions in terms of what you pay. Users/ThUn
20091119 03:55:36 If they had half-off prices for appetizers after midnight, I'd totally be there. But if not, what is the point of being open until 3am? Users/SunjeetBaadkar
20091206 03:49:49 I was somewhat disappointed. Right after I sat down, I had a feeling the food wasn't going to be that great. The service was good, however; waiters were attentive and humble. I had the duck noodle soup since everybody seemed to say that was the best thing they had. My first impression when they handed me the small bowl was not a very good one. I waited 15 minutes for a dish that takes 5 minutes to prepare. Although the soup wasn't hot, it was tasty, nothing special though. For 9 dollars, I felt like I didn't get what I paid for, but who does nowadays? The duck noodle soup came with 5 small pieces of duck and an amount of noodles equivalent to a bag of ramen. I felt like I was eating a kimchi bowl with a few pieces of leftover roast duck from Chinatown.
3 out of 5 Users/betabeta
20091207 23:10:42 Duck noodle soup is pretty good, though a bit overpriced. Thai Iced Tea was WAYYYYY too sweet. Sugar overload. Asked if they could add more tea... they said that they couldn't do that, but could add more ice so it'd eventually dilute it. Diluting anything with milk/cream in it already is never a good idea.... It ended up tasting like milky sugar water. Bleh. Users/JenniferGiang
20091210 15:30:32 ONE OF MY FAVORITE RESTAURANTS IN DAVIS! Get the cream cheese wontons or curry chicken soup! Amazing. Loved it! Users/Lexie
20091212 01:19:10 1. The spicy pork noodles were entirely too spicy. Ridiculously spicy.
2. There was one waitress/hostess for the full restaurant, and needless to say she was about to lose it.
3. Cash only.
Glad I tried it, and I'll probably try it again, but that was definitely $10 I wish I hadn't parted with... Users/Shiva
20091212 07:04:32 Went here for the first time this week to warm up with some soup, and liked it fairly well. It may sound odd to point out that everything in the duck soup tastes a lot like duck, but I usually prefer a more subtle broth. It also could have used some veggies. But these are small complaints. The pork dumpling appetizers were yummy too, and came with 3 or 4 different spicy sauces which were delicious. Users/EdHenn
20091213 11:40:02 Warning to all vegetarians/vegans: their vegetarian menu options are anything but; there is chicken broth in both of the vegetarian soups, so the only dish that is vegetarian is the chow mein, which I tried and which left me wanting in flavor, portion, and overall quality. I also had the spring rolls, which were good, but big whoop, you can get those anywhere. For better and actual vegetarian Chinese food I'm going to stick to Ding How in north Davis! Users/amber15
20100120 15:41 I am not sure where you are getting your information from. We have 1 vegetarian/vegan soup option (Veggie Soup), 1 vegetarian noodle option (Veggie Chow Mein), 1 vegan noodle option (Tofu Rad Nar), 2 vegetarian appetizer options (Veggie Spring Rolls and Corn Cakes), and 1 vegan appetizer option (Crispy Tofu).
None of our Vegetarian options have Chicken stock or broth in them. None of our Vegan options are made with any animal products. We are currently adding more dishes which will increase our number of options available for customers with special dietary needs. Users/Red88
20091218 15:33:30 Just got back from lunch. Basically OK, but I doubt we will be going back. Some of the low points: 1) WAAAAAY too much sweetened condensed milk in the Thai Iced Tea; the ratio seemed to be like 4:1 condensed milk to tea, sickeningly sweet. 2) Vegetarian options pretty uninspired. 3) Server spent several seconds sort of absent-mindedly stirring the sauces while he gave a semi-coherent description warning us about how hot they were; sort of off-putting and confusing. In the end the sauces were not all that hot, as others have noted. 4) Too much black pepper in the Tofu Rad Nar, and I really like black pepper, but it really overwhelmed whatever other flavors were in there (which admittedly didn't seem to be much beyond pretty intensely salty/sweet).
Upon leaving the restaurant a passerby asked us about the food. "Fine but not great," I said. That basically sums it up. Users/neilk
20100114 16:24:58 I haven't tried the food, but my boyfriend and I went in on a Friday night around 11 or so (pretty early) and sat at the bar. The service at the bar was great. The bartenders got us what we wanted promptly and offered us food throughout our stay, though we had already eaten. They even gave my boyfriend two free birthday drinks, both of which he really enjoyed. It seemed pretty slow, so I can see how at other times the service might not be up to par, but based on my own experience, I would recommend Red 88 on its service alone. Users/emmaleanne
20100120 15:09:03 Let me take this opportunity to thank all of you who have written in your thoughts, concerns, criticisms, and yes, even a little praise about our little restaurant in Downtown Davis. My name is Dante and I am the current manager of Red 88 Noodle Bar. This is my first real effort to participate in the world of Wiki.
I have always found Wiki to be a great way to see points of view other than my own. These points of view are especially welcome in my line of work. Although I spend a large amount of my waking life at Red 88, it seems that there are many things that I have been unaware of. I would like to let all the Red 88 Noodle Bar Davis Wiki participants know that I will try my best to address the issues that are brought up on this page.
I will also be trying to do more with Wiki as I learn how. Users/Red88
20100130 18:54:01 Just had lunch there today. I was absolutely satiated by the end. I had the Thai iced tea, which was delicious, and I love the free refills. It was not as sweet as other Thai teas that I've had in Davis and was one of the better tasting ones for sure. I also ordered crab cream cheese wontons, which were delicious. I love that they are super crispy and not all soft and greasy. The sweet-tasting sauce that comes with them is also super delish! We asked our waiter and she said it was a plum sauce. She also said that none of their food is frozen before cooking, which I could taste. Everything tasted fresh. I ordered the Thai boat soup as my main course and it was delicious. It had just the right amount of broth and lots of noodles to fill you up. The beef was really tasty and extra tender, not chewy and tough at all.
I loved my meal there and have been recommending Red88 to friends. The service was pleasant and fast, and extra helpful.
I would like an expanded menu, although everything I've had so far (chicken chow mein was super good and tasted fresh and not greasy at all) has been tasty. Also, it seems as if every dish on the menu has different ingredients, so at least they are all very different. Users/AlexandraEllis
20100203 20:42:25 I had the duck noodle soup. Barf. The broth was too strong and there was really barely any duck, but more duck skin floating around. Not impressed. Users/JamieParker
20100219 12:24:27 Can anyone give a good approximation of what kind of cuisine Red 88's food falls under? I am stumped. Anyways, the food is basically edible with slightly below average flavor. I would only opt to eat here late at night when no other options are available, since they are open so late. Spiciness is completely subjective, but for me no dish that Red 88 serves is anywhere near spicy. You could pile on the various spicy toppings that are provided but those too will do little to enhance your soup's spiciness. Prices are slightly high in relation to portions.
As already duly noted by others, the Thai Tea is unnecessarily SWEET. Seriously, the thing is basically swirling with sugar. I reckon that if you were to boil Red 88's Thai Tea a bunch of sugar clumps would gather at the bottom! Knock a few refills down and you may start feeling nauseous; good thing you have the often lukewarm and unsavory soup to keep you afloat! Users/blastoff
20100222 19:46:57 They had a happy hour from 8-11 on Saturday night. It was pretty cool, a pitcher of Blue Moon for $11! I would really appreciate it if Manager Dante posted his happy hour times on the wiki (hint, hint). Users/FloridianPlywood
20100403 11:21:28 I appreciate how the prices on the menu are simply the dollar values; it makes the menu very simple. I have been to this restaurant twice now and both times I have left happy. I have only purchased the Duck Noodle Soup on both occasions, and I was satisfied with the service (pretty quick, especially late at night/morning). My only wish is that they would put more duck in the soup. Other than that, I enjoy going here after a night around town. They definitely had the right idea to keep this place open until 3:00 AM after last call at the bars. Users/IdealParadigm
20100405 15:18:20 nbsp The menu is much too limited. I've been to noodle places in San Francisco which have a much wider variety of meats and veggies that they can add to the noodles. I ordered the Veggie Noodle Soup and it was quite unremarkable: bland and boring, even. I had to force myself to finish it. Service was s....l.....o....w...... and I was the only person in the restaurant at the time. I won't be going back. Users/NotSure
20100531 11:09:03 nbsp Their Spicy Silver Noodle is THE BEST. It is not for wimps, either. I don't eat pork, so I had them put tofu in it; the waiter was nice enough to warn me it was made with oyster sauce, which isn't a problem for me, but it was good that he mentioned it. Food came out in minutes and was absolutely beautiful.
Sorry, I don't understand the culture of dumbing down spicy foods. If it says spicy, it should be SPICY, and Red 88 delivers as promised. I haven't tried anything else because the Silver Noodle is so, so good. At $7 I think it's worth it; when I'm not hungry I simply save a portion and it's pretty good the next day.
I like the simple menu and it seems they are willing to put in other meats or switch them out for tofu in the dishes. This is my favorite spicy noodle place in Davis. Users/lunelectronique
20100630 13:53:10 nbsp This place is simply AMAZING!
Users/caitlin22
20100718 11:36:56 nbsp COMPLETELY GROSS, HERE'S WHY: 4 of the 7 dishes were too salty to eat. 1 dish never arrived. 2 dishes had wires in them, presumably from the kitchen scouring pad? It's also cash only, so when I got up to get cash from the ATM they have in the back, the hallway reeked of pot. The wait staff was rude and impatient, and considering how gross everything was, they should have comped us our entire meal rather than having us pay for drinks and appetizers. It's not the money I care about but the principle in this case. Also, their wait staff should have communicated with one another a bit better: after our several complaints, another waitress came to our table to ask us if we wanted a frequent diner punch card. Wow. Big pass on that, thanks. Users/NothingHere
I actually was there when this happened, and I just want to say that the hallway did not smell like pot. I heard you were complaining to the waitress, so I walked into the hallway myself, and I had no idea what you were talking about because I didn't smell anything, and neither did the waitress. Also, you can't give out alcohol for free by law. You consumed all the beer and therefore you pay for the beer. Users/nu1985
20100718 13:38:45 nbsp Red 88 was not that bad, but didn't shine. Still, it's a late-night game so it gets points. Users/StevenDaubert
20100720 08:21:07 nbsp It's a small menu and cash-only, but I have enjoyed both my times eating there. Sometimes, simple is just fine, and they post the menu and pictures on the door. In that situation, I don't think you can complain about the menu: it either suits your fancy or you can move along to the next place (which likely won't have its menu posted). Users/ScottLay
20100723 11:17:37 nbsp I've been here a few times now and very much like it. I wouldn't say it pops into my head as a place to go when hungry, but it is good for a change. I always seem to go when there is little to no crowd, so the wait staff is overly attentive (which is nice sometimes). The food itself is fairly good, but I guess that is personal taste. I have ordered the soups and never thought they were too salty. My only quibble with the noodle soups is that there are just so many noodles; I can never finish it all and sorta feel bad. The popcorn chicken is very tasty and spiced just right to go along with any of their sauces (the Red 88 sauce is probably my favorite). I have yet to have a bad experience here and have in fact had quite a few memorable ones because of the staff (where else would they recommend a shot of whiskey before having a particular dish, and actually be right?). I'd recommend people try this place a couple times because it does seem to have a level of inconsistency (but sometimes places are worth the inconsistency). Users/SunjeetBaadkar
20100726 22:25:37 nbsp Ordered the Lamb Rad Nar tonight to go. The portion matched the price and they gave three different types of spicy sauce to spice up my meal. My order was ready in 10 minutes, so service was fast. The food tasted great: not too salty and definitely tasted like lamb (as all lamb dishes should). I would most definitely go here again. Users/AndrewTran
20100815 20:43:32 nbsp I have come here a few times to pick up some food and the guy who works the bar/register on Friday nights seems really conscientious, always cleaning and making drinks with care. So far I have liked every dish I have tried and will probably be back to try other things as well. Users/hankim
20100924 23:16:06 nbsp The first time I went to Red 88 was the LAST time. When I went, I ordered Thai iced tea (one of my favorite drinks, which I have had served many different ways) and it was the WORST I've ever had. There was nearly no tea in the drink; it was so sweet I couldn't drink it at all. I asked the waiter if he wouldn't mind adding a bit more tea for me because of the sweetness, and he had the audacity to tell me that actually, the tea is the sweetest part, and adding more would only make it sweeter. Sorry, but since when has the tea in Thai iced tea been the SWEET part? Isn't that typically the job of the sweetened condensed milk (or milk/cream in some recipes)? I asked him if he was sure, and he said that he was, and that if I wanted the drink less sweet I should just let the ice melt to water it down. I gave in and let the ice melt, and what did I have in the end? A drink that tasted like sugary, milky water. Brilliant idea. Basically I wasted the $2.50 or so on a drink that I got about one sip of.
Besides the lack of correct information and the gross Thai tea, my actual food was subpar as well. I cannot remember the name of the dish I ordered, but it was something along the lines of pad Thai meets red curry. When I got the dish, however, I realized that it was nothing more than overpriced spaghetti with marinara (and a hint of turmeric) over rice noodles.
Needless to say, I was disappointed with the quality of the food/drinks as well as the waiter that I had, and the food was too expensive for the quality.
My recommendation: skip Red 88 and head over to Sophia's Thai Kitchen instead. It's pretty average, but for the same price as Red 88, you'll get better quality food. Users/AshleyMatson
The tea is the sweetest part of the tea, just so you know. Most places add half-and-half or milk/soymilk, whatever, to make it less sweet. Users/anonymon
20110103 13:34:25 nbsp Went here for the first time today and had the Tofu Rad Nar, it was awesome! I am vegan and there were three vegan choices for me. I will be returning. Users/Nancycat
20110227 22:55:45 nbsp I am in love with Red 88. I have tried a good portion of the menu now and I haven't had anything I don't like. The Karee chicken soup is so delicious and the lamb spaghetti will blow your mind. The service is very friendly and attentive. I am definitely a huge fan. Keep up the great work!!! Users/ddavis
20110309 23:25:49 nbsp Went to Red88 for the third time in as many weeks, and tonight was the first time for dinner. My girlfriend and I chatted with a super awesome bartender/waitress, Rose, who was really friendly (as was every employee I've met there so far). Food came out quickly and it was pretty good. Happy hour has $2 Bud Lights and a dollar off appetizers. It's generally pretty empty and quiet even on weekend nights, which is a nice change from the more crowded/loud places (e.g. Vito's, Sophia's). I think I've found a new bar to grab drinks at! Also, and most importantly, awesome sink in the bathroom! Users/AndrewWalker
20110322 07:50:31 nbsp This is one of my new favorite places! The food is very good quality. The chicken chow mein is amazing! There are pieces of meat in it that you can recognize as coming from a real chicken, and vegetables that you can imagine were actually part of a plant not too long ago. Still crispy and full of life! I don't know about you, but I find that very rare when it comes to chow mein. The Karee Chicken Soup is excellent too. The Asian Style Fries are strange but oddly satisfying if you are in the mood for something of that nature. I love Thai and Asian food in general and I think I am pretty picky, but this place rocks. The service has been very good every time I have been there. They always seem to be slow and quiet, which is nice for customers who enjoy it, but I hope more people will begin to appreciate this little gem. Note: Red88 is no longer completely cash only; they allow you to use a card for orders over $20.00. As far as prices go, it's about 7-9 bucks for a main dish and 5-6 for an appetizer. Pretty good portions, not huge but large enough. They also have specials and happy hour deals. The only tiny gripe I have is that I wish they put a little more broth and a few less noodles in their soups, but hey, it's a noodle bar, so what can you expect? Users/WikiChiki
20110324 13:47:42 nbsp The noodle soups are delicious! They usually have a special of the day up too, which is just an item from the menu, but cheaper. It's worth going to! Users/Ravyn
20110411 17:00:16 nbsp This page seems rather misleading with some of the earlier comments (now that I'm reading them), as the owner seems to have fixed a lot of the problems (small dishes, limited menu, slow service, etc.). I come here quite a lot (either by myself or in a big group), and the servers have always seemed incredibly nice and patient. A lot of times, being part of a big group chatting animatedly (plus background noise), it's hard for the waiters to get everyone's attention to take drink or dish orders, but they're very understanding and polite about it.
The service is pretty quick nowadays, and they're diligent about refilling water, which is nice.
I'm also quite pleased by their appetizers. I really like the cream cheese wontons, which come with some sweet honey(?) sauce, and the Thai style wings (which aren't all that spicy, but are flavorful and quite tasty! I can't handle really spicy foods anyway, so it works out for me).
Also, they now have a stamp card thing where you get $10 off a meal if you fill up 10 stamps (I forget how much each stamp is though, maybe $10?). Users/Ravyn
20110524 01:32:26 nbsp Rudest service ever. The waiter never brought water and took a long time to take our order. He obviously was having a bad night and took it out on his customer. Food was mediocre at best. Overpriced and poor food quality. This place only exists to scam the drunk college students. Only accepts cards over $20, but points you to the ATM. The restaurant, owned by KetMoRee, is in place for the $$, not to serve quality food.
Users/TracyR
20110608 11:49:48 nbsp I'm returning here for another review. I do love this place and I have a new favorite, the Karee Chicken soup. It has just the right amount of spice and sweetness from the coconut milk, and I love the flavors and texture.
I am disturbed by one thing, though. On their drink menu they have the BANGKOK TRAFFICKER. I'm sure they mean it as a joke, and probably in reference to animals, but it can be sorely misinterpreted and it's just not funny. When you look at the state of human trafficking and what happens behind it (child prostitution, rape, etc.), that's not something anyone would drink to. They should really change the name; it's just inappropriate. Users/lunelectronique
20110719 22:09:38 nbsp The free Thai iced tea refills are nice; $3 each! Users/txwong
20110903 19:42:14 nbsp Definitely my favorite bar in Davis. My good buddy and I are regulars and we've filled up 2 or 3 stamp cards already (yes, they're $10/stamp). My favorite drink is the Red Rocket, which tastes exactly like the popsicle I loved as a child. Their Long Island is especially good; nowhere else does one as well as they do. Supposedly their best seller is the FML, the wiki's daddy, which is incredibly strong at a low price for the alcohol content, but I personally don't like it. For food: duck noodles. Every time. Users/MasonMurray
20110917 16:23:17 nbsp I went there yesterday and got the crab wontons. They tasted like frozen pizza rolls with some nasty sauce injected in them. I have to say I have given this place a fighting chance and they never seem to come through. I have tried lunch and after hours; both the food and the service were horrible. If you are going with a group, AVOID this place: they don't do split checks. Avoid this place if you want to take your date there, because many of the staff members have attitudes (owner included). Avoid this place after hours AT ALL COSTS; you will be glad you did. Just go to Burgers and Brew; they have better everything. RED88 should take some notes from B&B; every time I drive past that place it's always slammed. (NO, I DON'T WORK THERE). RED 88 gets 1 out of 5 stars. I give it a 1 because I feel sorry for the owner. Users/Glen,Tanner
20111107 22:41:07 nbsp Do not go here unless you want to be urinated on, exposed to a drunk customer's genitalia, and receive horrible service. Saturday night, I was waiting in line behind two men to use the restroom. They both entered the single bathroom when it opened up. Suddenly, a bouncer ran in and dragged one of the guys out (he was peeing in the sink). I got a full frontal view of his dangling penis, which was still in the process of urinating because the bouncer didn't give him time to zip up. Some of this urine splashed on my dress, as the guy was still trickling while the bouncer dragged him into the hallway. I only received a casual apology from the bouncer when I confronted him, which I felt was not adequate, so I asked for a manager. The owner (who runs both Red88 and KetMoRee) came to my table, where I was sitting with a group of friends. Her apology was also tepid. Not only that, she defended the bouncer, whom she had brought over. My friends and I got upset and began to yell at the two of them. The owner then asked if I wanted to have the customer who urinated on me apologize. She pointed out that clearly, the man was drunk and could not control himself. The bouncer in turn got aggressive and told me, what do you want me to do, get down on my knees and ask for your forgiveness? In my heat of anger, I poured a glass of water onto the floor (my symbolic act of urinating, I guess) and stormed out of the restaurant, knocking a chair down on my way out. I was restrained by another bouncer, but let go.
I went by KetMoRee the next morning to try and speak to the owner. She called me and left a message, so I returned the call. But she put a manager on the line even though she was present. Not only is this horrible business practice, it's cowardly. In response to my comment about being urinated on, the manager basically replied, well, you know, people are quite drunk at that time and things happen.
So, Red 88/KetMoRee, are you saying that having individuals urinate in places other than your restroom is not a big deal during hours when the majority of the people there are intoxicated?
Let's now discuss your bouncers and their clear lack of cognitive ability and oversaturation of testosterone. If the bouncer had any sense, he would have waited until the customer was finished relieving himself before kicking him out. Instead, he tried to make a heroic scene of dragging the bad guy, with his dick flopping everywhere, out of the restaurant, humiliating a customer who had to see a random individual's penis while getting pissed on in the process. His lack of rationale caused not only an indecent exposure, but most likely violated a health code when he dragged a urinating individual in the vicinity of dining customers. Clearly, the bouncers are NOT doing their job if they are allowing over-intoxicated people into the establishment.
The least Red 88/KetMoRee should do is fire the bouncer. I was also shocked to discover the owner had such poor customer-service skills. I strongly believe these two restaurants are still in business because the majority of the clientele are students, and thus the turnover is quick and the circulation of horrible events such as this is minimal.
In the end, what did I get? A half-assed apology and a urine-stained dress. Thanks, Red 88/KetMoRee. I'm never coming back. Ever. YumikoH
This (exact) comment was written on Yelp as well, and a manager replied to it there. If you wish to see the manager's comment, please visit Red 88 Noodle Bar's Yelp page (http://www.yelp.com/biz/red88noodlebardavis).
20120212 22:23:10 nbsp Took our family of five here tonight. We all chose something different, and everyone was pleased with their meal. The service was fast and friendly. Great on a cold night when you want a big bowl of noodle soup, great comfort food. Users/Ginahowey
20120519 22:03:21 nbsp Just been there for a second time. Both times the food was great, as was the service. The sauces they serve with dinner are delicious and I love the Lamb Rad Nar. The Thai iced coffee is better at Thai Canteen, but that doesn't really matter so much. This time I had the Crispy Rad Nar; it was great, and my boyfriend had the Chicken Chow Mein, which was delicious. Haven't really eaten a bad thing there so far! Users/OpticalOut
20120801 12:28:00 nbsp My friend was bragging a while back about Red 88's Chicken Kai Ree Soup and how delicious it is. So, being a pho lover and not really finding a good pho fix in Davis, I was craving a great soup on a cold day. So I ventured in, ordered the Vegetarian Kai Ree soup, and the love affair began. Out came a gigantic bowl of the most beautiful soup I have ever had. I loaded it up with all those lovely sauces they provide with your meal. Oh my goodness!!! This soup makes me feel better about life. It now doesn't matter if it's 50 degrees or over 105 outside, I'm in it to win it. One of Davis's hidden gems. Please give it a try today. Users/krissyp
20130227 18:58:37 nbsp I really like the duck noodle soup: it has decent chunks of duck meat in it, and the broth is wonderful. The crab wontons are also good. The chicken chow mein was alright, wasn't greasy or anything. I've only gotten takeout so far, but it's always been reasonably fast to pick things up. Overall, I would say the standout item is definitely the duck soup; I recommend trying that! Users/KelleyPaugh
20130312 23:42:48 nbsp AWFUL!!! Bad food etiquette. I'm a strict vegetarian and found chicken in my veggie chow mein. The owner is so incompetent she doesn't see the problem in using the same cooking utensils used on raw chicken to spoon my vegetarian dish into a box. She had nothing to say for herself or the cook. Don't eat here. I'm sick to my stomach from their lack of respect for vegetarians; their food handling is that of a food truck. It's crap, don't eat here. I will be reporting Red 88 to the Better Business Bureau because their food practices are unsanitary. Users/ChristinaC
20130824 08:04:18 nbsp I really like the staff here, and the drinks are yummy, so I usually come out. All the cocktails are basically delicious, but the Red Rocket is dangerous since it doesn't taste like alcohol. And I don't mean doesn't taste like alcohol but there's still that burn; I mean it tastes like candy. I don't really eat here aside from appetizers, and even then it's just the fries for me. I've had a few of the noodles and they were good, and I love the additional spice rack, though I think it would be wise to invest in more so the servers don't have to constantly be moving them back and forth. Service can be slow on busy nights, but I've never been displeased here, I think. Users/HannahToru
20140111 19:41:30 nbsp Hello Davis!
Good news: Red 88 has an open position for a front-of-house employee. So hurry on down and fill out an application; we will be conducting interviews for noteworthy applicants as soon as possible. Come join our team! We are located at 223 G St.
Red 88 Management Users/DanielleHarter
20140802 15:54:52 nbsp Mr. Sandeep Dahal broke my Bank of America debit card, verbally harassed me, and said he didn't know why he did it....... Very bad experience at this establishment due to Mr. Sandeep Dahal. He is listed on the Better Business Bureau as the bar manager for KetMoRee. I was visiting Davis on a Saturday, July 26th. I was with a group of about 20 people. We had stopped by KetMoRee nearly 15 minutes before they closed. They were still charging a cover fee to get in, so we decided to go somewhere else. I did not see Mr. Sandeep Dahal until a little later that night, when my group had decided to go to Red 88. I got in line just like everyone else. Mr. Sandeep Dahal allowed everyone in front of me entrance into the restaurant. When it was my turn, I smiled, but he immediately put his hand up to my chest and stopped me. He asked me, what do you want? I said we were all together and we were getting some food. He said I needed to order something and pay for it before he could let me in. I didn't have a menu yet, so he pointed to the small pictures of the plates posted on the windows. I pointed to a number 3 and handed him my Bank of America debit card. My debit card has the San Francisco Giants logo on it. I waited outside as he walked inside the restaurant with my card. I, along with the rest of my friends waiting outside, was very confused. As I waited, a friend who had been allowed to enter before us came out and informed me that that guy just broke your card. I walked inside to where he was and told him I wanted to cancel my order. Mr. Sandeep Dahal returned my card in two pieces and said your money is no good here. Get out. I asked him why he did that and he responded with Because you're a ing hole and the Giants ing suck! I informed Mr. Dahal that he does not know me and I do not know him. I'm not even from Davis. He asked what I wanted once again. He asked if I wanted money. I said I didn't want any money; I just wanted to know why he did that, and I wanted an apology. Instead, he continued to verbally harass me about the logo on the debit card along with calling me an hole. At this point we were outside. I had my hands behind my back and against the wall so as not to appear threatening to Mr. Dahal, as he was very agitated. I asked him to calm down and he told me to shut up. Some other employees, including Mr. Dahal's brother, came outside. Shortly after, Mr. Sandeep Dahal noticed the owner's son approaching us. (I know this to be the owner's son from witnesses.) Mr. Dahal immediately walked over to him and told him not to listen to us and that we were just drunk. (Which was not true, Mr. Dahal!) They walked back across the street to KetMoRee without acknowledging us any further. I attempted to call the restaurant the next day. He was not available. I left my name and phone number. To this day I have not received a call. I paid $5 for a new debit card. I'm still waiting for it to arrive. I have talked to my bank about the situation. I will also be filing a complaint with Mastercard as a merchant violation. Never again will I, nor my friends, step foot inside of Red 88 or KetMoRee. I don't know why Mr. Sandeep Dahal became upset with me. Users/MartinB
|
(* http://ilyasergey.net/pnp/ *)
(** * Excerpt from "Elements of Hoare Type Theory" *)
(** * Elements of Hoare Type Theory *)
Require Import ssreflect ssrbool ssrnat eqtype seq ssrfun.
Add LoadPath "./../htt".
Require Import prelude pred pcm unionmap heap heaptac
stmod stsep stlog stlogR.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
(** ** Proof of the factorial procedure *)
(* ** Verifying the factorial procedure mechanically *)
(** The pure functional factorial *)
Fixpoint fact_pure n := if n is n'.+1 then n * (fact_pure n') else 1.
(** The loop invariant *)
Definition fact_inv (n acc : ptr) (N : nat) h : Prop :=
exists n' a': nat,
[/\ h = n :-> n' \+ acc :-> a' &
(fact_pure n') * a' = fact_pure N].
(** Proof of the accumulator part of the factorial *)
Definition fact_acc_tp n acc :=
unit -> {N},
STsep (fact_inv n acc N,
[vfun (res : nat) h => fact_inv n acc N h /\ res = fact_pure N]).
Program Definition fact_acc (n acc : ptr): fact_acc_tp n acc :=
Fix (fun (loop : fact_acc_tp n acc) (_ : unit) =>
Do (a1 <-- !acc;
n' <-- !n;
if n' == 0 then
ret a1
else
acc ::= a1 * n';;
n ::= n' - 1;;
loop tt)).
Next Obligation.
apply: ghR => i N. (* Eliminate the conseq. *)
case=> n' [a'] []. move=> -> Hi _. (* case=> n' [a'][->{i}] Hi _. *)
Search (verify _ _ _).
(* Process the two (x <-- !x; e x) reads. *)
do 2! [apply: bnd_readR => /=]. (* heval. *)
case X: (n' == 0).
(* (n' == 0) = true *)
Search _ (verify _ _ _) (ret _).
(* Transform (ret v). *)
- apply: val_ret => /= _.
(* Prove fact_inv n acc N (n :-> n' \+ acc :-> a') /\ a' = fact_pure N. *)
move/eqP : X => Z; subst n'. (* Turn hypothesis X into n' = 0. *)
rewrite mul1n in Hi.
split.
+ exists 0. exists a'. rewrite /= mul1n. by [].
+ by [].
(* (n' == 0) = false *)
(* For a sequence of assignments, heval suffices. *)
- heval.
Check (gh_ex N).
(* This looks like it merely turns (loop tt) into (Do loop tt), but it also
turns ({N}, STsep (_, _)) into (STbin _), pushing N out. *)
Check (loop tt).
Check (Do loop tt).
apply: (gh_ex N).
(* Extract the loop invariant from Do loop tt. *)
apply: val_doR => // _.
(* Prove fact_inv n acc N (n :-> (n' - 1) \+ acc :-> (a' * n')). *)
rewrite /fact_inv. exists (n' - 1). exists (a' * n').
split => //=.
rewrite -Hi=> {Hi}.
rewrite [a' * _]mulnC mulnA [_ * n']mulnC.
case: n' X => //= n' _.
by rewrite subn1 -pred_Sn.
Qed.
(** The overall proof *)
Definition fact_tp N :=
STsep ([Pred h | h = Unit],
[vfun res h => res = fact_pure N /\ h = Unit]).
Program Definition fact (N : nat) : fact_tp N :=
Do (n <-- alloc N;
acc <-- alloc 1;
res <-- fact_acc n acc tt;
dealloc n;;
dealloc acc;;
ret res).
Next Obligation.
rewrite /conseq => /=.
move=> i ->. (* the rewrite uses i = Unit *)
heval=> n. (* put n <-- alloc N into the heap *)
heval=> acc. (* put acc <-- alloc 1 into the heap *)
(* x \+ y to y \+ x *)
(* x \+ Unit to x *)
rewrite joinC unitR.
Search _ (verify _ _ _).
(* Transform (x <-- e1; e2 x). *)
apply: bnd_seq => /=.
(* Turn (fact_acc n acc tt) into (Do fact_acc n acc tt). *)
apply: (gh_ex N).
(* (Do fact_acc n acc tt) *)
apply: val_doR => //.
(* Proof of fact_inv n acc N (n :-> N \+ acc :-> 1) *)
- by exists N, 1; rewrite muln1.
(* *)
- move=> x m.
case.
case=> n'.
case=> a'.
case=> H1 _ H2 _.
(* move=> x m [] [n'] [a'] [] H1 _ H2 _.
or
move=> x m [[n'] [a'] [H1] _ H2 _]. *)
rewrite H1 H2.
by heval. (* e1;; e2;; ret _ *)
Qed.
(* END *)
|
function u = acsc(a)
%ACSC Slope inverse cosecant acsc(a)
%
% written 12/06/98 S.M. Rump
% modified 04/04/04 S.M. Rump set round to nearest for safety
% modified 04/06/05 S.M. Rump rounding unchanged
% modified 09/28/08 S.M. Rump check for rounding to nearest improved
% modified 08/26/12 S.M. Rump global variables removed
%
e = 1e-30;
if 1+e==1-e % fast check for rounding to nearest
rndold = 0;
else
rndold = getround;
setround(0)
end
INTLAB_SLOPE = getappdata(0,'INTLAB_SLOPE');
u = a;
u.r = acsc(a.r);
indexc = 1:INTLAB_SLOPE.NUMVAR;
indexr = 2:INTLAB_SLOPE.NUMVAR+1;
Xxs = hull(a.r(:,indexc),a.r(:,indexr));
Index = 1:size(a.r.inf,1);
index = all( a.r.sup<=0 , 2);     % rows with arguments entirely on the negative branch (x <= -1)
if any(index)
aindex.r = a.r(index,:);
aindex.s = a.s(index,:);
u.s(index,:) = ...
slopeconvexconcave('acsc','-1./(abs(%).*sqrt(sqr(%)-1))',aindex,0);
Index(index) = 0;
end
index = all( a.r.inf>=0 , 2);     % rows with arguments entirely on the positive branch (x >= 1)
if any(index)
aindex.r = a.r(index,:);
aindex.s = a.s(index,:);
u.s(index,:) = ...
slopeconvexconcave('acsc','-1./(abs(%).*sqrt(sqr(%)-1))',aindex,1);
Index(index) = 0;
end
% remaining rows (sign not determined): use the derivative formula directly
if any(Index)
Index( Index==0 ) = [];
Xxs = Xxs(Index);
u.s(Index,:) = - a.s(Index,:) ./ ( abs(Xxs) .* sqrt( sqr(Xxs)-1 ) );
end
if rndold
setround(rndold)
end
|
The University of Brighton’s School of Architecture and Design is delighted to announce that its annual graduate show will take place on Hastings Pier.
Hastings Pier, recently awarded the RIBA Stirling Prize, hosts a wide range of activities. This year's student show will coincide with a monthly food market, outdoor performances of A Midsummer Night's Dream and the region's annual 1066 cycle festival. Saturday will include talks by designers and architects, including the Pier's architect.
Full details on the show can be found here. |
module Auto.Prelude where
open import Agda.Primitive public
using (Level)
data ⊥ : Set where
¬ : Set → Set
¬ A = A → ⊥
⊥-e : (A : Set) → ⊥ → A
⊥-e A ()
record ⊤ : Set where
record _∧_ (A B : Set) : Set where
constructor ∧-i
field fst : A
snd : B
data _∨_ (A B : Set) : Set where
∨-i₁ : A → A ∨ B
∨-i₂ : B → A ∨ B
∨-e : (A B C : Set) → A ∨ B → (A → C) → (B → C) → C
∨-e A B C (∨-i₁ x) h₁ h₂ = h₁ x
∨-e A B C (∨-i₂ x) h₁ h₂ = h₂ x
data Π (A : Set) (F : A → Set) : Set where
fun : ((a : A) → F a) → Π A F
record Σ (X : Set) (P : X → Set) : Set where
constructor Σ-i
field wit : X
prf : P wit
data ℕ : Set where
zero : ℕ
succ : ℕ → ℕ
_+_ : ℕ → ℕ → ℕ
zero + n = n
succ m + n = succ (m + n)
data Fin : ℕ → Set where
zero : ∀ {n} → Fin (succ n)
suc : ∀ {n} → Fin n → Fin (succ n)
data List (X : Set) : Set where
[] : List X
_∷_ : X → List X → List X
_++_ : {X : Set} → List X → List X → List X
[] ++ ys = ys
(x ∷ xs) ++ ys = x ∷ (xs ++ ys)
data Vec (X : Set) : ℕ → Set where
[] : Vec X zero
_∷_ : ∀ {n} → X → Vec X n → Vec X (succ n)
-- -----------------------------------
data _≡_ {a} {A : Set a} (x : A) : A → Set where
refl : x ≡ x
subst : {i j : Level} {X : Set i} → (P : X → Set j) → (x y : X) → y ≡ x → P x → P y
subst P x .x refl h = h
trans : ∀ {a} {A : Set a} → {x y z : A} → x ≡ y → y ≡ z → x ≡ z
trans refl refl = refl
sym : ∀ {a} {A : Set a} → {x y : A} → x ≡ y → y ≡ x
sym refl = refl
cong : ∀ {a b} {A : Set a} {B : Set b}
(f : A → B) {x y} → x ≡ y → f x ≡ f y
cong f refl = refl
data _IsRelatedTo_ {a : Level} {Carrier : Set a} (x y : Carrier) : Set a where
relTo : (x∼y : x ≡ y) → x IsRelatedTo y
begin_ : {a : Level} {Carrier : Set a} → {x y : Carrier} → x IsRelatedTo y → x ≡ y
begin relTo x∼y = x∼y
_∎ : {a : Level} {Carrier : Set a} → (x : Carrier) → x IsRelatedTo x
_∎ _ = relTo refl
_≡⟨_⟩_ : {a : Level} {Carrier : Set a} → (x : Carrier) {y z : Carrier} → x ≡ y → y IsRelatedTo z → x IsRelatedTo z
_ ≡⟨ x∼y ⟩ relTo y∼z = relTo (trans x∼y y∼z)
-- -----------------------------------
|
\section*{Multi-class Hinge Loss}
One vs. One | One vs. All | Maintain $w^{(1)},..,w^{(c)}$\hfil
$l_{MC-H}(w^{(1)},..,w^{(c)};x,y) = \operatorname{max}\Big(0,\ 1+\underset{j\in\{1,..,y-1,y+1,..,c\}}{\operatorname{max}} w^{(j)T} x - w^{(y)T} x\Big)$
#include <boost/connector/entity/entity_key.hpp>
namespace boost::connector
{
bool
operator==(entity_key const &l, entity_key const &r)
{
const auto pl = l.impl_.get();
const auto pr = r.impl_.get();
if (pl == pr)
return true;
if (!pl || !pr)
return false;
return pl->test_equal(pr->get_details());
}
std::size_t
hash_value(entity_key const &arg)
{
std::size_t seed = 0;
if (arg.impl_)
seed = arg.impl_->compute_hash();
return seed;
}
} // namespace boost::connector |
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE ConstraintKinds #-}
module DSLsofMath.Algebra where
import qualified Data.Ratio
import qualified Prelude
import Prelude (Double, Rational, Int, Integer, Bool(..), otherwise,
Foldable(foldr), (.), const, (==), (<), error)
import Data.Complex
-------------------------------
-- Classes
infixl 6 -
infixl 6 +
infixl 7 *
infixl 7 /
class Additive a where
zero :: a
(+) :: a -> a -> a
sum :: (Foldable t, Additive a) => t a -> a
sum = foldr (+) zero
times :: Additive a => Integer -> a -> a
times n0 = if n0 < 0 then error "Algebra.Classes.times: negative number of times"
else go n0
where go 0 _ = zero
go 1 x = x
go n x = if r == 0 then twoy else x + twoy
where (m,r) = n `Prelude.divMod` 2
y = go m x
twoy = y+y
(-) :: AddGroup a => a -> a -> a
x - y = x + negate y
class Additive a => AddGroup a where
negate :: a -> a
mult :: AddGroup a => Integer -> a -> a
mult n x = if n < 0 then negate (times (negate n) x) else times n x
class Multiplicative a where
one :: a
(*) :: a -> a -> a
two :: (Additive a, Multiplicative a) => a
two = one+one
(^+) :: Multiplicative a => a -> Int -> a
x0 ^+ n0 = if n0 < 0 then error "Algebra.Classes.^+: negative exponent"
else go n0 x0
where go 0 _ = one
go 1 x = x
go n x = if r == 0 then y2 else x * y2
where (m,r) = n `Prelude.divMod` 2
y = go m x
y2 = y * y
type Ring a = (AddGroup a, Multiplicative a)
fromInteger :: Ring a => Integer -> a
fromInteger n = mult n one
class Multiplicative a => MulGroup a where
{-# MINIMAL (recip | (/)) #-}
recip :: a -> a
recip x = one / x
(/) :: a -> a -> a
x / y = x * recip y
(^) :: MulGroup a => a -> Int -> a
a ^ b | b < 0 = recip (a ^+ (negate b))
| otherwise = (a ^+ b)
type Field a = (Ring a, MulGroup a)
fromRational :: Field a => Data.Ratio.Ratio Integer -> a
fromRational x = fromInteger (Data.Ratio.numerator x) / fromInteger (Data.Ratio.denominator x)
class Field a => Algebraic a where
sqrt :: a -> a
-- normally it should be "Algebraic" instead of "Field" but we're lazy like that.
-- (Also Transcendental is a terrible name; taken from the "numeric prelude".)
class Field a => Transcendental a where
pi :: a
exp :: a -> a
sin :: a -> a
cos :: a -> a
cosh, sinh :: Transcendental a => a -> a
cosh x = (exp x + exp (negate x))/two
sinh x = (exp x - exp (negate x))/two
---------------------------------
-- Instances
instance Additive Int where (+) = (Prelude.+); zero = 0
instance Additive Integer where (+) = (Prelude.+); zero = 0
instance Additive Rational where (+) = (Prelude.+); zero = 0
instance Additive Double where (+) = (Prelude.+); zero = 0
instance AddGroup Int where negate = Prelude.negate
instance AddGroup Integer where negate = Prelude.negate
instance AddGroup Rational where negate = Prelude.negate
instance AddGroup Double where negate = Prelude.negate
instance Multiplicative Int where (*) = (Prelude.*); one = 1
instance Multiplicative Integer where (*) = (Prelude.*); one = 1
instance Multiplicative Rational where (*) = (Prelude.*); one = 1
instance Multiplicative Double where (*) = (Prelude.*); one = 1
instance MulGroup Rational where (/) = (Prelude./); recip = Prelude.recip
instance MulGroup Double where (/) = (Prelude./); recip = Prelude.recip
lift0 :: a -> (x->a)
lift1 :: (a->b) -> (x->a) -> (x->b)
lift2 :: (a->b->c) -> (x->a) -> (x->b) -> (x->c)
lift0 = const
lift1 = (.)
lift2 op2 f g = \x -> op2 (f x) (g x)
instance Additive a => Additive (x -> a) where (+) = lift2 (+); zero = lift0 zero
instance Multiplicative a => Multiplicative (x -> a) where (*) = lift2 (*); one = lift0 one
instance AddGroup a => AddGroup (x -> a) where negate = lift1 negate
instance MulGroup a => MulGroup (x -> a) where recip = lift1 recip
instance Algebraic a => Algebraic (x -> a) where sqrt = lift1 sqrt
instance Transcendental a => Transcendental (x -> a) where
pi = lift0 pi; sin = lift1 sin; cos = lift1 cos; exp = lift1 exp
instance Algebraic Double where sqrt = Prelude.sqrt
instance Transcendental Double where
pi = Prelude.pi; sin = Prelude.sin; cos = Prelude.cos; exp = Prelude.exp
instance Additive a => Additive (Complex a) where (+) = addC; zero = zeroC
instance Ring a => Multiplicative (Complex a) where (*) = mulC; one = oneC
instance AddGroup a => AddGroup (Complex a) where negate = negateC
instance Field a => MulGroup (Complex a) where recip = recipC
addC :: Additive a => Complex a -> Complex a -> Complex a
addC (x :+ y) (x' :+ y') = (x + x') :+ (y+y')
negateC :: AddGroup a => Complex a -> Complex a
negateC (a :+ b) = negate a :+ negate b
mulC :: Ring a => Complex a -> Complex a -> Complex a
mulC (a :+ b) (a' :+ b') = (a * a' - b * b') :+ (a * b' + b * a')
toC :: Additive a => a -> Complex a
toC x = x :+ zero
zeroC :: Additive a => Complex a
zeroC = toC zero
oneC :: (Additive a, Multiplicative a) => Complex a
oneC = toC one
recipC :: Field a => Complex a -> Complex a
recipC (a :+ b) = (a / m) :+ (negate b / m)
where m = a*a + b*b
instance (Algebraic a, Prelude.RealFloat a) => Algebraic (Complex a) where
sqrt = Prelude.sqrt
instance (Transcendental a) => Transcendental (Complex a) where
pi = piC; exp = expC; sin = sinC; cos = cosC
piC :: Transcendental a => Complex a
piC = toC pi
expC, sinC, cosC, sinhC, coshC :: Transcendental a => Complex a -> Complex a
expC (x:+y) = expx * cos y :+ expx * sin y where expx = exp x
sinC (x:+y) = sin x * cosh y :+ cos x * sinh y
cosC (x:+y) = cos x * cosh y :+ negate (sin x * sinh y)
sinhC (x:+y) = cos y * sinh x :+ sin y * cosh x
coshC (x:+y) = cos y * cosh x :+ sin y * sinh x
---------------------------------
-- For RebindableSyntax
ifThenElse :: Bool -> p -> p -> p
ifThenElse c a b = if c then a else b
-------------------------------
-- Typesetting aliases.
neg :: AddGroup a => a -> a
neg = negate -- |neg| is used to typeset unary minus as a shorter dash, closer to its argument
frac :: MulGroup a => a -> a -> a
frac = (/) -- |frac| is used to typeset a fraction (more compactly than |x / y|)
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
Factorial
-/
import data.nat.div
namespace nat
definition fact : nat → nat
| 0 := 1
| (succ n) := (succ n) * fact n
lemma fact_zero : fact 0 = 1 :=
rfl
lemma fact_one : fact 1 = 1 :=
rfl
lemma fact_succ (n) : fact (succ n) = succ n * fact n :=
rfl
lemma fact_pos : ∀ n, fact n > 0
| 0 := zero_lt_one
| (succ n) := mul_pos !succ_pos (fact_pos n)
lemma fact_ne_zero (n : ℕ) : fact n ≠ 0 := ne_of_gt !fact_pos
lemma dvd_fact : ∀ {m n}, m > 0 → m ≤ n → m ∣ fact n
| m 0 h₁ h₂ := absurd h₁ (not_lt_of_ge h₂)
| m (succ n) h₁ h₂ :=
begin
rewrite fact_succ,
cases (eq_or_lt_of_le h₂) with he hl,
{subst m, apply dvd_mul_right},
{have aux : m ∣ fact n, from dvd_fact h₁ (le_of_lt_succ hl),
apply dvd_mul_of_dvd_right aux}
end
lemma fact_le {m n} : m ≤ n → fact m ≤ fact n :=
begin
induction n with n ih,
{intro h,
have meq0 : m = 0, from eq_zero_of_le_zero h,
subst m},
{intro m_le_succ_n,
cases (eq_or_lt_of_le m_le_succ_n) with h₁ h₂,
{subst m},
{transitivity (fact n),
exact ih (le_of_lt_succ h₂),
rewrite [fact_succ, -one_mul (fact n) at {1}],
exact nat.mul_le_mul (succ_le_succ (zero_le n)) !le.refl}}
end
end nat
|
# Upbit Open API
#
# ## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [[email protected]]
#
# OpenAPI spec version: 1.0.0
# Contact: [email protected]
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' TradeTicks Class
#'
#' @field market
#' @field trade_date_utc
#' @field trade_time_utc
#' @field timestamp
#' @field trade_price
#' @field trade_volume
#' @field prev_closing_price
#' @field change_price
#' @field ask_bid
#' @field sequential_id
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
TradeTicks <- R6::R6Class(
'TradeTicks',
public = list(
`market` = NULL,
`trade_date_utc` = NULL,
`trade_time_utc` = NULL,
`timestamp` = NULL,
`trade_price` = NULL,
`trade_volume` = NULL,
`prev_closing_price` = NULL,
`change_price` = NULL,
`ask_bid` = NULL,
`sequential_id` = NULL,
initialize = function(`market`, `trade_date_utc`, `trade_time_utc`, `timestamp`, `trade_price`, `trade_volume`, `prev_closing_price`, `change_price`, `ask_bid`, `sequential_id`){
if (!missing(`market`)) {
stopifnot(is.character(`market`), length(`market`) == 1)
self$`market` <- `market`
}
if (!missing(`trade_date_utc`)) {
stopifnot(is.character(`trade_date_utc`), length(`trade_date_utc`) == 1)
self$`trade_date_utc` <- `trade_date_utc`
}
if (!missing(`trade_time_utc`)) {
stopifnot(is.character(`trade_time_utc`), length(`trade_time_utc`) == 1)
self$`trade_time_utc` <- `trade_time_utc`
}
if (!missing(`timestamp`)) {
self$`timestamp` <- `timestamp`
}
if (!missing(`trade_price`)) {
stopifnot(is.numeric(`trade_price`), length(`trade_price`) == 1)
self$`trade_price` <- `trade_price`
}
if (!missing(`trade_volume`)) {
stopifnot(is.numeric(`trade_volume`), length(`trade_volume`) == 1)
self$`trade_volume` <- `trade_volume`
}
if (!missing(`prev_closing_price`)) {
stopifnot(is.numeric(`prev_closing_price`), length(`prev_closing_price`) == 1)
self$`prev_closing_price` <- `prev_closing_price`
}
if (!missing(`change_price`)) {
stopifnot(is.numeric(`change_price`), length(`change_price`) == 1)
self$`change_price` <- `change_price`
}
if (!missing(`ask_bid`)) {
stopifnot(is.character(`ask_bid`), length(`ask_bid`) == 1)
self$`ask_bid` <- `ask_bid`
}
if (!missing(`sequential_id`)) {
self$`sequential_id` <- `sequential_id`
}
},
toJSON = function() {
TradeTicksObject <- list()
if (!is.null(self$`market`)) {
TradeTicksObject[['market']] <- self$`market`
}
if (!is.null(self$`trade_date_utc`)) {
TradeTicksObject[['trade_date_utc']] <- self$`trade_date_utc`
}
if (!is.null(self$`trade_time_utc`)) {
TradeTicksObject[['trade_time_utc']] <- self$`trade_time_utc`
}
if (!is.null(self$`timestamp`)) {
TradeTicksObject[['timestamp']] <- self$`timestamp`
}
if (!is.null(self$`trade_price`)) {
TradeTicksObject[['trade_price']] <- self$`trade_price`
}
if (!is.null(self$`trade_volume`)) {
TradeTicksObject[['trade_volume']] <- self$`trade_volume`
}
if (!is.null(self$`prev_closing_price`)) {
TradeTicksObject[['prev_closing_price']] <- self$`prev_closing_price`
}
if (!is.null(self$`change_price`)) {
TradeTicksObject[['change_price']] <- self$`change_price`
}
if (!is.null(self$`ask_bid`)) {
TradeTicksObject[['ask_bid']] <- self$`ask_bid`
}
if (!is.null(self$`sequential_id`)) {
TradeTicksObject[['sequential_id']] <- self$`sequential_id`
}
TradeTicksObject
},
fromJSON = function(TradeTicksJson) {
TradeTicksObject <- jsonlite::fromJSON(TradeTicksJson)
if (!is.null(TradeTicksObject$`market`)) {
self$`market` <- TradeTicksObject$`market`
}
if (!is.null(TradeTicksObject$`trade_date_utc`)) {
self$`trade_date_utc` <- TradeTicksObject$`trade_date_utc`
}
if (!is.null(TradeTicksObject$`trade_time_utc`)) {
self$`trade_time_utc` <- TradeTicksObject$`trade_time_utc`
}
if (!is.null(TradeTicksObject$`timestamp`)) {
self$`timestamp` <- TradeTicksObject$`timestamp`
}
if (!is.null(TradeTicksObject$`trade_price`)) {
self$`trade_price` <- TradeTicksObject$`trade_price`
}
if (!is.null(TradeTicksObject$`trade_volume`)) {
self$`trade_volume` <- TradeTicksObject$`trade_volume`
}
if (!is.null(TradeTicksObject$`prev_closing_price`)) {
self$`prev_closing_price` <- TradeTicksObject$`prev_closing_price`
}
if (!is.null(TradeTicksObject$`change_price`)) {
self$`change_price` <- TradeTicksObject$`change_price`
}
if (!is.null(TradeTicksObject$`ask_bid`)) {
self$`ask_bid` <- TradeTicksObject$`ask_bid`
}
if (!is.null(TradeTicksObject$`sequential_id`)) {
self$`sequential_id` <- TradeTicksObject$`sequential_id`
}
},
toJSONString = function() {
sprintf(
'{
  "market": "%s",
  "trade_date_utc": "%s",
  "trade_time_utc": "%s",
  "timestamp": %s,
  "trade_price": %g,
  "trade_volume": %g,
  "prev_closing_price": %g,
  "change_price": %g,
  "ask_bid": "%s",
  "sequential_id": %s
}',
self$`market`,
self$`trade_date_utc`,
self$`trade_time_utc`,
self$`timestamp`,
self$`trade_price`,
self$`trade_volume`,
self$`prev_closing_price`,
self$`change_price`,
self$`ask_bid`,
self$`sequential_id`
)
},
fromJSONString = function(TradeTicksJson) {
TradeTicksObject <- jsonlite::fromJSON(TradeTicksJson)
self$`market` <- TradeTicksObject$`market`
self$`trade_date_utc` <- TradeTicksObject$`trade_date_utc`
self$`trade_time_utc` <- TradeTicksObject$`trade_time_utc`
self$`timestamp` <- TradeTicksObject$`timestamp`
self$`trade_price` <- TradeTicksObject$`trade_price`
self$`trade_volume` <- TradeTicksObject$`trade_volume`
self$`prev_closing_price` <- TradeTicksObject$`prev_closing_price`
self$`change_price` <- TradeTicksObject$`change_price`
self$`ask_bid` <- TradeTicksObject$`ask_bid`
self$`sequential_id` <- TradeTicksObject$`sequential_id`
}
)
)
|
module Conway
import Data.Vect
import Data.Fin
-- Local Variables:
-- idris-load-packages: ("prelude" "effects" "contrib" "base")
-- End:
board : Nat -> Nat -> Type
board n m = Vect n (Vect m Bool)
printChar : Bool -> String
printChar False = "x"
printChar True = "o"
printRow : Vect m Bool -> String
printRow [] = "\n"
printRow (x :: xs) = (printChar x) ++ (printRow xs)
printBoard : board n m -> String
printBoard = concat . map printRow
listFins : (n : Nat) -> Vect n (Fin n)
listFins Z = []
listFins (S k) = FZ :: (map FS (listFins k))
boardAux : Fin n -> Vect m (Fin n, Fin m)
boardAux {n} {m} f = map (\x => (f,x)) (listFins m)
boardCoords : (n : Nat) -> (m : Nat) -> Vect n (Vect m (Fin n, Fin m))
boardCoords n m = map boardAux (listFins n)
promote : Fin k -> Fin (S k)
promote FZ = FZ
promote (FS x) = (FS (promote x))
safeDec : Fin n -> Fin n
safeDec FZ = FZ
safeDec (FS x) = promote x
maxFin : (n : Nat) -> Fin (S n)
maxFin Z = FZ
maxFin (S k) = FS (maxFin k)
-- maxFin returns the maximum element of the finite set
safeInc : Fin n -> Fin n
safeInc {n = Z} FZ impossible
safeInc {n = Z} (FS _) impossible
safeInc {n = (S k)} f = case (strengthen f) of
Left same => same
Right f' => (FS f')
myStrengthen : Fin (S n) -> Either (Fin (S n)) (Fin n)
myStrengthen {n = Z} f = Left FZ
myStrengthen {n = (S k)} FZ = Right FZ
myStrengthen {n = (S k)} (FS x) = case (myStrengthen x) of
Left same => Left (FS x)
Right decked => Right (FS decked)
lastV : Vect (S n) a -> a
lastV (x :: []) = x
lastV (x :: (y :: xs)) = lastV (y :: xs)
filterV : (a -> Bool) -> Vect n a -> (m ** (Vect m a))
filterV f [] = (0 ** [])
filterV f (x :: xs) = case (f x) of
True => let (n ** xs') = filterV f xs in (S n ** (x :: xs'))
False => filterV f xs
nubV : Eq a => Vect n a -> (m ** (Vect m a))
nubV [] = (Z ** [])
nubV (x :: xs) = let (n ** xs') = filterV (\y => y /= x) xs
(n' ** xs'') = nubV xs'
in (S n' ** (x :: xs''))
neighCoords : Fin n -> Fin m -> List (Fin n, Fin m)
neighCoords fx fy = [(safeInc fx , fy),
(safeInc fx , safeInc fy),
(fx , safeInc fy),
(safeDec fx, safeInc fy),
(safeInc fx , safeDec fy),
(safeDec fx, fy),
(fx , safeDec fy),
(safeDec fx, safeDec fy)]
ixer : board n m -> (Fin n, Fin m) -> Lazy Bool
ixer b (fx, fy) = index fy (index fx b)
rhs : Vect n (Vect m (Fin n, Fin m)) -> (b : Vect n (Vect m Bool)) -> (Fin n, Fin m) -> Bool
rhs coords b f = let neigh = uncurry neighCoords f in and $ map (ixer b) neigh
gameStep : board n m -> board n m
gameStep {n} {m} b = let coords = boardCoords n m in map (map (rhs coords b)) coords
repeatF : Nat -> (a -> a) -> a -> a
repeatF Z f = id
repeatF (S k) f = f . (repeatF k f)
-- this is way too specific, but I'm getting bored
placeBoard : board n m -> Fin n -> Fin m -> board n m
placeBoard b fx fy = updateAt fx (\v => updateAt fy (const True) v) b
blankBoard : board n m
blankBoard {n} {m} = map (map (const False)) (boardCoords n m)
mkBoard : (List (Fin n, Fin m)) -> board n m
mkBoard l = foldr (\f,b => uncurry (placeBoard b) f) blankBoard l
|
section \<open>Dictionary Construction\<close>
theory Introduction
imports Main
begin
subsection \<open>Introduction\<close>
text \<open>
Isabelle's logic features \emph{type classes}~\<^cite>\<open>"haftmann2007typeclasses" and "wenzel1997typeclasses"\<close>.
These are built into the kernel and are used extensively in theory developments.
The existing \emph{code generator}, when targeting Standard ML, performs the well-known dictionary
construction or \emph{dictionary translation}~\<^cite>\<open>"haftmann2010codegeneration"\<close>.
This works by replacing type classes with records, instances with values, and occurrences with
explicit parameters.
Haftmann and Nipkow give a pen-and-paper correctness proof of this construction
\cite[\<open>\<section>\<close>4.1]{haftmann2010codegeneration}, based on a notion of \emph{higher-order rewrite
systems}.
The resulting theorem then states that any well-typed term is reduction-equivalent before and after
class elimination.
In this work, the dictionary construction is performed in a certified fashion, that is, the
equivalence is a theorem inside the logic.
\<close>
subsection \<open>Encoding classes\<close>
text \<open>
The choice of representation of a dictionary itself is straightforward: We model it as a
@{command datatype}, along with functions returning values of that type. The alternative here
would have been to use the @{command record} package. The obvious advantage is that we could
easily model subclass relationships through record inheritance. However, records do not support
multiple inheritance. Since records offer no advantage over datatypes in that regard, we opted for
the more modern @{command datatype} package.
\<close>
text \<open>Consider the following example:\<close>
class plus =
fixes plus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"
text \<open>
This will get translated to a @{command datatype} with a single constructor taking a single
argument:
\<close>
datatype 'a dict_plus =
mk_plus (param_plus: "'a \<Rightarrow> 'a \<Rightarrow> 'a")
text \<open>A function using the @{class plus} constraint:\<close>
definition double :: "'a::plus \<Rightarrow> 'a" where
"double x = plus x x"
definition double' :: "'a dict_plus \<Rightarrow> 'a \<Rightarrow> 'a" where
"double' dict x = param_plus dict x x"
subsection \<open>Encoding instances\<close>
text \<open>
A more controversial design decision is how to represent dictionary certificates. For example,
given a value of type @{typ "nat dict_plus"}, how do we know that this is a faithful representation
of the @{class plus} instance for @{typ nat}?
\<close>
text \<open>
\<^item> Florian Haftmann proposed a ``shallow encoding''. It works by exploiting the internal treatment
of constants with sort constraints in the Isabelle kernel. Constants themselves do not carry
sort constraints, only their definitional equations. The fact that a constant only appears with
these constraints on the surface of the system is a feature of type inference.
Instead, we can instruct the system to ignore these constraints. However, any attempt at
``hiding'' the constraints behind a type definition ultimately does not work: The nonemptiness
proof requires a witness of a valid dictionary for an arbitrary, but fixed type @{typ 'a}, which
is of course not possible (see \<open>\<section>\<close>\ref{sec:impossibility} for details).
\<^item> The certificates contain the class axioms directly. For example, the @{class semigroup_add}
class requires @{term "(a + b) + c = a + (b + c)"}.
Translated into a definition, this would look as follows:
@{term
"cert_plus dict \<longleftrightarrow>
(\<forall>a b c. param_plus dict (param_plus dict a b) c = param_plus dict a (param_plus dict b c))"}
Proving that instances satisfy this certificate is trivial.
However, the equality proof of \<open>f'\<close> and \<open>f\<close> is impossible: they are simply not equal in general.
Nothing would prevent someone from defining an alternative dictionary using multiplication
instead of addition and the certificate would still hold; but obviously functions using
@{const plus} on numbers would expect addition.
Intuitively, this makes sense: the above notion of ``certificate'' establishes no connection
between original instantiation and newly-generated dictionaries.
Instead of proving equality, one would have to ``lift'' all existing theorems over the old
constants to the new constants.
\<^item> In order for equality between new and old constants to hold, the certificate needs to capture
that the dictionary corresponds exactly to the class constants. This is achieved by the
representation below.
It literally states that the fields of the dictionary are equal to the class constants.
The condition of the resulting equation can only be instantiated with dictionaries corresponding
to existing class instances. This constitutes a \<^emph>\<open>closed world\<close> assumption, i.e., callers of
generated code may not invent own instantiations.
\<close>
definition cert_plus :: "'a::plus dict_plus \<Rightarrow> bool" where
"cert_plus dict \<longleftrightarrow> (param_plus dict = plus)"
text \<open>
Based on that definition, we can prove that @{const double} and @{const double'} are equivalent:
\<close>
lemma "cert_plus dict \<Longrightarrow> double' dict = double"
unfolding cert_plus_def double'_def double_def
by auto
text \<open>
An unconditional equation can be obtained by specializing the theorem to a ground type and
supplying a valid dictionary.
\<close>
subsection \<open>Implementation\<close>
text \<open>
When translating a constant \<open>f\<close>, we use existing mechanisms in Isabelle to obtain its
\<^emph>\<open>code graph\<close>. The graph contains the code equations of all transitive dependencies (i.e.,
other constants) of \<open>f\<close>. In general, we have to re-define each of these dependencies. For that,
we use the internal interface of the @{command function} package and feed it the code equations
after performing the dictionary construction. In the standard case, where the user has not
performed a custom code setup, the resulting function looks similar to its original definition.
But the user may have also changed the implementation of a function significantly afterwards.
This imposes some restrictions:
\<^item> The new constant needs to be proven terminating. We apply some heuristics to transfer the
original termination proof to the new definition. This only works when the termination condition
does not rely on class axioms. (See \<open>\<section>\<close>\ref{sec:termination} for details.)
\<^item> Pattern matching must be performed on datatypes, instead of the more general
@{command code_datatype}s.
\<^item> The set of code equations must be exhaustive and non-overlapping.
\<close>
end |
library(quantmod)
library(tidyverse)
library(data.table)
library(roll)
library(scales)
library(ggforce)
library(gganimate)
library(readxl)
library(rvest) # provides html_table(); htmltab was unused
library(httr)
library(fuzzyjoin)
setwd('~\\Public_Policy\\Projects\\Taxes vs. Deficits\\data')
##### Get political data #####
the_url = 'https://www.presidency.ucsb.edu/statistics/data/house-and-senate-concurrence-with-presidents'
concurrence_table = html_table(GET(the_url) %>% content(), fill = TRUE)
# MIT data lab
house_elections = read.csv('1976-2018-house.csv') %>%
mutate(candidate_votes = as.numeric(candidatevotes)) %>%
filter(stage == 'gen') %>% data.table()
senate_elections = read.csv('1976-2018-senate.csv') %>% filter(stage == 'gen') %>% data.table()
presidential_elections = read.csv('1976-2016-president.csv') %>% data.table()
large_text_theme = theme(
plot.title = element_text(size = 24),
plot.subtitle = element_text(size = 18, face = 'italic'),
plot.caption = element_text(size = 13, face = 'italic', hjust = 0),
axis.text = element_text(size = 16),
axis.title = element_text(size = 18)
)
setwd("~/Public_Policy/Projects/Presidential Approval/data")
the_sheets = excel_sheets("American Presidency Project - Approval Ratings for POTUS.xlsx")
president_start_dates = tibble(
President = c('Franklin D. Roosevelt','Harry S. Truman','Dwight D. Eisenhower','John F. Kennedy',
'Lyndon B. Johnson','Richard Nixon','Gerald R. Ford','Jimmy Carter','Ronald Reagan','George Bush',
'William J. Clinton','George W. Bush','Barack Obama','Donald Trump'),
start_date = c('1933-03-04', '1945-04-12', '1953-01-20', '1961-01-20',
'1963-11-22', '1969-01-20', '1974-08-09', '1977-01-20',
'1981-01-20', '1989-01-20', '1993-01-20', '2001-01-20',
'2009-01-20', '2017-01-20') %>% as.Date(),
end_date = lead(start_date, 1) %>% as.Date()
)
debt_gdp_ratio = getSymbols('GFDEGDQ188S', src = 'FRED', from = 1945, to = 2020, auto.assign = F)
annual_deficit = getSymbols('FYFSD', src = 'FRED', from = 1945, to = 2020, auto.assign = F)
annual_gdp = getSymbols('GDPA', src = 'FRED', from = 1945, to = 2020, auto.assign = F)
fuzzy_left_join(
president_start_dates, by = c('month_date' = 'start_date', 'month_date' = 'end_date'),
match_fun = list(`>=`, `<=`)
|
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel → Restart) and then **run all cells** (in the menubar, select Cell → Run All).
Make sure you fill in any place that says YOUR CODE HERE or "YOUR ANSWER HERE", as well as your name and collaborators below:
```python
NAME = "Prabal Chowdhury"
COLLABORATORS = ""
```
# **CSE330 Lab: Hermite Interpolation**
Hermite interpolation is an example of a variant of the interpolation problem, where the interpolant matches one or more derivatives of $f$ at each of the nodes, in addition to the function values.
## **Importing the necessary libraries**
```python
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
from numpy.polynomial import Polynomial
```
## **Creating the components for Hermite interpolation**
For the case of Hermite interpolation, we look for a polynomial that matches both $f(x_i)$ and $f'(x_i)$ at the nodes $x_i = x_0,\dots,x_n$. Say you have $n+1$ data points, $(x_0,y_0),(x_1,y_1),(x_2,y_2),\dots,(x_n,y_n)$, and you happen to know the first-order derivative at all of these points, namely, $(x_0, y_0^\prime), (x_1, y_1^\prime), (x_2, y_2^\prime), \dots, (x_n, y_n^\prime)$. According to Hermite interpolation, since there are $2n+2$ conditions, $n+1$ for $f(x_i)$ plus $n+1$ for $f'(x_i)$, you can fit a polynomial of degree $2n+1$. For example, two nodes with values and slopes give four conditions, which pin down a cubic.
General form of a $2n+1$ degree Hermite polynomial:
$$p_{2n+1} = \sum_{k=0}^{n} \left(f(x_k)h_k(x) + f'(x_k)\hat{h}_k(x)\right), \tag{1}$$
where $h_k$ and $\hat{h}_k$ are defined using Lagrange basis functions by the following equations:
$$h_k(x) = (1-2(x-x_k)l^\prime_k(x_k))l^2_k(x), \tag{2}$$
and
$$\hat{h}_k(x) = (x-x_k)l^2_k(x), \tag{3}$$
where the Lagrange basis function is:
$$l_k(x) = \prod_{j=0, j\neq k}^{n} \frac{x-x_j}{x_k-x_j}. \tag{4}$$
**Note** that we can rewrite Equation $(2)$ in the following way:
\begin{align}
h_k(x) &= \left(1-2(x-x_k)l^\prime_k(x_k) \right)l^2_k(x) \\
&= \left(1 - 2xl^\prime_k(x_k) + 2x_kl^\prime_k(x_k) \right)l^2_k(x) \\
&= \left(1 + 2x_kl^\prime_k(x_k) - 2l'_k(x_k)x \right) l^2_k(x) \tag{5}
\end{align}
Replacing $l^\prime_k(x_k)$ with $m$, we get:
$$h_k(x) = (1 - 2xm + 2x_km)\,l^2_k(x). \tag{6}$$
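Before moving on, here is a quick numerical sanity check of Equations $(2)$–$(4)$ (a minimal sketch with arbitrary illustrative nodes, built directly with `numpy` and independent of the functions you will write below): the Hermite basis satisfies $h_k(x_j) = \delta_{kj}$ and $h^\prime_k(x_j) = 0$ at every node.
```python
import numpy as np
from numpy.polynomial import Polynomial

nodes = np.array([0.0, 1.0, 2.5])   # arbitrary distinct nodes
k = 1
others = np.delete(nodes, k)
l_k = Polynomial.fromroots(others) / np.prod(nodes[k] - others)  # equation (4)
m = l_k.deriv()(nodes[k])                                        # l_k'(x_k)
h_k = (1 - 2 * Polynomial([-nodes[k], 1.0]) * m) * l_k**2        # equation (2)
print(np.round(h_k(nodes), 10))          # -> [0. 1. 0.]  (h_k(x_j) = delta_kj)
print(np.round(h_k.deriv()(nodes), 10))  # -> [0. 0. 0.]  (h_k'(x_j) = 0)
```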
## **Tasks:**
* The functions `l(k, x)`, `h(k, x)`, and `h_hat(k, x)` calculate the corresponding
$l_k$, $h_k$, and $\hat{h}_k$, respectively.
* Function `l(k, x)` has already been defined for you.
* Your task is to complete the `h(k, x)`, `h_hat(k, x)`, and `hermit(x, y, y_prime)` functions.
Later we will draw some plots to check if the code is working.
### **Part 1: Calculate $l_k$**
This function uses the following equation to calculate $l_k(x)$ and returns a polynomial:
$$l_k(x) = \prod_{j=0, j\neq k}^{n} \frac{x-x_j}{x_k-x_j}$$
```python
def l(k, x):
    n = len(x)
    assert k < len(x)
    x_k = x[k]
    x_copy = np.delete(x, k)
    # denominator: product over j != k of (x_j - x_k)
    denominator = np.prod(x_copy - x_k)
    # numerator coefficients via elementary symmetric polynomials of the
    # remaining nodes, built highest-degree first and then reversed
    coeff = []
    for i in range(n):
        coeff.append(sum(np.prod(c) for c in combinations(x_copy, i)) * (-1)**i / denominator)
    coeff.reverse()
    return Polynomial(coeff)
```
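A quick way to convince yourself the implementation works (an illustrative check, not part of the lab): the defining property of the Lagrange basis is $l_k(x_j) = \delta_{kj}$.
```python
x_nodes = np.array([1.0, 3.0, 5.0])
print(np.round(l(1, x_nodes)(x_nodes), 10))  # -> [0. 1. 0.]
```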
### **Part 2: Calculate $h_k$**
This function calculates $h_k(x)$ using the following equation:
$$h_k(x) = \left(1 + 2x_kl^\prime_k(x_k) - 2l'_k(x_k)x \right) l^2_k(x).$$
This equation is basically a multiplication of two polynomials.
First polynomial: $1 + 2x_kl^\prime_k(x_k) - 2l'_k(x_k)x$
Second polynomial: $l^2_k(x)$.
The `coeff` variable should contain a python list of coefficient values for the **first** polynomial of the equation. These coefficient values are used to create a polynomial `p`.
```python
def h(k, x):
    # initialize with None. Replace with appropriate values/function calls
    l_k = l(k, x)              # Lagrange basis polynomial l_k(x)
    l_k_sqr = l_k * l_k        # l_k(x)^2
    l_k_prime = l_k.deriv(1)   # l_k'(x)
    # first polynomial of equation (6): (1 + 2*x_k*m) - (2*m)*x, with m = l_k'(x_k)
    coeff = [1 + 2 * x[k] * l_k_prime(x[k]), -2 * l_k_prime(x[k])]
    p = Polynomial(coeff)
    # --------------------------------------------
    # YOUR CODE HERE
    # --------------------------------------------
    return p * l_k_sqr
```
```python
# Test case for the h(k, x) function
x = [3, 5, 7, 9]
k = 2
h_test = h(k, [3, 5, 7, 9])
h_result = Polynomial([-2.5, 0.5]) * (l(k, x) ** 2)
assert Polynomial.has_samecoef(h_result, h_test)
assert h_result == h_test
```
### **Part 3: Calculate $\hat{h}_k$**
This function calculates $\hat{h}_k(x)$ using the following equation:
$$\hat{h}_k(x) = (x-x_k)l^2_k(x).$$
This equation is also a multiplication of two polynomials.
First polynomial: $x-x_k$ .
Second polynomial: $l^2_k(x)$.
The `coeff` variable should contain a python list of coefficient values for the **first** polynomial of the equation. These coefficient values are used to create a polynomial `p`.
```python
def h_hat(k, x):
    # initialize with None. Replace with appropriate values/function calls
    l_k = l(k, x)        # Lagrange basis polynomial l_k(x)
    l_k_sqr = l_k * l_k  # l_k(x)^2
    # first polynomial of equation (3): (x - x_k)
    coeff = [-x[k], 1]
    p = Polynomial(coeff)
    # --------------------------------------------
    # YOUR CODE HERE
    # --------------------------------------------
    return p * l_k_sqr
```
```python
# Test case for the h_hat(k, x) function
x = [3, 5, 7, 9]
k = 2
h_test = h_hat(k, [3, 5, 7, 9])
h_result = Polynomial([-7, 1]) * (l(k, x) ** 2)
assert Polynomial.has_samecoef(h_result, h_test)
assert h_result == h_test
```
### **Part 4: The Hermite Polynomial**
This function uses the following equation:
$$p_{2n+1} = \sum_{k=0}^{n} \left(f(x_k)h_k(x) + f'(x_k)\hat{h}_k(x)\right).$$
The polynomial denoted by the equation is accumulated in the variable `f`.
```python
def hermit(x, y, y_prime):
    assert len(x) == len(y)
    assert len(y) == len(y_prime)
    f = Polynomial([0.0])
    # --------------------------------------------
    # YOUR CODE HERE
    # equation (1): accumulate f(x_k)*h_k + f'(x_k)*h_hat_k over all nodes
    for i in range(len(x)):
        f += y[i] * h(i, x) + y_prime[i] * h_hat(i, x)
    # --------------------------------------------
    return f
```
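As a quick sanity check (a made-up single-node example, not part of the lab): with one node carrying $f(0)=1$ and $f'(0)=2$, the Hermite interpolant must be the degree-1 polynomial $1 + 2x$.
```python
p = hermit(np.array([0.0]), np.array([1.0]), np.array([2.0]))
print(p.coef)  # -> coefficients close to [1. 2.], i.e. 1 + 2x
```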
### **Testing our methods by plotting graphs.**
**Note:**
* For each of the 5 plots, there will be 2 curves plotted: one being the original function, and the other being the interpolated curve.
* The original functions are displayed in orange, while the Hermite-interpolated curves are in blue.
* `x`, `y`, and `y_prime` contain $x_i$, $f(x_i)$, and $f'(x_i)$ of the given nodes of the original function $f$ .
Upon calling the `hermit()` function, it returns a polynomial `f`; for plot 1, for example, the returned polynomial is named `f3`.
In general, a polynomial may look like the following: $f = 1 + 2x + 3x^2$. Next, we pass a number of $x$ values to the polynomial by calling the `.linspace()` method on the polynomial object, e.g. `f.linspace()`. This method returns a tuple, which is stored in a variable called `data`. The first element of `data` contains a 1D numpy array of $x_i$ values generated by `linspace()`, and the second element contains a 1D numpy array of the corresponding $y_i$ values produced by our example polynomial $f = 1 + 2x + 3x^2$.
Using `test_x`, we generate a range of $x_i$ values to plot the original function, and `test_y` contains the corresponding $y_i$ values of the original function. For the first plot, our original function is the sine curve.
For all the plots:
`plt.plot(test_x, test_y)` plots the original function.
`plt.plot(data[0], data[1])` plots the interpolated polynomial.
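For instance (a small illustrative sketch of `.linspace()`, separate from the plots below):
```python
f_demo = Polynomial([1.0, 2.0, 3.0])             # f = 1 + 2x + 3x^2
data_demo = f_demo.linspace(n=5, domain=[0, 1])
print(data_demo[0])  # 5 evenly spaced x values in [0, 1]
print(data_demo[1])  # f evaluated at those x values
```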
```python
pi = np.pi
x = np.array([0.0, pi/2.0, pi, 3.0*pi/2.0])
y = np.array([0.0, 1.0, 0.0, -1.0])
y_prime = np.array([1.0, 0.0, 1.0, 0.0])
```
**Plot 1**: trying to interpolate a sine curve (`np.sin()`) using the first 2 nodes in `x` and `y`, and their corresponding derivatives in `y_prime`.
```python
n = 1
f3 = hermit(x[:(n+1)], y[:(n+1)], y_prime[:(n+1)])
data = f3.linspace(n=50, domain=[-3, 3])
test_x = np.linspace(-3, 3, 50, endpoint=True)
test_y = np.sin(test_x)
plt.plot(data[0], data[1])
plt.plot(test_x, test_y)
plt.show()
np.testing.assert_allclose(data[1][20:32], test_y[20:32], atol=0.7, rtol=1.4)
```
**Plot 2**: trying to interpolate a sine curve (`np.sin()`) using the first 3 nodes in `x` and `y` and their corresponding derivatives in `y_prime`.
```python
n = 2
f5 = hermit(x[:(n+1)], y[:(n+1)], y_prime[:(n+1)])
data = f5.linspace(n=50, domain=[-0.7, 3])
test_x = np.linspace(-2*pi, 2*pi, 50, endpoint=True)
test_y = np.sin(test_x)
plt.plot(data[0], data[1])  # interpolated polynomial (blue)
plt.plot(test_x, test_y)    # original function (orange)
plt.show()
data = f5.linspace(n=50, domain=[0, 3])
test_x = np.linspace(0, 3, 50, endpoint=True)
test_y = np.sin(test_x)
np.testing.assert_allclose(data[1], test_y, atol=0.5, rtol=1.7)
```
**Plot 3**: trying to interpolate a sine curve (`np.sin()`) using the first 4 nodes in `x` and `y` and their corresponding derivatives in `y_prime`.
```python
n = 3
f7 = hermit(x[:(n+1)], y[:(n+1)], y_prime[:(n+1)])
data = f7.linspace(n=50, domain=[-0.3, 3])
test_x = np.linspace(-2*pi, 2*pi, 50, endpoint=True)
test_y = np.sin(test_x)
plt.plot(data[0], data[1])
plt.plot(test_x, test_y)
plt.show()
data = f7.linspace(n=50, domain=[0, 3])
test_x = np.linspace(0, 3, 50, endpoint=True)
test_y = np.sin(test_x)
np.testing.assert_allclose(data[1], test_y, atol=0.8, rtol=1.9)
```
**Plot 4**: trying to interpolate the curve $y = e^{x^2}$ (via `np.exp()`) using all nodes in `x` and `y` and their corresponding derivatives in `y_prime`.
```python
#defining new set of given node information: x, y and y'
x = np.array([0.0, 1.0, 2.0 ])
y = np.array([1.0, 2.71828183, 54.59815003])
y_prime = np.array([0.0, 5.43656366, 218.39260013])
f7 = hermit( x, y, y_prime)
data = f7.linspace(n=50, domain=[-0.5, 2.2])
test_x = np.linspace(-0.5, 2.2, 50, endpoint=True)
test_y = np.exp(test_x**2)
plt.plot(data[0], data[1])
plt.plot(test_x, test_y)
plt.show()
np.testing.assert_allclose(test_y[27:47], data[1][27:47], atol=3, rtol=0.4)
```
**Plot 5:** trying to interpolate $y = (x-3)^2 + 1$ using all nodes in `x` and `y` and their corresponding derivatives in `y_prime`.
For this plot you might be able to see only one curve due to the two curves overlapping. This means that our polynomial is accurately interpolating the original function.
```python
#defining new set of given node information: x, y and y'
x = np.array([1.0, 3.0, 5.0])
y = np.array([5.0, 1.0, 5.0])
y_prime = np.array([-4.0, 0.0, 4.0])
f7 = hermit( x, y, y_prime)
data = f7.linspace(n=50, domain=[-10, 10])
test_x = np.linspace(-10, 10, 50, endpoint=True)
test_y = (test_x-3)**2 + 1
plt.plot(data[0], data[1])
plt.plot(test_x, test_y)
plt.show()
np.testing.assert_allclose(test_y, data[1], atol=0.1, rtol=0.1)
```
|
The Gamma function has a pole at $-n$ for every non-negative integer $n$. |
Formal statement is: lemma bigtheta_trans2: "f \<in> \<Theta>[F](g) \<Longrightarrow> g \<in> L F (h) \<Longrightarrow> f \<in> L F (h)" Informal statement is: If $f \in \Theta(g)$ and $g \in O(h)$, then $f \in O(h)$. |
Formal statement is: lemma continuous_diff [continuous_intros]: fixes f g :: "'a::t2_space \<Rightarrow> 'b::topological_group_add" shows "continuous F f \<Longrightarrow> continuous F g \<Longrightarrow> continuous F (\<lambda>x. f x - g x)" Informal statement is: If $f$ and $g$ are continuous functions, then so is $f - g$. |
module Selective.Libraries.ReceiveSublist where
open import Selective.ActorMonad public
open import Prelude
accept-sublist-unwrapped : (xs ys zs : InboxShape) → ∀{MT} → MT ∈ (xs ++ ys ++ zs) → Bool
accept-sublist-unwrapped [] [] zs p = false
accept-sublist-unwrapped [] (y ∷ ys) zs Z = true
accept-sublist-unwrapped [] (y ∷ ys) zs (S p) = accept-sublist-unwrapped [] ys zs p
accept-sublist-unwrapped (x ∷ xs) ys zs Z = false
accept-sublist-unwrapped (x ∷ xs) ys zs (S p) = accept-sublist-unwrapped xs ys zs p
accept-sublist : (xs ys zs : InboxShape) → MessageFilter (xs ++ ys ++ zs)
accept-sublist xs ys zs (Msg received-message-type received-fields) = accept-sublist-unwrapped xs ys zs received-message-type
record AcceptSublistDependent (IS : InboxShape) (accepted-type : MessageType) : Set₁ where
field
accepted-which : accepted-type ∈ IS
fields : All receive-field-content accepted-type
receive-sublist : {i : Size} →
{Γ : TypingContext} →
(xs ys zs : InboxShape) →
∞ActorM i (xs ++ ys ++ zs)
(Message ys)
Γ
(add-references Γ)
receive-sublist xs ys zs = do
record { msg = Msg {MT} p f ; msg-ok = msg-ok } ← selective-receive (accept-sublist xs ys zs)
let record {accepted-which = aw ; fields = fields } = rewrap-message xs ys zs p f msg-ok
return₁ (Msg {MT = MT} aw fields)
where
rewrap-message : ∀{MT} →
(xs ys zs : InboxShape) →
(p : MT ∈ (xs ++ ys ++ zs)) →
All receive-field-content MT →
(accept-sublist-unwrapped xs ys zs p) ≡ true →
AcceptSublistDependent ys MT
rewrap-message [] [] zs p f ()
rewrap-message [] (x ∷ ys) zs Z f q = record { accepted-which = Z ; fields = f }
rewrap-message [] (x ∷ ys) zs (S p) f q =
let
rec = rewrap-message [] ys zs p f q
open AcceptSublistDependent
in record { accepted-which = S (rec .accepted-which) ; fields = rec .fields }
rewrap-message (x ∷ xs) ys zs Z f ()
rewrap-message (x ∷ xs) ys zs (S p) f q = rewrap-message xs ys zs p f q
|
#ifndef _SEARCHBEST_
#define _SEARCHBEST_
#include <assert.h>
#include <cmath>
#include <float.h>
#include <climits>
#include <vector>
// use openblas
#include <cblas.h>
#include "cosine_similarity.h"
// Step 1, g++ main.cpp search_best.cpp cosine_similarity.cpp -std=c++11
// Step 2, g++ main.cpp search_best.cpp cosine_similarity.cpp -std=c++11 -O3
// Step 3, g++ main.cpp search_best.cpp cosine_similarity.cpp -std=c++11 -O3 -Ofast -ffast-math
template <typename T>
int SearchBest(const T* __restrict__ const pVecA,  // pointer to the single query feature vector
               const int lenA,                     // length of the query vector (1 x feature dimension)
               const T* __restrict__ const pVecDB, // pointer to the start of the feature database
               const int lenDB)                    // database length (number of features x feature dimension)
{
assert(lenDB%lenA == 0);
const int featsize = lenA;
const int facenum = lenDB / lenA;
int best_index = - INT_MAX;
T best_similarity = - FLT_MAX;
#if 0
// Step 5, add OpenMP
// GCC is clever: OpenMP's default thread count equals the number of CPU cores, so there is no need to specify it explicitly
// spawning and joining OpenMP threads has overhead, so size each thread's workload sensibly; don't put the pragma on the inner for loop (the per-iteration work is too small to pay off)
//#pragma omp parallel for num_threads(8)
#pragma omp parallel for
for(int i = 0; i < facenum; i++) {
// cosine similarity computed in plain C++
T similarity = Cosine_similarity(pVecA, pVecDB + i*featsize, featsize);
// cosine similarity computed with hand-vectorized (AVX) code
//T similarity = Cosine_similarity_avx(pVecA, pVecDB + i*featsize, featsize);
if(similarity > best_similarity) {
best_similarity = similarity;
best_index = i;
}
}
#else
// Step 12, use OpenBLAS
// note: cblas_sgemv is the single-precision routine, so this branch assumes
// T = float; it computes dot products, which equal cosine similarities only
// for L2-normalized feature vectors
std::vector<T> simAll(facenum, T(0));
cblas_sgemv(CblasRowMajor, CblasNoTrans, facenum, featsize, 1.0f, pVecDB, featsize, pVecA, 1, 0.0f, simAll.data(), 1);
// find the maximum in simAll; its index is the id we are looking for
for(int i = 0; i < facenum; i++) {
if(simAll[i] > best_similarity) {
best_similarity = simAll[i];
best_index = i;
}
}
#endif
return best_index;
}
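// A minimal usage sketch (illustrative only; the dimensions and names below
// are made up and not part of this header):
//
//   std::vector<float> query(512);            // one 512-d feature vector
//   std::vector<float> gallery(512 * 10000);  // 10000 database features
//   int id = SearchBest(query.data(), 512, gallery.data(), (int)gallery.size());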
#endif //!_SEARCHBEST_
|
Formal statement is: lemma uncountable_convex: fixes a :: "'a::real_normed_vector" assumes "convex S" "a \<in> S" "b \<in> S" "a \<noteq> b" shows "uncountable S" Informal statement is: If $S$ is a convex set containing two distinct points $a$ and $b$, then $S$ is uncountable. |
theory ToDo_Tensor
imports Tensor_Product
begin
lemma cinner_tensor: "(\<gamma> \<otimes> \<psi>) \<bullet>\<^sub>C (\<delta> \<otimes> \<phi>) = (\<psi> \<bullet>\<^sub>C \<phi>) * (\<gamma> \<bullet>\<^sub>C \<delta>)" for \<gamma> \<psi> \<delta> \<phi> :: \<open>_ ell2\<close>
sorry
lemma addState_adj_times_addState[simp]:
includes cblinfun_notation no_blinfun_notation
fixes \<psi> \<phi> :: "'a ell2"
shows "addState \<psi>* o\<^sub>C\<^sub>L addState \<phi> = (\<psi> \<bullet>\<^sub>C \<phi>) *\<^sub>C (id_cblinfun::('b ell2,'b ell2) cblinfun)"
proof -
have "\<gamma> \<bullet>\<^sub>C ((addState \<psi>* o\<^sub>C\<^sub>L addState \<phi>) *\<^sub>V \<delta>) = \<gamma> \<bullet>\<^sub>C (((\<psi> \<bullet>\<^sub>C \<phi>) *\<^sub>C id_cblinfun) *\<^sub>V \<delta>)" for \<gamma> \<delta> :: "'b ell2"
apply (simp add: cblinfun_compose_image cinner_adj_right)
apply (transfer fixing: \<psi> \<phi> \<delta> \<gamma>)
by (simp add: cinner_tensor)
hence "(addState \<psi>* o\<^sub>C\<^sub>L addState \<phi>) *\<^sub>V \<delta> = ((\<psi> \<bullet>\<^sub>C \<phi>) *\<^sub>C id_cblinfun) *\<^sub>V \<delta>" for \<delta> :: "'b ell2"
by (metis (no_types, lifting) adjoint_eqI cinner_adj_left double_adj)
thus ?thesis
by (rule cblinfun_eqI)
qed
lemma ket_product: "ket (a,b) = ket a \<otimes> ket b"
sorry
lemma tensorOp_applyOp_distr:
includes cblinfun_notation no_blinfun_notation
shows "(A \<otimes> B) *\<^sub>V (\<psi> \<otimes> \<phi>) = (A *\<^sub>V \<psi>) \<otimes> (B *\<^sub>V \<phi>)"
sorry
lemma assoc_op_apply_tensor[simp]:
includes cblinfun_notation no_blinfun_notation
shows "assoc_op *\<^sub>V (\<psi> \<otimes> (\<phi> \<otimes> \<tau>)) = (\<psi> \<otimes> \<phi>) \<otimes> \<tau>"
sorry
lemma comm_op_apply_tensor[simp]:
includes cblinfun_notation no_blinfun_notation
shows "comm_op *\<^sub>V (\<psi>\<otimes>\<phi>) = (\<phi>\<otimes>\<psi>)"
sorry
lemma assoc_op_adj_apply_tensor[simp]:
includes cblinfun_notation no_blinfun_notation
shows "assoc_op* *\<^sub>V ((\<psi>\<otimes>\<phi>)\<otimes>\<tau>) = \<psi>\<otimes>(\<phi>\<otimes>\<tau>)"
sorry
lemma span_tensor: "ccspan G \<otimes> ccspan H = ccspan {g\<otimes>h|g h. g\<in>G \<and> h\<in>H}"
sorry
lemma span_tensors:
"closure (cspan {C1 \<otimes> C2| (C1::(_,_) l2bounded) (C2::(_,_) l2bounded). True}) = UNIV"
sorry
end
|
C @(#)kmp_ptib.f 20.8 5/3/00
C****************************************************************
C
C File: kmp_ptib.f
C
C Purpose: Routine to compare PTI bus names for qiksrt.
C
c Return code: n = <bus1> - <bus2>
c
C Author: Walt Powell Date: 21 May 1996
C Called by: chk_ptib.f
C
C****************************************************************
integer function kmp_ptib ( p, q)
integer p, q
include 'ipfinc/parametr.inc'
include 'ipfinc/pti_data.inc'
include 'ipfinc/qksrt.inc'
include 'ipfinc/prt.inc'
integer MAXPTIRECORDS
parameter (MAXPTIRECORDS = 16000)
common /scratch/ count, array(4,MAXBUS), htable_2(MAXBUS),
& nextptr_2(MAXBUS), count_newbus,
& newbusno(MAXBUS), count_newzone,
& newzoneno(MAXCZN), count_newown,
& newownno(MAXOWN), tempc(MAXPTIRECORDS),
& sort_tempc(MAXPTIRECORDS)
integer array, count, htable_2, count_newbus, newbusno,
& count_newzone, newzoneno, count_newown, newownno,
& sort_tempc
character tempc*80
common /bpa_num / user_rule, num_area_rule, num_zone_rule,
& num_owner_rule, num_default_rule,
& area_rule(3,MAXCAR), zone_rule(3,MAXZON),
& owner_rule(3,MAXOWN), default_rule(2,100),
& owner_code_ge(MAXOWN)
integer user_rule, num_area_rule, num_zone_rule, num_owner_rule,
& num_default_rule, area_rule, zone_rule, owner_rule,
& default_rule
character owner_code_ge*4
integer HASHSIZE
parameter (HASHSIZE = 1999)
common /masterlist/ num_master, bus_master(MAXBUS),
& base_master(MAXBUS), area_master(MAXBUS),
& zone_master(MAXBUS), owner_master(MAXBUS),
& case_master(MAXBUS), htable_master(0:HASHSIZE),
& nextptr_master(0:MAXBUS), busnum_master(MAXBUS)
character bus_master*8, area_master*10, zone_master*2,
& owner_master*3, case_master*10
integer num_master, htable_master, nextptr_master,
& busnum_master
real base_master
integer ftn_atoi
character busname1*8, busname2*8, areaname1*8, areaname2*8,
& zonename1*2, zonename2*2, ownername1*3, ownername2*3,
& word1(10)*32, word2(10)*32
if (key .eq. 1) then
kmp_ptib = kompr (pti_name(sort(p)), pti_name(sort(q)), junk)
if (kmp_ptib .eq. 0) then
kmp_ptib = 100.0 * (pti_base(sort(p)) - pti_base(sort(q)))
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = pti_area(sort(p)) - pti_area(sort(q))
endif
else if (key .eq. 2) then
ip = newbusno(p)
iq = newbusno(q)
kmp_ptib = pti_num(ip) - pti_num(iq)
else if (key .eq. 3) then
ip = newzoneno(p)
iq = newzoneno(q)
kmp_ptib = pti_znum(ip) - pti_znum(iq)
else if (key .eq. 4) then
ip = newownno(p)
iq = newownno(q)
kmp_ptib = pti_onum(ip) - pti_onum(iq)
else if (key .eq. 5) then
kmp_ptib = pti_anum(p) - pti_anum(q)
else if (key .eq. 6) then
ip = sort(p)
iq = sort(q)
kmp_ptib = pti_znum(ip) - pti_znum(iq)
else if (key .eq. 7) then
ip = sort(p)
iq = sort(q)
kmp_ptib = pti_onum(ip) - pti_onum(iq)
if (kmp_ptib .eq. 0)
& kmp_ptib = kompr (owner_code_ge(ip), owner_code_ge(iq), junk)
else if (key .eq. 8) then
ip = sort(p)
iq = sort(q)
kmp_ptib = pti_anum(ip) - pti_anum(iq)
else if (key .eq. 101) then
c
c Sort old PTI bus numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
read (tempc(ip), 10010, err=230) numbus1, busname1, basekv1,
& numarea1, numzone1
read (tempc(iq), 10010, err=232) numbus2, busname2, basekv2,
& numarea2, numzone2
10010 format (1x, i5, 2x, a8, f4.0, i4, 20x, i4)
kmp_ptib = numbus1 - numbus2
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (busname1, busname2, junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = 100.0 * (basekv1 - basekv2)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = numarea1 - numarea2
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = numzone1 - numzone2
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (tempc(ip)(1:1), tempc(iq)(1:1), junk)
endif
go to 240
230 write (errbuf(1), 10020) tempc(ip)(1:60)
10020 format ('Error decoding bus record in *.TRN file ', a)
call prterx ('W', 1)
error = error + 1
go to 240
232 write (errbuf(1), 10020) tempc(iq)(1:60)
call prterx ('W', 1)
error = error + 1
240 continue
else if (key .eq. 102) then
c
c Sort old PTI area numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
read (tempc(ip), 10050, err=290) numarea1, areaname1
read (tempc(iq), 10050, err=292) numarea2, areaname2
10050 format (1x, i3, 2x, a8)
kmp_ptib = numarea1 - numarea2
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (areaname1, areaname2, junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (tempc(ip)(1:1), tempc(iq)(1:1), junk)
endif
go to 300
290 write (errbuf(1), 10060) tempc(ip)(1:60)
10060 format ('Error decoding area record in *.TRN file ', a)
call prterx ('W', 1)
error = error + 1
go to 300
292 write (errbuf(1), 10060) tempc(iq)(1:60)
call prterx ('W', 1)
error = error + 1
300 continue
else if (key .eq. 103) then
c
c Sort old PTI zone numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
read (tempc(ip), 10090, err=360) numzone1, zonename1
read (tempc(iq), 10090, err=362) numzone2, zonename2
10090 format (1x, i3, 2x, a2)
kmp_ptib = numzone1 - numzone2
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (zonename1, zonename2, junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (tempc(ip)(1:1), tempc(iq)(1:1), junk)
endif
go to 370
360 write (errbuf(1), 10100) tempc(ip)(1:60)
10100 format ('Error decoding zone record in *.TRN file ', a)
call prterx ('W', 1)
error = error + 1
go to 370
362 write (errbuf(1), 10100) tempc(iq)(1:60)
call prterx ('W', 1)
error = error + 1
370 continue
else if (key .eq. 104) then
c
c Sort old PTI owner numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
read (tempc(ip), 10130, err=420) numowner1, ownername1
read (tempc(iq), 10130, err=422) numowner2, ownername2
10130 format (1x, i3, 2x, a3)
kmp_ptib = numowner1 - numowner2
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (ownername1, ownername2, junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (tempc(ip)(1:1), tempc(iq)(1:1), junk)
endif
go to 430
420 write (errbuf(1), 10140) tempc(ip)(1:60)
10140 format ('Error decoding owner record in *.TRN file ', a)
call prterx ('W', 1)
error = error + 1
go to 430
422 write (errbuf(1), 10140) tempc(iq)(1:60)
call prterx ('W', 1)
error = error + 1
430 continue
else if (key .eq. 201) then
c
c Sort old GE bus numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
last1 = lastch (tempc(ip))
call uscan (tempc(ip)(1:last1), word1, nwrd1, '=', ' ,')
last2 = lastch (tempc(iq))
call uscan (tempc(iq)(1:last2), word2, nwrd2, '=', ' ,')
kmp_ptib = ftn_atoi (word1(1)) - ftn_atoi (word2(1))
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(2), word2(2), junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = 100.0 * (ftn_atof(word1(3)) - ftn_atof(word2(3)))
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = ftn_atoi (word1(4)) - ftn_atoi (word2(4))
endif
else if (key .eq. 202) then
c
c Sort old GE area numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
last1 = lastch (tempc(ip))
call uscan (tempc(ip)(1:last1), word1, nwrd1, '=', ' ,')
last2 = lastch (tempc(iq))
call uscan (tempc(iq)(1:last2), word2, nwrd2, '=', ' ,')
kmp_ptib = ftn_atoi (word1(1)) - ftn_atoi (word2(1))
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(2), word2(2), junk)
endif
else if (key .eq. 203) then
c
c Sort old GE zone numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
last1 = lastch (tempc(ip))
call uscan (tempc(ip)(1:last1), word1, nwrd1, '=', ' ,')
last2 = lastch (tempc(iq))
call uscan (tempc(iq)(1:last2), word2, nwrd2, '=', ' ,')
kmp_ptib = ftn_atoi (word1(1)) - ftn_atoi (word2(1))
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(2), word2(2), junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(3), word2(3), junk)
endif
else if (key .eq. 204) then
c
c Sort old GE owner numbers
c
ip = sort_tempc(p)
iq = sort_tempc(q)
last1 = lastch (tempc(ip))
call uscan (tempc(ip)(1:last1), word1, nwrd1, '=', ' ,')
last2 = lastch (tempc(iq))
call uscan (tempc(iq)(1:last2), word2, nwrd2, '=', ' ,')
kmp_ptib = ftn_atoi (word1(1)) - ftn_atoi (word2(1))
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(2), word2(2), junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(3), word2(3), junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (word1(4), word2(4), junk)
endif
else if (key .eq. 205) then
ip = sort(p)
iq = sort(q)
kmp_ptib = area_rule(1,ip) - area_rule(1,iq)
else if (key .eq. 206) then
ip = sort(p)
iq = sort(q)
kmp_ptib = zone_rule(1,ip) - zone_rule(1,iq)
else if (key .eq. 207) then
ip = sort(p)
iq = sort(q)
kmp_ptib = owner_rule(1,ip) - owner_rule(1,iq)
else if (key .eq. 301) then
c
c Sort new GE bus numbers
c
ip = sort(p)
iq = sort(q)
nptib1 = busnum_master(ip)
nptib2 = busnum_master(iq)
kmp_ptib = pti_num(nptib1) - pti_num(nptib2)
if (kmp_ptib .eq. 0) then
kmp_ptib = kompr (pti_name(nptib1), pti_name(nptib2), junk)
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = 100.0 * (pti_base(nptib1) - pti_base(nptib2))
endif
if (kmp_ptib .eq. 0) then
kmp_ptib = pti_area(nptib1) - pti_area(nptib2)
endif
endif
return
end
|
/-
Copyright (c) 2018 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Johannes Hölzl, Rémy Degenne
-/
import order.filter.cofinite
/-!
# liminfs and limsups of functions and filters
Defines the Liminf/Limsup of a function taking values in a conditionally complete lattice, with
respect to an arbitrary filter.
We define `f.Limsup` (`f.Liminf`) where `f` is a filter taking values in a conditionally complete
lattice. `f.Limsup` is the smallest element `a` such that, eventually for `f`, `x ≤ a` holds (and vice
versa for `f.Liminf`). To work with the Limsup along a function `u`, use `(f.map u).Limsup`.
Usually, one defines the Limsup as `Inf (Sup s)` where the Inf is taken over all sets in the filter.
For instance, in ℕ along a function `u`, this is `Inf_n (Sup_{k ≥ n} u k)` (and the latter quantity
decreases with `n`, so this is in fact a limit). There is however a difficulty: it is quite possible
that `u` is not bounded on the whole space, only eventually (think of `Limsup (λx, 1/x)` on ℝ). Then
there is no guarantee that the quantity above really decreases (the value of the `Sup` beforehand is
not really well defined, as one cannot use ∞), so the Inf could be anything. Hence one cannot
use this `Inf Sup ...` definition in conditionally complete lattices, and one has to use a less
tractable definition.
In conditionally complete lattices, the definition is only useful for filters which are eventually
bounded above (otherwise, the Limsup would morally be +∞, which does not belong to the space) and
which are frequently bounded below (otherwise, the Limsup would morally be -∞, which is not in the
space either). We start with definitions of these concepts for arbitrary filters, before turning to
the definitions of Limsup and Liminf.
In complete lattices, however, it coincides with the `Inf Sup` definition.
-/
open filter set
open_locale filter
variables {α β ι : Type*}
namespace filter
section relation
/-- `f.is_bounded (≺)`: the filter `f` is eventually bounded w.r.t. the relation `≺`, i.e.
eventually, it is bounded by some uniform bound.
`r` will be usually instantiated with `≤` or `≥`. -/
def is_bounded (r : α → α → Prop) (f : filter α) := ∃ b, ∀ᶠ x in f, r x b
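-- A quick illustrative sketch (not part of the original file): the principal
-- filter of `set.Iic 5` on ℕ is bounded above, with `5` as a witness.
example : (𝓟 (set.Iic (5 : ℕ))).is_bounded (≤) :=
⟨5, eventually_principal.2 $ λ x hx, hx⟩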
/-- `f.is_bounded_under (≺) u`: the image of the filter `f` under `u` is eventually bounded w.r.t.
the relation `≺`, i.e. eventually, it is bounded by some uniform bound. -/
def is_bounded_under (r : α → α → Prop) (f : filter β) (u : β → α) := (f.map u).is_bounded r
variables {r : α → α → Prop} {f g : filter α}
/-- `f` is eventually bounded if and only if, there exists an admissible set on which it is
bounded. -/
lemma is_bounded_iff : f.is_bounded r ↔ (∃s∈f.sets, ∃b, s ⊆ {x | r x b}) :=
iff.intro
(assume ⟨b, hb⟩, ⟨{a | r a b}, hb, b, subset.refl _⟩)
(assume ⟨s, hs, b, hb⟩, ⟨b, mem_of_superset hs hb⟩)
/-- A bounded function `u` is in particular eventually bounded. -/
lemma is_bounded_under_of {f : filter β} {u : β → α} :
(∃b, ∀x, r (u x) b) → f.is_bounded_under r u
| ⟨b, hb⟩ := ⟨b, show ∀ᶠ x in f, r (u x) b, from eventually_of_forall hb⟩
lemma is_bounded_bot : is_bounded r ⊥ ↔ nonempty α :=
by simp [is_bounded, exists_true_iff_nonempty]
lemma is_bounded_top : is_bounded r ⊤ ↔ (∃t, ∀x, r x t) :=
by simp [is_bounded, eq_univ_iff_forall]
lemma is_bounded_principal (s : set α) : is_bounded r (𝓟 s) ↔ (∃t, ∀x∈s, r x t) :=
by simp [is_bounded, subset_def]
lemma is_bounded_sup [is_trans α r] (hr : ∀b₁ b₂, ∃b, r b₁ b ∧ r b₂ b) :
is_bounded r f → is_bounded r g → is_bounded r (f ⊔ g)
| ⟨b₁, h₁⟩ ⟨b₂, h₂⟩ := let ⟨b, rb₁b, rb₂b⟩ := hr b₁ b₂ in
⟨b, eventually_sup.mpr ⟨h₁.mono (λ x h, trans h rb₁b), h₂.mono (λ x h, trans h rb₂b)⟩⟩
lemma is_bounded.mono (h : f ≤ g) : is_bounded r g → is_bounded r f
| ⟨b, hb⟩ := ⟨b, h hb⟩
lemma is_bounded_under.mono {f g : filter β} {u : β → α} (h : f ≤ g) :
g.is_bounded_under r u → f.is_bounded_under r u :=
λ hg, hg.mono (map_mono h)
lemma is_bounded.is_bounded_under {q : β → β → Prop} {u : α → β}
(hf : ∀a₀ a₁, r a₀ a₁ → q (u a₀) (u a₁)) : f.is_bounded r → f.is_bounded_under q u
| ⟨b, h⟩ := ⟨u b, show ∀ᶠ x in f, q (u x) (u b), from h.mono (λ x, hf x b)⟩
lemma not_is_bounded_under_of_tendsto_at_top [preorder β] [no_top_order β] {f : α → β}
{l : filter α} [l.ne_bot] (hf : tendsto f l at_top) :
¬ is_bounded_under (≤) l f :=
begin
rintro ⟨b, hb⟩,
rw eventually_map at hb,
obtain ⟨b', h⟩ := no_top b,
have hb' := (tendsto_at_top.mp hf) b',
have : {x : α | f x ≤ b} ∩ {x : α | b' ≤ f x} = ∅ :=
eq_empty_of_subset_empty (λ x hx, (not_le_of_lt h) (le_trans hx.2 hx.1)),
exact (nonempty_of_mem (hb.and hb')).ne_empty this
end
lemma not_is_bounded_under_of_tendsto_at_bot [preorder β] [no_bot_order β] {f : α → β}
{l : filter α} [l.ne_bot](hf : tendsto f l at_bot) :
¬ is_bounded_under (≥) l f :=
@not_is_bounded_under_of_tendsto_at_top α (order_dual β) _ _ _ _ _ hf
lemma is_bounded_under.bdd_above_range_of_cofinite [semilattice_sup β] {f : α → β}
(hf : is_bounded_under (≤) cofinite f) : bdd_above (range f) :=
begin
rcases hf with ⟨b, hb⟩,
haveI : nonempty β := ⟨b⟩,
rw [← image_univ, ← union_compl_self {x | f x ≤ b}, image_union, bdd_above_union],
exact ⟨⟨b, ball_image_iff.2 $ λ x, id⟩, (hb.image f).bdd_above⟩
end
lemma is_bounded_under.bdd_below_range_of_cofinite [semilattice_inf β] {f : α → β}
(hf : is_bounded_under (≥) cofinite f) : bdd_below (range f) :=
@is_bounded_under.bdd_above_range_of_cofinite α (order_dual β) _ _ hf
lemma is_bounded_under.bdd_above_range [semilattice_sup β] {f : ℕ → β}
(hf : is_bounded_under (≤) at_top f) : bdd_above (range f) :=
by { rw ← nat.cofinite_eq_at_top at hf, exact hf.bdd_above_range_of_cofinite }
lemma is_bounded_under.bdd_below_range [semilattice_inf β] {f : ℕ → β}
(hf : is_bounded_under (≥) at_top f) : bdd_below (range f) :=
@is_bounded_under.bdd_above_range (order_dual β) _ _ hf
/-- `is_cobounded (≺) f` states that the filter `f` does not tend to infinity w.r.t. `≺`. This is
also called frequently bounded. Will be usually instantiated with `≤` or `≥`.
There is a subtlety in this definition: we want `f.is_cobounded` to hold for any `f` in the case of
complete lattices. This will be relevant to deduce theorems on complete lattices from their
versions on conditionally complete lattices with additional assumptions. We have to be careful in
the edge case of the trivial filter containing the empty set: the other natural definition
`¬ ∀ a, ∀ᶠ n in f, a ≤ n`
would not work as well in this case.
-/
def is_cobounded (r : α → α → Prop) (f : filter α) := ∃b, ∀a, (∀ᶠ x in f, r x a) → r b a
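-- An illustrative sketch (not part of the original file): in a complete
-- lattice every filter is cobounded below, with `⊥` as the witness
-- (cf. `is_cobounded_le_of_bot` below).
example {δ : Type*} [complete_lattice δ] (f : filter δ) : f.is_cobounded (≤) :=
⟨⊥, λ a _, bot_le⟩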
/-- `is_cobounded_under (≺) f u` states that the image of the filter `f` under the map `u` does not
tend to infinity w.r.t. `≺`. This is also called frequently bounded. Will be usually instantiated
with `≤` or `≥`. -/
def is_cobounded_under (r : α → α → Prop) (f : filter β) (u : β → α) := (f.map u).is_cobounded r
/-- To check that a filter is frequently bounded, it suffices to have a witness
which bounds `f` at some point for every admissible set.
This is only an implication, as the other direction is wrong for the trivial filter.-/
lemma is_cobounded.mk [is_trans α r] (a : α) (h : ∀s∈f, ∃x∈s, r a x) : f.is_cobounded r :=
⟨a, assume y s, let ⟨x, h₁, h₂⟩ := h _ s in trans h₂ h₁⟩
/-- A filter which is eventually bounded is in particular frequently bounded (in the opposite
direction). At least if the filter is not trivial. -/
lemma is_bounded.is_cobounded_flip [is_trans α r] [ne_bot f] :
f.is_bounded r → f.is_cobounded (flip r)
| ⟨a, ha⟩ := ⟨a, assume b hb,
let ⟨x, rxa, rbx⟩ := (ha.and hb).exists in
show r b a, from trans rbx rxa⟩
lemma is_bounded.is_cobounded_ge [preorder α] [ne_bot f] (h : f.is_bounded (≤)) :
f.is_cobounded (≥) :=
h.is_cobounded_flip
lemma is_bounded.is_cobounded_le [preorder α] [ne_bot f] (h : f.is_bounded (≥)) :
f.is_cobounded (≤) :=
h.is_cobounded_flip
lemma is_cobounded_bot : is_cobounded r ⊥ ↔ (∃b, ∀x, r b x) :=
by simp [is_cobounded]
lemma is_cobounded_top : is_cobounded r ⊤ ↔ nonempty α :=
by simp [is_cobounded, eq_univ_iff_forall, exists_true_iff_nonempty] {contextual := tt}
lemma is_cobounded_principal (s : set α) :
(𝓟 s).is_cobounded r ↔ (∃b, ∀a, (∀x∈s, r x a) → r b a) :=
by simp [is_cobounded, subset_def]
lemma is_cobounded.mono (h : f ≤ g) : f.is_cobounded r → g.is_cobounded r
| ⟨b, hb⟩ := ⟨b, assume a ha, hb a (h ha)⟩
end relation
lemma is_cobounded_le_of_bot [preorder α] [order_bot α] {f : filter α} : f.is_cobounded (≤) :=
⟨⊥, assume a h, bot_le⟩
lemma is_cobounded_ge_of_top [preorder α] [order_top α] {f : filter α} : f.is_cobounded (≥) :=
⟨⊤, assume a h, le_top⟩
lemma is_bounded_le_of_top [preorder α] [order_top α] {f : filter α} : f.is_bounded (≤) :=
⟨⊤, eventually_of_forall $ λ _, le_top⟩
lemma is_bounded_ge_of_bot [preorder α] [order_bot α] {f : filter α} : f.is_bounded (≥) :=
⟨⊥, eventually_of_forall $ λ _, bot_le⟩
lemma is_bounded_under_sup [semilattice_sup α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≤) u → f.is_bounded_under (≤) v → f.is_bounded_under (≤) (λa, u a ⊔ v a)
| ⟨bu, (hu : ∀ᶠ x in f, u x ≤ bu)⟩ ⟨bv, (hv : ∀ᶠ x in f, v x ≤ bv)⟩ :=
⟨bu ⊔ bv, show ∀ᶠ x in f, u x ⊔ v x ≤ bu ⊔ bv,
by filter_upwards [hu, hv] assume x, sup_le_sup⟩
lemma is_bounded_under_inf [semilattice_inf α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≥) u → f.is_bounded_under (≥) v → f.is_bounded_under (≥) (λa, u a ⊓ v a)
| ⟨bu, (hu : ∀ᶠ x in f, u x ≥ bu)⟩ ⟨bv, (hv : ∀ᶠ x in f, v x ≥ bv)⟩ :=
⟨bu ⊓ bv, show ∀ᶠ x in f, u x ⊓ v x ≥ bu ⊓ bv,
by filter_upwards [hu, hv] assume x, inf_le_inf⟩
/-- Filters are automatically bounded or cobounded in complete lattices. To use the same statements
in complete and conditionally complete lattices but let automation fill automatically the
boundedness proofs in complete lattices, we use the tactic `is_bounded_default` in the statements,
in the form `(hf : f.is_bounded (≥) . is_bounded_default)`. -/
meta def is_bounded_default : tactic unit :=
tactic.applyc ``is_cobounded_le_of_bot <|>
tactic.applyc ``is_cobounded_ge_of_top <|>
tactic.applyc ``is_bounded_le_of_top <|>
tactic.applyc ``is_bounded_ge_of_bot
section conditionally_complete_lattice
variables [conditionally_complete_lattice α]
/-- The `Limsup` of a filter `f` is the infimum of the `a` such that, eventually for `f`,
holds `x ≤ a`. -/
def Limsup (f : filter α) : α := Inf { a | ∀ᶠ n in f, n ≤ a }
/-- The `Liminf` of a filter `f` is the supremum of the `a` such that, eventually for `f`,
holds `x ≥ a`. -/
def Liminf (f : filter α) : α := Sup { a | ∀ᶠ n in f, a ≤ n }
/-- The `limsup` of a function `u` along a filter `f` is the infimum of the `a` such that,
eventually for `f`, holds `u x ≤ a`. -/
def limsup (f : filter β) (u : β → α) : α := (f.map u).Limsup
/-- The `liminf` of a function `u` along a filter `f` is the supremum of the `a` such that,
eventually for `f`, holds `u x ≥ a`. -/
def liminf (f : filter β) (u : β → α) : α := (f.map u).Liminf
section
variables {f : filter β} {u : β → α}
theorem limsup_eq : f.limsup u = Inf { a | ∀ᶠ n in f, u n ≤ a } := rfl
theorem liminf_eq : f.liminf u = Sup { a | ∀ᶠ n in f, a ≤ u n } := rfl
end
theorem Limsup_le_of_le {f : filter α} {a}
(hf : f.is_cobounded (≤) . is_bounded_default) (h : ∀ᶠ n in f, n ≤ a) : f.Limsup ≤ a :=
cInf_le hf h
theorem le_Liminf_of_le {f : filter α} {a}
(hf : f.is_cobounded (≥) . is_bounded_default) (h : ∀ᶠ n in f, a ≤ n) : a ≤ f.Liminf :=
le_cSup hf h
theorem le_Limsup_of_le {f : filter α} {a}
(hf : f.is_bounded (≤) . is_bounded_default) (h : ∀ b, (∀ᶠ n in f, n ≤ b) → a ≤ b) :
a ≤ f.Limsup :=
le_cInf hf h
theorem Liminf_le_of_le {f : filter α} {a}
(hf : f.is_bounded (≥) . is_bounded_default) (h : ∀ b, (∀ᶠ n in f, b ≤ n) → b ≤ a) :
f.Liminf ≤ a :=
cSup_le hf h
theorem Liminf_le_Limsup {f : filter α} [ne_bot f]
(h₁ : f.is_bounded (≤) . is_bounded_default) (h₂ : f.is_bounded (≥) . is_bounded_default) :
f.Liminf ≤ f.Limsup :=
Liminf_le_of_le h₂ $ assume a₀ ha₀, le_Limsup_of_le h₁ $ assume a₁ ha₁,
show a₀ ≤ a₁, from let ⟨b, hb₀, hb₁⟩ := (ha₀.and ha₁).exists in le_trans hb₀ hb₁
lemma Liminf_le_Liminf {f g : filter α}
(hf : f.is_bounded (≥) . is_bounded_default) (hg : g.is_cobounded (≥) . is_bounded_default)
(h : ∀ a, (∀ᶠ n in f, a ≤ n) → ∀ᶠ n in g, a ≤ n) : f.Liminf ≤ g.Liminf :=
cSup_le_cSup hg hf h
lemma Limsup_le_Limsup {f g : filter α}
(hf : f.is_cobounded (≤) . is_bounded_default) (hg : g.is_bounded (≤) . is_bounded_default)
(h : ∀ a, (∀ᶠ n in g, n ≤ a) → ∀ᶠ n in f, n ≤ a) : f.Limsup ≤ g.Limsup :=
cInf_le_cInf hf hg h
lemma Limsup_le_Limsup_of_le {f g : filter α} (h : f ≤ g)
(hf : f.is_cobounded (≤) . is_bounded_default) (hg : g.is_bounded (≤) . is_bounded_default) :
f.Limsup ≤ g.Limsup :=
Limsup_le_Limsup hf hg (assume a ha, h ha)
lemma Liminf_le_Liminf_of_le {f g : filter α} (h : g ≤ f)
(hf : f.is_bounded (≥) . is_bounded_default) (hg : g.is_cobounded (≥) . is_bounded_default) :
f.Liminf ≤ g.Liminf :=
Liminf_le_Liminf hf hg (assume a ha, h ha)
lemma limsup_le_limsup {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : u ≤ᶠ[f] v)
(hu : f.is_cobounded_under (≤) u . is_bounded_default)
(hv : f.is_bounded_under (≤) v . is_bounded_default) :
f.limsup u ≤ f.limsup v :=
Limsup_le_Limsup hu hv $ assume b, h.trans
lemma liminf_le_liminf {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a ≤ v a)
(hu : f.is_bounded_under (≥) u . is_bounded_default)
(hv : f.is_cobounded_under (≥) v . is_bounded_default) :
f.liminf u ≤ f.liminf v :=
@limsup_le_limsup (order_dual β) α _ _ _ _ h hv hu
lemma limsup_le_limsup_of_le {α β} [conditionally_complete_lattice β] {f g : filter α} (h : f ≤ g)
{u : α → β} (hf : f.is_cobounded_under (≤) u . is_bounded_default)
(hg : g.is_bounded_under (≤) u . is_bounded_default) :
f.limsup u ≤ g.limsup u :=
Limsup_le_Limsup_of_le (map_mono h) hf hg
lemma liminf_le_liminf_of_le {α β} [conditionally_complete_lattice β] {f g : filter α} (h : g ≤ f)
{u : α → β} (hf : f.is_bounded_under (≥) u . is_bounded_default)
(hg : g.is_cobounded_under (≥) u . is_bounded_default) :
f.liminf u ≤ g.liminf u :=
Liminf_le_Liminf_of_le (map_mono h) hf hg
theorem Limsup_principal {s : set α} (h : bdd_above s) (hs : s.nonempty) :
(𝓟 s).Limsup = Sup s :=
by simp [Limsup]; exact cInf_upper_bounds_eq_cSup h hs
theorem Liminf_principal {s : set α} (h : bdd_below s) (hs : s.nonempty) :
(𝓟 s).Liminf = Inf s :=
@Limsup_principal (order_dual α) _ s h hs
lemma limsup_congr {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a = v a) : limsup f u = limsup f v :=
begin
rw limsup_eq,
congr' with b,
exact eventually_congr (h.mono $ λ x hx, by simp [hx])
end
lemma liminf_congr {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a = v a) : liminf f u = liminf f v :=
@limsup_congr (order_dual β) _ _ _ _ _ h
lemma limsup_const {α : Type*} [conditionally_complete_lattice β] {f : filter α} [ne_bot f]
(b : β) : limsup f (λ x, b) = b :=
by simpa only [limsup_eq, eventually_const] using cInf_Ici
lemma liminf_const {α : Type*} [conditionally_complete_lattice β] {f : filter α} [ne_bot f]
(b : β) : liminf f (λ x, b) = b :=
@limsup_const (order_dual β) α _ f _ b
lemma liminf_le_limsup {f : filter β} [ne_bot f] {u : β → α}
(h : f.is_bounded_under (≤) u . is_bounded_default)
(h' : f.is_bounded_under (≥) u . is_bounded_default) :
liminf f u ≤ limsup f u :=
Liminf_le_Limsup h h'
end conditionally_complete_lattice
section complete_lattice
variables [complete_lattice α]
@[simp] theorem Limsup_bot : (⊥ : filter α).Limsup = ⊥ :=
bot_unique $ Inf_le $ by simp
@[simp] theorem Liminf_bot : (⊥ : filter α).Liminf = ⊤ :=
top_unique $ le_Sup $ by simp
@[simp] theorem Limsup_top : (⊤ : filter α).Limsup = ⊤ :=
top_unique $ le_Inf $
by simp [eq_univ_iff_forall]; exact assume b hb, (top_unique $ hb _)
@[simp] theorem Liminf_top : (⊤ : filter α).Liminf = ⊥ :=
bot_unique $ Sup_le $
by simp [eq_univ_iff_forall]; exact assume b hb, (bot_unique $ hb _)
/-- Same as limsup_const applied to `⊥` but without the `ne_bot f` assumption -/
lemma limsup_const_bot {f : filter β} : limsup f (λ x : β, (⊥ : α)) = (⊥ : α) :=
begin
rw [limsup_eq, eq_bot_iff],
exact Inf_le (eventually_of_forall (λ x, le_refl _)),
end
/-- Same as limsup_const applied to `⊤` but without the `ne_bot f` assumption -/
lemma liminf_const_top {f : filter β} : liminf f (λ x : β, (⊤ : α)) = (⊤ : α) :=
@limsup_const_bot (order_dual α) β _ _
theorem has_basis.Limsup_eq_infi_Sup {ι} {p : ι → Prop} {s} {f : filter α} (h : f.has_basis p s) :
f.Limsup = ⨅ i (hi : p i), Sup (s i) :=
le_antisymm
(le_binfi $ λ i hi, Inf_le $ h.eventually_iff.2 ⟨i, hi, λ x, le_Sup⟩)
(le_Inf $ assume a ha, let ⟨i, hi, ha⟩ := h.eventually_iff.1 ha in
infi_le_of_le _ $ infi_le_of_le hi $ Sup_le ha)
theorem has_basis.Liminf_eq_supr_Inf {p : ι → Prop} {s : ι → set α} {f : filter α}
(h : f.has_basis p s) : f.Liminf = ⨆ i (hi : p i), Inf (s i) :=
@has_basis.Limsup_eq_infi_Sup (order_dual α) _ _ _ _ _ h
theorem Limsup_eq_infi_Sup {f : filter α} : f.Limsup = ⨅ s ∈ f, Sup s :=
f.basis_sets.Limsup_eq_infi_Sup
theorem Liminf_eq_supr_Inf {f : filter α} : f.Liminf = ⨆ s ∈ f, Inf s :=
@Limsup_eq_infi_Sup (order_dual α) _ _
/-- In a complete lattice, the limsup of a function is the infimum over sets `s` in the filter
of the supremum of the function over `s` -/
theorem limsup_eq_infi_supr {f : filter β} {u : β → α} : f.limsup u = ⨅ s ∈ f, ⨆ a ∈ s, u a :=
(f.basis_sets.map u).Limsup_eq_infi_Sup.trans $
by simp only [Sup_image, id]
lemma limsup_eq_infi_supr_of_nat {u : ℕ → α} : limsup at_top u = ⨅ n : ℕ, ⨆ i ≥ n, u i :=
(at_top_basis.map u).Limsup_eq_infi_Sup.trans $
by simp only [Sup_image, infi_const]; refl
lemma limsup_eq_infi_supr_of_nat' {u : ℕ → α} : limsup at_top u = ⨅ n : ℕ, ⨆ i : ℕ, u (i + n) :=
by simp only [limsup_eq_infi_supr_of_nat, supr_ge_eq_supr_nat_add]
theorem has_basis.limsup_eq_infi_supr {p : ι → Prop} {s : ι → set β} {f : filter β} {u : β → α}
(h : f.has_basis p s) : f.limsup u = ⨅ i (hi : p i), ⨆ a ∈ s i, u a :=
(h.map u).Limsup_eq_infi_Sup.trans $ by simp only [Sup_image, id]
/-- In a complete lattice, the liminf of a function is the infimum over sets `s` in the filter
of the supremum of the function over `s` -/
theorem liminf_eq_supr_infi {f : filter β} {u : β → α} : f.liminf u = ⨆ s ∈ f, ⨅ a ∈ s, u a :=
@limsup_eq_infi_supr (order_dual α) β _ _ _
lemma liminf_eq_supr_infi_of_nat {u : ℕ → α} : liminf at_top u = ⨆ n : ℕ, ⨅ i ≥ n, u i :=
@limsup_eq_infi_supr_of_nat (order_dual α) _ u
lemma liminf_eq_supr_infi_of_nat' {u : ℕ → α} : liminf at_top u = ⨆ n : ℕ, ⨅ i : ℕ, u (i + n) :=
@limsup_eq_infi_supr_of_nat' (order_dual α) _ _
theorem has_basis.liminf_eq_supr_infi {p : ι → Prop} {s : ι → set β} {f : filter β} {u : β → α}
(h : f.has_basis p s) : f.liminf u = ⨆ i (hi : p i), ⨅ a ∈ s i, u a :=
@has_basis.limsup_eq_infi_supr (order_dual α) _ _ _ _ _ _ _ h
@[simp] lemma liminf_nat_add (f : ℕ → α) (k : ℕ) :
at_top.liminf (λ i, f (i + k)) = at_top.liminf f :=
by { simp_rw liminf_eq_supr_infi_of_nat, exact supr_infi_ge_nat_add f k }
@[simp] lemma limsup_nat_add (f : ℕ → α) (k : ℕ) :
at_top.limsup (λ i, f (i + k)) = at_top.limsup f :=
@liminf_nat_add (order_dual α) _ f k
lemma liminf_le_of_frequently_le' {α β} [complete_lattice β]
{f : filter α} {u : α → β} {x : β} (h : ∃ᶠ a in f, u a ≤ x) :
f.liminf u ≤ x :=
begin
rw liminf_eq,
refine Sup_le (λ b hb, _),
have hbx : ∃ᶠ a in f, b ≤ x,
{ revert h,
rw [←not_imp_not, not_frequently, not_frequently],
exact λ h, hb.mp (h.mono (λ a hbx hba hax, hbx (hba.trans hax))), },
exact hbx.exists.some_spec,
end
lemma le_limsup_of_frequently_le' {α β} [complete_lattice β]
{f : filter α} {u : α → β} {x : β} (h : ∃ᶠ a in f, x ≤ u a) :
x ≤ f.limsup u :=
@liminf_le_of_frequently_le' _ (order_dual β) _ _ _ _ h
end complete_lattice
section conditionally_complete_linear_order
lemma eventually_lt_of_lt_liminf {f : filter α} [conditionally_complete_linear_order β]
{u : α → β} {b : β} (h : b < liminf f u) (hu : f.is_bounded_under (≥) u . is_bounded_default) :
∀ᶠ a in f, b < u a :=
begin
obtain ⟨c, hc, hbc⟩ : ∃ (c : β) (hc : c ∈ {c : β | ∀ᶠ (n : α) in f, c ≤ u n}), b < c :=
exists_lt_of_lt_cSup hu h,
exact hc.mono (λ x hx, lt_of_lt_of_le hbc hx)
end
lemma eventually_lt_of_limsup_lt {f : filter α} [conditionally_complete_linear_order β]
{u : α → β} {b : β} (h : limsup f u < b) (hu : f.is_bounded_under (≤) u . is_bounded_default) :
∀ᶠ a in f, u a < b :=
@eventually_lt_of_lt_liminf _ (order_dual β) _ _ _ _ h hu
lemma liminf_le_of_frequently_le {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β} (hu_le : ∃ᶠ x in f, u x ≤ b)
(hu : f.is_bounded_under (≥) u . is_bounded_default) :
f.liminf u ≤ b :=
@le_limsup_of_frequently_le _ (order_dual β) _ f u b hu_le hu
lemma frequently_lt_of_lt_limsup {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β}
(hu : f.is_cobounded_under (≤) u . is_bounded_default) (h : b < f.limsup u) :
∃ᶠ x in f, b < u x :=
begin
contrapose! h,
apply Limsup_le_of_le hu,
simpa using h,
end
lemma frequently_lt_of_liminf_lt {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β}
(hu : f.is_cobounded_under (≥) u . is_bounded_default) (h : f.liminf u < b) :
∃ᶠ x in f, u x < b :=
@frequently_lt_of_lt_limsup _ (order_dual β) _ f u b hu h
end conditionally_complete_linear_order
end filter
section order
open filter
lemma galois_connection.l_limsup_le {α β γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {v : α → β}
{l : β → γ} {u : γ → β} (gc : galois_connection l u)
(hlv : f.is_bounded_under (≤) (λ x, l (v x)) . is_bounded_default)
(hv_co : f.is_cobounded_under (≤) v . is_bounded_default) :
l (f.limsup v) ≤ f.limsup (λ x, l (v x)) :=
begin
refine le_Limsup_of_le hlv (λ c hc, _),
rw filter.eventually_map at hc,
simp_rw (gc _ _) at hc ⊢,
exact Limsup_le_of_le hv_co hc,
end
lemma order_iso.limsup_apply {γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {u : α → β} (g : β ≃o γ)
(hu : f.is_bounded_under (≤) u . is_bounded_default)
(hu_co : f.is_cobounded_under (≤) u . is_bounded_default)
(hgu : f.is_bounded_under (≤) (λ x, g (u x)) . is_bounded_default)
(hgu_co : f.is_cobounded_under (≤) (λ x, g (u x)) . is_bounded_default) :
g (f.limsup u) = f.limsup (λ x, g (u x)) :=
begin
refine le_antisymm (g.to_galois_connection.l_limsup_le hgu hu_co) _,
rw [←(g.symm.symm_apply_apply (f.limsup (λ (x : α), g (u x)))), g.symm_symm],
refine g.monotone _,
have hf : u = λ i, g.symm (g (u i)), from funext (λ i, (g.symm_apply_apply (u i)).symm),
nth_rewrite 0 hf,
refine g.symm.to_galois_connection.l_limsup_le _ hgu_co,
simp_rw g.symm_apply_apply,
exact hu,
end
lemma order_iso.liminf_apply {γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {u : α → β} (g : β ≃o γ)
(hu : f.is_bounded_under (≥) u . is_bounded_default)
(hu_co : f.is_cobounded_under (≥) u . is_bounded_default)
(hgu : f.is_bounded_under (≥) (λ x, g (u x)) . is_bounded_default)
(hgu_co : f.is_cobounded_under (≥) (λ x, g (u x)) . is_bounded_default) :
g (f.liminf u) = f.liminf (λ x, g (u x)) :=
@order_iso.limsup_apply α (order_dual β) (order_dual γ) _ _ f u g.dual hu hu_co hgu hgu_co
end order
|
(* Title: ZF/Induct/PropLog.thy
Author: Tobias Nipkow & Lawrence C Paulson
Copyright 1993 University of Cambridge
*)
section {* Meta-theory of propositional logic *}
theory PropLog imports Main begin
text {*
Datatype definition of propositional logic formulae and inductive
definition of the propositional tautologies.
Inductive definition of propositional logic. Soundness and
completeness w.r.t.\ truth-tables.
Prove: If @{text "H |= p"} then @{text "G |= p"} where @{text "G \<in>
Fin(H)"}
*}
subsection {* The datatype of propositions *}
consts
propn :: i
datatype propn =
Fls
| Var ("n \<in> nat") ("#_" [100] 100)
| Imp ("p \<in> propn", "q \<in> propn") (infixr "=>" 90)
subsection {* The proof system *}
consts thms :: "i => i"
abbreviation
thms_syntax :: "[i,i] => o" (infixl "|-" 50)
where "H |- p == p \<in> thms(H)"
inductive
domains "thms(H)" \<subseteq> "propn"
intros
H: "[| p \<in> H; p \<in> propn |] ==> H |- p"
K: "[| p \<in> propn; q \<in> propn |] ==> H |- p=>q=>p"
S: "[| p \<in> propn; q \<in> propn; r \<in> propn |]
==> H |- (p=>q=>r) => (p=>q) => p=>r"
DN: "p \<in> propn ==> H |- ((p=>Fls) => Fls) => p"
MP: "[| H |- p=>q; H |- p; p \<in> propn; q \<in> propn |] ==> H |- q"
type_intros "propn.intros"
declare propn.intros [simp]
subsection {* The semantics *}
subsubsection {* Semantics of propositional logic. *}
consts
is_true_fun :: "[i,i] => i"
primrec
"is_true_fun(Fls, t) = 0"
"is_true_fun(Var(v), t) = (if v \<in> t then 1 else 0)"
"is_true_fun(p=>q, t) = (if is_true_fun(p,t) = 1 then is_true_fun(q,t) else 1)"
definition
is_true :: "[i,i] => o" where
"is_true(p,t) == is_true_fun(p,t) = 1"
-- {* this definition is required since predicates can't be recursive *}
lemma is_true_Fls [simp]: "is_true(Fls,t) \<longleftrightarrow> False"
by (simp add: is_true_def)
lemma is_true_Var [simp]: "is_true(#v,t) \<longleftrightarrow> v \<in> t"
by (simp add: is_true_def)
lemma is_true_Imp [simp]: "is_true(p=>q,t) \<longleftrightarrow> (is_true(p,t)\<longrightarrow>is_true(q,t))"
by (simp add: is_true_def)
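text {*
  A small sanity check (an illustrative example, not part of the original
  theory): negation is encoded as @{text "p=>Fls"}, and it evaluates as
  expected under a valuation @{text t}.
*}
lemma "is_true(#v => Fls, t) \<longleftrightarrow> v \<notin> t"
by simp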
subsubsection {* Logical consequence *}
text {*
For every valuation, if all elements of @{text H} are true then so
is @{text p}.
*}
definition
logcon :: "[i,i] => o" (infixl "|=" 50) where
"H |= p == \<forall>t. (\<forall>q \<in> H. is_true(q,t)) \<longrightarrow> is_true(p,t)"
text {*
A finite set of hypotheses from @{text t} and the @{text Var}s in
@{text p}.
*}
consts
hyps :: "[i,i] => i"
primrec
"hyps(Fls, t) = 0"
"hyps(Var(v), t) = (if v \<in> t then {#v} else {#v=>Fls})"
"hyps(p=>q, t) = hyps(p,t) \<union> hyps(q,t)"
subsection {* Proof theory of propositional logic *}
lemma thms_mono: "G \<subseteq> H ==> thms(G) \<subseteq> thms(H)"
apply (unfold thms.defs)
apply (rule lfp_mono)
apply (rule thms.bnd_mono)+
apply (assumption | rule univ_mono basic_monos)+
done
lemmas thms_in_pl = thms.dom_subset [THEN subsetD]
inductive_cases ImpE: "p=>q \<in> propn"
lemma thms_MP: "[| H |- p=>q; H |- p |] ==> H |- q"
-- {* Stronger Modus Ponens rule: no typechecking! *}
apply (rule thms.MP)
apply (erule asm_rl thms_in_pl thms_in_pl [THEN ImpE])+
done
lemma thms_I: "p \<in> propn ==> H |- p=>p"
-- {*Rule is called @{text I} for Identity Combinator, not for Introduction. *}
apply (rule thms.S [THEN thms_MP, THEN thms_MP])
apply (rule_tac [5] thms.K)
apply (rule_tac [4] thms.K)
apply simp_all
done
subsubsection {* Weakening, left and right *}
lemma weaken_left: "[| G \<subseteq> H; G|-p |] ==> H|-p"
-- {* Order of premises is convenient with @{text THEN} *}
by (erule thms_mono [THEN subsetD])
lemma weaken_left_cons: "H |- p ==> cons(a,H) |- p"
by (erule subset_consI [THEN weaken_left])
lemmas weaken_left_Un1 = Un_upper1 [THEN weaken_left]
lemmas weaken_left_Un2 = Un_upper2 [THEN weaken_left]
lemma weaken_right: "[| H |- q; p \<in> propn |] ==> H |- p=>q"
by (simp_all add: thms.K [THEN thms_MP] thms_in_pl)
subsubsection {* The deduction theorem *}
theorem deduction: "[| cons(p,H) |- q; p \<in> propn |] ==> H |- p=>q"
apply (erule thms.induct)
apply (blast intro: thms_I thms.H [THEN weaken_right])
apply (blast intro: thms.K [THEN weaken_right])
apply (blast intro: thms.S [THEN weaken_right])
apply (blast intro: thms.DN [THEN weaken_right])
apply (blast intro: thms.S [THEN thms_MP [THEN thms_MP]])
done
subsubsection {* The cut rule *}
lemma cut: "[| H|-p; cons(p,H) |- q |] ==> H |- q"
apply (rule deduction [THEN thms_MP])
apply (simp_all add: thms_in_pl)
done
lemma thms_FlsE: "[| H |- Fls; p \<in> propn |] ==> H |- p"
apply (rule thms.DN [THEN thms_MP])
apply (rule_tac [2] weaken_right)
apply (simp_all add: propn.intros)
done
lemma thms_notE: "[| H |- p=>Fls; H |- p; q \<in> propn |] ==> H |- q"
by (erule thms_MP [THEN thms_FlsE])
subsubsection {* Soundness of the rules wrt truth-table semantics *}
theorem soundness: "H |- p ==> H |= p"
apply (unfold logcon_def)
apply (induct set: thms)
apply auto
done
subsection {* Completeness *}
subsubsection {* Towards the completeness proof *}
lemma Fls_Imp: "[| H |- p=>Fls; q \<in> propn |] ==> H |- p=>q"
apply (frule thms_in_pl)
apply (rule deduction)
apply (rule weaken_left_cons [THEN thms_notE])
apply (blast intro: thms.H elim: ImpE)+
done
lemma Imp_Fls: "[| H |- p; H |- q=>Fls |] ==> H |- (p=>q)=>Fls"
apply (frule thms_in_pl)
apply (frule thms_in_pl [of concl: "q=>Fls"])
apply (rule deduction)
apply (erule weaken_left_cons [THEN thms_MP])
apply (rule consI1 [THEN thms.H, THEN thms_MP])
apply (blast intro: weaken_left_cons elim: ImpE)+
done
lemma hyps_thms_if:
"p \<in> propn ==> hyps(p,t) |- (if is_true(p,t) then p else p=>Fls)"
-- {* Typical example of strengthening the induction statement. *}
apply simp
apply (induct_tac p)
apply (simp_all add: thms_I thms.H)
apply (safe elim!: Fls_Imp [THEN weaken_left_Un1] Fls_Imp [THEN weaken_left_Un2])
apply (blast intro: weaken_left_Un1 weaken_left_Un2 weaken_right Imp_Fls)+
done
lemma logcon_thms_p: "[| p \<in> propn; 0 |= p |] ==> hyps(p,t) |- p"
-- {* Key lemma for completeness; yields a set of assumptions satisfying @{text p} *}
apply (drule hyps_thms_if)
apply (simp add: logcon_def)
done
text {*
For proving certain theorems in our new propositional logic.
*}
lemmas propn_SIs = propn.intros deduction
and propn_Is = thms_in_pl thms.H thms.H [THEN thms_MP]
text {*
The excluded middle in the form of an elimination rule.
*}
lemma thms_excluded_middle:
"[| p \<in> propn; q \<in> propn |] ==> H |- (p=>q) => ((p=>Fls)=>q) => q"
apply (rule deduction [THEN deduction])
apply (rule thms.DN [THEN thms_MP])
apply (best intro!: propn_SIs intro: propn_Is)+
done
lemma thms_excluded_middle_rule:
"[| cons(p,H) |- q; cons(p=>Fls,H) |- q; p \<in> propn |] ==> H |- q"
-- {* Hard to prove directly because it requires cuts *}
apply (rule thms_excluded_middle [THEN thms_MP, THEN thms_MP])
apply (blast intro!: propn_SIs intro: propn_Is)+
done
subsubsection {* Completeness -- lemmas for reducing the set of assumptions *}
text {*
For the case @{prop "hyps(p,t)-cons(#v,Y) |- p"} we also have @{prop
"hyps(p,t)-{#v} \<subseteq> hyps(p, t-{v})"}.
*}
lemma hyps_Diff:
"p \<in> propn ==> hyps(p, t-{v}) \<subseteq> cons(#v=>Fls, hyps(p,t)-{#v})"
by (induct set: propn) auto
text {*
For the case @{prop "hyps(p,t)-cons(#v => Fls,Y) |- p"} we also have
@{prop "hyps(p,t)-{#v=>Fls} \<subseteq> hyps(p, cons(v,t))"}.
*}
lemma hyps_cons:
"p \<in> propn ==> hyps(p, cons(v,t)) \<subseteq> cons(#v, hyps(p,t)-{#v=>Fls})"
by (induct set: propn) auto
text {* Two lemmas for use with @{text weaken_left} *}
lemma cons_Diff_same: "B-C \<subseteq> cons(a, B-cons(a,C))"
by blast
lemma cons_Diff_subset2: "cons(a, B-{c}) - D \<subseteq> cons(a, B-cons(c,D))"
by blast
text {*
The set @{term "hyps(p,t)"} is finite, and elements have the form
@{term "#v"} or @{term "#v=>Fls"}; could probably prove the stronger
@{prop "hyps(p,t) \<in> Fin(hyps(p,0) \<union> hyps(p,nat))"}.
*}
lemma hyps_finite: "p \<in> propn ==> hyps(p,t) \<in> Fin(\<Union>v \<in> nat. {#v, #v=>Fls})"
by (induct set: propn) auto
lemmas Diff_weaken_left = Diff_mono [OF _ subset_refl, THEN weaken_left]
text {*
Induction on the finite set of assumptions @{term "hyps(p,t0)"}. We
may repeatedly subtract assumptions until none are left!
*}
lemma completeness_0_lemma [rule_format]:
"[| p \<in> propn; 0 |= p |] ==> \<forall>t. hyps(p,t) - hyps(p,t0) |- p"
apply (frule hyps_finite)
apply (erule Fin_induct)
apply (simp add: logcon_thms_p Diff_0)
txt {* inductive step *}
apply safe
txt {* Case @{prop "hyps(p,t)-cons(#v,Y) |- p"} *}
apply (rule thms_excluded_middle_rule)
apply (erule_tac [3] propn.intros)
apply (blast intro: cons_Diff_same [THEN weaken_left])
apply (blast intro: cons_Diff_subset2 [THEN weaken_left]
hyps_Diff [THEN Diff_weaken_left])
txt {* Case @{prop "hyps(p,t)-cons(#v => Fls,Y) |- p"} *}
apply (rule thms_excluded_middle_rule)
apply (erule_tac [3] propn.intros)
apply (blast intro: cons_Diff_subset2 [THEN weaken_left]
hyps_cons [THEN Diff_weaken_left])
apply (blast intro: cons_Diff_same [THEN weaken_left])
done
subsubsection {* Completeness theorem *}
lemma completeness_0: "[| p \<in> propn; 0 |= p |] ==> 0 |- p"
-- {* The base case for completeness *}
apply (rule Diff_cancel [THEN subst])
apply (blast intro: completeness_0_lemma)
done
lemma logcon_Imp: "[| cons(p,H) |= q |] ==> H |= p=>q"
-- {* A semantic analogue of the Deduction Theorem *}
by (simp add: logcon_def)
lemma completeness:
"H \<in> Fin(propn) ==> p \<in> propn \<Longrightarrow> H |= p \<Longrightarrow> H |- p"
apply (induct arbitrary: p set: Fin)
apply (safe intro!: completeness_0)
apply (rule weaken_left_cons [THEN thms_MP])
apply (blast intro!: logcon_Imp propn.intros)
apply (blast intro: propn_Is)
done
theorem thms_iff: "H \<in> Fin(propn) ==> H |- p \<longleftrightarrow> H |= p \<and> p \<in> propn"
by (blast intro: soundness completeness thms_in_pl)
end
|