Dataset columns:
  repo_name: string (length 6 to 112)
  path: string (length 4 to 204)
  copies: string (length 1 to 3)
  size: string (length 4 to 6)
  content: string (length 714 to 810k)
  license: string (15 classes)
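As a minimal, hypothetical sketch of how rows with this schema could be loaded and inspected (the file name code_files.jsonl is a placeholder for a local JSON Lines export, not part of the dataset itself):

import pandas as pd

# Load a JSON Lines export of the records below (placeholder file name).
df = pd.read_json("code_files.jsonl", lines=True)

# One row per source file: repo_name, path, copies, size, content, license.
print(df["license"].value_counts())

# Recover a single file's source text from the 'content' column.
row = df.iloc[0]
print(row["repo_name"], row["path"])
print(row["content"][:200])

The records themselves follow below.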
arahuja/scikit-learn
examples/cluster/plot_digits_agglomeration.py
377
1694
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================

These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph

digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)

agglo = cluster.FeatureAgglomeration(connectivity=connectivity, n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)

X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)

plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
    plt.subplot(3, 4, i + 1)
    plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    if i == 1:
        plt.title('Original data')
    plt.subplot(3, 4, 4 + i + 1)
    plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
               interpolation='nearest')
    if i == 1:
        plt.title('Agglomerated data')
    plt.xticks(())
    plt.yticks(())

plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
           interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
bsd-3-clause
magland/mountainsort
packages/pyms/synthesis/synthesize_single_waveform.py
1
2355
import numpy as np


def exp_growth(amp1, amp2, dur1, dur2):
    t = np.arange(0, dur1)
    Y = np.exp(t / dur2)
    # Want Y[0]=amp1
    # Want Y[-1]=amp2
    Y = Y / (Y[-1] - Y[0]) * (amp2 - amp1)
    Y = Y - Y[0] + amp1
    return Y


def exp_decay(amp1, amp2, dur1, dur2):
    Y = exp_growth(amp2, amp1, dur1, dur2)
    Y = np.flipud(Y)  # used to be flip, but that was not supported by older versions of numpy
    return Y


def smooth_it(Y, t):
    Z = np.zeros(Y.size)
    for j in range(-t, t + 1):
        Z = Z + np.roll(Y, j)
    return Z


def synthesize_single_waveform(*, N=800, durations=[200, 10, 30, 200], amps=[0.5, 10, -1, 0]):
    durations = np.array(durations).ravel()
    if np.sum(durations) >= N - 2:
        durations[-1] = N - 2 - np.sum(durations[0:durations.size - 1])
    amps = np.array(amps).ravel()

    timepoints = np.round(np.hstack((0, np.cumsum(durations) - 1))).astype('int')
    t = np.r_[0:np.sum(durations) + 1]

    Y = np.zeros(len(t))
    Y[timepoints[0]:timepoints[1] + 1] = exp_growth(0, amps[0], timepoints[1] + 1 - timepoints[0], durations[0] / 4)
    Y[timepoints[1]:timepoints[2] + 1] = exp_growth(amps[0], amps[1], timepoints[2] + 1 - timepoints[1], durations[1])
    Y[timepoints[2]:timepoints[3] + 1] = exp_decay(amps[1], amps[2], timepoints[3] + 1 - timepoints[2], durations[2] / 4)
    Y[timepoints[3]:timepoints[4] + 1] = exp_decay(amps[2], amps[3], timepoints[4] + 1 - timepoints[3], durations[3] / 5)
    Y = smooth_it(Y, 3)
    Y = Y - np.linspace(Y[0], Y[-1], len(t))
    Y = np.hstack((Y, np.zeros(N - len(t))))
    Nmid = int(np.floor(N / 2))
    peakind = np.argmax(np.abs(Y))
    Y = np.roll(Y, Nmid - peakind)
    return Y

# Original MATLAB version of the same routines, kept as a reference:
# Y=smooth_it(Y,3);
# Y=Y-linspace(Y(1),Y(end),length(Y));
#
# Y=[Y,zeros(1,N-length(Y))];
#
# Nmid=floor(N/2);
# [~,peakind]=max(abs(Y));
# Y=circshift(Y,[0,Nmid-peakind]);
#
# end
#
# function test_synth_waveform
# Y=synthesize_single_waveform(800);
# figure; plot(Y);
# end
#
# function Y=exp_growth(amp1,amp2,dur1,dur2)
# t=1:dur1;
# Y=exp(t/dur2);
# % Want Y(1)=amp1
# % Want Y(end)=amp2
# Y=Y/(Y(end)-Y(1))*(amp2-amp1);
# Y=Y-Y(1)+amp1;
# end
#
# function Y=exp_decay(amp1,amp2,dur1,dur2)
# Y=exp_growth(amp2,amp1,dur1,dur2);
# Y=Y(end:-1:1);
# end
#
# function Z=smooth_it(Y,t)
# Z=Y;
# Z(1+t:end-t)=0;
# for j=-t:t
#   Z(1+t:end-t)=Z(1+t:end-t)+Y(1+t+j:end-t+j)/(2*t+1);
# end;
# end


if __name__ == '__main__':
    Y = synthesize_single_waveform()
    import matplotlib.pyplot as plt
    plt.plot(Y)
mit
abhishekgahlot/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================

Due to the few points in each dimension and the straight line that
linear regression uses to follow these points as well as it can, noise
on the observations will cause great variance as shown in the first
plot. Every line's slope can vary quite a bit for each prediction due
to the noise induced in the observations.

Ridge regression is basically minimizing a penalised version of the
least-squares function. The penalty term shrinks the value of the
regression coefficients. Despite the few data points in each dimension,
the slope of the prediction is much more stable and the variance in the
line itself is greatly reduced, in comparison to that of the standard
linear regression.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T

np.random.seed(0)

classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

fignum = 1
for name, clf in classifiers.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.title(name)
    ax = plt.axes([.12, .12, .8, .8])

    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)

        ax.plot(X_test, clf.predict(X_test), color='.5')
        ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)

    clf.fit(X_train, y_train)
    ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)

    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_ylim((0, 1.6))
    ax.set_xlabel('X')
    ax.set_ylabel('y')
    ax.set_xlim(0, 2)
    fignum += 1

plt.show()
bsd-3-clause
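For reference, the "penalised version of the least-squares function" mentioned in the docstring above can be written out explicitly; in the usual formulation (with alpha corresponding to the `alpha` parameter of `linear_model.Ridge`), ridge regression solves

\min_w \; \lVert X w - y \rVert_2^2 + \alpha \, \lVert w \rVert_2^2 ,

and setting \alpha = 0 recovers ordinary least squares, which is why the ridge fits drawn in the example vary less across noisy resamples than the OLS fits.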
google-research/google-research
reset_free_learning/utils/metrics.py
1
16606
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Additional metrics for reset-free learning.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import io import matplotlib matplotlib.use('Agg') import matplotlib.pylab as plt # pylint: disable=g-import-not-at-top import numpy as np import seaborn as sns import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import from tf_agents.metrics import tf_metric from tf_agents.metrics.tf_metrics import TFDeque from tf_agents.utils import common class FailedEpisodes(tf_metric.TFStepMetric): """Counts the number of episodes ending in failure / requiring human intervention.""" def __init__(self, failure_function, name='FailedEpisodes', prefix='Metrics', dtype=tf.int64): super(FailedEpisodes, self).__init__(name=name, prefix=prefix) self.dtype = dtype self._failure_function = failure_function self.number_failed_episodes = common.create_variable( initial_value=0, dtype=self.dtype, shape=(), name='number_failed_episodes') def call(self, trajectory): """Increase the number of number_failed_episodes according to trajectory's final reward. It would increase for all trajectory.is_last(). Args: trajectory: A tf_agents.trajectory.Trajectory Returns: The arguments, for easy chaining. """ # The __call__ will execute this. num_failed_episodes = tf.cast( self._failure_function(trajectory), self.dtype) num_failed_episodes = tf.reduce_sum(input_tensor=num_failed_episodes) self.number_failed_episodes.assign_add(num_failed_episodes) return trajectory def result(self): return tf.identity(self.number_failed_episodes, name=self.name) @common.function def reset(self): self.number_failed_episodes.assign(0) class AnyStepGoalMetric(tf_metric.TFStepMetric): """Counts the number of episodes ending in failure / requiring human intervention.""" def __init__(self, goal_success_fn, name='GoalSuccess', prefix='Metrics', batch_size=1, dtype=tf.int64): super(AnyStepGoalMetric, self).__init__(name=name, prefix=prefix) self.dtype = dtype self._goal_success_fn = goal_success_fn self._new_ep_and_goal_not_seen = tf.constant(False) self.number_successful_episodes = common.create_variable( initial_value=0, dtype=self.dtype, shape=(), name='num_successful_episodes') self._success_accumulator = common.create_variable( initial_value=0, dtype=self.dtype, shape=(batch_size,), name='num_successful_episodes') def call(self, trajectory): """If the agent is successful at any step in the episode, the metric increases by 1, else 0. Args: trajectory: A tf_agents.trajectory.Trajectory Returns: The arguments, for easy chaining. 
""" self._success_accumulator.assign( tf.where(trajectory.is_first(), tf.zeros_like(self._success_accumulator), self._success_accumulator)) self._success_accumulator.assign_add( tf.cast(self._goal_success_fn(trajectory), self.dtype)) self.number_successful_episodes.assign_add( tf.cast( tf.reduce_sum( tf.where( tf.logical_and( tf.greater(self._success_accumulator, 0), trajectory.is_last()), 1, 0)), self.dtype)) return trajectory def result(self): return tf.identity(self.number_successful_episodes, name=self.name) @common.function def reset(self): self.number_successful_episodes.assign(0) self._success_accumulator.assign([0]) class StateVisitationHistogram(tf_metric.TFHistogramStepMetric): """Metric to compute the frequency of states visited.""" def __init__(self, state_selection_function, state_shape=(), name='StateVisitationHistogram', dtype=tf.float64, buffer_size=100): super(StateVisitationHistogram, self).__init__(name=name) self._buffer = TFDeque(buffer_size, dtype, shape=state_shape) self._dtype = dtype self._state_selection_function = state_selection_function @common.function def call(self, trajectory): self._buffer.extend(self._state_selection_function(trajectory.observation)) return trajectory @common.function def result(self): return self._buffer.data @common.function def reset(self): self._buffer.clear() class StateVisitationHeatmap(tf_metric.TFStepMetric): def __init__( self, trajectory_to_xypos, # acts on trajectory.observation state_max=None, x_range=None, y_range=None, num_bins=10, # per axis name='StateVisitationHeatmap', prefix='Metrics', dtype=tf.int64): super(StateVisitationHeatmap, self).__init__(name=name, prefix=prefix) self.dtype = dtype self._conversion_function = trajectory_to_xypos self._state_max = state_max # either state_max is None or x,y range are None self._x_range = x_range self._y_range = y_range self._num_bins = num_bins self._create_state_visitation_variables() def _find_heatmap_key(self, pos): if self._state_max is not None: x_key = tf.cast( tf.clip_by_value( tf.math.floor((pos[0, 0] + self._state_max) / self._state_delta), 0, self._num_bins - 1), dtype=tf.int64) y_key = tf.cast( tf.clip_by_value( tf.math.floor((pos[0, 1] + self._state_max) / self._state_delta), 0, self._num_bins - 1), dtype=tf.int64) else: x_key = tf.cast( tf.clip_by_value( tf.math.floor((pos[0, 0] - self._x_low) / self._x_delta), 0, self._num_bins - 1), dtype=tf.int64) y_key = tf.cast( tf.clip_by_value( tf.math.floor((pos[0, 1] - self._y_low) / self._y_delta), 0, self._num_bins - 1), dtype=tf.int64) return (x_key, y_key) def _create_state_visitation_variables(self, reinitialize=False): if not reinitialize: # self._state_visit_dict = {} self._state_visit_tf_array = common.create_variable( initial_value=np.zeros((self._num_bins, self._num_bins)), dtype=self.dtype, shape=(self._num_bins, self._num_bins), name='state_visit_count') if self._state_max is not None: self._state_delta = 2 * self._state_max / self._num_bins self._xticks = [ round(-self._state_max + (2 * x_idx + 1) * self._state_delta / 2, 2) # pylint: disable=invalid-unary-operand-type for x_idx in range(self._num_bins) ] self._yticks = [ round(-self._state_max + (2 * y_idx + 1) * self._state_delta / 2, 2) # pylint: disable=invalid-unary-operand-type for y_idx in range(self._num_bins) ] else: self._x_low, self._x_high = self._x_range self._y_low, self._y_high = self._y_range self._x_delta = (self._x_high - self._x_low) / self._num_bins self._y_delta = (self._y_high - self._y_low) / self._num_bins self._xticks = [ 
round(self._x_low + (2 * x_idx + 1) * self._x_delta / 2, 2) for x_idx in range(self._num_bins) ] self._yticks = [ round(self._y_low + (2 * y_idx + 1) * self._y_delta / 2, 2) for y_idx in range(self._num_bins) ] # for x_idx in range(self._num_bins): # x_low = -self._state_max + x_idx * self._state_delta # x_high = -self._state_max + (x_idx + 1) * self._state_delta # for y_idx in range(self._num_bins): # y_low = -self._state_max + y_idx * self._state_delta # y_high = -self._state_max + (y_idx + 1) * self._state_delta # self._state_visit_dict[(x_idx, y_idx)] = [ # (x_low, x_high), # (y_low, y_high), # ] else: self._state_visit_tf_array = common.create_variable( initial_value=np.zeros((self._num_bins, self._num_bins)), dtype=self.dtype, shape=(self._num_bins, self._num_bins), name='state_visit_count') def call(self, trajectory): pos = self._conversion_function(trajectory.observation) key = self._find_heatmap_key(pos) cur_val = self._state_visit_tf_array[key[0], key[1]] self._state_visit_tf_array[key[0], key[1]].assign(cur_val + 1) return trajectory def result(self): figure = plt.figure(figsize=(10, 10)) image_array = self._state_visit_tf_array.numpy() sns.heatmap( image_array, xticklabels=self._yticks, yticklabels=self._xticks, linewidth=0.5) buf = io.BytesIO() plt.savefig(buf, format='png') plt.close(figure) buf.seek(0) image = tf.image.decode_png(buf.getvalue(), channels=4) image = tf.expand_dims(image, 0) return image def tf_summaries(self, train_step=None, step_metrics=()): """Generates summaries against train_step and all step_metrics. Args: train_step: (Optional) Step counter for training iterations. If None, no metric is generated against the global step. step_metrics: (Optional) Iterable of step metrics to generate summaries against. Returns: A list of summaries. """ summaries = [] prefix = self._prefix tag = common.join_scope(prefix, self.name) result = self.result() if train_step is not None: summaries.append( tf.compat.v2.summary.image(name=tag, data=result, step=train_step)) if prefix: prefix += '_' for step_metric in step_metrics: # Skip plotting the metrics against itself. if self.name == step_metric.name: continue step_tag = '{}vs_{}/{}'.format(prefix, step_metric.name, self.name) # Summaries expect the step value to be an int64. 
step = tf.cast(step_metric.result(), tf.int64) summaries.append( tf.compat.v2.summary.image(name=step_tag, data=result, step=step)) return summaries @common.function def reset(self): self._create_state_visitation_variables(reinitialize=True) class ValueFunctionHeatmap(tf_metric.TFStepMetric): def __init__( self, trajectory_to_xypos, # acts on trajectory.observation state_max=None, x_range=None, y_range=None, num_bins=10, # per axis name='ValueFunctionHeatmap', prefix='ResetMetrics', dtype=tf.int64): super(ValueFunctionHeatmap, self).__init__(name=name, prefix=prefix) self.dtype = dtype self._conversion_function = trajectory_to_xypos self._state_max = state_max # either state_max is None or x,y range are None self._x_range = x_range self._y_range = y_range self._num_bins = num_bins self._create_state_visitation_variables() def _find_heatmap_key(self, pos): if self._state_max is not None: x_key = np.clip( np.floor((pos[0, 0] + self._state_max) / self._state_delta), 0, self._num_bins - 1).astype(dtype=np.int64) y_key = np.clip( np.floor((pos[0, 1] + self._state_max) / self._state_delta), 0, self._num_bins - 1).astype(dtype=np.int64) else: x_key = np.clip( np.floor((pos[0, 0] - self._x_low) / self._x_delta), 0, self._num_bins - 1).astype(dtype=np.int64) y_key = np.clip( np.floor((pos[0, 1] - self._y_low) / self._y_delta), 0, self._num_bins - 1).astype(dtype=np.int64) return (x_key, y_key) def _create_state_visitation_variables(self, reinitialize=False): self._state_val_array = np.zeros((self._num_bins, self._num_bins)) if not reinitialize: if self._state_max is not None: self._state_delta = 2 * self._state_max / self._num_bins self._xticks = [ round(-self._state_max + (2 * x_idx + 1) * self._state_delta / 2, 2) # pylint: disable=invalid-unary-operand-type for x_idx in range(self._num_bins) ] self._yticks = [ round(-self._state_max + (2 * y_idx + 1) * self._state_delta / 2, 2) # pylint: disable=invalid-unary-operand-type for y_idx in range(self._num_bins) ] else: self._x_low, self._x_high = self._x_range self._y_low, self._y_high = self._y_range self._x_delta = (self._x_high - self._x_low) / self._num_bins self._y_delta = (self._y_high - self._y_low) / self._num_bins self._xticks = [ round(self._x_low + (2 * x_idx + 1) * self._x_delta / 2, 2) for x_idx in range(self._num_bins) ] self._yticks = [ round(self._y_low + (2 * y_idx + 1) * self._y_delta / 2, 2) for y_idx in range(self._num_bins) ] def result(self, reset_states, values): figure = plt.figure(figsize=(10, 10)) reset_states = reset_states.numpy() xy_pos = self._conversion_function(reset_states) values = values.numpy() discretized_state_value_lists = [ [] for _ in range(self._num_bins * self._num_bins) ] for idx in range(xy_pos.shape[0]): x_key, y_key = self._find_heatmap_key(xy_pos[idx:idx + 1]) discretized_state_value_lists[x_key * self._num_bins + y_key].append( values[idx]) val_min = np.inf val_max = -np.inf for x_idx in range(self._num_bins): for y_idx in range(self._num_bins): cur_val_list = discretized_state_value_lists[x_idx * self._num_bins + y_idx] if cur_val_list: mean_value = np.mean(cur_val_list) self._state_val_array[x_idx, y_idx] = mean_value val_min = min(val_min, mean_value) val_max = max(val_max, mean_value) else: self._state_val_array[x_idx, y_idx] = -np.inf # ticklabels and matrix indices are reversed sns.heatmap( self._state_val_array, vmin=val_min, vmax=val_max, xticklabels=self._yticks, yticklabels=self._xticks, linewidth=0.5) buf = io.BytesIO() plt.savefig(buf, format='png') plt.close(figure) buf.seek(0) image = 
tf.image.decode_png(buf.getvalue(), channels=4) image = tf.expand_dims(image, 0) return image def tf_summaries(self, reset_states, values, train_step=None, step_metrics=()): """Generates summaries against train_step and all step_metrics. Args: reset_states: candidate states for reset values: values assigned by our function train_step: (Optional) Step counter for training iterations. If None, no metric is generated against the global step. step_metrics: (Optional) Iterable of step metrics to generate summaries against. Returns: A list of summaries. """ summaries = [] prefix = self._prefix tag = common.join_scope(prefix, self.name) result = self.result(reset_states, values) if train_step is not None: summaries.append( tf.compat.v2.summary.image(name=tag, data=result, step=train_step)) if prefix: prefix += '_' for step_metric in step_metrics: # Skip plotting the metrics against itself. if self.name == step_metric.name: continue step_tag = '{}vs_{}/{}'.format(prefix, step_metric.name, self.name) # Summaries expect the step value to be an int64. step = tf.cast(step_metric.result(), tf.int64) summaries.append( tf.compat.v2.summary.image(name=step_tag, data=result, step=step)) return summaries @common.function def reset(self): self._create_state_visitation_variables(reinitialize=True)
apache-2.0
idlead/scikit-learn
examples/preprocessing/plot_function_transformer.py
158
1993
""" ========================================================= Using FunctionTransformer to select columns ========================================================= Shows how to use a function transformer in a pipeline. If you know your dataset's first principle component is irrelevant for a classification task, you can use the FunctionTransformer to select all but the first column of the PCA transformed data. """ import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.pipeline import make_pipeline from sklearn.preprocessing import FunctionTransformer def _generate_vector(shift=0.5, noise=15): return np.arange(1000) + (np.random.rand(1000) - shift) * noise def generate_dataset(): """ This dataset is two lines with a slope ~ 1, where one has a y offset of ~100 """ return np.vstack(( np.vstack(( _generate_vector(), _generate_vector() + 100, )).T, np.vstack(( _generate_vector(), _generate_vector(), )).T, )), np.hstack((np.zeros(1000), np.ones(1000))) def all_but_first_column(X): return X[:, 1:] def drop_first_component(X, y): """ Create a pipeline with PCA and the column selector and use it to transform the dataset. """ pipeline = make_pipeline( PCA(), FunctionTransformer(all_but_first_column), ) X_train, X_test, y_train, y_test = train_test_split(X, y) pipeline.fit(X_train, y_train) return pipeline.transform(X_test), y_test if __name__ == '__main__': X, y = generate_dataset() lw = 0 plt.figure() plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw) plt.figure() X_transformed, y_transformed = drop_first_component(*generate_dataset()) plt.scatter( X_transformed[:, 0], np.zeros(len(X_transformed)), c=y_transformed, lw=lw, s=60 ) plt.show()
bsd-3-clause
endrebak/epic
epic/matrixes/matrixes.py
1
9081
import sys import logging from os.path import dirname, join, basename from subprocess import call from itertools import chain from typing import Iterable, Sequence, Tuple from argparse import Namespace import numpy as np import pandas as pd from joblib import Parallel, delayed from natsort import natsorted from epic.windows.count.remove_out_of_bounds_bins import remove_bins_with_ends_out_of_bounds from epic.config.genomes import get_genome_size_file def write_matrix_files(chip_merged, input_merged, df, args): # type: (Dict[str, pd.DataFrame], Dict[str, pd.DataFrame], pd.DataFrame, Namespace) -> None matrixes = create_matrixes(chip_merged, input_merged, df, args) matrix = pd.concat(matrixes, axis=0, sort=False) matrix = matrix.dropna() matrix = matrix.set_index("Chromosome Bin".split()) if args.store_matrix: print_matrixes(matrix, args) if args.bigwig or args.individual_log2fc_bigwigs or args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig: matrix = matrix.astype(np.float64) matrix = matrix.drop("Enriched", axis=1) ends = pd.Series(matrix.index.get_level_values("Bin"), index=matrix.index) + (int(args.window_size) - 1) matrix.insert(0, "End", ends) matrix = matrix.set_index("End", append=True) matrix = matrix.sort_index(level="Chromosome") # TODO: remove out of bounds bins if args.bigwig: # defer initialization so not run during travis from epic.bigwig.create_bigwigs import create_bigwigs create_bigwigs(matrix, args.bigwig, args) if args.individual_log2fc_bigwigs: # defer initialization so not run during travis from epic.bigwig.create_bigwigs import create_log2fc_bigwigs create_log2fc_bigwigs(matrix, args.individual_log2fc_bigwigs, args) if args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig: # defer initialization so not run during travis from epic.bigwig.create_bigwigs import create_sum_bigwigs create_sum_bigwigs(matrix, args) def _create_matrixes(chromosome, chip, input, islands, chromosome_size, window_size): # type: (str, Dict[str, pd.DataFrame], Dict[str, pd.DataFrame], pd.DataFrame, int, int) -> pd.DataFrame # print("islands2\n" + islands.head(10).to_csv(sep=" "), file=sys.stderr) chip_df = get_chromosome_df(chromosome, chip) input_df = get_chromosome_df(chromosome, input) try: chromo_islands = islands.xs(chromosome, drop_level=False) except KeyError: return pd.DataFrame(index="Chromosome Bin".split()) chip_df["Chromosome"] = chip_df["Chromosome"].astype("category") # START workaround # Should ideally have been just one line: chip_df["Bin"] = chip_df["Bin"].astype(int) # Workaround for the following error: # ValueError: assignment destination is read-only bins = chip_df["Bin"].astype(int) chip_df = chip_df.drop("Bin", axis=1) chip_df.insert(0, "Bin", bins) # print("chip_df1\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr) # END workaround chip_df = chip_df.set_index("Chromosome Bin".split()) # print("chilp_df", chip_df.head()) # removing duplicates to avoid joining problems chip_df = chip_df[~chip_df.index.duplicated(keep='first')] chromo_islands = chromo_islands[~chromo_islands.index.duplicated(keep='first')] # chromo_islands.to_csv("chromo_islands.csv", sep=" ") # chip_df.to_csv("chip_df.csv", sep=" ") # print(chromo_islands.head(20).to_csv(sep=" "), file=sys.stderr) # print(chromosome) # print("chip_df2\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr) # print(chromo_islands.head(10).to_csv(sep=" "), file=sys.stderr) chip_df = chromo_islands.join(chip_df, how="outer").fillna(0) # print("chip_df3\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr) # 
print("chip_df", chip_df.tail().to_csv(sep=" "), file=sys.stderr) chip_df = chip_df[~chip_df.index.duplicated(keep='first')] # print("chip_df4\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr) # print("chip_df", chip_df.tail().to_csv(sep=" "), file=sys.stderr) input_df["Chromosome"] = input_df["Chromosome"].astype("category") # START workaround # Should ideally have been just one line: input_df["Bin"] = input_df["Bin"].astype(int) # Workaround for the following error: # ValueError: assignment destination is read-only bins = input_df["Bin"].astype(int) input_df = input_df.drop("Bin", axis=1) input_df.insert(0, "Bin", bins) input_df = input_df.set_index("Chromosome Bin".split()) # END workaround input_df = input_df[~input_df.index.duplicated(keep='first')] dfm = chip_df.join(input_df, how="outer", sort=False).fillna(0) # print("dfm1\n", dfm.head(10).to_csv(sep=" "), file=sys.stderr) dfm = remove_bins_with_ends_out_of_bounds(dfm, chromosome_size, window_size) dfm = dfm[~dfm.index.duplicated(keep='first')] # print("dfm2\n", dfm.head(10).to_csv(sep=" "), file=sys.stderr) # print(dfm.tail().to_csv(sep=" "), file=sys.stderr) # print(dfm.head(), file=sys.stderr) dfm.reset_index(inplace=True) return dfm def create_matrixes(chip, input, df, args): # type: (Iterable[pd.DataFrame], Iterable[pd.DataFrame], pd.DataFrame, Namespace) -> List[pd.DataFrame] "Creates matrixes which can be written to file as is (matrix) or as bedGraph." genome = args.chromosome_sizes chip = put_dfs_in_chromosome_dict(chip) input = put_dfs_in_chromosome_dict(input) all_chromosomes = natsorted(set(list(chip.keys()) + list(input.keys()))) # print("df1\n", df, file=sys.stderr) islands = enriched_bins(df, args) # print("islands1\n", islands, file=sys.stderr) logging.info("Creating matrixes from count data.") dfms = Parallel(n_jobs=args.number_cores)(delayed(_create_matrixes)( chromosome, chip, input, islands, genome[chromosome], args.window_size) for chromosome in all_chromosomes) return dfms def print_matrixes(matrix, args): # type: (Iterable[pd.DataFrame], Namespace) -> None outpath = args.store_matrix dir = dirname(outpath) if dir: call("mkdir -p {}".format(dir), shell=True) logging.info("Writing data matrix to file: " + outpath) matrix.to_csv(outpath, sep=" ", header=True, compression="gzip") def get_island_bins(df, window_size, genome, args): # type: (pd.DataFrame, int, str, Namespace) -> Dict[str, Set[int]] """Finds the enriched bins in a df.""" # need these chromos because the df might not have islands in all chromos chromosomes = natsorted(list(args.chromosome_sizes)) chromosome_island_bins = {} # type: Dict[str, Set[int]] df_copy = df.reset_index(drop=False) for chromosome in chromosomes: cdf = df_copy.loc[df_copy.Chromosome == chromosome] if cdf.empty: chromosome_island_bins[chromosome] = set() else: island_starts_ends = zip(cdf.Start.values.tolist(), cdf.End.values.tolist()) island_bins = chain(*[range( int(start), int(end), window_size) for start, end in island_starts_ends]) chromosome_island_bins[chromosome] = set(island_bins) return chromosome_island_bins def put_dfs_in_dict(dfs): # type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame] sample_dict = {} for df in dfs: if df.empty: continue chromosome = df.head(1).Chromosome.values[0] sample_dict[chromosome] = df return sample_dict def put_dfs_in_chromosome_dict(dfs): # type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame] chromosome_dict = {} # type: Dict[str, pd.DataFrame] for df in dfs: if df.empty: continue chromosome = df.head(1).Chromosome.values[0] 
chromosome_dict[chromosome] = df return chromosome_dict def get_chromosome_df(chromosome, df_dict): # type: (str, Dict[str, pd.DataFrame]) -> pd.DataFrame if chromosome in df_dict: df = df_dict[chromosome] else: df = pd.DataFrame(columns="Chromosome Bin".split()) # print(chromosome, file=sys.stderr) # print(df, file=sys.stderr) return df def enriched_bins(df, args): # type: (pd.DataFrame, Namespace) -> pd.DataFrame df = df.loc[df.FDR < args.false_discovery_rate_cutoff] idx_rowdicts = [] for _, row in df.iterrows(): for bin in range( int(row.Start), int(row.End), int(args.window_size)): idx_rowdicts.append({"Chromosome": row.Chromosome, "Bin": bin, "Enriched": 1}) islands = pd.DataFrame.from_dict(idx_rowdicts) islands.loc[:, "Chromosome"].astype("category") islands.loc[:, "Bin"].astype(int) return islands.set_index("Chromosome Bin".split())
mit
meganbkratz/acq4
acq4/util/matplotlibexporter.py
3
7794
__author__ = 'pbmanis' """ Copyright 2014 Paul Manis and Luke Campagnola Distributed under MIT/X11 license. See license.txt for more infomation. """ import re from PyQt4 import QtGui, QtCore import acq4.pyqtgraph as pg try: import matplotlib as MP from matplotlib.ticker import FormatStrFormatter import matplotlib.pyplot as pylab import matplotlib.gridspec as gridspec import matplotlib.gridspec as GS HAVE_MPL = True except ImportError: HAVE_MPL = False if HAVE_MPL: MP.use('TKAgg') # Do not modify the following code # sets up matplotlib with sans-serif plotting... pylab.rcParams['text.usetex'] = True pylab.rcParams['interactive'] = False pylab.rcParams['font.family'] = 'sans-serif' pylab.rcParams['font.sans-serif'] = 'Arial' pylab.rcParams['mathtext.default'] = 'sf' pylab.rcParams['figure.facecolor'] = 'white' # next setting allows pdf font to be readable in Adobe Illustrator pylab.rcParams['pdf.fonttype'] = 42 pylab.rcParams['text.dvipnghack'] = True # to here (matplotlib stuff - touchy!) stdFont = 'Arial' def cleanRepl(matchobj): """ Clean up a directory name so that it can be written to a matplotlib title without encountering LaTeX escape sequences Replace backslashes with forward slashes replace underscores (subscript) with escaped underscores """ if matchobj.group(0) == '\\': return '/' if matchobj.group(0) == '_': return '\_' if matchobj.group(0) == '/': return '/' else: return '' def matplotlibExport(gridlayout=None, title=None): """ Constructs a matplotlib window that shows the current plots laid out in the same format as the pyqtgraph window You might use this for publication purposes, since matplotlib allows export of the window to a variety of formats, and will contain proper fonts (not "outlined"). Also can be used for automatic generation of PDF files with savefig. :param: QtGridLayout object that specifies how the grid was built The layout will contain pyqtgraph widgets added with .addLayout :return: nothing """ if not HAVE_MPL: raise Exception("Method matplotlibExport requires matplotlib; not importable.") if gridlayout is None or gridlayout.__class__ != QtGui.QGridLayout().__class__: raise Exception("Method matplotlibExport requires a QGridLayout") fig = pylab.figure() pylab.rcParams['text.usetex'] = False # escape filename information so it can be rendered by removing # common characters that trip up latex...: escs = re.compile('[\\\/_]') print title if title is not None: tiname = '%r' % title tiname = re.sub(escs, cleanRepl, tiname)[1:-1] fig.suptitle(r''+tiname) pylab.autoscale(enable=True, axis='both', tight=None) # build the plot based on the grid layout gs = gridspec.GridSpec(gridlayout.rowCount(), gridlayout.columnCount()) # build matplotlib gridspec for i in range(gridlayout.count()): w = gridlayout.itemAt(i).widget() # retrieve the plot widget... 
(x, y, c, r) = gridlayout.getItemPosition(i) # and gridspecs paramters mplax = pylab.subplot(gs[x:(c+x), y:(r+y)]) # map to mpl subplot geometry export_panel(w, mplax) # now fill the plot gs.update(wspace=0.25, hspace=0.5) # adjust spacing # pylab.draw() # hook to save figure - not used here # pylab.savefig(os.path.join(self.commonPrefix, self.protocolfile)) pylab.show() def export_panel(pgitem, ax): """ export_panel recreates the contents of one pyqtgraph plot item into a specified matplotlib axis item :param fileName: :return: """ # get labels from the pyqtgraph graphic item plitem = pgitem.getPlotItem() xlabel = plitem.axes['bottom']['item'].label.toPlainText() ylabel = plitem.axes['left']['item'].label.toPlainText() title = plitem.titleLabel.text fn = pg.functions ax.clear() cleanAxes(ax) # make a "nice" plot for item in plitem.curves: x, y = item.getData() opts = item.opts pen = fn.mkPen(opts['pen']) if pen.style() == QtCore.Qt.NoPen: linestyle = '' else: linestyle = '-' color = tuple([c/255. for c in fn.colorTuple(pen.color())]) symbol = opts['symbol'] if symbol == 't': symbol = '^' symbolPen = fn.mkPen(opts['symbolPen']) symbolBrush = fn.mkBrush(opts['symbolBrush']) markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())]) markerfacecolor = tuple([c/255. for c in fn.colorTuple(symbolBrush.color())]) markersize = opts['symbolSize'] if opts['fillLevel'] is not None and opts['fillBrush'] is not None: fillBrush = fn.mkBrush(opts['fillBrush']) fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())]) ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor) pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(), linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor, markersize=markersize) xr, yr = plitem.viewRange() ax.set_xbound(*xr) ax.set_ybound(*yr) ax.set_xlabel(xlabel) # place the labels. 
ax.set_ylabel(ylabel) # for matplotlib cleanup: # These were borrowed from Manis' "PlotHelpers.py" # def cleanAxes(axl): if type(axl) is not list: axl = [axl] for ax in axl: for loc, spine in ax.spines.iteritems(): if loc in ['left', 'bottom']: pass elif loc in ['right', 'top']: spine.set_color('none') # do not draw the spine else: raise ValueError('Unknown spine location: %s' % loc) # turn off ticks when there is no spine ax.xaxis.set_ticks_position('bottom') # stopped working in matplotlib 1.10 ax.yaxis.set_ticks_position('left') update_font(ax) def update_font(axl, size=6, font=stdFont): if type(axl) is not list: axl = [axl] fontProperties = {'family': 'sans-serif', 'sans-serif': [font], 'weight': 'normal', 'font-size': size} for ax in axl: for tick in ax.xaxis.get_major_ticks(): tick.label1.set_family('sans-serif') tick.label1.set_fontname(stdFont) tick.label1.set_size(size) for tick in ax.yaxis.get_major_ticks(): tick.label1.set_family('sans-serif') tick.label1.set_fontname(stdFont) tick.label1.set_size(size) # xlab = ax.axes.get_xticklabels() # print xlab # print dir(xlab) # for x in xlab: # x.set_fontproperties(fontProperties) # ylab = ax.axes.get_yticklabels() # for y in ylab: # y.set_fontproperties(fontProperties) #ax.set_xticklabels(ax.get_xticks(), fontProperties) #ax.set_yticklabels(ax.get_yticks(), fontProperties) ax.xaxis.set_smart_bounds(True) ax.yaxis.set_smart_bounds(True) ax.tick_params(axis='both', labelsize=9) def formatTicks(axl, axis='xy', fmt='%d', font='Arial'): """ Convert tick labels to intergers to do just one axis, set axis = 'x' or 'y' control the format with the formatting string """ if type(axl) is not list: axl = [axl] majorFormatter = FormatStrFormatter(fmt) for ax in axl: if 'x' in axis: ax.xaxis.set_major_formatter(majorFormatter) if 'y' in axis: ax.yaxis.set_major_formatter(majorFormatter)
mit
zfrenchee/pandas
pandas/tests/io/msgpack/test_extension.py
8
2204
from __future__ import print_function

import array

import pandas.io.msgpack as msgpack
from pandas.io.msgpack import ExtType

from .common import frombytes, tobytes


def test_pack_ext_type():
    def p(s):
        packer = msgpack.Packer()
        packer.pack_ext_type(0x42, s)
        return packer.bytes()

    assert p(b'A') == b'\xd4\x42A'  # fixext 1
    assert p(b'AB') == b'\xd5\x42AB'  # fixext 2
    assert p(b'ABCD') == b'\xd6\x42ABCD'  # fixext 4
    assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH'  # fixext 8
    assert p(b'A' * 16) == b'\xd8\x42' + b'A' * 16  # fixext 16
    assert p(b'ABC') == b'\xc7\x03\x42ABC'  # ext 8
    assert p(b'A' * 0x0123) == b'\xc8\x01\x23\x42' + b'A' * 0x0123  # ext 16
    assert (p(b'A' * 0x00012345) ==
            b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345)  # ext 32


def test_unpack_ext_type():
    def check(b, expected):
        assert msgpack.unpackb(b) == expected

    check(b'\xd4\x42A', ExtType(0x42, b'A'))  # fixext 1
    check(b'\xd5\x42AB', ExtType(0x42, b'AB'))  # fixext 2
    check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD'))  # fixext 4
    check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH'))  # fixext 8
    check(b'\xd8\x42' + b'A' * 16, ExtType(0x42, b'A' * 16))  # fixext 16
    check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC'))  # ext 8
    check(b'\xc8\x01\x23\x42' + b'A' * 0x0123,
          ExtType(0x42, b'A' * 0x0123))  # ext 16
    check(b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345,
          ExtType(0x42, b'A' * 0x00012345))  # ext 32


def test_extension_type():
    def default(obj):
        print('default called', obj)
        if isinstance(obj, array.array):
            typecode = 123  # application specific typecode
            data = tobytes(obj)
            return ExtType(typecode, data)
        raise TypeError("Unknown type object %r" % (obj, ))

    def ext_hook(code, data):
        print('ext_hook called', code, data)
        assert code == 123
        obj = array.array('d')
        frombytes(obj, data)
        return obj

    obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
    s = msgpack.packb(obj, default=default)
    obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
    assert obj == obj2
bsd-3-clause
dpinney/omf
omf/scratch/Neural_Net_Experimentation/forecast_testing/VB.py
2
11885
import argparse import cProfile import matplotlib.pyplot as plt import numpy as np import time from numpy import * class VirtualBattery(object): """ Base class for abstraction. """ def __init__(self, ambient_temp, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number): # C :thermal capacitance # R : thermal resistance # P: rated power (kW) of each TCL # eta: COP # delta: temperature deadband # theta_s: temperature setpoint # N: number of TCL # ambient: ambient temperature self.ambient = ambient_temp self.C = capacitance self.R = resistance self.P = rated_power self.eta = COP self.delta = deadband self.theta_s = setpoint self.N = tcl_number def generate(self, participation_number, P0_number): """ Main calculation happens here. """ #heuristic function of participation atan = np.arctan participation = participation_number P0 = P0_number P0[P0 < 0] = 0.0 # set negative power consumption to 0 p_lower = self.N*participation*P0 # aggregated baseline power consumption considering participation p_upper = self.N*participation*(self.P - P0) p_upper[p_upper < 0] = 0.0 # set negative power upper bound to 0 e_ul = self.N*participation*self.C*self.delta/2/self.eta return p_lower, p_upper, e_ul class AC(VirtualBattery): """ Derived Class for specifically AC Virtual Battery. """ def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number): super(AC, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number) # self.tcl_idx = tcl_idx self.theta_a = self.ambient # theta_a == ambient temperature def generate(self): #heuristic function of participation atan = np.arctan # participation for AC Ta = np.linspace(20, 45, num=51) participation = (atan(self.theta_a-27) - atan(Ta[0]-27))/((atan(Ta[-1]-27) - atan(Ta[0]-27))) participation = np.clip(participation, 0, 1) #P0 for AC P0 = (self.theta_a - self.theta_s)/self.R/self.eta # average baseline power consumption for the given temperature setpoint return super(AC, self).generate(participation, P0) class HP(VirtualBattery): """ Derived Class for specifically HP Virtual Battery. """ def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number): super(HP, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number) # self.tcl_idx = tcl_idx self.theta_a = self.ambient # theta_a == ambient temperature def generate(self): #heuristic function of participation atan = np.arctan # participation for HP Ta = np.linspace(0, 25, num=51) participation = 1-(atan(self.theta_a-10) - atan(Ta[0]-10))/((atan(Ta[-1]-10) - atan(Ta[0]-10))) participation = np.clip(participation, 0, 1) #P0 for HP P0 = (self.theta_s - self.theta_a)/self.R/self.eta return super(HP, self).generate(participation, P0) class RG(VirtualBattery): """ Derived Class for specifically RG Virtual Battery. 
""" def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number): super(RG, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number) # self.tcl_idx = tcl_idx self.theta_a = self.ambient # theta_a == ambient temperature def generate(self): #heuristic function of participation atan = np.arctan # participation for RG participation = np.ones(self.theta_a.shape) participation = np.clip(participation, 0, 1) #P0 for RG P0 = (self.theta_a - self.theta_s)/self.R/self.eta # average baseline power consumption for the given temperature setpoint return super(RG, self).generate(participation, P0) class WH(VirtualBattery): """ Derived class for specifically Water Heater Virtual Battery. """ N_wh = 50 def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number,Tout, water): super(WH, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number) self.C_wh = self.C*np.ones((self.N_wh, 1)) # thermal capacitance, set in parent class self.R_wh = self.R*np.ones((self.N_wh, 1)) # thermal resistance self.P_wh = self.P*np.ones((self.N_wh, 1)) # rated power (kW) of each TCL self.delta_wh = self.delta*np.ones((self.N_wh, 1)) # temperature deadband self.theta_s_wh = self.theta_s*np.ones((self.N_wh, 1)) # temperature setpoint self.Tout=Tout self.water = water # self.N = self.para[6] # number of TCL def calculate_twat(self,tout_avg,tout_madif): tout_avg=tout_avg/5*9+32 tout_madif=tout_madif/5*9 ratio = 0.4 + 0.01 * (tout_avg - 44) lag = 35 - 1.0 * (tout_avg - 44) twat = 1*np.ones((365*24*60,1)) for i in range(365): for j in range(60*24): twat[i*24*60+j]= (tout_avg+6)+ratio*(tout_madif/ 2) * sin((0.986 * (i - 15 - lag) - 90)/180*3.14) twat=(twat-32.)/9.*5. 
return twat def prepare_pare_for_calculate_twat(self,tou_raw): tout_avg = sum(tou_raw)/len(tou_raw) mon=[31,28,31,30,31,30,31,31,30,31,30,31] mon_ave=1*np.ones((12,1)) mon_ave[1]=sum(tou_raw[0:mon[1]*24])/mon[1]/24 stop=mon[1]*24 for idx in range(1,len(mon)): mon_ave[idx]=sum(tou_raw[stop:stop+mon[idx]*24])/mon[idx]/24; tou_madif=max(mon_ave)- min(mon_ave) return tout_avg, tou_madif def generate(self): # theta_a is the ambient temperature # theta_a = (72-32)*5.0/9*np.ones((365, 24*60)) # This is a hard-coded 72degF, converted to degCel theta_a = self.ambient#*np.ones((365, 24*60)) # theta_a == ambient temperature #nRow, nCol = theta_a.shape nRow, nCol = 365, 24*60 theta_a = np.reshape(theta_a, [nRow*nCol, 1]) Tout1min= np.zeros((size(theta_a))); for i in range(len(self.Tout)): theta_a[i]= (self.Tout[i]+self.ambient[i])/2; # CHANGED THIS # h is the model time discretization step in seconds h = 60 #T is the number of time step considered, i.e., T = 365*24*60 means a year # with 1 minute time discretization T = len(theta_a) tou_avg,maxdiff=self.prepare_pare_for_calculate_twat(self.Tout) twat=self.calculate_twat(tou_avg,maxdiff); # print twat # theta_lower is the temperature lower bound theta_lower_wh = self.theta_s_wh - self.delta_wh/2.0 # theta_upper is the temperature upper bound theta_upper_wh = self.theta_s_wh + self.delta_wh/2.0 # m_water is the water draw in unit of gallon per minute m_water = self.water#np.genfromtxt("Flow_raw_1minute_BPA.csv", delimiter=',')[1:, 1:] where_are_NaNs = isnan(m_water) m_water[where_are_NaNs] = 0 m_water = m_water *0.00378541178*1000/h m_water_row, m_water_col = m_water.shape water_draw = np.zeros((m_water_row, int(self.N_wh))) for i in range(int(self.N_wh)): k = np.random.randint(m_water_col) water_draw[:, i] = np.roll(m_water[:, k], (1, np.random.randint(-14, 1))) + m_water[:, k] * 0.1 * (np.random.random() - 0.5) # k = m_water_col - 1 # print(k) # raise(ArgumentError, "Stop here") # water_draw[:, i] = m_water[:, k] first = -( np.matmul(theta_a, np.ones((1, self.N_wh))) - np.matmul(np.ones((T, 1)), self.theta_s_wh.transpose()) ) # print(np.argwhere(np.isnan(first))) second = np.matmul(np.ones((T, 1)), self.R_wh.transpose()) # print(np.argwhere(np.isnan(second))) Po = ( first / second - 4.2 * np.multiply(water_draw, (55-32) * 5/9.0 - np.matmul(np.ones((T, 1)), self.theta_s_wh.transpose())) ) # print(water_draw.shape) # print(len(water_draw[:1])) # Po_total is the analytically predicted aggregate baseline power Po_total = np.sum(Po, axis=1) upper_limit = np.sum(self.P_wh, axis=0) # print(np.argwhere(np.isnan(water_draw))) Po_total[Po_total > upper_limit[0]] = upper_limit # theta is the temperature of TCLs theta = np.zeros((self.N_wh, T)) theta[:, 0] = self.theta_s_wh.reshape(-1) # m is the indicator of on-off state: 1 is on, 0 is off m = np.ones((self.N_wh, T)) m[:int(self.N_wh*0.8), 0] = 0 for t in range(T - 1): theta[:, t+1] = ( (1 - h/(self.C_wh * 3600) / self.R_wh).reshape(-1) * theta[:, t] + (h / (self.C_wh * 3600) / self.R_wh).reshape(-1) * theta_a[t] + ((h/(self.C_wh * 3600))*self.P_wh).reshape(-1)*m[:, t] ) m[theta[:, t+1] > (theta_upper_wh).reshape(-1), t+1] = 0 m[theta[:, t+1] < (theta_lower_wh).reshape(-1), t+1] = 1 m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t+1] = m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t] theta[:, 0] = theta[:, -1] m[:, 0] = m[:, -1] # Po_total_sim is the predicted aggregate baseline power using simulations 
Po_total_sim = np.zeros((T, 1)) Po_total_sim[0] = np.sum(m[:, 0]*(self.P_wh.reshape(-1))) for t in range(T - 1): # print t theta[:, t+1] = (1 - h/(self.C_wh * 3600)/self.R_wh).reshape(-1) * theta[:, t] + (h/(self.C_wh * 3600)/self.R_wh).reshape(-1)*theta_a[t] + (h/(self.C_wh*3600)).reshape(-1)*m[:, t]*self.P_wh.reshape(-1) + h*4.2*water_draw[t, :].transpose() * (twat[t] -theta[:, t]) / ((self.C_wh*3600).reshape(-1)) m[theta[:, t+1] > (theta_upper_wh).reshape(-1), t+1] = 0 m[theta[:, t+1] < (theta_lower_wh).reshape(-1), t+1] = 1 m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t+1] = m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t] Po_total_sim[t+1] = np.sum(m[:, t+1] * self.P_wh.reshape(-1)) index_available = np.ones((self.N_wh, T)) for t in range(T - 1): index_available[(theta[:, t] < (theta_lower_wh-0.5).reshape(-1)) | (theta[:, t] > (theta_upper_wh+0.5).reshape(-1)), t] = 0 # Virtual battery parameters p_upper_wh1 = np.sum(self.P_wh) - Po_total_sim p_lower_wh1 = Po_total_sim e_ul_wh1 = np.sum((np.matmul(self.C_wh, np.ones((1, T))) * np.matmul(self.delta_wh, np.ones((1, T))) / 2 * index_available).transpose(), axis=1) # calculate hourly average data from minute output for power p_upper_wh1 = np.reshape(p_upper_wh1, [8760,60]) p_upper_wh = np.mean(p_upper_wh1, axis=1)*float(self.N)/float(self.N_wh) p_lower_wh1 = np.reshape(p_lower_wh1, [8760,60]) p_lower_wh = np.mean(p_lower_wh1, axis=1)*float(self.N)/float(self.N_wh) # extract hourly data from minute output for energy e_ul_wh = e_ul_wh1[59:len(e_ul_wh1):60]*float(self.N)/float(self.N_wh) return p_lower_wh, p_upper_wh, e_ul_wh
gpl-2.0
tomlof/scikit-learn
examples/cluster/plot_adjusted_for_chance_measures.py
105
4300
""" ========================================================== Adjustment for chance in clustering performance evaluation ========================================================== The following plots demonstrate the impact of the number of clusters and number of samples on various clustering performance evaluation metrics. Non-adjusted measures such as the V-Measure show a dependency between the number of clusters and the number of samples: the mean V-Measure of random labeling increases significantly as the number of clusters is closer to the total number of samples used to compute the measure. Adjusted for chance measure such as ARI display some random variations centered around a mean score of 0.0 for any number of samples and clusters. Only adjusted measures can hence safely be used as a consensus index to evaluate the average stability of clustering algorithms for a given value of k on various overlapping sub-samples of the dataset. """ print(__doc__) # Author: Olivier Grisel <[email protected]> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from time import time from sklearn import metrics def uniform_labelings_scores(score_func, n_samples, n_clusters_range, fixed_n_classes=None, n_runs=5, seed=42): """Compute score for 2 random uniform cluster labelings. Both random labelings have the same number of clusters for each value possible value in ``n_clusters_range``. When fixed_n_classes is not None the first labeling is considered a ground truth class assignment with fixed number of classes. """ random_labels = np.random.RandomState(seed).randint scores = np.zeros((len(n_clusters_range), n_runs)) if fixed_n_classes is not None: labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples) for i, k in enumerate(n_clusters_range): for j in range(n_runs): if fixed_n_classes is None: labels_a = random_labels(low=0, high=k, size=n_samples) labels_b = random_labels(low=0, high=k, size=n_samples) scores[i, j] = score_func(labels_a, labels_b) return scores score_funcs = [ metrics.adjusted_rand_score, metrics.v_measure_score, metrics.adjusted_mutual_info_score, metrics.mutual_info_score, ] # 2 independent random clusterings with equal cluster number n_samples = 100 n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int) plt.figure(1) plots = [] names = [] for score_func in score_funcs: print("Computing %s for %d values of n_clusters and n_samples=%d" % (score_func.__name__, len(n_clusters_range), n_samples)) t0 = time() scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range) print("done in %0.3fs" % (time() - t0)) plots.append(plt.errorbar( n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0]) names.append(score_func.__name__) plt.title("Clustering measures for 2 random uniform labelings\n" "with equal number of clusters") plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples) plt.ylabel('Score value') plt.legend(plots, names) plt.ylim(ymin=-0.05, ymax=1.05) # Random labeling with varying n_clusters against ground class labels # with fixed number of clusters n_samples = 1000 n_clusters_range = np.linspace(2, 100, 10).astype(np.int) n_classes = 10 plt.figure(2) plots = [] names = [] for score_func in score_funcs: print("Computing %s for %d values of n_clusters and n_samples=%d" % (score_func.__name__, len(n_clusters_range), n_samples)) t0 = time() scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range, fixed_n_classes=n_classes) print("done in %0.3fs" % (time() - t0)) 
plots.append(plt.errorbar( n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0]) names.append(score_func.__name__) plt.title("Clustering measures for random uniform labeling\n" "against reference assignment with %d classes" % n_classes) plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples) plt.ylabel('Score value') plt.ylim(ymin=-0.05, ymax=1.05) plt.legend(plots, names) plt.show()
bsd-3-clause
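For reference, the "adjusted for chance" correction that the example above relies on follows the standard scheme (written here for the Rand index; the adjusted mutual information uses the same idea):

\mathrm{ARI} = \frac{\mathrm{RI} - \mathbb{E}[\mathrm{RI}]}{\max(\mathrm{RI}) - \mathbb{E}[\mathrm{RI}]} ,

so a random labeling scores close to 0 regardless of the number of clusters or samples, while perfect agreement scores 1. Non-adjusted measures such as the V-measure lack this correction, which is exactly the dependency on cluster count that the first plot exposes.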
liangz0707/scikit-learn
examples/plot_multioutput_face_completion.py
330
3019
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces X_test = test[:, :np.ceil(0.5 * n_pixels)] y_test = test[:, np.floor(0.5 * n_pixels):] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
bsd-3-clause
anugrah-saxena/pycroscopy
examples/plot_writing_to_h5.py
1
23696
""" ==================================================================================================== Tutorials for Developing Scientific Workflows in Pycroscopy - Part 2: Writing to pycroscopy H5 files ==================================================================================================== **Suhas Somnath** 8/8/2017 This set of notebooks will serve as examples for developing end-to-end workflows for and using pycroscopy. While pycroscopy contains many popular data processing function, it may not have a function you need. Since pycroscopy is data-centric, it is preferable to write processing results back to the same file as well. **In this example, we will write the results of K-Means clustering (on a Scanning Tunnelling Spectroscopy (STS) dataset) back to the file.** K-Means clustering is a quick and simple method to determine the types of spectral responses present in the data and their spatial occurrance. Introduction: ============= Data structuring and file format: ================================= **Before proceeding with this example, we highly recommend you read about the data formatting in pycroscopy as well as reading and writing to HDF5 files.** We will summarize some key points below: * pycroscopy uses the **heirarchical data format (HDF5)** files to store data * These HDF5 or H5 files contain datasets and datagroups * pycroscopy data files have two kinds of datasets: * **`main`** datasets: These must be of the form: `[instance, features]`. * All imaging or measurement data satisfy this category, where positions form the instances and the spectral points form the features. Thus, even standard 2D images or a single spectra also satisfy this condition. * A collection of `k` chosen spectra would still satisfy this condition. Some examples include: * the cluster centers obtained from a clustering algorithm like `k-Means clustering`. * The abundance maps obtained from decomposition algorithms like `Singular Value Decomposition (SVD)` or `Non-negetive matrix factorization (NMF)` * **`ancillary`** datasets: All other datasets fall into this category. These include the frequency vector or bias vector as a function of which the main dataset was collected. * pycroscopy stores all data in two dimensional matrices with all position dimensions collapsed to the first dimension and all other (spectroscopic) dimensions collapsed to the second dimension. * All these **`main`** datasets are always accompanied by four ancillary datasets: * Position Indices * Position Values * Spectroscopic Indices * Spectroscopic Values * These ancillary datasets are always two dimensional. * The Position datasets are NxM where N is the total number of positions and M is the number of position dimensions. * The Spectroscopic datasets are MxN where M is the number of spectroscopic dimensions and N is the total number os specstroscopic steps. * All **`main`** datasets always have two attributes that describe the measurement itself: * `quantity`: The physical quantity contained in each cell of the dataset - such as voltage, current, force etc. * `units`: The units for the physical quantity such as `V` for volts, `nA` for nano amperes, `pN` for pico newtons etc. 
* All **`main`** datasets additionally have 4 attributes that provide the references or links to the 4 aforementions ancillary datasets * Storing just the references allows us to re-use the same position / spectroscopic datasets without having to remake them * For more information see the data format documentation This bookkeeping is necessary for helping the code to understand the dimensionality and structure of the data. While these rules may seem tedious, there are several functions and a few classes that make these tasks much easier Classes for writing files ========================= In order to deal with the numerous challenges in writing data in a consistent manner, especially during translation, in the pycroscopy format, we developed two main classes: **MicroData** and **ioHDF5**. MicroData ========= The abstract class MicroData is extended by **MicroDataset** and **MicroDatagroup** which are skeletal counterparts for the h5py.Dataset and h5py.Datagroup classes respectively. These classes allow programmers to quickly and simply set up the tree structure that needs to be written to H5 files without having to worry about the low-level HDF5 constructs or defensive programming strategies necessary for writing the H5 files. Besides facilitating the construction of a tree structure, each of the classes have a few features specific to pycroscopy to alleviate file writing. ioHDF5 ====== While we use **h5py** to read from pycroscopy files, the ioHDF5 class is used to write data to H5 files. ioHDF5 translates the tree structure described by a MicroDataGroup object and writes the contents to H5 files in a standardized manner. As a wrapper around h5py, tt handles the low-level file I/O calls and includes defensive programming strategies to minimize issues with writing to H5 files. Why bother with Microdata and ioHDF5? ===================================== * These classes simplify the process of writing to H5 files considerably. The programmer only needs to construct the tree structure with simple python objects such as dictionaries for parameters, numpy datasets for storing data, etc. * It is easy to corrupt H5 files. ioHDF5 uses defensive programming strategies to solve these problems. Translation can be challenging in many cases: * It may not be possible to read the entire data from the raw data file to memory as we did in the tutorial on Translation * ioHDF5 allows the general tree structure and the attributes to be written before the data is populated. * Sometimes, the raw data files do not come with sufficient parameters that describe the size and shape of the data. This makes it challenging to prepare the H5 file. 
* ioHDF5 allows dataets to be dataFile I/O is expensive and we don't want to read the same raw data files multiple times """ # Ensure python 3 compatibility: from __future__ import division, print_function, absolute_import, unicode_literals # The package for accessing files in directories, etc.: import os import wget # The mathematical computation package: import numpy as np # The package used for creating and manipulating HDF5 files: import h5py # Packages for plotting: import matplotlib.pyplot as plt # Package for performing k-Means clustering: from sklearn.cluster import KMeans # Finally import pycroscopy for certain scientific analysis: import pycroscopy as px from pycroscopy.io.translators.omicron_asc import AscTranslator ############################################################################### # Loading the dataset # =================== # # We wil start by downloading the raw data file as generated by the microscope and then translate the file to a # pycroscopy H5 file. # download the raw data file from Github: data_file_path = 'temp.asc' url = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/STS.asc' if os.path.exists(data_file_path): os.remove(data_file_path) _ = wget.download(url, data_file_path, bar=None) # Translating from raw data to h5: tran = AscTranslator() h5_path = tran.translate(data_file_path) ############################################################################### # Reading the H5 dataset # ====================== # # This data is a Scanning Tunnelling Spectroscopy (STS) dataset wherein current was measured as a function of voltage # on a two dimensional grid of points. Thus, the data has three dimensions (X, Y, Bias). Note, that in pycroscopy, all # position dimensions are collapsed to the first dimension and all spectroscopic (only bias in this case) dimensions # are collapsed to the second axis of a two dimensional matrix. So, the data is represented as (position, bias) # instead. # opening the file: hdf = px.ioHDF5(h5_path) h5_file = hdf.file # Visualize the tree structure in the file print('Tree structure within the file:') px.hdf_utils.print_tree(h5_file) # Extracting some parameters that will be necessary later on: h5_meas_grp = h5_file['Measurement_000'] num_cols = int(px.hdf_utils.get_attr(h5_meas_grp, 'x-pixels')) num_rows = int(px.hdf_utils.get_attr(h5_meas_grp, 'y-pixels')) # There are multiple ways of accessing the Raw_Data dataset. Here's one approach: h5_main = h5_meas_grp['Channel_000/Raw_Data'] # Prepare the label for plots: y_label = px.hdf_utils.get_attr(h5_main, 'quantity') + ' [' + px.hdf_utils.get_attr(h5_main, 'units') + ']' # Get the voltage vector that this data was acquired as a function of: h5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[0] volt_vec = np.squeeze(h5_spec_vals[()]) # Get the descriptor for this x_label = px.hdf_utils.get_attr(h5_spec_vals, 'labels')[0] + ' [' + px.hdf_utils.get_attr(h5_spec_vals, 'units')[0] + ']' # Currently, the data is within the h5 dataset. We need to read this to memory: data_mat = h5_main[()] print('\nData now loaded to memory and is of shape:', data_mat.shape) print('Data has', num_rows, 'rows and', num_cols, 'columns each having a', data_mat.shape[1], 'long measurement of', y_label,'as a function of', x_label) ############################################################################### # Performing k-Means Clustering: # ============================== # # Now that the data is loaded to memory, we can perform k-Means clustering on data_mat. 
As a reminder, K-Means # clustering is a quick and simple method to determine the types of spectral responses present in the data and their # spatial occurance. # # Let us assume that we have a `P x S` dataset with `P` positions each with spectra that are `S` long. When K-Means # is asked to identify `k` clusters, it will produce two results: # * cluster_centers: This contains the different kinds of spectral responses and is represented as a two dimensional # array of the form [cluster number, representative spectra for this cluster]. Thus this dataset will have a shape # of `k x S` # * labels: This provides the information about which spatial pixel belongs to which group. It will be a # 1 dimensional array of size `P` wherein the value for each element in the array (cluster id for each pixel) will # be within `[0, k)` # # **Our goal is to write back these two datasets to the H5 file** num_clusters = 9 # Now, we can perform k-Means clustering: estimators = KMeans(num_clusters) results = estimators.fit(data_mat) print('K-Means Clustering performed on the dataset of shape', data_mat.shape, 'resulted in a cluster centers matrix of shape', results.cluster_centers_.shape, 'and a labels array of shape', results.labels_.shape) """ By default, the clusters identified by K-Means are NOT arranged according to their relative distances to each other. Visualizing and interpreting this data is challenging. We will sort the results using a handy function already in pycroscopy: """ labels, centroids = px.processing.cluster.reorder_clusters(results.labels_, results.cluster_centers_) ############################################################################### # Visualize the results: # ====================== # # We will visualize both the raw results from k-Means as well as the distance-sorted results from pycroscopy. # You will notice that the sorted results are easier to understand and interpret. This is an example of the kind of # additional value that can be packed into pycroscopy wrappers on existing data analysis / processing functions. # # A second example of value addition - The pycroscopy wrapper for Clustering handles real, complex, and compound # valued datasets seamlessly in the background. px.plot_utils.plot_cluster_results_together(np.reshape(results.labels_, (num_rows, num_cols)), results.cluster_centers_, spec_val=volt_vec, cmap=plt.cm.inferno, spec_label=x_label, resp_label=y_label); px.plot_utils.plot_cluster_results_together(np.reshape(labels, (num_rows, num_cols)), centroids, spec_val=volt_vec, cmap=plt.cm.inferno, spec_label=x_label, resp_label=y_label); ############################################################################### # Preparing to write results # ========================== # # The two datasets we need to write back to the H5 file are the `centroids` and `labels` matrices. Both the # `centroids` and `labels` matrices satisfy the condition to be elevated to the status of **`main`** datasets. # However, in order to be recognized as **`main`** datasets, they need the four ancillary datasets to go along with # them. Recall that the main datasets only need to store references to the ancillary datasets and that we do not # need to store copies of the same ancillary datasets if multiple main datasets use them. # # Here, we will refer to the dataset on which K-means was performed as the **`source`** dataset. 
# # Identifying the ancillary datasets: # =================================== # * `centroids`: # * Spectroscopic Indices and Values: Since the `source` dataset and the `centroids` datasets both contain the # same spectral information, the `centroids` dataset can simply reuse the ancillary spectroscopic datasets used by # the `source` dataset. # * Position Indices and Values: The `centroids` dataset has `k` instances while the `source` dataset has `P`, # so we need to create a new position indicies and a new position values dataset for `centroids` # * `labels`: # * Spectroscopic Indices and Values: Unlike the `source` dataset that has spectra of length `S`, this dataset # only has a single value (cluster index) at each location. Consequently, `labels` needs two new ancilary datasets # * Position Indices and Values: Since both `source` and `labels` have the same number of positions and the # positions mean the same quantities for both datasets, we can simply reuse the ancillary dataset from `source` # for `labels` # # Preparing the missing ancillary arrays # ====================================== labels_spec_mat = np.arange(1, dtype=np.uint32) centroids_pos_mat = np.arange(num_clusters, dtype=np.uint32) print('Spectroscopic Dataset for Labels', labels_spec_mat.shape) print('Position Dataset for Centroids', centroids_pos_mat.shape) print('Centroids',centroids.shape) print('Labels', labels.shape) ############################################################################### # Reshape the matricies to the correct dimensions # =============================================== # # 1. Since `labels` is a main dataset, it needs to be two dimensional matrix of size `P x 1` # 2. The `Spectroscopic` ancillary datasets for `labels` need to be of the form `dimension x points`. Since the # spectroscopic axis of `labels` is only one deep, `labels` has only one spectroscopic dimension which itself has # just one point. Thus the `Spectroscopic` matrix should be of size `1 x 1` # 3. The `centroids` matrix is already of the form: `position x spectra`, so it does not need any reshaping # 4. The `Position` ancillary datasets for `centroids` need to be of the form `points x dimensions` as well. # In this case, `centroids` has `k` positions all in one dimension. Thus the matrix needs to be reshaped to `k x 1` labels_spec_mat = np.atleast_2d(labels_spec_mat) centroids_pos_mat = np.atleast_2d(centroids_pos_mat).T labels_mat = np.uint32(labels.reshape([-1, 1])) print('Spectroscopic Dataset for Labels', labels_spec_mat.shape) print('Position Dataset for Centroids', centroids_pos_mat.shape) print('Centroids',centroids.shape) print('Labels', labels_mat.shape) ############################################################################### # Create the Main MicroDataset objects # ==================================== # Remember that it is important to either inherit or add the `quantity` and `units` attributes to each **main** dataset # The two main datasets ds_label_mat = px.MicroDataset('Labels', labels_mat, dtype=np.uint32) # Adding the mandatory attributes ds_label_mat.attrs = {'quantity': 'Cluster ID', 'units': 'a. 
u.'} ds_cluster_centroids = px.MicroDataset('Mean_Response', centroids, dtype=h5_main.dtype) # Inhereting / copying the mandatory attributes px.hdf_utils.copy_main_attributes(h5_main, ds_cluster_centroids) ############################################################################### # Create the ancillary MicroDataset objects # ========================================= # Ancillary datasets ds_cluster_inds = px.MicroDataset('Cluster_Indices', centroids_pos_mat, dtype=np.uint32) ds_cluster_vals = px.MicroDataset('Cluster_Values', centroids_pos_mat, dtype=np.float32) ds_label_inds = px.MicroDataset('Label_Spectroscopic_Indices', labels_spec_mat, dtype=np.uint32) ds_label_vals = px.MicroDataset('Label_Spectroscopic_Values', labels_spec_mat, dtype=np.float32) # Creating region references: clust_slices = {'Cluster': (slice(None), slice(0, 1))} ds_cluster_inds.attrs['labels'] = clust_slices ds_cluster_inds.attrs['units'] = [''] ds_cluster_vals.attrs['labels'] = clust_slices ds_cluster_vals.attrs['units'] = [''] ############################################################################### # Create the group that will contain these datasets # ================================================= # We will be appending data to the existing h5 file and since HDF5 uses a tree structure to store information, we # would need to specify where to add the sub-tree that we are building. # # Recall that the name of the DataGroup provides information of the operation that has been performed on the # `source` dataset. Therefore, we need to be careful about naming the group. # # It is also important to add relevant information about the operation. For example, the name of our operation # is `Cluster` analogous to the `SkLearn` package organization. Thus, the name of the algorithm - `k-Means` needs # to be written as an attribute of the group as well. # # Occasionaly, the same operation may be performed multiple times on the same dataset with different parameters. # In the case of K-means it may be the number of clusters. pycroscopy allows all these results to be stored instead # of being overwritten by appending an index number to the end of the group name. Thus, one could have a tree # that contains the following groups: # * Raw_Data-Cluster_000 <--- K-means with 9 clusters # * Raw_Data-Cluster_001 <--- Agglomerative clustering # * Raw_Data-Cluster_002 <--- K-means again with 4 clusters # # Leaving a '_' at the end of the group name will instruct ioHDF5 to look for the last instance of the same # operation being performed on the same dataset. 
The index will then be updated accordingly source_dset_name = h5_main.name.split('/')[-1] operation_name = 'Cluster' subtree_root_path = h5_main.parent.name[1:] cluster_grp = px.MicroDataGroup(source_dset_name + '-' + operation_name + '_', subtree_root_path) print('New group to be created with name:', cluster_grp.name) print('This group (subtree) will be appended to the H5 file under the group:', subtree_root_path) # Making a tree structure by adding the MicroDataset objects as children of this group cluster_grp.addChildren([ds_label_mat, ds_cluster_centroids, ds_cluster_inds, ds_cluster_vals, ds_label_inds, ds_label_vals]) print('\nWill write the following tree:') cluster_grp.showTree() cluster_grp.attrs['num_clusters'] = num_clusters cluster_grp.attrs['num_samples'] = h5_main.shape[0] cluster_grp.attrs['cluster_algorithm'] = 'KMeans' # Get the parameters of the KMeans object that was used and write them as attributes of the group for parm in estimators.get_params().keys(): cluster_grp.attrs[parm] = estimators.get_params()[parm] print('\nWriting the following attrbutes to the group:') for at_name in cluster_grp.attrs: print(at_name, ':', cluster_grp.attrs[at_name]) ############################################################################### # Write to H5 and access the written objects # ========================================== # # Once the tree is prepared (previous cell), ioHDF5 will handle all the file writing. h5_clust_refs = hdf.writeData(cluster_grp) h5_labels = px.hdf_utils.getH5DsetRefs(['Labels'], h5_clust_refs)[0] h5_centroids = px.hdf_utils.getH5DsetRefs(['Mean_Response'], h5_clust_refs)[0] h5_clust_inds = px.hdf_utils.getH5DsetRefs(['Cluster_Indices'], h5_clust_refs)[0] h5_clust_vals = px.hdf_utils.getH5DsetRefs(['Cluster_Values'], h5_clust_refs)[0] h5_label_inds = px.hdf_utils.getH5DsetRefs(['Label_Spectroscopic_Indices'], h5_clust_refs)[0] h5_label_vals = px.hdf_utils.getH5DsetRefs(['Label_Spectroscopic_Values'], h5_clust_refs)[0] ############################################################################### # Look at the H5 file contents now # ================================ # Compare this tree with the one printed earlier. The new group and datasets should be apparent px.hdf_utils.print_tree(h5_file) ############################################################################### # Make `centroids` and `labels` -> `main` datasets # ================================================ # We elevate the status of these datasets by linking them to the four ancillary datasets. This part is also made # rather easy by a few pycroscopy functions. # we already got the reference to the spectroscopic values in the first few cells h5_spec_inds = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Indices')[0] px.hdf_utils.checkAndLinkAncillary(h5_labels, ['Position_Indices', 'Position_Values'], h5_main=h5_main) px.hdf_utils.checkAndLinkAncillary(h5_labels, ['Spectroscopic_Indices', 'Spectroscopic_Values'], anc_refs=[h5_label_inds, h5_label_vals]) px.hdf_utils.checkAndLinkAncillary(h5_centroids, ['Spectroscopic_Indices', 'Spectroscopic_Values'], anc_refs=[h5_spec_inds, h5_spec_vals]) px.hdf_utils.checkAndLinkAncillary(h5_centroids, ['Position_Indices', 'Position_Values'], anc_refs=[h5_clust_inds, h5_clust_vals]) ############################################################################### # Why bother with all this? 
# ========================= # * Though long, this simple file writing procedure needs to be written once for a given data analysis / processing tool # * The general nature of this Clustering algorithm facilitates the application to several other datasets # regardless of their origin # * Once the data is written in the pycroscopy format, it is possible to apply other data analytics operations # to the datasets with a single line # * Generalized versions of visualization algorithms can be written to visualize clustering results quickly. # # Here is an example of very quick visualization with effectively just a single parameter - the group containing # clustering results. The ancillary datasets linked to `labels` and `centroids` instructed the code about the # spatial and spectroscopic dimensionality and enabled it to automatically render the plots below px.plot_utils.plot_cluster_h5_group(h5_labels.parent, ''); ############################################################################### # Cleanup # ======= # Deletes the temporary files created in the example os.remove(data_file_path) hdf.close() os.remove(h5_path)
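###############################################################################
# A minimal sketch of the linking idea described above, using plain h5py only
# (no pycroscopy calls). The file name, dataset names and shapes below are
# made up purely for illustration: a "main" dataset stores *references* to its
# ancillary datasets as attributes, so the same ancillary datasets can be
# shared by any number of main datasets.

import os
import numpy as np
import h5py

with h5py.File('linking_demo.h5', 'w') as h5_demo:
    # a toy "main" dataset: 16 positions x 8 spectroscopic steps
    h5_demo_main = h5_demo.create_dataset('Demo_Main', data=np.random.rand(16, 8))
    # ancillary index datasets, two dimensional by convention
    h5_demo_pos = h5_demo.create_dataset(
        'Position_Indices', data=np.arange(16, dtype=np.uint32).reshape(-1, 1))
    h5_demo_spec = h5_demo.create_dataset(
        'Spectroscopic_Indices', data=np.arange(8, dtype=np.uint32).reshape(1, -1))
    # store references (links), not copies, on the main dataset
    h5_demo_main.attrs['Position_Indices'] = h5_demo_pos.ref
    h5_demo_main.attrs['Spectroscopic_Indices'] = h5_demo_spec.ref
    # mandatory descriptors for a main dataset
    h5_demo_main.attrs['quantity'] = 'Current'
    h5_demo_main.attrs['units'] = 'nA'
    # dereferencing a stored attribute recovers the linked ancillary dataset
    assert h5_demo[h5_demo_main.attrs['Position_Indices']].shape == (16, 1)

# remove the scratch file created for this illustration
os.remove('linking_demo.h5')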
mit
pprett/statsmodels
statsmodels/graphics/gofplots.py
1
7297
import numpy as np from scipy import stats from statsmodels.regression.linear_model import OLS from statsmodels.tools.tools import add_constant from . import utils __all__ = ['qqplot'] def qqplot(data, dist=stats.norm, distargs=(), a=0, loc=0, scale=1, fit=False, line=False, ax=None): """ qqplot of the quantiles of x versus the quantiles/ppf of a distribution. Can take arguments specifying the parameters for dist or fit them automatically. (See fit under kwargs.) Parameters ---------- data : array-like 1d data array dist : A scipy.stats or statsmodels distribution Compare x against dist. The default is scipy.stats.distributions.norm (a standard normal). distargs : tuple A tuple of arguments passed to dist to specify it fully so dist.ppf may be called. loc : float Location parameter for dist a : float Offset for the plotting position of an expected order statistic, for example. The plotting positions are given by (i - a)/(nobs - 2*a + 1) for i in range(0,nobs+1) scale : float Scale parameter for dist fit : boolean If fit is false, loc, scale, and distargs are passed to the distribution. If fit is True then the parameters for dist are fit automatically using dist.fit. The quantiles are formed from the standardized data, after subtracting the fitted loc and dividing by the fitted scale. line : str {'45', 's', 'r', q'} or None Options for the reference line to which the data is compared.: - '45' - 45-degree line - 's' - standardized line, the expected order statistics are scaled by the standard deviation of the given sample and have the mean added to them - 'r' - A regression line is fit - 'q' - A line is fit through the quartiles. - None - by default no reference line is added to the plot. - If True a reference line is drawn on the graph. The default is to fit a line via OLS regression. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. Returns ------- fig : Matplotlib figure instance If `ax` is None, the created figure. Otherwise the figure to which `ax` is connected. Examples -------- >>> import statsmodels.api as sm >>> from matplotlib import pyplot as plt >>> data = sm.datasets.longley.load() >>> data.exog = sm.add_constant(data.exog) >>> mod_fit = sm.OLS(data.endog, data.exog).fit() >>> res = mod_fit.resid >>> fig = sm.qqplot(res) >>> plt.show() qqplot against quantiles of t-distribution with 4 degrees of freedom: >>> import scipy.stats as stats >>> fig = sm.qqplot(res, stats.t, distargs=(4,)) >>> plt.show() qqplot against same as above, but with mean 3 and std 10: >>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10) >>> plt.show() Automatically determine parameters for t distribution including the loc and scale: >>> fig = sm.qqplot(res, stats.t, fit=True, line='45') >>> plt.show() Notes ----- Depends on matplotlib. If `fit` is True then the parameters are fit using the distribution's fit() method. 
""" fig, ax = utils.create_mpl_ax(ax) if not hasattr(dist, 'ppf'): raise ValueError("distribution must have a ppf method") nobs = data.shape[0] if fit: fit_params = dist.fit(data) loc = fit_params[-2] scale = fit_params[-1] if len(fit_params)>2: dist = dist(*fit_params[:-2], **dict(loc = 0, scale = 1)) else: dist = dist(loc=0, scale=1) elif distargs or loc != 0 or scale != 1: dist = dist(*distargs, **dict(loc=loc, scale=scale)) try: theoretical_quantiles = dist.ppf(plotting_pos(nobs, a)) except: raise ValueError('distribution requires more parameters') sample_quantiles = np.array(data, copy=True) sample_quantiles.sort() if fit: sample_quantiles -= loc sample_quantiles /= scale ax.set_xmargin(0.02) ax.plot(theoretical_quantiles, sample_quantiles, 'bo') if line: if line not in ['r','q','45','s']: msg = "%s option for line not understood" % line raise ValueError(msg) qqline(ax, line, theoretical_quantiles, sample_quantiles, dist) ax.set_xlabel("Theoretical Quantiles") ax.set_ylabel("Sample Quantiles") return fig def qqline(ax, line, x=None, y=None, dist=None, fmt='r-'): """ Plot a reference line for a qqplot. Parameters ---------- ax : matplotlib axes instance The axes on which to plot the line line : str {'45','r','s','q'} Options for the reference line to which the data is compared.: - '45' - 45-degree line - 's' - standardized line, the expected order statistics are scaled by the standard deviation of the given sample and have the mean added to them - 'r' - A regression line is fit - 'q' - A line is fit through the quartiles. - None - By default no reference line is added to the plot. x : array X data for plot. Not needed if line is '45'. y : array Y data for plot. Not needed if line is '45'. dist : scipy.stats.distribution A scipy.stats distribution, needed if line is 'q'. Notes ----- There is no return value. The line is plotted on the given `ax`. """ if line == '45': end_pts = zip(ax.get_xlim(), ax.get_ylim()) end_pts[0] = max(end_pts[0]) end_pts[1] = min(end_pts[1]) ax.plot(end_pts, end_pts, fmt) return # does this have any side effects? if x is None and y is None: raise ValueError("If line is not 45, x and y cannot be None.") elif line == 'r': # could use ax.lines[0].get_xdata(), get_ydata(), # but don't know axes are 'clean' y = OLS(y, add_constant(x)).fit().fittedvalues ax.plot(x,y,fmt) elif line == 's': m,b = y.std(), y.mean() ref_line = x*m + b ax.plot(x, ref_line, fmt) elif line == 'q': q25 = stats.scoreatpercentile(y, 25) q75 = stats.scoreatpercentile(y, 75) theoretical_quartiles = dist.ppf([.25,.75]) m = (q75 - q25) / np.diff(theoretical_quartiles) b = q25 - m*theoretical_quartiles[0] ax.plot(x, m*x + b, fmt) #about 10x faster than plotting_position in sandbox and mstats def plotting_pos(nobs, a): """ Generates sequence of plotting positions Parameters ---------- nobs : int Number of probability points to plot a : float Offset for the plotting position of an expected order statistic, for example. Returns ------- plotting_positions : array The plotting positions Notes ----- The plotting positions are given by (i - a)/(nobs - 2*a + 1) for i in range(0,nobs+1) See also -------- scipy.stats.mstats.plotting_positions """ return (np.arange(1.,nobs+1) - a)/(nobs- 2*a + 1)
bsd-3-clause
wzbozon/scikit-learn
examples/calibration/plot_compare_calibration.py
241
5008
""" ======================================== Comparison of Calibration of Classifiers ======================================== Well calibrated classifiers are probabilistic classifiers for which the output of the predict_proba method can be directly interpreted as a confidence level. For instance a well calibrated (binary) classifier should classify the samples such that among the samples to which it gave a predict_proba value close to 0.8, approx. 80% actually belong to the positive class. LogisticRegression returns well calibrated predictions as it directly optimizes log-loss. In contrast, the other methods return biased probilities, with different biases per method: * GaussianNaiveBayes tends to push probabilties to 0 or 1 (note the counts in the histograms). This is mainly because it makes the assumption that features are conditionally independent given the class, which is not the case in this dataset which contains 2 redundant features. * RandomForestClassifier shows the opposite behavior: the histograms show peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1 are very rare. An explanation for this is given by Niculescu-Mizil and Caruana [1]: "Methods such as bagging and random forests that average predictions from a base set of models can have difficulty making predictions near 0 and 1 because variance in the underlying base models will bias predictions that should be near zero or one away from these values. Because predictions are restricted to the interval [0,1], errors caused by variance tend to be one- sided near zero and one. For example, if a model should predict p = 0 for a case, the only way bagging can achieve this is if all bagged trees predict zero. If we add noise to the trees that bagging is averaging over, this noise will cause some trees to predict values larger than 0 for this case, thus moving the average prediction of the bagged ensemble away from 0. We observe this effect most strongly with random forests because the base-level trees trained with random forests have relatively high variance due to feature subseting." As a result, the calibration curve shows a characteristic sigmoid shape, indicating that the classifier could trust its "intuition" more and return probabilties closer to 0 or 1 typically. * Support Vector Classification (SVC) shows an even more sigmoid curve as the RandomForestClassifier, which is typical for maximum-margin methods (compare Niculescu-Mizil and Caruana [1]), which focus on hard samples that are close to the decision boundary (the support vectors). .. topic:: References: .. [1] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ print(__doc__) # Author: Jan Hendrik Metzen <[email protected]> # License: BSD Style. 
import numpy as np np.random.seed(0) import matplotlib.pyplot as plt from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import LinearSVC from sklearn.calibration import calibration_curve X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=2) train_samples = 100 # Samples used for training the models X_train = X[:train_samples] X_test = X[train_samples:] y_train = y[:train_samples] y_test = y[train_samples:] # Create classifiers lr = LogisticRegression() gnb = GaussianNB() svc = LinearSVC(C=1.0) rfc = RandomForestClassifier(n_estimators=100) ############################################################################### # Plot calibration plots plt.figure(figsize=(10, 10)) ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax2 = plt.subplot2grid((3, 1), (2, 0)) ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated") for clf, name in [(lr, 'Logistic'), (gnb, 'Naive Bayes'), (svc, 'Support Vector Classification'), (rfc, 'Random Forest')]: clf.fit(X_train, y_train) if hasattr(clf, "predict_proba"): prob_pos = clf.predict_proba(X_test)[:, 1] else: # use decision function prob_pos = clf.decision_function(X_test) prob_pos = \ (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min()) fraction_of_positives, mean_predicted_value = \ calibration_curve(y_test, prob_pos, n_bins=10) ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s" % (name, )) ax2.hist(prob_pos, range=(0, 1), bins=10, label=name, histtype="step", lw=2) ax1.set_ylabel("Fraction of positives") ax1.set_ylim([-0.05, 1.05]) ax1.legend(loc="lower right") ax1.set_title('Calibration plots (reliability curve)') ax2.set_xlabel("Mean predicted value") ax2.set_ylabel("Count") ax2.legend(loc="upper center", ncol=2) plt.tight_layout() plt.show()
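###############################################################################
# A short numeric complement to the reliability curves above; this is only a
# sketch and assumes the fitted classifiers and the train/test split defined
# earlier are still in scope. The Brier score summarizes calibration and
# refinement in a single number (lower is better), using the same min-max
# scaling for the SVC decision values as in the plotting loop.
from sklearn.metrics import brier_score_loss

for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use the scaled decision function as a probability surrogate
        prob_pos = clf.decision_function(X_test)
        prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    print("%s: Brier score = %.3f" % (name, brier_score_loss(y_test, prob_pos)))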
bsd-3-clause
RyanChinSang/LeagueLatency
BETA/Test Code/ColArea/ColAreaEx.py
1
3643
import matplotlib.pyplot as plt import numpy as np x = np.arange(0.0, 2, 0.01) y1 = np.sin(2*np.pi*x) y2 = 1.2*np.sin(4*np.pi*x) # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True) # ax1.fill_between(x, 0, y1) # ax1.set_ylabel('between y1 and 0') # ax2.fill_between(x, y1, 1) # ax2.set_ylabel('between y1 and 1') # # ax3.fill_between(x, y1, y2) # ax3.set_ylabel('between y1 and y2') # ax3.set_xlabel('x') # now fill between y1 and y2 where a logical condition is met. Note # this is different than calling # fill_between(x[where], y1[where],y2[where] # because of edge effects over multiple contiguous regions. # fig, (ax, ax1) = plt.subplots(2, 1, sharex=True) # ax.plot(x, y1, x, y2, color='black') # ax.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green', interpolate=True) # ax.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True) # ax.set_title('fill between where') # Test support for masked arrays. # y2 = np.ma.masked_greater(y2, 1.0) # ax1.plot(x, y1, x, y2, color='black') # ax1.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green', interpolate=True) # ax1.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True) # ax1.set_title('Now regions with y2>1 are masked') # This example illustrates a problem; because of the data # gridding, there are undesired unfilled triangles at the crossover # points. A brute-force solution would be to interpolate all # arrays to a very fine grid before plotting. # show how to use transforms to create axes spans where a certain condition is satisfied fig, ax = plt.subplots() y = 1000*np.sin(4*np.pi*x) ax.plot(x, y, color='black') # use the data coordinates for the x-axis and the axes coordinates for the y-axis import matplotlib.transforms as mtransforms trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes) theta = 0.9 lim = float(ax.get_xbound()[0]) # print ax.get_xbound(), ax.get_ybound() # print ax.get_xlim(), ax.get_ylim() # print y.max(), x.max() # ax.axhline(theta, color='green', lw=2, alpha=0.5) # ax.axhline(-theta, color='red', lw=2, alpha=0.5) # ax.fill_between(x, 0, 1, where=y > theta, facecolor='green', alpha=0.5, transform=trans) # ax.fill_between(x, 0, 1, where=y < -theta, facecolor='red', alpha=0.5, transform=trans) # ax.fill_between(x, ax.get_ylim()[0], ax.get_ylim()[1], where=y > float('-inf'), facecolor='red', alpha=0.5, transform=trans) # ax.fill_between(ax.get_xlim(), ax.get_ylim()[0], ax.get_ylim()[1], facecolor='red', alpha=0.5, transform=trans) # ax.fill_between(ax.get_xbound(), ax.get_ybound()[0], ax.get_ybound()[1], facecolor='blue', alpha=0.5) # ax.axhspan(ax.get_ylim()[0], ax.get_ylim()[1], color='green') # ax.axvspan(ax.get_xlim()[0], ax.get_xlim()[1], color='green') # ax.axvspan(ax.get_xbound()[0], ax.get_xbound()[1], color='green') # ax.set_ylim((1.1*y.min(), 1.1*y.max())) # ax.set_xlim((1.1*x.min(), 1.1*x.max())) yax_min = 1.1*y.min() yax_max = 1.1*y.max() ax.set_ylim([yax_min, yax_max]) # xax_min = 1.1*x.min() # xax_max = 1.1*x.max() # print ax.get_xbound(), ax.get_ybound() print ax.get_xlim(), ax.get_ylim(), y.min() # print y.max(), x.max() if yax_max > 500: ax.axhspan(yax_min, 200, fc='green', ec='none', alpha=0.25) ax.axhspan(200, 500, fc='yellow', ec='none', alpha=0.25) ax.axhspan(500, yax_max, fc='red', ec='none', alpha=0.25) elif yax_max > 200: ax.axhspan(yax_min, 200, fc='green', ec='none', alpha=0.25) ax.axhspan(200, yax_max, fc='yellow', ec='none', alpha=0.25) else: ax.axhspan(yax_min, yax_max, fc='green', ec='none', alpha=0.25) # ax.axvspan(1.1*x.min(), 
1.1*x.max(), color='green') plt.show()
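# The threshold-banding logic above can be folded into a small reusable helper.
# This is just a sketch of one possible refactoring: the 200 and 500 cut-offs
# are taken from the if/elif chain above, everything else (names, defaults) is
# illustrative.
def shade_latency_bands(axes, y_min, y_max, low=200, high=500, alpha=0.25):
    """Shade green/yellow/red horizontal bands between y_min and y_max."""
    bands = [(y_min, min(low, y_max), 'green')]
    if y_max > low:
        bands.append((low, min(high, y_max), 'yellow'))
    if y_max > high:
        bands.append((high, y_max, 'red'))
    for lower, upper, colour in bands:
        axes.axhspan(lower, upper, fc=colour, ec='none', alpha=alpha)

# Equivalent to the branching above:
# shade_latency_bands(ax, yax_min, yax_max)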
gpl-3.0
mehdidc/scikit-learn
sklearn/hmm.py
12
48722
# Hidden Markov Models # # Author: Ron Weiss <[email protected]> # and Shiqiao Du <[email protected]> # API changes: Jaques Grobler <[email protected]> """ The :mod:`sklearn.hmm` module implements hidden Markov models. **Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known numerical stability issues. This module will be removed in version 0.17. It has been moved to a separate repository: https://github.com/hmmlearn/hmmlearn """ import string import numpy as np from .utils import check_random_state, deprecated from .utils.extmath import logsumexp from .utils.validation import check_is_fitted from .base import BaseEstimator from .mixture import ( GMM, log_multivariate_normal_density, sample_gaussian, distribute_covar_matrix_to_match_covariance_type, _validate_covars) from . import cluster from . import _hmmc __all__ = ['GMMHMM', 'GaussianHMM', 'MultinomialHMM', 'decoder_algorithms', 'normalize'] ZEROLOGPROB = -1e200 EPS = np.finfo(float).eps NEGINF = -np.inf decoder_algorithms = ("viterbi", "map") @deprecated("WARNING: The HMM module and its functions will be removed in 0.17 " "as it no longer falls within the project's scope and API. " "It has been moved to a separate repository: " "https://github.com/hmmlearn/hmmlearn") def normalize(A, axis=None): """ Normalize the input array so that it sums to 1. WARNING: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Parameters ---------- A: array, shape (n_samples, n_features) Non-normalized input data axis: int dimension along which normalization is performed Returns ------- normalized_A: array, shape (n_samples, n_features) A with values normalized (summing to 1) along the prescribed axis WARNING: Modifies inplace the array """ A += EPS Asum = A.sum(axis) if axis and A.ndim > 1: # Make sure we don't divide by zero. Asum[Asum == 0] = 1 shape = list(A.shape) shape[axis] = 1 Asum.shape = shape return A / Asum @deprecated("WARNING: The HMM module and its function will be removed in 0.17" "as it no longer falls within the project's scope and API. " "It has been moved to a separate repository: " "https://github.com/hmmlearn/hmmlearn") class _BaseHMM(BaseEstimator): """Hidden Markov Model base class. Representation of a hidden Markov model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a HMM. See the instance documentation for details specific to a particular object. .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Attributes ---------- n_components : int Number of states in the model. transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. transmat_prior : array, shape (`n_components`, `n_components`) Matrix of prior transition probabilities between states. startprob_prior : array, shape ('n_components`,) Initial state occupation prior distribution. algorithm : string, one of the decoder_algorithms decoder algorithm random_state: RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. params : string, optional Controls which parameters are updated in the training process. 
Can contain any combination of 's' for startprob, 't' for transmat, and other characters for subclass-specific emmission parameters. Defaults to all parameters. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, and other characters for subclass-specific emmission parameters. Defaults to all parameters. See Also -------- GMM : Gaussian mixture model """ # This class implements the public interface to all HMMs that # derive from it, including all of the machinery for the # forward-backward and Viterbi algorithms. Subclasses need only # implement _generate_sample_from_state(), _compute_log_likelihood(), # _init(), _initialize_sufficient_statistics(), # _accumulate_sufficient_statistics(), and _do_mstep(), all of # which depend on the specific emission distribution. # # Subclasses will probably also want to implement properties for # the emission distribution parameters to expose them publicly. def __init__(self, n_components=1, startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): self.n_components = n_components self.n_iter = n_iter self.thresh = thresh self.params = params self.init_params = init_params self.startprob_ = startprob self.startprob_prior = startprob_prior self.transmat_ = transmat self.transmat_prior = transmat_prior self._algorithm = algorithm self.random_state = random_state def eval(self, X): return self.score_samples(X) def score_samples(self, obs): """Compute the log probability under the model and compute posteriors. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- logprob : float Log likelihood of the sequence ``obs``. posteriors : array_like, shape (n, n_components) Posterior probabilities of each state for each observation See Also -------- score : Compute the log probability under the model decode : Find most likely state sequence corresponding to a `obs` """ obs = np.asarray(obs) framelogprob = self._compute_log_likelihood(obs) logprob, fwdlattice = self._do_forward_pass(framelogprob) bwdlattice = self._do_backward_pass(framelogprob) gamma = fwdlattice + bwdlattice # gamma is guaranteed to be correctly normalized by logprob at # all frames, unless we do approximate inference using pruning. # So, we will normalize each frame explicitly in case we # pruned too aggressively. posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T posteriors += np.finfo(np.float32).eps posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1)) return logprob, posteriors def score(self, obs): """Compute the log probability under the model. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : float Log likelihood of the ``obs``. See Also -------- score_samples : Compute the log probability under the model and posteriors decode : Find most likely state sequence corresponding to a `obs` """ obs = np.asarray(obs) framelogprob = self._compute_log_likelihood(obs) logprob, _ = self._do_forward_pass(framelogprob) return logprob def _decode_viterbi(self, obs): """Find most likely state sequence corresponding to ``obs``. Uses the Viterbi algorithm. 
Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- viterbi_logprob : float Log probability of the maximum likelihood path through the HMM. state_sequence : array_like, shape (n,) Index of the most likely states for each observation. See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model """ obs = np.asarray(obs) framelogprob = self._compute_log_likelihood(obs) viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob) return viterbi_logprob, state_sequence def _decode_map(self, obs): """Find most likely state sequence corresponding to `obs`. Uses the maximum a posteriori estimation. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- map_logprob : float Log probability of the maximum likelihood path through the HMM state_sequence : array_like, shape (n,) Index of the most likely states for each observation See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model. """ _, posteriors = self.score_samples(obs) state_sequence = np.argmax(posteriors, axis=1) map_logprob = np.max(posteriors, axis=1).sum() return map_logprob, state_sequence def decode(self, obs, algorithm="viterbi"): """Find most likely state sequence corresponding to ``obs``. Uses the selected algorithm for decoding. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. algorithm : string, one of the `decoder_algorithms` decoder algorithm to be used Returns ------- logprob : float Log probability of the maximum likelihood path through the HMM state_sequence : array_like, shape (n,) Index of the most likely states for each observation See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model. """ if self._algorithm in decoder_algorithms: algorithm = self._algorithm elif algorithm in decoder_algorithms: algorithm = algorithm decoder = {"viterbi": self._decode_viterbi, "map": self._decode_map} logprob, state_sequence = decoder[algorithm](obs) return logprob, state_sequence def predict(self, obs, algorithm="viterbi"): """Find most likely state sequence corresponding to `obs`. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- state_sequence : array_like, shape (n,) Index of the most likely states for each observation """ _, state_sequence = self.decode(obs, algorithm) return state_sequence def predict_proba(self, obs): """Compute the posterior probability for each state in the model Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- T : array-like, shape (n, n_components) Returns the probability of the sample for each state in the model. """ _, posteriors = self.score_samples(obs) return posteriors def sample(self, n=1, random_state=None): """Generate random samples from the model. 
Parameters ---------- n : int Number of samples to generate. random_state: RandomState or an int seed (0 by default) A random number generator instance. If None is given, the object's random_state is used Returns ------- (obs, hidden_states) obs : array_like, length `n` List of samples hidden_states : array_like, length `n` List of hidden states """ if random_state is None: random_state = self.random_state random_state = check_random_state(random_state) startprob_pdf = self.startprob_ startprob_cdf = np.cumsum(startprob_pdf) transmat_pdf = self.transmat_ transmat_cdf = np.cumsum(transmat_pdf, 1) # Initial state. rand = random_state.rand() currstate = (startprob_cdf > rand).argmax() hidden_states = [currstate] obs = [self._generate_sample_from_state( currstate, random_state=random_state)] for _ in range(n - 1): rand = random_state.rand() currstate = (transmat_cdf[currstate] > rand).argmax() hidden_states.append(currstate) obs.append(self._generate_sample_from_state( currstate, random_state=random_state)) return np.array(obs), np.array(hidden_states, dtype=int) def fit(self, obs): """Estimate model parameters. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, pass proper ``init_params`` keyword argument to estimator's constructor. Parameters ---------- obs : list List of array-like observation sequences, each of which has shape (n_i, n_features), where n_i is the length of the i_th observation. Notes ----- In general, `logprob` should be non-decreasing unless aggressive pruning is used. Decreasing `logprob` is generally a sign of overfitting (e.g. a covariance parameter getting too small). You can fix this by getting more training data, or strengthening the appropriate subclass-specific regularization parameter. """ if self.algorithm not in decoder_algorithms: self._algorithm = "viterbi" self._init(obs, self.init_params) logprob = [] for i in range(self.n_iter): # Expectation step stats = self._initialize_sufficient_statistics() curr_logprob = 0 for seq in obs: framelogprob = self._compute_log_likelihood(seq) lpr, fwdlattice = self._do_forward_pass(framelogprob) bwdlattice = self._do_backward_pass(framelogprob) gamma = fwdlattice + bwdlattice posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T curr_logprob += lpr self._accumulate_sufficient_statistics( stats, seq, framelogprob, posteriors, fwdlattice, bwdlattice, self.params) logprob.append(curr_logprob) # Check for convergence. 
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh: break # Maximization step self._do_mstep(stats, self.params) return self def _get_algorithm(self): "decoder algorithm" return self._algorithm def _set_algorithm(self, algorithm): if algorithm not in decoder_algorithms: raise ValueError("algorithm must be one of the decoder_algorithms") self._algorithm = algorithm algorithm = property(_get_algorithm, _set_algorithm) def _get_startprob(self): """Mixing startprob for each state.""" return np.exp(self._log_startprob) def _set_startprob(self, startprob): if startprob is None: startprob = np.tile(1.0 / self.n_components, self.n_components) else: startprob = np.asarray(startprob, dtype=np.float) # check if there exists a component whose value is exactly zero # if so, add a small number and re-normalize if not np.alltrue(startprob): normalize(startprob) if len(startprob) != self.n_components: raise ValueError('startprob must have length n_components') if not np.allclose(np.sum(startprob), 1.0): raise ValueError('startprob must sum to 1.0') self._log_startprob = np.log(np.asarray(startprob).copy()) startprob_ = property(_get_startprob, _set_startprob) def _get_transmat(self): """Matrix of transition probabilities.""" return np.exp(self._log_transmat) def _set_transmat(self, transmat): if transmat is None: transmat = np.tile(1.0 / self.n_components, (self.n_components, self.n_components)) # check if there exists a component whose value is exactly zero # if so, add a small number and re-normalize if not np.alltrue(transmat): normalize(transmat, axis=1) if (np.asarray(transmat).shape != (self.n_components, self.n_components)): raise ValueError('transmat must have shape ' '(n_components, n_components)') if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)): raise ValueError('Rows of transmat must sum to 1.0') self._log_transmat = np.log(np.asarray(transmat).copy()) underflow_idx = np.isnan(self._log_transmat) self._log_transmat[underflow_idx] = NEGINF transmat_ = property(_get_transmat, _set_transmat) def _do_viterbi_pass(self, framelogprob): n_observations, n_components = framelogprob.shape state_sequence, logprob = _hmmc._viterbi( n_observations, n_components, self._log_startprob, self._log_transmat, framelogprob) return logprob, state_sequence def _do_forward_pass(self, framelogprob): n_observations, n_components = framelogprob.shape fwdlattice = np.zeros((n_observations, n_components)) _hmmc._forward(n_observations, n_components, self._log_startprob, self._log_transmat, framelogprob, fwdlattice) fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF return logsumexp(fwdlattice[-1]), fwdlattice def _do_backward_pass(self, framelogprob): n_observations, n_components = framelogprob.shape bwdlattice = np.zeros((n_observations, n_components)) _hmmc._backward(n_observations, n_components, self._log_startprob, self._log_transmat, framelogprob, bwdlattice) bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF return bwdlattice def _compute_log_likelihood(self, obs): pass def _generate_sample_from_state(self, state, random_state=None): pass def _init(self, obs, params): if 's' in params: self.startprob_.fill(1.0 / self.n_components) if 't' in params: self.transmat_.fill(1.0 / self.n_components) # Methods used by self.fit() def _initialize_sufficient_statistics(self): stats = {'nobs': 0, 'start': np.zeros(self.n_components), 'trans': np.zeros((self.n_components, self.n_components))} return stats def _accumulate_sufficient_statistics(self, stats, seq, framelogprob, posteriors, fwdlattice, bwdlattice, params): 
stats['nobs'] += 1 if 's' in params: stats['start'] += posteriors[0] if 't' in params: n_observations, n_components = framelogprob.shape # when the sample is of length 1, it contains no transitions # so there is no reason to update our trans. matrix estimate if n_observations > 1: lneta = np.zeros((n_observations - 1, n_components, n_components)) lnP = logsumexp(fwdlattice[-1]) _hmmc._compute_lneta(n_observations, n_components, fwdlattice, self._log_transmat, bwdlattice, framelogprob, lnP, lneta) stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700)) def _do_mstep(self, stats, params): # Based on Huang, Acero, Hon, "Spoken Language Processing", # p. 443 - 445 if self.startprob_prior is None: self.startprob_prior = 1.0 if self.transmat_prior is None: self.transmat_prior = 1.0 if 's' in params: self.startprob_ = normalize( np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20)) if 't' in params: transmat_ = normalize( np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20), axis=1) self.transmat_ = transmat_ class GaussianHMM(_BaseHMM): """Hidden Markov Model with Gaussian emissions Representation of a hidden Markov model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a HMM. .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Parameters ---------- n_components : int Number of states. ``_covariance_type`` : string String describing the type of covariance parameters to use. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. Attributes ---------- ``_covariance_type`` : string String describing the type of covariance parameters used by the model. Must be one of 'spherical', 'tied', 'diag', 'full'. n_features : int Dimensionality of the Gaussian emissions. n_components : int Number of states in the model. transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. means : array, shape (`n_components`, `n_features`) Mean parameters for each state. covars : array Covariance parameters for each state. The shape depends on ``_covariance_type``:: (`n_components`,) if 'spherical', (`n_features`, `n_features`) if 'tied', (`n_components`, `n_features`) if 'diag', (`n_components`, `n_features`, `n_features`) if 'full' random_state: RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, and 'c' for covars. Defaults to all parameters. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, and 'c' for covars. Defaults to all parameters. Examples -------- >>> from sklearn.hmm import GaussianHMM >>> GaussianHMM(n_components=2) ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE GaussianHMM(algorithm='viterbi',... 
See Also -------- GMM : Gaussian mixture model """ def __init__(self, n_components=1, covariance_type='diag', startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", means_prior=None, means_weight=0, covars_prior=1e-2, covars_weight=1, random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): _BaseHMM.__init__(self, n_components, startprob, transmat, startprob_prior=startprob_prior, transmat_prior=transmat_prior, algorithm=algorithm, random_state=random_state, n_iter=n_iter, thresh=thresh, params=params, init_params=init_params) self._covariance_type = covariance_type if not covariance_type in ['spherical', 'tied', 'diag', 'full']: raise ValueError('bad covariance_type') self.means_prior = means_prior self.means_weight = means_weight self.covars_prior = covars_prior self.covars_weight = covars_weight @property def covariance_type(self): """Covariance type of the model. Must be one of 'spherical', 'tied', 'diag', 'full'. """ return self._covariance_type def _get_means(self): """Mean parameters for each state.""" return self._means_ def _set_means(self, means): means = np.asarray(means) if (hasattr(self, 'n_features') and means.shape != (self.n_components, self.n_features)): raise ValueError('means must have shape ' '(n_components, n_features)') self._means_ = means.copy() self.n_features = self._means_.shape[1] means_ = property(_get_means, _set_means) def _get_covars(self): """Return covars as a full matrix.""" if self._covariance_type == 'full': return self._covars_ elif self._covariance_type == 'diag': return [np.diag(cov) for cov in self._covars_] elif self._covariance_type == 'tied': return [self._covars_] * self.n_components elif self._covariance_type == 'spherical': return [np.eye(self.n_features) * f for f in self._covars_] def _set_covars(self, covars): covars = np.asarray(covars) _validate_covars(covars, self._covariance_type, self.n_components) self._covars_ = covars.copy() covars_ = property(_get_covars, _set_covars) def _compute_log_likelihood(self, obs): check_is_fitted(self, '_means_') return log_multivariate_normal_density( obs, self._means_, self._covars_, self._covariance_type) def _generate_sample_from_state(self, state, random_state=None): if self._covariance_type == 'tied': cv = self._covars_ else: cv = self._covars_[state] return sample_gaussian(self._means_[state], cv, self._covariance_type, random_state=random_state) def _init(self, obs, params='stmc'): super(GaussianHMM, self)._init(obs, params=params) if (hasattr(self, 'n_features') and self.n_features != obs[0].shape[1]): raise ValueError('Unexpected number of dimensions, got %s but ' 'expected %s' % (obs[0].shape[1], self.n_features)) self.n_features = obs[0].shape[1] if 'm' in params: self._means_ = cluster.KMeans( n_clusters=self.n_components).fit(obs[0]).cluster_centers_ if 'c' in params: cv = np.cov(obs[0].T) if not cv.shape: cv.shape = (1, 1) self._covars_ = distribute_covar_matrix_to_match_covariance_type( cv, self._covariance_type, self.n_components) self._covars_[self._covars_ == 0] = 1e-5 def _initialize_sufficient_statistics(self): stats = super(GaussianHMM, self)._initialize_sufficient_statistics() stats['post'] = np.zeros(self.n_components) stats['obs'] = np.zeros((self.n_components, self.n_features)) stats['obs**2'] = np.zeros((self.n_components, self.n_features)) stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features, self.n_features)) return stats def _accumulate_sufficient_statistics(self, stats, 
obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(GaussianHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) if 'm' in params or 'c' in params: stats['post'] += posteriors.sum(axis=0) stats['obs'] += np.dot(posteriors.T, obs) if 'c' in params: if self._covariance_type in ('spherical', 'diag'): stats['obs**2'] += np.dot(posteriors.T, obs ** 2) elif self._covariance_type in ('tied', 'full'): for t, o in enumerate(obs): obsobsT = np.outer(o, o) for c in range(self.n_components): stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT def _do_mstep(self, stats, params): super(GaussianHMM, self)._do_mstep(stats, params) # Based on Huang, Acero, Hon, "Spoken Language Processing", # p. 443 - 445 denom = stats['post'][:, np.newaxis] if 'm' in params: prior = self.means_prior weight = self.means_weight if prior is None: weight = 0 prior = 0 self._means_ = (weight * prior + stats['obs']) / (weight + denom) if 'c' in params: covars_prior = self.covars_prior covars_weight = self.covars_weight if covars_prior is None: covars_weight = 0 covars_prior = 0 means_prior = self.means_prior means_weight = self.means_weight if means_prior is None: means_weight = 0 means_prior = 0 meandiff = self._means_ - means_prior if self._covariance_type in ('spherical', 'diag'): cv_num = (means_weight * (meandiff) ** 2 + stats['obs**2'] - 2 * self._means_ * stats['obs'] + self._means_ ** 2 * denom) cv_den = max(covars_weight - 1, 0) + denom self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5) if self._covariance_type == 'spherical': self._covars_ = np.tile( self._covars_.mean(1)[:, np.newaxis], (1, self._covars_.shape[1])) elif self._covariance_type in ('tied', 'full'): cvnum = np.empty((self.n_components, self.n_features, self.n_features)) for c in range(self.n_components): obsmean = np.outer(stats['obs'][c], self._means_[c]) cvnum[c] = (means_weight * np.outer(meandiff[c], meandiff[c]) + stats['obs*obs.T'][c] - obsmean - obsmean.T + np.outer(self._means_[c], self._means_[c]) * stats['post'][c]) cvweight = max(covars_weight - self.n_features, 0) if self._covariance_type == 'tied': self._covars_ = ((covars_prior + cvnum.sum(axis=0)) / (cvweight + stats['post'].sum())) elif self._covariance_type == 'full': self._covars_ = ((covars_prior + cvnum) / (cvweight + stats['post'][:, None, None])) def fit(self, obs): """Estimate model parameters. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, pass proper ``init_params`` keyword argument to estimator's constructor. Parameters ---------- obs : list List of array-like observation sequences, each of which has shape (n_i, n_features), where n_i is the length of the i_th observation. Notes ----- In general, `logprob` should be non-decreasing unless aggressive pruning is used. Decreasing `logprob` is generally a sign of overfitting (e.g. the covariance parameter on one or more components becomminging too small). You can fix this by getting more training data, or increasing covars_prior. """ return super(GaussianHMM, self).fit(obs) class MultinomialHMM(_BaseHMM): """Hidden Markov Model with multinomial (discrete) emissions .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Attributes ---------- n_components : int Number of states in the model. n_symbols : int Number of possible symbols emitted by the model (in the observations). 
transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. emissionprob : array, shape ('n_components`, 'n_symbols`) Probability of emitting a given symbol when in each state. random_state: RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 's' for startprob, 't' for transmat, 'e' for emmissionprob. Defaults to all parameters. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, 'e' for emmissionprob. Defaults to all parameters. Examples -------- >>> from sklearn.hmm import MultinomialHMM >>> MultinomialHMM(n_components=2) ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE MultinomialHMM(algorithm='viterbi',... See Also -------- GaussianHMM : HMM with Gaussian emissions """ def __init__(self, n_components=1, startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): """Create a hidden Markov model with multinomial emissions. Parameters ---------- n_components : int Number of states. """ _BaseHMM.__init__(self, n_components, startprob, transmat, startprob_prior=startprob_prior, transmat_prior=transmat_prior, algorithm=algorithm, random_state=random_state, n_iter=n_iter, thresh=thresh, params=params, init_params=init_params) def _get_emissionprob(self): """Emission probability distribution for each state.""" return np.exp(self._log_emissionprob) def _set_emissionprob(self, emissionprob): emissionprob = np.asarray(emissionprob) if hasattr(self, 'n_symbols') and \ emissionprob.shape != (self.n_components, self.n_symbols): raise ValueError('emissionprob must have shape ' '(n_components, n_symbols)') # check if there exists a component whose value is exactly zero # if so, add a small number and re-normalize if not np.alltrue(emissionprob): normalize(emissionprob) self._log_emissionprob = np.log(emissionprob) underflow_idx = np.isnan(self._log_emissionprob) self._log_emissionprob[underflow_idx] = NEGINF self.n_symbols = self._log_emissionprob.shape[1] emissionprob_ = property(_get_emissionprob, _set_emissionprob) def _compute_log_likelihood(self, obs): check_is_fitted(self, 'emissionprob_') return self._log_emissionprob[:, obs].T def _generate_sample_from_state(self, state, random_state=None): cdf = np.cumsum(self.emissionprob_[state, :]) random_state = check_random_state(random_state) rand = random_state.rand() symbol = (cdf > rand).argmax() return symbol def _init(self, obs, params='ste'): super(MultinomialHMM, self)._init(obs, params=params) self.random_state = check_random_state(self.random_state) if 'e' in params: if not hasattr(self, 'n_symbols'): symbols = set() for o in obs: symbols = symbols.union(set(o)) self.n_symbols = len(symbols) emissionprob = normalize(self.random_state.rand(self.n_components, self.n_symbols), 1) self.emissionprob_ = emissionprob def _initialize_sufficient_statistics(self): stats = super(MultinomialHMM, self)._initialize_sufficient_statistics() stats['obs'] = np.zeros((self.n_components, self.n_symbols)) return stats def 
_accumulate_sufficient_statistics(self, stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(MultinomialHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) if 'e' in params: for t, symbol in enumerate(obs): stats['obs'][:, symbol] += posteriors[t] def _do_mstep(self, stats, params): super(MultinomialHMM, self)._do_mstep(stats, params) if 'e' in params: self.emissionprob_ = (stats['obs'] / stats['obs'].sum(1)[:, np.newaxis]) def _check_input_symbols(self, obs): """check if input can be used for Multinomial.fit input must be both positive integer array and every element must be continuous. e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not """ symbols = np.asarray(obs).flatten() if symbols.dtype.kind != 'i': # input symbols must be integer return False if len(symbols) == 1: # input too short return False if np.any(symbols < 0): # input contains negative intiger return False symbols.sort() if np.any(np.diff(symbols) > 1): # input is discontinous return False return True def fit(self, obs, **kwargs): """Estimate model parameters. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, pass proper ``init_params`` keyword argument to estimator's constructor. Parameters ---------- obs : list List of array-like observation sequences, each of which has shape (n_i, n_features), where n_i is the length of the i_th observation. """ err_msg = ("Input must be both positive integer array and " "every element must be continuous, but %s was given.") if not self._check_input_symbols(obs): raise ValueError(err_msg % obs) return _BaseHMM.fit(self, obs, **kwargs) class GMMHMM(_BaseHMM): """Hidden Markov Model with Gaussin mixture emissions .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Attributes ---------- n_components : int Number of states in the model. transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. gmms : array of GMM objects, length `n_components` GMM emission distributions for each state. random_state : RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, 'c' for covars, and 'w' for GMM mixing weights. Defaults to all parameters. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, and 'c' for covars, and 'w' for GMM mixing weights. Defaults to all parameters. Examples -------- >>> from sklearn.hmm import GMMHMM >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag') ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE GMMHMM(algorithm='viterbi', covariance_type='diag',... 
See Also -------- GaussianHMM : HMM with Gaussian emissions """ def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", gmms=None, covariance_type='diag', covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): """Create a hidden Markov model with GMM emissions. Parameters ---------- n_components : int Number of states. """ _BaseHMM.__init__(self, n_components, startprob, transmat, startprob_prior=startprob_prior, transmat_prior=transmat_prior, algorithm=algorithm, random_state=random_state, n_iter=n_iter, thresh=thresh, params=params, init_params=init_params) # XXX: Hotfit for n_mix that is incompatible with the scikit's # BaseEstimator API self.n_mix = n_mix self._covariance_type = covariance_type self.covars_prior = covars_prior self.gmms = gmms if gmms is None: gmms = [] for x in range(self.n_components): if covariance_type is None: g = GMM(n_mix) else: g = GMM(n_mix, covariance_type=covariance_type) gmms.append(g) self.gmms_ = gmms # Read-only properties. @property def covariance_type(self): """Covariance type of the model. Must be one of 'spherical', 'tied', 'diag', 'full'. """ return self._covariance_type def _compute_log_likelihood(self, obs): return np.array([g.score(obs) for g in self.gmms_]).T def _generate_sample_from_state(self, state, random_state=None): return self.gmms_[state].sample(1, random_state=random_state).flatten() def _init(self, obs, params='stwmc'): super(GMMHMM, self)._init(obs, params=params) allobs = np.concatenate(obs, 0) for g in self.gmms_: g.set_params(init_params=params, n_iter=0) g.fit(allobs) def _initialize_sufficient_statistics(self): stats = super(GMMHMM, self)._initialize_sufficient_statistics() stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_] stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_] stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_] return stats def _accumulate_sufficient_statistics(self, stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(GMMHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) for state, g in enumerate(self.gmms_): _, lgmm_posteriors = g.score_samples(obs) lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis] + np.finfo(np.float).eps) gmm_posteriors = np.exp(lgmm_posteriors) tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type) n_features = g.means_.shape[1] tmp_gmm._set_covars( distribute_covar_matrix_to_match_covariance_type( np.eye(n_features), g.covariance_type, g.n_components)) norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params) if np.any(np.isnan(tmp_gmm.covars_)): raise ValueError stats['norm'][state] += norm if 'm' in params: stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis] if 'c' in params: if tmp_gmm.covariance_type == 'tied': stats['covars'][state] += tmp_gmm.covars_ * norm.sum() else: cvnorm = np.copy(norm) shape = np.ones(tmp_gmm.covars_.ndim) shape[0] = np.shape(tmp_gmm.covars_)[0] cvnorm.shape = shape stats['covars'][state] += tmp_gmm.covars_ * cvnorm def _do_mstep(self, stats, params): super(GMMHMM, self)._do_mstep(stats, params) # All that is left to do is to apply covars_prior to the # parameters updated in _accumulate_sufficient_statistics. 
        for state, g in enumerate(self.gmms_):
            n_features = g.means_.shape[1]
            norm = stats['norm'][state]
            if 'w' in params:
                g.weights_ = normalize(norm)
            if 'm' in params:
                g.means_ = stats['means'][state] / norm[:, np.newaxis]
            if 'c' in params:
                if g.covariance_type == 'tied':
                    g.covars_ = ((stats['covars'][state]
                                  + self.covars_prior * np.eye(n_features))
                                 / norm.sum())
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(g.covars_.ndim)
                    shape[0] = np.shape(g.covars_)[0]
                    cvnorm.shape = shape
                    if (g.covariance_type in ['spherical', 'diag']):
                        g.covars_ = (stats['covars'][state]
                                     + self.covars_prior) / cvnorm
                    elif g.covariance_type == 'full':
                        eye = np.eye(n_features)
                        g.covars_ = ((stats['covars'][state]
                                      + self.covars_prior * eye[np.newaxis])
                                     / cvnorm)
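A minimal usage sketch of the GaussianHMM API documented above. Since the module is flagged for removal in 0.17, this assumes a scikit-learn version that still ships sklearn.hmm; the sequences and hyperparameters below are illustrative only.

import numpy as np
from sklearn.hmm import GaussianHMM

rng = np.random.RandomState(0)
# two observation sequences, each of shape (n_i, n_features)
obs = [rng.randn(100, 2), rng.randn(80, 2) + 3.0]

model = GaussianHMM(n_components=2, covariance_type='diag', n_iter=20)
model.fit(obs)                          # EM over the list of sequences
hidden_states = model.predict(obs[0])   # Viterbi state sequence for one series
logprob = model.score(obs[0])           # log-likelihood under the fitted model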
bsd-3-clause
Jimmy-Morzaria/scikit-learn
sklearn/feature_selection/variance_threshold.py
26
2532
# Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD

import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted


class VarianceThreshold(BaseEstimator, SelectorMixin):
    """Feature selector that removes all low-variance features.

    This feature selection algorithm looks only at the features (X), not the
    desired outputs (y), and can thus be used for unsupervised learning.

    Parameters
    ----------
    threshold : float, optional
        Features with a training-set variance lower than this threshold will
        be removed. The default is to keep all features with non-zero
        variance, i.e. remove the features that have the same value in all
        samples.

    Attributes
    ----------
    variances_ : array, shape (n_features,)
        Variances of individual features.

    Examples
    --------
    The following dataset has integer features, two of which are the same
    in every sample. These are removed with the default setting for threshold::

        >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
        >>> selector = VarianceThreshold()
        >>> selector.fit_transform(X)
        array([[2, 0],
               [1, 4],
               [1, 1]])
    """

    def __init__(self, threshold=0.):
        self.threshold = threshold

    def fit(self, X, y=None):
        """Learn empirical variances from X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Sample vectors from which to compute variances.

        y : any
            Ignored. This parameter exists only for compatibility with
            sklearn.pipeline.Pipeline.

        Returns
        -------
        self
        """
        X = check_array(X, ('csr', 'csc'), dtype=np.float64)

        if hasattr(X, "toarray"):   # sparse matrix
            _, self.variances_ = mean_variance_axis(X, axis=0)
        else:
            self.variances_ = np.var(X, axis=0)

        if np.all(self.variances_ <= self.threshold):
            msg = "No feature in X meets the variance threshold {0:.5f}"
            if X.shape[0] == 1:
                msg += " (X contains only one sample)"
            raise ValueError(msg.format(self.threshold))

        return self

    def _get_support_mask(self):
        check_is_fitted(self, 'variances_')

        return self.variances_ > self.threshold
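A short, hedged usage sketch of the selector above; the toy data and the threshold value are made up.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0., 2., 0., 3.],
              [0., 1., 4., 3.],
              [0., 1., 1., 3.]])

sel = VarianceThreshold(threshold=0.1)        # drop near-constant columns
X_dense = sel.fit_transform(X)                # dense input
X_sparse = sel.fit_transform(csr_matrix(X))   # CSR/CSC input is also accepted
print(sel.variances_)     # per-feature variances learned in fit()
print(sel.get_support())  # boolean mask of kept features (from SelectorMixin)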
bsd-3-clause
meduz/scikit-learn
sklearn/datasets/tests/test_20news.py
75
3266
"""Test the 20news downloader, if the data is available.""" import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest from sklearn import datasets def test_20news(): try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract a reduced dataset data2cats = datasets.fetch_20newsgroups( subset='all', categories=data.target_names[-1:-3:-1], shuffle=False) # Check that the ordering of the target_names is the same # as the ordering in the full dataset assert_equal(data2cats.target_names, data.target_names[-2:]) # Assert that we have only 0 and 1 as labels assert_equal(np.unique(data2cats.target).tolist(), [0, 1]) # Check that the number of filenames is consistent with data/target assert_equal(len(data2cats.filenames), len(data2cats.target)) assert_equal(len(data2cats.filenames), len(data2cats.data)) # Check that the first entry of the reduced dataset corresponds to # the first entry of the corresponding category in the full dataset entry1 = data2cats.data[0] category = data2cats.target_names[data2cats.target[0]] label = data.target_names.index(category) entry2 = data.data[np.where(data.target == label)[0][0]] assert_equal(entry1, entry2) def test_20news_length_consistency(): """Checks the length consistencies within the bunch This is a non-regression test for a bug present in 0.16.1. """ try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract the full dataset data = datasets.fetch_20newsgroups(subset='all') assert_equal(len(data['data']), len(data.data)) assert_equal(len(data['target']), len(data.target)) assert_equal(len(data['filenames']), len(data.filenames)) def test_20news_vectorized(): try: datasets.fetch_20newsgroups(subset='all', download_if_missing=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # test subset = train bunch = datasets.fetch_20newsgroups_vectorized(subset="train") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314, 130107)) assert_equal(bunch.target.shape[0], 11314) assert_equal(bunch.data.dtype, np.float64) # test subset = test bunch = datasets.fetch_20newsgroups_vectorized(subset="test") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (7532, 130107)) assert_equal(bunch.target.shape[0], 7532) assert_equal(bunch.data.dtype, np.float64) # test subset = all bunch = datasets.fetch_20newsgroups_vectorized(subset='all') assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314 + 7532, 130107)) assert_equal(bunch.target.shape[0], 11314 + 7532) assert_equal(bunch.data.dtype, np.float64)
bsd-3-clause
npricejones/spectralspace
spectralspace/sample/star_sample.py
1
31388
import numpy as np import os,inspect from tqdm import tqdm import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import spectralspace.sample.access_spectrum as acs import apogee.samples.rc as rcmodel import apogee.tools.read as apread from apogee.tools.path import change_dr from spectralspace.sample.read_clusterdata import read_caldata from importlib import reload import isodist font = {'family': 'serif', 'weight': 'normal', 'size' : 20 } matplotlib.rc('font',**font) plt.ion() aspcappix = 7214 def rgsample(): """ Selects red giants from APOGEE sample """ data= apread.allStar(main=True,exclude_star_bad=True,exclude_star_warn=True) jk= data['J0']-data['K0'] z= isodist.FEH2Z(data['METALS'],zsolar=0.017) z[z > 0.024]= 0.024 logg= data['LOGG'] indx= ((jk >= 0.8) +(logg > rcmodel.loggteffcut(data['TEFF'],z,upper=True))) rgindx=indx*(data['METALS'] > -.8) return data[rgindx] def get_synthetic(model,datadict=None,data=[],spectra=[],spectra_errs=[],bitmask=[]): """ Retrieves information about a synthetic sample model: star_sample object to fill with synthetic information datadict: condensed argument containing entries for each of following kwargs. if the following are passed separately they are overridden by the dictionary entries data: numpy structured array with columns for TEFF, LOGG and FE_H, one entry per star spectra: array of ASPCAP shaped spectra (7214 pixels per spectrum) spectra_errs: array of ASPCAP shaped uncertaintes on spectra (7214 pixels per spectrum) bitmask: base 2 bitmask indicating flagged pixels. defaults to no flags Updates properties of model, returns nothing. """ if isinstance(datadict,dict): data = datadict['data'] spectra = datadict['spectra'] spectra_errs = datadict['spectra_errs'] bitmask = datadict['bitmask'] model.data = data # Create fit variable arrays model.teff = np.ma.masked_array(data['TEFF']) model.logg = np.ma.masked_array(data['LOGG']) model.fe_h = np.ma.masked_array(data['FE_H']) model.c_h = np.ma.masked_array(data['C_H']) model.n_h = np.ma.masked_array(data['N_H']) model.o_h = np.ma.masked_array(data['O_H']) model.fib = np.ma.masked_array(data['MEANFIB']) # Create spectra arrays model.spectra = np.ma.masked_array(spectra) model.spectra_errs = np.ma.masked_array(np.zeros((len(data), aspcappix))) if isinstance(spectra_errs,(int,float)): model.spectra_errs+=spectra_errs elif isinstance(spectra_errs,(np.ndarray)): model.spectra_errs += spectra_errs model._bitmasks = np.zeros((len(data),aspcappix),dtype=np.int64) if isinstance(bitmask,(np.ndarray)): model._bitmasks = bitmask # Functions to access particular sample types readfn = {'apogee':{'clusters' : read_caldata, # Sample of clusters 'OCs': read_caldata, # Sample of open clusters 'GCs': read_caldata, # Sample of globular clusters 'red_clump' : apread.rcsample,# Sample of red clump star 'red_giant' : rgsample, # Sample of red giant star 'syn': get_synthetic } } # List of accepted keys to do slice in keyList = ['RA','DEC','GLON','GLAT','TEFF','LOGG','TEFF_ERR','LOGG_ERR', 'AL_H','CA_H','C_H','FE_H','K_H','MG_H','MN_H','NA_H','NI_H', 'N_H','O_H','SI_H','S_H','TI_H','V_H','CLUSTER','MEANFIB','SIGFIB'] keyList.sort() # List of accepted keys for upper and lower limits _upperKeys = ['max','m','Max','Maximum','maximum',''] _lowerKeys = ['min','m','Min','Minimum','minimum',''] class starSample(object): """ Gets properties of a sample of stars given a key that defines the read function. 
""" def __init__(self,dataSource,sampleType,ask=True,datadict=None): """ Get properties for all stars that match the sample type sampleType: designator of the sample type - must be a key in readfn and independentVariables in data.py """ if sampleType == 'syn': self._sampleType = sampleType self._dataSource = dataSource self.DR = '0' if not isinstance(datadict,dict): print('Initialized empty star sample object, call get_synthetic(), passing the name of this object as the first argument') elif isinstance(datadict,dict): get_synthetic(self,datadict) elif sampleType != 'syn': self._dataSource = dataSource if self._dataSource == 'apogee': if ask: self.DR = input('Which data release? (Enter for 12): ') if self.DR=='': self.DR='12' if not ask: self.DR = '12' if self.DR=='12': os.environ['RESULTS_VERS']='v603' change_dr('12') if self.DR=='13': os.environ['RESULTS_VERS']='l30e.2' change_dr('13') os.system('echo RESULTS_VERS $RESULTS_VERS') change_dr(self.DR) self._sampleType = sampleType self._getProperties() def _getProperties(self): """ Get properties of all possible stars to be used. """ if self.DR: self.data = readfn[self._dataSource][self._sampleType]() if self.DR=='12': fib = np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','DR12_supplement','fiberinfo.npy')) if self._sampleType=='clusters': notmissing = (np.array([i for i in range(len(self.data['APOGEE_ID'])) if self.data['APOGEE_ID'][i] in fib['APOGEE_ID']]),) else: notmissing = (np.arange(0,len(self.data)),) import numpy.lib.recfunctions as rfunc self.data = rfunc.append_fields(self.data,('MEANFIB','SIGFIB'),data=(np.zeros(len(self.data)),np.zeros(len(self.data))),dtypes=('f4','f4'),usemask=False) meanfib = dict(zip(fib['APOGEE_ID'],fib['MEANFIB'])) sigfib = dict(zip(fib['APOGEE_ID'],fib['SIGFIB'])) self.data['MEANFIB'][notmissing] = np.array([meanfib[apoid] for apoid in self.data['APOGEE_ID'][notmissing]]) self.data['SIGFIB'][notmissing] = np.array([sigfib[apoid] for apoid in self.data['APOGEE_ID'][notmissing]]) print('properties ',dir(self)) def initArrays(self,stardata): """ Initialize arrays. """ # Create fit variable arrays self.teff = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) self.logg = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) self.fe_h = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) self.c_h = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) self.n_h = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) self.o_h = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) self.fib = np.ma.masked_array(np.zeros((len(stardata)), dtype=float)) # Create spectra arrays self.spectra = np.ma.masked_array(np.zeros((len(stardata),aspcappix), dtype=float)) self.spectra_errs = np.ma.masked_array(np.zeros((len(stardata), aspcappix), dtype=float)) self._bitmasks = np.zeros((len(stardata),aspcappix),dtype=np.int64) def makeArrays(self,stardata): """ Create arrays across all stars in the sample with shape number of stars by aspcappix. 
stardata: array whose columns contain information about stars in sample """ self.initArrays(stardata) missing = 0 # Fill arrays for each star print(stardata.dtype) for star in tqdm(range(len(stardata)),desc='read star data'): LOC = stardata[star]['LOCATION_ID'] APO = stardata[star]['APOGEE_ID'] TEFF = stardata[star]['TEFF'] LOGG = stardata[star]['LOGG'] FE_H = stardata[star]['FE_H'] if self.DR=='12': C_H = stardata[star]['C_H'] N_H = stardata[star]['N_H'] O_H = stardata[star]['O_H'] elif self.DR=='13': C_H = stardata[star]['C_FE'] N_H = stardata[star]['N_FE'] O_H = stardata[star]['O_FE'] FIB = stardata[star]['MEANFIB'] # Fit variables self.teff[star] = np.ma.masked_array(TEFF) self.logg[star] = np.ma.masked_array(LOGG) self.fe_h[star] = np.ma.masked_array(FE_H) self.c_h[star] = np.ma.masked_array(C_H) self.n_h[star] = np.ma.masked_array(N_H) self.o_h[star] = np.ma.masked_array(O_H) self.fib[star] = np.ma.masked_array(FIB) # Spectral data try: self.spectra[star] = apread.aspcapStar(LOC,APO,ext=1, header=False,dr=self.DR, aspcapWavegrid=True) self.spectra_errs[star] = apread.aspcapStar(LOC,APO,ext=2, header=False, dr=self.DR, aspcapWavegrid=True) self._bitmasks[star] = apread.apStar(LOC,APO,ext=3, header=False,dr=self.DR, aspcapWavegrid=True)[1] except IOError: print('Star {0} missing '.format(star)) self.spectra[star] = np.zeros(aspcappix) self.spectra_errs[star] = np.ones(aspcappix) self._bitmasks[star] = np.ones(aspcappix).astype(np.int16) missing +=1 if LOGG<-1000 or TEFF<-1000 or FE_H<-1000 or self.data[star]['SIGFIB'] < 0 or self.data[star]['MEANFIB'] < 0: self._bitmasks[star] = np.ones(aspcappix).astype(np.int16) print('Total {0} of {1} stars missing'.format(missing,len(stardata))) def show_sample_coverage(self,coords=True,phi_ind='RC_GALPHI',r_ind='RC_GALR',z_ind='RC_GALZ'): """ Plots the sample in Galacto-centric cylindrical coordinates. """ # Start figure plt.figure(figsize=(10,5.5)) # Set up Cartesian axes car = plt.subplot(121) # Set up polar axes pol = plt.subplot(122,projection='polar') if coords: # Find location data phi = self.data[phi_ind] r = self.data[r_ind] z = self.data[z_ind] # Plot data car.plot(r,z,'ko',markersize=2,alpha=0.2) pol.plot(phi,r,'ko',markersize=2,alpha=0.2) # Reorient polar plot to match convention pol.set_theta_direction(-1) # Constrain plot limits and set labels car.set_xlim(min(r),max(r)) car.set_ylim(min(z),max(z)) car.set_xlabel('R (kpc)') car.set_ylabel('z (kpc)') pol.set_rlabel_position(135) pol.set_rlim(min(r),max(r)) pol.set_xticks([]) plt.subplots_adjust(wspace=0.05) def plotHistogram(self,array,title = '',xlabel = '',norm=True, ylabel = 'number of stars',saveName=None,**kwargs): """ Plots a histogram of some input array, with the option to save it. 
array: array to plot as histogram title: (optional) title of the plot xlabel: (optional) x-axis label of the plot ylabel: y-axis label of the plot (default: 'number of stars') saveName: (optional) path to save plot, without file extension **kwargs: kwargs for numpy.histogram """ plt.figure(figsize=(10,8)) hist,binEdges = np.histogram(array,**kwargs) if norm: area = np.sum(hist*(binEdges[1]-binEdges[0])) barlist = plt.bar(binEdges[:-1],hist/area,width = binEdges[1]-binEdges[0]) elif not norm: barlist = plt.bar(binEdges[:-1],hist,width = binEdges[1]-binEdges[0]) colours = plt.get_cmap('plasma')(np.linspace(0, 0.85, len(barlist))) for bar in range(len(barlist)): barlist[bar].set_color(colours[bar]) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) if saveName: plt.savefig('plots/'+saveName+'.png') plt.close() class makeFilter(starSample): """ Contains functions to create a filter and associated directory name for a starSample. """ def __init__(self,dataSource,sampleType,ask=True,datadict=None,datadir='.',func=None,file=None): """ Sets up filter_function.py file to contain the appropriate function and puts the save directory name in the docstring of the function. sampleType: designator of the sample type - must be a key in readfn and independentVariables in data.py ask: if True, function asks for user input to make filter_function.py, if False, uses existing filter_function.py """ starSample.__init__(self,dataSource,sampleType,ask=ask,datadict=datadict) self.datadir=datadir if ask: self.done = False print('Type done at any prompt when finished') # Start name and condition string self.name = self.datadir+'/'+self._sampleType+'_'+str(self.DR) self.condition = '' # Ask for new key conditions until the user signals done while not self.done: self.done = self._sampleInfo() # Check that the user set conditions. # If conditions not set, recursively call init if self.condition == '': print('No conditions set') self.__init__(dataSource,sampleType,ask=True) # When conditions set, trim trailing ampersand self.condition = self.condition[:-2] # Write the new filter function to file f = open('filter_function.py','w') f.write(self._basicStructure()+self.condition) f.close() elif not ask: if callable(func): starFilter=func self.name = starFilter.__doc__.split('\n')[-2] self.name = self.name.split('\t')[-1] self.name = self.name.strip() f = open('filter_function.py','w') functext = ''.join(inspect.getsourcelines(starFilter)[0]) f.write('import numpy as np\n\n'+functext) f.close() elif not callable(func): # Import existing filter function. If function doesn't exist, # recursively call init if isinstance(file,str): f = open(file,'r') filter_text = f.readlines() f.close() f = open('filter_function.py','w') f.write(filter_text) f.close() else: try: import filter_function reload(filter_function) from filter_function import starFilter self.name = starFilter.__doc__.split('\n')[-2] self.name = self.name.split('\t')[-1] except ImportError: print('filter_function.py does not contain the required starFilter function.') self.__init__(dataSource,sampleType,ask=True) self.getDirectory() self.filterCopy() def _basicStructure(self): """ Returns the basic form of filter_function.py """ return 'import numpy as np\n\ndef starFilter(data):\n\t"""\n\t{0}\n\t"""\n\treturn'.format(self.name) def _sampleInfo(self): """ Retrieves information about the sample from the user. 
""" key = input('Data key: ') # Check if key is accepted if key in keyList: self.name+='_'+key # Get info for this key match = self._match(key) if match[0]=='done': return True elif match[0]=='a': self.name+='_fullsample' self.condition = 'np.where(data)' return True elif match[0]=='m': # Add string form of the matching condition and # update the name self.name+='_match'+match[1] andor = input('And/or? ') if andor == 'and' or andor=='a' or andor=='&': self.condition += ' (data[\'{0}\'] == "{1}") &'.format(key,match[1]) return False elif andor == 'or' or andor=='o' or andor=='|': self.condition += ' (data[\'{0}\'] == "{1}") |'.format(key,match[1]) return False elif andor == 'done': self.condition += ' (data[\'{0}\'] == "{1}") &'.format(key,match[1]) return True else: print('Invalid choice of "and" or "or", using "or" by default') self.condition += ' (data[\'{0}\'] == "{1}") |'.format(key,match[1]) return False elif match[0]=='s': # Add string form of the slicing condition and # update the name self.name+='_up'+str(match[1])+'_lo'+str(match[2]) andor = input('And/or? ') if andor == 'and' or andor=='a' or andor=='&': self.condition += ' (data[\'{0}\'] < {1}) & (data[\'{0}\'] > {2}) &'.format(key,match[1],match[2]) return False elif andor == 'or' or andor=='o' or andor=='|': self.condition += ' ((data[\'{0}\'] < {1}) & (data[\'{0}\'] > {2})) |'.format(key,match[1],match[2]) return False elif andor =='done': self.condition += ' ((data[\'{0}\'] < {1}) & (data[\'{0}\'] > {2})) &'.format(key,match[1],match[2]) return True else: print('Invalid choice of "and" or "or", using "or" by default') self.condition += ' ((data[\'{0}\'] < {1}) & (data[\'{0}\'] > {2})) |'.format(key,match[1],match[2]) return False # If key not accepted, make recursive call elif key not in keyList and key != 'done': print('Got a bad key. Try choosing one of ',keyList) result = self._sampleInfo() return result # If done condition, exit elif key == 'done': print('Done getting filter information') return True def _match(self,key): """ Returns user-generated conditions to match or slice in a given key. key: label of property of the data set """ # Check whether we will match to key or slice in its range match = input('Default is full range. Match or slice? ').strip() if match == 'match' or match == 'm' or match == 'Match': m = input('Match value: ') if m=='done': print('Done getting filter information') return 'done',None # Check if match value has at least one star, # if not call _match recursively elif m!='done' and m in self.data[key]: return 'm',m elif m not in self.data[key]: print('No match for this key. Try choosing one of ',np.unique(self.data[key])) self._match(key) elif match == 'slice' or match == 's' or match == 'Slice': # Get limits of slice upperLimit = input('Upper limit (Enter for maximum): ') lowerLimit = input('Lower limit (Enter for minimum): ') if upperLimit == 'done' or lowerLimit == 'done': print('Done getting filter information') return 'done',None elif upperLimit != 'done' and lowerLimit != 'done': if upperLimit == 'max' or upperLimit == 'm' or upperLimit == '': upperLimit = np.max(self.data[key]) if lowerLimit == 'min' or lowerLimit == 'm' or lowerLimit == '': lowerLimit = np.min(self.data[key]) # Check limits are good - if not, call _match recursively try: if float(upperLimit) <= float(lowerLimit): print('Limits are the same or are in the wrong order. 
Try again.') self._match(key) elif float(upperLimit) > float(lowerLimit): print('Found good limits') return 's',float(upperLimit),float(lowerLimit) except ValueError as e: print('Please enter floats for the limits') self._match(key) # Option to use the entire sample elif match == 'all' or match == 'a' or match == 'All': return 'a',None # Exit filter finding elif match == 'done': print('Done getting filter information') return 'done',None # Invalid entry condition else: print('Invalid choice, please type match, slice or all') result = self._match(key) return result def getDirectory(self): """ Create directory to store results for given filter. """ if not os.path.isdir(self.name): os.system('mkdir -p {0}/'.format(self.name)) return def directoryClean(self): """ Removes all files from a specified directory. """ os.system('rm -rf {0}/*.npy'.format(self.name)) def filterCopy(self): """ Copies filter function to data directory. """ os.system('cp filter_function.py {0}/'.format(self.name)) class subStarSample(makeFilter): """ Given a filter function, defines a subsample of the total sample of stars. """ def __init__(self,dataSource,sampleType,ask=True,datadict=None,datadir='.',func=None): """ Create a subsample according to a starFilter function sampleType: designator of the sample type - must be a key in readfn and independentVariables in data.py ask: if True, function asks for user input to make filter_function.py, if False, uses existing filter_function.py """ # Create starFilter makeFilter.__init__(self,dataSource,sampleType,ask=ask,datadict=datadict,datadir=datadir,func=func) import filter_function reload(filter_function) from filter_function import starFilter # Find stars that satisfy starFilter and cut data accordingly change_dr(self.DR) self._matchingStars = starFilter(self.data) self.matchingData = self.data[self._matchingStars] #self.numberStars = len(self.matchingData) if self._sampleType != 'syn': self.checkArrays() def numberStars(self): return len(self.matchingData) def checkArrays(self): """ Check if input data has already been saved as arrays. If not, create them. 
""" fnames = np.array([self.name+'/teff.npy', self.name+'/logg.npy', self.name+'/fe_h.npy', self.name+'/c_h.npy', self.name+'/n_h.npy', self.name+'/o_h.npy', self.name+'/fib.npy', self.name+'/spectra.npy', self.name+'/spectra_errs.npy', self.name+'/bitmasks.npy']) fexist = True for f in fnames: fexist *= os.path.isfile(f) # If all files exist, read data from file for increased initialization speed if fexist: self.teff = np.load(self.name+'/teff.npy') self.logg = np.load(self.name+'/logg.npy') self.fe_h = np.load(self.name+'/fe_h.npy') self.c_h = np.load(self.name+'/c_h.npy') self.n_h = np.load(self.name+'/n_h.npy') self.o_h = np.load(self.name+'/o_h.npy') self.fib = np.load(self.name+'/fib.npy') self.spectra = np.ma.masked_array(np.load(self.name+'/spectra.npy')) self.spectra_errs = np.ma.masked_array(np.load(self.name+'/spectra_errs.npy')) self._bitmasks = np.load(self.name+'/bitmasks.npy') # If any file is missing, generate arrays and write to file elif not fexist: self.makeArrays(self.matchingData) np.save(self.name+'/teff.npy',self.teff.data) np.save(self.name+'/logg.npy',self.logg.data) np.save(self.name+'/fe_h.npy',self.fe_h.data) np.save(self.name+'/c_h.npy',self.c_h.data) np.save(self.name+'/n_h.npy',self.n_h.data) np.save(self.name+'/o_h.npy',self.o_h.data) np.save(self.name+'/fib.npy',self.fib.data) np.save(self.name+'/spectra.npy',self.spectra.data) np.save(self.name+'/spectra_errs.npy',self.spectra_errs.data) np.save(self.name+'/bitmasks.npy',self._bitmasks) def correctUncertainty(self,correction=None): """ Performs a correction on measurement uncertainty. correction: Information on how to perform the correction. May be a path to a pickled file, a float, or list of values. """ if isinstance(correction,(str)): correction = acs.pklread(correction) if isinstance(correction,(float,int)): self.spectra_errs *= np.sqrt(correction) elif isinstance(correction,(list)): correction = np.array(correction) if isinstance(correction,(np.ndarray)): if correction.shape != self.spectra_errs.shape: correction = np.tile(correction,(self.spectra_errs.shape[0],1)) self.spectra_errs = np.sqrt(correction*self.spectra_errs**2) def uncorrectUncertainty(self,correction=None): """ Undoes correction on measurement uncertainty. correction: Information on how to perform the correction. May be a path to a pickled file, a float, or list of values. """ if isinstance(correction,(str)): correction = acs.pklread(correction) if isinstance(correction,(float,int)): self.spectra_errs /= np.sqrt(correction) elif isinstance(correction,(list)): correction = np.array(correction) if isinstance(correction,(np.ndarray)): if correction.shape != self.spectra_errs.shape: correction = np.tile(correction,(self.spectra_errs.shape[0],1)) self.spectra_errs = np.sqrt(self.spectra_errs**2/correction) def imshow(self,plotData,saveName=None,title = '',xlabel='pixels',ylabel='stars',zlabel='',**kwargs): """ Creates a square 2D plot of some input array, with the option to save it. 
plotData: 2D array to plot saveName: (optional) path to save plot without file extension title: (optional) title for the plot xlabel: x-axis label for the plot (default:'pixels') ylabel: y-axis label for the plot (default:'stars') **kwargs: kwargs for matplotlib.pyplot.imshow """ plt.imshow(plotData,interpolation='nearest', aspect = float(plotData.shape[1])/plotData.shape[0],**kwargs) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.colorbar(label=zlabel) if saveName=='title': sname = title.split(' ') sname = '_'.join(sname) plt.savefig(self.name+'/'+sname+'.png') plt.close() elif saveName: plt.savefig(self.name+'/'+saveName+'.png') plt.close() def logplot(self,arrays,labels,ylabel='log10 of coefficients',xlabel='coefficient',reshape=True, coeff_labels=True): """ Creates a plot with log values of input array, with legend indicating where array is positive and where its negative. arrays: List of input arrays labels: Legend labels for the input arrays ylabel: Label for y-axis xlabel: Label for x-axis reshape: If True, reshape input arrays (use if one of the inputs is a numpy matrix) coeff_labels: If True, use the default list of labels for the x-axis fit coefficients """ if not isinstance(arrays,(list,np.ndarray)): arrays = [arrays] plt.figure(figsize=(14,8)) for a in range(len(arrays)): array = arrays[a] if reshape: array = np.reshape(np.array(arrays[a]),(len(coeff_inds[self._sampleType]),)) # Find independent indices x = np.arange(len(array)) # Find where positive and negative pos = np.where(array>0) neg = np.where(array<0) # Create legend labels poslabel = 'positive {0}'.format(labels[a]) neglabel = 'negative {0}'.format(labels[a]) # Plot positive and negative separately plt.plot(x[pos],np.log10(array[pos]),'o',label=poslabel) plt.plot(x[neg],np.log10(np.fabs(array[neg])),'o',label=neglabel) # Extend limits so all points are clearly visible plt.xlim(x[0]-1,x[-1]+1) # Label axes plt.ylabel(ylabel) plt.xlabel(xlabel) # Label x-ticks if requested if coeff_labels: plt.xticks(range(len(coeff_inds[self._sampleType])),coeff_inds[self._sampleType]) # Draw legend plt.legend(loc='best') plt.show()
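A hedged sketch of the synthetic-sample path documented above (sampleType='syn' and get_synthetic()). The column list and array shapes follow get_synthetic(); the star count, the filled values, and the assumption that the apogee/isodist dependencies import cleanly are all illustrative.

import numpy as np
from spectralspace.sample.star_sample import starSample, aspcappix

nstars = 5
cols = ['TEFF', 'LOGG', 'FE_H', 'C_H', 'N_H', 'O_H', 'MEANFIB']
stars = np.zeros(nstars, dtype=[(c, 'f8') for c in cols])
stars['TEFF'] = np.linspace(4000., 5000., nstars)

datadict = {'data': stars,
            'spectra': np.ones((nstars, aspcappix)),
            'spectra_errs': 0.01,   # a scalar is broadcast by get_synthetic()
            'bitmask': np.zeros((nstars, aspcappix), dtype=np.int64)}

sample = starSample('apogee', 'syn', datadict=datadict)
print(sample.spectra.shape, sample.teff)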
bsd-3-clause
procoder317/scikit-learn
examples/calibration/plot_compare_calibration.py
241
5008
""" ======================================== Comparison of Calibration of Classifiers ======================================== Well calibrated classifiers are probabilistic classifiers for which the output of the predict_proba method can be directly interpreted as a confidence level. For instance a well calibrated (binary) classifier should classify the samples such that among the samples to which it gave a predict_proba value close to 0.8, approx. 80% actually belong to the positive class. LogisticRegression returns well calibrated predictions as it directly optimizes log-loss. In contrast, the other methods return biased probilities, with different biases per method: * GaussianNaiveBayes tends to push probabilties to 0 or 1 (note the counts in the histograms). This is mainly because it makes the assumption that features are conditionally independent given the class, which is not the case in this dataset which contains 2 redundant features. * RandomForestClassifier shows the opposite behavior: the histograms show peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1 are very rare. An explanation for this is given by Niculescu-Mizil and Caruana [1]: "Methods such as bagging and random forests that average predictions from a base set of models can have difficulty making predictions near 0 and 1 because variance in the underlying base models will bias predictions that should be near zero or one away from these values. Because predictions are restricted to the interval [0,1], errors caused by variance tend to be one- sided near zero and one. For example, if a model should predict p = 0 for a case, the only way bagging can achieve this is if all bagged trees predict zero. If we add noise to the trees that bagging is averaging over, this noise will cause some trees to predict values larger than 0 for this case, thus moving the average prediction of the bagged ensemble away from 0. We observe this effect most strongly with random forests because the base-level trees trained with random forests have relatively high variance due to feature subseting." As a result, the calibration curve shows a characteristic sigmoid shape, indicating that the classifier could trust its "intuition" more and return probabilties closer to 0 or 1 typically. * Support Vector Classification (SVC) shows an even more sigmoid curve as the RandomForestClassifier, which is typical for maximum-margin methods (compare Niculescu-Mizil and Caruana [1]), which focus on hard samples that are close to the decision boundary (the support vectors). .. topic:: References: .. [1] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ print(__doc__) # Author: Jan Hendrik Metzen <[email protected]> # License: BSD Style. 
import numpy as np
np.random.seed(0)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve

X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)


###############################################################################
# Plot calibration plots

plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))

ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        prob_pos = clf.decision_function(X_test)
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)

    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))

    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)

ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')

ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)

plt.tight_layout()
plt.show()
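The example above only diagnoses the miscalibration. As a hedged follow-up sketch, the sigmoid-shaped curves of the SVC and random forest can typically be corrected with sklearn.calibration.CalibratedClassifierCV, reusing the classifiers and train/test split defined above; the cv and method choices are illustrative.

from sklearn.calibration import CalibratedClassifierCV

# Platt-style sigmoid calibration fitted via internal cross-validation
svc_sigmoid = CalibratedClassifierCV(LinearSVC(C=1.0), method='sigmoid', cv=3)
svc_sigmoid.fit(X_train, y_train)
prob_pos_svc = svc_sigmoid.predict_proba(X_test)[:, 1]

# Isotonic regression is a non-parametric alternative (needs more data)
rfc_iso = CalibratedClassifierCV(RandomForestClassifier(n_estimators=100),
                                 method='isotonic', cv=3)
rfc_iso.fit(X_train, y_train)
prob_pos_rfc = rfc_iso.predict_proba(X_test)[:, 1]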
bsd-3-clause
sunzhxjs/JobGIS
lib/python2.7/site-packages/pandas/io/pytables.py
9
156275
""" High level interface to PyTables for reading and writing pandas data structures to disk """ # pylint: disable-msg=E1101,W0613,W0603 from datetime import datetime, date import time import re import copy import itertools import warnings import os import numpy as np import pandas as pd from pandas import (Series, DataFrame, Panel, Panel4D, Index, MultiIndex, Int64Index, Timestamp) from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel from pandas.sparse.array import BlockIndex, IntIndex from pandas.tseries.api import PeriodIndex, DatetimeIndex from pandas.tseries.tdi import TimedeltaIndex from pandas.core.base import StringMixin from pandas.core.common import adjoin, pprint_thing from pandas.core.algorithms import match, unique from pandas.core.categorical import Categorical from pandas.core.common import _asarray_tuplesafe from pandas.core.internals import (BlockManager, make_block, _block2d_to_blocknd, _factor_indexer, _block_shape) from pandas.core.index import _ensure_index from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type import pandas.core.common as com from pandas.tools.merge import concat from pandas import compat from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter from pandas.io.common import PerformanceWarning from pandas.core.config import get_option from pandas.computation.pytables import Expr, maybe_expression import pandas.lib as lib import pandas.algos as algos import pandas.tslib as tslib from contextlib import contextmanager from distutils.version import LooseVersion # versioning attribute _version = '0.15.2' ### encoding ### # PY3 encoding if we don't specify _default_encoding = 'UTF-8' def _ensure_decoded(s): """ if we have bytes, decode them to unicode """ if isinstance(s, np.bytes_): s = s.decode('UTF-8') return s def _ensure_encoding(encoding): # set the encoding if we need if encoding is None: if PY3: encoding = _default_encoding return encoding Term = Expr def _ensure_term(where, scope_level): """ ensure that the where is a Term or a list of Term this makes sure that we are capturing the scope of variables that are passed create the terms here with a frame_level=2 (we are 2 levels down) """ # only consider list/tuple here as an ndarray is automaticaly a coordinate # list level = scope_level + 1 if isinstance(where, (list, tuple)): wlist = [] for w in filter(lambda x: x is not None, where): if not maybe_expression(w): wlist.append(w) else: wlist.append(Term(w, scope_level=level)) where = wlist elif maybe_expression(where): where = Term(where, scope_level=level) return where class PossibleDataLossError(Exception): pass class ClosedFileError(Exception): pass class IncompatibilityWarning(Warning): pass incompatibility_doc = """ where criteria is being ignored as this version [%s] is too old (or not-defined), read the file in and write it out to a new file to upgrade (with the copy_to method) """ class AttributeConflictWarning(Warning): pass attribute_conflict_doc = """ the [%s] attribute of the existing index is [%s] which conflicts with the new [%s], resetting the attribute to None """ class DuplicateWarning(Warning): pass duplicate_doc = """ duplicate entries in table, taking most recently appended """ performance_doc = """ your performance may suffer as PyTables will pickle object types that it cannot map directly to c-types [inferred_type->%s,key->%s] [items->%s] """ # formats _FORMAT_MAP = { u('f'): 'fixed', u('fixed'): 'fixed', u('t'): 'table', u('table'): 'table', } format_deprecate_doc = """ the 
table keyword has been deprecated use the format='fixed(f)|table(t)' keyword instead fixed(f) : specifies the Fixed format and is the default for put operations table(t) : specifies the Table format and is the default for append operations """ # map object types _TYPE_MAP = { Series: u('series'), SparseSeries: u('sparse_series'), pd.TimeSeries: u('series'), DataFrame: u('frame'), SparseDataFrame: u('sparse_frame'), Panel: u('wide'), Panel4D: u('ndim'), SparsePanel: u('sparse_panel') } # storer class map _STORER_MAP = { u('TimeSeries'): 'LegacySeriesFixed', u('Series'): 'LegacySeriesFixed', u('DataFrame'): 'LegacyFrameFixed', u('DataMatrix'): 'LegacyFrameFixed', u('series'): 'SeriesFixed', u('sparse_series'): 'SparseSeriesFixed', u('frame'): 'FrameFixed', u('sparse_frame'): 'SparseFrameFixed', u('wide'): 'PanelFixed', u('sparse_panel'): 'SparsePanelFixed', } # table class map _TABLE_MAP = { u('generic_table'): 'GenericTable', u('appendable_series'): 'AppendableSeriesTable', u('appendable_multiseries'): 'AppendableMultiSeriesTable', u('appendable_frame'): 'AppendableFrameTable', u('appendable_multiframe'): 'AppendableMultiFrameTable', u('appendable_panel'): 'AppendablePanelTable', u('appendable_ndim'): 'AppendableNDimTable', u('worm'): 'WORMTable', u('legacy_frame'): 'LegacyFrameTable', u('legacy_panel'): 'LegacyPanelTable', } # axes map _AXES_MAP = { DataFrame: [0], Panel: [1, 2], Panel4D: [1, 2, 3], } # register our configuration options from pandas.core import config dropna_doc = """ : boolean drop ALL nan rows when appending to a table """ format_doc = """ : format default format writing format, if None, then put will default to 'fixed' and append will default to 'table' """ with config.config_prefix('io.hdf'): config.register_option('dropna_table', False, dropna_doc, validator=config.is_bool) config.register_option( 'default_format', None, format_doc, validator=config.is_one_of_factory(['fixed', 'table', None]) ) # oh the troubles to reduce import time _table_mod = None _table_file_open_policy_is_strict = False def _tables(): global _table_mod global _table_file_open_policy_is_strict if _table_mod is None: import tables _table_mod = tables # version requirements if LooseVersion(tables.__version__) < '3.0.0': raise ImportError("PyTables version >= 3.0.0 is required") # set the file open policy # return the file open policy; this changes as of pytables 3.1 # depending on the HDF5 version try: _table_file_open_policy_is_strict = tables.file._FILE_OPEN_POLICY == 'strict' except: pass return _table_mod # interface to/from ### def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): """ store this object, close it if we opened it """ if append: f = lambda store: store.append(key, value, **kwargs) else: f = lambda store: store.put(key, value, **kwargs) if isinstance(path_or_buf, string_types): with HDFStore(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: f(store) else: f(path_or_buf) def read_hdf(path_or_buf, key=None, **kwargs): """ read from the store, close it if we opened it Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- path_or_buf : path (string), or buffer to read from key : group identifier in the store. Can be omitted a HDF file contains a single pandas object. 
where : list of Term (or convertable) objects, optional start : optional, integer (defaults to None), row number to start selection stop : optional, integer (defaults to None), row number to stop selection columns : optional, a list of columns that if not None, will limit the return columns iterator : optional, boolean, return an iterator, default False chunksize : optional, nrows to include in iteration, return an iterator Returns ------- The selected object """ # grab the scope if 'where' in kwargs: kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1) if isinstance(path_or_buf, string_types): try: exists = os.path.exists(path_or_buf) #if filepath is too long except (TypeError,ValueError): exists = False if not exists: raise IOError('File %s does not exist' % path_or_buf) # can't auto open/close if we are using an iterator # so delegate to the iterator store = HDFStore(path_or_buf, **kwargs) auto_close = True elif isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise IOError('The HDFStore must be open for reading.') store = path_or_buf auto_close = False else: raise NotImplementedError('Support for generic buffers has not been ' 'implemented.') try: if key is None: keys = store.keys() if len(keys) != 1: raise ValueError('key must be provided when HDF file contains ' 'multiple datasets.') key = keys[0] return store.select(key, auto_close=auto_close, **kwargs) except: # if there is an error, close the store try: store.close() except: pass raise class HDFStore(StringMixin): """ dict-like IO interface for storing pandas objects in PyTables either Fixed or Table format. Parameters ---------- path : string File path to HDF5 file mode : {'a', 'w', 'r', 'r+'}, default 'a' ``'r'`` Read-only; no data can be modified. ``'w'`` Write; a new file is created (an existing file with the same name would be deleted). ``'a'`` Append; an existing file is opened for reading and writing, and if the file does not exist it is created. ``'r+'`` It is similar to ``'a'``, but the file must already exist. 
complevel : int, 1-9, default 0 If a complib is specified compression will be applied where possible complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None If complevel is > 0 apply compression to objects written in the store wherever possible fletcher32 : bool, default False If applying compression use the fletcher32 checksum Examples -------- >>> from pandas import DataFrame >>> from numpy.random import randn >>> bar = DataFrame(randn(10, 4)) >>> store = HDFStore('test.h5') >>> store['foo'] = bar # write to HDF5 >>> bar = store['foo'] # retrieve >>> store.close() """ def __init__(self, path, mode=None, complevel=None, complib=None, fletcher32=False, **kwargs): try: import tables except ImportError as ex: # pragma: no cover raise ImportError('HDFStore requires PyTables, "{ex}" problem importing'.format(ex=str(ex))) if complib not in (None, 'blosc', 'bzip2', 'lzo', 'zlib'): raise ValueError("complib only supports 'blosc', 'bzip2', lzo' " "or 'zlib' compression.") self._path = path if mode is None: mode = 'a' self._mode = mode self._handle = None self._complevel = complevel self._complib = complib self._fletcher32 = fletcher32 self._filters = None self.open(mode=mode, **kwargs) @property def root(self): """ return the root node """ self._check_if_open() return self._handle.root @property def filename(self): return self._path def __getitem__(self, key): return self.get(key) def __setitem__(self, key, value): self.put(key, value) def __delitem__(self, key): return self.remove(key) def __getattr__(self, name): """ allow attribute access to get stores """ self._check_if_open() try: return self.get(name) except: pass raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, name)) def __contains__(self, key): """ check for existance of this key can match the exact pathname or the pathnm w/o the leading '/' """ node = self.get_node(key) if node is not None: name = node._v_pathname if name == key or name[1:] == key: return True return False def __len__(self): return len(self.groups()) def __unicode__(self): output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path)) if self.is_open: lkeys = sorted(list(self.keys())) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append( pprint_thing(s or 'invalid_HDFStore node')) except Exception as detail: keys.append(k) values.append("[invalid_HDFStore node: %s]" % pprint_thing(detail)) output += adjoin(12, keys, values) else: output += 'Empty' else: output += "File is CLOSED" return output def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def keys(self): """ Return a (potentially unordered) list of the keys corresponding to the objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. 
have the leading '/' """ return [n._v_pathname for n in self.groups()] def items(self): """ iterate on key->group """ for g in self.groups(): yield g._v_pathname, g iteritems = items def open(self, mode='a', **kwargs): """ Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes """ tables = _tables() if self._mode != mode: # if we are changing a write mode to read, ok if self._mode in ['a', 'w'] and mode in ['r', 'r+']: pass elif mode in ['w']: # this would truncate, raise here if self.is_open: raise PossibleDataLossError( "Re-opening the file [{0}] with mode [{1}] " "will delete the current file!" .format(self._path, self._mode) ) self._mode = mode # close and reopen the handle if self.is_open: self.close() if self._complib is not None: if self._complevel is None: self._complevel = 9 self._filters = _tables().Filters(self._complevel, self._complib, fletcher32=self._fletcher32) try: self._handle = tables.open_file(self._path, self._mode, **kwargs) except (IOError) as e: # pragma: no cover if 'can not be written' in str(e): print('Opening %s in read-only mode' % self._path) self._handle = tables.open_file(self._path, 'r', **kwargs) else: raise except (ValueError) as e: # trap PyTables >= 3.1 FILE_OPEN_POLICY exception # to provide an updated message if 'FILE_OPEN_POLICY' in str(e): e = ValueError("PyTables [{version}] no longer supports opening multiple files\n" "even in read-only mode on this HDF5 version [{hdf_version}]. You can accept this\n" "and not open the same file multiple times at once,\n" "upgrade the HDF5 version, or downgrade to PyTables 3.0.0 which allows\n" "files to be opened multiple times at once\n".format(version=tables.__version__, hdf_version=tables.get_hdf5_version())) raise e except (Exception) as e: # trying to read from a non-existant file causes an error which # is not part of IOError, make it one if self._mode == 'r' and 'Unable to open/create file' in str(e): raise IOError(str(e)) raise def close(self): """ Close the PyTables file handle """ if self._handle is not None: self._handle.close() self._handle = None @property def is_open(self): """ return a boolean indicating whether the file is open """ if self._handle is None: return False return bool(self._handle.isopen) def flush(self, fsync=False): """ Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. 
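Examples
--------
A minimal sketch (the file name here is illustrative only):

>>> from pandas import DataFrame
>>> from numpy.random import randn
>>> store = HDFStore('flush_example.h5')
>>> store['df'] = DataFrame(randn(5, 2))  # buffered write
>>> store.flush(fsync=True)               # flush buffers and ask the OS to write to disk
>>> store.close()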
""" if self._handle is not None: self._handle.flush() if fsync: try: os.fsync(self._handle.fileno()) except: pass def get(self, key): """ Retrieve pandas object stored in file Parameters ---------- key : object Returns ------- obj : type of object stored in file """ group = self.get_node(key) if group is None: raise KeyError('No object named %s in the file' % key) return self._read_group(group) def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- key : object where : list of Term (or convertable) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False Returns ------- The selected object """ group = self.get_node(key) if group is None: raise KeyError('No object named %s in the file' % key) # create the storer and axes where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() # function to call on iteration def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns, **kwargs) # create the iterator it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result() def select_as_coordinates( self, key, where=None, start=None, stop=None, **kwargs): """ return the selection as an Index Parameters ---------- key : object where : list of Term (or convertable) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs) def select_column(self, key, column, **kwargs): """ return a single column from the table. 
This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ return self.get_storer(key).read_column(column=column, **kwargs) def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas objects from multiple tables Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator Exceptions ---------- raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS """ # default to single select where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, string_types): return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs) if not isinstance(keys, (list, tuple)): raise TypeError("keys must be a list/tuple") if not len(keys): raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] # collect the tables tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) # validate rows nrows = None for t, k in itertools.chain([(s,selector)], zip(tbls, keys)): if t is None: raise KeyError("Invalid table [%s]" % k) if not t.is_table: raise TypeError( "object [%s] is not a table, and cannot be used in all " "select as multiple" % t.pathname ) if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError( "all tables must have exactly the same nrows!") # axis is the concentation axes axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0] def func(_start, _stop, _where): # retrieve the objs, _where is always passed as a set of coordinates here objs = [t.read(where=_where, columns=columns, **kwargs) for t in tbls] # concat and return return concat(objs, axis=axis, verify_integrity=False).consolidate() # create the iterator it = TableIterator(self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result(coordinates=True) def put(self, key, value, format=None, append=False, **kwargs): """ Store object in HDFStore Parameters ---------- key : object value : {Series, DataFrame, Panel} format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format Fast writing/reading. Not-appendable, nor searchable table(t) : Table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default False This will force Table format, append the input data to the existing. 
encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' """ if format is None: format = get_option("io.hdf.default_format") or 'fixed' kwargs = self._validate_format(format, kwargs) self._write_to_group(key, value, append=append, **kwargs) def remove(self, key, where=None, start=None, stop=None): """ Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertable) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store """ where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except: if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!") # we are actually trying to remove a node (with children) s = self.get_node(key) if s is not None: s._f_remove(recursive=True) return None if s is None: raise KeyError('No object named %s in the file' % key) # remove the node if where is None and start is None and stop is None: s.group._f_remove(recursive=True) # delete from the table else: if not s.is_table: raise ValueError( 'can only remove with where on objects written as tables') return s.delete(where=where, start=start, stop=stop) def append(self, key, value, format=None, append=True, columns=None, dropna=None, **kwargs): """ Append to Table in file. Node must already exist and be Table format. Parameters ---------- key : object value : {Series, DataFrame, Panel, Panel4D} format: 'table' is the default table(t) : table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default True, append the input data to the existing data_columns : list of columns to create as data columns, or True to use all columns min_itemsize : dict of columns that specify minimum string sizes nan_rep : string to use as string nan represenation chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' Notes ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful """ if columns is not None: raise TypeError("columns is not a supported keyword in append, " "try data_columns") if dropna is None: dropna = get_option("io.hdf.dropna_table") if format is None: format = get_option("io.hdf.default_format") or 'table' kwargs = self._validate_format(format, kwargs) self._write_to_group(key, value, append=append, dropna=dropna, **kwargs) def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, dropna=False, **kwargs): """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True 
to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError("axes is currently not accepted as a parameter to" " append_to_multiple; you can create the " "tables independently instead") if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that " "is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how='all').index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.ix[valid_index] # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex_axis(v, axis=axis) self.append(k, val, data_columns=dc, **kwargs) def create_table_index(self, key, **kwargs): """ Create a pytables index on the table Paramaters ---------- key : object (the node to index) Exceptions ---------- raises if the node is not a table """ # version requirements _tables() s = self.get_storer(key) if s is None: return if not s.is_table: raise TypeError( "cannot create table index on a Fixed format store") s.create_index(**kwargs) def groups(self): """return a list of all the top-level nodes (that are not themselves a pandas storage object) """ _tables() self._check_if_open() return [ g for g in self._handle.walk_nodes() if (getattr(g._v_attrs, 'pandas_type', None) or getattr(g, 'table', None) or (isinstance(g, _table_mod.table.Table) and g._v_name != u('table'))) ] def get_node(self, key): """ return the node with the key or None if it does not exist """ self._check_if_open() try: if not key.startswith('/'): key = '/' + key return self._handle.get_node(self.root, key) except: return None def get_storer(self, key): """ return the storer object for a key, raise if not in the file """ group = self.get_node(key) if group is None: return None s = self._create_storer(group) s.infer_axes() return s def copy(self, file, mode='w', propindexes=True, keys=None, complib=None, complevel=None, fletcher32=False, overwrite=True): """ copy the existing store to a new file, upgrading in place Parameters ---------- propindexes: restore indexes in copied file (defaults to True) keys : list of keys to include in the copy (defaults to all) overwrite : overwrite (remove and replace) existing nodes in the new store (default is True) mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store """ new_store = HDFStore( file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, 
list)): keys = [keys] for k in keys: s = self.get_storer(k) if s is not None: if k in new_store: if overwrite: new_store.remove(k) data = self.select(k) if s.is_table: index = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( k, data, index=index, data_columns=getattr(s, 'data_columns', None), encoding=s.encoding ) else: new_store.put(k, data, encoding=s.encoding) return new_store # private methods ###### def _check_if_open(self): if not self.is_open: raise ClosedFileError("{0} file is not open!".format(self._path)) def _validate_format(self, format, kwargs): """ validate / deprecate formats; return the new kwargs """ kwargs = kwargs.copy() # validate try: kwargs['format'] = _FORMAT_MAP[format.lower()] except: raise TypeError("invalid HDFStore format specified [{0}]" .format(format)) return kwargs def _create_storer(self, group, format=None, value=None, append=False, **kwargs): """ return a suitable class to operate """ def error(t): raise TypeError( "cannot properly create the storer for: [%s] [group->%s," "value->%s,format->%s,append->%s,kwargs->%s]" % (t, group, type(value), format, append, kwargs) ) pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None)) tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None)) # infer the pt from the passed value if pt is None: if value is None: _tables() if (getattr(group, 'table', None) or isinstance(group, _table_mod.table.Table)): pt = u('frame_table') tt = u('generic_table') else: raise TypeError( "cannot create a storer if the object is not existing " "nor a value are passed") else: try: pt = _TYPE_MAP[type(value)] except: error('_TYPE_MAP') # we are actually a table if format == 'table': pt += u('_table') # a storer node if u('table') not in pt: try: return globals()[_STORER_MAP[pt]](self, group, **kwargs) except: error('_STORER_MAP') # existing node (and must be a table) if tt is None: # if we are a writer, determin the tt if value is not None: if pt == u('series_table'): index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = u('appendable_series') elif index.nlevels > 1: tt = u('appendable_multiseries') elif pt == u('frame_table'): index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = u('appendable_frame') elif index.nlevels > 1: tt = u('appendable_multiframe') elif pt == u('wide_table'): tt = u('appendable_panel') elif pt == u('ndim_table'): tt = u('appendable_ndim') else: # distiguish between a frame/table tt = u('legacy_panel') try: fields = group.table._v_attrs.fields if len(fields) == 1 and fields[0] == u('value'): tt = u('legacy_frame') except: pass try: return globals()[_TABLE_MAP[tt]](self, group, **kwargs) except: error('_TABLE_MAP') def _write_to_group(self, key, value, format, index=True, append=False, complib=None, encoding=None, **kwargs): group = self.get_node(key) # remove the node if we are not appending if group is not None and not append: self._handle.remove_node(group, recursive=True) group = None # we don't want to store a table node at all if are object is 0-len # as there are not dtypes if getattr(value, 'empty', None) and (format == 'table' or append): return if group is None: paths = key.split('/') # recursively create the groups path = '/' for p in paths: if not len(p): continue new_path = path if not path.endswith('/'): new_path += '/' new_path += p group = self.get_node(new_path) if group is None: group = self._handle.create_group(path, p) path = new_path s = self._create_storer(group, format, 
value, append=append, encoding=encoding, **kwargs) if append: # raise if we are trying to append to a Fixed format, # or a table that exists (and we are putting) if (not s.is_table or (s.is_table and format == 'fixed' and s.is_exists)): raise ValueError('Can only append to Tables') if not s.is_exists: s.set_object_info() else: s.set_object_info() if not s.is_table and complib: raise ValueError( 'Compression not supported on Fixed format stores' ) # write the object s.write(obj=value, append=append, complib=complib, **kwargs) if s.is_table and index: s.create_index(columns=index) def _read_group(self, group, **kwargs): s = self._create_storer(group) s.infer_axes() return s.read(**kwargs) def get_store(path, **kwargs): """ Backwards compatible alias for ``HDFStore`` """ return HDFStore(path, **kwargs) class TableIterator(object): """ define the iteration interface on a table Parameters ---------- store : the reference store s : the refered storer func : the function to execute the query where : the where of the query nrows : the rows to iterate on start : the passed start value (default is None) stop : the passed stop value (default is None) iterator : boolean, whether to use the default iterator chunksize : the passed chunking value (default is 50000) auto_close : boolean, automatically close the store at the end of iteration, default is False kwargs : the passed kwargs """ def __init__(self, store, s, func, where, nrows, start=None, stop=None, iterator=False, chunksize=None, auto_close=False): self.store = store self.s = s self.func = func self.where = where self.nrows = nrows or 0 self.start = start or 0 if stop is None: stop = self.nrows self.stop = min(self.nrows, stop) self.coordinates = None if iterator or chunksize is not None: if chunksize is None: chunksize = 100000 self.chunksize = int(chunksize) else: self.chunksize = None self.auto_close = auto_close def __iter__(self): # iterate current = self.start while current < self.stop: stop = min(current + self.chunksize, self.stop) value = self.func(None, None, self.coordinates[current:stop]) current = stop if value is None or not len(value): continue yield value self.close() def close(self): if self.auto_close: self.store.close() def get_result(self, coordinates=False): # return the actual iterator if self.chunksize is not None: if not self.s.is_table: raise TypeError( "can only use an iterator or chunksize on a table") self.coordinates = self.s.read_coordinates(where=self.where) return self # if specified read via coordinates (necessary for multiple selections if coordinates: where = self.s.read_coordinates(where=self.where) else: where = self.where # directly return the result results = self.func(self.start, self.stop, where) self.close() return results class IndexCol(StringMixin): """ an index column description class Parameters ---------- axis : axis which I reference values : the ndarray like converted values kind : a string description of this type typ : the pytables type pos : the position in the pytables """ is_an_indexable = True is_data_indexable = True _info_fields = ['freq', 'tz', 'index_name'] def __init__(self, values=None, kind=None, typ=None, cname=None, itemsize=None, name=None, axis=None, kind_attr=None, pos=None, freq=None, tz=None, index_name=None, **kwargs): self.values = values self.kind = kind self.typ = typ self.itemsize = itemsize self.name = name self.cname = cname self.kind_attr = kind_attr self.axis = axis self.pos = pos self.freq = freq self.tz = tz self.index_name = index_name self.table = None self.meta = 
None self.metadata = None if name is not None: self.set_name(name, kind_attr) if pos is not None: self.set_pos(pos) def set_name(self, name, kind_attr=None): """ set the name of this indexer """ self.name = name self.kind_attr = kind_attr or "%s_kind" % name if self.cname is None: self.cname = name return self def set_axis(self, axis): """ set the axis over which I index """ self.axis = axis return self def set_pos(self, pos): """ set the position of this column in the Table """ self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos return self def set_table(self, table): self.table = table return self def __unicode__(self): temp = tuple( map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))) return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp def __eq__(self, other): """ compare 2 col items """ return all([getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'axis', 'pos']]) def __ne__(self, other): return not self.__eq__(other) @property def is_indexed(self): """ return whether I am an indexed column """ try: return getattr(self.table.cols, self.cname).is_indexed except: False def copy(self): new_self = copy.copy(self) return new_self def infer(self, handler): """infer this column from the table: create and return a new object""" table = handler.table new_self = self.copy() new_self.set_table(table) new_self.get_attr() new_self.read_metadata(handler) return new_self def convert(self, values, nan_rep, encoding): """ set the values from this selection: take = take ownership """ try: values = values[self.cname] except: pass values = _maybe_convert(values, self.kind, encoding) kwargs = dict() if self.freq is not None: kwargs['freq'] = _ensure_decoded(self.freq) if self.index_name is not None: kwargs['name'] = _ensure_decoded(self.index_name) try: self.values = Index(values, **kwargs) except: # if the output freq is different that what we recorded, # it should be None (see also 'doc example part 2') if 'freq' in kwargs: kwargs['freq'] = None self.values = Index(values, **kwargs) self.values = _set_tz(self.values, self.tz) return self def take_data(self): """ return the values & release the memory """ self.values, values = None, self.values return values @property def attrs(self): return self.table._v_attrs @property def description(self): return self.table.description @property def col(self): """ return my current col description """ return getattr(self.description, self.cname, None) @property def cvalues(self): """ return my cython values """ return self.values def __iter__(self): return iter(self.values) def maybe_set_size(self, min_itemsize=None, **kwargs): """ maybe set a string col itemsize: min_itemsize can be an interger or a dict with this columns name with an integer size """ if _ensure_decoded(self.kind) == u('string'): if isinstance(min_itemsize, dict): min_itemsize = min_itemsize.get(self.name) if min_itemsize is not None and self.typ.itemsize < min_itemsize: self.typ = _tables( ).StringCol(itemsize=min_itemsize, pos=self.pos) def validate(self, handler, append, **kwargs): self.validate_names() def validate_names(self): pass def validate_and_set(self, handler, append, **kwargs): self.set_table(handler.table) self.validate_col() self.validate_attr(append) self.validate_metadata(handler) self.write_metadata(handler) self.set_attr() def validate_col(self, itemsize=None): """ validate this column: return the compared against itemsize """ # validate this column for string truncation (or reset to the max 
size) if _ensure_decoded(self.kind) == u('string'): c = self.col if c is not None: if itemsize is None: itemsize = self.itemsize if c.itemsize < itemsize: raise ValueError( "Trying to store a string with len [%s] in [%s] " "column but\nthis column has a limit of [%s]!\n" "Consider using min_itemsize to preset the sizes on " "these columns" % (itemsize, self.cname, c.itemsize)) return c.itemsize return None def validate_attr(self, append): # check for backwards incompatibility if append: existing_kind = getattr(self.attrs, self.kind_attr, None) if existing_kind is not None and existing_kind != self.kind: raise TypeError("incompatible kind in col [%s - %s]" % (existing_kind, self.kind)) def update_info(self, info): """ set/update the info for this indexable with the key/value if there is a conflict raise/warn as needed """ for key in self._info_fields: value = getattr(self, key, None) idx = _get_info(info, self.name) existing_value = idx.get(key) if key in idx and value is not None and existing_value != value: # frequency/name just warn if key in ['freq', 'index_name']: ws = attribute_conflict_doc % (key, existing_value, value) warnings.warn(ws, AttributeConflictWarning, stacklevel=6) # reset idx[key] = None setattr(self, key, None) else: raise ValueError( "invalid info for [%s] for [%s], existing_value [%s] " "conflicts with new value [%s]" % (self.name, key, existing_value, value)) else: if value is not None or existing_value is not None: idx[key] = value return self def set_info(self, info): """ set my state from the passed info """ idx = info.get(self.name) if idx is not None: self.__dict__.update(idx) def get_attr(self): """ set the kind for this colummn """ self.kind = getattr(self.attrs, self.kind_attr, None) def set_attr(self): """ set the kind for this colummn """ setattr(self.attrs, self.kind_attr, self.kind) def read_metadata(self, handler): """ retrieve the metadata for this columns """ self.metadata = handler.read_metadata(self.cname) def validate_metadata(self, handler): """ validate that kind=category does not change the categories """ if self.meta == 'category': new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) if new_metadata is not None and cur_metadata is not None \ and not com.array_equivalent(new_metadata, cur_metadata): raise ValueError("cannot append a categorical with different categories" " to the existing") def write_metadata(self, handler): """ set the meta data """ if self.metadata is not None: handler.write_metadata(self.cname,self.metadata) class GenericIndexCol(IndexCol): """ an index which is not represented in the data of the table """ @property def is_indexed(self): return False def convert(self, values, nan_rep, encoding): """ set the values from this selection: take = take ownership """ self.values = Int64Index(np.arange(self.table.nrows)) return self def get_attr(self): pass def set_attr(self): pass class DataCol(IndexCol): """ a data holding column, by definition this is not indexable Parameters ---------- data : the actual data cname : the column name in the table to hold the data (typically values) meta : a string description of the metadata metadata : the actual metadata """ is_an_indexable = False is_data_indexable = False _info_fields = ['tz','ordered'] @classmethod def create_for_block( cls, i=None, name=None, cname=None, version=None, **kwargs): """ return a new datacol with the block i """ if cname is None: cname = name or 'values_block_%d' % i if name is None: name = cname # prior to 0.10.1, we named values blocks 
like: values_block_0 an the # name values_0 try: if version[0] == 0 and version[1] <= 10 and version[2] == 0: m = re.search("values_block_(\d+)", name) if m: name = "values_%s" % m.groups()[0] except: pass return cls(name=name, cname=cname, **kwargs) def __init__(self, values=None, kind=None, typ=None, cname=None, data=None, meta=None, metadata=None, block=None, **kwargs): super(DataCol, self).__init__( values=values, kind=kind, typ=typ, cname=cname, **kwargs) self.dtype = None self.dtype_attr = u("%s_dtype" % self.name) self.meta = meta self.meta_attr = u("%s_meta" % self.name) self.set_data(data) self.set_metadata(metadata) def __unicode__(self): temp = tuple( map(pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape))) return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp def __eq__(self, other): """ compare 2 col items """ return all([getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'dtype', 'pos']]) def set_data(self, data, dtype=None): self.data = data if data is not None: if dtype is not None: self.dtype = dtype self.set_kind() elif self.dtype is None: self.dtype = data.dtype.name self.set_kind() def take_data(self): """ return the data & release the memory """ self.data, data = None, self.data return data def set_metadata(self, metadata): """ record the metadata """ if metadata is not None: metadata = np.array(metadata,copy=False).ravel() self.metadata = metadata def set_kind(self): # set my kind if we can if self.dtype is not None: dtype = _ensure_decoded(self.dtype) if dtype.startswith(u('string')) or dtype.startswith(u('bytes')): self.kind = 'string' elif dtype.startswith(u('float')): self.kind = 'float' elif dtype.startswith(u('complex')): self.kind = 'complex' elif dtype.startswith(u('int')) or dtype.startswith(u('uint')): self.kind = 'integer' elif dtype.startswith(u('date')): self.kind = 'datetime' elif dtype.startswith(u('timedelta')): self.kind = 'timedelta' elif dtype.startswith(u('bool')): self.kind = 'bool' else: raise AssertionError( "cannot interpret dtype of [%s] in [%s]" % (dtype, self)) # set my typ if we need if self.typ is None: self.typ = getattr(self.description, self.cname, None) def set_atom(self, block, block_items, existing_col, min_itemsize, nan_rep, info, encoding=None, **kwargs): """ create and setup my atom from the block b """ self.values = list(block_items) # short-cut certain block types if block.is_categorical: return self.set_atom_categorical(block, items=block_items, info=info) elif block.is_datetimetz: return self.set_atom_datetime64tz(block, info=info) elif block.is_datetime: return self.set_atom_datetime64(block) elif block.is_timedelta: return self.set_atom_timedelta64(block) elif block.is_complex: return self.set_atom_complex(block) dtype = block.dtype.name inferred_type = lib.infer_dtype(block.values) if inferred_type == 'date': raise TypeError( "[date] is not implemented as a table column") elif inferred_type == 'datetime': # after 8260 # this only would be hit for a mutli-timezone dtype # which is an error raise TypeError( "too many timezones in this block, create separate " "data columns" ) elif inferred_type == 'unicode': raise TypeError( "[unicode] is not implemented as a table column") # this is basically a catchall; if say a datetime64 has nans then will # end up here ### elif inferred_type == 'string' or dtype == 'object': self.set_atom_string( block, block_items, existing_col, min_itemsize, nan_rep, encoding) # set as a data block else: self.set_atom_data(block) def 
get_atom_string(self, block, itemsize): return _tables().StringCol(itemsize=itemsize, shape=block.shape[0]) def set_atom_string(self, block, block_items, existing_col, min_itemsize, nan_rep, encoding): # fill nan items with myself, don't disturb the blocks by # trying to downcast block = block.fillna(nan_rep, downcast=False) if isinstance(block, list): block = block[0] data = block.values # see if we have a valid string type inferred_type = lib.infer_dtype(data.ravel()) if inferred_type != 'string': # we cannot serialize this data, so report an exception on a column # by column basis for i, item in enumerate(block_items): col = block.iget(i) inferred_type = lib.infer_dtype(col.ravel()) if inferred_type != 'string': raise TypeError( "Cannot serialize the column [%s] because\n" "its data contents are [%s] object dtype" % (item, inferred_type) ) # itemsize is the maximum length of a string (along any dimension) data_converted = _convert_string_array(data, encoding) itemsize = data_converted.itemsize # specified min_itemsize? if isinstance(min_itemsize, dict): min_itemsize = int(min_itemsize.get( self.name) or min_itemsize.get('values') or 0) itemsize = max(min_itemsize or 0, itemsize) # check for column in the values conflicts if existing_col is not None: eci = existing_col.validate_col(itemsize) if eci > itemsize: itemsize = eci self.itemsize = itemsize self.kind = 'string' self.typ = self.get_atom_string(block, itemsize) self.set_data(data_converted.astype('|S%d' % itemsize, copy=False)) def get_atom_coltype(self, kind=None): """ return the PyTables column class for this column """ if kind is None: kind = self.kind if self.kind.startswith('uint'): col_name = "UInt%sCol" % kind[4:] else: col_name = "%sCol" % kind.capitalize() return getattr(_tables(), col_name) def get_atom_data(self, block, kind=None): return self.get_atom_coltype(kind=kind)(shape=block.shape[0]) def set_atom_complex(self, block): self.kind = block.dtype.name itemsize = int(self.kind.split('complex')[-1]) // 8 self.typ = _tables().ComplexCol(itemsize=itemsize, shape=block.shape[0]) self.set_data(block.values.astype(self.typ.type, copy=False)) def set_atom_data(self, block): self.kind = block.dtype.name self.typ = self.get_atom_data(block) self.set_data(block.values.astype(self.typ.type, copy=False)) def set_atom_categorical(self, block, items, info=None, values=None): # currently only supports a 1-D categorical # in a 1-D block values = block.values codes = values.codes self.kind = 'integer' self.dtype = codes.dtype.name if values.ndim > 1: raise NotImplementedError("only support 1-d categoricals") if len(items) > 1: raise NotImplementedError("only support single block categoricals") # write the codes; must be in a block shape self.ordered = values.ordered self.typ = self.get_atom_data(block, kind=codes.dtype.name) self.set_data(_block_shape(codes)) # write the categories self.meta = 'category' self.set_metadata(block.values.categories) # update the info self.update_info(info) def get_atom_datetime64(self, block): return _tables().Int64Col(shape=block.shape[0]) def set_atom_datetime64(self, block, values=None): self.kind = 'datetime64' self.typ = self.get_atom_datetime64(block) if values is None: values = block.values.view('i8') self.set_data(values, 'datetime64') def set_atom_datetime64tz(self, block, info, values=None): if values is None: values = block.values # convert this column to i8 in UTC, and save the tz values = values.asi8.reshape(block.shape) # store a converted timezone self.tz = _get_tz(block.values.tz) 
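# NOTE: the timezone is kept as column metadata (pushed into ``info`` by the
# ``update_info`` call below), while the values themselves are stored tz-naive
# as UTC i8; ``convert`` re-applies ``self.tz`` on read via ``_set_tz``.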
self.update_info(info) self.kind = 'datetime64' self.typ = self.get_atom_datetime64(block) self.set_data(values, 'datetime64') def get_atom_timedelta64(self, block): return _tables().Int64Col(shape=block.shape[0]) def set_atom_timedelta64(self, block, values=None): self.kind = 'timedelta64' self.typ = self.get_atom_timedelta64(block) if values is None: values = block.values.view('i8') self.set_data(values, 'timedelta64') @property def shape(self): return getattr(self.data, 'shape', None) @property def cvalues(self): """ return my cython values """ return self.data def validate_attr(self, append): """validate that we have the same order as the existing & same dtype""" if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if (existing_fields is not None and existing_fields != list(self.values)): raise ValueError("appended items do not match existing items" " in table!") existing_dtype = getattr(self.attrs, self.dtype_attr, None) if (existing_dtype is not None and existing_dtype != self.dtype): raise ValueError("appended items dtype do not match existing " "items dtype in table!") def convert(self, values, nan_rep, encoding): """set the data from this selection (and convert to the correct dtype if we can) """ try: values = values[self.cname] except: pass self.set_data(values) # use the meta if needed meta = _ensure_decoded(self.meta) # convert to the correct dtype if self.dtype is not None: dtype = _ensure_decoded(self.dtype) # reverse converts if dtype == u('datetime64'): # recreate with tz if indicated self.data = _set_tz(self.data, self.tz, coerce=True) elif dtype == u('timedelta64'): self.data = np.asarray(self.data, dtype='m8[ns]') elif dtype == u('date'): try: self.data = np.asarray( [date.fromordinal(v) for v in self.data], dtype=object) except ValueError: self.data = np.asarray( [date.fromtimestamp(v) for v in self.data], dtype=object) elif dtype == u('datetime'): self.data = np.asarray( [datetime.fromtimestamp(v) for v in self.data], dtype=object) elif meta == u('category'): # we have a categorical categories = self.metadata self.data = Categorical.from_codes(self.data.ravel(), categories=categories, ordered=self.ordered) else: try: self.data = self.data.astype(dtype, copy=False) except: self.data = self.data.astype('O', copy=False) # convert nans / decode if _ensure_decoded(self.kind) == u('string'): self.data = _unconvert_string_array( self.data, nan_rep=nan_rep, encoding=encoding) return self def get_attr(self): """ get the data for this colummn """ self.values = getattr(self.attrs, self.kind_attr, None) self.dtype = getattr(self.attrs, self.dtype_attr, None) self.meta = getattr(self.attrs, self.meta_attr, None) self.set_kind() def set_attr(self): """ set the data for this colummn """ setattr(self.attrs, self.kind_attr, self.values) setattr(self.attrs, self.meta_attr, self.meta) if self.dtype is not None: setattr(self.attrs, self.dtype_attr, self.dtype) class DataIndexableCol(DataCol): """ represent a data column that can be indexed """ is_data_indexable = True def validate_names(self): if not Index(self.values).is_object(): raise ValueError("cannot have non-object label DataIndexableCol") def get_atom_string(self, block, itemsize): return _tables().StringCol(itemsize=itemsize) def get_atom_data(self, block, kind=None): return self.get_atom_coltype(kind=kind)() def get_atom_datetime64(self, block): return _tables().Int64Col() def get_atom_timedelta64(self, block): return _tables().Int64Col() class GenericDataIndexableCol(DataIndexableCol): """ represent a generic 
pytables data column """ def get_attr(self): pass class Fixed(StringMixin): """ represent an object in my store facilitate read/write of various types of objects this is an abstract base class Parameters ---------- parent : my parent HDFStore group : the group node where the table resides """ pandas_kind = None obj_type = None ndim = None is_table = False def __init__(self, parent, group, encoding=None, **kwargs): self.parent = parent self.group = group self.encoding = _ensure_encoding(encoding) self.set_version() @property def is_old_version(self): return (self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1) def set_version(self): """ compute and set our version """ version = _ensure_decoded( getattr(self.group._v_attrs, 'pandas_version', None)) try: self.version = tuple([int(x) for x in version.split('.')]) if len(self.version) == 2: self.version = self.version + (0,) except: self.version = (0, 0, 0) @property def pandas_type(self): return _ensure_decoded(getattr(self.group._v_attrs, 'pandas_type', None)) @property def format_type(self): return 'fixed' def __unicode__(self): """ return a pretty representation of myself """ self.infer_axes() s = self.shape if s is not None: if isinstance(s, (list, tuple)): s = "[%s]" % ','.join([pprint_thing(x) for x in s]) return "%-12.12s (shape->%s)" % (self.pandas_type, s) return self.pandas_type def set_object_info(self): """ set my pandas type & version """ self.attrs.pandas_type = str(self.pandas_kind) self.attrs.pandas_version = str(_version) self.set_version() def copy(self): new_self = copy.copy(self) return new_self @property def storage_obj_type(self): return self.obj_type @property def shape(self): return self.nrows @property def pathname(self): return self.group._v_pathname @property def _handle(self): return self.parent._handle @property def _filters(self): return self.parent._filters @property def _complevel(self): return self.parent._complevel @property def _fletcher32(self): return self.parent._fletcher32 @property def _complib(self): return self.parent._complib @property def attrs(self): return self.group._v_attrs def set_attrs(self): """ set our object attributes """ pass def get_attrs(self): """ get our object attributes """ pass @property def storable(self): """ return my storable """ return self.group @property def is_exists(self): return False @property def nrows(self): return getattr(self.storable, 'nrows', None) def validate(self, other): """ validate against an existing storable """ if other is None: return return True def validate_version(self, where=None): """ are we trying to operate on an old version? 
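(the base ``Fixed`` implementation simply returns True; ``Table`` defines its own check)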
""" return True def infer_axes(self): """ infer the axes of my storer return a boolean indicating if we have a valid storer or not """ s = self.storable if s is None: return False self.get_attrs() return True def read(self, **kwargs): raise NotImplementedError( "cannot read on an abstract storer: subclasses should implement") def write(self, **kwargs): raise NotImplementedError( "cannot write on an abstract storer: sublcasses should implement") def delete(self, where=None, start=None, stop=None, **kwargs): """ support fully deleting the node in its entirety (only) - where specification must be None """ if where is None and start is None and stop is None: self._handle.remove_node(self.group, recursive=True) return None raise TypeError("cannot delete on an abstract storer") class GenericFixed(Fixed): """ a generified fixed version """ _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'} _reverse_index_map = dict([(v, k) for k, v in compat.iteritems(_index_type_map)]) attributes = [] # indexer helpders def _class_to_alias(self, cls): return self._index_type_map.get(cls, '') def _alias_to_class(self, alias): if isinstance(alias, type): # pragma: no cover # compat: for a short period of time master stored types return alias return self._reverse_index_map.get(alias, Index) def _get_index_factory(self, klass): if klass == DatetimeIndex: def f(values, freq=None, tz=None): return DatetimeIndex._simple_new(values, None, freq=freq, tz=tz) return f return klass def validate_read(self, kwargs): if kwargs.get('columns') is not None: raise TypeError("cannot pass a column specification when reading " "a Fixed format store. this store must be " "selected in its entirety") if kwargs.get('where') is not None: raise TypeError("cannot pass a where specification when reading " "from a Fixed format store. 
this store must be " "selected in its entirety") @property def is_exists(self): return True def set_attrs(self): """ set our object attributes """ self.attrs.encoding = self.encoding def get_attrs(self): """ retrieve our attributes """ self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None)) for n in self.attributes: setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None))) def write(self, obj, **kwargs): self.set_attrs() def read_array(self, key): """ read an array for the specified node (off of group """ import tables node = getattr(self.group, key) data = node[:] attrs = node._v_attrs transposed = getattr(attrs, 'transposed', False) if isinstance(node, tables.VLArray): ret = data[0] else: dtype = getattr(attrs, 'value_type', None) shape = getattr(attrs, 'shape', None) if shape is not None: # length 0 axis ret = np.empty(shape, dtype=dtype) else: ret = data if dtype == u('datetime64'): # reconstruct a timezone if indicated ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True) elif dtype == u('timedelta64'): ret = np.asarray(ret, dtype='m8[ns]') if transposed: return ret.T else: return ret def read_index(self, key): variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key)) if variety == u('multi'): return self.read_multi_index(key) elif variety == u('block'): return self.read_block_index(key) elif variety == u('sparseint'): return self.read_sparse_intindex(key) elif variety == u('regular'): _, index = self.read_index_node(getattr(self.group, key)) return index else: # pragma: no cover raise TypeError('unrecognized index variety: %s' % variety) def write_index(self, key, index): if isinstance(index, MultiIndex): setattr(self.attrs, '%s_variety' % key, 'multi') self.write_multi_index(key, index) elif isinstance(index, BlockIndex): setattr(self.attrs, '%s_variety' % key, 'block') self.write_block_index(key, index) elif isinstance(index, IntIndex): setattr(self.attrs, '%s_variety' % key, 'sparseint') self.write_sparse_intindex(key, index) else: setattr(self.attrs, '%s_variety' % key, 'regular') converted = _convert_index(index, self.encoding, self.format_type).set_name('index') self.write_array(key, converted.values) node = getattr(self.group, key) node._v_attrs.kind = converted.kind node._v_attrs.name = index.name if isinstance(index, (DatetimeIndex, PeriodIndex)): node._v_attrs.index_class = self._class_to_alias(type(index)) if hasattr(index, 'freq'): node._v_attrs.freq = index.freq if hasattr(index, 'tz') and index.tz is not None: node._v_attrs.tz = _get_tz(index.tz) def write_block_index(self, key, index): self.write_array('%s_blocs' % key, index.blocs) self.write_array('%s_blengths' % key, index.blengths) setattr(self.attrs, '%s_length' % key, index.length) def read_block_index(self, key): length = getattr(self.attrs, '%s_length' % key) blocs = self.read_array('%s_blocs' % key) blengths = self.read_array('%s_blengths' % key) return BlockIndex(length, blocs, blengths) def write_sparse_intindex(self, key, index): self.write_array('%s_indices' % key, index.indices) setattr(self.attrs, '%s_length' % key, index.length) def read_sparse_intindex(self, key): length = getattr(self.attrs, '%s_length' % key) indices = self.read_array('%s_indices' % key) return IntIndex(length, indices) def write_multi_index(self, key, index): setattr(self.attrs, '%s_nlevels' % key, index.nlevels) for i, (lev, lab, name) in enumerate(zip(index.levels, index.labels, index.names)): # write the level level_key = '%s_level%d' % (key, i) conv_level = _convert_index(lev, self.encoding, 
self.format_type).set_name(level_key) self.write_array(level_key, conv_level.values) node = getattr(self.group, level_key) node._v_attrs.kind = conv_level.kind node._v_attrs.name = name # write the name setattr(node._v_attrs, '%s_name%d' % (key, i), name) # write the labels label_key = '%s_label%d' % (key, i) self.write_array(label_key, lab) def read_multi_index(self, key): nlevels = getattr(self.attrs, '%s_nlevels' % key) levels = [] labels = [] names = [] for i in range(nlevels): level_key = '%s_level%d' % (key, i) name, lev = self.read_index_node(getattr(self.group, level_key)) levels.append(lev) names.append(name) label_key = '%s_label%d' % (key, i) lab = self.read_array(label_key) labels.append(lab) return MultiIndex(levels=levels, labels=labels, names=names, verify_integrity=True) def read_index_node(self, node): data = node[:] # If the index was an empty array write_array_empty() will # have written a sentinel. Here we relace it with the original. if ('shape' in node._v_attrs and self._is_empty_array(getattr(node._v_attrs, 'shape'))): data = np.empty(getattr(node._v_attrs, 'shape'), dtype=getattr(node._v_attrs, 'value_type')) kind = _ensure_decoded(node._v_attrs.kind) name = None if 'name' in node._v_attrs: name = node._v_attrs.name index_class = self._alias_to_class(getattr(node._v_attrs, 'index_class', '')) factory = self._get_index_factory(index_class) kwargs = {} if u('freq') in node._v_attrs: kwargs['freq'] = node._v_attrs['freq'] if u('tz') in node._v_attrs: kwargs['tz'] = node._v_attrs['tz'] if kind in (u('date'), u('datetime')): index = factory( _unconvert_index(data, kind, encoding=self.encoding), dtype=object, **kwargs) else: index = factory( _unconvert_index(data, kind, encoding=self.encoding), **kwargs) index.name = name return name, index def write_array_empty(self, key, value): """ write a 0-len array """ # ugly hack for length 0 axes arr = np.empty((1,) * value.ndim) self._handle.create_array(self.group, key, arr) getattr(self.group, key)._v_attrs.value_type = str(value.dtype) getattr(self.group, key)._v_attrs.shape = value.shape def _is_empty_array(self, shape): """Returns true if any axis is zero length.""" return any(x == 0 for x in shape) def write_array(self, key, value, items=None): if key in self.group: self._handle.remove_node(self.group, key) # Transform needed to interface with pytables row/col notation empty_array = self._is_empty_array(value.shape) transposed = False if com.is_categorical_dtype(value): raise NotImplementedError("cannot store a category dtype") if not empty_array: value = value.T transposed = True if self._filters is not None: atom = None try: # get the atom for this datatype atom = _tables().Atom.from_dtype(value.dtype) except ValueError: pass if atom is not None: # create an empty chunked array and fill it from value if not empty_array: ca = self._handle.create_carray(self.group, key, atom, value.shape, filters=self._filters) ca[:] = value getattr(self.group, key)._v_attrs.transposed = transposed else: self.write_array_empty(key, value) return if value.dtype.type == np.object_: # infer the type, warn if we have a non-string type here (for # performance) inferred_type = lib.infer_dtype(value.ravel()) if empty_array: pass elif inferred_type == 'string': pass else: try: items = list(items) except: pass ws = performance_doc % (inferred_type, key, items) warnings.warn(ws, PerformanceWarning, stacklevel=7) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) else: if empty_array: 
self.write_array_empty(key, value) else: if com.is_datetime64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view('i8')) getattr( self.group, key)._v_attrs.value_type = 'datetime64' elif com.is_datetime64tz_dtype(value.dtype): # store as UTC # with a zone self._handle.create_array(self.group, key, value.asi8) node = getattr(self.group, key) node._v_attrs.tz = _get_tz(value.tz) node._v_attrs.value_type = 'datetime64' elif com.is_timedelta64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view('i8')) getattr( self.group, key)._v_attrs.value_type = 'timedelta64' else: self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed class LegacyFixed(GenericFixed): def read_index_legacy(self, key): node = getattr(self.group, key) data = node[:] kind = node._v_attrs.kind return _unconvert_index_legacy(data, kind, encoding=self.encoding) class LegacySeriesFixed(LegacyFixed): def read(self, **kwargs): self.validate_read(kwargs) index = self.read_index_legacy('index') values = self.read_array('values') return Series(values, index=index) class LegacyFrameFixed(LegacyFixed): def read(self, **kwargs): self.validate_read(kwargs) index = self.read_index_legacy('index') columns = self.read_index_legacy('columns') values = self.read_array('values') return DataFrame(values, index=index, columns=columns) class SeriesFixed(GenericFixed): pandas_kind = u('series') attributes = ['name'] @property def shape(self): try: return len(getattr(self.group, 'values')), except: return None def read(self, **kwargs): self.validate_read(kwargs) index = self.read_index('index') values = self.read_array('values') return Series(values, index=index, name=self.name) def write(self, obj, **kwargs): super(SeriesFixed, self).write(obj, **kwargs) self.write_index('index', obj.index) self.write_array('values', obj.values) self.attrs.name = obj.name class SparseSeriesFixed(GenericFixed): pandas_kind = u('sparse_series') attributes = ['name', 'fill_value', 'kind'] def read(self, **kwargs): self.validate_read(kwargs) index = self.read_index('index') sp_values = self.read_array('sp_values') sp_index = self.read_index('sp_index') return SparseSeries(sp_values, index=index, sparse_index=sp_index, kind=self.kind or u('block'), fill_value=self.fill_value, name=self.name) def write(self, obj, **kwargs): super(SparseSeriesFixed, self).write(obj, **kwargs) self.write_index('index', obj.index) self.write_index('sp_index', obj.sp_index) self.write_array('sp_values', obj.sp_values) self.attrs.name = obj.name self.attrs.fill_value = obj.fill_value self.attrs.kind = obj.kind class SparseFrameFixed(GenericFixed): pandas_kind = u('sparse_frame') attributes = ['default_kind', 'default_fill_value'] def read(self, **kwargs): self.validate_read(kwargs) columns = self.read_index('columns') sdict = {} for c in columns: key = 'sparse_series_%s' % c s = SparseSeriesFixed(self.parent, getattr(self.group, key)) s.infer_axes() sdict[c] = s.read() return SparseDataFrame(sdict, columns=columns, default_kind=self.default_kind, default_fill_value=self.default_fill_value) def write(self, obj, **kwargs): """ write it as a collection of individual sparse series """ super(SparseFrameFixed, self).write(obj, **kwargs) for name, ss in compat.iteritems(obj): key = 'sparse_series_%s' % name if key not in self.group._v_children: node = self._handle.create_group(self.group, key) else: node = getattr(self.group, key) s = SparseSeriesFixed(self.parent, node) s.write(ss) 
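# once each column has been written out as its own 'sparse_series_<name>'
# subgroup, the frame-level defaults and the column index are recorded on the
# parent group below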
self.attrs.default_fill_value = obj.default_fill_value self.attrs.default_kind = obj.default_kind self.write_index('columns', obj.columns) class SparsePanelFixed(GenericFixed): pandas_kind = u('sparse_panel') attributes = ['default_kind', 'default_fill_value'] def read(self, **kwargs): self.validate_read(kwargs) items = self.read_index('items') sdict = {} for name in items: key = 'sparse_frame_%s' % name s = SparseFrameFixed(self.parent, getattr(self.group, key)) s.infer_axes() sdict[name] = s.read() return SparsePanel(sdict, items=items, default_kind=self.default_kind, default_fill_value=self.default_fill_value) def write(self, obj, **kwargs): super(SparsePanelFixed, self).write(obj, **kwargs) self.attrs.default_fill_value = obj.default_fill_value self.attrs.default_kind = obj.default_kind self.write_index('items', obj.items) for name, sdf in compat.iteritems(obj): key = 'sparse_frame_%s' % name if key not in self.group._v_children: node = self._handle.create_group(self.group, key) else: node = getattr(self.group, key) s = SparseFrameFixed(self.parent, node) s.write(sdf) class BlockManagerFixed(GenericFixed): attributes = ['ndim', 'nblocks'] is_shape_reversed = False @property def shape(self): try: ndim = self.ndim # items items = 0 for i in range(self.nblocks): node = getattr(self.group, 'block%d_items' % i) shape = getattr(node, 'shape', None) if shape is not None: items += shape[0] # data shape node = getattr(self.group, 'block0_values') shape = getattr(node, 'shape', None) if shape is not None: shape = list(shape[0:(ndim - 1)]) else: shape = [] shape.append(items) # hacky - this works for frames, but is reversed for panels if self.is_shape_reversed: shape = shape[::-1] return shape except: return None def read(self, **kwargs): self.validate_read(kwargs) axes = [] for i in range(self.ndim): ax = self.read_index('axis%d' % i) axes.append(ax) items = axes[0] blocks = [] for i in range(self.nblocks): blk_items = self.read_index('block%d_items' % i) values = self.read_array('block%d_values' % i) blk = make_block(values, placement=items.get_indexer(blk_items)) blocks.append(blk) return self.obj_type(BlockManager(blocks, axes)) def write(self, obj, **kwargs): super(BlockManagerFixed, self).write(obj, **kwargs) data = obj._data if not data.is_consolidated(): data = data.consolidate() self.attrs.ndim = data.ndim for i, ax in enumerate(data.axes): if i == 0: if not ax.is_unique: raise ValueError("Columns index has to be unique for fixed format") self.write_index('axis%d' % i, ax) # Supporting mixed-type DataFrame objects...nontrivial self.attrs.nblocks = len(data.blocks) for i, blk in enumerate(data.blocks): # I have no idea why, but writing values before items fixed #2299 blk_items = data.items.take(blk.mgr_locs) self.write_array('block%d_values' % i, blk.values, items=blk_items) self.write_index('block%d_items' % i, blk_items) class FrameFixed(BlockManagerFixed): pandas_kind = u('frame') obj_type = DataFrame class PanelFixed(BlockManagerFixed): pandas_kind = u('wide') obj_type = Panel is_shape_reversed = True def write(self, obj, **kwargs): obj._consolidate_inplace() return super(PanelFixed, self).write(obj, **kwargs) class Table(Fixed): """ represent a table: facilitate read/write of various types of tables Attrs in Table Node ------------------- These are attributes that are store in the main table node, they are necessary to recreate these tables when read back in. 
index_axes : a list of tuples of the (original indexing axis and index column) non_index_axes: a list of tuples of the (original index axis and columns on a non-indexing axis) values_axes : a list of the columns which comprise the data of this table data_columns : a list of the columns that we are allowing indexing (these become single columns in values_axes), or True to force all columns nan_rep : the string to use for nan representations for string objects levels : the names of levels metadata : the names of the metadata columns """ pandas_kind = u('wide_table') table_type = None levels = 1 is_table = True is_shape_reversed = False def __init__(self, *args, **kwargs): super(Table, self).__init__(*args, **kwargs) self.index_axes = [] self.non_index_axes = [] self.values_axes = [] self.data_columns = [] self.metadata = [] self.info = dict() self.nan_rep = None self.selection = None @property def table_type_short(self): return self.table_type.split('_')[0] @property def format_type(self): return 'table' def __unicode__(self): """ return a pretty representatgion of myself """ self.infer_axes() dc = ",dc->[%s]" % ','.join( self.data_columns) if len(self.data_columns) else '' ver = '' if self.is_old_version: ver = "[%s]" % '.'.join([str(x) for x in self.version]) return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % ( self.pandas_type, ver, self.table_type_short, self.nrows, self.ncols, ','.join([a.name for a in self.index_axes]), dc ) def __getitem__(self, c): """ return the axis for c """ for a in self.axes: if c == a.name: return a return None def validate(self, other): """ validate against an existing table """ if other is None: return if other.table_type != self.table_type: raise TypeError("incompatible table_type with existing [%s - %s]" % (other.table_type, self.table_type)) for c in ['index_axes', 'non_index_axes', 'values_axes']: sv = getattr(self, c, None) ov = getattr(other, c, None) if sv != ov: # show the error for the specific axes for i, sax in enumerate(sv): oax = ov[i] if sax != oax: raise ValueError( "invalid combinate of [%s] on appending data [%s] " "vs current table [%s]" % (c, sax, oax)) # should never get here raise Exception( "invalid combinate of [%s] on appending data [%s] vs " "current table [%s]" % (c, sv, ov)) @property def is_multi_index(self): """the levels attribute is 1 or a list in the case of a multi-index""" return isinstance(self.levels, list) def validate_metadata(self, existing): """ create / validate metadata """ self.metadata = [ c.name for c in self.values_axes if c.metadata is not None ] def validate_multiindex(self, obj): """validate that we can store the multi-index; reset and return the new object """ levels = [l if l is not None else "level_{0}".format(i) for i, l in enumerate(obj.index.names)] try: return obj.reset_index(), levels except ValueError: raise ValueError("duplicate names/columns in the multi-index when " "storing as a table") @property def nrows_expected(self): """ based on our axes, compute the expected nrows """ return np.prod([i.cvalues.shape[0] for i in self.index_axes]) @property def is_exists(self): """ has this table been created """ return u('table') in self.group @property def storable(self): return getattr(self.group, 'table', None) @property def table(self): """ return the table group (this is my storable) """ return self.storable @property def dtype(self): return self.table.dtype @property def description(self): return self.table.description @property def axes(self): return itertools.chain(self.index_axes, 
self.values_axes) @property def ncols(self): """ the number of total columns in the values axes """ return sum([len(a.values) for a in self.values_axes]) @property def is_transposed(self): return False @property def data_orientation(self): """return a tuple of my permutated axes, non_indexable at the front""" return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes])) def queryables(self): """ return a dict of the kinds allowable columns for this object """ # compute the values_axes queryables return dict( [(a.cname, a) for a in self.index_axes] + [(self.storage_obj_type._AXIS_NAMES[axis], None) for axis, values in self.non_index_axes] + [(v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)] ) def index_cols(self): """ return a list of my index cols """ return [(i.axis, i.cname) for i in self.index_axes] def values_cols(self): """ return a list of my values cols """ return [i.cname for i in self.values_axes] def _get_metadata_path(self, key): """ return the metadata pathname for this key """ return "{group}/meta/{key}/meta".format(group=self.group._v_pathname, key=key) def write_metadata(self, key, values): """ write out a meta data array to the key as a fixed-format Series Parameters ---------- key : string values : ndarray """ values = Series(values) self.parent.put(self._get_metadata_path(key), values, format='table', encoding=self.encoding, nan_rep=self.nan_rep) def read_metadata(self, key): """ return the meta data array for this key """ if getattr(getattr(self.group,'meta',None),key,None) is not None: return self.parent.select(self._get_metadata_path(key)) return None def set_info(self): """ update our table index info """ self.attrs.info = self.info def set_attrs(self): """ set our table type & indexables """ self.attrs.table_type = str(self.table_type) self.attrs.index_cols = self.index_cols() self.attrs.values_cols = self.values_cols() self.attrs.non_index_axes = self.non_index_axes self.attrs.data_columns = self.data_columns self.attrs.nan_rep = self.nan_rep self.attrs.encoding = self.encoding self.attrs.levels = self.levels self.attrs.metadata = self.metadata self.set_info() def get_attrs(self): """ retrieve our attributes """ self.non_index_axes = getattr( self.attrs, 'non_index_axes', None) or [] self.data_columns = getattr( self.attrs, 'data_columns', None) or [] self.info = getattr( self.attrs, 'info', None) or dict() self.nan_rep = getattr(self.attrs, 'nan_rep', None) self.encoding = _ensure_encoding( getattr(self.attrs, 'encoding', None)) self.levels = getattr( self.attrs, 'levels', None) or [] self.index_axes = [ a.infer(self) for a in self.indexables if a.is_an_indexable ] self.values_axes = [ a.infer(self) for a in self.indexables if not a.is_an_indexable ] self.metadata = getattr( self.attrs, 'metadata', None) or [] def validate_version(self, where=None): """ are we trying to operate on an old version? 
""" if where is not None: if (self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1): ws = incompatibility_doc % '.'.join( [str(x) for x in self.version]) warnings.warn(ws, IncompatibilityWarning) def validate_min_itemsize(self, min_itemsize): """validate the min_itemisze doesn't contain items that are not in the axes this needs data_columns to be defined """ if min_itemsize is None: return if not isinstance(min_itemsize, dict): return q = self.queryables() for k, v in min_itemsize.items(): # ok, apply generally if k == 'values': continue if k not in q: raise ValueError( "min_itemsize has the key [%s] which is not an axis or " "data_column" % k) @property def indexables(self): """ create/cache the indexables if they don't exist """ if self._indexables is None: self._indexables = [] # index columns self._indexables.extend([ IndexCol(name=name, axis=axis, pos=i) for i, (axis, name) in enumerate(self.attrs.index_cols) ]) # values columns dc = set(self.data_columns) base_pos = len(self._indexables) def f(i, c): klass = DataCol if c in dc: klass = DataIndexableCol return klass.create_for_block(i=i, name=c, pos=base_pos + i, version=self.version) self._indexables.extend( [f(i, c) for i, c in enumerate(self.attrs.values_cols)]) return self._indexables def create_index(self, columns=None, optlevel=None, kind=None): """ Create a pytables index on the specified columns note: cannot index Time64Col() or ComplexCol currently; PyTables must be >= 3.0 Paramaters ---------- columns : False (don't create an index), True (create all columns index), None or list_like (the indexers to index) optlevel: optimization level (defaults to 6) kind : kind of index (defaults to 'medium') Exceptions ---------- raises if the node is not a table """ if not self.infer_axes(): return if columns is False: return # index all indexables and data_columns if columns is None or columns is True: columns = [a.cname for a in self.axes if a.is_data_indexable] if not isinstance(columns, (tuple, list)): columns = [columns] kw = dict() if optlevel is not None: kw['optlevel'] = optlevel if kind is not None: kw['kind'] = kind table = self.table for c in columns: v = getattr(table.cols, c, None) if v is not None: # remove the index if the kind/optlevel have changed if v.is_indexed: index = v.index cur_optlevel = index.optlevel cur_kind = index.kind if kind is not None and cur_kind != kind: v.remove_index() else: kw['kind'] = cur_kind if optlevel is not None and cur_optlevel != optlevel: v.remove_index() else: kw['optlevel'] = cur_optlevel # create the index if not v.is_indexed: if v.type.startswith('complex'): raise TypeError('Columns containing complex values can be stored but cannot' ' be indexed when using table format. 
Either use fixed ' 'format, set index=False, or do not include the columns ' 'containing complex values to data_columns when ' 'initializing the table.') v.create_index(**kw) def read_axes(self, where, **kwargs): """create and return the axes sniffed from the table: return boolean for success """ # validate the version self.validate_version(where) # infer the data kind if not self.infer_axes(): return False # create the selection self.selection = Selection(self, where=where, **kwargs) values = self.selection.select() # convert the data for a in self.axes: a.set_info(self.info) a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding) return True def get_object(self, obj): """ return the data for this obj """ return obj def validate_data_columns(self, data_columns, min_itemsize): """take the input data_columns and min_itemize and create a data columns spec """ if not len(self.non_index_axes): return [] axis, axis_labels = self.non_index_axes[0] info = self.info.get(axis, dict()) if info.get('type') == 'MultiIndex' and data_columns: raise ValueError("cannot use a multi-index on axis [{0}] with " "data_columns {1}".format(axis, data_columns)) # evaluate the passed data_columns, True == use all columns # take only valide axis labels if data_columns is True: data_columns = axis_labels elif data_columns is None: data_columns = [] # if min_itemsize is a dict, add the keys (exclude 'values') if isinstance(min_itemsize, dict): existing_data_columns = set(data_columns) data_columns.extend([ k for k in min_itemsize.keys() if k != 'values' and k not in existing_data_columns ]) # return valid columns in the order of our axis return [c for c in data_columns if c in axis_labels] def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, min_itemsize=None, **kwargs): """ create and return the axes leagcy tables create an indexable column, indexable index, non-indexable fields Parameters: ----------- axes: a list of the axes in order to create (names or numbers of the axes) obj : the object to create axes on validate: validate the obj against an existing object already written min_itemsize: a dict of the min size for a column in bytes nan_rep : a values to use for string column nan_rep encoding : the encoding for string values data_columns : a list of columns that we want to create separate to allow indexing (or True will force all columns) """ # set the default axes if needed if axes is None: try: axes = _AXES_MAP[type(obj)] except: raise TypeError("cannot properly create the storer for: " "[group->%s,value->%s]" % (self.group._v_name, type(obj))) # map axes to numbers axes = [obj._get_axis_number(a) for a in axes] # do we have an existing table (if so, use its axes & data_columns) if self.infer_axes(): existing_table = self.copy() existing_table.infer_axes() axes = [a.axis for a in existing_table.index_axes] data_columns = existing_table.data_columns nan_rep = existing_table.nan_rep self.encoding = existing_table.encoding self.info = copy.copy(existing_table.info) else: existing_table = None # currently support on ndim-1 axes if len(axes) != self.ndim - 1: raise ValueError( "currently only support ndim-1 indexers in an AppendableTable") # create according to the new data self.non_index_axes = [] self.data_columns = [] # nan_representation if nan_rep is None: nan_rep = 'nan' self.nan_rep = nan_rep # create axes to index and non_index index_axes_map = dict() for i, a in enumerate(obj.axes): if i in axes: name = obj._AXIS_NAMES[i] index_axes_map[i] = _convert_index( a, 
self.encoding, self.format_type ).set_name(name).set_axis(i) else: # we might be able to change the axes on the appending data if # necessary append_axis = list(a) if existing_table is not None: indexer = len(self.non_index_axes) exist_axis = existing_table.non_index_axes[indexer][1] if append_axis != exist_axis: # ahah! -> reindex if sorted(append_axis) == sorted(exist_axis): append_axis = exist_axis # the non_index_axes info info = _get_info(self.info, i) info['names'] = list(a.names) info['type'] = a.__class__.__name__ self.non_index_axes.append((i, append_axis)) # set axis positions (based on the axes) self.index_axes = [ index_axes_map[a].set_pos(j).update_info(self.info) for j, a in enumerate(axes) ] j = len(self.index_axes) # check for column conflicts if validate: for a in self.axes: a.maybe_set_size(min_itemsize=min_itemsize) # reindex by our non_index_axes & compute data_columns for a in self.non_index_axes: obj = _reindex_axis(obj, a[0], a[1]) def get_blk_items(mgr, blocks): return [mgr.items.take(blk.mgr_locs) for blk in blocks] # figure out data_columns and get out blocks block_obj = self.get_object(obj).consolidate() blocks = block_obj._data.blocks blk_items = get_blk_items(block_obj._data, blocks) if len(self.non_index_axes): axis, axis_labels = self.non_index_axes[0] data_columns = self.validate_data_columns( data_columns, min_itemsize) if len(data_columns): mgr = block_obj.reindex_axis( Index(axis_labels).difference(Index(data_columns)), axis=axis )._data blocks = list(mgr.blocks) blk_items = get_blk_items(mgr, blocks) for c in data_columns: mgr = block_obj.reindex_axis([c], axis=axis)._data blocks.extend(mgr.blocks) blk_items.extend(get_blk_items(mgr, mgr.blocks)) # reorder the blocks in the same order as the existing_table if we can if existing_table is not None: by_items = dict([(tuple(b_items.tolist()), (b, b_items)) for b, b_items in zip(blocks, blk_items)]) new_blocks = [] new_blk_items = [] for ea in existing_table.values_axes: items = tuple(ea.values) try: b, b_items = by_items.pop(items) new_blocks.append(b) new_blk_items.append(b_items) except: raise ValueError( "cannot match existing table structure for [%s] on " "appending data" % ','.join(com.pprint_thing(item) for item in items)) blocks = new_blocks blk_items = new_blk_items # add my values self.values_axes = [] for i, (b, b_items) in enumerate(zip(blocks, blk_items)): # shape of the data column are the indexable axes klass = DataCol name = None # we have a data_column if (data_columns and len(b_items) == 1 and b_items[0] in data_columns): klass = DataIndexableCol name = b_items[0] self.data_columns.append(name) # make sure that we match up the existing columns # if we have an existing table if existing_table is not None and validate: try: existing_col = existing_table.values_axes[i] except: raise ValueError("Incompatible appended table [%s] with " "existing table [%s]" % (blocks, existing_table.values_axes)) else: existing_col = None try: col = klass.create_for_block( i=i, name=name, version=self.version) col.set_atom(block=b, block_items=b_items, existing_col=existing_col, min_itemsize=min_itemsize, nan_rep=nan_rep, encoding=self.encoding, info=self.info, **kwargs) col.set_pos(j) self.values_axes.append(col) except (NotImplementedError, ValueError, TypeError) as e: raise e except Exception as detail: raise Exception( "cannot find the correct atom type -> " "[dtype->%s,items->%s] %s" % (b.dtype.name, b_items, str(detail)) ) j += 1 # validate our min_itemsize self.validate_min_itemsize(min_itemsize) # 
validate our metadata self.validate_metadata(existing_table) # validate the axes if we have an existing table if validate: self.validate(existing_table) def process_axes(self, obj, columns=None): """ process axes filters """ # make a copy to avoid side effects if columns is not None: columns = list(columns) # make sure to include levels if we have them if columns is not None and self.is_multi_index: for n in self.levels: if n not in columns: columns.insert(0, n) # reorder by any non_index_axes & limit to the select columns for axis, labels in self.non_index_axes: obj = _reindex_axis(obj, axis, labels, columns) # apply the selection filters (but keep in the same order) if self.selection.filter is not None: for field, op, filt in self.selection.filter.format(): def process_filter(field, filt): for axis_name in obj._AXIS_NAMES.values(): axis_number = obj._get_axis_number(axis_name) axis_values = obj._get_axis(axis_name) # see if the field is the name of an axis if field == axis_name: # if we have a multi-index, then need to include # the levels if self.is_multi_index: filt = filt.union(Index(self.levels)) takers = op(axis_values, filt) return obj.ix._getitem_axis(takers, axis=axis_number) # this might be the name of a file IN an axis elif field in axis_values: # we need to filter on this dimension values = _ensure_index(getattr(obj, field).values) filt = _ensure_index(filt) # hack until we support reversed dim flags if isinstance(obj, DataFrame): axis_number = 1 - axis_number takers = op(values, filt) return obj.ix._getitem_axis(takers, axis=axis_number) raise ValueError( "cannot find the field [%s] for filtering!" % field) obj = process_filter(field, filt) return obj def create_description(self, complib=None, complevel=None, fletcher32=False, expectedrows=None): """ create the description of the table from the axes & values """ # provided expected rows if its passed if expectedrows is None: expectedrows = max(self.nrows_expected, 10000) d = dict(name='table', expectedrows=expectedrows) # description from the axes & values d['description'] = dict([(a.cname, a.typ) for a in self.axes]) if complib: if complevel is None: complevel = self._complevel or 9 filters = _tables().Filters( complevel=complevel, complib=complib, fletcher32=fletcher32 or self._fletcher32) d['filters'] = filters elif self._filters is not None: d['filters'] = self._filters return d def read_coordinates(self, where=None, start=None, stop=None, **kwargs): """select coordinates (row numbers) from a table; return the coordinates object """ # validate the version self.validate_version(where) # infer the data kind if not self.infer_axes(): return False # create the selection self.selection = Selection( self, where=where, start=start, stop=stop, **kwargs) coords = self.selection.select_coords() if self.selection.filter is not None: for field, op, filt in self.selection.filter.format(): data = self.read_column(field, start=coords.min(), stop=coords.max()+1) coords = coords[op(data.iloc[coords-coords.min()], filt).values] return Index(coords) def read_column(self, column, where=None, start=None, stop=None, **kwargs): """return a single column from the table, generally only indexables are interesting """ # validate the version self.validate_version() # infer the data kind if not self.infer_axes(): return False if where is not None: raise TypeError("read_column does not currently accept a where " "clause") # find the axes for a in self.axes: if column == a.name: if not a.is_data_indexable: raise ValueError( "column [%s] can not be 
extracted individually; it is " "not data indexable" % column) # column must be an indexable or a data column c = getattr(self.table.cols, column) a.set_info(self.info) return Series(_set_tz(a.convert(c[start:stop], nan_rep=self.nan_rep, encoding=self.encoding ).take_data(), a.tz, True), name=column) raise KeyError("column [%s] not found in the table" % column) class WORMTable(Table): """ a write-once read-many table: this format DOES NOT ALLOW appending to a table. writing is a one-time operation the data are stored in a format that allows for searching the data on disk """ table_type = u('worm') def read(self, **kwargs): """ read the indicies and the indexing array, calculate offset rows and return """ raise NotImplementedError("WORMTable needs to implement read") def write(self, **kwargs): """ write in a format that we can search later on (but cannot append to): write out the indicies and the values using _write_array (e.g. a CArray) create an indexing table so that we can search """ raise NotImplementedError("WORKTable needs to implement write") class LegacyTable(Table): """ an appendable table: allow append/query/delete operations to a (possibily) already existing appendable table this table ALLOWS append (but doesn't require them), and stores the data in a format that can be easily searched """ _indexables = [ IndexCol(name='index', axis=1, pos=0), IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'), DataCol(name='fields', cname='values', kind_attr='fields', pos=2) ] table_type = u('legacy') ndim = 3 def write(self, **kwargs): raise TypeError("write operations are not allowed on legacy tables!") def read(self, where=None, columns=None, **kwargs): """we have n indexable columns, with an arbitrary number of data axes """ if not self.read_axes(where=where, **kwargs): return None factors = [Categorical.from_array(a.values, ordered=True) for a in self.index_axes] levels = [f.categories for f in factors] N = [len(f.categories) for f in factors] labels = [f.codes for f in factors] # compute the key key = _factor_indexer(N[1:], labels) objs = [] if len(unique(key)) == len(key): sorter, _ = algos.groupsort_indexer( com._ensure_int64(key), np.prod(N)) sorter = com._ensure_platform_int(sorter) # create the objs for c in self.values_axes: # the data need to be sorted sorted_values = c.take_data().take(sorter, axis=0) if sorted_values.ndim == 1: sorted_values = sorted_values.reshape((sorted_values.shape[0],1)) take_labels = [l.take(sorter) for l in labels] items = Index(c.values) block = _block2d_to_blocknd( values=sorted_values, placement=np.arange(len(items)), shape=tuple(N), labels=take_labels, ref_items=items) # create the object mgr = BlockManager([block], [items] + levels) obj = self.obj_type(mgr) # permute if needed if self.is_transposed: obj = obj.transpose( *tuple(Series(self.data_orientation).argsort())) objs.append(obj) else: warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5) # reconstruct long_index = MultiIndex.from_arrays( [i.values for i in self.index_axes]) for c in self.values_axes: lp = DataFrame(c.data, index=long_index, columns=c.values) # need a better algorithm tuple_index = long_index._tuple_index unique_tuples = lib.fast_unique(tuple_index.values) unique_tuples = _asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) indexer = com._ensure_platform_int(indexer) new_index = long_index.take(indexer) new_values = lp.values.take(indexer, axis=0) lp = DataFrame(new_values, index=new_index, columns=lp.columns) 
objs.append(lp.to_panel()) # create the composite object if len(objs) == 1: wp = objs[0] else: wp = concat(objs, axis=0, verify_integrity=False).consolidate() # apply the selection filters & axis orderings wp = self.process_axes(wp, columns=columns) return wp class LegacyFrameTable(LegacyTable): """ support the legacy frame table """ pandas_kind = u('frame_table') table_type = u('legacy_frame') obj_type = Panel def read(self, *args, **kwargs): return super(LegacyFrameTable, self).read(*args, **kwargs)['value'] class LegacyPanelTable(LegacyTable): """ support the legacy panel table """ table_type = u('legacy_panel') obj_type = Panel class AppendableTable(LegacyTable): """ suppor the new appendable table formats """ _indexables = None table_type = u('appendable') def write(self, obj, axes=None, append=False, complib=None, complevel=None, fletcher32=None, min_itemsize=None, chunksize=None, expectedrows=None, dropna=False, **kwargs): if not append and self.is_exists: self._handle.remove_node(self.group, 'table') # create the axes self.create_axes(axes=axes, obj=obj, validate=append, min_itemsize=min_itemsize, **kwargs) for a in self.axes: a.validate(self, append) if not self.is_exists: # create the table options = self.create_description(complib=complib, complevel=complevel, fletcher32=fletcher32, expectedrows=expectedrows) # set the table attributes self.set_attrs() # create the table table = self._handle.create_table(self.group, **options) else: table = self.table # update my info self.set_info() # validate the axes and set the kinds for a in self.axes: a.validate_and_set(self, append) # add the rows self.write_data(chunksize, dropna=dropna) def write_data(self, chunksize, dropna=False): """ we form the data into a 2-d including indexes,values,mask write chunk-by-chunk """ names = self.dtype.names nrows = self.nrows_expected # if dropna==True, then drop ALL nan rows if dropna: masks = [] for a in self.values_axes: # figure the mask: only do if we can successfully process this # column, otherwise ignore the mask mask = com.isnull(a.data).all(axis=0) masks.append(mask.astype('u1', copy=False)) # consolidate masks mask = masks[0] for m in masks[1:]: mask = mask & m mask = mask.ravel() else: mask = None # broadcast the indexes if needed indexes = [a.cvalues for a in self.index_axes] nindexes = len(indexes) bindexes = [] for i, idx in enumerate(indexes): # broadcast to all other indexes except myself if i > 0 and i < nindexes: repeater = np.prod( [indexes[bi].shape[0] for bi in range(0, i)]) idx = np.tile(idx, repeater) if i < nindexes - 1: repeater = np.prod([indexes[bi].shape[0] for bi in range(i + 1, nindexes)]) idx = np.repeat(idx, repeater) bindexes.append(idx) # transpose the values so first dimension is last # reshape the values if needed values = [a.take_data() for a in self.values_axes] values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values] bvalues = [] for i, v in enumerate(values): new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape bvalues.append(values[i].reshape(new_shape)) # write the chunks if chunksize is None: chunksize = 100000 rows = np.empty(min(chunksize,nrows), dtype=self.dtype) chunks = int(nrows / chunksize) + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break self.write_data_chunk( rows, indexes=[a[start_i:end_i] for a in bindexes], mask=mask[start_i:end_i] if mask is not None else None, values=[v[start_i:end_i] for v in bvalues]) def write_data_chunk(self, rows, indexes, 
mask, values): """ Parameters ---------- rows : an empty memory space where we are putting the chunk indexes : an array of the indexes mask : an array of the masks values : an array of the values """ # 0 len for v in values: if not np.prod(v.shape): return try: nrows = indexes[0].shape[0] if nrows != len(rows): rows = np.empty(nrows, dtype=self.dtype) names = self.dtype.names nindexes = len(indexes) # indexes for i, idx in enumerate(indexes): rows[names[i]] = idx # values for i, v in enumerate(values): rows[names[i + nindexes]] = v # mask if mask is not None: m = ~mask.ravel().astype(bool, copy=False) if not m.all(): rows = rows[m] except Exception as detail: raise Exception("cannot create row-data -> %s" % detail) try: if len(rows): self.table.append(rows) self.table.flush() except Exception as detail: raise TypeError("tables cannot write this data -> %s" % detail) def delete(self, where=None, start=None, stop=None, **kwargs): # delete all rows (and return the nrows) if where is None or not len(where): if start is None and stop is None: nrows = self.nrows self._handle.remove_node(self.group, recursive=True) else: # pytables<3.0 would remove a single row with stop=None if stop is None: stop = self.nrows nrows = self.table.remove_rows(start=start, stop=stop) self.table.flush() return nrows # infer the data kind if not self.infer_axes(): return None # create the selection table = self.table self.selection = Selection(self, where, start=start, stop=stop, **kwargs) values = self.selection.select_coords() # delete the rows in reverse order l = Series(values).sort_values() ln = len(l) if ln: # construct groups of consecutive rows diff = l.diff() groups = list(diff[diff > 1].index) # 1 group if not len(groups): groups = [0] # final element if groups[-1] != ln: groups.append(ln) # initial element if groups[0] != 0: groups.insert(0, 0) # we must remove in reverse order! 
pg = groups.pop() for g in reversed(groups): rows = l.take(lrange(g, pg)) table.remove_rows(start=rows[rows.index[0] ], stop=rows[rows.index[-1]] + 1) pg = g self.table.flush() # return the number of rows removed return ln class AppendableFrameTable(AppendableTable): """ suppor the new appendable table formats """ pandas_kind = u('frame_table') table_type = u('appendable_frame') ndim = 2 obj_type = DataFrame @property def is_transposed(self): return self.index_axes[0].axis == 1 def get_object(self, obj): """ these are written transposed """ if self.is_transposed: obj = obj.T return obj def read(self, where=None, columns=None, **kwargs): if not self.read_axes(where=where, **kwargs): return None info = (self.info.get(self.non_index_axes[0][0], dict()) if len(self.non_index_axes) else dict()) index = self.index_axes[0].values frames = [] for a in self.values_axes: # we could have a multi-index constructor here # _ensure_index doesn't recognized our list-of-tuples here if info.get('type') == 'MultiIndex': cols = MultiIndex.from_tuples(a.values) else: cols = Index(a.values) names = info.get('names') if names is not None: cols.set_names(names, inplace=True) if self.is_transposed: values = a.cvalues index_ = cols cols_ = Index(index, name=getattr(index, 'name', None)) else: values = a.cvalues.T index_ = Index(index, name=getattr(index, 'name', None)) cols_ = cols # if we have a DataIndexableCol, its shape will only be 1 dim if values.ndim == 1 and isinstance(values, np.ndarray): values = values.reshape((1, values.shape[0])) block = make_block(values, placement=np.arange(len(cols_))) mgr = BlockManager([block], [cols_, index_]) frames.append(DataFrame(mgr)) if len(frames) == 1: df = frames[0] else: df = concat(frames, axis=1, verify_integrity=False).consolidate() # apply the selection filters & axis orderings df = self.process_axes(df, columns=columns) return df class AppendableSeriesTable(AppendableFrameTable): """ support the new appendable table formats """ pandas_kind = u('series_table') table_type = u('appendable_series') ndim = 2 obj_type = Series storage_obj_type = DataFrame @property def is_transposed(self): return False def get_object(self, obj): return obj def write(self, obj, data_columns=None, **kwargs): """ we are going to write this as a frame table """ if not isinstance(obj, DataFrame): name = obj.name or 'values' obj = DataFrame({name: obj}, index=obj.index) obj.columns = [name] return super(AppendableSeriesTable, self).write( obj=obj, data_columns=obj.columns, **kwargs) def read(self, columns=None, **kwargs): is_multi_index = self.is_multi_index if columns is not None and is_multi_index: for n in self.levels: if n not in columns: columns.insert(0, n) s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs) if is_multi_index: s.set_index(self.levels, inplace=True) s = s.iloc[:, 0] # remove the default name if s.name == 'values': s.name = None return s class AppendableMultiSeriesTable(AppendableSeriesTable): """ support the new appendable table formats """ pandas_kind = u('series_table') table_type = u('appendable_multiseries') def write(self, obj, **kwargs): """ we are going to write this as a frame table """ name = obj.name or 'values' obj, self.levels = self.validate_multiindex(obj) cols = list(self.levels) cols.append(name) obj.columns = cols return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs) class GenericTable(AppendableFrameTable): """ a table that read/writes the generic pytables table format """ pandas_kind = u('frame_table') table_type = 
u('generic_table') ndim = 2 obj_type = DataFrame @property def pandas_type(self): return self.pandas_kind @property def storable(self): return getattr(self.group, 'table', None) or self.group def get_attrs(self): """ retrieve our attributes """ self.non_index_axes = [] self.nan_rep = None self.levels = [] self.index_axes = [a.infer(self) for a in self.indexables if a.is_an_indexable] self.values_axes = [a.infer(self) for a in self.indexables if not a.is_an_indexable] self.data_columns = [a.name for a in self.values_axes] @property def indexables(self): """ create the indexables from the table description """ if self._indexables is None: d = self.description # the index columns is just a simple index self._indexables = [GenericIndexCol(name='index', axis=0)] for i, n in enumerate(d._v_names): dc = GenericDataIndexableCol( name=n, pos=i, values=[n], version=self.version) self._indexables.append(dc) return self._indexables def write(self, **kwargs): raise NotImplementedError("cannot write on an generic table") class AppendableMultiFrameTable(AppendableFrameTable): """ a frame with a multi-index """ table_type = u('appendable_multiframe') obj_type = DataFrame ndim = 2 _re_levels = re.compile("^level_\d+$") @property def table_type_short(self): return u('appendable_multi') def write(self, obj, data_columns=None, **kwargs): if data_columns is None: data_columns = [] elif data_columns is True: data_columns = obj.columns[:] obj, self.levels = self.validate_multiindex(obj) for n in self.levels: if n not in data_columns: data_columns.insert(0, n) return super(AppendableMultiFrameTable, self).write( obj=obj, data_columns=data_columns, **kwargs) def read(self, **kwargs): df = super(AppendableMultiFrameTable, self).read(**kwargs) df = df.set_index(self.levels) # remove names for 'level_%d' df.index = df.index.set_names([ None if self._re_levels.search(l) else l for l in df.index.names ]) return df class AppendablePanelTable(AppendableTable): """ suppor the new appendable table formats """ table_type = u('appendable_panel') ndim = 3 obj_type = Panel def get_object(self, obj): """ these are written transposed """ if self.is_transposed: obj = obj.transpose(*self.data_orientation) return obj @property def is_transposed(self): return self.data_orientation != tuple(range(self.ndim)) class AppendableNDimTable(AppendablePanelTable): """ suppor the new appendable table formats """ table_type = u('appendable_ndim') ndim = 4 obj_type = Panel4D def _reindex_axis(obj, axis, labels, other=None): ax = obj._get_axis(axis) labels = _ensure_index(labels) # try not to reindex even if other is provided # if it equals our current index if other is not None: other = _ensure_index(other) if (other is None or labels.equals(other)) and labels.equals(ax): return obj labels = _ensure_index(labels.unique()) if other is not None: labels = labels & _ensure_index(other.unique()) if not labels.equals(ax): slicer = [slice(None, None)] * obj.ndim slicer[axis] = labels obj = obj.loc[tuple(slicer)] return obj def _get_info(info, name): """ get/create the info for this name """ try: idx = info[name] except: idx = info[name] = dict() return idx ### tz to/from coercion ### def _get_tz(tz): """ for a tz-aware type, return an encoded zone """ zone = tslib.get_timezone(tz) if zone is None: zone = tslib.tot_seconds(tz.utcoffset()) return zone def _set_tz(values, tz, preserve_UTC=False, coerce=False): """ coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible Parameters ---------- values : ndarray tz : 
string/pickled tz object preserve_UTC : boolean, preserve the UTC of the result coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray """ if tz is not None: values = values.ravel() tz = tslib.get_timezone(_ensure_decoded(tz)) values = DatetimeIndex(values) if values.tz is None: values = values.tz_localize('UTC').tz_convert(tz) if preserve_UTC: if tz == 'UTC': values = list(values) elif coerce: values = np.asarray(values, dtype='M8[ns]') return values def _convert_index(index, encoding=None, format_type=None): index_name = getattr(index, 'name', None) if isinstance(index, DatetimeIndex): converted = index.asi8 return IndexCol(converted, 'datetime64', _tables().Int64Col(), freq=getattr(index, 'freq', None), tz=getattr(index, 'tz', None), index_name=index_name) elif isinstance(index, TimedeltaIndex): converted = index.asi8 return IndexCol(converted, 'timedelta64', _tables().Int64Col(), freq=getattr(index, 'freq', None), index_name=index_name) elif isinstance(index, (Int64Index, PeriodIndex)): atom = _tables().Int64Col() return IndexCol( index.values, 'integer', atom, freq=getattr(index, 'freq', None), index_name=index_name) if isinstance(index, MultiIndex): raise TypeError('MultiIndex not supported here!') inferred_type = lib.infer_dtype(index) values = np.asarray(index) if inferred_type == 'datetime64': converted = values.view('i8') return IndexCol(converted, 'datetime64', _tables().Int64Col(), freq=getattr(index, 'freq', None), tz=getattr(index, 'tz', None), index_name=index_name) elif inferred_type == 'timedelta64': converted = values.view('i8') return IndexCol(converted, 'timedelta64', _tables().Int64Col(), freq=getattr(index, 'freq', None), index_name=index_name) elif inferred_type == 'datetime': converted = np.asarray([(time.mktime(v.timetuple()) + v.microsecond / 1E6) for v in values], dtype=np.float64) return IndexCol(converted, 'datetime', _tables().Time64Col(), index_name=index_name) elif inferred_type == 'date': converted = np.asarray([v.toordinal() for v in values], dtype=np.int32) return IndexCol(converted, 'date', _tables().Time32Col(), index_name=index_name) elif inferred_type == 'string': # atom = _tables().ObjectAtom() # return np.asarray(values, dtype='O'), 'object', atom converted = _convert_string_array(values, encoding) itemsize = converted.dtype.itemsize return IndexCol( converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize, index_name=index_name ) elif inferred_type == 'unicode': if format_type == 'fixed': atom = _tables().ObjectAtom() return IndexCol(np.asarray(values, dtype='O'), 'object', atom, index_name=index_name) raise TypeError( "[unicode] is not supported as a in index type for [{0}] formats" .format(format_type) ) elif inferred_type == 'integer': # take a guess for now, hope the values fit atom = _tables().Int64Col() return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom, index_name=index_name) elif inferred_type == 'floating': atom = _tables().Float64Col() return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom, index_name=index_name) else: # pragma: no cover atom = _tables().ObjectAtom() return IndexCol(np.asarray(values, dtype='O'), 'object', atom, index_name=index_name) def _unconvert_index(data, kind, encoding=None): kind = _ensure_decoded(kind) if kind == u('datetime64'): index = DatetimeIndex(data) elif kind == u('timedelta64'): index = TimedeltaIndex(data) elif kind == u('datetime'): index = np.asarray([datetime.fromtimestamp(v) for v in data], dtype=object) elif kind == u('date'): try: index = 
np.asarray( [date.fromordinal(v) for v in data], dtype=object) except (ValueError): index = np.asarray( [date.fromtimestamp(v) for v in data], dtype=object) elif kind in (u('integer'), u('float')): index = np.asarray(data) elif kind in (u('string')): index = _unconvert_string_array(data, nan_rep=None, encoding=encoding) elif kind == u('object'): index = np.asarray(data[0]) else: # pragma: no cover raise ValueError('unrecognized index type %s' % kind) return index def _unconvert_index_legacy(data, kind, legacy=False, encoding=None): kind = _ensure_decoded(kind) if kind == u('datetime'): index = lib.time64_to_datetime(data) elif kind in (u('integer')): index = np.asarray(data, dtype=object) elif kind in (u('string')): index = _unconvert_string_array(data, nan_rep=None, encoding=encoding) else: # pragma: no cover raise ValueError('unrecognized index type %s' % kind) return index def _convert_string_array(data, encoding, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode(encoding).values.reshape(data.shape) # create the sized dtype if itemsize is None: itemsize = lib.max_len_string_array(com._ensure_object(data.ravel())) data = np.asarray(data, dtype="S%d" % itemsize) return data def _unconvert_string_array(data, nan_rep=None, encoding=None): """ inverse of _convert_string_array Parameters ---------- data : fixed length string dtyped array nan_rep : the storage repr of NaN, optional encoding : the encoding of the data, optional Returns ------- an object array of the decoded data """ shape = data.shape data = np.asarray(data.ravel(), dtype=object) # guard against a None encoding in PY3 (because of a legacy # where the passed encoding is actually None) encoding = _ensure_encoding(encoding) if encoding is not None and len(data): itemsize = lib.max_len_string_array(com._ensure_object(data)) if compat.PY3: dtype = "U{0}".format(itemsize) else: dtype = "S{0}".format(itemsize) if isinstance(data[0], compat.binary_type): data = Series(data).str.decode(encoding).values else: data = data.astype(dtype, copy=False).astype(object, copy=False) if nan_rep is None: nan_rep = 'nan' data = lib.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape) def _maybe_convert(values, val_kind, encoding): if _need_convert(val_kind): conv = _get_converter(val_kind, encoding) # conv = np.frompyfunc(conv, 1, 1) values = conv(values) return values def _get_converter(kind, encoding): kind = _ensure_decoded(kind) if kind == 'datetime64': return lambda x: np.asarray(x, dtype='M8[ns]') elif kind == 'datetime': return lib.convert_timestamps elif kind == 'string': return lambda x: _unconvert_string_array(x, encoding=encoding) else: # pragma: no cover raise ValueError('invalid kind %s' % kind) def _need_convert(kind): kind = _ensure_decoded(kind) if kind in (u('datetime'), u('datetime64'), u('string')): return True return False class Selection(object): """ Carries out a selection operation on a tables.Table object. 
Parameters ---------- table : a Table object where : list of Terms (or convertable to) start, stop: indicies to start and/or stop selection """ def __init__(self, table, where=None, start=None, stop=None, **kwargs): self.table = table self.where = where self.start = start self.stop = stop self.condition = None self.filter = None self.terms = None self.coordinates = None if com.is_list_like(where): # see if we have a passed coordinate like try: inferred = lib.infer_dtype(where) if inferred == 'integer' or inferred == 'boolean': where = np.asarray(where) if where.dtype == np.bool_: start, stop = self.start, self.stop if start is None: start = 0 if stop is None: stop = self.table.nrows self.coordinates = np.arange(start, stop)[where] elif issubclass(where.dtype.type, np.integer): if ((self.start is not None and (where < self.start).any()) or (self.stop is not None and (where >= self.stop).any())): raise ValueError( "where must have index locations >= start and " "< stop" ) self.coordinates = where except: pass if self.coordinates is None: self.terms = self.generate(where) # create the numexpr & the filter if self.terms is not None: self.condition, self.filter = self.terms.evaluate() def generate(self, where): """ where can be a : dict,list,tuple,string """ if where is None: return None q = self.table.queryables() try: return Expr(where, queryables=q, encoding=self.table.encoding) except NameError as detail: # raise a nice message, suggesting that the user should use # data_columns raise ValueError( "The passed where expression: {0}\n" " contains an invalid variable reference\n" " all of the variable refrences must be a " "reference to\n" " an axis (e.g. 'index' or 'columns'), or a " "data_column\n" " The currently defined references are: {1}\n" .format(where, ','.join(q.keys())) ) def select(self): """ generate the selection """ if self.condition is not None: return self.table.table.read_where(self.condition.format(), start=self.start, stop=self.stop) elif self.coordinates is not None: return self.table.table.read_coordinates(self.coordinates) return self.table.table.read(start=self.start, stop=self.stop) def select_coords(self): """ generate the selection """ start, stop = self.start, self.stop nrows = self.table.nrows if start is None: start = 0 elif start < 0: start += nrows if self.stop is None: stop = nrows elif stop < 0: stop += nrows if self.condition is not None: return self.table.table.get_where_list(self.condition.format(), start=start, stop=stop, sort=True) elif self.coordinates is not None: return self.coordinates return np.arange(start, stop) # utilities ### def timeit(key, df, fn=None, remove=True, **kwargs): if fn is None: fn = 'timeit.h5' store = HDFStore(fn, mode='w') store.append(key, df, **kwargs) store.close() if remove: os.remove(fn)
mit
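As an aside to the HDFStore table machinery in the pandas file above (Table, AppendableFrameTable, Selection): these classes are normally driven through the public append/select API rather than instantiated directly. A minimal, hedged usage sketch follows; the file name, key, and column names are illustrative assumptions, not values taken from the code above, and running it requires the PyTables package.

import numpy as np
import pandas as pd

# Hypothetical frame and file name, for illustration only.
df = pd.DataFrame({'A': np.random.randn(100),
                   'B': np.arange(100)})

with pd.HDFStore('example.h5', mode='w') as store:
    # append() in table format routes through AppendableFrameTable.write();
    # entries in data_columns become individually queryable columns.
    store.append('df', df, data_columns=['A'])

    # A where string is compiled by Selection/Expr and evaluated by PyTables.
    subset = store.select('df', where='A > 0')

    # create_table_index() maps to Table.create_index() on the data columns.
    store.create_table_index('df', columns=['A'], optlevel=6, kind='medium')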
glidernet/ogn-python
app/main/matplotlib_service.py
2
1400
from app import db
from app.model import DirectionStatistic

import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure


def create_range_figure2(sender_id):
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    xs = range(100)
    ys = [random.randint(1, 50) for x in xs]
    axis.plot(xs, ys)

    return fig


def create_range_figure(sender_id):
    sds = db.session.query(DirectionStatistic) \
        .filter(DirectionStatistic.sender_id == sender_id) \
        .order_by(DirectionStatistic.directions_count.desc()) \
        .limit(1) \
        .one()

    fig = Figure()

    direction_data = sds.direction_data
    max_range = max([r['max_range'] / 1000.0 for r in direction_data])

    theta = np.array([i['direction'] / 180 * np.pi for i in direction_data])
    radii = np.array([i['max_range'] / 1000 if i['max_range'] > 0 else 0 for i in direction_data])
    width = np.array([13 / 180 * np.pi for i in direction_data])
    colors = plt.cm.viridis(radii / max_range)

    ax = fig.add_subplot(111, projection='polar')
    ax.bar(theta, radii, width=width, bottom=0.0, color=colors, edgecolor='b', alpha=0.5)
    #ax.set_rticks([0, 25, 50, 75, 100, 125, 150])
    ax.set_theta_zero_location("N")
    ax.set_theta_direction(-1)

    fig.suptitle(f"Range between sender '{sds.sender.name}' and receiver '{sds.receiver.name}'")

    return fig
agpl-3.0
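The create_range_figure() helper above returns a bare matplotlib Figure and never touches pyplot's global figure state, which makes it straightforward to rasterize per request. A hedged sketch of one way such a figure might be served follows; the blueprint and route names are assumptions for illustration, not code from the repository.

import io

from flask import Blueprint, Response
from matplotlib.backends.backend_agg import FigureCanvasAgg

from app.main.matplotlib_service import create_range_figure

plots_bp = Blueprint('plots', __name__)          # hypothetical blueprint


@plots_bp.route('/senders/<int:sender_id>/range.png')
def sender_range_png(sender_id):
    fig = create_range_figure(sender_id)         # function shown in the module above
    buf = io.BytesIO()
    FigureCanvasAgg(fig).print_png(buf)          # render to PNG without pyplot state
    return Response(buf.getvalue(), mimetype='image/png')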
sanketloke/scikit-learn
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
85
5728
"""
============================================
Scalability of Approximate Nearest Neighbors
============================================

This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.

The first plot demonstrates the relationship between query time and index
size of LSHForest. Query time is compared with the brute force method in
exact nearest neighbor search for the same index sizes. The brute force
queries have a very predictable linear scalability with the index (full
scan). The LSHForest index has a sub-linear scalability profile but can be
slower for small datasets.

The second plot shows the speedup when using approximate queries vs
brute force exact queries. The speedup tends to increase with the dataset
size but should reach a plateau typically when doing queries on datasets
with millions of samples and a few hundreds of dimensions. Higher
dimensional datasets tend to benefit more from LSHForest indexing.

The break even point (speedup = 1) depends on the dimensionality and
structure of the indexed data and the parameters of the LSHForest index.

The precision of approximate queries should decrease slowly with the
dataset size. The speed of the decrease depends mostly on the LSHForest
parameters and the dimensionality of the data.
"""
from __future__ import division
print(__doc__)

# Authors: Maheshakya Wijewardena <[email protected]>
#          Olivier Grisel <[email protected]>
#
# License: BSD 3 clause

###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt

# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5

# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(np.int)

# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers,
                         shuffle=True, random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]

# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []

# Calculate the average query time
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []

    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[[rng.randint(0, n_queries)]]

        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query,
                                          return_distance=False)
        time_exact.append(time.time() - t0)

        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query,
                                           return_distance=False)
        time_approx.append(time.time() - t0)

        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())

    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx,
           average_speedup, mean_accuracy, std_accuracy))

    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))

# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")

# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")

# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")

plt.show()
bsd-3-clause
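The accuracy column printed by the scalability study above is precision@10 for a single random query per iteration, measured with np.in1d against the brute-force result. As a side note, here is a small sketch of the same metric generalized to a batch of queries; the helper name and variables are illustrative and not part of the scikit-learn example.

import numpy as np


def precision_at_k(approx_neighbors, exact_neighbors):
    """Average fraction of exact k-NN indices recovered per query.

    Both inputs are (n_queries, k) integer arrays of neighbor indices,
    e.g. the outputs of lshf.kneighbors(...) and nbrs.kneighbors(...).
    """
    per_query = [np.in1d(approx, exact).mean()
                 for approx, exact in zip(approx_neighbors, exact_neighbors)]
    return float(np.mean(per_query))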
sdss/marvin
tests/tools/test_quantities.py
1
10522
#!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews # @Date: 2018-07-20 # @Filename: test_quantities.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) # # @Last modified by: andrews # @Last modified time: 2018-10-19 14:10:15 import matplotlib import numpy import pytest from astropy import units as u from tests import marvin_test_if from marvin.tools.quantities import DataCube, Spectrum spaxel_unit = u.Unit('spaxel', represents=u.pixel, doc='A spectral pixel', parse_strict='silent') @pytest.fixture(scope='function') def datacube(): """Produces a simple 3D array for datacube testing.""" flux = numpy.tile([numpy.arange(1, 1001, dtype=numpy.float32)], (100, 1)).T.reshape(1000, 10, 10) ivar = (1. / (flux / 100))**2 mask = numpy.zeros(flux.shape, dtype=numpy.int) wave = numpy.arange(1, 1001) redcorr = numpy.ones(1000) * 1.5 mask[50:100, 5, 5] = 2**10 mask[500:600, 3, 3] = 2**4 scale = 1e-3 datacube = DataCube(flux, wave, ivar=ivar, mask=mask, redcorr=redcorr, scale=scale, unit=u.erg / u.s / (u.cm ** 2) / u.Angstrom / spaxel_unit, pixmask_flag='MANGA_DRP3PIXMASK') yield datacube @pytest.fixture(scope='function') def spectrum(): """Produces a simple 1D array for datacube testing.""" flux = numpy.arange(1, 1001, dtype=numpy.float32) ivar = (1. / (flux / 100))**2 mask = numpy.zeros(flux.shape, dtype=numpy.int) wave = numpy.arange(1, 1001) mask[50:100] = 2**10 mask[500:600] = 2**4 scale = 1e-3 datacube = Spectrum(flux, wave, ivar=ivar, mask=mask, scale=scale, unit=u.erg / u.s / (u.cm ** 2) / u.Angstrom / spaxel_unit, pixmask_flag='MANGA_DRP3PIXMASK') yield datacube class TestDataCube(object): def test_datacube(self, datacube): assert datacube.value is not None assert datacube.ivar is not None assert datacube.mask is not None numpy.testing.assert_array_equal(datacube.value.shape, datacube.ivar.shape) numpy.testing.assert_array_equal(datacube.value.shape, datacube.mask.shape) assert datacube.pixmask is not None def test_masked(self, datacube): assert isinstance(datacube.masked, numpy.ma.MaskedArray) assert numpy.sum(datacube.masked.mask) == 50 datacube.pixmask_flag = None assert numpy.sum(datacube.masked.mask) == 150 def test_snr(self, datacube): assert datacube.snr[100, 5, 5] == pytest.approx(100) def test_error(self, datacube): numpy.testing.assert_almost_equal(datacube.error.value, numpy.sqrt(1 / datacube.ivar)) assert datacube.error.unit == datacube.unit numpy.testing.assert_almost_equal(datacube.error.value, datacube.std.value) def test_descale(self, datacube): assert datacube.unit.scale == 1e-3 descaled = datacube.descale() datacube.unit.scale == 1 numpy.testing.assert_almost_equal(descaled.value, datacube.value * datacube.unit.scale) numpy.testing.assert_almost_equal(descaled.ivar, datacube.ivar / datacube.unit.scale**2) def test_redcorr(self, datacube): der = datacube.deredden() assert isinstance(der, DataCube) numpy.testing.assert_allclose(der.value, datacube.value * 1.5) numpy.testing.assert_allclose(der.ivar, datacube.ivar / 1.5**2) numpy.testing.assert_allclose(der.mask, datacube.mask) assert der.redcorr is None assert der.pixmask_flag == datacube.pixmask_flag new_redcorr = (numpy.ones(1000) * 2.) 
new_der = datacube.deredden(redcorr=new_redcorr) numpy.testing.assert_allclose(new_der.value, datacube.value * 2) numpy.testing.assert_allclose(new_der.ivar, datacube.ivar / 2**2) datacube.redcorr = None with pytest.raises(ValueError): datacube.deredden() def test_slice_datacube(self, datacube): new_datacube = datacube[:, 3:5, 3:5] assert isinstance(new_datacube, DataCube) numpy.testing.assert_almost_equal(new_datacube.value, datacube.value[:, 3:5, 3:5]) numpy.testing.assert_almost_equal(new_datacube.ivar, datacube.ivar[:, 3:5, 3:5]) numpy.testing.assert_almost_equal(new_datacube.mask, datacube.mask[:, 3:5, 3:5]) numpy.testing.assert_almost_equal(new_datacube.redcorr, datacube.redcorr) assert new_datacube.pixmask_flag == datacube.pixmask_flag def test_slice_wave(self, datacube): new_datacube = datacube[10:100] assert isinstance(new_datacube, DataCube) numpy.testing.assert_almost_equal(new_datacube.value, datacube.value[10:100, :, :]) numpy.testing.assert_almost_equal(new_datacube.ivar, datacube.ivar[10:100, :, :]) numpy.testing.assert_almost_equal(new_datacube.mask, datacube.mask[10:100, :, :]) numpy.testing.assert_almost_equal(new_datacube.redcorr, datacube.redcorr[10:100]) assert new_datacube.pixmask_flag == datacube.pixmask_flag def test_slice_spectrum(self, datacube): new_spectrum = datacube[:, 5, 5] assert isinstance(new_spectrum, Spectrum) numpy.testing.assert_almost_equal(new_spectrum.value, datacube.value[:, 5, 5]) numpy.testing.assert_almost_equal(new_spectrum.ivar, datacube.ivar[:, 5, 5]) numpy.testing.assert_almost_equal(new_spectrum.mask, datacube.mask[:, 5, 5]) assert new_spectrum.pixmask_flag == datacube.pixmask_flag @marvin_test_if(mark='include', cube={'plateifu': '8485-1901', 'data_origin': 'file', 'initial_mode': 'local'}) def test_cube_quantities(self, cube): assert cube.flux is not None assert isinstance(cube.flux, numpy.ndarray) assert isinstance(cube.flux, DataCube) assert isinstance(cube.spectral_resolution, Spectrum) if cube.release in ['MPL-4', 'MPL-5']: with pytest.raises(AssertionError) as ee: cube.spectral_resolution_prepixel assert 'spectral_resolution_prepixel is not present in his MPL version' in str(ee) else: assert isinstance(cube.spectral_resolution_prepixel, Spectrum) assert cube.flux.pixmask.values_to_bits(3) == [0, 1] assert cube.flux.pixmask.values_to_labels(3) == ['NOCOV', 'LOWCOV'] @pytest.mark.parametrize('names, expected', [(['NOCOV', 'LOWCOV'], 3), ('DONOTUSE', 1024)]) def test_labels_to_value(self, cube, names, expected): assert cube.flux.pixmask.labels_to_value(names) == expected @marvin_test_if(mark='include', modelcube={'plateifu': '8485-1901', 'data_origin': 'file', 'initial_mode': 'local'}) def test_modelcube_quantities(self, modelcube): for mc in modelcube.datamodel: if hasattr(modelcube, mc.name): modelcube_quantity = getattr(modelcube, mc.name) assert isinstance(modelcube_quantity, DataCube) assert modelcube_quantity.pixmask_flag == 'MANGA_DAPSPECMASK' class TestSpectrum(object): def test_spectrum(self, spectrum): assert spectrum.value is not None assert spectrum.ivar is not None assert spectrum.mask is not None numpy.testing.assert_array_equal(spectrum.value.shape, spectrum.ivar.shape) numpy.testing.assert_array_equal(spectrum.value.shape, spectrum.mask.shape) assert spectrum.pixmask is not None def test_masked(self, spectrum): assert isinstance(spectrum.masked, numpy.ma.MaskedArray) assert numpy.sum(spectrum.masked.mask) == 50 spectrum.pixmask_flag = None assert numpy.sum(spectrum.masked.mask) == 150 def test_snr(self, spectrum): assert 
spectrum.snr[100] == pytest.approx(100) def test_error(self, spectrum): numpy.testing.assert_almost_equal(spectrum.error.value, numpy.sqrt(1 / spectrum.ivar)) assert spectrum.error.unit == spectrum.unit numpy.testing.assert_almost_equal(spectrum.error.value, spectrum.std.value) def test_descale(self, spectrum): assert spectrum.unit.scale == 1e-3 descaled = spectrum.descale() spectrum.unit.scale == 1 numpy.testing.assert_almost_equal(descaled.value, spectrum.value * spectrum.unit.scale) numpy.testing.assert_almost_equal(descaled.ivar, spectrum.ivar / spectrum.unit.scale**2) def test_slice_spectrum(self, spectrum): new_spectrum = spectrum[10:100] assert isinstance(new_spectrum, Spectrum) numpy.testing.assert_almost_equal(new_spectrum.value, spectrum.value[10:100]) numpy.testing.assert_almost_equal(new_spectrum.ivar, spectrum.ivar[10:100]) numpy.testing.assert_almost_equal(new_spectrum.mask, spectrum.mask[10:100]) assert new_spectrum.pixmask_flag == spectrum.pixmask_flag @marvin_test_if(mark='include', cube={'plateifu': '8485-1901', 'data_origin': 'file', 'initial_mode': 'local'}) def test_cube_quantities(self, cube): for sp in cube.datamodel.spectra: cube_quantity = getattr(cube, sp.name) assert isinstance(cube_quantity, Spectrum) assert cube_quantity.pixmask_flag is None def test_plot(self, spectrum): ax = spectrum.plot(show_std=True) assert isinstance(ax, matplotlib.axes.Axes) def test_plot_no_std_no_mask(self): sp = Spectrum(numpy.random.randn(1000), wavelength=numpy.arange(1000)) sp.plot() def test_plot_no_std(self): mask = numpy.zeros(1000, dtype=numpy.int) mask[50:100] = 2**10 mask[500:600] = 2**4 sp = Spectrum( flux=numpy.random.randn(1000), wavelength=numpy.arange(1000), mask=mask, pixmask_flag='MANGA_DRP3PIXMASK', ) sp.plot() def test_plot_no_mask(self): flux = numpy.random.randn(1000) ivar = (1. / (flux / 100))**2 sp = Spectrum( flux=flux, wavelength=numpy.arange(1000), ivar=ivar, ) sp.plot()
bsd-3-clause
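The DataCube/Spectrum tests above assert simple inverse-variance arithmetic: dereddening multiplies the flux by the correction factor and divides the inverse variance by its square, and the 1-sigma error is sqrt(1/ivar). A minimal numpy-only sketch of that arithmetic; the array shapes and the factor of 2 are illustrative, not part of the marvin API.

import numpy as np

# Illustrative flux cube and inverse variance; shapes and values are made up.
value = np.random.rand(10, 5, 5)      # flux
ivar = np.full_like(value, 4.0)       # inverse variance
redcorr = 2.0                         # reddening correction factor

# Dereddening scales the flux up and the inverse variance down by the square.
value_der = value * redcorr
ivar_der = ivar / redcorr ** 2

# The 1-sigma error is the reciprocal square root of the inverse variance.
error = np.sqrt(1.0 / ivar)

np.testing.assert_allclose(value_der, value * 2)
np.testing.assert_allclose(ivar_der, ivar / 4)
np.testing.assert_allclose(error, 0.5)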
ishank08/scikit-learn
sklearn/cluster/tests/test_spectral.py
72
7950
"""Testing for Spectral Clustering methods""" from sklearn.externals.six.moves import cPickle dumps, loads = cPickle.dumps, cPickle.loads import numpy as np from scipy import sparse from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_warns_message from sklearn.cluster import SpectralClustering, spectral_clustering from sklearn.cluster.spectral import spectral_embedding from sklearn.cluster.spectral import discretize from sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel from sklearn.datasets.samples_generator import make_blobs def test_spectral_clustering(): S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]) for eigen_solver in ('arpack', 'lobpcg'): for assign_labels in ('kmeans', 'discretize'): for mat in (S, sparse.csr_matrix(S)): model = SpectralClustering(random_state=0, n_clusters=2, affinity='precomputed', eigen_solver=eigen_solver, assign_labels=assign_labels ).fit(mat) labels = model.labels_ if labels[0] == 0: labels = 1 - labels assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0]) model_copy = loads(dumps(model)) assert_equal(model_copy.n_clusters, model.n_clusters) assert_equal(model_copy.eigen_solver, model.eigen_solver) assert_array_equal(model_copy.labels_, model.labels_) def test_spectral_amg_mode(): # Test the amg mode of SpectralClustering centers = np.array([ [0., 0., 0.], [10., 10., 10.], [20., 20., 20.], ]) X, true_labels = make_blobs(n_samples=100, centers=centers, cluster_std=1., random_state=42) D = pairwise_distances(X) # Distance matrix S = np.max(D) - D # Similarity matrix S = sparse.coo_matrix(S) try: from pyamg import smoothed_aggregation_solver amg_loaded = True except ImportError: amg_loaded = False if amg_loaded: labels = spectral_clustering(S, n_clusters=len(centers), random_state=0, eigen_solver="amg") # We don't care too much that it's good, just that it *worked*. # There does have to be some lower limit on the performance though. assert_greater(np.mean(labels == true_labels), .3) else: assert_raises(ValueError, spectral_embedding, S, n_components=len(centers), random_state=0, eigen_solver="amg") def test_spectral_unknown_mode(): # Test that SpectralClustering fails with an unknown mode set. centers = np.array([ [0., 0., 0.], [10., 10., 10.], [20., 20., 20.], ]) X, true_labels = make_blobs(n_samples=100, centers=centers, cluster_std=1., random_state=42) D = pairwise_distances(X) # Distance matrix S = np.max(D) - D # Similarity matrix S = sparse.coo_matrix(S) assert_raises(ValueError, spectral_clustering, S, n_clusters=2, random_state=0, eigen_solver="<unknown>") def test_spectral_unknown_assign_labels(): # Test that SpectralClustering fails with an unknown assign_labels set. 
centers = np.array([ [0., 0., 0.], [10., 10., 10.], [20., 20., 20.], ]) X, true_labels = make_blobs(n_samples=100, centers=centers, cluster_std=1., random_state=42) D = pairwise_distances(X) # Distance matrix S = np.max(D) - D # Similarity matrix S = sparse.coo_matrix(S) assert_raises(ValueError, spectral_clustering, S, n_clusters=2, random_state=0, assign_labels="<unknown>") def test_spectral_clustering_sparse(): X, y = make_blobs(n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01) S = rbf_kernel(X, gamma=1) S = np.maximum(S - 1e-4, 0) S = sparse.coo_matrix(S) labels = SpectralClustering(random_state=0, n_clusters=2, affinity='precomputed').fit(S).labels_ assert_equal(adjusted_rand_score(y, labels), 1) def test_affinities(): # Note: in the following, random_state has been selected to have # a dataset that yields a stable eigen decomposition both when built # on OSX and Linux X, y = make_blobs(n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 ) # nearest neighbors affinity sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors', random_state=0) assert_warns_message(UserWarning, 'not fully connected', sp.fit, X) assert_equal(adjusted_rand_score(y, sp.labels_), 1) sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0) labels = sp.fit(X).labels_ assert_equal(adjusted_rand_score(y, labels), 1) X = check_random_state(10).rand(10, 5) * 10 kernels_available = kernel_metrics() for kern in kernels_available: # Additive chi^2 gives a negative similarity matrix which # doesn't make sense for spectral clustering if kern != 'additive_chi2': sp = SpectralClustering(n_clusters=2, affinity=kern, random_state=0) labels = sp.fit(X).labels_ assert_equal((X.shape[0],), labels.shape) sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1, random_state=0) labels = sp.fit(X).labels_ assert_equal((X.shape[0],), labels.shape) def histogram(x, y, **kwargs): # Histogram kernel implemented as a callable. assert_equal(kwargs, {}) # no kernel_params that we didn't ask for return np.minimum(x, y).sum() sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0) labels = sp.fit(X).labels_ assert_equal((X.shape[0],), labels.shape) # raise error on unknown affinity sp = SpectralClustering(n_clusters=2, affinity='<unknown>') assert_raises(ValueError, sp.fit, X) def test_discretize(seed=8): # Test the discretize using a noise assignment matrix random_state = np.random.RandomState(seed) for n_samples in [50, 100, 150, 500]: for n_class in range(2, 10): # random class labels y_true = random_state.randint(0, n_class + 1, n_samples) y_true = np.array(y_true, np.float) # noise class assignment matrix y_indicator = sparse.coo_matrix((np.ones(n_samples), (np.arange(n_samples), y_true)), shape=(n_samples, n_class + 1)) y_true_noisy = (y_indicator.toarray() + 0.1 * random_state.randn(n_samples, n_class + 1)) y_pred = discretize(y_true_noisy, random_state) assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
bsd-3-clause
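The spectral clustering tests above exercise SpectralClustering with a precomputed affinity matrix. A minimal sketch of that call pattern with a smaller block-structured matrix of the same shape as the one in test_spectral_clustering; the values are chosen freely for illustration.

import numpy as np
from sklearn.cluster import SpectralClustering

# Two strongly connected blocks, weakly bridged through one pair of samples.
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0],
              [1.0, 1.0, 1.0, 0.2, 0.0, 0.0],
              [1.0, 1.0, 1.0, 0.2, 0.0, 0.0],
              [0.2, 0.2, 0.2, 1.0, 1.0, 1.0],
              [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
              [0.0, 0.0, 0.0, 1.0, 1.0, 1.0]])

model = SpectralClustering(n_clusters=2, affinity='precomputed',
                           random_state=0).fit(S)
print(model.labels_)  # e.g. [0 0 0 1 1 1]; which block gets which label is arbitrary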
CrazyGuo/bokeh
examples/app/stock_applet/stock_app.py
42
7786
""" This file demonstrates a bokeh applet, which can either be viewed directly on a bokeh-server, or embedded into a flask application. See the README.md file in this directory for instructions on running. """ import logging logging.basicConfig(level=logging.DEBUG) from os import listdir from os.path import dirname, join, splitext import numpy as np import pandas as pd from bokeh.models import ColumnDataSource, Plot from bokeh.plotting import figure, curdoc from bokeh.properties import String, Instance from bokeh.server.app import bokeh_app from bokeh.server.utils.plugins import object_page from bokeh.models.widgets import HBox, VBox, VBoxForm, PreText, Select # build up list of stock data in the daily folder data_dir = join(dirname(__file__), "daily") try: tickers = listdir(data_dir) except OSError as e: print('Stock data not available, see README for download instructions.') raise e tickers = [splitext(x)[0].split("table_")[-1] for x in tickers] # cache stock data as dict of pandas DataFrames pd_cache = {} def get_ticker_data(ticker): fname = join(data_dir, "table_%s.csv" % ticker.lower()) data = pd.read_csv( fname, names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'], header=False, parse_dates=['date'] ) data = data.set_index('date') data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()}) return data def get_data(ticker1, ticker2): if pd_cache.get((ticker1, ticker2)) is not None: return pd_cache.get((ticker1, ticker2)) # only append columns if it is the same ticker if ticker1 != ticker2: data1 = get_ticker_data(ticker1) data2 = get_ticker_data(ticker2) data = pd.concat([data1, data2], axis=1) else: data = get_ticker_data(ticker1) data = data.dropna() pd_cache[(ticker1, ticker2)] = data return data class StockApp(VBox): extra_generated_classes = [["StockApp", "StockApp", "VBox"]] jsmodel = "VBox" # text statistics pretext = Instance(PreText) # plots plot = Instance(Plot) line_plot1 = Instance(Plot) line_plot2 = Instance(Plot) hist1 = Instance(Plot) hist2 = Instance(Plot) # data source source = Instance(ColumnDataSource) # layout boxes mainrow = Instance(HBox) histrow = Instance(HBox) statsbox = Instance(VBox) # inputs ticker1 = String(default="AAPL") ticker2 = String(default="GOOG") ticker1_select = Instance(Select) ticker2_select = Instance(Select) input_box = Instance(VBoxForm) def __init__(self, *args, **kwargs): super(StockApp, self).__init__(*args, **kwargs) self._dfs = {} @classmethod def create(cls): """ This function is called once, and is responsible for creating all objects (plots, datasources, etc) """ # create layout widgets obj = cls() obj.mainrow = HBox() obj.histrow = HBox() obj.statsbox = VBox() obj.input_box = VBoxForm() # create input widgets obj.make_inputs() # outputs obj.pretext = PreText(text="", width=500) obj.make_source() obj.make_plots() obj.make_stats() # layout obj.set_children() return obj def make_inputs(self): self.ticker1_select = Select( name='ticker1', value='AAPL', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'] ) self.ticker2_select = Select( name='ticker2', value='GOOG', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'] ) @property def selected_df(self): pandas_df = self.df selected = self.source.selected['1d']['indices'] if selected: pandas_df = pandas_df.iloc[selected, :] return pandas_df def make_source(self): self.source = ColumnDataSource(data=self.df) def line_plot(self, ticker, x_range=None): p = figure( title=ticker, x_range=x_range, x_axis_type='datetime', plot_width=1000, plot_height=200, title_text_font_size="10pt", 
tools="pan,wheel_zoom,box_select,reset" ) p.circle( 'date', ticker, size=2, source=self.source, nonselection_alpha=0.02 ) return p def hist_plot(self, ticker): global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50) hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 start = global_bins.min() end = global_bins.max() top = hist.max() p = figure( title="%s hist" % ticker, plot_width=500, plot_height=200, tools="", title_text_font_size="10pt", x_range=[start, end], y_range=[0, top], ) p.rect(center, hist / 2.0, width, hist) return p def make_plots(self): ticker1 = self.ticker1 ticker2 = self.ticker2 p = figure( title="%s vs %s" % (ticker1, ticker2), plot_width=400, plot_height=400, tools="pan,wheel_zoom,box_select,reset", title_text_font_size="10pt", ) p.circle(ticker1 + "_returns", ticker2 + "_returns", size=2, nonselection_alpha=0.02, source=self.source ) self.plot = p self.line_plot1 = self.line_plot(ticker1) self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range) self.hist_plots() def hist_plots(self): ticker1 = self.ticker1 ticker2 = self.ticker2 self.hist1 = self.hist_plot(ticker1) self.hist2 = self.hist_plot(ticker2) def set_children(self): self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2] self.mainrow.children = [self.input_box, self.plot, self.statsbox] self.input_box.children = [self.ticker1_select, self.ticker2_select] self.histrow.children = [self.hist1, self.hist2] self.statsbox.children = [self.pretext] def input_change(self, obj, attrname, old, new): if obj == self.ticker2_select: self.ticker2 = new if obj == self.ticker1_select: self.ticker1 = new self.make_source() self.make_plots() self.set_children() curdoc().add(self) def setup_events(self): super(StockApp, self).setup_events() if self.source: self.source.on_change('selected', self, 'selection_change') if self.ticker1_select: self.ticker1_select.on_change('value', self, 'input_change') if self.ticker2_select: self.ticker2_select.on_change('value', self, 'input_change') def make_stats(self): stats = self.selected_df.describe() self.pretext.text = str(stats) def selection_change(self, obj, attrname, old, new): self.make_stats() self.hist_plots() self.set_children() curdoc().add(self) @property def df(self): return get_data(self.ticker1, self.ticker2) # The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL # will render this StockApp. If you don't want serve this applet from a Bokeh # server (for instance if you are embedding in a separate Flask application), # then just remove this block of code. @bokeh_app.route("/bokeh/stocks/") @object_page("stocks") def make_stocks(): app = StockApp.create() return app
bsd-3-clause
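get_ticker_data()/get_data() above boil down to a small pandas transformation: keep the close price, add its first difference as a returns column, concatenate the two tickers and drop missing rows. A pandas-only sketch of that transformation with made-up prices; the real applet reads the table_<ticker>.csv files from the daily folder.

import pandas as pd

# Toy close-price series for two tickers.
aapl = pd.Series([10.0, 10.5, 10.2, 10.8], name='AAPL')
goog = pd.Series([20.0, 19.5, 19.9, 20.4], name='GOOG')

def to_frame(close, ticker):
    # Mirrors the per-ticker frame built in get_ticker_data():
    # the close price plus its first difference as "<ticker>_returns".
    return pd.DataFrame({ticker: close, ticker + '_returns': close.diff()})

data = pd.concat([to_frame(aapl, 'AAPL'), to_frame(goog, 'GOOG')], axis=1).dropna()
print(data)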
fw1121/galaxy_tools
inchlib_clust/inchlib_clust.py
8
24156
#coding: utf-8 from __future__ import print_function import csv, json, copy, re, argparse, os, urllib2 import numpy, scipy, fastcluster, sklearn import scipy.cluster.hierarchy as hcluster from sklearn import preprocessing from scipy import spatial LINKAGES = ["single", "complete", "average", "centroid", "ward", "median", "weighted"] RAW_LINKAGES = ["ward", "centroid"] DISTANCES = {"numeric": ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "euclidean", "mahalanobis", "minkowski", "seuclidean", "sqeuclidean"], "binary": ["dice","hamming","jaccard","kulsinski","matching","rogerstanimoto","russellrao","sokalmichener","sokalsneath","yule"]} class Dendrogram(): """Class which handles the generation of cluster heatmap format of clustered data. As an input it takes a Cluster instance with clustered data.""" def __init__(self, clustering): self.cluster_object = clustering self.data_type = clustering.data_type self.axis = clustering.clustering_axis self.clustering = clustering.clustering self.tree = hcluster.to_tree(self.clustering) self.data = clustering.data self.data_names = clustering.data_names self.header = clustering.header self.dendrogram = False def __get_cluster_heatmap__(self, write_data): root, nodes = hcluster.to_tree(self.clustering, rd=True) node_id2node = {} dendrogram = {"nodes":{}} for node in nodes: node_id = node.id if node.count == 1: node_id2node[node_id] = {"count":1, "distance":0} else: node_left_child = node.get_left().id node_right_child = node.get_right().id node_id2node[node_id] = {"count":node.count, "distance":round(node.dist, 3), "left_child": node_left_child, "right_child": node_right_child} for n in node_id2node: node = node_id2node[n] if node["count"] != 1: node_id2node[node["left_child"]]["parent"] = n node_id2node[node["right_child"]]["parent"] = n for n in node_id2node: node = node_id2node[n] if node["count"] == 1: data = self.data[n] node["objects"] = [self.data_names[n]] if node_id2node[node["parent"]]["left_child"] == n: node_id2node[node["parent"]]["left_child"] = n else: node_id2node[node["parent"]]["right_child"] = n if not write_data: data = [] node["features"] = data dendrogram["nodes"][n] = node for n in node_id2node: if node_id2node[n]["count"] != 1: dendrogram["nodes"][n] = node_id2node[n] return dendrogram def __get_column_dendrogram__(self): root, nodes = hcluster.to_tree(self.cluster_object.column_clustering, rd=True) node_id2node = {} dendrogram = {"nodes":{}} for node in nodes: node_id = node.id if node.count == 1: node_id2node[node_id] = {"count":1, "distance":0} else: node_left_child = node.get_left().id node_right_child = node.get_right().id node_id2node[node_id] = {"count":node.count, "distance":round(node.dist, 3), "left_child": node_left_child, "right_child": node_right_child} for n in node_id2node: node = node_id2node[n] if node["count"] != 1: node_id2node[node["left_child"]]["parent"] = n node_id2node[node["right_child"]]["parent"] = n for n in node_id2node: if not n in dendrogram["nodes"]: dendrogram["nodes"][n] = node_id2node[n] return dendrogram def create_cluster_heatmap(self, compress=False, compressed_value="median", write_data=True): """Creates cluster heatmap representation in inchlib format. By setting compress parameter to True you can cut the dendrogram in a distance to decrease the row size of the heatmap to specified count. When compressing the type of the resulted value of merged rows is given by the compressed_value parameter (median, mean). 
When the metadata are nominal (text values) the most frequent is the result after compression. By setting write_data to False the data features won't be present in the resulting format.""" self.dendrogram = {"data": self.__get_cluster_heatmap__(write_data)} self.compress = compress self.compressed_value = compressed_value self.compress_cluster_treshold = 0 if self.compress and self.compress >= 0: self.compress_cluster_treshold = self.__get_distance_treshold__(compress) print("Distance treshold for compression:", self.compress_cluster_treshold) if self.compress_cluster_treshold >= 0: self.__compress_data__() else: self.compress = False if self.header and write_data: self.dendrogram["data"]["feature_names"] = [h for h in self.header] elif self.header and not write_data: self.dendrogram["data"]["feature_names"] = [] if self.axis == "both" and len(self.cluster_object.column_clustering): column_dendrogram = hcluster.to_tree(self.cluster_object.column_clustering) self.dendrogram["column_dendrogram"] = self.__get_column_dendrogram__() return def __compress_data__(self): nodes = {} to_remove = set() compressed_value2fnc = { "median": lambda values: [round(numpy.median(value), 3) for value in values], "mean": lambda values: [round(numpy.average(value), 3) for value in values], } for n in self.dendrogram["data"]["nodes"]: node = self.dendrogram["data"]["nodes"][n] if node["count"] == 1: objects = node["objects"] data = node["features"] node_id = n while self.dendrogram["data"]["nodes"][node["parent"]]["distance"] <= self.compress_cluster_treshold: to_remove.add(node_id) node_id = node["parent"] node = self.dendrogram["data"]["nodes"][node_id] if node["count"] != 1: if not "objects" in self.dendrogram["data"]["nodes"][node_id]: self.dendrogram["data"]["nodes"][node_id]["objects"] = [] self.dendrogram["data"]["nodes"][node_id]["features"] = [] self.dendrogram["data"]["nodes"][node_id]["objects"].extend(objects) if data: self.dendrogram["data"]["nodes"][node_id]["features"].append(data) for node in to_remove: self.dendrogram["data"]["nodes"].pop(node) for k in self.dendrogram["data"]["nodes"]: node = self.dendrogram["data"]["nodes"][k] if "objects" in node and node["count"] != 1: self.dendrogram["data"]["nodes"][k]["distance"] = 0 self.dendrogram["data"]["nodes"][k]["count"] = 1 self.dendrogram["data"]["nodes"][k].pop("left_child") self.dendrogram["data"]["nodes"][k].pop("right_child") rows = zip(*self.dendrogram["data"]["nodes"][k]["features"]) self.dendrogram["data"]["nodes"][k]["features"] = compressed_value2fnc[self.compressed_value](rows) self.__adjust_node_counts__() return def __adjust_node_counts__(self): leaves = [] for n in self.dendrogram["data"]["nodes"]: if self.dendrogram["data"]["nodes"][n]["count"] > 1: self.dendrogram["data"]["nodes"][n]["count"] = 0 else: leaves.append(n) for n in leaves: node = self.dendrogram["data"]["nodes"][n] parent_id = node["parent"] while parent_id: node = self.dendrogram["data"]["nodes"][parent_id] self.dendrogram["data"]["nodes"][parent_id]["count"] += 1 parent_id = False if "parent" in node: parent_id = node["parent"] return def __get_distance_treshold__(self, cluster_count): print("Calculating distance treshold for cluster compression...") if cluster_count >= self.tree.count: return -1 i = 0 count = cluster_count + 1 test_step = self.tree.dist/2 while test_step >= 0.1: count = len(set([c for c in hcluster.fcluster(self.clustering, i, "distance")])) if count < cluster_count: if i == 0: return 0 i = i - test_step test_step = test_step/2 elif count == 
cluster_count: return i else: i += test_step return i+test_step*2 def export_cluster_heatmap_as_json(self, filename=None): """Returns cluster heatmap in a JSON format or exports it to the file specified by the filename parameter.""" dendrogram_json = json.dumps(self.dendrogram, indent=4) if filename: output = open(filename, "w") output.write(dendrogram_json) return dendrogram_json def export_cluster_heatmap_as_html(self, htmldir="."): """Export simple HTML page with embedded cluster heatmap and dependencies to given directory.""" if not os.path.exists(htmldir): os.makedirs(htmldir) dendrogram_json = json.dumps(self.dendrogram, indent=4) template = """<html> <head> <script src="jquery-2.0.3.min.js"></script> <script src="kinetic-v5.0.0.min.js"></script> <script src="inchlib-1.0.1.min.js"></script> <script> $(document).ready(function() {{ var data = {}; var inchlib = new InCHlib({{ target: "inchlib", max_height: 1200, width: 1000, }}); inchlib.read_data(data); inchlib.draw(); }}); </script> </head> <body> <div id="inchlib"></div> </body> </html>""".format(dendrogram_json) lib2url = {"inchlib-1.0.1.min.js": "http://openscreen.cz/software/inchlib/static/js/inchlib-1.0.1.min.js", "jquery-2.0.3.min.js": "http://openscreen.cz/software/inchlib/static/js/jquery-2.0.3.min.js", "kinetic-v5.0.0.min.js": "http://openscreen.cz/software/inchlib/static/js/kinetic-v5.0.0.min.js"} for lib, url in lib2url.items(): try: source = urllib2.urlopen(url) source_html = source.read() with open(os.path.join(htmldir, lib), "w") as output: output.write(source_html) except urllib2.URLError, e: raise Exception("\nCan't download file {}.\nPlease check your internet connection and try again.\nIf the error persists there can be something wrong with the InCHlib server.\n".format(url)) with open(os.path.join(htmdlir, "inchlib.html"), "w") as output: output.write(template) return def add_metadata_from_file(self, metadata_file, delimiter, header=True, metadata_compressed_value="median"): """Adds metadata from csv file. Metadata_compressed_value specifies the resulted value when the data are compressed (median/mean/frequency)""" self.metadata_compressed_value = metadata_compressed_value self.metadata, self.metadata_header = self.__read_metadata_file__(metadata_file, delimiter, header) self.__connect_metadata_to_data__() return def add_metadata(self, metadata, header=True, metadata_compressed_value="median"): """Adds metadata in a form of list of lists (tuples). 
Metadata_compressed_value specifies the resulted value when the data are compressed (median/mean/frequency)""" self.metadata_compressed_value = metadata_compressed_value self.metadata, self.metadata_header = self.__read_metadata__(metadata, header) self.__connect_metadata_to_data__() return def __connect_metadata_to_data__(self): if len(set(self.metadata.keys()) & set(self.data_names)) == 0: raise Exception("Metadata objects must correspond with original data objects.") if not self.dendrogram: raise Exception("You must create dendrogram before adding metadata.") self.dendrogram["metadata"] = {"nodes":{}} if self.metadata_header: self.dendrogram["metadata"]["feature_names"] = self.metadata_header leaves = {n:node for n, node in self.dendrogram["data"]["nodes"].items() if node["count"] == 1} if not self.compress: for leaf_id, leaf in leaves.items(): try: self.dendrogram["metadata"]["nodes"][leaf_id] = self.metadata[leaf["objects"][0]] except Exception, e: continue else: compressed_value2fnc = { "median": lambda values: round(numpy.median(col), 3), "mean": lambda values: round(numpy.average(col), 3) } for leaf in leaves: objects = [] for item in leaves[leaf]["objects"]: try: objects.append(self.metadata[item]) except Exception, e: continue cols = zip(*objects) row = [] cols = [list(c) for c in cols] for col in cols: if self.metadata_compressed_value in compressed_value2fnc: try: col = [float(c) for c in col] value = compressed_value2fnc[self.metadata_compressed_value](col) except ValueError: freq2val = {col.count(v):v for v in set(col)} value = freq2val[max(freq2val.keys())] elif self.metadata_compressed_value == "frequency": freq2val = {col.count(v):v for v in set(col)} value = freq2val[max(freq2val.keys())] else: raise Exception("Unkown type of metadata_compressed_value: {}. Possible values are: median, mean, frequency.".format(self.metadata_compressed_value)) row.append(value) self.dendrogram["metadata"]["nodes"][leaf] = row return def __read_metadata__(self, metadata, header): metadata_header = [] rows = metadata metadata = {} data_start = 0 if header: metadata_header = rows[0][1:] data_start = 1 for row in rows[data_start:]: metadata[str(row[0])] = [r for r in row[1:]] return metadata, metadata_header def __read_metadata_file__(self, metadata_file, delimiter, header): csv_reader = csv.reader(open(metadata_file, "r"), delimiter=delimiter) metadata_header = [] rows = [row for row in csv_reader] metadata = {} data_start = 0 if header: metadata_header = rows[0][1:] data_start = 1 for row in rows[data_start:]: metadata_id = str(row[0]) metadata[metadata_id] = [r for r in row[1:]] return metadata, metadata_header class Cluster(): """Class for data clustering""" def __init__(self): self.write_original = False def read_csv(self, filename, delimiter=",", header=False): """Reads data from the CSV file""" self.filename = filename csv_reader = csv.reader(open(self.filename, "r"), delimiter=delimiter) rows = [row for row in csv_reader] self.read_data(rows, header) def read_data(self, rows, header=False): """Reads data in a form of list of lists (tuples)""" self.header = header data_start = 0 if self.header: self.header = rows[0][1:] data_start = 1 self.data_names = [str(row[0]) for row in rows[data_start:]] self.data = [[round(float(value), 3) for value in row[1:]] for row in rows[data_start:]] self.original_data = copy.deepcopy(self.data) return def normalize_data(self, feature_range=(0,1), write_original=False): """Normalizes data to a scale from 0 to 1. 
When write_original is set to True, the normalized data will be clustered, but original data will be written to the heatmap.""" self.write_original = write_original min_max_scaler = preprocessing.MinMaxScaler(feature_range) self.data = min_max_scaler.fit_transform(self.data) self.data = [[round(v, 3) for v in row] for row in self.data] return def cluster_data(self, data_type="numeric", row_distance="euclidean", row_linkage="single", axis="row", column_distance="euclidean", column_linkage="ward"): """Performs clustering according to the given parameters. @data_type - numeric/binary @row_distance/column_distance - see. DISTANCES variable @row_linkage/column_linkage - see. LINKAGES variable @axis - row/both """ print("Clustering rows:", row_distance, row_linkage) self.data_type = data_type self.clustering_axis = axis row_linkage = str(row_linkage) if row_linkage in RAW_LINKAGES: self.clustering = fastcluster.linkage(self.data, method=row_linkage, metric=row_distance) else: self.distance_vector = fastcluster.pdist(self.data, row_distance) if data_type in DISTANCES and not row_distance in DISTANCES[data_type]: raise Exception("".join(["When clustering" , data_type, "data you must choose from these distance measures: ", ", ".join(DISTANCES[data_type])])) elif not data_type in DISTANCES.keys(): raise Exception("".join(["You can choose only from data types: ", ", ".join(DISTANCES.keys())])) self.clustering = fastcluster.linkage(self.distance_vector, method=str(row_linkage)) self.column_clustering = [] if axis == "both" and len(self.data[0]) > 2: print("Clustering columns:", column_distance, column_linkage) self.__cluster_columns__(column_distance, column_linkage) if self.write_original: self.data = self.original_data return def __cluster_columns__(self, column_distance, column_linkage): columns = zip(*self.data) self.column_clustering = fastcluster.linkage(columns, method=column_linkage, metric=column_distance) self.data_order = hcluster.leaves_list(self.column_clustering) self.data = self.__reorder_data__(self.data, self.data_order) self.original_data = self.__reorder_data__(self.original_data, self.data_order) if self.header: self.header = self.__reorder_data__([self.header], self.data_order)[0] return def __reorder_data__(self, data, order): for i in xrange(len(data)): reordered_data = [] for j in order: reordered_data.append(data[i][j]) reordered_data.reverse() data[i] = reordered_data return data def _process_(arguments): c = Cluster() c.read_csv(arguments.data_file, arguments.data_delimiter, arguments.data_header) if arguments.normalize: c.normalize_data(feature_range=(0,1), write_original=arguments.write_original) c.cluster_data(data_type=arguments.datatype, row_distance=arguments.row_distance, row_linkage=arguments.row_linkage, axis=arguments.axis, column_distance=arguments.column_distance, column_linkage=arguments.column_linkage) d = Dendrogram(c) d.create_cluster_heatmap(compress=arguments.compress, compressed_value=arguments.compressed_value, write_data=not arguments.dont_write_data) if arguments.metadata: d.add_metadata_from_file(metadata_file=arguments.metadata, delimiter=arguments.metadata_delimiter, header=arguments.metadata_header, metadata_compressed_value=arguments.metadata_compressed_value) if arguments.output_file or arguments.html_dir: if arguments.output_file: d.export_cluster_heatmap_as_json(arguments.output_file) else: d.export_cluster_heatmap_as_html(arguments.html_dir) else: print(json.dumps(d.dendrogram, indent=4)) if __name__ == '__main__': parser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("data_file", type=str, help="csv(text) data file with delimited values") parser.add_argument("-o", "--output_file", type=str, help="the name of output file") parser.add_argument("-html", "--html_dir", type=str, help="the directory to store HTML page with dependencies") parser.add_argument("-rd", "--row_distance", type=str, default="euclidean", help="set the distance to use for clustering rows") parser.add_argument("-rl", "--row_linkage", type=str, default="ward", help="set the linkage to use for clustering rows") parser.add_argument("-cd", "--column_distance", type=str, default="euclidean", help="set the distance to use for clustering columns (only when clustering by both axis -a parameter)") parser.add_argument("-cl", "--column_linkage", type=str, default="ward", help="set the linkage to use for clustering columns (only when clustering by both axis -a parameter)") parser.add_argument("-a", "--axis", type=str, default="row", help="define clustering axis (row/both)") parser.add_argument("-dt", "--datatype", type=str, default="numeric", help="specify the type of the data (numeric/binary)") parser.add_argument("-dd", "--data_delimiter", type=str, default=",", help="delimiter of values in data file") parser.add_argument("-m", "--metadata", type=str, default=None, help="csv(text) metadata file with delimited values") parser.add_argument("-md", "--metadata_delimiter", type=str, default=",", help="delimiter of values in metadata file") parser.add_argument("-dh", "--data_header", default=False, help="whether the first row of data file is a header", action="store_true") parser.add_argument("-mh", "--metadata_header", default=False, help="whether the first row of metadata file is a header", action="store_true") parser.add_argument("-c", "--compress", type=int, default=0, help="compress the data to contain maximum of specified count of rows") parser.add_argument("-cv", "--compressed_value", type=str, default="median", help="the resulted value from merged rows when the data are compressed (median/mean/frequency)") parser.add_argument("-mcv", "--metadata_compressed_value", type=str, default="median", help="the resulted value from merged rows of metadata when the data are compressed (median/mean/count)") parser.add_argument("-dwd", "--dont_write_data", default=False, help="don't write clustered data to the inchlib data format", action="store_true") parser.add_argument("-n", "--normalize", default=False, help="normalize data to [0, 1] range", action="store_true") parser.add_argument("-wo", "--write_original", default=False, help="cluster normalized data but write the original ones to the heatmap", action="store_true") args = parser.parse_args() _process_(args)
mit
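Dendrogram.__get_cluster_heatmap__ above walks the tree produced by the hierarchical clustering: leaf nodes have count == 1, while internal nodes carry a merge distance and two children. A minimal sketch of that traversal using scipy.cluster.hierarchy only (fastcluster.linkage is a drop-in replacement for the linkage call); the toy data are made up.

import numpy as np
import scipy.cluster.hierarchy as hcluster

# Two tight groups of two points each; inchlib_clust would read rows from a CSV.
data = np.array([[0.0, 0.1], [0.1, 0.0], [5.0, 5.1], [5.1, 5.0]])

Z = hcluster.linkage(data, method='ward')
root, nodes = hcluster.to_tree(Z, rd=True)

# Same traversal idea as __get_cluster_heatmap__.
for node in nodes:
    if node.count == 1:
        print('leaf', node.id)
    else:
        print('node', node.id, 'distance', round(node.dist, 3),
              'children', node.get_left().id, node.get_right().id)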
louispotok/pandas
pandas/core/sparse/scipy_sparse.py
12
5673
""" Interaction with scipy.sparse matrices. Currently only includes SparseSeries.to_coo helpers. """ from pandas.core.index import MultiIndex, Index from pandas.core.series import Series from pandas.compat import OrderedDict, lmap def _check_is_partition(parts, whole): whole = set(whole) parts = [set(x) for x in parts] if set.intersection(*parts) != set(): raise ValueError( 'Is not a partition because intersection is not null.') if set.union(*parts) != whole: raise ValueError('Is not a partition because union is not the whole.') def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ For arbitrary (MultiIndexed) SparseSeries return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor. """ # index and column levels must be a partition of the index _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) # from the SparseSeries: get the labels and data for non-null entries values = ss._data.internal_values()._valid_sp_values nonnull_labels = ss.dropna() def get_indexers(levels): """ Return sparse coords and dense labels for subset levels """ # TODO: how to do this better? cleanly slice nonnull_labels given the # coord values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index] if len(levels) == 1: values_ilabels = [x[0] for x in values_ilabels] # # performance issues with groupby ################################### # TODO: these two lines can rejplace the code below but # groupby is too slow (in some cases at least) # labels_to_i = ss.groupby(level=levels, sort=sort_labels).first() # labels_to_i[:] = np.arange(labels_to_i.shape[0]) def _get_label_to_i_dict(labels, sort_labels=False): """ Return OrderedDict of unique labels to number. Optionally sort by label. """ labels = Index(lmap(tuple, labels)).unique().tolist() # squish if sort_labels: labels = sorted(list(labels)) d = OrderedDict((k, i) for i, k in enumerate(labels)) return (d) def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): def robust_get_level_values(i): # if index has labels (that are not None) use those, # else use the level location try: return index.get_level_values(index.names[i]) except KeyError: return index.get_level_values(i) ilabels = list(zip(*[robust_get_level_values(i) for i in subset])) labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels) labels_to_i = Series(labels_to_i) if len(subset) > 1: labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index) labels_to_i.index.names = [index.names[i] for i in subset] else: labels_to_i.index = Index(x[0] for x in labels_to_i.index) labels_to_i.index.name = index.names[subset[0]] labels_to_i.name = 'value' return (labels_to_i) labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels, sort_labels=sort_labels) # ##################################################################### # ##################################################################### i_coord = labels_to_i[values_ilabels].tolist() i_labels = labels_to_i.index.tolist() return i_coord, i_labels i_coord, i_labels = get_indexers(row_levels) j_coord, j_labels = get_indexers(column_levels) return values, i_coord, j_coord, i_labels, j_labels def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. 
""" import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels > 2') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo ' 'transformation.') # to keep things simple, only rely on integer indexing (not labels) row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix( (v, (i, j)), shape=(len(rows), len(columns))) return sparse_matrix, rows, columns def _coo_to_sparse_series(A, dense_index=False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. """ s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) s = s.sort_index() s = s.to_sparse() # TODO: specify kind? if dense_index: # is there a better constructor method to use here? i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) s = s.reindex(ind) return s
bsd-3-clause
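_sparse_series_to_coo() above ultimately hands scipy a (values, (rows, cols)) triplet. A small self-contained sketch of that constructor call and of reading the triplets back; the coordinates and values here are made up.

import numpy as np
import scipy.sparse

# Triplets of the kind _to_ijv() returns.
v = np.array([1.0, 2.0, 3.0])
i = np.array([0, 1, 2])
j = np.array([0, 2, 1])

A = scipy.sparse.coo_matrix((v, (i, j)), shape=(3, 3))
print(A.toarray())

# Reading the triplets back, analogous to what _coo_to_sparse_series() consumes.
print(list(zip(A.row, A.col, A.data)))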
mitdbg/aurum-datadiscovery
networkbuildercoordinator.py
1
5695
from modelstore.elasticstore import StoreHandler from knowledgerepr import fieldnetwork from knowledgerepr import networkbuilder from knowledgerepr.fieldnetwork import FieldNetwork from inputoutput import inputoutput as io import sys import time def main(output_path=None): start_all = time.time() network = FieldNetwork() store = StoreHandler() # Get all fields from store fields_gen = store.get_all_fields() # Network skeleton and hierarchical relations (table - field), etc start_schema = time.time() network.init_meta_schema(fields_gen) end_schema = time.time() print("Total skeleton: {0}".format(str(end_schema - start_schema))) print("!!1 " + str(end_schema - start_schema)) # Schema_sim relation start_schema_sim = time.time() schema_sim_index = networkbuilder.build_schema_sim_relation(network) end_schema_sim = time.time() print("Total schema-sim: {0}".format(str(end_schema_sim - start_schema_sim))) print("!!2 " + str(end_schema_sim - start_schema_sim)) # Entity_sim relation start_entity_sim = time.time() #fields, entities = store.get_all_fields_entities() #networkbuilder.build_entity_sim_relation(network, fields, entities) end_entity_sim = time.time() print("Total entity-sim: {0}".format(str(end_entity_sim - start_entity_sim))) """ # Content_sim text relation (random-projection based) start_text_sig_sim = time.time() st = time.time() text_signatures = store.get_all_fields_text_signatures(network) et = time.time() print("Time to extract signatures from store: {0}".format(str(et - st))) print("!!3 " + str(et - st)) networkbuilder.build_content_sim_relation_text_lsa(network, text_signatures) end_text_sig_sim = time.time() print("Total text-sig-sim: {0}".format(str(end_text_sig_sim - start_text_sig_sim))) print("!!4 " + str(end_text_sig_sim - start_text_sig_sim)) """ # Content_sim text relation (minhash-based) start_text_sig_sim = time.time() st = time.time() mh_signatures = store.get_all_mh_text_signatures() et = time.time() print("Time to extract minhash signatures from store: {0}".format(str(et - st))) print("!!3 " + str(et - st)) content_sim_index = networkbuilder.build_content_sim_mh_text(network, mh_signatures) end_text_sig_sim = time.time() print("Total text-sig-sim (minhash): {0}".format(str(end_text_sig_sim - start_text_sig_sim))) print("!!4 " + str(end_text_sig_sim - start_text_sig_sim)) # Content_sim num relation start_num_sig_sim = time.time() id_sig = store.get_all_fields_num_signatures() #networkbuilder.build_content_sim_relation_num(network, id_sig) networkbuilder.build_content_sim_relation_num_overlap_distr(network, id_sig) #networkbuilder.build_content_sim_relation_num_overlap_distr_indexed(network, id_sig) end_num_sig_sim = time.time() print("Total num-sig-sim: {0}".format(str(end_num_sig_sim - start_num_sig_sim))) print("!!5 " + str(end_num_sig_sim - start_num_sig_sim)) # Primary Key / Foreign key relation start_pkfk = time.time() networkbuilder.build_pkfk_relation(network) end_pkfk = time.time() print("Total PKFK: {0}".format(str(end_pkfk - start_pkfk))) print("!!6 " + str(end_pkfk - start_pkfk)) end_all = time.time() print("Total time: {0}".format(str(end_all - start_all))) print("!!7 " + str(end_all - start_all)) path = "test/datagov/" if output_path is not None: path = output_path fieldnetwork.serialize_network(network, path) # Serialize indexes path_schsim = path + "/schema_sim_index.pkl" io.serialize_object(schema_sim_index, path_schsim) path_cntsim = path + "/content_sim_index.pkl" io.serialize_object(content_sim_index, path_cntsim) print("DONE!") def plot_num(): 
network = FieldNetwork() store = StoreHandler() fields, num_signatures = store.get_all_fields_num_signatures() xaxis = [] yaxis = [] numpoints = 0 for x, y in num_signatures: numpoints = numpoints + 1 xaxis.append(x) yaxis.append(y) print("Num points: " + str(numpoints)) import matplotlib.pyplot as plt plt.plot(xaxis, yaxis, 'ro') plt.axis([0, 600000, 0, 600000]) #plt.axis([0, 10000, 0, 10000]) #plt.axis([0, 500, 0, 500]) plt.show() def test_content_sim_num(): ''' SETUP ''' start_all = time.time() network = FieldNetwork() store = StoreHandler() # Get all fields from store fields_gen = store.get_all_fields() # Network skeleton and hierarchical relations (table - field), etc start_schema = time.time() network.init_meta_schema(fields_gen) end_schema = time.time() print("Total skeleton: {0}".format(str(end_schema - start_schema))) ''' ACTUAL TEST ''' # Content_sim num relation start_num_sig_sim = time.time() id_sig = store.get_all_fields_num_signatures() # networkbuilder.build_content_sim_relation_num(network, id_sig) networkbuilder.build_content_sim_relation_num_overlap_distr(network, id_sig) end_num_sig_sim = time.time() print("Total num-sig-sim: {0}".format(str(end_num_sig_sim - start_num_sig_sim))) if __name__ == "__main__": #test_content_sim_num() #exit() path = None if len(sys.argv) == 3: path = sys.argv[2] else: print("USAGE: ") print("python networkbuildercoordinator.py --opath <path>") print("where opath must be writable by the process") exit() main(path) #test_read_store() #test() #plot_num() #test_cardinality_propagation()
mit
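main() above times each stage and serializes the resulting indexes to .pkl files via io.serialize_object. A sketch of that pattern under the assumption that serialize_object simply pickles the object to the given path (the helper's actual implementation is not shown here); the index object and output path are placeholders.

import pickle
import time

def serialize_object(obj, path):
    # Assumed stand-in for inputoutput.io.serialize_object: pickle to disk.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

start = time.time()
schema_sim_index = {'t1.a': ['t2.b']}   # placeholder for a real similarity index
serialize_object(schema_sim_index, '/tmp/schema_sim_index.pkl')
print("Total serialize: {0}".format(time.time() - start))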
meduz/scikit-learn
sklearn/metrics/tests/test_common.py
5
43094
from __future__ import division, print_function from functools import partial from itertools import product import numpy as np import scipy.sparse as sp from sklearn.datasets import make_multilabel_classification from sklearn.preprocessing import LabelBinarizer from sklearn.utils.multiclass import type_of_target from sklearn.utils.validation import check_random_state from sklearn.utils import shuffle from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_true from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import _named_check from sklearn.metrics import accuracy_score from sklearn.metrics import average_precision_score from sklearn.metrics import brier_score_loss from sklearn.metrics import cohen_kappa_score from sklearn.metrics import confusion_matrix from sklearn.metrics import coverage_error from sklearn.metrics import explained_variance_score from sklearn.metrics import f1_score from sklearn.metrics import fbeta_score from sklearn.metrics import hamming_loss from sklearn.metrics import hinge_loss from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import label_ranking_average_precision_score from sklearn.metrics import label_ranking_loss from sklearn.metrics import log_loss from sklearn.metrics import matthews_corrcoef from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import median_absolute_error from sklearn.metrics import precision_score from sklearn.metrics import r2_score from sklearn.metrics import recall_score from sklearn.metrics import roc_auc_score from sklearn.metrics import zero_one_loss # TODO Curve are currently not covered by invariance test # from sklearn.metrics import precision_recall_curve # from sklearn.metrics import roc_curve from sklearn.metrics.base import _average_binary_score # Note toward developers about metric testing # ------------------------------------------- # It is often possible to write one general test for several metrics: # # - invariance properties, e.g. invariance to sample order # - common behavior for an argument, e.g. the "normalize" with value True # will return the mean of the metrics and with value False will return # the sum of the metrics. # # In order to improve the overall metric testing, it is a good idea to write # first a specific test for the given metric and then add a general test for # all metrics that have the same behavior. # # Two types of datastructures are used in order to implement this system: # dictionaries of metrics and lists of metrics wit common properties. # # Dictionaries of metrics # ------------------------ # The goal of having those dictionaries is to have an easy way to call a # particular metric and associate a name to each function: # # - REGRESSION_METRICS: all regression metrics. # - CLASSIFICATION_METRICS: all classification metrics # which compare a ground truth and the estimated targets as returned by a # classifier. # - THRESHOLDED_METRICS: all classification metrics which # compare a ground truth and a score, e.g. 
estimated probabilities or # decision function (format might vary) # # Those dictionaries will be used to test systematically some invariance # properties, e.g. invariance toward several input layout. # REGRESSION_METRICS = { "mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "median_absolute_error": median_absolute_error, "explained_variance_score": explained_variance_score, "r2_score": partial(r2_score, multioutput='variance_weighted'), } CLASSIFICATION_METRICS = { "accuracy_score": accuracy_score, "unnormalized_accuracy_score": partial(accuracy_score, normalize=False), "confusion_matrix": confusion_matrix, "hamming_loss": hamming_loss, "jaccard_similarity_score": jaccard_similarity_score, "unnormalized_jaccard_similarity_score": partial(jaccard_similarity_score, normalize=False), "zero_one_loss": zero_one_loss, "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False), # These are needed to test averaging "precision_score": precision_score, "recall_score": recall_score, "f1_score": f1_score, "f2_score": partial(fbeta_score, beta=2), "f0.5_score": partial(fbeta_score, beta=0.5), "matthews_corrcoef_score": matthews_corrcoef, "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5), "weighted_f1_score": partial(f1_score, average="weighted"), "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2), "weighted_precision_score": partial(precision_score, average="weighted"), "weighted_recall_score": partial(recall_score, average="weighted"), "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5), "micro_f1_score": partial(f1_score, average="micro"), "micro_f2_score": partial(fbeta_score, average="micro", beta=2), "micro_precision_score": partial(precision_score, average="micro"), "micro_recall_score": partial(recall_score, average="micro"), "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5), "macro_f1_score": partial(f1_score, average="macro"), "macro_f2_score": partial(fbeta_score, average="macro", beta=2), "macro_precision_score": partial(precision_score, average="macro"), "macro_recall_score": partial(recall_score, average="macro"), "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5), "samples_f1_score": partial(f1_score, average="samples"), "samples_f2_score": partial(fbeta_score, average="samples", beta=2), "samples_precision_score": partial(precision_score, average="samples"), "samples_recall_score": partial(recall_score, average="samples"), "cohen_kappa_score": cohen_kappa_score, } THRESHOLDED_METRICS = { "coverage_error": coverage_error, "label_ranking_loss": label_ranking_loss, "log_loss": log_loss, "unnormalized_log_loss": partial(log_loss, normalize=False), "hinge_loss": hinge_loss, "brier_score_loss": brier_score_loss, "roc_auc_score": roc_auc_score, "weighted_roc_auc": partial(roc_auc_score, average="weighted"), "samples_roc_auc": partial(roc_auc_score, average="samples"), "micro_roc_auc": partial(roc_auc_score, average="micro"), "macro_roc_auc": partial(roc_auc_score, average="macro"), "average_precision_score": average_precision_score, "weighted_average_precision_score": partial(average_precision_score, average="weighted"), "samples_average_precision_score": partial(average_precision_score, average="samples"), "micro_average_precision_score": partial(average_precision_score, average="micro"), "macro_average_precision_score": partial(average_precision_score, average="macro"), "label_ranking_average_precision_score": label_ranking_average_precision_score, 
} ALL_METRICS = dict() ALL_METRICS.update(THRESHOLDED_METRICS) ALL_METRICS.update(CLASSIFICATION_METRICS) ALL_METRICS.update(REGRESSION_METRICS) # Lists of metrics with common properties # --------------------------------------- # Lists of metrics with common properties are used to test systematically some # functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that # are symmetric with respect to their input argument y_true and y_pred. # # When you add a new metric or functionality, check if a general test # is already written. # Those metrics don't support binary inputs METRIC_UNDEFINED_BINARY = [ "samples_f0.5_score", "samples_f1_score", "samples_f2_score", "samples_precision_score", "samples_recall_score", "coverage_error", "roc_auc_score", "micro_roc_auc", "weighted_roc_auc", "macro_roc_auc", "samples_roc_auc", "average_precision_score", "weighted_average_precision_score", "micro_average_precision_score", "macro_average_precision_score", "samples_average_precision_score", "label_ranking_loss", "label_ranking_average_precision_score", ] # Those metrics don't support multiclass inputs METRIC_UNDEFINED_MULTICLASS = [ "brier_score_loss", "matthews_corrcoef_score", # with default average='binary', multiclass is prohibited "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", ] # Metric undefined with "binary" or "multiclass" input METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union( set(METRIC_UNDEFINED_MULTICLASS)) # Metrics with an "average" argument METRICS_WITH_AVERAGING = [ "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score" ] # Threshold-based metrics with an "average" argument THRESHOLDED_METRICS_WITH_AVERAGING = [ "roc_auc_score", "average_precision_score", ] # Metrics with a "pos_label" argument METRICS_WITH_POS_LABEL = [ "roc_curve", "brier_score_loss", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", # pos_label support deprecated; to be removed in 0.18: "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", ] # Metrics with a "labels" argument # TODO: Handle multi_class metrics that has a labels argument as well as a # decision function argument. 
e.g hinge_loss METRICS_WITH_LABELS = [ "confusion_matrix", "hamming_loss", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "cohen_kappa_score", ] # Metrics with a "normalize" option METRICS_WITH_NORMALIZE_OPTION = [ "accuracy_score", "jaccard_similarity_score", "zero_one_loss", ] # Threshold-based metrics with "multilabel-indicator" format support THRESHOLDED_MULTILABEL_METRICS = [ "log_loss", "unnormalized_log_loss", "roc_auc_score", "weighted_roc_auc", "samples_roc_auc", "micro_roc_auc", "macro_roc_auc", "average_precision_score", "weighted_average_precision_score", "samples_average_precision_score", "micro_average_precision_score", "macro_average_precision_score", "coverage_error", "label_ranking_loss", ] # Classification metrics with "multilabel-indicator" format MULTILABELS_METRICS = [ "accuracy_score", "unnormalized_accuracy_score", "hamming_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "zero_one_loss", "unnormalized_zero_one_loss", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "samples_f0.5_score", "samples_f1_score", "samples_f2_score", "samples_precision_score", "samples_recall_score", ] # Regression metrics with "multioutput-continuous" format support MULTIOUTPUT_METRICS = [ "mean_absolute_error", "mean_squared_error", "r2_score", "explained_variance_score" ] # Symmetric with respect to their input arguments y_true and y_pred # metric(y_true, y_pred) == metric(y_pred, y_true). SYMMETRIC_METRICS = [ "accuracy_score", "unnormalized_accuracy_score", "hamming_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "zero_one_loss", "unnormalized_zero_one_loss", "f1_score", "micro_f1_score", "macro_f1_score", "weighted_recall_score", # P = R = F = accuracy in multiclass case "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error", "median_absolute_error", "cohen_kappa_score", ] # Asymmetric with respect to their input arguments y_true and y_pred # metric(y_true, y_pred) != metric(y_pred, y_true). NOT_SYMMETRIC_METRICS = [ "explained_variance_score", "r2_score", "confusion_matrix", "precision_score", "recall_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "macro_f0.5_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "log_loss", "hinge_loss" ] # No Sample weight support METRICS_WITHOUT_SAMPLE_WEIGHT = [ "cohen_kappa_score", "confusion_matrix", # Left this one here because the tests in this file do # not work for confusion_matrix, as its output is a # matrix instead of a number. 
Testing of # confusion_matrix with sample_weight is in # test_classification.py "median_absolute_error", ] @ignore_warnings def test_symmetry(): # Test the symmetry of score and loss functions random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(20, )) y_pred = random_state.randint(0, 2, size=(20, )) # We shouldn't forget any metrics assert_equal(set(SYMMETRIC_METRICS).union( NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS, METRIC_UNDEFINED_BINARY_MULTICLASS), set(ALL_METRICS)) assert_equal( set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)), set([])) # Symmetric metric for name in SYMMETRIC_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_pred), metric(y_pred, y_true), err_msg="%s is not symmetric" % name) # Not symmetric metrics for name in NOT_SYMMETRIC_METRICS: metric = ALL_METRICS[name] assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)), msg="%s seems to be symmetric" % name) @ignore_warnings def test_sample_order_invariance(): random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(20, )) y_pred = random_state.randint(0, 2, size=(20, )) y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0) for name, metric in ALL_METRICS.items(): if name in METRIC_UNDEFINED_BINARY_MULTICLASS: continue assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) @ignore_warnings def test_sample_order_invariance_multilabel_and_multioutput(): random_state = check_random_state(0) # Generate some data y_true = random_state.randint(0, 2, size=(20, 25)) y_pred = random_state.randint(0, 2, size=(20, 25)) y_score = random_state.normal(size=y_true.shape) y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true, y_pred, y_score, random_state=0) for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) for name in THRESHOLDED_MULTILABEL_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_score), metric(y_true_shuffle, y_score_shuffle), err_msg="%s is not sample order invariant" % name) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_score), metric(y_true_shuffle, y_score_shuffle), err_msg="%s is not sample order invariant" % name) assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) @ignore_warnings def test_format_invariance_with_1d_vectors(): random_state = check_random_state(0) y1 = random_state.randint(0, 2, size=(20, )) y2 = random_state.randint(0, 2, size=(20, )) y1_list = list(y1) y2_list = list(y2) y1_1d, y2_1d = np.array(y1), np.array(y2) assert_equal(y1_1d.ndim, 1) assert_equal(y2_1d.ndim, 1) y1_column = np.reshape(y1_1d, (-1, 1)) y2_column = np.reshape(y2_1d, (-1, 1)) y1_row = np.reshape(y1_1d, (1, -1)) y2_row = np.reshape(y2_1d, (1, -1)) for name, metric in ALL_METRICS.items(): if name in METRIC_UNDEFINED_BINARY_MULTICLASS: continue measure = metric(y1, y2) assert_almost_equal(metric(y1_list, y2_list), measure, err_msg="%s is not representation invariant " "with list" % name) assert_almost_equal(metric(y1_1d, y2_1d), measure, err_msg="%s is not representation invariant " "with np-array-1d" % name) assert_almost_equal(metric(y1_column, y2_column), measure, err_msg="%s is not representation invariant " "with 
np-array-column" % name) # Mix format support assert_almost_equal(metric(y1_1d, y2_list), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and list" % name) assert_almost_equal(metric(y1_list, y2_1d), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and list" % name) assert_almost_equal(metric(y1_1d, y2_column), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and np-array-column" % name) assert_almost_equal(metric(y1_column, y2_1d), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and np-array-column" % name) assert_almost_equal(metric(y1_list, y2_column), measure, err_msg="%s is not representation invariant " "with mix list and np-array-column" % name) assert_almost_equal(metric(y1_column, y2_list), measure, err_msg="%s is not representation invariant " "with mix list and np-array-column" % name) # These mix representations aren't allowed assert_raises(ValueError, metric, y1_1d, y2_row) assert_raises(ValueError, metric, y1_row, y2_1d) assert_raises(ValueError, metric, y1_list, y2_row) assert_raises(ValueError, metric, y1_row, y2_list) assert_raises(ValueError, metric, y1_column, y2_row) assert_raises(ValueError, metric, y1_row, y2_column) # NB: We do not test for y1_row, y2_row as these may be # interpreted as multilabel or multioutput data. if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS + MULTILABELS_METRICS)): assert_raises(ValueError, metric, y1_row, y2_row) @ignore_warnings def test_invariance_string_vs_numbers_labels(): # Ensure that classification metrics with string labels random_state = check_random_state(0) y1 = random_state.randint(0, 2, size=(20, )) y2 = random_state.randint(0, 2, size=(20, )) y1_str = np.array(["eggs", "spam"])[y1] y2_str = np.array(["eggs", "spam"])[y2] pos_label_str = "spam" labels_str = ["eggs", "spam"] for name, metric in CLASSIFICATION_METRICS.items(): if name in METRIC_UNDEFINED_BINARY_MULTICLASS: continue measure_with_number = metric(y1, y2) # Ugly, but handle case with a pos_label and label metric_str = metric if name in METRICS_WITH_POS_LABEL: metric_str = partial(metric_str, pos_label=pos_label_str) measure_with_str = metric_str(y1_str, y2_str) assert_array_equal(measure_with_number, measure_with_str, err_msg="{0} failed string vs number invariance " "test".format(name)) measure_with_strobj = metric_str(y1_str.astype('O'), y2_str.astype('O')) assert_array_equal(measure_with_number, measure_with_strobj, err_msg="{0} failed string object vs number " "invariance test".format(name)) if name in METRICS_WITH_LABELS: metric_str = partial(metric_str, labels=labels_str) measure_with_str = metric_str(y1_str, y2_str) assert_array_equal(measure_with_number, measure_with_str, err_msg="{0} failed string vs number " "invariance test".format(name)) measure_with_strobj = metric_str(y1_str.astype('O'), y2_str.astype('O')) assert_array_equal(measure_with_number, measure_with_strobj, err_msg="{0} failed string vs number " "invariance test".format(name)) for name, metric in THRESHOLDED_METRICS.items(): if name in ("log_loss", "hinge_loss", "unnormalized_log_loss", "brier_score_loss"): # Ugly, but handle case with a pos_label and label metric_str = metric if name in METRICS_WITH_POS_LABEL: metric_str = partial(metric_str, pos_label=pos_label_str) measure_with_number = metric(y1, y2) measure_with_str = metric_str(y1_str, y2) assert_array_equal(measure_with_number, measure_with_str, err_msg="{0} failed string vs number " "invariance 
test".format(name)) measure_with_strobj = metric(y1_str.astype('O'), y2) assert_array_equal(measure_with_number, measure_with_strobj, err_msg="{0} failed string object vs number " "invariance test".format(name)) else: # TODO those metrics doesn't support string label yet assert_raises(ValueError, metric, y1_str, y2) assert_raises(ValueError, metric, y1_str.astype('O'), y2) def test_inf_nan_input(): invalids =[([0, 1], [np.inf, np.inf]), ([0, 1], [np.nan, np.nan]), ([0, 1], [np.nan, np.inf])] METRICS = dict() METRICS.update(THRESHOLDED_METRICS) METRICS.update(REGRESSION_METRICS) for metric in METRICS.values(): for y_true, y_score in invalids: assert_raise_message(ValueError, "contains NaN, infinity", metric, y_true, y_score) # Classification metrics all raise a mixed input exception for metric in CLASSIFICATION_METRICS.values(): for y_true, y_score in invalids: assert_raise_message(ValueError, "Can't handle mix of binary and continuous", metric, y_true, y_score) @ignore_warnings def check_single_sample(name): # Non-regression test: scores should work with a single sample. # This is important for leave-one-out cross validation. # Score functions tested are those that formerly called np.squeeze, # which turns an array of size 1 into a 0-d array (!). metric = ALL_METRICS[name] # assert that no exception is thrown for i, j in product([0, 1], repeat=2): metric([i], [j]) @ignore_warnings def check_single_sample_multioutput(name): metric = ALL_METRICS[name] for i, j, k, l in product([0, 1], repeat=4): metric(np.array([[i, j]]), np.array([[k, l]])) def test_single_sample(): for name in ALL_METRICS: if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or name in THRESHOLDED_METRICS): # Those metrics are not always defined with one sample # or in multiclass classification continue yield check_single_sample, name for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS: yield check_single_sample_multioutput, name def test_multioutput_number_of_output_differ(): y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) y_pred = np.array([[0, 0], [1, 0], [0, 0]]) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] assert_raises(ValueError, metric, y_true, y_pred) def test_multioutput_regression_invariance_to_dimension_shuffling(): # test invariance to dimension shuffling random_state = check_random_state(0) y_true = random_state.uniform(0, 2, size=(20, 5)) y_pred = random_state.uniform(0, 2, size=(20, 5)) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] error = metric(y_true, y_pred) for _ in range(3): perm = random_state.permutation(y_true.shape[1]) assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]), error, err_msg="%s is not dimension shuffling " "invariant" % name) @ignore_warnings def test_multilabel_representation_invariance(): # Generate some data n_classes = 4 n_samples = 50 _, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=0, n_samples=n_samples, allow_unlabeled=True) _, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=1, n_samples=n_samples, allow_unlabeled=True) # To make sure at least one empty label is present y1 = np.vstack([y1, [[0] * n_classes]]) y2 = np.vstack([y2, [[0] * n_classes]]) y1_sparse_indicator = sp.coo_matrix(y1) y2_sparse_indicator = sp.coo_matrix(y2) for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] # XXX cruel hack to work with partial functions if isinstance(metric, partial): metric.__module__ = 'tmp' metric.__name__ = name measure = metric(y1, y2) # Check 
representation invariance assert_almost_equal(metric(y1_sparse_indicator, y2_sparse_indicator), measure, err_msg="%s failed representation invariance " "between dense and sparse indicator " "formats." % name) def test_raise_value_error_multilabel_sequences(): # make sure the multilabel-sequence format raises ValueError multilabel_sequences = [ [[0, 1]], [[1], [2], [0, 1]], [(), (2), (0, 1)], [[]], [()], np.array([[], [1, 2]], dtype='object')] for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] for seq in multilabel_sequences: assert_raises(ValueError, metric, seq, seq) def test_normalize_option_binary_classification(n_samples=20): # Test in the binary case random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(n_samples, )) y_pred = random_state.randint(0, 2, size=(n_samples, )) for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure) def test_normalize_option_multiclasss_classification(): # Test in the multiclass case random_state = check_random_state(0) y_true = random_state.randint(0, 4, size=(20, )) y_pred = random_state.randint(0, 4, size=(20, )) n_samples = y_true.shape[0] for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure) def test_normalize_option_multilabel_classification(): # Test in the multilabel case n_classes = 4 n_samples = 100 # for both random_state 0 and 1, y_true and y_pred has at least one # unlabelled entry _, y_true = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=0, allow_unlabeled=True, n_samples=n_samples) _, y_pred = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=1, allow_unlabeled=True, n_samples=n_samples) # To make sure at least one empty label is present y_true += [0]*n_classes y_pred += [0]*n_classes for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure, err_msg="Failed with %s" % name) @ignore_warnings def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel): n_samples, n_classes = y_true_binarize.shape # No averaging label_measure = metric(y_true, y_pred, average=None) assert_array_almost_equal(label_measure, [metric(y_true_binarize[:, i], y_pred_binarize[:, i]) for i in range(n_classes)]) # Micro measure micro_measure = metric(y_true, y_pred, average="micro") assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel())) # Macro measure macro_measure = metric(y_true, y_pred, average="macro") assert_almost_equal(macro_measure, np.mean(label_measure)) # Weighted measure weights = np.sum(y_true_binarize, axis=0, dtype=int) if np.sum(weights) != 0: weighted_measure = metric(y_true, y_pred, average="weighted") assert_almost_equal(weighted_measure, np.average(label_measure, weights=weights)) else: weighted_measure = metric(y_true, y_pred, average="weighted") assert_almost_equal(weighted_measure, 0) # Sample measure if 
is_multilabel: sample_measure = metric(y_true, y_pred, average="samples") assert_almost_equal(sample_measure, np.mean([metric(y_true_binarize[i], y_pred_binarize[i]) for i in range(n_samples)])) assert_raises(ValueError, metric, y_true, y_pred, average="unknown") assert_raises(ValueError, metric, y_true, y_pred, average="garbage") def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score): is_multilabel = type_of_target(y_true).startswith("multilabel") metric = ALL_METRICS[name] if name in METRICS_WITH_AVERAGING: _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel) elif name in THRESHOLDED_METRICS_WITH_AVERAGING: _check_averaging(metric, y_true, y_score, y_true_binarize, y_score, is_multilabel) else: raise ValueError("Metric is not recorded as having an average option") def test_averaging_multiclass(n_samples=50, n_classes=3): random_state = check_random_state(0) y_true = random_state.randint(0, n_classes, size=(n_samples, )) y_pred = random_state.randint(0, n_classes, size=(n_samples, )) y_score = random_state.uniform(size=(n_samples, n_classes)) lb = LabelBinarizer().fit(y_true) y_true_binarize = lb.transform(y_true) y_pred_binarize = lb.transform(y_pred) for name in METRICS_WITH_AVERAGING: yield (_named_check(check_averaging, name), name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) def test_averaging_multilabel(n_classes=5, n_samples=40): _, y = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=5, n_samples=n_samples, allow_unlabeled=False) y_true = y[:20] y_pred = y[20:] y_score = check_random_state(0).normal(size=(20, n_classes)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING: yield (_named_check(check_averaging, name), name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) def test_averaging_multilabel_all_zeroes(): y_true = np.zeros((20, 3)) y_pred = np.zeros((20, 3)) y_score = np.zeros((20, 3)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING: yield (_named_check(check_averaging, name), name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) # Test _average_binary_score for weight.sum() == 0 binary_metric = (lambda y_true, y_score, average="macro": _average_binary_score( precision_score, y_true, y_score, average)) _check_averaging(binary_metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel=True) def test_averaging_multilabel_all_ones(): y_true = np.ones((20, 3)) y_pred = np.ones((20, 3)) y_score = np.ones((20, 3)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING: yield (_named_check(check_averaging, name), name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) @ignore_warnings def check_sample_weight_invariance(name, metric, y1, y2): rng = np.random.RandomState(0) sample_weight = rng.randint(1, 10, size=len(y1)) # check that unit weights gives the same score as no weight unweighted_score = metric(y1, y2, sample_weight=None) assert_almost_equal( unweighted_score, metric(y1, y2, sample_weight=np.ones(shape=len(y1))), err_msg="For %s sample_weight=None is not equivalent to " "sample_weight=ones" % name) # check that the weighted and unweighted scores are unequal weighted_score = metric(y1, y2, sample_weight=sample_weight) assert_not_equal( unweighted_score, weighted_score, msg="Unweighted and weighted scores are unexpectedly " "equal (%f) for %s" % (weighted_score, name)) # check that 
sample_weight can be a list weighted_score_list = metric(y1, y2, sample_weight=sample_weight.tolist()) assert_almost_equal( weighted_score, weighted_score_list, err_msg=("Weighted scores for array and list " "sample_weight input are not equal (%f != %f) for %s") % ( weighted_score, weighted_score_list, name)) # check that integer weights is the same as repeated samples repeat_weighted_score = metric( np.repeat(y1, sample_weight, axis=0), np.repeat(y2, sample_weight, axis=0), sample_weight=None) assert_almost_equal( weighted_score, repeat_weighted_score, err_msg="Weighting %s is not equal to repeating samples" % name) # check that ignoring a fraction of the samples is equivalent to setting # the corresponding weights to zero sample_weight_subset = sample_weight[1::2] sample_weight_zeroed = np.copy(sample_weight) sample_weight_zeroed[::2] = 0 y1_subset = y1[1::2] y2_subset = y2[1::2] weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset) weighted_score_zeroed = metric(y1, y2, sample_weight=sample_weight_zeroed) assert_almost_equal( weighted_score_subset, weighted_score_zeroed, err_msg=("Zeroing weights does not give the same result as " "removing the corresponding samples (%f != %f) for %s" % (weighted_score_zeroed, weighted_score_subset, name))) if not name.startswith('unnormalized'): # check that the score is invariant under scaling of the weights by a # common factor for scaling in [2, 0.3]: assert_almost_equal( weighted_score, metric(y1, y2, sample_weight=sample_weight * scaling), err_msg="%s sample_weight is not invariant " "under scaling" % name) # Check that if sample_weight.shape[0] != y_true.shape[0], it raised an # error assert_raises(Exception, metric, y1, y2, sample_weight=np.hstack([sample_weight, sample_weight])) def test_sample_weight_invariance(n_samples=50): random_state = check_random_state(0) # binary random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(n_samples, )) y_pred = random_state.randint(0, 2, size=(n_samples, )) y_score = random_state.random_sample(size=(n_samples,)) for name in ALL_METRICS: if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or name in METRIC_UNDEFINED_BINARY): continue metric = ALL_METRICS[name] if name in THRESHOLDED_METRICS: yield _named_check(check_sample_weight_invariance, name), name,\ metric, y_true, y_score else: yield _named_check(check_sample_weight_invariance, name), name,\ metric, y_true, y_pred # multiclass random_state = check_random_state(0) y_true = random_state.randint(0, 5, size=(n_samples, )) y_pred = random_state.randint(0, 5, size=(n_samples, )) y_score = random_state.random_sample(size=(n_samples, 5)) for name in ALL_METRICS: if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or name in METRIC_UNDEFINED_BINARY_MULTICLASS): continue metric = ALL_METRICS[name] if name in THRESHOLDED_METRICS: yield _named_check(check_sample_weight_invariance, name), name,\ metric, y_true, y_score else: yield _named_check(check_sample_weight_invariance, name), name,\ metric, y_true, y_pred # multilabel indicator _, ya = make_multilabel_classification(n_features=1, n_classes=20, random_state=0, n_samples=100, allow_unlabeled=False) _, yb = make_multilabel_classification(n_features=1, n_classes=20, random_state=1, n_samples=100, allow_unlabeled=False) y_true = np.vstack([ya, yb]) y_pred = np.vstack([ya, ya]) y_score = random_state.randint(1, 4, size=y_true.shape) for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS + MULTIOUTPUT_METRICS): if name in METRICS_WITHOUT_SAMPLE_WEIGHT: continue 
metric = ALL_METRICS[name] if name in THRESHOLDED_METRICS: yield (_named_check(check_sample_weight_invariance, name), name, metric, y_true, y_score) else: yield (_named_check(check_sample_weight_invariance, name), name, metric, y_true, y_pred) @ignore_warnings def test_no_averaging_labels(): # test labels argument when not using averaging # in multi-class and multi-label cases y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]]) y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]]) y_true_multiclass = np.array([0, 1, 2]) y_pred_multiclass = np.array([0, 2, 3]) labels = np.array([3, 0, 1, 2]) _, inverse_labels = np.unique(labels, return_inverse=True) for name in METRICS_WITH_AVERAGING: for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass], [y_true_multilabel, y_pred_multilabel]]: if name not in MULTILABELS_METRICS and y_pred.ndim > 1: continue metric = ALL_METRICS[name] score_labels = metric(y_true, y_pred, labels=labels, average=None) score = metric(y_true, y_pred, average=None) assert_array_equal(score_labels, score[inverse_labels])
bsd-3-clause
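The grouping-plus-generic-check pattern in the test module above (symmetry, sample-order invariance, sample-weight invariance) can be tried in isolation. Below is a minimal sketch of the symmetry property, using two well-known scikit-learn metrics; the toy data and the metric choice are illustrative and not taken from the test module itself.

import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

rng = np.random.RandomState(0)
y_true = rng.uniform(size=20)
y_pred = rng.uniform(size=20)

# Symmetric: swapping the arguments leaves the score unchanged.
assert np.isclose(mean_squared_error(y_true, y_pred),
                  mean_squared_error(y_pred, y_true))

# Not symmetric: r2_score normalizes by the variance of its first argument,
# so swapping y_true and y_pred generally changes the value.
print(r2_score(y_true, y_pred), r2_score(y_pred, y_true))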
fredhusser/scikit-learn
sklearn/feature_extraction/hashing.py
183
6155
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause

import numbers

import numpy as np
import scipy.sparse as sp

from . import _hashing
from ..base import BaseEstimator, TransformerMixin


def _iteritems(d):
    """Like d.iteritems, but accepts any collections.Mapping."""
    return d.iteritems() if hasattr(d, "iteritems") else d.items()


class FeatureHasher(BaseEstimator, TransformerMixin):
    """Implements feature hashing, aka the hashing trick.

    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.

    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.
    Feature values must be (finite) numbers.

    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.

    Read more in the :ref:`User Guide <feature_hashing>`.

    Parameters
    ----------
    n_features : integer, optional
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    dtype : numpy type, optional
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.
    input_type : string, optional
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).
    non_negative : boolean, optional, default False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    Examples
    --------
    >>> from sklearn.feature_extraction import FeatureHasher
    >>> h = FeatureHasher(n_features=10)
    >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
    >>> f = h.transform(D)
    >>> f.toarray()
    array([[ 0.,  0., -4., -1.,  0.,  0.,  0.,  0.,  0.,  2.],
           [ 0.,  0.,  0., -2., -5.,  0.,  0.,  0.,  0.,  0.]])

    See also
    --------
    DictVectorizer : vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
      encoded as columns of integers.
    """

    def __init__(self, n_features=(2 ** 20), input_type="dict",
                 dtype=np.float64, non_negative=False):
        self._validate_params(n_features, input_type)

        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.non_negative = non_negative

    @staticmethod
    def _validate_params(n_features, input_type):
        # strangely, np.int16 instances are not instances of Integral,
        # while np.int64 instances are...
        if not isinstance(n_features, (numbers.Integral, np.integer)):
            raise TypeError("n_features must be integral, got %r (%s)."
                            % (n_features, type(n_features)))
        elif n_features < 1 or n_features >= 2 ** 31:
            raise ValueError("Invalid number of features (%d)." % n_features)

        if input_type not in ("dict", "pair", "string"):
            raise ValueError("input_type must be 'dict', 'pair' or 'string',"
                             " got %r." % input_type)

    def fit(self, X=None, y=None):
        """No-op.

        This method doesn't do anything. It exists purely for compatibility
        with the scikit-learn transformer API.

        Returns
        -------
        self : FeatureHasher

        """
        # repeat input validation for grid search (which calls set_params)
        self._validate_params(self.n_features, self.input_type)
        return self

    def transform(self, raw_X, y=None):
        """Transform a sequence of instances to a scipy.sparse matrix.

        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.
        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Feature matrix, for use with estimators or further transformers.

        """
        raw_X = iter(raw_X)
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            raw_X = (((f, 1) for f in x) for x in raw_X)
        indices, indptr, values = \
            _hashing.transform(raw_X, self.n_features, self.dtype)
        n_samples = indptr.shape[0] - 1

        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")

        X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
                          shape=(n_samples, self.n_features))
        X.sum_duplicates()  # also sorts the indices

        if self.non_negative:
            np.abs(X.data, X.data)
        return X
bsd-3-clause
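The "string" input_type described in the FeatureHasher docstring (each token hashes straight to a column, with an implicit value of 1) can be exercised in a few lines. This is a hedged sketch that mirrors the documented API; the token lists are invented for illustration.

from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=8, input_type="string")
X = h.transform([["dog", "cat", "dog"], ["run"]])  # one row per sample
print(X.toarray())  # signed token counts in the hashed columns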
jnez71/aLQR
demo_buoys.py
1
5462
from __future__ import division
import numpy as np
import numpy.linalg as npl
from matplotlib import pyplot as plt
import matplotlib.animation as ani

import alqr

# Hmm let's try a linear system with two positional
# states [position1, position2, velocity1, velocity2]
# and with damping but no springs.
# One "thruster" is available on each axis.
# Remember that xDOT = Ax + Bu.
nstates = 4
ncontrols = 2
drag = 3


def linearize(x):
    A = np.array([
                  [0, 0,     1,     0],
                  [0, 0,     0,     1],
                  [0, 0, -drag,     0],
                  [0, 0,     0, -drag]
                 ])
    B = np.array([
                  [0, 0],
                  [0, 0],
                  [1, 0],
                  [0, 1]
                 ])
    return (A, B)


def dynamics(x, u):
    # Truly linear system!
    A, B = linearize(x)
    return A.dot(x) + B.dot(u)

# Set up a cost field
goal = [1, 1, 0, 0]
cost_field = alqr.Cost_Field(nstates, ncontrols, 2, goal,
                             goal_weight=2, effort_weight=0.3,
                             obstacle_weight=1)

# Noised grid of obstacles
obs_grid_x, obs_grid_y = np.mgrid[slice(0.3, 1+0.2, 0.2),
                                  slice(0.3, 1+0.2, 0.2)]
obs_grid_x = obs_grid_x.reshape(obs_grid_x.size)
obs_grid_y = obs_grid_y.reshape(obs_grid_y.size)
obs = [np.zeros(2)] * obs_grid_x.size
for i in range(len(obs)):
    obs[i] = np.round([obs_grid_x[i], obs_grid_y[i]]
                      + 0.1*(np.random.rand(2)-0.5), 2)
    name = 'buoy' + str(i)
    if npl.norm(obs[i] - goal[:2]) > 0.1:
        cost_field.add_obstacle(name, obs[i], 0.1)

# Associate an alqr planner
planning_horizon = 10  # s
planning_resolution = 0.03  # s
planner = alqr.Planner(dynamics, linearize, cost_field,
                       planning_horizon, planning_resolution,
                       demo_plots=True)

# Initial condition and time
x = [0, 0.05, 0, 0]
dt = planning_resolution  # convenient to use in sim testing too
t_arr = np.arange(0, planning_horizon, dt)
framerate = 30
show_cost_field = True

# Plan a path from these initial conditions
planner.update_plan(x)

# Preallocate results memory
x_history = np.zeros((len(t_arr), nstates))
goal_history = np.zeros((len(t_arr), nstates))
u_history = np.zeros((len(t_arr), ncontrols))
c_history = np.zeros(len(t_arr))

# Integrate dynamics
for i, t in enumerate(t_arr):

    # Planner's decision
    u = planner.get_effort(t)

    # Record this instant
    x_history[i, :] = x
    goal_history[i, :] = goal
    u_history[i, :] = u
    c_history[i] = cost_field.state_cost(x)

    # First-order integrate
    xdot = dynamics(x, u)
    x = x + xdot*dt

# Plot results
fig1 = plt.figure()
fig1.suptitle('Results', fontsize=20)

ax1 = fig1.add_subplot(2, 3, 1)
ax1.set_ylabel('Position 1', fontsize=16)
ax1.plot(t_arr, x_history[:, 0], 'k',
         t_arr, goal_history[:, 0], 'g--')
ax1.grid(True)

ax1 = fig1.add_subplot(2, 3, 2)
ax1.set_ylabel('Position 2', fontsize=16)
ax1.plot(t_arr, x_history[:, 1], 'k',
         t_arr, goal_history[:, 1], 'g--')
ax1.grid(True)

ax1 = fig1.add_subplot(2, 3, 3)
ax1.set_ylabel('Efforts', fontsize=16)
ax1.plot(t_arr, u_history[:, 0], 'b',
         t_arr, u_history[:, 1], 'g')
ax1.grid(True)

ax1 = fig1.add_subplot(2, 3, 4)
ax1.set_ylabel('Velocity 1', fontsize=16)
ax1.plot(t_arr, x_history[:, 2], 'k',
         t_arr, goal_history[:, 2], 'g--')
ax1.set_xlabel('Time')
ax1.grid(True)

ax1 = fig1.add_subplot(2, 3, 5)
ax1.set_ylabel('Velocity 2', fontsize=16)
ax1.plot(t_arr, x_history[:, 3], 'k',
         t_arr, goal_history[:, 3], 'g--')
ax1.set_xlabel('Time')
ax1.grid(True)

ax1 = fig1.add_subplot(2, 3, 6)
ax1.set_ylabel('State Cost', fontsize=16)
ax1.plot(t_arr, c_history, 'k')
ax1.grid(True)
ax1.set_xlabel('Time')

print("\nClose the plot window to continue to animation.")
plt.show()

# Animation
fig2 = plt.figure()
fig2.suptitle('Evolution', fontsize=24)
plt.axis('equal')
ax2 = fig2.add_subplot(1, 1, 1)
ax2.set_xlabel('- Position 1 +')
ax2.set_ylabel('- Position 2 +')
ax2.grid(True)

radius = 0.02
xlim = (min(x_history[:, 0])*1.1 - radius, max(x_history[:, 0])*1.1 + radius)
ylim = (min(x_history[:, 1])*1.1 - radius, max(x_history[:, 1])*1.1 + radius)
ax2.set_xlim(xlim)
ax2.set_ylim(ylim)

# (color map of cost function over position space, zero velocity)
if show_cost_field:
    # resolution
    dX, dY = 0.01, 0.01
    # grid
    X, Y = np.mgrid[slice(xlim[0], xlim[1] + dX, dX),
                    slice(ylim[0], ylim[1] + dY, dY)]
    Jmap = np.zeros_like(X)
    # evaluate cost field
    for i, xval in enumerate(X[:, 0]):
        for j, yval in enumerate(Y[0, :]):
            Jmap[i, j] = cost_field.state_cost([xval, yval, 0, 0])
            if Jmap[i, j] < 0:
                print("Negative cost! At ({0}, {1})".format(xval, yval))
    Jmap = Jmap[:-1, :-1]
    plt.pcolor(X, Y, Jmap, cmap='YlOrRd', vmin=np.min(Jmap), vmax=np.max(Jmap))
    plt.colorbar()

graphic_robot = ax2.add_patch(plt.Circle((x_history[0, 0], x_history[0, 1]),
                                         radius=radius, fc='k'))
graphic_goal = ax2.add_patch(plt.Circle((goal_history[0, 0], goal_history[0, 1]),
                                        radius=radius, fc='g'))
for p in cost_field.obstacle_positions:
    ax2.add_patch(plt.Circle((p[0], p[1]), radius=radius, fc='r'))


def ani_update(arg, ii=[0]):

    i = ii[0]  # don't ask...

    if np.isclose(t_arr[i], np.around(t_arr[i], 1)):
        fig2.suptitle('Evolution (Time: {})'.format(t_arr[i]), fontsize=24)

    graphic_robot.center = ((x_history[i, 0], x_history[i, 1]))

    ii[0] += int(1 / (dt * framerate))
    if ii[0] >= len(t_arr):
        print("Resetting animation!")
        ii[0] = 0

    return [graphic_robot]

# Run animation
print("\nStarting animation. \nBlack: robot \nRed: obstacles \nGreen: goal \nHeat Map: state cost\n")
animation = ani.FuncAnimation(fig2, func=ani_update, interval=dt*1000)
plt.show()
mit
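The demo's plant is a damped double integrator (xDOT = Ax + Bu) advanced with a first-order Euler step. Below is a self-contained sketch of just that integration loop, assuming the same A, B and drag but leaving out the alqr planner; the constant control input is a made-up placeholder.

import numpy as np

drag = 3.0
A = np.array([[0, 0,     1,     0],
              [0, 0,     0,     1],
              [0, 0, -drag,     0],
              [0, 0,     0, -drag]])
B = np.array([[0, 0],
              [0, 0],
              [1, 0],
              [0, 1]])

x = np.array([0.0, 0.05, 0.0, 0.0])
u = np.array([0.5, 0.5])  # hypothetical constant thrust on both axes
dt = 0.03
for _ in range(300):
    # forward-Euler step of xdot = Ax + Bu
    x = x + (A.dot(x) + B.dot(u)) * dt
print(x)  # velocities settle near u/drag once the transient dies out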
sciunto/digit_recognition
machine_learning.py
2
2005
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Francois Boulogne
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD

import os

import numpy as np
import matplotlib.pyplot as plt
import skimage.io


def load_knowndata(filenames, show=False):
    training = {'images': [], 'targets': [], 'data': [], 'name': []}
    for index, filename in enumerate(filenames):
        target = os.path.splitext(os.path.basename(filename))[0]
        target = int(target.split('-')[0])
        image = skimage.io.imread(filename)

        training['targets'].append(target)
        training['images'].append(image)
        training['name'].append(filename)
        training['data'].append(image.flatten().tolist())

        if show:
            plt.subplot(6, 5, index + 1)
            plt.axis('off')
            plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
            plt.title('Training: %i' % target)
    if show:
        plt.show()

    # To apply a classifier on this data, we need to flatten the image and
    # turn the data into a (samples, features) matrix:
    training['images'] = np.array(training['images'])
    training['targets'] = np.array(training['targets'])
    training['data'] = np.array(training['data'])
    return training


def load_unknowndata(filenames):
    training = {'images': [], 'targets': [], 'data': [], 'name': []}
    for index, filename in enumerate(filenames):
        image = skimage.io.imread(filename)

        training['targets'].append(-1)  # Target = -1: unknown
        training['images'].append(image)
        training['name'].append(filename)
        training['data'].append(image.flatten().tolist())

    # To apply a classifier on this data, we need to flatten the image and
    # turn the data into a (samples, features) matrix:
    training['images'] = np.array(training['images'])
    training['targets'] = np.array(training['targets'])
    training['data'] = np.array(training['data'])
    return training
gpl-3.0
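The loaders above flatten each image into a (samples, features) row precisely so a classifier can consume it. A plausible usage sketch follows, assuming the two loader functions above are in scope; the SVC choice, its gamma value, and the glob patterns are illustrative assumptions rather than anything the module prescribes.

from glob import glob
from sklearn import svm

# Hypothetical file layout: labelled digits in train/, unlabelled in unknown/.
training = load_knowndata(sorted(glob("train/*.png")))
unknown = load_unknowndata(sorted(glob("unknown/*.png")))

clf = svm.SVC(gamma=0.001)
clf.fit(training['data'], training['targets'])
print(clf.predict(unknown['data']))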
ilyes14/scikit-learn
sklearn/preprocessing/tests/test_function_transformer.py
176
2169
from nose.tools import assert_equal
import numpy as np

from sklearn.preprocessing import FunctionTransformer


def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
    def _func(X, *args, **kwargs):
        args_store.append(X)
        args_store.extend(args)
        kwargs_store.update(kwargs)
        return func(X)

    return _func


def test_delegate_to_func():
    # (args|kwargs)_store will hold the positional and keyword arguments
    # passed to the function inside the FunctionTransformer.
    args_store = []
    kwargs_store = {}
    X = np.arange(10).reshape((5, 2))
    np.testing.assert_array_equal(
        FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
        X,
        'transform should have returned X unchanged',
    )

    # The function should only have received X.
    assert_equal(
        args_store,
        [X],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )

    # Reset the argument stores.
    args_store[:] = []  # python2 compatible inplace list clear.
    kwargs_store.clear()
    y = object()

    np.testing.assert_array_equal(
        FunctionTransformer(
            _make_func(args_store, kwargs_store),
            pass_y=True,
        ).transform(X, y),
        X,
        'transform should have returned X unchanged',
    )

    # The function should have received X and y.
    assert_equal(
        args_store,
        [X, y],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )


def test_np_log():
    X = np.arange(10).reshape((5, 2))

    # Test that the numpy.log example still works.
    np.testing.assert_array_equal(
        FunctionTransformer(np.log1p).transform(X),
        np.log1p(X),
    )
bsd-3-clause
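The tests above verify that FunctionTransformer merely forwards X to the wrapped callable, which is what lets it act as a stateless step in a pipeline. A brief sketch under that reading; the pipeline composition here is an illustration, not part of the test module.

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler

X = np.arange(10).reshape(5, 2).astype(float)
# log1p runs as a plain callable, then the scaler standardizes the result.
pipe = make_pipeline(FunctionTransformer(np.log1p), StandardScaler())
print(pipe.fit_transform(X))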
rohanp/scikit-learn
examples/ensemble/plot_gradient_boosting_oob.py
50
4764
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ print(__doc__) # Author: Peter Prettenhofer <[email protected]> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split # Generate data (adapted from G. Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3))) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3} clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params['n_estimators'] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``. 
""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = clf.loss_(y_test, y_pred) return score def cv_estimate(n_folds=3): cv = KFold(n_folds=n_folds) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv.split(X_train, y_train): cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_folds return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # plot curves and vertical lines for best iterations plt.plot(x, cumsum, label='OOB loss', color=oob_color) plt.plot(x, test_score, label='Test loss', color=test_color) plt.plot(x, cv_score, label='CV loss', color=cv_color) plt.axvline(x=oob_best_iter, color=oob_color) plt.axvline(x=test_best_iter, color=test_color) plt.axvline(x=cv_best_iter, color=cv_color) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array(xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ['OOB', 'CV', 'Test']) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label) plt.legend(loc='upper right') plt.ylabel('normalized loss') plt.xlabel('number of iterations') plt.show()
bsd-3-clause
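The docstring's heuristic can be stated compactly: with ``subsample < 1.0`` the fitted model exposes ``oob_improvement_``, and the cumulative sum of its negation is a proxy test-loss curve whose minimum suggests a stopping iteration. A small sketch on synthetic data; the dataset and hyperparameters are illustrative assumptions.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=500, random_state=0)
clf = GradientBoostingClassifier(n_estimators=300, subsample=0.5,
                                 learning_rate=0.05, random_state=0)
clf.fit(X, y)

# Proxy test-loss curve from the out-of-bag improvements.
oob_curve = -np.cumsum(clf.oob_improvement_)
print("OOB-suggested number of iterations:", np.argmin(oob_curve) + 1)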
summychou/TBTracker
src/TBTracker_MainWindow.py
1
51648
# -*- coding: utf-8 -*- import warnings warnings.filterwarnings('ignore') # ********************第三方相关模块导入******************** import logging Logger = logging.getLogger("TBTracker") Logger.setLevel(logging.DEBUG) InfoHandler = logging.FileHandler("TBTracker_Log/info.log") InfoHandler.setLevel(logging.INFO) INFOFORMATTER = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s') InfoHandler.setFormatter(INFOFORMATTER) Logger.addHandler(InfoHandler) ErrHandler = logging.FileHandler("TBTracker_Log/error.log") ErrHandler.setLevel(logging.ERROR) ERRORFORMATTER = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s] File "%(filename)s", line %(lineno)d: %(message)s') ErrHandler.setFormatter(ERRORFORMATTER) Logger.addHandler(ErrHandler) import math import matplotlib.dates as mdate import matplotlib.pyplot as plt import os import random import requests import sqlite3 as sqlite import sys import xlwt import yaml from bs4 import BeautifulSoup from PIL import Image from io import BytesIO from selenium import webdriver from selenium.common.exceptions import * from selenium.webdriver.common.desired_capabilities import DesiredCapabilities USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0' HEADERS = {'user-agent': USER_AGENT} DCAP = dict(DesiredCapabilities.PHANTOMJS) DCAP["phantomjs.page.settings.userAgent"] = USER_AGENT DCAP["phantomjs.page.settings.loadImages"] = False from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC DRIVER = webdriver.PhantomJS(desired_capabilities=DCAP, service_args=[ '--load-images=no', # 禁止加载图片 '--disk-cache=yes', # 开启浏览器缓存 '--ignore-ssl-errors=true', # 忽略HTTPS错误 '--ssl-protocol=TLSv1']) DRIVER.set_window_size(1280, 1024) # ********************PyQt5相关模块导入******************** from PyQt5.QtCore import QEvent from PyQt5.QtCore import Qt from PyQt5.QtCore import QVariant from PyQt5.QtGui import QFont from PyQt5.QtGui import QIcon from PyQt5.QtGui import QImage from PyQt5.QtGui import QPixmap from PyQt5.QtWidgets import qApp from PyQt5.QtWidgets import QAbstractItemView from PyQt5.QtWidgets import QComboBox from PyQt5.QtWidgets import QFrame from PyQt5.QtWidgets import QGridLayout from PyQt5.QtWidgets import QHBoxLayout from PyQt5.QtWidgets import QHeaderView from PyQt5.QtWidgets import QLabel from PyQt5.QtWidgets import QLineEdit from PyQt5.QtWidgets import QPlainTextEdit from PyQt5.QtWidgets import QProgressBar from PyQt5.QtWidgets import QRadioButton from PyQt5.QtWidgets import QSlider from PyQt5.QtWidgets import QTableWidget from PyQt5.QtWidgets import QTableWidgetItem from PyQt5.QtWidgets import QTabWidget from PyQt5.QtWidgets import QTextEdit from PyQt5.QtWidgets import QTreeWidget from PyQt5.QtWidgets import QTreeWidgetItem from PyQt5.QtWidgets import QTreeWidgetItemIterator from PyQt5.QtWidgets import QVBoxLayout from PyQt5.QtWidgets import QWidget # ********************用户自定义相关模块导入******************** from TBTracker_AuxiliaryFunction import * from TBTracker_Gui.TBTracker_Gui_Button import * from TBTracker_Gui.TBTracker_Gui_Canvas import * from TBTracker_Gui.TBTracker_Gui_Dialog import * ''' @author : Zhou Jian @email : [email protected] @version : V1.0 @date : 2018.04.22 ''' class TBTrackerMainWindow(QWidget): def __init__(self): super(TBTrackerMainWindow, self).__init__() self.create_main_window() def create_main_window(self): self.setWindowTitle("商品数据追踪系统") 
self.setWindowIcon(QIcon('TBTracker_Ui/Spider.ico')) self.width, self.height = get_current_screen_size() self.setMinimumSize(self.width, self.height) self.setMaximumSize(self.width, self.height) self.set_widgets() self.setLayout(self.layout) self.show_product_id() self.show_database() self.plot_product_tree() def set_widgets(self): labelFont = QFont() labelFont.setPointSize(12) self.table_1_Font = QFont() self.table_1_Font.setPointSize(10) self.table_1_Font.setStyleName("Bold") self.table_2_Font = QFont() self.table_2_Font.setPointSize(12) self.table_2_Font.setStyleName("Bold") # ***************************************************************************************** firstWidget = QWidget() self.searchLineEdit = QLineEdit() searchButton = SearchButton() searchButton.clicked.connect(self.call_spider) searchRegionLayout = QHBoxLayout() searchRegionLayout.setContentsMargins(240, 0, 240, 0) searchRegionLayout.setSpacing(20) searchRegionLayout.addWidget(self.searchLineEdit) searchRegionLayout.addWidget(searchButton) self.taobaoDataTable = QTableWidget(0, 4) self.taobaoDataTable.horizontalHeader().hide() self.taobaoDataTable.verticalHeader().hide() self.taobaoDataTable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) self.taobaoDataTable.setEditTriggers(QAbstractItemView.NoEditTriggers) self.productIDTable = QTableWidget(0, 1) self.productIDTable.setHorizontalHeaderLabels(["已有商品标签"]) self.productIDTable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) self.productIDTable.setSelectionMode(QAbstractItemView.NoSelection) self.productIDTable.setEditTriggers(QAbstractItemView.NoEditTriggers) tableRegionLayout = QHBoxLayout() tableRegionLayout.addWidget(self.taobaoDataTable) tableRegionLayout.addWidget(self.productIDTable) tableRegionLayout.setStretchFactor(self.taobaoDataTable, 3) tableRegionLayout.setStretchFactor(self.productIDTable, 1) self.progressBar = QProgressBar() self.addProductIDLineEdit = QLineEdit() addProductIDButton = AddButton() addProductIDButton.clicked.connect(self.add_product_id) self.attachProductIDLineEdit = QLineEdit() attachProductIDButton = AttachButton() attachProductIDButton.clicked.connect(self.attach_product_id) importDataButton = ImportButton() importDataButton.clicked.connect(self.import_data) dataOperateLayout = QHBoxLayout() dataOperateLayout.addStretch() dataOperateLayout.addWidget(self.addProductIDLineEdit) dataOperateLayout.addSpacing(5) dataOperateLayout.addWidget(addProductIDButton) dataOperateLayout.addSpacing(25) dataOperateLayout.addWidget(self.attachProductIDLineEdit) dataOperateLayout.addSpacing(5) dataOperateLayout.addWidget(attachProductIDButton) dataOperateLayout.addSpacing(25) dataOperateLayout.addWidget(importDataButton) firstWidgetLayout = QVBoxLayout() firstWidgetLayout.setSpacing(10) firstWidgetLayout.addLayout(searchRegionLayout) firstWidgetLayout.addLayout(tableRegionLayout) firstWidgetLayout.addWidget(self.progressBar) firstWidgetLayout.addLayout(dataOperateLayout) firstWidget.setLayout(firstWidgetLayout) # ***************************************************************************************** secondWidget = QWidget() self.DBTable = QTableWidget(0, 6) self.DBTable.setHorizontalHeaderLabels(["商品标识", "标题", "店铺名", "价格", "淘宝价", "是否删除数据?"]) self.DBTable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) self.DBTable.setSelectionMode(QAbstractItemView.NoSelection) self.DBTable.setEditTriggers(QAbstractItemView.NoEditTriggers) self.insertButton = InsertButton() self.insertButton.clicked.connect(self.add_data) 
deleteButton = DeleteButton() deleteButton.clicked.connect(self.delete_data) DBOperateLayout = QHBoxLayout() DBOperateLayout.addStretch() DBOperateLayout.setSpacing(20) DBOperateLayout.addWidget(self.insertButton) DBOperateLayout.addWidget(deleteButton) secondWidgetLayout = QVBoxLayout() secondWidgetLayout.setSpacing(10) secondWidgetLayout.addWidget(self.DBTable) secondWidgetLayout.addLayout(DBOperateLayout) secondWidget.setLayout(secondWidgetLayout) # ***************************************************************************************** thirdWidget = QWidget() self.productTree = QTreeWidget() self.productTree.setColumnCount(2) self.productTree.setHeaderLabels(['商品标识','商品数量']) self.productTree.header().setSectionResizeMode(QHeaderView.Stretch) self.productTree.setSelectionMode(QAbstractItemView.NoSelection) productTreeLayout = QHBoxLayout() productTreeLayout.addWidget(self.productTree) upLayout = QHBoxLayout() upLayout.setSpacing(20) upLayout.addLayout(productTreeLayout) globalSelectButton = GlobalSelectButton() globalSelectButton.clicked.connect(self.select_global) allSelectButton = AllSelectButton() allSelectButton.clicked.connect(self.select_all) removeButton = DeleteButton() removeButton.clicked.connect(self.remove_data) exportButton = ExportButton() exportButton.clicked.connect(self.export_data) dataExportLayout = QHBoxLayout() dataExportLayout.addStretch() dataExportLayout.setSpacing(20) dataExportLayout.addWidget(globalSelectButton) dataExportLayout.addWidget(allSelectButton) dataExportLayout.addWidget(removeButton) dataExportLayout.addWidget(exportButton) thirdWidgetLayout = QVBoxLayout() thirdWidgetLayout.setSpacing(20) thirdWidgetLayout.setContentsMargins(50, 20, 50, 20) thirdWidgetLayout.addLayout(upLayout) thirdWidgetLayout.addLayout(dataExportLayout) thirdWidget.setLayout(thirdWidgetLayout) # ***************************************************************************************** fourthWidget = QWidget() self.historyDataCanvas = HistoryDataCanvas() historyDataLayout = QVBoxLayout() historyDataLayout.addWidget(self.historyDataCanvas) self.selectCommodityButton = SelectCommodityButton() self.monthlyDataButton = MonthlyDataButton() self.yearlyDataButton = YearlyDataButton() manualUpdateButton = ManualUpdateButton() manualUpdateButton.clicked.connect(self.manual_update) buttonLayout = QHBoxLayout() buttonLayout.addStretch() buttonLayout.setSpacing(30) buttonLayout.addWidget(self.selectCommodityButton) buttonLayout.addWidget(self.monthlyDataButton) buttonLayout.addWidget(self.yearlyDataButton) buttonLayout.addWidget(manualUpdateButton) fourthWidgetLayout = QVBoxLayout() fourthWidgetLayout.setSpacing(10) fourthWidgetLayout.setContentsMargins(50, 0, 50, 10) fourthWidgetLayout.addLayout(historyDataLayout) fourthWidgetLayout.addLayout(buttonLayout) fourthWidget.setLayout(fourthWidgetLayout) # ***************************************************************************************** self.tabWidget = QTabWidget() self.tabWidget.addTab(firstWidget, "数据爬虫") self.tabWidget.addTab(secondWidget, "数据后台") self.tabWidget.addTab(thirdWidget, "数据导出") self.tabWidget.addTab(fourthWidget, "数据跟踪") self.layout = QVBoxLayout() self.layout.setContentsMargins(50, 20, 50, 13) self.layout.addWidget(self.tabWidget) def closeEvent(self, event): pass @staticmethod def remove_pics(): root_dir = 'TBTracker_Temp' for root, dirs, files in os.walk(root_dir): Logger.info('正在删除图片') for filename in files: if filename != "__init__.py": os.remove(root+'/'+filename) Logger.info('图片删除完毕!') def find_out_real_price(self, 
i, shop_url, match_price): price, taobao_price = '', '' try: DRIVER.get(shop_url) Logger.info("第{0}家店铺的商品页面读取成功".format(i)) try: price = WebDriverWait(DRIVER, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'originPrice'))).text.lstrip("¥").strip() except Exception as e: pass try: taobao_price = WebDriverWait(DRIVER, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'J_actPrice'))).text.lstrip("¥").strip() except Exception as e: pass if price == '' and taobao_price == '': try: J_StrPriceModBox = WebDriverWait(DRIVER, 10).until( EC.presence_of_element_located((By.ID, 'J_StrPriceModBox'))) try: price = WebDriverWait(J_StrPriceModBox, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tb-rmb-num'))).text.strip() except Exception as e: try: price = WebDriverWait(J_StrPriceModBox, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tm-price'))).text.strip() except Exception as e: pass except Exception as e: pass try: J_PromoPrice = WebDriverWait(DRIVER, 10).until( EC.presence_of_element_located((By.ID, 'J_PromoPrice'))) try: taobao_price = WebDriverWait(J_PromoPrice, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tb-rmb-num'))).text.strip() except Exception as e: try: taobao_price = WebDriverWait(J_PromoPrice, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tm-price'))).text.strip() except Exception as e: pass except Exception as e: pass if price == '' and taobao_price == '': try: tm_price_panel = WebDriverWait(DRIVER, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tm-price-panel'))) price = WebDriverWait(tm_price_panel, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tm-price'))).text.strip() except Exception as e: pass try: tm_promo_panel = WebDriverWait(DRIVER, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tm-promo-panel'))) taobao_price = WebDriverWait(tm_promo_panel, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'tm-price'))).text.strip() except Exception as e: pass except Exception as e: Logger.error(e) Logger.warn('第{0}家店铺的商品页面读取失败'.format(i)) Logger.warn(shop_url) finally: if price == '' and taobao_price != '': price = taobao_price elif price != '' and taobao_price == '': taobao_price = price elif price == '' and taobao_price == '': price = taobao_price = match_price return price, taobao_price def call_spider(self): searchWord = self.searchLineEdit.text().strip() if searchWord != '': Logger.info(''' ┏┓   ┏┓ ┏┛┻━━━┛┻┓ ┃       ┃ ┃   ━   ┃ ┃ ┳┛ ┗┳ ┃ ┃       ┃ ┃   ┻   ┃ ┃       ┃ ┗━┓   ┏━┛   ┃   ┃神兽保佑   ┃   ┃代码无BUG!   
┃   ┗━━━┓   ┃       ┣┓   ┃       ┏┛   ┗┓┓┏━┳┓┏┛    ┃┫┫ ┃┫┫    ┗┻┛ ┗┻┛ ''') self.remove_pics() try: webDriver = webdriver.PhantomJS(desired_capabilities=DCAP, service_args=[ '--load-images=no', # 禁止加载图片 '--disk-cache=yes', # 开启浏览器缓存 '--ignore-ssl-errors=true', # 忽略HTTPS错误 '--ssl-protocol=TLSv1']) webDriver.set_window_size(1280, 1024) try: Logger.info("模拟登录淘宝网") webDriver.get("https://www.taobao.com/") try: search_combobox = WebDriverWait(webDriver, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'search-combobox-input-wrap'))) search_input = WebDriverWait(search_combobox, 10).until( EC.presence_of_element_located((By.ID, 'q'))) # 发送搜索词 search_input.send_keys(searchWord.strip()) search_button_wrap = WebDriverWait(webDriver, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'search-button'))) search_button = WebDriverWait(search_button_wrap, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'btn-search'))) search_button.click() try: Logger.info('搜索成功,正在返回搜索结果') mainsrp_itemlist = WebDriverWait(webDriver, 10).until( EC.presence_of_element_located((By.ID, 'mainsrp-itemlist'))) m_itemlist = WebDriverWait(mainsrp_itemlist, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'm-itemlist'))) items = WebDriverWait(m_itemlist, 10).until( EC.presence_of_all_elements_located((By.CLASS_NAME, 'items')))[0] allItems = WebDriverWait(items, 10).until( EC.presence_of_all_elements_located((By.CLASS_NAME, 'J_MouserOnverReq')) ) self.returnCNT = len(allItems) Logger.info('总共返回{0}个搜索结果'.format(self.returnCNT)) self.taobaoDataTable.clearContents() self.taobaoDataTable.setRowCount(self.returnCNT * 6) ## TODO imageLabel = [QLabel() for _ in range(self.returnCNT)] titleItem = [QTableWidgetItem() for _ in range(self.returnCNT)] shopItem = [QTableWidgetItem("店铺:") for _ in range(self.returnCNT)] shopValueItem = [QTableWidgetItem() for _ in range(self.returnCNT)] sourceItem = [QTableWidgetItem("来源地:") for _ in range(self.returnCNT)] sourceValueItem = [QTableWidgetItem() for _ in range(self.returnCNT)] priceItem = [QTableWidgetItem("价格:") for _ in range(self.returnCNT)] priceValueItem = [QTableWidgetItem() for _ in range(self.returnCNT)] tbPriceItem = [QTableWidgetItem("淘宝价:") for _ in range(self.returnCNT)] tbPriceValueItem = [QTableWidgetItem() for _ in range(self.returnCNT)] dealItem = [QTableWidgetItem("付款人数:") for _ in range(self.returnCNT)] dealValueItem = [QTableWidgetItem() for _ in range(self.returnCNT)] isJoinedItem = [QTableWidgetItem("是否加入价格跟踪队列?") for _ in range(self.returnCNT)] checkItem = [QTableWidgetItem() for _ in range(self.returnCNT)] self.URLList = [] for (j, item) in enumerate(allItems): try: # 抓取商品图 Logger.info('正在爬取第{0}家店铺的数据'.format(j + 1)) pic_box = WebDriverWait(item, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'pic-box'))) itemPic = WebDriverWait(pic_box, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'J_ItemPic'))) itemPic_id = itemPic.get_attribute('id') itemPic_data_src = itemPic.get_attribute('data-src') if not itemPic_data_src.startswith("https:"): itemPic_data_src = "https:" + itemPic_data_src itemPic_alt = itemPic.get_attribute('alt').strip() if itemPic_id == '': random_serial = '' for _ in range(12): random_serial += str(random.randint(0, 10)) itemPic_id = "J_Itemlist_Pic_" + random_serial Logger.info("正在爬取第{0}家店铺的商品图片".format(j + 1)) try: stream = requests.get(itemPic_data_src, timeout=10, headers=HEADERS) except requests.RequestException as e: Logger.error(e) finally: Logger.info("第{0}家店铺的商品图片爬取完毕".format(j + 1)) try: im = 
Image.open(BytesIO(stream.content)) if im.mode != 'RGB': im = im.convert('RGB') im.save("TBTracker_Temp/{0}.jpeg".format(itemPic_id)) Logger.info("第{0}家店铺的商品图片保存完毕".format(j + 1)) self.taobaoDataTable.setSpan(j * 6, 0, 6, 1) imageLabel[j].setPixmap(QPixmap.fromImage(QImage("TBTracker_Temp/{0}.jpeg".format(itemPic_id)).scaled(int(230 * 0.7), int(230 * 0.7)))) imageLabel[j].setAlignment(Qt.AlignHCenter | Qt.AlignVCenter) self.taobaoDataTable.setCellWidget(j * 6, 0, imageLabel[j]) except Exception as e: Logger.error(e) ctx_box = WebDriverWait(item, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'ctx-box'))) # 抓取商品价格和店铺网址 row_row_2 = WebDriverWait(ctx_box, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'row-2'))) item_price_and_link = WebDriverWait(row_row_2, 10).until( EC.presence_of_element_located((By.TAG_NAME, 'a')) ) item_match_price = item_price_and_link.get_attribute('trace-price') item_link = item_price_and_link.get_attribute('href') if not item_link.startswith("https:"): item_link = "https:" + item_link self.URLList.append(item_link) status_code = requests.get(item_link).status_code Logger.info(status_code) if status_code == 200: item_title = itemPic_alt # 淘宝价格有时候会为空,暂时性的解决方案 item_price, item_taobao_price = self.find_out_real_price(j + 1, item_link, item_match_price) if item_taobao_price == '': item_taobao_price = item_price Logger.info('第{0}家店铺的商品价格和链接爬取完毕'.format(j + 1)) self.taobaoDataTable.setSpan(j * 6, 1, 1, 2) titleItem[j].setData(Qt.DisplayRole, QVariant(item_title)) titleItem[j].setFont(self.table_1_Font) titleItem[j].setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6, 1, titleItem[j]) priceItem[j].setFont(self.table_2_Font) priceItem[j].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6 + 3, 1, priceItem[j]) priceValueItem[j].setData(Qt.DisplayRole, QVariant(item_price)) self.taobaoDataTable.setItem(j * 6 + 3, 2, priceValueItem[j]) tbPriceItem[j].setFont(self.table_2_Font) tbPriceItem[j].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6 + 4, 1, tbPriceItem[j]) tbPriceValueItem[j].setData(Qt.DisplayRole, QVariant(item_taobao_price)) self.taobaoDataTable.setItem(j * 6 + 4, 2, tbPriceValueItem[j]) else: Logger.warn('第{0}家店铺的商品价格和链接爬取失败'.format(j + 1)) # 抓取商品交易量 row_row_1 = WebDriverWait(ctx_box, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'row-1'))) item_deal = WebDriverWait(row_row_1, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'deal-cnt'))).text.strip() Logger.info('第{0}家店铺的商品交易量爬取完毕'.format(j + 1)) dealItem[j].setFont(self.table_2_Font) dealItem[j].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6 + 5, 1, dealItem[j]) dealValueItem[j].setData(Qt.DisplayRole, QVariant(item_deal)) self.taobaoDataTable.setItem(j * 6 + 5, 2, dealValueItem[j]) # 抓取店铺名和店铺所在地 row_row_3 = WebDriverWait(ctx_box, 10).until( EC.presence_of_element_located((By.CLASS_NAME, 'row-3'))) item_shop_name = WebDriverWait(row_row_3, 10).until( EC.presence_of_all_elements_located((By.TAG_NAME, 'span')))[4].text.strip() Logger.info('第{0}家店铺的商铺名爬取完毕'.format(j + 1)) shopItem[j].setFont(self.table_2_Font) shopItem[j].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6 + 1, 1, shopItem[j]) shopValueItem[j].setData(Qt.DisplayRole, QVariant(item_shop_name)) self.taobaoDataTable.setItem(j * 6 + 1, 2, shopValueItem[j]) item_location = WebDriverWait(row_row_3, 10).until( 
EC.presence_of_element_located((By.CLASS_NAME, 'location'))).text.strip() Logger.info('第{0}家店铺的货源地爬取完毕'.format(j + 1)) if item_location == '': item_location = "抓取为空" sourceItem[j].setFont(self.table_2_Font) sourceItem[j].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6 + 2, 1, sourceItem[j]) sourceValueItem[j].setData(Qt.DisplayRole, QVariant(item_location)) self.taobaoDataTable.setItem(j * 6 + 2, 2, sourceValueItem[j]) isJoinedItem[j].setFont(self.table_1_Font) isJoinedItem[j].setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter) self.taobaoDataTable.setItem(j * 6, 3, isJoinedItem[j]) self.taobaoDataTable.setSpan(j * 6 + 1, 3, 5, 1) checkItem[j].setCheckState(False) self.taobaoDataTable.setItem(j * 6 + 1, 3, checkItem[j]) self.progressBar.setValue(math.ceil(((j + 1)/self.returnCNT) * 100)) except Exception as e: Logger.error(e) webDriver.quit() DRIVER.quit() Logger.info("数据爬取完毕") messageDialog = MessageDialog() messageDialog.information(self, "消息提示", "数据爬取完毕!") except NoSuchElementException as e: webDriver.quit() DRIVER.quit() Logger.error(e) except NoSuchElementException as e: webDriver.quit() DRIVER.quit() Logger.error(e) except TimeoutException as e: webDriver.quit() DRIVER.quit() Logger.error(e) except WebDriverException as e: Logger.error(e) else: messageDialog = MessageDialog() messageDialog.warning(self, "消息提示", "请先输入搜索词!") def add_product_id(self): productID = self.addProductIDLineEdit.text().strip() if productID != '': conn = sqlite.connect('TBTracker_DB/TBTrackerTag.db') c = conn.cursor() c.execute('select count(*) from tag where TagName="{}"'.format(productID)) count = c.fetchone()[0] if count == 0: c.execute('insert into tag values ("{}", "{}")'.format(productID, get_current_system_time())) conn.commit() c.close() messageDialog = MessageDialog() messageDialog.information(self, "消息提示", "标签入库成功!") else: messageDialog = MessageDialog() messageDialog.information(self, "消息提示", "标签已经存在!") else: messageDialog = MessageDialog() messageDialog.warning(self, "消息提示", "请先填写商品标签!") def attach_product_id(self): self.productID = self.attachProductIDLineEdit.text() messageDialog = MessageDialog() messageDialog.information(self, "消息提示", "标签标注成功!") def import_data(self): try: for j in range(self.returnCNT): flag = self.taobaoDataTable.item(j * 6 + 1, 3).checkState() if flag == 2: conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() c.execute('insert into product values ("{}", "{}", "{}", "{}", "{}", "{}", "{}")'.format( self.productID, self.URLList[j], self.taobaoDataTable.item(j * 6, 1).text(), self.taobaoDataTable.item(j * 6 + 1, 2).text(), self.taobaoDataTable.item(j * 6 + 3, 2).text(), self.taobaoDataTable.item(j * 6 + 4, 2).text(), get_current_system_time())) conn.commit() c.close() messageDialog = MessageDialog() messageDialog.information(self, "消息提示", " 数据成功入库! 
") self.show_database() except AttributeError as e: messageDialog = MessageDialog() messageDialog.warning(self, "消息提示", "未选择任何待导入的数据!") def show_product_id(self): conn_1 = sqlite.connect('TBTracker_DB/TBTrackerTag.db') c_1 = conn_1.cursor() conn_2 = sqlite.connect('TBTracker_DB/TBTracker.db') c_2 = conn_2.cursor() c_1.execute('select * from tag') tagQueries = c_1.fetchall() CNT = len(tagQueries) _CNT = CNT for j in range(CNT): c_2.execute('select count(*) from product where ProductName="{}"'.format(tagQueries[j][0])) cnt = c_2.fetchone() if cnt[0] == 0: c_1.execute('delete from tag where TagName="{}"'.format(tagQueries[j][0])) conn_1.commit() _CNT -= 1 CNT = _CNT self.productIDTable.setRowCount(CNT) for j in range(CNT): self.productIDTable.setItem(j, 0, QTableWidgetItem(tagQueries[j][0])) c_1.close() c_2.close() def show_database(self): conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() c.execute('select * from product order by CreateTime desc') queries = c.fetchall() self.DBCNT = len(queries) c.close() self.DBTable.setRowCount(self.DBCNT) for j in range(self.DBCNT): self.DBTable.setItem(j, 0, QTableWidgetItem(queries[j][0])) self.DBTable.setItem(j, 1, QTableWidgetItem(queries[j][2])) self.DBTable.setItem(j, 2, QTableWidgetItem(queries[j][3])) self.DBTable.setItem(j, 3, QTableWidgetItem(queries[j][4])) self.DBTable.setItem(j, 4, QTableWidgetItem(queries[j][5])) flag = QTableWidgetItem() flag.setCheckState(False) self.DBTable.setItem(j, 5, flag) def add_data(self): pass def delete_data(self): notDeleteCNT = 0 for j in range(self.DBCNT): flag = self.DBTable.item(j, 5).checkState() if flag == Qt.Checked: conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() c.execute('delete from product where ProductName="{}" and Title="{}" and ShopName="{}" and Price="{}"'.format( self.DBTable.item(j, 0).text(), self.DBTable.item(j, 1).text(), self.DBTable.item(j, 2).text(), self.DBTable.item(j, 3).text())) conn.commit() c.close() else: notDeleteCNT += 1 if notDeleteCNT == self.DBCNT: messageDialog = MessageDialog() messageDialog.warning(self, "消息提示", " 无效操作! 
") else: self.show_database() def plot_product_tree(self): conn = sqlite.connect('TBTracker_DB/TBTrackerTag.db') c = conn.cursor() c.execute('select * from tag') tagQueries = c.fetchall() c.close() conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() roots = [QTreeWidgetItem(self.productTree) for _ in range(len(tagQueries))] for i, tagQuery in enumerate(tagQueries): roots[i].setText(0, tagQuery[0]) roots[i].setFont(0, self.table_2_Font) roots[i].setCheckState(0, False) c.execute('select ShopName from product where ProductName="{}"'.format(tagQuery[0])) shopNames = list(set([query[0] for query in c.fetchall()])) childs = [QTreeWidgetItem(roots[i]) for _ in range(len(shopNames))] for j, child in enumerate(childs): child.setText(0, shopNames[j]) child.setFont(0, self.table_1_Font) child.setCheckState(0, False) c.execute('select count(*) from product where ProductName="{}" and ShopName="{}"'.format(tagQuery[0], shopNames[j])) child.setText(1, str(c.fetchone()[0])) self.productTree.addTopLevelItem(roots[i]) c.close() def select_global(self): currentTopLevelItemIndex = 0 it = QTreeWidgetItemIterator(self.productTree) while it.value(): if it.value() is self.productTree.topLevelItem(currentTopLevelItemIndex): currentTopLevelItemIndex += 1 it.value().setCheckState(0, Qt.Checked) for _ in range(it.value().childCount()): it = it.__iadd__(1) it.value().setCheckState(0, Qt.Checked) it = it.__iadd__(1) def select_all(self): currentTopLevelItemIndex = 0 it = QTreeWidgetItemIterator(self.productTree) while it.value(): if it.value() is self.productTree.topLevelItem(currentTopLevelItemIndex): currentTopLevelItemIndex += 1 if it.value().checkState(0) == Qt.Checked: for _ in range(it.value().childCount()): it = it.__iadd__(1) it.value().setCheckState(0, Qt.Checked) it = it.__iadd__(1) def remove_data(self): conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() currentTopLevelItemIndex = 0 it = QTreeWidgetItemIterator(self.productTree) while it.value(): if it.value() is self.productTree.topLevelItem(currentTopLevelItemIndex): currentTopLevelItemIndex += 1 else: if it.value().checkState(0) == Qt.Checked: c.execute('delete from product where ProductName="{}" and ShopName="{}"'.format( it.value().parent().text(0), it.value().text(0))) conn.commit() it = it.__iadd__(1) c.close() self.show_database() def export_data(self): mainDirectory = check_os() currentFileDialog = SaveFileDialog() fileName, filetype = currentFileDialog.save_file(self, caption="手动保存数据", directory=mainDirectory, filter="Excel Files (*.xlsx)") conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() currentTopLevelItemIndex = 0 exportDataList = [] it = QTreeWidgetItemIterator(self.productTree) while it.value(): if it.value() is self.productTree.topLevelItem(currentTopLevelItemIndex): currentTopLevelItemIndex += 1 else: if it.value().checkState(0) == Qt.Checked: c.execute('select * from product where ProductName="{}" and ShopName="{}"'.format( it.value().parent().text(0), it.value().text(0))) queries = c.fetchall() exportDataList += queries it = it.__iadd__(1) c.close() excel = xlwt.Workbook() sheet = excel.add_sheet('商品数据', cell_overwrite_ok=True) sheet.write(0, 0, "商品标识") sheet.write(0, 1, "URL") sheet.write(0, 2, "标题") sheet.write(0, 3, "店铺名") sheet.write(0, 4, "价格") sheet.write(0, 5, "淘宝价") sheet.write(0, 6, "上次更新时间") for i, data in enumerate(exportDataList): sheet.write(i + 1, 0, data[0]) sheet.write(i + 1, 1, data[1]) sheet.write(i + 1, 2, data[2]) sheet.write(i + 1, 3, data[3]) sheet.write(i + 1, 
4, data[4]) sheet.write(i + 1, 5, data[5]) sheet.write(i + 1, 6, data[6]) excel.save("{}.xlsx".format(fileName)) def plot_history_data(self, dateList, priceList): dateList = generate_date_list((2016, 12, 1), (2017, 1, 1)) priceList = [random.randint(100, 300) for _ in range(len(dateList))] self.historyDataCanvas.axes.plot_date(dateList, priceList, 'r-o', linewidth=2) self.historyDataCanvas.axes.xaxis_date() self.historyDataCanvas.axes.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d')) self.historyDataCanvas.axes.set_xticks(dateList) self.historyDataCanvas.axes.set_xticklabels(dateList, rotation=90, fontsize=6) self.historyDataCanvas.axes.set_xlabel("时间轴", fontproperties=FONT, fontsize=10) self.historyDataCanvas.axes.set_yticks([100 * i for i in range(11)]) self.historyDataCanvas.axes.set_ylabel("价格数据/¥", fontproperties=FONT, fontsize=10) self.historyDataCanvas.axes.set_title("商品历史数据图", fontproperties=FONT, fontsize=14) self.historyDataCanvas.draw() def select_commodity(self): pass def select_month(self): pass def select_year(self): pass def manual_update(self): import subprocess child = subprocess.Popen(["sudo", "python3", "TBTracker_RoutineSpider.py"]) child.wait() messageDialog = MessageDialog() messageDialog.information(self, "消息提示", "手动更新完毕!") def eventFilter(self, source, event): if event.type() == QEvent.MouseButtonPress: pass return QWidget.eventFilter(self, source, event) class TBTrackerAddDataWindow(QWidget): def __init__(self): super(TBTrackerAddDataWindow, self).__init__() self.create_main_window() def create_main_window(self): self.setWindowTitle("添加数据") self.setWindowIcon(QIcon('TBTracker_Ui/Spider.ico')) self.setMinimumSize(500, 350) self.setMaximumSize(500, 350) self.set_widgets() self.setLayout(self.layout) def set_widgets(self): self.productIDLineEdit = QLineEdit() self.URLLineEdit = QLineEdit() self.titleLineEdit = QLineEdit() self.shopNameLineEdit = QLineEdit() self.priceLineEdit = QLineEdit() self.taobaoPriceLineEdit = QLineEdit() self.createTimeLineEdit = QLineEdit() inputLayout = QGridLayout() inputLayout.addWidget(QLabel("商品标识"), 0, 0, 1, 1) inputLayout.addWidget(self.productIDLineEdit, 0, 1, 1, 3) inputLayout.addWidget(QLabel("URL"), 1, 0, 1, 1) inputLayout.addWidget(self.URLLineEdit, 1, 1, 1, 3) inputLayout.addWidget(QLabel("标题"), 2, 0, 1, 1) inputLayout.addWidget(self.titleLineEdit, 2, 1, 1, 3) inputLayout.addWidget(QLabel("店铺名"), 3, 0, 1, 1) inputLayout.addWidget(self.shopNameLineEdit, 3, 1, 1, 3) inputLayout.addWidget(QLabel("价格"), 4, 0, 1, 1) inputLayout.addWidget(self.priceLineEdit, 4, 1, 1, 3) inputLayout.addWidget(QLabel("淘宝价"), 5, 0, 1, 1) inputLayout.addWidget(self.taobaoPriceLineEdit, 5, 1, 1, 3) self.confirmButton = ConfirmButton() self.confirmButton.clicked.connect(self.confirm) cancelButton = CancelButton() cancelButton.clicked.connect(self.cancel) operateLayout = QHBoxLayout() operateLayout.addStretch() operateLayout.setSpacing(20) operateLayout.addWidget(self.confirmButton) operateLayout.addWidget(cancelButton) self.layout = QVBoxLayout() self.layout.setContentsMargins(50, 20, 50, 20) self.layout.setSpacing(10) self.layout.addLayout(inputLayout) self.layout.addLayout(operateLayout) def confirm(self): pass def cancel(self): self.close() class TBTrackerSelectCommodityWindow(QWidget): def __init__(self): super(TBTrackerSelectCommodityWindow, self).__init__() self.create_main_window() def create_main_window(self): self.setWindowTitle("选择商品") self.setWindowIcon(QIcon('TBTracker_Ui/Spider.ico')) self.setMinimumSize(700, 350) 
self.setMaximumSize(700, 350) self.set_widgets() self.setLayout(self.layout) def set_widgets(self): self.pull_all_commodities() self.confirmButton = ConfirmButton() cancelButton = CancelButton() cancelButton.clicked.connect(self.cancel) operateLayout = QHBoxLayout() operateLayout.addStretch() operateLayout.setSpacing(20) operateLayout.addWidget(self.confirmButton) operateLayout.addWidget(cancelButton) self.layout = QVBoxLayout() self.layout.setContentsMargins(40, 20, 40, 20) self.layout.setSpacing(10) self.layout.addWidget(self.commodityTable) self.layout.addLayout(operateLayout) def confirm(self): pass def cancel(self): self.close() def pull_all_commodities(self): conn = sqlite.connect('TBTracker_DB/TBTracker.db') c = conn.cursor() c.execute('select Title from product') titleQueries = c.fetchall() c.close() self.commodityTable = QTableWidget(len(titleQueries), 2) self.commodityTable.horizontalHeader().hide() self.commodityTable.verticalHeader().hide() self.commodityTable.setSelectionMode(QAbstractItemView.NoSelection) self.commodityTable.setEditTriggers(QAbstractItemView.NoEditTriggers) self.commodityTable.setColumnWidth(0, 25) self.commodityTable.setColumnWidth(1, 577) radioButtonList = [QRadioButton() for i in range(len(titleQueries))] commodityList = [QTableWidgetItem(titleQueries[i][0]) for i in range(len(titleQueries))] for i in range(len(titleQueries)): self.commodityTable.setCellWidget(i, 0, radioButtonList[i]) self.commodityTable.setItem(i, 1, commodityList[i]) class TBTrackerSelectMonthWindow(QWidget): def __init__(self): super(TBTrackerSelectMonthWindow, self).__init__() self.create_main_window() def create_main_window(self): self.setWindowTitle("选择月份") self.setWindowIcon(QIcon('TBTracker_Ui/Spider.ico')) self.setMinimumSize(250, 100) self.setMaximumSize(250, 100) self.set_widgets() self.setLayout(self.layout) def set_widgets(self): self.monthComboBox = QComboBox() monthList = ["一月份数据", "二月份数据", "三月份数据", "四月份数据", "五月份数据", "六月份数据", "七月份数据", "八月份数据", "九月份数据", "十月份数据", "十一月份数据", "十二月份数据"] self.monthComboBox.addItems(monthList) self.confirmButton = ConfirmButton() cancelButton = CancelButton() cancelButton.clicked.connect(self.cancel) operateLayout = QHBoxLayout() operateLayout.addStretch() operateLayout.setSpacing(20) operateLayout.addWidget(self.confirmButton) operateLayout.addWidget(cancelButton) self.layout = QVBoxLayout() self.layout.setContentsMargins(20, 20, 20,10) self.layout.setSpacing(10) self.layout.addWidget(self.monthComboBox) self.layout.addLayout(operateLayout) def confirm(self): pass def cancel(self): self.close() class TBTrackerSelectYearWindow(QWidget): def __init__(self): super(TBTrackerSelectYearWindow, self).__init__() self.create_main_window() def create_main_window(self): self.setWindowTitle("选择年份") self.setWindowIcon(QIcon('TBTracker_Ui/Spider.ico')) self.setMinimumSize(250, 100) self.setMaximumSize(250, 100) self.set_widgets() self.setLayout(self.layout) def set_widgets(self): self.yearComboBox = QComboBox() self.get_year_range() self.yearComboBox.addItems(self.yearList) self.confirmButton = ConfirmButton() cancelButton = CancelButton() cancelButton.clicked.connect(self.cancel) operateLayout = QHBoxLayout() operateLayout.addStretch() operateLayout.setSpacing(20) operateLayout.addWidget(self.confirmButton) operateLayout.addWidget(cancelButton) self.layout = QVBoxLayout() self.layout.setContentsMargins(20, 20, 20, 10) self.layout.setSpacing(10) self.layout.addWidget(self.yearComboBox) self.layout.addLayout(operateLayout) def confirm(self): pass def 
cancel(self): self.close() def get_year_range(self): import datetime current_year = datetime.datetime.now().year self.yearList = [str(x) for x in range(2017, current_year + 1)]
mit
jpoon/generalfusion
average/average.py
1
1205
#!/usr/bin/env python """Calculates average given waveforms""" import sys import os import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt def average(experimentDir, outputFile): for dirpath, _, filenames in os.walk(experimentDir): sensorList = [] for filename in filenames: filepath = os.path.join(dirpath, filename) sensor = open(filepath).read().splitlines() sensorList.append(sensor) label = os.path.splitext(filename)[0] plt.plot(sensor, label=label) # calculate mean mean = np.array(sensorList).astype(float).mean(axis=0) np.savetxt(outputFile, mean, newline='\n') print "Saving average =", outputFile # graph plt.plot(mean, label="average") plt.legend() plt.title("Average") plt.savefig(outputFile + ".png") def main(inputDir, outputDir): for dirpath, dirnames, _ in os.walk(inputDir): for dirname in dirnames: average(os.path.join(dirpath, dirname), os.path.join(outputDir, dirname)) return 0 if __name__ == '__main__': print "Args =", sys.argv main(*sys.argv[1:])
mit
drivendata/countable-care-3rd-place
src/ensemble_features.py
1
2023
#!/usr/bin/env python

from scipy import sparse
from sklearn.datasets import dump_svmlight_file

import argparse
import logging
import numpy as np
import os
import pandas as pd


logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)


def ensemble_feature(valid_file, label_file, test_file, feature_dir, feature_name):

    # Load data files
    logging.info('Loading training and test data')
    val = np.loadtxt(valid_file, delimiter=',')
    tst = np.loadtxt(test_file, delimiter=',')
    label = np.loadtxt(label_file, delimiter=',')

    logging.info('{}x{}, {}x{}'.format(val.shape[0], val.shape[1],
                                       tst.shape[0], tst.shape[1]))

    n_val = val.shape[0]
    n_tst = tst.shape[0]

    logging.info('Saving features into {}'.format(feature_dir))
    for i in range(label.shape[1]):
        train_feature_file = os.path.join(feature_dir, '{}.trn{:02d}.sps'.format(feature_name, i))
        test_feature_file = os.path.join(feature_dir, '{}.tst{:02d}.sps'.format(feature_name, i))

        dump_svmlight_file(val, label[:, i], train_feature_file, zero_based=False)
        dump_svmlight_file(tst, np.zeros((n_tst,)), test_feature_file, zero_based=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--valid-file', required=True, dest='valid')
    parser.add_argument('--label-file', required=True, dest='label')
    parser.add_argument('--test-file', required=True, dest='test')
    parser.add_argument('--feature-dir', required=True, dest='feature_dir')
    parser.add_argument('--feature-name', required=True, dest='feature_name')

    args = parser.parse_args()

    ensemble_feature(valid_file=args.valid,
                     label_file=args.label,
                     test_file=args.test,
                     feature_dir=args.feature_dir,
                     feature_name=args.feature_name)
mit
aclarkData/aclarkData.github.io
markdown_generator/talks.py
199
4000
# coding: utf-8

# # Talks markdown generator for academicpages
# 
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
# 
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.

# In[1]:

import pandas as pd
import os


# ## Data format
# 
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
# 
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
# 

# ## Import TSV
# 
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
# 
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.

# In[3]:

talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks


# ## Escape special characters
# 
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.

# In[4]:

html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
    }

def html_escape(text):
    if type(text) is str:
        return "".join(html_escape_table.get(c,c) for c in text)
    else:
        return "False"


# ## Creating the markdown files
# 
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.

# In[5]:

loc_dict = {}

for row, item in talks.iterrows():

    md_filename = str(item.date) + "-" + item.url_slug + ".md"
    html_filename = str(item.date) + "-" + item.url_slug
    year = item.date[:4]

    md = "---\ntitle: \"" + item.title + '"\n'
    md += "collection: talks" + "\n"

    if len(str(item.type)) > 3:
        md += 'type: "' + item.type + '"\n'
    else:
        md += 'type: "Talk"\n'

    md += "permalink: /talks/" + html_filename + "\n"

    if len(str(item.venue)) > 3:
        md += 'venue: "' + item.venue + '"\n'

    if len(str(item.location)) > 3:
        md += "date: " + str(item.date) + "\n"

    if len(str(item.location)) > 3:
        md += 'location: "' + str(item.location) + '"\n'

    md += "---\n"

    if len(str(item.talk_url)) > 3:
        md += "\n[More information here](" + item.talk_url + ")\n"

    if len(str(item.description)) > 3:
        md += "\n" + html_escape(item.description) + "\n"

    md_filename = os.path.basename(md_filename)
    #print(md)

    with open("../_talks/" + md_filename, 'w') as f:
        f.write(md)


# These files are in the talks directory, one directory below where we're working from.
mit
apur27/public
ASX-Python/LoadTrainPredict-LinearRegression.py
1
2587
import glob
#import os
import pandas as pd

colnames=['Ticker', 'Date', 'Open', 'High', 'Low', 'Close', 'Volume']

def pivotAndInterpolate(row,index,column,reIndex, interpolater,limiter, df):
    dfOut = df.pivot_table(row, index, column)
    dfOut.index = pd.to_datetime(dfOut.index, format='%Y%m%d')
    dfOut = dfOut.reindex(reIndex)
    dfOut=dfOut.interpolate(method=interpolater, limit_area=limiter)
    dfOut=dfOut.fillna(0)
    return dfOut

all_files = glob.glob('C:/QM/rnd/ASX-2015-2018/ASX-2015-2018/2*.txt')     # advisable to use os.path.join as this makes concatenation OS independent

df_from_each_file = (pd.read_csv(f, names=colnames, header=None, encoding='utf-8') for f in all_files)
data = pd.concat(df_from_each_file, ignore_index=True, sort=True)
data['HighLow'] = data['High']/data['Low']

index = pd.date_range('20150102','20180629')

dfOpen=pivotAndInterpolate('Open', ['Date'], 'Ticker',index, 'linear','inside', data)
dfLow=pivotAndInterpolate('High', ['Date'], 'Ticker',index, 'linear','inside',data)
dfHigh=pivotAndInterpolate('Low', ['Date'], 'Ticker',index, 'linear','inside',data)
dfClose=pivotAndInterpolate('Close', ['Date'], 'Ticker',index, 'linear','inside',data)
dfVolume=pivotAndInterpolate('Volume', ['Date'], 'Ticker',index, 'linear','inside',data)
dfHighLow=pivotAndInterpolate('HighLow', ['Date'], 'Ticker',index, 'linear','inside',data)

dfCloseReturns=dfClose/dfClose.shift(1) - 1 #Close to close Returns

import numpy as np
from fastai.structured import add_datepart
import matplotlib.pyplot as plt

asxTicker='VHY'
ticker=dfClose[asxTicker]
ticker=ticker.reset_index()
add_datepart(ticker, 'index')

trainSize=700

ticker['mon_fri'] = 0
for i in range(0,len(ticker)):
    if (ticker['indexDayofweek'][i] == 0 or ticker['indexDayofweek'][i] == 4):
        ticker['mon_fri'][i] = 1
    else:
        ticker['mon_fri'][i] = 0

train = ticker[:trainSize]
valid = ticker[trainSize:]

x_train = train.drop(asxTicker, axis=1)
y_train = train[asxTicker]
x_valid = valid.drop(asxTicker, axis=1)
y_valid = valid[asxTicker]

#implement linear regression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train,y_train)

preds = model.predict(x_valid)
rms=np.sqrt(np.mean(np.power((np.array(y_valid)-np.array(preds)),2)))

valid['Predictions'] = 0
valid['Predictions'] = preds

valid.index = ticker[trainSize:].index
train.index = ticker[:trainSize].index

plt.plot(train[asxTicker])
plt.plot(valid[[asxTicker, 'Predictions']])
artistic-2.0
hsuantien/scikit-learn
sklearn/utils/tests/test_extmath.py
130
16270
# Authors: Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Denis Engemann <[email protected]> # # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import linalg from scipy import stats from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.extmath import density from sklearn.utils.extmath import logsumexp from sklearn.utils.extmath import norm, squared_norm from sklearn.utils.extmath import randomized_svd from sklearn.utils.extmath import row_norms from sklearn.utils.extmath import weighted_mode from sklearn.utils.extmath import cartesian from sklearn.utils.extmath import log_logistic from sklearn.utils.extmath import fast_dot, _fast_dot from sklearn.utils.extmath import svd_flip from sklearn.utils.extmath import _batch_mean_variance_update from sklearn.utils.extmath import _deterministic_vector_sign_flip from sklearn.datasets.samples_generator import make_low_rank_matrix def test_density(): rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 5)) X[1, 2] = 0 X[5, 3] = 0 X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_coo = sparse.coo_matrix(X) X_lil = sparse.lil_matrix(X) for X_ in (X_csr, X_csc, X_coo, X_lil): assert_equal(density(X_), density(X)) def test_uniform_weights(): # with uniform weights, results should be identical to stats.mode rng = np.random.RandomState(0) x = rng.randint(10, size=(10, 5)) weights = np.ones(x.shape) for axis in (None, 0, 1): mode, score = stats.mode(x, axis) mode2, score2 = weighted_mode(x, weights, axis) assert_true(np.all(mode == mode2)) assert_true(np.all(score == score2)) def test_random_weights(): # set this up so that each row should have a weighted mode of 6, # with a score that is easily reproduced mode_result = 6 rng = np.random.RandomState(0) x = rng.randint(mode_result, size=(100, 10)) w = rng.random_sample(x.shape) x[:, :5] = mode_result w[:, :5] += 1 mode, score = weighted_mode(x, w, axis=1) assert_array_equal(mode, mode_result) assert_array_almost_equal(score.ravel(), w[:, :5].sum(1)) def test_logsumexp(): # Try to add some smallish numbers in logspace x = np.array([1e-40] * 1000000) logx = np.log(x) assert_almost_equal(np.exp(logsumexp(logx)), x.sum()) X = np.vstack([x, x]) logX = np.vstack([logx, logx]) assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) def test_randomized_svd_low_rank(): # Check that extmath.randomized_svd is consistent with linalg.svd n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X of approximate effective rank `rank` and no noise # component (very structured signal): X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method U, s, V = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method Ua, sa, Va = randomized_svd(X, k) assert_equal(Ua.shape, (n_samples, k)) assert_equal(sa.shape, (k,)) assert_equal(Va.shape, (k, n_features)) # ensure that the singular values of both methods are equal up to 
the real # rank of the matrix assert_almost_equal(s[:k], sa) # check the singular vectors too (while not checking the sign) assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va)) # check the sparse matrix representation X = sparse.csr_matrix(X) # compute the singular values of X using the fast approximate method Ua, sa, Va = randomized_svd(X, k) assert_almost_equal(s[:rank], sa[:rank]) def test_norm_squared_norm(): X = np.random.RandomState(42).randn(50, 63) X *= 100 # check stability X += 200 assert_almost_equal(np.linalg.norm(X.ravel()), norm(X)) assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6) assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6) def test_row_norms(): X = np.random.RandomState(42).randn(100, 100) sq_norm = (X ** 2).sum(axis=1) assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5) assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X)) Xcsr = sparse.csr_matrix(X, dtype=np.float32) assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5) assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr)) def test_randomized_svd_low_rank_with_noise(): # Check that extmath.randomized_svd can handle noisy matrices n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X wity structure approximate rank `rank` and an # important noisy component X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.5, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method _, s, _ = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method # without the iterated power method _, sa, _ = randomized_svd(X, k, n_iter=0) # the approximation does not tolerate the noise: assert_greater(np.abs(s[:k] - sa).max(), 0.05) # compute the singular values of X using the fast approximate method with # iterated power method _, sap, _ = randomized_svd(X, k, n_iter=5) # the iterated power method is helping getting rid of the noise: assert_almost_equal(s[:k], sap, decimal=3) def test_randomized_svd_infinite_rank(): # Check that extmath.randomized_svd can handle noisy matrices n_samples = 100 n_features = 500 rank = 5 k = 10 # let us try again without 'low_rank component': just regularly but slowly # decreasing singular values: the rank of the data matrix is infinite X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=1.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method _, s, _ = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method # without the iterated power method _, sa, _ = randomized_svd(X, k, n_iter=0) # the approximation does not tolerate the noise: assert_greater(np.abs(s[:k] - sa).max(), 0.1) # compute the singular values of X using the fast approximate method with # iterated power method _, sap, _ = randomized_svd(X, k, n_iter=5) # the iterated power method is still managing to get most of the structure # at the requested rank assert_almost_equal(s[:k], sap, decimal=3) def test_randomized_svd_transpose_consistency(): # Check that transposing the design matrix has limit impact n_samples = 100 n_features = 500 rank = 4 k = 10 X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.5, random_state=0) assert_equal(X.shape, (n_samples, 
n_features)) U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False, random_state=0) U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True, random_state=0) U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto', random_state=0) U4, s4, V4 = linalg.svd(X, full_matrices=False) assert_almost_equal(s1, s4[:k], decimal=3) assert_almost_equal(s2, s4[:k], decimal=3) assert_almost_equal(s3, s4[:k], decimal=3) assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]), decimal=2) assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]), decimal=2) # in this case 'auto' is equivalent to transpose assert_almost_equal(s2, s3) def test_svd_flip(): # Check that svd_flip works in both situations, and reconstructs input. rs = np.random.RandomState(1999) n_samples = 20 n_features = 10 X = rs.randn(n_samples, n_features) # Check matrix reconstruction U, S, V = linalg.svd(X, full_matrices=False) U1, V1 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6) # Check transposed matrix reconstruction XT = X.T U, S, V = linalg.svd(XT, full_matrices=False) U2, V2 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6) # Check that different flip methods are equivalent under reconstruction U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6) U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6) def test_randomized_svd_sign_flip(): a = np.array([[2.0, 0.0], [0.0, 1.0]]) u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41) for seed in range(10): u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed) assert_almost_equal(u1, u2) assert_almost_equal(v1, v2) assert_almost_equal(np.dot(u2 * s2, v2), a) assert_almost_equal(np.dot(u2.T, u2), np.eye(2)) assert_almost_equal(np.dot(v2.T, v2), np.eye(2)) def test_cartesian(): # Check if cartesian product delivers the right results axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7])) true_out = np.array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) out = cartesian(axes) assert_array_equal(true_out, out) # check single axis x = np.arange(3) assert_array_equal(x[:, np.newaxis], cartesian((x,))) def test_logistic_sigmoid(): # Check correctness and robustness of logistic sigmoid implementation naive_logistic = lambda x: 1 / (1 + np.exp(-x)) naive_log_logistic = lambda x: np.log(naive_logistic(x)) x = np.linspace(-2, 2, 50) assert_array_almost_equal(log_logistic(x), naive_log_logistic(x)) extreme_x = np.array([-100., 100.]) assert_array_almost_equal(log_logistic(extreme_x), [-100, 0]) def test_fast_dot(): # Check fast dot blas wrapper function if fast_dot is np.dot: return rng = np.random.RandomState(42) A = rng.random_sample([2, 10]) B = rng.random_sample([2, 10]) try: linalg.get_blas_funcs(['gemm'])[0] has_blas = True except (AttributeError, ValueError): has_blas = False if has_blas: # Test _fast_dot for invalid input. # Maltyped data. for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]: assert_raises(ValueError, _fast_dot, A.astype(dt1), B.astype(dt2).T) # Malformed data. 
## ndim == 0 E = np.empty(0) assert_raises(ValueError, _fast_dot, E, E) ## ndim == 1 assert_raises(ValueError, _fast_dot, A, A[0]) ## ndim > 2 assert_raises(ValueError, _fast_dot, A.T, np.array([A, A])) ## min(shape) == 1 assert_raises(ValueError, _fast_dot, A, A[0, :][None, :]) # test for matrix mismatch error assert_raises(ValueError, _fast_dot, A, A) # Test cov-like use case + dtypes. for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) # col < row C = np.dot(A.T, A) C_ = fast_dot(A.T, A) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A, B.T) C_ = fast_dot(A, B.T) assert_almost_equal(C, C_, decimal=5) # Test square matrix * rectangular use case. A = rng.random_sample([2, 2]) for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) C = np.dot(A, B) C_ = fast_dot(A, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) if has_blas: for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]: assert_raises(ValueError, _fast_dot, x, x.T) def test_incremental_variance_update_formulas(): # Test Youngs and Cramer incremental variance formulas. # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html A = np.array([[600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300]]).T idx = 2 X1 = A[:idx, :] X2 = A[idx:, :] old_means = X1.mean(axis=0) old_variances = X1.var(axis=0) old_sample_count = X1.shape[0] final_means, final_variances, final_count = _batch_mean_variance_update( X2, old_means, old_variances, old_sample_count) assert_almost_equal(final_means, A.mean(axis=0), 6) assert_almost_equal(final_variances, A.var(axis=0), 6) assert_almost_equal(final_count, A.shape[0]) def test_incremental_variance_ddof(): # Test that degrees of freedom parameter for calculations are correct. rng = np.random.RandomState(1999) X = rng.randn(50, 10) n_samples, n_features = X.shape for batch_size in [11, 20, 37]: steps = np.arange(0, X.shape[0], batch_size) if steps[-1] != X.shape[0]: steps = np.hstack([steps, n_samples]) for i, j in zip(steps[:-1], steps[1:]): batch = X[i:j, :] if i == 0: incremental_means = batch.mean(axis=0) incremental_variances = batch.var(axis=0) # Assign this twice so that the test logic is consistent incremental_count = batch.shape[0] sample_count = batch.shape[0] else: result = _batch_mean_variance_update( batch, incremental_means, incremental_variances, sample_count) (incremental_means, incremental_variances, incremental_count) = result sample_count += batch.shape[0] calculated_means = np.mean(X[:j], axis=0) calculated_variances = np.var(X[:j], axis=0) assert_almost_equal(incremental_means, calculated_means, 6) assert_almost_equal(incremental_variances, calculated_variances, 6) assert_equal(incremental_count, sample_count) def test_vector_sign_flip(): # Testing that sign flip is working & largest value has positive sign data = np.random.RandomState(36).randn(5, 5) max_abs_rows = np.argmax(np.abs(data), axis=1) data_flipped = _deterministic_vector_sign_flip(data) max_rows = np.argmax(data_flipped, axis=1) assert_array_equal(max_abs_rows, max_rows) signs = np.sign(data[range(data.shape[0]), max_abs_rows]) assert_array_equal(data, data_flipped * signs[:, np.newaxis])
bsd-3-clause
waqasbhatti/hats19to21
plotbase.py
1
14567
#!/usr/bin/env python ''' plotbase.py - Waqas Bhatti ([email protected]) - Feb 2016 License: MIT. Contains various useful functions for plotting light curves and associated data. ''' import os import os.path import cPickle as pickle import numpy as np from numpy import nan as npnan, median as npmedian, \ isfinite as npisfinite, min as npmin, max as npmax, abs as npabs import matplotlib.pyplot as plt import logging from datetime import datetime from traceback import format_exc ############# ## LOGGING ## ############# # setup a logger LOGGER = None def set_logger_parent(parent_name): globals()['LOGGER'] = logging.getLogger('%s.plotbase' % parent_name) def LOGDEBUG(message): if LOGGER: LOGGER.debug(message) elif DEBUG: print('%sZ [DBUG]: %s' % (datetime.utcnow().isoformat(), message)) def LOGINFO(message): if LOGGER: LOGGER.info(message) else: print('%sZ [INFO]: %s' % (datetime.utcnow().isoformat(), message)) def LOGERROR(message): if LOGGER: LOGGER.error(message) else: print('%sZ [ERR!]: %s' % (datetime.utcnow().isoformat(), message)) def LOGWARNING(message): if LOGGER: LOGGER.warning(message) else: print('%sZ [WRN!]: %s' % (datetime.utcnow().isoformat(), message)) def LOGEXCEPTION(message): if LOGGER: LOGGER.exception(message) else: print( '%sZ [EXC!]: %s\nexception was: %s' % ( datetime.utcnow().isoformat(), message, format_exc() ) ) ################### ## LOCAL IMPORTS ## ################### from lcmath import phase_magseries, phase_magseries_with_errs, \ phase_bin_magseries, phase_bin_magseries_with_errs, \ time_bin_magseries, time_bin_magseries_with_errs from varbase import spline_fit_magseries ######################### ## SIMPLE LIGHT CURVES ## ######################### def plot_mag_series(times, mags, errs=None, outfile=None, sigclip=30.0, timebin=None, yrange=None): '''This plots a magnitude time series. If outfile is none, then plots to matplotlib interactive window. If outfile is a string denoting a filename, uses that to write a png/eps/pdf figure. timebin is either a float indicating binsize in seconds, or None indicating no time-binning is required. 
''' if errs is not None: # remove nans find = npisfinite(times) & npisfinite(mags) & npisfinite(errs) ftimes, fmags, ferrs = times[find], mags[find], errs[find] # get the median and stdev = 1.483 x MAD median_mag = npmedian(fmags) stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483 # sigclip next if sigclip: sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag) stimes = ftimes[sigind] smags = fmags[sigind] serrs = ferrs[sigind] LOGINFO('sigclip = %s: before = %s observations, ' 'after = %s observations' % (sigclip, len(times), len(stimes))) else: stimes = ftimes smags = fmags serrs = ferrs else: # remove nans find = npisfinite(times) & npisfinite(mags) ftimes, fmags, ferrs = times[find], mags[find], None # get the median and stdev = 1.483 x MAD median_mag = npmedian(fmags) stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483 # sigclip next if sigclip: sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag) stimes = ftimes[sigind] smags = fmags[sigind] serrs = None LOGINFO('sigclip = %s: before = %s observations, ' 'after = %s observations' % (sigclip, len(times), len(stimes))) else: stimes = ftimes smags = fmags serrs = None # now we proceed to binning if timebin and errs is not None: binned = time_bin_magseries_with_errs(stimes, smags, serrs, binsize=timebin) btimes, bmags, berrs = (binned['binnedtimes'], binned['binnedmags'], binned['binnederrs']) elif timebin and errs is None: binned = time_bin_magseries(stimes, smags, binsize=timebin) btimes, bmags, berrs = binned['binnedtimes'], binned['binnedmags'], None else: btimes, bmags, berrs = stimes, smags, serrs # finally, proceed with plotting fig = plt.figure() fig.set_size_inches(7.5,4.8) plt.errorbar(btimes, bmags, fmt='go', yerr=berrs, markersize=2.0, markeredgewidth=0.0, ecolor='grey', capsize=0) # make a grid plt.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':') # fix the ticks to use no offsets plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) # get the yrange if yrange and isinstance(yrange,list) and len(yrange) == 2: ymin, ymax = yrange else: ymin, ymax = plt.ylim() plt.ylim(ymax,ymin) plt.xlim(npmin(btimes) - 0.001*npmin(btimes), npmax(btimes) + 0.001*npmin(btimes)) plt.xlabel('time [JD]') plt.ylabel('magnitude') if outfile and isinstance(outfile, str): plt.savefig(outfile,bbox_inches='tight') plt.close() return os.path.abspath(outfile) else: plt.show() plt.close() return ######################### ## PHASED LIGHT CURVES ## ######################### def plot_phased_mag_series(times, mags, period, errs=None, epoch='min', outfile=None, sigclip=30.0, phasewrap=True, phasesort=True, phasebin=None, plotphaselim=[-0.8,0.8], yrange=None): '''This plots a phased magnitude time series using the period provided. If epoch is None, uses the min(times) as the epoch. If epoch is a string 'min', then fits a cubic spline to the phased light curve using min(times), finds the magnitude minimum from the fitted light curve, then uses the corresponding time value as the epoch. If epoch is a float, then uses that directly to phase the light curve and as the epoch of the phased mag series plot. If outfile is none, then plots to matplotlib interactive window. If outfile is a string denoting a filename, uses that to write a png/eps/pdf figure. 
''' if errs is not None: # remove nans find = npisfinite(times) & npisfinite(mags) & npisfinite(errs) ftimes, fmags, ferrs = times[find], mags[find], errs[find] # get the median and stdev = 1.483 x MAD median_mag = npmedian(fmags) stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483 # sigclip next if sigclip: sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag) stimes = ftimes[sigind] smags = fmags[sigind] serrs = ferrs[sigind] LOGINFO('sigclip = %s: before = %s observations, ' 'after = %s observations' % (sigclip, len(times), len(stimes))) else: stimes = ftimes smags = fmags serrs = ferrs else: # remove nans find = npisfinite(times) & npisfinite(mags) ftimes, fmags, ferrs = times[find], mags[find], None # get the median and stdev = 1.483 x MAD median_mag = npmedian(fmags) stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483 # sigclip next if sigclip: sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag) stimes = ftimes[sigind] smags = fmags[sigind] serrs = None LOGINFO('sigclip = %s: before = %s observations, ' 'after = %s observations' % (sigclip, len(times), len(stimes))) else: stimes = ftimes smags = fmags serrs = None # figure out the epoch, if it's None, use the min of the time if epoch is None: epoch = npmin(stimes) # if the epoch is 'min', then fit a spline to the light curve phased # using the min of the time, find the fit mag minimum and use the time for # that as the epoch elif isinstance(epoch,str) and epoch == 'min': spfit = spline_fit_magseries(stimes, smags, serrs, period) epoch = spfit['fitepoch'] # now phase (and optionally, phase bin the light curve) if errs is not None: # phase the magseries phasedlc = phase_magseries_with_errs(stimes, smags, serrs, period, epoch, wrap=phasewrap, sort=phasesort) plotphase = phasedlc['phase'] plotmags = phasedlc['mags'] ploterrs = phasedlc['errs'] # if we're supposed to bin the phases, do so if phasebin: binphasedlc = phase_bin_magseries_with_errs(plotphase, plotmags, ploterrs, binsize=phasebin) plotphase = binphasedlc['binnedphases'] plotmags = binphasedlc['binnedmags'] ploterrs = binphasedlc['binnederrs'] else: # phase the magseries phasedlc = phase_magseries(stimes, smags, period, epoch, wrap=phasewrap, sort=phasesort) plotphase = phasedlc['phase'] plotmags = phasedlc['mags'] ploterrs = None # if we're supposed to bin the phases, do so if phasebin: binphasedlc = phase_bin_magseries(plotphase, plotmags, binsize=phasebin) plotphase = binphasedlc['binnedphases'] plotmags = binphasedlc['binnedmags'] ploterrs = None # finally, make the plots # initialize the plot fig = plt.figure() fig.set_size_inches(7.5,4.8) plt.errorbar(plotphase, plotmags, fmt='bo', yerr=ploterrs, markersize=2.0, markeredgewidth=0.0, ecolor='#B2BEB5', capsize=0) # make a grid plt.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':') # make lines for phase 0.0, 0.5, and -0.5 plt.axvline(0.0,alpha=0.9,linestyle='dashed',color='g') plt.axvline(-0.5,alpha=0.9,linestyle='dashed',color='g') plt.axvline(0.5,alpha=0.9,linestyle='dashed',color='g') # fix the ticks to use no offsets plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) # get the yrange if yrange and isinstance(yrange,list) and len(yrange) == 2: ymin, ymax = yrange else: ymin, ymax = plt.ylim() plt.ylim(ymax,ymin) # set the x axis limit if not plotphaselim: plot_xlim = plt.xlim() plt.xlim((npmin(plotphase)-0.1, npmax(plotphase)+0.1)) else: plt.xlim((plotphaselim[0],plotphaselim[1])) # set up 
the labels plt.xlabel('phase') plt.ylabel('magnitude') plt.title('using period: %.6f d and epoch: %.6f' % (period, epoch)) # make the figure if outfile and isinstance(outfile, str): plt.savefig(outfile,bbox_inches='tight') plt.close() return os.path.abspath(outfile) else: plt.show() plt.close() return ################## ## PERIODOGRAMS ## ################## def plot_periodbase_lsp(lspinfo, outfile=None): '''Makes a plot of the L-S periodogram obtained from periodbase functions. If lspinfo is a dictionary, uses the information directly. If it's a filename string ending with .pkl, then this assumes it's a periodbase LSP pickle and loads the corresponding info from it. ''' # get the lspinfo from a pickle file transparently if isinstance(lspinfo,str) and os.path.exists(lspinfo): LOGINFO('loading LSP info from pickle %s' % lspinfo) with open(lspinfo,'rb') as infd: lspinfo = pickle.load(infd) # get the things to plot out of the data periods = lspinfo['periods'] lspvals = lspinfo['lspvals'] bestperiod = lspinfo['bestperiod'] # make the LSP plot on the first subplot plt.plot(periods,lspvals) plt.xscale('log',basex=10) plt.xlabel('Period [days]') plt.ylabel('LSP power') plottitle = 'best period = %.6f d' % bestperiod plt.title(plottitle) # show the best five peaks on the plot for bestperiod, bestpeak in zip(lspinfo['nbestperiods'], lspinfo['nbestlspvals']): plt.annotate('%.6f' % bestperiod, xy=(bestperiod, bestpeak), xycoords='data', xytext=(0.0,25.0), textcoords='offset points', arrowprops=dict(arrowstyle="->"),fontsize='x-small') # make a grid plt.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':') # make the figure if outfile and isinstance(outfile, str): plt.savefig(outfile,bbox_inches='tight') plt.close() return os.path.abspath(outfile) else: plt.show() plt.close() return
mit
petosegan/scikit-learn
examples/neighbors/plot_regression.py
349
1402
""" ============================ Nearest Neighbors regression ============================ Demonstrate the resolution of a regression problem using a k-Nearest Neighbor and the interpolation of the target using both barycenter and constant weights. """ print(__doc__) # Author: Alexandre Gramfort <[email protected]> # Fabian Pedregosa <[email protected]> # # License: BSD 3 clause (C) INRIA ############################################################################### # Generate sample data import numpy as np import matplotlib.pyplot as plt from sklearn import neighbors np.random.seed(0) X = np.sort(5 * np.random.rand(40, 1), axis=0) T = np.linspace(0, 5, 500)[:, np.newaxis] y = np.sin(X).ravel() # Add noise to targets y[::5] += 1 * (0.5 - np.random.rand(8)) ############################################################################### # Fit regression model n_neighbors = 5 for i, weights in enumerate(['uniform', 'distance']): knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights) y_ = knn.fit(X, y).predict(T) plt.subplot(2, 1, i + 1) plt.scatter(X, y, c='k', label='data') plt.plot(T, y_, c='g', label='prediction') plt.axis('tight') plt.legend() plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors, weights)) plt.show()
bsd-3-clause
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/tests/test_qda.py
23
2833
import numpy as np

from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater

from sklearn import qda

# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
              [1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])

# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ], [2, ],
               [3, ]])

# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
               [2, 0], [3, 0]])


def test_qda():
    """
    QDA classification.

    This checks that QDA implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    clf = qda.QDA()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)

    # Assure that it works with 1D data
    y_pred1 = clf.fit(X1, y).predict(X1)
    assert_array_equal(y_pred1, y)

    # Test probas estimates
    y_proba_pred1 = clf.predict_proba(X1)
    assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
    y_log_proba_pred1 = clf.predict_log_proba(X1)
    assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)

    y_pred3 = clf.fit(X, y3).predict(X)
    # QDA shouldn't be able to separate those
    assert_true(np.any(y_pred3 != y3))


def test_qda_priors():
    clf = qda.QDA()
    y_pred = clf.fit(X, y).predict(X)
    n_pos = np.sum(y_pred == 2)

    neg = 1e-10
    clf = qda.QDA(priors=np.array([neg, 1 - neg]))
    y_pred = clf.fit(X, y).predict(X)
    n_pos2 = np.sum(y_pred == 2)

    assert_greater(n_pos2, n_pos)


def test_qda_store_covariances():
    # The default is to not set the covariances_ attribute
    clf = qda.QDA().fit(X, y)
    assert_true(not hasattr(clf, 'covariances_'))

    # Test the actual attribute:
    clf = qda.QDA().fit(X, y, store_covariances=True)
    assert_true(hasattr(clf, 'covariances_'))

    assert_array_almost_equal(
        clf.covariances_[0],
        np.array([[0.7, 0.45], [0.45, 0.7]])
    )

    assert_array_almost_equal(
        clf.covariances_[1],
        np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
    )


def test_qda_regularization():
    # the default is reg_param=0. and will cause issues
    # when there is a constant variable
    clf = qda.QDA()
    y_pred = clf.fit(X2, y).predict(X2)
    assert_true(np.any(y_pred != y))

    # adding a little regularization fixes the problem
    clf = qda.QDA(reg_param=0.01)
    y_pred = clf.fit(X2, y).predict(X2)
    assert_array_equal(y_pred, y)
apache-2.0
ryanjoneil/decision-models-for-data-science
ipynb/images/ch2/fig2-6-optimal-solution.py
1
1063
import matplotlib
matplotlib.use('Agg')

from matplotlib import patches, pyplot
from matplotlib.path import Path

verts = [
    (0., 0.),
    (0., 1.),
    (1., 2.),
    (2., 2.),
    (2., 2.),
    (3., 0.),
    (0., 0.),
]

codes = [
    Path.MOVETO,
    Path.LINETO,
    Path.LINETO,
    Path.LINETO,
    Path.LINETO,
    Path.LINETO,
    Path.CLOSEPOLY,
]

path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=(.53,.81,.92), lw=1)

fig = pyplot.figure()
fig.set_size_inches(3, 9/4.)

ax = fig.add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 2.5)
ax.add_patch(patch)
ax.plot([1.5,3.5], [2.5,.5], lw=1, c='red')
ax.plot([2], [2], marker='o', c="grey")

pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.xticks([0, 1, 2, 3])
pyplot.yticks([0, 1, 2])
pyplot.grid(b=True, which='major', color=(.5,.5,.5,.25), linestyle='-')
pyplot.savefig('fig2-6-optimal-solution.png', bbox_inches='tight')
bsd-2-clause
PATRIC3/p3diffexp
expression_transform.py
1
22822
#!/usr/bin/env python import argparse import pandas as pd import json import sys import numpy as np import requests import os import uuid import csv from scipy import stats from itertools import islice try: from lib import diffexp_api except ImportError: import diffexp_api #requires 2.7.9 or greater to deal with https comodo intermediate certs if sys.version_info < (2, 7): raise "must use python 2.7 or greater" #stamp out annoying warnings that are beyond control import warnings warnings.simplefilter(action = "ignore", category = FutureWarning) pd.options.mode.chained_assignment = None #Input #1. metadata in json with the following: """ {xformat:"csv || tsv || xls || xlsx", xsetup:"gene_matrix || gene_list", source_id_type:"refseq_locus_tag || alt_locus_tag || feature_id || gi || gene_id || protein_id || seed_id", data_type: "Transcriptomics || Proteomics || Phenomics", title: "User input", desc: "User input", organism: "user input", pmid: "user_input", output_path: "path", "metadata_format":"csv || tsv || xls || xlsx"} """ #2. server info for the data api """ {"data_api":"url"} """ #Sample Output #experiment.json #{"origFileName":"filename","geneMapped":4886,"samples":8,"geneTotal":4985,"cdate":"2013-01-28 13:40:47","desc":"user input","organism":"some org","owner":"user name","title":"user input","pmid":"user input","expid":"whatever","collectionType":"ExpressionExperiment","genesMissed":99,"mdate":"2013-01-28 13:40:47"} #expression.json #{"expression":[{"log_ratio":"0.912","na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","z_score":"-0.23331085637221843"}] #mapping.json #{"mapping":{"unmapped_list":[{"exp_locus_tag":"VBISalEnt101322_pg001"}],"unmapped_ids":99,"mapped_list":[{"na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001"}],"mapped_ids":4886}} #sample.json #{"sample":[{"sig_log_ratio":2675,"expmean":"1.258","sampleUserGivenId":"LB_stat_AerobicM9_stat_aerobic","expname":"LB_stat_AerobicM9_stat_aerobic","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","genes":4429,"sig_z_score":139,"expstddev":"1.483"}]} def pretty_print_POST(req): """ printed and may differ from the actual request. """ print('{}\n{}\n{}\n\n{}'.format( '-----------START-----------', req.method + ' ' + req.url, '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()), req.body, )) #convert gene list format to gene matrix #there is definitely a more efficient conversion than this... def gene_list_to_matrix(cur_table): comparisons=set(cur_table['sampleUserGivenId']) genes=set(cur_table['exp_locus_tag']) result=pd.DataFrame(index=list(genes), columns=list(comparisons)) result['exp_locus_tag']=result.index gene_pos=cur_table.columns.get_loc('exp_locus_tag') comparison_pos=cur_table.columns.get_loc('sampleUserGivenId') ratio_pos=cur_table.columns.get_loc('log_ratio') for row in cur_table.iterrows(): gene_id=row[-1][gene_pos] comp=row[-1][comparison_pos] ratio=row[-1][ratio_pos] result[comp][gene_id]=ratio return result #convert gene matrix format to gene list #there is definitely a more efficient conversion than this... 
def gene_matrix_to_list(cur_table): result=pd.melt(cur_table, id_vars=['exp_locus_tag'], var_name='sampleUserGivenId', value_name='log_ratio') return result def list_to_mapping_table(cur_table): genes=set(cur_table['exp_locus_tag']) if len(genes) == 0: sys.stderr.write("No genes in differential expression gmx file\n") sys.exit(2) result=pd.DataFrame(index=list(genes)) result['exp_locus_tag']=result.index return result #deal with weird naming of columns. def fix_headers(cur_table, parameter_type, die): def fix_name(x, all_columns): fixed_name=' '.join(x.split()).strip().lower().replace(" ","_") #patrics downloadable template is not consistent with its help info if fixed_name.endswith('s') and fixed_name[:-1] in set(all_columns): fixed_name=fixed_name[:-1] return fixed_name matrix_columns=['gene_id'] list_columns=['gene_id', 'comparison_id', 'log_ratio'] template_columns=["comparison_id","title","pubmed","accession","organism","strain","gene_modification","experiment_condition","time_point"] all_columns=list_columns+template_columns check_columns=None target_setup=None if parameter_type=="xfile": target_setup= "gene_list" if all([(fix_name(x,all_columns) in list_columns) for x in cur_table.columns]) else "gene_matrix" else: target_setup="template" limit_columns=True if target_setup == 'gene_matrix': check_columns=matrix_columns limit_columns=False rename={'gene_id': 'exp_locus_tag'} elif target_setup == 'gene_list': check_columns=list_columns rename={'comparison_id':'sampleUserGivenId','gene_id': 'exp_locus_tag'} elif target_setup == 'template': check_columns=template_columns rename={'comparison_id':'sampleUserGivenId', 'title':'expname', 'gene_modification':'mutant', 'experiment_condition':'condition', 'time_point':'timepoint'} else: sys.stderr.write("unrecognized setup "+target_setup+"\n") if die: assert False cur_table.columns=[fix_name(x,all_columns) if fix_name(x,all_columns) in check_columns else x for x in cur_table.columns] columns_ok = True for i in check_columns: columns_ok=columns_ok and i in cur_table.columns if not columns_ok: sys.stderr.write("Missing appropriate column names in "+target_setup+"\n") if die: assert False if limit_columns: cur_table=cur_table[check_columns] if rename: cur_table=cur_table.rename(columns=rename) return (target_setup, cur_table) #read in the comparisons data and metadata def process_table(target_file, param_type, die, target_format="start", tries=0): tries+=1 starting=False target_setup=None if not os.path.exists(target_file): sys.stderr.write("can't find target file "+target_file+"\n") if die: sys.exit(2) if target_format=="start": starting=True fileName, fileExtension = os.path.splitext(target_file) target_format=fileExtension.replace('.','').lower() if starting and not target_format in set(["csv","tsv","xls","xlsx"]): temp_handle=open(target_file, 'rb') target_sep=csv.Sniffer().sniff("\n".join(list(islice(temp_handle,10)))) temp_handle.close() if target_sep.delimiter=="\t": target_format="tsv" sys.stdout.write("guessing "+target_format+" format\n") elif target_sep.delimiter==",": target_format="csv" sys.stdout.write("guessing "+target_format+" format\n") cur_table=None next_up="tsv" try: if target_format == 'tsv': next_up="csv" cur_table=pd.read_table(target_file, header=0) elif target_format == 'csv': next_up="xls" cur_table=pd.read_csv(target_file, header=0) elif target_format == 'xls' or target_format == 'xlsx': cur_table=pd.io.excel.read_excel(target_file, 0, index_col=None) else: sys.stderr.write("unrecognized format "+target_format+" for 
"+target_setup+"\n") if die: sys.exit(2) #assume the first column is "gene_id" for the comparison table and rename it as "gene_id" to handle user misspelled column name for gene_id if param_type=="xfile": cur_table=cur_table.rename(columns={cur_table.columns[0]:'gene_id'}) target_setup, cur_table=fix_headers(cur_table, param_type, die) except: sys.stdout.write("failed at reading "+target_format+" format\n") if tries > 5: raise else: sys.stdout.write("guessing "+next_up+" format\n") return process_table(target_file, param_type, die, next_up, tries) return (target_setup, cur_table) #{source_id_type:"refseq_locus_tag || alt_locus_tag || feature_id", #data_type: "Transcriptomics || Proteomics || Phenomics", #experiment_title: "User input", experiment_description: "User input", #organism name: "user input", pubmed_id: "user_input"} #Sample Output #experiment.json #{"origFileName":"filename","geneMapped":4886,"samples":8,"geneTotal":4985,"cdate":"2013-01-28 13:40:47","desc":"user input","organism":"some org","owner":"user name","title":"user input","pmid":"user input","expid":"whatever","collectionType":"ExpressionExperiment","genesMissed":99,"mdate":"2013-01-28 13:40:47"} def create_experiment_file(output_path, mapping_dict, sample_dict, expression_dict, form_data, experiment_id): experiment_dict={"geneMapped":mapping_dict["mapping"]["mapped_ids"],"samples":len(sample_dict['sample']),"geneTotal":mapping_dict["mapping"]["mapped_ids"]+mapping_dict["mapping"]["unmapped_ids"],"desc":form_data.get('desc',form_data.get("experiment_description","")),"organism":form_data.get('organism',''),"title":form_data.get("title",form_data.get("experiment_title","")),"pmid":form_data.get("pmid",""),"expid":experiment_id,"collectionType":"ExpressionExperiment","genesMissed":mapping_dict["mapping"]["unmapped_ids"]} output_file=os.path.join(output_path, 'experiment.json') out_handle=open(output_file, 'w') json.dump(experiment_dict, out_handle) out_handle.close() return experiment_dict #expression.json #{"expression":[{"log_ratio":"0.912","na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","z_score":"-0.23331085637221843"}] #sample.json #{"sample":[{"sig_log_ratio":2675,"expmean":"1.258","sampleUserGivenId":"LB_stat_AerobicM9_stat_aerobic","expname":"LB_stat_AerobicM9_stat_aerobic","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","genes":4429,"sig_z_score":139,"expstddev":"1.483"}]} def create_comparison_files(output_path, comparisons_table, mfile, form_data, experiment_id, sig_z, sig_log): #create dicts for json sample_dict={'sample':[]} expression_dict={'expression':[]} #create stats table for sample.json grouped=comparisons_table.groupby(["sampleUserGivenId"], sort=False) sample_stats=grouped.agg([np.mean, np.std])['log_ratio'] sample_stats=sample_stats.rename(columns={'mean':'expmean','std':'expstddev'}) sample_stats["genes"]=grouped.count()["exp_locus_tag"] sample_stats["pid"]=[str(experiment_id)+"S"+str(i) for i in range(0,len(sample_stats))] sample_stats["sampleUserGivenId"]=sample_stats.index sample_stats["expname"]=sample_stats.index #get zscore and significance columns comparisons_table["z_score"]=grouped.transform(stats.zscore)["log_ratio"] comparisons_table["sig_z"]=comparisons_table["z_score"].abs() >= sig_z comparisons_table["sig_log"]=comparisons_table["log_ratio"].abs() >= sig_log #store counts in stats z_score_breakdown=comparisons_table.groupby(["sampleUserGivenId","sig_z"], sort=False).count()['z_score'].unstack() if True in 
z_score_breakdown: sample_stats["sig_z_score"]=z_score_breakdown[True] else: z_score_breakdown.columns=[True] z_score_breakdown[True]=z_score_breakdown[True].apply(lambda x: 0) sample_stats["sig_z_score"]=z_score_breakdown[True] log_breakdown=comparisons_table.groupby(["sampleUserGivenId","sig_log"], sort=False).count()['log_ratio'].unstack() if True in log_breakdown: sample_stats["sig_log_ratio"]=log_breakdown[True] else: log_breakdown.columns=[True] log_breakdown[True]=log_breakdown[True].apply(lambda x: 0) sample_stats["sig_log_ratio"]=log_breakdown[True] sample_stats["sig_log_ratio"]=sample_stats["sig_log_ratio"].fillna(0).astype('int64') sample_stats["sig_z_score"]=sample_stats["sig_z_score"].fillna(0).astype('int64') #set pid's for expression.json comparisons_table=comparisons_table.merge(sample_stats[["pid","sampleUserGivenId"]], how="left", on="sampleUserGivenId") #pull in metadata spreadsheet if provided if mfile and mfile.strip(): sys.stdout.write("reading metadata template\n") target_setup, meta_table=process_table(mfile, "mfile", die=True) try: meta_key="sampleUserGivenId" to_add=meta_table.columns-sample_stats.columns meta_table=meta_table.set_index('sampleUserGivenId') sample_stats.update(meta_table) sample_stats=sample_stats.merge(meta_table[to_add], left_index=True, right_index=True, how='left') except: sys.stderr.write("failed to parse user provide metadata template\n") sys.exit(2) #populate json dicts sample_stats=sample_stats.fillna("") sample_dict['sample']=json.loads(sample_stats.to_json(orient='records', date_format='iso')) #sample_dict['sample']=sample_stats.to_dict(outtype='records') cols = [col for col in comparisons_table.columns if col not in ['sig_z', 'sig_log']] expression_dict['expression']=json.loads(comparisons_table[cols].to_json(orient='records')) output_file=os.path.join(output_path, 'sample.json') out_handle=open(output_file, 'w') json.dump(sample_dict, out_handle) out_handle.close() output_file=os.path.join(output_path, 'expression.json') out_handle=open(output_file, 'w') json.dump(expression_dict, out_handle) out_handle.close() return (sample_dict, expression_dict) #mapping.json #{"mapping":{"unmapped_list":[{"exp_locus_tag":"VBISalEnt101322_pg001"}],"unmapped_ids":99,"mapped_list":[{"na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001"}],"mapped_ids":4886}} #creates mapping.json for results def create_mapping_file(output_path, mapping_table, form_data): mapping_dict={"mapping":{"unmapped_list":[],"unmapped_ids":0,"mapped_list":[],"mapped_ids":0}} mapping_dict['mapping']['unmapped_list']=mapping_table[mapping_table.isnull().any(axis=1)][['exp_locus_tag']].to_dict('records') mapping_dict['mapping']['mapped_list']=mapping_table[mapping_table.notnull().all(axis=1)].to_dict('records') mapping_dict['mapping']['unmapped_ids']=len(mapping_dict['mapping']['unmapped_list']) mapping_dict['mapping']['mapped_ids']=len(mapping_dict['mapping']['mapped_list']) output_file=os.path.join(output_path, 'mapping.json') out_handle=open(output_file, 'w') json.dump(mapping_dict, out_handle) out_handle.close() return mapping_dict #mapped_list=[{form_data["source_id_type"]: i["Map ID"], "exp_locus_tag":i['Gene ID']} for i in mapping_table[mapping_table.notnull().any(axis=1)]] #mapped_list=[{form_data["source_id_type"]: i["Map ID"], "exp_locus_tag":i["Gene ID"]} for i in mapping_table.query('Gene ID != @np.nan')] def place_ids(query_results,cur_table,form_data): source_types=form_data["source_types"]+form_data["int_types"] count=0 try: for d in 
query_results.json()['response']['docs']: source_ids=[] target_id=None for id_type in source_types: if id_type in d: source_ids.append(d[id_type]) if 'feature_id' in d: target_id=d['feature_id'] if target_id: #because which of the source id's are in the input data check them locally against the exp_locus_tag for source_id in source_ids: if source_id in cur_table["feature_id"]: count+=1 cur_table["feature_id"][source_id]=target_id break except ValueError: sys.stderr.write("mapping failed. either PATRICs API is down or the Gene IDs are unknown\n") raise if count==0: sys.stderr.write("mapping failed. either PATRICs API is down or the Gene IDs are unknown\n") sys.exit(2) def make_map_query(id_list, form_data, server_setup, chunk_size): id_list = id_list.apply(str) source_types=form_data["source_types"] int_types=form_data["int_types"] current_query={'q':""} map_queries=[] int_ids=[] if "source_id_type" in form_data and len(form_data["source_id_type"]) > 0: source_types=[form_data["source_id_type"]] else: for id in id_list: if np.issubdtype(type(id), np.number) or id.isdigit(): int_ids.append(str(id)) if len(int_ids): for s_type in int_types: map_queries.append("("+s_type+":("+" OR ".join(int_ids)+"))") for s_type in source_types: map_queries.append("("+s_type+":("+" OR ".join(id_list)+"))") if "host" in form_data and form_data["host"]: current_query["q"]+="("+" OR ".join(map_queries)+") AND annotation:RefSeq" else: current_query["q"]+="("+" OR ".join(map_queries)+") AND annotation:PATRIC" if "genome_id" in form_data and form_data["genome_id"]: current_query["q"]+=" AND genome_id:"+form_data["genome_id"] current_query["fl"]="feature_id,"+",".join(source_types+int_types) current_query["rows"]="20000" current_query["wt"]="json" headers = {"Content-Type": "application/solrquery+x-www-form-urlencoded", "accept":"application/solr+json"} #print "switch THE HEADER BACK!" 
#headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'} req = requests.Request('POST', server_setup["data_api"], headers=headers, data=current_query) diffexp_api.authenticateByEnv(req) prepared = req.prepare() #pretty_print_POST(prepared) s = requests.Session() response=s.send(prepared) if not response.ok: sys.stderr.write("Error code %s invoking data api: %s\nquery: %s\n" % (response.status_code, response.text, current_query)) sys.exit(2) return response def chunker(seq, size): return (seq[pos:pos + size] for pos in xrange(0, len(seq), size)) def map_gene_ids(cur_table, form_data, server_setup, host=False): cur_table["feature_id"]=np.nan chunk_size=1000 if host: for source_id in cur_table["exp_locus_tag"]: cur_table["feature_id"][source_id]=source_id else: for i in chunker(cur_table['exp_locus_tag'], chunk_size): mapping_results=make_map_query(i, form_data, server_setup, chunk_size) place_ids(mapping_results, cur_table, form_data) def main(): sig_z=2 sig_log=1 valid_formats=set(['csv', 'tsv', 'xls', 'xlsx']) valid_setups=set(['gene_matrix','gene_list']) #req_info=['xformat','xsetup','source_id_type','data_type','experiment_title','experiment_description','organism'] req_info=['data_type','experiment_title','experiment_description','organism'] parser = argparse.ArgumentParser() parser.add_argument('--xfile', help='the source Expression comparisons file', required=True) parser.add_argument('--mfile', help='the metadata template if it exists', required=False) parser.add_argument('--output_path', help='location for output', required=True) parser.add_argument('--host', help='host genome, prevent id mapping', action='store_true', default=False, required=False) userinfo = parser.add_mutually_exclusive_group(required=True) userinfo.add_argument('--ufile', help='json file from user input') userinfo.add_argument('--ustring', help='json string from user input') serverinfo = parser.add_mutually_exclusive_group(required=True) serverinfo.add_argument('--sfile', help='server setup JSON file') serverinfo.add_argument('--sstring', help='server setup JSON string') map_args = parser.parse_args() if len(sys.argv) ==1: parser.print_help() sys.exit(2) #get comparison and metadata files xfile=map_args.xfile mfile=map_args.mfile if 'mfile' in map_args else None #parse user form data form_data=None user_parse=None server_parse=None parse_server = json.loads if 'sstring' in map_args else json.load try: form_data = json.loads(map_args.ustring) if map_args.ustring else json.load(open(map_args.ufile,'r')) except: sys.stderr.write("Failed to parse user provided form data \n") raise #parse setup data try: server_setup= json.loads(map_args.sstring) if map_args.sstring else json.load(open(map_args.sfile,'r')) except: sys.stderr.write("Failed to parse server data\n") raise #part of auto-detection of id type add source id types to map from form_data["source_types"]=["refseq_locus_tag","alt_locus_tag","feature_id","protein_id","patric_id"]#,"gi"] form_data["int_types"]=["gi","gene_id"] #make sure all required info present missing=[x not in form_data for x in req_info] if (any(missing)): sys.stderr.write("Missing required user input data: "+" ".join([req_info[i] for i in range(len(missing)) if missing[i]])+"\n") sys.exit(2) #if (mfile or 'metadata_format' in form_data) and ('metadata_format' not in form_data or not mfile): # sys.stderr.write("Expression transformation: (file,format) pair must be given for metadata template\n") #sys.exit(2) #read comparisons file sys.stdout.write("reading comparisons 
file\n") target_setup, comparisons_table=process_table(xfile, "xfile", die=True) output_path=map_args.output_path #convert gene matrix to list if target_setup == 'gene_matrix': comparisons_table=gene_matrix_to_list(comparisons_table) #limit log ratios comparisons_table.ix[comparisons_table["log_ratio"] > 1000000, 'log_ratio']=1000000 comparisons_table.ix[comparisons_table["log_ratio"] < -1000000, 'log_ratio']=-1000000 comparisons_table=comparisons_table.dropna() comparisons_table=comparisons_table[comparisons_table.exp_locus_tag != "-"] #map gene ids mapping_table=list_to_mapping_table(comparisons_table) map_gene_ids(mapping_table, form_data, server_setup, map_args.host) comparisons_table=comparisons_table.merge(mapping_table, how='left', on='exp_locus_tag') #create json files to represent experiment experiment_id=str(uuid.uuid1()) mapping_dict=create_mapping_file(output_path, mapping_table, form_data) (sample_dict, expression_dict) = create_comparison_files(output_path, comparisons_table, mfile, form_data, experiment_id, sig_z, sig_log) experiment_dict=create_experiment_file(output_path, mapping_dict, sample_dict, expression_dict, form_data, experiment_id) sys.stdout.write(json.dumps(experiment_dict)+"\n") if __name__ == "__main__": main()
mit
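The comments in expression_transform.py above describe reshaping a gene matrix (one row per gene, one column per comparison) into a gene list of (gene, comparison, log_ratio) rows with pd.melt, and later computing per-comparison z-scores. The following is a minimal, self-contained sketch of just that reshaping step on toy data, reusing the script's own column names (exp_locus_tag, sampleUserGivenId, log_ratio); it is an illustration, not the script's full pipeline.

import pandas as pd
from scipy import stats

# Toy gene matrix: rows are genes, columns are comparisons (log ratios).
matrix = pd.DataFrame({"exp_locus_tag": ["g1", "g2", "g3"],
                       "compA": [1.2, -0.4, 0.7],
                       "compB": [0.3, 2.1, -1.5]})

# Wide -> long, as gene_matrix_to_list() does.
long_form = pd.melt(matrix, id_vars=["exp_locus_tag"],
                    var_name="sampleUserGivenId", value_name="log_ratio")

# Per-comparison z-scores, mirroring the grouped transform in create_comparison_files().
long_form["z_score"] = (long_form.groupby("sampleUserGivenId")["log_ratio"]
                        .transform(stats.zscore))
print(long_form)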
jblackburne/scikit-learn
examples/exercises/plot_cv_diabetes.py
53
2861
""" =============================================== Cross-validation on diabetes Dataset Exercise =============================================== A tutorial exercise which uses cross-validation with linear models. This exercise is used in the :ref:`cv_estimators_tut` part of the :ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`. """ from __future__ import print_function print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.linear_model import LassoCV from sklearn.linear_model import Lasso from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score diabetes = datasets.load_diabetes() X = diabetes.data[:150] y = diabetes.target[:150] lasso = Lasso(random_state=0) alphas = np.logspace(-4, -0.5, 30) scores = list() scores_std = list() n_folds = 3 for alpha in alphas: lasso.alpha = alpha this_scores = cross_val_score(lasso, X, y, cv=n_folds, n_jobs=1) scores.append(np.mean(this_scores)) scores_std.append(np.std(this_scores)) scores, scores_std = np.array(scores), np.array(scores_std) plt.figure().set_size_inches(8, 6) plt.semilogx(alphas, scores) # plot error lines showing +/- std. errors of the scores std_error = scores_std / np.sqrt(n_folds) plt.semilogx(alphas, scores + std_error, 'b--') plt.semilogx(alphas, scores - std_error, 'b--') # alpha=0.2 controls the translucency of the fill color plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2) plt.ylabel('CV score +/- std error') plt.xlabel('alpha') plt.axhline(np.max(scores), linestyle='--', color='.5') plt.xlim([alphas[0], alphas[-1]]) ############################################################################## # Bonus: how much can you trust the selection of alpha? # To answer this question we use the LassoCV object that sets its alpha # parameter automatically from the data by internal cross-validation (i.e. it # performs cross-validation on the training data it receives). # We use external cross-validation to see how much the automatically obtained # alphas differ across different cross-validation folds. lasso_cv = LassoCV(alphas=alphas, random_state=0) k_fold = KFold(3) print("Answer to the bonus question:", "how much can you trust the selection of alpha?") print() print("Alpha parameters maximising the generalization score on different") print("subsets of the data:") for k, (train, test) in enumerate(k_fold.split(X, y)): lasso_cv.fit(X[train], y[train]) print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}". format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test]))) print() print("Answer: Not very much since we obtained different alphas for different") print("subsets of the data and moreover, the scores for these alphas differ") print("quite substantially.") plt.show()
bsd-3-clause
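A note on the error band in the cross-validation example above: std_error = scores_std / np.sqrt(n_folds) is the standard error of the mean score across folds,

    \mathrm{SE}(\bar{s}) = \frac{\hat{\sigma}_s}{\sqrt{k}}, \qquad k = n\_folds = 3,

so the dashed curves and the shaded region show the mean CV score plus or minus one standard error at each alpha.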
CosmicElysium/GmailClientExtractor
clientextractor.py
1
16066
''' Note: This module uses source code provided by Google Inc. The original oauth2.py script can be found at: https://github.com/google/gmail-oauth2-tools/blob/master/python/oauth2.py ''' import imaplib import email import lxml import sys import urllib import json import re import datetime import xlsxwriter from os import listdir import calendar from pandas import read_html from optparse import OptionParser # Gmail credentials file path CREDENTIALS_PATH = "./creds_filled.data" # The URL root for accessing Google Accounts. GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com' # Hardcoded dummy redirect URI for non-web apps. REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob' #SCOPE= 'https://www.googleapis.com/auth/gmail.readonly' SCOPE= 'https://mail.google.com/' ENDOFHEADER= "Number\r\n" MONTH_ABBR_NUMBERS = {v + ".": k for k,v in enumerate(calendar.month_abbr)} MONTH_NUMBERS = {v: k for k,v in enumerate(calendar.month_name)} MONTH_NUMBERS_INVERSE = {k: v for k,v in enumerate(calendar.month_name)} MONTH_ABBR_NUMBERS_INVERSE = {k: v for k,v in enumerate(calendar.month_abbr)} CURRENTYEAR = 2017 class Client: def __init__(self, ref_number, update_datetime, created_datetime, firstName, lastName, email, airlines, flight_number, origin, arrival_datetime, arrival_weekday ): self.ref_number= ref_number self.update_datetime= update_datetime self.created_datetime= created_datetime self.firstName= firstName self.lastName= lastName self.email= email self.airlines= airlines self.flight_number= flight_number self.origin= origin self.arrival_datetime= arrival_datetime self.arrival_weekday= arrival_weekday def GetDataSetAsList(): return [self.firstName, self.lastName, self.flight_number, self.arrival_weekday, self.getArrivalDateAsString(), self.getArrivalTimeAsString, "TODO", "TODO", "TODO", "TODO"] def setDateTimeLastUpdated(self, year, month, day, hour, minute): self.dateTimeUpdated = datetime.datetime(year, month, day, hour, minute) def setDateTimeCreated(self, year, month, day, hour, minute): self.dateTimeCreated = datetime.datetime(year, month, day, hour, minute) def setReferenceNumber(self, refNumber): self.referenceNumber = refNumber def setFirstName(self, firstName): self.firstName = firstName def setLastName(self, lastName): self.lastName = lastName def getArrivalDateAsString(self): return self.arrival_datetime.day + " " + MONTH_ABBR_NUMBERS_INVERSE[self.arrival_datetime.day] + " " + self.arrival_datetime.year def getArrivalTimeAsString(self): return self.arrival_datetime.hour + ":" + self.arrival_datetime.minute class ClientExtractor: def __init__(self): self.auth_string= "" self.clientSet = [] def ExecuteSequence(self): self.InitializeCredentials() self.GetRawClientList() self.ConvertListToClients() self.WriteSpreadsheet() def InitializeCredentials(self): credentials = MiscTools.GetGmailCreds(CREDENTIALS_PATH) self.username = credentials['USERNAME'] self.client_id = credentials['CLIENTID'] self.client_token = credentials['CLIENTTOKEN'] def GetRawClientList(self): if self.auth_string == "": print 'To authorize token, visit this url and follow the directions:' print ' %s' % OAuth2Tools.GeneratePermissionUrl(self.client_id, SCOPE) authorization_code = raw_input('Enter verification code: ') auth_tokens= OAuth2Tools.AuthorizeTokens(self.client_id, self.client_token, authorization_code) self.auth_string= OAuth2Tools.GenerateOAuth2String(self.username, auth_tokens['access_token'], base64_encode=False) latestRawEmail= EmailTools.GetLatestEmail(self.username, self.auth_string) latestEmail= 
EmailTools.ConvertRawToEmailMessage(latestRawEmail) emailData= EmailTools.ConvertEmailMessageToData(latestEmail, 0) self.dataList = DataTools.BreakDataStringToDataList(emailData) def GetFakeRawClientList(self): with open("./data/EmailData2.txt",'r') as emailData: self.dataList = emailData.read() def ConvertListToClients(self): month = int(raw_input('Enter Month #: ')) day = int(raw_input('Enter Day #: ')) dateToGet = datetime.date(CURRENTYEAR, month, day) self.dateFound = dateToGet self.clientSet = DataTools.HtmlStringToClientList(self.dataList, dateToGet) def WriteSpreadsheet(self): updateNumber = 1 filePrefix = 'students_' + self.dateFound.day + MONTH_NUMBERS_INVERSE[self.dateFound.month] + '_update' directoryFileList = listdir("./spreadsheets") for eachFile in directoryFileList: if filePrefix in eachFile: updateNumber++ workbook = xlsxwriter.Workbook( filePrefix + updateNumber + '.xlsx') worksheet = workbook.add_worksheet() headers = ["First name", "Family name", "Airline Flight No.", "Arrival day Arrival Date", "Arrival time (est.)", "Extra passengers", "Drop-off (University Residence)", "Drop-off Address (other)", "Suburb"] numberClients = len(clientSet) for col,eachHeader in enumerate(headers): worksheet.write(0, col, eachHeader) for row, eachClient in enumerate(self.clientSet): for col, eachData in enumerate(eachClient.GetDataSetAsList()) worksheet.write(row + 1, col, eachData) workbook.close() #check if flightnumbers match flights #highlight updates class DataTools: @staticmethod def HtmlStringToClientList(html_string, date): clientList = [] htmlParsed = re.split("<tr>|</tr>", html_string) betterParsed = htmlParsed[18:-3:2] for client in betterParsed: currentClient = read_html("<table>" + "<tr>" + client + "</tr>" + "</table>")[0].values.tolist()[0] pickUpDate = currentClient[16] ###print pickUpDate if pickUpDate == u'\xc2': continue pickUpDateParsed = pickUpDate.split() monthName = pickUpDateParsed[1] if "." 
in monthName: pickUpDateObject = datetime.date(int(pickUpDateParsed[2]), MONTH_ABBR_NUMBERS[monthName], int(pickUpDateParsed[0])) else: pickUpDateObject = datetime.date(int(pickUpDateParsed[2]), MONTH_NUMBERS[monthName], int(pickUpDateParsed[0])) if pickUpDateObject == date: pickUpTime = MiscTools.TimeStringToTimeObject(currentClient[17]) pickUpDateTime = datetime.datetime(pickUpDateObject.year, pickUpDateObject.month, pickUpDateObject.day, pickUpTime.hour, pickUpTime.minute) newClient = Client(currentClient[1], MiscTools.DateTimeStringToDateTimeObjects(currentClient[2]), MiscTools.DateTimeStringToDateTimeObjects(currentClient[3]), currentClient[4], currentClient[5], currentClient[6], currentClient[12], currentClient[13], currentClient[14], pickUpDateTime, currentClient[15]) clientList.append(newClient) return clientList @staticmethod def BreakDataStringToDataList(dataString): dataList = re.split('[0-9][0-9][0-9][0-9][0-9][0-9]\-[0-9][0-9][0-9][0-9][0-9][0-9]', dataString) return dataList @staticmethod def SplitFirstWordOffString(string_to_split): string_to_split = string_to_split.lstrip() first_word = string_to_split.split(" ")[0] new_string = string_to_split.replace(first_word,"") new_string = new_string.lstrip() return first_word, new_string class MiscTools: @staticmethod def DateTimeStringToDateTimeObjects(dateTimeString): #print dateTimeString date, time = dateTimeString.split(' ') day,month,year = date.split('/') timeNumber, amPm = time.split(' ') hour, minute = timeNumber.split('.') if amPm == u"PM": hour = int(hour) + 12 if hour == 24: hour = 0 return datetime.datetime(int(year), int(month), int(day), int(hour), int(minute)) @staticmethod def TimeStringToTimeObject(timeString): hour = int(timeString[0:2]) if timeString[3:5] == '': minute = 0 else: minute = int(timeString[3:5]) if 'PM' in timeString : hour = hour + 12 return datetime.time(hour, minute) @staticmethod def DatesAreCloseEnough(date1, date2, distanceInDays): pass @staticmethod def GetGmailCreds(path_to_data_file): credentials = {} with open(path_to_data_file, 'r') as credsFile: for line in credsFile: (key, val) = line.split('=') key = key.replace(" ","") val = val.replace("\n","") val = val.replace(" ","") credentials[key] = val return credentials class EmailTools: @staticmethod def GetLatestEmail(EMAILUSER, auth_string): imap_conn = imaplib.IMAP4_SSL('imap.gmail.com') imap_conn.debug = 4 imap_conn.authenticate('XOAUTH2', lambda x: auth_string) imap_conn.select('INBOX') result, data = imap_conn.uid('search', None, "ALL") # search and return uids instead latest_email_uid = data[0].split()[-1] result, data = imap_conn.uid('fetch', latest_email_uid, '(RFC822)') raw_email = data[0][1] return raw_email @staticmethod def ConvertRawToEmailMessage(raw_email): return email.message_from_string(raw_email) #TODO:save text files of both raw emails to avoid data cap @staticmethod def ConvertEmailMessageToData(email_message, payload_index): emailPayload= email_message.get_payload(payload_index) dataDecodeable= emailPayload.get_payload(decode= True) dataDecoded= dataDecodeable.decode('utf-8') return dataDecoded #startDataIndex= dataDecoded.find(ENDOFHEADER) #return dataDecoded[(startDataIndex + len(ENDOFHEADER)):] class OAuth2Tools: @staticmethod def AccountsUrl(command): """Generates the Google Accounts URL. Args: command: The command to execute. Returns: A URL for the given command. 
""" return '%s/%s' % (GOOGLE_ACCOUNTS_BASE_URL, command) @staticmethod def UrlEscape(text): # See OAUTH 5.1 for a definition of which characters need to be escaped. return urllib.quote(text, safe='~-._') @staticmethod def UrlUnescape(text): # See OAUTH 5.1 for a definition of which characters need to be escaped. return urllib.unquote(text) @staticmethod def FormatUrlParams(params): """Formats parameters into a URL query string. Args: params: A key-value map. Returns: A URL query string version of the given parameters. """ param_fragments = [] for param in sorted(params.iteritems(), key=lambda x: x[0]): param_fragments.append('%s=%s' % (param[0], OAuth2Tools.UrlEscape(param[1]))) return '&'.join(param_fragments) @staticmethod def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'): """Generates the URL for authorizing access. This uses the "OAuth2 for Installed Applications" flow described at https://developers.google.com/accounts/docs/OAuth2InstalledApp Args: client_id: Client ID obtained by registering your app. scope: scope for access token, e.g. 'https://mail.google.com' Returns: A URL that the user should visit in their browser. """ params = {} params['client_id'] = client_id params['redirect_uri'] = REDIRECT_URI params['scope'] = scope params['response_type'] = 'code' return '%s?%s' % (OAuth2Tools.AccountsUrl('o/oauth2/auth'), OAuth2Tools.FormatUrlParams(params)) @staticmethod def AuthorizeTokens(client_id, client_secret, authorization_code): """Obtains OAuth access token and refresh token. This uses the application portion of the "OAuth2 for Installed Applications" flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse Args: client_id: Client ID obtained by registering your app. client_secret: Client secret obtained by registering your app. authorization_code: code generated by Google Accounts after user grants permission. Returns: The decoded response from the Google Accounts server, as a dict. Expected fields include 'access_token', 'expires_in', and 'refresh_token'. """ params = {} params['client_id'] = client_id params['client_secret'] = client_secret params['code'] = authorization_code params['redirect_uri'] = REDIRECT_URI params['grant_type'] = 'authorization_code' request_url = OAuth2Tools.AccountsUrl('o/oauth2/token') response = urllib.urlopen(request_url, urllib.urlencode(params)).read() return json.loads(response) @staticmethod def RefreshToken(client_id, client_secret, refresh_token): """Obtains a new token given a refresh token. See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh Args: client_id: Client ID obtained by registering your app. client_secret: Client secret obtained by registering your app. refresh_token: A previously-obtained refresh token. Returns: The decoded response from the Google Accounts server, as a dict. Expected fields include 'access_token', 'expires_in', and 'refresh_token'. """ params = {} params['client_id'] = client_id """Generates an IMAP OAuth2 authentication string. See https://developers.google.com/google-apps/gmail/oauth2_overview Args: username: the username (email address) of the account to authenticate access_token: An OAuth2 access token. base64_encode: Whether to base64-encode the output. Returns: The SASL argument for the OAuth2 mechanism. 
""" auth_string = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token) if base64_encode: auth_string = base64.b64encode(auth_string) return auth_string @staticmethod def TestImapAuthentication(user, auth_string): """Authenticates to IMAP with the given auth_string. Prints a debug trace of the attempted IMAP connection. Args: user: The Gmail username (full email address) auth_string: A valid OAuth2 string, as returned by GenerateOAuth2String. Must not be base64-encoded, since imaplib does its own base64-encoding. """ print imap_conn = imaplib.IMAP4_SSL('imap.gmail.com') imap_conn.debug = 4 imap_conn.authenticate('XOAUTH2', lambda x: auth_string) imap_conn.select('INBOX') @staticmethod def TestSmtpAuthentication(user, auth_string): """Authenticates to SMTP with the given auth_string. Args: user: The Gmail username (full email address) auth_string: A valid OAuth2 string, not base64-encoded, as returned by GenerateOAuth2String. """ print smtp_conn = smtplib.SMTP('smtp.gmail.com', 587) smtp_conn.set_debuglevel(True)
apache-2.0
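clientextractor.py above authenticates to Gmail's IMAP server by building a SASL XOAUTH2 string of the form 'user=<email>\1auth=Bearer <token>\1\1' and handing it to imaplib's authenticate(), which performs its own base64 encoding. Below is a minimal sketch of just that step with placeholder address and token (not real credentials); it assumes a valid OAuth2 access token has already been obtained, e.g. as in OAuth2Tools.AuthorizeTokens().

import imaplib

def xoauth2_string(username, access_token):
    # SASL XOAUTH2 argument, same form as GenerateOAuth2String() above.
    # Returned as bytes so it also works with Python 3's imaplib; under
    # Python 2 (which this script targets) a plain str works as well.
    return ('user=%s\1auth=Bearer %s\1\1' % (username, access_token)).encode('ascii')

def open_inbox(username, access_token):
    # imaplib base64-encodes the authenticate() payload itself, so the
    # raw (non-encoded) string is returned from the lambda.
    conn = imaplib.IMAP4_SSL('imap.gmail.com')
    conn.authenticate('XOAUTH2', lambda _: xoauth2_string(username, access_token))
    conn.select('INBOX')
    return conn

# Example (hypothetical values):
# conn = open_inbox('user@example.com', access_token)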
rahul-c1/scikit-learn
examples/linear_model/plot_lasso_lars.py
363
1080
#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================

Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)

# Author: Fabian Pedregosa <[email protected]>
#         Alexandre Gramfort <[email protected]>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model
from sklearn import datasets

diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)

xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]

plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
bsd-3-clause
annegabrielle/secure_adhoc_network_ns-3
src/contrib/flow-monitor/examples/wifi-olsr-flowmon.py
5
6935
# -*- Mode: Python; -*- # Copyright (c) 2009 INESC Porto # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Authors: Gustavo Carneiro <[email protected]> import sys import ns3 DISTANCE = 100 # (m) NUM_NODES_SIDE = 3 def main(argv): cmd = ns3.CommandLine() cmd.NumNodesSide = None cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)") cmd.Results = None cmd.AddValue("Results", "Write XML results to file") cmd.Plot = None cmd.AddValue("Plot", "Plot the results using the matplotlib python module") cmd.Parse(argv) wifi = ns3.WifiHelper.Default() wifiMac = ns3.NqosWifiMacHelper.Default() wifiPhy = ns3.YansWifiPhyHelper.Default() wifiChannel = ns3.YansWifiChannelHelper.Default() wifiPhy.SetChannel(wifiChannel.Create()) ssid = ns3.Ssid("wifi-default") wifi.SetRemoteStationManager("ns3::ArfWifiManager") wifiMac.SetType ("ns3::AdhocWifiMac", "Ssid", ns3.SsidValue(ssid)) internet = ns3.InternetStackHelper() list_routing = ns3.Ipv4ListRoutingHelper() olsr_routing = ns3.OlsrHelper() static_routing = ns3.Ipv4StaticRoutingHelper() list_routing.Add(static_routing, 0) list_routing.Add(olsr_routing, 100) internet.SetRoutingHelper(list_routing) ipv4Addresses = ns3.Ipv4AddressHelper() ipv4Addresses.SetBase(ns3.Ipv4Address("10.0.0.0"), ns3.Ipv4Mask("255.255.255.0")) port = 9 # Discard port(RFC 863) onOffHelper = ns3.OnOffHelper("ns3::UdpSocketFactory", ns3.Address(ns3.InetSocketAddress(ns3.Ipv4Address("10.0.0.1"), port))) onOffHelper.SetAttribute("DataRate", ns3.DataRateValue(ns3.DataRate("100kbps"))) onOffHelper.SetAttribute("OnTime", ns3.RandomVariableValue(ns3.ConstantVariable(1))) onOffHelper.SetAttribute("OffTime", ns3.RandomVariableValue(ns3.ConstantVariable(0))) addresses = [] nodes = [] if cmd.NumNodesSide is None: num_nodes_side = NUM_NODES_SIDE else: num_nodes_side = int(cmd.NumNodesSide) for xi in range(num_nodes_side): for yi in range(num_nodes_side): node = ns3.Node() nodes.append(node) internet.Install(ns3.NodeContainer(node)) mobility = ns3.ConstantPositionMobilityModel() mobility.SetPosition(ns3.Vector(xi*DISTANCE, yi*DISTANCE, 0)) node.AggregateObject(mobility) devices = wifi.Install(wifiPhy, wifiMac, node) ipv4_interfaces = ipv4Addresses.Assign(devices) addresses.append(ipv4_interfaces.GetAddress(0)) for i, node in enumerate(nodes): destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)] #print i, destaddr onOffHelper.SetAttribute("Remote", ns3.AddressValue(ns3.InetSocketAddress(destaddr, port))) app = onOffHelper.Install(ns3.NodeContainer(node)) app.Start(ns3.Seconds(ns3.UniformVariable(20, 30).GetValue())) #internet.EnablePcapAll("wifi-olsr") flowmon_helper = ns3.FlowMonitorHelper() #flowmon_helper.SetMonitorAttribute("StartTime", ns3.TimeValue(ns3.Seconds(31))) monitor = flowmon_helper.InstallAll() monitor.SetAttribute("DelayBinWidth", ns3.DoubleValue(0.001)) monitor.SetAttribute("JitterBinWidth", ns3.DoubleValue(0.001)) 
monitor.SetAttribute("PacketSizeBinWidth", ns3.DoubleValue(20)) ns3.Simulator.Stop(ns3.Seconds(44.0)) ns3.Simulator.Run() def print_stats(os, st): print >> os, " Tx Bytes: ", st.txBytes print >> os, " Rx Bytes: ", st.rxBytes print >> os, " Tx Packets: ", st.txPackets print >> os, " Rx Packets: ", st.rxPackets print >> os, " Lost Packets: ", st.lostPackets if st.rxPackets > 0: print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets) print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1)) print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1 if 0: print >> os, "Delay Histogram" for i in range(st.delayHistogram.GetNBins () ): print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \ st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i) print >> os, "Jitter Histogram" for i in range(st.jitterHistogram.GetNBins () ): print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \ st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i) print >> os, "PacketSize Histogram" for i in range(st.packetSizeHistogram.GetNBins () ): print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \ st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i) for reason, drops in enumerate(st.packetsDropped): print " Packets dropped by reason %i: %i" % (reason, drops) #for reason, drops in enumerate(st.bytesDropped): # print "Bytes dropped by reason %i: %i" % (reason, drops) monitor.CheckForLostPackets() classifier = flowmon_helper.GetClassifier() if cmd.Results is None: for flow_id, flow_stats in monitor.GetFlowStats(): t = classifier.FindFlow(flow_id) proto = {6: 'TCP', 17: 'UDP'} [t.protocol] print "FlowID: %i (%s %s/%s --> %s/%i)" % \ (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort) print_stats(sys.stdout, flow_stats) else: print monitor.SerializeToXmlFile(cmd.Results, True, True) if cmd.Plot is not None: import pylab delays = [] for flow_id, flow_stats in monitor.GetFlowStats(): tupl = classifier.FindFlow(flow_id) if tupl.protocol == 17 and tupl.sourcePort == 698: continue delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets) pylab.hist(delays, 20) pylab.xlabel("Delay (s)") pylab.ylabel("Number of Flows") pylab.show() return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
gpl-2.0
LangmuirSim/langmuir
LangmuirPython/analyze/equil.py
2
3244
# -*- coding: utf-8 -*-
"""
equil.py
========

.. argparse::
    :module: equil
    :func: create_parser
    :prog: equil

.. moduleauthor:: Adam Gagorik <[email protected]>
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
import langmuir as lm
import pandas as pd
import numpy as np
import argparse
import os

desc = """
Check simulation(s) for equilibration.
"""

def create_parser():
    parser = argparse.ArgumentParser()
    parser.description = desc

    parser.add_argument(dest='pkls', default=[], type=str, nargs='*',
                        metavar='pkl', help='input files')
    parser.add_argument('-r', action='store_true',
                        help='search for files recursivly')
    parser.add_argument('--ycol', default='drain:current', type=str,
                        help='ycolumn to plot')
    parser.add_argument('--legend', action='store_true', help='show legend')
    parser.add_argument('--zoom', default=0.05, type=float, help='zoom factor')
    parser.add_argument('--show', action='store_true', help='show plot')
    parser.add_argument('--save', action='store_true', help='save plot')

    return parser

def get_arguments(args=None):
    parser = create_parser()
    opts = parser.parse_args(args)

    if not opts.pkls:
        opts.pkls = [os.getcwd()]

    pkls = []
    for pkl in opts.pkls:
        if os.path.isdir(pkl):
            pkls.extend(lm.find.pkls(pkl, stub='combined*', r=opts.r))
        else:
            pkls.append(pkl)
    opts.pkls = pkls

    if not opts.pkls:
        raise RuntimeError, 'can not find any pkl files'

    if not opts.show and not opts.save:
        opts.show = True

    return opts

if __name__ == '__main__':
    work = os.getcwd()
    opts = get_arguments()

    y_col = opts.ycol

    cmap = mpl.cm.get_cmap('spectral')
    colors = [cmap(val) for val in np.linspace(0, 1, len(opts.pkls))]

    fig, ax1 = lm.plot.subplots(1, 1, b=0.75, l=1.5, r=1.5)

    xmax = 1
    ymax = 0

    handles, labels = [], []
    for i, pkl in enumerate(opts.pkls):
        print i, pkl
        data = lm.common.load_pkl(pkl)
        data = lm.analyze.calculate(data)
        data[y_col].plot(lw=0.25, color='k')
        roll = int(len(data.index) / float(32))
        pd.rolling_mean(data[y_col], roll).plot(lw=1, color=colors[i])
        handles.append(plt.Rectangle((0, 0), 1, 1, fc=colors[i], lw=0))
        labels.append('sim%d' % (i + 1))
        xmax = max(xmax, data.index[-1])
        ymax = max(ymax, data.ix[int(data.index.size * 0.10):, y_col].max())

    ymax = abs(ymax)

    if opts.legend:
        plt.legend(handles, labels, prop=dict(size='xx-small'),
                   loc='upper left', bbox_transform=ax1.transAxes,
                   bbox_to_anchor=(1, 1))

    plt.ylim(-ymax, ymax)
    plt.xlim(0, xmax)
    lm.plot.zoom(l=0, r=0, factor=opts.zoom)

    plt.ticklabel_format(scilimits=(-4, 4))
    plt.tick_params(labelsize='small')
    lm.plot.maxn_locator(x=5, y=5)

    plt.xlabel('time (ps)', size='small')
    plt.ylabel(y_col, size='small')

    if opts.save:
        handle = lm.common.format_output(stub=opts.stub, name='rdf', ext=opts.ext)
        print 'saved: %s' % handle
        lm.plot.save(handle)

    if opts.show:
        plt.show()
gpl-2.0
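equil.py above judges equilibration by overlaying each raw trace with a rolling mean whose window is 1/32 of the trace length. pd.rolling_mean() has since been removed from pandas in favor of Series.rolling(window).mean(); below is a small sketch of the same smoothing with the current API on synthetic data (the Langmuir .pkl files and the langmuir helper module are not assumed to be available here).

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Synthetic "drain:current"-like trace: a transient decaying onto a noisy plateau.
t = np.arange(5000)
trace = pd.Series(-1.0 + np.exp(-t / 500.0) + 0.05 * np.random.randn(t.size), index=t)

roll = int(len(trace.index) / 32.0)      # same window rule as equil.py
smooth = trace.rolling(roll).mean()      # modern replacement for pd.rolling_mean

trace.plot(lw=0.25, color='k')
smooth.plot(lw=1, color='r')
plt.xlabel('time (ps)')
plt.ylabel('drain:current')
plt.show()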
rsivapr/scikit-learn
benchmarks/bench_plot_parallel_pairwise.py
297
1247
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time

import pylab as pl

from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels


def plot(func):
    random_state = check_random_state(0)
    one_core = []
    multi_core = []
    sample_sizes = range(1000, 6000, 1000)

    for n_samples in sample_sizes:
        X = random_state.rand(n_samples, 300)

        start = time.time()
        func(X, n_jobs=1)
        one_core.append(time.time() - start)

        start = time.time()
        func(X, n_jobs=-1)
        multi_core.append(time.time() - start)

    pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
    pl.plot(sample_sizes, one_core, label="one core")
    pl.plot(sample_sizes, multi_core, label="multi core")
    pl.xlabel('n_samples')
    pl.ylabel('Time (s)')
    pl.title('Parallel %s' % func.__name__)
    pl.legend()


def euclidean_distances(X, n_jobs):
    return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)


def rbf_kernels(X, n_jobs):
    return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)

plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
bsd-3-clause
adrian-soto/QEdark_repo
tools/bandsndos/bandsndos_TlBr.py
6
20254
# # Adrian Soto # 22-12-2014 # Stony Brook University # ################################################ # Plot band structure and DOS from the # output of the bands.x program in the # Quantum Espresso package. # # Features: # 1) Allows for scissor correction (band shift) # 2) # ################################################ import math import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib.ticker import AutoMinorLocator import matplotlib.gridspec as gridspec import csv plt.rcParams['font.family'] = 'Serif' plt.rcParams['font.serif'] = 'Times New Roman' #rcParams['text.usetex'] = True rcParams['font.size'] = 24 class band: def __init__(self, numkpoints, bandenergies): self.nks = numkpoints if (len(bandenergies) != numkpoints): print "ERROR: list of band energies has wrong length. Setting band to 0." self.nrg = [0] * numkpoints else: self.nrg = bandenergies def printband(self): print self.nrg def shift(self, delta): self.nrg = map(lambda x : x+delta, self.nrg) # watch for scope here. return ################################################ # End of class band ################################################ class kpoints: def __init__(self): self.klist = [] class dos: def __init__(self): #, numE, dosE, dosG, dosI): self.numE = 0 self.dosE = [] self.dosG = [] self.dosI = [] def Load(self, dosfile): # # Load DOS from dos.x output # print " " print "Loading DOS from ", dosfile print " " # Count lines in file self.numE=sum(1 for line in open(dosfile)) # Read file line by line and process f=open(dosfile, 'r') # First line is header. Discard data=f.readline() # Iterate over file lines for ilin in range(1,self.numE): data=f.readline() E=float(data[0:7]) self.dosE.append(E) G=float(data[9:19]) self.dosG.append(G) I=float(data[21:31]) self.dosI.append(I) f.close() return ################################################ # End of class dos ################################################ # # Global functions # def w0gauss(x): # As in flib/w0gauss.f90 in the QE package pi = 3.141592653589793 w0 = 1.0/math.sqrt(pi)*math.exp(-(x-1.0/math.sqrt(2.0))**2)*(2.0-math.sqrt(2.0)*x) return w0 def ReadBandStructure(bandsfile): # # This function reads the band structure as written # to output of the bands.x program. It returns the bs # as a flat list with all energies and another list with # the k-point coordinates. # f = open(bandsfile, 'r') # First line contains nbnd and nks. Read. currentline = f.readline() nks = int(currentline[22:26]) nbnd = int(currentline[12:16]) # Following lines contain the k-point coordinates # and the band energies. # Calculate number of lines containing band structure: # nks k-point lines # At each k-point there are (1+nbnd/10) energy values. 
nlpkp = 1+nbnd/10 # Number of Lines Per K-Point nlines = nks + nks * nlpkp bsaux = [] xk = [] for ik in range (0, nks): currentline = f.readline() #kpoint = currentline[12:40] kpoint = [float(x) for x in currentline.split()] xk.append(kpoint) auxenerg = [] for ibnd in range(0, nlpkp): currentline = f.readline() # append current line to auxiliary list auxenerg.append( float(x) for x in currentline.split() ) # flatten list of lists containing energies for a given kpoint # (each sublist corresponds to one line in the bands.dat file) energ = [item for sublist in auxenerg for item in sublist] # Sort ascendingly band energies for current k-point (to # prevent artificial level crossings if QE bands.x output # does not sort them correctly) and append to band structure bsaux.append(sorted(energ)) f.close() # Flatten bs list bsflat = [item for sublist in bsaux for item in sublist] return nks, nbnd, xk, bsflat def SortByBands(nks, nbnd, bsflat): # Rearrarange bs from k-points to bands bs = [] for ibnd in range (0, nbnd): currentband=[] for ik in range (0, nks): #currentband.append(bsflat[ik*nbnd+ibnd]) bs.append(bsflat[ik*nbnd+ibnd]) #bs.append( currentband ) return bs def FindHLGap(nks, hvb, lcb): # # Find HOMO and LUMO energies and energy gap # # hvb = highest valence band # lcb = lowest conduction band # # Ehvb = highest valence energy or HOMO energy # Elcb = lowest conduction energy or LUMO energy # gap = lcb[0] - hvb[0] for ik1 in range (0, nks): auxcond = lcb[ik1] for ik2 in range (0, nks): auxval = hvb[ik2] currentgap = auxcond-auxval if (currentgap < 0.0): print "ERROR: negative gap" elif (currentgap < gap): gap = currentgap Ehvb = max(hvb) Elcb = min(lcb) return Ehvb, Elcb, gap def Scissor(nks, newgap, bands, shifttype): # # shifttype == 0 : shift valence bands by -0.5*delta and # conduction bands by 0.5*delta # shifttype == 1 : as in 0 but placing the highest valence # energy at 0.0 # shifttype == 2 : as in 0 but placing the gap center at 0.0 # EHOMO, ELUMO, oldgap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg) delta=(newgap-oldgap)/2.0 # Apply scissor to band structure for ibnd in range (0, nbnd): if (ibnd < nval): bands[ibnd].shift(-1.0*delta) else: bands[ibnd].shift(delta) if (shifttype==0): print "Scissor correction to band energies has been applied." return elif (shifttype==1): EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg) delta = -1.0*EHOMO #print "delta=", delta for ibnd in range (0, nbnd): bands[ibnd].shift(delta) print "Scissor correction to band energies has been applied." print "Highest valence energy has been set to 0.0 eV" return elif (shifttype==2): EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg) delta = -0.5*(EHOMO+ELUMO) for ibnd in range (0, nbnd): bands[ibnd].shift(delta) print "Scissor correction to band energies has been applied." print "Gap center has been set to 0.0 eV" return else: print "ERROR: shifttype has an non-valid value. Default value shifttype==0." print "Scissor correction to band energies has been applied." 
return def CreateDOS(nks, nbnd, bzv, Emin, Emax, deltaE, bnd, normalize): # ATTENTION: bnd must be an object of the class band Emin = min(bnd[0].nrg) Emax = max(bnd[nbnd-1].nrg) ndos = int((Emax - Emin)/deltaE + 0.50000001) # int always rounds to lower integer dosE = [] dosG = [] intg=0.0 deltaEgauss=5.0*deltaE d3k=(1.0/nks)*bzv wk=2.0/nks print "Creating DOS with uniform k-point weights" # Create DOS for idos in range (0, ndos): E = Emin + idos * deltaE dosg = 0.0 for ik in range(0, nks): for ibnd in range (0, nbnd): dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaEgauss ) * wk dosg = dosg/deltaEgauss intg = intg + dosg*deltaE # integrated DOS dosE.append(E) dosG.append(dosg) print "\n Integrated DOS=", intg, # Normalize DOS if (normalize == 1): print "Normalizing DOS to 1.0 \n" dosGnorm=dosG for idos in range (0, ndos): dosGnorm[idos]=dosGnorm[idos]/intg return dosE, dosGnorm if(normalize==0): return dosE, dosG else: print " ERROR!! in CreateDOS function: wrong DOS normalization choice." return def PlotBandStructure(nbnd, nval, bnd, plotfile, Ef, sympoints, nks_btw_sympoints ): # # ATTENTION: bnd must be an object of the class band # # nval: number of valence bands # Ef: Fermi Energy. If false then it won't print horizontal line # sympoints: list containing labels of symmetry points # nks_btw_sympoints: number of k-points between symmetry points # # NOTE: this function assumes that the number of points # between symmetry points is constant # print "Plotting band structure to", plotfile col = 'k' for ibnd in range (0, nbnd): #if (ibnd < nval): # col='b' #else: # col='r' plt.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o') y_min = min(bnd[0].nrg) y_max = min(bnd[nbnd-1].nrg) plt.xlabel("Brillouin zone path") plt.ylabel("band energies (eV)") numsympoints = len(sympoints) kpath=[] xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints) for i in range(0, numsympoints): kpath.append(sympoints[i]) if (i < numsympoints-1): for j in range (0, nks_btw_sympoints-1): kpath.append('') # plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs) # Ticks and vertical lines across BS plot plt.xticks(xticks, sympoints) for i in range(0,numsympoints): plt.axvline(x=xticks[i], ymin=y_min, ymax=y_max, hold=None, color='k', linewidth=0.25) if (not Ef): plt.axhline(Ef, color="black", linestyle="--") plt.xlim( 0, len(bnd[0].nrg)-1 ) plt.savefig(plotfile) return def PlotDOS(dosE, dosG, plotname): # ATTENTION: dosG and dosE must be lists of reals plt.plot(dosG, dosE) plt.xlabel("Density Of States") plt.ylabel("band energies (eV)") plt.gca().set_xlim(left=0) plt.savefig(plotname) return def PlotBnD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, dosE, dosG, plotname): col = 'k' # Two subplots, unpack the axes array immediately f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) for ibnd in range (0, nbnd): ax1.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o') ax1.set_title('Sharing Y axis') ax2.plot(dosG, dosE) ax2.set_xlim([0, 0.1]) plt.ylim([-15.0, 20.0]) #plt.subplots_adjust(left=0.0, right=0.8) plt.subplots_adjust(wspace = 0.0) plt.show() return def PlotBnDD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, plotname): ###################################### # Plot generation and formatting ###################################### # Two subplots, unpack the axes array immediately gs = gridspec.GridSpec(1, 2,width_ratios=[4,1]) f = plt.figure() ax1 = plt.subplot(gs[0]) ax2 = plt.subplot(gs[1]) # 
Formatting col = 'k' ax1.set_xlabel("Brillouin zone path") ax1.xaxis.set_label_position("bottom") ax1.set_ylabel("E [eV]") ax1.text(0.07, -12.50, 'Si', fontsize=28) ###ax2.text(0.07, 18.00, 'Si', fontsize=18) ax2.set_xlabel("DOS \n [eV$^{-1}$]") ax2.xaxis.set_label_position("top") # ax2.yaxis.set_label_position("right") # ax2.set_ylabel("E [eV]", rotation=270) y_min = -13.0 y_max = 20.0 x2_min = 0.0 x2_max = 5.0 ax1.set_ylim([y_min, y_max]) ax2.set_xlim([x2_min, x2_max]) ax2.set_ylim([y_min, y_max]) # Ticks minor_locator = AutoMinorLocator(2) ax2.xaxis.set_minor_locator(minor_locator) # Number of symmetry points numsympoints = len(sympoints) # for i in range(0, numsympoints): # kpath.append(sympoints[i]) # if (i < numsympoints-1): # for j in range (0, nks_btw_sympoints-1): # kpath.append('') # Generate horizontal axis containing k-path accumulated length (for BS plot) x=0.0 klen=[x] dx=1.0/((numsympoints-1)*nks_btw_sympoints) for isym in range(0, numsympoints-1): dx=sym_pt_dists[isym]/nks_btw_sympoints for ipt in range(1, nks_btw_sympoints+1): x=x+dx klen.append(x) #xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints) xticks=[] for isym in range(0, numsympoints): j = isym * nks_btw_sympoints xticks.append(klen[j]) x1_min=min(xticks) x1_max=max(xticks) ax1.set_xlim(x1_min, x1_max) # Plot bands col = '0.4' for ibnd in range (0, nbnd): ax1.plot(klen , bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o') # plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs) # Ticks and vertical lines across BS plot ax1.set_xticks(xticks) ax1.set_xticklabels(sympoints) # Plot DOSs ax2.plot(dosG1, dosE1, linestyle='-', linewidth=1.0, color='b') ax2.plot(dosG2, dosE2, linestyle='-', color='r') dosticks=[0, 5] #dosticks=[0] ax2.set_xticks(dosticks) ax2.set_xticklabels(dosticks) minorx2ticks=[1.0, 2.0, 3.0, 4.0] ax2.set_xticks(minorx2ticks, minor = True) # BS ticks yticks=[-10, -5, 0, 5, 10, 15, 20] minor_locator = AutoMinorLocator(5) ax1.yaxis.set_minor_locator(minor_locator) ax2.yaxis.set_minor_locator(minor_locator) ax1.xaxis.tick_top() ax1.set_yticks(yticks) ax1.set_yticklabels(yticks) #ax2.axes.get_yaxis().set_visible(False) ax2.yaxis.tick_right() ax2.set_yticks(yticks) #ax2.set_yticklabels(yticks) #plt.subplots_adjust(left=0.0, right=0.8) plt.subplots_adjust(wspace = 0.0) # Attempt to fill the area to the left of the DOS # split values into positive and negative alpha_fill=0.5 dosE1neg=[] dosG1neg=[] dosE1pos=[] dosG1pos=[] for i in range(0, len(dosE1)): if(dosE1[i] < 0.0): dosE1neg.append(dosE1[i]) dosG1neg.append(dosG1[i]) else: dosE1pos.append(dosE1[i]) dosG1pos.append(dosG1[i]) # ax2.fill_between(dosG1pos, 0, dosE1pos, alpha=alpha_fill, linewidth=0.0) # Fill left of curve above 0.0 eV # ax2.fill_between(dosG1neg, 0, dosE1neg, alpha=alpha_fill, linewidth=0.0) # Fill left of curve below 0.0 eV dosE1new =[y_min]+dosE1 dosG1new =[0.0]+dosG1 ax2.fill_between(dosG1new, 0, dosE1new, alpha=alpha_fill, linewidth=0.0, edgecolor='w') # Vertical lines across BS plot for i in range(0,numsympoints): ax1.axvline(x=xticks[i], ymin=y_min, ymax=y_max, color='k', linewidth=0.25) # Horizontal line at top of valence band if (not Ef): ax1.axhline(Ef, color="black", linestyle="--") ax2.axhline(Ef, color="black", linestyle="--") #plt.show() plt.savefig(plotname, bbox_inches='tight') return def PlotMultipleDOS(dosE, dosG, plotname): # ATTENTION: dosG and dosE must be lists of lists of reals Ndos=len(dosE[:]) for i in range(0, Ndos): plt.plot(dosG[i], dosE[i]) plt.xlabel("Density Of 
States") plt.ylabel("band energies (eV)") plt.savefig(plotname) return #def WriteBandStructure(): # print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) ) ############################################################################################ ############################################################################################ ############################################################################################ ############################################################################################ ############################ PROGRAM STARTS HERE ################################### ############################################################################################ ############################################################################################ ############################################################################################ ############################################################################################ bohr2ang=0.52918 ############ # Band structure ############ filename="si.bands.dat" nks = 0 nbnd=0 xk=[] bsflt=[] bs=[] sympoints=['$L$','$\Gamma$', '$X$', '$W$', '$K$', '$\Gamma$'] sym_pt_dists=[0.5*math.sqrt(3), 1.0, 0.5, 0.25*math.sqrt(2), 0.75*math.sqrt(2)] ## distances between symmetry points (by hand) nks_btw_sympoints=50 # Read from file and sort bs by bands nks, nbnd, xk, bsflt = ReadBandStructure(filename) if(nbnd==0): print "%% ERROR READING BANDS. EXIT %%" else: bs = SortByBands(nks, nbnd, bsflt) print "nks=", nks print "nbnd=", nbnd # Create band objects bands=[] for ibnd in range (0, nbnd): ledge = ibnd*nks redge = ledge+nks currentband = bs[ledge:redge] bands.append( band(nks, currentband) ) # Scissor correction # Si alat = 10.330495 # Bohr nval = 4 # for Si exptgap = 1.11 # eV # Si # Ge ###alat = 10.8171069 # Bohr ###nval = 14 # for Ge with semicore ###exptgap = 0.67 # Ge # Convert to ANG and calculate BZV alat=alat*bohr2ang V=(alat**3)/4.0 # Good for FCC bzv = (2.0*math.pi)**3/V ncond = nbnd - nval Scissor(nks, exptgap, bands, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0 print "Scissor correction with gap set to", exptgap ############# # DOS ############# filename='si.bands_full.dat' nks1, nbnd1, xk1, bsflt1 = ReadBandStructure(filename) if(nbnd==0): print "%% ERROR READING BANDS. EXIT %%" else: bs1 = SortByBands(nks1, nbnd1, bsflt1) print "nks=", nks1 print "nbnd=", nbnd1 # Create band objects bands1=[] for ibnd in range (0, nbnd1): ledge1 = ibnd*nks1 redge1 = ledge1+nks1 currentband1 = bs1[ledge1:redge1] bands1.append( band(nks1, currentband1) ) # Scissor correction Scissor(nks1, exptgap, bands1, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0 print "Scissor correction with gap set to", exptgap filename='si.bands_243.dat' nks2, nbnd2, xk2, bsflt2 = ReadBandStructure(filename) if(nbnd==0): print "%% ERROR READING BANDS. EXIT %%" else: bs2 = SortByBands(nks2, nbnd2, bsflt2) print "nks=", nks2 print "nbnd=", nbnd2 # Create band objects bands2=[] for ibnd in range (0, nbnd2): ledge2 = ibnd*nks2 redge2 = ledge2+nks2 currentband2 = bs2[ledge2:redge2] bands2.append( band(nks2, currentband2) ) # Scissor correction Scissor(nks2, exptgap, bands2, 1) # 3rd argument is 1. 
Then set 3rd argument of PlotBandStructure to 0.0 print "Scissor correction with gap set to", exptgap # Generate DOSs deltaE = 0.03 #eV dosE1, dosG1 = CreateDOS(nks1, nbnd1, bzv, -13.0, 25.0, deltaE, bands1, 0) dosE2, dosG2 = CreateDOS(nks2, nbnd2, bzv, -13.0, 25.0, deltaE, bands2, 0) # Plot #PlotDOS(dosE, dosG, "DOS.pdf") #PlotBandStructure(nbnd, nval, bands, "BS.pdf", 0.0, sympoints, nks_btw_sympoints) PlotBnDD(nbnd, nval, bands, 0.0, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, "BSnDOS.pdf") # DOS #mydos=dos() #mydos.Load('dos_full.dat') #mydos.Printout()
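# Hedged addition (not in the original script): a quick numerical check of the
# scissor-corrected gap, assuming `bands` and `nval` are defined as above and
# nbnd > nval (bands[i].nrg is the list of energies of band i along the k-path).
vbm = max(bands[nval - 1].nrg)   # valence band maximum along the sampled path
cbm = min(bands[nval].nrg)       # conduction band minimum along the sampled path
print "Indirect gap after scissor correction:", cbm - vbm, "eV"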
gpl-2.0
CGATOxford/CGATPipelines
obsolete/reports/pipeline_chipseq/trackers/Manuscript.py
1
2172
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import numpy.ma

from ChipseqReport import *


class ReproducibilityBetweenSamples(DefaultTracker):

    def __call__(self, track, slice=None):
        set1 = track + "R1"
        set2 = track + "R2"
        statement = '''
        SELECT MAX(pexons_ovl1, pexons_ovl2) FROM overlap
        WHERE set1 IN ('%(set1)s', '%(set2)s')
          AND set2 IN ('%(set1)s', '%(set2)s')
        ''' % locals()
        return odict((("overlap", self.getValue(statement)),))


class AnnotatorTracksComparisonBetweenMotifs(DefaultTracker):
    '''get fold enrichment for AnnotatorTracks comparing intervals
    with and without motifs.'''

    prefix = "annotator_tracks"

    def getSlices(self, subset=None):
        return self.getValues("SELECT DISTINCT category FROM %s" % self.prefix)

    def __call__(self, track, slice=None):

        statement = '''
        SELECT fold, pvalue, qvalue FROM %(tablename)s
        WHERE track = '%(track)s' AND category = '%(slice)s'
        '''

        tablename = self.prefix + "_with_motif"
        with_motif, with_pvalue, with_qvalue = self.getFirstRow(
            statement % locals())

        tablename = self.prefix + "_without_motif"
        without_motif, without_pvalue, without_qvalue = self.getFirstRow(
            statement % locals())

        try:
            ratio = "%5.2f" % (100.0 * (with_motif / without_motif))
        except ZeroDivisionError:
            ratio = "na"

        return odict((("with motif - fold", with_motif),
                      ("without motif", without_motif),
                      ("with/without motif [%]", ratio),
                      ("with motif - pvalue", with_pvalue),
                      ("with motif - qvalue", with_qvalue),
                      ("without motif - pvalue", without_pvalue),
                      ("without motif - qvalue", without_qvalue)))


class AnnotatorRegionsOfInterestComparisonBetweenMotifs(AnnotatorTracksComparisonBetweenMotifs):
    '''get fold enrichment for AnnotatorRegionsOfInterest comparing intervals
    with and without motifs.'''

    prefix = "annotator_roi"
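# Hedged illustration (not part of the original tracker module): the
# "with/without motif [%]" field above is a plain percentage ratio that falls
# back to "na" when the without-motif fold is zero. A standalone sketch:
def _fold_ratio_sketch(with_motif, without_motif):
    try:
        return "%5.2f" % (100.0 * (with_motif / without_motif))
    except ZeroDivisionError:
        return "na"
# _fold_ratio_sketch(3.2, 1.6) -> '200.00'; _fold_ratio_sketch(1.5, 0.0) -> 'na'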
mit
x75/paparazzi
sw/airborne/test/math/compare_utm_enu.py
77
2714
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import

import sys
import os

PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")

from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np

# Origin at ENAC
UTM_EAST0 = 377349  # in m
UTM_NORTH0 = 4824583  # in m
UTM_ZONE0 = 31
ALT0 = 147.000  # in m

utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)

lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)

# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))

# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()

# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
    utm = UtmCoor_d()
    utm.north = i * dist_points + utm_origin.north
    utm.east = i * dist_points + utm_origin.east
    utm.alt = utm_origin.alt
    utm.zone = utm_origin.zone
    #print(utm)
    utm_res[i, 0] = utm.east - utm_origin.east
    utm_res[i, 1] = utm.north - utm_origin.north
    lla = utm.to_lla()
    #print(lla)
    ecef = lla.to_ecef()
    enu = ecef.to_enu(ltp_origin)
    enu_res[i, 0] = enu.x
    enu_res[i, 1] = enu.y
    enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
    enu_res_c[i, 0] = enu_c.x
    enu_res_c[i, 1] = enu_c.y
    #print(enu)

dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)

plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
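# Hedged addition (not in the original script): print the worst-case deviation
# with and without the "true north" rotation, so the effect of the correction
# is visible without inspecting the plots. Uses only arrays defined above.
print("max UTM/ENU error without correction: %.3f m" % error.max())
print("max UTM/ENU error with north correction: %.3f m" % error_c.max())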
gpl-2.0
ethilliez/Image_augmentation
image_augmentation_nolib.py
1
9456
import numpy as np import math from scipy import ndimage, misc from scipy.interpolate import interp1d import re import matplotlib.pyplot as plt from glob import glob from define_parameters import paths, parameters import logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) class image_augmentation: def __init__(self): self.data_path = paths.DATA_PATH self.output_path = paths.OUTPUT_PATH self.translation = parameters.TRANSLATION self.rotation = parameters.ROTATION self.shearing = parameters.SHEARING self.contrast = parameters.CONTRAST self.test = False def _read_data(self, file): image = misc.imread(file) return image def mirror_rotate(self, image, direction='horizontal'): if(direction == 'horizontal'): image_mirror = image[:,::-1,:] elif(direction == 'vertical'): image_mirror = image[::-1,:,:] return image_mirror def translate(self, image, shift): image_trans_left = np.append(image[:,int((1-shift)*len(image[0])):,:], image[:,0:int((1-shift)*len(image[0])),:], axis = 1) image_trans_right = np.append(image[:,int(shift*len(image[0])):,:], image[:,0:int(shift*len(image[0])),:], axis = 1) image_trans_down = np.append(image[:int(shift*len(image)),:,:], image[:int((1-shift)*len(image)),:,:], axis = 0) image_trans_up = np.append(image[int(shift*len(image)):,:,:], image[int((1-shift)*len(image)):,:,:], axis = 0) return image_trans_left, image_trans_right, image_trans_up, image_trans_down def rotate(self, image, angle): angle = angle*3.14159/180.0 image_rotated = np.zeros([len(image),len(image[0]),len(image[0][0])]) for chan in range(0,len(image[0][0])): for x in range(0,len(image)): for y in range(0,len(image[0])): # Rotate pixels xpix = x - len(image)/2 ypix = y - len(image[0])/2 newX = int(round(math.cos(angle)*(xpix) + math.sin(angle)*(ypix))) newY = int(round(-math.sin(angle)*(xpix) + math.cos(angle)*(ypix))) if(newX <= 0 and newY < 0): newX2 = newX + int(len(image)/2) newY2 = newY + int(len(image[0])/2) if(newX < 0 and newY >= 0): newX2 = newX + int(len(image)/2) newY2 = newY - int(len(image[0])/2) if(newX >= 0 and newY > 0): newX2 = newX - int(len(image)/2) newY2 = newY - int(len(image[0])/2) if(newX > 0 and newY <= 0): newX2 = newX - int(len(image)/2) newY2 = newY + int(len(image[0])/2) image_rotated[newX2,newY2,chan] = image[x,y,chan] # Fix lost pixels by correcting with previous pixel if(chan == len(image[0][0])-1 and np.array_equal(image_rotated[x,y], np.array([0,0,0])) and np.array_equal(image_rotated[x, y-1], np.array([0,0,0])) == False): image_rotated[x,y] = image_rotated[x, y-1] return image_rotated def shear(self, image, factor, direction='horizontal'): image_shear = np.zeros([len(image),len(image[0]),len(image[0][0])]) for chan in range(0,len(image[0][0])): if(direction == "vertical"): for x in range(0,len(image)): for y in range(0,len(image[0])): newX = int(round(x + factor*y)) newY = y if(newX >= len(image)-1): break image_shear[newX,newY,chan] = image[x,y,chan] elif(direction == "horizontal"): for x in range(0,len(image)): for y in range(0,len(image[0])): newX = x newY = int(round(y + factor*x)) if(newY >= len(image[0])-1): break image_shear[newX,newY,chan] = image[x,y,chan] return image_shear def change_contrast(self, image, factor_gain, factor_bias): image_contrast = np.zeros([len(image),len(image[0]),len(image[0][0])]) for chan in range(0,len(image[0][0])): image_contrast[:,:,chan] = (chan+1)*factor_gain*image[:,:,chan]+factor_bias return image_contrast def resize_image(self, image, npix): resized_image_l = [] # resize x axis for c in 
range(0,len(image[0][0])): for y in range(0,len(image)): f = interp1d(np.arange(0,len(image[0])), image[y,:,c], kind='cubic') xnew = np.linspace(0, len(image[0])-1, num=npix) if(y==0): resized_image_c = f(xnew) else: resized_image_c = np.vstack((resized_image_c,f(xnew))) resized_image_l.append(resized_image_c) resized_image_a = np.dstack([resized_image_l[0],resized_image_l[1],resized_image_l[2]]) # resize y axis resized_image_l = [] for c in range(0,len(image[0][0])): for x in range(0,len(resized_image_a[0])): f = interp1d(np.arange(0,len(resized_image_a)), resized_image_a[:,x,c], kind='cubic') xnew = np.linspace(0, len(resized_image_a)-1, num=npix) if(x==0): resized_image_c = f(xnew) else: resized_image_c = np.vstack((resized_image_c,f(xnew))) resized_image_l.append(resized_image_c) resized_image = np.dstack([resized_image_l[0],resized_image_l[1],resized_image_l[2]]) # Fix image orientation for i in range(0,3): resized_image=np.rot90(resized_image) return resized_image def save_image(self, image, ori_file, suffixe): # Use Regular Expression to get the name of the Data folder count_slash = ori_file.count('/') pattern="" for i in range(count_slash-1): pattern=pattern+".*/" pattern=pattern+"(.*?)."+ori_file[-3] # Save the image using the Data folder as name for i in range(0,len(image)): misc.imsave(self.output_path+re.search(pattern, ori_file).group(1) +"_"+suffixe[i]+".jpg",image[i]) def plot_image(self, image): plt.imshow(image) plt.show() def perform_augmentation(self,npix): # List all images within folder filelist = glob(self.data_path+'*[a-zA-Z0-9].*') logger.info(("All images to be augmented are: ", filelist)) #-- # Set size for all images logger.info(("Image size: ",npix)) #-- # Tranformation for each image for file in filelist: logger.info(("Performing transformation for image: ", file)) # Read image and resize it image = self._read_data(file) image = self.resize_image(image, npix) self.save_image([image],file,["z"]) # Perform mirror rotation and save image_mirror = self.mirror_rotate(image) self.save_image([image_mirror],file,["a"]) # Perform translation on original image and save image_trans_left, image_trans_right, image_trans_up, image_trans_down = self.translate(image, self.translation) self.save_image([image_trans_left, image_trans_right, image_trans_up, image_trans_down],file,["b","c","d","e"]) # Perform translation on mirror image and save image_trans_left, image_trans_right, image_trans_up, image_trans_down = self.translate(image_mirror, self.translation) self.save_image([image_trans_left, image_trans_right, image_trans_up, image_trans_down],file,["f","g","h","i"]) # Perform rotation on original image and save image_rotated = self.rotate(image, self.rotation) self.save_image([image_rotated],file,["j"]) # Perform rotation on mirror image and save image_rotated = self.rotate(image_mirror, self.rotation) self.save_image([image_rotated],file,["k"]) # Perform shearing on original image and save image_shear = self.shear(image, self.shearing) self.save_image([image_shear],file,["l"]) # Perform shearing on mirror image and save image_shear = self.shear(image_mirror, self.shearing) self.save_image([image_shear],file,["m"]) # Perform change contrast on original image and save image_contrast = self.change_contrast(image, self.contrast[0], self.contrast[1]) self.save_image([image_contrast],file,["n"]) # Perform change contrast on mirror image and save image_contrast = self.change_contrast(image_mirror, self.contrast[0], self.contrast[1]) self.save_image([image_contrast],file,["o"]) # 
Limit to the first image for testing if(self.test): logger.info("Stopping after the first image.") exit() if __name__ == '__main__': process = image_augmentation() process.perform_augmentation(npix = parameters.SIZE_IMAGE)
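# Hedged usage sketch (not part of the original module): applying a single
# transform outside perform_augmentation(); "example.jpg" is a hypothetical file.
# proc = image_augmentation()
# img = proc.resize_image(proc._read_data("example.jpg"), parameters.SIZE_IMAGE)
# proc.plot_image(proc.mirror_rotate(img, direction='vertical'))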
mit
astocko/statsmodels
statsmodels/sandbox/examples/ex_cusum.py
33
3219
# -*- coding: utf-8 -*- """ Created on Fri Apr 02 11:41:25 2010 Author: josef-pktd """ import numpy as np from scipy import stats from numpy.testing import assert_almost_equal import statsmodels.api as sm from statsmodels.sandbox.regression.onewaygls import OneWayLS from statsmodels.stats.diagnostic import recursive_olsresiduals from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2 #examples from ex_onewaygls.py #choose example #-------------- example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1] example_size = [20, 100][1] example_groups = ['2', '2-2'][1] #'2-2': 4 groups, # groups 0 and 1 and groups 2 and 3 have identical parameters in DGP #generate example #---------------- #np.random.seed(87654589) nobs = example_size x1 = 0.1+np.random.randn(nobs) y1 = 10 + 15*x1 + 2*np.random.randn(nobs) x1 = sm.add_constant(x1, prepend=False) #assert_almost_equal(x1, np.vander(x1[:,0],2), 16) #res1 = sm.OLS(y1, x1).fit() #print res1.params #print np.polyfit(x1[:,0], y1, 1) #assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14) #print res1.summary(xname=['x1','const1']) #regression 2 x2 = 0.1+np.random.randn(nobs) if example == 'null': y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true elif example == 'smalldiff': y2 = 11 + 16*x2 + 2*np.random.randn(nobs) elif example == 'mediumdiff': y2 = 12 + 16*x2 + 2*np.random.randn(nobs) else: y2 = 19 + 17*x2 + 2*np.random.randn(nobs) x2 = sm.add_constant(x2, prepend=False) # stack x = np.concatenate((x1,x2),0) y = np.concatenate((y1,y2)) if example_groups == '2': groupind = (np.arange(2*nobs)>nobs-1).astype(int) else: groupind = np.mod(np.arange(2*nobs),4) groupind.sort() #x = np.column_stack((x,x*groupind[:,None])) res1 = sm.OLS(y, x).fit() skip = 8 rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \ recursive_olsresiduals(res1, skip) print(rcusum) print(rresid_scaled[skip-1:]) assert_almost_equal(rparams[-1], res1.params) import matplotlib.pyplot as plt plt.plot(rcusum) plt.plot(rcusumci[0]) plt.plot(rcusumci[1]) plt.figure() plt.plot(rresid) plt.plot(np.abs(rresid)) print('cusum test reject:') print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any()) rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \ recursive_olsresiduals2(res1, skip) #assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13) assert_almost_equal(rparams[skip:], rparams2[skip:],13) #np.c_[rparams[skip+1:], rparams2[skip:-1]] #plt.show() #################### Example break test #import statsmodels.sandbox.tools.stattools from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \ breaks_cusumolsresid#, breaks_cusum H, crit95, ft, s = breaks_hansen(res1) print(H) print(crit95) supb, pval, crit = breaks_cusumolsresid(res1.resid) print(supb, pval, crit) ##check whether this works directly: Ploberger/Kramer framing of standard cusum ##no, it's different, there is another denominator #print breaks_cusumolsresid(rresid[skip:]) #this function is still completely wrong, cut and paste doesn't apply #print breaks_cusum(rresid[skip:])
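# Hedged addition (not in the original example): make the CUSUM-of-OLS-residuals
# stability decision explicit instead of only printing the raw statistics.
print('breaks_cusumolsresid reject at 5% level:', pval < 0.05)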
bsd-3-clause
probml/pyprobml
scripts/celeba_tfds.py
1
6881
import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import os figdir = "../figures" def save_fig(fname): plt.tight_layout() plt.savefig(os.path.join(figdir, fname)) import tensorflow as tf from tensorflow import keras import tensorflow_datasets as tfds #dataname = 'cifar10' # https://www.tensorflow.org/datasets/catalog/cifar10 dataname = 'celeb_a' # 1.3GB # Useful pre-processing functions #https://github.com/google/compare_gan/blob/master/compare_gan/datasets.py def preprocess_celeba_tf(features, H=64, W=64, crop=True): # Crop, resize and scale to [0,1] # If input is not square, and we resize to a square, we will # get distortions. So better to take a square crop first.. img = features["image"] if crop: img = tf.image.resize_with_crop_or_pad(img, 160, 160) img = tf.image.resize(img, [H, W]) img = tf.cast(img, tf.float32) / 255.0 img = img.numpy() return img tfds.disable_progress_bar() datasets, datasets_info = tfds.load(name=dataname, with_info=True, as_supervised=False) print(datasets_info) ''' tfds.core.DatasetInfo( name='celeb_a', version=0.3.0, description='CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset with more than 200K celebrity images, each with 40 attribute annotations. The images in this dataset cover large pose variations and background clutter. CelebA has large diversities, large quantities, and rich annotations, including - 10,177 number of identities, - 202,599 number of face images, and - 5 landmark locations, 40 binary attributes annotations per image. The dataset can be employed as the training and test sets for the following computer vision tasks: face attribute recognition, face detection, and landmark (or facial part) localization. ', urls=['http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html'], features=FeaturesDict({ 'attributes': FeaturesDict({ '5_o_Clock_Shadow': Tensor(shape=(), dtype=tf.bool), 'Arched_Eyebrows': Tensor(shape=(), dtype=tf.bool), 'Attractive': Tensor(shape=(), dtype=tf.bool), 'Bags_Under_Eyes': Tensor(shape=(), dtype=tf.bool), 'Bald': Tensor(shape=(), dtype=tf.bool), 'Bangs': Tensor(shape=(), dtype=tf.bool), 'Big_Lips': Tensor(shape=(), dtype=tf.bool), 'Big_Nose': Tensor(shape=(), dtype=tf.bool), 'Black_Hair': Tensor(shape=(), dtype=tf.bool), 'Blond_Hair': Tensor(shape=(), dtype=tf.bool), 'Blurry': Tensor(shape=(), dtype=tf.bool), 'Brown_Hair': Tensor(shape=(), dtype=tf.bool), 'Bushy_Eyebrows': Tensor(shape=(), dtype=tf.bool), 'Chubby': Tensor(shape=(), dtype=tf.bool), 'Double_Chin': Tensor(shape=(), dtype=tf.bool), 'Eyeglasses': Tensor(shape=(), dtype=tf.bool), 'Goatee': Tensor(shape=(), dtype=tf.bool), 'Gray_Hair': Tensor(shape=(), dtype=tf.bool), 'Heavy_Makeup': Tensor(shape=(), dtype=tf.bool), 'High_Cheekbones': Tensor(shape=(), dtype=tf.bool), 'Male': Tensor(shape=(), dtype=tf.bool), 'Mouth_Slightly_Open': Tensor(shape=(), dtype=tf.bool), 'Mustache': Tensor(shape=(), dtype=tf.bool), 'Narrow_Eyes': Tensor(shape=(), dtype=tf.bool), 'No_Beard': Tensor(shape=(), dtype=tf.bool), 'Oval_Face': Tensor(shape=(), dtype=tf.bool), 'Pale_Skin': Tensor(shape=(), dtype=tf.bool), 'Pointy_Nose': Tensor(shape=(), dtype=tf.bool), 'Receding_Hairline': Tensor(shape=(), dtype=tf.bool), 'Rosy_Cheeks': Tensor(shape=(), dtype=tf.bool), 'Sideburns': Tensor(shape=(), dtype=tf.bool), 'Smiling': Tensor(shape=(), dtype=tf.bool), 'Straight_Hair': Tensor(shape=(), dtype=tf.bool), 'Wavy_Hair': Tensor(shape=(), dtype=tf.bool), 'Wearing_Earrings': Tensor(shape=(), dtype=tf.bool), 'Wearing_Hat': Tensor(shape=(), 
dtype=tf.bool), 'Wearing_Lipstick': Tensor(shape=(), dtype=tf.bool), 'Wearing_Necklace': Tensor(shape=(), dtype=tf.bool), 'Wearing_Necktie': Tensor(shape=(), dtype=tf.bool), 'Young': Tensor(shape=(), dtype=tf.bool), }), 'image': Image(shape=(218, 178, 3), dtype=tf.uint8), 'landmarks': FeaturesDict({ 'lefteye_x': Tensor(shape=(), dtype=tf.int64), 'lefteye_y': Tensor(shape=(), dtype=tf.int64), 'leftmouth_x': Tensor(shape=(), dtype=tf.int64), 'leftmouth_y': Tensor(shape=(), dtype=tf.int64), 'nose_x': Tensor(shape=(), dtype=tf.int64), 'nose_y': Tensor(shape=(), dtype=tf.int64), 'righteye_x': Tensor(shape=(), dtype=tf.int64), 'righteye_y': Tensor(shape=(), dtype=tf.int64), 'rightmouth_x': Tensor(shape=(), dtype=tf.int64), 'rightmouth_y': Tensor(shape=(), dtype=tf.int64), }), }), total_num_examples=202599, splits={ 'test': 19962, 'train': 162770, 'validation': 19867, }, supervised_keys=None, citation="""@inproceedings{conf/iccv/LiuLWT15, added-at = {2018-10-09T00:00:00.000+0200}, author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, biburl = {https://www.bibsonomy.org/bibtex/250e4959be61db325d2f02c1d8cd7bfbb/dblp}, booktitle = {ICCV}, crossref = {conf/iccv/2015}, ee = {http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.425}, interhash = {3f735aaa11957e73914bbe2ca9d5e702}, intrahash = {50e4959be61db325d2f02c1d8cd7bfbb}, isbn = {978-1-4673-8391-2}, keywords = {dblp}, pages = {3730-3738}, publisher = {IEEE Computer Society}, timestamp = {2018-10-11T11:43:28.000+0200}, title = {Deep Learning Face Attributes in the Wild.}, url = {http://dblp.uni-trier.de/db/conf/iccv/iccv2015.html#LiuLWT15}, year = 2015 }""", redistribution_info=, ) ''' input_shape = datasets_info.features['image'].shape print(input_shape) # (218, 178, 3) #H, W, C = input_shape H = 64; W = 64; C = 3 nvalid = 19867 attr_names = datasets_info.features['attributes'].keys() names = list(attr_names) names.append('imgnum') import pandas as pd val_dataset = datasets['validation'] df = pd.DataFrame(columns=names) i = 0 N = 2 images = np.zeros((N, H, W, C)) for sample in val_dataset: #print(sample) #img = sample['image'] img = preprocess_celeba_tf(sample, H=H, W=W, crop=True) attr = sample['attributes'] d = {'imgnum': i} for k in attr_names: v = attr[k].numpy() d[k] = v df = df.append(d, ignore_index=True) images[i] = img if i >= N: break i += 1
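# Hedged usage sketch (not part of the original script): preprocess and save a
# single validation face; the output filename below is illustrative only.
# sample = next(iter(val_dataset))
# face = preprocess_celeba_tf(sample, H=H, W=W, crop=True)
# plt.imshow(face); plt.axis('off')
# save_fig("celeba_single_face.png")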
mit
zorojean/scikit-learn
sklearn/cluster/birch.py
207
22706
# Authors: Manoj Kumar <[email protected]> # Alexandre Gramfort <[email protected]> # Joel Nothman <[email protected]> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy import sparse from math import sqrt from ..metrics.pairwise import euclidean_distances from ..base import TransformerMixin, ClusterMixin, BaseEstimator from ..externals.six.moves import xrange from ..utils import check_array from ..utils.extmath import row_norms, safe_sparse_dot from ..utils.validation import NotFittedError, check_is_fitted from .hierarchical import AgglomerativeClustering def _iterate_sparse_X(X): """This little hack returns a densified row when iterating over a sparse matrix, insted of constructing a sparse matrix for every row that is expensive. """ n_samples = X.shape[0] X_indices = X.indices X_data = X.data X_indptr = X.indptr for i in xrange(n_samples): row = np.zeros(X.shape[1]) startptr, endptr = X_indptr[i], X_indptr[i + 1] nonzero_indices = X_indices[startptr:endptr] row[nonzero_indices] = X_data[startptr:endptr] yield row def _split_node(node, threshold, branching_factor): """The node has to be split if there is no place for a new subcluster in the node. 1. Two empty nodes and two empty subclusters are initialized. 2. The pair of distant subclusters are found. 3. The properties of the empty subclusters and nodes are updated according to the nearest distance between the subclusters to the pair of distant subclusters. 4. The two nodes are set as children to the two subclusters. """ new_subcluster1 = _CFSubcluster() new_subcluster2 = _CFSubcluster() new_node1 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_node2 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_subcluster1.child_ = new_node1 new_subcluster2.child_ = new_node2 if node.is_leaf: if node.prev_leaf_ is not None: node.prev_leaf_.next_leaf_ = new_node1 new_node1.prev_leaf_ = node.prev_leaf_ new_node1.next_leaf_ = new_node2 new_node2.prev_leaf_ = new_node1 new_node2.next_leaf_ = node.next_leaf_ if node.next_leaf_ is not None: node.next_leaf_.prev_leaf_ = new_node2 dist = euclidean_distances( node.centroids_, Y_norm_squared=node.squared_norm_, squared=True) n_clusters = dist.shape[0] farthest_idx = np.unravel_index( dist.argmax(), (n_clusters, n_clusters)) node1_dist, node2_dist = dist[[farthest_idx]] node1_closer = node1_dist < node2_dist for idx, subcluster in enumerate(node.subclusters_): if node1_closer[idx]: new_node1.append_subcluster(subcluster) new_subcluster1.update(subcluster) else: new_node2.append_subcluster(subcluster) new_subcluster2.update(subcluster) return new_subcluster1, new_subcluster2 class _CFNode(object): """Each node in a CFTree is called a CFNode. The CFNode can have a maximum of branching_factor number of CFSubclusters. Parameters ---------- threshold : float Threshold needed for a new subcluster to enter a CFSubcluster. branching_factor : int Maximum number of CF subclusters in each node. is_leaf : bool We need to know if the CFNode is a leaf or not, in order to retrieve the final subclusters. n_features : int The number of features. Attributes ---------- subclusters_ : array-like list of subclusters for a particular CFNode. prev_leaf_ : _CFNode prev_leaf. Useful only if is_leaf is True. next_leaf_ : _CFNode next_leaf. Useful only if is_leaf is True. the final subclusters. 
init_centroids_ : ndarray, shape (branching_factor + 1, n_features) manipulate ``init_centroids_`` throughout rather than centroids_ since the centroids are just a view of the ``init_centroids_`` . init_sq_norm_ : ndarray, shape (branching_factor + 1,) manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. centroids_ : ndarray view of ``init_centroids_``. squared_norm_ : ndarray view of ``init_sq_norm_``. """ def __init__(self, threshold, branching_factor, is_leaf, n_features): self.threshold = threshold self.branching_factor = branching_factor self.is_leaf = is_leaf self.n_features = n_features # The list of subclusters, centroids and squared norms # to manipulate throughout. self.subclusters_ = [] self.init_centroids_ = np.zeros((branching_factor + 1, n_features)) self.init_sq_norm_ = np.zeros((branching_factor + 1)) self.squared_norm_ = [] self.prev_leaf_ = None self.next_leaf_ = None def append_subcluster(self, subcluster): n_samples = len(self.subclusters_) self.subclusters_.append(subcluster) self.init_centroids_[n_samples] = subcluster.centroid_ self.init_sq_norm_[n_samples] = subcluster.sq_norm_ # Keep centroids and squared norm as views. In this way # if we change init_centroids and init_sq_norm_, it is # sufficient, self.centroids_ = self.init_centroids_[:n_samples + 1, :] self.squared_norm_ = self.init_sq_norm_[:n_samples + 1] def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): """Remove a subcluster from a node and update it with the split subclusters. """ ind = self.subclusters_.index(subcluster) self.subclusters_[ind] = new_subcluster1 self.init_centroids_[ind] = new_subcluster1.centroid_ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ self.append_subcluster(new_subcluster2) def insert_cf_subcluster(self, subcluster): """Insert a new subcluster into the node.""" if not self.subclusters_: self.append_subcluster(subcluster) return False threshold = self.threshold branching_factor = self.branching_factor # We need to find the closest subcluster among all the # subclusters so that we can insert our new subcluster. dist_matrix = np.dot(self.centroids_, subcluster.centroid_) dist_matrix *= -2. dist_matrix += self.squared_norm_ closest_index = np.argmin(dist_matrix) closest_subcluster = self.subclusters_[closest_index] # If the subcluster has a child, we need a recursive strategy. if closest_subcluster.child_ is not None: split_child = closest_subcluster.child_.insert_cf_subcluster( subcluster) if not split_child: # If it is determined that the child need not be split, we # can just update the closest_subcluster closest_subcluster.update(subcluster) self.init_centroids_[closest_index] = \ self.subclusters_[closest_index].centroid_ self.init_sq_norm_[closest_index] = \ self.subclusters_[closest_index].sq_norm_ return False # things not too good. we need to redistribute the subclusters in # our child node, and add a new subcluster in the parent # subcluster to accomodate the new child. else: new_subcluster1, new_subcluster2 = _split_node( closest_subcluster.child_, threshold, branching_factor) self.update_split_subclusters( closest_subcluster, new_subcluster1, new_subcluster2) if len(self.subclusters_) > self.branching_factor: return True return False # good to go! 
else: merged = closest_subcluster.merge_subcluster( subcluster, self.threshold) if merged: self.init_centroids_[closest_index] = \ closest_subcluster.centroid_ self.init_sq_norm_[closest_index] = \ closest_subcluster.sq_norm_ return False # not close to any other subclusters, and we still # have space, so add. elif len(self.subclusters_) < self.branching_factor: self.append_subcluster(subcluster) return False # We do not have enough space nor is it closer to an # other subcluster. We need to split. else: self.append_subcluster(subcluster) return True class _CFSubcluster(object): """Each subcluster in a CFNode is called a CFSubcluster. A CFSubcluster can have a CFNode has its child. Parameters ---------- linear_sum : ndarray, shape (n_features,), optional Sample. This is kept optional to allow initialization of empty subclusters. Attributes ---------- n_samples_ : int Number of samples that belong to each subcluster. linear_sum_ : ndarray Linear sum of all the samples in a subcluster. Prevents holding all sample data in memory. squared_sum_ : float Sum of the squared l2 norms of all samples belonging to a subcluster. centroid_ : ndarray Centroid of the subcluster. Prevent recomputing of centroids when ``CFNode.centroids_`` is called. child_ : _CFNode Child Node of the subcluster. Once a given _CFNode is set as the child of the _CFNode, it is set to ``self.child_``. sq_norm_ : ndarray Squared norm of the subcluster. Used to prevent recomputing when pairwise minimum distances are computed. """ def __init__(self, linear_sum=None): if linear_sum is None: self.n_samples_ = 0 self.squared_sum_ = 0.0 self.linear_sum_ = 0 else: self.n_samples_ = 1 self.centroid_ = self.linear_sum_ = linear_sum self.squared_sum_ = self.sq_norm_ = np.dot( self.linear_sum_, self.linear_sum_) self.child_ = None def update(self, subcluster): self.n_samples_ += subcluster.n_samples_ self.linear_sum_ += subcluster.linear_sum_ self.squared_sum_ += subcluster.squared_sum_ self.centroid_ = self.linear_sum_ / self.n_samples_ self.sq_norm_ = np.dot(self.centroid_, self.centroid_) def merge_subcluster(self, nominee_cluster, threshold): """Check if a cluster is worthy enough to be merged. If yes then merge. """ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ new_n = self.n_samples_ + nominee_cluster.n_samples_ new_centroid = (1 / new_n) * new_ls new_norm = np.dot(new_centroid, new_centroid) dot_product = (-2 * new_n) * new_norm sq_radius = (new_ss + dot_product) / new_n + new_norm if sq_radius <= threshold ** 2: (self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_) = \ new_n, new_ls, new_ss, new_centroid, new_norm return True return False @property def radius(self): """Return radius of the subcluster""" dot_product = -2 * np.dot(self.linear_sum_, self.centroid_) return sqrt( ((self.squared_sum_ + dot_product) / self.n_samples_) + self.sq_norm_) class Birch(BaseEstimator, TransformerMixin, ClusterMixin): """Implements the Birch clustering algorithm. Every new sample is inserted into the root of the Clustering Feature Tree. It is then clubbed together with the subcluster that has the centroid closest to the new sample. This is done recursively till it ends up at the subcluster of the leaf of the tree has the closest centroid. Read more in the :ref:`User Guide <birch>`. 
Parameters ---------- threshold : float, default 0.5 The radius of the subcluster obtained by merging a new sample and the closest subcluster should be lesser than the threshold. Otherwise a new subcluster is started. branching_factor : int, default 50 Maximum number of CF subclusters in each node. If a new samples enters such that the number of subclusters exceed the branching_factor then the node has to be split. The corresponding parent also has to be split and if the number of subclusters in the parent is greater than the branching factor, then it has to be split recursively. n_clusters : int, instance of sklearn.cluster model, default None Number of clusters after the final clustering step, which treats the subclusters from the leaves as new samples. By default, this final clustering step is not performed and the subclusters are returned as they are. If a model is provided, the model is fit treating the subclusters as new samples and the initial data is mapped to the label of the closest subcluster. If an int is provided, the model fit is AgglomerativeClustering with n_clusters set to the int. compute_labels : bool, default True Whether or not to compute labels for each fit. copy : bool, default True Whether or not to make a copy of the given data. If set to False, the initial data will be overwritten. Attributes ---------- root_ : _CFNode Root of the CFTree. dummy_leaf_ : _CFNode Start pointer to all the leaves. subcluster_centers_ : ndarray, Centroids of all subclusters read directly from the leaves. subcluster_labels_ : ndarray, Labels assigned to the centroids of the subclusters after they are clustered globally. labels_ : ndarray, shape (n_samples,) Array of labels assigned to the input data. if partial_fit is used instead of fit, they are assigned to the last batch of data. Examples -------- >>> from sklearn.cluster import Birch >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5, ... compute_labels=True) >>> brc.fit(X) Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None, threshold=0.5) >>> brc.predict(X) array([0, 0, 0, 1, 1, 1]) References ---------- * Tian Zhang, Raghu Ramakrishnan, Maron Livny BIRCH: An efficient data clustering method for large databases. http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf * Roberto Perdisci JBirch - Java implementation of BIRCH clustering algorithm https://code.google.com/p/jbirch/ """ def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3, compute_labels=True, copy=True): self.threshold = threshold self.branching_factor = branching_factor self.n_clusters = n_clusters self.compute_labels = compute_labels self.copy = copy def fit(self, X, y=None): """ Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. """ self.fit_, self.partial_fit_ = True, False return self._fit(X) def _fit(self, X): X = check_array(X, accept_sparse='csr', copy=self.copy) threshold = self.threshold branching_factor = self.branching_factor if branching_factor <= 1: raise ValueError("Branching_factor should be greater than one.") n_samples, n_features = X.shape # If partial_fit is called for the first time or fit is called, we # start a new tree. partial_fit = getattr(self, 'partial_fit_') has_root = getattr(self, 'root_', None) if getattr(self, 'fit_') or (partial_fit and not has_root): # The first root is the leaf. Manipulate this object throughout. 
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) # To enable getting back subclusters. self.dummy_leaf_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) self.dummy_leaf_.next_leaf_ = self.root_ self.root_.prev_leaf_ = self.dummy_leaf_ # Cannot vectorize. Enough to convince to use cython. if not sparse.issparse(X): iter_func = iter else: iter_func = _iterate_sparse_X for sample in iter_func(X): subcluster = _CFSubcluster(linear_sum=sample) split = self.root_.insert_cf_subcluster(subcluster) if split: new_subcluster1, new_subcluster2 = _split_node( self.root_, threshold, branching_factor) del self.root_ self.root_ = _CFNode(threshold, branching_factor, is_leaf=False, n_features=n_features) self.root_.append_subcluster(new_subcluster1) self.root_.append_subcluster(new_subcluster2) centroids = np.concatenate([ leaf.centroids_ for leaf in self._get_leaves()]) self.subcluster_centers_ = centroids self._global_clustering(X) return self def _get_leaves(self): """ Retrieve the leaves of the CF Node. Returns ------- leaves: array-like List of the leaf nodes. """ leaf_ptr = self.dummy_leaf_.next_leaf_ leaves = [] while leaf_ptr is not None: leaves.append(leaf_ptr) leaf_ptr = leaf_ptr.next_leaf_ return leaves def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features), None Input data. If X is not provided, only the global clustering step is done. """ self.partial_fit_, self.fit_ = True, False if X is None: # Perform just the final global clustering step. self._global_clustering() return self else: self._check_fit(X) return self._fit(X) def _check_fit(self, X): is_fitted = hasattr(self, 'subcluster_centers_') # Called by partial_fit, before fitting. has_partial_fit = hasattr(self, 'partial_fit_') # Should raise an error if one does not fit before predicting. if not (is_fitted or has_partial_fit): raise NotFittedError("Fit training data before predicting") if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]: raise ValueError( "Training data and predicted data do " "not have same number of features.") def predict(self, X): """ Predict data using the ``centroids_`` of subclusters. Avoid computation of the row norms of X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- labels: ndarray, shape(n_samples) Labelled data. """ X = check_array(X, accept_sparse='csr') self._check_fit(X) reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T) reduced_distance *= -2 reduced_distance += self._subcluster_norms return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)] def transform(self, X, y=None): """ Transform X into subcluster centroids dimension. Each dimension represents the distance from the sample point to each cluster centroid. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters) Transformed data. """ check_is_fitted(self, 'subcluster_centers_') return euclidean_distances(X, self.subcluster_centers_) def _global_clustering(self, X=None): """ Global clustering for the subclusters obtained after fitting """ clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. 
not_enough_centroids = False if isinstance(clusterer, int): clusterer = AgglomerativeClustering( n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True elif (clusterer is not None and not hasattr(clusterer, 'fit_predict')): raise ValueError("n_clusters should be an instance of " "ClusterMixin or an int") # To use in predict to avoid recalculation. self._subcluster_norms = row_norms( self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( "Number of subclusters found (%d) by Birch is less " "than (%d). Decrease the threshold." % (len(centroids), self.n_clusters)) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict( self.subcluster_centers_) if compute_labels: self.labels_ = self.predict(X)
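# Hedged usage sketch (illustrative, not part of the scikit-learn module):
# incremental fitting with partial_fit on chunks, then predicting on all data.
# import numpy as np
# rng = np.random.RandomState(0)
# X = np.concatenate([rng.randn(100, 2), rng.randn(100, 2) + 5])
# brc = Birch(n_clusters=2)
# for chunk in np.array_split(X, 4):
#     brc.partial_fit(chunk)
# labels = brc.predict(X)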
bsd-3-clause
asnorkin/sentiment_analysis
site/lib/python2.7/site-packages/sklearn/linear_model/bayes.py
50
16145
""" Various bayesian regression """ from __future__ import print_function # Authors: V. Michel, F. Pedregosa, A. Gramfort # License: BSD 3 clause from math import log import numpy as np from scipy import linalg from .base import LinearModel from ..base import RegressorMixin from ..utils.extmath import fast_logdet, pinvh from ..utils import check_X_y ############################################################################### # BayesianRidge regression class BayesianRidge(LinearModel, RegressorMixin): """Bayesian ridge regression Fit a Bayesian ridge model and optimize the regularization parameters lambda (precision of the weights) and alpha (precision of the noise). Read more in the :ref:`User Guide <bayesian_regression>`. Parameters ---------- n_iter : int, optional Maximum number of iterations. Default is 300. tol : float, optional Stop the algorithm if w has converged. Default is 1.e-3. alpha_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter. Default is 1.e-6 alpha_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter. Default is 1.e-6. lambda_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter. Default is 1.e-6. lambda_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter. Default is 1.e-6 compute_score : boolean, optional If True, compute the objective function at each step of the model. Default is False fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). Default is True. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. verbose : boolean, optional, default False Verbose mode when fitting the model. Attributes ---------- coef_ : array, shape = (n_features) Coefficients of the regression model (mean of distribution) alpha_ : float estimated precision of the noise. lambda_ : float estimated precision of the weights. scores_ : float if computed, value of the objective function (to be maximized) Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.BayesianRidge() >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) ... # doctest: +NORMALIZE_WHITESPACE BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False, copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06, n_iter=300, normalize=False, tol=0.001, verbose=False) >>> clf.predict([[1, 1]]) array([ 1.]) Notes ----- See examples/linear_model/plot_bayesian_ridge.py for an example. 
""" def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6, lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False, fit_intercept=True, normalize=False, copy_X=True, verbose=False): self.n_iter = n_iter self.tol = tol self.alpha_1 = alpha_1 self.alpha_2 = alpha_2 self.lambda_1 = lambda_1 self.lambda_2 = lambda_2 self.compute_score = compute_score self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.verbose = verbose def fit(self, X, y): """Fit the model Parameters ---------- X : numpy array of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples] Target values Returns ------- self : returns an instance of self. """ X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True) X, y, X_offset, y_offset, X_scale = self._preprocess_data( X, y, self.fit_intercept, self.normalize, self.copy_X) n_samples, n_features = X.shape # Initialization of the values of the parameters alpha_ = 1. / np.var(y) lambda_ = 1. verbose = self.verbose lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 self.scores_ = list() coef_old_ = None XT_y = np.dot(X.T, y) U, S, Vh = linalg.svd(X, full_matrices=False) eigen_vals_ = S ** 2 # Convergence loop of the bayesian ridge regression for iter_ in range(self.n_iter): # Compute mu and sigma # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X) # coef_ = sigma_^-1 * XT * y if n_samples > n_features: coef_ = np.dot(Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, None]) coef_ = np.dot(coef_, XT_y) if self.compute_score: logdet_sigma_ = - np.sum( np.log(lambda_ + alpha_ * eigen_vals_)) else: coef_ = np.dot(X.T, np.dot( U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T)) coef_ = np.dot(coef_, y) if self.compute_score: logdet_sigma_ = lambda_ * np.ones(n_features) logdet_sigma_[:n_samples] += alpha_ * eigen_vals_ logdet_sigma_ = - np.sum(np.log(logdet_sigma_)) # Update alpha and lambda rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) gamma_ = (np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))) lambda_ = ((gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)) alpha_ = ((n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)) # Compute the objective function if self.compute_score: s = lambda_1 * log(lambda_) - lambda_2 * lambda_ s += alpha_1 * log(alpha_) - alpha_2 * alpha_ s += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - (lambda_ * np.sum(coef_ ** 2)) - logdet_sigma_ - n_samples * log(2 * np.pi)) self.scores_.append(s) # Check for convergence if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Convergence after ", str(iter_), " iterations") break coef_old_ = np.copy(coef_) self.alpha_ = alpha_ self.lambda_ = lambda_ self.coef_ = coef_ self._set_intercept(X_offset, y_offset, X_scale) return self ############################################################################### # ARD (Automatic Relevance Determination) regression class ARDRegression(LinearModel, RegressorMixin): """Bayesian ARD regression. Fit the weights of a regression model, using an ARD prior. The weights of the regression model are assumed to be in Gaussian distributions. Also estimate the parameters lambda (precisions of the distributions of the weights) and alpha (precision of the distribution of the noise). The estimation is done by an iterative procedures (Evidence Maximization) Read more in the :ref:`User Guide <bayesian_regression>`. 
Parameters ---------- n_iter : int, optional Maximum number of iterations. Default is 300 tol : float, optional Stop the algorithm if w has converged. Default is 1.e-3. alpha_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter. Default is 1.e-6. alpha_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter. Default is 1.e-6. lambda_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter. Default is 1.e-6. lambda_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter. Default is 1.e-6. compute_score : boolean, optional If True, compute the objective function at each step of the model. Default is False. threshold_lambda : float, optional threshold for removing (pruning) weights with high precision from the computation. Default is 1.e+4. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). Default is True. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True. If True, X will be copied; else, it may be overwritten. verbose : boolean, optional, default False Verbose mode when fitting the model. Attributes ---------- coef_ : array, shape = (n_features) Coefficients of the regression model (mean of distribution) alpha_ : float estimated precision of the noise. lambda_ : array, shape = (n_features) estimated precisions of the weights. sigma_ : array, shape = (n_features, n_features) estimated variance-covariance matrix of the weights scores_ : float if computed, value of the objective function (to be maximized) Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.ARDRegression() >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) ... # doctest: +NORMALIZE_WHITESPACE ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False, copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06, n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001, verbose=False) >>> clf.predict([[1, 1]]) array([ 1.]) Notes -------- See examples/linear_model/plot_ard.py for an example. """ def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6, lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False, threshold_lambda=1.e+4, fit_intercept=True, normalize=False, copy_X=True, verbose=False): self.n_iter = n_iter self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.alpha_1 = alpha_1 self.alpha_2 = alpha_2 self.lambda_1 = lambda_1 self.lambda_2 = lambda_2 self.compute_score = compute_score self.threshold_lambda = threshold_lambda self.copy_X = copy_X self.verbose = verbose def fit(self, X, y): """Fit the ARDRegression model according to the given training data and parameters. 
Iterative procedure to maximize the evidence Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array, shape = [n_samples] Target values (integers) Returns ------- self : returns an instance of self. """ X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True) n_samples, n_features = X.shape coef_ = np.zeros(n_features) X, y, X_offset, y_offset, X_scale = self._preprocess_data( X, y, self.fit_intercept, self.normalize, self.copy_X) # Launch the convergence loop keep_lambda = np.ones(n_features, dtype=bool) lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 verbose = self.verbose # Initialization of the values of the parameters alpha_ = 1. / np.var(y) lambda_ = np.ones(n_features) self.scores_ = list() coef_old_ = None # Iterative procedure of ARDRegression for iter_ in range(self.n_iter): # Compute mu and sigma (using Woodbury matrix identity) sigma_ = pinvh(np.eye(n_samples) / alpha_ + np.dot(X[:, keep_lambda] * np.reshape(1. / lambda_[keep_lambda], [1, -1]), X[:, keep_lambda].T)) sigma_ = np.dot(sigma_, X[:, keep_lambda] * np.reshape(1. / lambda_[keep_lambda], [1, -1])) sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) * X[:, keep_lambda].T, sigma_) sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda] coef_[keep_lambda] = alpha_ * np.dot( sigma_, np.dot(X[:, keep_lambda].T, y)) # Update alpha and lambda rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_) lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) / ((coef_[keep_lambda]) ** 2 + 2. * lambda_2)) alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) / (rmse_ + 2. * alpha_2)) # Prune the weights with a precision over a threshold keep_lambda = lambda_ < self.threshold_lambda coef_[~keep_lambda] = 0 # Compute the objective function if self.compute_score: s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum() s += alpha_1 * log(alpha_) - alpha_2 * alpha_ s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) + np.sum(np.log(lambda_))) s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum()) self.scores_.append(s) # Check for convergence if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Converged after %s iterations" % iter_) break coef_old_ = np.copy(coef_) self.coef_ = coef_ self.alpha_ = alpha_ self.sigma_ = sigma_ self.lambda_ = lambda_ self._set_intercept(X_offset, y_offset, X_scale) return self
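# Hedged usage sketch (illustrative, not part of the module): inspect the learned
# noise precision (alpha_) and weight precision (lambda_) after fitting.
# import numpy as np
# rng = np.random.RandomState(0)
# X = rng.randn(100, 3)
# y = np.dot(X, [1.0, 0.0, -2.0]) + 0.1 * rng.randn(100)
# reg = BayesianRidge(compute_score=True).fit(X, y)
# print(reg.coef_, reg.alpha_, reg.lambda_)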
mit
kevin-intel/scikit-learn
sklearn/semi_supervised/_self_training.py
2
12842
import warnings

import numpy as np

from ..base import MetaEstimatorMixin, clone, BaseEstimator
from ..utils.validation import check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..utils import safe_mask

__all__ = ["SelfTrainingClassifier"]

# Authors: Oliver Rausch <[email protected]>
#          Patrice Becker <[email protected]>
# License: BSD 3 clause


def _validate_estimator(estimator):
    """Make sure that an estimator implements the necessary methods."""
    if not hasattr(estimator, "predict_proba"):
        msg = "base_estimator ({}) should implement predict_proba!"
        raise ValueError(msg.format(type(estimator).__name__))


class SelfTrainingClassifier(MetaEstimatorMixin, BaseEstimator):
    """Self-training classifier.

    This class allows a given supervised classifier to function as a
    semi-supervised classifier, allowing it to learn from unlabeled data.
    It does this by iteratively predicting pseudo-labels for the unlabeled
    data and adding them to the training set.

    The classifier will continue iterating until either max_iter is
    reached, or no pseudo-labels were added to the training set in the
    previous iteration.

    Read more in the :ref:`User Guide <self_training>`.

    Parameters
    ----------
    base_estimator : estimator object
        An estimator object implementing ``fit`` and ``predict_proba``.
        Invoking the ``fit`` method will fit a clone of the passed estimator,
        which will be stored in the ``base_estimator_`` attribute.

    threshold : float, default=0.75
        The decision threshold for use with `criterion='threshold'`.
        Should be in [0, 1). When using the 'threshold' criterion, a
        :ref:`well calibrated classifier <calibration>` should be used.

    criterion : {'threshold', 'k_best'}, default='threshold'
        The selection criterion used to select which labels to add to the
        training set. If 'threshold', pseudo-labels with prediction
        probabilities above `threshold` are added to the dataset. If 'k_best',
        the `k_best` pseudo-labels with highest prediction probabilities are
        added to the dataset. When using the 'threshold' criterion, a
        :ref:`well calibrated classifier <calibration>` should be used.

    k_best : int, default=10
        The number of samples to add in each iteration. Only used when
        `criterion` is 'k_best'.

    max_iter : int or None, default=10
        Maximum number of iterations allowed. Should be greater than or equal
        to 0. If it is ``None``, the classifier will continue to predict
        labels until no new pseudo-labels are added, or all unlabeled samples
        have been labeled.

    verbose : bool, default=False
        Enable verbose output.

    Attributes
    ----------
    base_estimator_ : estimator object
        The fitted estimator.

    classes_ : ndarray or list of ndarray of shape (n_classes,)
        Class labels for each output. (Taken from the trained
        ``base_estimator_``).

    transduction_ : ndarray of shape (n_samples,)
        The labels used for the final fit of the classifier, including
        pseudo-labels added during fit.

    labeled_iter_ : ndarray of shape (n_samples,)
        The iteration in which each sample was labeled. When a sample has
        iteration 0, the sample was already labeled in the original dataset.
        When a sample has iteration -1, the sample was not labeled in any
        iteration.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    n_iter_ : int
        The number of rounds of self-training, that is the number of times the
        base estimator is fitted on relabeled variants of the training set.

    termination_condition_ : {'max_iter', 'no_change', 'all_labeled'}
        The reason that fitting was stopped.

        - 'max_iter': `n_iter_` reached `max_iter`.
        - 'no_change': no new labels were predicted.
        - 'all_labeled': all unlabeled samples were labeled before `max_iter`
          was reached.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import SelfTrainingClassifier
    >>> from sklearn.svm import SVC
    >>> rng = np.random.RandomState(42)
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = rng.rand(iris.target.shape[0]) < 0.3
    >>> iris.target[random_unlabeled_points] = -1
    >>> svc = SVC(probability=True, gamma="auto")
    >>> self_training_model = SelfTrainingClassifier(svc)
    >>> self_training_model.fit(iris.data, iris.target)
    SelfTrainingClassifier(...)

    References
    ----------
    David Yarowsky. 1995. Unsupervised word sense disambiguation rivaling
    supervised methods. In Proceedings of the 33rd annual meeting on
    Association for Computational Linguistics (ACL '95). Association for
    Computational Linguistics, Stroudsburg, PA, USA, 189-196. DOI:
    https://doi.org/10.3115/981658.981684
    """
    _estimator_type = "classifier"

    def __init__(self,
                 base_estimator,
                 threshold=0.75,
                 criterion='threshold',
                 k_best=10,
                 max_iter=10,
                 verbose=False):
        self.base_estimator = base_estimator
        self.threshold = threshold
        self.criterion = criterion
        self.k_best = k_best
        self.max_iter = max_iter
        self.verbose = verbose

    def fit(self, X, y):
        """
        Fits this ``SelfTrainingClassifier`` to a dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        y : {array-like, sparse matrix} of shape (n_samples,)
            Array representing the labels. Unlabeled samples should have the
            label -1.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # we need row slicing support for sparse matrices
        X, y = self._validate_data(X, y,
                                   accept_sparse=['csr', 'csc', 'lil', 'dok'])

        if self.base_estimator is None:
            raise ValueError("base_estimator cannot be None!")

        self.base_estimator_ = clone(self.base_estimator)

        if self.max_iter is not None and self.max_iter < 0:
            raise ValueError("max_iter must be >= 0 or None,"
                             f" got {self.max_iter}")

        if not (0 <= self.threshold < 1):
            raise ValueError("threshold must be in [0,1),"
                             f" got {self.threshold}")

        if self.criterion not in ['threshold', 'k_best']:
            raise ValueError(f"criterion must be either 'threshold' "
                             f"or 'k_best', got {self.criterion}.")

        if y.dtype.kind in ['U', 'S']:
            raise ValueError("y has dtype string. If you wish to predict on "
                             "string targets, use dtype object, and use -1"
                             " as the label for unlabeled samples.")

        has_label = y != -1

        if np.all(has_label):
            warnings.warn("y contains no unlabeled samples", UserWarning)

        if self.criterion == 'k_best' and (self.k_best > X.shape[0] -
                                           np.sum(has_label)):
            warnings.warn("k_best is larger than the amount of unlabeled "
                          "samples. All unlabeled samples will be labeled in "
                          "the first iteration", UserWarning)

        self.transduction_ = np.copy(y)
        self.labeled_iter_ = np.full_like(y, -1)
        self.labeled_iter_[has_label] = 0

        self.n_iter_ = 0

        while not np.all(has_label) and (self.max_iter is None or
                                         self.n_iter_ < self.max_iter):
            self.n_iter_ += 1
            self.base_estimator_.fit(
                X[safe_mask(X, has_label)],
                self.transduction_[has_label])

            # Validate the fitted estimator since `predict_proba` can be
            # delegated to an underlying "final" fitted estimator as
            # generally done in meta-estimator or pipeline.
            _validate_estimator(self.base_estimator_)

            # Predict on the unlabeled samples
            prob = self.base_estimator_.predict_proba(
                X[safe_mask(X, ~has_label)])
            pred = self.base_estimator_.classes_[np.argmax(prob, axis=1)]
            max_proba = np.max(prob, axis=1)

            # Select new labeled samples
            if self.criterion == 'threshold':
                selected = max_proba > self.threshold
            else:
                n_to_select = min(self.k_best, max_proba.shape[0])
                if n_to_select == max_proba.shape[0]:
                    selected = np.ones_like(max_proba, dtype=bool)
                else:
                    # NB these are indices, not a mask
                    selected = \
                        np.argpartition(-max_proba, n_to_select)[:n_to_select]

            # Map selected indices into original array
            selected_full = np.nonzero(~has_label)[0][selected]

            # Add newly labeled confident predictions to the dataset
            self.transduction_[selected_full] = pred[selected]
            has_label[selected_full] = True
            self.labeled_iter_[selected_full] = self.n_iter_

            if selected_full.shape[0] == 0:
                # no changed labels
                self.termination_condition_ = "no_change"
                break

            if self.verbose:
                print(f"End of iteration {self.n_iter_},"
                      f" added {selected_full.shape[0]} new labels.")

        if self.n_iter_ == self.max_iter:
            self.termination_condition_ = "max_iter"
        if np.all(has_label):
            self.termination_condition_ = "all_labeled"

        self.base_estimator_.fit(
            X[safe_mask(X, has_label)],
            self.transduction_[has_label])
        self.classes_ = self.base_estimator_.classes_
        return self

    @if_delegate_has_method(delegate='base_estimator')
    def predict(self, X):
        """Predict the classes of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            Array with predicted labels.
        """
        check_is_fitted(self)
        return self.base_estimator_.predict(X)

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Array with prediction probabilities.
        """
        check_is_fitted(self)
        return self.base_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='base_estimator')
    def decision_function(self, X):
        """Calls decision function of the `base_estimator`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Result of the decision function of the `base_estimator`.
        """
        check_is_fitted(self)
        return self.base_estimator_.decision_function(X)

    @if_delegate_has_method(delegate='base_estimator')
    def predict_log_proba(self, X):
        """Predict log probability for each possible outcome.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Array with log prediction probabilities.
        """
        check_is_fitted(self)
        return self.base_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='base_estimator')
    def score(self, X, y):
        """Calls score on the `base_estimator`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        y : array-like of shape (n_samples,)
            Array representing the labels.

        Returns
        -------
        score : float
            Result of calling score on the `base_estimator`.
        """
        check_is_fitted(self)
        return self.base_estimator_.score(X, y)
bsd-3-clause
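The file above documents only the 'threshold' criterion in its Examples block. A minimal usage sketch of the 'k_best' path, assuming scikit-learn >= 0.24 is installed (the masking fraction and k_best value are illustrative, not taken from the entry):

# Sketch only: drive SelfTrainingClassifier with criterion='k_best'.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.semi_supervised import SelfTrainingClassifier

rng = np.random.RandomState(0)
X, y = load_iris(return_X_y=True)

# Hide roughly 30% of the labels; unlabeled samples are marked with -1.
y_partial = np.copy(y)
y_partial[rng.rand(y.shape[0]) < 0.3] = -1

# Add the 10 most confident pseudo-labels per round instead of thresholding.
base = SVC(probability=True, gamma="auto")
model = SelfTrainingClassifier(base, criterion='k_best', k_best=10,
                               max_iter=None)
model.fit(X, y_partial)

print(model.termination_condition_)   # 'all_labeled' once every sample has a label
# labeled_iter_ records the round in which each pseudo-label was added
print(np.bincount(model.labeled_iter_[model.labeled_iter_ > 0]))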
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/util/clipboard/__init__.py
7
3420
""" Pyperclip A cross-platform clipboard module for Python. (only handles plain text for now) By Al Sweigart [email protected] BSD License Usage: import pyperclip pyperclip.copy('The text to be copied to the clipboard.') spam = pyperclip.paste() if not pyperclip.copy: print("Copy functionality unavailable!") On Windows, no additional modules are needed. On Mac, the module uses pbcopy and pbpaste, which should come with the os. On Linux, install xclip or xsel via package manager. For example, in Debian: sudo apt-get install xclip Otherwise on Linux, you will need the gtk or PyQt4 modules installed. gtk and PyQt4 modules are not available for Python 3, and this module does not work with PyGObject yet. """ __version__ = '1.5.27' # flake8: noqa import platform import os import subprocess from .clipboards import (init_osx_clipboard, init_gtk_clipboard, init_qt_clipboard, init_xclip_clipboard, init_xsel_clipboard, init_klipper_clipboard, init_no_clipboard) from .windows import init_windows_clipboard # `import PyQt4` sys.exit()s if DISPLAY is not in the environment. # Thus, we need to detect the presence of $DISPLAY manually # and not load PyQt4 if it is absent. HAS_DISPLAY = os.getenv("DISPLAY", False) CHECK_CMD = "where" if platform.system() == "Windows" else "which" def _executable_exists(name): return subprocess.call([CHECK_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 def determine_clipboard(): # Determine the OS/platform and set # the copy() and paste() functions accordingly. if 'cygwin' in platform.system().lower(): # FIXME: pyperclip currently does not support Cygwin, # see https://github.com/asweigart/pyperclip/issues/55 pass elif os.name == 'nt' or platform.system() == 'Windows': return init_windows_clipboard() if os.name == 'mac' or platform.system() == 'Darwin': return init_osx_clipboard() if HAS_DISPLAY: # Determine which command/module is installed, if any. try: import gtk # check if gtk is installed except ImportError: pass else: return init_gtk_clipboard() try: import PyQt4 # check if PyQt4 is installed except ImportError: pass else: return init_qt_clipboard() if _executable_exists("xclip"): return init_xclip_clipboard() if _executable_exists("xsel"): return init_xsel_clipboard() if _executable_exists("klipper") and _executable_exists("qdbus"): return init_klipper_clipboard() return init_no_clipboard() def set_clipboard(clipboard): global copy, paste clipboard_types = {'osx': init_osx_clipboard, 'gtk': init_gtk_clipboard, 'qt': init_qt_clipboard, 'xclip': init_xclip_clipboard, 'xsel': init_xsel_clipboard, 'klipper': init_klipper_clipboard, 'windows': init_windows_clipboard, 'no': init_no_clipboard} copy, paste = clipboard_types[clipboard]() copy, paste = determine_clipboard() __all__ = ["copy", "paste"] # pandas aliases clipboard_get = paste clipboard_set = copy
gpl-3.0
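A short usage sketch for the shim above, assuming it is importable as pandas.util.clipboard (as the path suggests). The falsy check on copy mirrors the module's own docstring; set_clipboard('xsel') is only valid if xsel is actually installed:

# Sketch only: exercise the backend selection and the documented fallback check.
from pandas.util import clipboard

if not clipboard.copy:
    # determine_clipboard() fell back to the no-clipboard stubs
    print("Copy functionality unavailable!")
else:
    clipboard.copy("hello from pandas")
    print(clipboard.paste())          # -> 'hello from pandas'

# pandas-facing aliases created at the bottom of the module
clipboard.clipboard_set("via alias")
print(clipboard.clipboard_get())

# Force a specific backend when you know what is installed, e.g. xsel:
# clipboard.set_clipboard("xsel")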
jswoboda/SimISR
Test/statstest.py
2
17635
#!/usr/bin/env python """ Created on Wed Mar 30 13:01:31 2016 This will create a number of data sets for statistical analysis. It'll then make statistics and histograms of the output parameters. @author: John Swoboda """ import itertools import math from SimISR import Path import scipy as sp import pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns import pdb from SimISR.utilFunctions import MakePulseDataRep,CenteredLagProduct,readconfigfile,spect2acf,makeconfigfile from SimISR.IonoContainer import IonoContainer from SimISR.runsim import main as runsim from SimISR.analysisplots import analysisdump,maketi #from radarsystools.radarsystools import RadarSys PVALS = [1e11,2.1e3,1.1e3,0.] SIMVALUES = sp.array([[PVALS[0],PVALS[2]],[PVALS[0],PVALS[1]]]) # TE = {'param':'Te','paramLT':'T_e','lims':[1200.,3000.],'val':PVALS[1]} TI = {'param':'Ti','paramLT':'T_i','lims':[300.,1900.],'val':PVALS[2]} NE = {'param':'Ne','paramLT':'N_e','lims':[4e10,2e11],'val':PVALS[0]} VI = {'param':'Vi','paramLT':'V_i','lims':[-250.,250.],'val':PVALS[3]} PARAMDICT = {'Te':TE, 'Ti':TI, 'Ne':NE, 'Vi':VI} def makehistmult(testpathlist,npulseslist): """ Plot a set of histograms over each other. """ sns.set_style("whitegrid") sns.set_context("notebook") params = ['Ne','Te','Ti','Vi'] paramsLT = ['N_e','T_e','T_i','V_i'] errdictlist =[ makehistdata(params,itest)[0] for itest in testpathlist] (figmplf, axmat) = plt.subplots(2, 2,figsize=(12,8), facecolor='w') axvec = axmat.flatten() histlims = [[4e10,2e11],[1200.,3000.],[300.,1900.],[-250.,250.]] histvecs = [sp.linspace(ipm[0],ipm[1],100) for ipm in histlims] linehand = [] lablist= ['J = {:d}'.format(i) for i in npulseslist] for iax,iparam in enumerate(params): for idict,inpulse in zip(errdictlist,npulseslist): curvals = idict[iparam] curhist, binout = sp.histogram(curvals,bins=histvecs[iax]) dx=binout[1]-binout[0] curhist_norm = curhist.astype(float)/(curvals.size*dx) plthand = axvec[iax].plot(binout[:-1],curhist_norm,label='J = {:d}'.format(inpulse))[0] linehand.append(plthand) axvec[iax].set_xlabel(r'$'+paramsLT[iax]+'$') axvec[iax].set_title(r'Histogram for $'+paramsLT[iax]+'$') leg = figmplf.legend(linehand[:len(npulseslist)],lablist) plt.tight_layout() plt.subplots_adjust(top=0.9) spti = figmplf.suptitle('Parameter Distributions',fontsize=18) return (figmplf,axvec,linehand) def makehistsingle(testpath,npulses): """ Make a histogram from a single collection of data. 
""" sns.set_style("whitegrid") sns.set_context("notebook") params = ['Ne','Te','Ti','Vi'] paramsLT = ['N_e','T_e','T_i','V_i'] datadict,er1,er2,edatadict = makehistdata(params,testpath) (figmplf, axmat) = plt.subplots(2, 2,figsize=(12,8), facecolor='w') axvec = axmat.flatten() histlims = [[4e10,2e11],[1200.,3000.],[300.,1900.],[-250.,250.]] histvecs = [sp.linspace(ipm[0],ipm[1],100) for ipm in histlims] linehand = [] lablist=['Histogram','Variance','Error'] for iax,iparam in enumerate(params): mu = PVALS[iax] curvals = datadict[iparam] mu = sp.nanmean(curvals.real) RMSE = sp.sqrt(sp.nanvar(curvals)) Error_mean = sp.sqrt(sp.nanmean(sp.power(edatadict[iparam],2))) curhist,x = sp.histogram(curvals,bins=histvecs[iax]) dx=x[1]-x[0] curhist_norm = curhist.astype(float)/(curvals.size*dx) plthand = axvec[iax].plot(x[:-1],curhist_norm,'r-',label='Histogram'.format(npulses))[0] linehand.append(plthand) rmsedist = sp.stats.norm.pdf((x-mu)/RMSE)/RMSE plthand = axvec[iax].plot(x,rmsedist,label='Var'.format(npulses))[0] linehand.append(plthand) emeandist = sp.stats.norm.pdf((x-mu)/Error_mean)/Error_mean plthand = axvec[iax].plot(x,emeandist,label='Error'.format(npulses))[0] linehand.append(plthand) axvec[iax].set_xlabel(r'$'+paramsLT[iax]+'$') axvec[iax].set_title(r'Distributions for $'+paramsLT[iax]+'$') leg = figmplf.legend(linehand[:len(lablist)],lablist) plt.tight_layout() plt.subplots_adjust(top=0.9) spti = figmplf.suptitle('Pulses J = {:d}'.format(npulses),fontsize=18) return (figmplf,axvec,linehand) def make2dhist(testpath, xaxis=TE, yaxis=TI, figmplf=None, curax=None): """ This will plot a 2-D histogram of two variables. Args: testpath (obj:`str`): The path where the SimISR data is stored. npulses (obj:`int`): The number of pulses. xaxis (obj: `dict`): default TE, Dictionary that holds the parameter info along the x axis of the distribution. yaxis (obj: `dict`): default TE, Dictionary that holds the parameter info along the y axis of the distribution. figmplf (obj: `matplotb figure`): default None, Figure that the plot will be placed on. curax (obj: `matplotlib axis`): default None, Axis that the plot will be made on. Returns: figmplf (obj: `matplotb figure`), curax (obj: `matplotlib axis`):,hist_h (obj: `matplotlib axis`)): The figure handle the plot is made on, the axis handle the plot is on, the plot handle itself. """ sns.set_style("whitegrid") sns.set_context("notebook") params = [xaxis['param'], yaxis['param']] datadict, _, _, _ = makehistdata(params, testpath) if (figmplf is None) and (curax is None): (figmplf, curax) = plt.subplots(1, 1, figsize=(6, 6), facecolor='w') b1 = sp.linspace(*xaxis['lims']) b2 = sp.linspace(*yaxis['lims']) bins = [b1, b2] d1 = sp.column_stack((datadict[params[0]],datadict[params[1]])) H, xe, ye = sp.histogram2d(d1[:,0].real, d1[:,1].real, bins=bins, normed=True) hist_h = curax.pcolor(xe[:-1], ye[:-1], sp.transpose(H), cmap='viridis', vmin=0) curax.set_xlabel(r'$'+xaxis['paramLT']+'$') curax.set_ylabel(r'$'+yaxis['paramLT']+'$') curax.set_title(r'Joint distributions for $'+ xaxis['paramLT']+'$'+' and $'+ yaxis['paramLT']+'$') plt.colorbar(hist_h, ax=curax, label='Probability', format='%1.1e') return (figmplf, curax, hist_h) def makehist(testpath,npulses): """ This functions are will create histogram from data made in the testpath. Inputs testpath - The path that the data is located. npulses - The number of pulses in the sim. 
""" sns.set_style("whitegrid") sns.set_context("notebook") params = ['Ne', 'Te', 'Ti', 'Vi'] histlims = [[1e10, 3e11], [1000., 3000.], [100., 2500.], [-400., 400.]] erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800], [-400., 400.]] erperlims = [[-100., 100.]]*4 lims_list = [histlims, erlims, erperlims] errdict = makehistdata(params, testpath)[:4] ernames = ['Data', 'Error', 'Error Percent'] # Two dimensiontal histograms pcombos = [i for i in itertools.combinations(params, 2)] c_rows = int(math.ceil(float(len(pcombos))/2.)) (figmplf, axmat) = plt.subplots(c_rows, 2, figsize=(12, c_rows*6), facecolor='w') axvec = axmat.flatten() for icomn, icom in enumerate(pcombos): curax = axvec[icomn] str1, str2 = icom _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2], figmplf, curax) filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist')) plt.tight_layout() plt.subplots_adjust(top=0.95) figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20) fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses) plt.savefig(fname) plt.close(figmplf) # One dimensiontal histograms for ierr, iername in enumerate(ernames): filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername)) (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w') axvec = axmat.flatten() for ipn, iparam in enumerate(params): plt.sca(axvec[ipn]) if sp.any(sp.isinf(errdict[ierr][iparam])): continue binlims = lims_list[ierr][ipn] bins = sp.linspace(binlims[0], binlims[1], 100) xdata = errdict[ierr][iparam] xlog = sp.logical_and(xdata >= binlims[0], xdata < binlims[1]) histhand = sns.distplot(xdata[xlog], bins=bins, kde=True, rug=False) axvec[ipn].set_title(iparam) figmplf.suptitle(iername +' Pulses: {0}'.format(npulses), fontsize=20) fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses) plt.savefig(fname) plt.close(figmplf) def makehistdata(params,maindir): """ This will make the histogram data for the statistics. Inputs params - A list of parameters that will have statistics created maindir - The directory that the simulation data is held. Outputs datadict - A dictionary with the data values in numpy arrays. The keys are param names. errordict - A dictionary with the data values in numpy arrays. The keys are param names. errdictrel - A dictionary with the error values in numpy arrays, normalized by the correct value. The keys are param names. """ maindir = Path(maindir) ffit = maindir.joinpath('Fitted', 'fitteddata.h5') inputfiledir = maindir.joinpath('Origparams') paramslower = [ip.lower() for ip in params] eparamslower = ['n'+ip.lower() for ip in params] # set up data dictionary errordict = {ip:[] for ip in params} errordictrel = {ip:[] for ip in params} #Read in fitted data Ionofit = IonoContainer.readh5(str(ffit)) times = Ionofit.Time_Vector dataloc = Ionofit.Sphere_Coords rng = dataloc[:, 0] rng_log = sp.logical_and(rng > 200., rng < 400) dataloc_out = dataloc[rng_log] pnames = Ionofit.Param_Names pnameslower = sp.array([ip.lower() for ip in pnames.flatten()]) p2fit = [sp.argwhere(ip == pnameslower)[0][0] if ip in pnameslower else None for ip in paramslower] datadict = {ip:Ionofit.Param_List[rng_log, :, p2fit[ipn]].flatten() for ipn, ip in enumerate(params)} ep2fit = [sp.argwhere(ip==pnameslower)[0][0] if ip in pnameslower else None for ip in eparamslower] edatadict = {ip:Ionofit.Param_List[rng_log, :, ep2fit[ipn]].flatten() for ipn, ip in enumerate(params)} # Determine which input files are to be used. 
dirlist = [str(i) for i in inputfiledir.glob('*.h5')] _, outime, filelisting, _, _ = IonoContainer.gettimes(dirlist) time2files = [] for itn, itime in enumerate(times): log1 = (outime[:, 0] >= itime[0]) & (outime[:, 0] < itime[1]) log2 = (outime[:, 1] > itime[0]) & (outime[:, 1] <= itime[1]) log3 = (outime[:, 0] <= itime[0]) & (outime[:, 1] > itime[1]) tempindx = sp.where(log1|log2|log3)[0] time2files.append(filelisting[tempindx]) curfilenum = -1 for iparam, pname in enumerate(params): curparm = paramslower[iparam] # Use Ne from input to compare the ne derived from the power. if curparm == 'nepow': curparm = 'ne' datalist = [] for itn, itime in enumerate(times): for filenum in time2files[itn]: filenum = int(filenum) if curfilenum != filenum: curfilenum = filenum datafilename = dirlist[filenum] Ionoin = IonoContainer.readh5(datafilename) if ('ti' in paramslower) or ('vi' in paramslower): Ionoin = maketi(Ionoin) pnames = Ionoin.Param_Names pnameslowerin = sp.array([ip.lower() for ip in pnames.flatten()]) prmloc = sp.argwhere(curparm == pnameslowerin) if prmloc.size != 0: curprm = prmloc[0][0] # build up parameter vector bs the range values by finding the closest point in space in the input curdata = sp.zeros(len(dataloc_out)) for irngn, curcoord in enumerate(dataloc_out): tempin = Ionoin.getclosestsphere(curcoord, [itime])[0] Ntloc = tempin.shape[0] tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin))) curdata[irngn] = tempin[0, curprm] datalist.append(curdata) errordict[pname] = datadict[pname]-sp.hstack(datalist) errordictrel[pname] = 100.*errordict[pname]/sp.absolute(sp.hstack(datalist)) return datadict, errordict, errordictrel, edatadict def configfilesetup(testpath,npulses): """ This will create the configureation file given the number of pulses for the test. This will make it so that there will be 12 integration periods for a given number of pulses. Input testpath - The location of the data. npulses - The number of pulses. """ curloc = Path(__file__).resolve().parent defcon = curloc.joinpath('statsbase.ini') (sensdict, simparams) = readconfigfile(defcon) tint = simparams['IPP']*npulses ratio1 = tint/simparams['Tint'] simparams['Tint'] = ratio1 * simparams['Tint'] simparams['Fitinter'] = ratio1 * simparams['Fitinter'] simparams['TimeLim'] = ratio1 * simparams['TimeLim'] simparams['startfile'] = 'startfile.h5' makeconfigfile(testpath.joinpath('stats.ini'),simparams['Beamlist'],sensdict['Name'],simparams) def makedata(testpath): """ This will make the input data for the test case. The data will have the default set of parameters Ne=Ne=1e11 and Te=Ti=2000. Inputs testpath - Directory that will hold the data. """ finalpath = testpath.joinpath('Origparams') if not finalpath.exists(): finalpath.mkdir() data = SIMVALUES z = sp.linspace(50., 1e3, 50) nz = len(z) params = sp.tile(data[sp.newaxis, sp.newaxis, :, :], (nz, 1, 1, 1)) coords = sp.column_stack((sp.ones(nz), sp.ones(nz), z)) species = ['O+', 'e-'] times = sp.array([[0, 1e9]]) vel = sp.zeros((nz, 1, 3)) Icont1 = IonoContainer(coordlist=coords, paramlist=params, times=times, sensor_loc=sp.zeros(3), ver=0, coordvecs=['x', 'y', 'z'], paramnames=None, species=species, velocity=vel) finalfile = finalpath.joinpath('0 stats.h5') Icont1.saveh5(str(finalfile)) # set start temp to 1000 K. 
Icont1.Param_List[:, :, :, 1] = 1e3 Icont1.saveh5(str(testpath.joinpath('startfile.h5'))) def main(plist = None, functlist = ['spectrums','radardata','fitting','analysis','stats'], datadir=None): """ This function will call other functions to create the input data, config file and run the radar data sim. The path for the simulation will be created in the Testdata directory in the SimISR module. The new folder will be called BasicTest. The simulation is a long pulse simulation will the desired number of pulses from the user. Inputs npulse - Number of pulses for the integration period, default==100. functlist - The list of functions for the SimISR to do. """ if plist is None: plist = sp.array([50, 100, 200, 500, 1000, 2000, 5000]) if isinstance(plist, list): plist = sp.array(plist) if datadir is None: curloc = Path(__file__).resolve().parent testpath = curloc.parent.joinpath('Testdata', 'StatsTest') else: datadir = Path(datadir) testpath = datadir testpath.mkdir(exist_ok=True, parents=True) functlist_default = ['spectrums', 'radardata', 'fitting'] check_list = sp.array([i in functlist for i in functlist_default]) check_run = sp.any(check_list) functlist_red = sp.array(functlist_default)[check_list].tolist() allfolds = [] # rsystools = [] for ip in plist: foldname = 'Pulses_{:04d}'.format(ip) curfold = testpath.joinpath(foldname) allfolds.append(curfold) curfold.mkdir(exist_ok=True, parents=True) configfilesetup(curfold, ip) makedata(curfold) config = curfold/'stats.ini' # rtemp = RadarSys(sensdict,simparams['Rangegatesfinal'],ip) # rsystools.append(rtemp.rms(sp.array([1e12]),sp.array([2.5e3]),sp.array([2.5e3]))) if check_run: runsim(functlist_red, curfold, str(curfold.joinpath('stats.ini')), True) if 'analysis' in functlist: analysisdump(curfold, config, params = ['Ne', 'Te', 'Ti', 'Vi']) if 'stats' in functlist: makehist(curfold, ip) if __name__== '__main__': from argparse import ArgumentParser descr = ''' This script will perform the basic run est for ISR sim. ''' parser = ArgumentParser(description=descr) parser.add_argument("-p", "--npulses", help='Number of pulses.', nargs='+', type=int, default=[50, 100, 200, 500, 1000, 2000, 5000]) parser.add_argument('-f','--funclist', help='Functions to be uses', nargs='+', default=['spectrums', 'radardata', 'fitting', 'analysis', 'stats'])#action='append',dest='collection',default=['spectrums','radardata','fitting','analysis']) parser.add_argument('-d','--dir', help='original directory', default='../Testdata/StatsTest/') args = parser.parse_args() main(args.npulses, args.funclist, args.dir)
mit
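The makehistsingle routine above scales histogram counts by 1/(N*dx) so the result is a probability density that can be overlaid with a Gaussian of matching RMSE. A standalone sketch of that normalization on synthetic data (the Ti mean and spread below are hypothetical, not SimISR output):

# Sketch only: histogram-to-density normalization as used in statstest.py.
import numpy as np
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
ti_est = 1.1e3 + 150.*rng.standard_normal(5000)   # pretend fitted Ti values

edges = np.linspace(300., 1900., 100)
counts, edges = np.histogram(ti_est, bins=edges)
dx = edges[1] - edges[0]
density = counts.astype(float) / (ti_est.size * dx)  # same scaling as the script

mu = np.nanmean(ti_est)
rmse = np.sqrt(np.nanvar(ti_est))
pdf = stats.norm.pdf((edges - mu) / rmse) / rmse      # Gaussian with same RMSE

fig, ax = plt.subplots()
ax.plot(edges[:-1], density, 'r-', label='Histogram')
ax.plot(edges, pdf, label='Gaussian fit')
ax.set_xlabel('$T_i$')
ax.legend()
fig.savefig('ti_hist_sketch.png')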
mlperf/training_results_v0.6
Google/benchmarks/transformer/implementations/tpu-v3-32-transformer/dataset_preproc/data_generators/image_utils.py
7
14211
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base classes and utilities for image datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import io import os import numpy as np from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import text_encoder from tensor2tensor.layers import common_layers from tensor2tensor.utils import metrics from tensor2tensor.utils import registry import tensorflow as tf def matplotlib_pyplot(): import matplotlib # pylint: disable=g-import-not-at-top matplotlib.use("agg") import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top return plt def image_to_tf_summary_value(image, tag): """Converts a NumPy image to a tf.Summary.Value object. Args: image: 3-D NumPy array. tag: name for tf.Summary.Value for display in tensorboard. Returns: image_summary: A tf.Summary.Value object. """ curr_image = np.asarray(image, dtype=np.uint8) height, width, n_channels = curr_image.shape s = io.BytesIO() matplotlib_pyplot().imsave(s, curr_image, format="png") img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=height, width=width, colorspace=n_channels) return tf.Summary.Value(tag=tag, image=img_sum) def convert_predictions_to_image_summaries(hook_args): """Optionally converts images from hooks_args to image summaries. Args: hook_args: DecodeHookArgs namedtuple Returns: summaries: list of tf.Summary values if hook_args.decode_hpara """ decode_hparams = hook_args.decode_hparams if not decode_hparams.display_decoded_images: return [] predictions = hook_args.predictions[0] # Display ten random inputs and outputs so that tensorboard does not hang. all_summaries = [] rand_predictions = np.random.choice(predictions, size=10) for ind, prediction in enumerate(rand_predictions): output_summary = image_to_tf_summary_value( prediction["outputs"], tag="%d_output" % ind) input_summary = image_to_tf_summary_value( prediction["inputs"], tag="%d_input" % ind) all_summaries.append(input_summary) all_summaries.append(output_summary) return all_summaries def resize_by_area(img, size): """image resize function used by quite a few image problems.""" return tf.to_int64( tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) def make_multiscale(image, resolutions, resize_method=tf.image.ResizeMethod.BICUBIC, num_channels=3): """Returns list of scaled images, one for each resolution. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. resize_method: tf.image.ResizeMethod. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels]. 
""" scaled_images = [] for height in resolutions: scaled_image = tf.image.resize_images( image, size=[height, height], # assuming that height = width method=resize_method) scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([height, height, num_channels]) scaled_images.append(scaled_image) return scaled_images def make_multiscale_dilated(image, resolutions, num_channels=3): """Returns list of scaled images, one for each resolution. Resizes by skipping every nth pixel. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. The function assumes VALID padding, so the original image's height must be divisible by each resolution's height to return the exact resolution size. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels] if resolutions properly divide the original image's height; otherwise shape height and width is up to valid skips. """ image_height = common_layers.shape_list(image)[0] scaled_images = [] for height in resolutions: dilation_rate = image_height // height # assuming height = width scaled_image = image[::dilation_rate, ::dilation_rate] scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([None, None, num_channels]) scaled_images.append(scaled_image) return scaled_images class ImageProblem(problem.Problem): """Base class for problems with images.""" @property def num_channels(self): """Number of color channels.""" return 3 @property def vocab_size(self): """Number of pixel values.""" return 256 def example_reading_spec(self): data_fields = { "image/encoded": tf.FixedLenFeature((), tf.string), "image/format": tf.FixedLenFeature((), tf.string), } data_items_to_decoders = { "inputs": tf.contrib.slim.tfexample_decoder.Image( image_key="image/encoded", format_key="image/format", channels=self.num_channels), } return data_fields, data_items_to_decoders def preprocess_example(self, example, mode, hparams): if not self._was_reversed: example["inputs"] = tf.image.per_image_standardization(example["inputs"]) return example def eval_metrics(self): eval_metrics = [ metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5, metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY ] if self._was_reversed: eval_metrics += [metrics.Metrics.IMAGE_SUMMARY] return eval_metrics @property def decode_hooks(self): return [convert_predictions_to_image_summaries] class Image2ClassProblem(ImageProblem): """Base class for image classification problems.""" @property def is_small(self): raise NotImplementedError() @property def num_classes(self): raise NotImplementedError() @property def train_shards(self): raise NotImplementedError() @property def dev_shards(self): return 1 @property def class_labels(self): return ["ID_%d" % i for i in range(self.num_classes)] def feature_encoders(self, data_dir): del data_dir return { "inputs": text_encoder.ImageEncoder(channels=self.num_channels), "targets": text_encoder.ClassLabelEncoder(self.class_labels) } def generator(self, data_dir, tmp_dir, is_training): raise NotImplementedError() def example_reading_spec(self): label_key = "image/class/label" data_fields, data_items_to_decoders = ( super(Image2ClassProblem, self).example_reading_spec()) data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64) data_items_to_decoders[ "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) return data_fields, data_items_to_decoders def hparams(self, defaults, 
unused_model_hparams): p = defaults p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} p.target_modality = (registry.Modalities.CLASS_LABEL, self.num_classes) p.batch_size_multiplier = 4 if self.is_small else 256 p.loss_multiplier = 3.0 if self.is_small else 1.0 if self._was_reversed: p.loss_multiplier = 1.0 p.input_space_id = problem.SpaceID.IMAGE p.target_space_id = problem.SpaceID.IMAGE_LABEL def generate_data(self, data_dir, tmp_dir, task_id=-1): generator_utils.generate_dataset_and_shuffle( self.generator(data_dir, tmp_dir, True), self.training_filepaths(data_dir, self.train_shards, shuffled=False), self.generator(data_dir, tmp_dir, False), self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) def encode_images_as_png(images): """Yield images encoded as pngs.""" if tf.contrib.eager.in_eager_mode(): for image in images: yield tf.image.encode_png(image).numpy() else: (height, width, channels) = images[0].shape with tf.Graph().as_default(): image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels)) encoded_image_t = tf.image.encode_png(image_t) with tf.Session() as sess: for image in images: enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) yield enc_string def image_generator(images, labels): """Generator for images that takes image and labels lists and creates pngs. Args: images: list of images given as [width x height x channels] numpy arrays. labels: list of ints, same length as images. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as PNG, * image/format: the string "png" representing image format, * image/class/label: an integer representing the label, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a singleton list of the corresponding type. Raises: ValueError: if images is an empty list. """ if not images: raise ValueError("Must provide some images for the generator.") width, height, _ = images[0].shape for (enc_image, label) in zip(encode_images_as_png(images), labels): yield { "image/encoded": [enc_image], "image/format": ["png"], "image/class/label": [int(label)], "image/height": [height], "image/width": [width] } class Image2TextProblem(ImageProblem): """Base class for image-to-text problems.""" @property def is_character_level(self): raise NotImplementedError() @property def vocab_problem(self): raise NotImplementedError() # Not needed if self.is_character_level. 
@property def target_space_id(self): raise NotImplementedError() @property def train_shards(self): raise NotImplementedError() @property def dev_shards(self): raise NotImplementedError() def generator(self, data_dir, tmp_dir, is_training): raise NotImplementedError() def example_reading_spec(self): label_key = "image/class/label" data_fields, data_items_to_decoders = ( super(Image2TextProblem, self).example_reading_spec()) data_fields[label_key] = tf.VarLenFeature(tf.int64) data_items_to_decoders[ "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) return data_fields, data_items_to_decoders def feature_encoders(self, data_dir): if self.is_character_level: encoder = text_encoder.ByteTextEncoder() else: vocab_filename = os.path.join( data_dir, self.vocab_problem.vocab_filename) encoder = text_encoder.SubwordTextEncoder(vocab_filename) input_encoder = text_encoder.ImageEncoder(channels=self.num_channels) return {"inputs": input_encoder, "targets": encoder} def hparams(self, defaults, unused_model_hparams): p = defaults p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} encoder = self._encoders["targets"] p.target_modality = (registry.Modalities.SYMBOL, encoder.vocab_size) p.batch_size_multiplier = 256 p.loss_multiplier = 1.0 p.input_space_id = problem.SpaceID.IMAGE p.target_space_id = self.target_space_id def generate_data(self, data_dir, tmp_dir, task_id=-1): generator_utils.generate_dataset_and_shuffle( self.generator(data_dir, tmp_dir, True), self.training_filepaths(data_dir, self.train_shards, shuffled=False), self.generator(data_dir, tmp_dir, False), self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) def image_augmentation(images, do_colors=False, crop_size=None): """Image augmentation: cropping, flipping, and color transforms.""" if crop_size is None: crop_size = [299, 299] images = tf.random_crop(images, crop_size + [3]) images = tf.image.random_flip_left_right(images) if do_colors: # More augmentation, but might be slow. images = tf.image.random_brightness(images, max_delta=32. / 255.) images = tf.image.random_saturation(images, lower=0.5, upper=1.5) images = tf.image.random_hue(images, max_delta=0.2) images = tf.image.random_contrast(images, lower=0.5, upper=1.5) return images def cifar_image_augmentation(images): """Image augmentation suitable for CIFAR-10/100. As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). Args: images: a Tensor. Returns: Tensor of the same shape as images. """ images = tf.image.resize_image_with_crop_or_pad(images, 40, 40) images = tf.random_crop(images, [32, 32, 3]) images = tf.image.random_flip_left_right(images) return images def random_shift(image, wsr=0.1, hsr=0.1): """Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the width. Returns: images: images translated by the provided wsr and hsr. """ height, width, _ = common_layers.shape_list(image) width_range, height_range = wsr*width, hsr*height height_translations = tf.random_uniform((1,), -height_range, height_range) width_translations = tf.random_uniform((1,), -width_range, width_range) translations = tf.concat((height_translations, width_translations), axis=0) return tf.contrib.image.translate(image, translations=translations)
apache-2.0
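make_multiscale_dilated above downsamples by keeping every n-th pixel, with n = original height // target height. A NumPy analogue for illustration only (the helper name multiscale_dilated_np is ours, and this is not the TensorFlow implementation from the entry):

# Sketch only: strided downsampling equivalent to image[::rate, ::rate].
import numpy as np

def multiscale_dilated_np(image, resolutions):
    """image: (H, H, C) array; resolutions: list of target heights."""
    height = image.shape[0]
    scaled = []
    for target in resolutions:
        rate = height // target           # assumes height == width
        scaled.append(image[::rate, ::rate].astype(np.int64))
    return scaled

img = np.random.randint(0, 256, size=(64, 64, 3))
for s in multiscale_dilated_np(img, [64, 32, 16, 8]):
    print(s.shape)   # (64, 64, 3), (32, 32, 3), (16, 16, 3), (8, 8, 3)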
ndingwall/scikit-learn
sklearn/covariance/_robust_covariance.py
1
32339
""" Robust location and covariance estimators. Here are implemented estimators that are resistant to outliers. """ # Author: Virgile Fritsch <[email protected]> # # License: BSD 3 clause import warnings import numbers import numpy as np from scipy import linalg from scipy.stats import chi2 from . import empirical_covariance, EmpiricalCovariance from ..utils.extmath import fast_logdet from ..utils import check_random_state, check_array from ..utils.validation import _deprecate_positional_args # Minimum Covariance Determinant # Implementing of an algorithm by Rousseeuw & Van Driessen described in # (A Fast Algorithm for the Minimum Covariance Determinant Estimator, # 1999, American Statistical Association and the American Society # for Quality, TECHNOMETRICS) # XXX Is this really a public function? It's not listed in the docs or # exported by sklearn.covariance. Deprecate? def c_step(X, n_support, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance, random_state=None): """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. Parameters ---------- X : array-like of shape (n_samples, n_features) Data set in which we look for the n_support observations whose scatter matrix has minimum determinant. n_support : int Number of observations to compute the robust estimates of location and covariance from. This parameter must be greater than `n_samples / 2`. remaining_iterations : int, default=30 Number of iterations to perform. According to [Rouseeuw1999]_, two iterations are sufficient to get close to the minimum, and we never need more than 30 to reach convergence. initial_estimates : tuple of shape (2,), default=None Initial estimates of location and shape from which to run the c_step procedure: - initial_estimates[0]: an initial location estimate - initial_estimates[1]: an initial covariance estimate verbose : bool, default=False Verbose mode. cov_computation_method : callable, \ default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term: `Glossary <random_state>`. Returns ------- location : ndarray of shape (n_features,) Robust location estimates. covariance : ndarray of shape (n_features, n_features) Robust covariance estimates. support : ndarray of shape (n_samples,) A mask for the `n_support` observations whose scatter matrix has minimum determinant. References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ X = np.asarray(X) random_state = check_random_state(random_state) return _c_step(X, n_support, remaining_iterations=remaining_iterations, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state) def _c_step(X, n_support, random_state, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance): n_samples, n_features = X.shape dist = np.inf # Initialisation support = np.zeros(n_samples, dtype=bool) if initial_estimates is None: # compute initial robust estimates from a random subset support[random_state.permutation(n_samples)[:n_support]] = True else: # get initial robust estimates from the function parameters location = initial_estimates[0] covariance = initial_estimates[1] # run a special iteration for that case (to get an initial support) precision = linalg.pinvh(covariance) X_centered = X - location dist = (np.dot(X_centered, precision) * X_centered).sum(1) # compute new estimates support[np.argsort(dist)[:n_support]] = True X_support = X[support] location = X_support.mean(0) covariance = cov_computation_method(X_support) # Iterative procedure for Minimum Covariance Determinant computation det = fast_logdet(covariance) # If the data already has singular covariance, calculate the precision, # as the loop below will not be entered. if np.isinf(det): precision = linalg.pinvh(covariance) previous_det = np.inf while (det < previous_det and remaining_iterations > 0 and not np.isinf(det)): # save old estimates values previous_location = location previous_covariance = covariance previous_det = det previous_support = support # compute a new support from the full data set mahalanobis distances precision = linalg.pinvh(covariance) X_centered = X - location dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) # compute new estimates support = np.zeros(n_samples, dtype=bool) support[np.argsort(dist)[:n_support]] = True X_support = X[support] location = X_support.mean(axis=0) covariance = cov_computation_method(X_support) det = fast_logdet(covariance) # update remaining iterations for early stopping remaining_iterations -= 1 previous_dist = dist dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1) # Check if best fit already found (det => 0, logdet => -inf) if np.isinf(det): results = location, covariance, det, support, dist # Check convergence if np.allclose(det, previous_det): # c_step procedure converged if verbose: print("Optimal couple (location, covariance) found before" " ending iterations (%d left)" % (remaining_iterations)) results = location, covariance, det, support, dist elif det > previous_det: # determinant has increased (should not happen) warnings.warn("Determinant has increased; this should not happen: " "log(det) > log(previous_det) (%.15f > %.15f). " "You may want to try with a higher value of " "support_fraction (current value: %.3f)." 
% (det, previous_det, n_support / n_samples), RuntimeWarning) results = previous_location, previous_covariance, \ previous_det, previous_support, previous_dist # Check early stopping if remaining_iterations == 0: if verbose: print('Maximum number of iterations reached') results = location, covariance, det, support, dist return results def select_candidates(X, n_support, n_trials, select=1, n_iter=30, verbose=False, cov_computation_method=empirical_covariance, random_state=None): """Finds the best pure subset of observations to compute MCD from it. The purpose of this function is to find the best sets of n_support observations with respect to a minimization of their covariance matrix determinant. Equivalently, it removes n_samples-n_support observations to construct what we call a pure data set (i.e. not containing outliers). The list of the observations of the pure data set is referred to as the `support`. Starting from a random support, the pure data set is found by the c_step procedure introduced by Rousseeuw and Van Driessen in [RV]_. Parameters ---------- X : array-like of shape (n_samples, n_features) Data (sub)set in which we look for the n_support purest observations. n_support : int The number of samples the pure data set must contain. This parameter must be in the range `[(n + p + 1)/2] < n_support < n`. n_trials : int or tuple of shape (2,) Number of different initial sets of observations from which to run the algorithm. This parameter should be a strictly positive integer. Instead of giving a number of trials to perform, one can provide a list of initial estimates that will be used to iteratively run c_step procedures. In this case: - n_trials[0]: array-like, shape (n_trials, n_features) is the list of `n_trials` initial location estimates - n_trials[1]: array-like, shape (n_trials, n_features, n_features) is the list of `n_trials` initial covariances estimates select : int, default=1 Number of best candidates results to return. This parameter must be a strictly positive integer. n_iter : int, default=30 Maximum number of iterations for the c_step procedure. (2 is enough to be close to the final solution. "Never" exceeds 20). This parameter must be a strictly positive integer. verbose : bool, default=False Control the output verbosity. cov_computation_method : callable, \ default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return an array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term: `Glossary <random_state>`. See Also --------- c_step Returns ------- best_locations : ndarray of shape (select, n_features) The `select` location estimates computed from the `select` best supports found in the data set (`X`). best_covariances : ndarray of shape (select, n_features, n_features) The `select` covariance estimates computed from the `select` best supports found in the data set (`X`). best_supports : ndarray of shape (select, n_samples) The `select` best supports found in the data set (`X`). References ---------- .. 
[RV] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ random_state = check_random_state(random_state) if isinstance(n_trials, numbers.Integral): run_from_estimates = False elif isinstance(n_trials, tuple): run_from_estimates = True estimates_list = n_trials n_trials = estimates_list[0].shape[0] else: raise TypeError("Invalid 'n_trials' parameter, expected tuple or " " integer, got %s (%s)" % (n_trials, type(n_trials))) # compute `n_trials` location and shape estimates candidates in the subset all_estimates = [] if not run_from_estimates: # perform `n_trials` computations from random initial supports for j in range(n_trials): all_estimates.append( _c_step( X, n_support, remaining_iterations=n_iter, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state)) else: # perform computations from every given initial estimates for j in range(n_trials): initial_estimates = (estimates_list[0][j], estimates_list[1][j]) all_estimates.append(_c_step( X, n_support, remaining_iterations=n_iter, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state)) all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \ zip(*all_estimates) # find the `n_best` best results among the `n_trials` ones index_best = np.argsort(all_dets_sub)[:select] best_locations = np.asarray(all_locs_sub)[index_best] best_covariances = np.asarray(all_covs_sub)[index_best] best_supports = np.asarray(all_supports_sub)[index_best] best_ds = np.asarray(all_ds_sub)[index_best] return best_locations, best_covariances, best_supports, best_ds def fast_mcd(X, support_fraction=None, cov_computation_method=empirical_covariance, random_state=None): """Estimates the Minimum Covariance Determinant matrix. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. support_fraction : float, default=None The proportion of points to be included in the support of the raw MCD estimate. Default is `None`, which implies that the minimum value of `support_fraction` will be used within the algorithm: `(n_sample + n_features + 1) / 2`. This parameter must be in the range (0, 1). cov_computation_method : callable, \ default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return an array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term: `Glossary <random_state>`. Returns ------- location : ndarray of shape (n_features,) Robust location of the data. covariance : ndarray of shape (n_features, n_features) Robust covariance of the features. support : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the robust location and covariance estimates of the data set. Notes ----- The FastMCD algorithm has been introduced by Rousseuw and Van Driessen in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS". 
The principle is to compute robust estimates and random subsets before pooling them into a larger subsets, and finally into the full data set. Depending on the size of the initial sample, we have one, two or three such computation levels. Note that only raw estimates are returned. If one is interested in the correction and reweighting steps described in [RouseeuwVan]_, see the MinCovDet object. References ---------- .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 """ random_state = check_random_state(random_state) X = check_array(X, ensure_min_samples=2, estimator='fast_mcd') n_samples, n_features = X.shape # minimum breakdown value if support_fraction is None: n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) else: n_support = int(support_fraction * n_samples) # 1-dimensional case quick computation # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust # Regression and Outlier Detection, John Wiley & Sons, chapter 4) if n_features == 1: if n_support < n_samples: # find the sample shortest halves X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)] halves_start = np.where(diff == np.min(diff))[0] # take the middle points' mean to get the robust location estimate location = 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) # get precision matrix in an optimized way precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location # get precision matrix in an optimized way precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) # Starting FastMCD algorithm for p-dimensional case if (n_samples > 500) and (n_features > 1): # 1. Find candidate supports on subsets # a. split the set in subsets of size ~ 300 n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) # b. perform a total of 500 trials n_trials_tot = 500 # c. select 10 best (location, covariance) for each subset n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: # The above is too big. 
Let's try with something much small # (and less optimal) n_best_tot = 10 all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] best_locations_sub, best_covariances_sub, _, _ = select_candidates( current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub # 2. Pool the candidate supports into a merged set # (possibly the full dataset) n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 # find the best couples (location, covariance) on the merged set selection = random_state.permutation(n_samples)[:n_samples_merged] locations_merged, covariances_merged, supports_merged, d = \ select_candidates( X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=cov_computation_method, random_state=random_state) # 3. Finally get the overall best (locations, covariance) couple if n_samples < 1500: # directly get the best couple (location, covariance) location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: # select the best couple on the full dataset locations_full, covariances_full, supports_full, d = \ select_candidates( X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: # 1. Find the 10 best couples (location, covariance) # considering two iterations n_trials = 30 n_best = 10 locations_best, covariances_best, _, _ = select_candidates( X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) # 2. Select the best couple on the full dataset amongst the 10 locations_full, covariances_full, supports_full, d = select_candidates( X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] return location, covariance, support, dist class MinCovDet(EmpiricalCovariance): """Minimum Covariance Determinant (MCD): robust estimator of covariance. The Minimum Covariance Determinant covariance estimator is to be applied on Gaussian-distributed data, but could still be relevant on data drawn from a unimodal, symmetric distribution. It is not meant to be used with multi-modal data (the algorithm used to fit a MinCovDet object is likely to fail in such a case). One should consider projection pursuit methods to deal with multi-modal datasets. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- store_precision : bool, default=True Specify if the estimated precision is stored. 
assume_centered : bool, default=False If True, the support of the robust location and the covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction : float, default=None The proportion of points to be included in the support of the raw MCD estimate. Default is None, which implies that the minimum value of support_fraction will be used within the algorithm: `(n_sample + n_features + 1) / 2`. The parameter must be in the range (0, 1). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term: `Glossary <random_state>`. Attributes ---------- raw_location_ : ndarray of shape (n_features,) The raw robust estimated location before correction and re-weighting. raw_covariance_ : ndarray of shape (n_features, n_features) The raw robust estimated covariance before correction and re-weighting. raw_support_ : ndarray of shape (n_samples,) A mask of the observations that have been used to compute the raw robust estimates of location and shape, before correction and re-weighting. location_ : ndarray of shape (n_features,) Estimated robust location. covariance_ : ndarray of shape (n_features, n_features) Estimated robust covariance matrix. precision_ : ndarray of shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) support_ : ndarray of shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. dist_ : ndarray of shape (n_samples,) Mahalanobis distances of the training set (on which :meth:`fit` is called) observations. Examples -------- >>> import numpy as np >>> from sklearn.covariance import MinCovDet >>> from sklearn.datasets import make_gaussian_quantiles >>> real_cov = np.array([[.8, .3], ... [.3, .4]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], ... cov=real_cov, ... size=500) >>> cov = MinCovDet(random_state=0).fit(X) >>> cov.covariance_ array([[0.7411..., 0.2535...], [0.2535..., 0.3053...]]) >>> cov.location_ array([0.0813... , 0.0427...]) References ---------- .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984. .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 """ _nonrobust_covariance = staticmethod(empirical_covariance) @_deprecate_positional_args def __init__(self, *, store_precision=True, assume_centered=False, support_fraction=None, random_state=None): self.store_precision = store_precision self.assume_centered = assume_centered self.support_fraction = support_fraction self.random_state = random_state def fit(self, X, y=None): """Fits a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. 
y: Ignored Not used, present for API consistence purpose. Returns ------- self : object """ X = self._validate_data(X, ensure_min_samples=2, estimator='MinCovDet') random_state = check_random_state(self.random_state) n_samples, n_features = X.shape # check that the empirical covariance is full rank if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: warnings.warn("The covariance matrix associated to your dataset " "is not full rank") # compute and store raw estimates raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True) # get precision matrix in an optimized way precision = linalg.pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist # obtain consistency at normal models self.correct_covariance(X) # re-weight estimator self.reweight_covariance(X) return self def correct_covariance(self, data): """Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [RVD]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : ndarray of shape (n_features, n_features) Corrected robust covariance estimate. References ---------- .. [RVD] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ # Check that the covariance of the support data is not equal to 0. # Otherwise self.dist_ = 0 and thus correction = 0. n_samples = len(self.dist_) n_support = np.sum(self.support_) if n_support < n_samples and np.allclose(self.raw_covariance_, 0): raise ValueError('The covariance matrix of the support data ' 'is equal to 0, try to increase support_fraction') correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected def reweight_covariance(self, data): """Re-weight raw Minimum Covariance Determinant estimates. Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates) described in [RVDriessen]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : ndarray of shape (n_features,) Re-weighted robust location estimate. covariance_reweighted : ndarray of shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. References ---------- .. 
[RVDriessen] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ n_samples, n_features = data.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = data[mask].mean(0) covariance_reweighted = self._nonrobust_covariance( data[mask], assume_centered=self.assume_centered) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = data - self.location_ self.dist_ = np.sum( np.dot(X_centered, self.get_precision()) * X_centered, 1) return location_reweighted, covariance_reweighted, support_reweighted
bsd-3-clause
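As a usage illustration for the MinCovDet estimator documented above, here is a minimal sketch; the contaminated data set is made up for the example, and the attribute names follow the docstring in this entry.

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[.8, .3], [.3, .4]], size=500)
X[:25] += 10                      # inject a few gross outliers

mcd = MinCovDet(random_state=0).fit(X)
print(mcd.covariance_)            # reweighted robust covariance, close to the true one
print(int(mcd.support_.sum()))    # number of observations kept in the support
print(mcd.dist_[:5])              # robust Mahalanobis distances of the training points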
nvoron23/statsmodels
statsmodels/sandbox/nonparametric/tests/ex_gam_new.py
34
3845
# -*- coding: utf-8 -*- """Example for GAM with Poisson Model and PolynomialSmoother This example was written as a test case. The data generating process is chosen so the parameters are well identified and estimated. Created on Fri Nov 04 13:45:43 2011 Author: Josef Perktold """ from __future__ import print_function from statsmodels.compat.python import lrange, zip import time import numpy as np #import matplotlib.pyplot as plt np.seterr(all='raise') from scipy import stats from statsmodels.sandbox.gam import AdditiveModel from statsmodels.sandbox.gam import Model as GAM #? from statsmodels.genmod.families import family from statsmodels.genmod.generalized_linear_model import GLM np.random.seed(8765993) #seed is chosen for nice result, not randomly #other seeds are pretty off in the prediction or end in overflow #DGP: simple polynomial order = 3 sigma_noise = 0.1 nobs = 1000 #lb, ub = -0.75, 3#1.5#0.75 #2.5 lb, ub = -3.5, 3 x1 = np.linspace(lb, ub, nobs) x2 = np.sin(2*x1) x = np.column_stack((x1/x1.max()*1, 1.*x2)) exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1) idx = lrange((order+1)*2) del idx[order+1] exog_reduced = exog[:,idx] #remove duplicate constant y_true = exog.sum(1) #/ 4. z = y_true #alias check d = x y = y_true + sigma_noise * np.random.randn(nobs) example = 3 if example == 2: print("binomial") f = family.Binomial() mu_true = f.link.inverse(z) #b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)]) b = np.asarray([stats.bernoulli.rvs(p) for p in f.link.inverse(z)]) b.shape = y.shape m = GAM(b, d, family=f) toc = time.time() m.fit(b) tic = time.time() print(tic-toc) #for plotting yp = f.link.inverse(y) p = b if example == 3: print("Poisson") f = family.Poisson() #y = y/y.max() * 3 yp = f.link.inverse(z) #p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float) p = np.asarray([stats.poisson.rvs(p) for p in f.link.inverse(z)], float) p.shape = y.shape m = GAM(p, d, family=f) toc = time.time() m.fit(p) tic = time.time() print(tic-toc) for ss in m.smoothers: print(ss.params) if example > 1: import matplotlib.pyplot as plt plt.figure() for i in np.array(m.history[2:15:3]): plt.plot(i.T) plt.figure() plt.plot(exog) #plt.plot(p, '.', lw=2) plt.plot(y_true, lw=2) y_pred = m.results.mu # + m.results.alpha #m.results.predict(d) plt.figure() plt.subplot(2,2,1) plt.plot(p, '.') plt.plot(yp, 'b-', label='true') plt.plot(y_pred, 'r-', label='GAM') plt.legend(loc='upper left') plt.title('gam.GAM Poisson') counter = 2 for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]): sortidx = np.argsort(xx) #plt.figure() plt.subplot(2, 2, counter) plt.plot(xx[sortidx], p[sortidx], 'k.', alpha=0.5) plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true') plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM') plt.legend(loc='upper left') plt.title('gam.GAM Poisson ' + ii) counter += 1 res = GLM(p, exog_reduced, family=f).fit() #plot component, compared to true component x1 = x[:,0] x2 = x[:,1] f1 = exog[:,:order+1].sum(1) - 1 #take out constant f2 = exog[:,order+1:].sum(1) - 1 plt.figure() #Note: need to correct for constant which is indeterminatedly distributed #plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0]+1, 'r') #better would be subtract f(0) m.smoothers[0](np.array([0])) plt.plot(x1, f1, linewidth=2) plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0], 'r') plt.figure() plt.plot(x2, f2, linewidth=2) plt.plot(x2, m.smoothers[1](x2)-m.smoothers[1].params[0], 'r') plt.show()
bsd-3-clause
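The design-matrix line `exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)` in the example above relies on numpy broadcasting; the following is a small self-contained sketch of the same trick with toy shapes and made-up data.

import numpy as np

order, nobs = 3, 5
x = np.random.randn(nobs, 2)                        # two regressors
powers = np.arange(order + 1)[None, None, :]        # shape (1, 1, order+1)
exog = (x[:, :, None] ** powers).reshape(nobs, -1)  # shape (nobs, 2*(order+1))
# columns 0..order hold x1**0 .. x1**order, columns order+1.. hold x2**0 .. x2**order
print(exog.shape)                                   # (5, 8)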
acmaheri/sms-tools
software/models_interface/dftModel_function.py
21
2413
# function to call the main analysis/synthesis functions in software/models/dftModel.py import numpy as np import matplotlib.pyplot as plt from scipy.signal import get_window import os, sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/')) import utilFunctions as UF import dftModel as DFT def main(inputFile = '../../sounds/piano.wav', window = 'blackman', M = 511, N = 1024, time = .2): """ inputFile: input sound file (monophonic with sampling rate of 44100) window: analysis window type (choice of rectangular, hanning, hamming, blackman, blackmanharris) M: analysis window size (odd integer value) N: fft size (power of two, bigger or equal than than M) time: time to start analysis (in seconds) """ # read input sound (monophonic with sampling rate of 44100) fs, x = UF.wavread(inputFile) # compute analysis window w = get_window(window, M) # get a fragment of the input sound of size M sample = int(time*fs) if (sample+M >= x.size or sample < 0): # raise error if time outside of sound raise ValueError("Time outside sound boundaries") x1 = x[sample:sample+M] # compute the dft of the sound fragment mX, pX = DFT.dftAnal(x1, w, N) # compute the inverse dft of the spectrum y = DFT.dftSynth(mX, pX, w.size)*sum(w) # create figure plt.figure(figsize=(12, 9)) # plot the sound fragment plt.subplot(4,1,1) plt.plot(time + np.arange(M)/float(fs), x1) plt.axis([time, time + M/float(fs), min(x1), max(x1)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('input sound: x') # plot the magnitude spectrum plt.subplot(4,1,2) plt.plot(float(fs)*np.arange(mX.size)/float(N), mX, 'r') plt.axis([0, fs/2.0, min(mX), max(mX)]) plt.title ('magnitude spectrum: mX') plt.ylabel('amplitude (dB)') plt.xlabel('frequency (Hz)') # plot the phase spectrum plt.subplot(4,1,3) plt.plot(float(fs)*np.arange(pX.size)/float(N), pX, 'c') plt.axis([0, fs/2.0, min(pX), max(pX)]) plt.title ('phase spectrum: pX') plt.ylabel('phase (radians)') plt.xlabel('frequency (Hz)') # plot the sound resulting from the inverse dft plt.subplot(4,1,4) plt.plot(time + np.arange(M)/float(fs), y) plt.axis([time, time + M/float(fs), min(y), max(y)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('output sound: y') plt.tight_layout() plt.show() if __name__ == "__main__": main()
agpl-3.0
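A hypothetical invocation of the wrapper above; the module name is inferred from the file path, and the parameter values respect the constraints stated in its docstring (M odd, N a power of two greater than or equal to M, time inside the sound).

import dftModel_function as DF

DF.main(inputFile='../../sounds/piano.wav',   # default test sound shipped with sms-tools
        window='hamming',                     # rectangular, hanning, hamming, blackman or blackmanharris
        M=801,                                # odd analysis window size
        N=1024,                               # FFT size, power of two, >= M
        time=0.3)                             # analysis start time in seconds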
lukebarnard1/bokeh
bokeh/mplexporter/renderers/vincent_renderer.py
11
1962
from __future__ import absolute_import import warnings from .base import Renderer from ..exporter import Exporter class VincentRenderer(Renderer): def open_figure(self, fig, props): self.chart = None self.figwidth = int(props['figwidth'] * props['dpi']) self.figheight = int(props['figheight'] * props['dpi']) def draw_line(self, data, coordinates, style, label, mplobj=None): import vincent # only import if VincentRenderer is used if coordinates != 'data': warnings.warn("Only data coordinates supported. Skipping this") linedata = {'x': data[:, 0], 'y': data[:, 1]} line = vincent.Line(linedata, iter_idx='x', width=self.figwidth, height=self.figheight) # TODO: respect the other style settings line.scales['color'].range = [style['color']] if self.chart is None: self.chart = line else: warnings.warn("Multiple plot elements not yet supported") def draw_markers(self, data, coordinates, style, label, mplobj=None): import vincent # only import if VincentRenderer is used if coordinates != 'data': warnings.warn("Only data coordinates supported. Skipping this") markerdata = {'x': data[:, 0], 'y': data[:, 1]} markers = vincent.Scatter(markerdata, iter_idx='x', width=self.figwidth, height=self.figheight) # TODO: respect the other style settings markers.scales['color'].range = [style['facecolor']] if self.chart is None: self.chart = markers else: warnings.warn("Multiple plot elements not yet supported") def fig_to_vincent(fig): """Convert a matplotlib figure to a vincent object""" renderer = VincentRenderer() exporter = Exporter(renderer) exporter.run(fig) return renderer.chart
bsd-3-clause
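A sketch of converting a matplotlib figure with the fig_to_vincent() helper defined above. It assumes the vincent package is installed, that the import path mirrors the file location, and that the figure contains only line/marker artists in data coordinates, since anything else is skipped with a warning; the to_json call follows vincent's usual Vega-export API.

import matplotlib.pyplot as plt
from bokeh.mplexporter.renderers.vincent_renderer import fig_to_vincent

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [1, 3, 2], color='red')

chart = fig_to_vincent(fig)        # returns a vincent.Line for this figure
chart.to_json('line_chart.json')   # vincent visualizations serialize to Vega JSON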
UNR-AERIAL/scikit-learn
examples/cluster/plot_kmeans_assumptions.py
270
2040
""" ==================================== Demonstration of k-means assumptions ==================================== This example is meant to illustrate situations where k-means will produce unintuitive and possibly unexpected clusters. In the first three plots, the input data does not conform to some implicit assumption that k-means makes and undesirable clusters are produced as a result. In the last plot, k-means returns intuitive clusters despite unevenly sized blobs. """ print(__doc__) # Author: Phil Roth <[email protected]> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs plt.figure(figsize=(12, 12)) n_samples = 1500 random_state = 170 X, y = make_blobs(n_samples=n_samples, random_state=random_state) # Incorrect number of clusters y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X) plt.subplot(221) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Incorrect Number of Blobs") # Anisotropicly distributed data transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso) plt.subplot(222) plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) plt.title("Anisotropicly Distributed Blobs") # Different variance X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied) plt.subplot(223) plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) plt.title("Unequal Variance") # Unevenly sized blobs X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered) plt.subplot(224) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs") plt.show()
bsd-3-clause
roofit-dev/parallel-roofit-scripts
profiling/vincemark/analyze_f.py
1
12734
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Author: Patrick Bos # @Date: 2016-11-16 16:23:55 # @Last Modified by: E. G. Patrick Bos # @Last Modified time: 2017-07-06 11:21:01 import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from pathlib import Path import itertools import load_timing pd.set_option("display.width", None) def savefig(factorplot, fp): try: g.savefig(fp) print("saved figure using pathlib.Path, apparently mpl is now pep 519 compatible! https://github.com/matplotlib/matplotlib/pull/8481") except TypeError: g.savefig(fp.__str__()) """ cd ~/projects/apcocsm/code/profiling/vincemark && rsync --progress --include='*/' --include='*/*/' --include='timing*.json' --exclude='*' -zavr nikhef:project_atlas/apcocsm_code/profiling/vincemark/vincemark_f ./ && cd - """ basepath = Path.home() / 'projects/apcocsm/code/profiling/vincemark/vincemark_f' savefig_dn = basepath / 'analysis' savefig_dn.mkdir(parents=True, exist_ok=True) #### LOAD DATA FROM FILES fpgloblist = [basepath.glob('%i.allier.nikhef.nl/*.json' % i) for i in range(18596455, 18596592)] # for i in itertools.chain(range(18445438, 18445581), # range(18366732, 18367027))] drop_meta = ['parallel_interleave', 'seed', 'print_level', 'timing_flag', 'optConst', 'workspace_filepath', 'time_num_ints'] skip_on_match = ['timing_RRMPFE_serverloop_p*.json', # skip timing_flag 8 output (contains no data) ] if Path('df_numints.hdf').exists(): skip_on_match.append('timings_numInts.json') dfs_sp, dfs_mp_sl, dfs_mp_ma = load_timing.load_dfs_coresplit(fpgloblist, skip_on_match=skip_on_match, drop_meta=drop_meta) # #### TOTAL TIMINGS (flag 1) df_totals_real = pd.concat([dfs_sp['full_minimize'], dfs_mp_ma['full_minimize']]) # ### ADD IDEAL TIMING BASED ON SINGLE CORE RUNS df_totals_ideal = load_timing.estimate_ideal_timing(df_totals_real, groupby=['N_events', 'segment', 'N_chans', 'N_nuisance_parameters', 'N_bins'], time_col='walltime_s') df_totals = load_timing.combine_ideal_and_real(df_totals_real, df_totals_ideal) # remove summed timings, they show nothing new df_totals = df_totals[df_totals.segment != 'migrad+hesse+minos'] # # add combination of two categories # df_totals['timeNIs/Nevents'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_events.astype(str) # df_totals['timeNIs/Nbins'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_bins.astype(str) # df_totals['timeNIs/Nnps'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_nuisance_parameters.astype(str) # df_totals['timeNIs/Nchans'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_chans.astype(str) #### ANALYSIS # full timings # g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_bins', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row') # plt.subplots_adjust(top=0.93) # g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos') # savefig(g, savefig_dn / f'total_timing.png') plot_stuff = input("press ENTER to plot stuff, type n and press ENTER to not plot stuff. 
") if plot_stuff != "n": g = sns.factorplot(x='N_bins', y='walltime_s', col='num_cpu', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row', order=range(1, 1001)) plt.subplots_adjust(top=0.93) g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos') savefig(g, savefig_dn / f'total_timing_vs_bins.png') g = sns.factorplot(x='N_chans', y='walltime_s', col='num_cpu', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row') plt.subplots_adjust(top=0.93) g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos') savefig(g, savefig_dn / f'total_timing_vs_chans.png') # Use the 1 channel 100 bins 1 nps runs as a special case, since these should scale linearly (i.e. no costs, no benefits) subset = df_totals[(df_totals.N_chans == 1) & (df_totals.N_bins == 100) & (df_totals.N_nuisance_parameters == 1)] g = sns.factorplot(x='num_cpu', y='walltime_s', hue='timing_type', row='segment', data=subset, legend_out=False) plt.subplots_adjust(top=0.93) g.fig.suptitle(f'total wallclock timing for only the 1 channel 100 bins 1 nps runs') savefig(g, savefig_dn / f'total_timing_vs_1chan100bins1nps.png') # make a plot per unique combination of parameters (looping is too complicated, since the combination space is sparse) # # https://stackoverflow.com/a/35268906/1199693 # # for name, group in df_totals.groupby([]): # for chans in df_totals.N_chans.unique(): # for events in df_totals.N_events.unique(): # for nps in df_totals.N_nuisance_parameters.unique(): # data = df_totals[(df_totals.N_chans == chans) & (df_totals.N_events == events) & (df_totals.N_nuisance_parameters == nps)] # if len(data) > 0: # g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_bins', hue='timing_type', row='segment', estimator=np.min, data=data, legend_out=False, sharey='row') # plt.subplots_adjust(top=0.93) # g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos --- N_channels = {chans}, N_events = {events}, N_nps = {nps}') # savefig(g, savefig_dn / f'total_timing_chan{chans}_event{events}_np{nps}.png') print("Something is not going right with the numerical integral added iteration columns... 
are they structured the way I thought at all?") raise SystemExit #### NUMERICAL INTEGRAL TIMINGS if not Path('df_numints.hdf').exists(): df_numints = dfs_mp_sl['numInts'] df_numints.to_hdf('df_numints.hdf', 'vincemark_a_numint_timings') else: print("loading numerical integral timings from HDF file...") df_numints = pd.read_hdf('df_numints.hdf', 'vincemark_a_numint_timings') print("...done") load_timing.add_iteration_column(df_numints) df_numints_min_by_iteration = df_numints.groupby('iteration').min() df_numints_max_by_iteration = df_numints.groupby('iteration').max() """ #### RooRealMPFE TIMINGS ### MPFE evaluate @ client (single core) (flags 5 and 6) mpfe_eval = pd.concat([v for k, v in dfs_mp_ma.items() if 'wall_RRMPFE_evaluate_client' in k] + [v for k, v in dfs_mp_ma.items() if 'cpu_RRMPFE_evaluate_client' in k]) ### add MPFE evaluate full timings (flag 4) mpfe_eval_full = pd.concat([v for k, v in dfs_mp_ma.items() if 'RRMPFE_evaluate_full' in k]) mpfe_eval_full.rename(columns={'RRMPFE_evaluate_wall_s': 'time s'}, inplace=True) mpfe_eval_full['cpu/wall'] = 'wall+INLINE' mpfe_eval_full['segment'] = 'all' mpfe_eval = mpfe_eval.append(mpfe_eval_full) ### total time per run (== per pid, but the other columns are also grouped-by to prevent from summing over them) mpfe_eval_total = mpfe_eval.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment', 'force_num_int'], as_index=False).sum() #### ADD mpfe_eval COLUMN OF CPU_ID, ***PROBABLY***, WHICH SEEMS TO EXPLAIN DIFFERENT TIMINGS QUITE WELL mpfe_eval_cpu_split = pd.DataFrame(columns=mpfe_eval.columns) for num_cpu in range(2, 9): mpfe_eval_num_cpu = mpfe_eval[(mpfe_eval.segment == 'all') * (mpfe_eval.num_cpu == num_cpu)] mpfe_eval_num_cpu['cpu_id'] = None for cpu_id in range(num_cpu): mpfe_eval_num_cpu.iloc[cpu_id::num_cpu, mpfe_eval_num_cpu.columns.get_loc('cpu_id')] = cpu_id mpfe_eval_cpu_split = mpfe_eval_cpu_split.append(mpfe_eval_num_cpu) mpfe_eval_cpu_split_total = mpfe_eval_cpu_split.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment', 'cpu_id', 'force_num_int'], as_index=False).sum() ### MPFE calculate mpfe_calc = pd.concat([v for k, v in dfs_mp_ma.items() if 'RRMPFE_calculate_initialize' in k]) mpfe_calc.rename(columns={'RRMPFE_calculate_initialize_wall_s': 'walltime s'}, inplace=True) mpfe_calc_total = mpfe_calc.groupby(['pid', 'N_events', 'num_cpu', 'force_num_int'], as_index=False).sum() #### RooAbsTestStatistic TIMINGS ### RATS evaluate full (flag 2) rats_eval_sp = dfs_sp['RATS_evaluate_full'].dropna() rats_eval_ma = dfs_mp_ma['RATS_evaluate_full'].dropna() # rats_eval_sl is not really a multi-process result, it is just the single process runs (the ppid output in RooFit is now set to -1 if it is not really a slave, for later runs) # rats_eval_sl = dfs_mp_sl['RATS_evaluate_full'].dropna() rats_eval = pd.concat([rats_eval_sp, rats_eval_ma]) rats_eval_total = rats_eval.groupby(['pid', 'N_events', 'num_cpu', 'mode', 'force_num_int'], as_index=False).sum() ### RATS evaluate per CPU iteration (multi-process only) (flag 3) rats_eval_itcpu = rats_eval_itcpu_ma = dfs_mp_ma['RATS_evaluate_mpmaster_perCPU'].copy() rats_eval_itcpu.rename(columns={'RATS_evaluate_mpmaster_it_wall_s': 'walltime s'}, inplace=True) # rats_eval_itcpu is counted in the master process, the slaves do nothing (the ppid output is now removed from RooFit, for later runs) # rats_eval_itcpu_sl = dfs_mp_sl['RATS_evaluate_mpmaster_perCPU'] rats_eval_itcpu_total = rats_eval_itcpu.groupby(['pid', 'N_events', 'num_cpu', 'it_nr', 'force_num_int'], 
as_index=False).sum() """ #### ANALYSIS """ # RATS evaluate full times g = sns.factorplot(x='num_cpu', y='RATS_evaluate_wall_s', col='N_events', hue='mode', row='force_num_int', estimator=np.min, data=rats_eval_total, legend_out=False, sharey=False) plt.subplots_adjust(top=0.85) g.fig.suptitle('total wallclock timing of all calls to RATS::evaluate()') savefig(g, savefig_dn / 'rats_eval.png') # RATS evaluate itX times g = sns.factorplot(x='num_cpu', y='walltime s', hue='it_nr', col='N_events', row='force_num_int', estimator=np.min, data=rats_eval_itcpu_total, legend_out=False, sharey=False) plt.subplots_adjust(top=0.85) g.fig.suptitle('total wallclock timing of the iterations of the main for-loop in RATS::evaluate()') savefig(g, savefig_dn / 'rats_eval_itcpu.png') # MPFE evaluate timings (including "collect" time) for segment in mpfe_eval_total.segment.unique(): g = sns.factorplot(x='num_cpu', y='time s', hue='cpu/wall', col='N_events', row='force_num_int', estimator=np.min, data=mpfe_eval_total[mpfe_eval_total.segment == segment], legend_out=False, sharey=False) plt.subplots_adjust(top=0.95) g.fig.suptitle('total timings of all calls to RRMPFE::evaluate(); "COLLECT"') savefig(g, savefig_dn / f'mpfe_eval_{segment}.png') # ... split by cpu id g = sns.factorplot(x='num_cpu', y='time s', hue='cpu_id', col='N_events', row='force_num_int', estimator=np.min, data=mpfe_eval_cpu_split_total[(mpfe_eval_cpu_split_total['cpu/wall'] == 'wall')], legend_out=False, sharey=False) plt.subplots_adjust(top=0.85) g.fig.suptitle('total wallclock timing of all calls to RRMPFE::evaluate(); only wallclock and only all-segment timings') savefig(g, savefig_dn / f'mpfe_eval_cpu_split.png') # MPFE calculate timings ("dispatch" time) g = sns.factorplot(x='num_cpu', y='walltime s', col='N_events', row='force_num_int', sharey='row', estimator=np.min, data=mpfe_calc_total, legend_out=False) plt.subplots_adjust(top=0.85) g.fig.suptitle('total wallclock timing of all calls to RRMPFE::calculate(); "DISPATCH"') savefig(g, savefig_dn / 'mpfe_calc.png') """ # numerical integrals g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.min, data=df_numints, legend_out=False) plt.subplots_adjust(top=0.85) g.fig.suptitle('wallclock timing of all timed numerical integrals --- minima of all integrations per plotted factor --- vertical bars: variation in different runs and iterations') savefig(g, savefig_dn / 'numInts_min.png') g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.max, data=df_numints, legend_out=False) plt.subplots_adjust(top=0.85) g.fig.suptitle('wallclock timing of all timed numerical integrals --- maxima of all integrations per plotted factor --- vertical bars: variation in different runs and iterations') savefig(g, savefig_dn / 'numInts_max.png') g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.sum, data=df_numints_max_by_iteration, legend_out=False) plt.subplots_adjust(top=0.8) g.fig.suptitle('wallclock timing of all timed numerical integrals --- sum of maximum of each iteration per run $\sum_{\mathrm{it}} \max_{\mathrm{core}}(t_{\mathrm{run,it,core}})$ --- vertical bars: variation in different runs') savefig(g, savefig_dn / 'numInts_it_sum_max.png') plt.show()
apache-2.0
lfairchild/PmagPy
programs/hysteresis_magic.py
2
1746
#!/usr/bin/env python # -*- python-indent-offset: 4; -*- import sys import matplotlib if matplotlib.get_backend() != "TKAgg": matplotlib.use("TKAgg") from pmagpy import ipmag from pmagpy import pmag def main(): """ NAME hysteresis_magic.py DESCRIPTION calculates hystereis parameters and saves them in 3.0 specimen format file makes plots if option selected SYNTAX hysteresis_magic.py [command line options] OPTIONS -h prints help message and quits -f: specify input file, default is agm_measurements.txt -F: specify specimens.txt output file -WD: directory to output files to (default : current directory) Note: if using Windows, all figures will output to current directory -ID: directory to read files from (default : same as -WD) -P: do not make the plots -spc SPEC: specify specimen name to plot and quit -sav save all plots and quit -fmt [png,svg,eps,jpg] """ args = sys.argv fmt = pmag.get_named_arg('-fmt', 'svg') output_dir_path = pmag.get_named_arg('-WD', '.') input_dir_path = pmag.get_named_arg('-ID', "") if "-h" in args: print(main.__doc__) sys.exit() meas_file = pmag.get_named_arg('-f', 'measurements.txt') spec_file = pmag.get_named_arg('-F', 'specimens.txt') make_plots = True save_plots = False if '-P' in args: make_plots = False if '-sav' in args: save_plots = True pltspec = pmag.get_named_arg('-spc', 0) ipmag.hysteresis_magic(output_dir_path, input_dir_path, spec_file, meas_file, fmt, save_plots, make_plots, pltspec) if __name__ == "__main__": main()
bsd-3-clause
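A sketch of calling the underlying library function directly, mirroring the argument order and the default values used in main() above.

from pmagpy import ipmag

ipmag.hysteresis_magic('.',                  # output_dir_path (-WD)
                       '',                   # input_dir_path (-ID, '' means same as -WD)
                       'specimens.txt',      # spec_file (-F)
                       'measurements.txt',   # meas_file (-f)
                       'svg',                # fmt (-fmt)
                       False,                # save_plots (-sav not given)
                       True,                 # make_plots (-P not given)
                       0)                    # pltspec (0: no single specimen selected)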
khkaminska/bokeh
examples/app/stock_applet/stock_app.py
42
7786
""" This file demonstrates a bokeh applet, which can either be viewed directly on a bokeh-server, or embedded into a flask application. See the README.md file in this directory for instructions on running. """ import logging logging.basicConfig(level=logging.DEBUG) from os import listdir from os.path import dirname, join, splitext import numpy as np import pandas as pd from bokeh.models import ColumnDataSource, Plot from bokeh.plotting import figure, curdoc from bokeh.properties import String, Instance from bokeh.server.app import bokeh_app from bokeh.server.utils.plugins import object_page from bokeh.models.widgets import HBox, VBox, VBoxForm, PreText, Select # build up list of stock data in the daily folder data_dir = join(dirname(__file__), "daily") try: tickers = listdir(data_dir) except OSError as e: print('Stock data not available, see README for download instructions.') raise e tickers = [splitext(x)[0].split("table_")[-1] for x in tickers] # cache stock data as dict of pandas DataFrames pd_cache = {} def get_ticker_data(ticker): fname = join(data_dir, "table_%s.csv" % ticker.lower()) data = pd.read_csv( fname, names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'], header=False, parse_dates=['date'] ) data = data.set_index('date') data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()}) return data def get_data(ticker1, ticker2): if pd_cache.get((ticker1, ticker2)) is not None: return pd_cache.get((ticker1, ticker2)) # only append columns if it is the same ticker if ticker1 != ticker2: data1 = get_ticker_data(ticker1) data2 = get_ticker_data(ticker2) data = pd.concat([data1, data2], axis=1) else: data = get_ticker_data(ticker1) data = data.dropna() pd_cache[(ticker1, ticker2)] = data return data class StockApp(VBox): extra_generated_classes = [["StockApp", "StockApp", "VBox"]] jsmodel = "VBox" # text statistics pretext = Instance(PreText) # plots plot = Instance(Plot) line_plot1 = Instance(Plot) line_plot2 = Instance(Plot) hist1 = Instance(Plot) hist2 = Instance(Plot) # data source source = Instance(ColumnDataSource) # layout boxes mainrow = Instance(HBox) histrow = Instance(HBox) statsbox = Instance(VBox) # inputs ticker1 = String(default="AAPL") ticker2 = String(default="GOOG") ticker1_select = Instance(Select) ticker2_select = Instance(Select) input_box = Instance(VBoxForm) def __init__(self, *args, **kwargs): super(StockApp, self).__init__(*args, **kwargs) self._dfs = {} @classmethod def create(cls): """ This function is called once, and is responsible for creating all objects (plots, datasources, etc) """ # create layout widgets obj = cls() obj.mainrow = HBox() obj.histrow = HBox() obj.statsbox = VBox() obj.input_box = VBoxForm() # create input widgets obj.make_inputs() # outputs obj.pretext = PreText(text="", width=500) obj.make_source() obj.make_plots() obj.make_stats() # layout obj.set_children() return obj def make_inputs(self): self.ticker1_select = Select( name='ticker1', value='AAPL', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'] ) self.ticker2_select = Select( name='ticker2', value='GOOG', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'] ) @property def selected_df(self): pandas_df = self.df selected = self.source.selected['1d']['indices'] if selected: pandas_df = pandas_df.iloc[selected, :] return pandas_df def make_source(self): self.source = ColumnDataSource(data=self.df) def line_plot(self, ticker, x_range=None): p = figure( title=ticker, x_range=x_range, x_axis_type='datetime', plot_width=1000, plot_height=200, title_text_font_size="10pt", 
tools="pan,wheel_zoom,box_select,reset" ) p.circle( 'date', ticker, size=2, source=self.source, nonselection_alpha=0.02 ) return p def hist_plot(self, ticker): global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50) hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 start = global_bins.min() end = global_bins.max() top = hist.max() p = figure( title="%s hist" % ticker, plot_width=500, plot_height=200, tools="", title_text_font_size="10pt", x_range=[start, end], y_range=[0, top], ) p.rect(center, hist / 2.0, width, hist) return p def make_plots(self): ticker1 = self.ticker1 ticker2 = self.ticker2 p = figure( title="%s vs %s" % (ticker1, ticker2), plot_width=400, plot_height=400, tools="pan,wheel_zoom,box_select,reset", title_text_font_size="10pt", ) p.circle(ticker1 + "_returns", ticker2 + "_returns", size=2, nonselection_alpha=0.02, source=self.source ) self.plot = p self.line_plot1 = self.line_plot(ticker1) self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range) self.hist_plots() def hist_plots(self): ticker1 = self.ticker1 ticker2 = self.ticker2 self.hist1 = self.hist_plot(ticker1) self.hist2 = self.hist_plot(ticker2) def set_children(self): self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2] self.mainrow.children = [self.input_box, self.plot, self.statsbox] self.input_box.children = [self.ticker1_select, self.ticker2_select] self.histrow.children = [self.hist1, self.hist2] self.statsbox.children = [self.pretext] def input_change(self, obj, attrname, old, new): if obj == self.ticker2_select: self.ticker2 = new if obj == self.ticker1_select: self.ticker1 = new self.make_source() self.make_plots() self.set_children() curdoc().add(self) def setup_events(self): super(StockApp, self).setup_events() if self.source: self.source.on_change('selected', self, 'selection_change') if self.ticker1_select: self.ticker1_select.on_change('value', self, 'input_change') if self.ticker2_select: self.ticker2_select.on_change('value', self, 'input_change') def make_stats(self): stats = self.selected_df.describe() self.pretext.text = str(stats) def selection_change(self, obj, attrname, old, new): self.make_stats() self.hist_plots() self.set_children() curdoc().add(self) @property def df(self): return get_data(self.ticker1, self.ticker2) # The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL # will render this StockApp. If you don't want serve this applet from a Bokeh # server (for instance if you are embedding in a separate Flask application), # then just remove this block of code. @bokeh_app.route("/bokeh/stocks/") @object_page("stocks") def make_stocks(): app = StockApp.create() return app
bsd-3-clause
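The get_data()/pd_cache pair above is a small memoised data-access layer that can also be used outside the applet; a sketch follows, where the module name and the presence of the downloaded daily/table_<ticker>.csv files are assumptions.

from stock_app import get_data

df = get_data('AAPL', 'GOOG')              # parsed from CSV once, then served from pd_cache
print(list(df.columns))                    # AAPL, AAPL_returns, GOOG, GOOG_returns
print(df[['AAPL_returns', 'GOOG_returns']].corr())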
timpalpant/KaggleTSTextClassification
scripts/practice/predict.3.py
1
1789
#!/usr/bin/env python
'''
Make predictions for the test data

3. Use naive Bayes on the boolean and real-valued features, with a
separate classifier for each label; the two probability estimates
are averaged.
'''

import argparse
import numpy as np
from common import *
from scipy.stats import itemfreq
from sklearn.naive_bayes import BernoulliNB, GaussianNB

def prepare_features(data):
    return data['bfeatures'], data['ffeatures']

def opts():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('train', type=load_npz,
                        help='Training features (npz)')
    parser.add_argument('labels', type=load_npz,
                        help='Training labels (npz)')
    parser.add_argument('test', type=load_npz,
                        help='Test features (npz)')
    parser.add_argument('output',
                        help='Output label predictions (npz)')
    return parser

if __name__ == "__main__":
    args = opts().parse_args()

    print "Loading and preparing data"
    X1, X2 = prepare_features(args.train)
    Y = args.labels['labels']

    print "Training classifiers"
    clf1s = []
    for i in xrange(Y.shape[1]):
        print i
        clf = BernoulliNB(fit_prior=True)
        clf.fit(X1, Y[:, i])
        clf1s.append(clf)
    del X1

    clf2s = []
    for i in xrange(Y.shape[1]):
        print i
        clf = GaussianNB()
        clf.fit(X2, Y[:, i])
        clf2s.append(clf)
    del X2, Y

    print "Predicting"
    X1, X2 = prepare_features(args.test)
    p1 = np.vstack([clf.predict_proba(X1)[:, 0] for clf in clf1s])
    Y1 = 1 - p1.T
    p2 = np.vstack([clf.predict_proba(X2)[:, 0] for clf in clf2s])
    Y2 = 1 - p2.T
    Y = (Y1 + Y2) / 2
    Y[:, 13] = 0
    Y[np.isnan(Y)] = 0

    print "Saving predictions"
    save_npz(args.output, ids=args.test['ids'],
             header=args.labels['header'],
             labels=Y)
gpl-3.0
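A hypothetical way to drive the script above from Python; the .npz file names are placeholders for the archives produced by the repository's feature-extraction step (each must contain the arrays expected by prepare_features and the 'labels'/'header'/'ids' keys used at the end).

import subprocess

subprocess.check_call(['python', 'predict.3.py',
                       'train_features.npz',   # 'train' argument
                       'train_labels.npz',     # 'labels' argument
                       'test_features.npz',    # 'test' argument
                       'predictions.npz'])     # 'output' argument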
agutieda/QuantEcon.py
examples/wb_download.py
7
1145
""" Origin: QE by John Stachurski and Thomas J. Sargent Filename: wb_download.py Authors: John Stachurski, Tomohito Okabe LastModified: 29/08/2013 Dowloads data from the World Bank site on GDP per capita and plots result for a subset of countries. NOTE: This is not dually compatible with Python 3. Python 2 and Python 3 call the urllib package differently. """ import sys import matplotlib.pyplot as plt from pandas.io.excel import ExcelFile if sys.version_info[0] == 2: from urllib import urlretrieve elif sys.version_info[0] == 3: from urllib.request import urlretrieve # == Get data and read into file gd.xls == # wb_data_file_dir = "http://api.worldbank.org/datafiles/" file_name = "GC.DOD.TOTL.GD.ZS_Indicator_MetaData_en_EXCEL.xls" url = wb_data_file_dir + file_name urlretrieve(url, "gd.xls") # == Parse data into a DataFrame == # gov_debt_xls = ExcelFile('gd.xls') govt_debt = gov_debt_xls.parse('Sheet1', index_col=1, na_values=['NA']) # == Take desired values and plot == # govt_debt = govt_debt.transpose() govt_debt = govt_debt[['AUS', 'DEU', 'FRA', 'USA']] govt_debt = govt_debt[36:] govt_debt.plot(lw=2) plt.show()
bsd-3-clause
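For reference, the ExcelFile.parse step above can be written with the current pandas API as follows; this is only a sketch, and reading the legacy .xls format additionally requires the xlrd package.

import pandas as pd

govt_debt = pd.read_excel('gd.xls', sheet_name='Sheet1',
                          index_col=1, na_values=['NA'])
govt_debt = govt_debt.transpose()[['AUS', 'DEU', 'FRA', 'USA']][36:]
govt_debt.plot(lw=2)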
Eric89GXL/scikit-learn
sklearn/datasets/species_distributions.py
10
7844
""" ============================= Species distribution dataset ============================= This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References: * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes: * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset """ # Authors: Peter Prettenhofer <[email protected]> # Jake Vanderplas <[email protected]> # # License: BSD 3 clause from io import BytesIO from os import makedirs from os.path import join from os.path import exists try: # Python 2 from urllib2 import urlopen except ImportError: # Python 3 from urllib.request import urlopen import numpy as np from sklearn.datasets.base import get_data_home, Bunch from sklearn.externals import joblib DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/" SAMPLES_URL = join(DIRECTORY_URL, "samples.zip") COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip") DATA_ARCHIVE_NAME = "species_coverage.pkz" def _load_coverage(F, header_length=6, dtype=np.int16): """ load a coverage file. This will return a numpy array of the given dtype """ try: header = [F.readline() for i in range(header_length)] except: F = open(F) header = [F.readline() for i in range(header_length)] make_tuple = lambda t: (t.split()[0], float(t.split()[1])) header = dict([make_tuple(line) for line in header]) M = np.loadtxt(F, dtype=dtype) nodata = header['NODATA_value'] if nodata != -9999: M[nodata] = -9999 return M def _load_csv(F): """Load csv file. Parameters ---------- F : string or file object file object or name of file Returns ------- rec : np.ndarray record array representing the data """ try: names = F.readline().strip().split(',') except: F = open(F) names = F.readline().strip().split(',') rec = np.loadtxt(F, skiprows=1, delimiter=',', dtype='a22,f4,f4') rec.dtype.names = names return rec def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) def fetch_species_distributions(data_home=None, download_if_missing=True): """Loader for species distribution dataset from Phillips et. al. (2006) Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. 
download_if_missing: optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Notes ------ This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. The data is returned as a Bunch object with the following attributes: coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1623,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (619,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes ----- * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset with scikit-learn """ data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05) dtype = np.int16 if not exists(join(data_home, DATA_ARCHIVE_NAME)): print('Downloading species data from %s to %s' % (SAMPLES_URL, data_home)) X = np.load(BytesIO(urlopen(SAMPLES_URL).read())) for f in X.files: fhandle = BytesIO(X[f]) if 'train' in f: train = _load_csv(fhandle) if 'test' in f: test = _load_csv(fhandle) print('Downloading coverage data from %s to %s' % (COVERAGES_URL, data_home)) X = np.load(BytesIO(urlopen(COVERAGES_URL).read())) coverages = [] for f in X.files: fhandle = BytesIO(X[f]) print(' - converting', f) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9) else: bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME)) return bunch
bsd-3-clause
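A minimal usage sketch for the loader and grid helper defined above; the first call downloads the coverage data, the import path follows this file's location in scikit-learn of that era, and the shapes follow the fetch_species_distributions docstring in this entry.

from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids

data = fetch_species_distributions()
xgrid, ygrid = construct_grids(data)
print(data.coverages.shape)        # (14, 1592, 1212): 14 features on the lat/lon grid
print(len(xgrid), len(ygrid))      # Nx = 1212 longitudes, Ny = 1592 latitudes
print(data.train.dtype.names)      # fields documented above: species, dd long, dd lat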
arahuja/scikit-learn
examples/svm/plot_svm_margin.py
318
2328
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= SVM Margins Example ========================================================= The plots below illustrate the effect the parameter `C` has on the separation line. A large value of `C` basically tells our model that we do not have that much faith in our data's distribution, and will only consider points close to line of separation. A small value of `C` includes more/all the observations, allowing the margins to be calculated using all the data in the area. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import svm # we create 40 separable points np.random.seed(0) X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]] Y = [0] * 20 + [1] * 20 # figure number fignum = 1 # fit the model for name, penalty in (('unreg', 1), ('reg', 0.05)): clf = svm.SVC(kernel='linear', C=penalty) clf.fit(X, Y) # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - (clf.intercept_[0]) / w[1] # plot the parallels to the separating hyperplane that pass through the # support vectors margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2)) yy_down = yy + a * margin yy_up = yy - a * margin # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.plot(xx, yy, 'k-') plt.plot(xx, yy_down, 'k--') plt.plot(xx, yy_up, 'k--') plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none', zorder=10) plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired) plt.axis('tight') x_min = -4.8 x_max = 4.2 y_min = -6 y_max = 6 XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.predict(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.figure(fignum, figsize=(4, 3)) plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
bsd-3-clause
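A quick numerical check of the margin computed in the example above: for separable data such as the two shifted blobs used there, the support vectors of a linear SVC satisfy |w·x + b| = 1, so the closest points lie at distance 1/||w|| from the separating line. The sketch below regenerates the same blobs and compares the two quantities.

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X = np.r_[rng.randn(20, 2) - [2, 2], rng.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

clf = svm.SVC(kernel='linear', C=1).fit(X, Y)
w, b = clf.coef_[0], clf.intercept_[0]
margin = 1 / np.linalg.norm(w)                        # same quantity as in the plot code
dists = np.abs(np.dot(X, w) + b) / np.linalg.norm(w)  # point-to-plane distances
print(margin, dists.min())                            # agree up to solver tolerance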
FrancoisRheaultUS/dipy
doc/examples/streamline_tools.py
3
10275
""" .. _streamline_tools: ========================================================= Connectivity Matrices, ROI Intersections and Density Maps ========================================================= This example is meant to be an introduction to some of the streamline tools available in DIPY_. Some of the functions covered in this example are ``target``, ``connectivity_matrix`` and ``density_map``. ``target`` allows one to filter streamlines that either pass through or do not pass through some region of the brain, ``connectivity_matrix`` groups and counts streamlines based on where in the brain they begin and end, and finally, density map counts the number of streamlines that pass though every voxel of some image. To get started we'll need to have a set of streamlines to work with. We'll use EuDX along with the CsaOdfModel to make some streamlines. Let's import the modules and download the data we'll be using. """ import numpy as np from scipy.ndimage.morphology import binary_dilation from dipy.core.gradients import gradient_table from dipy.data import get_fnames from dipy.io.gradients import read_bvals_bvecs from dipy.io.image import load_nifti_data, load_nifti, save_nifti from dipy.direction import peaks from dipy.reconst import shm from dipy.tracking import utils from dipy.tracking.local_tracking import LocalTracking from dipy.tracking.stopping_criterion import BinaryStoppingCriterion from dipy.tracking.streamline import Streamlines hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi') label_fname = get_fnames('stanford_labels') t1_fname = get_fnames('stanford_t1') data, affine, hardi_img = load_nifti(hardi_fname, return_img=True) labels = load_nifti_data(label_fname) t1_data = load_nifti_data(t1_fname) bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname) gtab = gradient_table(bvals, bvecs) """ We've loaded an image called ``labels_img`` which is a map of tissue types such that every integer value in the array ``labels`` represents an anatomical structure or tissue type [#]_. For this example, the image was created so that white matter voxels have values of either 1 or 2. We'll use ``peaks_from_model`` to apply the ``CsaOdfModel`` to each white matter voxel and estimate fiber orientations which we can use for tracking. We will also dilate this mask by 1 voxel to ensure streamlines reach the grey matter. """ white_matter = binary_dilation((labels == 1) | (labels == 2)) csamodel = shm.CsaOdfModel(gtab, 6) csapeaks = peaks.peaks_from_model(model=csamodel, data=data, sphere=peaks.default_sphere, relative_peak_threshold=.8, min_separation_angle=45, mask=white_matter) """ Now we can use EuDX to track all of the white matter. To keep things reasonably fast we use ``density=1`` which will result in 1 seeds per voxel. The stopping criterion, determining when the tracking stops, is set to stop when the tracking exit the white matter. """ affine = np.eye(4) seeds = utils.seeds_from_mask(white_matter, affine, density=1) stopping_criterion = BinaryStoppingCriterion(white_matter) streamline_generator = LocalTracking(csapeaks, stopping_criterion, seeds, affine=affine, step_size=0.5) streamlines = Streamlines(streamline_generator) """ The first of the tracking utilities we'll cover here is ``target``. This function takes a set of streamlines and a region of interest (ROI) and returns only those streamlines that pass though the ROI. 
The ROI should be an array such that the voxels that belong to the ROI are ``True`` and all other voxels are ``False`` (this type of binary array is sometimes called a mask). This function can also exclude all the streamlines that pass though an ROI by setting the ``include`` flag to ``False``. In this example we'll target the streamlines of the corpus callosum. Our ``labels`` array has a sagittal slice of the corpus callosum identified by the label value 2. We'll create an ROI mask from that label and create two sets of streamlines, those that intersect with the ROI and those that don't. """ cc_slice = labels == 2 cc_streamlines = utils.target(streamlines, affine, cc_slice) cc_streamlines = Streamlines(cc_streamlines) other_streamlines = utils.target(streamlines, affine, cc_slice, include=False) other_streamlines = Streamlines(other_streamlines) assert len(other_streamlines) + len(cc_streamlines) == len(streamlines) """ We can use some of DIPY_'s visualization tools to display the ROI we targeted above and all the streamlines that pass though that ROI. The ROI is the yellow region near the center of the axial image. """ from dipy.viz import window, actor, colormap as cmap # Enables/disables interactive visualization interactive = False # Make display objects color = cmap.line_colors(cc_streamlines) cc_streamlines_actor = actor.line(cc_streamlines, cmap.line_colors(cc_streamlines)) cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.), opacity=0.5) vol_actor = actor.slicer(t1_data) vol_actor.display(x=40) vol_actor2 = vol_actor.copy() vol_actor2.display(z=35) # Add display objects to canvas r = window.Renderer() r.add(vol_actor) r.add(vol_actor2) r.add(cc_streamlines_actor) r.add(cc_ROI_actor) # Save figures window.record(r, n_frames=1, out_path='corpuscallosum_axial.png', size=(800, 800)) if interactive: window.show(r) r.set_camera(position=[-1, 0, 0], focal_point=[0, 0, 0], view_up=[0, 0, 1]) window.record(r, n_frames=1, out_path='corpuscallosum_sagittal.png', size=(800, 800)) if interactive: window.show(r) """ .. figure:: corpuscallosum_axial.png :align: center **Corpus Callosum Axial** .. include:: ../links_names.inc .. figure:: corpuscallosum_sagittal.png :align: center **Corpus Callosum Sagittal** """ """ Once we've targeted on the corpus callosum ROI, we might want to find out which regions of the brain are connected by these streamlines. To do this we can use the ``connectivity_matrix`` function. This function takes a set of streamlines and an array of labels as arguments. It returns the number of streamlines that start and end at each pair of labels and it can return the streamlines grouped by their endpoints. Notice that this function only considers the endpoints of each streamline. """ M, grouping = utils.connectivity_matrix(cc_streamlines, affine, labels.astype(np.uint8), return_mapping=True, mapping_as_streamlines=True) M[:3, :] = 0 M[:, :3] = 0 """ We've set ``return_mapping`` and ``mapping_as_streamlines`` to ``True`` so that ``connectivity_matrix`` returns all the streamlines in ``cc_streamlines`` grouped by their endpoint. Because we're typically only interested in connections between gray matter regions, and because the label 0 represents background and the labels 1 and 2 represent white matter, we discard the first three rows and columns of the connectivity matrix. We can now display this matrix using matplotlib, we display it using a log scale to make small values in the matrix easier to see. 
""" import numpy as np import matplotlib.pyplot as plt plt.imshow(np.log1p(M), interpolation='nearest') plt.savefig("connectivity.png") """ .. figure:: connectivity.png :align: center **Connectivity of Corpus Callosum** .. include:: ../links_names.inc """ """ In our example track there are more streamlines connecting regions 11 and 54 than any other pair of regions. These labels represent the left and right superior frontal gyrus respectively. These two regions are large, close together, have lots of corpus callosum fibers and are easy to track so this result should not be a surprise to anyone. However, the interpretation of streamline counts can be tricky. The relationship between the underlying biology and the streamline counts will depend on several factors, including how the tracking was done, and the correct way to interpret these kinds of connectivity matrices is still an open question in the diffusion imaging literature. The next function we'll demonstrate is ``density_map``. This function allows one to represent the spatial distribution of a track by counting the density of streamlines in each voxel. For example, let's take the track connecting the left and right superior frontal gyrus. """ lr_superiorfrontal_track = grouping[11, 54] shape = labels.shape dm = utils.density_map(lr_superiorfrontal_track, affine, shape) """ Let's save this density map and the streamlines so that they can be visualized together. In order to save the streamlines in a ".trk" file we'll need to move them to "trackvis space", or the representation of streamlines specified by the trackvis Track File format. """ from dipy.io.stateful_tractogram import Space, StatefulTractogram from dipy.io.streamline import save_trk # Save density map save_nifti("lr-superiorfrontal-dm.nii.gz", dm.astype("int16"), affine) lr_sf_trk = Streamlines(lr_superiorfrontal_track) # Save streamlines sft = StatefulTractogram(lr_sf_trk, hardi_img, Space.VOX) save_trk(sft, "lr-superiorfrontal.trk") """ .. rubric:: Footnotes .. [#] The image `aparc-reduced.nii.gz`, which we load as ``labels_img``, is a modified version of label map `aparc+aseg.mgz` created by `FreeSurfer <https://surfer.nmr.mgh.harvard.edu/>`_. The corpus callosum region is a combination of the FreeSurfer labels 251-255. The remaining FreeSurfer labels were re-mapped and reduced so that they lie between 0 and 88. To see the FreeSurfer region, label and name, represented by each value see `label_info.txt` in `~/.dipy/stanford_hardi`. .. [#] An affine transformation is a mapping between two coordinate systems that can represent scaling, rotation, sheer, translation and reflection. Affine transformations are often represented using a 4x4 matrix where the last row of the matrix is ``[0, 0, 0, 1]``. """
bsd-3-clause
NSLS-II-HXN/PyXRF
pyxrf/gui_module/tab_wd_plots_fitting_model.py
1
9325
from qtpy.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QRadioButton, QButtonGroup, QComboBox, QCheckBox, QPushButton, QDialog) from qtpy.QtCore import Qt, Slot, Signal from matplotlib.backends.backend_qt5agg import \ FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar from .useful_widgets import ElementSelection, set_tooltip, global_gui_variables from .dlg_plot_escape_peak import DialogPlotEscapePeak class PlotFittingModel(QWidget): signal_selected_element_changed = Signal(str) signal_add_line = Signal() signal_remove_line = Signal() def __init__(self, *, gpc, gui_vars): super().__init__() self._enable_events = False # Global processing classes self.gpc = gpc # Global GUI variables (used for control of GUI state) self.gui_vars = gui_vars self.combo_plot_type = QComboBox() self.combo_plot_type.addItems(["LinLog", "Linear"]) # Values are received and sent to the model, the don't represent the displayed text self.combo_plot_type_values = ["linlog", "linear"] self.cb_show_spectrum = QCheckBox("Show spectrum") self.cb_show_fit = QCheckBox("Show fit") self.rb_selected_region = QRadioButton("Selected region") self.rb_selected_region.setChecked(True) self.rb_full_spectrum = QRadioButton("Full spectrum") self.bgroup = QButtonGroup() self.bgroup.addButton(self.rb_selected_region) self.bgroup.addButton(self.rb_full_spectrum) self.pb_escape_peak = QPushButton("Escape Peak ...") self.pb_escape_peak.clicked.connect(self.pb_escape_peak_clicked) self.pb_add_line = QPushButton("Add Line") self.pb_add_line.clicked.connect(self.pb_add_line_clicked) self.pb_remove_line = QPushButton("Remove Line") self.pb_remove_line.clicked.connect(self.pb_remove_line_clicked) self.element_selection = ElementSelection() eline_sample_list = ["Li_K", "B_K", "C_K", "N_K", "Fe_K", "Userpeak1"] self.element_selection.set_item_list(eline_sample_list) self.element_selection.signal_current_item_changed.connect( self.element_selection_item_changed) self.mpl_canvas = FigureCanvas(self.gpc.plot_model._fig) self.mpl_toolbar = NavigationToolbar(self.mpl_canvas, self) # Keep layout without change when canvas is hidden (invisible) sp_retain = self.mpl_canvas.sizePolicy() sp_retain.setRetainSizeWhenHidden(True) self.mpl_canvas.setSizePolicy(sp_retain) self.widgets_enable_events(True) vbox = QVBoxLayout() hbox = QHBoxLayout() hbox.addWidget(self.combo_plot_type) hbox.addWidget(self.cb_show_spectrum) hbox.addWidget(self.cb_show_fit) hbox.addStretch(1) hbox.addWidget(self.rb_selected_region) hbox.addWidget(self.rb_full_spectrum) vbox.addLayout(hbox) hbox = QHBoxLayout() hbox.addWidget(self.pb_escape_peak) hbox.addStretch(1) hbox.addWidget(self.pb_add_line) hbox.addWidget(self.pb_remove_line) hbox.addSpacing(20) hbox.addWidget(self.element_selection) hbox.addStretch(1) vbox.addLayout(hbox) vbox.addWidget(self.mpl_toolbar) vbox.addWidget(self.mpl_canvas) self.setLayout(vbox) self._set_tooltips() def widgets_enable_events(self, enable): if enable: if not self._enable_events: self.cb_show_spectrum.toggled.connect(self.cb_show_spectrum_toggled) self.cb_show_fit.toggled.connect(self.cb_show_fit_toggled) self.bgroup.buttonToggled.connect(self.bgroup_button_toggled) self.combo_plot_type.currentIndexChanged.connect( self.combo_plot_type_current_index_changed) self._enable_events = True else: if self._enable_events: self.cb_show_spectrum.toggled.disconnect(self.cb_show_spectrum_toggled) self.cb_show_fit.toggled.disconnect(self.cb_show_fit_toggled) self.bgroup.buttonToggled.disconnect(self.bgroup_button_toggled) 
self.combo_plot_type.currentIndexChanged.disconnect( self.combo_plot_type_current_index_changed) self._enable_events = False def _set_tooltips(self): set_tooltip(self.combo_plot_type, "Use <b>Linear</b> or <b>LinLog</b> axes to plot spectra") set_tooltip(self.cb_show_spectrum, "Show <b>raw<b> spectrum") set_tooltip(self.cb_show_fit, "Show <b>fitted</b> spectrum") set_tooltip(self.rb_selected_region, "Plot spectrum in the <b>selected range</b> of energies. The range may be set " "in the 'Model' tab. Click the button <b>'Find Automatically ...'</b> " "to set the range of energies before finding the emission lines. The range " "may be changed in General Settings dialog (button <b>'General ...'</b>) at any time." ) set_tooltip(self.rb_full_spectrum, "Plot full spectrum over <b>all available eneriges</b>.") set_tooltip(self.pb_escape_peak, "Select options for displaying the <b>escape peak</b>. " "If activated, the location of the escape peak is shown " "for the emission line, which is currently selected for adding, " "removal or editing.") set_tooltip(self.pb_add_line, "<b>Add</b> the current emission line to the list of selected lines") set_tooltip(self.pb_remove_line, "<b>Remove</b> the current emission line from the list of selected lines.") set_tooltip(self.element_selection, "<b>Choose</b> the emission line for addition or removal.") def update_widget_state(self, condition=None): if condition == "tooltips": self._set_tooltips() self.mpl_toolbar.setVisible(self.gui_vars["show_matplotlib_toolbar"]) # Hide Matplotlib canvas during computations state_compute = global_gui_variables["gui_state"]["running_computations"] self.mpl_canvas.setVisible(not state_compute) @Slot() def update_controls(self): self.widgets_enable_events(False) plot_spectrum, plot_fit = self.gpc.get_line_plot_state() self.cb_show_spectrum.setChecked(plot_spectrum) self.cb_show_fit.setChecked(plot_fit) if self.gpc.get_plot_fit_energy_range() == "selected": self.rb_selected_region.setChecked(True) else: self.rb_full_spectrum.setChecked(True) try: index = self.combo_plot_type_values.index(self.gpc.get_plot_fit_linlog()) self.combo_plot_type.setCurrentIndex(index) except ValueError: self.combo_plot_type.setCurrentIndex(-1) self.widgets_enable_events(True) def le_mouse_press(self, event): print("Button pressed (line edit)") if event.button() == Qt.RightButton: print("Right button pressed") def pb_escape_peak_clicked(self, event): plot_escape_peak, detector_material = self.gpc.get_escape_peak_params() incident_energy = self.gpc.get_incident_energy() dlg = DialogPlotEscapePeak() dlg.set_parameters(plot_escape_peak, incident_energy, detector_material) if dlg.exec() == QDialog.Accepted: plot_escape_peak, detector_material = dlg.get_parameters() self.gpc.set_escape_peak_params(plot_escape_peak, detector_material) print("Dialog exit: Ok button") def cb_show_spectrum_toggled(self, state): self.gpc.show_plot_spectrum(state) def cb_show_fit_toggled(self, state): self.gpc.show_plot_fit(state) def bgroup_button_toggled(self, button, checked): if checked: if button == self.rb_selected_region: self.gpc.set_plot_fit_energy_range("selected") else: self.gpc.set_plot_fit_energy_range("full") def combo_plot_type_current_index_changed(self, index): self.gpc.set_plot_fit_linlog(self.combo_plot_type_values[index]) def pb_add_line_clicked(self): self.signal_add_line.emit() def pb_remove_line_clicked(self): self.signal_remove_line.emit() def element_selection_item_changed(self, index, eline): self.signal_selected_element_changed.emit(eline) @Slot(str) 
def slot_selection_item_changed(self, eline): self.element_selection.set_current_item(eline) @Slot() def slot_update_eline_selection_list(self): eline_list = self.gpc.get_full_eline_list() self.element_selection.set_item_list(eline_list) @Slot(bool, bool) def slot_update_add_remove_btn_state(self, add_enabled, remove_enabled): self.pb_add_line.setEnabled(add_enabled) self.pb_remove_line.setEnabled(remove_enabled) @Slot() @Slot(bool) def redraw_plot_fit(self): self.gpc.update_plot_fit()
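# Minimal usage sketch for wiring the signals declared above from a parent
# window. ``gpc_instance``, ``gui_state``, ``on_add_line`` and
# ``on_remove_line`` are hypothetical placeholders standing in for the real
# global processing object, GUI-state dictionary and slots supplied by the
# application.
#
# plot_tab = PlotFittingModel(gpc=gpc_instance, gui_vars=gui_state)
# plot_tab.signal_add_line.connect(on_add_line)
# plot_tab.signal_remove_line.connect(on_remove_line)
# plot_tab.signal_selected_element_changed.connect(
#     lambda eline: print("Selected emission line:", eline))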
bsd-3-clause
doged/electrum-doged-i2p
plugins/plot.py
4
3822
from PyQt4.QtGui import *
from electrum_doged.plugins import BasePlugin, hook
from electrum_doged.i18n import _

import datetime
from electrum_doged.util import format_satoshis

try:
    import matplotlib.pyplot as plt
    import matplotlib.dates as md
    from matplotlib.patches import Ellipse
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
    flag_matlib = True
except ImportError:
    flag_matlib = False


class Plugin(BasePlugin):

    def is_available(self):
        if flag_matlib:
            return True
        else:
            return False

    @hook
    def init_qt(self, gui):
        self.win = gui.main_window

    @hook
    def export_history_dialog(self, d, hbox):
        self.wallet = d.wallet
        history = self.wallet.get_history()
        if len(history) > 0:
            b = QPushButton(_("Preview plot"))
            hbox.addWidget(b)
            b.clicked.connect(lambda: self.do_plot(self.wallet, history))
        else:
            b = QPushButton(_("No history to plot"))
            hbox.addWidget(b)

    def do_plot(self, wallet, history):
        balance_Val = []
        fee_val = []
        value_val = []
        datenums = []
        unknown_trans = 0
        pending_trans = 0
        counter_trans = 0
        balance = 0
        for item in history:
            tx_hash, confirmations, value, timestamp = item
            balance += value
            if confirmations:
                if timestamp is not None:
                    try:
                        datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
                        balance_string = format_satoshis(balance, False)
                        balance_Val.append(float(balance_string) * 1000.0)
                    except (RuntimeError, TypeError, NameError):
                        unknown_trans += 1
                else:
                    unknown_trans += 1
            else:
                pending_trans += 1

            value_string = format_satoshis(value, True)
            value_val.append(float(value_string) * 1000.0)

            if tx_hash:
                label, is_default_label = wallet.get_label(tx_hash)
                label = label.encode('utf-8')
            else:
                label = ""

        f, axarr = plt.subplots(2, sharex=True)

        plt.subplots_adjust(bottom=0.2)
        plt.xticks(rotation=25)
        ax = plt.gca()
        x = 19
        test11 = ("Unknown transactions = " + str(unknown_trans)
                  + " Pending transactions = " + str(pending_trans) + " .")
        box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
        box1.set_text(test11)

        box = HPacker(children=[box1], align="center", pad=0.1, sep=15)

        anchored_box = AnchoredOffsetbox(loc=3, child=box, pad=0.5,
                                         frameon=True,
                                         bbox_to_anchor=(0.5, 1.02),
                                         bbox_transform=ax.transAxes,
                                         borderpad=0.5)

        ax.add_artist(anchored_box)

        plt.ylabel('mDOGED')
        plt.xlabel('Dates')

        xfmt = md.DateFormatter('%Y-%m-%d')
        ax.xaxis.set_major_formatter(xfmt)
        axarr[0].plot(datenums, balance_Val, marker='o', linestyle='-', color='blue', label='Balance')
        axarr[0].legend(loc='upper left')
        axarr[0].set_title('History Transactions')

        xfmt = md.DateFormatter('%Y-%m-%d')
        ax.xaxis.set_major_formatter(xfmt)
        axarr[1].plot(datenums, value_val, marker='o', linestyle='-', color='green', label='Value')
        axarr[1].legend(loc='upper left')
        # plt.annotate('unknown transaction = %d \n pending transactions = %d'
        #              % (unknown_trans, pending_trans),
        #              xy=(0.7, 0.05), xycoords='axes fraction', size=12)

        plt.show()
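# The balance curve plotted above is just a running sum of transaction values
# scaled to mDOGED. A minimal stand-alone illustration of that arithmetic with
# made-up satoshi values (no wallet or Qt objects needed); the numbers are
# placeholders, and the 1e8 satoshi-per-coin factor mirrors what
# format_satoshis assumes.
#
# history_values = [50000000, -12000000, 3000000]   # hypothetical satoshi amounts
# balance = 0
# balance_mdoged = []
# for value in history_values:
#     balance += value
#     balance_mdoged.append(balance / 100000000.0 * 1000.0)  # coins -> mDOGED
# print(balance_mdoged)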
gpl-3.0
abimannans/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
286
2378
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================

Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM. This demonstrates Label Propagation
learning a good boundary even with a small amount of labeled data.

"""
print(__doc__)

# Authors: Clay Woolam <[email protected]>
# Licence: BSD

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation

rng = np.random.RandomState(0)

iris = datasets.load_iris()

X = iris.data[:, :2]
y = iris.target

# step size in the mesh
h = .02

y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['Label Spreading 30% data',
          'Label Spreading 50% data',
          'Label Spreading 100% data',
          'SVC with rbf kernel']

color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}

for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    colors = [color_map[label] for label in y_train]
    plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)

    plt.title(titles[i])

plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
bsd-3-clause
allenai/deep_qa
scripts/clean_raw_omnibus.py
4
2207
# -*- coding: utf-8 -*- """ This script takes as input raw TSV files from the Omnibus dataset and preprocesses them to be compatible with the deep_qa pipeline. """ import logging import os import csv from argparse import ArgumentParser import pandas logger = logging.getLogger(__name__) # pylint: disable=invalid-name def main(): log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(level=logging.INFO, format=log_format) parser = ArgumentParser(description=("Transform a raw Omnibus TSV " "to the format that the pipeline " "expects.")) parser.add_argument('input_csv', nargs='+', metavar="<input_csv>", type=str, help=("Path of TSV files to clean up. Pass in " "as many as you want, and the output " "will be a concatenation of them " "written to <last_input_csv>.clean")) arguments = parser.parse_args() all_clean_file_rows = [] for omnibus_file in arguments.input_csv: all_clean_file_rows.extend(clean_omnibus_csv(omnibus_file)) # turn the list of rows into a dataframe, and write to TSV dataframe = pandas.DataFrame(all_clean_file_rows) folder, filename = os.path.split(arguments.input_csv[-1]) outdirectory = folder + "/cleaned/" os.makedirs(outdirectory, exist_ok=True) outpath = outdirectory + filename + ".clean" logger.info("Saving cleaned file to %s", outpath) dataframe.to_csv(outpath, encoding="utf-8", index=False, sep="\t", header=False, quoting=csv.QUOTE_NONE) def clean_omnibus_csv(omnibus_file_path): logger.info("cleaning up %s", omnibus_file_path) # open the file as a csv dataframe = pandas.read_csv(omnibus_file_path, sep="\t", encoding='utf-8', header=None, quoting=csv.QUOTE_NONE) dataframe_trimmed = dataframe[[3, 9]] clean_rows = dataframe_trimmed.values.tolist() return clean_rows if __name__ == '__main__': main()
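# Example invocation from the command line; the file names are hypothetical.
# Any number of raw Omnibus TSV files can be passed, and the concatenated,
# cleaned output is written to a "cleaned/" directory next to the last input,
# as the code above shows:
#
#     python clean_raw_omnibus.py Omnibus-4_Train.tsv Omnibus-4_Dev.tsv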
apache-2.0
BjerknesClimateDataCentre/QuinCe
external_scripts/NRT/Saildrone_conversion/saildrone_module/api.py
2
4650
###############################################################################
### FUNCTIONS WHICH SEND REQUESTS TO THE SAILDRONE API ###
###############################################################################

### Description:
# Several requests are sent to the Saildrone API:
# - request a token (function 'auth')
# - request a list of what we can access (function 'get_available')
# - request to download data (function 'write_json')
# This document contains functions executing such requests.
#------------------------------------------------------------------------------
import json
import urllib.request
from urllib.error import HTTPError
import os
import pandas as pd
from datetime import datetime


REQUEST_HEADER = {'Content-Type': 'application/json',
                  'Accept': 'application/json'}


# Function which converts html request output to a dictionary
def to_dict(url):
    response = None

    # urlopen throws an error if the HTTP status is not 200.
    # The error is still a response object, so we can grab it -
    # we need to examine it either way
    try:
        response = urllib.request.urlopen(url)
    except HTTPError as e:
        response = e

    # Get the response body as a dictionary
    dictionary = json.loads(response.read().decode('utf-8'))

    error = False
    if response.status == 400:
        if dictionary['message'] != "Request out of time bound":
            error = True
    elif response.status >= 400:
        error = True

    if error:
        # The response is an error, so we can simply raise it
        raise response

    return dictionary


# Function which returns the token needed for authentication
def auth(authentication):
    # Define the authentication request url
    auth_url = 'https://developer-mission.saildrone.com/v1/auth'

    # Define our data
    our_data = json.dumps({'key': authentication['key'],
                           'secret': authentication['secret']}).encode()

    # Send the request
    auth_request = urllib.request.Request(
        url=auth_url, headers=REQUEST_HEADER, data=our_data, method='POST')

    # Convert the response to a dictionary. Extract and return the token
    auth_response_dict = to_dict(auth_request)
    token = auth_response_dict['token']
    return token


# Function returning a list of what's available from the Saildrone API
def get_available(token):
    # Define the url for requesting what's available
    check_available_url = 'https://developer-mission.saildrone.com/v1/auth/'\
        + 'access?token=' + token

    # Send the request
    check_available_request = urllib.request.Request(
        check_available_url, method='GET')

    # Convert the output to a dictionary. Extract and return the access list.
    available_dict = to_dict(check_available_request)
    data = available_dict['data']
    access_list = data['access']
    return access_list


# Function which requests data download. It returns the path to the downloaded
# json file.
def write_json(data_dir, drone_id, dataset, start, end, token):

    # Since we can only receive 1000 records per download request we need to
    # keep requesting (while loop) until we do not receive any data
    more_to_request = True
    data_list_concat = []
    offset = 0
    while (more_to_request is True):
        # Define the download request URL
        get_data_url = 'https://developer-mission.saildrone.com/v1/timeseries/'\
            + f'{drone_id}?data_set={dataset}&interval=1&start_date={start}&end_date='\
            + f'{end}&order_by=asc&limit=1000&offset={offset}&token={token}'
        #print(get_data_url)

        # Send request
        data_request = urllib.request.Request(
            get_data_url, headers=REQUEST_HEADER, method='GET')

        # Store output from request in dictionary
        data_dict = to_dict(data_request)

        # Continue adding new data to the concatenated data list until
        # we receive less than 10 records. (Because the data is being updated
        # constantly, we can get into odd loops where we're chasing one new
        # record every time.)
        #
        # Once that's done, add one second to the last record received to be
        # the start point for the next request.
        #print('Received ' + str(len(data_dict['data'])))
        if len(data_dict['data']) < 10:
            more_to_request = False
        else:
            data_list_concat = data_list_concat + data_dict['data']
            offset = offset + len(data_dict['data'])
            #print('Total length ' + str(len(data_list_concat)))

    # Replace the data section of the last json file received with the
    # concatenated data list
    data_dict['data'] = data_list_concat

    # Write the dictionary to a json file
    output_file_name = str(drone_id) + '_' + dataset + '.json'
    output_file_path = os.path.join(data_dir, output_file_name)
    with open(output_file_path, 'w') as outfile:
        json.dump(data_dict, outfile,
                  sort_keys=True, indent=4, separators=(',', ': '))

    return output_file_path
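# Minimal usage sketch, assuming valid Saildrone API credentials. The key,
# secret, drone id, dataset name, dates and output directory below are
# placeholders; only the functions defined above are used, with the
# signatures they are given here.
#
# authentication = {'key': 'MY_KEY', 'secret': 'MY_SECRET'}
# token = auth(authentication)
# print(get_available(token))
# json_path = write_json('/tmp/saildrone', 1021, 'oceanographic',
#                        '2020-01-01T00:00:00.000Z',
#                        '2020-01-02T00:00:00.000Z', token)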
gpl-3.0
amandersillinois/landlab
landlab/components/species_evolution/species_evolver.py
3
25710
#!/usr/bin/env python # -*- coding: utf-8 -*- """Evolve life in a landscape. Life evolves alongside landscapes by biotic and abiotic processes under complex dynamics at Earth's surface. Researchers who wish to explore these dynamics can use this component as a tool for them to build landscape-life evolution models. Landlab components, including SpeciesEvolver are designed to work with a shared model grid. Researchers can build novel models using plug-and-play surface process components to evolve the grid's landscape alongside the life tracked by SpeciesEvolver. The simulated life evolves following customizable processes. Component written by Nathan Lyons beginning August 2017. """ from collections import OrderedDict import numpy as np from pandas import DataFrame from landlab import Component from .record import Record class SpeciesEvolver(Component): """Evolve life in a landscape. This component tracks ``Taxon`` objects as they evolve in a landscape. The component calls the evolutionary process methods of tracked ``Taxon`` objects. ``Taxon`` are intended to be subclassed for unique behavior, attributes, and model approaches, including different implementations of evolutionary processes. The general workflow to use this component in a model is 1. Instantiate the component. 2. Instantiate taxa. 3. Introduce taxa to SpeciesEvolver using the ``track_taxon`` method. 4. Advance the component instance in time using ``run_one_step`` method. Taxa can be introduced at model onset and later time steps. Multiple types can be tracked by the same SpeciesEvolver instance. The taxon type, ``ZoneTaxon`` is distributed with SpeciesEvolver. The spatial aspect of ``ZoneTaxon`` macroevolutionary processes is determined using ``Zone`` objects. A ``ZoneController`` is used to create and manage zones as well as efficiently create multiple ZoneTaxon objects. See the documentation of ``ZoneController`` and ``ZoneTaxon`` for more information. SpeciesEvolver knows nothing about zones and their controller, meaning the concept of zones are not required for other taxon types. Model time and other variables can be viewed with the class attribute, ``record_data_frame``. Time is recorded to track the history of taxa lineages. The unit of time is not considered within the component other than the record, and can be thought of as in years or whatever unit is needed. Time is advanced with the ``dt`` parameter of the ``run_one_step`` method. The geographic ranges of the taxa at the current model time are evaluated during the ``run_one_step`` method. Each taxon object determines if it persists or becomes extinct, and if it creates child ``Taxon`` objects. Metadata of all taxa introduced to the component can be viewed with the attribute, ``taxa_data_frame``. Taxa are automatically assigned unique taxon identifiers, ``tid``. Identifiers are used to reference and retrieve taxon objects. Identifiers are assigned in the order taxa are introduced to SpeciesEvolver. Examples -------- The evolution of a lowland taxa lineage in response to mountain range formation is simulated using ZoneTaxon managed by ZoneController. Mountain range formation is forced without processes for simplicity in this example. Import modules used in the following examples. >>> from landlab import RasterModelGrid >>> from landlab.components import SpeciesEvolver >>> from landlab.components.species_evolution import ZoneController Create a model grid with mountain scale resolution. The elevation is equally low throughout the grid at model onset. 
>>> mg = RasterModelGrid((3, 7), 1000) >>> z = mg.add_ones('topographic__elevation', at='node') >>> z.reshape(mg.shape) array([[ 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1.]]) Instantiate the component with the grid as the first parameter. >>> se = SpeciesEvolver(mg) ZoneController requires a function that returns a mask of the total extent of taxa habitat. The mask is a boolean array where `True` values represent nodes that satisfy habitat conditions. Zone objects are not created here. The mask only maps the extent where taxa can exist. This function returns `True` where elevation is below 100, which is where the simulated lowland taxa of this model can inhabit. >>> def zone_func(grid): ... return grid.at_node['topographic__elevation'] < 100 Instantiate ZoneController with the grid and zone function. The initial zones are created at controller instantiation. In this example, one zone is created because all nodes of the zone mask are adjacent to each other. >>> zc = ZoneController(mg, zone_func) >>> len(zc.zones) == 1 True Additional examples of controller usage are provided in ``ZoneController`` documentation. The ``mask`` of the zone is True where the conditions of the zone function are met. All nodes of the grid are included because the elevation of each node is below 100. The ``zones`` attribute of ``ZoneController`` returns a list of the zones that currently exist in the model. Below we return the mask of the single zone by indexing this list. >>> zc.zones[0].mask array([ True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool) Populate a taxon to the zone. >>> taxon = zc.populate_zones_uniformly(1) >>> se.track_taxa(taxon) The attribute, ``taxa_data_frame`` indicates only the one taxon exists because we populated each zone with one taxon, and only the one zone exists. >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE pid type t_first t_final tid 0 <NA> ZoneTaxon 0 <NA> The identifier of the taxon, ``tid`` is 0. The identifier of the taxon's parent, ``pid``, is '<NA>' because it does not have a parent taxon given that it was manually introduced using the ``track_taxa`` method. The taxon was introduced at time, ``t_first`` and time, ``t_final`` is '<NA>' because the taxon remains extant. See the documentation of this attribute for further explanation of data frame columns. Force a change in the zone mask to demonstrate component functionality. Here we begin a new time step where topography is uplifted by 200 that forms a ridge trending north-south in the center of the grid. >>> z[[3, 10, 17]] = 200 >>> z.reshape(mg.shape) array([[ 1., 1., 1., 200., 1., 1., 1.], [ 1., 1., 1., 200., 1., 1., 1.], [ 1., 1., 1., 200., 1., 1., 1.]]) The current elevation, the elevation following uplift, is represented here. :: - - - ^ - - - elevation: - 1 - - - ^ - - - ^ 200 - - - ^ - - - The updated zone mask is below. :: . . . x . . . key: . node in zone mask . . . x . . . x node outside of zone mask . . . x . . . Run a step of both the ZoneController and SpeciesEvolver. Both are run to keep time in sync between the ``ZoneController``and ``SpeciesEvolver`` instances. >>> delta_time = 1000 >>> zc.run_one_step(delta_time) >>> se.run_one_step(delta_time) Two zones exist following this time step. >>> len(zc.zones) == 2 True An additional zone was created because the zone mask was not continuous. :: . . . ^ * * * key: . a zone . . . ^ * * * * another zone . . . 
^ * * * ^ mountain range The split of the initial zone triggered speciation of taxon 1 by taxon 0. >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE pid type t_first t_final tid 0 <NA> ZoneTaxon 0 <NA> 1 0 ZoneTaxon 1000 <NA> The phylogenetic tree of the simulated taxa is represented below. The number at the line tips are the taxa identifiers. :: 0 ──────┬── 0 │ └── 1 _________ 0 1000 time The split of the initial zone into two zones at time 1000 triggered taxon 0 to speciate. Taxon 0 occupies a zone on one side of the mountain range, and the child, taxon 1 occupies a zone on the other side. This outcome is the result of the evolutionary processes programmed within ``ZoneTaxon`` as well as the parameters used in this example (default values were used as optional parameters were not set). Different behavior can be achieved by subclassing ``ZoneTaxon`` or ``Taxon``. References ---------- **Required Software Citation(s) Specific to this Component** Lyons, N.J., Albert, J.S., Gasparini, N.M. (2020). SpeciesEvolver: A Landlab component to evolve life in simulated landscapes. Journal of Open Source Software 5(46), 2066, https://doi.org/10.21105/joss.02066 **Additional References** Albert, J.S., Schoolmaster Jr, D.R., Tagliacollo, V., Duke-Sylvester, S.M. (2016). Barrier displacement on a neutral landscape: Toward a theory of continental biogeography. Systematic Biology 66(2), 167–182. Lyons, N.J., Val, P., Albert, J.S., Willenbring, J.K., Gasparini, N.M., in review. Topographic controls on divide migration, stream capture, and diversification in riverine life. Earth Surface Dynamics. """ _name = "SpeciesEvolver" _unit_agnostic = True _info = { "taxa__richness": { "dtype": int, "intent": "out", "optional": False, "units": "-", "mapping": "node", "doc": "The number of taxa at each node", } } _cite_as = """@article{lyons2020species, author = {Lyons, N.J. and Albert, J.S. and Gasparini, N.M.}, title = {SpeciesEvolver: A Landlab component to evolve life in simulated landscapes}, year = {2020}, journal = {Journal of Open Source Software}, volume = {5}, number = {46}, doi = {10.21105/joss.02066}, url = {https://doi.org/10.21105/joss.02066} }""" def __init__(self, grid, initial_time=0): """Instantiate SpeciesEvolver. Parameters ---------- grid : ModelGrid A Landlab ModelGrid. initial_time : float, int, optional The initial time. The unit of time is not considered within the component, with the exception that time is logged in the record. The default value of this parameter is 0. """ super().__init__(grid) # Create data structures. self._record = Record(initial_time) self._record.set_value("taxa", 0) self._taxa_data = OrderedDict( [("tid", []), ("pid", []), ("type", []), ("t_first", []), ("t_final", [])] ) self._taxon_objs = [] # Create a taxa richness field. _ = grid.add_zeros("taxa__richness", at="node", dtype=int, clobber=True) @property def record_data_frame(self): """A Pandas DataFrame of SpeciesEvolver variables over time. Each row is data of a model time step. The time of the step is recorded in the `time` column. `taxa` is the count of taxa extant at a time. Additional columns can be added and updated by SpeciesEvolver objects during the component ``run_one_step`` method. See documention of Taxon objects for an explanation of these columns. The DataFrame is created from a dictionary associated with a SpeciesEvolver ``Record`` object. nan values in Pandas DataFrame force the column to become float values even when data are integers. 
The original value type is retained in the ``Record`` object. """ return self._record.data_frame @property def taxa_data_frame(self): """A Pandas DataFrame of taxa metadata. Each row is the metadata of a taxon. The column, ``tid`` is the taxon identifier assigned when SpeciesEvolver begins tracking the taxon. The column, ``pid`` is the tid of the parent of the taxon. A pid of `<NA>` indicates no parent taxon. ``type`` is the type of ``Taxon`` object. ``t_first`` is the initial model time the taxon was added to SpeciesEvolver. ``t_final`` is the model time the taxon was recognized as extinct. A t_final of `<NA>` indicates the taxon is extant. Additional columns may be added by some taxon types. See the documentation of these taxa for column description. The DataFrame is created from a data structure within the component. """ data = self._taxa_data cols = list(data.keys()) cols.remove("tid") df = DataFrame(data, columns=cols, index=data["tid"]) df.index.name = "tid" # Change column number type because pandas makes a column float if it # includes nan values. df["pid"] = df["pid"].astype("Int64") if all(isinstance(item, int) for item in data["t_final"] if not np.isnan(item)): df["t_final"] = df["t_final"].astype("Int64") return df def run_one_step(self, dt): """Update the taxa for a single time step. This method advances the model time in the component record, calls the evolve method of taxa extant at the current time, and updates the variables in the record and taxa dataframes. Parameters ---------- dt : float The model time step duration. Time in the record is advanced by the value of this parameter. """ record = self._record record.advance_time(dt) # Create a dictionary of the taxa to update at the current model time. # Keys are objects of extant taxa. Values are booleans indicating if # stages remain for respective taxa. time_dict = OrderedDict.fromkeys(self._taxon_objs, True) # Iteratively call taxa ``_evolve`` method until all stages of all taxa # have run. stage = 0 while any(time_dict.values()): # Run evolution stage. stage_dict = OrderedDict([]) evolving_taxa = filter(time_dict.get, time_dict) for taxon in evolving_taxa: # Run evolution stage of taxon with remaining stages. stages_remain, taxon_children = taxon._evolve(dt, stage, record) if taxon_children: stage_dict.update( OrderedDict.fromkeys(taxon_children, stages_remain) ) stage_dict[taxon] = stages_remain and taxon.extant time_dict.update(stage_dict) stage += 1 self._update_taxa_data(time_dict.keys()) def track_taxa(self, taxa): """Add taxa to be tracked over time by SpeciesEvolver. The taxon/taxa are introduced at the latest time in the record and also tracked during following model times. Each taxon is assigned an identifier and then can be viewed in ``taxa_data_frame``. Parameters ---------- taxa : Taxon or list of Taxon The taxa to introduce. Examples -------- ZoneTaxon are used to demonstrate this method. Import modules used in the following examples. >>> from landlab import RasterModelGrid >>> from landlab.components import SpeciesEvolver >>> from landlab.components.species_evolution import ZoneController Create a model grid with flat topography. >>> mg = RasterModelGrid((3, 7), 1000) >>> z = mg.add_ones('topographic__elevation', at='node') Instantiate SpeciesEvolver and a ZoneController. Instantiate the latter with a function that masks the low elevation zone extent. Only one zone is created. >>> se = SpeciesEvolver(mg) >>> def zone_func(grid): ... 
return grid.at_node['topographic__elevation'] < 100 >>> zc = ZoneController(mg, zone_func) >>> len(zc.zones) == 1 True Track the taxon of the one zone. >>> taxon = zc.populate_zones_uniformly(1) >>> se.track_taxa(taxon) The one taxon is now tracked by SpeciesEvolver as indicated by the taxa DataFrame. >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE pid type t_first t_final tid 0 <NA> ZoneTaxon 0 <NA> """ if not isinstance(taxa, list): taxa = [taxa] self._update_taxa_data(taxa) def _update_taxa_data(self, taxa_at_time): """Update the taxa data structure, set identifiers, and taxa statistics. This method sets identifiers and metadata for the newly introduced taxa. For the previously introduced, this method updates the 'latest_time` value of the taxa metadata. Parameters ---------- taxa_at_time : list of Taxon The taxa at the current model time. """ time = self._record.latest_time data = self._taxa_data objs = self._taxon_objs t_recorded = self._taxon_objs t_introduced = [taxon for taxon in taxa_at_time if taxon in t_recorded] t_new = [taxon for taxon in taxa_at_time if taxon not in t_recorded] # Update previously introduced taxa. for taxon in t_introduced: if not taxon.extant: idx = data["tid"].index(taxon.tid) data["t_final"][idx] = time objs.remove(taxon) # Set the data of new taxa. for taxon in t_new: # Set identifier. if data["tid"]: taxon._tid = max(data["tid"]) + 1 else: taxon._tid = 0 # Append taxon data. data["tid"].append(taxon.tid) if taxon.parent is not None: data["pid"].append(taxon.parent.tid) else: data["pid"].append(np.nan) data["type"].append(type(taxon).__name__) data["t_first"].append(time) if taxon.extant: data["t_final"].append(np.nan) objs.append(taxon) else: data["t_final"].append(time) # Update taxa stats. self._record.set_value("taxa", len(objs)) self._grid.at_node["taxa__richness"] = self._get_taxa_richness_map() def get_extant_taxon_objects(self, tids=np.nan, ancestor=np.nan, time=np.nan): """Get extant taxon objects filtered by parameters. This method returns all taxon objects tracked by the component when no optional parameters are included. The objects returned can be limited using one or more parameters. Parameters ---------- tids : list of int, optional The taxa with these identifiers will be returned. A list is returned even if only one object is contained within the list. By default, when `tids` is not specified, extant taxa with any identifier can be returned. ancestor : int, optional Limit the taxa returned to those descending from the taxon designated as the ancestor. The ancestor is designated using its ``tid``. By default, taxa with any or no ancestors are returned. time : float, int, optional Limit the taxa returned to those that were extant at the time designated by this parameter as well as extant at the current model time. By default, extant taxa at all of the times listed in the component record can be returned. Returns ------- taxa : a list of Taxon The Taxon objects that pass through the filter. The list is sorted by ``tid``. An empty list is returned if no taxa pass through the filter. Examples -------- ZoneTaxon are used to demonstrate this method. Import modules used in the following examples. >>> from landlab import RasterModelGrid >>> from landlab.components import SpeciesEvolver >>> from landlab.components.species_evolution import ZoneController Create a model grid. >>> mg = RasterModelGrid((3, 7), 1000) >>> z = mg.add_ones('topographic__elevation', at='node') Instantiate SpeciesEvolver and a ZoneController. 
Instantiate the latter with a function that masks the low elevation zone extent. Only one zone is created. >>> se = SpeciesEvolver(mg) >>> def zone_func(grid): ... return grid.at_node['topographic__elevation'] < 100 >>> zc = ZoneController(mg, zone_func) >>> len(zc.zones) == 1 True Introduce two taxa to the zone. >>> taxa = zc.populate_zones_uniformly(2) >>> se.track_taxa(taxa) Force north-south mountain ranges over two time steps that drives taxa evolution. >>> z[mg.x_of_node == 2000] = 200 >>> zc.run_one_step(1000) >>> se.run_one_step(1000) >>> z[mg.x_of_node == 4000] = 200 >>> zc.run_one_step(1000) >>> se.run_one_step(1000) Display taxa metadata. >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE pid type t_first t_final tid 0 <NA> ZoneTaxon 0 <NA> 1 <NA> ZoneTaxon 0 <NA> 2 0 ZoneTaxon 1000 <NA> 3 1 ZoneTaxon 1000 <NA> 4 0 ZoneTaxon 2000 <NA> 5 1 ZoneTaxon 2000 <NA> Objects of all extant taxon are returned when no parameters are inputted. >>> se.get_extant_taxon_objects() # doctest: +NORMALIZE_WHITESPACE [<ZoneTaxon, tid=0>, <ZoneTaxon, tid=1>, <ZoneTaxon, tid=2>, <ZoneTaxon, tid=3>, <ZoneTaxon, tid=4>, <ZoneTaxon, tid=5>] The returned objects of extant species can be limited using parameters. Here, get the taxon objects with identifiers, 4 and 5. >>> se.get_extant_taxon_objects(tids=[4, 5]) [<ZoneTaxon, tid=4>, <ZoneTaxon, tid=5>] Extant taxon objects descending from a taxon can be obtained using the ``ancestor`` property. Here, get the taxa that descended from taxon 0. >>> se.get_extant_taxon_objects(ancestor=0) [<ZoneTaxon, tid=2>, <ZoneTaxon, tid=4>] Taxa can be limited to those that were extant ``time``. >>> se.get_extant_taxon_objects(time=1000) # doctest: +NORMALIZE_WHITESPACE [<ZoneTaxon, tid=0>, <ZoneTaxon, tid=1>, <ZoneTaxon, tid=2>, <ZoneTaxon, tid=3>] The returned taxa can be further limited by including multiple method properties. >>> se.get_extant_taxon_objects(ancestor=0, time=1000) [<ZoneTaxon, tid=2>] An empty list is returned when no extant taxa match parameter criteria. >>> se.get_extant_taxon_objects(tids=[11]) [] """ # Create `results` that contains tids of the taxa matching parameter # criteria. extant_tids = [taxon.tid for taxon in self._taxon_objs] results = set(extant_tids) data = self._taxa_data # Query by identifiers. if isinstance(tids, list): results = results.intersection(tids) # Query by ancestor. if not np.isnan(ancestor) and ancestor in data["tid"]: df = self.taxa_data_frame df["pid"] = df["pid"].fillna(-1) taxon = ancestor descendants = [] stack = [taxon] while stack: children = df.index[df["pid"] == taxon].tolist() if children: descendants.extend(children) stack.extend(children) stack.remove(taxon) if stack: taxon = stack[0] results = results.intersection(descendants) elif not np.isnan(ancestor): results = [] # Query by time. if not np.isnan(time): t_first = np.array(data["t_first"]) t_latest = np.nan_to_num(data["t_final"], nan=self._record.latest_time) mask = np.all([time >= t_first, time <= t_latest], 0) results = results.intersection(np.array(data["tid"])[mask].tolist()) # Get the Taxon objects that match all parameter query results. taxa = [taxon for taxon in self._taxon_objs if taxon.tid in results] taxa.sort(key=lambda taxon: taxon.tid) return taxa def _get_taxa_richness_map(self): """Get a map of the number of taxa.""" objs = self._taxon_objs if objs: masks = np.stack([taxon.range_mask for taxon in objs]) richness_mask = masks.sum(axis=0).astype(int) else: richness_mask = np.zeros(self._grid.number_of_nodes, dtype=int) return richness_mask
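# Minimal post-run sketch, assuming ``se`` is a SpeciesEvolver instance that
# has been advanced through a simulation loop as in the docstring examples
# above. It plots the per-step taxa counts logged in ``record_data_frame``
# (columns ``time`` and ``taxa``, as documented above).
#
# import matplotlib.pyplot as plt
# df = se.record_data_frame
# plt.plot(df['time'], df['taxa'], marker='o')
# plt.xlabel('model time')
# plt.ylabel('extant taxa')
# plt.show()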
mit
shangwuhencc/scikit-learn
sklearn/mixture/gmm.py
68
31091
""" Gaussian Mixture Models. This implementation corresponds to frequentist (non-Bayesian) formulation of Gaussian Mixture Models. """ # Author: Ron Weiss <[email protected]> # Fabian Pedregosa <[email protected]> # Bertrand Thirion <[email protected]> import warnings import numpy as np from scipy import linalg from time import time from ..base import BaseEstimator from ..utils import check_random_state, check_array from ..utils.extmath import logsumexp from ..utils.validation import check_is_fitted from .. import cluster from sklearn.externals.six.moves import zip EPS = np.finfo(float).eps def log_multivariate_normal_density(X, means, covars, covariance_type='diag'): """Compute the log probability under a multivariate Gaussian distribution. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. means : array_like, shape (n_components, n_features) List of n_features-dimensional mean vectors for n_components Gaussians. Each row corresponds to a single mean vector. covars : array_like List of n_components covariance parameters for each Gaussian. The shape depends on `covariance_type`: (n_components, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' covariance_type : string Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. Returns ------- lpr : array_like, shape (n_samples, n_components) Array containing the log probabilities of each data point in X under each of the n_components multivariate Gaussian distributions. """ log_multivariate_normal_density_dict = { 'spherical': _log_multivariate_normal_density_spherical, 'tied': _log_multivariate_normal_density_tied, 'diag': _log_multivariate_normal_density_diag, 'full': _log_multivariate_normal_density_full} return log_multivariate_normal_density_dict[covariance_type]( X, means, covars) def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1, random_state=None): """Generate random samples from a Gaussian distribution. Parameters ---------- mean : array_like, shape (n_features,) Mean of the distribution. covar : array_like, optional Covariance of the distribution. The shape depends on `covariance_type`: scalar if 'spherical', (n_features) if 'diag', (n_features, n_features) if 'tied', or 'full' covariance_type : string, optional Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array, shape (n_features, n_samples) Randomly generated sample """ rng = check_random_state(random_state) n_dim = len(mean) rand = rng.randn(n_dim, n_samples) if n_samples == 1: rand.shape = (n_dim,) if covariance_type == 'spherical': rand *= np.sqrt(covar) elif covariance_type == 'diag': rand = np.dot(np.diag(np.sqrt(covar)), rand) else: s, U = linalg.eigh(covar) s.clip(0, out=s) # get rid of tiny negatives np.sqrt(s, out=s) U *= s rand = np.dot(U, rand) return (rand.T + mean).T class GMM(BaseEstimator): """Gaussian Mixture Model Representation of a Gaussian mixture model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a GMM distribution. Initializes parameters such that every mixture component has zero mean and identity covariance. 
Read more in the :ref:`User Guide <gmm>`. Parameters ---------- n_components : int, optional Number of mixture components. Defaults to 1. covariance_type : string, optional String describing the type of covariance parameters to use. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. random_state: RandomState or an int seed (None by default) A random number generator instance min_covar : float, optional Floor on the diagonal of the covariance matrix to prevent overfitting. Defaults to 1e-3. tol : float, optional Convergence threshold. EM iterations will stop when average gain in log-likelihood is below this threshold. Defaults to 1e-3. n_iter : int, optional Number of EM iterations to perform. n_init : int, optional Number of initializations to perform. the best results is kept params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 'w' for weights, 'm' for means, and 'c' for covars. Defaults to 'wmc'. init_params : string, optional Controls which parameters are updated in the initialization process. Can contain any combination of 'w' for weights, 'm' for means, and 'c' for covars. Defaults to 'wmc'. verbose : int, default: 0 Enable verbose output. If 1 then it always prints the current initialization and iteration step. If greater than 1 then it prints additionally the change and time needed for each step. Attributes ---------- weights_ : array, shape (`n_components`,) This attribute stores the mixing weights for each mixture component. means_ : array, shape (`n_components`, `n_features`) Mean parameters for each mixture component. covars_ : array Covariance parameters for each mixture component. The shape depends on `covariance_type`:: (n_components, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' converged_ : bool True when convergence was reached in fit(), False otherwise. See Also -------- DPGMM : Infinite gaussian mixture model, using the dirichlet process, fit with a variational algorithm VBGMM : Finite gaussian mixture model fit with a variational algorithm, better for situations where there might be too little data to get a good estimate of the covariance matrix. Examples -------- >>> import numpy as np >>> from sklearn import mixture >>> np.random.seed(1) >>> g = mixture.GMM(n_components=2) >>> # Generate random observations with two modes centered on 0 >>> # and 10 to use for training. >>> obs = np.concatenate((np.random.randn(100, 1), ... 10 + np.random.randn(300, 1))) >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE GMM(covariance_type='diag', init_params='wmc', min_covar=0.001, n_components=2, n_init=1, n_iter=100, params='wmc', random_state=None, thresh=None, tol=0.001, verbose=0) >>> np.round(g.weights_, 2) array([ 0.75, 0.25]) >>> np.round(g.means_, 2) array([[ 10.05], [ 0.06]]) >>> np.round(g.covars_, 2) #doctest: +SKIP array([[[ 1.02]], [[ 0.96]]]) >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS array([1, 1, 0, 0]...) >>> np.round(g.score([[0], [2], [9], [10]]), 2) array([-2.19, -4.58, -1.75, -1.21]) >>> # Refit the model on new data (initial parameters remain the >>> # same), this time with an even split between the two modes. 
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE GMM(covariance_type='diag', init_params='wmc', min_covar=0.001, n_components=2, n_init=1, n_iter=100, params='wmc', random_state=None, thresh=None, tol=0.001, verbose=0) >>> np.round(g.weights_, 2) array([ 0.5, 0.5]) """ def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=None, tol=1e-3, min_covar=1e-3, n_iter=100, n_init=1, params='wmc', init_params='wmc', verbose=0): if thresh is not None: warnings.warn("'thresh' has been replaced by 'tol' in 0.16 " " and will be removed in 0.18.", DeprecationWarning) self.n_components = n_components self.covariance_type = covariance_type self.thresh = thresh self.tol = tol self.min_covar = min_covar self.random_state = random_state self.n_iter = n_iter self.n_init = n_init self.params = params self.init_params = init_params self.verbose = verbose if covariance_type not in ['spherical', 'tied', 'diag', 'full']: raise ValueError('Invalid value for covariance_type: %s' % covariance_type) if n_init < 1: raise ValueError('GMM estimation requires at least one run') self.weights_ = np.ones(self.n_components) / self.n_components # flag to indicate exit status of fit() method: converged (True) or # n_iter reached (False) self.converged_ = False def _get_covars(self): """Covariance parameters for each mixture component. The shape depends on ``cvtype``:: (n_states, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_states, n_features) if 'diag', (n_states, n_features, n_features) if 'full' """ if self.covariance_type == 'full': return self.covars_ elif self.covariance_type == 'diag': return [np.diag(cov) for cov in self.covars_] elif self.covariance_type == 'tied': return [self.covars_] * self.n_components elif self.covariance_type == 'spherical': return [np.diag(cov) for cov in self.covars_] def _set_covars(self, covars): """Provide values for covariance""" covars = np.asarray(covars) _validate_covars(covars, self.covariance_type, self.n_components) self.covars_ = covars def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """ check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities def score(self, X, y=None): """Compute the log probability under the model. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. 
Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X """ logprob, _ = self.score_samples(X) return logprob def predict(self, X): """Predict label for data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) component memberships """ logprob, responsibilities = self.score_samples(X) return responsibilities.argmax(axis=1) def predict_proba(self, X): """Predict posterior probability of data under each Gaussian in the model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- responsibilities : array-like, shape = (n_samples, n_components) Returns the probability of the sample for each Gaussian (state) in the model. """ logprob, responsibilities = self.score_samples(X) return responsibilities def sample(self, n_samples=1, random_state=None): """Generate random samples from the model. Parameters ---------- n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array_like, shape (n_samples, n_features) List of samples """ check_is_fitted(self, 'means_') if random_state is None: random_state = self.random_state random_state = check_random_state(random_state) weight_cdf = np.cumsum(self.weights_) X = np.empty((n_samples, self.means_.shape[1])) rand = random_state.rand(n_samples) # decide which component to use for each sample comps = weight_cdf.searchsorted(rand) # for each component, generate all needed samples for comp in range(self.n_components): # occurrences of current component in X comp_in_X = (comp == comps) # number of those occurrences num_comp_in_X = comp_in_X.sum() if num_comp_in_X > 0: if self.covariance_type == 'tied': cv = self.covars_ elif self.covariance_type == 'spherical': cv = self.covars_[comp][0] else: cv = self.covars_[comp] X[comp_in_X] = sample_gaussian( self.means_[comp], cv, self.covariance_type, num_comp_in_X, random_state=random_state).T return X def fit_predict(self, X, y=None): """Fit and then predict labels for data. Warning: due to the final maximization step in the EM algorithm, with low iterations the prediction may not be 100% accurate Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) component memberships """ return self._fit(X, y).argmax(axis=1) def _fit(self, X, y=None, do_prediction=False): """Estimate model parameters with the EM algorithm. A initialization step is performed before entering the expectation-maximization (EM) algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string '' when creating the GMM object. Likewise, if you would like just to do an initialization, set n_iter=0. Parameters ---------- X : array_like, shape (n, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- responsibilities : array, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation. 
""" # initialization step X = check_array(X, dtype=np.float64, ensure_min_samples=2) if X.shape[0] < self.n_components: raise ValueError( 'GMM estimation with %s components, but got only %s samples' % (self.n_components, X.shape[0])) max_log_prob = -np.infty if self.verbose > 0: print('Expectation-maximization algorithm started.') for init in range(self.n_init): if self.verbose > 0: print('Initialization ' + str(init + 1)) start_init_time = time() if 'm' in self.init_params or not hasattr(self, 'means_'): self.means_ = cluster.KMeans( n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_ if self.verbose > 1: print('\tMeans have been initialized.') if 'w' in self.init_params or not hasattr(self, 'weights_'): self.weights_ = np.tile(1.0 / self.n_components, self.n_components) if self.verbose > 1: print('\tWeights have been initialized.') if 'c' in self.init_params or not hasattr(self, 'covars_'): cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1]) if not cv.shape: cv.shape = (1, 1) self.covars_ = \ distribute_covar_matrix_to_match_covariance_type( cv, self.covariance_type, self.n_components) if self.verbose > 1: print('\tCovariance matrices have been initialized.') # EM algorithms current_log_likelihood = None # reset self.converged_ to False self.converged_ = False # this line should be removed when 'thresh' is removed in v0.18 tol = (self.tol if self.thresh is None else self.thresh / float(X.shape[0])) for i in range(self.n_iter): if self.verbose > 0: print('\tEM iteration ' + str(i + 1)) start_iter_time = time() prev_log_likelihood = current_log_likelihood # Expectation step log_likelihoods, responsibilities = self.score_samples(X) current_log_likelihood = log_likelihoods.mean() # Check for convergence. # (should compare to self.tol when deprecated 'thresh' is # removed in v0.18) if prev_log_likelihood is not None: change = abs(current_log_likelihood - prev_log_likelihood) if self.verbose > 1: print('\t\tChange: ' + str(change)) if change < tol: self.converged_ = True if self.verbose > 0: print('\t\tEM algorithm converged.') break # Maximization step self._do_mstep(X, responsibilities, self.params, self.min_covar) if self.verbose > 1: print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format( time() - start_iter_time)) # if the results are better, keep it if self.n_iter: if current_log_likelihood > max_log_prob: max_log_prob = current_log_likelihood best_params = {'weights': self.weights_, 'means': self.means_, 'covars': self.covars_} if self.verbose > 1: print('\tBetter parameters were found.') if self.verbose > 1: print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format( time() - start_init_time)) # check the existence of an init param that was not subject to # likelihood computation issue. if np.isneginf(max_log_prob) and self.n_iter: raise RuntimeError( "EM algorithm was never able to compute a valid likelihood " + "given initial parameters. Try different init parameters " + "(or increasing n_init) or check for degenerate data.") if self.n_iter: self.covars_ = best_params['covars'] self.means_ = best_params['means'] self.weights_ = best_params['weights'] else: # self.n_iter == 0 occurs when using GMM within HMM # Need to make sure that there are responsibilities to output # Output zeros because it was just a quick initialization responsibilities = np.zeros((X.shape[0], self.n_components)) return responsibilities def fit(self, X, y=None): """Estimate model parameters with the EM algorithm. 
A initialization step is performed before entering the expectation-maximization (EM) algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string '' when creating the GMM object. Likewise, if you would like just to do an initialization, set n_iter=0. Parameters ---------- X : array_like, shape (n, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self """ self._fit(X, y) return self def _do_mstep(self, X, responsibilities, params, min_covar=0): """ Perform the Mstep of the EM algorithm and return the class weights """ weights = responsibilities.sum(axis=0) weighted_X_sum = np.dot(responsibilities.T, X) inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS) if 'w' in params: self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS) if 'm' in params: self.means_ = weighted_X_sum * inverse_weights if 'c' in params: covar_mstep_func = _covar_mstep_funcs[self.covariance_type] self.covars_ = covar_mstep_func( self, X, responsibilities, weighted_X_sum, inverse_weights, min_covar) return weights def _n_parameters(self): """Return the number of free parameters in the model.""" ndim = self.means_.shape[1] if self.covariance_type == 'full': cov_params = self.n_components * ndim * (ndim + 1) / 2. elif self.covariance_type == 'diag': cov_params = self.n_components * ndim elif self.covariance_type == 'tied': cov_params = ndim * (ndim + 1) / 2. elif self.covariance_type == 'spherical': cov_params = self.n_components mean_params = ndim * self.n_components return int(cov_params + mean_params + self.n_components - 1) def bic(self, X): """Bayesian information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- bic: float (the lower the better) """ return (-2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0])) def aic(self, X): """Akaike information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- aic: float (the lower the better) """ return - 2 * self.score(X).sum() + 2 * self._n_parameters() ######################################################################### # some helper routines ######################################################################### def _log_multivariate_normal_density_diag(X, means, covars): """Compute Gaussian log-density at X for a diagonal model""" n_samples, n_dim = X.shape lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1) + np.sum((means ** 2) / covars, 1) - 2 * np.dot(X, (means / covars).T) + np.dot(X ** 2, (1.0 / covars).T)) return lpr def _log_multivariate_normal_density_spherical(X, means, covars): """Compute Gaussian log-density at X for a spherical model""" cv = covars.copy() if covars.ndim == 1: cv = cv[:, np.newaxis] if covars.shape[1] == 1: cv = np.tile(cv, (1, X.shape[-1])) return _log_multivariate_normal_density_diag(X, means, cv) def _log_multivariate_normal_density_tied(X, means, covars): """Compute Gaussian log-density at X for a tied model""" cv = np.tile(covars, (means.shape[0], 1, 1)) return _log_multivariate_normal_density_full(X, means, cv) def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7): """Log probability for full covariance matrices.""" n_samples, n_dim = X.shape nmix = len(means) log_prob = np.empty((n_samples, nmix)) for c, (mu, cv) in enumerate(zip(means, covars)): try: cv_chol = linalg.cholesky(cv, 
lower=True) except linalg.LinAlgError: # The model is most probably stuck in a component with too # few observations, we need to reinitialize this components try: cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim), lower=True) except linalg.LinAlgError: raise ValueError("'covars' must be symmetric, " "positive-definite") cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol))) cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + n_dim * np.log(2 * np.pi) + cv_log_det) return log_prob def _validate_covars(covars, covariance_type, n_components): """Do basic checks on matrix covariance sizes and values """ from scipy import linalg if covariance_type == 'spherical': if len(covars) != n_components: raise ValueError("'spherical' covars have length n_components") elif np.any(covars <= 0): raise ValueError("'spherical' covars must be non-negative") elif covariance_type == 'tied': if covars.shape[0] != covars.shape[1]: raise ValueError("'tied' covars must have shape (n_dim, n_dim)") elif (not np.allclose(covars, covars.T) or np.any(linalg.eigvalsh(covars) <= 0)): raise ValueError("'tied' covars must be symmetric, " "positive-definite") elif covariance_type == 'diag': if len(covars.shape) != 2: raise ValueError("'diag' covars must have shape " "(n_components, n_dim)") elif np.any(covars <= 0): raise ValueError("'diag' covars must be non-negative") elif covariance_type == 'full': if len(covars.shape) != 3: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") elif covars.shape[1] != covars.shape[2]: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") for n, cv in enumerate(covars): if (not np.allclose(cv, cv.T) or np.any(linalg.eigvalsh(cv) <= 0)): raise ValueError("component %d of 'full' covars must be " "symmetric, positive-definite" % n) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components): """Create all the covariance matrices from a given template""" if covariance_type == 'spherical': cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)) elif covariance_type == 'tied': cv = tied_cv elif covariance_type == 'diag': cv = np.tile(np.diag(tied_cv), (n_components, 1)) elif covariance_type == 'full': cv = np.tile(tied_cv, (n_components, 1, 1)) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") return cv def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for diagonal cases""" avg_X2 = np.dot(responsibilities.T, X * X) * norm avg_means2 = gmm.means_ ** 2 avg_X_means = gmm.means_ * weighted_X_sum * norm return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar def _covar_mstep_spherical(*args): """Performing the covariance M step for spherical cases""" cv = _covar_mstep_diag(*args) return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1])) def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for full cases""" # Eq. 12 from K. 
Murphy, "Fitting a Conditional Linear Gaussian # Distribution" n_features = X.shape[1] cv = np.empty((gmm.n_components, n_features, n_features)) for c in range(gmm.n_components): post = responsibilities[:, c] mu = gmm.means_[c] diff = X - mu with np.errstate(under='ignore'): # Underflow Errors in doing post * X.T are not important avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS) cv[c] = avg_cv + min_covar * np.eye(n_features) return cv def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian # Distribution" avg_X2 = np.dot(X.T, X) avg_means2 = np.dot(gmm.means_.T, weighted_X_sum) out = avg_X2 - avg_means2 out *= 1. / X.shape[0] out.flat[::len(out) + 1] += min_covar return out _covar_mstep_funcs = {'spherical': _covar_mstep_spherical, 'diag': _covar_mstep_diag, 'tied': _covar_mstep_tied, 'full': _covar_mstep_full, }
bsd-3-clause
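A minimal usage sketch of the mixture-model class implemented in the file above, assuming the pre-0.18 sklearn.mixture.GMM interface it defines; the synthetic data and the choice of two components are illustrative only.

import numpy as np
from sklearn.mixture import GMM  # the pre-0.18 class defined in the file above

rng = np.random.RandomState(0)
# two well-separated Gaussian blobs in 2-D
X = np.vstack([rng.randn(200, 2), rng.randn(200, 2) + [5, 5]])

gmm = GMM(n_components=2, covariance_type='full', n_iter=100, random_state=0)
gmm.fit(X)                          # runs the EM loop implemented in _fit()
labels = gmm.predict(X)             # argmax of the responsibilities
post = gmm.predict_proba(X)         # per-component posteriors, shape (400, 2)
print(gmm.weights_, gmm.means_)
print("BIC:", gmm.bic(X), "AIC:", gmm.aic(X))  # criteria built on _n_parameters()
draws = gmm.sample(5, random_state=0)          # draw from the fitted mixture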
PatrickOReilly/scikit-learn
examples/plot_multioutput_face_completion.py
330
3019
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces X_test = test[:, :np.ceil(0.5 * n_pixels)] y_test = test[:, np.floor(0.5 * n_pixels):] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
bsd-3-clause
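The example above learns a mapping from upper-half to lower-half face pixels, i.e. a regression with many output columns per sample. A smaller sketch of the same multi-output idea on synthetic data, so it runs without downloading the Olivetti faces; the dimensions here are arbitrary.

import numpy as np
from sklearn.ensemble import ExtraTreesRegressor

rng = np.random.RandomState(0)
X = rng.randn(300, 10)                      # stand-in for "upper half" features
W = rng.randn(10, 4)
Y = np.dot(X, W) + 0.1 * rng.randn(300, 4)  # four output columns = "lower half"

est = ExtraTreesRegressor(n_estimators=10, random_state=0)
est.fit(X[:200], Y[:200])          # a single fit handles all output columns at once
print(est.predict(X[200:]).shape)  # (100, 4): one row of outputs per test sample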
appapantula/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
286
2378
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print(__doc__) # Authors: Clay Woolam <[email protected]> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired) plt.title(titles[i]) plt.text(.90, 0, "Unlabeled points are colored white") plt.show()
bsd-3-clause
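A minimal sketch of the semi-supervised setup used above: unlabeled samples are marked with -1 and LabelSpreading still assigns them labels, exposed through its transduction_ attribute. The 70% masking rate is an arbitrary choice for illustration.

import numpy as np
from sklearn import datasets
from sklearn.semi_supervised import LabelSpreading

rng = np.random.RandomState(0)
iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target.copy()
y[rng.rand(len(y)) < 0.7] = -1      # hide most of the labels, as in the example

model = LabelSpreading().fit(X, y)
mask = (y == -1)
print((model.transduction_[mask] == iris.target[mask]).mean())  # accuracy on hidden labels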
cjayb/mne-python
examples/time_frequency/plot_source_power_spectrum.py
19
1959
""" ====================================================== Compute source power spectral density (PSD) in a label ====================================================== Returns an STC file containing the PSD (in dB) of each of the sources within a label. """ # Authors: Alexandre Gramfort <[email protected]> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, compute_source_psd print(__doc__) ############################################################################### # Set parameters data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_label = data_path + '/MEG/sample/labels/Aud-lh.label' # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, verbose=False) events = mne.find_events(raw, stim_channel='STI 014') inverse_operator = read_inverse_operator(fname_inv) raw.info['bads'] = ['MEG 2443', 'EEG 053'] # picks MEG gradiometers picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False, exclude='bads') tmin, tmax = 0, 120 # use the first 120s of data fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2 label = mne.read_label(fname_label) stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM", tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, pick_ori="normal", n_fft=n_fft, label=label, dB=True) stc.save('psd_dSPM') ############################################################################### # View PSD of sources in label plt.plot(stc.times, stc.data.T) plt.xlabel('Frequency (Hz)') plt.ylabel('PSD (dB)') plt.title('Source Power Spectrum (PSD)') plt.show()
bsd-3-clause
dlhocker/neurocontrol-
threechoice_sim.py
1
8536
#simulation to perform many runs of the healthy vs. unhealthy dynamics import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import math import sys from tf_funs import * from threechoice_dynamics import * #tf.set_random_seed(101) pi = math.pi #np.random.seed(101) #sess = tf.Session() session_conf = tf.ConfigProto( intra_op_parallelism_threads=4, inter_op_parallelism_threads=4) sess = tf.Session(config=session_conf) #define the gradients and parameters of the simulation xdim = 2 #state dimension udim = 2 #control dimension T = 1000 #number of steps #tensors for state, control, observation, covariance X_est = tf.placeholder(shape=xdim,dtype=tf.float32,name='X_est') #the state estimate PI_est = tf.placeholder(shape = (xdim,xdim),dtype=tf.float32, name = 'PI_est') #estimated covariance Y_tp1 = tf.placeholder(shape= xdim,dtype=tf.float32, name = 'Y_tp1') #the most recent observation #Q = tf.placeholder(dtype=tf.float32) #R = tf.placeholder(dtype=tf.float32) Control = tf.placeholder(shape = udim, dtype=tf.float32, name='Control') #define the noise for the system dt = 1.0e-4 gamma = 1.0e-4 sigsstate = (1./dt)*(1e-9)#fix these if len(sys.argv) < 5: sigsobs = (0.001)**2 #fix these else: sigsobs = float(sys.argv[4]) Q = sigsstate*np.eye(xdim) Q_tf = tf.constant(Q,dtype=tf.float32, name = 'Q') #state noise covariance R = sigsobs*np.eye(xdim) R_tf = tf.constant(R,dtype=tf.float32, name = 'R') #observation noise covariance #graphs for updating state and observation true_model_est = grad_threechoice_tf(X_est[0],X_est[1],dt,Control) true_model_est_null = grad_threechoice_tf(X_est[0],X_est[1],dt,[0.,0.]) #state est. gradient null control target_model_est = grad_threechoice_healthy_tf(X_est[0],X_est[1],dt) #state est. target dynamics #the non-tensorflow anonymous functions, for generalizations true_nontf = lambda x,c: grad_threechoice(x[0],x[1],dt,c) target_nontf = lambda x: grad_threechoice_healthy(x[0],x[1],dt) X_plus,PI_plus = EKF(X_est,Y_tp1,PI_est,true_model_est,true_model_est_null,Q_tf,R_tf,xdim,dt) Cnew = myopicController_meanonly( X_est,PI_est,Control,gamma,true_model_est, true_model_est_null,target_model_est,xdim,udim) useMO = int(sys.argv[3]) #handle to use of mean-only control if useMO ==1: print('using mean-only control') Cnew = myopicController_meanonly( X_est,PI_est,Control,gamma,true_model_est, true_model_est_null,target_model_est,xdim,udim) else: print('using full myopic control, but non-Jacobian-differential form') Cnew = myopicController_noBdiff( X_est,PI_est,Control,gamma,true_model_est, true_model_est_null,target_model_est,xdim,udim) #covariance prediction update graph Ak = dynamics_linearized(X_est,true_model_est_null,xdim) #the full loss function, not just loss of mean values loss_tf = loss_full(X_est,PI_est,true_model_est,target_model_est) ns = 500 #number of samples #------- the main simulation loop. 
I might make this a function if i use it a lot #make these numpy version statenoise = np.random.normal(0,sigsstate**0.5,[xdim,T,ns]) obsnoise = np.random.normal(0,sigsobs**0.5,[xdim,T,ns]) G = dt**(0.5)*np.eye(xdim) #system noise matrix, for covariance prediction ##save bad values of noise for lag 10 #import pickle #fname = "noise_bad_lag10_threechoice" #index = ['statenoise','obsnoise'] #alldata = [index,statenoise,obsnoise] #pickle.dump( alldata, open( fname, "wb" ) ) x_estvec = np.zeros((xdim,T,ns)) xvec = np.zeros((xdim,T,ns)) yvec = np.zeros((xdim,T,ns)) x_targvec = np.zeros((xdim,T,ns)) PI_estvec = np.zeros((xdim,xdim,T,ns)) contall = np.zeros((udim,T,ns)) loss = np.zeros((4,T,ns)) loss_nocont = np.zeros((4,T,ns)) targetdynam_vec = np.zeros((2,T,ns)) #the target dynamics for each step lag = int(sys.argv[2]) #how many steps in the past will we receive observations #lag = 10 init = tf.global_variables_initializer() sess.run(init) initvals = np.zeros((xdim,ns)) initvals = np.zeros((xdim,ns)) for m in range(ns): #x_init = [0.9,0.76] #initial state x_init = np.random.uniform(0.,1.,(2,)) initvals[:,m] = x_init print(x_init) PI_init = [[0.,0.],[0.,0.]] #initial covariance c_init = [0.,0.] xest_k = x_init pi_k = PI_init c_k = c_init x_k = x_init x_targ_k = x_init ykp1 = np.array(x_init) x_estvec[:,0,m] = x_init xvec[:,0,m] = x_init x_targvec[:,0,m] = x_init PI_estvec[:,:,0,m] = PI_init print(m) #go ahead and propagate lag-steps ahead before starting state estimation and such for k in range(1,lag): #update actual dynamics grad_cont = true_nontf(xvec[:,k-1,m],c_init) grad_targ = target_nontf(x_targvec[:,k-1,m]) xvec[:,k,m] = xvec[:,k-1,m] + grad_cont + statenoise[:,k,m] x_targvec[:,k,m] = x_targvec[:,k-1,m] + grad_targ + statenoise[:,k,m] yvec[:,k,m] = xvec[:,k,m] + obsnoise[:,k,m] #set estimates in beginning lags to initial state x_estvec[:,k,m] = x_init PI_estvec[:,:,k,m] = PI_init for k in range(max(1,lag),T): #update actual dynamics grad_cont = true_nontf(xvec[:,k-1,m],contall[:,k-1,m]) grad_targ = target_nontf(x_targvec[:,k-1,m]) xvec[:,k,m] = xvec[:,k-1,m] + grad_cont + statenoise[:,k,m] x_targvec[:,k,m] = x_targvec[:,k-1,m] + grad_targ + statenoise[:,k,m] yvec[:,k,m] = xvec[:,k,m] + obsnoise[:,k,m] #run state estimator to update estimate of state k-lag if k==0: print([k,x_estvec[:,k-lag-1,m], PI_estvec[:,:,k-lag-1,m], contall[:,k-lag-1,m], yvec[:,k-lag,m]]) test = sess.run([X_plus,PI_plus], {X_est:x_estvec[:,k-lag-1,m], PI_est:PI_estvec[:,:,k-lag-1,m], Control:contall[:,k-lag-1,m], Y_tp1:yvec[:,k-lag,m]}) x_estvec[:,k-lag,m] = test[0] PI_estvec[:,:,k-lag,m] = test[1] #predit lag states in the future to calculate control x_est_n = x_estvec[:,k-lag,m] PI_est_n = PI_estvec[:,:,k-lag,m] for n in range(lag): #state prediction step grad_cont = true_nontf(x_est_n,contall[:,k-lag-1+n,m]) #covariance prediction step. calculate jacobian Ak_n= sess.run(Ak, {X_est: x_est_n, PI_est: PI_est_n, Control: contall[:,0,m], Y_tp1:yvec[:,0,m]}) x_est_n = x_est_n + grad_cont PI_est_n = np.matmul(Ak_n,PI_est_n) + np.matmul(PI_est_n,np.transpose(Ak_n)) + np.matmul( np.matmul(G,Q),np.transpose(G)) #run myopic controller using predicted state estimated. 
cov, doesnt matter #find control for time k c_k = sess.run(Cnew,{X_est:x_est_n, PI_est:PI_est_n, Control:contall[:,k-1,m], Y_tp1:yvec[:,k,m]}) contall[:,k,m] = c_k #loss[k-lag,m] = np.linalg.norm(true_nontf(x_estvec[:,k-lag,m],contall[:,k-lag,m])- # target_nontf(x_estvec[:,k-lag,m])) ltest = sess.run(loss_tf,{X_est:x_estvec[:,k-lag,m], PI_est:PI_estvec[:,:,k-lag,m], Control:contall[:,k-lag,m] }) loss[:,k-lag,m] = ltest #loss_nocont[k-lag,m] = np.linalg.norm( # true_nontf(x_estvec[:,k-lag,m],[0.,0.])- # target_nontf(x_estvec[:,k-lag,m])) ltest = sess.run(loss_tf,{X_est:x_estvec[:,k-lag,m], PI_est:PI_estvec[:,:,k-lag,m], Control:np.array([0.,0.]) }) loss_nocont[:,k-lag,m] = ltest #target dynamics at each step, given estimated state targetdynam_vec[:,k-lag,m] = target_nontf(x_estvec[:,k-lag,m]) #set final lag estimate values to esimate for k in range(lag): x_targvec[:,T-lag+k,m] = x_targvec[:,T-lag-1,m] x_estvec[:,T-lag+k,m] = x_estvec[:,T-lag-1,m] PI_estvec[:,:,T-lag+k,m] = PI_estvec[:,:,T-lag-1,m] #------------ save import pickle #fname = 'test.p' fname = sys.argv[1] index = ['x_estvec','x_targvec','PI_estvec','contall','loss','loss_nocont'] alldata = [index,x_estvec,x_targvec,PI_estvec,contall,loss,loss_nocont] pickle.dump( alldata, open( fname, "wb" ) )
bsd-2-clause
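The simulation above wraps an extended Kalman filter (the EKF graph plus a lag-compensating prediction loop) around the three-choice dynamics. A standalone NumPy sketch of the same predict/correct structure, using a hypothetical 2-state linear system with an identity observation model in place of grad_threechoice.

import numpy as np

def ekf_step(x_est, P_est, y_obs, f, F_jac, Q, R):
    # prediction: propagate the estimate and its covariance through the dynamics
    x_pred = f(x_est)
    F = F_jac(x_est)
    P_pred = F @ P_est @ F.T + Q
    # correction: Kalman gain and measurement update (observation model is identity)
    K = P_pred @ np.linalg.inv(P_pred + R)
    x_new = x_pred + K @ (y_obs - x_pred)
    P_new = (np.eye(len(x_new)) - K) @ P_pred
    return x_new, P_new

# hypothetical stable 2-state linear system standing in for grad_threechoice
A = np.array([[0.99, 0.01], [0.0, 0.98]])
f = lambda x: A @ x
F_jac = lambda x: A          # Jacobian of linear dynamics is just A
Q, R = 1e-4 * np.eye(2), 1e-2 * np.eye(2)

rng = np.random.RandomState(0)
x_true, x_est, P = np.array([1.0, -1.0]), np.zeros(2), np.eye(2)
for _ in range(200):
    x_true = A @ x_true + rng.multivariate_normal(np.zeros(2), Q)
    y = x_true + rng.multivariate_normal(np.zeros(2), R)
    x_est, P = ekf_step(x_est, P, y, f, F_jac, Q, R)
print(x_true, x_est)         # the estimate tracks the true state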
0asa/scikit-learn
benchmarks/bench_sparsify.py
28
3380
""" Benchmark SGD prediction time with dense/sparse coefficients. Invoke with ----------- $ kernprof.py -l sparsity_benchmark.py $ python -m line_profiler sparsity_benchmark.py.lprof Typical output -------------- input data sparsity: 0.050000 true coef sparsity: 0.000100 test data sparsity: 0.027400 model sparsity: 0.000024 r^2 on test data (dense model) : 0.233651 r^2 on test data (sparse model) : 0.233651 Wrote profile results to sparsity_benchmark.py.lprof Timer unit: 1e-06 s File: sparsity_benchmark.py Function: benchmark_dense_predict at line 51 Total time: 0.532979 s Line # Hits Time Per Hit % Time Line Contents ============================================================== 51 @profile 52 def benchmark_dense_predict(): 53 301 640 2.1 0.1 for _ in range(300): 54 300 532339 1774.5 99.9 clf.predict(X_test) File: sparsity_benchmark.py Function: benchmark_sparse_predict at line 56 Total time: 0.39274 s Line # Hits Time Per Hit % Time Line Contents ============================================================== 56 @profile 57 def benchmark_sparse_predict(): 58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test) 59 301 477 1.6 0.1 for _ in range(300): 60 300 381409 1271.4 97.1 clf.predict(X_test_sparse) """ from scipy.sparse.csr import csr_matrix import numpy as np from sklearn.linear_model.stochastic_gradient import SGDRegressor from sklearn.metrics import r2_score np.random.seed(42) def sparsity_ratio(X): return np.count_nonzero(X) / float(n_samples * n_features) n_samples, n_features = 5000, 300 X = np.random.randn(n_samples, n_features) inds = np.arange(n_samples) np.random.shuffle(inds) X[inds[n_features/1.2:]] = 0 # sparsify input print("input data sparsity: %f" % sparsity_ratio(X)) coef = 3 * np.random.randn(n_features) inds = np.arange(n_features) np.random.shuffle(inds) coef[inds[n_features/2:]] = 0 # sparsify coef print("true coef sparsity: %f" % sparsity_ratio(coef)) y = np.dot(X, coef) # add noise y += 0.01 * np.random.normal((n_samples,)) # Split data in train set and test set n_samples = X.shape[0] X_train, y_train = X[:n_samples / 2], y[:n_samples / 2] X_test, y_test = X[n_samples / 2:], y[n_samples / 2:] print("test data sparsity: %f" % sparsity_ratio(X_test)) ############################################################################### clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000) clf.fit(X_train, y_train) print("model sparsity: %f" % sparsity_ratio(clf.coef_)) @profile def benchmark_dense_predict(): for _ in range(300): clf.predict(X_test) @profile def benchmark_sparse_predict(): X_test_sparse = csr_matrix(X_test) for _ in range(300): clf.predict(X_test_sparse) def score(y_test, y_pred, case): r2 = r2_score(y_test, y_pred) print("r^2 on test data (%s) : %f" % (case, r2)) score(y_test, clf.predict(X_test), 'dense model') benchmark_dense_predict() clf.sparsify() score(y_test, clf.predict(X_test), 'sparse model') benchmark_sparse_predict()
bsd-3-clause
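A minimal sketch of the sparsify() call sequence benchmarked above, without the kernprof @profile decorators so it runs as a plain script; the problem size and penalty settings are made up.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(42)
X = rng.randn(500, 50)
coef = np.zeros(50)
coef[:5] = 3.0                               # mostly-zero true coefficients
y = np.dot(X, coef)

clf = SGDRegressor(penalty='l1', alpha=0.2, fit_intercept=True)
clf.fit(X, y)
dense_pred = clf.predict(X)
clf.sparsify()                               # store coef_ as a scipy sparse matrix
sparse_pred = clf.predict(csr_matrix(X))     # prediction also works on sparse input
print(np.allclose(dense_pred, sparse_pred))  # same predictions either way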
jlegendary/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py
69
28184
""" This is an object-orient plotting library. A procedural interface is provided by the companion pylab module, which may be imported directly, e.g:: from pylab import * or using ipython:: ipython -pylab For the most part, direct use of the object-oriented library is encouraged when programming rather than working interactively. The exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`, :func:`~matplotlib.pyplot.subplot`, :func:`~matplotlib.backends.backend_qt4agg.show`, and :func:`~pyplot.savefig`, which can greatly simplify scripting. Modules include: :mod:`matplotlib.axes` defines the :class:`~matplotlib.axes.Axes` class. Most pylab commands are wrappers for :class:`~matplotlib.axes.Axes` methods. The axes module is the highest level of OO access to the library. :mod:`matplotlib.figure` defines the :class:`~matplotlib.figure.Figure` class. :mod:`matplotlib.artist` defines the :class:`~matplotlib.artist.Artist` base class for all classes that draw things. :mod:`matplotlib.lines` defines the :class:`~matplotlib.lines.Line2D` class for drawing lines and markers :mod`matplotlib.patches` defines classes for drawing polygons :mod:`matplotlib.text` defines the :class:`~matplotlib.text.Text`, :class:`~matplotlib.text.TextWithDash`, and :class:`~matplotlib.text.Annotate` classes :mod:`matplotlib.image` defines the :class:`~matplotlib.image.AxesImage` and :class:`~matplotlib.image.FigureImage` classes :mod:`matplotlib.collections` classes for efficient drawing of groups of lines or polygons :mod:`matplotlib.colors` classes for interpreting color specifications and for making colormaps :mod:`matplotlib.cm` colormaps and the :class:`~matplotlib.image.ScalarMappable` mixin class for providing color mapping functionality to other classes :mod:`matplotlib.ticker` classes for calculating tick mark locations and for formatting tick labels :mod:`matplotlib.backends` a subpackage with modules for various gui libraries and output formats The base matplotlib namespace includes: :data:`~matplotlib.rcParams` a global dictionary of default configuration settings. It is initialized by code which may be overridded by a matplotlibrc file. :func:`~matplotlib.rc` a function for setting groups of rcParams values :func:`~matplotlib.use` a function for setting the matplotlib backend. If used, this function must be called immediately after importing matplotlib for the first time. In particular, it must be called **before** importing pylab (if pylab is imported). matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a host of others. """ from __future__ import generators __version__ = '0.98.5.2' __revision__ = '$Revision: 6660 $' __date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $' import os, re, shutil, subprocess, sys, warnings import distutils.sysconfig import distutils.version NEWCONFIG = False # Needed for toolkit setuptools support if 0: try: __import__('pkg_resources').declare_namespace(__name__) except ImportError: pass # must not have setuptools if not hasattr(sys, 'argv'): # for modpython sys.argv = ['modpython'] """ Manage user customizations through a rc file. 
The default file location is given in the following order - environment variable MATPLOTLIBRC - HOME/.matplotlib/matplotlibrc if HOME is defined - PATH/matplotlibrc where PATH is the return value of get_data_path() """ import sys, os, tempfile from rcsetup import defaultParams, validate_backend, validate_toolbar from rcsetup import validate_cairo_format major, minor1, minor2, s, tmp = sys.version_info _python24 = major>=2 and minor1>=4 # the havedate check was a legacy from old matplotlib which preceeded # datetime support _havedate = True #try: # import pkg_resources # pkg_resources is part of setuptools #except ImportError: _have_pkg_resources = False #else: _have_pkg_resources = True if not _python24: raise ImportError('matplotlib requires Python 2.4 or later') import numpy nn = numpy.__version__.split('.') if not (int(nn[0]) >= 1 and int(nn[1]) >= 1): raise ImportError( 'numpy 1.1 or later is required; you have %s' % numpy.__version__) def is_string_like(obj): if hasattr(obj, 'shape'): return 0 try: obj + '' except (TypeError, ValueError): return 0 return 1 def _is_writable_dir(p): """ p is a string pointing to a putative writable dir -- return True p is such a string, else False """ try: p + '' # test is string like except TypeError: return False try: t = tempfile.TemporaryFile(dir=p) t.write('1') t.close() except OSError: return False else: return True class Verbose: """ A class to handle reporting. Set the fileo attribute to any file instance to handle the output. Default is sys.stdout """ levels = ('silent', 'helpful', 'debug', 'debug-annoying') vald = dict( [(level, i) for i,level in enumerate(levels)]) # parse the verbosity from the command line; flags look like # --verbose-silent or --verbose-helpful _commandLineVerbose = None for arg in sys.argv[1:]: if not arg.startswith('--verbose-'): continue _commandLineVerbose = arg[10:] def __init__(self): self.set_level('silent') self.fileo = sys.stdout def set_level(self, level): 'set the verbosity to one of the Verbose.levels strings' if self._commandLineVerbose is not None: level = self._commandLineVerbose if level not in self.levels: raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels)) self.level = level def set_fileo(self, fname): std = { 'sys.stdout': sys.stdout, 'sys.stderr': sys.stderr, } if fname in std: self.fileo = std[fname] else: try: fileo = file(fname, 'w') except IOError: raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname) else: self.fileo = fileo def report(self, s, level='helpful'): """ print message s to self.fileo if self.level>=level. 
Return value indicates whether a message was issued """ if self.ge(level): print >>self.fileo, s return True return False def wrap(self, fmt, func, level='helpful', always=True): """ return a callable function that wraps func and reports it output through the verbose handler if current verbosity level is higher than level if always is True, the report will occur on every function call; otherwise only on the first time the function is called """ assert callable(func) def wrapper(*args, **kwargs): ret = func(*args, **kwargs) if (always or not wrapper._spoke): spoke = self.report(fmt%ret, level) if not wrapper._spoke: wrapper._spoke = spoke return ret wrapper._spoke = False wrapper.__doc__ = func.__doc__ return wrapper def ge(self, level): 'return true if self.level is >= level' return self.vald[self.level]>=self.vald[level] verbose=Verbose() def checkdep_dvipng(): try: s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) line = s.stdout.readlines()[1] v = line.split()[-1] return v except (IndexError, ValueError, OSError): return None def checkdep_ghostscript(): try: if sys.platform == 'win32': command_args = ['gswin32c', '--version'] else: command_args = ['gs', '--version'] s = subprocess.Popen(command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) v = s.stdout.read()[:-1] return v except (IndexError, ValueError, OSError): return None def checkdep_tex(): try: s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) line = s.stdout.readlines()[0] pattern = '3\.1\d+' match = re.search(pattern, line) v = match.group(0) return v except (IndexError, ValueError, AttributeError, OSError): return None def checkdep_pdftops(): try: s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) for line in s.stderr: if 'version' in line: v = line.split()[-1] return v except (IndexError, ValueError, UnboundLocalError, OSError): return None def compare_versions(a, b): "return True if a is greater than or equal to b" if a: a = distutils.version.LooseVersion(a) b = distutils.version.LooseVersion(b) if a>=b: return True else: return False else: return False def checkdep_ps_distiller(s): if not s: return False flag = True gs_req = '7.07' gs_sugg = '7.07' gs_v = checkdep_ghostscript() if compare_versions(gs_v, gs_sugg): pass elif compare_versions(gs_v, gs_req): verbose.report(('ghostscript-%s found. 
ghostscript-%s or later ' 'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg)) else: flag = False warnings.warn(('matplotlibrc ps.usedistiller option can not be used ' 'unless ghostscript-%s or later is installed on your system') % gs_req) if s == 'xpdf': pdftops_req = '3.0' pdftops_req_alt = '0.9' # poppler version numbers, ugh pdftops_v = checkdep_pdftops() if compare_versions(pdftops_v, pdftops_req): pass elif compare_versions(pdftops_v, pdftops_req_alt) and not \ compare_versions(pdftops_v, '1.0'): pass else: flag = False warnings.warn(('matplotlibrc ps.usedistiller can not be set to ' 'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req) if flag: return s else: return False def checkdep_usetex(s): if not s: return False tex_req = '3.1415' gs_req = '7.07' gs_sugg = '7.07' dvipng_req = '1.5' flag = True tex_v = checkdep_tex() if compare_versions(tex_v, tex_req): pass else: flag = False warnings.warn(('matplotlibrc text.usetex option can not be used ' 'unless TeX-%s or later is ' 'installed on your system') % tex_req) dvipng_v = checkdep_dvipng() if compare_versions(dvipng_v, dvipng_req): pass else: flag = False warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg ' 'backend unless dvipng-1.5 or later is ' 'installed on your system') gs_v = checkdep_ghostscript() if compare_versions(gs_v, gs_sugg): pass elif compare_versions(gs_v, gs_req): verbose.report(('ghostscript-%s found. ghostscript-%s or later is ' 'recommended for use with the text.usetex ' 'option.') % (gs_v, gs_sugg)) else: flag = False warnings.warn(('matplotlibrc text.usetex can not be used ' 'unless ghostscript-%s or later is ' 'installed on your system') % gs_req) return flag def _get_home(): """Find user's home directory if possible. Otherwise raise error. :see: http://mail.python.org/pipermail/python-list/2005-February/263921.html """ path='' try: path=os.path.expanduser("~") except: pass if not os.path.isdir(path): for evar in ('HOME', 'USERPROFILE', 'TMP'): try: path = os.environ[evar] if os.path.isdir(path): break except: pass if path: return path else: raise RuntimeError('please define environment variable $HOME') get_home = verbose.wrap('$HOME=%s', _get_home, always=False) def _get_configdir(): """ Return the string representing the configuration dir. default is HOME/.matplotlib. you can override this with the MPLCONFIGDIR environment variable """ configdir = os.environ.get('MPLCONFIGDIR') if configdir is not None: if not _is_writable_dir(configdir): raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir) return configdir h = get_home() p = os.path.join(get_home(), '.matplotlib') if os.path.exists(p): if not _is_writable_dir(p): raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. 
You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h)) else: if not _is_writable_dir(h): raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h) os.mkdir(p) return p get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False) def _get_data_path(): 'get the path to matplotlib data' if 'MATPLOTLIBDATA' in os.environ: path = os.environ['MATPLOTLIBDATA'] if not os.path.isdir(path): raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory') return path path = os.sep.join([os.path.dirname(__file__), 'mpl-data']) if os.path.isdir(path): return path # setuptools' namespace_packages may highjack this init file # so need to try something known to be in matplotlib, not basemap import matplotlib.afm path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data']) if os.path.isdir(path): return path # py2exe zips pure python, so still need special check if getattr(sys,'frozen',None): path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data') if os.path.isdir(path): return path else: # Try again assuming we need to step up one more directory path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0], 'mpl-data') if os.path.isdir(path): return path else: # Try again assuming sys.path[0] is a dir not a exe path = os.path.join(sys.path[0], 'mpl-data') if os.path.isdir(path): return path raise RuntimeError('Could not find the matplotlib data files') def _get_data_path_cached(): if defaultParams['datapath'][0] is None: defaultParams['datapath'][0] = _get_data_path() return defaultParams['datapath'][0] get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached, always=False) def get_example_data(fname): """ return a filehandle to one of the example files in mpl-data/example *fname* the name of one of the files in mpl-data/example """ datadir = os.path.join(get_data_path(), 'example') fullpath = os.path.join(datadir, fname) if not os.path.exists(fullpath): raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%( fname, datadir)) return file(fullpath, 'rb') def get_py2exe_datafiles(): datapath = get_data_path() head, tail = os.path.split(datapath) d = {} for root, dirs, files in os.walk(datapath): # Need to explicitly remove cocoa_agg files or py2exe complains # NOTE I dont know why, but do as previous version if 'Matplotlib.nib' in files: files.remove('Matplotlib.nib') files = [os.path.join(root, filename) for filename in files] root = root.replace(tail, 'mpl-data') root = root[root.index('mpl-data'):] d[root] = files return d.items() def matplotlib_fname(): """ Return the path to the rc file Search order: * current working dir * environ var MATPLOTLIBRC * HOME/.matplotlib/matplotlibrc * MATPLOTLIBDATA/matplotlibrc """ oldname = os.path.join( os.getcwd(), '.matplotlibrc') if os.path.exists(oldname): print >> sys.stderr, """\ WARNING: Old rc filename ".matplotlibrc" found in working dir and and renamed to new default rc file name "matplotlibrc" (no leading"dot"). 
""" shutil.move('.matplotlibrc', 'matplotlibrc') home = get_home() oldname = os.path.join( home, '.matplotlibrc') if os.path.exists(oldname): configdir = get_configdir() newname = os.path.join(configdir, 'matplotlibrc') print >> sys.stderr, """\ WARNING: Old rc filename "%s" found and renamed to new default rc file name "%s"."""%(oldname, newname) shutil.move(oldname, newname) fname = os.path.join( os.getcwd(), 'matplotlibrc') if os.path.exists(fname): return fname if 'MATPLOTLIBRC' in os.environ: path = os.environ['MATPLOTLIBRC'] if os.path.exists(path): fname = os.path.join(path, 'matplotlibrc') if os.path.exists(fname): return fname fname = os.path.join(get_configdir(), 'matplotlibrc') if os.path.exists(fname): return fname path = get_data_path() # guaranteed to exist or raise fname = os.path.join(path, 'matplotlibrc') if not os.path.exists(fname): warnings.warn('Could not find matplotlibrc; using defaults') return fname _deprecated_map = { 'text.fontstyle': 'font.style', 'text.fontangle': 'font.style', 'text.fontvariant': 'font.variant', 'text.fontweight': 'font.weight', 'text.fontsize': 'font.size', 'tick.size' : 'tick.major.size', } class RcParams(dict): """ A dictionary object including validation validating functions are defined and associated with rc parameters in :mod:`matplotlib.rcsetup` """ validate = dict([ (key, converter) for key, (default, converter) in \ defaultParams.iteritems() ]) def __setitem__(self, key, val): try: if key in _deprecated_map.keys(): alt = _deprecated_map[key] warnings.warn('%s is deprecated in matplotlibrc. Use %s \ instead.'% (key, alt)) key = alt cval = self.validate[key](val) dict.__setitem__(self, key, cval) except KeyError: raise KeyError('%s is not a valid rc parameter.\ See rcParams.keys() for a list of valid parameters.'%key) def rc_params(fail_on_error=False): 'Return the default params updated from the values in the rc file' fname = matplotlib_fname() if not os.path.exists(fname): # this should never happen, default in mpl-data should always be found message = 'could not find rc file; returning defaults' ret = RcParams([ (key, default) for key, (default, converter) in \ defaultParams.iteritems() ]) warnings.warn(message) return ret cnt = 0 rc_temp = {} for line in file(fname): cnt += 1 strippedline = line.split('#',1)[0].strip() if not strippedline: continue tup = strippedline.split(':',1) if len(tup) !=2: warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\ (cnt, line, fname)) continue key, val = tup key = key.strip() val = val.strip() if key in rc_temp: warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt)) rc_temp[key] = (val, line, cnt) ret = RcParams([ (key, default) for key, (default, converter) in \ defaultParams.iteritems() ]) for key in ('verbose.level', 'verbose.fileo'): if key in rc_temp: val, line, cnt = rc_temp.pop(key) if fail_on_error: ret[key] = val # try to convert to proper type or raise else: try: ret[key] = val # try to convert to proper type or skip except Exception, msg: warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \ "%s"\n\t%s' % (val, cnt, line, fname, msg)) verbose.set_level(ret['verbose.level']) verbose.set_fileo(ret['verbose.fileo']) for key, (val, line, cnt) in rc_temp.iteritems(): if key in defaultParams: if fail_on_error: ret[key] = val # try to convert to proper type or raise else: try: ret[key] = val # try to convert to proper type or skip except Exception, msg: warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \ "%s"\n\t%s' % (val, cnt, line, fname, msg)) else: print >> 
sys.stderr, """ Bad key "%s" on line %d in %s. You probably need to get an updated matplotlibrc file from http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source distribution""" % (key, cnt, fname) if ret['datapath'] is None: ret['datapath'] = get_data_path() if not ret['text.latex.preamble'] == ['']: verbose.report(""" ***************************************************************** You have the following UNSUPPORTED LaTeX preamble customizations: %s Please do not ask for support with these customizations active. ***************************************************************** """% '\n'.join(ret['text.latex.preamble']), 'helpful') verbose.report('loaded rc file %s'%fname) return ret # this is the instance used by the matplotlib classes rcParams = rc_params() rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \ defaultParams.iteritems() ]) rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller']) rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex']) def rc(group, **kwargs): """ Set the current rc params. Group is the grouping for the rc, eg. for ``lines.linewidth`` the group is ``lines``, for ``axes.facecolor``, the group is ``axes``, and so on. Group may also be a list or tuple of group names, eg. (*xtick*, *ytick*). *kwargs* is a dictionary attribute name/value pairs, eg:: rc('lines', linewidth=2, color='r') sets the current rc params and is equivalent to:: rcParams['lines.linewidth'] = 2 rcParams['lines.color'] = 'r' The following aliases are available to save typing for interactive users: ===== ================= Alias Property ===== ================= 'lw' 'linewidth' 'ls' 'linestyle' 'c' 'color' 'fc' 'facecolor' 'ec' 'edgecolor' 'mew' 'markeredgewidth' 'aa' 'antialiased' ===== ================= Thus you could abbreviate the above rc command as:: rc('lines', lw=2, c='r') Note you can use python's kwargs dictionary facility to store dictionaries of default parameters. Eg, you can customize the font rc as follows:: font = {'family' : 'monospace', 'weight' : 'bold', 'size' : 'larger'} rc('font', **font) # pass in the font dict as kwargs This enables you to easily switch between several configurations. Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default rc params after changes. """ aliases = { 'lw' : 'linewidth', 'ls' : 'linestyle', 'c' : 'color', 'fc' : 'facecolor', 'ec' : 'edgecolor', 'mew' : 'markeredgewidth', 'aa' : 'antialiased', } if is_string_like(group): group = (group,) for g in group: for k,v in kwargs.items(): name = aliases.get(k) or k key = '%s.%s' % (g, name) if key not in rcParams: raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' % (key, g, name)) rcParams[key] = v def rcdefaults(): """ Restore the default rc params - the ones that were created at matplotlib load time. """ rcParams.update(rcParamsDefault) if NEWCONFIG: #print "importing from reorganized config system!" try: from config import rcParams, rcdefaults, mplConfig, save_config verbose.set_level(rcParams['verbose.level']) verbose.set_fileo(rcParams['verbose.fileo']) except: from config import rcParams, rcdefaults _use_error_msg = """ This call to matplotlib.use() has no effect because the the backend has already been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time. """ def use(arg, warn=True): """ Set the matplotlib backend to one of the known backends. The argument is case-insensitive. 
For the Cairo backend, the argument can have an extension to indicate the type of output. Example: use('cairo.pdf') will specify a default of pdf output generated by Cairo. Note: this function must be called *before* importing pylab for the first time; or, if you are not using pylab, it must be called before importing matplotlib.backends. If warn is True, a warning is issued if you try and callthis after pylab or pyplot have been loaded. In certain black magic use cases, eg pyplot.switch_backends, we are doing the reloading necessary to make the backend switch work (in some cases, eg pure image backends) so one can set warn=False to supporess the warnings """ if 'matplotlib.backends' in sys.modules: if warn: warnings.warn(_use_error_msg) return arg = arg.lower() if arg.startswith('module://'): name = arg else: be_parts = arg.split('.') name = validate_backend(be_parts[0]) rcParams['backend'] = name if name == 'cairo' and len(be_parts) > 1: rcParams['cairo.format'] = validate_cairo_format(be_parts[1]) def get_backend(): "Returns the current backend" return rcParams['backend'] def interactive(b): """ Set interactive mode to boolean b. If b is True, then draw after every plotting command, eg, after xlabel """ rcParams['interactive'] = b def is_interactive(): 'Return true if plot mode is interactive' b = rcParams['interactive'] return b def tk_window_focus(): """Return true if focus maintenance under TkAgg on win32 is on. This currently works only for python.exe and IPython.exe. Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on.""" if rcParams['backend'] != 'TkAgg': return False return rcParams['tk.window_focus'] # Now allow command line to override # Allow command line access to the backend with -d (matlab compatible # flag) for s in sys.argv[1:]: if s.startswith('-d') and len(s) > 2: # look for a -d flag try: use(s[2:]) except (KeyError, ValueError): pass # we don't want to assume all -d flags are backends, eg -debug verbose.report('matplotlib version %s'%__version__) verbose.report('verbose.level %s'%verbose.level) verbose.report('interactive is %s'%rcParams['interactive']) verbose.report('units is %s'%rcParams['units']) verbose.report('platform is %s'%sys.platform) verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
gpl-3.0
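A minimal sketch of the rc configuration interface the module above implements (rcParams, rc() with its aliases, get_backend() and rcdefaults()); only documented matplotlib calls are used.

import matplotlib

matplotlib.rcParams['lines.linewidth'] = 2          # direct dictionary-style access
matplotlib.rc('font', size=12, family='monospace')  # grouped update; aliases like 'lw' also work
print(matplotlib.get_backend())                     # backend selected via rcParams['backend']
matplotlib.rcdefaults()                             # restore the load-time defaults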
EnvGen/toolbox
scripts/rpkm_table.py
1
1564
#!/usr/bin/env python # coding: utf-8 from __future__ import print_function """A script to calculate rpkm values for contigs or genes based on coverage files Output: Tab separated values: gene id, average coverage and gene length, printed to stdout. """ import sys import argparse import pandas as pd def main(args): sample_info = pd.read_table(args.sample_info, header=None, index_col=0) df = pd.DataFrame() for fn, sample_name in zip(args.coverage_files, args.sample_names): cov_df = pd.read_table(fn, index_col=0) nr_reads_sample, read_length = sample_info.ix[sample_name] # Rpkm calculation based on average coverage rpkm = cov_df['avg_coverage'].divide(float(read_length) * float(nr_reads_sample)) * 1e9 df[sample_name] = rpkm df.to_csv(sys.stdout, sep='\t') if __name__ == "__main__": parser = argparse.ArgumentParser(description = __doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('--sample_names', nargs='*', help=("Sample names, in the same order as coverage_files")) parser.add_argument('--coverage_files', nargs='*', help=("Coverage files with tab separated values: " "sequence id, average coverage, sequence length")) parser.add_argument('--sample_info', help=("Tab separated values 'sample_id', 'nr_reads', 'avg_read_length'. " "all values in sample_names need to be present as sample_id values")) args = parser.parse_args() main(args)
mit
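A tiny worked example of the RPKM formula the script above applies, with made-up numbers.

# rpkm = avg_coverage * 1e9 / (read_length * nr_reads_in_sample),
# which follows from avg_coverage = mapped_reads * read_length / contig_length.
avg_coverage = 12.5        # hypothetical average per-base coverage of one contig
read_length = 100.0        # hypothetical average read length
nr_reads = 2e7             # hypothetical number of reads in the sample

rpkm = avg_coverage * 1e9 / (read_length * nr_reads)
print(rpkm)                # 6.25 reads per kilobase per million mapped reads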
jean/pyxley
pyxley/charts/datamaps/datamaps.py
11
2714
from ..charts import Chart import pandas as pd from flask import request, jsonify, make_response _COLOR_MAP = { 'light blue':'#add8e6', "antique gold":'#fff4b0', "antique silver":'#d7cdc4', "beige": '#f5f5dc', "black":'#000000', "blue": '#8084ff', "bronze": '#c95a0b', "brown": '#864', "burgundy": '#ff7272', "burnt orange": '#cc5500', "camel": '#c96', "canary yellow": '#ffef00', "cobalt": "#56b3ff", "coral": "#ff9e80", "dark green": '#006400', "dark grey": '#666666', "dark pink": '#e3489b', "dark purple": '#540061', "fuchsia": '#ff00ff', "gold": '#fc0', "gray": '#9c9c9c', "green": "#83ff7f", "grey": "#9c9c9c", "jewel tone purple": '#ae2cc6', "light green": '#90ee90', "light grey": '#d3d3d3', "light pink": '#ffd6d3', "light purple": '#b0c4de', "magenta": '#ff00ff', "mustard": '#ffe761', "navy": '#6c70ff', "off-white": '#ffffdd', "olive": '#808000', "orange": '#ffc870', "orange red": '#ff4500', "pale yellow": '#ffff9d', "pink": '#ffb6c1', "purple": '#800080', "red": '#ff0000', "rose gold": '#ffba9d', "silver": '#c0c0c0', "soft orange": '#ffc63c', "tan": '#d2b48c', "teal": '#008080', "teal green":'#a1dfc6', "turquoise": '#40e0d0', "white": '#ffffff', "yellow": '#ffff00', "other": '#111111' } class Datamap(Chart): def __init__(self, chart_id, url, params, api_route): opts = { "url": url, "chartid": chart_id, "params": params } super(Datamap, self).__init__("Datamaps", opts, api_route) class DatamapUSA(Datamap): def __init__(self, url, chart_id, df, state_index, color_index, init_params={}, color_map=_COLOR_MAP): self.state_index = state_index self.color_index = color_index self.fills = color_map self.fills["defaultFills"] = "black" def get_data(): args = {} for c in init_params: if request.args.get(c): args[c] = request.args[c] else: args[c] = init_params[c] return jsonify(self.to_json( self.apply_filters(df, args) )) super(DatamapUSA, self).__init__(chart_id, url, init_params, get_data) def to_json(self, df): records = {} for i, row in df.iterrows(): records[row[self.state_index]] = { "fillKey": row[self.color_index] } return { "data": records, "fills": self.fills }
mit
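A minimal sketch of the record shaping that DatamapUSA.to_json performs above, done directly with pandas so it runs without a Flask app; the column names 'state' and 'color' and the small fill map are hypothetical.

import pandas as pd

df = pd.DataFrame({'state': ['NY', 'CA', 'TX'],
                   'color': ['blue', 'red', 'teal']})

# one {'fillKey': ...} record per state, as built in DatamapUSA.to_json above
records = {row['state']: {'fillKey': row['color']} for _, row in df.iterrows()}
payload = {'data': records,
           'fills': {'blue': '#8084ff', 'red': '#ff0000',
                     'teal': '#008080', 'defaultFills': 'black'}}
print(payload['data']['NY'])   # {'fillKey': 'blue'}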
jskDr/jamespy
jsklearn/codes.py
3
1197
# some of the sklearn code is updated. import numpy as np from sklearn import cross_validation, metrics def _cross_val_score_loo_r0( lm, X, y): """ mean_squared_error metric is used from sklearn.metrics. Return -------- The mean squared error values are returned. """ if len( y.shape) == 1: y = np.array( [y]).T kf = cross_validation.LeaveOneOut( y.shape[0]) score_l = list() for tr, te in kf: lm.fit( X[tr,:], y[tr,:]) yp = lm.predict( X[te, :]) score_l.append( metrics.mean_squared_error( y[te,:], yp)) return score_l def cross_val_score_loo( lm, X, y): """ Leave-one-out prediction errors are collected for each point. Return -------- The flattened per-point prediction errors are returned. """ # Transform to arrays if they are lists or np.mat X = np.array( X) y = np.array( y) # Later, assert can be used to define the size of X and y if len( y.shape) == 1: y = np.array( [y]).T kf = cross_validation.LeaveOneOut( y.shape[0]) # flattened error vectors for each point are stored in this vector. errors_l = list() for tr, te in kf: lm.fit( X[tr,:], y[tr,:]) yp = lm.predict( X[te, :]) errors_l.extend( (y[te,:] - yp).flatten().tolist()) return errors_l
mit
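A usage sketch of cross_val_score_loo as defined above, assuming the module is importable as jsklearn.codes and an older scikit-learn that still ships sklearn.cross_validation (which the code above requires); the toy regression problem is made up.

import numpy as np
from sklearn.linear_model import LinearRegression
from jsklearn.codes import cross_val_score_loo  # assumes the module above is on the path

X = np.random.RandomState(0).randn(20, 3)
y = np.dot(X, [1.0, -2.0, 0.5]) + 0.1 * np.random.RandomState(1).randn(20)

errors = cross_val_score_loo(LinearRegression(), X, y)  # one signed error per left-out sample
print(len(errors), float(np.mean(np.square(errors))))   # 20 errors and their mean square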
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_base.py
36
5277
""" Testing for the base module (sklearn.ensemble.base). """ # Authors: Gilles Louppe # License: BSD 3 clause import numpy as np from numpy.testing import assert_equal from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_true from sklearn.datasets import load_iris from sklearn.ensemble import BaggingClassifier from sklearn.ensemble.base import _set_random_states from sklearn.linear_model import Perceptron from collections import OrderedDict from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.pipeline import Pipeline from sklearn.feature_selection import SelectFromModel def test_base(): # Check BaseEnsemble methods. ensemble = BaggingClassifier( base_estimator=Perceptron(tol=1e-3, random_state=None), n_estimators=3) iris = load_iris() ensemble.fit(iris.data, iris.target) ensemble.estimators_ = [] # empty the list and create estimators manually ensemble._make_estimator() random_state = np.random.RandomState(3) ensemble._make_estimator(random_state=random_state) ensemble._make_estimator(random_state=random_state) ensemble._make_estimator(append=False) assert_equal(3, len(ensemble)) assert_equal(3, len(ensemble.estimators_)) assert_true(isinstance(ensemble[0], Perceptron)) assert_equal(ensemble[0].random_state, None) assert_true(isinstance(ensemble[1].random_state, int)) assert_true(isinstance(ensemble[2].random_state, int)) assert_not_equal(ensemble[1].random_state, ensemble[2].random_state) np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators=np.int32(3)) np_int_ensemble.fit(iris.data, iris.target) def test_base_zero_n_estimators(): # Check that instantiating a BaseEnsemble with n_estimators<=0 raises # a ValueError. ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators=0) iris = load_iris() assert_raise_message(ValueError, "n_estimators must be greater than zero, got 0.", ensemble.fit, iris.data, iris.target) def test_base_not_int_n_estimators(): # Check that instantiating a BaseEnsemble with a string as n_estimators # raises a ValueError demanding n_estimators to be supplied as an integer. 
string_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators='3') iris = load_iris() assert_raise_message(ValueError, "n_estimators must be an integer", string_ensemble.fit, iris.data, iris.target) float_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators=3.0) assert_raise_message(ValueError, "n_estimators must be an integer", float_ensemble.fit, iris.data, iris.target) def test_set_random_states(): # Linear Discriminant Analysis doesn't have random state: smoke test _set_random_states(LinearDiscriminantAnalysis(), random_state=17) clf1 = Perceptron(tol=1e-3, random_state=None) assert_equal(clf1.random_state, None) # check random_state is None still sets _set_random_states(clf1, None) assert_true(isinstance(clf1.random_state, int)) # check random_state fixes results in consistent initialisation _set_random_states(clf1, 3) assert_true(isinstance(clf1.random_state, int)) clf2 = Perceptron(tol=1e-3, random_state=None) _set_random_states(clf2, 3) assert_equal(clf1.random_state, clf2.random_state) # nested random_state def make_steps(): return [('sel', SelectFromModel(Perceptron(tol=1e-3, random_state=None))), ('clf', Perceptron(tol=1e-3, random_state=None))] est1 = Pipeline(make_steps()) _set_random_states(est1, 3) assert_true(isinstance(est1.steps[0][1].estimator.random_state, int)) assert_true(isinstance(est1.steps[1][1].random_state, int)) assert_not_equal(est1.get_params()['sel__estimator__random_state'], est1.get_params()['clf__random_state']) # ensure multiple random_state parameters are invariant to get_params() # iteration order class AlphaParamPipeline(Pipeline): def get_params(self, *args, **kwargs): params = Pipeline.get_params(self, *args, **kwargs).items() return OrderedDict(sorted(params)) class RevParamPipeline(Pipeline): def get_params(self, *args, **kwargs): params = Pipeline.get_params(self, *args, **kwargs).items() return OrderedDict(sorted(params, reverse=True)) for cls in [AlphaParamPipeline, RevParamPipeline]: est2 = cls(make_steps()) _set_random_states(est2, 3) assert_equal(est1.get_params()['sel__estimator__random_state'], est2.get_params()['sel__estimator__random_state']) assert_equal(est1.get_params()['clf__random_state'], est2.get_params()['clf__random_state'])
mit
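A minimal sketch of the behaviour the tests above rely on: n_estimators fixes how many clones BaggingClassifier fits, and each fitted clone receives its own integer random_state. This assumes an older scikit-learn release where the parameter is still named base_estimator, as in the test file above.

from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron

iris = load_iris()
ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                             n_estimators=3, random_state=0)
ensemble.fit(iris.data, iris.target)

print(len(ensemble.estimators_))  # 3 fitted clones
# each clone was seeded individually, so the random states are plain ints
print([est.random_state for est in ensemble.estimators_])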
alvarofierroclavero/scikit-learn
examples/svm/plot_svm_margin.py
318
2328
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` penalizes margin
violations heavily, so the separating line is determined mainly
by the points closest to it. A small value of `C` tolerates more
violations, so more (or all) of the observations influence where
the margins fall, and the margin itself is wider.
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# figure number
fignum = 1

# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):

    clf = svm.SVC(kernel='linear', C=penalty)
    clf.fit(X, Y)

    # get the separating hyperplane
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(-5, 5)
    yy = a * xx - (clf.intercept_[0]) / w[1]

    # plot the parallels to the separating hyperplane that pass through the
    # support vectors
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy + a * margin
    yy_up = yy - a * margin

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')

    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1

plt.show()
bsd-3-clause
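A standalone sketch of the geometry the example above plots: for a linear SVM the half-width of the margin is 1/||w||, so the smaller C (more regularization) produces the wider margin and keeps more support vectors. Only scikit-learn's svm.SVC and NumPy are assumed.

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X = np.r_[rng.randn(20, 2) - [2, 2], rng.randn(20, 2) + [2, 2]]
y = [0] * 20 + [1] * 20

for C in (1.0, 0.05):
    clf = svm.SVC(kernel='linear', C=C).fit(X, y)
    # distance from the separating line to either margin line
    half_margin = 1.0 / np.linalg.norm(clf.coef_[0])
    print("C=%-4s  margin half-width=%.3f  n support vectors=%d"
          % (C, half_margin, len(clf.support_vectors_)))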
sylvan5/PRML
sklearn/plot_svm_kernels.py
2
1641
# coding: utf-8
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import svm

# Create the dataset
X = np.c_[(0.4, -0.7),
          (-1.5, -1),
          (-1.4, -0.9),
          (-1.3, -1.2),
          (-1.1, -0.2),
          (-1.2, -0.4),
          (-0.5, 1.2),
          (-1.5, 2.1),
          (1, 1),
          (1.3, 0.8),
          (1.2, 0.5),
          (0.2, -2.0),
          (0.5, -2.4),
          (0.2, -2.3),
          (0.0, -2.7),
          (1.3, 2.1)].T
Y = [0] * 8 + [1] * 8

fignum = 1

pl.figure(figsize=(18, 5))

for kernel in ('linear', 'poly', 'rbf'):
    # Train the classifier
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)

    pl.subplot(1, 3, fignum)

    cmap = ListedColormap(['red', 'blue'])

    # Plot the training data
    pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=cmap)

    # Highlight the support vectors
    pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
               s=80, facecolors='none', zorder=10)

    x_min = -3
    x_max = 3
    y_min = -3
    y_max = 3

    # Plot the decision boundary
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]

    # decision_function() returns the signed distance to the decision boundary
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    Z = Z.reshape(XX.shape)
    pl.pcolormesh(XX, YY, Z > 0, cmap=cmap)
    pl.contour(XX, YY, Z, colors=['k', 'k', 'k'],
               linestyles=['--', '-', '--'], levels=[-0.5, 0, 0.5])

    pl.title("kernel: %s" % kernel)
    pl.xlim(x_min, x_max)
    pl.ylim(y_min, y_max)

    fignum += 1

pl.show()
mit
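A small sketch of the relationship the kernel plot above relies on: decision_function returns a signed score, and its sign determines the predicted class, which is what the Z > 0 colouring exploits. The toy data below is made up for illustration.

import numpy as np
from sklearn import svm

rng = np.random.RandomState(1)
X = np.r_[rng.randn(20, 2) - 1, rng.randn(20, 2) + 1]
y = np.array([0] * 20 + [1] * 20)

clf = svm.SVC(kernel='rbf', gamma=2).fit(X, y)
scores = clf.decision_function(X)

# for a binary problem, a positive score maps to the second class label
agree = np.array_equal(clf.predict(X), (scores > 0).astype(int))
print("sign of decision_function matches predict:", agree)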
amueller/pystruct
pystruct/tests/test_utils/test_utils_logging.py
1
1465
import numpy as np

from tempfile import mkstemp

from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split

from pystruct.models import GraphCRF
from pystruct.learners import NSlackSSVM
from pystruct.utils import SaveLogger
from pystruct.inference import get_installed

from nose.tools import assert_less, assert_almost_equal

# we always try to get the fastest installed inference method
inference_method = get_installed(["qpbo", "ad3", "max-product", "lp"])[0]


def test_logging():
    iris = load_iris()
    X, y = iris.data, iris.target

    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=np.int)) for x in X]
    Y = y.reshape(-1, 1)

    X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
    _, file_name = mkstemp()

    pbl = GraphCRF(n_features=4, n_states=3,
                   inference_method=inference_method)
    logger = SaveLogger(file_name)
    svm = NSlackSSVM(pbl, C=100, n_jobs=1, logger=logger)
    svm.fit(X_train, y_train)

    score_current = svm.score(X_test, y_test)
    score_auto_saved = logger.load().score(X_test, y_test)

    alt_file_name = file_name + "alt"
    logger.save(svm, alt_file_name)
    logger.file_name = alt_file_name
    logger.load()
    score_manual_saved = logger.load().score(X_test, y_test)

    assert_less(.97, score_current)
    assert_less(.97, score_auto_saved)
    assert_less(.97, score_manual_saved)
    assert_almost_equal(score_auto_saved, score_manual_saved)
bsd-2-clause
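A sketch of the logger round-trip the test above checks, using only the pystruct calls that appear in the record (SaveLogger(file_name), passing it as logger=, and logger.load()); constructor options beyond the file name are not shown there, so none are assumed.

from tempfile import mkstemp

from pystruct.models import GraphCRF
from pystruct.learners import NSlackSSVM
from pystruct.utils import SaveLogger
from pystruct.inference import get_installed

inference_method = get_installed(["qpbo", "ad3", "max-product", "lp"])[0]
_, file_name = mkstemp()

logger = SaveLogger(file_name)  # snapshots the learner to file_name while fitting
crf = GraphCRF(n_features=4, n_states=3, inference_method=inference_method)
learner = NSlackSSVM(crf, C=100, n_jobs=1, logger=logger)

# after learner.fit(X_train, y_train), the on-disk snapshot can be restored
# and scored like the in-memory learner:
#     restored = logger.load()
#     restored.score(X_test, y_test)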
robcarver17/pysystemtrade
sysobjects/adjusted_prices.py
1
6043
from copy import copy

import numpy as np
import pandas as pd

from syscore.objects import named_object
from syscore.merge_data import full_merge_of_existing_series
from sysobjects.dict_of_named_futures_per_contract_prices import price_column_names, contract_column_names, price_name, contract_name_from_column_name
from sysobjects.multiple_prices import futuresMultiplePrices


class futuresAdjustedPrices(pd.Series):
    """
    adjusted price information
    """

    def __init__(self, price_data):
        price_data.index.name = "index"  # arctic compatible
        super().__init__(price_data)

    @classmethod
    def create_empty(futuresContractPrices):
        """
        Our graceful fail is to return an empty, but valid, object
        """

        futures_contract_prices = futuresContractPrices(pd.Series())

        return futures_contract_prices

    @classmethod
    def stich_multiple_prices(
        futuresAdjustedPrices, multiple_prices: futuresMultiplePrices, forward_fill: bool=False
    ):
        """
        Do backstitching of multiple prices using the panama method.
        If you want to change the stitching method, override this method.

        :param multiple_prices: multiple prices object
        :param forward_fill: forward fill prices and forwards before stitching
        :return: futuresAdjustedPrices
        """

        adjusted_prices = _panama_stitch(multiple_prices)

        return futuresAdjustedPrices(adjusted_prices)

    def update_with_multiple_prices_no_roll(self, updated_multiple_prices: futuresMultiplePrices):
        """
        Update adjusted prices assuming no roll has happened

        :param updated_multiple_prices: futuresMultiplePrices
        :return: updated adjusted prices
        """

        updated_adj = _update_adjusted_prices_from_multiple_no_roll(
            self, updated_multiple_prices
        )

        return updated_adj


def _panama_stitch(multiple_prices_input: futuresMultiplePrices, forward_fill: bool=False) -> pd.Series:
    """
    Do a panama stitch for adjusted prices

    :param multiple_prices: futuresMultiplePrices
    :return: pd.Series of adjusted prices
    """

    multiple_prices = copy(multiple_prices_input)

    if multiple_prices.empty:
        raise Exception("Can't stitch an empty multiple prices object")

    previous_row = multiple_prices.iloc[0, :]
    adjusted_prices_values = [previous_row.PRICE]

    for dateindex in multiple_prices.index[1:]:
        current_row = multiple_prices.loc[dateindex, :]

        if current_row.PRICE_CONTRACT == previous_row.PRICE_CONTRACT:
            # no roll has occurred
            # we just append the price
            adjusted_prices_values.append(current_row.PRICE)
        else:
            # A roll has occurred
            adjusted_prices_values = _roll_in_panama(adjusted_prices_values,
                                                     previous_row, current_row)

        previous_row = current_row

    # it's ok to return a plain pd.Series since the calling object will change
    # the type
    adjusted_prices = pd.Series(adjusted_prices_values,
                                index=multiple_prices.index)

    return adjusted_prices


def _roll_in_panama(adjusted_prices_values, previous_row, current_row):
    # This is the sort of code you will need to change to adjust the roll logic
    # The roll differential is from the previous_row
    roll_differential = previous_row.FORWARD - previous_row.PRICE
    if np.isnan(roll_differential):
        raise Exception(
            "On this day %s which should be a roll date we don't have prices for both %s and %s contracts"
            % (str(current_row.name), previous_row.PRICE_CONTRACT,
               previous_row.FORWARD_CONTRACT,))

    # We add the roll differential to all previous prices
    adjusted_prices_values = [
        adj_price + roll_differential for adj_price in adjusted_prices_values]

    # note this includes the price for the previous row, which will now be
    # equal to the forward price

    # We now add today's price. This will be for the new contract
    adjusted_prices_values.append(current_row.PRICE)

    return adjusted_prices_values


no_update_roll_has_occured = futuresAdjustedPrices.create_empty()


def _update_adjusted_prices_from_multiple_no_roll(
    existing_adjusted_prices: futuresAdjustedPrices, updated_multiple_prices: futuresMultiplePrices
) -> futuresAdjustedPrices:
    """
    Update adjusted prices assuming no roll has happened

    :param existing_adjusted_prices: futuresAdjustedPrices
    :param updated_multiple_prices: futuresMultiplePrices
    :return: updated adjusted prices
    """

    new_multiple_price_data, last_contract_in_price_data = _calc_new_multiple_prices(existing_adjusted_prices, updated_multiple_prices)

    no_roll_has_occured = new_multiple_price_data.check_all_contracts_equal_to(
        last_contract_in_price_data
    )

    if not no_roll_has_occured:
        return no_update_roll_has_occured

    new_adjusted_prices = new_multiple_price_data[price_name]
    new_adjusted_prices = new_adjusted_prices.dropna()

    merged_adjusted_prices = full_merge_of_existing_series(
        existing_adjusted_prices, new_adjusted_prices
    )
    merged_adjusted_prices = futuresAdjustedPrices(merged_adjusted_prices)

    return merged_adjusted_prices


def _calc_new_multiple_prices(existing_adjusted_prices: futuresAdjustedPrices,
                              updated_multiple_prices: futuresMultiplePrices)\
        -> (futuresMultiplePrices, str):

    last_date_in_current_adj = existing_adjusted_prices.index[-1]

    multiple_prices_as_dict = updated_multiple_prices.as_dict()

    prices_in_multiple_prices = multiple_prices_as_dict[price_name]
    price_contract_column = contract_name_from_column_name(price_name)

    last_contract_in_price_data = prices_in_multiple_prices[price_contract_column][
        :last_date_in_current_adj
    ][-1]

    new_multiple_price_data = prices_in_multiple_prices.prices_after_date(last_date_in_current_adj)

    return new_multiple_price_data, last_contract_in_price_data
gpl-3.0
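A worked toy example of the back-adjustment idea in _panama_stitch / _roll_in_panama above: at each roll the forward-minus-price differential is added to every earlier adjusted value, so the stitched series shows no artificial jump. This re-implements the idea on a plain DataFrame rather than calling the pysystemtrade classes; the column names simply mirror the code above.

import pandas as pd

def panama_stitch_toy(multiple_prices: pd.DataFrame) -> pd.Series:
    # expects PRICE, FORWARD and PRICE_CONTRACT columns, as in the code above
    prev = multiple_prices.iloc[0]
    adjusted = [prev.PRICE]
    for _, row in multiple_prices.iloc[1:].iterrows():
        if row.PRICE_CONTRACT != prev.PRICE_CONTRACT:
            # roll: shift every earlier price by the forward/price differential
            diff = prev.FORWARD - prev.PRICE
            adjusted = [p + diff for p in adjusted]
        adjusted.append(row.PRICE)
        prev = row
    return pd.Series(adjusted, index=multiple_prices.index)

toy = pd.DataFrame(
    {"PRICE": [100.0, 101.0, 105.0],
     "FORWARD": [104.0, 106.0, 110.0],
     "PRICE_CONTRACT": ["202003", "202003", "202006"]},
    index=pd.date_range("2020-02-26", periods=3),
)
print(panama_stitch_toy(toy))  # 105.0, 106.0, 105.0 -- no jump at the roll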
AlexRobson/scikit-learn
benchmarks/bench_plot_approximate_neighbors.py
244
6011
""" Benchmark for approximate nearest neighbor search using locality sensitive hashing forest. There are two types of benchmarks. First, accuracy of LSHForest queries are measured for various hyper-parameters and index sizes. Second, speed up of LSHForest queries compared to brute force method in exact nearest neighbors is measures for the aforementioned settings. In general, speed up is increasing as the index size grows. """ from __future__ import division import numpy as np from tempfile import gettempdir from time import time from sklearn.neighbors import NearestNeighbors from sklearn.neighbors.approximate import LSHForest from sklearn.datasets import make_blobs from sklearn.externals.joblib import Memory m = Memory(cachedir=gettempdir()) @m.cache() def make_data(n_samples, n_features, n_queries, random_state=0): """Create index and query data.""" print('Generating random blob-ish data') X, _ = make_blobs(n_samples=n_samples + n_queries, n_features=n_features, centers=100, shuffle=True, random_state=random_state) # Keep the last samples as held out query vectors: note since we used # shuffle=True we have ensured that index and query vectors are # samples from the same distribution (a mixture of 100 gaussians in this # case) return X[:n_samples], X[n_samples:] def calc_exact_neighbors(X, queries, n_queries, n_neighbors): """Measures average times for exact neighbor queries.""" print ('Building NearestNeighbors for %d samples in %d dimensions' % (X.shape[0], X.shape[1])) nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) average_time = 0 t0 = time() neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors, return_distance=False) average_time = (time() - t0) / n_queries return neighbors, average_time def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors, average_time_exact, **lshf_params): """Calculates accuracy and the speed up of LSHForest.""" print('Building LSHForest for %d samples in %d dimensions' % (X.shape[0], X.shape[1])) lshf = LSHForest(**lshf_params) t0 = time() lshf.fit(X) lshf_build_time = time() - t0 print('Done in %0.3fs' % lshf_build_time) accuracy = 0 t0 = time() approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors, return_distance=False) average_time_approx = (time() - t0) / n_queries for i in range(len(queries)): accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean() accuracy /= n_queries speed_up = average_time_exact / average_time_approx print('Average time for lshf neighbor queries: %0.3fs' % average_time_approx) print ('Average time for exact neighbor queries: %0.3fs' % average_time_exact) print ('Average Accuracy : %0.2f' % accuracy) print ('Speed up: %0.1fx' % speed_up) return speed_up, accuracy if __name__ == '__main__': import matplotlib.pyplot as plt # Initialize index sizes n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)] n_features = int(1e2) n_queries = 100 n_neighbors = 10 X_index, X_query = make_data(np.max(n_samples), n_features, n_queries, random_state=0) params_list = [{'n_estimators': 3, 'n_candidates': 50}, {'n_estimators': 5, 'n_candidates': 70}, {'n_estimators': 10, 'n_candidates': 100}] accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float) speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float) for i, sample_size in enumerate(n_samples): print ('==========================================================') print ('Sample size: %i' % sample_size) print ('------------------------') exact_neighbors, average_time_exact = calc_exact_neighbors( 
X_index[:sample_size], X_query, n_queries, n_neighbors) for j, params in enumerate(params_list): print ('LSHF parameters: n_estimators = %i, n_candidates = %i' % (params['n_estimators'], params['n_candidates'])) speed_ups[i, j], accuracies[i, j] = calc_accuracy( X_index[:sample_size], X_query, n_queries, n_neighbors, exact_neighbors, average_time_exact, random_state=0, **params) print ('') print ('==========================================================') # Set labels for LSHForest parameters colors = ['c', 'm', 'y'] legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color) for color in colors] legend_labels = ['n_estimators={n_estimators}, ' 'n_candidates={n_candidates}'.format(**p) for p in params_list] # Plot precision plt.figure() plt.legend(legend_rects, legend_labels, loc='upper left') for i in range(len(params_list)): plt.scatter(n_samples, accuracies[:, i], c=colors[i]) plt.plot(n_samples, accuracies[:, i], c=colors[i]) plt.ylim([0, 1.3]) plt.xlim(np.min(n_samples), np.max(n_samples)) plt.semilogx() plt.ylabel("Precision@10") plt.xlabel("Index size") plt.grid(which='both') plt.title("Precision of first 10 neighbors with index size") # Plot speed up plt.figure() plt.legend(legend_rects, legend_labels, loc='upper left') for i in range(len(params_list)): plt.scatter(n_samples, speed_ups[:, i], c=colors[i]) plt.plot(n_samples, speed_ups[:, i], c=colors[i]) plt.ylim(0, np.max(speed_ups)) plt.xlim(np.min(n_samples), np.max(n_samples)) plt.semilogx() plt.ylabel("Speed up") plt.xlabel("Index size") plt.grid(which='both') plt.title("Relationship between Speed up and index size") plt.show()
bsd-3-clause
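The accuracy that calc_accuracy above accumulates is simply the recall of the exact neighbors among the approximate ones. A tiny standalone sketch of that metric with made-up index arrays (LSHForest itself was later deprecated and removed from scikit-learn, so it is not used here).

import numpy as np

def mean_recall(approx_neighbors, exact_neighbors):
    # fraction of the true k nearest neighbors recovered per query,
    # averaged over queries -- the quantity accumulated above
    return np.mean([np.in1d(a, e).mean()
                    for a, e in zip(approx_neighbors, exact_neighbors)])

exact = np.array([[0, 1, 2], [3, 4, 5]])
approx = np.array([[0, 2, 9], [3, 4, 5]])
print(mean_recall(approx, exact))  # (2/3 + 3/3) / 2 = 0.8333...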
verdurin/bcbio-nextgen
bcbio/pipeline/qcsummary.py
1
43958
"""Quality control and summary metrics for next-gen alignments and analysis. """ import collections import contextlib import csv import os import shutil import subprocess import pandas as pd import lxml.html import yaml from datetime import datetime # allow graceful during upgrades try: import matplotlib matplotlib.use('Agg', force=True) import matplotlib.pyplot as plt plt.ioff() except ImportError: plt = None try: from fadapa import Fadapa except ImportError: Fadapa = None import pybedtools import pysam import toolz as tz import toolz.dicttoolz as dtz from bcbio import bam, utils from bcbio.distributed.transaction import file_transaction, tx_tmpdir from bcbio.log import logger from bcbio.pipeline import config_utils, run_info from bcbio.install import _get_data_dir from bcbio.provenance import do import bcbio.rnaseq.qc from bcbio.rnaseq.coverage import plot_gene_coverage import bcbio.pipeline.datadict as dd from bcbio.variation import bedutils from bcbio import broad # ## High level functions to generate summary def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ sum_samples = run_parallel("pipeline_summary", samples) qsign_info = run_parallel("qsignature_summary", [sum_samples]) summary_file = write_project_summary(sum_samples, qsign_info) samples = [] for data in sum_samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] samples.append(data) samples = _add_researcher_summary(samples, summary_file) return samples def pipeline_summary(data): """Provide summary information on processing sample. """ work_bam = data.get("work_bam") if data["sam_ref"] is not None and work_bam and work_bam.endswith(".bam"): logger.info("Generating summary files: %s" % str(data["name"])) data["summary"] = _run_qc_tools(work_bam, data) return [[data]] def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. 
:param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ metrics = {} to_run = [] if "fastqc" not in tz.get_in(("config", "algorithm", "tools_off"), data, []): to_run.append(("fastqc", _run_fastqc)) if data["analysis"].lower().startswith("rna-seq"): # to_run.append(("rnaseqc", bcbio.rnaseq.qc.sample_summary)) # to_run.append(("coverage", _run_gene_coverage)) # to_run.append(("complexity", _run_complexity)) to_run.append(("qualimap", _rnaseq_qualimap)) elif data["analysis"].lower().startswith("chip-seq"): to_run.append(["bamtools", _run_bamtools_stats]) else: to_run += [("bamtools", _run_bamtools_stats), ("gemini", _run_gemini_stats)] if data["analysis"].lower().startswith(("standard", "variant2")): to_run.append(["qsignature", _run_qsignature_generator]) if "qualimap" in tz.get_in(("config", "algorithm", "tools_on"), data, []): to_run.append(("qualimap", _run_qualimap)) qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} for program_name, qc_fn in to_run: cur_qc_dir = os.path.join(qc_dir, program_name) cur_metrics = qc_fn(bam_file, data, cur_qc_dir) metrics.update(cur_metrics) ratio = bam.get_aligned_reads(bam_file, data) # if (ratio < 0.60 and data['config']["algorithm"].get("kraken", None) and # (data["analysis"].lower().startswith("rna-seq") or # data["analysis"].lower().startswith("standard"))): if data['config']["algorithm"].get("kraken", None): cur_metrics = _run_kraken(data, ratio) metrics.update(cur_metrics) bam.remove("%s-downsample%s" % os.path.splitext(bam_file)) metrics["Name"] = data["name"][-1] metrics["Quality format"] = utils.get_in(data, ("config", "algorithm", "quality_format"), "standard").lower() return {"qc": qc_dir, "metrics": metrics} # ## Generate project level QC summary for quickly assessing large projects def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") test_run = samples[0][0].get("test_run", False) date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if test_run: yaml.safe_dump({"test_run": True}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. 
""" cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if os.path.exists(summary_file): with open(summary_file) as in_handle: for s in yaml.load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out def _save_fields(sample): to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata", "description"] saved = {k: sample[k] for k in to_save if k in sample} if "summary" in sample: saved["summary"] = {"metrics": sample["summary"]["metrics"]} # check if disambiguation was run if "disambiguate" in sample: if utils.file_exists(sample["disambiguate"]["summary"]): disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"]) saved["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0] disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0] if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple)) else sample["config"]["algorithm"]["disambiguate"]) saved["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1] saved["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2] return saved def _parse_disambiguate(disambiguatestatsfilename): """Parse disambiguation stats from given file. """ disambig_stats = [-1, -1, -1] with open(disambiguatestatsfilename, "r") as in_handle: header = in_handle.readline().strip().split("\t") if header == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']: disambig_stats_tmp = in_handle.readline().strip().split("\t")[1:] if len(disambig_stats_tmp) == 3: disambig_stats = [int(x) for x in disambig_stats_tmp] return disambig_stats # ## Generate researcher specific summaries def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. 
""" out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total reads", "Mapped reads", "Mapped reads pct", "Duplicates", "Duplicates pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file # ## Run and parse read information from FastQC class FastQCParser: def __init__(self, base_dir, sample=None): self._dir = base_dir self.sample = sample def get_fastqc_summary(self): ignore = set(["Total Sequences", "Filtered Sequences", "Filename", "File type", "Encoding"]) stats = {} for stat_line in self._fastqc_data_section("Basic Statistics")[1:]: k, v = stat_line.split("\t")[:2] if k not in ignore: stats[k] = v return stats def _fastqc_data_section(self, section_name): out = [] in_section = False data_file = os.path.join(self._dir, "fastqc_data.txt") if os.path.exists(data_file): with open(data_file) as in_handle: for line in in_handle: if line.startswith(">>%s" % section_name): in_section = True elif in_section: if line.startswith(">>END"): break out.append(line.rstrip("\r\n")) return out def save_sections_into_file(self): data_file = os.path.join(self._dir, "fastqc_data.txt") if os.path.exists(data_file) and Fadapa: parser = Fadapa(data_file) module = [m[1] for m in parser.summary()][2:9] for m in module: out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv") dt = self._get_module(parser, m) dt.to_csv(out_file, sep="\t", index=False) def _get_module(self, parser, module): """ Get module using fadapa package """ dt = [] lines = parser.clean_data(module) header = lines[0] for data in lines[1:]: if data[0].startswith("#"): #some modules have two headers header = data continue if data[0].find("-") > -1: # expand positions 1-3 to 1, 2, 3 f, s = map(int, data[0].split("-")) for pos in range(f, s): dt.append([str(pos)] + data[1:]) else: dt.append(data) dt = pd.DataFrame(dt) dt.columns = [h.replace(" ", "_") for h in header] dt['sample'] = self.sample return dt def _run_gene_coverage(bam_file, data, out_dir): out_file = os.path.join(out_dir, "gene_coverage.pdf") ref_file = utils.get_in(data, ("genome_resources", "rnaseq", "transcripts")) count_file = data["count_file"] if utils.file_exists(out_file): return out_file with file_transaction(data, out_file) as tx_out_file: plot_gene_coverage(bam_file, ref_file, count_file, tx_out_file) return {"gene_coverage": out_file} def _run_kraken(data, ratio): """Run kraken, generating report in specified directory and parsing metrics. Using only first paired reads. 
""" logger.info("Number of aligned reads < than 0.60 in %s: %s" % (str(data["name"]), ratio)) logger.info("Running kraken to determine contaminant: %s" % str(data["name"])) qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) kraken_out = os.path.join(qc_dir, "kraken") out = out_stats = None db = data['config']["algorithm"]["kraken"] kraken_cmd = config_utils.get_program("kraken", data["config"]) if db == "minikraken": db = os.path.join(_get_data_dir(), "genomes", "kraken", "minikraken") else: if not os.path.exists(db): logger.info("kraken: no database found %s, skipping" % db) return {"kraken_report": "null"} if not os.path.exists(os.path.join(kraken_out, "kraken_out")): work_dir = os.path.dirname(kraken_out) utils.safe_makedir(work_dir) num_cores = data["config"]["algorithm"].get("num_cores", 1) fn_file = data["files"][0] if fn_file.endswith("bam"): logger.info("kraken: need fasta files as input") return {"kraken_report": "null"} with tx_tmpdir(data, work_dir) as tx_tmp_dir: with utils.chdir(tx_tmp_dir): out = os.path.join(tx_tmp_dir, "kraken_out") out_stats = os.path.join(tx_tmp_dir, "kraken_stats") cat = "zcat" if fn_file.endswith(".gz") else "cat" cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick " "--preload --min-hits 2 " "--threads {num_cores} " "--out {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals()) do.run(cl, "kraken: %s" % data["name"][-1]) if os.path.exists(kraken_out): shutil.rmtree(kraken_out) shutil.move(tx_tmp_dir, kraken_out) metrics = _parse_kraken_output(kraken_out, db, data) return metrics def _parse_kraken_output(out_dir, db, data): """Parse kraken stat info comming from stderr, generating report with kraken-report """ in_file = os.path.join(out_dir, "kraken_out") stat_file = os.path.join(out_dir, "kraken_stats") out_file = os.path.join(out_dir, "kraken_summary") kraken_cmd = config_utils.get_program("kraken-report", data["config"]) classify = unclassify = None with open(stat_file, 'r') as handle: for line in handle: if line.find(" classified") > -1: classify = line[line.find("(") + 1:line.find(")")] if line.find(" unclassified") > -1: unclassify = line[line.find("(") + 1:line.find(")")] if os.path.getsize(in_file) > 0 and not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals()) do.run(cl, "kraken report: %s" % data["name"][-1]) kraken = {"kraken_clas": classify, "kraken_unclas": unclassify} kraken_sum = _summarize_kraken(out_file) kraken.update(kraken_sum) return kraken def _summarize_kraken(fn): """get the value at species level""" kraken = {} list_sp, list_value = [], [] with open(fn) as handle: for line in handle: cols = line.strip().split("\t") sp = cols[5].strip() if len(sp.split(" ")) > 1 and not sp.startswith("cellular"): list_sp.append(sp) list_value.append(cols[0]) kraken = {"kraken_sp": list_sp, "kraken_value": list_value} return kraken def _run_fastqc(bam_file, data, fastqc_out): """Run fastqc, generating report in specified directory and parsing metrics. Downsamples to 10 million reads to avoid excessive processing times with large files, unless we're running a Standard/QC pipeline. Handles fastqc 0.11+, which use a single HTML file and older versions that use a directory of files + images. 
The goal is to eventually move to only 0.11+ """ sentry_file = os.path.join(fastqc_out, "fastqc_report.html") if not os.path.exists(sentry_file): work_dir = os.path.dirname(fastqc_out) utils.safe_makedir(work_dir) ds_bam = (bam.downsample(bam_file, data, 1e7) if data.get("analysis", "").lower() not in ["standard"] else None) bam_file = ds_bam if ds_bam else bam_file fastqc_name = os.path.splitext(os.path.basename(bam_file))[0] num_cores = data["config"]["algorithm"].get("num_cores", 1) with tx_tmpdir(data, work_dir) as tx_tmp_dir: with utils.chdir(tx_tmp_dir): cl = [config_utils.get_program("fastqc", data["config"]), "-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", "bam", bam_file] do.run(cl, "FastQC: %s" % data["name"][-1]) tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name) tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name) if os.path.exists("%s.zip" % tx_fastqc_out): os.remove("%s.zip" % tx_fastqc_out) if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file): utils.safe_makedir(fastqc_out) shutil.move(os.path.join(tx_fastqc_out, "fastqc_data.txt"), fastqc_out) shutil.move(tx_combo_file, sentry_file) elif not os.path.exists(sentry_file): if os.path.exists(fastqc_out): shutil.rmtree(fastqc_out) shutil.move(tx_fastqc_out, fastqc_out) parser = FastQCParser(fastqc_out, data["name"][-1]) stats = parser.get_fastqc_summary() parser.save_sections_into_file() return stats def _run_complexity(bam_file, data, out_dir): try: import pandas as pd import statsmodels.formula.api as sm except ImportError: return {"Unique Starts Per Read": "NA"} SAMPLE_SIZE = 1000000 base, _ = os.path.splitext(os.path.basename(bam_file)) utils.safe_makedir(out_dir) out_file = os.path.join(out_dir, base + ".pdf") df = bcbio.rnaseq.qc.starts_by_depth(bam_file, data["config"], SAMPLE_SIZE) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tmp_out_file: df.plot(x='reads', y='starts', title=bam_file + " complexity") fig = plt.gcf() fig.savefig(tmp_out_file) print "file saved as", out_file print "out_dir is", out_dir return bcbio.rnaseq.qc.estimate_library_complexity(df) # ## Qualimap def _parse_num_pct(k, v): num, pct = v.split(" / ") return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()} def _parse_qualimap_globals(table): """Retrieve metrics of interest from globals table. """ out = {} want = {"Mapped reads": _parse_num_pct, "Duplication rate": lambda k, v: {k: v}} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col in want: out.update(want[col](col, val)) return out def _parse_qualimap_globals_inregion(table): """Retrieve metrics from the global targeted region table. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col == "Mapped reads": out.update(_parse_num_pct("%s (in regions)" % col, val)) return out def _parse_qualimap_coverage(table): """Parse summary qualimap coverage metrics. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col == "Mean": out["Coverage (Mean)"] = val return out def _parse_qualimap_insertsize(table): """Parse insert size metrics. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col == "Median": out["Insert size (Median)"] = val return out def _parse_qualimap_metrics(report_file): """Extract useful metrics from the qualimap HTML report file. 
""" out = {} parsers = {"Globals": _parse_qualimap_globals, "Globals (inside of regions)": _parse_qualimap_globals_inregion, "Coverage": _parse_qualimap_coverage, "Coverage (inside of regions)": _parse_qualimap_coverage, "Insert size": _parse_qualimap_insertsize, "Insert size (inside of regions)": _parse_qualimap_insertsize} root = lxml.html.parse(report_file).getroot() for table in root.xpath("//div[@class='table-summary']"): header = table.xpath("h3")[0].text if header in parsers: out.update(parsers[header](table)) return out def _bed_to_bed6(orig_file, out_dir): """Convert bed to required bed6 inputs. """ bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file))) if not utils.file_exists(bed6_file): with open(bed6_file, "w") as out_handle: for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)): region = [x for x in list(region) if x] fillers = [str(i), "1.0", "+"] full = region + fillers[:6 - len(region)] out_handle.write("\t".join(full) + "\n") return bed6_file def _run_qualimap(bam_file, data, out_dir): """Run qualimap to assess alignment quality metrics. """ report_file = os.path.join(out_dir, "qualimapReport.html") if not os.path.exists(report_file): ds_bam = bam.downsample(bam_file, data, 1e7) bam_file = ds_bam if ds_bam else bam_file utils.safe_makedir(out_dir) num_cores = data["config"]["algorithm"].get("num_cores", 1) qualimap = config_utils.get_program("qualimap", data["config"]) resources = config_utils.get_resources("qualimap", data["config"]) max_mem = config_utils.adjust_memory(resources.get("memory", "1G"), num_cores) cmd = ("unset DISPLAY && {qualimap} bamqc -bam {bam_file} -outdir {out_dir} " "-nt {num_cores} --java-mem-size={max_mem}") species = data["genome_resources"]["aliases"].get("ensembl", "").upper() if species in ["HUMAN", "MOUSE"]: cmd += " -gd {species}" regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data) if regions: bed6_regions = _bed_to_bed6(regions, out_dir) cmd += " -gff {bed6_regions}" do.run(cmd.format(**locals()), "Qualimap: %s" % data["name"][-1]) return _parse_qualimap_metrics(report_file) # ## RNAseq Qualimap def _parse_metrics(metrics): # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics) missing = set(["Genes Detected", "Transcripts Detected", "Mean Per Base Cov."]) correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"]) to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate", "Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate", "Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0, "No feature assigned": 0, "Duplication Rate of Mapped": 1, "Fragment Length Mean": 1, "rRNA": 1, "Ambiguou alignment": 0}) total = ["Not aligned", "Aligned to genes", "No feature assigned"] out = {} total_reads = sum([int(metrics[name]) for name in total]) out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads out['Mapped'] = sum([int(metrics[name]) for name in total[1:]]) out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads [out.update({name: 0}) for name in missing] [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct] for name in to_change: if not to_change[name]: continue if to_change[name] == 1: out.update({name: float(metrics[name])}) else: out.update({to_change[name]: float(metrics[name])}) return out def _detect_duplicates(bam_file, out_dir, config): """ Detect duplicates metrics with Picard """ 
out_file = os.path.join(out_dir, "dup_metrics") if not utils.file_exists(out_file): broad_runner = broad.runner_from_config(config) (dup_align_bam, metrics_file) = broad_runner.run_fn("picard_mark_duplicates", bam_file, remove_dups=True) shutil.move(metrics_file, out_file) metrics = [] with open(out_file) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for line in reader: if line and not line[0].startswith("#"): metrics.append(line) metrics = dict(zip(metrics[0], metrics[1])) return {"Duplication Rate of Mapped": metrics["PERCENT_DUPLICATION"]} def _transform_browser_coor(rRNA_interval, rRNA_coor): """ transform interval format to browser coord: chr:start-end """ with open(rRNA_coor, 'w') as out_handle: with open(rRNA_interval, 'r') as in_handle: for line in in_handle: c, bio, source, s, e = line.split("\t")[:5] if bio.startswith("rRNA"): out_handle.write(("{0}:{1}-{2}\n").format(c, s, e)) def _detect_rRNA(config, bam_file, rRNA_file, ref_file, out_dir, single_end): """ Calculate rRNA with gatk-framework """ if not utils.file_exists(rRNA_file): return {'rRNA': 0} out_file = os.path.join(out_dir, "rRNA.counts") if not utils.file_exists(out_file): out_file = _count_rRNA_reads(bam_file, out_file, ref_file, rRNA_file, single_end, config) with open(out_file) as in_handle: for line in in_handle: if line.find("CountReads counted") > -1: rRNA_reads = line.split()[6] break return {'rRNA': rRNA_reads} def _count_rRNA_reads(in_bam, out_file, ref_file, rRNA_interval, single_end, config): """Use GATK counter to count reads in rRNA genes """ bam.index(in_bam, config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out_file: rRNA_coor = os.path.join(os.path.dirname(out_file), "rRNA.list") _transform_browser_coor(rRNA_interval, rRNA_coor) params = ["-T", "CountReads", "-R", ref_file, "-I", in_bam, "-log", tx_out_file, "-L", rRNA_coor, "--filter_reads_with_N_cigar", "-allowPotentiallyMisencodedQuals"] jvm_opts = broad.get_gatk_framework_opts(config) cmd = [config_utils.get_program("gatk-framework", config)] + jvm_opts + params do.run(cmd, "counts rRNA for %s" % in_bam) return out_file def _parse_qualimap_rnaseq(table): """ Retrieve metrics of interest from globals table. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] col = col.replace(":", "").strip() val = val.replace(",", "") m = {col: val} if val.find("/") > -1: m = _parse_num_pct(col, val.replace("%", "")) out.update(m) return out def _parse_rnaseq_qualimap_metrics(report_file): """Extract useful metrics from the qualimap HTML report file. 
""" out = {} parsers = ["Reads alignment", "Reads genomic origin", "Transcript coverage profile"] root = lxml.html.parse(report_file).getroot() for table in root.xpath("//div[@class='table-summary']"): header = table.xpath("h3")[0].text if header in parsers: out.update(_parse_qualimap_rnaseq(table)) return out def _rnaseq_qualimap(bam_file, data, out_dir): """ Run qualimap for a rnaseq bam file and parse results """ report_file = os.path.join(out_dir, "qualimapReport.html") config = data["config"] gtf_file = dd.get_gtf_file(data) ref_file = dd.get_ref_file(data) single_end = not bam.is_paired(bam_file) if not utils.file_exists(report_file): utils.safe_makedir(out_dir) bam.index(bam_file, config) cmd = _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file, single_end) do.run(cmd, "Qualimap for {}".format(data["name"][-1])) metrics = _parse_rnaseq_qualimap_metrics(report_file) metrics.update(_detect_duplicates(bam_file, out_dir, config)) metrics.update(_detect_rRNA(config, bam_file, gtf_file, ref_file, out_dir, single_end)) metrics.update({"Fragment Length Mean": bam.estimate_fragment_size(bam_file)}) metrics = _parse_metrics(metrics) return metrics def _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file=None, single_end=None): """ Create command lines for qualimap """ qualimap = config_utils.get_program("qualimap", config) resources = config_utils.get_resources("qualimap", config) num_cores = resources.get("cores", 1) max_mem = config_utils.adjust_memory(resources.get("memory", "4G"), num_cores) cmd = ("unset DISPLAY && {qualimap} rnaseq -outdir {out_dir} -a proportional -bam {bam_file} " "-gtf {gtf_file} --java-mem-size={max_mem}").format(**locals()) return cmd # ## Lightweight QC approaches def _parse_bamtools_stats(stats_file): out = {} want = set(["Total reads", "Mapped reads", "Duplicates", "Median insert size"]) with open(stats_file) as in_handle: for line in in_handle: parts = line.split(":") if len(parts) == 2: metric, stat_str = parts metric = metric.split("(")[0].strip() if metric in want: stat_parts = stat_str.split() if len(stat_parts) == 2: stat, pct = stat_parts pct = pct.replace("(", "").replace(")", "") else: stat = stat_parts[0] pct = None out[metric] = stat if pct: out["%s pct" % metric] = pct return out def _parse_offtargets(bam_file): """ Add to metrics off-targets reads if it exitst """ off_target = bam_file.replace(".bam", "-offtarget-stats.yaml") if os.path.exists(off_target): res = yaml.load(open(off_target)) return res return {} def _run_bamtools_stats(bam_file, data, out_dir): """Run bamtools stats with reports on mapped reads, duplicates and insert sizes. """ stats_file = os.path.join(out_dir, "bamtools_stats.txt") if not utils.file_exists(stats_file): utils.safe_makedir(out_dir) bamtools = config_utils.get_program("bamtools", data["config"]) with file_transaction(data, stats_file) as tx_out_file: cmd = "{bamtools} stats -in {bam_file}" if bam.is_paired(bam_file): cmd += " -insert" cmd += " > {tx_out_file}" do.run(cmd.format(**locals()), "bamtools stats", data) out = _parse_bamtools_stats(stats_file) out.update(_parse_offtargets(bam_file)) return out ## Variant statistics from gemini def _run_gemini_stats(bam_file, data, out_dir): """Retrieve high level variant statistics from Gemini. 
""" out = {} gemini_dbs = [d for d in [tz.get_in(["population", "db"], x) for x in data.get("variants", [])] if d] if len(gemini_dbs) > 0: gemini_db = gemini_dbs[0] gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0] if not utils.file_uptodate(gemini_stat_file, gemini_db): gemini = config_utils.get_program("gemini", data["config"]) tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db]) gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db]) dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q", "SELECT count(*) FROM variants WHERE in_dbsnp==1"]) out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1] for line in gt_counts.split("\n"): parts = line.rstrip().split() if len(parts) > 0 and parts[0] != "sample": name, hom_ref, het, hom_var, _, total = parts out[name] = {} out[name]["Variations (heterozygous)"] = int(het) out[name]["Variations (homozygous)"] = int(hom_var) # same total variations for all samples, keep that top level as well. out["Variations (total)"] = int(total) out["Variations (in dbSNP)"] = int(dbsnp_count.strip()) if out.get("Variations (total)") > 0: out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] / float(out["Variations (total)"]) * 100.0) with open(gemini_stat_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) else: with open(gemini_stat_file) as in_handle: out = yaml.safe_load(in_handle) res = {} for k, v in out.iteritems(): if not isinstance(v, dict): res.update({k: v}) if k == data['name'][-1]: res.update(v) return res ## qsignature def _run_qsignature_generator(bam_file, data, out_dir): """ Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary :param bam_file: (str) path of the bam_file :param data: (list) list containing the all the dictionary for this sample :param out_dir: (str) path of the output :returns: (dict) dict with the normalize vcf file """ position = dd.get_qsig_file(data) mixup_check = dd.get_mixup_check(data) if mixup_check and mixup_check.startswith("qsignature"): if not position: logger.info("There is no qsignature for this species: %s" % tz.get_in(['genome_build'], data)) return {} jvm_opts = "-Xms750m -Xmx2g" limit_reads = 20000000 if mixup_check == "qsignature_full": slice_bam = bam_file jvm_opts = "-Xms750m -Xmx8g" limit_reads = 100000000 else: slice_bam = _slice_chr22(bam_file, data) qsig = config_utils.get_program("qsignature", data["config"]) if not qsig: return {} utils.safe_makedir(out_dir) out_name = os.path.basename(slice_bam).replace("bam", "qsig.vcf") out_file = os.path.join(out_dir, out_name) log_file = os.path.join(out_dir, "qsig.log") cores = dd.get_cores(data) base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureGenerator " "--noOfThreads {cores} " "-log {log_file} -i {position} " "-i {down_file} ") if not os.path.exists(out_file): down_file = bam.downsample(slice_bam, data, limit_reads) if not down_file: down_file = slice_bam file_qsign_out = "{0}.qsig.vcf".format(down_file) do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % data["name"][-1]) if os.path.exists(file_qsign_out): with file_transaction(data, out_file) as file_txt_out: shutil.move(file_qsign_out, file_txt_out) else: raise IOError("File doesn't exist %s" % file_qsign_out) return {'qsig_vcf': out_file} return {} def qsignature_summary(*samples): """Run SignatureCompareRelatedSimple module from qsignature tool. 
Creates a matrix of pairwise comparison among samples. The function will not run if the output exists :param samples: list with only one element containing all samples information :returns: (dict) with the path of the output to be joined to summary """ warnings, similar = [], [] qsig = config_utils.get_program("qsignature", samples[0][0]["config"]) if not qsig: return [[]] jvm_opts = "-Xms750m -Xmx8g" work_dir = samples[0][0]["dirs"]["work"] count = 0 for data in samples: data = data[0] vcf = tz.get_in(["summary", "metrics", "qsig_vcf"], data) if vcf: count += 1 vcf_name = data["name"][-1] + ".qsig.vcf" out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature")) if not os.path.lexists(os.path.join(out_dir, vcf_name)): os.symlink(vcf, os.path.join(out_dir, vcf_name)) if count > 0: qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature")) out_file = os.path.join(qc_out_dir, "qsignature.xml") out_ma_file = os.path.join(qc_out_dir, "qsignature.ma") out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings") log = os.path.join(work_dir, "qsignature", "qsig-summary.log") if not os.path.exists(out_file): with file_transaction(samples[0][0], out_file) as file_txt_out: base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureCompareRelatedSimple " "-log {log} -dir {out_dir} " "-o {file_txt_out} ") do.run(base_cmd.format(**locals()), "qsignature score calculation") error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file, out_warn_file, samples[0][0]) return [{'total samples': count, 'similar samples pairs': len(similar), 'warnings samples pairs': len(warnings), 'error samples': list(error), 'out_dir': qc_out_dir}] else: return [] def _parse_qsignature_output(in_file, out_file, warning_file, data): """ Parse xml file produced by qsignature :param in_file: (str) with the path to the xml file :param out_file: (str) with the path to output file :param warning_file: (str) with the path to warning file :returns: (list) with samples that could be duplicated """ name = {} error, warnings, similar = set(), set(), set() same, replicate, related = 0, 0.1, 0.18 mixup_check = dd.get_mixup_check(data) if mixup_check == "qsignature_full": same, replicate, related = 0, 0.01, 0.061 with open(in_file, 'r') as in_handle: with file_transaction(data, out_file) as out_tx_file: with file_transaction(data, warning_file) as warn_tx_file: with open(out_tx_file, 'w') as out_handle: with open(warn_tx_file, 'w') as warn_handle: et = lxml.etree.parse(in_handle) for i in list(et.iter('file')): name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "") for i in list(et.iter('comparison')): msg = None pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]]) out_handle.write("%s\t%s\t%s\n" % (name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score'])) if float(i.attrib['score']) == same: msg = 'qsignature ERROR: read same samples:%s\n' error.add(pair) elif float(i.attrib['score']) < replicate: msg = 'qsignature WARNING: read similar/replicate samples:%s\n' warnings.add(pair) elif float(i.attrib['score']) < related: msg = 'qsignature NOTE: read relative samples:%s\n' similar.add(pair) if msg: logger.info(msg % pair) warn_handle.write(msg % pair) return error, warnings, similar def _slice_chr22(in_bam, data): """ return only one BAM file with only chromosome 22 """ sambamba = config_utils.get_program("sambamba", data["config"]) out_file = "%s-chr%s" % os.path.splitext(in_bam) if not utils.file_exists(out_file): bam.index(in_bam, data['config']) 
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile: bam_contigs = [c["SN"] for c in bamfile.header["SQ"]] chromosome = "22" if "chr22" in bam_contigs: chromosome = "chr22" with file_transaction(data, out_file) as tx_out_file: cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals()) out = subprocess.check_output(cmd, shell=True) return out_file
mit
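A standalone sketch of the kind of text parsing _parse_bamtools_stats performs in the record above: splitting "Metric: value (pct)" lines into a flat metrics dict. The input string below is made up; only the parsing pattern mirrors the code.

def parse_stats_text(text):
    """Parse 'Metric: value (pct)' lines into a flat dict, following the
    pattern used by _parse_bamtools_stats above."""
    out = {}
    for line in text.splitlines():
        parts = line.split(":")
        if len(parts) != 2:
            continue
        metric = parts[0].strip()
        stat_parts = parts[1].split()
        if not stat_parts:
            continue
        out[metric] = stat_parts[0]
        if len(stat_parts) == 2:
            out["%s pct" % metric] = stat_parts[1].strip("()")
    return out

example = "Total reads: 1000\nMapped reads: 950 (95%)\nDuplicates: 12 (1.2%)"
print(parse_stats_text(example))
# {'Total reads': '1000', 'Mapped reads': '950', 'Mapped reads pct': '95%', ...}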