repo_name: stringlengths 6-92
path: stringlengths 4-191
copies: stringclasses (322 values)
size: stringlengths 4-6
content: stringlengths 821-753k
license: stringclasses (15 values)
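The six fields above describe one record per source file: repository name, file path, number of copies, file size, the raw file content, and its license. A minimal sketch of iterating over records with this schema, assuming (hypothetically) that the rows are exported as JSON Lines to a local file named code_samples.jsonl:

import json

# Hypothetical export of the records above as JSON Lines, one record per line.
with open("code_samples.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # Each record carries: repo_name, path, copies, size, content, license.
        if row["license"] == "bsd-3-clause":
            print(row["repo_name"], row["path"], row["size"])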
andipeng/MagnePlane
paper/images/trade_scripts/boundary_layer_growth_plot.py
4
1083
import numpy as np
import matplotlib.pyplot as plt

delta_star = np.loadtxt('../data_files/boundary_layer_growth_trades/delta_star.txt', delimiter='\t')
A_tube = np.loadtxt('../data_files/boundary_layer_growth_trades/A_tube.txt', delimiter='\t')

fig = plt.figure(figsize=(3.25, 3.5), tight_layout=True)
ax = plt.axes()
plt.setp(ax.get_xticklabels(), fontsize=8)
plt.setp(ax.get_yticklabels(), fontsize=8)

# plt.hold() was removed in Matplotlib 3.0; repeated plot calls accumulate by default.
line1, = plt.plot(delta_star, A_tube[0, :], 'b-', linewidth=2.0, label='A_pod = 2.0 $m^2$')
line2, = plt.plot(delta_star, A_tube[1, :], 'r-', linewidth=2.0, label='A_pod = 2.5 $m^2$')
line3, = plt.plot(delta_star, A_tube[2, :], 'g-', linewidth=2.0, label='A_pod = 3.0 $m^2$')

plt.xlabel('Boundary Layer Thickness (m)', fontsize=8, fontweight='bold')
plt.ylabel('Tube Area ($m^2$)', fontsize=10, fontweight='bold')
plt.grid(True)
plt.xlim(.02, .12)
plt.legend(handles=[line1, line2, line3], loc=2, fontsize=8)

plt.savefig('../graphs/boundary_layer_growth_trades/Tube_Area_vs_boundary_layer.png', format='png', dpi=300)
plt.show()
apache-2.0
voxlol/scikit-learn
benchmarks/bench_plot_lasso_path.py
301
4003
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) #ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) #ax.legend() i += 1 plt.show()
bsd-3-clause
huzq/scikit-learn
examples/linear_model/plot_ridge_coeffs.py
157
2785
""" ============================================================== Plot Ridge coefficients as a function of the L2 regularization ============================================================== .. currentmodule:: sklearn.linear_model :class:`Ridge` Regression is the estimator used in this example. Each color in the left plot represents one different dimension of the coefficient vector, and this is displayed as a function of the regularization parameter. The right plot shows how exact the solution is. This example illustrates how a well defined solution is found by Ridge regression and how regularization affects the coefficients and their values. The plot on the right shows how the difference of the coefficients from the estimator changes as a function of regularization. In this example the dependent variable Y is set as a function of the input features: y = X*w + c. The coefficient vector w is randomly sampled from a normal distribution, whereas the bias term c is set to a constant. As alpha tends toward zero the coefficients found by Ridge regression stabilize towards the randomly sampled vector w. For big alpha (strong regularisation) the coefficients are smaller (eventually converging at 0) leading to a simpler and biased solution. These dependencies can be observed on the left plot. The right plot shows the mean squared error between the coefficients found by the model and the chosen vector w. Less regularised models retrieve the exact coefficients (error is equal to 0), stronger regularised models increase the error. Please note that in this example the data is non-noisy, hence it is possible to extract the exact coefficients. """ # Author: Kornel Kielczewski -- <[email protected]> print(__doc__) import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_regression from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error clf = Ridge() X, y, w = make_regression(n_samples=10, n_features=10, coef=True, random_state=1, bias=3.5) coefs = [] errors = [] alphas = np.logspace(-6, 6, 200) # Train the model with different regularisation strengths for a in alphas: clf.set_params(alpha=a) clf.fit(X, y) coefs.append(clf.coef_) errors.append(mean_squared_error(clf.coef_, w)) # Display results plt.figure(figsize=(20, 6)) plt.subplot(121) ax = plt.gca() ax.plot(alphas, coefs) ax.set_xscale('log') plt.xlabel('alpha') plt.ylabel('weights') plt.title('Ridge coefficients as a function of the regularization') plt.axis('tight') plt.subplot(122) ax = plt.gca() ax.plot(alphas, errors) ax.set_xscale('log') plt.xlabel('alpha') plt.ylabel('error') plt.title('Coefficient error as a function of the regularization') plt.axis('tight') plt.show()
bsd-3-clause
CallaJun/hackprince
indico/matplotlib/units.py
10
6146
""" The classes here provide support for using custom classes with matplotlib, e.g., those that do not expose the array interface but know how to converter themselves to arrays. It also supoprts classes with units and units conversion. Use cases include converters for custom objects, e.g., a list of datetime objects, as well as for objects that are unit aware. We don't assume any particular units implementation, rather a units implementation must provide a ConversionInterface, and the register with the Registry converter dictionary. For example, here is a complete implementation which supports plotting with native datetime objects:: import matplotlib.units as units import matplotlib.dates as dates import matplotlib.ticker as ticker import datetime class DateConverter(units.ConversionInterface): @staticmethod def convert(value, unit, axis): 'convert value to a scalar or array' return dates.date2num(value) @staticmethod def axisinfo(unit, axis): 'return major and minor tick locators and formatters' if unit!='date': return None majloc = dates.AutoDateLocator() majfmt = dates.AutoDateFormatter(majloc) return AxisInfo(majloc=majloc, majfmt=majfmt, label='date') @staticmethod def default_units(x, axis): 'return the default unit for x or None' return 'date' # finally we register our object type with a converter units.registry[datetime.date] = DateConverter() """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six from matplotlib.cbook import iterable, is_numlike import numpy as np class AxisInfo: """information to support default axis labeling and tick labeling, and default limits""" def __init__(self, majloc=None, minloc=None, majfmt=None, minfmt=None, label=None, default_limits=None): """ majloc and minloc: TickLocators for the major and minor ticks majfmt and minfmt: TickFormatters for the major and minor ticks label: the default axis label default_limits: the default min, max of the axis if no data is present If any of the above are None, the axis will simply use the default """ self.majloc = majloc self.minloc = minloc self.majfmt = majfmt self.minfmt = minfmt self.label = label self.default_limits = default_limits class ConversionInterface: """ The minimal interface for a converter to take custom instances (or sequences) and convert them to values mpl can use """ @staticmethod def axisinfo(unit, axis): 'return an units.AxisInfo instance for axis with the specified units' return None @staticmethod def default_units(x, axis): 'return the default unit for x or None for the given axis' return None @staticmethod def convert(obj, unit, axis): """ convert obj using unit for the specified axis. If obj is a sequence, return the converted sequence. The ouput must be a sequence of scalars that can be used by the numpy array layer """ return obj @staticmethod def is_numlike(x): """ The matplotlib datalim, autoscaling, locators etc work with scalars which are the units converted to floats given the current unit. The converter may be passed these floats, or arrays of them, even when units are set. Derived conversion interfaces may opt to pass plain-ol unitless numbers through the conversion interface and this is a helper function for them. 
""" if iterable(x): for thisx in x: return is_numlike(thisx) else: return is_numlike(x) class Registry(dict): """ register types with conversion interface """ def __init__(self): dict.__init__(self) self._cached = {} def get_converter(self, x): 'get the converter interface instance for x, or None' if not len(self): return None # nothing registered #DISABLED idx = id(x) #DISABLED cached = self._cached.get(idx) #DISABLED if cached is not None: return cached converter = None classx = getattr(x, '__class__', None) if classx is not None: converter = self.get(classx) if isinstance(x, np.ndarray) and x.size: xravel = x.ravel() try: # pass the first value of x that is not masked back to # get_converter if not np.all(xravel.mask): # some elements are not masked converter = self.get_converter( xravel[np.argmin(xravel.mask)]) return converter except AttributeError: # not a masked_array # Make sure we don't recurse forever -- it's possible for # ndarray subclasses to continue to return subclasses and # not ever return a non-subclass for a single element. next_item = xravel[0] if (not isinstance(next_item, np.ndarray) or next_item.shape != x.shape): converter = self.get_converter(next_item) return converter if converter is None and iterable(x): for thisx in x: # Make sure that recursing might actually lead to a solution, # if we are just going to re-examine another item of the same # kind, then do not look at it. if classx and classx != getattr(thisx, '__class__', None): converter = self.get_converter(thisx) return converter #DISABLED self._cached[idx] = converter return converter registry = Registry()
lgpl-3.0
tiw51/DeepPurple
Stock_Programs/Webscraping/webScrapingTutorial.py
1
3660
# importing libraries for web scraping data
# beautiful soup
import bs4 as bs
# this serializes any python object
import pickle
import requests
import datetime as dt
import os
import pandas as pd
import pandas_datareader.data as web
import time

# api key, quandl: ZHafX6HMyxi4K9mcdAEM


# example code to get the list of S&P 500 tickers
def save_sp500_tickers():
    resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    # create a BeautifulSoup object; resp.text is the page source,
    # parsed with the standard html parser
    soup = bs.BeautifulSoup(resp.text, 'html.parser')
    # find the specific table tag in the page,
    # narrowing the search with a specific class
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    # [1:] skips the table header row; tr is a table row
    for row in table.findAll('tr')[1:]:
        # zeroth column holds the ticker symbol
        ticker = row.findAll('td')[0].text
        tickers.append(ticker)

    with open("sp500tickers.pickle", "wb") as f:
        # open a file and dump the tickers into it
        pickle.dump(tickers, f)
    # check that the tickers were collected correctly
    print(tickers)
    return tickers

save_sp500_tickers()


def get_data_from_google(reload_sp500=False):
    if reload_sp500:
        tickers = save_sp500_tickers()
    else:
        with open("sp500tickers.pickle", "rb") as f:
            tickers = pickle.load(f)

    # create the output directory if it does not already exist
    if not os.path.exists('stock_dfs'):
        os.makedirs('stock_dfs')

    start = dt.datetime(2005, 1, 1)
    end = dt.datetime(2017, 6, 16)

    total = 0
    for ticker in tickers:
        # report progress while downloading each ticker
        total += 1
        print('Printing Google Finance results for ', ticker)
        if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
            if ticker == 'LMT' or ticker == 'NWL':
                # these tickers fail to download
                print(ticker, ' is not reading data')
            else:
                df = web.DataReader(ticker, 'google', start, end)
                df.to_csv('stock_dfs/{}.csv'.format(ticker))
        else:
            print('Already have {}'.format(ticker))

get_data_from_google()


def compiledata():
    with open("sp500tickers.pickle", "rb") as f:
        tickers = pickle.load(f)

    # begin dataframe
    main_df = pd.DataFrame()

    # iterate through the tickers
    tickers_that_dont_work = {'ALGN', 'RE', 'HLT', 'LMT', 'NWL', 'NBL', 'NSC', 'NOC'}
    for count, ticker in enumerate(tickers):
        # count lets us know where we are in the list
        df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))
        df.set_index('Date', inplace=True)
        # rename Adj Close to the ticker symbol
        df.rename(columns={'Adj Close': ticker}, inplace=True)
        df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)

        if main_df.empty:
            main_df = df
        else:
            main_df = main_df.join(df, how='outer')

        if count % 10 == 0:
            print(count)

    print(main_df.head())
    main_df.to_csv('sp500_joined_closes.csv')

compiledata()
apache-2.0
jakobkolb/MayaSim
Experiments/mayasim_X7_multiple_droughts.py
1
7457
""" Experiment to test the influence of drought events. Drought events start once the civilisation has reached a 'complex society' state and vary in length and severity. Therefore, starting point is at t = 150 where the model has reached a complex society state in all previous studies. We also use parameters for income from trade, agriculture and ecosystem services, that have previously proven to lead to some influence of precipitation variability on the state of the system. """ from __future__ import print_function try: import cPickle as cp except ImportError: import pickle as cp import getpass import itertools as it import numpy as np import sys import pandas as pd from pymofa.experiment_handling import experiment_handling as eh from mayasim.model.ModelCore import ModelCore as Model from mayasim.model.ModelParameters import ModelParameters as Parameters test = True def run_function(d_severity=50., r_bca=0.2, r_es=0.0002, r_trade=6000, population_control=False, n=30, crop_income_mode='sum', better_ess=True, kill_cropless=False, steps=900, filename='./'): """ Set up the Model for different Parameters and determine which parts of the output are saved where. Output is saved in pickled dictionaries including the initial values and Parameters, as well as the time development of aggregated variables for each run. Parameters: ----------- d_times: list of lists list of list of start and end dates of droughts d_severity : float severity of drought (decrease in rainfall in percent) r_bca : float > 0 the prefactor for income from agriculture r_es : float the prefactor for income from ecosystem services r_trade : float the prefactor for income from trade population_control : boolean determines whether the population grows unbounded or if population growth decreases with income per capita and population density. n : int > 0 initial number of settlements on the map crop_income_mode : string defines the mode of crop income calculation. possible values are 'sum' and 'mean' better_ess : bool switch to use forest as proxy for income from eco system services from net primary productivity. kill_cropless: bool Switch to determine whether or not to kill cities without cropped cells. filename: string path to save the results to. """ # initialize the Model d_times = [[0, 2], [200, 220], [225, 245], [250, 270], [275, 295]] m = Model(n, output_data_location=filename, debug=test) if not filename.endswith('s0.pkl'): m.output_geographic_data = False m.output_settlement_data = False m.population_control = population_control m.crop_income_mode = crop_income_mode m.better_ess = better_ess m.r_bca_sum = r_bca m.r_es_sum = r_es m.r_trade = r_trade m.kill_cities_without_crops = kill_cropless m.precipitation_modulation = False m.drought_times = d_times m.drought_severity = d_severity # store initial conditions and Parameters res = {"initials": pd.DataFrame({"Settlement X Possitions": m.settlement_positions[0], "Settlement Y Possitions": m.settlement_positions[1], "Population": m.population}), "Parameters": pd.Series({key: getattr(m, key) for key in dir(Parameters) if not key.startswith('__') and not callable(key)})} # run Model if test: m.run(3) else: m.run(steps) # Retrieve results res["trajectory"] = m.get_trajectory() try: with open(filename, 'wb') as dumpfile: cp.dump(res, dumpfile) return 1 except IOError: return -1 def run_experiment(argv): """ Take arv input variables and run sub_experiment accordingly. 
This happens in five steps: 1) parse input arguments to set switches for [test], 2) set output folders according to switches, 3) generate parameter combinations, 4) define names and dictionaries of callables to apply to sub_experiment data for post processing, 5) run computation and/or post processing and/or plotting depending on execution on cluster or locally or depending on experimentation mode. Parameters ---------- argv: list[N] List of parameters from terminal input Returns ------- rt: int some return value to show whether sub_experiment succeeded return 1 if sucessfull. """ global test # Parse switches from input if len(argv) > 1: test = int(argv[1]) # Generate paths according to switches and user name test_folder = ['', 'test_output/'][int(test)] experiment_folder = 'X7_multiple_droughts/' raw = 'raw_data/' res = 'results/' if getpass.getuser() == "kolb": save_path_raw = "/p/tmp/kolb/Mayasim/output_data/{}{}{}".format( test_folder, experiment_folder, raw) save_path_res = "/home/kolb/Mayasim/output_data/{}{}{}".format( test_folder, experiment_folder, res) elif getpass.getuser() == "jakob": save_path_raw = \ "/home/jakob/Project_MayaSim/Python/" \ "output_data/{}{}{}".format(test_folder, experiment_folder, raw) save_path_res = \ "/home/jakob/Project_MayaSim/Python/" \ "output_data/{}{}{}".format(test_folder, experiment_folder, res) else: save_path_res = './{}'.format(res) save_path_raw = './{}'.format(raw) # Generate parameter combinations index = {0: "d_severity", 1: "r_trade"} if test == 0: d_severity = [0., 20., 40., 60.] r_trade = [6000, 7000, 8000, 10000] test = False else: d_severity = [0., 60.] r_trade = [6000,] test = True param_combs = list(it.product(d_severity, r_trade)) sample_size = 50 if not test else 2 # Define names and callables for post processing name = "trajectory" estimators = {"<mean_trajectories>": lambda fnames: pd.concat([np.load(f)["trajectory"] for f in fnames]).groupby(level=0).mean(), "<sigma_trajectories>": lambda fnames: pd.concat([np.load(f)["trajectory"] for f in fnames]).groupby(level=0).std() } name2 = "all_trajectories" estimators2 = {"trajectory_list": lambda fnames: [np.load(f)["trajectory"] for f in fnames]} # Run computation and post processing. handle = eh(sample_size=sample_size, parameter_combinations=param_combs, index=index, path_raw=save_path_raw, path_res=save_path_res, use_kwargs=True) handle.compute(run_func=run_function) handle.resave(eva=estimators, name=name) handle.resave(eva=estimators2, name=name2) return 1 if __name__ == '__main__': run_experiment(sys.argv)
gpl-3.0
HPCC-Cloud-Computing/press
prediction/CNN4Predict/cnn.py
1
3736
from __future__ import print_function, division

import numpy as np
from utils import get_data, compared_diagram
from keras.layers import Convolution1D, Dense, MaxPooling1D, Flatten
from keras.models import Sequential
from sklearn.metrics import mean_squared_error

# Parameters related to the neural network
NUMBER_TIME_SERIES = 1
NUMBER_OUTPUTS = 1
NUMBER_FEATURE_MAPS = 4
WINDOW_SIZE = 30
NUMBER_NEURAL_PER_LAYER = 5
BATCH_SIZE = 10
NUMBER_EPOCH = 200

# Parameters related to the input data
INTERVAL_BY_SECOND = 600
PEAK_PERCENT = 99
START_DAY = 6
END_DAY = 10

# K-shift data parameter
SHIFT_INDEX = 1


# Set up the neural network (written against the Keras 1 layer API:
# Convolution1D / nb_filter / filter_length / nb_epoch)
def neural_network(window_size, filter_length,
                   nb_input_series=NUMBER_TIME_SERIES,
                   nb_outputs=NUMBER_OUTPUTS,
                   nb_filter=NUMBER_FEATURE_MAPS):
    model = Sequential((
        Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                      activation='relu',
                      input_shape=(window_size, nb_input_series)),
        MaxPooling1D(),
        Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                      activation='relu'),
        MaxPooling1D(),
        Flatten(),
        Dense(nb_outputs, activation='linear'),
    ))
    # Use the Adam optimizer to minimize the MSE
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    return model


# Create inputs and outputs from the original time series
def make_timeseries_instances(time_series, window_size):
    time_series = np.asarray(time_series)
    assert 0 < window_size < time_series.shape[0]
    x = np.atleast_3d(np.array([time_series[start:start + window_size]
                                for start in range(0, time_series.shape[0] - window_size)]))
    y = time_series[window_size + SHIFT_INDEX:]
    q = np.atleast_3d([time_series[-window_size:]])
    return x, y, q


# Train on the data
def evaluate_timeseries(time_series, window_size):
    filter_length = NUMBER_NEURAL_PER_LAYER
    nb_filter = NUMBER_FEATURE_MAPS
    time_series = np.atleast_2d(time_series)
    if time_series.shape[0] == 1:
        time_series = time_series.T

    nb_samples, nb_series = time_series.shape

    model = neural_network(window_size=window_size,
                           filter_length=filter_length,
                           nb_input_series=nb_series,
                           nb_outputs=nb_series,
                           nb_filter=nb_filter)
    model.summary()

    x, y, q = make_timeseries_instances(time_series, window_size)
    test_size = int(0.2 * nb_samples)
    x_train, x_test, y_train, y_test = x[:-test_size], x[-test_size:], y[:-test_size], y[-test_size:]
    model.fit(x_train, y_train, nb_epoch=NUMBER_EPOCH, batch_size=BATCH_SIZE,
              validation_data=(x_test, y_test))

    predicted_time_series = []
    pred = model.predict(x_test)
    print('\n\nactual', 'predicted', sep='\t')
    for actual, predicted in zip(y_test, pred.squeeze()):
        print(actual.squeeze(), predicted, sep='\t')
        predicted_time_series.append(predicted)
    print('next', model.predict(q).squeeze(), sep='\t')
    return predicted_time_series


def main():
    # Declare the CNN parameters
    np.set_printoptions(threshold=25)
    # Load the data
    time_series = get_data(START_DAY, END_DAY, INTERVAL_BY_SECOND)
    predicted_time_series = evaluate_timeseries(time_series, WINDOW_SIZE)
    actual_time_series = get_data(END_DAY, END_DAY, INTERVAL_BY_SECOND)
    # Write the RMSE value to a file (CNN)
    rmse = np.sqrt(mean_squared_error(predicted_time_series, actual_time_series))
    f = open("evaluate_result/mse" + str(WINDOW_SIZE) + ".txt", "w")
    f.write("Window size " + str(WINDOW_SIZE) + " : " + str(rmse) + "\n")
    f.close()
    # Plot the comparison diagram
    compared_diagram(predicted_time_series, actual_time_series, WINDOW_SIZE)


if __name__ == '__main__':
    main()
mit
RaoUmer/db.py
db/tests/tests.py
5
4243
import pandas as pd from db import DemoDB, DB, list_profiles, remove_profile import unittest class PandaSQLTest(unittest.TestCase): def setUp(self): self.db = DemoDB() def test_query_rowsum(self): df = self.db.query("select * from Artist;") self.assertEqual(len(df), 275) def test_query_groupby(self): q = "select AlbumId, sum(1) from Track group by 1" df = self.db.query(q) self.assertEqual(len(df), 347) def test_query_from_file_rowsum(self): with open("db/tests/testscript.sql", "w") as f: f.write("select * from Artist;") df = self.db.query_from_file("db/tests/testscript.sql") self.assertEqual(len(df), 275) def test_add_profile(self): profiles = list_profiles() self.db.save_credentials(profile="test_profile") self.assertEqual(len(profiles)+1, len(list_profiles())) remove_profile("test_profile") def test_remove_profile(self): profiles = list_profiles() self.db.save_credentials(profile="test_profile") self.assertEqual(len(profiles)+1, len(list_profiles())) remove_profile("test_profile") def test_list_profiles(self): self.db.save_credentials(profile="test_profile") self.assertTrue(len(list_profiles()) > 0) remove_profile("test_profile") def test_table_head(self): self.assertEqual(len(self.db.tables.Artist.head()), 6) def test_table_all(self): self.assertEqual(len(self.db.tables.Artist.all()), 275) def test_table_select(self): df = self.db.tables.Artist.select("ArtistId", "Name") self.assertEqual(df.shape, (275, 2)) def test_table_sample(self): df = self.db.tables.Artist.sample(n=10) self.assertEqual(len(df), 10) def test_table_uniqe(self): df = self.db.tables.Track.unique("GenreId", "MediaTypeId") self.assertEqual(len(df), 38) def test_column_head(self): col = self.db.tables.Track.TrackId.head() self.assertEqual(len(col), 6) def test_column_all(self): col = self.db.tables.Track.TrackId.all() self.assertEqual(len(col), 3503) def test_column_sample(self): col = self.db.tables.Track.TrackId.sample(n=10) self.assertEqual(len(col), 10) def test_column_unique(self): col = self.db.tables.Customer.Country.unique() self.assertEqual(len(col), 24) def test_table_keys_per_column(self): short_db = DemoDB(keys_per_column=1) self.assertEqual("""+----------------------------------------------------------------------------------------+ | Track | +--------------+---------------+-----------------------+---------------------------------+ | Column | Type | Foreign Keys | Reference Keys | +--------------+---------------+-----------------------+---------------------------------+ | TrackId | INTEGER | | InvoiceLine.TrackId, (+ 1 more) | | Name | NVARCHAR(200) | | | | AlbumId | INTEGER | Album.AlbumId | | | MediaTypeId | INTEGER | MediaType.MediaTypeId | | | GenreId | INTEGER | Genre.GenreId | | | Composer | NVARCHAR(220) | | | | Milliseconds | INTEGER | | | | Bytes | INTEGER | | | | UnitPrice | NUMERIC(10,2) | | | +--------------+---------------+-----------------------+---------------------------------+""".strip(), '{0}'.format(short_db.tables.Track.__repr__()).strip()) def tearDown(self): pass def test_table_count_rows(self): count = self.db.tables.Invoice.count self.assertEqual(count, 412) if __name__ == "__main__": unittest.main()
bsd-2-clause
areeda/gwpy
gwpy/conftest.py
3
1461
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy.  If not, see <http://www.gnu.org/licenses/>.

"""Test setup for gwpy
"""

import warnings

import numpy

from matplotlib import (use, rcParams)

# force Agg for all tests
use('agg', force=True)

# register custom fixtures for all test modules
from .testing.fixtures import *  # noqa: E402,F401,F403

# set random seed to 1 for reproducibility
numpy.random.seed(1)

# -- plotting options

# ignore errors from pyplot.show() using Agg
warnings.filterwarnings('ignore', message=".*non-GUI backend.*")

# force simpler rcParams for all tests
# (fixtures or tests may update these individually)
# NOTE: this most likely happens _after_ gwpy.plot has
#       updated the rcParams once, so these settings should persist
rcParams.update({
    'text.usetex': False,  # TeX is slow most of the time
})
gpl-3.0
daskol/ml-cipher-cracker
bigram_model-Copy0 (1).py
3
10635
# coding: utf-8 # In[15]: import numpy as np import math import matplotlib.pyplot as plt import random from numpy.random import rand # read text # In[1]: def read_text_words(filename, wordsnumber): with open(filename) as f: X = f.readlines() wordsnumber = len(X) X = ''.join(X) X = X.replace('\n', '{') #123 return X def read_text_whole(filename): with open(filename) as f: X = f.read() X = X.replace('\n', '{') #123 return X def chop_text_to_size(text, size): return text[:1024*1024*size] def read_text_filesize(filename, size): with open(filename) as f: X = f.read(1024*1024*size) X = X.replace('\n', '{') #123 return X # counts # In[3]: def get_unicount(text): length = len(text) counts = np.zeros(27) for i in xrange(length): c = ord(text[i]) counts[c-97]+=1 #97-122, 123 - word delimiter return counts[:26] # bigram statistics # In[4]: def get_bigram_stats_dic(text): length = len(text) dic = {} for i in xrange(length-1): if ord(text[i]) == 123 or ord(text[i+1]) == 123: continue if (text[i], text[i+1]) in dic: dic[(text[i], text[i+1])] += 1 else: dic[(text[i], text[i+1])] = 1 for k,v in dic.items(): r = 0 if (k[0],'{') in dic.keys(): r = dic[(k[0],'{')] dic[k] = v/(sum(stats)) return dic # quality # In[5]: def quality(decrypted, original): l = len(decrypted) zipped = zip(decrypted, original) return sum(1 for x,y in zipped if x != y)/l # crypt # In[6]: def crypt(text): p = range(26) random.shuffle(p) output='' p.append(26) for ch in text: try: x = ord(ch) - ord('a') output+=(chr(p[x] + ord('a'))) except: pass return output, p # metropolis and density maximization # In[68]: # from random import random """ This module implements algorithm of Metropolis-Hastings for random variable generation. The algorithm generates random variables from a desired distribution (which may be unnormalized). """ def metropolis( desiredPDF, initValue, computableRVS, skipIterations = 200 ): """ This function returns a generator, which generates random variables from some space S with a desired distribution using Metropolis-Hastings algorithm. Args: desiredPDF (func) : PDF of desired distribution p( T ), where T from S initValue : an object from S to initialize the starting point of iterative proccess computableRVS (func) : a generator of random value from space S with given parameter T, which is also from S skipIterations (int) : number of iterations to skip (skipping more iterations leads to better accuracy? 
but greater time consuming) Returns: generator, which produce some values from S and their denisity according to distribution desiredPDF """ random_variable = initValue random_variableDensityValue = desiredPDF( random_variable ) """ A state of MCMC """ #ignore first iterations to let the iterative proccess #converge to some distribution, which is close to desired for i in xrange( skipIterations ): candidate = computableRVS( random_variable ) print candidate candidateDensityValue = desiredPDF( candidate ) """ next candidate for sample, generated by computableRVS """ # acceptanceProb = min( 1, candidateDensityValue / random_variableDensityValue ) # logp is returnd by desiredPDF_bigram, so here is the change acceptanceProb = min( 0, candidateDensityValue - random_variableDensityValue ) """ probability to accept candidate to sample """ # acceptanceProb = math.exp(acceptanceProb) print acceptanceProb if math.log(random.random()) < acceptanceProb: random_variable = candidate random_variableDensityValue = candidateDensityValue #now when the procces is converged to desired distribution, #return acceptable candidates print "-----" while True: candidate = computableRVS( random_variable ) print candidate candidateDensityValue = desiredPDF( candidate ) """ next candidate for sample, generated by computableRVS """ # acceptanceProb = min( 1, candidateDensityValue / random_variableDensityValue ) # logp is returnd by desiredPDF_bigram, so here is the change acceptanceProb = min( 0, candidateDensityValue - random_variableDensityValue ) """ probability to accept candidate to sample """ print acceptanceProb # acceptanceProb = math.exp(acceptanceProb) if math.log(random.random()) < acceptanceProb: random_variable = candidate random_variableDensityValue = candidateDensityValue yield random_variable, random_variableDensityValue def densityMaximization( desiredPDF, initValue, computableRVS, skipIterations = 200 ): """ This function return a generator, which generates random variables from some space S by trying to maximize givven density. The algorithm is a modification of Metropolis-Hastings. It rejects all objects, which decrease density. Args: desiredPDF (func) : PDF of desired distribution p( T ), where T from S initValue : an object from S to initialize the starting point of iterative proccess computableRVS (func) : a generator of random value from space S with given parameter T, which is also from S skipIterations (int) : number of iterations to skip (skipping more iterations leads to better accuracy? 
but greater time consuming) Returns: generator, which produce some values from S, where each next value has no less density, and their denisity """ random_variable = initValue random_variableDensityValue = desiredPDF( random_variable ) """ A state of MCMC """ #ignore first iterations to let the iterative proccess to enter #the high density regions for i in xrange( skipIterations ): candidate = computableRVS( random_variable ) candidateDensityValue = desiredPDF( candidate ) """ next candidate for sample, generated by computableRVS """ if random_variableDensityValue < candidateDensityValue: print candidate print candidateDensityValue random_variable = candidate random_variableDensityValue = candidateDensityValue #now when the procces is in high density regions, #return acceptable candidates while True: candidate = computableRVS( random_variable ) candidateDensityValue = desiredPDF( candidate ) """ next candidate for sample, generated by computableRVS """ if random_variableDensityValue < candidateDensityValue: print candidate print candidateDensityValue random_variable = candidate random_variableDensityValue = candidateDensityValue yield random_variable, random_variableDensityValue # permutation generator and computablervs # In[8]: """ This module provide some functions, that generate random permutations with different distributions. There are a uniform distribution and a symmetric distribution, which depends on some other permutation. """ def uniform( n ): """ Generates random permutation using Knuth algorithm. Args: n (int) : length of permutation Returns: random permutation of length n from uniform distribution """ #initialize permutation with identical permutation = [ i for i in xrange( n ) ] #swap ith object with random onject from i to n - 1 enclusively for i in xrange( n ): j = random.randint( i, n - 1 ) permutation[ i ], permutation[ j ] = permutation[ j ], permutation[ i ] permutation.append(26) return permutation def applyedTranspostions( basePermutation ): """ This function returns random permutation by applying random transpositions to given permutation. The result distribution is not uniform and symmetric assuming parameter. Args: basePermutation (array) : parameter of distribution Returns: random permutation generated from basePermutation """ n = len( basePermutation) -1 """ length of permutation """ #apply n random transpositions (including identical) to base permutation for i in xrange( n ): k, l = random.randint( 0, n - 2 ), random.randint( 0, n - 2 ) basePermutation[ k ], basePermutation[ l ] = basePermutation[ l ], basePermutation[ k ] return basePermutation # desiredPDF # In[19]: def get_desiredPDF_bigram(permutation): logp = 0 for i in xrange(len(encrypted)-1): if (chr(permutation[ord(encrypted[i])-97]+97), chr(permutation[ord(encrypted[i+1])-97]+97)) in stats.keys(): logp += math.log(stats[(chr(permutation[ord(encrypted[i])-97]+97), chr(permutation[ord(encrypted[i+1])-97]+97))]) return logp ## Varying training text size # Fix large (e.g. 
5000 or more words) encrypted text and explore how the ratio of correctly decrypted symbols # depends on the size of training text (using the same number of MCMC iterations) ## TO BE DELETED # In[13]: #TEST TEXT fname = 'main/oliver_twist.txt' original = read_text_words(fname, 1000) encrypted, p = crypt(original) #TRAIN TEXT length = 575514 train_text = read_text_words('main/war_and_peace.txt', length) counts = get_unicount(train_text) stats = get_bigram_stats_dic(train_text) print p # In[69]: computableGen = lambda t: applyedTranspostions(t) init_p = uniform(26) metropolisgenerator = metropolis(get_desiredPDF_bigram, init_p, computableGen ) # densityMaximization(get_desiredPDF_bigram, init_p, computableGen ) x = [] for i in xrange( 10 ): x.append( metropolisgenerator.next()[0] ) # In[65]: for i in x: print i # In[62]: per = x[0] for i in xrange(len(per)): print (ord('a') + i) == (ord('a') + per[p[i]]) # In[ ]:
mit
henrystokeley/pgsheets
setup.py
1
1451
import re
from setuptools import setup, find_packages

if __name__ == '__main__':
    # get requirements
    with open('requirements.txt') as f:
        requirements = f.read()
    requirements = [r for r in requirements.splitlines() if r != '']

    # get readme
    with open('README.rst') as f:
        readme = f.read()

    # get version number
    with open('pgsheets/__init__.py') as f:
        version = f.read()
    version = re.search(
        r'^__version__\s*=\s*[\'"]([\d\.]*)[\'"]\s*$',
        version, re.MULTILINE).groups(1)[0]

    setup(name='pgsheets',
          version=version,
          packages=find_packages(exclude=['test', 'test.*']),
          author="Henry Stokeley",
          author_email="[email protected]",
          description=("Manipulate Google Sheets Using Pandas DataFrames"),
          long_description=readme,
          license="MIT",
          url="https://github.com/henrystokeley/pgsheets",
          install_requires=requirements,
          test_suite='test',
          classifiers=[
              'Development Status :: 3 - Alpha',
              'License :: OSI Approved :: MIT License',
              'Programming Language :: Python :: 3.4',
              'Topic :: Scientific/Engineering',
              'Topic :: Office/Business :: Financial :: Spreadsheet',
          ],
          keywords='pandas google sheets spreadsheets dataframe',
          )
mit
astropy/astropy
astropy/visualization/wcsaxes/transforms.py
8
5762
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Note: This file incldues code dervived from pywcsgrid2 # # This file contains Matplotlib transformation objects (e.g. from pixel to world # coordinates, but also world-to-world). import abc import numpy as np from matplotlib.path import Path from matplotlib.transforms import Transform from astropy import units as u from astropy.coordinates import (SkyCoord, frame_transform_graph, UnitSphericalRepresentation, BaseCoordinateFrame) __all__ = ['CurvedTransform', 'CoordinateTransform', 'World2PixelTransform', 'Pixel2WorldTransform'] class CurvedTransform(Transform, metaclass=abc.ABCMeta): """ Abstract base class for non-affine curved transforms """ input_dims = 2 output_dims = 2 is_separable = False def transform_path(self, path): """ Transform a Matplotlib Path Parameters ---------- path : :class:`~matplotlib.path.Path` The path to transform Returns ------- path : :class:`~matplotlib.path.Path` The resulting path """ return Path(self.transform(path.vertices), path.codes) transform_path_non_affine = transform_path def transform(self, input): raise NotImplementedError("") def inverted(self): raise NotImplementedError("") class CoordinateTransform(CurvedTransform): has_inverse = True def __init__(self, input_system, output_system): super().__init__() self._input_system_name = input_system self._output_system_name = output_system if isinstance(self._input_system_name, str): frame_cls = frame_transform_graph.lookup_name(self._input_system_name) if frame_cls is None: raise ValueError(f"Frame {self._input_system_name} not found") else: self.input_system = frame_cls() elif isinstance(self._input_system_name, BaseCoordinateFrame): self.input_system = self._input_system_name else: raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance") if isinstance(self._output_system_name, str): frame_cls = frame_transform_graph.lookup_name(self._output_system_name) if frame_cls is None: raise ValueError(f"Frame {self._output_system_name} not found") else: self.output_system = frame_cls() elif isinstance(self._output_system_name, BaseCoordinateFrame): self.output_system = self._output_system_name else: raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance") if self.output_system == self.input_system: self.same_frames = True else: self.same_frames = False @property def same_frames(self): return self._same_frames @same_frames.setter def same_frames(self, same_frames): self._same_frames = same_frames def transform(self, input_coords): """ Transform one set of coordinates to another """ if self.same_frames: return input_coords input_coords = input_coords*u.deg x_in, y_in = input_coords[:, 0], input_coords[:, 1] c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in), frame=self.input_system) # We often need to transform arrays that contain NaN values, and filtering # out the NaN values would have a performance hit, so instead we just pass # on all values and just ignore Numpy warnings with np.errstate(all='ignore'): c_out = c_in.transform_to(self.output_system) lon = c_out.spherical.lon.deg lat = c_out.spherical.lat.deg return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1) transform_non_affine = transform def inverted(self): """ Return the inverse of the transform """ return CoordinateTransform(self._output_system_name, self._input_system_name) class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta): """ Base transformation from world to pixel 
coordinates """ has_inverse = True frame_in = None @property @abc.abstractmethod def input_dims(self): """ The number of input world dimensions """ @abc.abstractmethod def transform(self, world): """ Transform world to pixel coordinates. You should pass in a NxM array where N is the number of points to transform, and M is the number of dimensions. This then returns the (x, y) pixel coordinates as a Nx2 array. """ @abc.abstractmethod def inverted(self): """ Return the inverse of the transform """ class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta): """ Base transformation from pixel to world coordinates """ has_inverse = True frame_out = None @property @abc.abstractmethod def output_dims(self): """ The number of output world dimensions """ @abc.abstractmethod def transform(self, pixel): """ Transform pixel to world coordinates. You should pass in a Nx2 array of (x, y) pixel coordinates to transform to world coordinates. This will then return an NxM array where M is the number of dimensions. """ @abc.abstractmethod def inverted(self): """ Return the inverse of the transform """
bsd-3-clause
theodoregoetz/clas12-dc-wiremap
clas12_wiremap/ui/wire_map.py
1
11849
import numpy as np from matplotlib import pyplot, cm, colors, colorbar from clas12_wiremap.cached_property import cached_property from clas12_wiremap.ui import QtCore, QtGui, FigureCanvas, Figure, \ NavigationToolbar class DCWireStack(QtGui.QStackedWidget): def __init__(self, parent=None): super(DCWireStack,self).__init__(parent) self.components = None self.wiremap = DCWirePlot(self) self.addWidget(self.wiremap) self.sec_wiremaps = [] for sec in range(6): self.sec_wiremaps.append(DCWireSectorPlot(sec,self)) self.addWidget(self.sec_wiremaps[sec]) def setCurrentIndex(self,*args,**kwargs): super(DCWireStack,self).setCurrentIndex(*args,**kwargs) self.update_active_plot() @property def data(self): return self.wiremap.data @data.setter def data(self,d): self.wiremap.data = d for sec in range(6): self.sec_wiremaps[sec].data = d[sec] self.update_active_plot() @property def mask(self): return self.wiremap.mask @mask.setter def mask(self,m): self.wiremap.mask = m for sec in range(6): self.sec_wiremaps[sec].mask = m[sec] self.update_active_plot() def update_active_plot(self): if super(DCWireStack,self).currentIndex() == 0: self.wiremap.update() self.wiremap.canvas.setFocus() else: sec = super(DCWireStack,self).currentIndex() - 1 self.sec_wiremaps[sec].update() self.sec_wiremaps[sec].canvas.setFocus() class DCWirePlot(QtGui.QWidget): def __init__(self, parent=None): super(DCWirePlot,self).__init__(parent) self.parent = parent self.fig = Figure((5.0, 4.0), dpi=100) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self) self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus) self.canvas.setFocus() self.toolbar = NavigationToolbar(self.canvas, self.parent) self.vbox = QtGui.QVBoxLayout(self) self.vbox.addWidget(self.canvas) self.vbox.addWidget(self.toolbar) self.setup_axes() self.setup_textbox() self.canvas.mpl_connect('motion_notify_event', self.mouse_move) def _transform_data(self,data): a = data.copy().reshape(6,6*6,112) a[3:,:,...] = a[3:,::-1,...] 
a.shape = (2,3,6,6,112) a = np.rollaxis(a,2,1) a = np.rollaxis(a,3,2) a = a.reshape(2*6*6,3*112) a = np.roll(a,6*6,axis=0) return a def clear(self): try: del self.masked_data except AttributeError: pass @property def data(self): try: return self._data except AttributeError: self._data = np.zeros((2*6*6,3*112)) return self._data @data.setter def data(self,data): self.clear() self._data = self._transform_data(data) self.update() @property def mask(self): try: return self._mask except AttributeError: self._mask = np.ones((2*6*6,3*112),dtype=np.bool) return self._mask @mask.setter def mask(self,mask): self.clear() self._mask = self._transform_data(mask) self.update() @cached_property def masked_data(self): return np.ma.array(self.data, mask=~self.mask) def update(self): self.im.set_data(self.masked_data) self.im.set_clim(vmin=np.nanmin(self.masked_data), vmax=np.nanmax(self.masked_data)) self.canvas.draw() def setup_axes(self): self.ax = self.fig.add_subplot(1,1,1) self.im = self.ax.imshow(np.zeros((2*6*6,3*112)), extent=[0,112*3,-6*6,6*6], vmin=0, vmax=1, aspect='auto', origin='lower', interpolation='nearest') self.ax.grid(True) _=self.ax.xaxis.set_ticks([0,112,112*2,112*3]) _=self.ax.xaxis.set_ticklabels([1,112,112,112]) yticks = np.linspace(-36,36,2*6+1,dtype=int) ylabels = abs(yticks) ylabels[len(ylabels)//2] = 1 _=self.ax.yaxis.set_ticks(list(yticks)) _=self.ax.yaxis.set_ticklabels([str(x) for x in ylabels]) for sec in range(6): _ = self.ax.text(0.34*(sec%3) + 0.1, 1.02 if sec<3 else -0.06, 'Sector {}'.format(sec+1), transform=self.ax.transAxes) self.cb = self.ax.figure.colorbar(self.im, ax=self.ax) def setup_textbox(self): # text location in fig coords self.txt = self.fig.text( 0.98, 0.98, '', ha = 'right', va = 'top', bbox = dict(alpha=0.6, color='white'), transform=self.fig.transFigure, family='monospace', zorder=100) self.msg = '''\ Sec: {sec: >1}, Slyr: {slyr: >1}, Lyr: {lyr: >1}, Wire: {wire: >3} Crate: {crate: >1}, Slot: {slot: >2}, Subslot: {subslot: >1}, Channel: {ch: >2} Distr Board: {dboard: <8}, Quad: {quad: >1}, Doublet: {doublet: >1} Trans Board: {tboard: >1}, Trans Board Half: {tboard_half: >1}''' def mouse_move(self, event): if not event.inaxes: return if self.parent.components is None: return x, y = int(event.xdata),abs(int(event.ydata)) if (x < 0) or (112*3 <= x) or (y < 0) or (6*6 <= y): return comp = self.parent.components wire = x%112 lyr = y%6 slyr = y//6 sec = (x//112) + (3 if event.ydata<0 else 0) point = (sec,slyr,lyr,wire) msgopts = dict( sec=sec+1,slyr=slyr+1,lyr=lyr+1,wire=wire+1, crate = comp.crate_id[point]+1, slot = comp.slot_id[point]+1, subslot = comp.subslot_id[point]+1, ch = comp.subslot_channel_id[point]+1, dboard = comp.distr_box_type[point], quad = comp.quad_id[point]+1, doublet = comp.doublet_id[point]+1, tboard = comp.trans_board_id[point]+1, tboard_half = comp.trans_board_slot_id[point]+1, ) self.txt.set_text(self.msg.format(**msgopts)) self.canvas.draw() class DCWireSectorPlot(QtGui.QWidget): def __init__(self,sec,parent=None): super(DCWireSectorPlot,self).__init__(parent) self.parent = parent self.sec = sec self.fig = Figure((5.0, 4.0), dpi=100) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self) self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus) self.canvas.setFocus() self.toolbar = NavigationToolbar(self.canvas, self.parent) self.vbox = QtGui.QVBoxLayout(self) self.vbox.addWidget(self.canvas) self.vbox.addWidget(self.toolbar) self.setup_axes() self.setup_textbox() self.canvas.mpl_connect('motion_notify_event', self.mouse_move) def 
_transform_data(self,data): return data.reshape(6*6,112) def clear(self): try: del self.masked_data except AttributeError: pass @property def data(self): try: return self._data except AttributeError: self._data = np.zeros((6*6,112)) return self._data @data.setter def data(self,data): self.clear() self._data = self._transform_data(data) self.update() @property def mask(self): try: return self._mask except AttributeError: self._mask = np.ones((6*6,112),dtype=np.bool) return self._mask @mask.setter def mask(self,mask): self.clear() self._mask = self._transform_data(mask) self.update() @cached_property def masked_data(self): return np.ma.array(self.data, mask=~self.mask) def update(self): self.im.set_data(self.masked_data) self.im.set_clim(vmin=np.nanmin(self.masked_data), vmax=np.nanmax(self.masked_data)) self.canvas.draw() def setup_axes(self): self.ax = self.fig.add_subplot(1,1,1) self.im = self.ax.imshow(np.zeros((6*6,112)), extent=[0,112,0,6*6], vmin=0, vmax=1, aspect='auto', origin='lower', interpolation='nearest') self.ax.grid(True) xticks = list(np.linspace(0,112,112//16+1,dtype=int)) xlabels = [str(x) for x in xticks] xlabels[0] = '1' yticks = list(np.linspace(0,36,36//6+1,dtype=int)) ylabels = [str(x) for x in yticks] ylabels[0] = '1' self.ax.xaxis.set_ticks(xticks) self.ax.xaxis.set_ticklabels(xlabels) self.ax.yaxis.set_ticks(yticks) self.ax.yaxis.set_ticklabels(ylabels) self.cb = self.ax.figure.colorbar(self.im, ax=self.ax) def setup_textbox(self): # text location in fig coords self.txt = self.fig.text( 0.98, 0.98, '', ha = 'right', va = 'top', bbox = dict(alpha=0.6, color='white'), transform=self.fig.transFigure, family='monospace', zorder=100) self.msg = '''\ Sec: {sec: >1}, Slyr: {slyr: >1}, Lyr: {lyr: >1}, Wire: {wire: >3} Crate: {crate: >1}, Slot: {slot: >2}, Subslot: {subslot: >1}, Channel: {ch: >2} Distr Board: {dboard: <8}, Quad: {quad: >1}, Doublet: {doublet: >1} Trans Board: {tboard: >1}, Trans Board Half: {tboard_half: >1}''' def mouse_move(self, event): if not event.inaxes: return if self.parent.components is None: return x, y = int(event.xdata),abs(int(event.ydata)) if (x < 0) or (112 <= x) or (y < 0) or (6*6 <= y): return comp = self.parent.components wire = x%112 lyr = y%6 slyr = y//6 sec = self.sec point = (sec,slyr,lyr,wire) msgopts = dict( sec=sec+1,slyr=slyr+1,lyr=lyr+1,wire=wire+1, crate = comp.crate_id[point]+1, slot = comp.slot_id[point]+1, subslot = comp.subslot_id[point]+1, ch = comp.subslot_channel_id[point]+1, dboard = comp.distr_box_type[point], quad = comp.quad_id[point]+1, doublet = comp.doublet_id[point]+1, tboard = comp.trans_board_id[point]+1, tboard_half = comp.trans_board_slot_id[point]+1, ) self.txt.set_text(self.msg.format(**msgopts)) self.canvas.draw() if __name__ == '__main__': ''' to run this, issue the following command: python3 -m clas12monitor.dc.plots ''' import sys from numpy import random as rand from clas12monitor.dc import dc_wire_occupancy, DCComponents class MainWindow(QtGui.QMainWindow): def __init__(self): super(MainWindow, self).__init__() wid = QtGui.QWidget() vbox = QtGui.QVBoxLayout() wid.setLayout(vbox) cbox = QtGui.QSpinBox() cbox.setMinimum(0) cbox.setMaximum(6) cbox.setSpecialValueText('-') stack = DCWireStack() stack.data = dc_wire_occupancy('exim1690.0001.recon') stack.components = DCComponents() stack.components.run = 1 stack.components.fetch_data() vbox.addWidget(cbox) vbox.addWidget(stack) self.setCentralWidget(wid) cbox.valueChanged.connect(stack.setCurrentIndex) self.show() app = QtGui.QApplication(sys.argv) 
main_window = MainWindow() sys.exit(app.exec_())
gpl-3.0
cbertinato/pandas
pandas/tests/generic/test_series.py
1
8580
from distutils.version import LooseVersion from operator import methodcaller import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import MultiIndex, Series, date_range import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_series_equal from .test_generic import Generic try: import xarray _XARRAY_INSTALLED = True except ImportError: _XARRAY_INSTALLED = False class TestSeries(Generic): _typ = Series _comparator = lambda self, x, y: assert_series_equal(x, y) def setup_method(self): self.ts = tm.makeTimeSeries() # Was at top level in test_series self.ts.name = 'ts' self.series = tm.makeStringSeries() self.series.name = 'series' def test_rename_mi(self): s = Series([11, 21, 31], index=MultiIndex.from_tuples( [("A", x) for x in ["a", "B", "c"]])) s.rename(str.lower) def test_set_axis_name(self): s = Series([1, 2, 3], index=['a', 'b', 'c']) funcs = ['rename_axis', '_set_axis_name'] name = 'foo' for func in funcs: result = methodcaller(func, name)(s) assert s.index.name is None assert result.index.name == name def test_set_axis_name_mi(self): s = Series([11, 21, 31], index=MultiIndex.from_tuples( [("A", x) for x in ["a", "B", "c"]], names=['l1', 'l2']) ) funcs = ['rename_axis', '_set_axis_name'] for func in funcs: result = methodcaller(func, ['L1', 'L2'])(s) assert s.index.name is None assert s.index.names == ['l1', 'l2'] assert result.index.name is None assert result.index.names, ['L1', 'L2'] def test_set_axis_name_raises(self): s = pd.Series([1]) with pytest.raises(ValueError): s._set_axis_name(name='a', axis=1) def test_get_numeric_data_preserve_dtype(self): # get the numeric data o = Series([1, 2, 3]) result = o._get_numeric_data() self._compare(result, o) o = Series([1, '2', 3.]) result = o._get_numeric_data() expected = Series([], dtype=object, index=pd.Index([], dtype=object)) self._compare(result, expected) o = Series([True, False, True]) result = o._get_numeric_data() self._compare(result, o) o = Series([True, False, True]) result = o._get_bool_data() self._compare(result, o) o = Series(date_range('20130101', periods=3)) result = o._get_numeric_data() expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object)) self._compare(result, expected) def test_nonzero_single_element(self): # allow single item via bool method s = Series([True]) assert s.bool() s = Series([False]) assert not s.bool() msg = "The truth value of a Series is ambiguous" # single item nan to raise for s in [Series([np.nan]), Series([pd.NaT]), Series([True]), Series([False])]: with pytest.raises(ValueError, match=msg): bool(s) msg = "bool cannot act on a non-boolean single element Series" for s in [Series([np.nan]), Series([pd.NaT])]: with pytest.raises(ValueError, match=msg): s.bool() # multiple bool are still an error msg = "The truth value of a Series is ambiguous" for s in [Series([True, True]), Series([False, False])]: with pytest.raises(ValueError, match=msg): bool(s) with pytest.raises(ValueError, match=msg): s.bool() # single non-bool are an error for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]: msg = "The truth value of a Series is ambiguous" with pytest.raises(ValueError, match=msg): bool(s) msg = "bool cannot act on a non-boolean single element Series" with pytest.raises(ValueError, match=msg): s.bool() def test_metadata_propagation_indiv(self): # check that the metadata matches up on the resulting ops o = Series(range(3), range(3)) o.name = 'foo' o2 = Series(range(3), range(3)) o2.name = 
'bar' result = o.T self.check_metadata(o, result) # resample ts = Series(np.random.rand(1000), index=date_range('20130101', periods=1000, freq='s'), name='foo') result = ts.resample('1T').mean() self.check_metadata(ts, result) result = ts.resample('1T').min() self.check_metadata(ts, result) result = ts.resample('1T').apply(lambda x: x.sum()) self.check_metadata(ts, result) _metadata = Series._metadata _finalize = Series.__finalize__ Series._metadata = ['name', 'filename'] o.filename = 'foo' o2.filename = 'bar' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'concat' and name == 'filename': value = '+'.join([getattr( o, name) for o in other.objs if getattr(o, name, None) ]) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, None)) return self Series.__finalize__ = finalize result = pd.concat([o, o2]) assert result.filename == 'foo+bar' assert result.name is None # reset Series._metadata = _metadata Series.__finalize__ = _finalize @pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and LooseVersion(xarray.__version__) < LooseVersion('0.10.0'), reason='xarray >= 0.10.0 required') @pytest.mark.parametrize( "index", ['FloatIndex', 'IntIndex', 'StringIndex', 'UnicodeIndex', 'DateIndex', 'PeriodIndex', 'TimedeltaIndex', 'CategoricalIndex']) def test_to_xarray_index_types(self, index): from xarray import DataArray index = getattr(tm, 'make{}'.format(index)) s = Series(range(6), index=index(6)) s.index.name = 'foo' result = s.to_xarray() repr(result) assert len(result) == 6 assert len(result.coords) == 1 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, DataArray) # idempotency assert_series_equal(result.to_series(), s, check_index_type=False, check_categorical=True) @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): from xarray import DataArray s = Series([]) s.index.name = 'foo' result = s.to_xarray() assert len(result) == 0 assert len(result.coords) == 1 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, DataArray) s = Series(range(6)) s.index.name = 'foo' s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)], names=['one', 'two']) result = s.to_xarray() assert len(result) == 2 assert_almost_equal(list(result.coords.keys()), ['one', 'two']) assert isinstance(result, DataArray) assert_series_equal(result.to_series(), s) def test_valid_deprecated(self): # GH18800 with tm.assert_produces_warning(FutureWarning): pd.Series([]).valid() @pytest.mark.parametrize("s", [ Series([np.arange(5)]), pd.date_range('1/1/2011', periods=24, freq='H'), pd.Series(range(5), index=pd.date_range("2017", periods=5)) ]) @pytest.mark.parametrize("shift_size", [0, 1, 2]) def test_shift_always_copy(self, s, shift_size): # GH22397 assert s.shift(shift_size) is not s @pytest.mark.parametrize("move_by_freq", [ pd.Timedelta('1D'), pd.Timedelta('1M'), ]) def test_datetime_shift_always_copy(self, move_by_freq): # GH22397 s = pd.Series(range(5), index=pd.date_range("2017", periods=5)) assert s.shift(freq=move_by_freq) is not s
bsd-3-clause
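The generic Series tests above pin down two behaviours worth showing in isolation: a single-element boolean Series can be collapsed to a scalar with .bool() (non-boolean single elements raise), and .shift() always returns a new object even for a zero shift. A minimal sketch, assuming a pandas version contemporary with the file (Series.bool() has since been deprecated); the values are illustrative only.

import pandas as pd

s = pd.Series(range(5))
assert s.shift(0) is not s          # shift always copies, even with shift_size=0 (GH22397)
assert pd.Series([True]).bool()     # single boolean element collapses to a scalar
# pd.Series([1]).bool() raises ValueError: non-boolean single element Series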
sgrieve/iverson_2000
Parse_MIDAS_daily.py
1
2766
# -*- coding: utf-8 -*-
"""
This script parses data derived from MIDAS weather station data

Created on Tue Nov 22 16:38:15 2016

HOW TO GET DATA
Search for station here
http://badc.nerc.ac.uk/search/midas_stations/
Best way is to use postcode
Get the station ID
Go to the CEDA page
http://wps-web1.ceda.ac.uk/ui/home
Got to web processes
select Extract weather station data
Choose decadal output
choose daily rainfall
input station data
wait

@author: smudd
"""

import pandas as pd


def load_MIDAS_data():

    #fname = "station_data-196101010000-196112312359.csv"
    #fname = "station_data-201001010000-201611161701.csv"
    fname = "new_small.csv"

    dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M')

    # Read in the data
    MIDAS_df = pd.read_csv(fname, parse_dates=True, date_parser=dateparse)

    # get rid of stupid whitespace in the header
    MIDAS_df.columns = MIDAS_df.columns.str.strip()

    # Make sure that whitespace has been stripped
    a = list(MIDAS_df.columns.values)
    print "Headers are:"
    print a

    # Get rid of some useless columns
    MIDAS_df.drop('id', axis=1, inplace=True)
    MIDAS_df.drop('id_type', axis=1, inplace=True)
    MIDAS_df.drop('met_domain_name', axis=1, inplace=True)
    MIDAS_df.drop('ob_end_ctime', axis=1, inplace=True)
    MIDAS_df.drop('version_num', axis=1, inplace=True)
    MIDAS_df.drop('ob_day_cnt_q', axis=1, inplace=True)
    MIDAS_df.drop('meto_stmp_time', axis=1, inplace=True)
    MIDAS_df.drop('midas_stmp_etime', axis=1, inplace=True)
    MIDAS_df.drop('prcp_amt_j', axis=1, inplace=True)

    # Parse dates
    MIDAS_df['ob_date'] = pd.to_datetime(MIDAS_df['ob_date'], format='%Y-%m-%d %H:%M')

    # get rid of exactly duplicate lines
    MIDAS_df = MIDAS_df.drop_duplicates()

    # Make sure we are only dealing with 1 day records (not monthly totals)
    MIDAS_df = MIDAS_df[MIDAS_df.ob_day_cnt == 1]
    #print yo['ob_date']

    # Make a timestamp for the year 1900
    yr_1900 = pd.Timestamp('1900-01-01')

    # Now we are going to have to group by station
    # print to file, organised by station.
    for station, df_station in MIDAS_df.groupby('src_id'):
        fname = "MidasNEW_" + str(station) + ".csv"
        df_station = df_station.drop_duplicates(['ob_date'], keep="last")
        new_MIDAS = df_station.copy()

        # get a column that has the days since 1900
        new_MIDAS['days_since_1900'] = (new_MIDAS['ob_date'] - yr_1900).dt.days

        new_MIDAS.to_csv(fname)


if __name__ == "__main__":
    #compare_linear_to_loop()
    #test_FoS()
    load_MIDAS_data()
mit
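The core transformation in load_MIDAS_data() is the per-station days_since_1900 column. A minimal sketch of that conversion on made-up dates; the MIDAS column names and per-station files are not reproduced here.

import pandas as pd

dates = pd.Series(pd.to_datetime(['1900-01-02', '1961-01-01']))
yr_1900 = pd.Timestamp('1900-01-01')
days_since_1900 = (dates - yr_1900).dt.days   # integer day offsets from 1 Jan 1900
print(days_since_1900.tolist())               # [1, 22280]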
mpyeager/MLPyDemo
stock_price_prediction_demo.py
1
1206
import csv
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt

dates = []
prices = []


def get_data(filename):
    with open(filename, 'r') as csvfile:
        csvFileReader = csv.reader(csvfile)
        next(csvFileReader)
        for row in csvFileReader:
            dates.append(int(row[0].split('-')[0]))
            prices.append(float(row[1]))
    return


def predict_prices(dates, prices, x):
    dates = np.reshape(dates, (len(dates), 1))

    svr_lin = SVR(kernel='linear', C=1e3)
    svr_poly = SVR(kernel='poly', C=1e3, degree=2)
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    svr_lin.fit(dates, prices)
    svr_poly.fit(dates, prices)
    svr_rbf.fit(dates, prices)

    plt.scatter(dates, prices, color='black', label='Data')
    plt.plot(dates, svr_rbf.predict(dates), color='red', label='RBF Model')
    plt.plot(dates, svr_lin.predict(dates), color='green', label='Linear Model')
    plt.plot(dates, svr_poly.predict(dates), color='blue', label='Polynomial Model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()

    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]


get_data('aapl.csv')
predicted_price = predict_prices(dates, prices, 29)
mit
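A minimal sketch of the RBF branch of predict_prices() on synthetic data rather than aapl.csv. Note that recent scikit-learn releases expect a 2-D array for predict(), whereas the script above passes a bare integer, so the forecast day is wrapped as [[29]] here.

import numpy as np
from sklearn.svm import SVR

days = np.arange(1, 21).reshape(-1, 1)                   # day-of-month features
prices = 100 + 0.5 * days.ravel() + np.random.randn(20)  # synthetic prices, for illustration only

svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_rbf.fit(days, prices)
print(svr_rbf.predict(np.array([[29]]))[0])              # forecast for day 29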
sauloal/cnidaria
scripts/venv/lib/python2.7/site-packages/matplotlib/tri/triplot.py
21
3124
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import numpy as np from matplotlib.tri.triangulation import Triangulation def triplot(ax, *args, **kwargs): """ Draw a unstructured triangular grid as lines and/or markers. The triangulation to plot can be specified in one of two ways; either:: triplot(triangulation, ...) where triangulation is a :class:`matplotlib.tri.Triangulation` object, or :: triplot(x, y, ...) triplot(x, y, triangles, ...) triplot(x, y, triangles=triangles, ...) triplot(x, y, mask=mask, ...) triplot(x, y, triangles, mask=mask, ...) in which case a Triangulation object will be created. See :class:`~matplotlib.tri.Triangulation` for a explanation of these possibilities. The remaining args and kwargs are the same as for :meth:`~matplotlib.axes.Axes.plot`. Return a list of 2 :class:`~matplotlib.lines.Line2D` containing respectively: - the lines plotted for triangles edges - the markers plotted for triangles nodes **Example:** .. plot:: mpl_examples/pylab_examples/triplot_demo.py """ import matplotlib.axes tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs) x, y, edges = (tri.x, tri.y, tri.edges) # Decode plot format string, e.g., 'ro-' fmt = "" if len(args) > 0: fmt = args[0] linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt) # Insert plot format string into a copy of kwargs (kwargs values prevail). kw = kwargs.copy() for key, val in zip(('linestyle', 'marker', 'color'), (linestyle, marker, color)): if val is not None: kw[key] = kwargs.get(key, val) # Draw lines without markers. # Note 1: If we drew markers here, most markers would be drawn more than # once as they belong to several edges. # Note 2: We insert nan values in the flattened edges arrays rather than # plotting directly (triang.x[edges].T, triang.y[edges].T) # as it considerably speeds-up code execution. linestyle = kw['linestyle'] kw_lines = kw.copy() kw_lines['marker'] = 'None' # No marker to draw. kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used. if (linestyle is not None) and (linestyle not in ['None', '', ' ']): tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1) tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1) tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(), **kw_lines) else: tri_lines = ax.plot([], [], **kw_lines) # Draw markers separately. marker = kw['marker'] kw_markers = kw.copy() kw_markers['linestyle'] = 'None' # No line to draw. if (marker is not None) and (marker not in ['None', '', ' ']): tri_markers = ax.plot(x, y, **kw_markers) else: tri_markers = ax.plot([], [], **kw_markers) return tri_lines + tri_markers
mit
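A minimal usage sketch of the pyplot wrapper around this Axes-level triplot(), passing x, y and an explicit triangles list as described in the docstring; the coordinates are made up.

import numpy as np
import matplotlib.pyplot as plt

x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
triangles = [[0, 1, 2], [0, 2, 3]]                    # two triangles sharing the diagonal

lines, markers = plt.triplot(x, y, triangles, 'ko-')  # returns [edge Line2D, marker Line2D]
plt.show()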
mhogg/BMDanalyse
BMDanalyse/ViewBoxCustom.py
1
20762
# -*- coding: utf-8 -*- # Copyright (C) 2016 Michael Hogg # This file is part of BMDanalyse - See LICENSE.txt for information on usage and redistribution import pyqtgraph as pg import numpy as np import matplotlib import pickle import pyqtgraph.functions as fn import types from pyqtgraph.Qt import QtCore, QtGui from ROI import RectROIcustom, PolyLineROIcustom from customItems import QMenuCustom, ImageExporterCustom from functools import partial __all__=['ImageAnalysisViewBox','ViewMode','MultiRoiViewBox'] class ImageAnalysisViewBox(pg.ViewBox): ''' Custom ViewBox used to over-ride the context menu. I don't want the full context menu, just a view all and an export. Export does not call a dialog, just prompts user for filename. ''' def __init__(self,parent=None,border=None,lockAspect=False,enableMouse=True,invertY=False,enableMenu=True,name=None): pg.ViewBox.__init__(self,parent,border,lockAspect,enableMouse,invertY,enableMenu,name) self.menu = None # Override pyqtgraph ViewBoxMenu self.menu = self.getMenu(None) def raiseContextMenu(self, ev): if not self.menuEnabled(): return menu = self.getMenu(ev) pos = ev.screenPos() menu.popup(QtCore.QPoint(pos.x(), pos.y())) def export(self): self.exp = ImageExporterCustom(self) self.exp.export() def getMenu(self,event): if self.menu is None: self.menu = QMenuCustom() self.viewAll = QtGui.QAction("View All", self.menu) self.exportImage = QtGui.QAction("Export image", self.menu) self.viewAll.triggered[()].connect(self.autoRange) self.exportImage.triggered[()].connect(self.export) self.menu.addAction(self.viewAll) self.menu.addAction(self.exportImage) return self.menu class ViewMode(): ''' Helper class for different colour displays of images in MultiRoiViewBox class ''' def __init__(self,id,cmap): self.id = id self.cmap = cmap self.getLookupTable() def getLookupTable(self): lut = [ [ int(255*val) for val in self.cmap(i)[:3] ] for i in xrange(256) ] lut = np.array(lut,dtype=np.ubyte) self.lut = lut class MultiRoiViewBox(pg.ViewBox): ''' Custom Viewbox for multiple ROIs ''' def __init__(self,parent=None,border=None,lockAspect=False,enableMouse=True,invertY=False,enableMenu=True,name=None): pg.ViewBox.__init__(self,parent,border,lockAspect,enableMouse,invertY,enableMenu,name) # Set default values self.rois = [] self.currentROIindex = None self.img = None self.NORMAL = ViewMode(0,matplotlib.cm.gray) self.DEXA = ViewMode(1,matplotlib.cm.jet) self.viewMode = self.NORMAL self.drawROImode = False self.drawingROI = None self.menu = None def getContextMenus(self,ev): return None def raiseContextMenu(self, ev): ''' Display context menu at location of right mouse click ''' if not self.menuEnabled(): return menu = self.getMenu(ev) pos = ev.screenPos() menu.popup(QtCore.QPoint(pos.x(), pos.y())) def export(self): ''' Export viewbox image ''' self.exp = ImageExporterCustom(self) self.exp.export() def raiseRoiSelectMenuLeft(self,ev,roiList): ''' Raise roi menu on left mouse click ''' self.roimenu = QtGui.QMenu() for roi in roiList: action = QtGui.QAction(roi.name, self.roimenu) action.triggered[()].connect(lambda arg=roi: self.selectROI(arg)) self.roimenu.addAction(action) pos = ev.screenPos() self.roimenu.popup(QtCore.QPoint(pos.x(), pos.y())) def raiseRoiSelectMenuRight(self,ev,roiList): ''' Raise roi menu on right mouse click Must use functools.partial (not lamda) here to get signals to work here ''' self.roimenu = QtGui.QMenu() for roi in roiList: action = QtGui.QAction(roi.name, self.roimenu) action.triggered[()].connect(partial(roi.raiseContextMenu, ev)) 
self.roimenu.addAction(action) pos = ev.screenPos() self.roimenu.popup(QtCore.QPoint(pos.x(), pos.y())) def mouseClickEvent(self, ev): ''' Mouse click event handler ''' # Check if click is over any rois roisUnderMouse = [] pos = ev.scenePos() for roi in self.rois: if roi.isUnderMouse(pos): roisUnderMouse.append(roi) numRois = len(roisUnderMouse) # Drawing mode (all buttons) if self.drawROImode: ev.accept() self.drawPolygonRoi(ev) # Click not over any rois elif numRois==0: # Context menu (right mouse button) if ev.button() == QtCore.Qt.RightButton and self.menuEnabled(): self.raiseContextMenu(ev) # Click over any rois else: if ev.button() == QtCore.Qt.LeftButton: if numRois==1: self.selectROI(roisUnderMouse[0]) elif numRois>1: self.raiseRoiSelectMenuLeft(ev,roisUnderMouse) elif ev.button() == QtCore.Qt.RightButton: if numRois==1: roisUnderMouse[0].raiseContextMenu(ev) elif numRois>1: self.raiseRoiSelectMenuRight(ev,roisUnderMouse) def addPolyRoiRequest(self): ''' Function to add a Polygon ROI ''' self.drawROImode = True def endPolyRoiRequest(self): ''' Called at the completion of drawing Polygon ROI ''' self.drawROImode = False # Deactivate drawing mode self.drawingROI = None # No roi being drawn, so set to None def addPolyLineROI(self,handlePositions): ''' Add Polygon ROI - Used for copy and load operations ''' roi = PolyLineROIcustom(handlePositions=handlePositions,removable=True) roi.setName('ROI-%i'% self.getROIid()) self.addItem(roi) # Add roi to viewbox self.rois.append(roi) # Add to list of rois self.selectROI(roi) self.sortROIs() self.setCurrentROIindex(roi) roi.translatable = True for seg in roi.segments: seg.setSelectable(True) for h in roi.handles: h['item'].setSelectable(True) # Setup signals roi.sigRemoveRequested.connect(self.removeROI) roi.sigCopyRequested.connect(self.copyROI) roi.sigSaveRequested.connect(self.saveROI) def drawPolygonRoi(self,ev): ''' Function to draw a Polygon ROI - Called directly by MouseClickEvent ''' roi = self.drawingROI pos = self.mapSceneToView(ev.scenePos()) # TO DRAW ROI if ev.button() == QtCore.Qt.LeftButton: if roi is None: # To start drawing a new roi roi = PolyLineROIcustom(removable = False) # Create new roi roi.setName('ROI-%i'% self.getROIid()) # Set name. 
Do this before self.selectROIs(roi) self.drawingROI = roi self.addItem(roi) # Add roi to viewbox self.rois.append(roi) # Add to list of rois self.selectROI(roi) # Make selected self.sortROIs() # Sort list of rois self.setCurrentROIindex(roi) # Make current roi.translatable = False # Deactivate translation during drawing roi.addFreeHandle(pos) # Add two handles on first click (1 fixed, 1 draggable) roi.addFreeHandle(pos) h = roi.handles[-1]['item'] # Get draggable handle h.scene().sigMouseMoved.connect(h.movePoint) # Connect signal to move handle with mouse else: # To continue drawing an existing roi h = roi.handles[-1]['item'] # Get last handle h.scene().sigMouseMoved.disconnect() # Make last handle non-draggable roi.addFreeHandle(pos) # Add new handle h = roi.handles[-1]['item'] # Get new handle h.scene().sigMouseMoved.connect(h.movePoint) # Make new handle draggable # Add a segment between the handles roi.addSegment(roi.handles[-2]['item'],roi.handles[-1]['item']) # Set segment and handles to non-selectable seg = roi.segments[-1] seg.setSelectable(False) for h in seg.handles: h['item'].setSelectable(False) # TO STOP DRAWING ROI elif (ev.button() == QtCore.Qt.MiddleButton) or \ (ev.button() == QtCore.Qt.RightButton and (roi==None or len(roi.segments)<3)): if roi!=None: # Remove handle and disconnect from scene h = roi.handles[-1]['item'] h.scene().sigMouseMoved.disconnect() roi.removeHandle(h) # Removed roi from viewbox self.removeItem(roi) self.rois.pop(self.currentROIindex) self.setCurrentROIindex(None) # Exit ROI drawing mode self.endPolyRoiRequest() # TO COMPLETE ROI elif ev.button() == QtCore.Qt.RightButton: # Remove last handle h = roi.handles[-1]['item'] h.scene().sigMouseMoved.disconnect() roi.removeHandle(h) # Add segment to close ROI roi.addSegment(roi.handles[-1]['item'],roi.handles[0]['item']) # Setup signals on completed roi roi.sigRemoveRequested.connect(self.removeROI) roi.sigCopyRequested.connect(self.copyROI) roi.sigSaveRequested.connect(self.saveROI) # Re-activate mouse clicks for all roi, segments and handles roi.removable = True roi.translatable = True for seg in roi.segments: seg.setSelectable(True) for h in roi.handles: h['item'].setSelectable(True) # Exit ROI drawing mode self.endPolyRoiRequest() def getMenu(self,ev): '''Create and return context menu ''' # Menu and submenus self.menu = QtGui.QMenu() self.submenu = QtGui.QMenu("Add ROI", self.menu) # Actions self.addROIRectAct = QtGui.QAction("Rectangular", self.submenu) self.addROIPolyAct = QtGui.QAction("Polygon", self.submenu) self.loadROIAct = QtGui.QAction("Load ROI", self.menu) self.dexaMode = QtGui.QAction("Toggle normal/DEXA view", self.menu) self.viewAll = QtGui.QAction("View All", self.menu) self.exportImage = QtGui.QAction("Export image", self.menu) # Signals self.loadROIAct.triggered[()].connect(self.loadROI) self.dexaMode.triggered.connect(self.toggleViewMode) self.viewAll.triggered[()].connect(self.autoRange) self.exportImage.triggered[()].connect(self.export) self.addROIRectAct.triggered[()].connect(lambda arg=ev: self.addRoiRequest(arg)) self.addROIPolyAct.triggered.connect(self.addPolyRoiRequest) # Add actions to menu and submenus self.submenu.addAction(self.addROIRectAct) self.submenu.addAction(self.addROIPolyAct) self.menu.addAction(self.viewAll) self.menu.addAction(self.dexaMode) self.menu.addAction(self.exportImage) self.menu.addSeparator() self.menu.addMenu(self.submenu) self.menu.addAction(self.loadROIAct) self.dexaMode.setCheckable(True) # Return menu return self.menu def 
setCurrentROIindex(self,roi=None): ''' Use this function to change currentROIindex value to ensure a signal is emitted ''' if roi==None: self.currentROIindex = None else: self.currentROIindex = self.rois.index(roi) def getCurrentROIindex(self): return self.currentROIindex def selectROI(self,roi): '''Selection control of ROIs''' # If no ROI is currently selected (currentROIindex is None), select roi if self.currentROIindex==None: roi.setSelected(True) self.setCurrentROIindex(roi) # If an ROI is already selected... else: roiSelected = self.rois[self.currentROIindex] roiSelected.setSelected(False) # If a different roi is already selected, then select roi if self.currentROIindex != self.rois.index(roi): self.setCurrentROIindex(roi) roi.setSelected(True) # If roi is already selected, then unselect else: self.setCurrentROIindex(None) def addRoiRequest(self,ev): ''' Function to addROI at an event screen position ''' # Get position pos = self.mapSceneToView(ev.scenePos()) xpos = pos.x() ypos = pos.y() # Shift down by size xr,yr = self.viewRange() xsize = 0.25*(xr[1]-xr[0]) ysize = 0.25*(yr[1]-yr[0]) xysize = min(xsize,ysize) if xysize==0: xysize=100 ypos -= xysize # Create ROI xypos = (xpos,ypos) self.addROI(pos=xypos) def addROI(self,pos=None,size=None,angle=0.0): ''' Add an ROI to the ViewBox ''' xr,yr = self.viewRange() if pos is None: posx = xr[0]+0.05*(xr[1]-xr[0]) posy = yr[0]+0.05*(yr[1]-yr[0]) pos = [posx,posy] if size is None: xsize = 0.25*(xr[1]-xr[0]) ysize = 0.25*(yr[1]-yr[0]) xysize = min(xsize,ysize) if xysize==0: xysize=100 size = [xysize,xysize] roi = RectROIcustom(pos,size,angle,removable=True,pen=(255,0,0)) # Setup signals roi.setName('ROI-%i'% self.getROIid()) roi.sigRemoveRequested.connect(self.removeROI) roi.sigCopyRequested.connect(self.copyROI) roi.sigSaveRequested.connect(self.saveROI) # Keep track of rois self.addItem(roi) self.rois.append(roi) self.selectROI(roi) self.sortROIs() self.setCurrentROIindex(roi) def sortROIs(self): ''' Sort self.rois by roi name and adjust self.currentROIindex as necessary ''' if len(self.rois)==0: return if self.currentROIindex==None: self.rois.sort() else: roiCurrent = self.rois[self.currentROIindex] self.rois.sort() self.currentROIindex = self.rois.index(roiCurrent) def getROIid(self): ''' Get available and unique number for ROI name ''' nums = [ int(roi.name.split('-')[-1]) for roi in self.rois if roi.name!=None ] nid = 1 if len(nums)>0: while(True): if nid not in nums: break nid+=1 return nid def copyROI(self): ''' Copy current ROI. 
Offset from original for visibility ''' osFract = 0.05 if self.currentROIindex!=None: roi = self.rois[self.currentROIindex] # For rectangular ROI, offset by a fraction of the rotated size if type(roi)==RectROIcustom: roiState = roi.getState() pos = roiState['pos'] size = roiState['size'] angle = roiState['angle'] dx,dy = np.array(size)*osFract ang = np.radians(angle) cosa = np.cos(ang) sina = np.sin(ang) dxt = dx*cosa - dy*sina dyt = dx*sina + dy*cosa offset = QtCore.QPointF(dxt,dyt) self.addROI(pos+offset,size,angle) # For a polyline ROI, offset by a fraction of the bounding rectangle if type(roi)==PolyLineROIcustom: br = roi.shape().boundingRect() size = np.array([br.width(),br.height()]) osx,osy = size * osFract offset = QtCore.QPointF(osx,osy) hps = [i[-1] for i in roi.getSceneHandlePositions(index=None)] hpsOffset = [self.mapSceneToView(hp)+offset for hp in hps] self.addPolyLineROI(hpsOffset) def saveROI(self): ''' Save the highlighted ROI to file ''' if self.currentROIindex!=None: roi = self.rois[self.currentROIindex] fileName = QtGui.QFileDialog.getSaveFileName(None,self.tr("Save ROI"),QtCore.QDir.currentPath(),self.tr("ROI (*.roi)")) # Fix for PyQt/PySide compatibility. PyQt returns a QString, whereas PySide returns a tuple (first entry is filename as string) if isinstance(fileName,types.TupleType): fileName = fileName[0] if hasattr(QtCore,'QString') and isinstance(fileName, QtCore.QString): fileName = str(fileName) if not fileName=='': if type(roi)==RectROIcustom: roiState = roi.saveState() roiState['type']='RectROIcustom' elif type(roi)==PolyLineROIcustom: roiState = {} hps = [self.mapSceneToView(i[-1]) for i in roi.getSceneHandlePositions(index=None)] hps = [[hp.x(),hp.y()] for hp in hps] roiState['type']='PolyLineROIcustom' roiState['handlePositions'] = hps pickle.dump( roiState, open( fileName, "wb" ) ) def loadROI(self): ''' Load a previously saved ROI from file ''' fileNames = QtGui.QFileDialog.getOpenFileNames(None,self.tr("Load ROI"),QtCore.QDir.currentPath(),self.tr("ROI (*.roi)")) # Fix for PyQt/PySide compatibility. 
PyQt returns a QString, whereas PySide returns a tuple (first entry is filename as string) if isinstance(fileNames,types.TupleType): fileNames = fileNames[0] if hasattr(QtCore,'QStringList') and isinstance(fileNames, QtCore.QStringList): fileNames = [str(i) for i in fileNames] if len(fileNames)>0: for fileName in fileNames: if fileName!='': roiState = pickle.load( open(fileName, "rb") ) if roiState['type']=='RectROIcustom': self.addROI(roiState['pos'],roiState['size'],roiState['angle']) elif roiState['type']=='PolyLineROIcustom': self.addPolyLineROI(roiState['handlePositions']) def removeROI(self): ''' Delete the highlighted ROI ''' if self.currentROIindex!=None: roi = self.rois[self.currentROIindex] self.rois.pop(self.currentROIindex) self.removeItem(roi) self.setCurrentROIindex(None) def toggleViewMode(self): ''' Toggles between NORMAL (Black/White) and DEXA mode (colour) ''' if self.viewMode == self.NORMAL: viewMode = self.DEXA else: viewMode = self.NORMAL self.setViewMode(viewMode) def setViewMode(self,viewMode): self.viewMode = viewMode self.updateView() def updateView(self): self.background.setBrush(fn.mkBrush(self.viewMode.lut[0])) self.background.show() if self.img is None: return else: self.img.setLookupTable(self.viewMode.lut) def showImage(self,arr): if arr is None: self.img = None return if self.img is None: self.img = pg.ImageItem(arr,autoRange=False,autoLevels=False) self.addItem(self.img) self.img.setImage(arr,autoLevels=False) self.updateView()
mit
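ViewMode.getLookupTable() builds a 256-entry RGB lookup table from a matplotlib colormap for use with pyqtgraph's ImageItem. A standalone sketch of the same idea, written for Python 3 (the class above uses Python 2's xrange):

import numpy as np
import matplotlib.cm

cmap = matplotlib.cm.jet
# sample the colormap at each of the 256 integer indices, keep RGB, scale to 0-255
lut = np.array([[int(255 * c) for c in cmap(i)[:3]] for i in range(256)], dtype=np.ubyte)
print(lut.shape)   # (256, 3)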
donbright/piliko
experiment/pythsphere.py
1
2981
from fractions import Fraction ########## ########## # # rational parameterization of the unit sphere # ######### ######### # consider a basic unit sphere # # x^2 + y^2 + z^2 = radius = 1^2 # # x^2 + y^2 = l^2 # l^2 + z^2 = radius^2 # radius = 1, radius^2=1 # # example: begin with very simple pythagorean triple numbers:, 3,4,5 and 5,12,13 # now, assume x=3, y=4, l=5, z=12, radius=13. divide all by 13 # # x=3/13, y=4/13, l=5/13, z=12/13, radius=13/13=1 # # to verify: , 3^2=9 4^2=16, 5^2=25, 12^2=144, 13^2=169 # (3/13)^2+(4/13)^2+(12/13)^2 = (13/13)^2 [ qx+qy+qz=r=1 ] # (3/13)^2+(4/13)^2 = (5/13)^2 [ qx+qy = ql ] # (5/13)^2+(12/13)^2 = (13/13)^2 [ ql+qz = qr=1 ] # qn = quadrance(n) = n^2 # # therefore the given point, with x,y,z coordinates [ 3/13, 4/13, 12/13 ] is # a rational point on the unit sphere. # theory # # Use the rational paramterization of the unit circle to find l and z. # # l = m^2-n^2 / m^2+n^2 z = 2*m*n / m^2+n^2 # # now, note that x^2 + y^2 = l^2. # divide this equation by l^2, you get this: # (x/l)^2 + (y/l)^2 = 1 # # we can again use the rational paramterization of a unit circle but # our "x" will actually be x/l and "y" will be y/l. We are using different # m and n as well, call them "m1" and "n1" here. # # x/l = m1^2-n1^2 / m1^2+n1^2 y/l = 2*m1*n1 / m1^2+n1^2 # # Now. That is quite interesting. You can choose m1, n1 as integers and get # values for x/l and y/l. But what if you want just x or y by itself? # # Ahh, remember, we calculated l up above, based on two other integers, m and n # you can multiple the equations above by l to get your sol'n for x and y. # # x = l * ( m1^2-n1^2 / m1^2+n1^2 ) y = l * ( 2*m1*n1 / m1^2+n1^2 ) # # you can use Algebra to rearrange all this, but basically, in the end, # we have x, y, and z as functions of m, n, m1, and n1, four separate integers. # # # possible problem: i have no idea if this works. # # and others have probably found better. xs,ys,zs=[],[],[] def sqr(x): return x*x def blueq(x,y): return sqr(x)+sqr(y) def redq(x,y): return sqr(x)-sqr(y) def greenq(x,y): return 2*x*y depth=8 for m in range(0,depth): for n in range(0,depth): for m1 in range(0,depth): for n1 in range(0,depth): if blueq(m1,n1)==0: continue if blueq(m,n)==0: continue if blueq(m1,n1)==0: continue l = Fraction( redq(m,n) , blueq(m,n) ) z = Fraction( greenq(m,n) , blueq(m,n) ) x = l * Fraction( redq(m1,n1), blueq(m1,n1) ) y = l * Fraction( greenq(m1,n1), blueq(m1,n1) ) print x,y,z,' sq sum: ',x*x+y*y+z*z xs += [x] ys += [y] xs += [y] ys += [x] zs += [z] zs += [-z] print len(xs) import numpy as np import matplotlib.pylab as plt fig,ax = plt.subplots(figsize=(8,8)) ax.set_ylim([-1.2,1.2]) ax.set_xlim([-1.2,1.2]) for i in range(0,len(xs)): xs[i]=xs[i]+zs[i]/4 ys[i]=ys[i]+zs[i]/4 ax.scatter(xs,ys) plt.show()
bsd-3-clause
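A worked check of the parameterization for one choice of the four integers, using the same quadrance definitions as the script (redq = m^2 - n^2, greenq = 2mn, blueq = m^2 + n^2):

from fractions import Fraction

m, n, m1, n1 = 2, 1, 2, 1
l = Fraction(m*m - n*n, m*m + n*n)                  # 3/5
z = Fraction(2*m*n, m*m + n*n)                      # 4/5
x = l * Fraction(m1*m1 - n1*n1, m1*m1 + n1*n1)      # 9/25
y = l * Fraction(2*m1*n1, m1*m1 + n1*n1)            # 12/25
assert x*x + y*y + z*z == 1                         # (9^2 + 12^2 + 20^2) / 25^2 = 625/625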
hainm/scikit-learn
sklearn/utils/tests/test_utils.py
215
8100
import warnings import numpy as np import scipy.sparse as sp from scipy.linalg import pinv2 from itertools import chain from sklearn.utils.testing import (assert_equal, assert_raises, assert_true, assert_almost_equal, assert_array_equal, SkipTest, assert_raises_regex) from sklearn.utils import check_random_state from sklearn.utils import deprecated from sklearn.utils import resample from sklearn.utils import safe_mask from sklearn.utils import column_or_1d from sklearn.utils import safe_indexing from sklearn.utils import shuffle from sklearn.utils import gen_even_slices from sklearn.utils.extmath import pinvh from sklearn.utils.mocking import MockDataFrame def test_make_rng(): # Check the check_random_state utility function behavior assert_true(check_random_state(None) is np.random.mtrand._rand) assert_true(check_random_state(np.random) is np.random.mtrand._rand) rng_42 = np.random.RandomState(42) assert_true(check_random_state(42).randint(100) == rng_42.randint(100)) rng_42 = np.random.RandomState(42) assert_true(check_random_state(rng_42) is rng_42) rng_42 = np.random.RandomState(42) assert_true(check_random_state(43).randint(100) != rng_42.randint(100)) assert_raises(ValueError, check_random_state, "some invalid seed") def test_resample_noarg(): # Border case not worth mentioning in doctests assert_true(resample() is None) def test_deprecated(): # Test whether the deprecated decorator issues appropriate warnings # Copied almost verbatim from http://docs.python.org/library/warnings.html # First a function... with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") @deprecated() def ham(): return "spam" spam = ham() assert_equal(spam, "spam") # function must remain usable assert_equal(len(w), 1) assert_true(issubclass(w[0].category, DeprecationWarning)) assert_true("deprecated" in str(w[0].message).lower()) # ... then a class. 
with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") @deprecated("don't use this") class Ham(object): SPAM = 1 ham = Ham() assert_true(hasattr(ham, "SPAM")) assert_equal(len(w), 1) assert_true(issubclass(w[0].category, DeprecationWarning)) assert_true("deprecated" in str(w[0].message).lower()) def test_resample_value_errors(): # Check that invalid arguments yield ValueError assert_raises(ValueError, resample, [0], [0, 1]) assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3) assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42) def test_safe_mask(): random_state = check_random_state(0) X = random_state.rand(5, 4) X_csr = sp.csr_matrix(X) mask = [False, False, True, True, True] mask = safe_mask(X, mask) assert_equal(X[mask].shape[0], 3) mask = safe_mask(X_csr, mask) assert_equal(X_csr[mask].shape[0], 3) def test_pinvh_simple_real(): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64) a = np.dot(a, a.T) a_pinv = pinvh(a) assert_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_pinvh_nonpositive(): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64) a = np.dot(a, a.T) u, s, vt = np.linalg.svd(a) s[0] *= -1 a = np.dot(u * s, vt) # a is now symmetric non-positive and singular a_pinv = pinv2(a) a_pinvh = pinvh(a) assert_almost_equal(a_pinv, a_pinvh) def test_pinvh_simple_complex(): a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]]) + 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]])) a = np.dot(a, a.conj().T) a_pinv = pinvh(a) assert_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_column_or_1d(): EXAMPLES = [ ("binary", ["spam", "egg", "spam"]), ("binary", [0, 1, 0, 1]), ("continuous", np.arange(10) / 20.), ("multiclass", [1, 2, 3]), ("multiclass", [0, 1, 2, 2, 0]), ("multiclass", [[1], [2], [3]]), ("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]), ("multiclass-multioutput", [[1, 2, 3]]), ("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]), ("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]), ("multiclass-multioutput", [[1, 2, 3]]), ("continuous-multioutput", np.arange(30).reshape((-1, 3))), ] for y_type, y in EXAMPLES: if y_type in ["binary", 'multiclass', "continuous"]: assert_array_equal(column_or_1d(y), np.ravel(y)) else: assert_raises(ValueError, column_or_1d, y) def test_safe_indexing(): X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] inds = np.array([1, 2]) X_inds = safe_indexing(X, inds) X_arrays = safe_indexing(np.array(X), inds) assert_array_equal(np.array(X_inds), X_arrays) assert_array_equal(np.array(X_inds), np.array(X)[inds]) def test_safe_indexing_pandas(): try: import pandas as pd except ImportError: raise SkipTest("Pandas not found") X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) X_df = pd.DataFrame(X) inds = np.array([1, 2]) X_df_indexed = safe_indexing(X_df, inds) X_indexed = safe_indexing(X_df, inds) assert_array_equal(np.array(X_df_indexed), X_indexed) # fun with read-only data in dataframes # this happens in joblib memmapping X.setflags(write=False) X_df_readonly = pd.DataFrame(X) with warnings.catch_warnings(record=True): X_df_ro_indexed = safe_indexing(X_df_readonly, inds) assert_array_equal(np.array(X_df_ro_indexed), X_indexed) def test_safe_indexing_mock_pandas(): X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) X_df = MockDataFrame(X) inds = np.array([1, 2]) X_df_indexed = safe_indexing(X_df, inds) X_indexed = safe_indexing(X_df, inds) assert_array_equal(np.array(X_df_indexed), X_indexed) def test_shuffle_on_ndim_equals_three(): def to_tuple(A): # to make the inner arrays hashable return 
tuple(tuple(tuple(C) for C in B) for B in A) A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2) S = set(to_tuple(A)) shuffle(A) # shouldn't raise a ValueError for dim = 3 assert_equal(set(to_tuple(A)), S) def test_shuffle_dont_convert_to_array(): # Check that shuffle does not try to convert to numpy arrays with float # dtypes can let any indexable datastructure pass-through. a = ['a', 'b', 'c'] b = np.array(['a', 'b', 'c'], dtype=object) c = [1, 2, 3] d = MockDataFrame(np.array([['a', 0], ['b', 1], ['c', 2]], dtype=object)) e = sp.csc_matrix(np.arange(6).reshape(3, 2)) a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0) assert_equal(a_s, ['c', 'b', 'a']) assert_equal(type(a_s), list) assert_array_equal(b_s, ['c', 'b', 'a']) assert_equal(b_s.dtype, object) assert_equal(c_s, [3, 2, 1]) assert_equal(type(c_s), list) assert_array_equal(d_s, np.array([['c', 2], ['b', 1], ['a', 0]], dtype=object)) assert_equal(type(d_s), MockDataFrame) assert_array_equal(e_s.toarray(), np.array([[4, 5], [2, 3], [0, 1]])) def test_gen_even_slices(): # check that gen_even_slices contains all samples some_range = range(10) joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)])) assert_array_equal(some_range, joined_range) # check that passing negative n_chunks raises an error slices = gen_even_slices(10, -1) assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be" " >=1", next, slices)
bsd-3-clause
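Two of the utilities exercised above are easy to show in isolation; the shuffled order below matches the random_state=0 expectation asserted in the tests.

from sklearn.utils import gen_even_slices, shuffle

# gen_even_slices splits range(n) into n_packs nearly equal contiguous slices
print([list(range(10))[s] for s in gen_even_slices(10, 3)])
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]

a, b = shuffle(['a', 'b', 'c'], [1, 2, 3], random_state=0)
print(a, b)   # ['c', 'b', 'a'] [3, 2, 1] -- inputs stay aligned under one permutation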
wazeerzulfikar/scikit-learn
sklearn/cluster/hierarchical.py
1
33898
"""Hierarchical Agglomerative Clustering These routines perform some hierarchical agglomerative clustering of some input data. Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort, Gael Varoquaux License: BSD 3 clause """ from heapq import heapify, heappop, heappush, heappushpop import warnings import numpy as np from scipy import sparse from scipy.sparse.csgraph import connected_components from ..base import BaseEstimator, ClusterMixin from ..externals.joblib import Memory from ..externals import six from ..metrics.pairwise import paired_distances, pairwise_distances from ..utils import check_array from . import _hierarchical from ._feature_agglomeration import AgglomerationTransform from ..utils.fast_dict import IntFloatDict from ..externals.six.moves import xrange ############################################################################### # For non fully-connected graphs def _fix_connectivity(X, connectivity, affinity): """ Fixes the connectivity matrix - copies it - makes it symmetric - converts it to LIL if necessary - completes it if necessary """ n_samples = X.shape[0] if (connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples): raise ValueError('Wrong shape for connectivity matrix: %s ' 'when X is %s' % (connectivity.shape, X.shape)) # Make the connectivity matrix symmetric: connectivity = connectivity + connectivity.T # Convert connectivity matrix to LIL if not sparse.isspmatrix_lil(connectivity): if not sparse.isspmatrix(connectivity): connectivity = sparse.lil_matrix(connectivity) else: connectivity = connectivity.tolil() # Compute the number of nodes n_components, labels = connected_components(connectivity) if n_components > 1: warnings.warn("the number of connected components of the " "connectivity matrix is %d > 1. Completing it to avoid " "stopping the tree early." % n_components, stacklevel=2) # XXX: Can we do without completing the matrix? for i in xrange(n_components): idx_i = np.where(labels == i)[0] Xi = X[idx_i] for j in xrange(i): idx_j = np.where(labels == j)[0] Xj = X[idx_j] D = pairwise_distances(Xi, Xj, metric=affinity) ii, jj = np.where(D == np.min(D)) ii = ii[0] jj = jj[0] connectivity[idx_i[ii], idx_j[jj]] = True connectivity[idx_j[jj], idx_i[ii]] = True return connectivity, n_components ############################################################################### # Hierarchical tree building functions def ward_tree(X, connectivity=None, n_clusters=None, return_distance=False): """Ward clustering based on a Feature matrix. Recursively merges the pair of clusters that minimally increases within-cluster variance. The inertia matrix uses a Heapq-based representation. This is the structured version, that takes into account some topological structure between samples. Read more in the :ref:`User Guide <hierarchical_clustering>`. Parameters ---------- X : array, shape (n_samples, n_features) feature matrix representing n_samples samples to be clustered connectivity : sparse matrix (optional). connectivity matrix. Defines for each sample the neighboring samples following a given structure of the data. The matrix is assumed to be symmetric and only the upper triangular half is used. Default is None, i.e, the Ward algorithm is unstructured. n_clusters : int (optional) Stop early the construction of the tree at n_clusters. This is useful to decrease computation time if the number of clusters is not small compared to the number of samples. 
In this case, the complete tree is not computed, thus the 'children' output is of limited use, and the 'parents' output should rather be used. This option is valid only when specifying a connectivity matrix. return_distance : bool (optional) If True, return the distance between the clusters. Returns ------- children : 2D array, shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_samples` correspond to leaves of the tree which are the original samples. A node `i` greater than or equal to `n_samples` is a non-leaf node and has children `children_[i - n_samples]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_samples + i` n_components : int The number of connected components in the graph. n_leaves : int The number of leaves in the tree parents : 1D array, shape (n_nodes, ) or None The parent of each node. Only returned when a connectivity matrix is specified, elsewhere 'None' is returned. distances : 1D array, shape (n_nodes-1, ) Only returned if return_distance is set to True (for compatibility). The distances between the centers of the nodes. `distances[i]` corresponds to a weighted euclidean distance between the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to leaves of the tree, then `distances[i]` is their unweighted euclidean distance. Distances are updated in the following way (from scipy.hierarchy.linkage): The new entry :math:`d(u,v)` is computed as follows, .. math:: d(u,v) = \\sqrt{\\frac{|v|+|s|} {T}d(v,s)^2 + \\frac{|v|+|t|} {T}d(v,t)^2 - \\frac{|v|} {T}d(s,t)^2} where :math:`u` is the newly joined cluster consisting of clusters :math:`s` and :math:`t`, :math:`v` is an unused cluster in the forest, :math:`T=|v|+|s|+|t|`, and :math:`|*|` is the cardinality of its argument. This is also known as the incremental algorithm. """ X = np.asarray(X) if X.ndim == 1: X = np.reshape(X, (-1, 1)) n_samples, n_features = X.shape if connectivity is None: from scipy.cluster import hierarchy # imports PIL if n_clusters is not None: warnings.warn('Partial build of the tree is implemented ' 'only for structured clustering (i.e. with ' 'explicit connectivity). The algorithm ' 'will build the full tree and only ' 'retain the lower branches required ' 'for the specified number of clusters', stacklevel=2) out = hierarchy.ward(X) children_ = out[:, :2].astype(np.intp) if return_distance: distances = out[:, 2] return children_, 1, n_samples, None, distances else: return children_, 1, n_samples, None connectivity, n_components = _fix_connectivity(X, connectivity, affinity='euclidean') if n_clusters is None: n_nodes = 2 * n_samples - 1 else: if n_clusters > n_samples: raise ValueError('Cannot provide more clusters than samples. ' '%i n_clusters was asked, and there are %i samples.' 
% (n_clusters, n_samples)) n_nodes = 2 * n_samples - n_clusters # create inertia matrix coord_row = [] coord_col = [] A = [] for ind, row in enumerate(connectivity.rows): A.append(row) # We keep only the upper triangular for the moments # Generator expressions are faster than arrays on the following row = [i for i in row if i < ind] coord_row.extend(len(row) * [ind, ]) coord_col.extend(row) coord_row = np.array(coord_row, dtype=np.intp, order='C') coord_col = np.array(coord_col, dtype=np.intp, order='C') # build moments as a list moments_1 = np.zeros(n_nodes, order='C') moments_1[:n_samples] = 1 moments_2 = np.zeros((n_nodes, n_features), order='C') moments_2[:n_samples] = X inertia = np.empty(len(coord_row), dtype=np.float64, order='C') _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia) inertia = list(six.moves.zip(inertia, coord_row, coord_col)) heapify(inertia) # prepare the main fields parent = np.arange(n_nodes, dtype=np.intp) used_node = np.ones(n_nodes, dtype=bool) children = [] if return_distance: distances = np.empty(n_nodes - n_samples) not_visited = np.empty(n_nodes, dtype=np.int8, order='C') # recursive merge loop for k in range(n_samples, n_nodes): # identify the merge while True: inert, i, j = heappop(inertia) if used_node[i] and used_node[j]: break parent[i], parent[j] = k, k children.append((i, j)) used_node[i] = used_node[j] = False if return_distance: # store inertia value distances[k - n_samples] = inert # update the moments moments_1[k] = moments_1[i] + moments_1[j] moments_2[k] = moments_2[i] + moments_2[j] # update the structure matrix A and the inertia matrix coord_col = [] not_visited.fill(1) not_visited[k] = 0 _hierarchical._get_parents(A[i], coord_col, parent, not_visited) _hierarchical._get_parents(A[j], coord_col, parent, not_visited) # List comprehension is faster than a for loop [A[l].append(k) for l in coord_col] A.append(coord_col) coord_col = np.array(coord_col, dtype=np.intp, order='C') coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C') coord_row.fill(k) n_additions = len(coord_row) ini = np.empty(n_additions, dtype=np.float64, order='C') _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini) # List comprehension is faster than a for loop [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)] # Separate leaves in children (empty lists up to now) n_leaves = n_samples # sort children to get consistent output with unstructured version children = [c[::-1] for c in children] children = np.array(children) # return numpy array for efficient caching if return_distance: # 2 is scaling factor to compare w/ unstructured version distances = np.sqrt(2. * distances) return children, n_components, n_leaves, parent, distances else: return children, n_components, n_leaves, parent # average and complete linkage def linkage_tree(X, connectivity=None, n_components='deprecated', n_clusters=None, linkage='complete', affinity="euclidean", return_distance=False): """Linkage agglomerative clustering based on a Feature matrix. The inertia matrix uses a Heapq-based representation. This is the structured version, that takes into account some topological structure between samples. Read more in the :ref:`User Guide <hierarchical_clustering>`. Parameters ---------- X : array, shape (n_samples, n_features) feature matrix representing n_samples samples to be clustered connectivity : sparse matrix (optional). connectivity matrix. 
Defines for each sample the neighboring samples following a given structure of the data. The matrix is assumed to be symmetric and only the upper triangular half is used. Default is None, i.e, the Ward algorithm is unstructured. n_components : int (optional) The number of connected components in the graph. n_clusters : int (optional) Stop early the construction of the tree at n_clusters. This is useful to decrease computation time if the number of clusters is not small compared to the number of samples. In this case, the complete tree is not computed, thus the 'children' output is of limited use, and the 'parents' output should rather be used. This option is valid only when specifying a connectivity matrix. linkage : {"average", "complete"}, optional, default: "complete" Which linkage criteria to use. The linkage criterion determines which distance to use between sets of observation. - average uses the average of the distances of each observation of the two sets - complete or maximum linkage uses the maximum distances between all observations of the two sets. affinity : string or callable, optional, default: "euclidean". which metric to use. Can be "euclidean", "manhattan", or any distance know to paired distance (see metric.pairwise) return_distance : bool, default False whether or not to return the distances between the clusters. Returns ------- children : 2D array, shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_samples` correspond to leaves of the tree which are the original samples. A node `i` greater than or equal to `n_samples` is a non-leaf node and has children `children_[i - n_samples]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_samples + i` n_components : int The number of connected components in the graph. n_leaves : int The number of leaves in the tree. parents : 1D array, shape (n_nodes, ) or None The parent of each node. Only returned when a connectivity matrix is specified, elsewhere 'None' is returned. distances : ndarray, shape (n_nodes-1,) Returned when return_distance is set to True. distances[i] refers to the distance between children[i][0] and children[i][1] when they are merged. See also -------- ward_tree : hierarchical clustering with ward linkage """ if n_components != 'deprecated': warnings.warn("n_components was deprecated in 0.18" "will be removed in 0.21", DeprecationWarning) X = np.asarray(X) if X.ndim == 1: X = np.reshape(X, (-1, 1)) n_samples, n_features = X.shape linkage_choices = {'complete': _hierarchical.max_merge, 'average': _hierarchical.average_merge} try: join_func = linkage_choices[linkage] except KeyError: raise ValueError( 'Unknown linkage option, linkage should be one ' 'of %s, but %s was given' % (linkage_choices.keys(), linkage)) if connectivity is None: from scipy.cluster import hierarchy # imports PIL if n_clusters is not None: warnings.warn('Partial build of the tree is implemented ' 'only for structured clustering (i.e. with ' 'explicit connectivity). The algorithm ' 'will build the full tree and only ' 'retain the lower branches required ' 'for the specified number of clusters', stacklevel=2) if affinity == 'precomputed': # for the linkage function of hierarchy to work on precomputed # data, provide as first argument an ndarray of the shape returned # by pdist: it is a flat array containing the upper triangular of # the distance matrix. 
i, j = np.triu_indices(X.shape[0], k=1) X = X[i, j] elif affinity == 'l2': # Translate to something understood by scipy affinity = 'euclidean' elif affinity in ('l1', 'manhattan'): affinity = 'cityblock' elif callable(affinity): X = affinity(X) i, j = np.triu_indices(X.shape[0], k=1) X = X[i, j] out = hierarchy.linkage(X, method=linkage, metric=affinity) children_ = out[:, :2].astype(np.int) if return_distance: distances = out[:, 2] return children_, 1, n_samples, None, distances return children_, 1, n_samples, None connectivity, n_components = _fix_connectivity(X, connectivity, affinity=affinity) connectivity = connectivity.tocoo() # Put the diagonal to zero diag_mask = (connectivity.row != connectivity.col) connectivity.row = connectivity.row[diag_mask] connectivity.col = connectivity.col[diag_mask] connectivity.data = connectivity.data[diag_mask] del diag_mask if affinity == 'precomputed': distances = X[connectivity.row, connectivity.col] else: # FIXME We compute all the distances, while we could have only computed # the "interesting" distances distances = paired_distances(X[connectivity.row], X[connectivity.col], metric=affinity) connectivity.data = distances if n_clusters is None: n_nodes = 2 * n_samples - 1 else: assert n_clusters <= n_samples n_nodes = 2 * n_samples - n_clusters if return_distance: distances = np.empty(n_nodes - n_samples) # create inertia heap and connection matrix A = np.empty(n_nodes, dtype=object) inertia = list() # LIL seems to the best format to access the rows quickly, # without the numpy overhead of slicing CSR indices and data. connectivity = connectivity.tolil() # We are storing the graph in a list of IntFloatDict for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)): A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64)) # We keep only the upper triangular for the heap # Generator expressions are faster than arrays on the following inertia.extend(_hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind) del connectivity heapify(inertia) # prepare the main fields parent = np.arange(n_nodes, dtype=np.intp) used_node = np.ones(n_nodes, dtype=np.intp) children = [] # recursive merge loop for k in xrange(n_samples, n_nodes): # identify the merge while True: edge = heappop(inertia) if used_node[edge.a] and used_node[edge.b]: break i = edge.a j = edge.b if return_distance: # store distances distances[k - n_samples] = edge.weight parent[i] = parent[j] = k children.append((i, j)) # Keep track of the number of elements per cluster n_i = used_node[i] n_j = used_node[j] used_node[k] = n_i + n_j used_node[i] = used_node[j] = False # update the structure matrix A and the inertia matrix # a clever 'min', or 'max' operation between A[i] and A[j] coord_col = join_func(A[i], A[j], used_node, n_i, n_j) for l, d in coord_col: A[l].append(k, d) # Here we use the information from coord_col (containing the # distances) to update the heap heappush(inertia, _hierarchical.WeightedEdge(d, k, l)) A[k] = coord_col # Clear A[i] and A[j] to save memory A[i] = A[j] = 0 # Separate leaves in children (empty lists up to now) n_leaves = n_samples # # return numpy array for efficient caching children = np.array(children)[:, ::-1] if return_distance: return children, n_components, n_leaves, parent, distances return children, n_components, n_leaves, parent # Matching names to tree-building strategies def _complete_linkage(*args, **kwargs): kwargs['linkage'] = 'complete' return linkage_tree(*args, **kwargs) def 
_average_linkage(*args, **kwargs): kwargs['linkage'] = 'average' return linkage_tree(*args, **kwargs) _TREE_BUILDERS = dict( ward=ward_tree, complete=_complete_linkage, average=_average_linkage) ############################################################################### # Functions for cutting hierarchical clustering tree def _hc_cut(n_clusters, children, n_leaves): """Function cutting the ward tree for a given number of clusters. Parameters ---------- n_clusters : int or ndarray The number of clusters to form. children : 2D array, shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_samples` correspond to leaves of the tree which are the original samples. A node `i` greater than or equal to `n_samples` is a non-leaf node and has children `children_[i - n_samples]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_samples + i` n_leaves : int Number of leaves of the tree. Returns ------- labels : array [n_samples] cluster labels for each point """ if n_clusters > n_leaves: raise ValueError('Cannot extract more clusters than samples: ' '%s clusters where given for a tree with %s leaves.' % (n_clusters, n_leaves)) # In this function, we store nodes as a heap to avoid recomputing # the max of the nodes: the first element is always the smallest # We use negated indices as heaps work on smallest elements, and we # are interested in largest elements # children[-1] is the root of the tree nodes = [-(max(children[-1]) + 1)] for i in xrange(n_clusters - 1): # As we have a heap, nodes[0] is the smallest element these_children = children[-nodes[0] - n_leaves] # Insert the 2 children and remove the largest node heappush(nodes, -these_children[0]) heappushpop(nodes, -these_children[1]) label = np.zeros(n_leaves, dtype=np.intp) for i, node in enumerate(nodes): label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i return label ############################################################################### class AgglomerativeClustering(BaseEstimator, ClusterMixin): """ Agglomerative Clustering Recursively merges the pair of clusters that minimally increases a given linkage distance. Read more in the :ref:`User Guide <hierarchical_clustering>`. Parameters ---------- n_clusters : int, default=2 The number of clusters to find. affinity : string or callable, default: "euclidean" Metric used to compute the linkage. Can be "euclidean", "l1", "l2", "manhattan", "cosine", or 'precomputed'. If linkage is "ward", only "euclidean" is accepted. memory : Instance of sklearn.externals.joblib.Memory or string, optional \ (default=None) Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. connectivity : array-like or callable, optional Connectivity matrix. Defines for each sample the neighboring samples following a given structure of the data. This can be a connectivity matrix itself or a callable that transforms the data into a connectivity matrix, such as derived from kneighbors_graph. Default is None, i.e, the hierarchical clustering algorithm is unstructured. compute_full_tree : bool or 'auto' (optional) Stop early the construction of the tree at n_clusters. This is useful to decrease computation time if the number of clusters is not small compared to the number of samples. This option is useful only when specifying a connectivity matrix. 
Note also that when varying the number of clusters and using caching, it may be advantageous to compute the full tree. linkage : {"ward", "complete", "average"}, optional, default: "ward" Which linkage criterion to use. The linkage criterion determines which distance to use between sets of observation. The algorithm will merge the pairs of cluster that minimize this criterion. - ward minimizes the variance of the clusters being merged. - average uses the average of the distances of each observation of the two sets. - complete or maximum linkage uses the maximum distances between all observations of the two sets. pooling_func : callable, default=np.mean This combines the values of agglomerated features into a single value, and should accept an array of shape [M, N] and the keyword argument ``axis=1``, and reduce it to an array of size [M]. Attributes ---------- labels_ : array [n_samples] cluster labels for each point n_leaves_ : int Number of leaves in the hierarchical tree. n_components_ : int The estimated number of connected components in the graph. children_ : array-like, shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_samples` correspond to leaves of the tree which are the original samples. A node `i` greater than or equal to `n_samples` is a non-leaf node and has children `children_[i - n_samples]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_samples + i` """ def __init__(self, n_clusters=2, affinity="euclidean", memory=None, connectivity=None, compute_full_tree='auto', linkage='ward', pooling_func=np.mean): self.n_clusters = n_clusters self.memory = memory self.connectivity = connectivity self.compute_full_tree = compute_full_tree self.linkage = linkage self.affinity = affinity self.pooling_func = pooling_func def fit(self, X, y=None): """Fit the hierarchical clustering on the data Parameters ---------- X : array-like, shape = [n_samples, n_features] The samples a.k.a. observations. Returns ------- self """ X = check_array(X, ensure_min_samples=2, estimator=self) memory = self.memory if memory is None: memory = Memory(cachedir=None, verbose=0) elif isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) elif not isinstance(memory, Memory): raise ValueError("'memory' should either be a string or" " a sklearn.externals.joblib.Memory" " instance, got 'memory={!r}' instead.".format( type(memory))) if self.n_clusters <= 0: raise ValueError("n_clusters should be an integer greater than 0." " %s was provided." % str(self.n_clusters)) if self.linkage == "ward" and self.affinity != "euclidean": raise ValueError("%s was provided as affinity. Ward can only " "work with euclidean distances." % (self.affinity, )) if self.linkage not in _TREE_BUILDERS: raise ValueError("Unknown linkage type %s." "Valid options are %s" % (self.linkage, _TREE_BUILDERS.keys())) tree_builder = _TREE_BUILDERS[self.linkage] connectivity = self.connectivity if self.connectivity is not None: if callable(self.connectivity): connectivity = self.connectivity(X) connectivity = check_array( connectivity, accept_sparse=['csr', 'coo', 'lil']) n_samples = len(X) compute_full_tree = self.compute_full_tree if self.connectivity is None: compute_full_tree = True if compute_full_tree == 'auto': # Early stopping is likely to give a speed up only for # a large number of clusters. 
The actual threshold # implemented here is heuristic compute_full_tree = self.n_clusters < max(100, .02 * n_samples) n_clusters = self.n_clusters if compute_full_tree: n_clusters = None # Construct the tree kwargs = {} if self.linkage != 'ward': kwargs['linkage'] = self.linkage kwargs['affinity'] = self.affinity self.children_, self.n_components_, self.n_leaves_, parents = \ memory.cache(tree_builder)(X, connectivity, n_clusters=n_clusters, **kwargs) # Cut the tree if compute_full_tree: self.labels_ = _hc_cut(self.n_clusters, self.children_, self.n_leaves_) else: labels = _hierarchical.hc_get_heads(parents, copy=False) # copy to avoid holding a reference on the original array labels = np.copy(labels[:n_samples]) # Reassign cluster numbers self.labels_ = np.searchsorted(np.unique(labels), labels) return self class FeatureAgglomeration(AgglomerativeClustering, AgglomerationTransform): """Agglomerate features. Similar to AgglomerativeClustering, but recursively merges features instead of samples. Read more in the :ref:`User Guide <hierarchical_clustering>`. Parameters ---------- n_clusters : int, default 2 The number of clusters to find. affinity : string or callable, default "euclidean" Metric used to compute the linkage. Can be "euclidean", "l1", "l2", "manhattan", "cosine", or 'precomputed'. If linkage is "ward", only "euclidean" is accepted. memory : Instance of sklearn.externals.joblib.Memory or string, optional \ (default=None) Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. connectivity : array-like or callable, optional Connectivity matrix. Defines for each feature the neighboring features following a given structure of the data. This can be a connectivity matrix itself or a callable that transforms the data into a connectivity matrix, such as derived from kneighbors_graph. Default is None, i.e, the hierarchical clustering algorithm is unstructured. compute_full_tree : bool or 'auto', optional, default "auto" Stop early the construction of the tree at n_clusters. This is useful to decrease computation time if the number of clusters is not small compared to the number of features. This option is useful only when specifying a connectivity matrix. Note also that when varying the number of clusters and using caching, it may be advantageous to compute the full tree. linkage : {"ward", "complete", "average"}, optional, default "ward" Which linkage criterion to use. The linkage criterion determines which distance to use between sets of features. The algorithm will merge the pairs of cluster that minimize this criterion. - ward minimizes the variance of the clusters being merged. - average uses the average of the distances of each feature of the two sets. - complete or maximum linkage uses the maximum distances between all features of the two sets. pooling_func : callable, default np.mean This combines the values of agglomerated features into a single value, and should accept an array of shape [M, N] and the keyword argument `axis=1`, and reduce it to an array of size [M]. Attributes ---------- labels_ : array-like, (n_features,) cluster labels for each feature. n_leaves_ : int Number of leaves in the hierarchical tree. n_components_ : int The estimated number of connected components in the graph. children_ : array-like, shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_features` correspond to leaves of the tree which are the original samples. 
A node `i` greater than or equal to `n_features` is a non-leaf node and has children `children_[i - n_features]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_features + i` """ def fit(self, X, y=None, **params): """Fit the hierarchical clustering on the data Parameters ---------- X : array-like, shape = [n_samples, n_features] The data Returns ------- self """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_min_features=2, estimator=self) return AgglomerativeClustering.fit(self, X.T, **params) @property def fit_predict(self): raise AttributeError
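###############################################################################
# Minimal usage sketch (illustrative only, not part of the estimator API
# above): the data, variable names and parameter values below are invented
# for demonstration, and the block only runs when this module is executed
# directly, so importing the module is unaffected. It simply exercises
# AgglomerativeClustering with a k-nearest-neighbours connectivity graph, as
# described in the `connectivity` docstring.
if __name__ == '__main__':
    from sklearn.neighbors import kneighbors_graph

    rng = np.random.RandomState(0)
    X_demo = rng.rand(50, 2)                       # 50 samples, 2 features
    conn = kneighbors_graph(X_demo, 5, include_self=False)
    model = AgglomerativeClustering(n_clusters=3, linkage='ward',
                                    connectivity=conn)
    print(model.fit(X_demo).labels_)               # one cluster label per sample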
bsd-3-clause
dsm054/pandas
pandas/tests/sparse/series/test_indexing.py
4
3133
import pytest import numpy as np from pandas import SparseSeries, Series from pandas.util import testing as tm pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)") @pytest.mark.parametrize('data', [ [1, 1, 2, 2, 3, 3, 4, 4, 0, 0], [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan], [ 1.0, 1.0 + 1.0j, 2.0 + 2.0j, 2.0, 3.0, 3.0 + 3.0j, 4.0 + 4.0j, 4.0, np.nan, np.nan ] ]) @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)', strict=True) def test_where_with_numeric_data(data): # GH 17386 lower_bound = 1.5 sparse = SparseSeries(data) result = sparse.where(sparse > lower_bound) dense = Series(data) dense_expected = dense.where(dense > lower_bound) sparse_expected = SparseSeries(dense_expected) tm.assert_series_equal(result, dense_expected) tm.assert_sp_series_equal(result, sparse_expected) @pytest.mark.parametrize('data', [ [1, 1, 2, 2, 3, 3, 4, 4, 0, 0], [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan], [ 1.0, 1.0 + 1.0j, 2.0 + 2.0j, 2.0, 3.0, 3.0 + 3.0j, 4.0 + 4.0j, 4.0, np.nan, np.nan ] ]) @pytest.mark.parametrize('other', [ True, -100, 0.1, 100.0 + 100.0j ]) @pytest.mark.skip(reason='Wrong SparseBlock initialization ' '(Segfault) ' '(GH 17386)') def test_where_with_numeric_data_and_other(data, other): # GH 17386 lower_bound = 1.5 sparse = SparseSeries(data) result = sparse.where(sparse > lower_bound, other) dense = Series(data) dense_expected = dense.where(dense > lower_bound, other) sparse_expected = SparseSeries(dense_expected, fill_value=other) tm.assert_series_equal(result, dense_expected) tm.assert_sp_series_equal(result, sparse_expected) @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)', strict=True) def test_where_with_bool_data(): # GH 17386 data = [False, False, True, True, False, False] cond = True sparse = SparseSeries(data) result = sparse.where(sparse == cond) dense = Series(data) dense_expected = dense.where(dense == cond) sparse_expected = SparseSeries(dense_expected) tm.assert_series_equal(result, dense_expected) tm.assert_sp_series_equal(result, sparse_expected) @pytest.mark.parametrize('other', [ True, 0, 0.1, 100.0 + 100.0j ]) @pytest.mark.skip(reason='Wrong SparseBlock initialization ' '(Segfault) ' '(GH 17386)') def test_where_with_bool_data_and_other(other): # GH 17386 data = [False, False, True, True, False, False] cond = True sparse = SparseSeries(data) result = sparse.where(sparse == cond, other) dense = Series(data) dense_expected = dense.where(dense == cond, other) sparse_expected = SparseSeries(dense_expected, fill_value=other) tm.assert_series_equal(result, dense_expected) tm.assert_sp_series_equal(result, sparse_expected)
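# Stand-alone sketch (not collected by pytest) of the dense-vs-sparse
# comparison pattern the parametrised tests above encode. It assumes a pandas
# version that still ships SparseSeries; the sparse result is exactly what
# GH 17386 tracks, so no equality is asserted here, and the data values are
# invented for illustration.
def _where_comparison_sketch():
    data = [1.0, 2.0, np.nan, 4.0]
    dense = Series(data)
    sparse = SparseSeries(data)
    dense_expected = dense.where(dense > 1.5)      # reference result
    sparse_result = sparse.where(sparse > 1.5)     # compared via tm helpers above
    return dense_expected, sparse_result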
bsd-3-clause
mcdeaton13/dynamic
Data/Calibration/DepreciationParameters/Program/testing script.py
2
7260
''' ------------------------------------------------------------------------------- Date created: 4/15/2015 Last updated 4/15/2015 ------------------------------------------------------------------------------- This py-file tests the naics_processing.py program. ------------------------------------------------------------------------------- Packages: ------------------------------------------------------------------------------- ''' import os.path import numpy as np import pandas as pd # import naics_processing as naics ''' ------------------------------------------------------------------------------- The main script of the program: ------------------------------------------------------------------------------- Testing the "load_naics" function. Checks: 1) Recreate the list of naics codes using the tree. Check to see that this matches the input. 2) ------------------------------------------------------------------------------- ''' def test_load_naics(path = None, messages = True): # Default path if none is specified: if path == None: path = os.getcwd() path = os.path.abspath(path + "\\data\\2012_NAICS_Codes.csv") # Using the function being tested to create a tree: cur_tree = naics.load_naics(path) # Replicating the codes in the input file: rep_codes = np.zeros(0) for ind in cur_tree.enum_inds: cur_codes = ind.data.dfs["Codes:"].iloc[:,0] rep_codes = np.append(rep_codes, cur_codes) rep_codes = rep_codes.astype(int) rep_codes = np.unique(rep_codes) rep_codes = np.sort(rep_codes) # orig_data = pd.read_csv(path).iloc[:,0] orig_codes = np.zeros(0) for i in xrange(0, len(orig_data)): cur_codes = str(orig_data[i]).split("-") orig_codes = np.append(orig_codes, cur_codes) orig_codes = orig_codes.astype(int) orig_codes = np.unique(orig_codes) orig_codes = np.sort(orig_codes) # rep_index = 0 orig_index = 0 matches = 0 while((rep_index < len(rep_codes)) and (orig_index < len(orig_codes))): if(rep_codes[rep_index] == int(orig_codes[orig_index])): rep_index += 1 orig_index += 1 matches += 1 elif(rep_codes[rep_index] <= orig_codes[orig_index]): rep_index += 1 elif(rep_codes[rep_index] >= orig_codes[orig_index]): orig_index += 1 if matches == len(orig_codes): if messages: print "\"load_naics\" passed test 1." return None else: mismatch = str(len(orig_codes) - matches) if messages: print "\"load_naics\" failed test 1. Mismatches:" + mismatch + "." return int(mismatch) ''' ------------------------------------------------------------------------------- Prints out the contents of a tree. Creates a csv file for each dataframe key. Each line in the csv file has the contents of the df for a specific industry. This allows the data to be manually checked in excel. 
------------------------------------------------------------------------------- ''' def print_tree_dfs(data_tree, out_path = None, data_types = None): if out_path == None: out_path = os.getcwd() out_path = os.path.abspath(out_path + "\\OUTPUT\\tests\\tree_data") # if data_types == None: data_types = data_tree.enum_inds[0].data.dfs.keys() data_types.remove("Codes:") # for i in data_types: cur_cols = data_tree.enum_inds[0].data.dfs[i].columns.values.tolist() cur_cols = ["Codes:"] + cur_cols cur_pd = np.zeros((0,len(cur_cols))) for j in xrange(0,len(data_tree.enum_inds)): cur_data = data_tree.enum_inds[j].data.dfs[i].iloc[0,:] if(np.sum((cur_data != np.zeros(len(cur_cols)-1))) == 0): continue cur_code = data_tree.enum_inds[j].data.dfs["Codes:"].iloc[0,0] cur_data = np.array([cur_code] + cur_data.tolist()) cur_pd = np.vstack((cur_pd, cur_data)) cur_pd = pd.DataFrame(cur_pd, columns = cur_cols) cur_pd.to_csv(out_path + "\\" + i + ".csv") #''' #------------------------------------------------------------------------------- #Testing the "load_soi_corporate_data" function. #Checks: # 1) # 2) #------------------------------------------------------------------------------- #''' #def test_load_soi_corporate_data(data_tree, loaded = False, path = None, out_path = None): # # Default path if none is specified: # if path == None: # path = os.getcwd() # path = os.path.abspath(path + "\\data") # # # if out_path == None: # out_path = os.getcwd() # out_path = os.path.abspath(out_path + "\\OUTPUT\\tests") # # # if(not loaded): # naics.load_soi_corporate_data(data_tree, path) # # # corp_types = ["tot_corps", "s_corps", "c_corps"] # # # for i in corp_types: # cur_cols = data_tree.enum_inds[0].data.dfs[i].columns.values.tolist() # cur_cols = ["Codes:"] + cur_cols # cur_pd = np.zeros((0,len(cur_cols))) # for j in xrange(0,len(data_tree.enum_inds)): # cur_data = data_tree.enum_inds[j].data.dfs[i].iloc[0,:] # if(np.sum((cur_data != np.zeros(len(cur_cols)-1))) == 0): # continue # cur_code = data_tree.enum_inds[j].data.dfs["Codes:"].iloc[0,0] # cur_data = np.array([cur_code] + cur_data.tolist()) # # cur_pd = np.vstack((cur_pd, cur_data)) # cur_pd = pd.DataFrame(cur_pd, columns = cur_cols) # cur_pd.to_csv(out_path + "\\" + i + ".csv") # #''' #------------------------------------------------------------------------------- #Testing the "load_soi_corporate_data" function. 
#Checks: # 1) # 2) #------------------------------------------------------------------------------- #''' #def test_load_soi_partner_data(data_tree, loaded = False, path = None, out_path = None): # # Default path if none is specified: # if path == None: # path = os.getcwd() # path = os.path.abspath(path + "\\data") # # # if out_path == None: # out_path = os.getcwd() # out_path = os.path.abspath(out_path + "\\OUTPUT\\tests") # # # if(not loaded): # naics.load_soi_partner_data(data_tree, path) # # # #corp_types = ["tot_corps", "s_corps", "c_corps"] # asset_types = ['PA_inc_loss', 'PA_assets', 'PA_types'] # # # for i in asset_types: # cur_cols = data_tree.enum_inds[0].data.dfs[i].columns.values.tolist() # cur_cols = ["Codes:"] + cur_cols # cur_pd = np.zeros((0,len(cur_cols))) # for j in xrange(0,len(data_tree.enum_inds)): # cur_data = data_tree.enum_inds[j].data.dfs[i].iloc[0,:] # if(np.sum((cur_data != np.zeros(len(cur_cols)-1))) == 0): # continue # cur_code = data_tree.enum_inds[j].data.dfs["Codes:"].iloc[0,0] # cur_data = np.array([cur_code] + cur_data.tolist()) # # cur_pd = np.vstack((cur_pd, cur_data)) # cur_pd = pd.DataFrame(cur_pd, columns = cur_cols) # print cur_pd # cur_pd.to_csv(os.path.abspath(out_path + "\\" + i + ".csv"))
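'''
-------------------------------------------------------------------------------
Stand-alone sketch of the two-pointer matching loop in "test_load_naics",
using made-up code lists (the comparisons are simplified to a strict "<" and
an "else" branch). It shows how the "matches" counter accumulates.
-------------------------------------------------------------------------------
'''
def _matching_sketch():
    rep_codes = np.array([11, 21, 22, 31])
    orig_codes = np.array([11, 21, 23, 31])
    rep_index = orig_index = matches = 0
    while rep_index < len(rep_codes) and orig_index < len(orig_codes):
        if rep_codes[rep_index] == orig_codes[orig_index]:
            rep_index += 1
            orig_index += 1
            matches += 1
        elif rep_codes[rep_index] < orig_codes[orig_index]:
            rep_index += 1   # replicated code with no counterpart yet
        else:
            orig_index += 1  # original code missing from the tree
    return matches           # 3 here: code 23 never appears in rep_codes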
mit
decvalts/cartopy
lib/cartopy/tests/mpl/test_caching.py
3
8692
# (C) British Crown Copyright 2011 - 2018, Met Office # # This file is part of cartopy. # # cartopy is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # cartopy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with cartopy. If not, see <https://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) import gc import six try: from owslib.wmts import WebMapTileService except ImportError as e: WebMapTileService = None import matplotlib.pyplot as plt import pytest import cartopy.crs as ccrs from cartopy.mpl.feature_artist import FeatureArtist from cartopy.io.ogc_clients import WMTSRasterSource, _OWSLIB_AVAILABLE import cartopy.io.shapereader import cartopy.mpl.geoaxes as cgeoaxes import cartopy.mpl.patch from cartopy.examples.waves import sample_data class CallCounter(object): """ Exposes a context manager which can count the number of calls to a specific function. (useful for cache checking!) Internally, the target function is replaced with a new one created by this context manager which then increments ``self.count`` every time it is called. Example usage:: show_counter = CallCounter(plt, 'show') with show_counter: plt.show() plt.show() plt.show() print show_counter.count # <--- outputs 3 """ def __init__(self, parent, function_name): self.count = 0 self.parent = parent self.function_name = function_name self.orig_fn = getattr(parent, function_name) def __enter__(self): def replacement_fn(*args, **kwargs): self.count += 1 return self.orig_fn(*args, **kwargs) setattr(self.parent, self.function_name, replacement_fn) return self def __exit__(self, exc_type, exc_val, exc_tb): setattr(self.parent, self.function_name, self.orig_fn) @pytest.mark.natural_earth def test_coastline_loading_cache(): # a5caae040ee11e72a62a53100fe5edc355304419 added coastline caching. # This test ensures it is working. # Create coastlines to ensure they are cached. ax1 = plt.subplot(2, 1, 1, projection=ccrs.PlateCarree()) ax1.coastlines() plt.draw() # Create another instance of the coastlines and count # the number of times shapereader.Reader is created. counter = CallCounter(cartopy.io.shapereader.Reader, '__init__') with counter: ax2 = plt.subplot(2, 1, 1, projection=ccrs.Robinson()) ax2.coastlines() plt.draw() assert counter.count == 0, ('The shapereader Reader class was created {} ' 'times, indicating that the caching is not ' 'working.'.format(counter.count)) plt.close() @pytest.mark.natural_earth def test_shapefile_transform_cache(): # a5caae040ee11e72a62a53100fe5edc355304419 added shapefile mpl # geometry caching based on geometry object id. This test ensures # it is working. coastline_path = cartopy.io.shapereader.natural_earth(resolution="50m", category='physical', name='coastline') geoms = cartopy.io.shapereader.Reader(coastline_path).geometries() # Use the first 10 of them. geoms = tuple(geoms)[:10] n_geom = len(geoms) ax = plt.axes(projection=ccrs.Robinson()) # Empty the cache. 
FeatureArtist._geom_key_to_geometry_cache.clear() FeatureArtist._geom_key_to_path_cache.clear() assert len(FeatureArtist._geom_key_to_geometry_cache) == 0 assert len(FeatureArtist._geom_key_to_path_cache) == 0 counter = CallCounter(ax.projection, 'project_geometry') with counter: ax.add_geometries(geoms, ccrs.PlateCarree()) ax.add_geometries(geoms, ccrs.PlateCarree()) ax.add_geometries(geoms[:], ccrs.PlateCarree()) ax.figure.canvas.draw() # Without caching the count would have been # n_calls * n_geom, but should now be just n_geom. assert counter.count == n_geom, ('The given geometry was transformed too ' 'many times (expected: {}; got {}) - the' ' caching is not working.' ''.format(n_geom, counter.count)) # Check the cache has an entry for each geometry. assert len(FeatureArtist._geom_key_to_geometry_cache) == n_geom assert len(FeatureArtist._geom_key_to_path_cache) == n_geom # Check that the cache is empty again once we've dropped all references # to the source paths. plt.clf() del geoms gc.collect() assert len(FeatureArtist._geom_key_to_geometry_cache) == 0 assert len(FeatureArtist._geom_key_to_path_cache) == 0 plt.close() def test_contourf_transform_path_counting(): ax = plt.axes(projection=ccrs.Robinson()) ax.figure.canvas.draw() # Capture the size of the cache before our test. gc.collect() initial_cache_size = len(cgeoaxes._PATH_TRANSFORM_CACHE) path_to_geos_counter = CallCounter(cartopy.mpl.patch, 'path_to_geos') with path_to_geos_counter: x, y, z = sample_data((30, 60)) cs = plt.contourf(x, y, z, 5, transform=ccrs.PlateCarree()) n_geom = sum([len(c.get_paths()) for c in cs.collections]) del cs if not six.PY3: del c ax.figure.canvas.draw() # Before the performance enhancement, the count would have been 2 * n_geom, # but should now be just n_geom. msg = ('The given geometry was transformed too many times (expected: {}; ' 'got {}) - the caching is not working.' '').format(n_geom, path_to_geos_counter.count) assert path_to_geos_counter.count == n_geom, msg # Check the cache has an entry for each geometry. assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == initial_cache_size + n_geom # Check that the cache is empty again once we've dropped all references # to the source paths. plt.clf() gc.collect() assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == initial_cache_size plt.close() @pytest.mark.network @pytest.mark.skipif(not _OWSLIB_AVAILABLE, reason='OWSLib is unavailable.') def test_wmts_tile_caching(): image_cache = WMTSRasterSource._shared_image_cache image_cache.clear() assert len(image_cache) == 0 url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi' wmts = WebMapTileService(url) layer_name = 'MODIS_Terra_CorrectedReflectance_TrueColor' source = WMTSRasterSource(wmts, layer_name) gettile_counter = CallCounter(wmts, 'gettile') crs = ccrs.PlateCarree() extent = (-180, 180, -90, 90) resolution = (20, 10) with gettile_counter: source.fetch_raster(crs, extent, resolution) n_tiles = 2 assert gettile_counter.count == n_tiles, ('Too many tile requests - ' 'expected {}, got {}.'.format( n_tiles, gettile_counter.count) ) gc.collect() assert len(image_cache) == 1 assert len(image_cache[wmts]) == 1 tiles_key = (layer_name, '0') assert len(image_cache[wmts][tiles_key]) == n_tiles # Second time around we shouldn't request any more tiles so the # call count will stay the same. 
with gettile_counter: source.fetch_raster(crs, extent, resolution) assert gettile_counter.count == n_tiles, ('Too many tile requests - ' 'expected {}, got {}.'.format( n_tiles, gettile_counter.count) ) gc.collect() assert len(image_cache) == 1 assert len(image_cache[wmts]) == 1 tiles_key = (layer_name, '0') assert len(image_cache[wmts][tiles_key]) == n_tiles # Once there are no live references the weak-ref cache should clear. del source, wmts, gettile_counter gc.collect() assert len(image_cache) == 0
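# Self-contained illustration of the CallCounter helper defined at the top of
# this module; the choice of math.sqrt as the wrapped function is arbitrary
# and purely for demonstration. The original function is restored when the
# with-block exits.
import math

sqrt_counter = CallCounter(math, 'sqrt')
with sqrt_counter:
    math.sqrt(4.0)
    math.sqrt(9.0)
assert sqrt_counter.count == 2  # both calls went through the wrapper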
gpl-3.0
fengzhyuan/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
254
2795
"""Build a sentiment analysis / polarity model Sentiment analysis can be casted as a binary text classification problem, that is fitting a linear classifier on features extracted from the text of the user messages so as to guess wether the opinion of the author is positive or negative. In this examples we will use a movie review dataset. """ # Author: Olivier Grisel <[email protected]> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.datasets import load_files from sklearn.cross_validation import train_test_split from sklearn import metrics if __name__ == "__main__": # NOTE: we put the following in a 'if __name__ == "__main__"' protected # block to be able to use a multi-core grid search that also works under # Windows, see: http://docs.python.org/library/multiprocessing.html#windows # The multiprocessing module is used as the backend of joblib.Parallel # that is used when n_jobs != 1 in GridSearchCV # the training data folder must be passed as first argument movie_reviews_data_folder = sys.argv[1] dataset = load_files(movie_reviews_data_folder, shuffle=False) print("n_samples: %d" % len(dataset.data)) # split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.25, random_state=None) # TASK: Build a vectorizer / classifier pipeline that filters out tokens # that are too rare or too frequent pipeline = Pipeline([ ('vect', TfidfVectorizer(min_df=3, max_df=0.95)), ('clf', LinearSVC(C=1000)), ]) # TASK: Build a grid search to find out whether unigrams or bigrams are # more useful. # Fit the pipeline on the training set using grid search for the parameters parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1) grid_search.fit(docs_train, y_train) # TASK: print the cross-validated scores for the each parameters set # explored by the grid search print(grid_search.grid_scores_) # TASK: Predict the outcome on the testing set and store it in a variable # named y_predicted y_predicted = grid_search.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Print and plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) # import matplotlib.pyplot as plt # plt.matshow(cm) # plt.show()
bsd-3-clause
hrjn/scikit-learn
setup.py
12
10272
#! /usr/bin/env python # # Copyright (C) 2007-2009 Cournapeau David <[email protected]> # 2010 Fabian Pedregosa <[email protected]> # License: 3-clause BSD descr = """A set of python modules for machine learning and data mining""" import sys import os import shutil from distutils.command.clean import clean as Clean from pkg_resources import parse_version import traceback if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins # This is a bit (!) hackish: we are setting a global variable so that the main # sklearn __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet: # the numpy distutils extensions that are used by scikit-learn to recursively # build the compiled extensions in sub-packages is based on the Python import # machinery. builtins.__SKLEARN_SETUP__ = True DISTNAME = 'scikit-learn' DESCRIPTION = 'A set of python modules for machine learning and data mining' with open('README.rst') as f: LONG_DESCRIPTION = f.read() MAINTAINER = 'Andreas Mueller' MAINTAINER_EMAIL = '[email protected]' URL = 'http://scikit-learn.org' LICENSE = 'new BSD' # We can actually import a restricted version of sklearn that # does not need the compiled code import sklearn VERSION = sklearn.__version__ SCIPY_MIN_VERSION = '0.9' NUMPY_MIN_VERSION = '1.6.1' # Optional setuptools features # We need to import setuptools early, if we want setuptools features, # as it monkey-patches the 'setup' function # For some commands, use setuptools SETUPTOOLS_COMMANDS = set([ 'develop', 'release', 'bdist_egg', 'bdist_rpm', 'bdist_wininst', 'install_egg_info', 'build_sphinx', 'egg_info', 'easy_install', 'upload', 'bdist_wheel', '--single-version-externally-managed', ]) if SETUPTOOLS_COMMANDS.intersection(sys.argv): import setuptools extra_setuptools_args = dict( zip_safe=False, # the package can run out of an .egg file include_package_data=True, extras_require={ 'alldeps': ( 'numpy >= {0}'.format(NUMPY_MIN_VERSION), 'scipy >= {0}'.format(SCIPY_MIN_VERSION), ), }, ) else: extra_setuptools_args = dict() # Custom clean command to remove build artifacts class CleanCommand(Clean): description = "Remove build artifacts from the source tree" def run(self): Clean.run(self) # Remove c files if we are not within a sdist package cwd = os.path.abspath(os.path.dirname(__file__)) remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO')) if remove_c_files: print('Will remove generated .c files') if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sklearn'): for filename in filenames: if any(filename.endswith(suffix) for suffix in (".so", ".pyd", ".dll", ".pyc")): os.unlink(os.path.join(dirpath, filename)) continue extension = os.path.splitext(filename)[1] if remove_c_files and extension in ['.c', '.cpp']: pyx_file = str.replace(filename, extension, '.pyx') if os.path.exists(os.path.join(dirpath, pyx_file)): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname == '__pycache__': shutil.rmtree(os.path.join(dirpath, dirname)) cmdclass = {'clean': CleanCommand} # Optional wheelhouse-uploader features # To automate release of binary packages for scikit-learn we need a tool # to download the packages generated by travis and appveyor workers (with # version number matching the current release) and upload them all at once # to PyPI at release time. # The URL of the artifact repositories are configured in the setup.cfg file. 
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all']) if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv): import wheelhouse_uploader.cmd cmdclass.update(vars(wheelhouse_uploader.cmd)) def configuration(parent_package='', top_path=None): if os.path.exists('MANIFEST'): os.remove('MANIFEST') from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) # Avoid non-useful msg: # "Ignoring attempt to set 'name' (from ... " config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sklearn') return config def get_scipy_status(): """ Returns a dictionary containing a boolean specifying whether SciPy is up-to-date, along with the version string (empty string if not installed). """ scipy_status = {} try: import scipy scipy_version = scipy.__version__ scipy_status['up_to_date'] = parse_version( scipy_version) >= parse_version(SCIPY_MIN_VERSION) scipy_status['version'] = scipy_version except ImportError: traceback.print_exc() scipy_status['up_to_date'] = False scipy_status['version'] = "" return scipy_status def get_numpy_status(): """ Returns a dictionary containing a boolean specifying whether NumPy is up-to-date, along with the version string (empty string if not installed). """ numpy_status = {} try: import numpy numpy_version = numpy.__version__ numpy_status['up_to_date'] = parse_version( numpy_version) >= parse_version(NUMPY_MIN_VERSION) numpy_status['version'] = numpy_version except ImportError: traceback.print_exc() numpy_status['up_to_date'] = False numpy_status['version'] = "" return numpy_status def setup_package(): metadata = dict(name=DISTNAME, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, license=LICENSE, url=URL, version=VERSION, long_description=LONG_DESCRIPTION, classifiers=['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: C', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], cmdclass=cmdclass, **extra_setuptools_args) if len(sys.argv) == 1 or ( len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean'))): # For these actions, NumPy is not required # # They are required to succeed without Numpy for example when # pip is used to install Scikit-learn when Numpy is not yet present in # the system. 
try: from setuptools import setup except ImportError: from distutils.core import setup metadata['version'] = VERSION else: numpy_status = get_numpy_status() numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format( NUMPY_MIN_VERSION) scipy_status = get_scipy_status() scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format( SCIPY_MIN_VERSION) instructions = ("Installation instructions are available on the " "scikit-learn website: " "http://scikit-learn.org/stable/install.html\n") if numpy_status['up_to_date'] is False: if numpy_status['version']: raise ImportError("Your installation of Numerical Python " "(NumPy) {0} is out-of-date.\n{1}{2}" .format(numpy_status['version'], numpy_req_str, instructions)) else: raise ImportError("Numerical Python (NumPy) is not " "installed.\n{0}{1}" .format(numpy_req_str, instructions)) if scipy_status['up_to_date'] is False: if scipy_status['version']: raise ImportError("Your installation of Scientific Python " "(SciPy) {0} is out-of-date.\n{1}{2}" .format(scipy_status['version'], scipy_req_str, instructions)) else: raise ImportError("Scientific Python (SciPy) is not " "installed.\n{0}{1}" .format(scipy_req_str, instructions)) from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ == "__main__": setup_package()
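# Small illustration of the parse_version comparisons that drive the NumPy /
# SciPy status checks above; the version strings on the left are arbitrary
# examples. Components compare numerically rather than lexically, which is
# the reason parse_version is used instead of plain string comparison.
assert parse_version('1.10.1') >= parse_version(NUMPY_MIN_VERSION)
assert parse_version('0.13.1') >= parse_version(SCIPY_MIN_VERSION)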
bsd-3-clause
hugobowne/scikit-learn
examples/cluster/plot_agglomerative_clustering.py
343
2931
""" Agglomerative clustering with and without structure =================================================== This example shows the effect of imposing a connectivity graph to capture local structure in the data. The graph is simply the graph of 20 nearest neighbors. Two consequences of imposing a connectivity can be seen. First clustering with a connectivity matrix is much faster. Second, when using a connectivity matrix, average and complete linkage are unstable and tend to create a few clusters that grow very quickly. Indeed, average and complete linkage fight this percolation behavior by considering all the distances between two clusters when merging them. The connectivity graph breaks this mechanism. This effect is more pronounced for very sparse graphs (try decreasing the number of neighbors in kneighbors_graph) and with complete linkage. In particular, having a very small number of neighbors in the graph, imposes a geometry that is close to that of single linkage, which is well known to have this percolation instability. """ # Authors: Gael Varoquaux, Nelle Varoquaux # License: BSD 3 clause import time import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.neighbors import kneighbors_graph # Generate sample data n_samples = 1500 np.random.seed(0) t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples)) x = t * np.cos(t) y = t * np.sin(t) X = np.concatenate((x, y)) X += .7 * np.random.randn(2, n_samples) X = X.T # Create a graph capturing local connectivity. Larger number of neighbors # will give more homogeneous clusters to the cost of computation # time. A very large number of neighbors gives more evenly distributed # cluster sizes, but may not impose the local manifold structure of # the data knn_graph = kneighbors_graph(X, 30, include_self=False) for connectivity in (None, knn_graph): for n_clusters in (30, 3): plt.figure(figsize=(10, 4)) for index, linkage in enumerate(('average', 'complete', 'ward')): plt.subplot(1, 3, index + 1) model = AgglomerativeClustering(linkage=linkage, connectivity=connectivity, n_clusters=n_clusters) t0 = time.time() model.fit(X) elapsed_time = time.time() - t0 plt.scatter(X[:, 0], X[:, 1], c=model.labels_, cmap=plt.cm.spectral) plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time), fontdict=dict(verticalalignment='top')) plt.axis('equal') plt.axis('off') plt.subplots_adjust(bottom=0, top=.89, wspace=0, left=0, right=1) plt.suptitle('n_cluster=%i, connectivity=%r' % (n_clusters, connectivity is not None), size=17) plt.show()
bsd-3-clause
bathepawan/workload-automation
wlauto/result_processors/uxperf.py
1
10864
# Copyright 2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import re import logging from collections import defaultdict from distutils.version import LooseVersion from wlauto import ResultProcessor, Parameter from wlauto.instrumentation import instrument_is_enabled from wlauto.instrumentation.fps import VSYNC_INTERVAL from wlauto.exceptions import ResultProcessorError, ConfigError from wlauto.utils.fps import FpsProcessor, SurfaceFlingerFrame, GfxInfoFrame from wlauto.utils.types import numeric, boolean try: import pandas as pd except ImportError: pd = None class UxPerfResultProcessor(ResultProcessor): name = 'uxperf' description = ''' Parse logcat for UX_PERF markers to produce performance metrics for workload actions using specified instrumentation. An action represents a series of UI interactions to capture. NOTE: The UX_PERF markers are turned off by default and must be enabled in a agenda file by setting ``markers_enabled`` for the workload to ``True``. ''' parameters = [ Parameter('add_timings', kind=boolean, default=True, description=''' If set to ``True``, add per-action timings to result metrics.' '''), Parameter('add_frames', kind=boolean, default=False, description=''' If set to ``True``, add per-action frame statistics to result metrics. i.e. fps, frame_count, jank and not_at_vsync. NOTE: This option requires the fps instrument to be enabled. '''), Parameter('drop_threshold', kind=numeric, default=5, description=''' Data points below this FPS will be dropped as they do not constitute "real" gameplay. The assumption being that while actually running, the FPS in the game will not drop below X frames per second, except on loading screens, menus, etc, which should not contribute to FPS calculation. '''), Parameter('generate_csv', kind=boolean, default=True, description=''' If set to ``True``, this will produce temporal per-action fps data in the results directory, in a file named <action>_fps.csv. Note: per-action fps data will appear as discrete step-like values in order to produce a more meainingfull representation, a rolling mean can be applied. '''), ] def initialize(self, context): if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'): message = ('uxperf result processor requires pandas Python package ' '(version 0.13.1 or higher) to be installed.\n' 'You can install it with pip, e.g. 
"sudo pip install pandas"') raise ResultProcessorError(message) if self.add_frames and not instrument_is_enabled('fps'): raise ConfigError('fps instrument must be enabled in order to add frames.') def export_iteration_result(self, result, context): parser = UxPerfParser(context) logfile = os.path.join(context.output_directory, 'logcat.log') framelog = os.path.join(context.output_directory, 'frames.csv') self.logger.debug('Parsing logcat.log for UX_PERF markers') parser.parse(logfile) if self.add_timings: self.logger.debug('Adding per-action timings') parser.add_action_timings() if self.add_frames: self.logger.debug('Adding per-action frame metrics') parser.add_action_frames(framelog, self.drop_threshold, self.generate_csv) class UxPerfParser(object): ''' Parses logcat messages for UX Performance markers. UX Performance markers are output from logcat under a debug priority. The logcat tag for the marker messages is UX_PERF. The messages associated with this tag consist of a name for the action to be recorded and a timestamp. These fields are delimited by a single space. e.g. <TAG> : <MESSAGE> UX_PERF : gestures_swipe_left_start 861975087367 ... ... UX_PERF : gestures_swipe_left_end 862132085804 Timestamps are produced using the running Java Virtual Machine's high-resolution time source, in nanoseconds. ''' def __init__(self, context): self.context = context self.actions = defaultdict(list) self.logger = logging.getLogger('UxPerfParser') # regex for matching logcat message format: self.regex = re.compile(r'UX_PERF.*?:\s*(?P<message>.*\d+$)') def parse(self, log): ''' Opens log file and parses UX_PERF markers. Actions delimited by markers are captured in a dictionary with actions mapped to timestamps. ''' loglines = self._read(log) self._gen_action_timestamps(loglines) def add_action_frames(self, frames, drop_threshold, generate_csv): # pylint: disable=too-many-locals ''' Uses FpsProcessor to parse frame.csv extracting fps, frame count, jank and vsync metrics on a per action basis. Adds results to metrics. 
''' refresh_period = self._parse_refresh_peroid() for action in self.actions: # default values fps, frame_count, janks, not_at_vsync = float('nan'), 0, 0, 0 p90, p95, p99 = [float('nan')] * 3 metrics = (fps, frame_count, janks, not_at_vsync) df = self._create_sub_df(self.actions[action], frames) if not df.empty: # pylint: disable=maybe-no-member fp = FpsProcessor(df, action=action) try: per_frame_fps, metrics = fp.process(refresh_period, drop_threshold) fps, frame_count, janks, not_at_vsync = metrics if generate_csv: name = action + '_fps' filename = name + '.csv' fps_outfile = os.path.join(self.context.output_directory, filename) per_frame_fps.to_csv(fps_outfile, index=False, header=True) self.context.add_artifact(name, path=filename, kind='data') p90, p95, p99 = fp.percentiles() except AttributeError: self.logger.warning('Non-matched timestamps in dumpsys output: action={}' .format(action)) self.context.result.add_metric(action + '_FPS', fps) self.context.result.add_metric(action + '_frame_count', frame_count) self.context.result.add_metric(action + '_janks', janks, lower_is_better=True) self.context.result.add_metric(action + '_not_at_vsync', not_at_vsync, lower_is_better=True) self.context.result.add_metric(action + '_frame_time_90percentile', p90, 'ms', lower_is_better=True) self.context.result.add_metric(action + '_frame_time_95percentile', p95, 'ms', lower_is_better=True) self.context.result.add_metric(action + '_frame_time_99percentile', p99, 'ms', lower_is_better=True) def add_action_timings(self): ''' Add simple action timings in millisecond resolution to metrics ''' for action, timestamps in self.actions.iteritems(): # nanosecond precision, but not necessarily nanosecond resolution # truncate to guarantee millisecond precision ts_ms = tuple(int(ts[:-6]) for ts in timestamps) if len(ts_ms) == 2: start, finish = ts_ms duration = finish - start result = self.context.result result.add_metric(action + "_start", start, units='ms') result.add_metric(action + "_finish", finish, units='ms') result.add_metric(action + "_duration", duration, units='ms', lower_is_better=True) else: self.logger.warning('Expected two timestamps. Received {}'.format(ts_ms)) def _gen_action_timestamps(self, lines): ''' Parses lines and matches against logcat tag. Groups timestamps by action name. Creates a dictionary of lists with actions mapped to timestamps. ''' for line in lines: match = self.regex.search(line) if match: message = match.group('message') action_with_suffix, timestamp = message.rsplit(' ', 1) action, _ = action_with_suffix.rsplit('_', 1) self.actions[action].append(timestamp) def _parse_refresh_peroid(self): ''' Reads the first line of the raw dumpsys output for the refresh period. ''' raw_path = os.path.join(self.context.output_directory, 'surfaceflinger.raw') if os.path.isfile(raw_path): raw_lines = self._read(raw_path) refresh_period = int(raw_lines.next()) else: refresh_period = VSYNC_INTERVAL return refresh_period def _create_sub_df(self, action, frames): ''' Creates a data frame containing fps metrics for a captured action. ''' start, end = map(int, action) df = pd.read_csv(frames) # SurfaceFlinger Algorithm if df.columns.tolist() == list(SurfaceFlingerFrame._fields): # pylint: disable=maybe-no-member field = 'actual_present_time' # GfxInfo Algorithm elif df.columns.tolist() == list(GfxInfoFrame._fields): # pylint: disable=maybe-no-member field = 'FrameCompleted' else: field = '' self.logger.error('frames.csv not in a recognised format. 
Cannot parse.') if field: df = df[start < df[field]] df = df[df[field] <= end] return df def _read(self, log): ''' Opens a file a yields the lines with whitespace stripped. ''' try: with open(log, 'r') as rfh: for line in rfh: yield line.strip() except IOError: self.logger.error('Could not open {}'.format(log))
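# Stand-alone sketch of the UX_PERF marker format described in the
# UxPerfParser docstring; the two log lines are the docstring's own example,
# and the [:-6] truncation mirrors add_action_timings (nanoseconds to
# milliseconds). Guarded so that importing this module is unaffected.
if __name__ == '__main__':
    regex = re.compile(r'UX_PERF.*?:\s*(?P<message>.*\d+$)')
    lines = ['UX_PERF : gestures_swipe_left_start 861975087367',
             'UX_PERF : gestures_swipe_left_end 862132085804']
    stamps_ms = []
    for line in lines:
        message = regex.search(line).group('message')
        action_with_suffix, timestamp = message.rsplit(' ', 1)
        stamps_ms.append(int(timestamp[:-6]))  # truncate ns to ms precision
    print('duration: {} ms'.format(stamps_ms[1] - stamps_ms[0]))  # 157 ms here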
apache-2.0
CA-Lab/moral-exchange
simulations/hopfield_habituation.py
1
4183
import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt import pylab as pl import random as rd import scipy as sp import networkx as nx import numpy as np import math as mt import pprint as ppt time_list = [] energy_state = [] """Generates a full connected network""" def init(): global time, g, positions, E E = 0 time = 0 g = nx.Graph() g.add_nodes_from(['a','b','c','d']) for i in g.node: for j in g.node: g.add_edge(i,j) for i in g.nodes(): g.node[i]['s'] = rd.choice([True, False]) g.node[i]['p'] = 0 g.node[i]['prime'] = rd.choice([True, False]) #for i, j in g.edges(): #g.edge[i][j]['w'] = rd.choice([-2,-1,0,1,2]) # for i in g.edge: # for j in g.edge: # g.edge[i][j]['w'] = rd.choice([-2,-1,0,1,2]) def init_watts(): global time, g, positions, E E = 0 time = 0 g = nx.watts_strogatz_graph(100, 2, 0.3) for i in g.nodes(): g.node[i]['s'] = rd.choice([True, False]) g.node[i]['p'] = 0 g.node[i]['prime'] = rd.choice([True, False]) def init_erdos(): global time, g, positions, E E = 0 time = 0 g = nx.erdos_renyi_graph(100, .3) for i in g.nodes(): g.node[i]['s'] = rd.choice([True, False]) def init_barabasi(): global time, g, positions, E E = 0 time = 0 g = nx.barabasi_albert_graph(200, 15) for i in g.nodes(): g.node[i]['s'] = rd.choice([True, False]) def draw(): pl.cla() node_color = [] for i in g.nodes_iter(): if g.node[i]['s']: node_color.append(1) else: node_color.append(-1) nx.draw(g, pos = positions, node_color = node_color, with_labels = True, edge_color = 'c', cmap = pl.cm.autumn, vmin = 0, vmax = 1) pl.axis('image') pl.title('t = ' + str(time)) #pl.title('Energy = ' + str(E)) plt.show() def p_local_u(i): u = local_u(i) m = [] for j in g.neighbors(i): if g.node[i]['prime']: Ar = 0.005 else: Ar = -0.005 g.node[i]['p'] += Ar if (2 * u) > (2 * u + g.node[i]['p']): g.node[i]['prime'] = True else: g.node[i]['prime'] = False return 2 * u + g.node[i]['p'] def local_u(i): m = [] for j in g.neighbors(i): if g.node[i]['s'] == g.node[j]['s']: m.append( 1 ) return sum(m) def global_u(): gu = [] for i in g.nodes(): gu.append( local_u( i ) ) return sum(gu) def step(): global time, g, positions, E time += 1 states = [] """ef for energy function""" ef = [] i = rd.choice(g.nodes()) theta = p_local_u(i) / len( g.neighbors(i) ) if theta < 0.5: g.node[i]['s'] = not g.node[i]['s'] time_list.append(time) energy_state.append(global_u()) def step_sync_global(): global time, g, positions, E time += 1 states = [] m = [] """ef for energy function""" ef = [] g_plus = g.copy() for i in g.nodes(): for j in g.neighbors(i): m.append( g.edge[i][j]['w'] * g.node[j]['s'] ) e = sum(m) # print i, e if e >= 1: g_plus.node[i]['s'] = 1 else: g_plus.node[i]['s'] = -1 g = g_plus.copy() for i, j in g.edges(): if g.node[i]['s'] == 1 and g.node[j]['s'] == 1: ef.append( g.edge[i][j]['w'] ) E = sum(ef) for i in g.node: states.append(g.node[i]['s']) if len(states) == 4: print states time_list.append(time) energy_state.append(E) import pycxsimulator #init() init_watts() #init_erdos() #init_barabasi() positions = nx.spring_layout(g) pycxsimulator.GUI().start(func = [init_watts, draw, step]) plt.cla() plt.plot(time_list, energy_state, 'bs-') plt.xlabel('Time') plt.ylabel('Energy states') #plt.ylim(-100, 100) #plt.yticks(range(-10, 13, 2)) plt.savefig('hh_plot.png') #plt.show()
gpl-3.0
DanielAndreasen/SWEET-Cat
checkDuplicates.py
1
4561
import pandas as pd import numpy as np import warnings from clint.textui import colored warnings.simplefilter("ignore") class Sweetcat: """Load SWEET-Cat database""" def __init__(self): # self.fname_sc = 'WEBSITE_online_EU-NASA_full_database.rdb' self.fname_sc = 'WEBSITE_online_EU-NASA_full_database_clean.rdb' # Loading the SweetCat database self.readSC() def readSC(self): # TODO: Use the ra and dec, and match with coordinates instead of name # stored in self.coordinates. # Read the current version of SWEET-Cat names_ = ['name', 'hd', 'ra', 'dec', 'V', 'Verr', 'p', 'perr', 'pflag', 'Teff', 'Tefferr', 'logg', 'logger', 'n1', 'n2', 'vt', 'vterr', 'feh', 'feherr', 'M', 'Merr', 'author', 'link', 'source', 'update', 'comment', 'database', 'n3'] # SC = pd.read_csv('WEBSITE_online.rdb', delimiter='\t', names=names_) SC = pd.read_csv(self.fname_sc, delimiter='\t', names=names_) # Clean star names self.sc_names = [x.lower().replace(' ', '').replace('-', '') for x in SC.name] self.sc_names = list(map(str.strip, self.sc_names)) # Original star names self.sc_names_orig = [x.strip() for x in SC.name] # Coordinates of the stars in SWEET-Cat self.coordinates = SC.loc[:, ['ra', 'dec']] # SWEET-Cat (used to automatically update the database label) self.SC = SC if __name__ == '__main__': # Loading SWEET Cat sc = Sweetcat() # Check for duplicates, subset of columns can be changed print('\nChecking for possible duplicates ...') print(colored.green('Same RA/DEC')) print(sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)][['name', 'hd', 'ra', 'dec']]) print(colored.green('\nSame HD number')) print(sc.SC[sc.SC.duplicated(['hd'], keep=False)].dropna(subset=['hd'])[['name', 'hd', 'ra', 'dec']]) print(colored.green('\nApproximate RA/DEC ...')) # Remove the characters after the . 
in the coordinates ra_sc = sc.SC['ra'].values.tolist() ra_approx = list(map(lambda i: i[:i.find('.')], ra_sc)) dec_sc = sc.SC['dec'].values.tolist() dec_approx = list(map(lambda i: i[:i.find('.')], dec_sc)) # Check for similar RA/DEC idx_duplicate = [] for idx, (ra, dec) in enumerate(zip(ra_approx, dec_approx)): dupli = list(np.where((np.array(ra_approx) == ra) & (np.array(dec_approx) == dec))[0]) if len(dupli) > 1: idx_duplicate.append(dupli) # Print possible duplicates print(colored.green('RA/DEC are similar: possible duplicates\n')) unique_duplicate = set([tuple(t) for t in idx_duplicate]) for idx in unique_duplicate: print(sc.SC.iloc[list(idx)][['name', 'hd', 'ra', 'dec']]) # Remove the -1.0 in microturbulence and its error sc.SC[sc.SC['vt'] < 0.0][['name', 'hd', 'ra', 'dec', 'vt', 'vterr', 'author', 'link']] # Change the value of a given cell # sc.SC.at[9, 'vt'] = 1.44 # sc.SC.at[9, 'vterr'] = np.nan # Uncomment some of the following lines to remove duplicates # Indexes of the duplicates # indexes = sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)].index # Remove a row (HD21749) # new_sc = sc.SC.drop([2728]) # # Write the new file # # Convert Tefferr column to integers # new_sc['Tefferr'] = new_sc['Tefferr'].fillna('-111111') # new_sc['Tefferr'] = new_sc['Tefferr'].astype(int).replace(-111111, 'NULL') # # Replace NaN by NULL # new_sc.fillna(value='NULL', inplace=True) # new_sc.to_csv('WEBSITE_online_EU-NASA_full_database_clean_09-03-2020.rdb', # sep='\t', index=False, header=False) # # Select only the EU data # sc_EU = new_sc[new_sc['database'].str.contains('EU')] # # Drop the database column # sc_like_old = sc_EU.drop(columns=['database']) # sc_like_old.to_csv('WEBSITE_online_EU-updated_09-03-2020.rdb', # sep='\t', index=False, header=False)
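    # Toy illustration of the truncation-based matching used above, with
    # invented coordinates: stars sharing the integer part of both RA and DEC
    # are flagged as possible duplicates.
    ra_demo = ['12.345', '12.678', '13.001']
    dec_demo = ['-5.432', '-5.999', '-5.001']
    ra_demo_approx = [s[:s.find('.')] for s in ra_demo]    # ['12', '12', '13']
    dec_demo_approx = [s[:s.find('.')] for s in dec_demo]  # ['-5', '-5', '-5']
    # -> indices 0 and 1 collide and would be reported as a duplicate pair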
mit
vivekmishra1991/scikit-learn
examples/ensemble/plot_partial_dependence.py
249
4456
""" ======================== Partial Dependence Plots ======================== Partial dependence plots show the dependence between the target function [1]_ and a set of 'target' features, marginalizing over the values of all other features (the complement features). Due to the limits of human perception the size of the target feature set must be small (usually, one or two) thus the target features are usually chosen among the most important features (see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`). This example shows how to obtain partial dependence plots from a :class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California housing dataset. The example is taken from [HTF2009]_. The plot shows four one-way and one two-way partial dependence plots. The target variables for the one-way PDP are: median income (`MedInc`), avg. occupants per household (`AvgOccup`), median house age (`HouseAge`), and avg. rooms per household (`AveRooms`). We can clearly see that the median house price shows a linear relationship with the median income (top left) and that the house price drops when the avg. occupants per household increases (top middle). The top right plot shows that the house age in a district does not have a strong influence on the (median) house price; so does the average rooms per household. The tick marks on the x-axis represent the deciles of the feature values in the training data. Partial dependence plots with two target features enable us to visualize interactions among them. The two-way partial dependence plot shows the dependence of median house price on joint values of house age and avg. occupants per household. We can clearly see an interaction between the two features: For an avg. occupancy greater than two, the house price is nearly independent of the house age, whereas for values less than two there is a strong dependence on age. .. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. .. [1] For classification you can think of it as the regression score before the link function. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.cross_validation import train_test_split from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.datasets.california_housing import fetch_california_housing # fetch California housing dataset cal_housing = fetch_california_housing() # split 80/20 train-test X_train, X_test, y_train, y_test = train_test_split(cal_housing.data, cal_housing.target, test_size=0.2, random_state=1) names = cal_housing.feature_names print('_' * 80) print("Training GBRT...") clf = GradientBoostingRegressor(n_estimators=100, max_depth=4, learning_rate=0.1, loss='huber', random_state=1) clf.fit(X_train, y_train) print("done.") print('_' * 80) print('Convenience plot with ``partial_dependence_plots``') print features = [0, 5, 1, 2, (5, 1)] fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names, n_jobs=3, grid_resolution=50) fig.suptitle('Partial dependence of house value on nonlocation features\n' 'for the California housing dataset') plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle print('_' * 80) print('Custom 3d plot via ``partial_dependence``') print fig = plt.figure() target_feature = (1, 5) pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature, X=X_train, grid_resolution=50) XX, YY = np.meshgrid(x_axis, y_axis) Z = pdp.T.reshape(XX.shape).T ax = Axes3D(fig) surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu) ax.set_xlabel(names[target_feature[0]]) ax.set_ylabel(names[target_feature[1]]) ax.set_zlabel('Partial dependence') # pretty init view ax.view_init(elev=22, azim=122) plt.colorbar(surf) plt.suptitle('Partial dependence of house value on median age and ' 'average occupancy') plt.subplots_adjust(top=0.9) plt.show()
bsd-3-clause
tedmeeds/tcga_encoder
tcga_encoder/models/dna/models.py
1
15323
from tcga_encoder.utils.helpers import * from scipy import special #import numpy as np import pdb from sklearn.neighbors import KernelDensity from sklearn import linear_model def logistic_sigmoid( activations ): return 1.0 / (1.0 + np.exp(-activations) ) class NaiveBayesClassifier(object): def fit( self, X, y ): raise NotImplementedError def predict( self, X ): raise NotImplementedError class LogisticRegressionModel( object ): def get_weights(self): return self.w def fit( self, X, y, one_hot_groups = None ): if one_hot_groups is None: return self.fit_normal( X, y ) else: return self.fit_grouped( X, y, one_hot_groups ) def fit_normal( self, X, y ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.mu_1_0 = X[self.class_1,:].mean(0) self.mu_0_0 = X[self.class_0,:].mean(0) self.var_1_0 = X[self.class_0,:].var(0) self.var_0_0 = X[self.class_0,:].var(0) + 1.0 X_normed = X - self.mu_0_0 X_normed /= self.var_0_0 self.model = linear_model.LogisticRegression( penalty='l1', C = 0.5 ) self.model.fit( X_normed, y ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.w = self.model.coef_ self.common_b = self.model.intercept_ def fit_grouped( self, X, y, groups ): assert False, "not implemented" self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.mu_1_0 = X[self.class_1,:].mean(0) self.mu_0_0 = X[self.class_0,:].mean(0) self.var_1_0 = X[self.class_0,:].var(0) self.var_0_0 = X[self.class_0,:].var(0) D = len(self.mu_1_0) N,K = groups.shape pi_1 = self.pi_1 self.pi_1 = pi_1*np.ones(K) self.mu_1 = self.mu_1_0*np.ones((K,D)) self.mu_0 = self.mu_0_0*np.ones((K,D)) self.var_1 = self.var_1_0*np.ones((K,D)) self.var_0 = self.var_0_0*np.ones((K,D)) for k in range(K): ik = pp.find(groups[:,k]==1) class_1 = pp.find( y[ik]==1 ) class_0 = pp.find( y[ik]==0 ) if len(class_1) > 0: self.pi_1[ k ] = len( class_1 ) / float(len(ik)) self.mu_1[ k,: ] = X[class_1,:].mean(0) self.mu_0[ k,: ] = X[class_0,:].mean(0) self.var_1[ k,: ] = 0.9*X[class_0,:].var(0) + 0.1*self.var_1_0.reshape((1,D)) self.var_0[ k,: ] = 0.9*X[class_0,:].var(0) + 0.1*self.var_0_0.reshape((1,D)) self.w = self.mu_1/self.var_1 - self.mu_0/self.var_0 self.b_vec = self.mu_0*self.mu_0/(2*self.var_0) - self.mu_1*self.mu_1/(2*self.var_1) + 0.5*np.log(self.var_0)- 0.5*np.log(self.var_1) self.b_x_factor = 1.0/(2*self.var_0) - 1.0/(2*self.var_1) self.common_b = np.log(self.pi_1) - np.log(self.pi_0) self.b = np.sum( self.b_vec ) def predict( self, X, elementwise = False, one_hot_groups = None ): if one_hot_groups is None: return self.predict_normal( X, elementwise=elementwise) else: return self.predict_grouped( X, one_hot_groups, elementwise=elementwise ) def predict_normal( self, X, elementwise = False ): N,D = X.shape common_b = self.common_b X_normed = X - self.mu_0_0 X_normed /= self.var_0_0 if elementwise is True: activations = X_normed*self.w + self.common_b predictions = logistic_sigmoid( activations ) else: predictions = self.model.predict_proba( X_normed )[:,1] return predictions def predict_grouped( self, X, groups, elementwise = False ): N,D = X.shape common_b = np.dot( groups, self.common_b ) #.reshape( (N,1)) w = np.dot( groups, self.w ) b = np.dot( groups, self.b_vec ) b_x_factor = np.dot( groups, self.b_x_factor ) if elementwise is True: #pdb.set_trace() activations = X*w + np.square(X)*b_x_factor + b + common_b.reshape((N,1)) else: #z = np.hstack( (Z_alpha,Z_beta) ) #pdb.set_trace() activations = np.sum(X*w,1) + 
np.sum(np.square(X)*b_x_factor,1) + b.sum(1) + common_b predictions = logistic_sigmoid( activations ) if np.any( np.isnan( predictions) ) or np.any( np.isinf( predictions) ): pdb.set_trace() return predictions class BetaNaiveBayesModel( object ): def get_weights(self): return np.vstack( (self.w_alpha,self.w_beta)).T def fit( self, X, y ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.mu_1 = X[self.class_1,:].mean(0) self.mu_0 = X[self.class_0,:].mean(0) self.var_1 = X[self.class_1,:].var(0) self.var_0 = X[self.class_0,:].var(0) self.alpha_1 = self.mu_1*( self.mu_1*(1-self.mu_1)/self.var_1 - 1.0 ) self.alpha_0 = self.mu_0*( self.mu_0*(1-self.mu_0)/self.var_0 - 1.0 ) self.beta_1 = (1.0-self.mu_1)*( self.mu_1*(1-self.mu_1)/self.var_1 - 1.0 ) self.beta_0 = (1.0-self.mu_0)*( self.mu_0*(1-self.mu_0)/self.var_0 - 1.0 ) # w is D by 2 self.w_alpha = self.alpha_1 - self.alpha_0 self.w_beta = self.beta_1 - self.beta_0 self.b_vec = - special.gammaln( self.alpha_0+self.beta_0 ) \ + special.gammaln( self.alpha_1+self.beta_1 ) \ + special.gammaln( self.alpha_0 ) + special.gammaln( self.beta_0 ) \ - special.gammaln( self.alpha_1 ) - special.gammaln( self.beta_1 ) self.common_b = np.log(self.pi_1) - np.log(self.pi_0) self.w = np.hstack( (self.w_alpha, self.w_beta )).T self.b = np.sum( self.b_vec ) def predict( self, X, elementwise = False ): logX = np.log( np.maximum( X, 1e-12 ) ) logXcomp = np.log( np.maximum( 1.0-X, 1e-12 ) ) N,D = X.shape Z_alpha = logX Z_beta = logXcomp if elementwise is True: activations_alpha = Z_alpha*self.w_alpha activations_beta = Z_beta*self.w_beta activations = activations_alpha+activations_beta + self.b_vec + self.common_b else: z = np.hstack( (Z_alpha,Z_beta) ) activations = np.dot( z, self.w ) + self.b + self.common_b predictions = logistic_sigmoid( activations ) return predictions class PoissonNaiveBayesModel( object ): def get_weights(self): return self.w def fit( self, X, y ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.mu_1 = X[self.class_1,:].mean(0) self.mu_0 = X[self.class_0,:].mean(0) self.var_1 = X[self.class_1,:].var(0) self.var_0 = X[self.class_0,:].var(0) self.rate_1 = self.mu_1 self.rate_0 = self.mu_0 self.w = np.log(self.rate_1) - np.log(self.rate_0) self.b_vec = self.rate_0 - self.rate_1 self.common_b = np.log(self.pi_1) - np.log(self.pi_0) self.b = np.sum( self.b_vec ) def predict( self, X, elementwise = False ): N,D = X.shape if elementwise is True: activations = X*self.w + self.b_vec + self.common_b else: activations = np.dot( X, self.w ) + self.b + self.common_b predictions = logistic_sigmoid( activations ) return predictions class GaussianNaiveBayesModel( object ): def get_weights(self): return self.w def fit( self, X, y, one_hot_groups = None ): if one_hot_groups is None: return self.fit_normal( X, y ) else: return self.fit_grouped( X, y, one_hot_groups ) def fit_normal( self, X, y ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.mu_1 = X[self.class_1,:].mean(0) self.mu_0 = X[self.class_0,:].mean(0) self.var_1 = X[self.class_1,:].var(0) self.var_0 = X[self.class_0,:].var(0) self.var_0 = 0.5*X.var(0) self.var_1 = self.var_0 self.w = self.mu_1/self.var_1 - self.mu_0/self.var_0 self.b_vec = self.mu_0*self.mu_0/(2*self.var_0) - self.mu_1*self.mu_1/(2*self.var_1) + 
0.5*np.log(self.var_0)- 0.5*np.log(self.var_1) self.b_x_factor = 1.0/(2*self.var_0) - 1.0/(2*self.var_1) self.common_b = np.log(self.pi_1) - np.log(self.pi_0) self.b = np.sum( self.b_vec ) def fit_grouped( self, X, y, groups ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.mu_1_0 = X[self.class_1,:].mean(0) self.mu_0_0 = X[self.class_0,:].mean(0) self.var_1_0 = X[self.class_0,:].var(0) self.var_0_0 = X[self.class_0,:].var(0) D = len(self.mu_1_0) N,K = groups.shape pi_1 = self.pi_1 self.pi_1 = pi_1*np.ones(K) self.mu_1 = self.mu_1_0*np.ones((K,D)) self.mu_0 = self.mu_0_0*np.ones((K,D)) self.var_1 = self.var_1_0*np.ones((K,D)) self.var_0 = self.var_0_0*np.ones((K,D)) for k in range(K): ik = pp.find(groups[:,k]==1) class_1 = pp.find( y[ik]==1 ) class_0 = pp.find( y[ik]==0 ) if len(class_1) > 0: self.pi_1[ k ] = len( class_1 ) / float(len(ik)) self.mu_1[ k,: ] = X[class_1,:].mean(0) self.mu_0[ k,: ] = X[class_0,:].mean(0) self.var_1[ k,: ] = 0.9*X[class_0,:].var(0) + 0.1*self.var_1_0.reshape((1,D)) self.var_0[ k,: ] = 0.9*X[class_0,:].var(0) + 0.1*self.var_0_0.reshape((1,D)) self.w = self.mu_1/self.var_1 - self.mu_0/self.var_0 self.b_vec = self.mu_0*self.mu_0/(2*self.var_0) - self.mu_1*self.mu_1/(2*self.var_1) + 0.5*np.log(self.var_0)- 0.5*np.log(self.var_1) self.b_x_factor = 1.0/(2*self.var_0) - 1.0/(2*self.var_1) self.common_b = np.log(self.pi_1) - np.log(self.pi_0) self.b = np.sum( self.b_vec ) def predict( self, X, elementwise = False, one_hot_groups = None ): if one_hot_groups is None: return self.predict_normal( X, elementwise=elementwise) else: return self.predict_grouped( X, one_hot_groups, elementwise=elementwise ) def predict_normal( self, X, elementwise = False ): N,D = X.shape common_b = self.common_b if elementwise is True: activations = X*self.w + np.square(X)*self.b_x_factor + self.b_vec + common_b else: #z = np.hstack( (Z_alpha,Z_beta) ) #pdb.set_trace() activations = np.dot( X, self.w.T ) + np.dot(np.square(X),self.b_x_factor.T) + self.b + common_b predictions = logistic_sigmoid( activations ) return predictions def predict_grouped( self, X, groups, elementwise = False ): N,D = X.shape common_b = np.dot( groups, self.common_b ) #.reshape( (N,1)) w = np.dot( groups, self.w ) b = np.dot( groups, self.b_vec ) b_x_factor = np.dot( groups, self.b_x_factor ) if elementwise is True: #pdb.set_trace() activations = X*w + np.square(X)*b_x_factor + b + common_b.reshape((N,1)) else: #z = np.hstack( (Z_alpha,Z_beta) ) #pdb.set_trace() activations = np.sum(X*w,1) + np.sum(np.square(X)*b_x_factor,1) + b.sum(1) + common_b predictions = logistic_sigmoid( activations ) if np.any( np.isnan( predictions) ) or np.any( np.isinf( predictions) ): pdb.set_trace() return predictions class KernelDensityNaiveBayesModel( object ): def fit( self, X, y, one_hot_groups = None ): if one_hot_groups is None: return self.fit_normal( X, y ) else: return self.fit_grouped( X, y, one_hot_groups ) def fit_normal( self, X, y ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 N,D = X.shape self.bandwidths = 5*np.std(X,0)*pow(4.0/3.0/N, 1.0/5.0) self.models0 = [] self.models1 = [] for d in xrange(D): self.models0.append( KernelDensity( kernel='gaussian', bandwidth = self.bandwidths[d] ) ) self.models0[-1].fit( X[self.class_0,:][:,d][:, np.newaxis] ) self.models1.append( KernelDensity( kernel='gaussian', bandwidth = self.bandwidths[d] ) 
) self.models1[-1].fit( X[self.class_1,:][:,d][:, np.newaxis] ) #pdb.set_trace() # self.w = self.mu_1/self.var_1 - self.mu_0/self.var_0 # self.b_vec = self.mu_0*self.mu_0/(2*self.var_0) - self.mu_1*self.mu_1/(2*self.var_1) + 0.5*np.log(self.var_0)- 0.5*np.log(self.var_1) # # self.b_x_factor = 1.0/(2*self.var_0) - 1.0/(2*self.var_1) # self.common_b = np.log(self.pi_1) - np.log(self.pi_0) # self.b = np.sum( self.b_vec ) def predict( self, X, elementwise = False, one_hot_groups = None ): if one_hot_groups is None: return self.predict_normal( X, elementwise=elementwise) else: return self.predict_grouped( X, one_hot_groups, elementwise=elementwise ) def predict_normal( self, X, elementwise = False ): N,D = X.shape common_b = self.common_b log_prob_0 = self.log_prob( self.models0, X ) log_prob_1 = self.log_prob( self.models1, X ) if elementwise is True: activations = log_prob_1 - log_prob_0 + common_b else: activations = np.sum(log_prob_1 - log_prob_0, 1 ) + common_b predictions = logistic_sigmoid( activations ) return predictions def log_prob( self, models, X ): N,D = X.shape logprob = np.zeros( (N,D) ) for model, d in zip( models, xrange(D) ): logprob[:,d] = model.score_samples( X[:,d][:, np.newaxis] ) return logprob class NegBinNaiveBayesModel( object ): def get_weights(self): return self.w def fit( self, X, y ): self.class_1 = pp.find( y==1 ) self.class_0 = pp.find( y==0 ) self.pi_1 = len( self.class_1 ) / float(len(y)) self.pi_0 = 1.0 - self.pi_1 self.mu_1 = X[self.class_1,:].mean(0) self.mu_0 = X[self.class_0,:].mean(0) self.var_1 = X[self.class_1,:].var(0) self.var_0 = X[self.class_0,:].var(0) self.alpha_1 = np.square(self.mu_1)/np.maximum(0.1,self.var_1 - self.mu_1) self.alpha_0 = np.square(self.mu_0)/np.maximum(0.1,self.var_0 - self.mu_0) self.beta_1 = self.mu_1/np.maximum(0.1,self.var_1 - self.mu_1) self.beta_0 = self.mu_0/np.maximum(0.1,self.var_0 - self.mu_0) self.w = np.log(self.beta_1) - np.log(self.beta_0) self.b_vec = - special.gammaln( self.alpha_1 ) \ + special.gammaln( self.alpha_0 ) \ - self.alpha_0*np.log(1.0+self.beta_0) + self.alpha_1*np.log(1.0+self.beta_1) self.common_b = np.log(self.pi_1) - np.log(self.pi_0) self.b = np.sum( self.b_vec ) def predict( self, X, elementwise = False ): N,D = X.shape b_x = special.gammaln( self.alpha_0 + X ) + special.gammaln( self.alpha_1 + X ) if elementwise is True: activations = X*self.w + b_x + self.b_vec + self.common_b else: activations = np.dot( X, self.w ) + b_x.sum() + self.b + self.common_b predictions = logistic_sigmoid( activations ) return predictions
mit
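The per-feature naive-Bayes models in the file above all reduce prediction to a linear (and, for the Gaussian case, quadratic) score per feature pushed through a logistic sigmoid. The original classes depend on `tcga_encoder` helpers and `pp.find`, so the standalone sketch below re-derives only the Gaussian log-odds with NumPy, keeping class-specific variances rather than the shared variance the class above switches to; every name in the sketch is illustrative and not part of the original module.

import numpy as np

def fit_gaussian_log_odds(X, y):
    """Per-feature Gaussian class conditionals -> logistic-style weights (illustrative sketch)."""
    X1, X0 = X[y == 1], X[y == 0]
    mu1, mu0 = X1.mean(0), X0.mean(0)
    var1, var0 = X1.var(0) + 1e-6, X0.var(0) + 1e-6   # small jitter for numerical stability
    pi1 = (y == 1).mean()
    w = mu1 / var1 - mu0 / var0                        # linear term
    b_x = 1.0 / (2 * var0) - 1.0 / (2 * var1)          # quadratic term
    b = np.sum(mu0 ** 2 / (2 * var0) - mu1 ** 2 / (2 * var1)
               + 0.5 * np.log(var0) - 0.5 * np.log(var1))
    common_b = np.log(pi1) - np.log(1.0 - pi1)         # class-prior log-odds
    return w, b_x, b, common_b

def predict_proba(X, w, b_x, b, common_b):
    activations = X @ w + (X ** 2) @ b_x + b + common_b
    return 1.0 / (1.0 + np.exp(-activations))          # logistic sigmoid

# Toy usage with two Gaussian blobs
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 1.0, (50, 3)), rng.normal(1.0, 1.0, (50, 3))])
y = np.r_[np.zeros(50), np.ones(50)]
params = fit_gaussian_log_odds(X, y)
print(predict_proba(X[:5], *params).round(3))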
snap-stanford/ogb
setup.py
1
1636
from setuptools import setup, find_packages
from os import path
import sys
from io import open

here = path.abspath(path.dirname(__file__))
sys.path.insert(0, path.join(here, 'ogb'))
from version import __version__

print('version')
print(__version__)

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

package_data_list = ['ogb/graphproppred/master.csv',
                     'ogb/nodeproppred/master.csv',
                     'ogb/linkproppred/master.csv']

setup(name='ogb',
      version=__version__,
      description='Open Graph Benchmark',
      url='https://github.com/snap-stanford/ogb',
      author='OGB Team',
      author_email='[email protected]',
      keywords=['pytorch', 'graph machine learning',
                'graph representation learning', 'graph neural networks'],
      long_description=long_description,
      long_description_content_type='text/markdown',
      install_requires=[
          'torch>=1.6.0',
          'numpy>=1.16.0',
          'tqdm>=4.29.0',
          'scikit-learn>=0.20.0',
          'pandas>=0.24.0',
          'six>=1.12.0',
          'urllib3>=1.24.0',
          'outdated>=0.2.0'
      ],
      license='MIT',
      packages=find_packages(exclude=['dataset', 'examples', 'docs']),
      package_data={'ogb': package_data_list},
      include_package_data=True,
      classifiers=[
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
          'Intended Audience :: Science/Research',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'License :: OSI Approved :: MIT License',
      ],
      )
mit
kyleabeauchamp/HMCNotes
code/old/test_ramping.py
1
1793
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems

steps_per_hmc = 12
collision_rate = 1.0 / u.picoseconds
n_steps = 5000
temperature = 300. * u.kelvin

#testsystem = testsystems.FlexibleWaterBox(box_edge=3.18 * u.nanometers)  # Around 1060 molecules of water
testsystem = testsystems.WaterBox(box_edge=3.18 * u.nanometers)  # Around 1060 molecules of water

system = testsystem.system

integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 0.25 * u.femtoseconds)
context = mm.Context(testsystem.system, integrator)
context.setPositions(testsystem.positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(5000)
positions = context.getState(getPositions=True).getPositions()


def test_hmc(timestep, steps_per_hmc, alpha):
    timestep = timestep * u.femtoseconds
    integrator = hmc_integrators.RampedHMCIntegrator(temperature, steps_per_hmc, timestep, max_boost=alpha)
    context = mm.Context(system, integrator)
    context.setPositions(positions)
    context.setVelocitiesToTemperature(temperature)
    integrator.step(n_steps)
    return integrator.acceptance_rate


timestep_list = np.linspace(2.05, 2.20, 5)
alpha_list = np.linspace(0.0, 0.05, 5)

data = []
for i, timestep in enumerate(timestep_list):
    for j, alpha in enumerate(alpha_list):
        print(i, j, timestep, alpha)
        acceptance = test_hmc(timestep, steps_per_hmc, alpha)
        data.append(dict(acceptance=acceptance, timestep=timestep, alpha=alpha, normalized=timestep * acceptance))
        print(data[-1])

data = pd.DataFrame(data)
acceptance = data.pivot("timestep", "alpha", "acceptance")
normalized = data.pivot("timestep", "alpha", "normalized")

acceptance
normalized
gpl-2.0
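The sweep above builds the `acceptance` and `normalized` pivot tables but stops short of selecting a setting; a small follow-on, reusing the `data` DataFrame from the script purely as an illustration, could pick the acceptance-weighted best timestep:

# Illustrative continuation: pick the (timestep, alpha) pair maximising timestep * acceptance.
best = data.loc[data["normalized"].idxmax()]
print("best timestep %.3f fs at ramp alpha %.3f (acceptance %.2f)"
      % (best["timestep"], best["alpha"], best["acceptance"]))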
icdishb/scikit-learn
sklearn/semi_supervised/label_propagation.py
24
15181
# coding=utf8 """ Label propagation in the context of this module refers to a set of semisupervised classification algorithms. In the high level, these algorithms work by forming a fully-connected graph between all points given and solving for the steady-state distribution of labels at each point. These algorithms perform very well in practice. The cost of running can be very expensive, at approximately O(N^3) where N is the number of (labeled and unlabeled) points. The theory (why they perform so well) is motivated by intuitions from random walk algorithms and geometric relationships in the data. For more information see the references below. Model Features -------------- Label clamping: The algorithm tries to learn distributions of labels over the dataset. In the "Hard Clamp" mode, the true ground labels are never allowed to change. They are clamped into position. In the "Soft Clamp" mode, they are allowed some wiggle room, but some alpha of their original value will always be retained. Hard clamp is the same as soft clamping with alpha set to 1. Kernel: A function which projects a vector into some higher dimensional space. This implementation supprots RBF and KNN kernels. Using the RBF kernel generates a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of size O(k*N) which will run much faster. See the documentation for SVMs for more info on kernels. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelPropagation >>> label_prop_model = LabelPropagation() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelPropagation(...) Notes ----- References: [1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised Learning (2006), pp. 193-216 [2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005 """ # Authors: Clay Woolam <[email protected]> # Licence: BSD from abc import ABCMeta, abstractmethod from scipy import sparse import numpy as np from ..base import BaseEstimator, ClassifierMixin from ..metrics.pairwise import rbf_kernel from ..utils.graph import graph_laplacian from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_X_y, check_is_fitted from ..externals import six from ..neighbors.unsupervised import NearestNeighbors ### Helper functions def _not_converged(y_truth, y_prediction, tol=1e-3): """basic convergence check""" return np.abs(y_truth - y_prediction).sum() > tol class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)): """Base class for label propagation module. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported.. 
gamma : float Parameter for rbf kernel alpha : float Clamping factor max_iter : float Change maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state n_neighbors : integer > 0 Parameter for knn kernel """ def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=1, max_iter=30, tol=1e-3): self.max_iter = max_iter self.tol = tol # kernel parameters self.kernel = kernel self.gamma = gamma self.n_neighbors = n_neighbors # clamping factor self.alpha = alpha def _get_kernel(self, X, y=None): if self.kernel == "rbf": if y is None: return rbf_kernel(X, X, gamma=self.gamma) else: return rbf_kernel(X, y, gamma=self.gamma) elif self.kernel == "knn": if self.nn_fit is None: self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X) if y is None: return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X, self.n_neighbors, mode='connectivity') else: return self.nn_fit.kneighbors(y, return_distance=False) else: raise ValueError("%s is not a valid kernel. Only rbf and knn" " are supported at this time" % self.kernel) @abstractmethod def _build_graph(self): raise NotImplementedError("Graph construction must be implemented" " to fit a label propagation model.") def predict(self, X): """Performs inductive inference across the model. Parameters ---------- X : array_like, shape = [n_samples, n_features] Returns ------- y : array_like, shape = [n_samples] Predictions for input data """ probas = self.predict_proba(X) return self.classes_[np.argmax(probas, axis=1)].ravel() def predict_proba(self, X): """Predict probability for each possible outcome. Compute the probability estimates for each single sample in X and each possible outcome seen during training (categorical distribution). Parameters ---------- X : array_like, shape = [n_samples, n_features] Returns ------- probabilities : array, shape = [n_samples, n_classes] Normalized probability distributions across class labels """ check_is_fitted(self, 'X_') if sparse.isspmatrix(X): X_2d = X else: X_2d = np.atleast_2d(X) weight_matrices = self._get_kernel(self.X_, X_2d) if self.kernel == 'knn': probabilities = [] for weight_matrix in weight_matrices: ine = np.sum(self.label_distributions_[weight_matrix], axis=0) probabilities.append(ine) probabilities = np.array(probabilities) else: weight_matrices = weight_matrices.T probabilities = np.dot(weight_matrices, self.label_distributions_) normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T probabilities /= normalizer return probabilities def fit(self, X, y): """Fit a semi-supervised label propagation model based All the input data is provided matrix X (labeled and unlabeled) and corresponding label matrix y with a dedicated marker value for unlabeled samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] A {n_samples by n_samples} size matrix will be created from this y : array_like, shape = [n_samples] n_labeled_samples (unlabeled points are marked as -1) All unlabeled samples will be transductively assigned labels Returns ------- self : returns an instance of self. 
""" X, y = check_X_y(X, y) self.X_ = X # actual graph construction (implementations should override this) graph_matrix = self._build_graph() # label construction # construct a categorical distribution for classification only classes = np.unique(y) classes = (classes[classes != -1]) self.classes_ = classes n_samples, n_classes = len(y), len(classes) y = np.asarray(y) unlabeled = y == -1 clamp_weights = np.ones((n_samples, 1)) clamp_weights[unlabeled, 0] = self.alpha # initialize distributions self.label_distributions_ = np.zeros((n_samples, n_classes)) for label in classes: self.label_distributions_[y == label, classes == label] = 1 y_static = np.copy(self.label_distributions_) if self.alpha > 0.: y_static *= 1 - self.alpha y_static[unlabeled] = 0 l_previous = np.zeros((self.X_.shape[0], n_classes)) remaining_iter = self.max_iter if sparse.isspmatrix(graph_matrix): graph_matrix = graph_matrix.tocsr() while (_not_converged(self.label_distributions_, l_previous, self.tol) and remaining_iter > 1): l_previous = self.label_distributions_ self.label_distributions_ = safe_sparse_dot( graph_matrix, self.label_distributions_) # clamp self.label_distributions_ = np.multiply( clamp_weights, self.label_distributions_) + y_static remaining_iter -= 1 normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] self.label_distributions_ /= normalizer # set the transduction item transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)] self.transduction_ = transduction.ravel() self.n_iter_ = self.max_iter - remaining_iter return self class LabelPropagation(BaseLabelPropagation): """Label Propagation classifier Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported.. gamma : float Parameter for rbf kernel n_neighbors : integer > 0 Parameter for knn kernel alpha : float Clamping factor max_iter : float Change maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state Attributes ---------- X_ : array, shape = [n_samples, n_features] Input array. classes_ : array, shape = [n_classes] The distinct labels used in classifying instances. label_distributions_ : array, shape = [n_samples, n_classes] Categorical distribution for each item. transduction_ : array, shape = [n_samples] Label assigned to each item via the transduction. n_iter_ : int Number of iterations run. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelPropagation >>> label_prop_model = LabelPropagation() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelPropagation(...) References ---------- Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf See Also -------- LabelSpreading : Alternate label propagation strategy more robust to noise """ def _build_graph(self): """Matrix representing a fully connected graph between each sample This basic implementation creates a non-stochastic affinity matrix, so class distributions will exceed 1 (normalization may be desired). 
""" if self.kernel == 'knn': self.nn_fit = None affinity_matrix = self._get_kernel(self.X_) normalizer = affinity_matrix.sum(axis=0) if sparse.isspmatrix(affinity_matrix): affinity_matrix.data /= np.diag(np.array(normalizer)) else: affinity_matrix /= normalizer[:, np.newaxis] return affinity_matrix class LabelSpreading(BaseLabelPropagation): """LabelSpreading model for semi-supervised learning This model is similar to the basic Label Propgation algorithm, but uses affinity matrix based on the normalized graph Laplacian and soft clamping across the labels. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported. gamma : float parameter for rbf kernel n_neighbors : integer > 0 parameter for knn kernel alpha : float clamping factor max_iter : float maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state Attributes ---------- X_ : array, shape = [n_samples, n_features] Input array. classes_ : array, shape = [n_classes] The distinct labels used in classifying instances. label_distributions_ : array, shape = [n_samples, n_classes] Categorical distribution for each item. transduction_ : array, shape = [n_samples] Label assigned to each item via the transduction. n_iter_ : int Number of iterations run. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelSpreading >>> label_prop_model = LabelSpreading() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelSpreading(...) References ---------- Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston, Bernhard Schoelkopf. Learning with local and global consistency (2004) http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219 See Also -------- LabelPropagation : Unregularized graph based semi-supervised learning """ def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2, max_iter=30, tol=1e-3): # this one has different base parameters super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol) def _build_graph(self): """Graph matrix for Label Spreading computes the graph laplacian""" # compute affinity matrix (or gram matrix) if self.kernel == 'knn': self.nn_fit = None n_samples = self.X_.shape[0] affinity_matrix = self._get_kernel(self.X_) laplacian = graph_laplacian(affinity_matrix, normed=True) laplacian = -laplacian if sparse.isspmatrix(laplacian): diag_mask = (laplacian.row == laplacian.col) laplacian.data[diag_mask] = 0.0 else: laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0 return laplacian
bsd-3-clause
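The `fit` loop in `BaseLabelPropagation` above repeatedly multiplies the label distributions by the graph matrix and re-applies the clamp. The toy sketch below illustrates the textbook hard-clamp version of that iteration with NumPy only; it is a deliberate simplification of the class above (no soft clamping via alpha, no sparse or knn kernels), and every name in it is illustrative.

import numpy as np

# Four 1-D points; labels: 0, unlabeled, unlabeled, 1 (-1 marks unlabeled, as in the module)
X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, -1, -1, 1])
labeled = y != -1

W = np.exp(-1.0 * (X - X.T) ** 2)           # rbf affinity, gamma = 1
P = W / W.sum(axis=1, keepdims=True)        # row-normalised transition matrix

F = np.zeros((len(y), 2))
F[labeled, y[labeled]] = 1.0                # one-hot distributions for labelled points
Y0 = F.copy()

for _ in range(30):
    F = P @ F                               # propagate one step through the graph
    F[labeled] = Y0[labeled]                # hard clamp: labelled rows never change

F /= F.sum(axis=1, keepdims=True)
print(F.round(3))                           # rows 1 and 2 lean toward the closer labelled endpoint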
elijah513/scikit-learn
examples/ensemble/plot_ensemble_oob.py
259
3265
""" ============================= OOB Errors for Random Forests ============================= The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where each new tree is fit from a bootstrap sample of the training observations :math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for each :math:`z_i` calculated using predictions from the trees that do not contain :math:`z_i` in their respective bootstrap sample. This allows the ``RandomForestClassifier`` to be fit and validated whilst being trained [1]. The example below demonstrates how the OOB error can be measured at the addition of each new tree during training. The resulting plot allows a practitioner to approximate a suitable value of ``n_estimators`` at which the error stabilizes. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", p592-593, Springer, 2009. """ import matplotlib.pyplot as plt from collections import OrderedDict from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier # Author: Kian Ho <[email protected]> # Gilles Louppe <[email protected]> # Andreas Mueller <[email protected]> # # License: BSD 3 Clause print(__doc__) RANDOM_STATE = 123 # Generate a binary classification dataset. X, y = make_classification(n_samples=500, n_features=25, n_clusters_per_class=1, n_informative=15, random_state=RANDOM_STATE) # NOTE: Setting the `warm_start` construction parameter to `True` disables # support for paralellised ensembles but is necessary for tracking the OOB # error trajectory during training. ensemble_clfs = [ ("RandomForestClassifier, max_features='sqrt'", RandomForestClassifier(warm_start=True, oob_score=True, max_features="sqrt", random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features='log2'", RandomForestClassifier(warm_start=True, max_features='log2', oob_score=True, random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features=None", RandomForestClassifier(warm_start=True, max_features=None, oob_score=True, random_state=RANDOM_STATE)) ] # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs. error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs) # Range of `n_estimators` values to explore. min_estimators = 15 max_estimators = 175 for label, clf in ensemble_clfs: for i in range(min_estimators, max_estimators + 1): clf.set_params(n_estimators=i) clf.fit(X, y) # Record the OOB error for each `n_estimators=i` setting. oob_error = 1 - clf.oob_score_ error_rate[label].append((i, oob_error)) # Generate the "OOB error rate" vs. "n_estimators" plot. for label, clf_err in error_rate.items(): xs, ys = zip(*clf_err) plt.plot(xs, ys, label=label) plt.xlim(min_estimators, max_estimators) plt.xlabel("n_estimators") plt.ylabel("OOB error rate") plt.legend(loc="upper right") plt.show()
bsd-3-clause
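For a single fitted forest, the same out-of-bag quantity is available directly from the estimator; a short check, reusing `X`, `y`, and `RANDOM_STATE` from the example above purely as an illustration, would be:

# Illustrative follow-on: OOB error of one forest instead of a whole trajectory.
clf = RandomForestClassifier(n_estimators=100, oob_score=True,
                             random_state=RANDOM_STATE)
clf.fit(X, y)
print("OOB error: %.3f" % (1 - clf.oob_score_))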
CorySimon/IAST
docs/conf.py
2
8615
# -*- coding: utf-8 -*- # # IAST documentation build configuration file, created by # sphinx-quickstart on Thu May 28 12:45:03 2015. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import absolute_import import sys, os # from mock import Mock as MagicMock # for local make html # from unittest.mock import MagicMock on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # check if on rtd if on_rtd == False: import sphinx_rtd_theme # for local # class Mock(MagicMock): # @classmethod # def __getattr__(cls, name): # return Mock() # MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'pandas', 'matplotlib.pyplot', 'scipy.interpolate', 'scipy.optimize'] # sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('.') + "/../pyiast/") # sys.path.insert(0, ".") # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.mathjax'] # extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.mathbase'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pyIAST' copyright = u'2015, Cory M. Simon' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.0' # The full version, including alpha/beta/rc tags. release = '0.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if on_rtd: # for rtd html_theme = 'default' else: html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "pyIAST_logo.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'IASTdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'IAST.tex', u'pyIAST Documentation', u'Cory M. 
Simon', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [('index', 'iast', u'IAST Documentation', [u'Cory M. Simon'], 1)] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'IAST', u'IAST Documentation', u'Cory M. Simon', 'IAST', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
mit
Nodoka/Bioquality
graphing/rbs0910_hori_scatter.py
1
1397
#!/usr/local/bin/ipython -i
"""
A scatter graph of hori vs RBS scores
"""

import numpy as np
import matplotlib.pyplot as plt

# extract data from csv
file_name = "../data/RBS0910_Horim.csv"

# use columns
# 0 = scode (1~39 = rbs09, 4001~4021 = rbs2010)
# 16 = spcount
# 18 = GHI40 for RBS
# 31 = XMGHI40
scode = np.genfromtxt(file_name, delimiter=',', dtype=None, skip_header=1, usecols=0)
rbs_score = np.genfromtxt(file_name, delimiter=',', dtype=None, skip_header=1, usecols=18)
hori_score = np.genfromtxt(file_name, delimiter=',', dtype=None, skip_header=1, usecols=31)
spcount = np.genfromtxt(file_name, delimiter=',', dtype=None, skip_header=1, usecols=16)

# add y = x line
lx = np.arange(0, 460)
ly = np.arange(0, 460)

# define colours
colours = map(lambda plot_type: 'm' if plot_type < 40 else 'r', scode)

# sizes are constant times the mean times one over the species number
#sizes = 20 * sum(spcount) / (spcount * len(spcount))
sizes = spcount

# plot scatter graph
fig = plt.figure()
fig.suptitle('Comparison of GHI between RBS samples and Horikawa maps', fontsize=20)
ax = fig.add_subplot(111)
ax.scatter(hori_score, rbs_score, s=sizes, color=colours, alpha=0.5)
ax.plot(lx, ly, c='k', alpha=0.2)
ax.set_aspect(1)
ax.set_xlim([0, 460])
ax.set_ylim([0, 460])
ax.set_xlabel('Horikawa score', fontsize=16)
ax.set_ylabel('RBS 2009 & 2010 score', fontsize=16)
ax.grid(True)
plt.show()
mit
eldar/pose-tensorflow
lib/coco/PythonAPI/pycocotools/coco.py
17
18296
__author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # annToMask - Convert segmentation in an annotation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>annToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import copy import itertools from . import mask as maskUtils import os from collections import defaultdict import sys PYTHON_VERSION = sys.version_info[0] if PYTHON_VERSION == 2: from urllib import urlretrieve elif PYTHON_VERSION == 3: from urllib.request import urlretrieve class COCO: def __init__(self, annotation_file=None): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. 
:return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) if not annotation_file == None: print('loading annotations into memory...') tic = time.time() dataset = json.load(open(annotation_file, 'r')) assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset)) print('Done (t={:0.2f}s)'.format(time.time()- tic)) self.dataset = dataset self.createIndex() def createIndex(self): # create index print('creating index...') anns, cats, imgs = {}, {}, {} imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print('index created!') # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print('{}: {}'.format(key, value)) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. 
:param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. :param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns): """ Display the specified annotations. 
:param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception('datasetType not supported') if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((int(len(seg)/2), 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask t = self.imgs[ann['image_id']] if type(ann['segmentation']['counts']) == list: rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width']) else: rle = [ann['segmentation']] m = maskUtils.decode(rle) img = np.ones( (m.shape[0], m.shape[1], 3) ) if ann['iscrowd'] == 1: color_mask = np.array([2.0,166.0,101.0])/255 if ann['iscrowd'] == 0: color_mask = np.random.random((1, 3)).tolist()[0] for i in range(3): img[:,:,i] = color_mask[i] ax.imshow(np.dstack( (img, m*0.5) )) if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print(ann['caption']) def loadRes(self, resFile): """ Load result file and return a result api object. 
:param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print('Loading and preparing results...') tic = time.time() if type(resFile) == str or type(resFile) == unicode: anns = json.load(open(resFile)) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann['area'] = maskUtils.area(ann['segmentation']) if not 'bbox' in ann: ann['bbox'] = maskUtils.toBbox(ann['segmentation']) ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print('DONE (t={:0.2f}s)'.format(time.time()- tic)) res.dataset['annotations'] = anns res.createIndex() return res def download(self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. :param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print('Please specify target directory') return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urlretrieve(img['coco_url'], fname) print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic)) def loadNumpyAnnotations(self, data): """ Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class} :param data (numpy.ndarray) :return: annotations (python nested list) """ print('Converting ndarray to lists...') assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print('{}/{}'.format(i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann def annToRLE(self, ann): """ Convert annotation which can be polygons, uncompressed RLE to RLE. 
:return: binary mask (numpy 2D array) """ t = self.imgs[ann['image_id']] h, w = t['height'], t['width'] segm = ann['segmentation'] if type(segm) == list: # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(segm, h, w) rle = maskUtils.merge(rles) elif type(segm['counts']) == list: # uncompressed RLE rle = maskUtils.frPyObjects(segm, h, w) else: # rle rle = ann['segmentation'] return rle def annToMask(self, ann): """ Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask. :return: binary mask (numpy 2D array) """ rle = self.annToRLE(ann) m = maskUtils.decode(rle) return m
lgpl-3.0
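A typical session with this API loads an annotation file, filters images by category, and converts annotations to binary masks. The path below is a placeholder and the snippet assumes a standard COCO-format annotation file is available; it only strings together methods documented in the class above.

from pycocotools.coco import COCO

coco = COCO('annotations/instances_val2017.json')    # placeholder path, adjust to your layout

# All images containing people
cat_ids = coco.getCatIds(catNms=['person'])
img_ids = coco.getImgIds(catIds=cat_ids)
img = coco.loadImgs(img_ids[0])[0]

# Annotations for that image, excluding crowd regions
ann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_ids, iscrowd=False)
anns = coco.loadAnns(ann_ids)
masks = [coco.annToMask(ann) for ann in anns]        # one binary mask per instance
print(img['file_name'], len(anns), 'annotations')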
peastman/deepchem
deepchem/molnet/run_benchmark_models.py
2
31101
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Mar 6 23:41:26 2017 @author: zqwu """ import numpy as np import deepchem from deepchem.molnet.preset_hyper_parameters import hps from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.kernel_ridge import KernelRidge def benchmark_classification(train_dataset, valid_dataset, test_dataset, tasks, transformers, n_features, metric, model, test=False, hyper_parameters=None, seed=123): """ Calculate performance of different models on the specific dataset & tasks Parameters ---------- train_dataset: dataset struct dataset used for model training and evaluation valid_dataset: dataset struct dataset only used for model evaluation (and hyperparameter tuning) test_dataset: dataset struct dataset only used for model evaluation tasks: list of string list of targets(tasks, datasets) transformers: dc.trans.Transformer struct transformer used for model evaluation n_features: integer number of features, or length of binary fingerprints metric: list of dc.metrics.Metric objects metrics used for evaluation model: string, optional choice of model 'rf', 'tf', 'tf_robust', 'logreg', 'irv', 'graphconv', 'dag', 'xgb', 'weave', 'kernelsvm', 'textcnn', 'mpnn' test: boolean, optional whether to calculate test_set performance hyper_parameters: dict, optional (default=None) hyper parameters for designated model, None = use preset values Returns ------- train_scores : dict predicting results(AUC) on training set valid_scores : dict predicting results(AUC) on valid set test_scores : dict predicting results(AUC) on test set """ train_scores = {} valid_scores = {} test_scores = {} assert model in [ 'rf', 'tf', 'tf_robust', 'logreg', 'irv', 'graphconv', 'dag', 'xgb', 'weave', 'kernelsvm', 'textcnn', 'mpnn' ] if hyper_parameters is None: hyper_parameters = hps[model] model_name = model if model_name == 'tf': layer_sizes = hyper_parameters['layer_sizes'] weight_init_stddevs = hyper_parameters['weight_init_stddevs'] bias_init_consts = hyper_parameters['bias_init_consts'] dropouts = hyper_parameters['dropouts'] penalty = hyper_parameters['penalty'] penalty_type = hyper_parameters['penalty_type'] batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] # Building tensorflow MultitaskDNN model model = deepchem.models.MultitaskClassifier( len(tasks), n_features, layer_sizes=layer_sizes, weight_init_stddevs=weight_init_stddevs, bias_init_consts=bias_init_consts, dropouts=dropouts, weight_decay_penalty=penalty, weight_decay_penalty_type=penalty_type, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed) elif model_name == 'tf_robust': layer_sizes = hyper_parameters['layer_sizes'] weight_init_stddevs = hyper_parameters['weight_init_stddevs'] bias_init_consts = hyper_parameters['bias_init_consts'] dropouts = hyper_parameters['dropouts'] bypass_layer_sizes = hyper_parameters['bypass_layer_sizes'] bypass_weight_init_stddevs = hyper_parameters['bypass_weight_init_stddevs'] bypass_bias_init_consts = hyper_parameters['bypass_bias_init_consts'] bypass_dropouts = hyper_parameters['bypass_dropouts'] penalty = hyper_parameters['penalty'] penalty_type = hyper_parameters['penalty_type'] batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] # Building tensorflow robust 
MultitaskDNN model model = deepchem.models.RobustMultitaskClassifier( len(tasks), n_features, layer_sizes=layer_sizes, weight_init_stddevs=weight_init_stddevs, bias_init_consts=bias_init_consts, dropouts=dropouts, bypass_layer_sizes=bypass_layer_sizes, bypass_weight_init_stddevs=bypass_weight_init_stddevs, bypass_bias_init_consts=bypass_bias_init_consts, bypass_dropouts=bypass_dropouts, weight_decay_penalty=penalty, weight_decay_penalty_type=penalty_type, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed) elif model_name == 'logreg': penalty = hyper_parameters['penalty'] penalty_type = hyper_parameters['penalty_type'] nb_epoch = None # Building scikit logistic regression model def model_builder(model_dir): sklearn_model = LogisticRegression( penalty=penalty_type, C=1. / penalty, class_weight="balanced", n_jobs=-1) return deepchem.models.sklearn_models.SklearnModel( sklearn_model, model_dir) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) elif model_name == 'irv': penalty = hyper_parameters['penalty'] batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_K = hyper_parameters['n_K'] # Transform fingerprints to IRV features transformer = deepchem.trans.IRVTransformer(n_K, len(tasks), train_dataset) train_dataset = transformer.transform(train_dataset) valid_dataset = transformer.transform(valid_dataset) if test: test_dataset = transformer.transform(test_dataset) # Building tensorflow IRV model model = deepchem.models.TensorflowMultitaskIRVClassifier( len(tasks), K=n_K, penalty=penalty, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed, mode='classification') elif model_name == 'graphconv': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_filters = hyper_parameters['n_filters'] n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes'] model = deepchem.models.GraphConvModel( len(tasks), graph_conv_layers=[n_filters] * 2, dense_layer_size=n_fully_connected_nodes, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed, mode='classification') elif model_name == 'dag': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_graph_feat = hyper_parameters['n_graph_feat'] default_max_atoms = hyper_parameters['default_max_atoms'] max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X]) max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X]) max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X]) max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test]) max_atoms = min([max_atoms, default_max_atoms]) print('Maximum number of atoms: %i' % max_atoms) reshard_size = 256 transformer = deepchem.trans.DAGTransformer(max_atoms=max_atoms) train_dataset.reshard(reshard_size) train_dataset = transformer.transform(train_dataset) valid_dataset.reshard(reshard_size) valid_dataset = transformer.transform(valid_dataset) if test: test_dataset.reshard(reshard_size) test_dataset = transformer.transform(test_dataset) model = deepchem.models.DAGModel( len(tasks), max_atoms=max_atoms, n_atom_feat=n_features, n_graph_feat=n_graph_feat, n_outputs=30, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed, use_queue=False, mode='classification') elif model_name == 'weave': batch_size = hyper_parameters['batch_size'] 
nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_graph_feat = hyper_parameters['n_graph_feat'] n_pair_feat = hyper_parameters['n_pair_feat'] model = deepchem.models.WeaveModel( len(tasks), n_atom_feat=n_features, n_pair_feat=n_pair_feat, n_hidden=50, n_graph_feat=n_graph_feat, batch_size=batch_size, learning_rate=learning_rate, use_queue=False, random_seed=seed, mode='classification') elif model_name == 'textcnn': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_embedding = hyper_parameters['n_embedding'] filter_sizes = hyper_parameters['filter_sizes'] num_filters = hyper_parameters['num_filters'] all_data = deepchem.data.DiskDataset.merge( [train_dataset, valid_dataset, test_dataset]) char_dict, length = deepchem.models.TextCNNModel.build_char_dict(all_data) model = deepchem.models.TextCNNModel( len(tasks), char_dict, seq_length=length, n_embedding=n_embedding, filter_sizes=filter_sizes, num_filters=num_filters, learning_rate=learning_rate, batch_size=batch_size, use_queue=False, random_seed=seed, mode='classification') elif model_name == 'mpnn': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] T = hyper_parameters['T'] M = hyper_parameters['M'] model = deepchem.models.MPNNModel( len(tasks), n_atom_feat=n_features[0], n_pair_feat=n_features[1], n_hidden=n_features[0], T=T, M=M, batch_size=batch_size, learning_rate=learning_rate, use_queue=False, mode="classification") elif model_name == 'rf': n_estimators = hyper_parameters['n_estimators'] nb_epoch = None # Building scikit random forest model def model_builder(model_dir): sklearn_model = RandomForestClassifier( class_weight="balanced", n_estimators=n_estimators, n_jobs=-1) return deepchem.models.sklearn_models.SklearnModel( sklearn_model, model_dir) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) elif model_name == 'kernelsvm': C = hyper_parameters['C'] gamma = hyper_parameters['gamma'] nb_epoch = None # Building scikit learn Kernel SVM model def model_builder(model_dir): sklearn_model = SVC( C=C, gamma=gamma, class_weight="balanced", probability=True) return deepchem.models.SklearnModel(sklearn_model, model_dir) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) elif model_name == 'xgb': max_depth = hyper_parameters['max_depth'] learning_rate = hyper_parameters['learning_rate'] n_estimators = hyper_parameters['n_estimators'] gamma = hyper_parameters['gamma'] min_child_weight = hyper_parameters['min_child_weight'] max_delta_step = hyper_parameters['max_delta_step'] subsample = hyper_parameters['subsample'] colsample_bytree = hyper_parameters['colsample_bytree'] colsample_bylevel = hyper_parameters['colsample_bylevel'] reg_alpha = hyper_parameters['reg_alpha'] reg_lambda = hyper_parameters['reg_lambda'] scale_pos_weight = hyper_parameters['scale_pos_weight'] base_score = hyper_parameters['base_score'] seed = hyper_parameters['seed'] early_stopping_rounds = hyper_parameters['early_stopping_rounds'] nb_epoch = None esr = {'early_stopping_rounds': early_stopping_rounds} # Building xgboost classification model def model_builder(model_dir): import xgboost xgboost_model = xgboost.XGBClassifier( max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, gamma=gamma, min_child_weight=min_child_weight, max_delta_step=max_delta_step, subsample=subsample, 
colsample_bytree=colsample_bytree, colsample_bylevel=colsample_bylevel, reg_alpha=reg_alpha, reg_lambda=reg_lambda, scale_pos_weight=scale_pos_weight, base_score=base_score, seed=seed) return deepchem.models.xgboost_models.XGBoostModel( xgboost_model, model_dir, **esr) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) if nb_epoch is None: model.fit(train_dataset) else: model.fit(train_dataset, nb_epoch=nb_epoch) train_scores[model_name] = model.evaluate(train_dataset, metric, transformers) valid_scores[model_name] = model.evaluate(valid_dataset, metric, transformers) if test: test_scores[model_name] = model.evaluate(test_dataset, metric, transformers) return train_scores, valid_scores, test_scores def benchmark_regression(train_dataset, valid_dataset, test_dataset, tasks, transformers, n_features, metric, model, test=False, hyper_parameters=None, seed=123): """ Calculate performance of different models on the specific dataset & tasks Parameters ---------- train_dataset: dataset struct dataset used for model training and evaluation valid_dataset: dataset struct dataset only used for model evaluation (and hyperparameter tuning) test_dataset: dataset struct dataset only used for model evaluation tasks: list of string list of targets(tasks, datasets) transformers: dc.trans.Transformer struct transformer used for model evaluation n_features: integer number of features, or length of binary fingerprints metric: list of dc.metrics.Metric objects metrics used for evaluation model: string, optional choice of model 'tf_regression', 'tf_regression_ft', 'rf_regression', 'graphconvreg', 'dtnn', 'dag_regression', 'xgb_regression', 'weave_regression', 'textcnn_regression', 'krr', 'ani', 'krr_ft', 'mpnn' test: boolean, optional whether to calculate test_set performance hyper_parameters: dict, optional (default=None) hyper parameters for designated model, None = use preset values Returns ------- train_scores : dict predicting results(R2) on training set valid_scores : dict predicting results(R2) on valid set test_scores : dict predicting results(R2) on test set """ train_scores = {} valid_scores = {} test_scores = {} assert model in [ 'tf_regression', 'tf_regression_ft', 'rf_regression', 'graphconvreg', 'dtnn', 'dag_regression', 'xgb_regression', 'weave_regression', 'textcnn_regression', 'krr', 'ani', 'krr_ft', 'mpnn' ] import xgboost if hyper_parameters is None: hyper_parameters = hps[model] model_name = model if model_name == 'tf_regression': layer_sizes = hyper_parameters['layer_sizes'] weight_init_stddevs = hyper_parameters['weight_init_stddevs'] bias_init_consts = hyper_parameters['bias_init_consts'] dropouts = hyper_parameters['dropouts'] penalty = hyper_parameters['penalty'] penalty_type = hyper_parameters['penalty_type'] batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] model = deepchem.models.MultitaskRegressor( len(tasks), n_features, layer_sizes=layer_sizes, weight_init_stddevs=weight_init_stddevs, bias_init_consts=bias_init_consts, dropouts=dropouts, weight_decay_penalty=penalty, weight_decay_penalty_type=penalty_type, batch_size=batch_size, learning_rate=learning_rate, seed=seed) elif model_name == 'tf_regression_ft': layer_sizes = hyper_parameters['layer_sizes'] weight_init_stddevs = hyper_parameters['weight_init_stddevs'] bias_init_consts = hyper_parameters['bias_init_consts'] dropouts = hyper_parameters['dropouts'] penalty = hyper_parameters['penalty'] penalty_type = 
hyper_parameters['penalty_type'] batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] fit_transformers = [hyper_parameters['fit_transformers'](train_dataset)] model = deepchem.models.MultitaskFitTransformRegressor( n_tasks=len(tasks), n_features=n_features, layer_sizes=layer_sizes, weight_init_stddevs=weight_init_stddevs, bias_init_consts=bias_init_consts, dropouts=dropouts, weight_decay_penalty=penalty, weight_decay_penalty_type=penalty_type, batch_size=batch_size, learning_rate=learning_rate, fit_transformers=fit_transformers, n_eval=10, seed=seed) elif model_name == 'graphconvreg': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_filters = hyper_parameters['n_filters'] n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes'] model = deepchem.models.GraphConvModel( len(tasks), graph_conv_layers=[n_filters] * 2, dense_layer_size=n_fully_connected_nodes, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed, mode='regression') elif model_name == 'dtnn': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_embedding = hyper_parameters['n_embedding'] n_distance = hyper_parameters['n_distance'] assert len(n_features) == 2, 'DTNN is only applicable to qm datasets' model = deepchem.models.DTNNModel( len(tasks), n_embedding=n_embedding, n_distance=n_distance, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed, output_activation=False, use_queue=False, mode='regression') elif model_name == 'dag_regression': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_graph_feat = hyper_parameters['n_graph_feat'] default_max_atoms = hyper_parameters['default_max_atoms'] max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X]) max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X]) max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X]) max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test]) max_atoms = min([max_atoms, default_max_atoms]) print('Maximum number of atoms: %i' % max_atoms) reshard_size = 256 transformer = deepchem.trans.DAGTransformer(max_atoms=max_atoms) train_dataset.reshard(reshard_size) train_dataset = transformer.transform(train_dataset) valid_dataset.reshard(reshard_size) valid_dataset = transformer.transform(valid_dataset) if test: test_dataset.reshard(reshard_size) test_dataset = transformer.transform(test_dataset) model = deepchem.models.DAGModel( len(tasks), max_atoms=max_atoms, n_atom_feat=n_features, n_graph_feat=n_graph_feat, n_outputs=30, batch_size=batch_size, learning_rate=learning_rate, random_seed=seed, use_queue=False, mode='regression') elif model_name == 'weave_regression': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_graph_feat = hyper_parameters['n_graph_feat'] n_pair_feat = hyper_parameters['n_pair_feat'] model = deepchem.models.WeaveModel( len(tasks), n_atom_feat=n_features, n_pair_feat=n_pair_feat, n_hidden=50, n_graph_feat=n_graph_feat, batch_size=batch_size, learning_rate=learning_rate, use_queue=False, random_seed=seed, mode='regression') elif model_name == 'textcnn_regression': batch_size = hyper_parameters['batch_size'] nb_epoch = 
hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] n_embedding = hyper_parameters['n_embedding'] filter_sizes = hyper_parameters['filter_sizes'] num_filters = hyper_parameters['num_filters'] char_dict, length = deepchem.models.TextCNNModel.build_char_dict( train_dataset) model = deepchem.models.TextCNNModel( len(tasks), char_dict, seq_length=length, n_embedding=n_embedding, filter_sizes=filter_sizes, num_filters=num_filters, learning_rate=learning_rate, batch_size=batch_size, use_queue=False, random_seed=seed, mode='regression') elif model_name == 'ani': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] layer_structures = hyper_parameters['layer_structures'] assert len(n_features) == 2, 'ANI is only applicable to qm datasets' max_atoms = n_features[0] atom_number_cases = np.unique( np.concatenate([ train_dataset.X[:, :, 0], valid_dataset.X[:, :, 0], test_dataset.X[:, :, 0] ])) atom_number_cases = atom_number_cases.astype(int).tolist() try: # Remove token for paddings atom_number_cases.remove(0) except: pass ANItransformer = deepchem.trans.ANITransformer( max_atoms=max_atoms, atom_cases=atom_number_cases) train_dataset = ANItransformer.transform(train_dataset) valid_dataset = ANItransformer.transform(valid_dataset) if test: test_dataset = ANItransformer.transform(test_dataset) n_feat = ANItransformer.get_num_feats() - 1 model = deepchem.models.ANIRegression( len(tasks), max_atoms, n_feat, layer_structures=layer_structures, atom_number_cases=atom_number_cases, batch_size=batch_size, learning_rate=learning_rate, use_queue=False, mode="regression", random_seed=seed) elif model_name == 'mpnn': batch_size = hyper_parameters['batch_size'] nb_epoch = hyper_parameters['nb_epoch'] learning_rate = hyper_parameters['learning_rate'] T = hyper_parameters['T'] M = hyper_parameters['M'] model = deepchem.models.MPNNModel( len(tasks), n_atom_feat=n_features[0], n_pair_feat=n_features[1], n_hidden=n_features[0], T=T, M=M, batch_size=batch_size, learning_rate=learning_rate, use_queue=False, mode="regression") elif model_name == 'rf_regression': n_estimators = hyper_parameters['n_estimators'] nb_epoch = None # Building scikit random forest model def model_builder(model_dir): sklearn_model = RandomForestRegressor( n_estimators=n_estimators, n_jobs=-1) return deepchem.models.sklearn_models.SklearnModel( sklearn_model, model_dir) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) elif model_name == 'krr': alpha = hyper_parameters['alpha'] nb_epoch = None # Building scikit learn Kernel Ridge Regression model def model_builder(model_dir): sklearn_model = KernelRidge(kernel="rbf", alpha=alpha) return deepchem.models.SklearnModel(sklearn_model, model_dir) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) elif model_name == 'krr_ft': alpha = hyper_parameters['alpha'] nb_epoch = None ft_transformer = deepchem.trans.CoulombFitTransformer(train_dataset) train_dataset = ft_transformer.transform(train_dataset) valid_dataset = ft_transformer.transform(valid_dataset) test_dataset = ft_transformer.transform(test_dataset) # Building scikit learn Kernel Ridge Regression model def model_builder(model_dir): sklearn_model = KernelRidge(kernel="rbf", alpha=alpha) return deepchem.models.SklearnModel(sklearn_model, model_dir) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) elif model_name == 'xgb_regression': max_depth = 
hyper_parameters['max_depth'] learning_rate = hyper_parameters['learning_rate'] n_estimators = hyper_parameters['n_estimators'] gamma = hyper_parameters['gamma'] min_child_weight = hyper_parameters['min_child_weight'] max_delta_step = hyper_parameters['max_delta_step'] subsample = hyper_parameters['subsample'] colsample_bytree = hyper_parameters['colsample_bytree'] colsample_bylevel = hyper_parameters['colsample_bylevel'] reg_alpha = hyper_parameters['reg_alpha'] reg_lambda = hyper_parameters['reg_lambda'] scale_pos_weight = hyper_parameters['scale_pos_weight'] base_score = hyper_parameters['base_score'] seed = hyper_parameters['seed'] early_stopping_rounds = hyper_parameters['early_stopping_rounds'] nb_epoch = None esr = {'early_stopping_rounds': early_stopping_rounds} # Building xgboost regression model def model_builder(model_dir): xgboost_model = xgboost.XGBRegressor( max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, gamma=gamma, min_child_weight=min_child_weight, max_delta_step=max_delta_step, subsample=subsample, colsample_bytree=colsample_bytree, colsample_bylevel=colsample_bylevel, reg_alpha=reg_alpha, reg_lambda=reg_lambda, scale_pos_weight=scale_pos_weight, base_score=base_score, seed=seed) return deepchem.models.xgboost_models.XGBoostModel( xgboost_model, model_dir, **esr) model = deepchem.models.multitask.SingletaskToMultitask( tasks, model_builder) print('-----------------------------') print('Start fitting: %s' % model_name) if nb_epoch is None: model.fit(train_dataset) else: model.fit(train_dataset, nb_epoch=nb_epoch) train_scores[model_name] = model.evaluate(train_dataset, metric, transformers) valid_scores[model_name] = model.evaluate(valid_dataset, metric, transformers) if test: test_scores[model_name] = model.evaluate(test_dataset, metric, transformers) return train_scores, valid_scores, test_scores ''' def low_data_benchmark_classification(train_dataset, valid_dataset, n_features, metric, model='siamese', hyper_parameters=None, seed=123): """ Calculate low data benchmark performance Parameters ---------- train_dataset : dataset struct loaded dataset, ConvMol struct, used for training valid_dataset : dataset struct loaded dataset, ConvMol struct, used for validation n_features : integer number of features, or length of binary fingerprints metric: list of dc.metrics.Metric objects metrics used for evaluation model : string, optional (default='siamese') choice of which model to use, should be: siamese, attn, res hyper_parameters: dict, optional (default=None) hyper parameters for designated model, None = use preset values Returns ------- valid_scores : dict predicting results(AUC) on valid set """ train_scores = {} # train set not evaluated in low data model valid_scores = {} assert model in ['siamese', 'attn', 'res'] if hyper_parameters is None: hyper_parameters = hps[model] # Loading hyperparameters # num positive/negative ligands n_pos = hyper_parameters['n_pos'] n_neg = hyper_parameters['n_neg'] # Set batch sizes for network test_batch_size = hyper_parameters['test_batch_size'] support_batch_size = n_pos + n_neg # Model structure n_filters = hyper_parameters['n_filters'] n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes'] # Traning settings nb_epochs = hyper_parameters['nb_epochs'] n_train_trials = hyper_parameters['n_train_trials'] n_eval_trials = hyper_parameters['n_eval_trials'] learning_rate = hyper_parameters['learning_rate'] tf.set_random_seed(seed) support_graph = deepchem.nn.SequentialSupportGraph(n_features) 
prev_features = n_features for count, n_filter in enumerate(n_filters): support_graph.add( deepchem.nn.GraphConv(int(n_filter), prev_features, activation='relu')) support_graph.add(deepchem.nn.GraphPool()) prev_features = int(n_filter) for count, n_fcnode in enumerate(n_fully_connected_nodes): support_graph.add( deepchem.nn.Dense(int(n_fcnode), prev_features, activation='tanh')) prev_features = int(n_fcnode) support_graph.add_test( deepchem.nn.GraphGather(test_batch_size, activation='tanh')) support_graph.add_support( deepchem.nn.GraphGather(support_batch_size, activation='tanh')) if model in ['siamese']: pass elif model in ['attn']: max_depth = hyper_parameters['max_depth'] support_graph.join( deepchem.nn.AttnLSTMEmbedding(test_batch_size, support_batch_size, prev_features, max_depth)) elif model in ['res']: max_depth = hyper_parameters['max_depth'] support_graph.join( deepchem.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size, prev_features, max_depth)) model_low_data = deepchem.models.SupportGraphClassifier( support_graph, test_batch_size=test_batch_size, support_batch_size=support_batch_size, learning_rate=learning_rate) print('-------------------------------------') print('Start fitting by low data model: ' + model) # Fit trained model model_low_data.fit( train_dataset, nb_epochs=nb_epochs, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg, log_every_n_samples=50) # Evaluating low data model valid_scores[model] = model_low_data.evaluate( valid_dataset, metric, n_pos, n_neg, n_trials=n_eval_trials) return valid_scores '''
mit
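A minimal driver sketch for the benchmark_classification routine defined in the file above, assuming that function is importable; the import path, the Tox21 loader, the ECFP fingerprint length, the ROC-AUC metric, and the 'tf' model choice are illustrative assumptions, not something the file itself prescribes.

# Hypothetical driver for benchmark_classification as defined above. The import
# path and the Tox21/ECFP/ROC-AUC choices are assumptions for illustration only.
import numpy as np
import deepchem
from deepchem.molnet.run_benchmark_models import benchmark_classification  # assumed location

tasks, (train, valid, test), transformers = deepchem.molnet.load_tox21(featurizer='ECFP')
metric = [deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean)]

train_scores, valid_scores, test_scores = benchmark_classification(
    train, valid, test, tasks, transformers,
    n_features=1024,   # assumed ECFP fingerprint length
    metric=metric,
    model='tf',        # multitask DNN branch of the dispatcher above
    test=True)
print(valid_scores)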
Gabriel-p/photom
tasks/master.py
2
25030
import read_pars_file as rpf from os.path import join, isfile import sys import operator import numpy as np from scipy.spatial import cKDTree from astropy.io import ascii from astropy.table import Table from astropy.table import Column import matplotlib.pyplot as plt import time as t def in_params(): """ Read and prepare input parameter values. TODO Returns ------- pars : dictionary All parameters. """ pars = rpf.main() pars['in_out_path'] = pars['mypath'].replace( 'tasks', 'output/' + pars['master_folder']) # Format parameters appropriately. if pars['load_format'][0][0] == 'allstar': f = join(pars['in_out_path'], pars['load_format'][0][1]) if isfile(f): pars['load_format'] = 'allstar' pars['als_files'] = f # Check that all .als files exist. for file in ascii.read(f).columns[0]: f = join(pars['in_out_path'], file) if not isfile(f): print("{}\n file does not exist. Exit.".format(f)) sys.exit() else: print("{}\n file does not exist. Exit.".format(f)) sys.exit() else: pars['load_format'], pars['als_files'] = 'default', '' return pars def loadFrames(in_out_path, load_format, als_files): """ Load photometric lists of stars. Parameters ---------- in_out_path : string Path to the folder where input photometric files exist, and where the output files generated by this script will be stored. load_format : string Selected format for the photometry files. extCoeffs : list Extinction coefficients for each observed filter. Returns ------- frames : dictionary Contains x,y coordinates, magnitudes and their errors, for each observed frame (all filters, all exposure times). """ frames = {'U': {}, 'B': {}, 'V': {}, 'I': {}, 'R': {}} xy_shifts = {'U': {}, 'B': {}, 'V': {}, 'I': {}, 'R': {}} if load_format == 'allstar': files_data = ascii.read(als_files) for fname, filt, expT, K, x0, y0 in files_data: # if filt == 'U': # TODO: remove this als = ascii.read(in_out_path + '/' + fname, format='daophot') # DELETE # # Used to align .als test files. # if filt == 'V' and str(expT) == '300': # dx, dy = [13., -7.] # elif filt == 'V' and str(expT) == '30': # dx, dy = [7., -8.] # elif filt == 'V' and str(expT) == '3': # dx, dy = [10., -9.] # elif filt == 'B' and str(expT) == '300': # dx, dy = [6., -7.] # elif filt == 'B' and str(expT) == '30': # dx, dy = [0., -5.] # elif filt == 'U' and str(expT) == '300': # dx, dy = [4., 2.] # elif filt == 'U' and str(expT) == '30': # dx, dy = [6.5, 0.] # else: # dx, dy = [0., 0.] # frames[filt].update( # {str(expT): [als['XCENTER'] + dx, als['YCENTER'] + dy, zAmag, # als['MERR']]}) # DELETE frames[filt].update( {str(expT): [als['XCENTER'], als['YCENTER'], als['MAG'], als['MERR']]}) xy_shifts[filt].update({str(expT): (float(x0), float(y0))}) print("Loaded: {}, ({}, {}) N={}".format( fname, filt, expT, len(frames[filt][str(expT)][0]))) elif load_format == 'default': # TODO: write code. pass return frames, xy_shifts def framesOrder(frames): """ TODO: allow selecting the reference frame manually. Assign reference frame as the one with the largest number of detected stars. Order the remaining frames with the largest one on top. Parameters ---------- frames : dictionary Observed photometry. Contains sub-dictionaries with the exposure times as keys, and the photometry data as values. Returns ------- refFrameInfo : list Information about the reference frame. Each sub-list contains the filter name, exposure time, and number of stars added to the list by appending this frame. refFrame : list Photometry of the reference frame, selected as the frame with the largest number of stars. 
framesOrdered : list Each sub-list is an observed frame containing its filter name, exposure time, and photometry. Ordered putting the frame with the largest number of stars on top. """ flen = [] for filt, f in frames.iteritems(): for expTime, fexpT in f.iteritems(): N = len(fexpT[0]) flen.append([filt, expTime, N]) # Sort putting the frames with the largest number of stars first. sortIdx = sorted(flen, key=operator.itemgetter(2), reverse=True) # Isolate reference frame. filt, expTime = sortIdx[0][0], sortIdx[0][1] # Store each star separately. stars = list(zip(*frames[filt][expTime])) # Convert each float to list, to prepare for later appending of data. # refFrame = [star1, star2, ...starN] # starX = [x, y, mag, e_mag] # x = [x1, x2, ...] ; y = [y1, y2, ...] ; mag = [mag1, mag2, ...] refFrame = [list(list([_]) for _ in st) for st in stars] # Store here names of filters, exposure times, and number of stars added # when processing each frame. refFrameInfo = [[filt, expTime, len(refFrame)]] framesOrdered = [] for filt, expTime, dummy in sortIdx[1:]: framesOrdered.append([filt, expTime, frames[filt][expTime]]) return refFrameInfo, refFrame, framesOrdered def transfABCD(x, y, ABCD): """ Apply transformation equations to obtain new (xt, yt) coordinates. """ A, B, C, D = ABCD xt = A + x * C + y * D yt = B + y * C + x * D return xt, yt def solveABCD(x1, y1, x2, y2): """ Obtain new A, B, C, D parameters, solving the linear equations: 1*A + 0*B + x2*C + y2*D = x1 0*A + 1*B + y2*C + x2*D = y1 """ N = len(x1) l1 = np.array([np.ones(N), np.zeros(N), x2, y2]) l2 = np.array([np.zeros(N), np.ones(N), y2, x2]) M1 = np.vstack([l1.T, l2.T]) M2 = np.concatenate([x1, y1]) A, B, C, D = np.linalg.lstsq(M1, M2)[0] print("New A,B,C,D parameters: " "({:.3f}, {:.3f}, {:.3f}, {:.3f})".format(A, B, C, D)) return A, B, C, D def closestStar(x_fr1, y_fr1, x_fr2, y_fr2): """ For every star in fr1, find the closest star in fr2. Parameters ---------- x_fr1 : list x coordinates for stars in the reference frame. y_fr1 : list y coordinates for stars in the reference frame. x_fr2 : list x coordinates for stars in the processed frame. y_fr2 : list y coordinates for stars in the processed frame. Returns ------- min_dist_idx : numpy array Index to the processed star closest to the reference star, for each reference star: * fr2[min_dist_idx[i]]: closest star in fr2 to the ith star in fr1. Also the index of the minimum distance in dist[i], i.e.: distance to the closest processed star to the ith reference star: * dist[i][min_dist_idx[i]]: distance between these two stars. min_dists : list Minimum distance for each star in the reference frame to a star in the processed frame. Notes ----- len(fr1) = len(dist) = len(min_dist_idx) """ fr1 = np.array(zip(*[x_fr1, y_fr1])) fr2 = np.array(zip(*[x_fr2, y_fr2])) min_dists, min_dist_idx = cKDTree(fr2).query(fr1, 1) return min_dist_idx, min_dists def starMatch(c1_ids, c2_ids, d2d, maxrad): """ Reject duplicated matches and matched stars with distances beyond the maximum separation defined. Parameters ---------- c1_ids : list IDs of stars in the reference frame. c2_ids : list IDs of stars in the processed frame, closest to each star in the reference frame. d2d : list Distances between each star in the reference frame, and the closest star in the processed frame. maxrad : int see frameCombine() Returns ------- N_dupl_c1 : int Number of reference stars that were matched to the same processed star as another reference star. These stars will be re-processed. 
match_c1_ids : list IDs in reference frame for unique matches between frames. match_c2_ids : list IDs in the processed frame for unique matches between frames. no_match_c1 : list IDs of reference stars with no match found within maxrad. """ N_dupl_c1, match_c1_ids, match_c2_ids, match_d = 0, [], [], [] # Indexes of matched star in both frames and distance between them. for c1_i, c2_i, d in zip(*[c1_ids, c2_ids, d2d]): # Filter by maximum allowed match distance. if d <= float(maxrad): # Check if this processed star was already stored as a match with # another reference star. if c2_i in match_c2_ids: # Update number of duplicated reference stars. N_dupl_c1 += 1 # Index of this stored c2 star. j = match_c2_ids.index(c2_i) # If the previous match had a larger distance than this match, # replace with this match. if match_d[j] > d: # Now replace this reference star. match_c1_ids[j] = c1_i match_d[j] = d else: # Store IDs of both matched stars, and their distance. match_c1_ids.append(c1_i) match_c2_ids.append(c2_i) match_d.append(d) # else: # This reference star has no processed star closer than the max # distance allowed. return N_dupl_c1, match_c1_ids, match_c2_ids, match_d def frameCoordsUpdt(x_ref, y_ref, x_fr, y_fr, match_fr1_ids, match_fr2_ids): """ Identify stars in the processed frame that where not matched to any star in the reference frame. To avoid messing with the indexes, change the coordinates of already matched 'frame' stars so that they will not be matched again. Parameters ---------- x_fr : list Original x coordinates of the stars in the processed frame. y_fr : list Original y coordinates of the stars in the processed frame. match_fr2_ids : list IDs of stars in the processed frame that were matched to a star in the reference frame. Returns ------- x_fr_updt, y_fr_updt : list, list Coordinates of frame stars with those identified as matches changed so that they will not be matched again. """ # Modify coordinates of matched stars. Use copy of arrays to avoid # overwriting the original coordinate values in 'frame'. x_fr_updt, y_fr_updt = np.copy(x_fr), np.copy(y_fr) x_fr_updt[match_fr2_ids] = 1000000. y_fr_updt[match_fr2_ids] = 1000000. x_ref_updt, y_ref_updt = np.copy(x_ref), np.copy(y_ref) x_ref_updt[match_fr1_ids] = -1000000. y_ref_updt[match_fr1_ids] = -1000000. return x_ref_updt, y_ref_updt, x_fr_updt, y_fr_updt def frameMatch(x_ref, y_ref, x_fr, y_fr, maxrad): """ Match stars in the reference frame (x_ref, y_ref) with those from the processed frame (x_fr, y_fr), rejecting matches with distances beyond 'maxrad'. Parameters ---------- x_ref, y_ref : list x,y coordinates for stars in the reference frame. x_fr, y_fr : list x,y coordinates for stars in the processed frame. maxrad : int see frameCombine() Returns ------- match_fr1_ids_all : list Indexes of stars in the reference frame that match stars in the processed frame. match_fr2_ids_all : list Indexes of stars in the processed frame that match stars in the reference frame. Notes ----- x_ref[match_fr1_ids_all] == x_fr[match_fr2_ids_all] y_ref[match_fr1_ids_all] == y_fr[match_fr2_ids_all] """ # Initial full list of IDs for the reference and processed frame. fr1_ids = np.arange(len(x_ref)).tolist() N_fr2 = len(x_fr) match_fr1_ids_all, match_fr2_ids_all, match_d_all = [], [], [] # Continue until no more duplicate matches exist. counter, N_dupl_fr1 = 1, 1 while N_dupl_fr1 > 0: # Find closest stars between reference and processed frame. 
s = t.clock() fr2_ids_2fr1, fr1fr2_d2d = closestStar(x_ref, y_ref, x_fr, y_fr) print(' 1', t.clock() - s) # Match reference and processed frame. s = t.clock() N_dupl_fr1, match_fr1_ids, match_fr2_ids, match_d = starMatch( fr1_ids, fr2_ids_2fr1, fr1fr2_d2d, maxrad) print(' 2', t.clock() - s) # Store unique matches and distances. match_fr1_ids_all += match_fr1_ids match_fr2_ids_all += match_fr2_ids match_d_all += match_d # x1, y1 = x_ref[match_fr1_ids], y_ref[match_fr1_ids] # x2, y2 = x_fr[match_fr2_ids], y_fr[match_fr2_ids] # plt.subplot(131) # plt.scatter(x_ref, y_ref, c='g', s=2, zorder=1) # plt.scatter(x_fr, y_fr, c='r', s=5, zorder=4) # plt.xlim(0., 4100.) # plt.ylim(0., 4100.) # plt.subplot(132) # plt.scatter(x1, y1, c='g', s=2, zorder=1) # plt.scatter(x2, y2, c='r', s=5, zorder=4) # plt.subplot(133) # plt.scatter(x1, x1 - x2, c='b', s=5) # plt.scatter(y1, y1 - y2, c='r', s=5) # plt.show() print("{}.".format(counter)) counter += 1 s = t.clock() print(" Cross-matched stars: {}".format(len(match_fr1_ids))) if match_fr1_ids: print(" (Mean cross-match distance: {:.2f} px)".format( np.mean(match_d_all))) print(" Reference stars w/ no match within maxrad: {}".format( len(x_ref) - len(match_fr1_ids_all) - N_dupl_fr1)) # If there are any stars from the reference frame that had # duplicated matches and were stored for re-matching. if N_dupl_fr1 > 0: print(" Reference stars for re-match: {}".format(N_dupl_fr1)) print(" Frame stars for re-match: {}".format( N_fr2 - len(match_fr2_ids_all))) # Update coordinates of matched stars in both frames. x_ref, y_ref, x_fr, y_fr = frameCoordsUpdt( x_ref, y_ref, x_fr, y_fr, match_fr1_ids, match_fr2_ids) else: print(" Frame stars w/ no match within maxrad: {}".format( N_fr2 - len(match_fr2_ids_all))) print(' 1', t.clock() - s) return match_fr1_ids_all, match_fr2_ids_all def UpdtRefFrame( refFrameInfo, refFrame, frame, match_fr1_ids_all, match_fr2_ids_all, ABCD): """ Update the reference frame adding the stars in the processed frame that were assigned as matches to each reference star. If a reference star was not assigned any match from the processed frame, add a Nan value. Also add to the end of the list (thereby increasing the length of the reference frame) those processed stars that could not be matched to any reference star. Parameters ---------- refFrameInfo : list see frameMatch() refFrame : list see frameMatch() frame : list see frameMatch() match_fr1_ids_all : list Indexes of stars in refFrame that were matched to a star in frame. match_fr2_ids_all : list Indexes of stars in frame that were matched to a star in refFrame. ABCD: tuple of floats Transformation parameters for the frame's coordinates(A, B, C, D) Returns ------- refFrameInfo : list Updated list. refFrame : list Updated list. """ # Extract processed frame data. fr_filt, fr_expTime = frame[:2] x_fr, y_fr, mag_fr, emag_fr = frame[2] # Update x,y coordinates before storing. x_fr, y_fr = transfABCD(x_fr, y_fr, ABCD) # For each reference frame star. for ref_id, ref_st in enumerate(refFrame): # Check if this reference star was uniquely associated with a # processed star. if ref_id in match_fr1_ids_all: # Index of the associated processed 'frame' star. 
j = match_fr1_ids_all.index(ref_id) fr_id = match_fr2_ids_all[j] # ref_st[0].append(x_fr[fr_id]) ref_st[1].append(y_fr[fr_id]) ref_st[2].append(mag_fr[fr_id]) ref_st[3].append(emag_fr[fr_id]) else: # If this reference star could not be matched to any processed star # within the maximum match radius defined, add a NaN value # to mean that no match in the processed frame was found # for this reference star. ref_st[0].append(np.nan) ref_st[1].append(np.nan) ref_st[2].append(np.nan) ref_st[3].append(np.nan) # Number of frames processed this far including the reference frame, but # excluding this one. N_fr = len(refFrame[0][0]) - 1 # For each processed frame star. fr_st_no_match = 0 for fr_id, fr_st in enumerate(zip(*[x_fr, y_fr, mag_fr, emag_fr])): if fr_id not in match_fr2_ids_all: # This frame star was not matched to any reference star. x = [np.nan for _ in range(N_fr)] + [fr_st[0]] y = [np.nan for _ in range(N_fr)] + [fr_st[1]] mag = [np.nan for _ in range(N_fr)] + [fr_st[2]] emag = [np.nan for _ in range(N_fr)] + [fr_st[3]] refFrame.append([x, y, mag, emag]) fr_st_no_match += 1 # Update the information stored on the frames processed. refFrameInfo.append([fr_filt, fr_expTime, fr_st_no_match]) return refFrameInfo, refFrame def frameCombine(refFrameInfo, refFrame, frame, xy_shifts, maxrad): """ Combine 'refFrame' and 'frame' to generate an updated 'refFrame' with all matches and non-matches identified and stored. Parameters ---------- refFrameInfo : list Contains one sub-list per processed frame with its filter name, exposure time, and number of stars not matched to the original refFrame that where added to the end of the list. Will be updated before this function ends. refFrame : list Collects all the cross-matched photometry into this single reference frame. frame : list Processed frame to be compared to refFrame. xy_shifts: dict Contains the x0,y0 shifts for each frame. maxrad : int Maximum allowed distance (radius) for a match to be valid. Returns ------- refFrameInfo : list Updated list. refFrame : list Updated list. """ # Extract (x,y) coordinates, averaging the values assigned to the # same star. Use np.nanmean() because stars from 'refFrame' that could not # be matched to a star in 'frame', will have a 'NaN' value attached to # its x,y coordinates lists. x_ref, y_ref = [], [] # TODO improve this block for st in refFrame: x_ref.append(np.nanmean(st[0])) y_ref.append(np.nanmean(st[1])) x_ref, y_ref = np.array(x_ref), np.array(y_ref) # Extract filter name and exposure time of the processed frame. fr_filt, fr_expTime = frame[:2] # x,y coordinates and their shifts x_fr, y_fr = frame[2][:2] x0, y0 = xy_shifts[fr_filt][fr_expTime] print('\nProcessing frame: {}, {} (N={})'.format( fr_filt, fr_expTime, len(x_fr))) A, B, C, D = x0, y0, 1., 0. print("Initial A,B,C,D parameters: ({}, {}, {}, {})".format(A, B, C, D)) min_rad, mstep = 3., -1 # if fr_filt == 'I': # TODO remove this # maxrad, min_rad = 20., 15. rads_list = np.arange(maxrad, min_rad - 1., mstep) print("Minimum match radius: {}".format(min_rad)) for m_rad in rads_list: print("Match using maxrad={}".format(m_rad)) # Apply transformation equations s = t.clock() xt, yt = transfABCD(x_fr, y_fr, (A, B, C, D)) print('1', t.clock() - s) # Match frames match_fr1_ids_all, match_fr2_ids_all = frameMatch( x_ref, y_ref, xt, yt, float(m_rad)) # Solve for new A,B,C,D using only the cross-matched stars, only if # the loop while run one more time. 
if m_rad != rads_list[-1]: s = t.clock() x1 = np.array(x_ref)[match_fr1_ids_all] y1 = np.array(y_ref)[match_fr1_ids_all] x2, y2 = x_fr[match_fr2_ids_all], y_fr[match_fr2_ids_all] A, B, C, D = solveABCD(x1, y1, x2, y2) x_med, y_med = np.median(x1 - x2), np.median(y1 - y2) print(x_med, y_med) print('3', t.clock() - s) # Update reference frame. print("Updating reference frame.") refFrameInfo, refFrame = UpdtRefFrame( refFrameInfo, refFrame, frame, match_fr1_ids_all, match_fr2_ids_all, (A, B, C, D)) return refFrameInfo, refFrame def groupFilters(refFrameInfo, refFrame): """ Group observed frames according to filters and exposure times. Parameters ---------- refFrameInfo : list See frameMatch() refFrame : list See frameMatch() Returns ------- group_phot : dict Dictionary of astropy Tables(), one per observed filter with columns ordered as 'x_XXX y_XXX mag_XXX emag_XXX', where 'XXX' represents the exposure time. """ filters = {'U': {}, 'B': {}, 'V': {}, 'R': {}, 'I': {}} # Store data in 'filters' dict, grouped by exposure time. x, y, mag, e_mag = [list(zip(*_)) for _ in list(zip(*refFrame))] for i, (f, exp, N) in enumerate(refFrameInfo): filters[f].update({exp: {'x': x[i], 'y': y[i], 'mag': mag[i], 'e_mag': e_mag[i]}}) group_phot = {} # Order columns, add exposure time to col names, and write to file. for f, fdata in filters.iteritems(): tab = Table() for expT, col_data in fdata.iteritems(): t = Table(col_data, names=col_data.keys()) t_order = t['x', 'y', 'mag', 'e_mag'] t = Table(t_order, names=[_ + expT for _ in ['x_', 'y_', 'mag_', 'emag_']]) tab.add_columns(t.columns.values()) if len(tab) > 0: # Add IDs. ids = Column(np.arange(len(tab)), name='ID') tab.add_column(ids, index=0) # Append to grouped dictionary. group_phot.update({f: tab}) return group_phot def writeToFile(in_out_path, group_phot): """ TODO """ for f, tab in group_phot.iteritems(): print("\nWriting 'filter_{}.mag' file.".format(f)) # Remove rows with all 'nan' values before writing to file. # tab = rmNaNrows(tab, 1) # Write to file. if len(tab) > 0: ascii.write( tab, in_out_path + '/filter_' + f + '.mag', format='fixed_width', delimiter=' ', formats={_: '%10.4f' for _ in tab.keys()[1:]}, fill_values=[(ascii.masked, 'nan')], overwrite=True) def make_plots(in_out_path): """ TODO """ def main(): """ TODO """ pars = in_params() frames, xy_shifts = loadFrames( pars['in_out_path'], pars['load_format'], pars['als_files']) # Select the 'reference' frame as the one with the largest number of stars, # and store the remaining data in the correct order. refFrameInfo, refFrame, framesOrdered = framesOrder(frames) # Compare the reference frame to all the other frames. for frame in framesOrdered: print("\n--------------------------------------") print("Reference frame (N={}), composed of:".format( np.sum(zip(*refFrameInfo)[2]))) for _ in refFrameInfo: print("{}, {} (N={})".format(*_)) if frame[0] == 'I': # TODO remove refFrameInfo, refFrame = frameCombine( refFrameInfo, refFrame, frame, xy_shifts, int(float(pars['maxrad']))) print("\nFinal combined reference frame (N={})".format( np.sum(zip(*refFrameInfo)[2]))) for _ in refFrameInfo: print("{}, {} (N added: {})".format(*_)) # Group by filters and order by exposure time and data type (x, y, mag, # e_mag). group_phot = groupFilters(refFrameInfo, refFrame) # Create all output files and make final plot. writeToFile(pars['in_out_path'], group_phot) if pars['do_plots_H'] == 'y': make_plots(pars['in_out_path']) if __name__ == '__main__': main()
gpl-3.0
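The solveABCD routine in master.py above recovers the frame-to-frame transformation parameters by stacking the equations 1*A + 0*B + x2*C + y2*D = x1 and 0*A + 1*B + y2*C + x2*D = y1 into a single least-squares system. Below is a self-contained numpy sketch of that solve on synthetic coordinates; the "true" parameter values and the noise level are made up for the demonstration.

# Standalone illustration of the least-squares solve used by solveABCD above.
# The "true" A, B, C, D values and the noise level are invented for this demo.
import numpy as np

rng = np.random.RandomState(0)
x2 = rng.uniform(0., 4096., 200)
y2 = rng.uniform(0., 4096., 200)

A_true, B_true, C_true, D_true = 12.5, -7.0, 1.001, 0.002
x1 = A_true + x2 * C_true + y2 * D_true + rng.normal(0., 0.05, 200)
y1 = B_true + y2 * C_true + x2 * D_true + rng.normal(0., 0.05, 200)

# Stack 1*A + 0*B + x2*C + y2*D = x1 and 0*A + 1*B + y2*C + x2*D = y1.
N = len(x1)
l1 = np.array([np.ones(N), np.zeros(N), x2, y2])
l2 = np.array([np.zeros(N), np.ones(N), y2, x2])
M1 = np.vstack([l1.T, l2.T])
M2 = np.concatenate([x1, y1])
A, B, C, D = np.linalg.lstsq(M1, M2, rcond=None)[0]
print("Recovered A,B,C,D: {:.3f} {:.3f} {:.4f} {:.4f}".format(A, B, C, D))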
macks22/scikit-learn
sklearn/cross_decomposition/pls_.py
187
28507
""" The :mod:`sklearn.pls` module implements Partial Least Squares (PLS). """ # Author: Edouard Duchesnay <[email protected]> # License: BSD 3 clause from ..base import BaseEstimator, RegressorMixin, TransformerMixin from ..utils import check_array, check_consistent_length from ..externals import six import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import linalg from ..utils import arpack from ..utils.validation import check_is_fitted __all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD'] def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False): """Inner loop of the iterative NIPALS algorithm. Provides an alternative to the svd(X'Y); returns the first left and right singular vectors of X'Y. See PLS for the meaning of the parameters. It is similar to the Power method for determining the eigenvectors and eigenvalues of a X'Y. """ y_score = Y[:, [0]] x_weights_old = 0 ite = 1 X_pinv = Y_pinv = None eps = np.finfo(X.dtype).eps # Inner loop of the Wold algo. while True: # 1.1 Update u: the X weights if mode == "B": if X_pinv is None: X_pinv = linalg.pinv(X) # compute once pinv(X) x_weights = np.dot(X_pinv, y_score) else: # mode A # Mode A regress each X column on y_score x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score) # 1.2 Normalize u x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps # 1.3 Update x_score: the X latent scores x_score = np.dot(X, x_weights) # 2.1 Update y_weights if mode == "B": if Y_pinv is None: Y_pinv = linalg.pinv(Y) # compute once pinv(Y) y_weights = np.dot(Y_pinv, x_score) else: # Mode A regress each Y column on x_score y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score) # 2.2 Normalize y_weights if norm_y_weights: y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps # 2.3 Update y_score: the Y latent scores y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps) # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG x_weights_diff = x_weights - x_weights_old if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1: break if ite == max_iter: warnings.warn('Maximum number of iterations reached') break x_weights_old = x_weights ite += 1 return x_weights, y_weights, ite def _svd_cross_product(X, Y): C = np.dot(X.T, Y) U, s, Vh = linalg.svd(C, full_matrices=False) u = U[:, [0]] v = Vh.T[:, [0]] return u, v def _center_scale_xy(X, Y, scale=True): """ Center X, Y and scale if the scale parameter==True Returns ------- X, Y, x_mean, y_mean, x_std, y_std """ # center x_mean = X.mean(axis=0) X -= x_mean y_mean = Y.mean(axis=0) Y -= y_mean # scale if scale: x_std = X.std(axis=0, ddof=1) x_std[x_std == 0.0] = 1.0 X /= x_std y_std = Y.std(axis=0, ddof=1) y_std[y_std == 0.0] = 1.0 Y /= y_std else: x_std = np.ones(X.shape[1]) y_std = np.ones(Y.shape[1]) return X, Y, x_mean, y_mean, x_std, y_std class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin, RegressorMixin): """Partial Least Squares (PLS) This class implements the generic PLS algorithm, constructors' parameters allow to obtain a specific implementation such as: - PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132. With univariate response it implements PLS1. - PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and [Wegelin et al. 2000]. 
This parametrization implements the original Wold algorithm. We use the terminology defined by [Wegelin et al. 2000]. This implementation uses the PLS Wold 2 blocks algorithm based on two nested loops: (i) The outer loop iterate over components. (ii) The inner loop estimates the weights vectors. This can be done with two algo. (a) the inner loop of the original NIPALS algo. or (b) a SVD on residuals cross-covariance matrices. n_components : int, number of components to keep. (default 2). scale : boolean, scale data? (default True) deflation_mode : str, "canonical" or "regression". See notes. mode : "A" classical PLS and "B" CCA. See notes. norm_y_weights: boolean, normalize Y weights to one? (default False) algorithm : string, "nipals" or "svd" The algorithm used to estimate the weights. It will be called n_components times, i.e. once for each iteration of the outer loop. max_iter : an integer, the maximum number of iterations (default 500) of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real, default 1e-06 The tolerance used in the iterative algorithm. copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effects. Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_loadings_ : array, [p, n_components] X block loadings vectors. y_loadings_ : array, [q, n_components] Y block loadings vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. x_rotations_ : array, [p, n_components] X block to latents rotations. y_rotations_ : array, [q, n_components] Y block to latents rotations. coef_: array, [p, q] The coefficients of the linear model: ``Y = X coef_ + Err`` n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Not useful if the algorithm given is "svd". References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. In French but still a reference: Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. See also -------- PLSCanonical PLSRegression CCA PLS_SVD """ @abstractmethod def __init__(self, n_components=2, scale=True, deflation_mode="regression", mode="A", algorithm="nipals", norm_y_weights=False, max_iter=500, tol=1e-06, copy=True): self.n_components = n_components self.deflation_mode = deflation_mode self.mode = mode self.norm_y_weights = norm_y_weights self.scale = scale self.algorithm = algorithm self.max_iter = max_iter self.tol = tol self.copy = copy def fit(self, X, Y): """Fit model to data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples in the number of samples and n_features is the number of predictors. Y : array-like of response, shape = [n_samples, n_targets] Target vectors, where n_samples in the number of samples and n_targets is the number of response variables. 
""" # copy since this will contains the residuals (deflated) matrices check_consistent_length(X, Y) X = check_array(X, dtype=np.float64, copy=self.copy) Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False) if Y.ndim == 1: Y = Y.reshape(-1, 1) n = X.shape[0] p = X.shape[1] q = Y.shape[1] if self.n_components < 1 or self.n_components > p: raise ValueError('Invalid number of components: %d' % self.n_components) if self.algorithm not in ("svd", "nipals"): raise ValueError("Got algorithm %s when only 'svd' " "and 'nipals' are known" % self.algorithm) if self.algorithm == "svd" and self.mode == "B": raise ValueError('Incompatible configuration: mode B is not ' 'implemented with svd algorithm') if self.deflation_mode not in ["canonical", "regression"]: raise ValueError('The deflation mode is unknown') # Scale (in place) X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\ = _center_scale_xy(X, Y, self.scale) # Residuals (deflated) matrices Xk = X Yk = Y # Results matrices self.x_scores_ = np.zeros((n, self.n_components)) self.y_scores_ = np.zeros((n, self.n_components)) self.x_weights_ = np.zeros((p, self.n_components)) self.y_weights_ = np.zeros((q, self.n_components)) self.x_loadings_ = np.zeros((p, self.n_components)) self.y_loadings_ = np.zeros((q, self.n_components)) self.n_iter_ = [] # NIPALS algo: outer loop, over components for k in range(self.n_components): if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps): # Yk constant warnings.warn('Y residual constant at iteration %s' % k) break # 1) weights estimation (inner loop) # ----------------------------------- if self.algorithm == "nipals": x_weights, y_weights, n_iter_ = \ _nipals_twoblocks_inner_loop( X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter, tol=self.tol, norm_y_weights=self.norm_y_weights) self.n_iter_.append(n_iter_) elif self.algorithm == "svd": x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk) # compute scores x_scores = np.dot(Xk, x_weights) if self.norm_y_weights: y_ss = 1 else: y_ss = np.dot(y_weights.T, y_weights) y_scores = np.dot(Yk, y_weights) / y_ss # test for null variance if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps: warnings.warn('X scores are null at iteration %s' % k) break # 2) Deflation (in place) # ---------------------- # Possible memory footprint reduction may done here: in order to # avoid the allocation of a data chunk for the rank-one # approximations matrix which is then subtracted to Xk, we suggest # to perform a column-wise deflation. # # - regress Xk's on x_score x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores) # - subtract rank-one approximations to obtain remainder matrix Xk -= np.dot(x_scores, x_loadings.T) if self.deflation_mode == "canonical": # - regress Yk's on y_score, then subtract rank-one approx. y_loadings = (np.dot(Yk.T, y_scores) / np.dot(y_scores.T, y_scores)) Yk -= np.dot(y_scores, y_loadings.T) if self.deflation_mode == "regression": # - regress Yk's on x_score, then subtract rank-one approx. 
y_loadings = (np.dot(Yk.T, x_scores) / np.dot(x_scores.T, x_scores)) Yk -= np.dot(x_scores, y_loadings.T) # 3) Store weights, scores and loadings # Notation: self.x_scores_[:, k] = x_scores.ravel() # T self.y_scores_[:, k] = y_scores.ravel() # U self.x_weights_[:, k] = x_weights.ravel() # W self.y_weights_[:, k] = y_weights.ravel() # C self.x_loadings_[:, k] = x_loadings.ravel() # P self.y_loadings_[:, k] = y_loadings.ravel() # Q # Such that: X = TP' + Err and Y = UQ' + Err # 4) rotations from input space to transformed space (scores) # T = X W(P'W)^-1 = XW* (W* : p x k matrix) # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix) self.x_rotations_ = np.dot( self.x_weights_, linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_))) if Y.shape[1] > 1: self.y_rotations_ = np.dot( self.y_weights_, linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_))) else: self.y_rotations_ = np.ones(1) if True or self.deflation_mode == "regression": # FIXME what's with the if? # Estimate regression coefficient # Regress Y on T # Y = TQ' + Err, # Then express in function of X # Y = X W(P'W)^-1Q' + Err = XB + Err # => B = W*Q' (p x q) self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T) self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ * self.y_std_) return self def transform(self, X, Y=None, copy=True): """Apply the dimension reduction learned on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. copy : boolean, default True Whether to copy X and Y, or perform in-place normalization. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. """ check_is_fitted(self, 'x_mean_') X = check_array(X, copy=copy) # Normalize X -= self.x_mean_ X /= self.x_std_ # Apply rotation x_scores = np.dot(X, self.x_rotations_) if Y is not None: Y = check_array(Y, ensure_2d=False, copy=copy) if Y.ndim == 1: Y = Y.reshape(-1, 1) Y -= self.y_mean_ Y /= self.y_std_ y_scores = np.dot(Y, self.y_rotations_) return x_scores, y_scores return x_scores def predict(self, X, copy=True): """Apply the dimension reduction learned on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. copy : boolean, default True Whether to copy X and Y, or perform in-place normalization. Notes ----- This call requires the estimation of a p x q matrix, which may be an issue in high dimensional space. """ check_is_fitted(self, 'x_mean_') X = check_array(X, copy=copy) # Normalize X -= self.x_mean_ X /= self.x_std_ Ypred = np.dot(X, self.coef_) return Ypred + self.y_mean_ def fit_transform(self, X, y=None, **fit_params): """Learn and apply the dimension reduction on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. copy : boolean, default True Whether to copy X and Y, or perform in-place normalization. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. 
""" check_is_fitted(self, 'x_mean_') return self.fit(X, y, **fit_params).transform(X, y) class PLSRegression(_PLS): """PLS regression PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1 in case of one dimensional response. This class inherits from _PLS with mode="A", deflation_mode="regression", norm_y_weights=False and algorithm="nipals". Read more in the :ref:`User Guide <cross_decomposition>`. Parameters ---------- n_components : int, (default 2) Number of components to keep. scale : boolean, (default True) whether to scale the data max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real Tolerance used in the iterative algorithm default 1e-06. copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effect Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_loadings_ : array, [p, n_components] X block loadings vectors. y_loadings_ : array, [q, n_components] Y block loadings vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. x_rotations_ : array, [p, n_components] X block to latents rotations. y_rotations_ : array, [q, n_components] Y block to latents rotations. coef_: array, [p, q] The coefficients of the linear model: ``Y = X coef_ + Err`` n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Notes ----- For each component k, find weights u, v that optimizes: ``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1`` Note that it maximizes both the correlations between the scores and the intra-block variances. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current X score. This performs the PLS regression known as PLS2. This mode is prediction oriented. This implementation provides the same results that 3 PLS packages provided in the R language (R-project): - "mixOmics" with function pls(X, Y, mode = "regression") - "plspm " with function plsreg2(X, Y) - "pls" with function oscorespls.fit(X, Y) Examples -------- >>> from sklearn.cross_decomposition import PLSRegression >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> pls2 = PLSRegression(n_components=2) >>> pls2.fit(X, Y) ... # doctest: +NORMALIZE_WHITESPACE PLSRegression(copy=True, max_iter=500, n_components=2, scale=True, tol=1e-06) >>> Y_pred = pls2.predict(X) References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. In french but still a reference: Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. """ def __init__(self, n_components=2, scale=True, max_iter=500, tol=1e-06, copy=True): _PLS.__init__(self, n_components=n_components, scale=scale, deflation_mode="regression", mode="A", norm_y_weights=False, max_iter=max_iter, tol=tol, copy=copy) class PLSCanonical(_PLS): """ PLSCanonical implements the 2 blocks canonical PLS of the original Wold algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000]. 
This class inherits from PLS with mode="A" and deflation_mode="canonical", norm_y_weights=True and algorithm="nipals", but svd should provide similar results up to numerical errors. Read more in the :ref:`User Guide <cross_decomposition>`. Parameters ---------- scale : boolean, scale data? (default True) algorithm : string, "nipals" or "svd" The algorithm used to estimate the weights. It will be called n_components times, i.e. once for each iteration of the outer loop. max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real, default 1e-06 the tolerance used in the iterative algorithm copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effect n_components : int, number of components to keep. (default 2). Attributes ---------- x_weights_ : array, shape = [p, n_components] X block weights vectors. y_weights_ : array, shape = [q, n_components] Y block weights vectors. x_loadings_ : array, shape = [p, n_components] X block loadings vectors. y_loadings_ : array, shape = [q, n_components] Y block loadings vectors. x_scores_ : array, shape = [n_samples, n_components] X scores. y_scores_ : array, shape = [n_samples, n_components] Y scores. x_rotations_ : array, shape = [p, n_components] X block to latents rotations. y_rotations_ : array, shape = [q, n_components] Y block to latents rotations. n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Not useful if the algorithm provided is "svd". Notes ----- For each component k, find weights u, v that optimize:: max corr(Xk u, Yk v) * var(Xk u) var(Yk u), such that ``|u| = |v| = 1`` Note that it maximizes both the correlations between the scores and the intra-block variances. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current Y score. This performs a canonical symmetric version of the PLS regression. But slightly different than the CCA. This is mostly used for modeling. This implementation provides the same results that the "plspm" package provided in the R language (R-project), using the function plsca(X, Y). Results are equal or collinear with the function ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference relies in the fact that mixOmics implementation does not exactly implement the Wold algorithm since it does not normalize y_weights to one. Examples -------- >>> from sklearn.cross_decomposition import PLSCanonical >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> plsca = PLSCanonical(n_components=2) >>> plsca.fit(X, Y) ... # doctest: +NORMALIZE_WHITESPACE PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2, scale=True, tol=1e-06) >>> X_c, Y_c = plsca.transform(X, Y) References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. 
See also -------- CCA PLSSVD """ def __init__(self, n_components=2, scale=True, algorithm="nipals", max_iter=500, tol=1e-06, copy=True): _PLS.__init__(self, n_components=n_components, scale=scale, deflation_mode="canonical", mode="A", norm_y_weights=True, algorithm=algorithm, max_iter=max_iter, tol=tol, copy=copy) class PLSSVD(BaseEstimator, TransformerMixin): """Partial Least Square SVD Simply perform a svd on the crosscovariance matrix: X'Y There are no iterative deflation here. Read more in the :ref:`User Guide <cross_decomposition>`. Parameters ---------- n_components : int, default 2 Number of components to keep. scale : boolean, default True Whether to scale X and Y. copy : boolean, default True Whether to copy X and Y, or perform in-place computations. Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. See also -------- PLSCanonical CCA """ def __init__(self, n_components=2, scale=True, copy=True): self.n_components = n_components self.scale = scale self.copy = copy def fit(self, X, Y): # copy since this will contains the centered data check_consistent_length(X, Y) X = check_array(X, dtype=np.float64, copy=self.copy) Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False) if Y.ndim == 1: Y = Y.reshape(-1, 1) if self.n_components > max(Y.shape[1], X.shape[1]): raise ValueError("Invalid number of components n_components=%d" " with X of shape %s and Y of shape %s." % (self.n_components, str(X.shape), str(Y.shape))) # Scale (in place) X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\ _center_scale_xy(X, Y, self.scale) # svd(X'Y) C = np.dot(X.T, Y) # The arpack svds solver only works if the number of extracted # components is smaller than rank(X) - 1. Hence, if we want to extract # all the components (C.shape[1]), we have to use another one. Else, # let's use arpacks to compute only the interesting components. if self.n_components >= np.min(C.shape): U, s, V = linalg.svd(C, full_matrices=False) else: U, s, V = arpack.svds(C, k=self.n_components) V = V.T self.x_scores_ = np.dot(X, U) self.y_scores_ = np.dot(Y, V) self.x_weights_ = U self.y_weights_ = V return self def transform(self, X, Y=None): """Apply the dimension reduction learned on the train data.""" check_is_fitted(self, 'x_mean_') X = check_array(X, dtype=np.float64) Xr = (X - self.x_mean_) / self.x_std_ x_scores = np.dot(Xr, self.x_weights_) if Y is not None: if Y.ndim == 1: Y = Y.reshape(-1, 1) Yr = (Y - self.y_mean_) / self.y_std_ y_scores = np.dot(Yr, self.y_weights_) return x_scores, y_scores return x_scores def fit_transform(self, X, y=None, **fit_params): """Learn and apply the dimension reduction on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. """ return self.fit(X, y, **fit_params).transform(X, y)
bsd-3-clause
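The PLSCanonical docstring above notes that algorithm="svd" should give results similar to "nipals" up to numerical error. A minimal illustrative sketch (not part of the original file) that exercises that claim on the toy data from the docstring; since the sign of each extracted component is arbitrary, the comparison is made on absolute values, and only the size of the difference is printed rather than asserting a hard tolerance.

import numpy as np
from sklearn.cross_decomposition import PLSCanonical

X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])

# Fit the same model with both weight-estimation algorithms.
pls_nipals = PLSCanonical(n_components=2, algorithm="nipals").fit(X, Y)
pls_svd = PLSCanonical(n_components=2, algorithm="svd").fit(X, Y)

# Expected to be small: the weights agree up to the arbitrary sign of each component.
print(np.max(np.abs(np.abs(pls_nipals.x_weights_) - np.abs(pls_svd.x_weights_))))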
tudelft3d/masbpy
util/compute_normals.py
1
2229
# This file is part of masbpy.
# masbpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# masbpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with masbpy. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2015 Ravi Peters

#!/usr/bin/env python

from sklearn.decomposition import PCA
from pykdtree.kdtree import KDTree
from multiprocessing import Pool
from time import time
from masbpy import io_npy
import numpy as np
import sys
import argparse


def compute_normal(neighbours):
    pca = PCA(n_components=3)
    pca.fit(neighbours)
    plane_normal = pca.components_[-1]  # this is a normalized normal
    # make all normals point upwards:
    if plane_normal[-1] < 0:
        plane_normal *= -1
    return plane_normal


def main(args):
    if args.infile.endswith('ply'):
        from masbpy import io_ply
        datadict = io_ply.read_ply(args.infile)
    elif args.infile.endswith('las'):
        from masbpy import io_las
        datadict = io_las.read_las(args.infile)
    elif args.infile.endswith('npy'):
        datadict = io_npy.read_npy(args.infile, ['coords'])

    kd_tree = KDTree( datadict['coords'] )
    neighbours = kd_tree.query( datadict['coords'], args.k+1 )[1]
    neighbours = datadict['coords'][neighbours]

    p = Pool()
    t1 = time()
    normals = p.map(compute_normal, neighbours)
    t2 = time()
    print "finished normal computation in {} s".format(t2-t1)

    datadict['normals'] = np.array(normals, dtype=np.float32)
    io_npy.write_npy(args.outfile, datadict)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Basic PCA normal approximation')
    parser.add_argument('infile', help='Input .las, .ply, _npy')
    parser.add_argument('outfile', help='Output _npy')
    parser.add_argument('-k', help='Number of neighbours to use', default=10, type=int)
    args = parser.parse_args()
    main(args)
gpl-3.0
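compute_normal in the file above takes the smallest-variance PCA component of a point's k nearest neighbours as the surface normal. A self-contained sketch of that idea on synthetic data, with no masbpy or pykdtree dependency (the noise level and point count are arbitrary illustration choices): points scattered near the plane z = 0 should yield a normal close to (0, 0, 1).

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
# Points on the z = 0 plane with a little vertical noise.
pts = np.column_stack([rng.uniform(-1, 1, 200),
                       rng.uniform(-1, 1, 200),
                       rng.normal(scale=0.01, size=200)])

pca = PCA(n_components=3)
pca.fit(pts)
normal = pca.components_[-1]   # direction of least variance
if normal[-1] < 0:             # same "point upwards" convention as above
    normal *= -1
print(normal)                  # approximately [0, 0, 1]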
ningchi/scikit-learn
sklearn/cross_decomposition/pls_.py
14
28526
""" The :mod:`sklearn.pls` module implements Partial Least Squares (PLS). """ # Author: Edouard Duchesnay <[email protected]> # License: BSD 3 clause from ..base import BaseEstimator, RegressorMixin, TransformerMixin from ..utils import check_array, check_consistent_length from ..externals import six import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import linalg from ..utils import arpack from ..utils.validation import check_is_fitted __all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD'] def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False): """Inner loop of the iterative NIPALS algorithm. Provides an alternative to the svd(X'Y); returns the first left and right singular vectors of X'Y. See PLS for the meaning of the parameters. It is similar to the Power method for determining the eigenvectors and eigenvalues of a X'Y. """ y_score = Y[:, [0]] x_weights_old = 0 ite = 1 X_pinv = Y_pinv = None # Inner loop of the Wold algo. while True: # 1.1 Update u: the X weights if mode == "B": if X_pinv is None: X_pinv = linalg.pinv(X) # compute once pinv(X) x_weights = np.dot(X_pinv, y_score) else: # mode A # Mode A regress each X column on y_score x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score) # 1.2 Normalize u x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) # 1.3 Update x_score: the X latent scores x_score = np.dot(X, x_weights) # 2.1 Update y_weights if mode == "B": if Y_pinv is None: Y_pinv = linalg.pinv(Y) # compute once pinv(Y) y_weights = np.dot(Y_pinv, x_score) else: # Mode A regress each Y column on x_score y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score) ## 2.2 Normalize y_weights if norm_y_weights: y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) # 2.3 Update y_score: the Y latent scores y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights) ## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG x_weights_diff = x_weights - x_weights_old if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1: break if ite == max_iter: warnings.warn('Maximum number of iterations reached') break x_weights_old = x_weights ite += 1 return x_weights, y_weights, ite def _svd_cross_product(X, Y): C = np.dot(X.T, Y) U, s, Vh = linalg.svd(C, full_matrices=False) u = U[:, [0]] v = Vh.T[:, [0]] return u, v def _center_scale_xy(X, Y, scale=True): """ Center X, Y and scale if the scale parameter==True Returns ------- X, Y, x_mean, y_mean, x_std, y_std """ # center x_mean = X.mean(axis=0) X -= x_mean y_mean = Y.mean(axis=0) Y -= y_mean # scale if scale: x_std = X.std(axis=0, ddof=1) x_std[x_std == 0.0] = 1.0 X /= x_std y_std = Y.std(axis=0, ddof=1) y_std[y_std == 0.0] = 1.0 Y /= y_std else: x_std = np.ones(X.shape[1]) y_std = np.ones(Y.shape[1]) return X, Y, x_mean, y_mean, x_std, y_std class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin, RegressorMixin): """Partial Least Squares (PLS) This class implements the generic PLS algorithm, constructors' parameters allow to obtain a specific implementation such as: - PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132. With univariate response it implements PLS1. - PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and [Wegelin et al. 2000]. This parametrization implements the original Wold algorithm. 
We use the terminology defined by [Wegelin et al. 2000]. This implementation uses the PLS Wold 2 blocks algorithm based on two nested loops: (i) The outer loop iterate over components. (ii) The inner loop estimates the weights vectors. This can be done with two algo. (a) the inner loop of the original NIPALS algo. or (b) a SVD on residuals cross-covariance matrices. n_components : int, number of components to keep. (default 2). scale : boolean, scale data? (default True) deflation_mode : str, "canonical" or "regression". See notes. mode : "A" classical PLS and "B" CCA. See notes. norm_y_weights: boolean, normalize Y weights to one? (default False) algorithm : string, "nipals" or "svd" The algorithm used to estimate the weights. It will be called n_components times, i.e. once for each iteration of the outer loop. max_iter : an integer, the maximum number of iterations (default 500) of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real, default 1e-06 The tolerance used in the iterative algorithm. copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effects. Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_loadings_ : array, [p, n_components] X block loadings vectors. y_loadings_ : array, [q, n_components] Y block loadings vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. x_rotations_ : array, [p, n_components] X block to latents rotations. y_rotations_ : array, [q, n_components] Y block to latents rotations. coef_: array, [p, q] The coefficients of the linear model: ``Y = X coef_ + Err`` n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Not useful if the algorithm given is "svd". References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. In French but still a reference: Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. See also -------- PLSCanonical PLSRegression CCA PLS_SVD """ @abstractmethod def __init__(self, n_components=2, scale=True, deflation_mode="regression", mode="A", algorithm="nipals", norm_y_weights=False, max_iter=500, tol=1e-06, copy=True): self.n_components = n_components self.deflation_mode = deflation_mode self.mode = mode self.norm_y_weights = norm_y_weights self.scale = scale self.algorithm = algorithm self.max_iter = max_iter self.tol = tol self.copy = copy def fit(self, X, Y): """Fit model to data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples in the number of samples and n_features is the number of predictors. Y : array-like of response, shape = [n_samples, n_targets] Target vectors, where n_samples in the number of samples and n_targets is the number of response variables. 
""" # copy since this will contains the residuals (deflated) matrices check_consistent_length(X, Y) X = check_array(X, dtype=np.float64, copy=self.copy) Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False) if Y.ndim == 1: Y = Y.reshape(-1, 1) n = X.shape[0] p = X.shape[1] q = Y.shape[1] if self.n_components < 1 or self.n_components > p: raise ValueError('Invalid number of components: %d' % self.n_components) if self.algorithm not in ("svd", "nipals"): raise ValueError("Got algorithm %s when only 'svd' " "and 'nipals' are known" % self.algorithm) if self.algorithm == "svd" and self.mode == "B": raise ValueError('Incompatible configuration: mode B is not ' 'implemented with svd algorithm') if self.deflation_mode not in ["canonical", "regression"]: raise ValueError('The deflation mode is unknown') # Scale (in place) X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\ = _center_scale_xy(X, Y, self.scale) # Residuals (deflated) matrices Xk = X Yk = Y # Results matrices self.x_scores_ = np.zeros((n, self.n_components)) self.y_scores_ = np.zeros((n, self.n_components)) self.x_weights_ = np.zeros((p, self.n_components)) self.y_weights_ = np.zeros((q, self.n_components)) self.x_loadings_ = np.zeros((p, self.n_components)) self.y_loadings_ = np.zeros((q, self.n_components)) self.n_iter_ = [] # NIPALS algo: outer loop, over components for k in range(self.n_components): if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps): # Yk constant warnings.warn('Y residual constant at iteration %s' % k) break #1) weights estimation (inner loop) # ----------------------------------- if self.algorithm == "nipals": x_weights, y_weights, n_iter_ = \ _nipals_twoblocks_inner_loop( X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter, tol=self.tol, norm_y_weights=self.norm_y_weights) self.n_iter_.append(n_iter_) elif self.algorithm == "svd": x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk) # compute scores x_scores = np.dot(Xk, x_weights) if self.norm_y_weights: y_ss = 1 else: y_ss = np.dot(y_weights.T, y_weights) y_scores = np.dot(Yk, y_weights) / y_ss # test for null variance if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps: warnings.warn('X scores are null at iteration %s' % k) break #2) Deflation (in place) # ---------------------- # Possible memory footprint reduction may done here: in order to # avoid the allocation of a data chunk for the rank-one # approximations matrix which is then subtracted to Xk, we suggest # to perform a column-wise deflation. # # - regress Xk's on x_score x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores) # - subtract rank-one approximations to obtain remainder matrix Xk -= np.dot(x_scores, x_loadings.T) if self.deflation_mode == "canonical": # - regress Yk's on y_score, then subtract rank-one approx. y_loadings = (np.dot(Yk.T, y_scores) / np.dot(y_scores.T, y_scores)) Yk -= np.dot(y_scores, y_loadings.T) if self.deflation_mode == "regression": # - regress Yk's on x_score, then subtract rank-one approx. 
y_loadings = (np.dot(Yk.T, x_scores) / np.dot(x_scores.T, x_scores)) Yk -= np.dot(x_scores, y_loadings.T) # 3) Store weights, scores and loadings # Notation: self.x_scores_[:, k] = x_scores.ravel() # T self.y_scores_[:, k] = y_scores.ravel() # U self.x_weights_[:, k] = x_weights.ravel() # W self.y_weights_[:, k] = y_weights.ravel() # C self.x_loadings_[:, k] = x_loadings.ravel() # P self.y_loadings_[:, k] = y_loadings.ravel() # Q # Such that: X = TP' + Err and Y = UQ' + Err # 4) rotations from input space to transformed space (scores) # T = X W(P'W)^-1 = XW* (W* : p x k matrix) # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix) self.x_rotations_ = np.dot( self.x_weights_, linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_))) if Y.shape[1] > 1: self.y_rotations_ = np.dot( self.y_weights_, linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_))) else: self.y_rotations_ = np.ones(1) if True or self.deflation_mode == "regression": # FIXME what's with the if? # Estimate regression coefficient # Regress Y on T # Y = TQ' + Err, # Then express in function of X # Y = X W(P'W)^-1Q' + Err = XB + Err # => B = W*Q' (p x q) self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T) self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ * self.y_std_) return self def transform(self, X, Y=None, copy=True): """Apply the dimension reduction learned on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. copy : boolean, default True Whether to copy X and Y, or perform in-place normalization. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. """ check_is_fitted(self, 'x_mean_') X = check_array(X, copy=copy) # Normalize X -= self.x_mean_ X /= self.x_std_ # Apply rotation x_scores = np.dot(X, self.x_rotations_) if Y is not None: Y = check_array(Y, ensure_2d=False, copy=copy) if Y.ndim == 1: Y = Y.reshape(-1, 1) Y -= self.y_mean_ Y /= self.y_std_ y_scores = np.dot(Y, self.y_rotations_) return x_scores, y_scores return x_scores def predict(self, X, copy=True): """Apply the dimension reduction learned on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. copy : boolean, default True Whether to copy X and Y, or perform in-place normalization. Notes ----- This call requires the estimation of a p x q matrix, which may be an issue in high dimensional space. """ check_is_fitted(self, 'x_mean_') X = check_array(X, copy=copy) # Normalize X -= self.x_mean_ X /= self.x_std_ Ypred = np.dot(X, self.coef_) return Ypred + self.y_mean_ def fit_transform(self, X, y=None, **fit_params): """Learn and apply the dimension reduction on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. copy : boolean, default True Whether to copy X and Y, or perform in-place normalization. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. 
""" check_is_fitted(self, 'x_mean_') return self.fit(X, y, **fit_params).transform(X, y) class PLSRegression(_PLS): """PLS regression PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1 in case of one dimensional response. This class inherits from _PLS with mode="A", deflation_mode="regression", norm_y_weights=False and algorithm="nipals". Parameters ---------- n_components : int, (default 2) Number of components to keep. scale : boolean, (default True) whether to scale the data max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real Tolerance used in the iterative algorithm default 1e-06. copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effect Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_loadings_ : array, [p, n_components] X block loadings vectors. y_loadings_ : array, [q, n_components] Y block loadings vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. x_rotations_ : array, [p, n_components] X block to latents rotations. y_rotations_ : array, [q, n_components] Y block to latents rotations. coef_: array, [p, q] The coefficients of the linear model: ``Y = X coef_ + Err`` n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Notes ----- For each component k, find weights u, v that optimizes: ``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1`` Note that it maximizes both the correlations between the scores and the intra-block variances. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current X score. This performs the PLS regression known as PLS2. This mode is prediction oriented. This implementation provides the same results that 3 PLS packages provided in the R language (R-project): - "mixOmics" with function pls(X, Y, mode = "regression") - "plspm " with function plsreg2(X, Y) - "pls" with function oscorespls.fit(X, Y) Examples -------- >>> from sklearn.cross_decomposition import PLSRegression >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> pls2 = PLSRegression(n_components=2) >>> pls2.fit(X, Y) ... # doctest: +NORMALIZE_WHITESPACE PLSRegression(copy=True, max_iter=500, n_components=2, scale=True, tol=1e-06) >>> Y_pred = pls2.predict(X) References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. In french but still a reference: Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. """ def __init__(self, n_components=2, scale=True, max_iter=500, tol=1e-06, copy=True): _PLS.__init__(self, n_components=n_components, scale=scale, deflation_mode="regression", mode="A", norm_y_weights=False, max_iter=max_iter, tol=tol, copy=copy) @property def coefs(self): check_is_fitted(self, 'coef_') DeprecationWarning("``coefs`` attribute has been deprecated and will be " "removed in version 0.17. 
Use ``coef_`` instead") return self.coef_ class PLSCanonical(_PLS): """ PLSCanonical implements the 2 blocks canonical PLS of the original Wold algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000]. This class inherits from PLS with mode="A" and deflation_mode="canonical", norm_y_weights=True and algorithm="nipals", but svd should provide similar results up to numerical errors. Parameters ---------- scale : boolean, scale data? (default True) algorithm : string, "nipals" or "svd" The algorithm used to estimate the weights. It will be called n_components times, i.e. once for each iteration of the outer loop. max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real, default 1e-06 the tolerance used in the iterative algorithm copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effect n_components : int, number of components to keep. (default 2). Attributes ---------- x_weights_ : array, shape = [p, n_components] X block weights vectors. y_weights_ : array, shape = [q, n_components] Y block weights vectors. x_loadings_ : array, shape = [p, n_components] X block loadings vectors. y_loadings_ : array, shape = [q, n_components] Y block loadings vectors. x_scores_ : array, shape = [n_samples, n_components] X scores. y_scores_ : array, shape = [n_samples, n_components] Y scores. x_rotations_ : array, shape = [p, n_components] X block to latents rotations. y_rotations_ : array, shape = [q, n_components] Y block to latents rotations. n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Not useful if the algorithm provided is "svd". Notes ----- For each component k, find weights u, v that optimize:: max corr(Xk u, Yk v) * var(Xk u) var(Yk u), such that ``|u| = |v| = 1`` Note that it maximizes both the correlations between the scores and the intra-block variances. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current Y score. This performs a canonical symmetric version of the PLS regression. But slightly different than the CCA. This is mostly used for modeling. This implementation provides the same results that the "plspm" package provided in the R language (R-project), using the function plsca(X, Y). Results are equal or collinear with the function ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference relies in the fact that mixOmics implementation does not exactly implement the Wold algorithm since it does not normalize y_weights to one. Examples -------- >>> from sklearn.cross_decomposition import PLSCanonical >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> plsca = PLSCanonical(n_components=2) >>> plsca.fit(X, Y) ... # doctest: +NORMALIZE_WHITESPACE PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2, scale=True, tol=1e-06) >>> X_c, Y_c = plsca.transform(X, Y) References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. 
See also -------- CCA PLSSVD """ def __init__(self, n_components=2, scale=True, algorithm="nipals", max_iter=500, tol=1e-06, copy=True): _PLS.__init__(self, n_components=n_components, scale=scale, deflation_mode="canonical", mode="A", norm_y_weights=True, algorithm=algorithm, max_iter=max_iter, tol=tol, copy=copy) class PLSSVD(BaseEstimator, TransformerMixin): """Partial Least Square SVD Simply perform a svd on the crosscovariance matrix: X'Y There are no iterative deflation here. Parameters ---------- n_components : int, default 2 Number of components to keep. scale : boolean, default True Whether to scale X and Y. copy : boolean, default True Whether to copy X and Y, or perform in-place computations. Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. See also -------- PLSCanonical CCA """ def __init__(self, n_components=2, scale=True, copy=True): self.n_components = n_components self.scale = scale self.copy = copy def fit(self, X, Y): # copy since this will contains the centered data check_consistent_length(X, Y) X = check_array(X, dtype=np.float64, copy=self.copy) Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False) if Y.ndim == 1: Y = Y.reshape(-1, 1) if self.n_components > max(Y.shape[1], X.shape[1]): raise ValueError("Invalid number of components n_components=%d with " "X of shape %s and Y of shape %s." % (self.n_components, str(X.shape), str(Y.shape))) # Scale (in place) X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\ _center_scale_xy(X, Y, self.scale) # svd(X'Y) C = np.dot(X.T, Y) # The arpack svds solver only works if the number of extracted # components is smaller than rank(X) - 1. Hence, if we want to extract # all the components (C.shape[1]), we have to use another one. Else, # let's use arpacks to compute only the interesting components. if self.n_components >= np.min(C.shape): U, s, V = linalg.svd(C, full_matrices=False) else: U, s, V = arpack.svds(C, k=self.n_components) V = V.T self.x_scores_ = np.dot(X, U) self.y_scores_ = np.dot(Y, V) self.x_weights_ = U self.y_weights_ = V return self def transform(self, X, Y=None): """Apply the dimension reduction learned on the train data.""" check_is_fitted(self, 'x_mean_') X = check_array(X, dtype=np.float64) Xr = (X - self.x_mean_) / self.x_std_ x_scores = np.dot(Xr, self.x_weights_) if Y is not None: if Y.ndim == 1: Y = Y.reshape(-1, 1) Yr = (Y - self.y_mean_) / self.y_std_ y_scores = np.dot(Yr, self.y_weights_) return x_scores, y_scores return x_scores def fit_transform(self, X, y=None, **fit_params): """Learn and apply the dimension reduction on the train data. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. """ return self.fit(X, y, **fit_params).transform(X, y)
bsd-3-clause
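The deflation step in _PLS.fit above regresses the residual matrix Xk on the current score vector and subtracts the rank-one approximation. A small standalone sketch of that step, re-implemented here with plain NumPy rather than by calling the private helpers: after deflation the residual no longer carries any component along the extracted score.

import numpy as np

rng = np.random.RandomState(42)
Xk = rng.randn(20, 5)
x_weights = rng.randn(5, 1)
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))                 # normalize, as in NIPALS

x_scores = np.dot(Xk, x_weights)                                     # latent scores t
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)   # regress Xk on t

Xk_deflated = Xk - np.dot(x_scores, x_loadings.T)                    # subtract rank-one approximation

# The deflated matrix is orthogonal to the score that was removed.
print(np.allclose(np.dot(Xk_deflated.T, x_scores), 0.))              # True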
Hubert51/AutoGrading
learning/Ruijie/handwriting_recognization_SVM/data_collection.py
1
3476
# -*- coding: utf-8 -*-
import cv2
import numpy as np
# import matplotlib.pyplot as plt
from PIL import Image
from difflib import SequenceMatcher
from PIL import *
from PIL import ImageEnhance
import time
from pytesseract import image_to_string, image_to_boxes
import os
import sys

# define some variable
ENTER = 13
INDEX = 0

# initialize the list of reference points and boolean indicating
# whether cropping is being performed or not
refPt = []
refPts = []
cropping = False


def click_and_crop(event, x, y, flags, param):
    # grab references to the global variables
    global refPt, cropping

    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being
    # performed
    if event == cv2.EVENT_LBUTTONDOWN:
        refPt = [(x, y)]
        cropping = True

    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        refPt.append((x, y))
        cropping = False

        # draw a rectangle around the region of interest
        cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.imshow("image", image)


def get_coord(image):
    # load the image, clone it, and setup the mouse callback function
    clone = image.copy()
    global refPts
    refPts.clear()
    while True:
        cv2.namedWindow("image")
        cv2.setMouseCallback("image", click_and_crop)
        # keep looping until the 'q' key is pressed
        while True:
            # display the image and wait for a keypress
            cv2.imshow("image", image)
            key = cv2.waitKey(0) & 0xFF
            # if the 'r' key is pressed, reset the cropping region
            if key == ord("r"):
                image = clone.copy()
            # if the 'enter' or 'esc' key is pressed, break from the loop
            elif key == 13 or key == 27:
                break
        if key == 27:
            break
        # if there are two reference points, then crop the region of interest
        # from teh image and display it
        if len(refPt) == 2:
            roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
            refPts += refPt
            # cv2.imshow("ROI", roi)
            # key = cv2.waitKey(0)
        # close all open windows
        # cv2.destroyAllWindows()
    # print(refPts)
    cv2.destroyAllWindows()
    return refPts


def write_coord(img_address, coord):
    f = open("description", "a")
    f.write("{}: {}\n".format(img_address, coord))
    f.close()


if __name__ == '__main__':
    if len(sys.argv) != 4:
        print("Usage: process_data.py original_folder pos_folder neg_folder ")
        print("original_folder is the folder contains original image")
        print("pos_folder is the folder contains the positive images after process")
        print("neg_folder is the folder contains the negative images after process")
        sys.exit(1)

    address = sys.argv[1]
    pos_folder = sys.argv[2]
    neg_folder = sys.argv[3]

    if not os.path.exists(pos_folder):
        os.makedirs(pos_folder)
    if not os.path.exists(neg_folder):
        os.makedirs(neg_folder)

    images = os.listdir(address)
    for file in images:
        img_address = "{}/{}".format(address, file)
        image = cv2.imread(img_address)
        coord = get_coord(image)
        write_coord(img_address, coord)
mit
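get_coord in the file above stores two clicked (x, y) corners per region, and the crop relies on NumPy slicing with rows indexed by y and columns by x (roi = clone[y1:y2, x1:x2]). A tiny sketch of that slicing convention on a synthetic array, without the OpenCV GUI; the corner values are made up for illustration.

import numpy as np

image = np.arange(100 * 120).reshape(100, 120)   # stand-in for a grayscale image
refPt = [(30, 10), (50, 40)]                     # (x, y) corners, as click_and_crop records them

(x1, y1), (x2, y2) = refPt
roi = image[y1:y2, x1:x2]                        # rows are y, columns are x
print(roi.shape)                                 # (30, 20)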
PedroTrujilloV/nest-simulator
pynest/nest/voltage_trace.py
12
6711
# -*- coding: utf-8 -*-
#
# voltage_trace.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

import nest
import numpy
import pylab


def from_file(fname, title=None, grayscale=False):
    if nest.is_iterable(fname):
        data = None
        for f in fname:
            if data is None:
                data = numpy.loadtxt(f)
            else:
                data = numpy.concatenate((data, numpy.loadtxt(f)))
    else:
        data = numpy.loadtxt(fname)

    if grayscale:
        line_style = "k"
    else:
        line_style = ""

    if len(data.shape) == 1:
        print("INFO: only found 1 column in the file. Assuming that only one neuron was recorded.")
        plotid = pylab.plot(data, line_style)
        pylab.xlabel("Time (steps of length interval)")

    elif data.shape[1] == 2:
        print("INFO: found 2 columns in the file. Assuming them to be gid, pot.")

        plotid = []
        data_dict = {}
        for d in data:
            if not d[0] in data_dict:
                data_dict[d[0]] = [d[1]]
            else:
                data_dict[d[0]].append(d[1])

        for d in data_dict:
            plotid.append(pylab.plot(data_dict[d], line_style, label="Neuron %i" % d))

        pylab.xlabel("Time (steps of length interval)")
        pylab.legend()

    elif data.shape[1] == 3:
        plotid = []
        data_dict = {}
        g = data[0][0]
        t = []
        for d in data:
            if not d[0] in data_dict:
                data_dict[d[0]] = [d[2]]
            else:
                data_dict[d[0]].append(d[2])
            if d[0] == g:
                t.append(d[1])

        for d in data_dict:
            plotid.append(pylab.plot(t, data_dict[d], line_style, label="Neuron %i" % d))

        pylab.xlabel("Time (ms)")
        pylab.legend()

    else:
        raise ValueError("Inappropriate data shape %i!" % data.shape)

    if not title:
        title = "Membrane potential from file '%s'" % fname

    pylab.title(title)
    pylab.ylabel("Membrane potential (mV)")
    pylab.draw()

    return plotid


def from_device(detec, neurons=None, title=None, grayscale=False, timeunit="ms"):
    """
    Plot the membrane potential of a set of neurons recorded by the given voltmeter.
    """
    if len(detec) > 1:
        raise nest.NESTError("Please provide a single voltmeter.")

    if not nest.GetStatus(detec)[0]['model'] in ('voltmeter', 'multimeter'):
        raise nest.NESTError("Please provide a voltmeter or a multimeter measuring V_m.")
    elif nest.GetStatus(detec)[0]['model'] == 'multimeter':
        if not "V_m" in nest.GetStatus(detec, "record_from")[0]:
            raise nest.NESTError("Please provide a multimeter measuring V_m.")
        elif (not nest.GetStatus(detec, "to_memory")[0] and
              len(nest.GetStatus(detec, "record_from")[0]) > 1):
            raise nest.NESTError("Please provide a multimeter measuring only V_m or record to memory!")

    if nest.GetStatus(detec, "to_memory")[0]:

        timefactor = 1.0
        if not nest.GetStatus(detec)[0]['time_in_steps']:
            if timeunit == "s":
                timefactor = 1000.0
            else:
                timeunit = "ms"

        times, voltages = _from_memory(detec)

        if not len(times):
            raise nest.NESTError("No events recorded! Make sure that withtime and withgid are set to True.")

        if neurons is None:
            neurons = voltages.keys()

        plotids = []
        for neuron in neurons:
            time_values = numpy.array(times[neuron]) / timefactor

            if grayscale:
                line_style = "k"
            else:
                line_style = ""

            try:
                plotids.append(pylab.plot(time_values, voltages[neuron],
                                          line_style, label="Neuron %i" % neuron))
            except KeyError:
                print("INFO: Wrong ID: {0}".format(neuron))

        if not title:
            title = "Membrane potential"

        pylab.title(title)
        pylab.ylabel("Membrane potential (mV)")

        if nest.GetStatus(detec)[0]['time_in_steps']:
            pylab.xlabel("Steps")
        else:
            pylab.xlabel("Time (%s)" % timeunit)

        pylab.legend(loc="best")
        pylab.draw()

        return plotids

    elif nest.GetStatus(detec, "to_file")[0]:
        fname = nest.GetStatus(detec, "filenames")[0]
        return from_file(fname, title, grayscale)
    else:
        raise nest.NESTError("Provided devices neither records to file, nor to memory.")


def _from_memory(detec):
    import array

    ev = nest.GetStatus(detec, 'events')[0]
    potentials = ev['V_m']
    senders = ev['senders']

    v = {}
    t = {}

    if 'times' in ev:
        times = ev['times']
        for s, currentsender in enumerate(senders):
            if currentsender not in v:
                v[currentsender] = array.array('f')
                t[currentsender] = array.array('f')

            v[currentsender].append(float(potentials[s]))
            t[currentsender].append(float(times[s]))
    else:
        # reconstruct the time vector, if not stored explicitly
        detec_status = nest.GetStatus(detec)[0]
        origin = detec_status['origin']
        start = detec_status['start']
        interval = detec_status['interval']
        senders_uniq = numpy.unique(senders)
        num_intvls = len(senders) / len(senders_uniq)
        times_s = origin + start + interval + interval * numpy.array(range(num_intvls))

        for s, currentsender in enumerate(senders):
            if currentsender not in v:
                v[currentsender] = array.array('f')
                t[currentsender] = times_s
            v[currentsender].append(float(potentials[s]))

    return t, v


def show():
    """
    Call pylab.show() to show all figures and enter the GUI main loop.
    Python will block until all figure windows are closed again.
    You should call this function only once at the end of a script.

    See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
    """
    pylab.show()
gpl-2.0
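from_device in the file above expects the id of a recording device and reads back its events. A hedged sketch of the typical NEST 2.x workflow that produces such a device; "iaf_psc_alpha" and "voltmeter" are the stock NEST model names, and the input current and simulation time are arbitrary illustration values, so adjust to your installation.

import nest
import nest.voltage_trace as voltage_trace

nest.ResetKernel()

neuron = nest.Create("iaf_psc_alpha")
nest.SetStatus(neuron, {"I_e": 376.0})   # constant input current so V_m actually moves

voltmeter = nest.Create("voltmeter")
nest.Connect(voltmeter, neuron)          # the voltmeter records V_m from the neuron

nest.Simulate(1000.0)                    # simulate for 1000 ms

voltage_trace.from_device(voltmeter)
voltage_trace.show()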
kcaluwae/tensegrity-el-simulator
examples/kinematic_2spring/mplab_gui.py
1
4602
''' Ken Caluwaerts 2012-2013 <[email protected]> ''' import matplotlib.pylab as plt plt.ion() import matplotlib.animation as animation import mpl_toolkits.mplot3d.axes3d as p3 import threading import numpy as np class Visualization(object): ''' A simple 3D visualization for tensegrity structures using the new Matplotlib animation interface (version 1.1.1) Based on: http://matplotlib.sourceforge.net/trunk-docs/examples/animation/simple_3danim.py ''' def __init__(self, simulator,draw_node_indices=True,tagged_springs = None): self.simulator = simulator self.changed = False #Do we need to redraw self.iteration=0 self.draw_node_indices = draw_node_indices self.tagged_springs = tagged_springs self.create_window() #self.animation = animation.FuncAnimation(self.fig,self.update,25,init_func=self.init_plot, blit=False) self.init_plot() def create_window(self): ''' Creates a simple 3D plotting window ''' self.fig = plt.figure() self.ax = p3.Axes3D(self.fig) def init_plot(self): ''' This function initializes the plotting window. ''' #get the structure for the first time from the simulator nodes = self.simulator.nodes #plot the structure self.bars_plot = [] for i in range(self.simulator.B): _from = self.simulator.bar_connections[i].argmin() _to = self.simulator.bar_connections[i].argmax() self.bars_plot.append(self.ax.plot(nodes[(_from,_to),0],nodes[(_from,_to),1],nodes[(_from,_to),2],c='r',linewidth=3)[0]) self.springs_plot = [] for i in range(self.simulator.S): _from = self.simulator.spring_connections[i].argmin() _to = self.simulator.spring_connections[i].argmax() if(self.tagged_springs is None): c = 'g' else: c = 'g' if self.tagged_springs[i]==0 else 'b' self.springs_plot.append(self.ax.plot(nodes[(_from,_to),0],nodes[(_from,_to),1],nodes[(_from,_to),2],c=c)[0]) #draw fixed nodes #fixed_nodes = nodes[self.simulator.nodes_fixed_indices] #if(fixed_nodes.shape[0]>0): # self.ax.plot(fixed_nodes[:,0],fixed_nodes[:,1],fixed_nodes[:,2],'b+') self.nodes_plot = self.ax.plot(self.simulator.nodes_eucl[:,0],self.simulator.nodes_eucl[:,1],self.simulator.nodes_eucl[:,2],'yo') #draw node indices self.indices_plot = [] if(self.draw_node_indices): for i in xrange(self.simulator.nodes_eucl.shape[0]): self.indices_plot.append(self.ax.text(self.simulator.nodes_eucl[i,0],self.simulator.nodes_eucl[i,1],self.simulator.nodes_eucl[i,2],i+1)) #axes properties self.ax.set_xlim3d([-.5, .5]) self.ax.set_xlabel('X') self.ax.set_ylim3d([-.5, .5]) self.ax.set_ylabel('Y') self.ax.set_zlim3d([0, 1]) self.ax.set_zlabel('Z') self.last_time = 0 return self.bars_plot+self.springs_plot def update(self,iteration): ''' This function refreshes the plot ''' if self.changed: for i in range(self.simulator.B): nodes = self.simulator.nodes_eucl _from = self.simulator.bar_connections[i].argmin() _to = self.simulator.bar_connections[i].argmax() bar_plot = self.bars_plot[i] # NOTE: there is no .set_data() for 3 dim data... bar_plot.set_data(nodes[(_from,_to),0],nodes[(_from,_to),1]) bar_plot.set_3d_properties(nodes[(_from,_to),2]) for i in range(self.simulator.S): nodes = self.simulator.nodes_eucl _from = self.simulator.spring_connections[i].argmin() _to = self.simulator.spring_connections[i].argmax() springs_plot = self.springs_plot[i] # NOTE: there is no .set_data() for 3 dim data... 
springs_plot.set_data(nodes[(_from,_to),0],nodes[(_from,_to),1]) springs_plot.set_3d_properties(nodes[(_from,_to),2]) #springs_plot.set_linewidth(self.simulator.spring_forces[i]) self.nodes_plot[0].set_data(self.simulator.nodes_eucl[:,0],self.simulator.nodes_eucl[:,1]) self.nodes_plot[0].set_3d_properties(self.simulator.nodes_eucl[:,2]) self.changed = False if(self.draw_node_indices): for i in xrange(self.simulator.nodes_eucl.shape[0]): self.indices_plot[i]._position3d = np.array((self.simulator.nodes_eucl[i,0],self.simulator.nodes_eucl[i,1],self.simulator.nodes_eucl[i,2])) #if(iteration%10==0): # self.ax.plot(self.simulator.nodes_eucl[:,0],self.simulator.nodes_eucl[:,1],self.simulator.nodes_eucl[:,2],'bo') return self.bars_plot+self.springs_plot+self.nodes_plot+self.indices_plot else: return [] def callback(self,event): self.changed = True self.last_time+=1 self.iteration+=1 if(self.last_time>10): self.last_time=0 self.update(0) plt.draw() #if(self.iteration%2==0): # plt.savefig('img/video_%.5d.png'%self.iteration)
mit
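The Visualization class above refreshes its 3D lines with set_data for x/y plus set_3d_properties for z, because matplotlib's Line3D has no single 3D setter. A minimal self-contained sketch of that update pattern with plain matplotlib and no simulator attached:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the 3d projection)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

t = np.linspace(0, 2 * np.pi, 50)
line, = ax.plot(np.cos(t), np.sin(t), t)

# Later, move the line to new coordinates the same way mplab_gui.py does.
line.set_data(np.cos(t + 1.0), np.sin(t + 1.0))
line.set_3d_properties(t + 1.0)
plt.draw()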
gfyoung/scipy
doc/source/conf.py
3
12805
# -*- coding: utf-8 -*- from __future__ import print_function import sys, os, re from datetime import date # Check Sphinx version import sphinx if sphinx.__version__ < "1.6": raise RuntimeError("Sphinx 1.6 or newer required") needs_sphinx = '1.6' # ----------------------------------------------------------------------------- # General configuration # ----------------------------------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sys.path.insert(0, os.path.abspath('../sphinxext')) sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.autosummary', 'scipyoptdoc', 'doi_role'] # Determine if the matplotlib has a recent enough version of the # plot_directive. try: from matplotlib.sphinxext import plot_directive except ImportError: use_matplotlib_plot_directive = False else: try: use_matplotlib_plot_directive = (plot_directive.__version__ >= 2) except AttributeError: use_matplotlib_plot_directive = False if use_matplotlib_plot_directive: extensions.append('matplotlib.sphinxext.plot_directive') else: raise RuntimeError("You need a recent enough version of matplotlib") # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'SciPy' copyright = '2008-%s, The SciPy community' % date.today().year # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. import scipy version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__) release = scipy.__version__ print("Scipy (VERSION %s)" % (version,)) # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" # List of directories, relative to source directories, that shouldn't be searched # for source files. exclude_dirs = [] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # ----------------------------------------------------------------------------- # HTML output # ----------------------------------------------------------------------------- themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme') if os.path.isdir(themedir): html_theme = 'scipy' html_theme_path = [themedir] if 'scipyorg' in tags: # Build for the scipy.org website html_theme_options = { "edit_link": True, "sidebar": "right", "scipy_org_logo": True, "rootlinks": [("https://scipy.org/", "Scipy.org"), ("https://docs.scipy.org/", "Docs")] } else: # Default build html_theme_options = { "edit_link": False, "sidebar": "left", "scipy_org_logo": False, "rootlinks": [] } html_logo = '_static/scipyshiny_small.png' html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']} else: # Build without scipy.org sphinx theme present if 'scipyorg' in tags: raise RuntimeError("Get the scipy-sphinx-theme first, " "via git submodule init & update") else: html_style = 'scipy_fallback.css' html_logo = '_static/scipyshiny_small.png' html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']} html_title = "%s v%s Reference Guide" % (project, version) html_static_path = ['_static'] html_last_updated_fmt = '%b %d, %Y' html_additional_pages = {} html_domain_indices = True html_copy_source = False html_file_suffix = '.html' htmlhelp_basename = 'scipy' mathjax_path = "scipy-mathjax/MathJax.js?config=scipy-mathjax" # ----------------------------------------------------------------------------- # LaTeX output # ----------------------------------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). _stdauthor = 'Written by the SciPy community' latex_documents = [ ('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'), # ('user/index', 'scipy-user.tex', 'SciPy User Guide', # _stdauthor, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False # fix issues with Unicode characters latex_engine = 'xelatex' latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': r''' % In the parameters etc. 
sections, align uniformly, and adjust label emphasis \usepackage{expdlist} \let\latexdescription=\description \let\endlatexdescription=\enddescription \renewenvironment{description} {\renewenvironment{description} {\begin{latexdescription}% [\setleftmargin{50pt}\breaklabel\setlabelstyle{\bfseries}]% }% {\end{latexdescription}}% \begin{latexdescription}% [\setleftmargin{15pt}\breaklabel\setlabelstyle{\bfseries\itshape}]% }% {\end{latexdescription}} % Fix bug in expdlist's modified \@item \usepackage{etoolbox} \makeatletter \patchcmd\@item{{\@breaklabel} }{{\@breaklabel}}{}{} % Fix bug in expdlist's way of breaking the line after long item label \def\breaklabel{% \def\@breaklabel{% \leavevmode\par % now a hack because Sphinx inserts \leavevmode after term node \def\leavevmode{\def\leavevmode{\unhbox\voidb@x}}% }% } \makeatother % Make Examples/etc section headers smaller and more compact \titlespacing*{\paragraph}{0pt}{1ex}{0pt} % Save vertical space in parameter lists and elsewhere \makeatletter \renewenvironment{quote}% {\list{}{\topsep=0pt\relax \parsep \z@ \@plus\p@}% \item\relax}% {\endlist} \makeatother % Avoid small font size in code-blocks \fvset{fontsize=auto} % Use left-alignment per default in tabulary rendered tables \newcolumntype{T}{L} % Get some useful deeper bookmarks and table of contents in PDF \setcounter{tocdepth}{1} % Fix: ≠ is unknown to XeLaTeX's default font Latin Modern \usepackage{newunicodechar} \newunicodechar{≠}{\ensuremath{\neq}} % Get PDF to use maximal depth bookmarks \hypersetup{bookmarksdepth=subparagraph} % reduce hyperref warnings \pdfstringdefDisableCommands{% \let\sphinxupquote\empty \let\sphinxstyleliteralintitle\empty \let\sphinxstyleemphasis\empty } ''', # Latex figure (float) alignment # # 'figure_align': 'htbp', # benefit from Sphinx built-in workaround of LaTeX's list limitations 'maxlistdepth': '12', # reduce TeX warnings about underfull boxes in the index 'printindex': r'\raggedright\printindex', # avoid potential problems arising from erroneous mark-up of the # \mathbf{\Gamma} type 'passoptionstopackages': r'\PassOptionsToPackage{no-math}{fontspec}', } # ----------------------------------------------------------------------------- # Intersphinx configuration # ----------------------------------------------------------------------------- intersphinx_mapping = { 'python': ('https://docs.python.org/dev', None), 'numpy': ('https://docs.scipy.org/doc/numpy', None), 'matplotlib': ('https://matplotlib.org', None), } # ----------------------------------------------------------------------------- # Numpy extensions # ----------------------------------------------------------------------------- # If we want to do a phantom import from an XML file for all autodocs phantom_import_file = 'dump.xml' # Generate plots for example sections numpydoc_use_plots = True # ----------------------------------------------------------------------------- # Autosummary # ----------------------------------------------------------------------------- if sphinx.__version__ >= "0.7": import glob autosummary_generate = glob.glob("*.rst") # ----------------------------------------------------------------------------- # Coverage checker # ----------------------------------------------------------------------------- coverage_ignore_modules = r""" """.split() coverage_ignore_functions = r""" test($|_) (some|all)true bitwise_not cumproduct pkgload generic\. 
""".split() coverage_ignore_classes = r""" """.split() coverage_c_path = [] coverage_c_regexes = {} coverage_ignore_c_items = {} #------------------------------------------------------------------------------ # Plot #------------------------------------------------------------------------------ plot_pre_code = """ import numpy as np np.random.seed(123) """ plot_include_source = True plot_formats = [('png', 96), 'pdf'] plot_html_show_formats = False plot_html_show_source_link = False import math phi = (math.sqrt(5) + 1)/2 font_size = 13*72/96.0 # 13 px plot_rcparams = { 'font.size': font_size, 'axes.titlesize': font_size, 'axes.labelsize': font_size, 'xtick.labelsize': font_size, 'ytick.labelsize': font_size, 'legend.fontsize': font_size, 'figure.figsize': (3*phi, 3), 'figure.subplot.bottom': 0.2, 'figure.subplot.left': 0.2, 'figure.subplot.right': 0.9, 'figure.subplot.top': 0.85, 'figure.subplot.wspace': 0.4, 'text.usetex': False, } if not use_matplotlib_plot_directive: import matplotlib matplotlib.rcParams.update(plot_rcparams) # ----------------------------------------------------------------------------- # Source code links # ----------------------------------------------------------------------------- import re import inspect from os.path import relpath, dirname for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']: try: __import__(name) extensions.append(name) break except ImportError: pass else: print("NOTE: linkcode extension not found -- no links to source generated") def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) except Exception: return None try: fn = inspect.getsourcefile(obj) except Exception: fn = None if not fn: try: fn = inspect.getsourcefile(sys.modules[obj.__module__]) except Exception: fn = None if not fn: return None try: source, lineno = inspect.getsourcelines(obj) except Exception: lineno = None if lineno: linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) else: linespec = "" startdir = os.path.abspath(os.path.join(dirname(scipy.__file__), '..')) fn = relpath(fn, start=startdir).replace(os.path.sep, '/') if fn.startswith('scipy/'): m = re.match(r'^.*dev0\+([a-f0-9]+)$', scipy.__version__) if m: return "https://github.com/scipy/scipy/blob/%s/%s%s" % ( m.group(1), fn, linespec) elif 'dev' in scipy.__version__: return "https://github.com/scipy/scipy/blob/master/%s%s" % ( fn, linespec) else: return "https://github.com/scipy/scipy/blob/v%s/%s%s" % ( scipy.__version__, fn, linespec) else: return None
bsd-3-clause
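Sphinx calls the linkcode_resolve(domain, info) hook defined above once per documented Python object, passing the module and dotted object name and treating a None return as "no source link". A small sketch of that calling convention outside Sphinx; the function body and URL here are hypothetical stand-ins, not the scipy implementation.

# Exercise the same (domain, info) signature Sphinx uses for sphinx.ext.linkcode.
def toy_linkcode_resolve(domain, info):
    if domain != 'py' or not info.get('module'):
        return None
    return "https://example.org/src/%s.html#%s" % (info['module'], info['fullname'])

print(toy_linkcode_resolve('py', {'module': 'scipy.optimize', 'fullname': 'minimize'}))
print(toy_linkcode_resolve('c', {'module': '', 'fullname': ''}))   # -> None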
rs2/pandas
pandas/core/frame.py
1
320009
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ from __future__ import annotations import collections from collections import abc import datetime from io import StringIO import itertools from textwrap import dedent from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Dict, FrozenSet, Hashable, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Type, Union, cast, ) import warnings import numpy as np import numpy.ma as ma from pandas._config import get_option from pandas._libs import algos as libalgos, lib, properties from pandas._libs.lib import no_default from pandas._typing import ( AggFuncType, ArrayLike, Axes, Axis, CompressionOptions, Dtype, FilePathOrBuffer, FrameOrSeriesUnion, IndexKeyFunc, Label, Level, Renamer, StorageOptions, ValueKeyFunc, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import ( Appender, Substitution, deprecate_kwarg, doc, rewrite_axis_style_signature, ) from pandas.util._validators import ( validate_axis_style_args, validate_bool_kwarg, validate_percentile, ) from pandas.core.dtypes.cast import ( cast_scalar_to_array, coerce_to_dtypes, construct_1d_arraylike_from_scalar, find_common_type, infer_dtype_from_scalar, invalidate_string_dtypes, maybe_cast_to_datetime, maybe_convert_platform, maybe_downcast_to_dtype, maybe_infer_to_datetimelike, maybe_upcast, maybe_upcast_putmask, validate_numeric_casting, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, infer_dtype_from_object, is_bool_dtype, is_dataclass, is_datetime64_any_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_named_tuple, is_object_dtype, is_scalar, is_sequence, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna from pandas.core import algorithms, common as com, nanops, ops from pandas.core.accessor import CachedAccessor from pandas.core.aggregation import reconstruct_func, relabel_result, transform from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import extract_array from pandas.core.generic import NDFrame, _shared_docs from pandas.core.indexes import base as ibase from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.multi import MultiIndex, maybe_droplevels from pandas.core.indexes.period import PeriodIndex from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable from pandas.core.internals import BlockManager from pandas.core.internals.construction import ( arrays_to_mgr, dataclasses_to_dicts, get_names_from_index, init_dict, init_ndarray, masked_rec_array_to_mgr, reorder_arrays, sanitize_index, to_arrays, ) from pandas.core.reshape.melt import melt from pandas.core.series import Series from pandas.io.common import get_filepath_or_buffer from pandas.io.formats import console, format as fmt from pandas.io.formats.info import DataFrameInfo import pandas.plotting 
if TYPE_CHECKING:
    from pandas.core.groupby.generic import DataFrameGroupBy
    from pandas.io.formats.style import Styler

# ---------------------------------------------------------------------
# Docstring templates

_shared_doc_kwargs = dict(
    axes="index, columns",
    klass="DataFrame",
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
        If 0 or 'index': apply function to each column.
        If 1 or 'columns': apply function to each row.""",
    optional_by="""
        by : str or list of str
            Name or list of names to sort by.

            - if `axis` is 0 or `'index'` then `by` may contain index
              levels and/or column labels.
            - if `axis` is 1 or `'columns'` then `by` may contain column
              levels and/or index labels.""",
    optional_labels="""labels : array-like, optional
        New labels / index to conform the axis specified by 'axis' to.""",
    optional_axis="""axis : int or str, optional
        Axis to target. Can be either the axis name ('index', 'columns')
        or number (0, 1).""",
)

_numeric_only_doc = """numeric_only : boolean, default None
    Include only float, int, boolean data. If None, will attempt to use
    everything, then use only numeric data
"""

_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.

The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.

Parameters
----------%s
right : DataFrame or named Series
    Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
    Type of merge to be performed.

    * left: use only keys from left frame, similar to a SQL left outer join;
      preserve key order.
    * right: use only keys from right frame, similar to a SQL right outer join;
      preserve key order.
    * outer: use union of keys from both frames, similar to a SQL full outer
      join; sort keys lexicographically.
    * inner: use intersection of keys from both frames, similar to a SQL inner
      join; preserve the order of the left keys.
on : label or list
    Column or index level names to join on. These must be found in both
    DataFrames. If `on` is None and not merging on indexes then this defaults
    to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
    Column or index level names to join on in the left DataFrame. Can also
    be an array or list of arrays of the length of the left DataFrame.
    These arrays are treated as if they are columns.
right_on : label or list, or array-like
    Column or index level names to join on in the right DataFrame. Can also
    be an array or list of arrays of the length of the right DataFrame.
    These arrays are treated as if they are columns.
left_index : bool, default False
    Use the index from the left DataFrame as the join key(s). If it is a
    MultiIndex, the number of keys in the other DataFrame (either the index
    or a number of columns) must match the number of levels.
right_index : bool, default False
    Use the index from the right DataFrame as the join key. Same caveats as
    left_index.
sort : bool, default False
    Sort the join keys lexicographically in the result DataFrame. If False,
    the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
    A length-2 sequence where each element is optionally a string
    indicating the suffix to add to overlapping column names in
    `left` and `right` respectively.
Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False If True, adds a column to the output DataFrame called "_merge" with information on the source of each row. The column can be given a different name by providing a string argument. The column will have a Categorical type with the value of "left_only" for observations whose merge key only appears in the left DataFrame, "right_only" for observations whose merge key only appears in the right DataFrame, and "both" if the observation's merge key is found in both DataFrames. validate : str, optional If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- merge_ordered : Merge with optional filling/interpolation. merge_asof : Merge on nearest keys. DataFrame.join : Similar method using indices. Notes ----- Support for specifying index levels as the `on`, `left_on`, and `right_on` parameters was added in version 0.23.0 Support for merging named Series objects was added in version 0.24.0 Examples -------- >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}) >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> df1.merge(df2, left_on='lkey', right_on='rkey') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2 with specified left and right suffixes appended to any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', ... suffixes=('_left', '_right')) lkey value_left rkey value_right 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2, but raise an exception if the DataFrames have any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) Traceback (most recent call last): ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') """ # ----------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects. If data is a dict, column order follows insertion-order. .. versionchanged:: 0.25.0 If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. 
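# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] A minimal usage example
# for DataFrame.merge as described by ``_merge_doc`` above. The frames, key
# columns and the ``validate`` rule are made-up illustration data.
# ---------------------------------------------------------------------------
import pandas as pd

left = pd.DataFrame({"lkey": ["foo", "bar", "baz"], "value": [1, 2, 3]})
right = pd.DataFrame({"rkey": ["foo", "bar", "qux"], "value": [5, 6, 7]})

# Inner join on differently named keys; the overlapping "value" columns get
# the default "_x"/"_y" suffixes.
inner = left.merge(right, left_on="lkey", right_on="rkey")

# Outer join keeps unmatched keys from both sides; indicator=True adds a
# "_merge" column recording where each row came from.
outer = left.merge(
    right, left_on="lkey", right_on="rkey", how="outer", indicator=True
)

# validate="1:1" raises MergeError if the keys are not unique on both sides.
checked = left.merge(right, left_on="lkey", right_on="rkey", validate="1:1")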
Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input. See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" @property def _constructor(self) -> Type[DataFrame]: return DataFrame _constructor_sliced: Type[Series] = Series _deprecations: FrozenSet[str] = NDFrame._deprecations | frozenset([]) _accessors: Set[str] = {"sparse"} @property def _constructor_expanddim(self): # GH#31549 raising NotImplementedError on a property causes trouble # for `inspect` def constructor(*args, **kwargs): raise NotImplementedError("Not supported for DataFrames!") return constructor # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Optional[Axes] = None, columns: Optional[Axes] = None, dtype: Optional[Dtype] = None, copy: bool = False, ): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if isinstance(data, BlockManager): if index is None and columns is None and dtype is None and copy is False: # GH#33357 fastpath NDFrame.__init__(self, data) return mgr = self._init_mgr( data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy ) elif isinstance(data, dict): mgr = init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns mgr = init_dict(data, index, columns, dtype=dtype) elif getattr(data, "name", None) is not None: mgr = init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) # For data is list-like, or Iterable (will consume into 
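# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] Examples of the
# constructor inputs handled by the ``__init__`` branches above (dict of
# Series, masked array, structured ndarray). All data values are made up.
# ---------------------------------------------------------------------------
import numpy as np
import numpy.ma as ma
import pandas as pd

# dict of Series: rows are aligned on the union of the Series indexes,
# introducing NaN where a label is missing.
s1 = pd.Series([1, 2], index=["a", "b"])
s2 = pd.Series([3, 4], index=["b", "c"])
aligned = pd.DataFrame({"s1": s1, "s2": s2})

# numpy MaskedArray: masked cells are filled with NaN after upcasting.
marr = ma.masked_array(
    np.arange(6, dtype=float).reshape(3, 2),
    mask=[[False, True], [False, False], [True, False]],
)
from_masked = pd.DataFrame(marr, columns=["x", "y"])

# structured ndarray: the dtype's field names become the columns.
rec = np.array([(1, 2.0), (3, 4.0)], dtype=[("x", "i4"), ("y", "f8")])
from_struct = pd.DataFrame(rec)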
list) elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)): if not isinstance(data, (abc.Sequence, ExtensionArray)): data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = get_names_from_index(data) elif isinstance(data[0], Categorical): index = ibase.default_index(len(data[0])) else: index = ibase.default_index(len(data)) mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = init_dict({}, index, columns, dtype=dtype) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if is_extension_array_dtype(dtype): values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, columns, dtype=None) else: # Attempt to coerce to a numpy array try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as err: exc = TypeError( "DataFrame constructor called with " f"incompatible data and dtype: {err}" ) raise exc from err if arr.ndim != 0: raise ValueError("DataFrame constructor not properly called!") values = cast_scalar_to_array( (len(index), len(columns)), data, dtype=dtype ) mgr = init_ndarray( values, index, columns, dtype=values.dtype, copy=False ) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- @property def axes(self) -> List[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] @property def shape(self) -> Tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) @property def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... 
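# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] Examples of the
# list-like and scalar constructor branches above (list of dicts,
# namedtuples, dataclasses, scalar broadcast). Data values are made up;
# the dataclass path needs pandas >= 1.1.
# ---------------------------------------------------------------------------
import collections
from dataclasses import dataclass

import pandas as pd

# list of dicts: columns follow first-seen key order, missing keys become NaN.
from_dicts = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4, "c": 5}])

# list of namedtuples: the fields name the columns when columns is None.
Point = collections.namedtuple("Point", ["x", "y"])
from_tuples = pd.DataFrame([Point(0, 1), Point(2, 3)])

# list of dataclass instances is converted via dataclasses_to_dicts.
@dataclass
class Row:
    a: int
    b: float

from_dataclasses = pd.DataFrame([Row(1, 2.0), Row(3, 4.0)])

# scalar data is broadcast, but then both index and columns are required.
from_scalar = pd.DataFrame(0.0, index=range(3), columns=["a", "b"])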
"B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: # Note: consolidates inplace return not self._is_mixed_type @property def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if self._data.any_extension_types: # TODO(EA2D) special case would be unnecessary with 2D EAs return False return len(self._data.blocks) == 1 # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if not (max_rows is None): # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(l) for l in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ buf = StringIO("") if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") max_colwidth = get_option("display.max_colwidth") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() else: width = None self.to_string( buf=buf, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, line_width=width, max_colwidth=max_colwidth, show_dimensions=show_dimensions, ) return buf.getvalue() def _repr_html_(self) -> Optional[str]: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. 
val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return "<pre>" + val + "</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", table_id=None, render_links=False, ) return formatter.to_html(notebook=True) else: return None @Substitution( header_type="bool or sequence", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column", ) @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string( self, buf: Optional[FilePathOrBuffer[str]] = None, columns: Optional[Sequence[str]] = None, col_space: Optional[int] = None, header: Union[bool, Sequence[str]] = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[fmt.FormattersType] = None, float_format: Optional[fmt.FloatFormatType] = None, sparsify: Optional[bool] = None, index_names: bool = True, justify: Optional[str] = None, max_rows: Optional[int] = None, min_rows: Optional[int] = None, max_cols: Optional[int] = None, show_dimensions: bool = False, decimal: str = ".", line_width: Optional[int] = None, max_colwidth: Optional[int] = None, encoding: Optional[str] = None, ) -> Optional[str]: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. .. versionadded:: 1.0.0 encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, line_width=line_width, ) return formatter.to_string(buf=buf, encoding=encoding) # ---------------------------------------------------------------------- @property def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. 
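# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] How the display options
# consulted by __repr__/_repr_html_ above interact with to_string() and
# .style. The frame is made up; .style rendering needs jinja2 installed.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(100, 12))

# Temporarily tighten the limits that __repr__ reads via get_option().
with pd.option_context("display.max_rows", 10, "display.max_columns", 5):
    print(df)                       # truncated repr

# Force the info view that _info_repr() switches to for large frames.
with pd.option_context("display.large_repr", "info"):
    print(df)

# to_string() goes through the same DataFrameFormatter but returns plain text.
text = df.head().to_string(float_format="{:.2f}".format, index=False)

# .style returns a Styler for HTML output (optional jinja2 dependency).
html = df.head().style.highlight_max(axis=0).render()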
content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ @Appender(_shared_docs["items"]) def items(self) -> Iterable[Tuple[Label, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) @Appender(_shared_docs["items"]) def iteritems(self) -> Iterable[Tuple[Label, Series]]: yield from self.items() def iterrows(self) -> Iterable[Tuple[Label, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k) yield k, s def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"): """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. On python versions < 3.7 regular tuples are returned for DataFrames with a large number of columns (>254). 
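# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] items() versus iterrows()
# as documented above, including the dtype caveat for row Series. The frame
# contents are made up.
# ---------------------------------------------------------------------------
import pandas as pd

df = pd.DataFrame({"int": [1, 2], "float": [1.5, 2.5]})

# items() yields (column label, Series) pairs and keeps each column's dtype.
for label, column in df.items():
    print(label, column.dtype)          # int64, float64

# iterrows() yields (index label, Series) pairs; the row Series is upcast to
# a common dtype, so the int column comes back as float here.
for idx, row in df.iterrows():
    print(idx, row.dtype)               # float64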
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other): """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
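# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] itertuples() usage as
# documented above; it preserves per-column dtypes and is generally faster
# than iterrows(). The frame is made up.
# ---------------------------------------------------------------------------
import pandas as pd

df = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])

# Named tuples give attribute access to the index and the column values.
for row in df.itertuples(name="Animal"):
    print(row.Index, row.num_legs + row.num_wings)

# index=False drops the Index field; name=None yields plain tuples instead.
plain = list(df.itertuples(index=False, name=None))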
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns ) elif isinstance(other, Series): return self._constructor_sliced(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return self._constructor_sliced(result, index=left.index) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.T.dot(np.transpose(other)).T # ---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... 
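# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] DataFrame.dot and the
# @ operator implemented above. Labels are aligned for Series/DataFrame
# operands, while a plain ndarray is only checked for shape. Data is made up.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])

product = df @ other                    # DataFrame @ DataFrame, aligned on labels
vec = df.dot(pd.Series([1, 1, 2, 1]))   # Series result of length 2

arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
same = df.dot(arr)                      # ndarray operand: shape check only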
columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == "columns": if columns is not None: raise ValueError("cannot use columns parameter with orient='columns'") else: # pragma: no cover raise ValueError("only recognize index or columns for orient") return cls(data, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype=None, copy: bool = False, na_value=lib.no_default ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ self._consolidate_inplace() result = self._mgr.as_array( transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value ) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def to_dict(self, orient="dict", into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. 
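# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] from_dict orientations
# and to_numpy() as documented above. The dict contents are made up;
# ``na_value`` needs pandas >= 1.1.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

data = {"row_1": [3, 2, 1], "row_2": [6, 5, 4]}

# orient="index" uses the dict keys as row labels; columns may then be named.
df = pd.DataFrame.from_dict(data, orient="index", columns=["a", "b", "c"])

# to_numpy() picks a common dtype across columns; na_value replaces missing
# values in the resulting array.
df.loc["row_3"] = [np.nan, 8, 9]
arr = df.to_numpy(dtype="float64", na_value=-1.0)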
The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, stacklevel=2, ) # GH16122 into_c = com.standardize_mapping(into) orient = orient.lower() # GH32515 if orient.startswith(("d", "l", "s", "r", "i")) and orient not in { "dict", "list", "series", "split", "records", "index", }: warnings.warn( "Using short name for 'orient' is deprecated. Only the " "options: ('dict', list, 'series', 'split', 'records', 'index') " "will be used in a future version. Use one of the above " "to silence this warning.", FutureWarning, ) if orient.startswith("d"): orient = "dict" elif orient.startswith("l"): orient = "list" elif orient.startswith("sp"): orient = "split" elif orient.startswith("s"): orient = "series" elif orient.startswith("r"): orient = "records" elif orient.startswith("i"): orient = "index" if orient == "dict": return into_c((k, v.to_dict(into)) for k, v in self.items()) elif orient == "list": return into_c((k, v.tolist()) for k, v in self.items()) elif orient == "split": return into_c( ( ("index", self.index.tolist()), ("columns", self.columns.tolist()), ( "data", [ list(map(com.maybe_box_datetimelike, t)) for t in self.itertuples(index=False, name=None) ], ), ) ) elif orient == "series": return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items()) elif orient == "records": columns = self.columns.tolist() rows = ( dict(zip(columns, row)) for row in self.itertuples(index=False, name=None) ) return [ into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items()) for row in rows ] elif orient == "index": if not self.index.is_unique: raise ValueError("DataFrame index must be unique for orient='index'.") return into_c( (t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None) ) else: raise ValueError(f"orient '{orient}' not understood") def to_gbq( self, destination_table, project_id=None, chunksize=None, reauth=False, if_exists="fail", auth_local_webserver=False, table_schema=None, location=None, progress_bar=True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. 
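# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] to_dict() orientations
# as documented above, plus a round trip through the constructor. The frame
# contents are made up.
# ---------------------------------------------------------------------------
from collections import OrderedDict

import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]}, index=["row1", "row2"])

# "split" keeps index, columns and data apart and round-trips cleanly.
split = df.to_dict(orient="split")
rebuilt = pd.DataFrame(**split)

# "records" drops the index; handy for JSON-style consumers.
records = df.to_dict(orient="records")

# ``into`` chooses the mapping class used for the result.
ordered = df.to_dict(into=OrderedDict)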
This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) @classmethod def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. 
Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... {'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) if columns is not None: columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float) arr_columns = ensure_index(arr_columns) if columns is not None: columns = ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if 
any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records( self, index=True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. 
copy copy copy ix_vals = list(map(np.array, zip(*self.index._values))) else: ix_vals = [self.index.values] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = f"level_{count}" count += 1 elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) @classmethod def _from_arrays( cls, arrays, columns, index, dtype: Optional[Dtype] = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. 
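# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] A to_records() /
# from_records() round trip as documented above, including a per-column
# dtype override. The frame and the index name are made up.
# ---------------------------------------------------------------------------
import pandas as pd

df = pd.DataFrame(
    {"A": [1, 2], "B": [0.5, 0.75]}, index=pd.Index(["a", "b"], name="I")
)

# Index + columns become fields of a numpy record array; column_dtypes can
# narrow individual columns.
rec = df.to_records(column_dtypes={"A": "int32"})

# from_records() reverses the trip and pulls the named index back out.
roundtrip = pd.DataFrame.from_records(rec, index="I")

# from_records() also accepts plain sequences of tuples with explicit columns.
rows = pd.DataFrame.from_records([(3, "a"), (2, "b")], columns=["col_1", "col_2"])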
Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) mgr = arrays_to_mgr( arrays, columns, index, columns, dtype=dtype, verify_integrity=verify_integrity, ) return cls(mgr) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_stata( self, path: FilePathOrBuffer, convert_dates: Optional[Dict[Label, str]] = None, write_index: bool = True, byteorder: Optional[str] = None, time_stamp: Optional[datetime.datetime] = None, data_label: Optional[str] = None, variable_labels: Optional[Dict[Label, str]] = None, version: Optional[int] = 114, convert_strl: Optional[Sequence[Label]] = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. .. versionchanged:: 1.0.0 Previously this was "fname" convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {114, 117, 118, 119, None}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. .. versionchanged:: 1.0.0 Added support for formats 118 and 119. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. compression : str or dict, default 'infer' For on-the-fly compression of the output dta. If string, specifies compression mode. If dict, value at key 'method' specifies compression mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and `fname` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). 
If dict and compression mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, other entries passed as additional compression options. .. versionadded:: 1.1.0 storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc., if using a URL that will be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error will be raised if providing this argument with a local path or a file-like buffer. See the fsspec and backend storage implementation docs for the set of allowed keys and values. .. versionadded:: 1.2.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # mypy: Name 'statawriter' already defined (possibly by an import) from pandas.io.stata import ( # type: ignore[no-redef] StataWriter117 as statawriter, ) else: # versions 118 and 119 # mypy: Name 'statawriter' already defined (possibly by an import) from pandas.io.stata import ( # type: ignore[no-redef] StataWriterUTF8 as statawriter, ) kwargs: Dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version # mypy: Too many arguments for "StataWriter" writer = statawriter( # type: ignore[call-arg] path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, **kwargs, ) writer.write_file() @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str or file-like object If a string, it will be used as Root Directory path. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) @doc( Series.to_markdown, klass=_shared_doc_kwargs["klass"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
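# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the pandas source] Writing and reading a
# Stata .dta file with the options documented above. This writes
# "animals.dta" to the working directory; the data and labels are made up.
# ---------------------------------------------------------------------------
import pandas as pd

df = pd.DataFrame(
    {
        "animal": ["falcon", "parrot"],
        "speed": [350.0, 18.0],
        "seen": pd.to_datetime(["2020-01-01", "2020-02-01"]),
    }
)

# Version 114 is the most widely readable; convert_dates picks the Stata
# date encoding ("td" = days), and variable_labels must stay <= 80 chars.
df.to_stata(
    "animals.dta",
    version=114,
    write_index=False,
    convert_dates={"seen": "td"},
    variable_labels={"speed": "Top speed"},
)

back = pd.read_stata("animals.dta")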
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+ """, ) def to_markdown( self, buf: Optional[Union[IO[str], str]] = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> Optional[str]: if "showindex" in kwargs: warnings.warn( "'showindex' is deprecated. Only 'index' will be used " "in a future version. Use 'index' to silence this warning.", FutureWarning, stacklevel=2, ) kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result ioargs = get_filepath_or_buffer(buf, mode=mode, storage_options=storage_options) assert not isinstance(ioargs.filepath_or_buffer, str) ioargs.filepath_or_buffer.writelines(result) if ioargs.should_close: ioargs.filepath_or_buffer.close() return None @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_parquet( self, path: FilePathOrBuffer[AnyStr], engine: str = "auto", compression: Optional[str] = "snappy", index: Optional[bool] = None, partition_cols: Optional[List[str]] = None, storage_options: StorageOptions = None, **kwargs, ) -> None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str or file-like object If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handler (e.g. via builtin open function) or io.BytesIO. The engine fastparquet does not accept file-like objects. .. versionchanged:: 1.0.0 Previously this was "fname" engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. .. versionadded:: 0.24.0 storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc., if using a URL that will be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error will be raised if providing this argument with a local path or a file-like buffer. See the fsspec and backend storage implementation docs for the set of allowed keys and values .. 
versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) @Substitution( header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.\n\n" " .. versionadded:: 0.25.0\n" " Ability to use str", ) @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False, encoding=None, ): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. 
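# Illustrative sketch (hypothetical data and file name): ``to_html`` as
# documented above returns a string when ``buf`` is None, or writes to a
# buffer/file otherwise; ``classes`` and ``render_links`` tweak the markup.
import pandas as pd

_pages = pd.DataFrame(
    {"name": ["pandas", "numpy"], "url": ["https://pandas.pydata.org", "https://numpy.org"]}
)
_html = _pages.to_html(classes="table table-striped", render_links=True, border=0)
assert _html.lstrip().startswith("<table")  # plain string result
with open("pages.html", "w", encoding="utf-8") as _fh:
    _pages.to_html(buf=_fh, index=False, na_rep="-")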
""" if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links, ) # TODO: a generic formatter wld b in DataFrameFormatter return formatter.to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, ) # ---------------------------------------------------------------------- @Substitution( klass="DataFrame", type_sub=" and columns", max_cols_sub=( """max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. By default, the setting in ``pandas.options.display.max_info_columns`` is used. """ ), examples_sub=( """ >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, ... "float_col": float_values}) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open("df_info.txt", "w", ... encoding="utf-8") as f: # doctest: +SKIP ... f.write(s) 260 The `memory_usage` parameter allows deep introspection mode, specially useful for big DataFrames and fine-tune memory optimization: >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) >>> df = pd.DataFrame({ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) ... 
}) >>> df.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 column_1 1000000 non-null object 1 column_2 1000000 non-null object 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB >>> df.info(memory_usage='deep') <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 column_1 1000000 non-null object 1 column_2 1000000 non-null object 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 165.9 MB""" ), see_also_sub=( """ DataFrame.describe: Generate descriptive statistics of DataFrame columns. DataFrame.memory_usage: Memory usage of DataFrame columns.""" ), ) @doc(DataFrameInfo.info) def info( self, verbose: Optional[bool] = None, buf: Optional[IO[str]] = None, max_cols: Optional[int] = None, memory_usage: Optional[Union[bool, str]] = None, null_counts: Optional[bool] = None, ) -> None: return DataFrameInfo( self, verbose, buf, max_cols, memory_usage, null_counts ).info() def memory_usage(self, index=True, deep=False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.000000+0.000000j 1 True 1 1 1.0 1.000000+0.000000j 1 True 2 1 1.0 1.000000+0.000000j 1 True 3 1 1.0 1.000000+0.000000j 1 True 4 1 1.0 1.000000+0.000000j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5216 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, ) if index: result = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ).append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, dict()) # construct the args dtypes = list(self.dtypes) if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = self._constructor( dict(zip(self.index, new_values)), index=self.columns ) else: new_values = self.values.T if copy: new_values = new_values.copy() result = self._constructor( new_values, index=self.columns, columns=self.index ) return result.__finalize__(self, method="transpose") @property def T(self) -> DataFrame: return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: int = 0): """ Parameters ---------- i : int axis : int Notes ----- If slice passed, the resulting data will be a view. 
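# Illustrative sketch (hypothetical frames): the dtype rules implemented in
# ``transpose`` above -- a homogeneous extension dtype survives ``.T``, while
# mixed dtypes collapse to ``object`` and always copy.
import pandas as pd

_nullable = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, dtype="Int64")
assert (_nullable.T.dtypes == "Int64").all()   # single EA dtype is preserved

_mixed = pd.DataFrame({"x": [1, 2], "y": ["p", "q"]})
assert (_mixed.T.dtypes == object).all()       # mixed dtypes become object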
""" # irow if axis == 0: new_values = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_values, np.ndarray) and new_values.base is None result = self._constructor_sliced( new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype, ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] values = self._mgr.iget(i) result = self._box_col_values(values, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) """ return self._data.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). """ for i in range(len(self.columns)): yield self._get_column_array(i) def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key): # shortcut if the key is in columns if self.columns.is_unique and key in self.columns: if self.columns.nlevels > 1: return self._getitem_multilevel(key) return self._get_item_cache(key) # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) if indexer is not None: # either we have a slice or we have a string that can be converted # to a slice for partial-string date indexing return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): data = data[key] return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=3, ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." 
) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns ) result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False): """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine try: loc = engine.get_loc(index) return series._values[loc] except KeyError: # GH 20629 if self.index.nlevels > 1: # partial indexing forbidden raise # we cannot handle direct indexing # use positional col = self.columns.get_loc(col) index = self.index.get_loc(index) return self._get_value(index, col, takeable=True) def __setitem__(self, key, value): key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: # either we have a slice or we have a string that can be converted # to a slice for partial-string date indexing return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value): # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" 
) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.iloc._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError("Columns must be same length as key") for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: self.loc._ensure_listlike_indexer(key, axis=1) indexer = self.loc._get_listlike_indexer( key, axis=1, raise_missing=False )[1] self._check_setitem_copy() self.iloc._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict()) if key.size and not is_bool_dtype(key.values): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _iset_item(self, loc: int, value): self._ensure_valid_index(value) # technically _sanitize_column expects a label, not a position, # but the behavior is the same as long as we pass broadcast=False value = self._sanitize_column(loc, value, broadcast=False) NDFrame._iset_item(self, loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_value(self, index, col, value, takeable: bool = False): """ Put single value at passed column and index. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False """ try: if takeable is True: series = self._ixs(col, axis=1) series._set_value(index, value, takeable=True) return series = self._get_item_cache(col) engine = self.index._engine loc = engine.get_loc(index) validate_numeric_casting(series.dtype, value) series._values[loc] = value # Note: trying to use series._set_value breaks tests in # tests.frame.indexing.test_indexing and tests.indexing.test_partial except (KeyError, TypeError): # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) def _ensure_valid_index(self, value): """ Ensure that if we don't have an index, that we can create one from the passed value. 
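# Illustrative sketch (hypothetical data): the main ``__getitem__`` and
# ``__setitem__`` paths implemented above -- single key, list of keys, row
# slice, boolean Series mask, and boolean DataFrame mask.
import pandas as pd

_df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

_col = _df["a"]                # hashable key -> Series
_sub = _df[["a", "b"]]         # list of keys -> DataFrame
_rows = _df[0:2]               # slice -> positional row slice
_filtered = _df[_df["a"] > 1]  # boolean Series -> row filter

_df[_df > 4.0] = 0.0                 # boolean DataFrame -> elementwise replacement
_df.loc[_df["a"] == 1, "b"] = 99.0   # label-based setting, for comparison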
""" # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. name = self.columns[loc] klass = self._constructor_sliced return klass(values, index=self.index, name=name, fastpath=True) # ---------------------------------------------------------------------- # Unsorted def query(self, expr, inplace=False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2) would be referenced as `Area (cm^2)`). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. .. versionadded:: 0.25.0 Backtick quoting introduced. .. versionadded:: 1.0.0 Expanding functionality of backtick quoting for more than only spaces. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. 
The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) else: return result def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. 
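# Illustrative sketch (hypothetical data): ``query`` as documented above,
# combining an '@'-prefixed environment variable with a backtick-quoted
# column name; the default engine (numexpr when installed) is assumed.
import pandas as pd

demo = pd.DataFrame({"A": [1, 2, 3, 4], "B C": [10, 8, 6, 4]})
threshold = 2

result = demo.query("A > @threshold and `B C` < 8")
same = demo[(demo["A"] > threshold) & (demo["B C"] < 8)]
assert result.equals(same)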
See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") resolvers = kwargs.pop("resolvers", None) kwargs["level"] = kwargs.pop("level", 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation include = frozenset(infer_dtype_from_object(x) for x in include) exclude = frozenset(infer_dtype_from_object(x) for x in exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") # We raise when both include and exclude are empty # Hence, we can just shrink the columns we want to keep keep_these = np.full(self.shape[1], True) def extract_unique_dtypes_from_dtypes_set( dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray ) -> List[Dtype]: extracted_dtypes = [ unique_dtype for unique_dtype in unique_dtypes # error: Argument 1 to "tuple" has incompatible type # "FrozenSet[Union[ExtensionDtype, str, Any, Type[str], # Type[float], Type[int], Type[complex], Type[bool]]]"; # expected "Iterable[Union[type, Tuple[Any, ...]]]" if issubclass( unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type] ) ] return extracted_dtypes unique_dtypes = self.dtypes.unique() if include: included_dtypes = extract_unique_dtypes_from_dtypes_set( include, unique_dtypes ) keep_these &= self.dtypes.isin(included_dtypes) if exclude: excluded_dtypes = extract_unique_dtypes_from_dtypes_set( exclude, unique_dtypes ) keep_these &= ~self.dtypes.isin(excluded_dtypes) return self.iloc[:, keep_these.values] def insert(self, loc, column, value, allow_duplicates=False) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : int, Series, or array-like allow_duplicates : bool, optional """ if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. 
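# Illustrative sketch (hypothetical data): ``insert`` as implemented above
# places a column at an explicit position and mutates the frame in place;
# ``allow_duplicates=True`` opts into a repeated column label.
import pandas as pd

_frame = pd.DataFrame({"a": [1, 2], "c": [5, 6]})
_frame.insert(1, "b", [3, 4])                                   # columns: a, b, c
_frame.insert(0, "a", _frame["a"] * 10, allow_duplicates=True)  # duplicate label "a"
assert list(_frame.columns) == ["a", "a", "b", "c"]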
Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, key, value, broadcast=True): """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. Parameters ---------- key : object value : scalar, Series, or array-like broadcast : bool, default True If ``key`` matches multiple duplicate column names in the DataFrame, this parameter indicates whether ``value`` should be tiled so that the returned array contains a (duplicated) column for each occurrence of the key. If False, ``value`` will not be tiled. 
Returns ------- numpy.ndarray """ def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value._values.copy() else: # GH 4107 try: value = value.reindex(self.index)._values except ValueError as err: # raised in MultiIndex.from_tuples, see test_insert_error_msmgs if not value.index.is_unique: # duplicate axis raise err # other raise TypeError( "incompatible index of inserted column with frame index" ) from err return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, ExtensionArray): # Explicitly copy here, instead of in sanitize_index, # as sanitize_index won't copy an EA, even with copy=True value = value.copy() value = sanitize_index(value, self.index) elif isinstance(value, Index) or is_sequence(value): # turn me into an ndarray value = sanitize_index(value, self.index) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): value = value.copy(deep=True) else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = maybe_infer_to_datetimelike(value) else: # cast ignores pandas dtypes. so save the dtype first infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) # upcast if is_extension_array_dtype(infer_dtype): value = construct_1d_arraylike_from_scalar( value, len(self.index), infer_dtype ) else: value = cast_scalar_to_array(len(self.index), value) value = maybe_cast_to_datetime(value, infer_dtype) # return internal types directly if is_extension_array_dtype(value): return value # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } def lookup(self, row_labels, col_labels) -> np.ndarray: """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. .. deprecated:: 1.2.0 DataFrame.lookup is deprecated, use DataFrame.melt and DataFrame.loc instead. For an example see :meth:`~pandas.DataFrame.lookup` in the user guide. Parameters ---------- row_labels : sequence The row labels to use for lookup. col_labels : sequence The column labels to use for lookup. Returns ------- numpy.ndarray The found values. """ msg = ( "The 'lookup' method is deprecated and will be " "removed in a future version. " "You can use DataFrame.melt and DataFrame.loc " "as a substitute."
) warnings.warn(msg, FutureWarning, stacklevel=2) n = len(row_labels) if n != len(col_labels): raise ValueError("Row labels must have same size as column labels") if not (self.index.is_unique and self.columns.is_unique): # GH#33041 raise ValueError("DataFrame.lookup requires unique index and columns") thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError("One or more row labels was not found") if (cidx == -1).any(): raise KeyError("One or more column labels was not found") flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype="O") for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy, level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy, level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi(self, axes, copy, fill_value) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi( self.values, indexer, fill_value=fill_value ) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) @doc(NDFrame.align, **_shared_doc_kwargs) def align( self, other, join="outer", axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) @Appender( """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. >>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 Now, update the labels inplace. 
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True) >>> df i ii 0 1 4 1 2 5 2 3 6 """ ) @Substitution( **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) @Appender(NDFrame.set_axis.__doc__) def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): return super().set_axis(labels, axis=axis, inplace=inplace) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) @rewrite_axis_style_signature( "labels", [ ("method", None), ("copy", True), ("level", None), ("fill_value", np.nan), ("limit", None), ("tolerance", None), ], ) def reindex(self, *args, **kwargs) -> DataFrame: axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex") kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop("axis", None) kwargs.pop("labels", None) return super().reindex(**kwargs) def drop( self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors="raise", ): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If False, return a copy. Otherwise, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... 
[1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop( labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors, ) @rewrite_axis_style_signature( "mapper", [("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")], ) def rename( self, mapper: Optional[Renamer] = None, *, index: Optional[Renamer] = None, columns: Optional[Renamer] = None, axis: Optional[Axis] = None, copy: bool = True, inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", ) -> Optional[DataFrame]: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or function transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
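# Illustrative sketch (hypothetical data): the ``errors`` parameter of
# ``drop`` documented above -- unknown labels raise by default, while
# ``errors='ignore'`` drops whatever labels are actually present.
import pandas as pd

_df = pd.DataFrame({"A": [0, 4], "B": [1, 5], "C": [2, 6]})
try:
    _df.drop(columns=["B", "Z"])                  # 'Z' is missing -> KeyError
except KeyError:
    pass
_trimmed = _df.drop(columns=["B", "Z"], errors="ignore")  # skips 'Z' silently
assert list(_trimmed.columns) == ["A", "C"]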
Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ return super().rename( mapper=mapper, index=index, columns=columns, axis=axis, copy=copy, inplace=inplace, level=level, errors=errors, ) @doc(NDFrame.fillna, **_shared_doc_kwargs) def fillna( self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, ) -> Optional[DataFrame]: return super().fillna( value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, ) def pop(self, item: Label) -> Series: """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : label Label of column to be popped. Returns ------- Series Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN """ return super().pop(item=item) @doc(NDFrame.replace, **_shared_doc_kwargs) def replace( self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method="pad", ): return super().replace( to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method, ) def _replace_columnwise( self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex ): """ Dispatch to Series.replace column-wise. Parameters ---------- mapping : dict of the form {col: (target, value)} inplace : bool regex : bool or same types as `to_replace` in DataFrame.replace Returns ------- DataFrame or None """ # Operate column-wise res = self if inplace else self.copy() ax = self.columns for i in range(len(ax)): if ax[i] in mapping: ser = self.iloc[:, i] target, value = mapping[ax[i]] newobj = ser.replace(target, value, regex=regex) res.iloc[:, i] = newobj if inplace: return return res.__finalize__(self) @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> DataFrame: return super().shift( periods=periods, freq=freq, axis=axis, fill_value=fill_value ) def set_index( self, keys, drop=True, append=False, inplace=False, verify_integrity=False ): """ Set the DataFrame index using existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. 
Here, "array" encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of :class:`~collections.abc.Iterator`. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). verify_integrity : bool, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method. Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 Create a MultiIndex using an Index and a column: >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 Create a MultiIndex using two Series: >>> s = pd.Series([1, 2, 3, 4]) >>> df.set_index([s, s**2]) month year sale 1 1 1 2012 55 2 4 4 2014 40 3 9 7 2013 84 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, "inplace") self._check_inplace_and_allows_duplicate_labels(inplace) if not isinstance(keys, list): keys = [keys] err_msg = ( 'The parameter "keys" may be a column key, one-dimensional ' "array, or a list containing only valid column keys and " "one-dimensional arrays." ) missing: List[Label] = [] for col in keys: if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, "ndim", 1) != 1: raise ValueError(err_msg) else: # everything else gets tried as a key; see GH 24969 try: found = col in self.columns except TypeError as err: raise TypeError( f"{err_msg}. Received column of type {type(col)}" ) from err else: if not found: missing.append(col) if missing: raise KeyError(f"None of {missing} are in the columns") if inplace: frame = self else: frame = self.copy() arrays = [] names: List[Label] = [] if append: names = list(self.index.names) if isinstance(self.index, MultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: arrays.append(self.index) to_remove: List[Label] = [] for col in keys: if isinstance(col, MultiIndex): for n in range(col.nlevels): arrays.append(col._get_level_values(n)) names.extend(col.names) elif isinstance(col, (Index, Series)): # if Index then not MultiIndex (treated above) arrays.append(col) names.append(col.name) elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) elif isinstance(col, abc.Iterator): arrays.append(list(col)) names.append(None) # from here, col can only be a column label else: arrays.append(frame[col]._values) names.append(col) if drop: to_remove.append(col) if len(arrays[-1]) != len(self): # check newest element against length of calling frame, since # ensure_index_from_sequences would not raise for append=False. 
raise ValueError( f"Length mismatch: Expected {len(self)} rows, " f"received array of length {len(arrays[-1])}" ) index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() raise ValueError(f"Index has duplicate keys: {duplicates}") # use set to handle duplicate column names gracefully in case of drop for c in set(to_remove): del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index( self, level: Optional[Union[Hashable, Sequence[Hashable]]] = None, drop: bool = False, inplace: bool = False, col_level: Hashable = 0, col_fill: Label = "", ) -> Optional[DataFrame]: """ Reset the index, or a level of it. Reset the index of the DataFrame, and use the default one instead. If the DataFrame has a MultiIndex, this method can remove one or more levels. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame or None DataFrame with the new index or None if ``inplace=True``. See Also -------- DataFrame.set_index : Opposite of reset_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column, and a new sequential index is used: >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. 
We can place it in another level: >>> df.reset_index(level='class', col_level=1) speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, col_fill='genus') genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") self._check_inplace_and_allows_duplicate_labels(inplace) if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): values = index._values if not isinstance(index, (PeriodIndex, DatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's if mask.size > 0 and mask.all(): dtype = index.dtype fill_value = na_value_for_dtype(dtype) values = construct_1d_arraylike_from_scalar( fill_value, len(mask), dtype ) else: values = values.take(labels) # TODO(https://github.com/pandas-dev/pandas/issues/24206) # Push this into maybe_upcast_putmask? # We can't pass EAs there right now. Looks a bit # complicated. # So we unbox the ndarray_values, op, re-box. values_type = type(values) values_dtype = values.dtype if issubclass(values_type, DatetimeLikeArray): values = values._data # TODO: can we de-kludge yet? 
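                    # positions where the label lookup returned -1 are missing;
                    # upcast if necessary and fill them with NaN before
                    # re-wrapping datetime-like values below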
if mask.any(): values, _ = maybe_upcast_putmask(values, mask, np.nan) if issubclass(values_type, DatetimeLikeArray): values = values_type(values, dtype=values_dtype) return values new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: to_insert: Iterable[Tuple[Any, Optional[Any]]] if isinstance(self.index, MultiIndex): names = [ (n if n is not None else f"level_{i}") for i, n in enumerate(self.index.names) ] to_insert = zip(self.index.levels, self.index.codes) else: default = "index" if "index" not in self else "level_0" names = [default] if self.index.name is None else [self.index.name] to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue name = names[i] if multi_col: col_name = list(name) if isinstance(name, tuple) else [name] if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError( "col_fill=None is incompatible " f"with incomplete column name {name}" ) col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) new_obj.insert(0, name, level_values) new_obj.index = new_index if not inplace: return new_obj return None # ---------------------------------------------------------------------- # Reindex-based selection methods @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isna(self) -> DataFrame: result = self._constructor(self._data.isna(func=isna)) return result.__finalize__(self, method="isna") @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isnull(self) -> DataFrame: return self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notna(self) -> DataFrame: return ~self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notnull(self) -> DataFrame: return ~self.isna() def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False): """ Remove missing values. See the :ref:`User Guide <missing_data>` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. versionchanged:: 1.0.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.isna: Indicate missing values. 
DataFrame.notna : Indicate existing (non-missing) values. DataFrame.fillna : Replace missing values. Series.dropna : Drop missing values. Index.dropna : Drop missing indices. Examples -------- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), ... pd.NaT]}) >>> df name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'toy']) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep the DataFrame with valid entries in the same variable. >>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(axis, (tuple, list)): # GH20987 raise TypeError("supplying multiple axes to axis is no longer supported.") axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == "any": mask = count == len(agg_obj._get_axis(agg_axis)) elif how == "all": mask = count > 0 else: if how is not None: raise ValueError(f"invalid how option: {how}") else: raise TypeError("must specify how or thresh") result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) else: return result def drop_duplicates( self, subset: Optional[Union[Hashable, Sequence[Hashable]]] = None, keep: Union[str, bool] = "first", inplace: bool = False, ignore_index: bool = False, ) -> Optional[DataFrame]: """ Return DataFrame with duplicate rows removed. Considering certain columns is optional. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : bool, default False Whether to drop duplicates in place or to return a copy. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 1.0.0 Returns ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. See Also -------- DataFrame.value_counts: Count unique combinations of columns. Examples -------- Consider dataset containing ramen rating. >>> df = pd.DataFrame({ ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], ... 'rating': [4, 4, 3.5, 15, 5] ... 
}) >>> df brand style rating 0 Yum Yum cup 4.0 1 Yum Yum cup 4.0 2 Indomie cup 3.5 3 Indomie pack 15.0 4 Indomie pack 5.0 By default, it removes duplicate rows based on all columns. >>> df.drop_duplicates() brand style rating 0 Yum Yum cup 4.0 2 Indomie cup 3.5 3 Indomie pack 15.0 4 Indomie pack 5.0 To remove duplicates on specific column(s), use ``subset``. >>> df.drop_duplicates(subset=['brand']) brand style rating 0 Yum Yum cup 4.0 2 Indomie cup 3.5 To remove duplicates and keep last occurrences, use ``keep``. >>> df.drop_duplicates(subset=['brand', 'style'], keep='last') brand style rating 1 Yum Yum cup 4.0 2 Indomie cup 3.5 4 Indomie pack 5.0 """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, "inplace") duplicated = self.duplicated(subset, keep=keep) result = self[-duplicated] if ignore_index: result.index = ibase.default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated( self, subset: Optional[Union[Hashable, Sequence[Hashable]]] = None, keep: Union[str, bool] = "first", ) -> Series: """ Return boolean Series denoting duplicate rows. Considering certain columns is optional. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to mark. - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series Boolean series for each duplicated rows. See Also -------- Index.duplicated : Equivalent method on index. Series.duplicated : Equivalent method on Series. Series.drop_duplicates : Remove duplicate values from Series. DataFrame.drop_duplicates : Remove duplicate values from DataFrame. Examples -------- Consider dataset containing ramen rating. >>> df = pd.DataFrame({ ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], ... 'rating': [4, 4, 3.5, 15, 5] ... }) >>> df brand style rating 0 Yum Yum cup 4.0 1 Yum Yum cup 4.0 2 Indomie cup 3.5 3 Indomie pack 15.0 4 Indomie pack 5.0 By default, for each set of duplicated values, the first occurrence is set on False and all others on True. >>> df.duplicated() 0 False 1 True 2 False 3 False 4 False dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True. >>> df.duplicated(keep='last') 0 True 1 False 2 False 3 False 4 False dtype: bool By setting ``keep`` on False, all duplicates are True. >>> df.duplicated(keep=False) 0 True 1 True 2 False 3 False 4 False dtype: bool To find duplicates on specific column(s), use ``subset``. 
>>> df.duplicated(subset=['brand']) 0 False 1 True 2 False 3 True 4 True dtype: bool """ from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64 from pandas.core.sorting import get_group_index if self.empty: return self._constructor_sliced(dtype=bool) def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), SIZE_HINT_LIMIT) ) return labels.astype("i8", copy=False), len(shape) if subset is None: subset = self.columns elif ( not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns ): subset = (subset,) # needed for mypy since can't narrow types using np.iterable subset = cast(Iterable, subset) # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.items() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return self._constructor_sliced(duplicated_int64(ids, keep), index=self.index) # ---------------------------------------------------------------------- # Sorting # TODO: Just move the sort_values doc here. @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_values.__doc__) # error: Signature of "sort_values" incompatible with supertype "NDFrame" def sort_values( # type: ignore[override] self, by, axis=0, ascending=True, inplace=False, kind="quicksort", na_position="last", ignore_index=False, key: ValueKeyFunc = None, ): inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError( f"Length of ascending ({len(ascending)}) != length of by ({len(by)})" ) if len(by) > 1: from pandas.core.sorting import lexsort_indexer keys = [self._get_label_or_level_values(x, axis=axis) for x in by] # need to rewrap columns in Series to apply key function if key is not None: keys = [Series(k, name=name) for (k, name) in zip(keys, by)] indexer = lexsort_indexer( keys, orders=ascending, na_position=na_position, key=key ) indexer = ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort by = by[0] k = self._get_label_or_level_values(by, axis=axis) # need to rewrap column in Series to apply key function if key is not None: k = Series(k, name=by) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort( k, kind=kind, ascending=ascending, na_position=na_position, key=key ) new_data = self._mgr.take( indexer, axis=self._get_block_manager_axis(axis), verify=False ) if ignore_index: new_data.axes[1] = ibase.default_index(len(indexer)) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_values") def sort_index( self, axis=0, level=None, ascending: bool = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ): """ Sort object by labels (along an axis). Returns a new DataFrame sorted by label if `inplace` argument is ``False``, otherwise updates the original DataFrame and returns None. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis along which to sort. The value 0 identifies the rows, and 1 identifies the columns. 
level : int or level name or list of ints or list of level names If not None, sort on values in specified index level(s). ascending : bool or list of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 1.0.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. For MultiIndex inputs, the key is applied *per level*. .. versionadded:: 1.1.0 Returns ------- DataFrame The original DataFrame sorted by the labels. See Also -------- Series.sort_index : Sort Series by the index. DataFrame.sort_values : Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150], ... columns=['A']) >>> df.sort_index() A 1 4 29 2 100 1 150 5 234 3 By default, it sorts in ascending order, to sort in descending order, use ``ascending=False`` >>> df.sort_index(ascending=False) A 234 3 150 5 100 1 29 2 1 4 A key function can be specified which is applied to the index before sorting. For a ``MultiIndex`` this is applied to each level separately. >>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd']) >>> df.sort_index(key=lambda x: x.str.lower()) a A 1 b 2 C 3 d 4 """ return super().sort_index( axis, level, ascending, inplace, kind, na_position, sort_remaining, ignore_index, key, ) def value_counts( self, subset: Optional[Sequence[Label]] = None, normalize: bool = False, sort: bool = True, ascending: bool = False, ): """ Return a Series containing counts of unique rows in the DataFrame. .. versionadded:: 1.1.0 Parameters ---------- subset : list-like, optional Columns to use when counting unique combinations. normalize : bool, default False Return proportions rather than frequencies. sort : bool, default True Sort by frequencies. ascending : bool, default False Sort in ascending order. Returns ------- Series See Also -------- Series.value_counts: Equivalent method on Series. Notes ----- The returned Series will have a MultiIndex with one level per input column. By default, rows that contain any NA values are omitted from the result. By default, the resulting Series will be in descending order so that the first element is the most frequently-occurring row. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6], ... 'num_wings': [2, 0, 0, 0]}, ... 
index=['falcon', 'dog', 'cat', 'ant']) >>> df num_legs num_wings falcon 2 2 dog 4 0 cat 4 0 ant 6 0 >>> df.value_counts() num_legs num_wings 4 0 2 6 0 1 2 2 1 dtype: int64 >>> df.value_counts(sort=False) num_legs num_wings 2 2 1 4 0 2 6 0 1 dtype: int64 >>> df.value_counts(ascending=True) num_legs num_wings 2 2 1 6 0 1 4 0 2 dtype: int64 >>> df.value_counts(normalize=True) num_legs num_wings 4 0 0.50 6 0 0.25 2 2 0.25 dtype: float64 """ if subset is None: subset = self.columns.tolist() counts = self.groupby(subset).grouper.size() if sort: counts = counts.sort_values(ascending=ascending) if normalize: counts /= counts.sum() # Force MultiIndex for single column if len(subset) == 1: counts.index = MultiIndex.from_arrays( [counts.index], names=[counts.index.name] ) return counts def nlargest(self, n, columns, keep="first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". 
>>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() def nsmallest(self, n, columns, keep="first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 337000, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 337000 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "population". >>> df.nsmallest(3, 'population') population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Iceland 337000 17036 IS When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 337000 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Iceland 337000 17036 IS Nauru 337000 182 NR To order by the smallest values in column "population" and then "GDP", we can specify multiple columns like in the next example. 
>>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Nauru 337000 182 NR """ return algorithms.SelectNFrame( self, n=n, keep=keep, columns=columns ).nsmallest() def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame: """ Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. Returns ------- DataFrame """ result = self.copy() axis = self._get_axis_number(axis) if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError("Can only swap levels on a hierarchical axis.") if axis == 0: assert isinstance(result.index, MultiIndex) result.index = result.index.swaplevel(i, j) else: assert isinstance(result.columns, MultiIndex) result.columns = result.columns.swaplevel(i, j) return result def reorder_levels(self, order, axis=0) -> DataFrame: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : {0 or 'index', 1 or 'columns'}, default 0 Where to reorder levels. Returns ------- DataFrame """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError("Can only reorder levels on a hierarchical axis.") result = self.copy() if axis == 0: assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) else: assert isinstance(result.columns, MultiIndex) result.columns = result.columns.reorder_levels(order) return result # ---------------------------------------------------------------------- # Arithmetic / combination related def _combine_frame(self, other: DataFrame, func, fill_value=None): # at this point we have `self._indexed_same(other)` if fill_value is None: # since _arith_op may be called in a loop, avoid function call # overhead if possible by doing this check once _arith_op = func else: def _arith_op(left, right): # for the mixed_type case where we iterate over columns, # _arith_op(left, right) is equivalent to # left._binop(right, func, fill_value=fill_value) left, right = ops.fill_binop(left, right, fill_value) return func(left, right) new_data = ops.dispatch_to_series(self, other, _arith_op) return new_data def _construct_result(self, result) -> DataFrame: """ Wrap the result of an arithmetic, comparison, or logical operation. Parameters ---------- result : DataFrame Returns ------- DataFrame """ out = self._constructor(result, copy=False) # Pin columns instead of passing to constructor for compat with # non-unique columns case out.columns = self.columns out.index = self.index return out @Appender( """ Returns ------- DataFrame DataFrame that shows the differences stacked side by side. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. Raises ------ ValueError When the two DataFrames don't have identical labels or shape. See Also -------- Series.compare : Compare with another Series and show differences. DataFrame.equals : Test whether two objects contain the same elements. Notes ----- Matching NaNs will not appear as a difference. Can only compare identically-labeled (i.e. 
same shape, identical row and column labels) DataFrames Examples -------- >>> df = pd.DataFrame( ... { ... "col1": ["a", "a", "b", "b", "a"], ... "col2": [1.0, 2.0, 3.0, np.nan, 5.0], ... "col3": [1.0, 2.0, 3.0, 4.0, 5.0] ... }, ... columns=["col1", "col2", "col3"], ... ) >>> df col1 col2 col3 0 a 1.0 1.0 1 a 2.0 2.0 2 b 3.0 3.0 3 b NaN 4.0 4 a 5.0 5.0 >>> df2 = df.copy() >>> df2.loc[0, 'col1'] = 'c' >>> df2.loc[2, 'col3'] = 4.0 >>> df2 col1 col2 col3 0 c 1.0 1.0 1 a 2.0 2.0 2 b 3.0 4.0 3 b NaN 4.0 4 a 5.0 5.0 Align the differences on columns >>> df.compare(df2) col1 col3 self other self other 0 a c NaN NaN 2 NaN NaN 3.0 4.0 Stack the differences on rows >>> df.compare(df2, align_axis=0) col1 col3 0 self a NaN other c NaN 2 self NaN 3.0 other NaN 4.0 Keep the equal values >>> df.compare(df2, keep_equal=True) col1 col3 self other self other 0 a c 1.0 1.0 2 b b 3.0 4.0 Keep all original rows and columns >>> df.compare(df2, keep_shape=True) col1 col2 col3 self other self other self other 0 a c NaN NaN NaN NaN 1 NaN NaN NaN NaN NaN NaN 2 NaN NaN NaN NaN 3.0 4.0 3 NaN NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN NaN Keep all original rows and columns and also all original values >>> df.compare(df2, keep_shape=True, keep_equal=True) col1 col2 col3 self other self other self other 0 a c 1.0 1.0 1.0 1.0 1 a a 2.0 2.0 2.0 2.0 2 b b 3.0 3.0 3.0 4.0 3 b b NaN NaN 4.0 4.0 4 a a 5.0 5.0 5.0 5.0 """ ) @Appender(_shared_docs["compare"] % _shared_doc_kwargs) def compare( self, other: DataFrame, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, ) -> DataFrame: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, ) def combine( self, other: DataFrame, func, fill_value=None, overwrite=True ) -> DataFrame: """ Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame The DataFrame to merge column-wise. func : function Function that takes two series as inputs and return a Series or a scalar. Used to merge the two dataframes column by columns. fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- DataFrame Combination of the provided DataFrames. See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method. Examples -------- Combine using a simple function that chooses the smaller column. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 >>> df1.combine(df2, take_smaller) A B 0 0 3 1 0 3 Example using a true element-wise combine function. >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. 
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed in dataframe. >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unnecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value if col not in self.columns: # If self DataFrame does not have col in other DataFrame, # try to promote series, which is all NaN, as other_dtype. new_dtype = other_dtype try: series = series.astype(new_dtype, copy=False) except ValueError: # e.g. new_dtype is integer types pass else: # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) if not is_dtype_equal(other_dtype, new_dtype): otherSeries = otherSeries.astype(new_dtype) arr = func(series, otherSeries) arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) def combine_first(self, other: DataFrame) -> DataFrame: """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. 
Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 # TODO: extract_array? if isinstance(arr, (Index, Series)): arr = arr._values if needs_i8_conversion(arr.dtype): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view("i8") return arr def combiner(x, y): mask = isna(x) # TODO: extract_array? if isinstance(mask, (Index, Series)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) def update( self, other, join="left", overwrite=True, filter_func=None, errors="ignore" ) -> None: """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged:: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 
'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != "left": # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ["ignore", "raise"]: raise ValueError("The parameter errors must be either 'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all="ignore"): mask = ~filter_func(this) | isna(that) else: if errors == "raise": mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unnecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Data reshaping @Appender( """ Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level="Type").mean() Max Speed Type Captive 210.0 Wild 185.0 We can also choose to include NA in group keys or not by setting `dropna` parameter, the default setting is `True`: >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] >>> df = pd.DataFrame(l, columns=["a", "b", "c"]) >>> df.groupby(by=["b"]).sum() a c b 1.0 2 3 2.0 2 5 >>> df.groupby(by=["b"], dropna=False).sum() a c b 1.0 2 3 2.0 2 5 NaN 1 4 >>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]] >>> df = pd.DataFrame(l, columns=["a", "b", "c"]) >>> df.groupby(by="a").sum() b c a a 13.0 13.0 b 12.3 123.0 >>> df.groupby(by="a", dropna=False).sum() b c a a 13.0 13.0 b 12.3 123.0 NaN 12.3 33.0 """ ) @Appender(_shared_docs["groupby"] % _shared_doc_kwargs) def groupby( self, by=None, axis=0, level=None, as_index: bool = True, sort: bool = True, group_keys: bool = True, squeeze: bool = no_default, observed: bool = False, dropna: bool = True, ) -> DataFrameGroupBy: from pandas.core.groupby.generic import DataFrameGroupBy if squeeze is not no_default: warnings.warn( ( "The `squeeze` parameter is deprecated and " "will be removed in a future version." 
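                        # NOTE: the adjacent string literals above are joined by
                        # implicit concatenation into a single warning message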
), FutureWarning, stacklevel=2, ) else: squeeze = False if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) return DataFrameGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, dropna=dropna, ) _shared_docs[ "pivot" ] = """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation, multiple values will result in a MultiIndex in the columns. See the :ref:`User Guide <reshaping>` for more on reshaping. Parameters ----------%s index : str or object or a list of str, optional Column to use to make new frame's index. If None, uses existing index. .. versionchanged:: 1.1.0 Also accept list of index names. columns : str or object or a list of str Column to use to make new frame's columns. .. versionchanged:: 1.1.0 Also accept list of columns names. values : str, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. Returns ------- DataFrame Returns reshaped DataFrame. Raises ------ ValueError: When there are any `index`, `columns` combinations with multiple values. `DataFrame.pivot_table` when you need to aggregate. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods. Examples -------- >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz') bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) baz zoo bar A B C A B C foo one 1 2 3 x y z two 4 5 6 q w t You could also assign a list of column names or a list of index names. >>> df = pd.DataFrame({ ... "lev1": [1, 1, 1, 2, 2, 2], ... "lev2": [1, 1, 2, 1, 1, 2], ... "lev3": [1, 2, 1, 2, 1, 2], ... "lev4": [1, 2, 3, 4, 5, 6], ... "values": [0, 1, 2, 3, 4, 5]}) >>> df lev1 lev2 lev3 lev4 values 0 1 1 1 1 0 1 1 1 2 2 1 2 1 2 1 3 2 3 2 1 2 4 3 4 2 1 1 5 4 5 2 2 2 6 5 >>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values") lev2 1 2 lev3 1 2 1 2 lev1 1 0.0 1.0 2.0 NaN 2 4.0 3.0 NaN 5.0 >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values") lev3 1 2 lev1 lev2 1 1 0.0 1.0 2 2.0 NaN 2 1 4.0 3.0 2 NaN 5.0 A ValueError is raised if there are any duplicates. >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 Notice that the first two rows are the same for our `index` and `columns` arguments. 
>>> df.pivot(index='foo', columns='bar', values='baz') Traceback (most recent call last): ... ValueError: Index contains duplicate entries, cannot reshape """ @Substitution("") @Appender(_shared_docs["pivot"]) def pivot(self, index=None, columns=None, values=None) -> DataFrame: from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs[ "pivot_table" ] = """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ----------%s values : column to aggregate, optional index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with (in the resulting pivot table, after aggregation). margins : bool, default False Add all row / columns (e.g. for subtotal / grand totals). dropna : bool, default True Do not include columns whose entries are all NaN. margins_name : str, default 'All' Name of the row / column that will contain the totals when margins is True. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionchanged:: 0.25.0 Returns ------- DataFrame An Excel style pivot table. See Also -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.melt: Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 We can also fill missing values using the `fill_value` parameter. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... 
columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 The next example aggregates by taking the mean across multiple columns. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': np.mean}) >>> table D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 foo large 2.000000 4.500000 small 2.333333 4.333333 We can also calculate multiple types of aggregations for any given value column. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E mean max mean min A C bar large 5.500000 9.0 7.500000 6.0 small 5.500000 9.0 8.500000 8.0 foo large 2.000000 5.0 4.500000 4.0 small 2.333333 6.0 4.333333 2.0 """ @Substitution("") @Appender(_shared_docs["pivot_table"]) def pivot_table( self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None, margins=False, dropna=True, margins_name="All", observed=False, ) -> DataFrame: from pandas.core.reshape.pivot import pivot_table return pivot_table( self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, ) def stack(self, level=-1, dropna=True): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. Parameters ---------- level : int, str, list, default -1 Level(s) to stack from the column axis onto the index axis, defined as one index or label, or a list of indices or labels. dropna : bool, default True Whether to drop rows in the resulting Frame/Series with missing values. Stacking a column level onto the index axis can create combinations of index and column values that are missing from the original dataframe. See Examples section. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack() cat weight 0 height 1 dog weight 2 height 3 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... 
columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack() height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN **Prescribing the level(s) to be stacked** The first parameter controls which level or levels are stacked: >>> df_multi_level_cols2.stack(0) kg m cat height NaN 2.0 weight 1.0 NaN dog height NaN 4.0 weight 3.0 NaN >>> df_multi_level_cols2.stack([0, 1]) cat height m 2.0 weight kg 1.0 dog height m 4.0 weight kg 3.0 dtype: float64 **Dropping missing values** >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]], ... index=['cat', 'dog'], ... columns=multicol2) Note that rows where all values are missing are dropped by default but this behaviour can be controlled via the dropna keyword parameter: >>> df_multi_level_cols3 weight height kg m cat NaN 1.0 dog 2.0 3.0 >>> df_multi_level_cols3.stack(dropna=False) height weight cat kg NaN NaN m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN >>> df_multi_level_cols3.stack(dropna=True) height weight cat m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN """ from pandas.core.reshape.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def explode( self, column: Union[str, Tuple], ignore_index: bool = False ) -> DataFrame: """ Transform each element of a list-like to a row, replicating index values. .. versionadded:: 0.25.0 Parameters ---------- column : str or tuple Column to explode. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- DataFrame Exploded lists to rows of the subset columns; index will be duplicated for these rows. Raises ------ ValueError : if columns of the frame are not unique. See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) index labels. DataFrame.melt : Unpivot a DataFrame from wide format to long format. Series.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of rows in the output will be non-deterministic when exploding sets. 
Examples -------- >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1}) >>> df A B 0 [1, 2, 3] 1 1 foo 1 2 [] 1 3 [3, 4] 1 >>> df.explode('A') A B 0 1 1 0 2 1 0 3 1 1 foo 1 2 NaN 1 3 3 1 3 4 1 """ if not (is_scalar(column) or isinstance(column, tuple)): raise ValueError("column must be a scalar") if not self.columns.is_unique: raise ValueError("columns must be unique") df = self.reset_index(drop=True) # TODO: use overload to refine return type of reset_index assert df is not None # needed for mypy result = df[column].explode() result = df.drop([column], axis=1).join(result) if ignore_index: result.index = ibase.default_index(len(result)) else: result.index = self.index.take(result.index) result = result.reindex(columns=self.columns, copy=False) return result def unstack(self, level=-1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). Parameters ---------- level : int, str, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name. fill_value : int, str or dict Replace NaN with this value if the unstack produces missing values. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... ('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) @Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt")) def melt( self, id_vars=None, value_vars=None, var_name=None, value_name="value", col_level=None, ignore_index=True, ) -> DataFrame: return melt( self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level, ignore_index=ignore_index, ) # ---------------------------------------------------------------------- # Time series-related @doc( Series.diff, klass="Dataframe", extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n " "Take difference over rows (0) or columns (1).\n", other_klass="Series", examples=dedent( """ Difference with previous row >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 
'c': [1, 4, 9, 16, 25, 36]}) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(axis=1) a b c 0 NaN 0.0 0.0 1 NaN -1.0 3.0 2 NaN -1.0 7.0 3 NaN -1.0 13.0 4 NaN 0.0 20.0 5 NaN 2.0 28.0 Difference with 3rd previous row >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN Overflow in input dtype >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8) >>> df.diff() a 0 NaN 1 255.0""" ), ) def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: bm_axis = self._get_block_manager_axis(axis) self._consolidate_inplace() if bm_axis == 0 and periods != 0: return self.T.diff(periods, axis=0).T new_data = self._mgr.diff(n=periods, axis=bm_axis) return self._constructor(new_data) # ---------------------------------------------------------------------- # Function application def _gotitem( self, key: Union[str, List[str]], ndim: int, subset: Optional[FrameOrSeriesUnion] = None, ) -> FrameOrSeriesUnion: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if subset is None: subset = self elif subset.ndim == 1: # is Series return subset # TODO: _shallow_copy(subset)? return subset[key] _agg_summary_and_see_also_doc = dedent( """ The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, `var`), where the default is to compute the aggregation of the flattened array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Perform any type of operations. DataFrame.transform : Perform transformation type operations. core.groupby.GroupBy : Perform operations over groups. core.resample.Resampler : Perform operations over resampled bins. core.window.Rolling : Perform operations over rolling window. core.window.Expanding : Perform operations over expanding window. core.window.ExponentialMovingWindow : Perform operation over exponential weighted window. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> df = pd.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) Aggregate these functions over the rows. >>> df.agg(['sum', 'min']) A B C sum 12.0 15.0 18.0 min 1.0 2.0 3.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN Aggregate different functions over the columns and rename the index of the resulting DataFrame. >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean)) A B C x 7.0 NaN NaN y NaN 2.0 NaN z NaN NaN 6.0 Aggregate over the columns. 
>>> df.agg("mean", axis="columns") 0 2.0 1 5.0 2 8.0 3 NaN dtype: float64 """ ) @doc( _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) relabeling, func, columns, order = reconstruct_func(func, **kwargs) result = None try: result, how = self._aggregate(func, axis=axis, *args, **kwargs) except TypeError as err: exc = TypeError( "DataFrame constructor called with " f"incompatible data and dtype: {err}" ) raise exc from err if result is None: return self.apply(func, axis=axis, args=args, **kwargs) if relabeling: # This is to keep the order to columns occurrence unchanged, and also # keep the order of new columns occurrence unchanged # For the return values of reconstruct_func, if relabeling is # False, columns and order will be None. assert columns is not None assert order is not None result_in_dict = relabel_result(result, func, columns, order) result = DataFrame(result_in_dict, index=columns) return result def _aggregate(self, arg, axis=0, *args, **kwargs): if axis == 1: # NDFrame.aggregate returns a tuple, and we need to transpose # only result result, how = self.T._aggregate(arg, *args, **kwargs) result = result.T if result is not None else result return result, how return super()._aggregate(arg, *args, **kwargs) agg = aggregate @doc( _shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame: result = transform(self, func, axis, *args, **kwargs) assert isinstance(result, DataFrame) return result def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. raw : bool, default False Determines if row or column is passed as a Series or ndarray object: * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. However if the apply function returns a Series these are expanded to columns. args : tuple Positional arguments to pass to `func` in addition to the array/series. 
**kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing ``result_type='expand'`` will expand list-like results to columns of a Dataframe >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply( self, func=func, axis=axis, raw=raw, result_type=result_type, args=args, kwds=kwds, ) return op.get_result() def applymap(self, func, na_action: Optional[str] = None) -> DataFrame: """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. na_action : {None, 'ignore'}, default None If ‘ignore’, propagate NaN values, without passing them to func. .. versionadded:: 1.2 Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Like Series.map, NA values can be ignored: >>> df_copy = df.copy() >>> df_copy.iloc[0, 0] = pd.NA >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore') 0 1 0 <NA> 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. >>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ if na_action not in {"ignore", None}: raise ValueError( f"na_action must be 'ignore' or None. 
Got {repr(na_action)}" ) ignore_na = na_action == "ignore" # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func, ignore_na=ignore_na) return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na) return self.apply(infer) # ---------------------------------------------------------------------- # Merging / joining methods def append( self, other, ignore_index=False, verify_integrity=False, sort=False ) -> DataFrame: """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. verify_integrity : bool, default False If True, raise ValueError on creating index with duplicates. sort : bool, default False Sort columns if the columns of `self` and `other` are not aligned. .. versionchanged:: 1.0.0 Changed to not sort by default. Returns ------- DataFrame See Also -------- concat : General function to concatenate DataFrame or Series objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... 
ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): if not ignore_index: raise TypeError("Can only append a dict if ignore_index=True") other = Series(other) if other.name is None and not ignore_index: raise TypeError( "Can only append a Series if ignore_index=True " "or if the Series has a name" ) index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) try: combined_columns = self.columns.append(idx_diff) except TypeError: combined_columns = self.columns.astype(object).append(idx_diff) other = ( other.reindex(combined_columns, copy=False) .to_frame() .T.infer_objects() .rename_axis(index.names, copy=False) ) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list): if not other: pass elif not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self, *other] else: to_concat = [self, other] return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort, ) def join( self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False ) -> DataFrame: """ Join columns of another DataFrame. Join columns with `other` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame. on : str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `other`, otherwise joins index-on-index. If multiple values given, the `other` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how : {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use calling frame's index (or column if on is specified) * right: use `other`'s index. * outer: form union of calling frame's index (or column if on is specified) with `other`'s index, and sort it. lexicographically. * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling's one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from right frame's overlapping columns. sort : bool, default False Order result DataFrame lexicographically by the join key. If False, the order of the join key depends on the join type (how keyword). Returns ------- DataFrame A dataframe containing columns from both the caller and `other`. See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. Notes ----- Parameters `on`, `lsuffix`, and `rsuffix` are not supported when passing a list of `DataFrame` objects. Support for specifying index levels as the `on` parameter was added in version 0.23.0. Examples -------- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], ... 
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) >>> df key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 4 K4 A4 5 K5 A5 >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}) >>> other key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> df.join(other, lsuffix='_caller', rsuffix='_other') key_caller A key_other B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 NaN NaN 4 K4 A4 NaN NaN 5 K5 A5 NaN NaN If we want to join using the key columns, we need to set key to be the index in both `df` and `other`. The joined DataFrame will have key as its index. >>> df.set_index('key').join(other.set_index('key')) A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 NaN K4 A4 NaN K5 A5 NaN Another option to join using the key columns is to use the `on` parameter. DataFrame.join always uses `other`'s index but we can use any column in `df`. This method preserves the original DataFrame's index in the result. >>> df.join(other.set_index('key'), on='key') key A B 0 K0 A0 B0 1 K1 A1 B1 2 K2 A2 B2 3 K3 A3 NaN 4 K4 A4 NaN 5 K5 A5 NaN """ return self._join_compat( other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort ) def _join_compat( self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False ): from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge if isinstance(other, Series): if other.name is None: raise ValueError("Other Series must have a name") other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge( self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort, ) else: if on is not None: raise ValueError( "Joining multiple DataFrames only supported for joining on index" ) frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) # join indexes only using concat if can_concat: if how == "left": res = concat( frames, axis=1, join="outer", verify_integrity=True, sort=sort ) return res.reindex(self.index, copy=False) else: return concat( frames, axis=1, join=how, verify_integrity=True, sort=sort ) joined = frames[0] for frame in frames[1:]: joined = merge( joined, frame, how=how, left_index=True, right_index=True ) return joined @Substitution("") @Appender(_merge_doc, indents=2) def merge( self, right, how="inner", on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=("_x", "_y"), copy=True, indicator=False, validate=None, ) -> DataFrame: from pandas.core.reshape.merge import merge return merge( self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate, ) def round(self, decimals=0, *args, **kwargs) -> DataFrame: """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. *args Additional keywords have no effect but might be accepted for compatibility with numpy. **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. 
Returns ------- DataFrame A DataFrame with the affected columns rounded to the specified number of decimal places. See Also -------- numpy.around : Round a numpy array to the given number of decimals. Series.round : Round a Series to the given number of decimals. Examples -------- >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], ... columns=['dogs', 'cats']) >>> df dogs cats 0 0.21 0.32 1 0.01 0.67 2 0.66 0.03 3 0.21 0.18 By providing an integer each column is rounded to the same number of decimal places >>> df.round(1) dogs cats 0 0.2 0.3 1 0.0 0.7 2 0.7 0.0 3 0.2 0.2 With a dict, the number of places for specific columns can be specified with the column names as key and the number of decimal places as value >>> df.round({'dogs': 1, 'cats': 0}) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 Using a Series, the number of places for specific columns can be specified with the column names as index and the number of decimal places as value >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) >>> df.round(decimals) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 """ from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.items(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(s, decimals): if is_integer_dtype(s) or is_float_dtype(s): return s.round(decimals) return s nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series): if not decimals.index.is_unique: raise ValueError("Index of decimals must be unique") new_cols = list(_dict_round(self, decimals)) elif is_integer(decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.items()] else: raise TypeError("decimals must be an integer, a dict-like or a Series") if len(new_cols) > 0: return self._constructor( concat(new_cols, axis=1), index=self.index, columns=self.columns ) else: return self # ---------------------------------------------------------------------- # Statistical methods, etc. def corr(self, method="pearson", min_periods=1) -> DataFrame: """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable Method of correlation: * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for Pearson and Spearman correlation. Returns ------- DataFrame Correlation matrix. See Also -------- DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Series.corr : Compute the correlation between two Series. Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... 
columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) dogs cats dogs 1.0 0.3 cats 0.3 1.0 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False) if method == "pearson": correl = libalgos.nancorr(mat, minp=min_periods) elif method == "spearman": correl = libalgos.nancorr_spearman(mat, minp=min_periods) elif method == "kendall" or callable(method): if min_periods is None: min_periods = 1 mat = mat.T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1.0 elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c else: raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) return self._constructor(correl, index=idx, columns=cols) def cov( self, min_periods: Optional[int] = None, ddof: Optional[int] = 1 ) -> DataFrame: """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance. core.window.Expanding.cov : Expanding sample covariance. core.window.Rolling.cov : Rolling sample covariance. Notes ----- Returns the covariance matrix of the DataFrame's time series. The covariance is normalized by N-ddof. For DataFrames that have Series that are missing data (assuming that data is `missing at random <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__) the returned covariance matrix will be an unbiased estimate of the variance and covariance between the member Series. However, for many applications this estimate may not be acceptable because the estimate covariance matrix is not guaranteed to be positive semi-definite. This could lead to estimate correlations having absolute values which are greater than one, and/or a non-invertible covariance matrix. See `Estimation of covariance matrices <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_ matrices>`__ for more details. Examples -------- >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... 
columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(1000, 5), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> df.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False) if notna(mat).all(): if min_periods is not None and min_periods > len(mat): base_cov = np.empty((mat.shape[1], mat.shape[1])) base_cov.fill(np.nan) else: base_cov = np.cov(mat.T, ddof=ddof) base_cov = base_cov.reshape((len(cols), len(cols))) else: base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods) return self._constructor(base_cov, index=idx, columns=cols) def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series: """ Compute pairwise correlation. Pairwise correlation is computed between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. Parameters ---------- other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable Method of correlation: * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. .. versionadded:: 0.24.0 Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr : Compute pairwise correlation of columns. 
""" axis = self._get_axis_number(axis) this = self._get_numeric_data() if isinstance(other, Series): return this.apply(lambda x: other.corr(x, method=method), axis=axis) other = other._get_numeric_data() left, right = this.align(other, join="inner", copy=False) if axis == 1: left = left.T right = right.T if method == "pearson": # mask missing values left = left + right * 0 right = right + left * 0 # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom elif method in ["kendall", "spearman"] or callable(method): def c(x): return nanops.nancorr(x[0], x[1], method=method) correl = self._constructor_sliced( map(c, zip(left.values.T, right.values.T)), index=left.columns ) else: raise ValueError( f"Invalid method {method} was passed, " "valid methods are: 'pearson', 'kendall', " "'spearman', or callable" ) if not drop: # Find non-matching labels along the given axis # and append missing correlations (GH 22375) raxis = 1 if axis == 0 else 0 result_index = this._get_axis(raxis).union(other._get_axis(raxis)) idx_diff = result_index.difference(correl.index) if len(idx_diff) > 0: correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff)) return correl # ---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): """ Count non-NA cells for each column or row. The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending on `pandas.options.mode.use_inf_as_na`) are considered NA. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are generated for each row. level : int or str, optional If the axis is a `MultiIndex` (hierarchical), count along a particular `level`, collapsing into a `DataFrame`. A `str` specifies the level name. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- Series or DataFrame For each column/row the number of non-NA/null entries. If `level` is specified returns a `DataFrame`. See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = pd.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... 
"Single": [False, True, True, True, False]}) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 Counts for each **row**: >>> df.count(axis='columns') 0 3 1 2 2 3 3 3 4 3 dtype: int64 Counts for one level of a `MultiIndex`: >>> df.set_index(["Person", "Single"]).count(level="Person") Age Person John 2 Lewis 1 Myla 1 """ axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = self._constructor_sliced(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type or frame._mgr.any_extension_types: # the or any_extension_types is really only hit for single- # column frames with an extension array result = notna(frame).sum(axis=axis) else: # GH13407 series_counts = notna(frame).sum(axis=axis) counts = series_counts.values result = self._constructor_sliced( counts, index=frame._get_agg_axis(axis) ) return result.astype("int64") def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError( f"Can only count levels on hierarchical {self._get_axis_name(axis)}." ) # Mask NaNs: Mask rows or columns where the index level is NaN, and all # values in the DataFrame that are NaN if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object values_mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes values_mask = notna(frame.values) index_mask = notna(count_axis.get_level_values(level=level)) if axis == 1: mask = index_mask & values_mask else: mask = index_mask.reshape(-1, 1) & values_mask if isinstance(level, str): level = count_axis._get_level_number(level) level_name = count_axis._names[level] level_index = count_axis.levels[level]._shallow_copy(name=level_name) level_codes = ensure_int64(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis) if axis == 1: result = self._constructor(counts, index=agg_axis, columns=level_index) else: result = self._constructor(counts, index=level_index, columns=agg_axis) return result def _reduce( self, op, name: str, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds, ): assert filter_type is None or filter_type == "bool", filter_type dtype_is_dt = np.array( [ is_datetime64_any_dtype(values.dtype) for values in self._iter_column_arrays() ], dtype=bool, ) if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any(): warnings.warn( "DataFrame.mean and DataFrame.median with numeric_only=None " "will include datetime64 and datetime64tz columns in a " "future version.", FutureWarning, stacklevel=3, ) cols = self.columns[~dtype_is_dt] self = self[cols] # TODO: Make other agg func handle axis=None properly axis = self._get_axis_number(axis) labels = self._get_agg_axis(axis) constructor = self._constructor assert axis in [0, 1] def func(values): if is_extension_array_dtype(values.dtype): return extract_array(values)._reduce(name, skipna=skipna, **kwds) else: return op(values, axis=axis, skipna=skipna, **kwds) def _get_data() -> DataFrame: if filter_type is None: data = 
self._get_numeric_data() elif filter_type == "bool": # GH#25101, GH#24434 data = self._get_bool_data() else: # pragma: no cover msg = ( f"Generating numeric_only data with filter_type {filter_type} " "not supported." ) raise NotImplementedError(msg) return data if numeric_only is not None: df = self if numeric_only is True: df = _get_data() if axis == 1: df = df.T axis = 0 out_dtype = "bool" if filter_type == "bool" else None def blk_func(values): if isinstance(values, ExtensionArray): return values._reduce(name, skipna=skipna, **kwds) else: return op(values, axis=1, skipna=skipna, **kwds) # After possibly _get_data and transposing, we are now in the # simple case where we can use BlockManager.reduce res = df._mgr.reduce(blk_func) out = df._constructor(res).iloc[0].rename(None) if out_dtype is not None: out = out.astype(out_dtype) if axis == 0 and is_object_dtype(out.dtype): out[:] = coerce_to_dtypes(out.values, df.dtypes) return out assert numeric_only is None if not self._is_homogeneous_type or self._mgr.any_extension_types: # try to avoid self.values call if filter_type is None and axis == 0 and len(self) > 0: # operate column-wise # numeric_only must be None here, as other cases caught above # require len(self) > 0 bc frame_apply messes up empty prod/sum # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series # we only end up here if we have not specified # numeric_only and yet we have tried a # column-by-column reduction, where we have mixed type. # So let's just do what we can from pandas.core.apply import frame_apply opa = frame_apply( self, func=func, result_type="expand", ignore_failures=True ) result = opa.get_result() if result.ndim == self.ndim: result = result.iloc[0].rename(None) return result data = self values = data.values try: result = func(values) except TypeError: # e.g. in nanops trying to convert strs to float data = _get_data() labels = data._get_agg_axis(axis) values = data.values with np.errstate(all="ignore"): result = func(values) if is_object_dtype(result.dtype): try: if filter_type is None: result = result.astype(np.float64) elif filter_type == "bool" and notna(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = coerce_to_dtypes(result, data.dtypes) if constructor is not None: result = self._constructor_sliced(result, index=labels) return result def nunique(self, axis=0, dropna=True) -> Series: """ Count distinct observations over requested axis. Return Series with number of distinct observations. Can ignore NaN values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts. Returns ------- Series See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64 """ return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis=0, skipna=True) -> Series: """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. 
skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin : Return index of the minimum element. Notes ----- This method is the DataFrame version of ``ndarray.argmin``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], ... 'co2_emissions': [37.2, 19.66, 1712]}, ... index=['Pork', 'Wheat Products', 'Beef']) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the minimum value in each column. >>> df.idxmin() consumption Pork co2_emissions Wheat Products dtype: object To return the index for the minimum value in each row, use ``axis="columns"``. >>> df.idxmin(axis="columns") Pork consumption Wheat Products co2_emissions Beef consumption dtype: object """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) # indices will always be np.ndarray since axis is not None and # values is a 2d array for DataFrame # error: Item "int" of "Union[int, Any]" has no attribute "__iter__" assert isinstance(indices, np.ndarray) # for mypy index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return self._constructor_sliced(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True) -> Series: """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of maxima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmax : Return index of the maximum element. Notes ----- This method is the DataFrame version of ``ndarray.argmax``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], ... 'co2_emissions': [37.2, 19.66, 1712]}, ... index=['Pork', 'Wheat Products', 'Beef']) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the maximum value in each column. >>> df.idxmax() consumption Wheat Products co2_emissions Beef dtype: object To return the index for the maximum value in each row, use ``axis="columns"``. >>> df.idxmax(axis="columns") Pork co2_emissions Wheat Products consumption Beef co2_emissions dtype: object """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) # indices will always be np.ndarray since axis is not None and # values is a 2d array for DataFrame # error: Item "int" of "Union[int, Any]" has no attribute "__iter__" assert isinstance(indices, np.ndarray) # for mypy index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return self._constructor_sliced(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num: int) -> Index: """ Let's be explicit about this. 
""" if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame: """ Get the mode(s) of each element along the selected axis. The mode of a set of values is the value that appears most often. It can be multiple values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to iterate over while searching for the mode: * 0 or 'index' : get mode of each column * 1 or 'columns' : get mode of each row. numeric_only : bool, default False If True, only apply to numeric columns. dropna : bool, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- DataFrame The modes of each column or row. See Also -------- Series.mode : Return the highest frequency value in a Series. Series.value_counts : Return the counts of values in a Series. Examples -------- >>> df = pd.DataFrame([('bird', 2, 2), ... ('mammal', 4, np.nan), ... ('arthropod', 8, 0), ... ('bird', 2, np.nan)], ... index=('falcon', 'horse', 'spider', 'ostrich'), ... columns=('species', 'legs', 'wings')) >>> df species legs wings falcon bird 2 2.0 horse mammal 4 NaN spider arthropod 8 0.0 ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings are both 0 and 2. The second row of species and legs contains ``NaN``, because they have only one mode, but the DataFrame has two rows. >>> df.mode() species legs wings 0 bird 2.0 0.0 1 NaN NaN 2.0 Setting ``dropna=False`` ``NaN`` values are considered and they can be the mode (like for wings). >>> df.mode(dropna=False) species legs wings 0 bird 2 NaN Setting ``numeric_only=True``, only the mode of numeric columns is computed, and columns of other types are ignored. >>> df.mode(numeric_only=True) legs wings 0 2.0 0.0 1 NaN 2.0 To compute the mode over columns and not rows, use the axis parameter: >>> df.mode(axis='columns', numeric_only=True) 0 1 falcon 2.0 NaN horse 4.0 NaN spider 0.0 8.0 ostrich 2.0 NaN """ data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode(dropna=dropna) return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"): """ Return values at the given quantile over requested axis. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value between 0 <= q <= 1, the quantile(s) to compute. axis : {0, 1, 'index', 'columns'}, default 0 Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also -------- core.window.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. 
Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), ... columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 Name: 0.1, dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. >>> df = pd.DataFrame({'A': [1, 2], ... 'B': [pd.Timestamp('2010'), ... pd.Timestamp('2011')], ... 'C': [pd.Timedelta('1 days'), ... pd.Timedelta('2 days')]}) >>> df.quantile(0.5, numeric_only=False) A 1.5 B 2010-07-02 12:00:00 C 1 days 12:00:00 Name: 0.5, dtype: object """ validate_percentile(q) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) is_transposed = axis == 1 if is_transposed: data = data.T if len(data.columns) == 0: # GH#23925 _get_numeric_data may have dropped all columns cols = Index([], name=self.columns.name) if is_list_like(q): return self._constructor([], index=q, columns=cols) return self._constructor_sliced([], index=cols, name=q, dtype=np.float64) result = data._mgr.quantile( qs=q, axis=1, interpolation=interpolation, transposed=is_transposed ) if result.ndim == 2: result = self._constructor(result) else: result = self._constructor_sliced(result, name=q) if is_transposed: result = result.T return result def to_timestamp( self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True ) -> DataFrame: """ Cast to DatetimeIndex of timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with DatetimeIndex """ new_obj = self.copy(deep=copy) axis_name = self._get_axis_name(axis) old_ax = getattr(self, axis_name) new_ax = old_ax.to_timestamp(freq=freq, how=how) setattr(new_obj, axis_name, new_ax) return new_obj def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame: """ Convert DataFrame from DatetimeIndex to PeriodIndex. Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with PeriodIndex """ new_obj = self.copy(deep=copy) axis_name = self._get_axis_name(axis) old_ax = getattr(self, axis_name) new_ax = old_ax.to_period(freq=freq) setattr(new_obj, axis_name, new_ax) return new_obj def isin(self, values) -> DataFrame: """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dict The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dict, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. See Also -------- DataFrame.eq: Equality test for DataFrame. Series.isin: Equivalent method on Series. 
Series.str.contains: Test if pattern or regex is contained within a string of a Series or Index. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True When ``values`` is a Series or DataFrame the index and column must match. Note that 'falcon' does not match based on the number of legs in df2. >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]}, ... index=['spider', 'falcon']) >>> df.isin(other) num_legs num_wings falcon True True dog False False """ if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) return concat( ( self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns) ), axis=1, ) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("cannot compute isin with a duplicate axis.") return self.eq(values.reindex_like(self), axis="index") elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("cannot compute isin with a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError( "only list-like or dict-like objects are allowed " "to be passed to DataFrame.isin(), " f"you passed a '{type(values).__name__}'" ) return self._constructor( algorithms.isin(self.values.ravel(), values).reshape(self.shape), self.index, self.columns, ) # ---------------------------------------------------------------------- # Add index and columns _AXIS_ORDERS = ["index", "columns"] _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = { **NDFrame._AXIS_TO_AXIS_NUMBER, 1: 1, "columns": 1, } _AXIS_REVERSED = True _AXIS_LEN = len(_AXIS_ORDERS) _info_axis_number = 1 _info_axis_name = "columns" index: Index = properties.AxisProperty( axis=1, doc="The index (row labels) of the DataFrame." ) columns: Index = properties.AxisProperty( axis=0, doc="The column labels of the DataFrame." ) @property def _AXIS_NUMBERS(self) -> Dict[str, int]: """.. deprecated:: 1.1.0""" super()._AXIS_NUMBERS return {"index": 0, "columns": 1} @property def _AXIS_NAMES(self) -> Dict[int, str]: """.. deprecated:: 1.1.0""" super()._AXIS_NAMES return {0: "index", 1: "columns"} # ---------------------------------------------------------------------- # Add plotting methods to DataFrame plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) hist = pandas.plotting.hist_frame boxplot = pandas.plotting.boxplot_frame sparse = CachedAccessor("sparse", SparseFrameAccessor) DataFrame._add_numeric_operations() ops.add_flex_arithmetic_methods(DataFrame) ops.add_special_arithmetic_methods(DataFrame) def _from_nested_dict(data): new_data = collections.defaultdict(dict) for index, s in data.items(): for col, v in s.items(): new_data[col][index] = v return new_data
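# Hedged usage sketch (an addition, not part of the pandas sources above): the
# module-level helper ``_from_nested_dict`` defined at the end of the file has
# no docstring, so this standalone snippet illustrates what it computes.  It
# transposes a nested mapping keyed as ``{index: {column: value}}`` into
# ``{column: {index: value}}``, the orientation the DataFrame constructor
# expects for dict-of-dict input.  The names ``nested`` and
# ``_from_nested_dict_sketch`` are illustrative only.
import collections

def _from_nested_dict_sketch(data):
    new_data = collections.defaultdict(dict)
    for index, s in data.items():
        for col, v in s.items():
            new_data[col][index] = v
    return new_data

nested = {"row1": {"a": 1, "b": 2}, "row2": {"a": 3, "b": 4}}
print(dict(_from_nested_dict_sketch(nested)))
# expected: {'a': {'row1': 1, 'row2': 3}, 'b': {'row1': 2, 'row2': 4}}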
bsd-3-clause
massmutual/scikit-learn
sklearn/manifold/setup.py
99
1243
import os from os.path import join import numpy from numpy.distutils.misc_util import Configuration from sklearn._build_utils import get_blas_info def configuration(parent_package="", top_path=None): config = Configuration("manifold", parent_package, top_path) libraries = [] if os.name == 'posix': libraries.append('m') config.add_extension("_utils", sources=["_utils.c"], include_dirs=[numpy.get_include()], libraries=libraries, extra_compile_args=["-O3"]) cblas_libs, blas_info = get_blas_info() eca = blas_info.pop('extra_compile_args', []) eca.append("-O4") config.add_extension("_barnes_hut_tsne", libraries=cblas_libs, sources=["_barnes_hut_tsne.c"], include_dirs=[join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])], extra_compile_args=eca, **blas_info) return config if __name__ == "__main__": from numpy.distutils.core import setup setup(**configuration().todict())
bsd-3-clause
mfjb/scikit-learn
examples/cluster/plot_lena_compress.py
271
2229
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Vector Quantization Example ========================================================= The classic image processing example, Lena, an 8-bit grayscale bit-depth, 512 x 512 sized image, is used here to illustrate how `k`-means is used for vector quantization. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import scipy as sp import matplotlib.pyplot as plt from sklearn import cluster n_clusters = 5 np.random.seed(0) try: lena = sp.lena() except AttributeError: # Newer versions of scipy have lena in misc from scipy import misc lena = misc.lena() X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4) k_means.fit(X) values = k_means.cluster_centers_.squeeze() labels = k_means.labels_ # create an array from labels and values lena_compressed = np.choose(labels, values) lena_compressed.shape = lena.shape vmin = lena.min() vmax = lena.max() # original lena plt.figure(1, figsize=(3, 2.2)) plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256) # compressed lena plt.figure(2, figsize=(3, 2.2)) plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # equal bins lena regular_values = np.linspace(0, 256, n_clusters + 1) regular_labels = np.searchsorted(regular_values, lena) - 1 regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean regular_lena = np.choose(regular_labels.ravel(), regular_values) regular_lena.shape = lena.shape plt.figure(3, figsize=(3, 2.2)) plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # histogram plt.figure(4, figsize=(3, 2.2)) plt.clf() plt.axes([.01, .01, .98, .98]) plt.hist(X, bins=256, color='.5', edgecolor='.5') plt.yticks(()) plt.xticks(regular_values) values = np.sort(values) for center_1, center_2 in zip(values[:-1], values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b') for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--') plt.show()
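# --- Editor's addition: a rough numerical comparison, not in the original example ---
# (assumes the arrays defined above)
# Vector quantization quality is often summarized by the mean squared error between
# the original image and its quantized reconstruction; k-means chooses the 5 levels
# that minimize exactly this quantity, so it should do at least as well as the
# equal-width bins used for ``regular_lena``.
mse_kmeans = np.mean((lena - lena_compressed) ** 2)
mse_regular = np.mean((lena - regular_lena) ** 2)
print("MSE, k-means levels   : %.2f" % mse_kmeans)
print("MSE, equal-width bins : %.2f" % mse_regular)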
bsd-3-clause
cactusbin/nyt
matplotlib/lib/matplotlib/rcsetup.py
4
31862
""" The rcsetup module contains the default values and the validation code for customization using matplotlib's rc settings. Each rc setting is assigned a default value and a function used to validate any attempted changes to that setting. The default values and validation functions are defined in the rcsetup module, and are used to construct the rcParams global object which stores the settings and is referenced throughout matplotlib. These default values should be consistent with the default matplotlibrc file that actually reflects the values given here. Any additions or deletions to the parameter set listed here should also be visited to the :file:`matplotlibrc.template` in matplotlib's root source directory. """ from __future__ import print_function import os import warnings from matplotlib.fontconfig_pattern import parse_fontconfig_pattern from matplotlib.colors import is_color_like #interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'qt4agg', # 'tkagg', 'wx', 'wxagg', 'cocoaagg', 'webagg'] # The capitalized forms are needed for ipython at present; this may # change for later versions. interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'MacOSX', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg', 'GTK3Cairo', 'GTK3Agg', 'WebAgg'] non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk', 'pdf', 'pgf', 'ps', 'svg', 'template'] all_backends = interactive_bk + non_interactive_bk class ValidateInStrings: def __init__(self, key, valid, ignorecase=False): 'valid is a list of legal strings' self.key = key self.ignorecase = ignorecase def func(s): if ignorecase: return s.lower() else: return s self.valid = dict([(func(k), k) for k in valid]) def __call__(self, s): if self.ignorecase: s = s.lower() if s in self.valid: return self.valid[s] raise ValueError('Unrecognized %s string "%s": valid strings are %s' % (self.key, s, self.valid.values())) def validate_any(s): return s def validate_path_exists(s): """If s is a path, return s, else False""" if os.path.exists(s): return s else: raise RuntimeError('"%s" should be a path but it does not exist' % s) def validate_bool(b): """Convert b to a boolean or raise""" if type(b) is str: b = b.lower() if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False else: raise ValueError('Could not convert "%s" to boolean' % b) def validate_bool_maybe_none(b): 'Convert b to a boolean or raise' if type(b) is str: b = b.lower() if b == 'none': return None if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False else: raise ValueError('Could not convert "%s" to boolean' % b) def validate_float(s): """convert s to float or raise""" try: return float(s) except ValueError: raise ValueError('Could not convert "%s" to float' % s) def validate_int(s): """convert s to int or raise""" try: return int(s) except ValueError: raise ValueError('Could not convert "%s" to int' % s) def validate_fonttype(s): """ confirm that this is a Postscript of PDF font type that we know how to convert to """ fonttypes = {'type3': 3, 'truetype': 42} try: fonttype = validate_int(s) except ValueError: if s.lower() in fonttypes.iterkeys(): return fonttypes[s.lower()] raise ValueError( 'Supported Postscript/PDF font types are %s' % fonttypes.keys()) else: if fonttype not in fonttypes.itervalues(): raise ValueError( 'Supported Postscript/PDF font types are %s' % fonttypes.values()) return fonttype #validate_backend = ValidateInStrings('backend', all_backends, 
ignorecase=True) _validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True) def validate_backend(s): if s.startswith('module://'): return s else: return _validate_standard_backends(s) validate_qt4 = ValidateInStrings('backend.qt4', ['PyQt4', 'PySide']) def validate_toolbar(s): validator = ValidateInStrings( 'toolbar', ['None', 'classic', 'toolbar2'], ignorecase=True) s = validator(s) if s.lower == 'classic': warnings.warn("'classic' Navigation Toolbar " "is deprecated in v1.2.x and will be " "removed in v1.3") return s def validate_maskedarray(v): # 2008/12/12: start warning; later, remove all traces of maskedarray try: if v == 'obsolete': return v except ValueError: pass warnings.warn('rcParams key "maskedarray" is obsolete and has no effect;\n' ' please delete it from your matplotlibrc file') class validate_nseq_float: def __init__(self, n): self.n = n def __call__(self, s): """return a seq of n floats or raise""" if type(s) is str: ss = s.split(',') if len(ss) != self.n: raise ValueError( 'You must supply exactly %d comma separated values' % self.n) try: return [float(val) for val in ss] except ValueError: raise ValueError('Could not convert all entries to floats') else: assert type(s) in (list, tuple) if len(s) != self.n: raise ValueError('You must supply exactly %d values' % self.n) return [float(val) for val in s] class validate_nseq_int: def __init__(self, n): self.n = n def __call__(self, s): """return a seq of n ints or raise""" if type(s) is str: ss = s.split(',') if len(ss) != self.n: raise ValueError( 'You must supply exactly %d comma separated values' % self.n) try: return [int(val) for val in ss] except ValueError: raise ValueError('Could not convert all entries to ints') else: assert type(s) in (list, tuple) if len(s) != self.n: raise ValueError('You must supply exactly %d values' % self.n) return [int(val) for val in s] def validate_color(s): 'return a valid color arg' try: if s.lower() == 'none': return 'None' except AttributeError: pass if is_color_like(s): return s stmp = '#' + s if is_color_like(stmp): return stmp # If it is still valid, it must be a tuple. colorarg = s msg = '' if s.find(',') >= 0: # get rid of grouping symbols stmp = ''.join([c for c in s if c.isdigit() or c == '.' 
or c == ',']) vals = stmp.split(',') if len(vals) != 3: msg = '\nColor tuples must be length 3' else: try: colorarg = [float(val) for val in vals] except ValueError: msg = '\nCould not convert all entries to floats' if not msg and is_color_like(colorarg): return colorarg raise ValueError('%s does not look like a color arg%s' % (s, msg)) def validate_colorlist(s): 'return a list of colorspecs' if type(s) is str: return [validate_color(c.strip()) for c in s.split(',')] else: assert type(s) in [list, tuple] return [validate_color(c) for c in s] def validate_stringlist(s): 'return a list' if type(s) in (str, unicode): return [v.strip() for v in s.split(',')] else: assert type(s) in [list, tuple] return [str(v) for v in s] validate_orientation = ValidateInStrings( 'orientation', ['landscape', 'portrait']) def validate_aspect(s): if s in ('auto', 'equal'): return s try: return float(s) except ValueError: raise ValueError('not a valid aspect specification') def validate_fontsize(s): if type(s) is str: s = s.lower() if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large', 'smaller', 'larger']: return s try: return float(s) except ValueError: raise ValueError('not a valid font size') def validate_font_properties(s): parse_fontconfig_pattern(s) return s validate_fontset = ValidateInStrings( 'fontset', ['cm', 'stix', 'stixsans', 'custom']) validate_mathtext_default = ValidateInStrings( 'default', "rm cal it tt sf bf default bb frak circled scr regular".split()) validate_verbose = ValidateInStrings( 'verbose', ['silent', 'helpful', 'debug', 'debug-annoying']) def deprecate_savefig_extension(value): warnings.warn("savefig.extension is deprecated. Use savefig.format " "instead. Will be removed in 1.4.x") return value def update_savefig_format(value): # The old savefig.extension could also have a value of "auto", but # the new savefig.format does not. We need to fix this here. 
value = str(value) if value == 'auto': value = 'png' return value validate_ps_papersize = ValidateInStrings( 'ps_papersize', ['auto', 'letter', 'legal', 'ledger', 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10', ], ignorecase=True) def validate_ps_distiller(s): if type(s) is str: s = s.lower() if s in ('none', None): return None elif s in ('false', False): return False elif s in ('ghostscript', 'xpdf'): return s else: raise ValueError('matplotlibrc ps.usedistiller must either be none, ' 'ghostscript or xpdf') validate_joinstyle = ValidateInStrings('joinstyle', ['miter', 'round', 'bevel'], ignorecase=True) validate_capstyle = ValidateInStrings('capstyle', ['butt', 'round', 'projecting'], ignorecase=True) validate_negative_linestyle = ValidateInStrings('negative_linestyle', ['solid', 'dashed'], ignorecase=True) def validate_negative_linestyle_legacy(s): try: res = validate_negative_linestyle(s) return res except ValueError: dashes = validate_nseq_float(2)(s) warnings.warn("Deprecated negative_linestyle specification; use " "'solid' or 'dashed'") return (0, dashes) # (offset, (solid, blank)) def validate_tkpythoninspect(s): # Introduced 2010/07/05 warnings.warn("tk.pythoninspect is obsolete, and has no effect") return validate_bool(s) validate_legend_loc = ValidateInStrings( 'legend_loc', ['best', 'upper right', 'upper left', 'lower left', 'lower right', 'right', 'center left', 'center right', 'lower center', 'upper center', 'center'], ignorecase=True) def deprecate_svg_embed_char_paths(value): warnings.warn("svg.embed_char_paths is deprecated. Use " "svg.fonttype instead.") validate_svg_fonttype = ValidateInStrings('fonttype', ['none', 'path', 'svgfont']) def validate_hinting(s): if s in (True, False): return s if s.lower() in ('auto', 'native', 'either', 'none'): return s.lower() raise ValueError("hinting should be 'auto', 'native', 'either' or 'none'") validate_pgf_texsystem = ValidateInStrings('pgf.texsystem', ['xelatex', 'lualatex', 'pdflatex']) validate_movie_writer = ValidateInStrings('animation.writer', ['ffmpeg', 'ffmpeg_file', 'avconv', 'avconv_file', 'mencoder', 'mencoder_file', 'imagemagick', 'imagemagick_file']) validate_movie_frame_fmt = ValidateInStrings('animation.frame_format', ['png', 'jpeg', 'tiff', 'raw', 'rgba']) def validate_bbox(s): if type(s) is str: s = s.lower() if s == 'tight': return s if s == 'standard': return None raise ValueError("bbox should be 'tight' or 'standard'") def validate_sketch(s): if s == 'None' or s is None: return None if isinstance(s, basestring): result = tuple([float(v.strip()) for v in s.split(',')]) elif isinstance(s, (list, tuple)): result = tuple([float(v) for v in s]) if len(result) != 3: raise ValueError("path.sketch must be a tuple (scale, length, randomness)") return result class ValidateInterval: """ Value must be in interval """ def __init__(self, vmin, vmax, closedmin=True, closedmax=True): self.vmin = vmin self.vmax = vmax self.cmin = closedmin self.cmax = closedmax def __call__(self, s): try: s = float(s) except: raise RuntimeError('Value must be a float; found "%s"' % s) if self.cmin and s < self.vmin: raise RuntimeError('Value must be >= %f; found "%f"' % (self.vmin, s)) elif not self.cmin and s <= self.vmin: raise RuntimeError('Value must be > %f; found "%f"' % (self.vmin, s)) if self.cmax and s > self.vmax: raise RuntimeError('Value must be <= %f; found "%f"' % (self.vmax, s)) elif not self.cmax and s >= self.vmax: raise RuntimeError('Value 
must be < %f; found "%f"' % (self.vmax, s)) return s # a map from key -> value, converter defaultParams = { 'backend': ['Agg', validate_backend], # agg is certainly # present 'backend_fallback': [True, validate_bool], # agg is certainly present 'backend.qt4': ['PyQt4', validate_qt4], 'webagg.port': [8988, validate_int], 'webagg.open_in_browser': [True, validate_bool], 'webagg.port_retries': [50, validate_int], 'toolbar': ['toolbar2', validate_toolbar], 'datapath': [None, validate_path_exists], # handled by # _get_data_path_cached 'interactive': [False, validate_bool], 'timezone': ['UTC', str], # the verbosity setting 'verbose.level': ['silent', validate_verbose], 'verbose.fileo': ['sys.stdout', str], # line props 'lines.linewidth': [1.0, validate_float], # line width in points 'lines.linestyle': ['-', str], # solid line 'lines.color': ['b', validate_color], # blue 'lines.marker': ['None', str], # black 'lines.markeredgewidth': [0.5, validate_float], 'lines.markersize': [6, validate_float], # markersize, in points 'lines.antialiased': [True, validate_bool], # antialised (no jaggies) 'lines.dash_joinstyle': ['round', validate_joinstyle], 'lines.solid_joinstyle': ['round', validate_joinstyle], 'lines.dash_capstyle': ['butt', validate_capstyle], 'lines.solid_capstyle': ['projecting', validate_capstyle], ## patch props 'patch.linewidth': [1.0, validate_float], # line width in points 'patch.edgecolor': ['k', validate_color], # black 'patch.facecolor': ['b', validate_color], # blue 'patch.antialiased': [True, validate_bool], # antialised (no jaggies) ## font props 'font.family': ['sans-serif', validate_stringlist], # used by text object 'font.style': ['normal', str], 'font.variant': ['normal', str], 'font.stretch': ['normal', str], 'font.weight': ['normal', str], 'font.size': [12, validate_float], # Base font size in points 'font.serif': [['Bitstream Vera Serif', 'DejaVu Serif', 'New Century Schoolbook', 'Century Schoolbook L', 'Utopia', 'ITC Bookman', 'Bookman', 'Nimbus Roman No9 L', 'Times New Roman', 'Times', 'Palatino', 'Charter', 'serif'], validate_stringlist], 'font.sans-serif': [['Bitstream Vera Sans', 'DejaVu Sans', 'Lucida Grande', 'Verdana', 'Geneva', 'Lucid', 'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'], validate_stringlist], 'font.cursive': [['Apple Chancery', 'Textile', 'Zapf Chancery', 'Sand', 'cursive'], validate_stringlist], 'font.fantasy': [['Comic Sans MS', 'Chicago', 'Charcoal', 'Impact' 'Western', 'fantasy'], validate_stringlist], 'font.monospace': [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono', 'Andale Mono', 'Nimbus Mono L', 'Courier New', 'Courier', 'Fixed', 'Terminal', 'monospace'], validate_stringlist], # text props 'text.color': ['k', validate_color], # black 'text.usetex': [False, validate_bool], 'text.latex.unicode': [False, validate_bool], 'text.latex.preamble': [[''], validate_stringlist], 'text.latex.preview': [False, validate_bool], 'text.dvipnghack': [None, validate_bool_maybe_none], 'text.hinting': [True, validate_hinting], 'text.hinting_factor': [8, validate_int], 'text.antialiased': [True, validate_bool], 'mathtext.cal': ['cursive', validate_font_properties], 'mathtext.rm': ['serif', validate_font_properties], 'mathtext.tt': ['monospace', validate_font_properties], 'mathtext.it': ['serif:italic', validate_font_properties], 'mathtext.bf': ['serif:bold', validate_font_properties], 'mathtext.sf': ['sans\-serif', validate_font_properties], 'mathtext.fontset': ['cm', validate_fontset], 'mathtext.default': ['it', validate_mathtext_default], 
'mathtext.fallback_to_cm': [True, validate_bool], 'image.aspect': ['equal', validate_aspect], # equal, auto, a number 'image.interpolation': ['bilinear', str], 'image.cmap': ['jet', str], # one of gray, jet, etc 'image.lut': [256, validate_int], # lookup table 'image.origin': ['upper', str], # lookup table 'image.resample': [False, validate_bool], 'contour.negative_linestyle': ['dashed', validate_negative_linestyle_legacy], # axes props 'axes.axisbelow': [False, validate_bool], 'axes.hold': [True, validate_bool], 'axes.facecolor': ['w', validate_color], # background color; white 'axes.edgecolor': ['k', validate_color], # edge color; black 'axes.linewidth': [1.0, validate_float], # edge linewidth 'axes.titlesize': ['large', validate_fontsize], # fontsize of the # axes title 'axes.grid': [False, validate_bool], # display grid or not 'axes.labelsize': ['medium', validate_fontsize], # fontsize of the # x any y labels 'axes.labelweight': ['normal', str], # fontsize of the x any y labels 'axes.labelcolor': ['k', validate_color], # color of axis label 'axes.formatter.limits': [[-7, 7], validate_nseq_int(2)], # use scientific notation if log10 # of the axis range is smaller than the # first or larger than the second 'axes.formatter.use_locale': [False, validate_bool], # Use the current locale to format ticks 'axes.formatter.use_mathtext': [False, validate_bool], 'axes.unicode_minus': [True, validate_bool], 'axes.color_cycle': [['b', 'g', 'r', 'c', 'm', 'y', 'k'], validate_colorlist], # cycle of plot # line colors 'axes.xmargin': [0, ValidateInterval(0, 1, closedmin=True, closedmax=True)], # margin added to xaxis 'axes.ymargin': [0, ValidateInterval(0, 1, closedmin=True, closedmax=True)],# margin added to yaxis 'polaraxes.grid': [True, validate_bool], # display polar grid or # not 'axes3d.grid': [True, validate_bool], # display 3d grid #legend properties 'legend.fancybox': [False, validate_bool], # at some point, legend.loc should be changed to 'best' 'legend.loc': ['upper right', validate_legend_loc], # this option is internally ignored - it never served any useful purpose 'legend.isaxes': [True, validate_bool], # the number of points in the legend line 'legend.numpoints': [2, validate_int], # the number of points in the legend line for scatter 'legend.scatterpoints': [3, validate_int], 'legend.fontsize': ['large', validate_fontsize], # the relative size of legend markers vs. original 'legend.markerscale': [1.0, validate_float], 'legend.shadow': [False, validate_bool], # whether or not to draw a frame around legend 'legend.frameon': [True, validate_bool], ## the following dimensions are in fraction of the font size 'legend.borderpad': [0.4, validate_float], # units are fontsize # the vertical space between the legend entries 'legend.labelspacing': [0.5, validate_float], # the length of the legend lines 'legend.handlelength': [2., validate_float], # the length of the legend lines 'legend.handleheight': [0.7, validate_float], # the space between the legend line and legend text 'legend.handletextpad': [.8, validate_float], # the border between the axes and legend edge 'legend.borderaxespad': [0.5, validate_float], # the border between the axes and legend edge 'legend.columnspacing': [2., validate_float], # the relative size of legend markers vs. 
original 'legend.markerscale': [1.0, validate_float], 'legend.shadow': [False, validate_bool], ## tick properties 'xtick.major.size': [4, validate_float], # major xtick size in points 'xtick.minor.size': [2, validate_float], # minor xtick size in points 'xtick.major.width': [0.5, validate_float], # major xtick width in points 'xtick.minor.width': [0.5, validate_float], # minor xtick width in points 'xtick.major.pad': [4, validate_float], # distance to label in points 'xtick.minor.pad': [4, validate_float], # distance to label in points 'xtick.color': ['k', validate_color], # color of the xtick labels # fontsize of the xtick labels 'xtick.labelsize': ['medium', validate_fontsize], 'xtick.direction': ['in', str], # direction of xticks 'ytick.major.size': [4, validate_float], # major ytick size in points 'ytick.minor.size': [2, validate_float], # minor ytick size in points 'ytick.major.width': [0.5, validate_float], # major ytick width in points 'ytick.minor.width': [0.5, validate_float], # minor ytick width in points 'ytick.major.pad': [4, validate_float], # distance to label in points 'ytick.minor.pad': [4, validate_float], # distance to label in points 'ytick.color': ['k', validate_color], # color of the ytick labels # fontsize of the ytick labels 'ytick.labelsize': ['medium', validate_fontsize], 'ytick.direction': ['in', str], # direction of yticks 'grid.color': ['k', validate_color], # grid color 'grid.linestyle': [':', str], # dotted 'grid.linewidth': [0.5, validate_float], # in points 'grid.alpha': [1.0, validate_float], ## figure props # figure size in inches: width by height 'figure.figsize': [[8.0, 6.0], validate_nseq_float(2)], 'figure.dpi': [80, validate_float], # DPI 'figure.facecolor': ['0.75', validate_color], # facecolor; scalar gray 'figure.edgecolor': ['w', validate_color], # edgecolor; white 'figure.frameon': [True, validate_bool], 'figure.autolayout': [False, validate_bool], 'figure.max_open_warning': [20, validate_int], 'figure.subplot.left': [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)], 'figure.subplot.right': [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)], 'figure.subplot.bottom': [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)], 'figure.subplot.top': [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)], 'figure.subplot.wspace': [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)], 'figure.subplot.hspace': [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)], ## Saving figure's properties 'savefig.dpi': [100, validate_float], # DPI 'savefig.facecolor': ['w', validate_color], # facecolor; white 'savefig.edgecolor': ['w', validate_color], # edgecolor; white 'savefig.frameon': [True, validate_bool], 'savefig.orientation': ['portrait', validate_orientation], # edgecolor; #white 'savefig.jpeg_quality': [95, validate_int], # what to add to extensionless filenames 'savefig.extension': ['png', deprecate_savefig_extension], # value checked by backend at runtime 'savefig.format': ['png', update_savefig_format], # options are 'tight', or 'standard'. 'standard' validates to None. 
'savefig.bbox': [None, validate_bbox], 'savefig.pad_inches': [0.1, validate_float], # default directory in savefig dialog box 'savefig.directory': ['~', unicode], # Maintain shell focus for TkAgg 'tk.window_focus': [False, validate_bool], 'tk.pythoninspect': [False, validate_tkpythoninspect], # obsolete # Set the papersize/type 'ps.papersize': ['letter', validate_ps_papersize], 'ps.useafm': [False, validate_bool], # Set PYTHONINSPECT # use ghostscript or xpdf to distill ps output 'ps.usedistiller': [False, validate_ps_distiller], 'ps.distiller.res': [6000, validate_int], # dpi 'ps.fonttype': [3, validate_fonttype], # 3 (Type3) or 42 (Truetype) # compression level from 0 to 9; 0 to disable 'pdf.compression': [6, validate_int], # ignore any color-setting commands from the frontend 'pdf.inheritcolor': [False, validate_bool], # use only the 14 PDF core fonts embedded in every PDF viewing application 'pdf.use14corefonts': [False, validate_bool], 'pdf.fonttype': [3, validate_fonttype], # 3 (Type3) or 42 (Truetype) 'pgf.debug': [False, validate_bool], # output debug information # choose latex application for creating pdf files (xelatex/lualatex) 'pgf.texsystem': ['xelatex', validate_pgf_texsystem], # use matplotlib rc settings for font configuration 'pgf.rcfonts': [True, validate_bool], # provide a custom preamble for the latex process 'pgf.preamble': [[''], validate_stringlist], # write raster image data directly into the svg file 'svg.image_inline': [True, validate_bool], # suppress scaling of raster data embedded in SVG 'svg.image_noscale': [False, validate_bool], # True to save all characters as paths in the SVG 'svg.embed_char_paths': [True, deprecate_svg_embed_char_paths], 'svg.fonttype': ['path', validate_svg_fonttype], # set this when you want to generate hardcopy docstring 'docstring.hardcopy': [False, validate_bool], # where plugin directory is locate 'plugins.directory': ['.matplotlib_plugins', str], 'path.simplify': [True, validate_bool], 'path.simplify_threshold': [1.0 / 9.0, ValidateInterval(0.0, 1.0)], 'path.snap': [True, validate_bool], 'path.sketch': [None, validate_sketch], 'path.effects': [[], validate_any], 'agg.path.chunksize': [0, validate_int], # 0 to disable chunking; # key-mappings (multi-character mappings should be a list/tuple) 'keymap.fullscreen': [('f', 'ctrl+f'), validate_stringlist], 'keymap.home': [['h', 'r', 'home'], validate_stringlist], 'keymap.back': [['left', 'c', 'backspace'], validate_stringlist], 'keymap.forward': [['right', 'v'], validate_stringlist], 'keymap.pan': ['p', validate_stringlist], 'keymap.zoom': ['o', validate_stringlist], 'keymap.save': [('s', 'ctrl+s'), validate_stringlist], 'keymap.quit': [('ctrl+w', 'cmd+w'), validate_stringlist], 'keymap.grid': ['g', validate_stringlist], 'keymap.yscale': ['l', validate_stringlist], 'keymap.xscale': [['k', 'L'], validate_stringlist], 'keymap.all_axes': ['a', validate_stringlist], # sample data 'examples.directory': ['', str], # Animation settings 'animation.writer': ['ffmpeg', validate_movie_writer], 'animation.codec': ['mpeg4', str], 'animation.bitrate': [-1, validate_int], # Controls image format when frames are written to disk 'animation.frame_format': ['png', validate_movie_frame_fmt], # Path to FFMPEG binary. If just binary name, subprocess uses $PATH. 'animation.ffmpeg_path': ['ffmpeg', str], ## Additional arguments for ffmpeg movie writer (using pipes) 'animation.ffmpeg_args': ['', validate_stringlist], # Path to AVConv binary. If just binary name, subprocess uses $PATH. 
'animation.avconv_path': ['avconv', str], # Additional arguments for avconv movie writer (using pipes) 'animation.avconv_args': ['', validate_stringlist], # Path to MENCODER binary. If just binary name, subprocess uses $PATH. 'animation.mencoder_path': ['mencoder', str], # Additional arguments for mencoder movie writer (using pipes) 'animation.mencoder_args': ['', validate_stringlist], # Path to convert binary. If just binary name, subprocess uses $PATH 'animation.convert_path': ['convert', str], # Additional arguments for mencoder movie writer (using pipes) 'animation.convert_args': ['', validate_stringlist]} if __name__ == '__main__': rc = defaultParams rc['datapath'][0] = '/' for key in rc: if not rc[key][1](rc[key][0]) == rc[key][0]: print("%s: %s != %s" % (key, rc[key][1](rc[key][0]), rc[key][0]))
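# --- Editor's addition: a small usage sketch, not part of matplotlib itself ---
# Every entry in ``defaultParams`` pairs a default value with a validator
# callable; rcParams funnels each assignment through that callable, which
# either coerces the input to a canonical form or raises.  For example
# (the helper name below is invented for illustration):
def _editor_demo_validators():
    assert validate_bool("on") is True                        # strings coerce to bool
    assert validate_nseq_float(2)("0.5, 1.5") == [0.5, 1.5]   # "a, b" -> [a, b]
    default, validator = defaultParams["lines.linewidth"]
    assert validator(default) == default                      # defaults self-validate
    return validate_fontsize("x-large")                       # named sizes pass through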
unlicense
ttthy1/2017sejongAI
week12/pretreatment_hw.py
1
1265
import numpy as np
from sklearn import preprocessing

# Define the sample data
input_data = np.array([[2.3, -3.5, 10.3],
                       [-2.1, 8.7, -1.6],
                       [7.9, 1.4, 3.1],
                       [3.7, -9.8, -5.4]])

# Binarize the data
data_binarized = preprocessing.Binarizer(threshold=2.1).transform(input_data)
print("\nBinarized data : \n", data_binarized)

# Print the mean and standard deviation
print("\n BEFORE : ")
print("Mean = ", input_data.mean(axis=0))
print("Std deviation = ", input_data.std(axis=0))

# Mean removal
data_scaled = preprocessing.scale(input_data)
print("\n AFTER : ")
print("Mean = ", data_scaled.mean(axis=0))
print("Std deviation = ", data_scaled.std(axis=0))

# Scaling
# Min/max scaling
data_scaler_minmax = preprocessing.MinMaxScaler(feature_range=(0, 1))
data_scaled_minmax = data_scaler_minmax.fit_transform(input_data)
print("\nMin max scaled data : \n", data_scaled_minmax)

# Normalization
# Normalize the data
data_normalized_l1 = preprocessing.normalize(input_data, norm='l1')
data_normalized_l2 = preprocessing.normalize(input_data, norm='l2')
print("\nL1 normalized data : \n", data_normalized_l1)
print("\nL2 normalized data : \n", data_normalized_l2)
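# --- Editor's addition: a quick sanity check of what "normalize" means ---
# (a sketch; reuses the arrays computed above)
# L1 normalization rescales each row so its absolute values sum to 1;
# L2 normalization rescales each row to unit Euclidean length.
print("\nL1 row sums  :", np.abs(data_normalized_l1).sum(axis=1))          # all ~1.0
print("L2 row norms :", np.sqrt((data_normalized_l2 ** 2).sum(axis=1)))    # all ~1.0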
gpl-3.0
mxjl620/scikit-learn
examples/cluster/plot_cluster_iris.py
350
2593
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= K-means Clustering ========================================================= The plots display firstly what a K-means algorithm would yield using three clusters. It is then shown what the effect of a bad initialization is on the classification process: By setting n_init to only 1 (default is 10), the amount of times that the algorithm will be run with different centroid seeds is reduced. The next plot displays what using eight clusters would deliver and finally the ground truth. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.cluster import KMeans from sklearn import datasets np.random.seed(5) centers = [[1, 1], [-1, -1], [1, -1]] iris = datasets.load_iris() X = iris.data y = iris.target estimators = {'k_means_iris_3': KMeans(n_clusters=3), 'k_means_iris_8': KMeans(n_clusters=8), 'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1, init='random')} fignum = 1 for name, est in estimators.items(): fig = plt.figure(fignum, figsize=(4, 3)) plt.clf() ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134) plt.cla() est.fit(X) labels = est.labels_ ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float)) ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('Petal width') ax.set_ylabel('Sepal length') ax.set_zlabel('Petal length') fignum = fignum + 1 # Plot the ground truth fig = plt.figure(fignum, figsize=(4, 3)) plt.clf() ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134) plt.cla() for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]: ax.text3D(X[y == label, 3].mean(), X[y == label, 0].mean() + 1.5, X[y == label, 2].mean(), name, horizontalalignment='center', bbox=dict(alpha=.5, edgecolor='w', facecolor='w')) # Reorder the labels to have colors matching the cluster results y = np.choose(y, [1, 2, 0]).astype(np.float) ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y) ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('Petal width') ax.set_ylabel('Sepal length') ax.set_zlabel('Petal length') plt.show()
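# --- Editor's addition: a rough numerical comparison, not in the original example ---
# (reuses the fitted estimators above)
# The docstring's point about initialization can also be read off the final
# inertia (within-cluster sum of squares): the single random-init 3-cluster run
# can end with a higher inertia than the default multi-restart run.
for name, est in sorted(estimators.items()):
    print("%-25s inertia = %.2f" % (name, est.inertia_))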
bsd-3-clause
JackKelly/neuralnilm_prototype
scripts/e302.py
2
6009
from __future__ import print_function, division import matplotlib import logging from sys import stdout matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import (Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer, BidirectionalRecurrentLayer) from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff from neuralnilm.experiment import run_experiment, init_experiment from neuralnilm.net import TrainingError from neuralnilm.layers import MixtureDensityLayer from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive from neuralnilm.plot import MDNPlotter from lasagne.nonlinearities import sigmoid, rectify, tanh from lasagne.objectives import mse from lasagne.init import Uniform, Normal from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer) from lasagne.updates import nesterov_momentum, momentum from functools import partial import os import __main__ from copy import deepcopy from math import sqrt import numpy as np import theano.tensor as T NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" SAVE_PLOT_INTERVAL = 1000 GRADIENT_STEPS = 100 SEQ_LENGTH = 512 source_dict = dict( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television' # 'dish washer', # ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200, 2500, 2400], on_power_thresholds=[5] * 5, max_input_power=5900, min_on_durations=[60, 60, 60, 1800, 1800], min_off_durations=[12, 12, 12, 1800, 600], window=("2013-06-01", "2014-07-01"), seq_length=SEQ_LENGTH, output_one_appliance=False, boolean_targets=False, train_buildings=[1], validation_buildings=[1], skip_probability=0.0, n_seq_per_batch=16, subsample_target=4, include_diff=False, clip_appliance_power=True, target_is_prediction=False, standardise_input=True, standardise_targets=True, input_padding=0, lag=0, reshape_target_to_2D=False, input_stats={'mean': np.array([ 0.05526326], dtype=np.float32), 'std': np.array([ 0.12636775], dtype=np.float32)}, target_stats={ 'mean': np.array([ 0.04066789, 0.01881946, 0.24639061, 0.17608672, 0.10273963], dtype=np.float32), 'std': np.array([ 0.11449792, 0.07338708, 0.26608968, 0.33463112, 0.21250485], dtype=np.float32)} ) N = 50 net_dict = dict( save_plot_interval=SAVE_PLOT_INTERVAL, # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH), # loss_function=lambda x, t: mdn_nll(x, t).mean(), loss_function=lambda x, t: mse(x, t).mean(), updates_func=momentum, learning_rate=1e-02, learning_rate_changes_by_iteration={ 500: 5e-03, 5000: 1e-03, 10000: 1e-04, 50000: 5e-06, 70000: 1e-06 # 2000: 5e-06 # 3000: 1e-05 # 7000: 5e-06, # 10000: 1e-06, # 15000: 5e-07, # 50000: 1e-07 }, do_save_activations=True ) def callback(net, epoch): net.source.reshape_target_to_2D = True net.plotter = MDNPlotter(net) net.generate_validation_data_and_set_shapes() net.loss_function = lambda x, t: mdn_nll(x, t).mean() net.learning_rate = 1e-05 def exp_a(name): # 3 appliances global source source_dict_copy = deepcopy(source_dict) source_dict_copy['reshape_target_to_2D'] = False source = RealApplianceSource(**source_dict_copy) source.reshape_target_to_2D = False net_dict_copy = deepcopy(net_dict) net_dict_copy.update(dict( experiment_name=name, source=source )) N = 50 net_dict_copy['layers_config'] = [ { 'type': BidirectionalRecurrentLayer, 
'num_units': N, 'gradient_steps': GRADIENT_STEPS, 'W_in_to_hid': Normal(std=1.), 'nonlinearity': tanh }, { 'type': FeaturePoolLayer, 'ds': 4, # number of feature maps to be pooled together 'axis': 1, # pool over the time axis 'pool_function': T.max }, { 'type': BidirectionalRecurrentLayer, 'num_units': N, 'gradient_steps': GRADIENT_STEPS, 'W_in_to_hid': Normal(std=1/sqrt(N)), 'nonlinearity': tanh }, { 'type': DenseLayer, 'W': Normal(std=1/sqrt(N)), 'num_units': source.n_outputs, 'nonlinearity': None } ] net_dict_copy['layer_changes'] = { 30000: { 'remove_from': -2, 'callback': callback, 'new_layers': [ { 'type': MixtureDensityLayer, 'num_units': source.n_outputs, 'num_components': 2 } ] } } net = Net(**net_dict_copy) return net def main(): # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz') EXPERIMENTS = list('a') for experiment in EXPERIMENTS: full_exp_name = NAME + experiment func_call = init_experiment(PATH, experiment, full_exp_name) logger = logging.getLogger(full_exp_name) try: net = eval(func_call) run_experiment(net, epochs=100000) except KeyboardInterrupt: logger.info("KeyboardInterrupt") break except Exception as exception: logger.exception("Exception") raise finally: logging.shutdown() if __name__ == "__main__": main()
mit
dpwrussell/openmicroscopy
components/tools/OmeroPy/src/omero/install/jvmcfg.py
10
16253
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014 Glencoe Software, Inc. All Rights Reserved. # Use is subject to license terms supplied in LICENSE.txt # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """ Automatic configuration of memory settings for Java servers. """ from types import StringType from shlex import split import logging LOGGER = logging.getLogger("omero.install.jvmcfg") def strip_dict(map, prefix=("omero", "jvmcfg"), suffix=(), limit=1): """ For the given dictionary, return a copy of the dictionary where all entries not matching the prefix, suffix, and limit have been removed and where all remaining keys have had the prefix and suffix stripped. The limit describes the number of elements that are allowed in the new key after stripping prefix and suffix. """ if isinstance(prefix, StringType): prefix = tuple(prefix.split(".")) if isinstance(suffix, StringType): suffix = tuple(suffix.split(".")) rv = dict() if not map: return dict() def __strip_dict(k, v, prefix, suffix, rv): key = tuple(k.split(".")) ksz = len(key) psz = len(prefix) ssz = len(suffix) if ksz <= (psz + ssz): return # No way to strip if smaller if key[0:psz] == prefix and key[ksz-ssz:] == suffix: newkey = key[psz:ksz-ssz] if len(newkey) == limit: newkey = ".".join(newkey) rv[newkey] = v for k, v in map.items(): __strip_dict(k, v, prefix, suffix, rv) return rv class StrategyRegistry(dict): def __init__(self, *args, **kwargs): super(dict, self).__init__(*args, **kwargs) STRATEGY_REGISTRY = StrategyRegistry() class Settings(object): """ Container for the config options found in etc/grid/config.xml """ def __init__(self, server_values=None, global_values=None): if server_values is None: self.__server = dict() else: self.__server = server_values if global_values is None: self.__global = dict() else: self.__global = global_values self.__static = { "strategy": PercentStrategy, "append": "", "perm_gen": "128m", "heap_dump": "off", "heap_size": "512m", "system_memory": None, "max_system_memory": "48000", "min_system_memory": "3414", } self.__manual = dict() def __getattr__(self, key): return self.lookup(key) def lookup(self, key, default=None): if key in self.__manual: return self.__manual[key] elif key in self.__server: return self.__server[key] elif key in self.__global: return self.__global[key] elif key in self.__static: return self.__static[key] else: return default def overwrite(self, key, value, always=False): if self.was_set(key) and not always: # Then we leave it as the user requested return else: self.__manual[key] = value def was_set(self, key): return key in self.__server or key in self.__global def get_strategy(self): return STRATEGY_REGISTRY.get(self.strategy, self.strategy) def __str__(self): rv = dict() rv.update(self.__server) rv.update(self.__global) if not rv: rv = "" return 'Settings(%s)' % rv class Strategy(object): """ Strategy for 
calculating memory settings. Primary class of the memory module. """ def __init__(self, name, settings=None): """ 'name' argument should likely be one of: ('blitz', 'indexer', 'pixeldata', 'repository') """ if settings is None: settings = Settings() self.name = name self.settings = settings if type(self) == Strategy: raise Exception("Must subclass!") # Memory helpers def system_memory_mb(self): """ Returns a tuple, in MB, of available, active, and total memory. "total" memory is found by calling to first a Python library (if installed) and otherwise a Java class. If "system_memory" is set, it will short-circuit both methods. "active" memory is set to "total" but limited by "min_system_memory" and "max_system_memory". "available" may not be accurate, and in some cases will be set to total. """ available, total = None, None if self.settings.system_memory is not None: total = int(self.settings.system_memory) available = total else: pymem = self._system_memory_mb_psutil() if pymem is not None: available, total = pymem else: available, total = self._system_memory_mb_java() max_system_memory = int(self.settings.max_system_memory) min_system_memory = int(self.settings.min_system_memory) active = max(min(total, max_system_memory), min_system_memory) return available, active, total def _system_memory_mb_psutil(self): try: import psutil pymem = psutil.virtual_memory() return (pymem.free/1000000, pymem.total/1000000) except ImportError: LOGGER.debug("No psutil installed") return None def _system_memory_mb_java(self): import omero.cli import omero.java # Copied from db.py. Needs better dir detection cwd = omero.cli.CLI().dir server_jar = cwd / "lib" / "server" / "server.jar" cmd = ["ome.services.util.JvmSettingsCheck", "--psutil"] p = omero.java.popen(["-cp", str(server_jar)] + cmd) o, e = p.communicate() if p.poll() != 0: LOGGER.warn("Failed to invoke java:\nout:%s\nerr:%s", o, e) rv = dict() for line in o.split("\n"): line = line.strip() if not line: continue parts = line.split(":") if len(parts) == 1: parts.append("") rv[parts[0]] = parts[1] try: free = long(rv["Free"]) / 1000000 except: LOGGER.warn("Failed to parse Free from %s", rv) free = 2000 try: total = long(rv["Total"]) / 1000000 except: LOGGER.warn("Failed to parse Total from %s", rv) total = 4000 return (free, total) # API Getters def get_heap_size(self, sz=None): if sz is None or self.settings.was_set("heap_size"): sz = self.settings.heap_size if str(sz).startswith("-X"): return sz else: rv = "-Xmx%s" % sz if rv[-1].lower() not in ("b", "k", "m", "g"): rv = "%sm" % rv return rv def get_heap_dump(self): hd = self.settings.heap_dump if hd == "off": return "" elif hd in ("on", "cwd", "tmp"): return "-XX:+HeapDumpOnOutOfMemoryError" def get_perm_gen(self): pg = self.settings.perm_gen if str(pg).startswith("-XX"): return pg else: return "-XX:MaxPermSize=%s" % pg def get_append(self): values = [] if self.settings.heap_dump == "tmp": import tempfile tmp = tempfile.gettempdir() values.append("-XX:HeapDumpPath=%s" % tmp) return values + split(self.settings.append) def get_memory_settings(self): values = [ self.get_heap_size(), self.get_heap_dump(), self.get_perm_gen(), ] if any([x.startswith("-XX:MaxPermSize") for x in values]): values.append("-XX:+IgnoreUnrecognizedVMOptions") values += self.get_append() return [x for x in values if x] class ManualStrategy(Strategy): """ Simplest strategy which assumes all values have been set and simply uses them or their defaults. 
""" class PercentStrategy(Strategy): """ Strategy based on a percent of available memory. """ PERCENT_DEFAULTS = ( ("blitz", 15), ("pixeldata", 15), ("indexer", 10), ("repository", 10), ("other", 1), ) def __init__(self, name, settings=None): super(PercentStrategy, self).__init__(name, settings) self.defaults = dict(self.PERCENT_DEFAULTS) self.use_active = True def get_heap_size(self): """ Uses the results of the default settings of calculate_heap_size() as an argument to get_heap_size(), in other words some percent of the active memory. """ sz = self.calculate_heap_size() return super(PercentStrategy, self).get_heap_size(sz) def get_percent(self): other = self.defaults.get("other", "1") default = self.defaults.get(self.name, other) percent = int(self.settings.lookup("percent", default)) return percent def get_perm_gen(self): available, active, total = self.system_memory_mb() choice = self.use_active and active or total if choice <= 4000: if choice >= 2000: self.settings.overwrite("perm_gen", "256m") elif choice <= 8000: self.settings.overwrite("perm_gen", "512m") else: self.settings.overwrite("perm_gen", "1g") return super(PercentStrategy, self).get_perm_gen() def calculate_heap_size(self, method=None): """ Re-calculates the appropriate heap size based on the value of get_percent(). The "active" memory returned by method() will be used by default, but can be modified to use "total" via the "use_active" flag. """ if method is None: method = self.system_memory_mb available, active, total = method() choice = self.use_active and active or total percent = self.get_percent() calculated = choice * int(percent) / 100 return calculated def usage_table(self, min=10, max=20): total_mb = [2**x for x in range(min, max)] for total in total_mb: method = lambda: (total, total, total) yield total, self.calculate_heap_size(method) STRATEGY_REGISTRY["manual"] = ManualStrategy STRATEGY_REGISTRY["percent"] = PercentStrategy def read_settings(template_xml): """ Read the memory settings from the template file """ rv = dict() for template in template_xml.findall("server-template"): for server in template.findall("server"): for option in server.findall("option"): o = option.text if o.startswith("-Xmx") | o.startswith("-XX"): rv.setdefault(server.get('id'), []).append(o) return rv def adjust_settings(config, template_xml, blitz=None, indexer=None, pixeldata=None, repository=None): """ Takes an omero.config.ConfigXml object and adjusts the memory settings. Primary entry point to the memory module. """ from xml.etree.ElementTree import Element from collections import defaultdict replacements = dict() options = dict() for template in template_xml.findall("server-template"): for server in template.findall("server"): for option in server.findall("option"): o = option.text if o.startswith("MEMORY:"): options[o[7:]] = (server, option) for props in server.findall("properties"): for prop in props.findall("property"): name = prop.attrib.get("name", "") if name.startswith("REPLACEMENT:"): replacements[name[12:]] = (server, prop) rv = defaultdict(list) m = config.as_map() loop = (("blitz", blitz), ("indexer", indexer), ("pixeldata", pixeldata), ("repository", repository)) for name, StrategyType in loop: if name not in options: raise Exception( "Cannot find %s option. 
Make sure templates.xml was " "not copied from an older server" % name) for name, StrategyType in loop: specific = strip_dict(m, suffix=name) defaults = strip_dict(m) settings = Settings(specific, defaults) rv[name].append(settings) if StrategyType is None: StrategyType = settings.get_strategy() if not callable(StrategyType): raise Exception("Bad strategy: %s" % StrategyType) strategy = StrategyType(name, settings) settings = strategy.get_memory_settings() server, option = options[name] idx = 0 for v in settings: rv[name].append(v) if idx == 0: option.text = v else: elem = Element("option") elem.text = v server.insert(idx, elem) idx += 1 # Now we check for any other properties and # put them where the replacement should go. for k, v in m.items(): r = [] suffix = ".%s" % name size = len(suffix) if k.endswith(suffix): k = k[:-size] r.append((k, v)) server, replacement = replacements[name] idx = 0 for k, v in r: if idx == 0: replacement.attrib["name"] = k replacement.attrib["value"] = v else: elem = Element("property", name=k, value=v) server.append(elem) return rv def usage_charts(path, min=0, max=20, Strategy=PercentStrategy, name="blitz"): # See http://matplotlib.org/examples/pylab_examples/anscombe.html from pylab import array from pylab import axis from pylab import gca from pylab import subplot from pylab import plot from pylab import setp from pylab import savefig from pylab import text points = 200 x = array([2 ** (x / points) / 1000 for x in range(min*points, max*points)]) y_configs = ( (Settings({}), 'A'), (Settings({"percent": "20"}), 'B'), (Settings({}), 'C'), (Settings({"max_system_memory": "10000"}), 'D'), ) def f(cfg): s = Strategy(name, settings=cfg[0]) y = [] for total in x: method = lambda: (total, total, total) y.append(s.calculate_heap_size(method)) return y y1 = f(y_configs[0]) y2 = f(y_configs[1]) y3 = f(y_configs[2]) y4 = f(y_configs[3]) axis_values = [0, 20, 0, 6] def ticks_f(): setp(gca(), xticks=(8, 16), yticks=(2, 4)) def text_f(which): cfg = y_configs[which] # s = cfg[0] txt = "%s" % (cfg[1],) text(2, 2, txt, fontsize=20) subplot(221) plot(x, y1) axis(axis_values) text_f(0) ticks_f() subplot(222) plot(x, y2) axis(axis_values) text_f(1) ticks_f() subplot(223) plot(x, y3) axis(axis_values) text_f(2) ticks_f() subplot(224) plot(x, y4) axis(axis_values) text_f(3) ticks_f() savefig(path)
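# --- Editor's addition: an illustrative sketch, not part of OMERO itself ---
# strip_dict() turns flat "omero.jvmcfg.*" config keys into the short keys that
# the Settings/Strategy classes expect, and PercentStrategy then sizes the heap
# as a percentage of the (capped) system memory.  The config values below are
# made up for illustration, and "system_memory" pins the memory probe so the
# result is deterministic:
def _editor_demo_percent_strategy():
    cfg = {
        "omero.jvmcfg.percent.blitz": "25",
        "omero.jvmcfg.system_memory": "8000",  # pretend the machine has 8000 MB
        "unrelated.key": "ignored",            # stripped away by the prefix check
    }
    specific = strip_dict(cfg, suffix="blitz")  # {'percent': '25'}
    defaults = strip_dict(cfg)                  # picks up 'system_memory': '8000'
    settings = Settings(specific, defaults)
    strategy = PercentStrategy("blitz", settings)
    # e.g. ['-Xmx2000m', '-XX:MaxPermSize=512m', '-XX:+IgnoreUnrecognizedVMOptions']
    return strategy.get_memory_settings()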
gpl-2.0
xuewei4d/scikit-learn
sklearn/tree/tests/test_tree.py
6
78716
""" Testing for the tree module (sklearn.tree). """ import copy import pickle from itertools import product import struct import pytest import numpy as np from numpy.testing import assert_allclose from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import _sparse_random_matrix from sklearn.dummy import DummyRegressor from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_poisson_deviance from sklearn.model_selection import train_test_split from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import create_memmap_backed_data from sklearn.utils._testing import ignore_warnings from sklearn.utils._testing import skip_if_32bit from sklearn.utils.estimator_checks import check_sample_weights_invariance from sklearn.utils.validation import check_random_state from sklearn.exceptions import NotFittedError from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree._tree import TREE_LEAF, TREE_UNDEFINED from sklearn.tree._classes import CRITERIA_CLF from sklearn.tree._classes import CRITERIA_REG from sklearn import datasets from sklearn.utils import compute_sample_weight CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", "mae", "friedman_mse", "poisson") CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 
2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the diabetes dataset # and randomly permute it diabetes = datasets.load_diabetes() perm = rng.permutation(diabetes.target.size) diabetes.data = diabetes.data[perm] diabetes.target = diabetes.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) # NB: despite their names X_sparse_* are numpy arrays (and not sparse matrices) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = _sparse_random_matrix(20, 10, density=0.25, random_state=0).toarray() DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "diabetes": {"X": diabetes.data, "y": diabetes.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert s.node_count == d.node_count, ( "{0}: inequal number of node ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": inequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": inequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": inequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": inequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": inequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": inequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset. 
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.full(len(X), 0.5)) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) @pytest.mark.parametrize("Tree", REG_TREES.values()) @pytest.mark.parametrize("criterion", REG_CRITERIONS) def test_regression_toy(Tree, criterion): # Check regression on a toy dataset. if criterion == "poisson": # make target positive while not touching the original y and # true_result a = np.abs(np.min(y)) + 1 y_train = np.array(y) + a y_test = np.array(true_result) + a else: y_train = y y_test = true_result reg = Tree(criterion=criterion, random_state=1) reg.fit(X, y_train) assert_allclose(reg.predict(T), y_test) clf = Tree(criterion=criterion, max_features=1, random_state=1) clf.fit(X, y_train) assert_allclose(reg.predict(T), y_test) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert clf.score(X, y) == 1.0, "Failed with {0}".format(name) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert clf.score(X, y) == 1.0, "Failed with {0}".format(name) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert score > 0.9, ( "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert score > 0.5, ( "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) @pytest.mark.parametrize("name, Tree", REG_TREES.items()) @pytest.mark.parametrize("criterion", REG_CRITERIONS) def test_diabetes_overfit(name, Tree, criterion): # check consistency of overfitted trees on the diabetes dataset # since the trees will overfit, we expect an MSE of 0 reg = Tree(criterion=criterion, random_state=0) reg.fit(diabetes.data, diabetes.target) score = mean_squared_error(diabetes.target, reg.predict(diabetes.data)) assert score == pytest.approx(0), ( f"Failed with {name}, criterion = {criterion} and score = {score}" ) @skip_if_32bit @pytest.mark.parametrize("name, Tree", REG_TREES.items()) @pytest.mark.parametrize( "criterion, max_depth, metric, max_loss", [("mse", 15, mean_squared_error, 60), ("mae", 20, mean_squared_error, 60), ("friedman_mse", 15, mean_squared_error, 60), ("poisson", 15, mean_poisson_deviance, 30)] ) def test_diabetes_underfit(name, Tree, criterion, max_depth, metric, max_loss): # check consistency of trees when the depth and the number of features are # limited reg = Tree( criterion=criterion, max_depth=max_depth, max_features=6, random_state=0 ) reg.fit(diabetes.data, diabetes.target) loss = metric(diabetes.target, reg.predict(diabetes.data)) assert 0 < loss < max_loss def test_probability(): # Predict probabilities using DecisionTreeClassifier. 
for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(reg.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=5000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert importances.shape[0] == 10, "Failed with {0}".format(name) assert n_important == 3, "Failed with {0}".format(name) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() with pytest.raises(ValueError): getattr(clf, 'feature_importances_') def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occurs at # high tree depth, we restrict this maximal depth. 
clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features. for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(diabetes.data, diabetes.target) assert reg.max_features_ == diabetes.data.shape[1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert clf.max_features_ == 2 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert (est.max_features_ == int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert (est.max_features_ == int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert est.max_features_ == 1 est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert est.max_features_ == 3 est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert est.max_features_ == 1 est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert (est.max_features_ == int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert est.max_features_ == iris.data.shape[1] est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert est.max_features_ == iris.data.shape[1] # use values of max_features that are invalid est = TreeEstimator(max_features=10) with pytest.raises(ValueError): est.fit(X, y) est = TreeEstimator(max_features=-1) with pytest.raises(ValueError): est.fit(X, y) est = TreeEstimator(max_features=0.0) with pytest.raises(ValueError): est.fit(X, y) est = TreeEstimator(max_features=1.5) with pytest.raises(ValueError): est.fit(X, y) est = TreeEstimator(max_features="foobar") with pytest.raises(ValueError): est.fit(X, y) def test_error(): # Test that it gives proper exception on deficient input. 
for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() with pytest.raises(NotFittedError): est.predict_proba(X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample with pytest.raises(ValueError): est.predict_proba(X2) for name, TreeEstimator in ALL_TREES.items(): with pytest.raises(ValueError): TreeEstimator(min_samples_leaf=-1).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_leaf=.6).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_leaf=0.).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_leaf=3.).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_weight_fraction_leaf=-1).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_weight_fraction_leaf=0.51).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_split=-1).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_split=0.0).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_split=1.1).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_samples_split=2.5).fit(X, y) with pytest.raises(ValueError): TreeEstimator(max_depth=-1).fit(X, y) with pytest.raises(ValueError): TreeEstimator(max_features=42).fit(X, y) # min_impurity_split warning with ignore_warnings(category=FutureWarning): with pytest.raises(ValueError): TreeEstimator(min_impurity_split=-1.0).fit(X, y) with pytest.raises(ValueError): TreeEstimator(min_impurity_decrease=-1.0).fit(X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] with pytest.raises(ValueError): est.fit(X, y2) # Test with arrays that are non-contiguous. Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() with pytest.raises(NotFittedError): est.predict(T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) with pytest.raises(ValueError): est.predict(t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) with pytest.raises(ValueError): est.predict(X) with pytest.raises(ValueError): est.apply(X) clf = TreeEstimator() clf.fit(X, y) with pytest.raises(ValueError): clf.predict(Xt) with pytest.raises(ValueError): clf.apply(Xt) # apply before fitting est = TreeEstimator() with pytest.raises(NotFittedError): est.apply(T) # non positive target for Poisson splitting Criterion est = DecisionTreeRegressor(criterion="poisson") with pytest.raises(ValueError, match="y is not positive.*Poisson"): est.fit([[0, 1, 2]], [0, 0, 0]) with pytest.raises(ValueError, match="Some.*y are negative.*Poisson"): est.fit([[0, 1, 2]], [5, -0.1, 2]) def test_min_samples_split(): """Test min_samples_split parameter""" X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test for integer parameter est = TreeEstimator(min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert np.min(node_samples) > 9, "Failed with {0}".format(name) # test for float parameter est = TreeEstimator(min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = 
est.tree_.n_node_samples[est.tree_.children_left != -1] assert np.min(node_samples) > 9, "Failed with {0}".format(name) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test integer parameter est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert np.min(leaf_count) > 4, "Failed with {0}".format(name) # test float parameter est = TreeEstimator(min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert np.min(leaf_count) > 4, "Failed with {0}".format(name) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert ( np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf), ( "Failed with {0} min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) # test case with no weights passed in total_weight = X.shape[0] for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert ( np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf), ( "Failed with {0} min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) @pytest.mark.parametrize("name", ALL_TREES) def test_min_weight_fraction_leaf_on_dense_input(name): check_min_weight_fraction_leaf(name, "iris") @pytest.mark.parametrize("name", SPARSE_TREES) def test_min_weight_fraction_leaf_on_sparse_input(name): check_min_weight_fraction_leaf(name, "multilabel", True) def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets, sparse=False): """Test the interaction between min_weight_fraction_leaf and min_samples_leaf when sample_weights is not provided in fit.""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] total_weight = X.shape[0] TreeEstimator = ALL_TREES[name] for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)): # test integer 
min_samples_leaf est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, min_samples_leaf=5, random_state=0) est.fit(X, y) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert ( np.min(leaf_weights) >= max((total_weight * est.min_weight_fraction_leaf), 5)), ( "Failed with {0} min_weight_fraction_leaf={1}, " "min_samples_leaf={2}".format( name, est.min_weight_fraction_leaf, est.min_samples_leaf)) for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)): # test float min_samples_leaf est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, min_samples_leaf=.1, random_state=0) est.fit(X, y) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert ( np.min(leaf_weights) >= max((total_weight * est.min_weight_fraction_leaf), (total_weight * est.min_samples_leaf))), ( "Failed with {0} min_weight_fraction_leaf={1}, " "min_samples_leaf={2}".format(name, est.min_weight_fraction_leaf, est.min_samples_leaf)) @pytest.mark.parametrize("name", ALL_TREES) def test_min_weight_fraction_leaf_with_min_samples_leaf_on_dense_input(name): check_min_weight_fraction_leaf_with_min_samples_leaf(name, "iris") @pytest.mark.parametrize("name", SPARSE_TREES) def test_min_weight_fraction_leaf_with_min_samples_leaf_on_sparse_input(name): check_min_weight_fraction_leaf_with_min_samples_leaf( name, "multilabel", True) def test_min_impurity_split(): # test if min_impurity_split creates leaves with impurity # [0, min_impurity_split) when min_samples_leaf = 1 and # min_samples_split = 2. 
X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] min_impurity_split = .5 # verify leaf nodes without min_impurity_split less than # impurity 1e-7 est = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0) assert est.min_impurity_split is None, ( "Failed, min_impurity_split = {0} != None".format( est.min_impurity_split)) try: assert_warns(FutureWarning, est.fit, X, y) except AssertionError: pass for node in range(est.tree_.node_count): if (est.tree_.children_left[node] == TREE_LEAF or est.tree_.children_right[node] == TREE_LEAF): assert est.tree_.impurity[node] == 0., ( "Failed with {0} min_impurity_split={1}".format( est.tree_.impurity[node], est.min_impurity_split)) # verify leaf nodes have impurity [0,min_impurity_split] when using # min_impurity_split est = TreeEstimator(max_leaf_nodes=max_leaf_nodes, min_impurity_split=min_impurity_split, random_state=0) assert_warns_message(FutureWarning, "Use the min_impurity_decrease", est.fit, X, y) for node in range(est.tree_.node_count): if (est.tree_.children_left[node] == TREE_LEAF or est.tree_.children_right[node] == TREE_LEAF): assert est.tree_.impurity[node] >= 0, ( "Failed with {0}, min_impurity_split={1}".format( est.tree_.impurity[node], est.min_impurity_split)) assert est.tree_.impurity[node] <= min_impurity_split, ( "Failed with {0}, min_impurity_split={1}".format( est.tree_.impurity[node], est.min_impurity_split)) def test_min_impurity_decrease(): # test if min_impurity_decrease ensure that a split is made only if # if the impurity decrease is atleast that value X, y = datasets.make_classification(n_samples=10000, random_state=42) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # Check default value of min_impurity_decrease, 1e-7 est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0) # Check with explicit value of 0.05 est2 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.05, random_state=0) # Check with a much lower value of 0.0001 est3 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.0001, random_state=0) # Check with a much lower value of 0.1 est4 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.1, random_state=0) for est, expected_decrease in ((est1, 1e-7), (est2, 0.05), (est3, 0.0001), (est4, 0.1)): assert est.min_impurity_decrease <= expected_decrease, ( "Failed, min_impurity_decrease = {0} > {1}".format( est.min_impurity_decrease, expected_decrease)) est.fit(X, y) for node in range(est.tree_.node_count): # If current node is a not leaf node, check if the split was # justified w.r.t the min_impurity_decrease if est.tree_.children_left[node] != TREE_LEAF: imp_parent = est.tree_.impurity[node] wtd_n_node = est.tree_.weighted_n_node_samples[node] left = est.tree_.children_left[node] wtd_n_left = est.tree_.weighted_n_node_samples[left] imp_left = est.tree_.impurity[left] wtd_imp_left = wtd_n_left * imp_left right = est.tree_.children_right[node] wtd_n_right = est.tree_.weighted_n_node_samples[right] imp_right = est.tree_.impurity[right] wtd_imp_right = wtd_n_right * imp_right wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left wtd_avg_left_right_imp /= wtd_n_node fractional_node_weight = ( 
est.tree_.weighted_n_node_samples[node] / X.shape[0]) actual_decrease = fractional_node_weight * ( imp_parent - wtd_avg_left_right_imp) assert actual_decrease >= expected_decrease, ( "Failed with {0} expected min_impurity_decrease={1}" .format(actual_decrease, expected_decrease)) for name, TreeEstimator in ALL_TREES.items(): if "Classifier" in name: X, y = iris.data, iris.target else: X, y = diabetes.data, diabetes.target est = TreeEstimator(random_state=0) est.fit(X, y) score = est.score(X, y) fitted_attribute = dict() for attribute in ["max_depth", "node_count", "capacity"]: fitted_attribute[attribute] = getattr(est.tree_, attribute) serialized_object = pickle.dumps(est) est2 = pickle.loads(serialized_object) assert type(est2) == est.__class__ score2 = est2.score(X, y) assert score == score2, ( "Failed to generate same score after pickling " "with {0}".format(name)) for attribute in fitted_attribute: assert (getattr(est2.tree_, attribute) == fitted_attribute[attribute]), ( "Failed to generate same attribute {0} after " "pickling with {1}".format(attribute, name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert y_hat.shape == (4, 2) proba = clf.predict_proba(T) assert len(proba) == 2 assert proba[0].shape == (4, 2) assert proba[1].shape == (4, 4) log_proba = clf.predict_log_proba(T) assert len(log_proba) == 2 assert log_proba[0].shape == (4, 2) assert log_proba[1].shape == (4, 4) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert y_hat.shape == (4, 2) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert clf.n_classes_ == 2 assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert len(clf.n_classes_) == 2 assert len(clf.classes_) == 2 assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. 
unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = compute_sample_weight("balanced", unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. # Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert clf.tree_.threshold[0] == 149.5 sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert clf.tree_.threshold[0] == 49.5 # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check sample weighting raises errors. 
X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) with pytest.raises(ValueError): clf.fit(X, y, sample_weight=sample_weight) sample_weight = np.array(0) expected_err = r"Singleton.* cannot be considered a valid collection" with pytest.raises(TypeError, match=expected_err): clf.fit(X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) @pytest.mark.parametrize("name", CLF_TREES) def test_class_weights(name): check_class_weights(name) def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. TreeClassifier = CLF_TREES[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = TreeClassifier(class_weight='the larch', random_state=0) with pytest.raises(ValueError): clf.fit(X, y) with pytest.raises(ValueError): clf.fit(X, _y) # Not a list or preset for multi-output clf = TreeClassifier(class_weight=1, random_state=0) with pytest.raises(ValueError): clf.fit(X, _y) # Incorrect length list for multi-output clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) with pytest.raises(ValueError): clf.fit(X, _y) @pytest.mark.parametrize("name", CLF_TREES) def test_class_weight_errors(name): check_class_weight_errors(name) def test_max_leaf_nodes(): # Test greedy trees with max_depth + 1 leafs. 
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) assert est.get_n_leaves() == k + 1 # max_leaf_nodes in (0, 1) should raise ValueError est = TreeEstimator(max_depth=None, max_leaf_nodes=0) with pytest.raises(ValueError): est.fit(X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=1) with pytest.raises(ValueError): est.fit(X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1) with pytest.raises(ValueError): est.fit(X, y) def test_max_leaf_nodes_max_depth(): # Test precedence of max_leaf_nodes over max_depth. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) assert est.get_depth() == 1 def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert -3 <= value.flat[0] < 3, \ 'Array points to arbitrary memory' def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert est.tree_.max_depth == 0 def test_behaviour_constant_feature_after_splits(): X = np.transpose(np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]], np.zeros((4, 11))))) y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3] for name, TreeEstimator in ALL_TREES.items(): # do not check extra random trees if "ExtraTree" not in name: est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert est.tree_.max_depth == 2 assert est.tree_.node_count == 5 def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert est.tree_.max_depth == 1 assert_array_equal(est.predict_proba(X), np.full((4, 2), 0.5)) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert est.tree_.max_depth == 1 assert_array_equal(est.predict(X), np.full((4, ), 0.5)) def test_big_input(): # Test if the warning for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert "float32" in str(e) def test_realloc(): from sklearn.tree._utils import _realloc_test with pytest.raises(MemoryError): _realloc_test() def test_huge_allocations(): n_bits = 8 * struct.calcsize("P") X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) with pytest.raises(Exception): clf.fit(X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *". 
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) with pytest.raises(MemoryError): clf.fit(X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "diabetes"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) @pytest.mark.parametrize("tree_type", SPARSE_TREES) @pytest.mark.parametrize( "dataset", ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros") ) def test_sparse_input(tree_type, dataset): max_depth = 3 if dataset == "digits" else None check_sparse_input(tree_type, dataset, max_depth) @pytest.mark.parametrize("tree_type", sorted(set(SPARSE_TREES).intersection(REG_TREES))) @pytest.mark.parametrize("dataset", ["diabetes", "reg_small"]) def test_sparse_input_reg_trees(tree_type, dataset): # Due to numerical instability of MSE and too strict test, we limit the # maximal depth check_sparse_input(tree_type, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def 
check_sparse_criterion(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check various criterion CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) @pytest.mark.parametrize("tree_type", SPARSE_TREES) @pytest.mark.parametrize("dataset", ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]) @pytest.mark.parametrize("check", [check_sparse_parameters, check_sparse_criterion]) def test_sparse(tree_type, dataset, check): check(tree_type, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples set n_feature to ease construction of a simultaneous # construction of a csr and csc matrix n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert (X_sparse.data == 0.).sum() > 0 assert (X_sparse_test.data == 0.).sum() > 0 # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.tree_.decision_path(X1).toarray(), d.tree_.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), d.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), s.tree_.decision_path(X1).toarray()) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) @pytest.mark.parametrize("tree_type", SPARSE_TREES) def test_explicit_sparse_zeros(tree_type): check_explicit_sparse_zeros(tree_type) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target with pytest.raises(ValueError): TreeEstimator(random_state=0).fit(X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) with pytest.raises(ValueError): 
est.predict([X]) @pytest.mark.parametrize("name", ALL_TREES) def test_1d_input(name): with ignore_warnings(): check_raise_error_on_1d_input(name) def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight): est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert est.tree_.max_depth == 1 est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y, sample_weight=sample_weight) assert est.tree_.max_depth == 0 def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) @pytest.mark.parametrize("name", ALL_TREES) def test_min_weight_leaf_split_level(name): check_min_weight_leaf_split_level(name) def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE, copy=False) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE, copy=False)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) @pytest.mark.parametrize("name", ALL_TREES) def test_public_apply_all_trees(name): check_public_apply(name) @pytest.mark.parametrize("name", SPARSE_TREES) def test_public_apply_sparse_trees(name): check_public_apply_sparse(name) def test_decision_path_hardcoded(): X = iris.data y = iris.target est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y) node_indicator = est.decision_path(X[:2]).toarray() assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]]) def check_decision_path(name): X = iris.data y = iris.target n_samples = X.shape[0] TreeEstimator = ALL_TREES[name] est = TreeEstimator(random_state=0, max_depth=2) est.fit(X, y) node_indicator_csr = est.decision_path(X) node_indicator = node_indicator_csr.toarray() assert node_indicator.shape == (n_samples, est.tree_.node_count) # Assert that leaves index are correct leaves = est.apply(X) leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)] assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) # Ensure only one leave node per sample all_leaves = est.tree_.children_left == TREE_LEAF assert_array_almost_equal(np.dot(node_indicator, all_leaves), np.ones(shape=n_samples)) # Ensure max depth is consistent with sum of indicator max_depth = node_indicator.sum(axis=1).max() assert est.tree_.max_depth <= max_depth @pytest.mark.parametrize("name", ALL_TREES) def test_decision_path(name): check_decision_path(name) def check_no_sparse_y_support(name): X, y = X_multilabel, csr_matrix(y_multilabel) TreeEstimator = ALL_TREES[name] with pytest.raises(TypeError): TreeEstimator(random_state=0).fit(X, y) @pytest.mark.parametrize("name", ALL_TREES) def test_no_sparse_y_support(name): # Currently we don't support sparse y check_no_sparse_y_support(name) def test_mae(): """Check MAE criterion produces correct results on small toy dataset: ------------------ | X | y | weight | ------------------ | 3 | 3 | 0.1 | | 5 | 3 | 0.3 | | 8 | 4 | 1.0 | | 3 | 6 | 0.6 | | 5 | 7 | 0.3 | ------------------ |sum wt:| 2.3 | ------------------ Because we are dealing with sample weights, we cannot find the median by simply choosing/averaging the centre value(s), instead we consider the median 
where 50% of the cumulative weight is found (in a y sorted data set) . Therefore with regards to this test data, the cumulative weight is >= 50% when y = 4. Therefore: Median = 4 For all the samples, we can get the total error by summing: Absolute(Median - y) * weight I.e., total error = (Absolute(4 - 3) * 0.1) + (Absolute(4 - 3) * 0.3) + (Absolute(4 - 4) * 1.0) + (Absolute(4 - 6) * 0.6) + (Absolute(4 - 7) * 0.3) = 2.5 Impurity = Total error / total weight = 2.5 / 2.3 = 1.08695652173913 ------------------ From this root node, the next best split is between X values of 3 and 5. Thus, we have left and right child nodes: LEFT RIGHT ------------------ ------------------ | X | y | weight | | X | y | weight | ------------------ ------------------ | 3 | 3 | 0.1 | | 5 | 3 | 0.3 | | 3 | 6 | 0.6 | | 8 | 4 | 1.0 | ------------------ | 5 | 7 | 0.3 | |sum wt:| 0.7 | ------------------ ------------------ |sum wt:| 1.6 | ------------------ Impurity is found in the same way: Left node Median = 6 Total error = (Absolute(6 - 3) * 0.1) + (Absolute(6 - 6) * 0.6) = 0.3 Left Impurity = Total error / total weight = 0.3 / 0.7 = 0.428571428571429 ------------------- Likewise for Right node: Right node Median = 4 Total error = (Absolute(4 - 3) * 0.3) + (Absolute(4 - 4) * 1.0) + (Absolute(4 - 7) * 0.3) = 1.2 Right Impurity = Total error / total weight = 1.2 / 1.6 = 0.75 ------ """ dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae", max_leaf_nodes=2) # Test MAE where sample weights are non-uniform (as illustrated above): dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3], sample_weight=[0.6, 0.3, 0.1, 1.0, 0.3]) assert_allclose(dt_mae.tree_.impurity, [2.5 / 2.3, 0.3 / 0.7, 1.2 / 1.6]) assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0]) # Test MAE where all sample weights are uniform: dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3], sample_weight=np.ones(5)) assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0]) assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0]) # Test MAE where a `sample_weight` is not explicitly provided. # This is equivalent to providing uniform sample weights, though # the internal logic is different: dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3]) assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0]) assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0]) def test_criterion_copy(): # Let's check whether copy of our criterion has the same type # and properties as original n_outputs = 3 n_classes = np.arange(3, dtype=np.intp) n_samples = 100 def _pickle_copy(obj): return pickle.loads(pickle.dumps(obj)) for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]: for _, typename in CRITERIA_CLF.items(): criteria = typename(n_outputs, n_classes) result = copy_func(criteria).__reduce__() typename_, (n_outputs_, n_classes_), _ = result assert typename == typename_ assert n_outputs == n_outputs_ assert_array_equal(n_classes, n_classes_) for _, typename in CRITERIA_REG.items(): criteria = typename(n_outputs, n_samples) result = copy_func(criteria).__reduce__() typename_, (n_outputs_, n_samples_), _ = result assert typename == typename_ assert n_outputs == n_outputs_ assert n_samples == n_samples_ def test_empty_leaf_infinite_threshold(): # try to make empty leaf by using near infinite value. 
data = np.random.RandomState(0).randn(100, 11) * 2e38 data = np.nan_to_num(data.astype('float32')) X_full = data[:, :-1] X_sparse = csc_matrix(X_full) y = data[:, -1] for X in [X_full, X_sparse]: tree = DecisionTreeRegressor(random_state=0).fit(X, y) terminal_regions = tree.apply(X) left_leaf = set(np.where(tree.tree_.children_left == TREE_LEAF)[0]) empty_leaf = left_leaf.difference(terminal_regions) infinite_threshold = np.where(~np.isfinite(tree.tree_.threshold))[0] assert len(infinite_threshold) == 0 assert len(empty_leaf) == 0 @pytest.mark.parametrize("criterion", CLF_CRITERIONS) @pytest.mark.parametrize( "dataset", sorted(set(DATASETS.keys()) - {"reg_small", "diabetes"})) @pytest.mark.parametrize( "tree_cls", [DecisionTreeClassifier, ExtraTreeClassifier]) def test_prune_tree_classifier_are_subtrees(criterion, dataset, tree_cls): dataset = DATASETS[dataset] X, y = dataset["X"], dataset["y"] est = tree_cls(max_leaf_nodes=20, random_state=0) info = est.cost_complexity_pruning_path(X, y) pruning_path = info.ccp_alphas impurities = info.impurities assert np.all(np.diff(pruning_path) >= 0) assert np.all(np.diff(impurities) >= 0) assert_pruning_creates_subtree(tree_cls, X, y, pruning_path) @pytest.mark.parametrize("criterion", REG_CRITERIONS) @pytest.mark.parametrize("dataset", DATASETS.keys()) @pytest.mark.parametrize( "tree_cls", [DecisionTreeRegressor, ExtraTreeRegressor]) def test_prune_tree_regression_are_subtrees(criterion, dataset, tree_cls): dataset = DATASETS[dataset] X, y = dataset["X"], dataset["y"] est = tree_cls(max_leaf_nodes=20, random_state=0) info = est.cost_complexity_pruning_path(X, y) pruning_path = info.ccp_alphas impurities = info.impurities assert np.all(np.diff(pruning_path) >= 0) assert np.all(np.diff(impurities) >= 0) assert_pruning_creates_subtree(tree_cls, X, y, pruning_path) def test_prune_single_node_tree(): # single node tree clf1 = DecisionTreeClassifier(random_state=0) clf1.fit([[0], [1]], [0, 0]) # pruned single node tree clf2 = DecisionTreeClassifier(random_state=0, ccp_alpha=10) clf2.fit([[0], [1]], [0, 0]) assert_is_subtree(clf1.tree_, clf2.tree_) def assert_pruning_creates_subtree(estimator_cls, X, y, pruning_path): # generate trees with increasing alphas estimators = [] for ccp_alpha in pruning_path: est = estimator_cls( max_leaf_nodes=20, ccp_alpha=ccp_alpha, random_state=0).fit(X, y) estimators.append(est) # A pruned tree must be a subtree of the previous tree (which had a # smaller ccp_alpha) for prev_est, next_est in zip(estimators, estimators[1:]): assert_is_subtree(prev_est.tree_, next_est.tree_) def assert_is_subtree(tree, subtree): assert tree.node_count >= subtree.node_count assert tree.max_depth >= subtree.max_depth tree_c_left = tree.children_left tree_c_right = tree.children_right subtree_c_left = subtree.children_left subtree_c_right = subtree.children_right stack = [(0, 0)] while stack: tree_node_idx, subtree_node_idx = stack.pop() assert_array_almost_equal(tree.value[tree_node_idx], subtree.value[subtree_node_idx]) assert_almost_equal(tree.impurity[tree_node_idx], subtree.impurity[subtree_node_idx]) assert_almost_equal(tree.n_node_samples[tree_node_idx], subtree.n_node_samples[subtree_node_idx]) assert_almost_equal(tree.weighted_n_node_samples[tree_node_idx], subtree.weighted_n_node_samples[subtree_node_idx]) if (subtree_c_left[subtree_node_idx] == subtree_c_right[subtree_node_idx]): # is a leaf assert_almost_equal(TREE_UNDEFINED, subtree.threshold[subtree_node_idx]) else: # not a leaf assert_almost_equal(tree.threshold[tree_node_idx], 
subtree.threshold[subtree_node_idx]) stack.append((tree_c_left[tree_node_idx], subtree_c_left[subtree_node_idx])) stack.append((tree_c_right[tree_node_idx], subtree_c_right[subtree_node_idx])) def test_prune_tree_raises_negative_ccp_alpha(): clf = DecisionTreeClassifier() msg = "ccp_alpha must be greater than or equal to 0" with pytest.raises(ValueError, match=msg): clf.set_params(ccp_alpha=-1.0) clf.fit(X, y) clf.set_params(ccp_alpha=0.0) clf.fit(X, y) with pytest.raises(ValueError, match=msg): clf.set_params(ccp_alpha=-1.0) clf._prune_tree() def check_apply_path_readonly(name): X_readonly = create_memmap_backed_data(X_small.astype(tree._tree.DTYPE, copy=False)) y_readonly = create_memmap_backed_data(np.array(y_small, dtype=tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_readonly, y_readonly) assert_array_equal(est.predict(X_readonly), est.predict(X_small)) assert_array_equal(est.decision_path(X_readonly).todense(), est.decision_path(X_small).todense()) @pytest.mark.parametrize("name", ALL_TREES) def test_apply_path_readonly_all_trees(name): check_apply_path_readonly(name) @pytest.mark.parametrize("criterion", ["mse", "friedman_mse", "poisson"]) @pytest.mark.parametrize("Tree", REG_TREES.values()) def test_balance_property(criterion, Tree): # Test that sum(y_pred)=sum(y_true) on training set. # This works if the mean is predicted (should even be true for each leaf). # MAE predicts the median and is therefore excluded from this test. # Choose a training set with non-negative targets (for poisson) X, y = diabetes.data, diabetes.target reg = Tree(criterion=criterion) reg.fit(X, y) assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y)) @pytest.mark.parametrize("seed", range(3)) def test_poisson_zero_nodes(seed): # Test that sum(y)=0 and therefore y_pred=0 is forbidden on nodes. X = [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 2], [1, 2], [1, 3]] y = [0, 0, 0, 0, 1, 2, 3, 4] # Note that X[:, 0] == 0 is a 100% indicator for y == 0. The tree can # easily learn that: reg = DecisionTreeRegressor(criterion="mse", random_state=seed) reg.fit(X, y) assert np.amin(reg.predict(X)) == 0 # whereas Poisson must predict strictly positive numbers reg = DecisionTreeRegressor(criterion="poisson", random_state=seed) reg.fit(X, y) assert np.all(reg.predict(X) > 0) # Test additional dataset where something could go wrong. n_features = 10 X, y = datasets.make_regression( effective_rank=n_features * 2 // 3, tail_strength=0.6, n_samples=1_000, n_features=n_features, n_informative=n_features * 2 // 3, random_state=seed, ) # some excess zeros y[(-1 < y) & (y < 0)] = 0 # make sure the target is positive y = np.abs(y) reg = DecisionTreeRegressor(criterion='poisson', random_state=seed) reg.fit(X, y) assert np.all(reg.predict(X) > 0) def test_poisson_vs_mse(): # For a Poisson distributed target, Poisson loss should give better results # than least squares measured in Poisson deviance as metric. # We have a similar test, test_poisson(), in # sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py # Note: Some fine tuning was needed to have metric_poi < metric_dummy on # the test set! rng = np.random.RandomState(42) n_train, n_test, n_features = 500, 500, 10 X = datasets.make_low_rank_matrix(n_samples=n_train + n_test, n_features=n_features, random_state=rng) # We create a log-linear Poisson model and downscale coef as it will get # exponentiated. 
coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) y = rng.poisson(lam=np.exp(X @ coef)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=n_test, random_state=rng) # We prevent some overfitting by setting min_samples_split=10. tree_poi = DecisionTreeRegressor(criterion="poisson", min_samples_split=10, random_state=rng) tree_mse = DecisionTreeRegressor(criterion="mse", min_samples_split=10, random_state=rng) tree_poi.fit(X_train, y_train) tree_mse.fit(X_train, y_train) dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) for X, y, val in [(X_train, y_train, "train"), (X_test, y_test, "test")]: metric_poi = mean_poisson_deviance(y, tree_poi.predict(X)) # mse might produce non-positive predictions => clip metric_mse = mean_poisson_deviance(y, np.clip(tree_mse.predict(X), 1e-15, None)) metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) # As MSE might correctly predict 0 in train set, its train score can # be better than Poisson. This is no longer the case for the test set. if val == "test": assert metric_poi < metric_mse assert metric_poi < metric_dummy @pytest.mark.parametrize('criterion', REG_CRITERIONS) def test_decision_tree_regressor_sample_weight_consistentcy( criterion): """Test that the impact of sample_weight is consistent.""" tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in ['zeros', 'ones']: check_sample_weights_invariance("DecisionTreeRegressor_" + criterion, tree, kind='zeros') rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[:n_samples//2]], axis=0) y2 = np.concatenate([y, y[:n_samples//2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[:n_samples//2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit( X2, y2, sample_weight=None ) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.1 @pytest.mark.parametrize("TreeEstimator", [DecisionTreeClassifier, DecisionTreeRegressor]) def test_X_idx_sorted_deprecated(TreeEstimator): X_idx_sorted = np.argsort(X, axis=0) tree = TreeEstimator() with pytest.warns(FutureWarning, match="The parameter 'X_idx_sorted' is deprecated"): tree.fit(X, y, X_idx_sorted=X_idx_sorted)
bsd-3-clause
LorhanSohaky/UFSCar
2018/GRAFOS/mst.py
1
1183
import numpy as np import networkx as nx import matplotlib.pyplot as plt parent = dict() rank = dict() def make_set(v): parent[v] = v rank[v] = 0 def find(v): if parent[v] != v: parent[v] = find(parent[v]) return parent[v] def union(v1, v2): root1 = find(v1) root2 = find(v2) if root1 != root2: if rank[root1] > rank[root2]: parent[root2] = root1 else: parent[root1] = root2 if rank[root1] == rank[root2]: rank[root2] += 1 def kruskal(G): A = nx.Graph() for vertice in G.nodes: make_set(vertice) arestas = [(x[2], x[0], x[1]) for x in G.edges.data('weight')] arestas.sort() for aresta in arestas: weight, vertice1, vertice2 = aresta if find(vertice1) != find(vertice2): union(vertice1, vertice2) A.add_edge(vertice1, vertice2, weight=weight) return A def main(): Arestas = np.loadtxt('ha30_dist.txt') G = nx.from_numpy_matrix(Arestas) A = kruskal(G) print(A.edges) nx.draw_spring(A, with_labels=True) plt.savefig("grafo_mst.png") if __name__ == '__main__': main()
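# A minimal usage sketch for the kruskal() function above (not part of the original
# script): a small hand-built weighted graph instead of the ha30_dist.txt matrix.
# The node labels and weights are illustrative assumptions.
def example_kruskal():
    G = nx.Graph()
    G.add_weighted_edges_from([('a', 'b', 4), ('a', 'c', 1), ('b', 'c', 2),
                               ('b', 'd', 5), ('c', 'd', 8)])
    mst = kruskal(G)
    # Expected MST edges: (a, c), (b, c), (b, d), total weight 1 + 2 + 5 = 8
    print(sorted(mst.edges(data='weight')))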
mit
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/matplotlib/tests/test_colors.py
2
24869
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import itertools from distutils.version import LooseVersion as V from nose.tools import assert_raises, assert_equal, assert_true try: # this is not available in nose + py2.6 from nose.tools import assert_sequence_equal except ImportError: assert_sequence_equal = None import numpy as np from numpy.testing.utils import assert_array_equal, assert_array_almost_equal from nose.plugins.skip import SkipTest from matplotlib import cycler import matplotlib import matplotlib.colors as mcolors import matplotlib.cm as cm import matplotlib.colorbar as mcolorbar import matplotlib.cbook as cbook import matplotlib.pyplot as plt from matplotlib.testing.decorators import (image_comparison, cleanup, knownfailureif) def test_resample(): """ Github issue #6025 pointed to incorrect ListedColormap._resample; here we test the method for LinearSegmentedColormap as well. """ n = 101 colorlist = np.empty((n, 4), float) colorlist[:, 0] = np.linspace(0, 1, n) colorlist[:, 1] = 0.2 colorlist[:, 2] = np.linspace(1, 0, n) colorlist[:, 3] = 0.7 lsc = mcolors.LinearSegmentedColormap.from_list('lsc', colorlist) lc = mcolors.ListedColormap(colorlist) lsc3 = lsc._resample(3) lc3 = lc._resample(3) expected = np.array([[0.0, 0.2, 1.0, 0.7], [0.5, 0.2, 0.5, 0.7], [1.0, 0.2, 0.0, 0.7]], float) assert_array_almost_equal(lsc3([0, 0.5, 1]), expected) assert_array_almost_equal(lc3([0, 0.5, 1]), expected) def test_colormap_endian(): """ Github issue #1005: a bug in putmask caused erroneous mapping of 1.0 when input from a non-native-byteorder array. """ cmap = cm.get_cmap("jet") # Test under, over, and invalid along with values 0 and 1. a = [-0.5, 0, 0.5, 1, 1.5, np.nan] for dt in ["f2", "f4", "f8"]: anative = np.ma.masked_invalid(np.array(a, dtype=dt)) aforeign = anative.byteswap().newbyteorder() #print(anative.dtype.isnative, aforeign.dtype.isnative) assert_array_equal(cmap(anative), cmap(aforeign)) def test_BoundaryNorm(): """ Github issue #1258: interpolation was failing with numpy 1.7 pre-release. 
""" boundaries = [0, 1.1, 2.2] vals = [-1, 0, 1, 2, 2.2, 4] # Without interpolation expected = [-1, 0, 0, 1, 2, 2] ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # ncolors != len(boundaries) - 1 triggers interpolation expected = [-1, 0, 0, 2, 3, 3] ncolors = len(boundaries) bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # more boundaries for a third color boundaries = [0, 1, 2, 3] vals = [-1, 0.1, 1.1, 2.2, 4] ncolors = 5 expected = [-1, 0, 2, 4, 5] bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # a scalar as input should not trigger an error and should return a scalar boundaries = [0, 1, 2] vals = [-1, 0.1, 1.1, 2.2] bn = mcolors.BoundaryNorm(boundaries, 2) expected = [-1, 0, 1, 2] for v, ex in zip(vals, expected): ret = bn(v) assert_true(isinstance(ret, six.integer_types)) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # same with interp bn = mcolors.BoundaryNorm(boundaries, 3) expected = [-1, 0, 2, 3] for v, ex in zip(vals, expected): ret = bn(v) assert_true(isinstance(ret, six.integer_types)) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Clipping bn = mcolors.BoundaryNorm(boundaries, 3, clip=True) expected = [0, 0, 2, 2] for v, ex in zip(vals, expected): ret = bn(v) assert_true(isinstance(ret, six.integer_types)) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Masked arrays boundaries = [0, 1.1, 2.2] vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9]) # Without interpolation ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # With interpolation bn = mcolors.BoundaryNorm(boundaries, len(boundaries)) expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # Non-trivial masked arrays vals = np.ma.masked_invalid([np.Inf, np.NaN]) assert_true(np.all(bn(vals).mask)) vals = np.ma.masked_invalid([np.Inf]) assert_true(np.all(bn(vals).mask)) def test_LogNorm(): """ LogNorm ignored clip, now it has the same behavior as Normalize, e.g., values > vmax are bigger than 1 without clip, with clip they are 1. 
""" ln = mcolors.LogNorm(clip=True, vmax=5) assert_array_equal(ln([1, 6]), [0, 1.0]) def test_PowerNorm(): a = np.array([0, 0.5, 1, 1.5], dtype=np.float) pnorm = mcolors.PowerNorm(1) norm = mcolors.Normalize() assert_array_almost_equal(norm(a), pnorm(a)) a = np.array([-0.5, 0, 2, 4, 8], dtype=np.float) expected = [0, 0, 1/16, 1/4, 1] pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8) assert_array_almost_equal(pnorm(a), expected) assert_equal(pnorm(a[0]), expected[0]) assert_equal(pnorm(a[2]), expected[2]) assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:]) # Clip = True a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float) expected = [0, 0, 0, 1, 1] pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True) assert_array_almost_equal(pnorm(a), expected) assert_equal(pnorm(a[0]), expected[0]) assert_equal(pnorm(a[-1]), expected[-1]) # Clip = True at call time a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float) expected = [0, 0, 0, 1, 1] pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False) assert_array_almost_equal(pnorm(a, clip=True), expected) assert_equal(pnorm(a[0], clip=True), expected[0]) assert_equal(pnorm(a[-1], clip=True), expected[-1]) def test_Normalize(): norm = mcolors.Normalize() vals = np.arange(-10, 10, 1, dtype=np.float) _inverse_tester(norm, vals) _scalar_tester(norm, vals) _mask_tester(norm, vals) # Handle integer input correctly (don't overflow when computing max-min, # i.e. 127-(-128) here). vals = np.array([-128, 127], dtype=np.int8) norm = mcolors.Normalize(vals.min(), vals.max()) assert_array_equal(np.asarray(norm(vals)), [0, 1]) # Don't lose precision on longdoubles (float128 on Linux): # for array inputs... vals = np.array([1.2345678901, 9.8765432109], dtype=np.longdouble) norm = mcolors.Normalize(vals.min(), vals.max()) assert_array_equal(np.asarray(norm(vals)), [0, 1]) # and for scalar ones. eps = np.finfo(np.longdouble).resolution norm = plt.Normalize(1, 1 + 100 * eps) # This returns exactly 0.5 when longdouble is extended precision (80-bit), # but only a value close to it when it is quadruple precision (128-bit). assert 0 < norm(1 + 50 * eps) < 1 def test_SymLogNorm(): """ Test SymLogNorm behavior """ norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2) vals = np.array([-30, -1, 2, 6], dtype=np.float) normed_vals = norm(vals) expected = [0., 0.53980074, 0.826991, 1.02758204] assert_array_almost_equal(normed_vals, expected) _inverse_tester(norm, vals) _scalar_tester(norm, vals) _mask_tester(norm, vals) # Ensure that specifying vmin returns the same result as above norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2) normed_vals = norm(vals) assert_array_almost_equal(normed_vals, expected) @cleanup def test_SymLogNorm_colorbar(): """ Test un-called SymLogNorm in a colorbar. """ norm = mcolors.SymLogNorm(0.1, vmin=-1, vmax=1, linscale=1) fig = plt.figure() cbar = mcolorbar.ColorbarBase(fig.add_subplot(111), norm=norm) plt.close(fig) def _inverse_tester(norm_instance, vals): """ Checks if the inverse of the given normalization is working. """ assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals) def _scalar_tester(norm_instance, vals): """ Checks if scalars and arrays are handled the same way. Tests only for float. 
""" scalar_result = [norm_instance(float(v)) for v in vals] assert_array_almost_equal(scalar_result, norm_instance(vals)) def _mask_tester(norm_instance, vals): """ Checks mask handling """ masked_array = np.ma.array(vals) masked_array[0] = np.ma.masked assert_array_equal(masked_array.mask, norm_instance(masked_array).mask) @image_comparison(baseline_images=['levels_and_colors'], extensions=['png']) def test_cmap_and_norm_from_levels_and_colors(): data = np.linspace(-2, 4, 49).reshape(7, 7) levels = [-1, 2, 2.5, 3] colors = ['red', 'green', 'blue', 'yellow', 'black'] extend = 'both' cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend) ax = plt.axes() m = plt.pcolormesh(data, cmap=cmap, norm=norm) plt.colorbar(m) # Hide the axes labels (but not the colorbar ones, as they are useful) for lab in ax.get_xticklabels() + ax.get_yticklabels(): lab.set_visible(False) def test_cmap_and_norm_from_levels_and_colors2(): levels = [-1, 2, 2.5, 3] colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)] clr = mcolors.to_rgba_array(colors) bad = (0.1, 0.1, 0.1, 0.1) no_color = (0.0, 0.0, 0.0, 0.0) masked_value = 'masked_value' # Define the test values which are of interest. # Note: levels are lev[i] <= v < lev[i+1] tests = [('both', None, {-2: clr[0], -1: clr[1], 2: clr[2], 2.25: clr[2], 3: clr[4], 3.5: clr[4], masked_value: bad}), ('min', -1, {-2: clr[0], -1: clr[1], 2: clr[2], 2.25: clr[2], 3: no_color, 3.5: no_color, masked_value: bad}), ('max', -1, {-2: no_color, -1: clr[0], 2: clr[1], 2.25: clr[1], 3: clr[3], 3.5: clr[3], masked_value: bad}), ('neither', -2, {-2: no_color, -1: clr[0], 2: clr[1], 2.25: clr[1], 3: no_color, 3.5: no_color, masked_value: bad}), ] for extend, i1, cases in tests: cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1], extend=extend) cmap.set_bad(bad) for d_val, expected_color in cases.items(): if d_val == masked_value: d_val = np.ma.array([1], mask=True) else: d_val = [d_val] assert_array_equal(expected_color, cmap(norm(d_val))[0], 'Wih extend={0!r} and data ' 'value={1!r}'.format(extend, d_val)) assert_raises(ValueError, mcolors.from_levels_and_colors, levels, colors) def test_rgb_hsv_round_trip(): for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]: np.random.seed(0) tt = np.random.random(a_shape) assert_array_almost_equal(tt, mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt))) assert_array_almost_equal(tt, mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt))) @cleanup def test_autoscale_masked(): # Test for #2336. Previously fully masked data would trigger a ValueError. data = np.ma.masked_all((12, 20)) plt.pcolor(data) plt.draw() def test_colors_no_float(): # Gray must be a string to distinguish 3-4 grays from RGB or RGBA. 
def gray_from_float_rgba(): return mcolors.to_rgba(0.4) assert_raises(ValueError, gray_from_float_rgba) @image_comparison(baseline_images=['light_source_shading_topo'], extensions=['png']) def test_light_source_topo_surface(): """Shades a DEM using different v.e.'s and blend modes.""" fname = cbook.get_sample_data('jacksboro_fault_dem.npz', asfileobj=False) dem = np.load(fname) elev = dem['elevation'] # Get the true cellsize in meters for accurate vertical exaggeration # Convert from decimal degrees to meters dx, dy = dem['dx'], dem['dy'] dx = 111320.0 * dx * np.cos(dem['ymin']) dy = 111320.0 * dy dem.close() ls = mcolors.LightSource(315, 45) cmap = cm.gist_earth fig, axes = plt.subplots(nrows=3, ncols=3) for row, mode in zip(axes, ['hsv', 'overlay', 'soft']): for ax, ve in zip(row, [0.1, 1, 10]): rgb = ls.shade(elev, cmap, vert_exag=ve, dx=dx, dy=dy, blend_mode=mode) ax.imshow(rgb) ax.set(xticks=[], yticks=[]) def test_light_source_shading_default(): """Array comparison test for the default "hsv" blend mode. Ensure the default result doesn't change without warning.""" y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j] z = 10 * np.cos(x**2 + y**2) cmap = plt.cm.copper ls = mcolors.LightSource(315, 45) rgb = ls.shade(z, cmap) # Result stored transposed and rounded for for more compact display... expect = np.array( [[[0.00, 0.45, 0.90, 0.90, 0.82, 0.62, 0.28, 0.00], [0.45, 0.94, 0.99, 1.00, 1.00, 0.96, 0.65, 0.17], [0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.94, 0.35], [0.90, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.49], [0.82, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.41], [0.62, 0.96, 1.00, 1.00, 1.00, 1.00, 0.90, 0.07], [0.28, 0.65, 0.94, 1.00, 1.00, 0.90, 0.35, 0.01], [0.00, 0.17, 0.35, 0.49, 0.41, 0.07, 0.01, 0.00]], [[0.00, 0.28, 0.59, 0.72, 0.62, 0.40, 0.18, 0.00], [0.28, 0.78, 0.93, 0.92, 0.83, 0.66, 0.39, 0.11], [0.59, 0.93, 0.99, 1.00, 0.92, 0.75, 0.50, 0.21], [0.72, 0.92, 1.00, 0.99, 0.93, 0.76, 0.51, 0.18], [0.62, 0.83, 0.92, 0.93, 0.87, 0.68, 0.42, 0.08], [0.40, 0.66, 0.75, 0.76, 0.68, 0.52, 0.23, 0.02], [0.18, 0.39, 0.50, 0.51, 0.42, 0.23, 0.00, 0.00], [0.00, 0.11, 0.21, 0.18, 0.08, 0.02, 0.00, 0.00]], [[0.00, 0.18, 0.38, 0.46, 0.39, 0.26, 0.11, 0.00], [0.18, 0.50, 0.70, 0.75, 0.64, 0.44, 0.25, 0.07], [0.38, 0.70, 0.91, 0.98, 0.81, 0.51, 0.29, 0.13], [0.46, 0.75, 0.98, 0.96, 0.84, 0.48, 0.22, 0.12], [0.39, 0.64, 0.81, 0.84, 0.71, 0.31, 0.11, 0.05], [0.26, 0.44, 0.51, 0.48, 0.31, 0.10, 0.03, 0.01], [0.11, 0.25, 0.29, 0.22, 0.11, 0.03, 0.00, 0.00], [0.00, 0.07, 0.13, 0.12, 0.05, 0.01, 0.00, 0.00]], [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]] ]).T if (V(np.__version__) == V('1.9.0')): # Numpy 1.9.0 uses a 2. order algorithm on the edges by default # This was changed back again in 1.9.1 expect = expect[1:-1, 1:-1, :] rgb = rgb[1:-1, 1:-1, :] assert_array_almost_equal(rgb, expect, decimal=2) @knownfailureif((V(np.__version__) <= V('1.9.0') and V(np.__version__) >= V('1.7.0'))) # Numpy 1.9.1 fixed a bug in masked arrays which resulted in # additional elements being masked when calculating the gradient thus # the output is different with earlier numpy versions. 
def test_light_source_masked_shading(): """Array comparison test for a surface with a masked portion. Ensures that we don't wind up with "fringes" of odd colors around masked regions.""" y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j] z = 10 * np.cos(x**2 + y**2) z = np.ma.masked_greater(z, 9.9) cmap = plt.cm.copper ls = mcolors.LightSource(315, 45) rgb = ls.shade(z, cmap) # Result stored transposed and rounded for for more compact display... expect = np.array( [[[0.00, 0.46, 0.91, 0.91, 0.84, 0.64, 0.29, 0.00], [0.46, 0.96, 1.00, 1.00, 1.00, 0.97, 0.67, 0.18], [0.91, 1.00, 1.00, 1.00, 1.00, 1.00, 0.96, 0.36], [0.91, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.51], [0.84, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.44], [0.64, 0.97, 1.00, 1.00, 1.00, 1.00, 0.94, 0.09], [0.29, 0.67, 0.96, 1.00, 1.00, 0.94, 0.38, 0.01], [0.00, 0.18, 0.36, 0.51, 0.44, 0.09, 0.01, 0.00]], [[0.00, 0.29, 0.61, 0.75, 0.64, 0.41, 0.18, 0.00], [0.29, 0.81, 0.95, 0.93, 0.85, 0.68, 0.40, 0.11], [0.61, 0.95, 1.00, 0.78, 0.78, 0.77, 0.52, 0.22], [0.75, 0.93, 0.78, 0.00, 0.00, 0.78, 0.54, 0.19], [0.64, 0.85, 0.78, 0.00, 0.00, 0.78, 0.45, 0.08], [0.41, 0.68, 0.77, 0.78, 0.78, 0.55, 0.25, 0.02], [0.18, 0.40, 0.52, 0.54, 0.45, 0.25, 0.00, 0.00], [0.00, 0.11, 0.22, 0.19, 0.08, 0.02, 0.00, 0.00]], [[0.00, 0.19, 0.39, 0.48, 0.41, 0.26, 0.12, 0.00], [0.19, 0.52, 0.73, 0.78, 0.66, 0.46, 0.26, 0.07], [0.39, 0.73, 0.95, 0.50, 0.50, 0.53, 0.30, 0.14], [0.48, 0.78, 0.50, 0.00, 0.00, 0.50, 0.23, 0.12], [0.41, 0.66, 0.50, 0.00, 0.00, 0.50, 0.11, 0.05], [0.26, 0.46, 0.53, 0.50, 0.50, 0.11, 0.03, 0.01], [0.12, 0.26, 0.30, 0.23, 0.11, 0.03, 0.00, 0.00], [0.00, 0.07, 0.14, 0.12, 0.05, 0.01, 0.00, 0.00]], [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00], [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]], ]).T assert_array_almost_equal(rgb, expect, decimal=2) def test_light_source_hillshading(): """Compare the current hillshading method against one that should be mathematically equivalent. Illuminates a cone from a range of angles.""" def alternative_hillshade(azimuth, elev, z): illum = _sph2cart(*_azimuth2math(azimuth, elev)) illum = np.array(illum) dy, dx = np.gradient(-z) dy = -dy dz = np.ones_like(dy) normals = np.dstack([dx, dy, dz]) dividers = np.zeros_like(z)[..., None] for i, mat in enumerate(normals): for j, vec in enumerate(mat): dividers[i, j, 0] = np.linalg.norm(vec) normals /= dividers # once we drop support for numpy 1.7.x the above can be written as # normals /= np.linalg.norm(normals, axis=2)[..., None] # aviding the double loop. 
intensity = np.tensordot(normals, illum, axes=(2, 0)) intensity -= intensity.min() intensity /= intensity.ptp() return intensity y, x = np.mgrid[5:0:-1, :5] z = -np.hypot(x - x.mean(), y - y.mean()) for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)): ls = mcolors.LightSource(az, elev) h1 = ls.hillshade(z) h2 = alternative_hillshade(az, elev, z) assert_array_almost_equal(h1, h2) def test_light_source_planar_hillshading(): """Ensure that the illumination intensity is correct for planar surfaces.""" def plane(azimuth, elevation, x, y): """Create a plane whose normal vector is at the given azimuth and elevation.""" theta, phi = _azimuth2math(azimuth, elevation) a, b, c = _sph2cart(theta, phi) z = -(a*x + b*y) / c return z def angled_plane(azimuth, elevation, angle, x, y): """Create a plane whose normal vector is at an angle from the given azimuth and elevation.""" elevation = elevation + angle if elevation > 90: azimuth = (azimuth + 180) % 360 elevation = (90 - elevation) % 90 return plane(azimuth, elevation, x, y) y, x = np.mgrid[5:0:-1, :5] for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)): ls = mcolors.LightSource(az, elev) # Make a plane at a range of angles to the illumination for angle in range(0, 105, 15): z = angled_plane(az, elev, angle, x, y) h = ls.hillshade(z) assert_array_almost_equal(h, np.cos(np.radians(angle))) def test_color_names(): assert mcolors.to_hex("blue") == "#0000ff" assert mcolors.to_hex("xkcd:blue") == "#0343df" assert mcolors.to_hex("tab:blue") == "#1f77b4" def _sph2cart(theta, phi): x = np.cos(theta) * np.sin(phi) y = np.sin(theta) * np.sin(phi) z = np.cos(phi) return x, y, z def _azimuth2math(azimuth, elevation): """Converts from clockwise-from-north and up-from-horizontal to mathematical conventions.""" theta = np.radians((90 - azimuth) % 360) phi = np.radians(90 - elevation) return theta, phi def test_pandas_iterable(): try: import pandas as pd except ImportError: raise SkipTest("Pandas not installed") if assert_sequence_equal is None: raise SkipTest("nose lacks required function") # Using a list or series yields equivalent # color maps, i.e the series isn't seen as # a single color lst = ['red', 'blue', 'green'] s = pd.Series(lst) cm1 = mcolors.ListedColormap(lst, N=5) cm2 = mcolors.ListedColormap(s, N=5) assert_sequence_equal(cm1.colors, cm2.colors) @cleanup def test_cn(): matplotlib.rcParams['axes.prop_cycle'] = cycler('color', ['blue', 'r']) assert mcolors.to_hex("C0") == '#0000ff' assert mcolors.to_hex("C1") == '#ff0000' matplotlib.rcParams['axes.prop_cycle'] = cycler('color', ['xkcd:blue', 'r']) assert mcolors.to_hex("C0") == '#0343df' assert mcolors.to_hex("C1") == '#ff0000' matplotlib.rcParams['axes.prop_cycle'] = cycler('color', ['8e4585', 'r']) assert mcolors.to_hex("C0") == '#8e4585' # if '8e4585' gets parsed as a float before it gets detected as a hex # colour it will be interpreted as a very large number. # this mustn't happen. assert mcolors.to_rgb("C0")[0] != np.inf def test_conversions(): # to_rgba_array("none") returns a (0, 4) array. assert_array_equal(mcolors.to_rgba_array("none"), np.zeros((0, 4))) # a list of grayscale levels, not a single color. assert_array_equal( mcolors.to_rgba_array([".2", ".5", ".8"]), np.vstack([mcolors.to_rgba(c) for c in [".2", ".5", ".8"]])) # alpha is properly set. assert_equal(mcolors.to_rgba((1, 1, 1), .5), (1, 1, 1, .5)) assert_equal(mcolors.to_rgba(".1", .5), (.1, .1, .1, .5)) # builtin round differs between py2 and py3. 
assert_equal(mcolors.to_hex((.7, .7, .7)), "#b2b2b2") # hex roundtrip. hex_color = "#1234abcd" assert_equal(mcolors.to_hex(mcolors.to_rgba(hex_color), keep_alpha=True), hex_color) def test_grey_gray(): color_mapping = mcolors._colors_full_map for k in color_mapping.keys(): if 'grey' in k: assert color_mapping[k] == color_mapping[k.replace('grey', 'gray')] if 'gray' in k: assert color_mapping[k] == color_mapping[k.replace('gray', 'grey')] def test_tableau_order(): dflt_cycle = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] assert list(mcolors.TABLEAU_COLORS.values()) == dflt_cycle if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
mit
willowj/python_dataEE
plot/kline_candle.py
2
3300
# -*- coding: utf-8 -*- # 2017/09/16 # author : willow_j # email : [email protected] from __future__ import unicode_literals from __future__ import print_function from pyecharts import Kline, Bar, Line, Grid, Overlap def kline_js(name, df, prices_cols=None, ma=('ma10',), width=1600, height=750, kline_xaxis_pos='top', render_path=None): ''' @params: - name: str # legend name - df: pandas.DataFrame # columns must include prices_cols and 'volume' - prices_cols : list # defaults to [u'open', u'close', u'low', u'high'] - ma=('ma10',): list or tuple # moving-average periods - width=1600, height=750 # default figure size - kline_xaxis_pos='top' # the k-line legend defaults to the top - render_path: str # html file path to save ''' if not prices_cols: prices_cols = [u'open', u'close', u'low', u'high'] if not set(prices_cols+['volume']).issubset(set(df.columns)): raise AttributeError("%s or 'volume' not in columns" % str(prices_cols)) kline = Kline(name, width=width, height=height) kline.add('k-candle', df.index.format(), df[prices_cols].values.tolist(), is_datazoom_show=True, datazoom_xaxis_index=[0, 1], xaxis_pos=kline_xaxis_pos, is_xaxislabel_align=True, ) # volume if not 'price_change' in df.columns: df['price_change'] = df[prices_cols[1]].diff() ups = df.where(df.price_change > 0, 0)['volume'] downs = df.where(~(df.price_change > 0), 0)['volume'] bar = Bar() bar.add('up', x_axis=ups.index.format(), y_axis=ups.values.tolist(), is_datazoom_show=True, legend_top="70%", is_stack=True, is_xaxislabel_align=True, ) bar.add('down', x_axis=downs.index.format(), y_axis=downs.values.tolist(), is_datazoom_show=True, is_stack=True, legend_top="70%", legend_orient='vertical', legend_pos='left', yaxis_pos='right', is_xaxislabel_align=True, # mark_line=["average"], ) # merge grid1 = Grid() grid1.add(kline, grid_bottom="18%") grid1.add(bar, grid_top="75%") # add ma Line_draw = False for ma_ in ma: if ma_ in df.columns: if Line_draw is False: Line_draw = True line = Line() line.add(ma_, df.index.format(), df[ma_].values.tolist()) if Line_draw: overlap = Overlap() overlap.add(kline) # overlap kline and ma overlap.add(line) if render_path: grid1.render(render_path) return grid1 # .render('k-line.html') if __name__ == '__main__': import tushare as ts name = 'hs300' period = '5' df = ts.get_hist_data(name, ktype=period) df.sort_index(inplace=True) if period.isdigit(): period += 'min' kline_js('%s_kline_%s' % (name, period), df, ma=['ma10', 'ma20'], width=900, height=600, render_path='%s_kline_%s.html' % (name, period) )
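# A minimal usage sketch for kline_js() (not part of the original module), driven by a
# synthetic OHLC DataFrame instead of tushare data; all column values are illustrative.
def example_kline_js():
    import numpy as np
    import pandas as pd
    idx = pd.date_range('2017-09-01', periods=30, freq='D')
    close = 10 + np.cumsum(np.random.randn(30))
    df = pd.DataFrame({'open': close + 0.1, 'close': close,
                       'low': close - 0.5, 'high': close + 0.5,
                       'volume': np.random.randint(1000, 5000, size=30)},
                      index=idx)
    df['ma10'] = df['close'].rolling(10, min_periods=1).mean()
    # returns a pyecharts Grid; render_path writes a standalone HTML file
    return kline_js('demo', df, ma=['ma10'], width=900, height=600,
                    render_path='demo_kline.html')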
mit
AlexanderFabisch/OpenANN
benchmarks/sarcos/benchmark.py
5
5237
## \page SARCOSBenchmark SARCOS Inverse Dynamics Problem # # The SARCOS dataset is taken from # <a href="http://www.gaussianprocess.org/gpml/data/" target=_blank>this</a> # website. This is an inverse dynamics problem, i.e. we have to predict the # 7 joint torques given the joint positions, velocities and accelerations. # Hence, we have to solve a regression problem with 21 inputs and 7 outputs # and a very nonlinear function. # # The optimization problem is very hard. Underfitting is a much bigger # problem than overfitting. For this reason, we need a very big network # that has four hidden layers with 200 nodes each. The deep architecture # makes the optimization problem very hard but it is more efficient than a # shallow network. However, we can do two things to increase the optimization # speed drastically: we use a non-saturating activation function (rectified # linear units) and mini-batch stochastic gradient descent. # # You can start the benchmark with the script: # \verbatim # python benchmark.py [download] [run] # \endverbatim # Note that you need SciPy to load the dataset and matplotlib to display some # results. # # The output will look like # \verbatim # Dimension 1: nMSE = 0.938668% (training) / 0.903342% (validation) # Dimension 2: nMSE = 0.679012% (training) / 0.647091% (validation) # Dimension 3: nMSE = 0.453497% (training) / 0.442720% (validation) # Dimension 4: nMSE = 0.242476% (training) / 0.240360% (validation) # Dimension 5: nMSE = 1.010049% (training) / 1.044068% (validation) # Dimension 6: nMSE = 0.851110% (training) / 0.796895% (validation) # Dimension 7: nMSE = 0.474232% (training) / 0.465929% (validation) # \endverbatim # You see the normalized mean squared error (nMSE) for each output dimension # on the training set and the test set. The nMSE is the mean squared error # divided by the variance of the corresponding output dimension. In addition, # a plot that compares the actual and the predicted output of one dimension # will occur. 
import os import sys import urllib try: import scipy.io except: print("SciPy is required for this benchmark.") exit(1) try: from openann import * except: print("OpenANN Python bindings are not installed!") exit(1) FILES = ["sarcos_inv.mat", "sarcos_inv_test.mat"] URLS = ["http://www.gaussianprocess.org/gpml/data/%s" % f for f in FILES] def print_usage(): print("Usage:") print(" python benchmark [download] [run]") def download_sarcos(): if all(os.path.exists(f) for f in FILES): print("Download is not required.") return for i in range(len(URLS)): print("Downloading %s" % URLS[i]) downloader = urllib.urlopen(URLS[i]) with open(FILES[i], "wb") as out: while True: data = downloader.read(1024) if len(data) == 0: break out.write(data) def run_sarcos(): print("Loading dataset...") a = scipy.io.loadmat("sarcos_inv.mat") X = a["sarcos_inv"][:, :21] Y = a["sarcos_inv"][:, 21:] b = scipy.io.loadmat("sarcos_inv_test.mat") Xtest = b["sarcos_inv_test"][:, :21] Ytest = b["sarcos_inv_test"][:, 21:] print("Starting benchmark, this will take some minutes...") # Normalize data n = Normalization() X = n.fit(X).transform(X) Xtest = n.transform(Xtest) Y = n.fit(Y).transform(Y) Ytest = n.transform(Ytest) training_set = DataSet(X, Y) validation_set = DataSet(Xtest, Ytest) D = X.shape[1] F = Y.shape[1] net = Net() net.input_layer(D) net.fully_connected_layer(400, Activation.RECTIFIER) net.fully_connected_layer(200, Activation.RECTIFIER) net.fully_connected_layer(200, Activation.RECTIFIER) net.output_layer(F, Activation.LINEAR) stop_dict = {"maximal_iterations" : 100} opt = MBSGD(stop_dict, learning_rate=0.2, learning_rate_decay=0.9999, min_learning_rate=0.001, momentum=0.5, batch_size=128, nesterov=False) opt.optimize(net, training_set) pred = net.predict(X) pred_test = net.predict(Xtest) var = Y.var(axis=0) # in case we do not normalize the outputs for f in range(F): nMSE_train = sum((Y[:, f] - pred[:, f])**2) / len(Y) / var[f] nMSE_test = sum((Ytest[:, f] - pred_test[:, f])**2) / len(Ytest) / var[f] print("Dimension %d: nMSE = %f%% (training) / %f%% (validation)" % (f+1, nMSE_train*100, nMSE_test*100)) try: import pylab except: print("Cannot plot the result. Matplotlib is not available.") exit(1) dim = 0 n_samples = 200 pylab.plot(Ytest[:n_samples, dim], label="Actual") pylab.plot(net.predict(Xtest[:n_samples])[:, dim], label="Predicted") pylab.legend(loc="best") pylab.title("Output of %d samples from dimension %d (validation set)" % (n_samples, dim+1)) pylab.show() if __name__ == "__main__": if len(sys.argv) == 1: print_usage() for command in sys.argv[1:]: if command == "download": download_sarcos() elif command == "run": run_sarcos() else: print_usage() exit(1)
gpl-3.0
mlyundin/scikit-learn
sklearn/datasets/tests/test_mldata.py
384
5221
"""Test functionality of mldata fetching utilities.""" import os import shutil import tempfile import scipy as sp from sklearn import datasets from sklearn.datasets import mldata_filename, fetch_mldata from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_not_in from sklearn.utils.testing import mock_mldata_urlopen from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import with_setup from sklearn.utils.testing import assert_array_equal tmpdir = None def setup_tmpdata(): # create temporary dir global tmpdir tmpdir = tempfile.mkdtemp() os.makedirs(os.path.join(tmpdir, 'mldata')) def teardown_tmpdata(): # remove temporary dir if tmpdir is not None: shutil.rmtree(tmpdir) def test_mldata_filename(): cases = [('datasets-UCI iris', 'datasets-uci-iris'), ('news20.binary', 'news20binary'), ('book-crossing-ratings-1.0', 'book-crossing-ratings-10'), ('Nile Water Level', 'nile-water-level'), ('MNIST (original)', 'mnist-original')] for name, desired in cases: assert_equal(mldata_filename(name), desired) @with_setup(setup_tmpdata, teardown_tmpdata) def test_download(): """Test that fetch_mldata is able to download and cache a data set.""" _urlopen_ref = datasets.mldata.urlopen datasets.mldata.urlopen = mock_mldata_urlopen({ 'mock': { 'label': sp.ones((150,)), 'data': sp.ones((150, 4)), }, }) try: mock = fetch_mldata('mock', data_home=tmpdir) for n in ["COL_NAMES", "DESCR", "target", "data"]: assert_in(n, mock) assert_equal(mock.target.shape, (150,)) assert_equal(mock.data.shape, (150, 4)) assert_raises(datasets.mldata.HTTPError, fetch_mldata, 'not_existing_name') finally: datasets.mldata.urlopen = _urlopen_ref @with_setup(setup_tmpdata, teardown_tmpdata) def test_fetch_one_column(): _urlopen_ref = datasets.mldata.urlopen try: dataname = 'onecol' # create fake data set in cache x = sp.arange(6).reshape(2, 3) datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}}) dset = fetch_mldata(dataname, data_home=tmpdir) for n in ["COL_NAMES", "DESCR", "data"]: assert_in(n, dset) assert_not_in("target", dset) assert_equal(dset.data.shape, (2, 3)) assert_array_equal(dset.data, x) # transposing the data array dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir) assert_equal(dset.data.shape, (3, 2)) finally: datasets.mldata.urlopen = _urlopen_ref @with_setup(setup_tmpdata, teardown_tmpdata) def test_fetch_multiple_column(): _urlopen_ref = datasets.mldata.urlopen try: # create fake data set in cache x = sp.arange(6).reshape(2, 3) y = sp.array([1, -1]) z = sp.arange(12).reshape(4, 3) # by default dataname = 'threecol-default' datasets.mldata.urlopen = mock_mldata_urlopen({ dataname: ( { 'label': y, 'data': x, 'z': z, }, ['z', 'data', 'label'], ), }) dset = fetch_mldata(dataname, data_home=tmpdir) for n in ["COL_NAMES", "DESCR", "target", "data", "z"]: assert_in(n, dset) assert_not_in("x", dset) assert_not_in("y", dset) assert_array_equal(dset.data, x) assert_array_equal(dset.target, y) assert_array_equal(dset.z, z.T) # by order dataname = 'threecol-order' datasets.mldata.urlopen = mock_mldata_urlopen({ dataname: ({'y': y, 'x': x, 'z': z}, ['y', 'x', 'z']), }) dset = fetch_mldata(dataname, data_home=tmpdir) for n in ["COL_NAMES", "DESCR", "target", "data", "z"]: assert_in(n, dset) assert_not_in("x", dset) assert_not_in("y", dset) assert_array_equal(dset.data, x) assert_array_equal(dset.target, y) assert_array_equal(dset.z, z.T) # by number dataname = 'threecol-number' datasets.mldata.urlopen = 
mock_mldata_urlopen({ dataname: ({'y': y, 'x': x, 'z': z}, ['z', 'x', 'y']), }) dset = fetch_mldata(dataname, target_name=2, data_name=0, data_home=tmpdir) for n in ["COL_NAMES", "DESCR", "target", "data", "x"]: assert_in(n, dset) assert_not_in("y", dset) assert_not_in("z", dset) assert_array_equal(dset.data, z) assert_array_equal(dset.target, y) # by name dset = fetch_mldata(dataname, target_name='y', data_name='z', data_home=tmpdir) for n in ["COL_NAMES", "DESCR", "target", "data", "x"]: assert_in(n, dset) assert_not_in("y", dset) assert_not_in("z", dset) finally: datasets.mldata.urlopen = _urlopen_ref
bsd-3-clause
maxlikely/scikit-learn
examples/ensemble/plot_partial_dependence.py
4
4436
""" ======================== Partial Dependence Plots ======================== Partial dependence plots show the dependence between the target function [1]_ and a set of 'target' features, marginalizing over the values of all other features (the complement features). Due to the limits of human perception the size of the target feature set must be small (usually, one or two) thus the target features are usually chosen among the most important features (see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`). This example shows how to obtain partial dependence plots from a :class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California housing dataset. The example is taken from [HTF2009]_. The plot shows four one-way and one two-way partial dependence plots. The target variables for the one-way PDP are: median income (`MedInc`), avg. occupants per household (`AvgOccup`), median house age (`HouseAge`), and avg. rooms per household (`AveRooms`). We can clearly see that the median house price shows a linear relationship with the median income (top left) and that the house price drops when the avg. occupants per household increases (top middle). The top right plot shows that the house age in a district does not have a strong influence on the (median) house price; so does the average rooms per household. The tick marks on the x-axis represent the deciles of the feature values in the trainig data. Partial dependence plots with two target features enable us to visualize interactions among them. The two-way partial dependence plot shows the dependence of median house price on joint values of house age and avg. occupants per household. We can clearly see an interaction between the two features: For an avg. occupancy greather than two, the house price is nearly independent of the house age, whereas for values less than two there is a strong dependence on age. .. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. .. [1] For classification you can think of it as the regression score before the link function. 
""" print(__doc__) import numpy as np import pylab as pl from mpl_toolkits.mplot3d import Axes3D from sklearn.cross_validation import train_test_split from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.datasets.california_housing import fetch_california_housing # fetch California housing dataset cal_housing = fetch_california_housing() # split 80/20 train-test X_train, X_test, y_train, y_test = train_test_split(cal_housing.data, cal_housing.target, test_size=0.2, random_state=1) names = cal_housing.feature_names print('_' * 80) print("Training GBRT...") clf = GradientBoostingRegressor(n_estimators=100, max_depth=4, learning_rate=0.1, loss='huber', random_state=1) clf.fit(X_train, y_train) print("done.") print('_' * 80) print('Convenience plot with ``partial_dependence_plots``') print features = [0, 5, 1, 2, (5, 1)] fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names, n_jobs=3, grid_resolution=50) fig.suptitle('Partial dependence of house value on nonlocation features\n' 'for the California housing dataset') pl.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle print('_' * 80) print('Custom 3d plot via ``partial_dependence``') print fig = pl.figure() target_feature = (1, 5) pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature, X=X_train, grid_resolution=50) XX, YY = np.meshgrid(x_axis, y_axis) Z = pdp.T.reshape(XX.shape).T ax = Axes3D(fig) surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=pl.cm.BuPu) ax.set_xlabel(names[target_feature[0]]) ax.set_ylabel(names[target_feature[1]]) ax.set_zlabel('Partial dependence') # pretty init view ax.view_init(elev=22, azim=122) pl.colorbar(surf) pl.suptitle('Partial dependence of house value on median age and ' 'average occupancy') pl.subplots_adjust(top=0.9) pl.show()
bsd-3-clause
Srisai85/scikit-learn
doc/datasets/mldata_fixture.py
367
1183
"""Fixture module to skip the datasets loading when offline Mock urllib2 access to mldata.org and create a temporary data folder. """ from os import makedirs from os.path import join import numpy as np import tempfile import shutil from sklearn import datasets from sklearn.utils.testing import install_mldata_mock from sklearn.utils.testing import uninstall_mldata_mock def globs(globs): # Create a temporary folder for the data fetcher global custom_data_home custom_data_home = tempfile.mkdtemp() makedirs(join(custom_data_home, 'mldata')) globs['custom_data_home'] = custom_data_home return globs def setup_module(): # setup mock urllib2 module to avoid downloading from mldata.org install_mldata_mock({ 'mnist-original': { 'data': np.empty((70000, 784)), 'label': np.repeat(np.arange(10, dtype='d'), 7000), }, 'iris': { 'data': np.empty((150, 4)), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, }) def teardown_module(): uninstall_mldata_mock() shutil.rmtree(custom_data_home)
bsd-3-clause
TinghuiWang/pyActLearn
pyActLearn/sensors/sensor2vec.py
1
11535
import math import numpy as np import tensorflow as tf from ..learning.nn.injectors import SkipGramInjector def sensor2vec(num_sensors, sensor_event_list, embedding_size=20, batch_size=128, num_skips=8, skip_window=5, num_neg_samples=64, learning_rate=1.0): """Sensor to Vector """ if num_neg_samples > num_sensors: num_neg_samples = num_sensors # Initialize a SkipGram Injector injector = SkipGramInjector(sensor_event_list, batch_size, num_skips, skip_window) # Build Training Model graph = tf.Graph() with graph.as_default(): # Input Place Holder train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) # As we normally do not have too many sensors - it is OK to use all of them valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32) # Only CPU supports NCE loss with tf.device('/cpu:0'): # Look up embeddings for inputs. embeddings = tf.Variable( tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss nce_weights = tf.Variable( tf.truncated_normal([num_sensors, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) nce_biases = tf.Variable(tf.zeros([num_sensors])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. loss = tf.reduce_mean( tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_neg_samples, num_classes=num_sensors)) # Construct the Optimizer optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) # Compute the cosine similarity between minibatch examples and all embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Add variable initializer. init = tf.initialize_all_variables() # Begin training. num_steps = 100001 with tf.Session(graph=graph) as session: # We must initialize all variables before we use them. init.run() print("Initialized") average_loss = 0 for step in range(num_steps): batch_inputs, batch_labels = injector.next_batch() feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict) average_loss += loss_val if step % 2000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 batches. print("Average loss at step ", step, ": ", average_loss) average_loss = 0 final_embeddings = normalized_embeddings.eval() final_similarity = 1 - similarity.eval() distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None] return final_embeddings, distance_matrix def sensor2vec_data(sensor_list, event_list, embedding_size=20, batch_size=128, num_skips=8, skip_window=5, num_neg_samples=64, learning_rate=1.0, ignore_off=True): """Transform sensor to high dimensional space Similar to word embedding used in natural language processing system, we want to represent sensors using in a synthesized vector space as well, instead of using an arbitrary labels for each sensors without any useful information. 
The methods used to find word embeddings can be classified into two categories: count-based methods (Latent Semantic Analysis) and predictive models. In this implementation for mapping sensors into a high-dimensional vector space, we use a skip-gram negative-sampling model. Args: sensor_list (:obj:`list` of :obj:`dict`): List of dictionaries containing sensor information. event_list (:obj:`list` of :obj:`dict`): List of events. embedding_size (:obj:`int`): The size of the embedding vector. batch_size (:obj:`int`): The batch size used in training. num_skips (:obj:`int`): How many times to re-use an input to generate a label in the skip-gram model. skip_window (:obj:`int`): How many items to consider left or right in the skip-gram model. num_neg_samples (:obj:`int`): Number of negative samples to draw from the vocabulary. ignore_off (:obj:`bool`): Ignore motion-sensor events with ``Off`` state in the event list. Please refer to :func:`sensor_distance` for an example of ``sensor_list``. Please refer to :func:`sensor_mi_distance` for an example of ``event_list``. """ # Put sensor in hash table for fast fetch of index num_sensors = len(sensor_list) # Negative samples cannot exceed sensor numbers if num_neg_samples > num_sensors: num_neg_samples = num_sensors # Store sensor ID in hash table for faster access sensor_dict = {} for i in range(num_sensors): sensor_dict[sensor_list[i]['name']] = i # Generate event sensor list event_sensor_list = [] for event_entry in event_list: if ignore_off and event_entry['sensor_status'].upper() == "OFF": continue event_sensor_list.append(sensor_dict[event_entry['sensor_id']]) # Initialize a SkipGram Injector injector = SkipGramInjector(event_sensor_list, batch_size, num_skips, skip_window) # Build Training Model graph = tf.Graph() with graph.as_default(): # Input Place Holder train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) # As we normally do not have too many sensors - it is OK to use all of them valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32) # Only CPU supports NCE loss with tf.device('/cpu:0'): # Look up embeddings for inputs. embeddings = tf.Variable( tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss nce_weights = tf.Variable( tf.truncated_normal([num_sensors, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) nce_biases = tf.Variable(tf.zeros([num_sensors])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. loss = tf.reduce_mean( tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_neg_samples, num_classes=num_sensors)) # Construct the Optimizer optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) # Compute the cosine similarity between minibatch examples and all embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Add variable initializer. init = tf.initialize_all_variables() # Begin training. num_steps = 100001 with tf.Session(graph=graph) as session: # We must initialize all variables before we use them. 
init.run() print("Initialized") average_loss = 0 for step in range(num_steps): batch_inputs, batch_labels = injector.next_batch() feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict) average_loss += loss_val if step % 2000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 batches. print("Average loss at step ", step, ": ", average_loss) average_loss = 0 # Note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in range(num_sensors): valid_sensor = sensor_list[i]['name'] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = "Nearest to %s:" % valid_sensor for k in range(top_k): close_sensor = sensor_list[nearest[k]]['name'] log_str = "%s %s," % (log_str, close_sensor) print(log_str) final_embeddings = normalized_embeddings.eval() final_similarity = 1 - similarity.eval() distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:,None] # try: # from sklearn.manifold import TSNE # import matplotlib.pyplot as plt # # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) # low_dim_embs = tsne.fit_transform(final_embeddings) # labels = [sensor_list[i]['name'] for i in range(num_sensors)] # # assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings" # plt.figure(figsize=(18, 18)) # in inches # for i, label in enumerate(labels): # x, y = low_dim_embs[i, :] # plt.scatter(x, y) # plt.annotate(label, # xy=(x, y), # xytext=(5, 2), # textcoords='offset points', # ha='right', # va='bottom') # plt.show() # except ImportError: # print("Please install sklearn, matplotlib, and scipy to visualize embeddings.") return final_embeddings, distance_matrix
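# A minimal input-construction sketch for sensor2vec_data() (not part of the original
# module). The sensor names and event fields below are illustrative; they only need to
# provide the keys the function actually reads: 'name', 'sensor_id', 'sensor_status'.
def example_sensor2vec_data():
    sensor_list = [{'name': 'M001'}, {'name': 'M002'}, {'name': 'D001'}]
    event_list = [
        {'sensor_id': 'M001', 'sensor_status': 'ON'},
        {'sensor_id': 'M002', 'sensor_status': 'ON'},
        {'sensor_id': 'M002', 'sensor_status': 'OFF'},  # dropped when ignore_off=True
        {'sensor_id': 'D001', 'sensor_status': 'ON'},
    ] * 200  # repeat so the skip-gram injector has enough events to sample from
    # Note: the hard-coded training loop runs 100001 steps, so even this toy call trains
    # for a while; embeddings has shape (num_sensors, embedding_size) and distance_matrix
    # holds row-normalized cosine distances between sensors.
    embeddings, distance_matrix = sensor2vec_data(sensor_list, event_list,
                                                  embedding_size=4, batch_size=16,
                                                  num_skips=2, skip_window=1)
    return embeddings, distance_matrix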
bsd-3-clause
afloren/nipype
build_docs.py
15
6971
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Code to build the documentation in the setup.py To use this code, run:: python setup.py build_sphinx """ # Standard library imports import sys import os from os.path import join as pjoin import zipfile import warnings import shutil from distutils.cmd import Command from distutils.command.clean import clean _info_fname = pjoin(os.path.dirname(__file__), 'nipype', 'info.py') INFO_VARS = {} exec(open(_info_fname, 'rt').read(), {}, INFO_VARS) DOC_BUILD_DIR = os.path.join('doc', '_build', 'html') DOC_DOCTREES_DIR = os.path.join('doc', '_build', 'doctrees') ################################################################################ # Distutils Command class for installing nipype to a temporary location. class TempInstall(Command): temp_install_dir = os.path.join('build', 'install') def run(self): """ build and install nipype in a temporary location. """ install = self.distribution.get_command_obj('install') install.install_scripts = self.temp_install_dir install.install_base = self.temp_install_dir install.install_platlib = self.temp_install_dir install.install_purelib = self.temp_install_dir install.install_data = self.temp_install_dir install.install_lib = self.temp_install_dir install.install_headers = self.temp_install_dir install.run() # Horrible trick to reload nipype with our temporary instal for key in sys.modules.keys(): if key.startswith('nipype'): sys.modules.pop(key, None) sys.path.append(os.path.abspath(self.temp_install_dir)) # Pop the cwd sys.path.pop(0) import nipype def initialize_options(self): pass def finalize_options(self): pass ################################################################################ # Distutils Command class for API generation class APIDocs(TempInstall): description = \ """generate API docs """ user_options = [ ('None', None, 'this command has no options'), ] def run(self): # First build the project and install it to a temporary location. TempInstall.run(self) os.chdir('doc') try: # We are running the API-building script via an # system call, but overriding the import path. toolsdir = os.path.abspath(pjoin('..', 'tools')) for docbuilder in ['build_interface_docs.py']: build_templates = pjoin(toolsdir, docbuilder) cmd = """%s -c 'import sys; sys.path.append("%s"); sys.path.append("%s"); execfile("%s", dict(__name__="__main__"))'""" \ % (sys.executable, toolsdir, self.temp_install_dir, build_templates) os.system(cmd) finally: os.chdir('..') ################################################################################ # Code to copy the sphinx-generated html docs in the distribution. def relative_path(filename): """ Return the relative path to the file, assuming the file is in the DOC_BUILD_DIR directory. """ length = len(os.path.abspath(DOC_BUILD_DIR)) + 1 return os.path.abspath(filename)[length:] ################################################################################ # Distutils Command class build the docs # Sphinx import. try: from sphinx.setup_command import BuildDoc except: MyBuildDoc = None else: class MyBuildDoc(BuildDoc): """ Sub-class the standard sphinx documentation building system, to add logics for API generation and matplotlib's plot directive. 
""" def run(self): self.run_command('api_docs') # We need to be in the doc directory for to plot_directive # and API generation to work """ os.chdir('doc') try: BuildDoc.run(self) finally: os.chdir('..') """ # It put's the build in a doc/doc/_build directory with the # above?!?! I'm leaving the code above here but commented out # in case I'm missing something? BuildDoc.run(self) self.zip_docs() def zip_docs(self): if not os.path.exists(DOC_BUILD_DIR): raise OSError, 'Doc directory does not exist.' target_file = os.path.join('doc', 'documentation.zip') # ZIP_DEFLATED actually compresses the archive. However, there # will be a RuntimeError if zlib is not installed, so we check # for it. ZIP_STORED produces an uncompressed zip, but does not # require zlib. try: zf = zipfile.ZipFile(target_file, 'w', compression=zipfile.ZIP_DEFLATED) except RuntimeError: warnings.warn('zlib not installed, storing the docs ' 'without compression') zf = zipfile.ZipFile(target_file, 'w', compression=zipfile.ZIP_STORED) for root, dirs, files in os.walk(DOC_BUILD_DIR): relative = relative_path(root) if not relative.startswith('.doctrees'): for f in files: zf.write(os.path.join(root, f), os.path.join(relative, 'html_docs', f)) zf.close() def finalize_options(self): """ Override the default for the documentation build directory. """ self.build_dir = os.path.join(*DOC_BUILD_DIR.split(os.sep)[:-1]) BuildDoc.finalize_options(self) ################################################################################ # Distutils Command class to clean class Clean(clean): def run(self): clean.run(self) api_path = os.path.join('doc', 'api', 'generated') if os.path.exists(api_path): print "Removing %s" % api_path shutil.rmtree(api_path) interface_path = os.path.join('doc', 'interfaces', 'generated') if os.path.exists(interface_path): print "Removing %s" % interface_path shutil.rmtree(interface_path) if os.path.exists(DOC_BUILD_DIR): print "Removing %s" % DOC_BUILD_DIR shutil.rmtree(DOC_BUILD_DIR) if os.path.exists(DOC_DOCTREES_DIR): print "Removing %s" % DOC_DOCTREES_DIR shutil.rmtree(DOC_DOCTREES_DIR) # The command classes for distutils, used by the setup.py cmdclass = {'build_sphinx': MyBuildDoc, 'api_docs': APIDocs, 'clean': Clean, }
bsd-3-clause
u-engine/rpg_svo
svo_analysis/src/svo_analysis/analyse_dataset.py
17
1178
# -*- coding: utf-8 -*- import associate import numpy as np import matplotlib.pyplot as plt import yaml def loadDataset(filename): file = open(filename) data = file.read() lines = data.replace(","," ").replace("\t"," ").split("\n") D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64) return D dataset_dir = '/home/cforster/Datasets/SlamBenchmark/px4_r2' trajectory_data = dataset_dir+'/groundtruth.txt' stepsize = 10 # load dataset data = loadDataset(trajectory_data) n = data.shape[0] steps = np.arange(0,n,stepsize) # compute trajectory length last_pos = data[0,1:4] trajectory_length = 0 for i in steps[1:]: new_pos = data[i,1:4] trajectory_length += np.linalg.norm(new_pos-last_pos) last_pos = new_pos print 'trajectory length = ' + str(trajectory_length) + 'm' print 'height mean = ' + str(np.mean(data[:,3])) + 'm' print 'height median = ' + str(np.median(data[:,3])) + 'm' print 'height std = ' + str(np.std(data[:,3])) + 'm' print 'duration = ' + str(data[-1,0]-data[0,0]) + 's' print 'speed = ' + str(trajectory_length/(data[-1,0]-data[0,0])) + 'm/s'
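# An equivalent vectorized form of the subsampled trajectory-length computation above
# (a sketch, not part of the original script): sum the Euclidean distances between
# consecutive sampled positions in one numpy expression.
trajectory_length_vec = np.sum(np.linalg.norm(np.diff(data[steps, 1:4], axis=0), axis=1))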
gpl-3.0
deepesch/scikit-learn
sklearn/linear_model/coordinate_descent.py
43
75144
# Author: Alexandre Gramfort <[email protected]> # Fabian Pedregosa <[email protected]> # Olivier Grisel <[email protected]> # Gael Varoquaux <[email protected]> # # License: BSD 3 clause import sys import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import sparse from .base import LinearModel, _pre_fit from ..base import RegressorMixin from .base import center_data, sparse_center_data from ..utils import check_array, check_X_y, deprecated from ..utils.validation import check_random_state from ..cross_validation import check_cv from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import xrange from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_is_fitted from ..utils.validation import column_or_1d from ..utils import ConvergenceWarning from . import cd_fast ############################################################################### # Paths functions def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True, eps=1e-3, n_alphas=100, normalize=False, copy_X=True): """ Compute the grid of alpha values for elastic net parameter search Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication y : ndarray, shape (n_samples,) Target values Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. l1_ratio : float The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean, default True Whether to fit an intercept or not normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. """ n_samples = len(y) sparse_center = False if Xy is None: X_sparse = sparse.isspmatrix(X) sparse_center = X_sparse and (fit_intercept or normalize) X = check_array(X, 'csc', copy=(copy_X and fit_intercept and not X_sparse)) if not X_sparse: # X can be touched inplace thanks to the above line X, y, _, _, _ = center_data(X, y, fit_intercept, normalize, copy=False) Xy = safe_sparse_dot(X.T, y, dense_output=True) if sparse_center: # Workaround to find alpha_max for sparse matrices. # since we should not destroy the sparsity of such matrices. _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept, normalize) mean_dot = X_mean * np.sum(y) if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if sparse_center: if fit_intercept: Xy -= mean_dot[:, np.newaxis] if normalize: Xy /= X_std[:, np.newaxis] alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() / (n_samples * l1_ratio)) if alpha_max <= np.finfo(float).resolution: alphas = np.empty(n_alphas) alphas.fill(np.finfo(float).resolution) return alphas return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[::-1] def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute Lasso path with coordinate descent The Lasso optimization function varies for mono and multi-outputs. 
For mono-output tasks it is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <lasso>`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,), or (n_samples, n_outputs) Target values eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. positive : bool, default False If set to True, forces coefficients to be positive. return_n_iter : bool whether to return the number of iterations or not. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. Notes ----- See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. Note that in certain cases, the Lars solver may be significantly faster to implement this functionality. In particular, linear interpolation can be used to retrieve model coefficients between the values output by lars_path Examples --------- Comparing lasso_path and lars_path with interpolation: >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T >>> y = np.array([1, 2, 3.1]) >>> # Use lasso_path to compute a coefficient path >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) >>> print(coef_path) [[ 0. 0. 0.46874778] [ 0.2159048 0.4425765 0.23689075]] >>> # Now use lars_path and 1D linear interpolation to compute the >>> # same path >>> from sklearn.linear_model import lars_path >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') >>> from scipy import interpolate >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], ... coef_path_lars[:, ::-1]) >>> print(coef_path_continuous([5., 1., .5])) [[ 0. 0. 
0.46915237] [ 0.2159048 0.4425765 0.23668876]] See also -------- lars_path Lasso LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas, alphas=alphas, precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init, verbose=verbose, positive=positive, **params) def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute elastic net path with coordinate descent The elastic net optimization function varies for mono and multi-outputs. For mono-output tasks it is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,) or (n_samples, n_outputs) Target values l1_ratio : float, optional float between 0 and 1 passed to elastic net (scaling between l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso eps : float Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. return_n_iter : bool whether to return the number of iterations or not. positive : bool, default False If set to True, forces coefficients to be positive. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. (Is returned when ``return_n_iter`` is set to True). Notes ----- See examples/plot_lasso_coordinate_descent_path.py for an example. 
See also -------- MultiTaskElasticNet MultiTaskElasticNetCV ElasticNet ElasticNetCV """ # We expect X and y to be already float64 Fortran ordered when bypassing # checks check_input = 'check_input' not in params or params['check_input'] pre_fit = 'check_input' not in params or params['pre_fit'] if check_input: X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X) y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False) if Xy is not None: Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False) n_samples, n_features = X.shape multi_output = False if y.ndim != 1: multi_output = True _, n_outputs = y.shape # MultiTaskElasticNet does not support sparse matrices if not multi_output and sparse.isspmatrix(X): if 'X_mean' in params: # As sparse matrices are not actually centered we need this # to be passed to the CD solver. X_sparse_scaling = params['X_mean'] / params['X_std'] else: X_sparse_scaling = np.zeros(n_features) # X should be normalized and fit already if function is called # from ElasticNet.fit if pre_fit: X, y, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False, copy=False, Xy_precompute_order='F') if alphas is None: # No need to normalize of fit_intercept: it has been done # above alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio, fit_intercept=False, eps=eps, n_alphas=n_alphas, normalize=False, copy_X=False) else: alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered n_alphas = len(alphas) tol = params.get('tol', 1e-4) max_iter = params.get('max_iter', 1000) dual_gaps = np.empty(n_alphas) n_iters = [] rng = check_random_state(params.get('random_state', None)) selection = params.get('selection', 'cyclic') if selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (selection == 'random') if not multi_output: coefs = np.empty((n_features, n_alphas), dtype=np.float64) else: coefs = np.empty((n_outputs, n_features, n_alphas), dtype=np.float64) if coef_init is None: coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1])) else: coef_ = np.asfortranarray(coef_init) for i, alpha in enumerate(alphas): l1_reg = alpha * l1_ratio * n_samples l2_reg = alpha * (1.0 - l1_ratio) * n_samples if not multi_output and sparse.isspmatrix(X): model = cd_fast.sparse_enet_coordinate_descent( coef_, l1_reg, l2_reg, X.data, X.indices, X.indptr, y, X_sparse_scaling, max_iter, tol, rng, random, positive) elif multi_output: model = cd_fast.enet_coordinate_descent_multi_task( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random) elif isinstance(precompute, np.ndarray): # We expect precompute to be already Fortran ordered when bypassing # checks if check_input: precompute = check_array(precompute, 'csc', dtype=np.float64, order='F') model = cd_fast.enet_coordinate_descent_gram( coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter, tol, rng, random, positive) elif precompute is False: model = cd_fast.enet_coordinate_descent( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive) else: raise ValueError("Precompute should be one of True, False, " "'auto' or array-like") coef_, dual_gap_, eps_, n_iter_ = model coefs[..., i] = coef_ dual_gaps[i] = dual_gap_ n_iters.append(n_iter_) if dual_gap_ > eps_: warnings.warn('Objective did not converge.' 
+ ' You might want' + ' to increase the number of iterations', ConvergenceWarning) if verbose: if verbose > 2: print(model) elif verbose > 1: print('Path: %03i out of %03i' % (i, n_alphas)) else: sys.stderr.write('.') if return_n_iter: return alphas, coefs, dual_gaps, n_iters return alphas, coefs, dual_gaps ############################################################################### # ElasticNet model class ElasticNet(LinearModel, RegressorMixin): """Linear regression with combined L1 and L2 priors as regularizer. Minimizes the objective function:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 where:: alpha = a + b and l1_ratio = a / (a + b) The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable, unless you supply your own sequence of alpha. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- alpha : float Constant that multiplies the penalty terms. Defaults to 1.0 See the notes for the exact mathematical meaning of this parameter. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` with the Lasso object is not advised and you should prefer the LinearRegression object. l1_ratio : float The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. fit_intercept : bool Whether the intercept should be estimated or not. If ``False``, the data is assumed to be already centered. normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. WARNING : The ``'auto'`` option is deprecated and will be removed in 0.18. max_iter : int, optional The maximum number of iterations copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. 
Attributes ---------- coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape (n_targets,) independent term in decision function. n_iter_ : array-like, shape (n_targets,) number of iterations run by the coordinate descent solver to reach the specified tolerance. Notes ----- To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. See also -------- SGDRegressor: implements elastic net regression with incremental training. SGDClassifier: implements logistic regression with elastic net penalty (``SGDClassifier(loss="log", penalty="elasticnet")``). """ path = staticmethod(enet_path) def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, precompute=False, max_iter=1000, copy_X=True, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): self.alpha = alpha self.l1_ratio = l1_ratio self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.positive = positive self.intercept_ = 0.0 self.random_state = random_state self.selection = selection def fit(self, X, y, check_input=True): """Fit model with coordinate descent. Parameters ----------- X : ndarray or scipy.sparse matrix, (n_samples, n_features) Data y : ndarray, shape (n_samples,) or (n_samples, n_targets) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ if self.alpha == 0: warnings.warn("With alpha=0, this algorithm does not converge " "well. You are advised to use the LinearRegression " "estimator", stacklevel=2) if self.precompute == 'auto': warnings.warn("Setting precompute to 'auto', was found to be " "slower even when n_samples > n_features. 
Hence " "it will be removed in 0.18.", DeprecationWarning, stacklevel=2) # We expect X and y to be already float64 Fortran ordered arrays # when bypassing checks if check_input: X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64, order='F', copy=self.copy_X and self.fit_intercept, multi_output=True, y_numeric=True) X, y, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=False, Xy_precompute_order='F') if y.ndim == 1: y = y[:, np.newaxis] if Xy is not None and Xy.ndim == 1: Xy = Xy[:, np.newaxis] n_samples, n_features = X.shape n_targets = y.shape[1] if self.selection not in ['cyclic', 'random']: raise ValueError("selection should be either random or cyclic.") if not self.warm_start or self.coef_ is None: coef_ = np.zeros((n_targets, n_features), dtype=np.float64, order='F') else: coef_ = self.coef_ if coef_.ndim == 1: coef_ = coef_[np.newaxis, :] dual_gaps_ = np.zeros(n_targets, dtype=np.float64) self.n_iter_ = [] for k in xrange(n_targets): if Xy is not None: this_Xy = Xy[:, k] else: this_Xy = None _, this_coef, this_dual_gap, this_iter = \ self.path(X, y[:, k], l1_ratio=self.l1_ratio, eps=None, n_alphas=None, alphas=[self.alpha], precompute=precompute, Xy=this_Xy, fit_intercept=False, normalize=False, copy_X=True, verbose=False, tol=self.tol, positive=self.positive, X_mean=X_mean, X_std=X_std, return_n_iter=True, coef_init=coef_[k], max_iter=self.max_iter, random_state=self.random_state, selection=self.selection, check_input=False, pre_fit=False) coef_[k] = this_coef[:, 0] dual_gaps_[k] = this_dual_gap[0] self.n_iter_.append(this_iter[0]) if n_targets == 1: self.n_iter_ = self.n_iter_[0] self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_]) self._set_intercept(X_mean, y_mean, X_std) # return self for chaining fit and predict calls return self @property def sparse_coef_(self): """ sparse representation of the fitted coef """ return sparse.csr_matrix(self.coef_) @deprecated(" and will be removed in 0.19") def decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ return self._decision_function(X) def _decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ check_is_fitted(self, 'n_iter_') if sparse.isspmatrix(X): return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True) + self.intercept_) else: return super(ElasticNet, self)._decision_function(X) ############################################################################### # Lasso model class Lasso(ElasticNet): """Linear Model trained with L1 prior as regularizer (aka the Lasso) The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Technically the Lasso model is optimizing the same objective function as the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). Read more in the :ref:`User Guide <lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1 term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. 
For numerical reasons, using ``alpha = 0`` is with the Lasso object is not advised and you should prefer the LinearRegression object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. WARNING : The ``'auto'`` option is deprecated and will be removed in 0.18. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape (n_targets,) independent term in decision function. n_iter_ : int | array-like, shape (n_targets,) number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.Lasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, positive=False, precompute=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [ 0.85 0. ] >>> print(clf.intercept_) 0.15 See also -------- lars_path lasso_path LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. 
""" path = staticmethod(enet_path) def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, precompute=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): super(Lasso, self).__init__( alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept, normalize=normalize, precompute=precompute, copy_X=copy_X, max_iter=max_iter, tol=tol, warm_start=warm_start, positive=positive, random_state=random_state, selection=selection) ############################################################################### # Functions for CV with paths functions def _path_residuals(X, y, train, test, path, path_params, alphas=None, l1_ratio=1, X_order=None, dtype=None): """Returns the MSE for the models computed by 'path' Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values train : list of indices The indices of the train set test : list of indices The indices of the test set path : callable function returning a list of models on the path. See enet_path for an example of signature path_params : dictionary Parameters passed to the path function alphas : array-like, optional Array of float that is used for cross-validation. If not provided, computed using 'path' l1_ratio : float, optional float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 X_order : {'F', 'C', or None}, optional The order of the arrays expected by the path function to avoid memory copies dtype : a numpy dtype or None The dtype of the arrays expected by the path function to avoid memory copies """ X_train = X[train] y_train = y[train] X_test = X[test] y_test = y[test] fit_intercept = path_params['fit_intercept'] normalize = path_params['normalize'] if y.ndim == 1: precompute = path_params['precompute'] else: # No Gram variant of multi-task exists right now. # Fall back to default enet_multitask precompute = False X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept, copy=False) path_params = path_params.copy() path_params['Xy'] = Xy path_params['X_mean'] = X_mean path_params['X_std'] = X_std path_params['precompute'] = precompute path_params['copy_X'] = False path_params['alphas'] = alphas if 'l1_ratio' in path_params: path_params['l1_ratio'] = l1_ratio # Do the ordering and type casting here, as if it is done in the path, # X is copied and a reference is kept here X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order) alphas, coefs, _ = path(X_train, y_train, **path_params) del X_train, y_train if y.ndim == 1: # Doing this so that it becomes coherent with multioutput. coefs = coefs[np.newaxis, :, :] y_mean = np.atleast_1d(y_mean) y_test = y_test[:, np.newaxis] if normalize: nonzeros = np.flatnonzero(X_std) coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis] intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs) if sparse.issparse(X_test): n_order, n_features, n_alphas = coefs.shape # Work around for sparse matices since coefs is a 3-D numpy array. 
coefs_feature_major = np.rollaxis(coefs, 1) feature_2d = np.reshape(coefs_feature_major, (n_features, -1)) X_test_coefs = safe_sparse_dot(X_test, feature_2d) X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1) else: X_test_coefs = safe_sparse_dot(X_test, coefs) residues = X_test_coefs - y_test[:, :, np.newaxis] residues += intercepts this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0) return this_mses class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)): """Base class for iterative model fitting along a regularization path""" @abstractmethod def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, positive=False, random_state=None, selection='cyclic'): self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.tol = tol self.copy_X = copy_X self.cv = cv self.verbose = verbose self.n_jobs = n_jobs self.positive = positive self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit linear model with coordinate descent Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as float64, Fortran-contiguous data to avoid unnecessary memory duplication. If y is mono-output, X can be sparse. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values """ y = np.asarray(y, dtype=np.float64) if y.shape[0] == 0: raise ValueError("y has 0 samples: %r" % y) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV): if model_str == 'ElasticNet': model = ElasticNet() else: model = Lasso() if y.ndim > 1 and y.shape[1] > 1: raise ValueError("For multi-task outputs, use " "MultiTask%sCV" % (model_str)) y = column_or_1d(y, warn=True) else: if sparse.isspmatrix(X): raise TypeError("X should be dense but a sparse matrix was" "passed") elif y.ndim == 1: raise ValueError("For mono-task outputs, use " "%sCV" % (model_str)) if model_str == 'ElasticNet': model = MultiTaskElasticNet() else: model = MultiTaskLasso() if self.selection not in ["random", "cyclic"]: raise ValueError("selection should be either random or cyclic.") # This makes sure that there is no duplication in memory. 
# Dealing right with copy_X is important in the following: # Multiple functions touch X and subsamples of X and can induce a # lot of duplication of memory copy_X = self.copy_X and self.fit_intercept if isinstance(X, np.ndarray) or sparse.isspmatrix(X): # Keep a reference to X reference_to_old_X = X # Let us not impose fortran ordering or float64 so far: it is # not useful for the cross-validation loop and will be done # by the model fitting itself X = check_array(X, 'csc', copy=False) if sparse.isspmatrix(X): if (hasattr(reference_to_old_X, "data") and not np.may_share_memory(reference_to_old_X.data, X.data)): # X is a sparse matrix and has been copied copy_X = False elif not np.may_share_memory(reference_to_old_X, X): # X has been copied copy_X = False del reference_to_old_X else: X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X) copy_X = False if X.shape[0] != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (X.shape[0], y.shape[0])) # All LinearModelCV parameters except 'cv' are acceptable path_params = self.get_params() if 'l1_ratio' in path_params: l1_ratios = np.atleast_1d(path_params['l1_ratio']) # For the first path, we need to set l1_ratio path_params['l1_ratio'] = l1_ratios[0] else: l1_ratios = [1, ] path_params.pop('cv', None) path_params.pop('n_jobs', None) alphas = self.alphas n_l1_ratio = len(l1_ratios) if alphas is None: alphas = [] for l1_ratio in l1_ratios: alphas.append(_alpha_grid( X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, normalize=self.normalize, copy_X=self.copy_X)) else: # Making sure alphas is properly ordered. alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) # We want n_alphas to be the number of alphas used for each l1_ratio. n_alphas = len(alphas[0]) path_params.update({'n_alphas': n_alphas}) path_params['copy_X'] = copy_X # We are not computing in parallel, we can modify X # inplace in the folds if not (self.n_jobs == 1 or self.n_jobs is None): path_params['copy_X'] = False # init cross-validation generator cv = check_cv(self.cv, X) # Compute path for all folds and compute MSE to get the best alpha folds = list(cv) best_mse = np.inf # We do a double for loop folded in one, in order to be able to # iterate in parallel on l1_ratio and folds jobs = (delayed(_path_residuals)(X, y, train, test, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=np.float64) for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) for train, test in folds) mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(jobs) mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) mean_mse = np.mean(mse_paths, axis=1) self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1)) for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): i_best_alpha = np.argmin(mse_alphas) this_best_mse = mse_alphas[i_best_alpha] if this_best_mse < best_mse: best_alpha = l1_alphas[i_best_alpha] best_l1_ratio = l1_ratio best_mse = this_best_mse self.l1_ratio_ = best_l1_ratio self.alpha_ = best_alpha if self.alphas is None: self.alphas_ = np.asarray(alphas) if n_l1_ratio == 1: self.alphas_ = self.alphas_[0] # Remove duplicate alphas in case alphas is provided. 
else: self.alphas_ = np.asarray(alphas[0]) # Refit the model with the parameters selected common_params = dict((name, value) for name, value in self.get_params().items() if name in model.get_params()) model.set_params(**common_params) model.alpha = best_alpha model.l1_ratio = best_l1_ratio model.copy_X = copy_X model.precompute = False model.fit(X, y) if not hasattr(self, 'l1_ratio'): del self.l1_ratio_ self.coef_ = model.coef_ self.intercept_ = model.intercept_ self.dual_gap_ = model.dual_gap_ self.n_iter_ = model.n_iter_ return self class LassoCV(LinearModelCV, RegressorMixin): """Lasso linear model with iterative fitting along a regularization path The best model is selected by cross-validation. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path alphas : numpy array, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. positive : bool, optional If positive, restrict regression coefficients to be positive selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. fit_intercept : boolean, default True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Attributes ---------- alpha_ : float The amount of penalization chosen by cross validation coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) intercept_ : float | array, shape (n_targets,) independent term in decision function. 
mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting dual_gap_ : ndarray, shape () The dual gap at the end of the optimization for the optimal alpha (``alpha_``). n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Notes ----- See examples/linear_model/lasso_path_with_crossvalidation.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. See also -------- lars_path lasso_path LassoLars Lasso LassoLarsCV """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, positive=False, random_state=None, selection='cyclic'): super(LassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive, random_state=random_state, selection=selection) class ElasticNetCV(LinearModelCV, RegressorMixin): """Elastic Net model with iterative fitting along a regularization path The best model is selected by cross-validation. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- l1_ratio : float, optional float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 This parameter can be a list, in which case the different values are tested by cross-validation and the one giving the best prediction score is used. Note that a good choice of list of values for l1_ratio is often to put more values close to 1 (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, .9, .95, .99, 1]`` eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path, used for each l1_ratio. alphas : numpy array, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. 
This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Attributes ---------- alpha_ : float The amount of penalization chosen by cross validation l1_ratio_ : float The compromise between l1 and l2 penalization chosen by cross validation coef_ : array, shape (n_features,) | (n_targets, n_features) Parameter vector (w in the cost function formula), intercept_ : float | array, shape (n_targets, n_features) Independent term in the decision function. mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds) Mean square error for the test set on each fold, varying l1_ratio and alpha. alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Notes ----- See examples/linear_model/lasso_path_with_crossvalidation.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. More specifically, the optimization objective is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 for:: alpha = a + b and l1_ratio = a / (a + b). See also -------- enet_path ElasticNet """ path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, positive=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.positive = positive self.random_state = random_state self.selection = selection ############################################################################### # Multi Task ElasticNet and Lasso models (with joint feature selection) class MultiTaskElasticNet(Lasso): """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. 
Defaults to 1.0 l1_ratio : float The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). If a 1D y is \ passed in at fit (non multi-task usage), ``coef_`` is then a 1D array n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True, l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.45663524 0.45612256] [ 0.45663524 0.45612256]] >>> print(clf.intercept_) [ 0.0872422 0.0872422] See also -------- ElasticNet, MultiTaskLasso Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.alpha = alpha self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit MultiTaskLasso model with coordinate descent Parameters ----------- X : ndarray, shape (n_samples, n_features) Data y : ndarray, shape (n_samples, n_tasks) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. 
To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ # X and y must be of type float64 X = check_array(X, dtype=np.float64, order='F', copy=self.copy_X and self.fit_intercept) y = np.asarray(y, dtype=np.float64) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if y.ndim == 1: raise ValueError("For mono-task outputs, use %s" % model_str) n_samples, n_features = X.shape _, n_tasks = y.shape if n_samples != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (n_samples, y.shape[0])) X, y, X_mean, y_mean, X_std = center_data( X, y, self.fit_intercept, self.normalize, copy=False) if not self.warm_start or self.coef_ is None: self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64, order='F') l1_reg = self.alpha * self.l1_ratio * n_samples l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory if self.selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (self.selection == 'random') self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \ cd_fast.enet_coordinate_descent_multi_task( self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol, check_random_state(self.random_state), random) self._set_intercept(X_mean, y_mean, X_std) if self.dual_gap_ > self.eps_: warnings.warn('Objective did not converge, you might want' ' to increase the number of iterations') # return self for chaining fit and predict calls return self class MultiTaskLasso(MultiTaskElasticNet): """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of earch row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. Defaults to 1.0 fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4 random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_tasks, n_features) parameter vector (W in the cost function formula) intercept_ : array, shape (n_tasks,) independent term in decision function. 
n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskLasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.89393398 0. ] [ 0.89393398 0. ]] >>> print(clf.intercept_) [ 0.10606602 0.10606602] See also -------- Lasso, MultiTaskElasticNet Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.alpha = alpha self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.l1_ratio = 1.0 self.random_state = random_state self.selection = selection class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 ElasticNet with built-in cross-validation. The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automatically. n_alphas : int, optional Number of alphas along the regularization path l1_ratio : float or array of floats The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. 
This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) or \ (n_l1_ratio, n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio l1_ratio_ : float best l1_ratio obtained by cross-validation. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNetCV() >>> clf.fit([[0,0], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001, fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100, n_jobs=1, normalize=False, random_state=None, selection='cyclic', tol=0.0001, verbose=0) >>> print(clf.coef_) [[ 0.52875032 0.46958558] [ 0.52875032 0.46958558]] >>> print(clf.intercept_) [ 0.00166409 0.00166409] See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskLassoCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state self.selection = selection class MultiTaskLassoCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 Lasso with built-in cross-validation. The optimization objective for MultiTaskLasso is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automaticlly. n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations. 
tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskElasticNetCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, random_state=None, selection='cyclic'): super(MultiTaskLassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state, selection=selection)
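# --- Editor's note: illustrative usage sketch, not part of the original coordinate_descent.py ---
# A minimal, hedged example of how the estimators and path functions defined above
# (ElasticNet, ElasticNetCV, lasso_path) are typically used together. The synthetic
# data, the alpha value and the l1_ratio grid below are arbitrary illustration
# choices, not values recommended by the module.
import numpy as np
from sklearn.linear_model import ElasticNet, ElasticNetCV, lasso_path

rng = np.random.RandomState(0)
X = rng.randn(50, 10)                        # 50 samples, 10 features
w_true = np.zeros(10)
w_true[:3] = [1.5, -2.0, 0.5]                # only the first three features are informative
y = X.dot(w_true) + 0.01 * rng.randn(50)

# Fit a single model at a fixed alpha / l1_ratio.
enet = ElasticNet(alpha=0.1, l1_ratio=0.7).fit(X, y)
print(enet.coef_)

# Compute a full Lasso regularization path with coordinate descent.
alphas, coefs, dual_gaps = lasso_path(X, y, eps=1e-3, n_alphas=50)
print(alphas.shape, coefs.shape)             # (50,), (10, 50)

# Let cross-validation choose alpha and l1_ratio.
enet_cv = ElasticNetCV(l1_ratio=[.1, .5, .9, 1.0], cv=3).fit(X, y)
print(enet_cv.alpha_, enet_cv.l1_ratio_)
# --- end editor's note ---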
bsd-3-clause
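The MultiTaskLassoCV docstring above has no doctest like its ElasticNet counterpart, so here is a minimal usage sketch (not from the source; the toy data and cv value are invented) showing the cross-validated attributes it documents:

import numpy as np
from sklearn.linear_model import MultiTaskLassoCV

X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]])
Y = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]])  # one row of targets per sample
clf = MultiTaskLassoCV(cv=3).fit(X, Y)
print(clf.alpha_)           # amount of penalization chosen by cross-validation
print(clf.coef_.shape)      # (n_tasks, n_features) -> (2, 2)
print(clf.mse_path_.shape)  # (n_alphas, n_folds) -> (100, 3) with the default alpha grid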
JasonKessler/agefromname
agefromname/regenerate_data.py
1
3466
import datetime import io import math from urllib.request import urlopen from zipfile import ZipFile import numpy as np import pandas as pd from bs4 import BeautifulSoup def regenerate_birth_counts( census_zip_file=None, output_path='data/year_of_birth_counts.csv.gz'): '''Regenerate table containing counts of first names by sex and year of birth. :param census_zip_file: str, file-like object similar to http://www.ssa.gov/oact/babynames/names.zip, defaults to SSA.gov url :param output_path: str, path of .gz file to write dataframe csv :return: pd.DataFrame, pandas data frame with the columns first_name,sex,count,year_of_birth ''' if census_zip_file is None: census_zip_file = io.BytesIO(urlopen('http://www.ssa.gov/oact/babynames/names.zip').read()) year_of_birth_dfs = [] with ZipFile(census_zip_file) as names_zip: for filename in names_zip.namelist(): if filename.startswith('yob') and filename.endswith('txt'): cur_year_of_birth_df = pd.read_csv(names_zip.open(filename), index_col=None, names=['first_name', 'sex', 'count']) cur_year_of_birth_df['year_of_birth'] = filename[3:7] year_of_birth_dfs.append(cur_year_of_birth_df) year_of_birth_df = pd.concat(year_of_birth_dfs, ignore_index=True) year_of_birth_df['first_name'] = year_of_birth_df['first_name'].apply(str.lower) year_of_birth_df['sex'] = year_of_birth_df['sex'].apply(str.lower) year_of_birth_df.to_csv(output_path, index=False, compression='gzip') return year_of_birth_df def _decade_mortality_table(year, url_template='https://www.ssa.gov/oact/NOTES/as120/LifeTables_Tbl_7_{}.html'): assert int(year) % 10 == 0 url = url_template.format(year) soup = BeautifulSoup(urlopen(url).read(), 'lxml') table = soup.find('table', border=1) rows = [] for row in table.find_all('tr'): row_datum = [cell.text.strip() for cell in row.find_all('td')] if len(row_datum) == 15 and row_datum[0] != '': rows.append({ 'year_of_birth': int(year), 'age': int(row_datum[0]), 'm_prob_survive_that_year': 1 - float(row_datum[1]), 'f_prob_survive_that_year': 1 - float(row_datum[9]), }) df = pd.DataFrame(rows).sort_values(by='age') for sex in 'mf': df[sex + '_prob_alive'] = np.cumprod(df[sex + '_prob_survive_that_year']).astype(np.float64) df['as_of_year'] = df['year_of_birth'] + df['age'] return df[['year_of_birth', 'as_of_year', 'm_prob_alive', 'f_prob_alive']] def regenerate_decade_mortality_table( url_template='https://www.ssa.gov/oact/NOTES/as120/LifeTables_Tbl_7_{}.html', output_path='data/mortality_table.csv.gz', min_decade=1900, max_decade=math.ceil(datetime.datetime.now().year * 0.1) * 10): ''' :param url_template: str, url tempate (with year as {}) to scrape :param output_path: str, path of .gz file to write dataframe csv :param min_decade: int, minimum decade to search :param max_decade: int, maximum decade to search :return: pd.DataFrame, pandas data frame with the columns year_of_birth,as_of_year,m_death_prob,f_death_prob ''' mortality_df = pd.concat([_decade_mortality_table(year, url_template) for year in range(min_decade, max_decade, 10)], axis=0) mortality_df.to_csv(output_path, index=False, compression='gzip') return mortality_df def regenerate_all(): regenerate_birth_counts() regenerate_decade_mortality_table()
apache-2.0
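A short sketch of consuming the output of regenerate_birth_counts() above; the path and column names are the defaults written by that function, and the queried name is a hypothetical example:

import pandas as pd

counts = pd.read_csv('data/year_of_birth_counts.csv.gz')  # columns: first_name, sex, count, year_of_birth
mary = counts[(counts.first_name == 'mary') & (counts.sex == 'f')]
print(mary.groupby('year_of_birth')['count'].sum().sort_index().tail())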
vermouthmjl/scikit-learn
sklearn/metrics/tests/test_pairwise.py
22
25505
import numpy as np from numpy import linalg from scipy.sparse import dok_matrix, csr_matrix, issparse from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_true from sklearn.externals.six import iteritems from sklearn.metrics.pairwise import euclidean_distances from sklearn.metrics.pairwise import manhattan_distances from sklearn.metrics.pairwise import linear_kernel from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel from sklearn.metrics.pairwise import polynomial_kernel from sklearn.metrics.pairwise import rbf_kernel from sklearn.metrics.pairwise import laplacian_kernel from sklearn.metrics.pairwise import sigmoid_kernel from sklearn.metrics.pairwise import cosine_similarity from sklearn.metrics.pairwise import cosine_distances from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_distances_argmin_min from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.metrics.pairwise import pairwise_kernels from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from sklearn.metrics.pairwise import PAIRED_DISTANCES from sklearn.metrics.pairwise import check_pairwise_arrays from sklearn.metrics.pairwise import check_paired_arrays from sklearn.metrics.pairwise import paired_distances from sklearn.metrics.pairwise import paired_euclidean_distances from sklearn.metrics.pairwise import paired_manhattan_distances from sklearn.preprocessing import normalize def test_pairwise_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) S = pairwise_distances(X, metric="euclidean") S2 = euclidean_distances(X) assert_array_almost_equal(S, S2) # Euclidean distance, with Y != X. Y = rng.random_sample((2, 4)) S = pairwise_distances(X, Y, metric="euclidean") S2 = euclidean_distances(X, Y) assert_array_almost_equal(S, S2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean") assert_array_almost_equal(S, S2) # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial. S = pairwise_distances(X, metric="cityblock") S2 = pairwise_distances(X, metric=cityblock) assert_equal(S.shape[0], S.shape[1]) assert_equal(S.shape[0], X.shape[0]) assert_array_almost_equal(S, S2) # The manhattan metric should be equivalent to cityblock. 
S = pairwise_distances(X, Y, metric="manhattan") S2 = pairwise_distances(X, Y, metric=cityblock) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Low-level function for manhattan can divide in blocks to avoid # using too much memory during the broadcasting S3 = manhattan_distances(X, Y, size_threshold=10) assert_array_almost_equal(S, S3) # Test cosine as a string metric versus cosine callable # "cosine" uses sklearn metric, cosine (function) is scipy.spatial S = pairwise_distances(X, Y, metric="cosine") S2 = pairwise_distances(X, Y, metric=cosine) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Test with sparse X and Y, # currently only supported for Euclidean, L1 and cosine. X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean") S2 = euclidean_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse, metric="cosine") S2 = cosine_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan") S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo()) assert_array_almost_equal(S, S2) S2 = manhattan_distances(X, Y) assert_array_almost_equal(S, S2) # Test with scipy.spatial.distance metric, with a kwd kwds = {"p": 2.0} S = pairwise_distances(X, Y, metric="minkowski", **kwds) S2 = pairwise_distances(X, Y, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # same with Y = None kwds = {"p": 2.0} S = pairwise_distances(X, metric="minkowski", **kwds) S2 = pairwise_distances(X, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # Test that scipy distance metrics throw an error if sparse matrix given assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski") assert_raises(TypeError, pairwise_distances, X, Y_sparse, metric="minkowski") # Test that a value error is raised if the metric is unknown assert_raises(ValueError, pairwise_distances, X, Y, metric="blah") def test_pairwise_precomputed(): for func in [pairwise_distances, pairwise_kernels]: # Test correct shape assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), metric='precomputed') # with two args assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), np.zeros((4, 4)), metric='precomputed') # even if shape[1] agrees (although thus second arg is spurious) assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), np.zeros((4, 3)), metric='precomputed') # Test not copied (if appropriate dtype) S = np.zeros((5, 5)) S2 = func(S, metric="precomputed") assert_true(S is S2) # with two args S = np.zeros((5, 3)) S2 = func(S, np.zeros((3, 3)), metric="precomputed") assert_true(S is S2) # Test always returns float dtype S = func(np.array([[1]], dtype='int'), metric='precomputed') assert_equal('f', S.dtype.kind) # Test converts list to array-like S = func([[1]], metric='precomputed') assert_true(isinstance(S, np.ndarray)) def check_pairwise_parallel(func, metric, kwds): rng = np.random.RandomState(0) for make_data in (np.array, csr_matrix): X = make_data(rng.random_sample((5, 4))) Y = make_data(rng.random_sample((3, 4))) try: S = func(X, metric=metric, n_jobs=1, **kwds) except (TypeError, ValueError) as exc: # Not all metrics support sparse input # ValueError may be triggered by bad callable if make_data is csr_matrix: assert_raises(type(exc), func, X, metric=metric, n_jobs=2, 
**kwds) continue else: raise S2 = func(X, metric=metric, n_jobs=2, **kwds) assert_array_almost_equal(S, S2) S = func(X, Y, metric=metric, n_jobs=1, **kwds) S2 = func(X, Y, metric=metric, n_jobs=2, **kwds) assert_array_almost_equal(S, S2) def test_pairwise_parallel(): wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1} metrics = [(pairwise_distances, 'euclidean', {}), (pairwise_distances, wminkowski, wminkowski_kwds), (pairwise_distances, 'wminkowski', wminkowski_kwds), (pairwise_kernels, 'polynomial', {'degree': 1}), (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}), ] for func, metric, kwds in metrics: yield check_pairwise_parallel, func, metric, kwds def test_pairwise_callable_nonstrict_metric(): # paired_distances should allow callable metric where metric(x, x) != 0 # Knowing that the callable is a strict metric would allow the diagonal to # be left uncalculated and set to 0. assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5) def callable_rbf_kernel(x, y, **kwds): # Callable version of pairwise.rbf_kernel. K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds) return K def test_pairwise_kernels(): # Test the pairwise_kernels helper function. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS. test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear", "chi2", "additive_chi2"] for metric in test_metrics: function = PAIRWISE_KERNEL_FUNCTIONS[metric] # Test with Y=None K1 = pairwise_kernels(X, metric=metric) K2 = function(X) assert_array_almost_equal(K1, K2) # Test with Y=Y K1 = pairwise_kernels(X, Y=Y, metric=metric) K2 = function(X, Y=Y) assert_array_almost_equal(K1, K2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric) assert_array_almost_equal(K1, K2) # Test with sparse X and Y X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) if metric in ["chi2", "additive_chi2"]: # these don't support sparse matrices yet assert_raises(ValueError, pairwise_kernels, X_sparse, Y=Y_sparse, metric=metric) continue K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric) assert_array_almost_equal(K1, K2) # Test with a callable function, with given keywords. metric = callable_rbf_kernel kwds = {'gamma': 0.1} K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds) K2 = rbf_kernel(X, Y=Y, **kwds) assert_array_almost_equal(K1, K2) # callable function, X=Y K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds) K2 = rbf_kernel(X, Y=X, **kwds) assert_array_almost_equal(K1, K2) def test_pairwise_kernels_filter_param(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) K = rbf_kernel(X, Y, gamma=0.1) params = {"gamma": 0.1, "blabla": ":)"} K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params) assert_array_almost_equal(K, K2) assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params) def test_paired_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) # Euclidean distance, with Y != X. 
Y = rng.random_sample((5, 4)) for metric, func in iteritems(PAIRED_DISTANCES): S = paired_distances(X, Y, metric=metric) S2 = func(X, Y) assert_array_almost_equal(S, S2) S3 = func(csr_matrix(X), csr_matrix(Y)) assert_array_almost_equal(S, S3) if metric in PAIRWISE_DISTANCE_FUNCTIONS: # Check the pairwise_distances implementation # gives the same value distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y) distances = np.diag(distances) assert_array_almost_equal(distances, S) # Check the callable implementation S = paired_distances(X, Y, metric='manhattan') S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0)) assert_array_almost_equal(S, S2) # Test that a value error is raised when the lengths of X and Y should not # differ Y = rng.random_sample((3, 4)) assert_raises(ValueError, paired_distances, X, Y) def test_pairwise_distances_argmin_min(): # Check pairwise minimum distances computation for any metric X = [[0], [1]] Y = [[-1], [2]] Xsp = dok_matrix(X) Ysp = csr_matrix(Y, dtype=np.float32) # euclidean metric D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean") D2 = pairwise_distances_argmin(X, Y, metric="euclidean") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # sparse matrix case Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean") assert_array_equal(Dsp, D) assert_array_equal(Esp, E) # We don't want np.matrix here assert_equal(type(Dsp), np.ndarray) assert_equal(type(Esp), np.ndarray) # Non-euclidean sklearn metric D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan") D2 = pairwise_distances_argmin(X, Y, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(E, [1., 1.]) D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan") D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (callable) D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski, metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (string) D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski", metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Compare with naive implementation rng = np.random.RandomState(0) X = rng.randn(97, 149) Y = rng.randn(111, 149) dist = pairwise_distances(X, Y, metric="manhattan") dist_orig_ind = dist.argmin(axis=0) dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min( X, Y, axis=0, metric="manhattan", batch_size=50) np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7) np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7) def test_euclidean_distances(): # Check the pairwise Euclidean distances computation X = [[0]] Y = [[1], [2]] D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) X = csr_matrix(X) Y = csr_matrix(Y) D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) rng = np.random.RandomState(0) X = rng.random_sample((10, 4)) Y = rng.random_sample((20, 4)) X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1) Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1) # check that we still get the right answers with {X,Y}_norm_squared D1 = euclidean_distances(X, Y) D2 = 
euclidean_distances(X, Y, X_norm_squared=X_norm_sq) D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq) D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq, Y_norm_squared=Y_norm_sq) assert_array_almost_equal(D2, D1) assert_array_almost_equal(D3, D1) assert_array_almost_equal(D4, D1) # check we get the wrong answer with wrong {X,Y}_norm_squared X_norm_sq *= 0.5 Y_norm_sq *= 0.5 wrong_D = euclidean_distances(X, Y, X_norm_squared=np.zeros_like(X_norm_sq), Y_norm_squared=np.zeros_like(Y_norm_sq)) assert_greater(np.max(np.abs(wrong_D - D1)), .01) # Paired distances def test_paired_euclidean_distances(): # Check the paired Euclidean distances computation X = [[0], [0]] Y = [[1], [2]] D = paired_euclidean_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_paired_manhattan_distances(): # Check the paired manhattan distances computation X = [[0], [0]] Y = [[1], [2]] D = paired_manhattan_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_chi_square_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((10, 4)) K_add = additive_chi2_kernel(X, Y) gamma = 0.1 K = chi2_kernel(X, Y, gamma=gamma) assert_equal(K.dtype, np.float) for i, x in enumerate(X): for j, y in enumerate(Y): chi2 = -np.sum((x - y) ** 2 / (x + y)) chi2_exp = np.exp(gamma * chi2) assert_almost_equal(K_add[i, j], chi2) assert_almost_equal(K[i, j], chi2_exp) # check diagonal is ones for data with itself K = chi2_kernel(Y) assert_array_equal(np.diag(K), 1) # check off-diagonal is < 1 but > 0: assert_true(np.all(K > 0)) assert_true(np.all(K - np.diag(np.diag(K)) < 1)) # check that float32 is preserved X = rng.random_sample((5, 4)).astype(np.float32) Y = rng.random_sample((10, 4)).astype(np.float32) K = chi2_kernel(X, Y) assert_equal(K.dtype, np.float32) # check integer type gets converted, # check that zeros are handled X = rng.random_sample((10, 4)).astype(np.int32) K = chi2_kernel(X, X) assert_true(np.isfinite(K).all()) assert_equal(K.dtype, np.float) # check that kernel of similar things is greater than dissimilar ones X = [[.3, .7], [1., 0]] Y = [[0, 1], [.9, .1]] K = chi2_kernel(X, Y) assert_greater(K[0, 0], K[0, 1]) assert_greater(K[1, 1], K[1, 0]) # test negative input assert_raises(ValueError, chi2_kernel, [[0, -1]]) assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]]) assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]]) # different n_features in X and Y assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]]) # sparse matrices assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y)) assert_raises(ValueError, additive_chi2_kernel, csr_matrix(X), csr_matrix(Y)) def test_kernel_symmetry(): # Valid kernels should be symmetric rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, laplacian_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) assert_array_almost_equal(K, K.T, 15) def test_kernel_sparse(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) X_sparse = csr_matrix(X) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, laplacian_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) K2 = kernel(X_sparse, X_sparse) assert_array_almost_equal(K, K2) def test_linear_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = linear_kernel(X, X) # the diagonal elements of a linear kernel are their squared norm assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X]) def test_rbf_kernel(): 
rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = rbf_kernel(X, X) # the diagonal elements of a rbf kernel are 1 assert_array_almost_equal(K.flat[::6], np.ones(5)) def test_laplacian_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = laplacian_kernel(X, X) # the diagonal elements of a laplacian kernel are 1 assert_array_almost_equal(np.diag(K), np.ones(5)) # off-diagonal elements are < 1 but > 0: assert_true(np.all(K > 0)) assert_true(np.all(K - np.diag(np.diag(K)) < 1)) def test_cosine_similarity_sparse_output(): # Test if cosine_similarity correctly produces sparse output. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False) assert_true(issparse(K1)) K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine") assert_array_almost_equal(K1.todense(), K2) def test_cosine_similarity(): # Test the cosine_similarity. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) for X_, Y_ in ((X, None), (X, Y), (Xcsr, None), (Xcsr, Ycsr)): # Test that the cosine is kernel is equal to a linear kernel when data # has been previously normalized by L2-norm. K1 = pairwise_kernels(X_, Y=Y_, metric="cosine") X_ = normalize(X_) if Y_ is not None: Y_ = normalize(Y_) K2 = pairwise_kernels(X_, Y=Y_, metric="linear") assert_array_almost_equal(K1, K2) def test_check_dense_matrices(): # Ensure that pairwise array check works for dense matrices. # Check that if XB is None, XB is returned as reference to XA XA = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_true(XA_checked is XB_checked) assert_array_equal(XA, XA_checked) def test_check_XB_returned(): # Ensure that if XA and XB are given correctly, they return as equal. # Check that if XB is not None, it is returned equal. # Note that the second dimension of XB is the same as XA. XA = np.resize(np.arange(40), (5, 8)) XB = np.resize(np.arange(32), (4, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) XB = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_paired_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) def test_check_different_dimensions(): # Ensure an error is raised if the dimensions are different. XA = np.resize(np.arange(45), (5, 9)) XB = np.resize(np.arange(32), (4, 8)) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XB = np.resize(np.arange(4 * 9), (4, 9)) assert_raises(ValueError, check_paired_arrays, XA, XB) def test_check_invalid_dimensions(): # Ensure an error is raised on 1D input arrays. # The modified tests are not 1D. In the old test, the array was internally # converted to 2D anyways XA = np.arange(45).reshape(9, 5) XB = np.arange(32).reshape(4, 8) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XA = np.arange(45).reshape(9, 5) XB = np.arange(32).reshape(4, 8) assert_raises(ValueError, check_pairwise_arrays, XA, XB) def test_check_sparse_arrays(): # Ensures that checks return valid sparse matrices. 
rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_sparse = csr_matrix(XA) XB = rng.random_sample((5, 4)) XB_sparse = csr_matrix(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse) # compare their difference because testing csr matrices for # equality with '==' does not work as expected. assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XB_checked)) assert_equal(abs(XB_sparse - XB_checked).sum(), 0) XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse) assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XA_2_checked)) assert_equal(abs(XA_2_checked - XA_checked).sum(), 0) def tuplify(X): # Turns a numpy matrix (any n-dimensional array) into tuples. s = X.shape if len(s) > 1: # Tuplify each sub-array in the input. return tuple(tuplify(row) for row in X) else: # Single dimension input, just return tuple of contents. return tuple(r for r in X) def test_check_tuple_input(): # Ensures that checks return valid tuples. rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_tuples = tuplify(XA) XB = rng.random_sample((5, 4)) XB_tuples = tuplify(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples) assert_array_equal(XA_tuples, XA_checked) assert_array_equal(XB_tuples, XB_checked) def test_check_preserve_type(): # Ensures that type float32 is preserved. XA = np.resize(np.arange(40), (5, 8)).astype(np.float32) XB = np.resize(np.arange(40), (5, 8)).astype(np.float32) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_equal(XA_checked.dtype, np.float32) # both float32 XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_equal(XA_checked.dtype, np.float32) assert_equal(XB_checked.dtype, np.float32) # mismatched A XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float), XB) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float) # mismatched B XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(np.float)) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float)
bsd-3-clause
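For orientation, a condensed sketch of the core API the pairwise tests above exercise: a string metric agreeing with the dedicated distance helper, and pairwise_kernels forwarding keyword arguments to a named kernel. Only calls that appear in the test module are used; the array shapes are arbitrary.

import numpy as np
from sklearn.metrics.pairwise import (pairwise_distances, euclidean_distances,
                                      pairwise_kernels, rbf_kernel)

rng = np.random.RandomState(0)
X, Y = rng.random_sample((5, 4)), rng.random_sample((3, 4))
assert np.allclose(pairwise_distances(X, Y, metric="euclidean"), euclidean_distances(X, Y))
assert np.allclose(pairwise_kernels(X, Y, metric="rbf", gamma=0.1), rbf_kernel(X, Y, gamma=0.1))
print(pairwise_distances(X, Y).shape)  # (5, 3): one distance per (row of X, row of Y) pair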
RNAer/Calour
calour/tests/test_transforming.py
1
5611
# ---------------------------------------------------------------------------- # Copyright (c) 2016--, Calour development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import sys from unittest import main, skipIf import numpy as np import pandas as pd from numpy.testing import assert_array_almost_equal, assert_array_equal import calour as ca from calour._testing import Tests class TestTransforming(Tests): def setUp(self): super().setUp() self.test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None) def test_standardize(self): obs = self.test2.standardize() self.assertIsNot(obs, self.test2) assert_array_almost_equal(obs.data.sum(axis=1), [0] * 9) assert_array_almost_equal(obs.data.var(axis=1), [1] * 9) obs = self.test2.standardize(inplace=True) self.assertIs(obs, self.test2) def test_binarize(self): obs = self.test2.binarize() self.assertIsNot(obs, self.test2) obs = self.test2.binarize(inplace=True) self.assertIs(obs, self.test2) def test_log_n(self): obs = self.test2.log_n() self.test2.data = np.log2( [[10., 20., 1., 20., 5., 100., 844., 100.], [10., 20., 2., 19., 1., 100., 849., 200.], [10., 20., 3., 18., 5., 100., 844., 300.], [10., 20., 4., 17., 1., 100., 849., 400.], [10., 20., 5., 16., 4., 100., 845., 500.], [10., 20., 6., 15., 1., 100., 849., 600.], [10., 20., 7., 14., 3., 100., 846., 700.], [10., 20., 8., 13., 1., 100., 849., 800.], [10., 20., 9., 12., 7., 100., 842., 900.]]) self.assert_experiment_equal(obs, self.test2) self.assertIsNot(obs, self.test2) obs = self.test2.log_n(inplace=True) self.assertIs(obs, self.test2) def test_center_log_ration(self): from skbio.stats.composition import clr, centralize dat = np.array( [[10, 20, 1, 20, 5, 100, 844, 100], [10, 20, 2, 19, 0, 100, 849, 200], [10, 20, 3, 18, 5, 100, 844, 300], [10, 20, 4, 17, 0, 100, 849, 400], [10, 20, 5, 16, 4, 100, 845, 500], [10, 20, 6, 15, 0, 100, 849, 600], [10, 20, 7, 14, 3, 100, 846, 700], [10, 20, 8, 13, 0, 100, 849, 800], [10, 20, 9, 12, 7, 100, 842, 900]]) + 1 obs = self.test2.center_log_ratio() exp = clr(dat) assert_array_almost_equal(exp, obs.data) obs = self.test2.center_log_ratio(centralize=True) exp = clr(centralize(dat)) assert_array_almost_equal(exp, obs.data) def test_normalize(self): total = 1000 obs = self.test2.normalize(total) assert_array_almost_equal(obs.data.sum(axis=1).A1, [total] * 9) self.assertIsNot(obs, self.test2) obs = self.test2.normalize(total, inplace=True) self.assertIs(obs, self.test2) def test_normalize_non_numeric(self): with self.assertRaises(ValueError): self.test2.normalize(False) def test_rescale(self): total = 1000 obs = self.test2.rescale(total) self.assertAlmostEqual(np.mean(obs.data.sum(axis=1)), 1000) self.assertIsNot(obs, self.test2) self.assertNotAlmostEqual(obs.data.sum(axis=1).A1[0], 1000) def test_rescale_non_numeric(self): with self.assertRaises(ValueError): self.test2.normalize(False) with self.assertRaises(ValueError): self.test2.normalize(0) def test_normalize_by_subset_features(self): # test the filtering in standard mode (remove a few features, normalize to 10k) exp = ca.read(self.test1_biom, self.test1_samp, normalize=None) bad_features = [6, 7] features = [exp.feature_metadata.index[cbad] for cbad in bad_features] newexp = exp.normalize_by_subset_features(features, 10000, negate=True, inplace=False) # see the mean of the features we want (without 6,7) is 
10k good_features = list(set(range(exp.data.shape[1])).difference(set(bad_features))) assert_array_almost_equal(newexp.data[:, good_features].sum(axis=1), np.ones([exp.data.shape[0]]) * 10000) self.assertTrue(np.all(newexp.data[:, bad_features] > exp.data[:, bad_features])) @skipIf(sys.platform.startswith("win"), "skip this test for Windows") def test_subsample_count(self): exp = ca.Experiment(data=np.array([[1, 2, 3], [4, 5, 6]]), sample_metadata=pd.DataFrame([['a', 'b', 'c'], ['d', 'e', 'f']]), sparse=False) n = 6 obs = exp.subsample_count(n, random_seed=9) assert_array_equal(obs.data.sum(axis=1), np.array([n, n])) self.assertTrue(np.all(obs.data <= n)) n = 7 obs = exp.subsample_count(n) # the 1st row dropped assert_array_equal(obs.data.sum(axis=1), np.array([n])) self.assertIsNot(obs, exp) obs = exp.subsample_count(n, inplace=True) assert_array_equal(obs.data.sum(axis=1), np.array([n])) self.assertTrue(np.all(obs.data <= n)) self.assertIs(obs, exp) n = 10000 obs = exp.subsample_count(n) assert_array_equal(obs.data.sum(axis=1), np.array([])) if __name__ == '__main__': main()
bsd-3-clause
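A minimal sketch of the total-sum scaling behaviour asserted in test_normalize above, built with the same dense Experiment constructor used in test_subsample_count (the data values are arbitrary):

import numpy as np
import pandas as pd
import calour as ca

exp = ca.Experiment(data=np.array([[1., 2., 3.], [4., 5., 6.]]),
                    sample_metadata=pd.DataFrame([['a', 'b', 'c'], ['d', 'e', 'f']]),
                    sparse=False)
norm = exp.normalize(1000)    # returns a new Experiment; the tests show inplace defaults to False
print(norm.data.sum(axis=1))  # each sample now sums to 1000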
wu-ty/LINE_PROJECT
LDAmodel.py
1
5702
# -*- coding: utf-8 -*- import MeCab import pandas as pd import gensim import numpy import re # process in mecab def get_words(contents): ret = [] for content in contents: ret.append(get_words_main(content)) return ret def get_words_main(content): return tokenlize(content) def tokenlize(text): #mecab = MeCab.Tagger("-Owakati") #node = mecab.parse(text.encode('utf-8')) #return node text=text.encode('utf-8') tagger = MeCab.Tagger('-Ochasen') node = tagger.parseToNode(text)#.encode('utf-8')) keywords = [] while node: # if len(node.surface) > 1: # keywords.append(node.surface) # node = node.next if node.feature.split(",")[0] == '名詞': #yield node.surface if len(node.surface) > 1: keywords.append(node.surface) node = node.next return keywords #process in LDA model class lda_parts(object): '''docstring for lda_parts''' def __init__(self,sentencelist): #self.sentencelist = sentencelist self.wordslist = get_words(sentencelist) def dictionary_corpus(self, filters = True, load = None ,save = None, show=False,no_below=5, no_above=0.75): if load == None: dictionary = gensim.corpora.Dictionary(self.wordslist) if load == None and filters == True: dictionary.filter_extremes(no_below,no_above) else: dictionary = gensim.corpora.Dictionary.load(load) self.dictionary = dictionary if save != None: self.dictionary.save(save) self.corpus = [self.dictionary.doc2bow(words) for words in self.wordslist] def LDA_model(self,num_topics=150,save=None,load=None,show=False,set_matrix = True): if load == None: self.lda = gensim.models.LdaModel(corpus=self.corpus, id2word=self.dictionary, num_topics=num_topics) self.lda.save(save) else: self.lda = gensim.models.LdaModel.load(load) if show == True: for topic in self.lda.show_topics(-1): print topic if set_matrix: self.similarity_matrix() def similarity_matrix(self): self.matrix = gensim.similarities.MatrixSimilarity(self.lda[self.corpus]) def similarity_vector(self,p_sentence): p_corpus = self.dictionary.doc2bow(tokenlize(p_sentence)) return self.matrix[self.lda[p_corpus]] class News(object): def __init__(self, PKs, titles , descriptions, filters = True,show=False, no_below=5, no_above=0.75): self.n = len(PKs) self.PKs = PKs self.titles = titles self.descriptions = descriptions self.RelevantList = {} self.title_lda = lda_parts(titles) self.title_lda.dictionary_corpus(filters=filters,load = ("./model/titles.dictionary"), show=show, no_below=no_below, no_above=no_above) self.title_lda.LDA_model(load=("./model/titles.model"),show=show) self.title_lda.similarity_matrix() #self.titile_similarMatrix = self.title_lda.matrix self.description_lda = lda_parts(descriptions) self.description_lda.dictionary_corpus(filters=filters,load = ("./model/descriptions.dictionary"), show=show,no_below=no_below, no_above=no_above) self.description_lda.LDA_model(load=("./model/descriptions.model"),show=show) self.description_lda.similarity_matrix() #self.description_similarMatrix = self.description_lda.matrix self.p = re.compile(r"<[^>]*?>") def calculate_relevent(self): for k in range(self.n): sim_title = numpy.array(self.title_lda.similarity_vector(self.titles[k])) sim_description = numpy.array(self.description_lda.similarity_vector(self.descriptions[k])) sort_title = numpy.argsort(sim_title) sort_description = numpy.argsort(sim_description) resultTitle = [(sort_title[self.n-1-x],sim_title[sort_title[self.n-1-x]]) for x in range(self.n) if sort_title[self.n-1-x] != k and 0.1 < sim_title[sort_title[self.n-1-x]] < 1.0] resultDescription = 
[(sort_description[self.n-1-x],sim_description[sort_title[self.n-1-x]]) for x in range(self.n)if sort_description[self.n-1-x] != k and 0.1 < sim_title[sort_description[self.n-1-x]] < 1.0] mostRelevant = [] print k print resultTitle,resultDescription print len(self.PKs) while len(mostRelevant) <= 3: if len(resultTitle) == 0 and len(resultDescription) == 0: break if len(resultTitle) == 0: mostRelevant.append(resultDescription[0][0]) del resultDescription[0] if len(resultDescription) == 0: mostRelevant.append(resultTitle[0][0]) del resultTitle[0] if resultTitle[0][1] >= resultDescription[0][1]: if resultTitle[0][0] not in mostRelevant: mostRelevant.append(resultTitle[0][0]) del resultTitle[0] else: del resultTitle[0] else: if resultDescription[0][0] not in mostRelevant: mostRelevant.append(resultDescription[0][0]) del resultDescription[0] else: del resultDescription[0] print mostRelevant self.RelevantList[self.PKs[k]] = [self.PKs[i] for i in mostRelevant] print 'calculate_relevent over' return 0
bsd-3-clause
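The lda_parts class above chains the standard gensim pipeline; the Python 3 sketch below isolates that chain (toy documents and the two-topic setting are invented for illustration) so the flow from dictionary to similarity query is easier to follow:

from gensim import corpora, models, similarities

docs = [['news', 'economy', 'market'], ['sports', 'game', 'score'], ['market', 'stocks', 'economy']]
dictionary = corpora.Dictionary(docs)
corpus = [dictionary.doc2bow(d) for d in docs]
lda = models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=2)
index = similarities.MatrixSimilarity(lda[corpus])  # same call as lda_parts.similarity_matrix
query_bow = dictionary.doc2bow(['market', 'news'])
print(index[lda[query_bow]])                        # similarity of the query to each training document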
wschenck/nest-simulator
pynest/examples/gap_junctions_inhibitory_network.py
7
5989
# -*- coding: utf-8 -*- # # gap_junctions_inhibitory_network.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """Gap Junctions: Inhibitory network example ----------------------------------------------- This script simulates an inhibitory network of 500 Hodgkin-Huxley neurons. Without the gap junctions (meaning for ``gap_weight = 0.0``) the network shows an asynchronous irregular state that is caused by the external excitatory Poissonian drive being balanced by the inhibitory feedback within the network. With increasing `gap_weight` the network synchronizes: For a lower gap weight of 0.3 nS the network remains in an asynchronous state. With a weight of 0.54 nS the network switches randomly between the asynchronous to the synchronous state, while for a gap weight of 0.7 nS a stable synchronous state is reached. This example is also used as test case 2 (see Figure 9 and 10) in [1]_. References ~~~~~~~~~~~ .. [1] Hahne et al. (2015) A unified framework for spiking and gap-junction interactions in distributed neuronal network simulations, Front. Neuroinform. http://dx.doi.org/10.3389/neuro.11.012.2008 """ import nest import matplotlib.pyplot as plt import numpy n_neuron = 500 gap_per_neuron = 60 inh_per_neuron = 50 delay = 1.0 j_exc = 300. j_inh = -50. threads = 8 stepsize = 0.05 simtime = 501. gap_weight = 0.3 nest.ResetKernel() ############################################################################### # First we set the random seed, adjust the kernel settings and create # ``hh_psc_alpha_gap`` neurons, ``spike_recorder`` and ``poisson_generator``. numpy.random.seed(1) nest.SetKernelStatus({'resolution': 0.05, 'total_num_virtual_procs': threads, 'print_time': True, # Settings for waveform relaxation # 'use_wfr': False uses communication in every step # instead of an iterative solution 'use_wfr': True, 'wfr_comm_interval': 1.0, 'wfr_tol': 0.0001, 'wfr_max_iterations': 15, 'wfr_interpolation_order': 3}) neurons = nest.Create('hh_psc_alpha_gap', n_neuron) sr = nest.Create("spike_recorder") pg = nest.Create("poisson_generator", params={'rate': 500.0}) ############################################################################### # Each neuron shall receive ``inh_per_neuron = 50`` inhibitory synaptic inputs # that are randomly selected from all other neurons, each with synaptic # weight ``j_inh = -50.0`` pA and a synaptic delay of 1.0 ms. Furthermore each # neuron shall receive an excitatory external Poissonian input of 500.0 Hz # with synaptic weight ``j_exc = 300.0`` pA and the same delay. 
# The desired connections are created with the following commands: conn_dict = {'rule': 'fixed_indegree', 'indegree': inh_per_neuron, 'allow_autapses': False, 'allow_multapses': True} syn_dict = {'synapse_model': 'static_synapse', 'weight': j_inh, 'delay': delay} nest.Connect(neurons, neurons, conn_dict, syn_dict) nest.Connect(pg, neurons, 'all_to_all', syn_spec={'synapse_model': 'static_synapse', 'weight': j_exc, 'delay': delay}) ############################################################################### # Then the neurons are connected to the ``spike_recorder`` and the initial # membrane potential of each neuron is set randomly between -40 and -80 mV. nest.Connect(neurons, sr) neurons.V_m = nest.random.uniform(min=-80., max=-40.) ####################################################################################### # Finally gap junctions are added to the network. :math:`(60*500)/2` ``gap_junction`` # connections are added randomly resulting in an average of 60 gap-junction # connections per neuron. We must not use the ``fixed_indegree`` oder # ``fixed_outdegree`` functionality of ``nest.Connect()`` to create the # connections, as ``gap_junction`` connections are bidirectional connections # and we need to make sure that the same neurons are connected in both ways. # This is achieved by creating the connections on the Python level with the # `random` module of the Python Standard Library and connecting the neurons # using the ``make_symmetric`` flag for ``one_to_one`` connections. n_connection = int(n_neuron * gap_per_neuron / 2) neuron_list = neurons.tolist() connections = numpy.random.choice(neuron_list, [n_connection, 2]) for source_node_id, target_node_id in connections: nest.Connect(nest.NodeCollection([source_node_id]), nest.NodeCollection([target_node_id]), {'rule': 'one_to_one', 'make_symmetric': True}, {'synapse_model': 'gap_junction', 'weight': gap_weight}) ############################################################################### # In the end we start the simulation and plot the spike pattern. nest.Simulate(simtime) times = sr.get('events', 'times') spikes = sr.get('events', 'senders') n_spikes = sr.n_events hz_rate = (1000.0 * n_spikes / simtime) / n_neuron plt.figure(1) plt.plot(times, spikes, 'o') plt.title('Average spike rate (Hz): %.2f' % hz_rate) plt.xlabel('time (ms)') plt.ylabel('neuron no') plt.show()
gpl-2.0
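The bidirectional wiring idiom from the example above, reduced to two cells. Every call is taken from the script itself; the weight value is arbitrary, and NEST 3.x is assumed, as the NodeCollection usage in the example implies.

import nest

nest.ResetKernel()
pair = nest.Create('hh_psc_alpha_gap', 2)
nest.Connect(pair[:1], pair[1:],
             {'rule': 'one_to_one', 'make_symmetric': True},
             {'synapse_model': 'gap_junction', 'weight': 0.5})
# make_symmetric also creates the reverse connection, so both directions exist:
print(len(nest.GetConnections(synapse_model='gap_junction')))  # -> 2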
mlskit/astromlskit
FRONTEND/ldafront.py
2
3767
from PyQt4 import QtCore, QtGui import numpy as np try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_Form(object): def setupUi(self, Form): Form.setObjectName(_fromUtf8("Form")) Form.resize(220, 191) self.testdata=[] self.groupBox = QtGui.QGroupBox(Form) self.groupBox.setGeometry(QtCore.QRect(20, 20, 191, 51)) self.groupBox.setObjectName(_fromUtf8("groupBox")) self.lineEdit = QtGui.QLineEdit(self.groupBox) self.lineEdit.setGeometry(QtCore.QRect(20, 20, 141, 20)) self.lineEdit.setObjectName(_fromUtf8("lineEdit")) self.pushButton_3 = QtGui.QPushButton(Form) self.pushButton_3.setGeometry(QtCore.QRect(30, 140, 161, 23)) self.pushButton_3.setObjectName(_fromUtf8("pushButton_3")) self.pushButton_3.clicked.connect(self.startlda) self.pushButton_2 = QtGui.QPushButton(Form) self.pushButton_2.setGeometry(QtCore.QRect(30, 110, 161, 23)) self.pushButton_2.setObjectName(_fromUtf8("pushButton_2")) self.pushButton_2.clicked.connect(self.takeoutput) self.pushButton = QtGui.QPushButton(Form) self.pushButton.setGeometry(QtCore.QRect(30, 80, 161, 23)) self.pushButton.setObjectName(_fromUtf8("pushButton")) self.pushButton.clicked.connect(self.takeinput) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): Form.setWindowTitle(_translate("Form", "Form", None)) self.groupBox.setTitle(_translate("Form", "Learner/Classifier Name", None)) self.lineEdit.setText(_translate("Form", "Linear Discriminant analysis", None)) self.pushButton_3.setText(_translate("Form", "Start", None)) self.pushButton_2.setText(_translate("Form", "Test File", None)) self.pushButton.setText(_translate("Form", "Train File", None)) def takeinput(self): fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:') self.traindata=[] self.trainclass=[] for line in open(str(fname)): row=line.split("\n")[0].split(",") classlabel=row.pop() self.traindata.append(row) self.trainclass.append(classlabel) print "-----training complete ----" #print self.traindata #print self.trainclass def takeoutput(self): fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:') self.testdata=[] for line in open(str(fname)): row=line.split("\n")[0].split(",") self.testdata.append(row) #print self.testdata #print "---test data taken successfully---" #print self.testdata def myFloat(myList): return map(float, myList) def startlda(self): from sklearn.lda import LDA clf=LDA() X=np.array(self.traindata) Y=np.array(self.trainclass) y=self.testdata X=[[float(y) for y in x] for x in X] Y=[[int(y) for y in x] for x in Y] y=[[float(y) for y in x] for x in self.testdata] clf.fit(X,Y) print clf.predict(y) if __name__ == "__main__": import sys app = QtGui.QApplication(sys.argv) Dialog = QtGui.QDialog() ui = Ui_Form() ui.setupUi(Dialog) Dialog.show() sys.exit(app.exec_())
gpl-3.0
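The startlda() method above does the actual work; stripped of the PyQt4 plumbing it reduces to the following sketch, written against the current scikit-learn import path (sklearn.lda.LDA used in the file has since been renamed to LinearDiscriminantAnalysis; the toy data is invented):

import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [6.0, 9.0]])  # rows read from the "train file"
y = np.array([0, 0, 1, 1])                                      # last column of each row: class label
clf = LinearDiscriminantAnalysis().fit(X, y)
print(clf.predict(np.array([[1.2, 1.9], [5.5, 8.5]])))          # -> [0 1]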
newville/scikit-image
skimage/viewer/tests/test_tools.py
19
5681
from collections import namedtuple import numpy as np from numpy.testing import assert_equal from numpy.testing.decorators import skipif from skimage import data from skimage.viewer import ImageViewer, has_qt from skimage.viewer.canvastools import ( LineTool, ThickLineTool, RectangleTool, PaintTool) from skimage.viewer.canvastools.base import CanvasToolBase try: from matplotlib.testing.decorators import cleanup except ImportError: def cleanup(func): return func def get_end_points(image): h, w = image.shape[0:2] x = [w / 3, 2 * w / 3] y = [h / 2] * 2 return np.transpose([x, y]) def do_event(viewer, etype, button=1, xdata=0, ydata=0, key=None): """ *name* the event name *canvas* the FigureCanvas instance generating the event *guiEvent* the GUI event that triggered the matplotlib event *x* x position - pixels from left of canvas *y* y position - pixels from bottom of canvas *inaxes* the :class:`~matplotlib.axes.Axes` instance if mouse is over axes *xdata* x coord of mouse in data coords *ydata* y coord of mouse in data coords *button* button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events) *key* the key depressed when the mouse event triggered (see :class:`KeyEvent`) *step* number of scroll steps (positive for 'up', negative for 'down') """ ax = viewer.ax event = namedtuple('Event', ('name canvas guiEvent x y inaxes xdata ydata ' 'button key step')) event.button = button event.x, event.y = ax.transData.transform((xdata, ydata)) event.xdata, event.ydata = xdata, ydata event.inaxes = ax event.canvas = ax.figure.canvas event.key = key event.step = 1 event.guiEvent = None event.name = 'Custom' func = getattr(viewer._event_manager, 'on_%s' % etype) func(event) @cleanup @skipif(not has_qt) def test_line_tool(): img = data.camera() viewer = ImageViewer(img) tool = LineTool(viewer, maxdist=10, line_props=dict(linewidth=3), handle_props=dict(markersize=5)) tool.end_points = get_end_points(img) assert_equal(tool.end_points, np.array([[170, 256], [341, 256]])) # grab a handle and move it do_event(viewer, 'mouse_press', xdata=170, ydata=256) do_event(viewer, 'move', xdata=180, ydata=260) do_event(viewer, 'mouse_release') assert_equal(tool.geometry, np.array([[180, 260], [341, 256]])) # create a new line do_event(viewer, 'mouse_press', xdata=10, ydata=10) do_event(viewer, 'move', xdata=100, ydata=100) do_event(viewer, 'mouse_release') assert_equal(tool.geometry, np.array([[100, 100], [10, 10]])) @cleanup @skipif(not has_qt) def test_thick_line_tool(): img = data.camera() viewer = ImageViewer(img) tool = ThickLineTool(viewer, maxdist=10, line_props=dict(color='red'), handle_props=dict(markersize=5)) tool.end_points = get_end_points(img) do_event(viewer, 'scroll', button='up') assert_equal(tool.linewidth, 2) do_event(viewer, 'scroll', button='down') assert_equal(tool.linewidth, 1) do_event(viewer, 'key_press', key='+') assert_equal(tool.linewidth, 2) do_event(viewer, 'key_press', key='-') assert_equal(tool.linewidth, 1) @cleanup @skipif(not has_qt) def test_rect_tool(): img = data.camera() viewer = ImageViewer(img) tool = RectangleTool(viewer, maxdist=10) tool.extents = (100, 150, 100, 150) assert_equal(tool.corners, ((100, 150, 150, 100), (100, 100, 150, 150))) assert_equal(tool.extents, (100, 150, 100, 150)) assert_equal(tool.edge_centers, ((100, 125.0, 150, 125.0), (125.0, 100, 125.0, 150))) assert_equal(tool.geometry, (100, 150, 100, 150)) # grab a corner and move it do_event(viewer, 'mouse_press', xdata=100, ydata=100) do_event(viewer, 'move', xdata=120, ydata=120) 
do_event(viewer, 'mouse_release') assert_equal(tool.geometry, [120, 150, 120, 150]) # create a new line do_event(viewer, 'mouse_press', xdata=10, ydata=10) do_event(viewer, 'move', xdata=100, ydata=100) do_event(viewer, 'mouse_release') assert_equal(tool.geometry, [10, 100, 10, 100]) @cleanup @skipif(not has_qt) def test_paint_tool(): img = data.moon() viewer = ImageViewer(img) tool = PaintTool(viewer, img.shape) tool.radius = 10 assert_equal(tool.radius, 10) tool.label = 2 assert_equal(tool.label, 2) assert_equal(tool.shape, img.shape) do_event(viewer, 'mouse_press', xdata=100, ydata=100) do_event(viewer, 'move', xdata=110, ydata=110) do_event(viewer, 'mouse_release') assert_equal(tool.overlay[tool.overlay == 2].size, 761) tool.label = 5 do_event(viewer, 'mouse_press', xdata=20, ydata=20) do_event(viewer, 'move', xdata=40, ydata=40) do_event(viewer, 'mouse_release') assert_equal(tool.overlay[tool.overlay == 5].size, 881) assert_equal(tool.overlay[tool.overlay == 2].size, 761) do_event(viewer, 'key_press', key='enter') tool.overlay = tool.overlay * 0 assert_equal(tool.overlay.sum(), 0) @cleanup @skipif(not has_qt) def test_base_tool(): img = data.moon() viewer = ImageViewer(img) tool = CanvasToolBase(viewer) tool.set_visible(False) tool.set_visible(True) do_event(viewer, 'key_press', key='enter') tool.redraw() tool.remove() tool = CanvasToolBase(viewer, useblit=False) tool.redraw()
bsd-3-clause
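A sketch of driving RectangleTool programmatically, mirroring the start of test_rect_tool above without the simulated mouse events; like the tests it needs a Qt backend (the has_qt guard) to construct the viewer:

from skimage import data
from skimage.viewer import ImageViewer
from skimage.viewer.canvastools import RectangleTool

viewer = ImageViewer(data.camera())
tool = RectangleTool(viewer, maxdist=10)
tool.extents = (100, 150, 100, 150)
print(tool.geometry)  # -> (100, 150, 100, 150), as asserted in test_rect_tool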
ldirer/scikit-learn
sklearn/metrics/tests/test_classification.py
3
57142
from __future__ import division, print_function import numpy as np from scipy import linalg from functools import partial from itertools import product import warnings from sklearn import datasets from sklearn import svm from sklearn.datasets import make_multilabel_classification from sklearn.preprocessing import label_binarize from sklearn.utils.fixes import np_version from sklearn.utils.validation import check_random_state from sklearn.utils.testing import assert_raises, clean_warning_registry from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import MockDataFrame from sklearn.metrics import accuracy_score from sklearn.metrics import average_precision_score from sklearn.metrics import classification_report from sklearn.metrics import cohen_kappa_score from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score from sklearn.metrics import fbeta_score from sklearn.metrics import hamming_loss from sklearn.metrics import hinge_loss from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import log_loss from sklearn.metrics import matthews_corrcoef from sklearn.metrics import precision_recall_fscore_support from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import zero_one_loss from sklearn.metrics import brier_score_loss from sklearn.metrics.classification import _check_targets from sklearn.exceptions import UndefinedMetricWarning from scipy.spatial.distance import hamming as sp_hamming ############################################################################### # Utilities for testing def make_prediction(dataset=None, binary=False): """Make some classification predictions on a toy dataset using a SVC If binary is True restrict to a binary classification problem instead of a multiclass classification problem """ if dataset is None: # import some data to play with dataset = datasets.load_iris() X = dataset.data y = dataset.target if binary: # restrict to a binary classification task X, y = X[y < 2], y[y < 2] n_samples, n_features = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) X, y = X[p], y[p] half = int(n_samples / 2) # add noisy features to make the problem harder and avoid perfect results rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] # run classifier, get class probabilities and label predictions clf = svm.SVC(kernel='linear', probability=True, random_state=0) probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if binary: # only interested in probabilities of the positive case # XXX: do we really want a special API for the binary case? 
probas_pred = probas_pred[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] return y_true, y_pred, probas_pred ############################################################################### # Tests def test_multilabel_accuracy_score_subset_accuracy(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) assert_equal(accuracy_score(y1, y2), 0.5) assert_equal(accuracy_score(y1, y1), 1) assert_equal(accuracy_score(y2, y2), 1) assert_equal(accuracy_score(y2, np.logical_not(y2)), 0) assert_equal(accuracy_score(y1, np.logical_not(y1)), 0) assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0) assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0) def test_precision_recall_f1_score_binary(): # Test Precision Recall and F1 Score for binary classification task y_true, y_pred, _ = make_prediction(binary=True) # detailed measures for each class p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.73, 0.85], 2) assert_array_almost_equal(r, [0.88, 0.68], 2) assert_array_almost_equal(f, [0.80, 0.76], 2) assert_array_equal(s, [25, 25]) # individual scoring function that can be used for grid search: in the # binary class case the score is the value of the measure for the positive # class (e.g. label == 1). This is deprecated for average != 'binary'. for kwargs, my_assert in [({}, assert_no_warnings), ({'average': 'binary'}, assert_no_warnings)]: ps = my_assert(precision_score, y_true, y_pred, **kwargs) assert_array_almost_equal(ps, 0.85, 2) rs = my_assert(recall_score, y_true, y_pred, **kwargs) assert_array_almost_equal(rs, 0.68, 2) fs = my_assert(f1_score, y_true, y_pred, **kwargs) assert_array_almost_equal(fs, 0.76, 2) assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2, **kwargs), (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2) def test_precision_recall_f_binary_single_class(): # Test precision, recall and F1 score behave with a single positive or # negative class # Such a case may occur with non-stratified cross-validation assert_equal(1., precision_score([1, 1], [1, 1])) assert_equal(1., recall_score([1, 1], [1, 1])) assert_equal(1., f1_score([1, 1], [1, 1])) assert_equal(0., precision_score([-1, -1], [-1, -1])) assert_equal(0., recall_score([-1, -1], [-1, -1])) assert_equal(0., f1_score([-1, -1], [-1, -1])) @ignore_warnings def test_precision_recall_f_extra_labels(): # Test handling of explicit additional (not in input) labels to PRF y_true = [1, 3, 3, 2] y_pred = [1, 1, 3, 2] y_true_bin = label_binarize(y_true, classes=np.arange(5)) y_pred_bin = label_binarize(y_pred, classes=np.arange(5)) data = [(y_true, y_pred), (y_true_bin, y_pred_bin)] for i, (y_true, y_pred) in enumerate(data): # No average: zeros in array actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average=None) assert_array_almost_equal([0., 1., 1., .5, 0.], actual) # Macro average is changed actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average='macro') assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual) # No effect otheriwse for average in ['micro', 'weighted', 'samples']: if average == 'samples' and i == 0: continue assert_almost_equal(recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average=average), recall_score(y_true, y_pred, labels=None, average=average)) # Error when introducing invalid label in multilabel case # (although it would only affect performance if average='macro'/None) for average in [None, 'macro', 'micro', 'samples']: 
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin, labels=np.arange(6), average=average) assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin, labels=np.arange(-1, 4), average=average) @ignore_warnings def test_precision_recall_f_ignored_labels(): # Test a subset of labels may be requested for PRF y_true = [1, 1, 2, 3] y_pred = [1, 3, 3, 3] y_true_bin = label_binarize(y_true, classes=np.arange(5)) y_pred_bin = label_binarize(y_pred, classes=np.arange(5)) data = [(y_true, y_pred), (y_true_bin, y_pred_bin)] for i, (y_true, y_pred) in enumerate(data): recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3]) recall_all = partial(recall_score, y_true, y_pred, labels=None) assert_array_almost_equal([.5, 1.], recall_13(average=None)) assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro')) assert_almost_equal((.5 * 2 + 1. * 1) / 3, recall_13(average='weighted')) assert_almost_equal(2. / 3, recall_13(average='micro')) # ensure the above were meaningful tests: for average in ['macro', 'weighted', 'micro']: assert_not_equal(recall_13(average=average), recall_all(average=average)) def test_average_precision_score_score_non_binary_class(): # Test that average_precision_score function returns an error when trying # to compute average_precision_score for multiclass task. rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", average_precision_score, y_true, y_pred) def test_average_precision_score_duplicate_values(): # Duplicate values with precision-recall require a different # processing than when computing the AUC of a ROC, because the # precision-recall curve is a decreasing curve # The following situation corresponds to a perfect # test statistic, the average_precision_score should be 1 y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1] assert_equal(average_precision_score(y_true, y_score), 1) def test_average_precision_score_tied_values(): # Here if we go from left to right in y_true, the 0 values are # are separated from the 1 values, so it appears that we've # Correctly sorted our classifications. But in fact the first two # values have the same score (0.5) and so the first two values # could be swapped around, creating an imperfect sorting. This # imperfection should come through in the end score, making it less # than one. y_true = [0, 1, 1] y_score = [.5, .5, .6] assert_not_equal(average_precision_score(y_true, y_score), 1.) @ignore_warnings def test_precision_recall_fscore_support_errors(): y_true, y_pred, _ = make_prediction(binary=True) # Bad beta assert_raises(ValueError, precision_recall_fscore_support, y_true, y_pred, beta=0.0) # Bad pos_label assert_raises(ValueError, precision_recall_fscore_support, y_true, y_pred, pos_label=2, average='binary') # Bad average option assert_raises(ValueError, precision_recall_fscore_support, [0, 1, 2], [1, 2, 0], average='mega') def test_precision_recall_f_unused_pos_label(): # Check warning that pos_label unused when set to non-default value # but average != 'binary'; even if data is binary. assert_warns_message(UserWarning, "Note that pos_label (set to 2) is " "ignored when average != 'binary' (got 'macro'). 
You " "may use labels=[pos_label] to specify a single " "positive class.", precision_recall_fscore_support, [1, 2, 1], [1, 2, 2], pos_label=2, average='macro') def test_confusion_matrix_binary(): # Test confusion matrix - binary classification case y_true, y_pred, _ = make_prediction(binary=True) def test(y_true, y_pred): cm = confusion_matrix(y_true, y_pred) assert_array_equal(cm, [[22, 3], [8, 17]]) tp, fp, fn, tn = cm.flatten() num = (tp * tn - fp * fn) den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) true_mcc = 0 if den == 0 else num / den mcc = matthews_corrcoef(y_true, y_pred) assert_array_almost_equal(mcc, true_mcc, decimal=2) assert_array_almost_equal(mcc, 0.57, decimal=2) test(y_true, y_pred) test([str(y) for y in y_true], [str(y) for y in y_pred]) def test_cohen_kappa(): # These label vectors reproduce the contingency matrix from Artstein and # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]). y1 = np.array([0] * 40 + [1] * 60) y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50) kappa = cohen_kappa_score(y1, y2) assert_almost_equal(kappa, .348, decimal=3) assert_equal(kappa, cohen_kappa_score(y2, y1)) # Add spurious labels and ignore them. y1 = np.append(y1, [2] * 4) y2 = np.append(y2, [2] * 4) assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa) assert_almost_equal(cohen_kappa_score(y1, y1), 1.) # Multiclass example: Artstein and Poesio, Table 4. y1 = np.array([0] * 46 + [1] * 44 + [2] * 10) y2 = np.array([0] * 52 + [1] * 32 + [2] * 16) assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4) # Weighting example: none, linear, quadratic. y1 = np.array([0] * 46 + [1] * 44 + [2] * 10) y2 = np.array([0] * 50 + [1] * 40 + [2] * 10) assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4) assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4) assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4) @ignore_warnings def test_matthews_corrcoef_nan(): assert_equal(matthews_corrcoef([0], [1]), 0.0) assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0) def test_matthews_corrcoef_against_numpy_corrcoef(): rng = np.random.RandomState(0) y_true = rng.randint(0, 2, size=20) y_pred = rng.randint(0, 2, size=20) assert_almost_equal(matthews_corrcoef(y_true, y_pred), np.corrcoef(y_true, y_pred)[0, 1], 10) def test_matthews_corrcoef(): rng = np.random.RandomState(0) y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)] # corrcoef of same vectors must be 1 assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0) # corrcoef, when the two vectors are opposites of each other, should be -1 y_true_inv = ["b" if i == "a" else "a" for i in y_true] assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1) y_true_inv2 = label_binarize(y_true, ["a", "b"]) y_true_inv2 = np.where(y_true_inv2, 'a', 'b') assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1) # For the zero vector case, the corrcoef cannot be calculated and should # result in a RuntimeWarning mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered', matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0]) # But will output 0 assert_almost_equal(mcc, 0.) # And also for any other vector with 0 variance mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered', matthews_corrcoef, y_true, ['a'] * len(y_true)) # But will output 0 assert_almost_equal(mcc, 0.) 
# These two vectors have 0 correlation and hence mcc should be 0 y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1] y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1] assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.) # Check that sample weight is able to selectively exclude mask = [1] * 10 + [0] * 10 # Now the first half of the vector elements are alone given a weight of 1 # and hence the mcc will not be a perfect 0 as in the previous case assert_raises(AssertionError, assert_almost_equal, matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.) def test_precision_recall_f1_score_multiclass(): # Test Precision Recall and F1 Score for multiclass classification task y_true, y_pred, _ = make_prediction(binary=False) # compute scores with default labels introspection p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2) assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2) assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2) assert_array_equal(s, [24, 31, 20]) # averaging tests ps = precision_score(y_true, y_pred, pos_label=1, average='micro') assert_array_almost_equal(ps, 0.53, 2) rs = recall_score(y_true, y_pred, average='micro') assert_array_almost_equal(rs, 0.53, 2) fs = f1_score(y_true, y_pred, average='micro') assert_array_almost_equal(fs, 0.53, 2) ps = precision_score(y_true, y_pred, average='macro') assert_array_almost_equal(ps, 0.53, 2) rs = recall_score(y_true, y_pred, average='macro') assert_array_almost_equal(rs, 0.60, 2) fs = f1_score(y_true, y_pred, average='macro') assert_array_almost_equal(fs, 0.51, 2) ps = precision_score(y_true, y_pred, average='weighted') assert_array_almost_equal(ps, 0.51, 2) rs = recall_score(y_true, y_pred, average='weighted') assert_array_almost_equal(rs, 0.53, 2) fs = f1_score(y_true, y_pred, average='weighted') assert_array_almost_equal(fs, 0.47, 2) assert_raises(ValueError, precision_score, y_true, y_pred, average="samples") assert_raises(ValueError, recall_score, y_true, y_pred, average="samples") assert_raises(ValueError, f1_score, y_true, y_pred, average="samples") assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples", beta=0.5) # same prediction but with and explicit label ordering p, r, f, s = precision_recall_fscore_support( y_true, y_pred, labels=[0, 2, 1], average=None) assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2) assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2) assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2) assert_array_equal(s, [24, 20, 31]) def test_precision_refcall_f1_score_multilabel_unordered_labels(): # test that labels need not be sorted in the multilabel case y_true = np.array([[1, 1, 0, 0]]) y_pred = np.array([[0, 0, 1, 1]]) for average in ['samples', 'micro', 'macro', 'weighted', None]: p, r, f, s = precision_recall_fscore_support( y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average) assert_array_equal(p, 0) assert_array_equal(r, 0) assert_array_equal(f, 0) if average is None: assert_array_equal(s, [0, 1, 1, 0]) def test_precision_recall_f1_score_binary_averaged(): y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1]) y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1]) # compute scores with default labels introspection ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred, average=None) p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average='macro') assert_equal(p, np.mean(ps)) assert_equal(r, np.mean(rs)) assert_equal(f, np.mean(fs)) p, 
r, f, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted') support = np.bincount(y_true) assert_equal(p, np.average(ps, weights=support)) assert_equal(r, np.average(rs, weights=support)) assert_equal(f, np.average(fs, weights=support)) def test_zero_precision_recall(): # Check that pathological cases do not bring NaNs old_error_settings = np.seterr(all='raise') try: y_true = np.array([0, 1, 2, 0, 1, 2]) y_pred = np.array([2, 0, 1, 1, 2, 0]) assert_almost_equal(precision_score(y_true, y_pred, average='macro'), 0.0, 2) assert_almost_equal(recall_score(y_true, y_pred, average='macro'), 0.0, 2) assert_almost_equal(f1_score(y_true, y_pred, average='macro'), 0.0, 2) finally: np.seterr(**old_error_settings) def test_confusion_matrix_multiclass(): # Test confusion matrix - multi-class case y_true, y_pred, _ = make_prediction(binary=False) def test(y_true, y_pred, string_type=False): # compute confusion matrix with default labels introspection cm = confusion_matrix(y_true, y_pred) assert_array_equal(cm, [[19, 4, 1], [4, 3, 24], [0, 2, 18]]) # compute confusion matrix with explicit label ordering labels = ['0', '2', '1'] if string_type else [0, 2, 1] cm = confusion_matrix(y_true, y_pred, labels=labels) assert_array_equal(cm, [[19, 1, 4], [0, 18, 2], [4, 24, 3]]) test(y_true, y_pred) test(list(str(y) for y in y_true), list(str(y) for y in y_pred), string_type=True) def test_confusion_matrix_sample_weight(): """Test confusion matrix - case with sample_weight""" y_true, y_pred, _ = make_prediction(binary=False) weights = [.1] * 25 + [.2] * 25 + [.3] * 25 cm = confusion_matrix(y_true, y_pred, sample_weight=weights) true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) + .2 * confusion_matrix(y_true[25:50], y_pred[25:50]) + .3 * confusion_matrix(y_true[50:], y_pred[50:])) assert_array_almost_equal(cm, true_cm) assert_raises( ValueError, confusion_matrix, y_true, y_pred, sample_weight=weights[:-1]) def test_confusion_matrix_multiclass_subset_labels(): # Test confusion matrix - multi-class case with subset of labels y_true, y_pred, _ = make_prediction(binary=False) # compute confusion matrix with only first two labels considered cm = confusion_matrix(y_true, y_pred, labels=[0, 1]) assert_array_equal(cm, [[19, 4], [4, 3]]) # compute confusion matrix with explicit label ordering for only subset # of labels cm = confusion_matrix(y_true, y_pred, labels=[2, 1]) assert_array_equal(cm, [[18, 2], [24, 3]]) # a label not in y_true should result in zeros for that row/column extra_label = np.max(y_true) + 1 cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label]) assert_array_equal(cm, [[18, 0], [0, 0]]) # check for exception when none of the specified labels are in y_true assert_raises(ValueError, confusion_matrix, y_true, y_pred, labels=[extra_label, extra_label + 1]) def test_classification_report_multiclass(): # Test performance report iris = datasets.load_iris() y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) # print classification report with class names expected_report = """\ precision recall f1-score support setosa 0.83 0.79 0.81 24 versicolor 0.33 0.10 0.15 31 virginica 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report( y_true, y_pred, labels=np.arange(len(iris.target_names)), target_names=iris.target_names) assert_equal(report, expected_report) # print classification report with label detection expected_report = """\ precision recall f1-score support 0 0.83 0.79 0.81 24 1 0.33 0.10 0.15 31 2 0.42 0.90 0.57 20 avg / total 0.51 
0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_classification_report_multiclass_with_digits(): # Test performance report with added digits in floating point values iris = datasets.load_iris() y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) # print classification report with class names expected_report = """\ precision recall f1-score support setosa 0.82609 0.79167 0.80851 24 versicolor 0.33333 0.09677 0.15000 31 virginica 0.41860 0.90000 0.57143 20 avg / total 0.51375 0.53333 0.47310 75 """ report = classification_report( y_true, y_pred, labels=np.arange(len(iris.target_names)), target_names=iris.target_names, digits=5) assert_equal(report, expected_report) # print classification report with label detection expected_report = """\ precision recall f1-score support 0 0.83 0.79 0.81 24 1 0.33 0.10 0.15 31 2 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_classification_report_multiclass_with_string_label(): y_true, y_pred, _ = make_prediction(binary=False) y_true = np.array(["blue", "green", "red"])[y_true] y_pred = np.array(["blue", "green", "red"])[y_pred] expected_report = """\ precision recall f1-score support blue 0.83 0.79 0.81 24 green 0.33 0.10 0.15 31 red 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) expected_report = """\ precision recall f1-score support a 0.83 0.79 0.81 24 b 0.33 0.10 0.15 31 c 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred, target_names=["a", "b", "c"]) assert_equal(report, expected_report) def test_classification_report_multiclass_with_unicode_label(): y_true, y_pred, _ = make_prediction(binary=False) labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"]) y_true = labels[y_true] y_pred = labels[y_pred] expected_report = u"""\ precision recall f1-score support blue\xa2 0.83 0.79 0.81 24 green\xa2 0.33 0.10 0.15 31 red\xa2 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_classification_report_multiclass_with_long_string_label(): y_true, y_pred, _ = make_prediction(binary=False) labels = np.array(["blue", "green"*5, "red"]) y_true = labels[y_true] y_pred = labels[y_pred] expected_report = """\ precision recall f1-score support blue 0.83 0.79 0.81 24 greengreengreengreengreen 0.33 0.10 0.15 31 red 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_classification_report_labels_target_names_unequal_length(): y_true = [0, 0, 2, 0, 0] y_pred = [0, 2, 2, 0, 0] target_names = ['class 0', 'class 1', 'class 2'] assert_warns_message(UserWarning, "labels size, 2, does not " "match size of target_names, 3", classification_report, y_true, y_pred, target_names=target_names) def test_multilabel_classification_report(): n_classes = 4 n_samples = 50 _, y_true = make_multilabel_classification(n_features=1, n_samples=n_samples, n_classes=n_classes, random_state=0) _, y_pred = make_multilabel_classification(n_features=1, n_samples=n_samples, n_classes=n_classes, random_state=1) expected_report = """\ precision recall f1-score support 0 0.50 0.67 0.57 24 1 0.51 0.74 0.61 27 2 0.29 0.08 0.12 26 3 0.52 0.56 0.54 27 avg / total 0.45 0.51 0.46 104 """ report = 
classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_multilabel_zero_one_loss_subset(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) assert_equal(zero_one_loss(y1, y2), 0.5) assert_equal(zero_one_loss(y1, y1), 0) assert_equal(zero_one_loss(y2, y2), 0) assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1) assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1) assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1) assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1) def test_multilabel_hamming_loss(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) w = np.array([1, 3]) assert_equal(hamming_loss(y1, y2), 1 / 6) assert_equal(hamming_loss(y1, y1), 0) assert_equal(hamming_loss(y2, y2), 0) assert_equal(hamming_loss(y2, 1 - y2), 1) assert_equal(hamming_loss(y1, 1 - y1), 1) assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6) assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5) assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12) assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12) assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3) # sp_hamming only works with 1-D arrays assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0])) assert_warns(DeprecationWarning, hamming_loss, y1, y2, classes=[0, 1]) def test_multilabel_jaccard_similarity_score(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) # size(y1 \inter y2) = [1, 2] # size(y1 \union y2) = [2, 2] assert_equal(jaccard_similarity_score(y1, y2), 0.75) assert_equal(jaccard_similarity_score(y1, y1), 1) assert_equal(jaccard_similarity_score(y2, y2), 1) assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0) assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0) assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0) assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0) @ignore_warnings def test_precision_recall_f1_score_multilabel_1(): # Test precision_recall_f1_score on a crafted multilabel example # First crafted example y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]]) y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]]) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) # tp = [0, 1, 1, 0] # fn = [1, 0, 0, 1] # fp = [1, 1, 0, 0] # Check per class assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2) assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2) assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2) assert_array_almost_equal(s, [1, 1, 1, 1], 2) f2 = fbeta_score(y_true, y_pred, beta=2, average=None) support = s assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2) # Check macro p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") assert_almost_equal(p, 1.5 / 4) assert_almost_equal(r, 0.5) assert_almost_equal(f, 2.5 / 1.5 * 0.25) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2)) # Check micro p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") assert_almost_equal(p, 0.5) assert_almost_equal(r, 0.5) assert_almost_equal(f, 0.5) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="micro"), (1 + 4) * p * r / (4 * p + r)) # Check weighted p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") 
assert_almost_equal(p, 1.5 / 4) assert_almost_equal(r, 0.5) assert_almost_equal(f, 2.5 / 1.5 * 0.25) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="weighted"), np.average(f2, weights=support)) # Check samples # |h(x_i) inter y_i | = [0, 1, 1] # |y_i| = [1, 1, 2] # |h(x_i)| = [1, 1, 2] p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") assert_almost_equal(p, 0.5) assert_almost_equal(r, 0.5) assert_almost_equal(f, 0.5) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.5) @ignore_warnings def test_precision_recall_f1_score_multilabel_2(): # Test precision_recall_f1_score on a crafted multilabel example 2 # Second crafted example y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]]) y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]]) # tp = [ 0. 1. 0. 0.] # fp = [ 1. 0. 0. 2.] # fn = [ 1. 1. 1. 0.] p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2) assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2) assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2) assert_array_almost_equal(s, [1, 2, 1, 0], 2) f2 = fbeta_score(y_true, y_pred, beta=2, average=None) support = s assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") assert_almost_equal(p, 0.25) assert_almost_equal(r, 0.25) assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="micro"), (1 + 4) * p * r / (4 * p + r)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") assert_almost_equal(p, 0.25) assert_almost_equal(r, 0.125) assert_almost_equal(f, 2 / 12) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") assert_almost_equal(p, 2 / 4) assert_almost_equal(r, 1 / 4) assert_almost_equal(f, 2 / 3 * 2 / 4) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="weighted"), np.average(f2, weights=support)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") # Check samples # |h(x_i) inter y_i | = [0, 0, 1] # |y_i| = [1, 1, 2] # |h(x_i)| = [1, 1, 2] assert_almost_equal(p, 1 / 6) assert_almost_equal(r, 1 / 6) assert_almost_equal(f, 2 / 4 * 1 / 3) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.1666, 2) @ignore_warnings def test_precision_recall_f1_score_with_an_empty_prediction(): y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]]) y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]]) # true_pos = [ 0. 1. 1. 0.] # false_pos = [ 0. 0. 0. 1.] # false_neg = [ 1. 1. 0. 0.] 
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2) assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2) assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2) assert_array_almost_equal(s, [1, 2, 1, 0], 2) f2 = fbeta_score(y_true, y_pred, beta=2, average=None) support = s assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") assert_almost_equal(p, 0.5) assert_almost_equal(r, 1.5 / 4) assert_almost_equal(f, 2.5 / (4 * 1.5)) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") assert_almost_equal(p, 2 / 3) assert_almost_equal(r, 0.5) assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5)) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="micro"), (1 + 4) * p * r / (4 * p + r)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") assert_almost_equal(p, 3 / 4) assert_almost_equal(r, 0.5) assert_almost_equal(f, (2 / 1.5 + 1) / 4) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="weighted"), np.average(f2, weights=support)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") # |h(x_i) inter y_i | = [0, 0, 2] # |y_i| = [1, 1, 2] # |h(x_i)| = [0, 1, 2] assert_almost_equal(p, 1 / 3) assert_almost_equal(r, 1 / 3) assert_almost_equal(f, 1 / 3) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.333, 2) def test_precision_recall_f1_no_labels(): y_true = np.zeros((20, 3)) y_pred = np.zeros_like(y_true) # tp = [0, 0, 0] # fn = [0, 0, 0] # fp = [0, 0, 0] # support = [0, 0, 0] # |y_hat_i inter y_i | = [0, 0, 0] # |y_i| = [0, 0, 0] # |y_hat_i| = [0, 0, 0] for beta in [1]: p, r, f, s = assert_warns(UndefinedMetricWarning, precision_recall_fscore_support, y_true, y_pred, average=None, beta=beta) assert_array_almost_equal(p, [0, 0, 0], 2) assert_array_almost_equal(r, [0, 0, 0], 2) assert_array_almost_equal(f, [0, 0, 0], 2) assert_array_almost_equal(s, [0, 0, 0], 2) fbeta = assert_warns(UndefinedMetricWarning, fbeta_score, y_true, y_pred, beta=beta, average=None) assert_array_almost_equal(fbeta, [0, 0, 0], 2) for average in ["macro", "micro", "weighted", "samples"]: p, r, f, s = assert_warns(UndefinedMetricWarning, precision_recall_fscore_support, y_true, y_pred, average=average, beta=beta) assert_almost_equal(p, 0) assert_almost_equal(r, 0) assert_almost_equal(f, 0) assert_equal(s, None) fbeta = assert_warns(UndefinedMetricWarning, fbeta_score, y_true, y_pred, beta=beta, average=average) assert_almost_equal(fbeta, 0) def test_prf_warnings(): # average of per-label scores f, w = precision_recall_fscore_support, UndefinedMetricWarning my_assert = assert_warns_message for average in [None, 'weighted', 'macro']: msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 in labels with no predicted samples.') my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average) msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 in labels with no true samples.') my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average) # average of per-sample scores msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 in samples with no predicted labels.') my_assert(w, msg, f, np.array([[1, 0], [1, 0]]), np.array([[1, 0], [0, 0]]), average='samples') 
msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 in samples with no true labels.') my_assert(w, msg, f, np.array([[1, 0], [0, 0]]), np.array([[1, 0], [1, 0]]), average='samples') # single score: micro-average msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 due to no predicted samples.') my_assert(w, msg, f, np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 due to no true samples.') my_assert(w, msg, f, np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') # single postive label msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 due to no predicted samples.') my_assert(w, msg, f, [1, 1], [-1, -1], average='binary') msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 due to no true samples.') my_assert(w, msg, f, [-1, -1], [1, 1], average='binary') def test_recall_warnings(): assert_no_warnings(recall_score, np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') clean_warning_registry() with warnings.catch_warnings(record=True) as record: warnings.simplefilter('always') recall_score(np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') assert_equal(str(record.pop().message), 'Recall is ill-defined and ' 'being set to 0.0 due to no true samples.') def test_precision_warnings(): clean_warning_registry() with warnings.catch_warnings(record=True) as record: warnings.simplefilter('always') precision_score(np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') assert_equal(str(record.pop().message), 'Precision is ill-defined and ' 'being set to 0.0 due to no predicted samples.') assert_no_warnings(precision_score, np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') def test_fscore_warnings(): clean_warning_registry() with warnings.catch_warnings(record=True) as record: warnings.simplefilter('always') for score in [f1_score, partial(fbeta_score, beta=2)]: score(np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') assert_equal(str(record.pop().message), 'F-score is ill-defined and ' 'being set to 0.0 due to no predicted samples.') score(np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') assert_equal(str(record.pop().message), 'F-score is ill-defined and ' 'being set to 0.0 due to no true samples.') def test_prf_average_binary_data_non_binary(): # Error if user does not explicitly set non-binary average mode y_true_mc = [1, 2, 3, 3] y_pred_mc = [1, 2, 3, 1] y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]]) y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) for y_true, y_pred, y_type in [ (y_true_mc, y_pred_mc, 'multiclass'), (y_true_ind, y_pred_ind, 'multilabel-indicator'), ]: for metric in [precision_score, recall_score, f1_score, partial(fbeta_score, beta=2)]: assert_raise_message(ValueError, "Target is %s but average='binary'. Please " "choose another average setting." % y_type, metric, y_true, y_pred) def test__check_targets(): # Check that _check_targets correctly merges target types, squeezes # output and fails if input lengths differ. 
IND = 'multilabel-indicator' MC = 'multiclass' BIN = 'binary' CNT = 'continuous' MMC = 'multiclass-multioutput' MCN = 'continuous-multioutput' # all of length 3 EXAMPLES = [ (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])), # must not be considered binary (IND, np.array([[0, 1], [1, 0], [1, 1]])), (MC, [2, 3, 1]), (BIN, [0, 1, 1]), (CNT, [0., 1.5, 1.]), (MC, np.array([[2], [3], [1]])), (BIN, np.array([[0], [1], [1]])), (CNT, np.array([[0.], [1.5], [1.]])), (MMC, np.array([[0, 2], [1, 3], [2, 3]])), (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])), ] # expected type given input types, or None for error # (types will be tried in either order) EXPECTED = { (IND, IND): IND, (MC, MC): MC, (BIN, BIN): BIN, (MC, IND): None, (BIN, IND): None, (BIN, MC): MC, # Disallowed types (CNT, CNT): None, (MMC, MMC): None, (MCN, MCN): None, (IND, CNT): None, (MC, CNT): None, (BIN, CNT): None, (MMC, CNT): None, (MCN, CNT): None, (IND, MMC): None, (MC, MMC): None, (BIN, MMC): None, (MCN, MMC): None, (IND, MCN): None, (MC, MCN): None, (BIN, MCN): None, } for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2): try: expected = EXPECTED[type1, type2] except KeyError: expected = EXPECTED[type2, type1] if expected is None: assert_raises(ValueError, _check_targets, y1, y2) if type1 != type2: assert_raise_message( ValueError, "Can't handle mix of {0} and {1}".format(type1, type2), _check_targets, y1, y2) else: if type1 not in (BIN, MC, IND): assert_raise_message(ValueError, "{0} is not supported".format(type1), _check_targets, y1, y2) else: merged_type, y1out, y2out = _check_targets(y1, y2) assert_equal(merged_type, expected) if merged_type.startswith('multilabel'): assert_equal(y1out.format, 'csr') assert_equal(y2out.format, 'csr') else: assert_array_equal(y1out, np.squeeze(y1)) assert_array_equal(y2out, np.squeeze(y2)) assert_raises(ValueError, _check_targets, y1[:-1], y2) # Make sure seq of seq is not supported y1 = [(1, 2,), (0, 2, 3)] y2 = [(2,), (0, 2,)] msg = ('You appear to be using a legacy multi-label data representation. 
' 'Sequence of sequences are no longer supported; use a binary array' ' or sparse matrix instead.') assert_raise_message(ValueError, msg, _check_targets, y1, y2) def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary(): # https://github.com/scikit-learn/scikit-learn/issues/8098 y_true = [0, 1] y_pred = [0, -1] assert_equal(_check_targets(y_true, y_pred)[0], 'multiclass') def test_hinge_loss_binary(): y_true = np.array([-1, 1, 1, -1]) pred_decision = np.array([-8.5, 0.5, 1.5, -0.3]) assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4) y_true = np.array([0, 2, 2, 0]) pred_decision = np.array([-8.5, 0.5, 1.5, -0.3]) assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4) def test_hinge_loss_multiclass(): pred_decision = np.array([ [+0.36, -0.17, -0.58, -0.99], [-0.54, -0.37, -0.48, -0.58], [-1.45, -0.58, -0.38, -0.17], [-0.54, -0.38, -0.48, -0.58], [-2.36, -0.79, -0.27, +0.24], [-1.45, -0.58, -0.38, -0.17] ]) y_true = np.array([0, 1, 2, 1, 3, 2]) dummy_losses = np.array([ 1 - pred_decision[0][0] + pred_decision[0][1], 1 - pred_decision[1][1] + pred_decision[1][2], 1 - pred_decision[2][2] + pred_decision[2][3], 1 - pred_decision[3][1] + pred_decision[3][2], 1 - pred_decision[4][3] + pred_decision[4][2], 1 - pred_decision[5][2] + pred_decision[5][3] ]) dummy_losses[dummy_losses <= 0] = 0 dummy_hinge_loss = np.mean(dummy_losses) assert_equal(hinge_loss(y_true, pred_decision), dummy_hinge_loss) def test_hinge_loss_multiclass_missing_labels_with_labels_none(): y_true = np.array([0, 1, 2, 2]) pred_decision = np.array([ [+1.27, 0.034, -0.68, -1.40], [-1.45, -0.58, -0.38, -0.17], [-2.36, -0.79, -0.27, +0.24], [-2.36, -0.79, -0.27, +0.24] ]) error_message = ("Please include all labels in y_true " "or pass labels as third argument") assert_raise_message(ValueError, error_message, hinge_loss, y_true, pred_decision) def test_hinge_loss_multiclass_with_missing_labels(): pred_decision = np.array([ [+0.36, -0.17, -0.58, -0.99], [-0.55, -0.38, -0.48, -0.58], [-1.45, -0.58, -0.38, -0.17], [-0.55, -0.38, -0.48, -0.58], [-1.45, -0.58, -0.38, -0.17] ]) y_true = np.array([0, 1, 2, 1, 2]) labels = np.array([0, 1, 2, 3]) dummy_losses = np.array([ 1 - pred_decision[0][0] + pred_decision[0][1], 1 - pred_decision[1][1] + pred_decision[1][2], 1 - pred_decision[2][2] + pred_decision[2][3], 1 - pred_decision[3][1] + pred_decision[3][2], 1 - pred_decision[4][2] + pred_decision[4][3] ]) dummy_losses[dummy_losses <= 0] = 0 dummy_hinge_loss = np.mean(dummy_losses) assert_equal(hinge_loss(y_true, pred_decision, labels=labels), dummy_hinge_loss) def test_hinge_loss_multiclass_invariance_lists(): # Currently, invariance of string and integer labels cannot be tested # in common invariance tests because invariance tests for multiclass # decision functions is not implemented yet. 
y_true = ['blue', 'green', 'red', 'green', 'white', 'red'] pred_decision = [ [+0.36, -0.17, -0.58, -0.99], [-0.55, -0.38, -0.48, -0.58], [-1.45, -0.58, -0.38, -0.17], [-0.55, -0.38, -0.48, -0.58], [-2.36, -0.79, -0.27, +0.24], [-1.45, -0.58, -0.38, -0.17]] dummy_losses = np.array([ 1 - pred_decision[0][0] + pred_decision[0][1], 1 - pred_decision[1][1] + pred_decision[1][2], 1 - pred_decision[2][2] + pred_decision[2][3], 1 - pred_decision[3][1] + pred_decision[3][2], 1 - pred_decision[4][3] + pred_decision[4][2], 1 - pred_decision[5][2] + pred_decision[5][3] ]) dummy_losses[dummy_losses <= 0] = 0 dummy_hinge_loss = np.mean(dummy_losses) assert_equal(hinge_loss(y_true, pred_decision), dummy_hinge_loss) def test_log_loss(): # binary case with symbolic labels ("no" < "yes") y_true = ["no", "no", "no", "yes", "yes", "yes"] y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99], [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]]) loss = log_loss(y_true, y_pred) assert_almost_equal(loss, 1.8817971) # multiclass case; adapted from http://bit.ly/RJJHWA y_true = [1, 0, 2] y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]] loss = log_loss(y_true, y_pred, normalize=True) assert_almost_equal(loss, 0.6904911) # check that we got all the shapes and axes right # by doubling the length of y_true and y_pred y_true *= 2 y_pred *= 2 loss = log_loss(y_true, y_pred, normalize=False) assert_almost_equal(loss, 0.6904911 * 6, decimal=6) # check eps and handling of absolute zero and one probabilities y_pred = np.asarray(y_pred) > .5 loss = log_loss(y_true, y_pred, normalize=True, eps=.1) assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9))) # raise error if number of classes are not equal. y_true = [1, 0, 2] y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]] assert_raises(ValueError, log_loss, y_true, y_pred) # case when y_true is a string array object y_true = ["ham", "spam", "spam", "ham"] y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]] loss = log_loss(y_true, y_pred) assert_almost_equal(loss, 1.0383217, decimal=6) # test labels option y_true = [2, 2] y_pred = [[0.2, 0.7], [0.6, 0.5]] y_score = np.array([[0.1, 0.9], [0.1, 0.9]]) error_str = ('y_true contains only one label (2). 
Please provide ' 'the true labels explicitly through the labels argument.') assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred) y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]] error_str = ('Found input variables with inconsistent numbers of samples: ' '[3, 2]') assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred) # works when the labels argument is used true_log_loss = -np.mean(np.log(y_score[:, 1])) calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2]) assert_almost_equal(calculated_log_loss, true_log_loss) # ensure labels work when len(np.unique(y_true)) != y_pred.shape[1] y_true = [1, 2, 2] y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]] loss = log_loss(y_true, y_score2, labels=[1, 2, 3]) assert_almost_equal(loss, 1.0630345, decimal=6) def test_log_loss_pandas_input(): # case when input is a pandas series and dataframe gh-5715 y_tr = np.array(["ham", "spam", "spam", "ham"]) y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]) types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TrueInputType, PredInputType in types: # y_pred dataframe, y_true series y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr) loss = log_loss(y_true, y_pred) assert_almost_equal(loss, 1.0383217, decimal=6) def test_brier_score_loss(): # Check brier_score_loss function y_true = np.array([0, 1, 1, 0, 1, 1]) y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95]) true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true) assert_almost_equal(brier_score_loss(y_true, y_true), 0.0) assert_almost_equal(brier_score_loss(y_true, y_pred), true_score) assert_almost_equal(brier_score_loss(1. + y_true, y_pred), true_score) assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred), true_score) assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:]) assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.) assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.) # calculate even if only single class in y_true (#6980) assert_almost_equal(brier_score_loss([0], [0.5]), 0.25) assert_almost_equal(brier_score_loss([1], [0.5]), 0.25)
bsd-3-clause
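
The test file above leans repeatedly on the identity F_beta = (1 + beta^2) * P * R / (beta^2 * P + R). A minimal standalone sketch of that check on a toy binary problem (the labels and beta value below are made up for illustration, not taken from the tests):

import numpy as np
from sklearn.metrics import precision_score, recall_score, fbeta_score

# Toy binary labels; any pair with at least one true and one predicted positive works.
y_true = [0, 1, 1, 0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1, 1, 1, 1]

beta = 2.0
p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
f = fbeta_score(y_true, y_pred, beta=beta)

# F-beta is the weighted harmonic mean of precision and recall.
expected = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
assert np.isclose(f, expected)
print(p, r, f)
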
EggInTheShell/TodoCounting
blur_image2.py
1
3563
""" 合計でなく最大値でガウスを合成 """ import numpy as np from PIL import Image, ImageFilter import matplotlib.pyplot as plt import pandas as pd from os.path import join, relpath import glob, os from scipy.ndimage.filters import gaussian_filter import pickle from settings import * from data_utils import * import time def gauss_max(label, sigma): num_label = int(np.sum(label)) # dotの数を出す # print(num_label) if num_label==0: gauss_max = np.zeros(label.shape, dtype=np.float32) else: gauss_ch = np.zeros([label.shape[0], label.shape[1], num_label], dtype=np.float32) # dotの数だけchを持つ画像を作る where_dot = np.where(label==1) # print(where_dot) # (nparray, nparray)のtuple for i in range(num_label): gauss_ch[where_dot[0][i], where_dot[1][i], i] = 1 # print(gauss_ch.shape) for ch in range(num_label): gauss_ch[:, :, ch] = gaussian_filter(gauss_ch[:, :, ch], sigma=sigma, mode='constant') gauss_max = np.max(gauss_ch, axis=2) # print(gauss_max.shape) return gauss_max startTime = time.time() data_folder = DATA_DIR + 'patches_bool/' data_path_list = glob.glob(data_folder+'*traindata_reduced.pkl') # ぼかし方を設定 # todo 各dotのガウスを和算せずに最大値を取る -> peakが消失しない ref openpose sigma = 15 sample = np.zeros([99,99], dtype=np.float32) sample[44,44] = 1 sample = gaussian_filter(sample, sigma=sigma) # plt.imshow(sample) # plt.gray() # plt.show() peak = np.max(sample) # print(peak) for path in data_path_list: id = int(os.path.basename(path)[:-len('traindata_reduced.pkl')]) print('processing: ', id) with open(path, mode='rb') as f: dict = pickle.load(f) slice = 1000 images = dict['image'][:slice] labels = dict['label'][:slice] labels_blurred = np.zeros([slice,labels.shape[1], labels.shape[2], 5], dtype=np.float32) # print('labels shape', labels.shape) for i in range(labels.shape[0]): print(i) label = labels[i].astype(np.float32) # print(np.max(label)) # print(label.shape) blurred = np.zeros_like(label, dtype=np.float32) blurred = gaussian_filter(label[:, :], sigma=15) for ch in range(label.shape[2]): blurred[:,:,ch] = gauss_max(label[:,:,ch], sigma=sigma) # blurred[:,:,ch] = gaussian_filter(label[:,:,ch], sigma=sigma, mode='nearest') # print(np.max(blurred)) labels_blurred[i] = blurred labels_blurred = labels_blurred/peak print('label peak ', np.max(labels_blurred)) labels_blurred = np.minimum(1, labels_blurred) # 可視化 # for i in range(slice): # plt.subplot(2,3,1) # plt.imshow(images[i]) # plt.subplot(2,3,2) # plt.imshow(labels_blurred[i,:,:,0]) # plt.gray() # plt.subplot(2,3,3) # plt.imshow(labels_blurred[i,:,:,1]) # plt.gray() # plt.subplot(2,3,4) # plt.imshow(labels_blurred[i,:,:,2]) # plt.gray() # plt.subplot(2,3,5) # plt.imshow(labels_blurred[i,:,:,3]) # plt.gray() # plt.subplot(2,3,6) # plt.imshow(labels_blurred[i,:,:,4]) # plt.gray() # plt.show() # 保存 dict = {'image': images, 'label': labels_blurred} savepath = DATA_DIR + str(id) + '_train_max_blurred.pkl' with open(savepath, mode='wb') as f: pickle.dump(dict, f) print('saved: ', savepath, time.time()-startTime)
mit
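
A compact, self-contained sketch of the max-of-Gaussians labelling idea in blur_image2.py above: each annotated dot gets its own Gaussian channel and the channels are combined with a per-pixel maximum so nearby peaks do not merge. The helper name, array size and sigma are illustrative, not the script's real settings.

import numpy as np
from scipy.ndimage import gaussian_filter

def gauss_max_heatmap(label, sigma=15):
    """Blur a binary dot map so nearby dots keep separate, equal-height peaks."""
    ys, xs = np.where(label == 1)
    if len(ys) == 0:
        return np.zeros(label.shape, dtype=np.float32)
    # One blurred channel per dot, combined with a per-pixel maximum.
    channels = np.zeros(label.shape + (len(ys),), dtype=np.float32)
    for i, (y, x) in enumerate(zip(ys, xs)):
        channels[y, x, i] = 1.0
        channels[:, :, i] = gaussian_filter(channels[:, :, i], sigma=sigma,
                                            mode='constant')
    heat = channels.max(axis=2)
    # Normalise by the peak of a single blurred dot so maxima sit near 1.
    single = np.zeros(label.shape, dtype=np.float32)
    single[label.shape[0] // 2, label.shape[1] // 2] = 1.0
    peak = gaussian_filter(single, sigma=sigma).max()
    return np.minimum(heat / peak, 1.0)

demo = np.zeros((99, 99), dtype=np.float32)
demo[30, 30] = demo[35, 35] = demo[80, 20] = 1.0
print(gauss_max_heatmap(demo).max())  # close to 1.0

Taking the maximum rather than the sum keeps every dot's peak at roughly the same height even when dots overlap, which is the point of the script's comment that peaks should not vanish.
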
scikit-optimize/scikit-optimize.github.io
0.8/_downloads/365fdab27864494141feaa35987b301b/partial-dependence-plot-2D.py
3
3291
""" =========================== Partial Dependence Plots 2D =========================== Hvass-Labs Dec 2017 Holger Nahrstaedt 2020 .. currentmodule:: skopt Simple example to show the new 2D plots. """ print(__doc__) import numpy as np from math import exp from skopt import gp_minimize from skopt.space import Real, Categorical, Integer from skopt.plots import plot_histogram, plot_objective_2D, plot_objective from skopt.utils import point_asdict np.random.seed(123) import matplotlib.pyplot as plt ############################################################################# dim_learning_rate = Real(name='learning_rate', low=1e-6, high=1e-2, prior='log-uniform') dim_num_dense_layers = Integer(name='num_dense_layers', low=1, high=5) dim_num_dense_nodes = Integer(name='num_dense_nodes', low=5, high=512) dim_activation = Categorical(name='activation', categories=['relu', 'sigmoid']) dimensions = [dim_learning_rate, dim_num_dense_layers, dim_num_dense_nodes, dim_activation] default_parameters = [1e-4, 1, 64, 'relu'] def model_fitness(x): learning_rate, num_dense_layers, num_dense_nodes, activation = x fitness = ((exp(learning_rate) - 1.0) * 1000) ** 2 + \ (num_dense_layers) ** 2 + \ (num_dense_nodes/100) ** 2 fitness *= 1.0 + 0.1 * np.random.rand() if activation == 'sigmoid': fitness += 10 return fitness print(model_fitness(x=default_parameters)) ############################################################################# search_result = gp_minimize(func=model_fitness, dimensions=dimensions, n_calls=30, x0=default_parameters, random_state=123 ) print(search_result.x) print(search_result.fun) ############################################################################# for fitness, x in sorted(zip(search_result.func_vals, search_result.x_iters)): print(fitness, x) ############################################################################# space = search_result.space print(search_result.x_iters) search_space = {name: space[name][1] for name in space.dimension_names} print(point_asdict(search_space, default_parameters)) ############################################################################# print("Plotting now ...") _ = plot_histogram(result=search_result, dimension_identifier='learning_rate', bins=20) plt.show() ############################################################################# _ = plot_objective_2D(result=search_result, dimension_identifier1='learning_rate', dimension_identifier2='num_dense_nodes') plt.show() ############################################################################# _ = plot_objective_2D(result=search_result, dimension_identifier1='num_dense_layers', dimension_identifier2='num_dense_nodes') plt.show() ############################################################################# _ = plot_objective(result=search_result, plot_dims=['num_dense_layers', 'num_dense_nodes']) plt.show()
bsd-3-clause
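
A smaller, self-contained variant of the scikit-optimize search above, reduced to two continuous dimensions so the partial-dependence plot is cheap to reproduce; the quadratic objective is invented purely for illustration.

from skopt import gp_minimize
from skopt.space import Real
from skopt.plots import plot_objective
import matplotlib.pyplot as plt

space = [Real(name='x', low=-2.0, high=2.0),
         Real(name='y', low=-2.0, high=2.0)]

def toy_objective(params):
    # A simple bowl standing in for a real model-fitness function.
    x, y = params
    return (x - 0.3) ** 2 + (y + 0.5) ** 2

result = gp_minimize(func=toy_objective, dimensions=space,
                     n_calls=20, random_state=0)
print(result.x, result.fun)

_ = plot_objective(result)
plt.show()
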
ghislainp/iris
docs/iris/example_code/General/anomaly_log_colouring.py
12
4380
""" Colouring anomaly data with logarithmic scaling =============================================== In this example, we need to plot anomaly data where the values have a "logarithmic" significance -- i.e. we want to give approximately equal ranges of colour between data values of, say, 1 and 10 as between 10 and 100. As the data range also contains zero, that obviously does not suit a simple logarithmic interpretation. However, values of less than a certain absolute magnitude may be considered "not significant", so we put these into a separate "zero band" which is plotted in white. To do this, we create a custom value mapping function (normalization) using the matplotlib Norm class `matplotlib.colours.SymLogNorm <http://matplotlib.org/api/colors_api.html#matplotlib.colors.SymLogNorm>`_. We use this to make a cell-filled pseudocolour plot with a colorbar. NOTE: By "pseudocolour", we mean that each data point is drawn as a "cell" region on the plot, coloured according to its data value. This is provided in Iris by the functions :meth:`iris.plot.pcolor` and :meth:`iris.plot.pcolormesh`, which call the underlying matplotlib functions of the same names (i.e. `matplotlib.pyplot.pcolor <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pcolor>`_ and `matplotlib.pyplot.pcolormesh <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pcolormesh>`_). See also: http://en.wikipedia.org/wiki/False_color#Pseudocolor. """ import cartopy.crs as ccrs import iris import iris.coord_categorisation import iris.plot as iplt import matplotlib.pyplot as plt import matplotlib.colors as mcols def main(): # Load a sample air temperatures sequence. file_path = iris.sample_data_path('E1_north_america.nc') temperatures = iris.load_cube(file_path) # Create a year-number coordinate from the time information. iris.coord_categorisation.add_year(temperatures, 'time') # Create a sample anomaly field for one chosen year, by extracting that # year and subtracting the time mean. sample_year = 1982 year_temperature = temperatures.extract(iris.Constraint(year=sample_year)) time_mean = temperatures.collapsed('time', iris.analysis.MEAN) anomaly = year_temperature - time_mean # Construct a plot title string explaining which years are involved. years = temperatures.coord('year').points plot_title = 'Temperature anomaly' plot_title += '\n{} differences from {}-{} average.'.format( sample_year, years[0], years[-1]) # Define scaling levels for the logarithmic colouring. minimum_log_level = 0.1 maximum_scale_level = 3.0 # Use a standard colour map which varies blue-white-red. # For suitable options, see the 'Diverging colormaps' section in: # http://matplotlib.org/examples/color/colormaps_reference.html anom_cmap = 'bwr' # Create a 'logarithmic' data normalization. anom_norm = mcols.SymLogNorm(linthresh=minimum_log_level, linscale=0, vmin=-maximum_scale_level, vmax=maximum_scale_level) # Setting "linthresh=minimum_log_level" makes its non-logarithmic # data range equal to our 'zero band'. # Setting "linscale=0" maps the whole zero band to the middle colour value # (i.e. 0.5), which is the neutral point of a "diverging" style colormap. # Create an Axes, specifying the map projection. plt.axes(projection=ccrs.LambertConformal()) # Make a pseudocolour plot using this colour scheme. mesh = iplt.pcolormesh(anomaly, cmap=anom_cmap, norm=anom_norm) # Add a colourbar, with extensions to show handling of out-of-range values. 
    bar = plt.colorbar(mesh, orientation='horizontal', extend='both')

    # Set some suitable fixed "logarithmic" colourbar tick positions.
    tick_levels = [-3, -1, -0.3, 0.0, 0.3, 1, 3]
    bar.set_ticks(tick_levels)

    # Modify the tick labels so that the centre one shows "+/-<minimum-level>".
    tick_levels[3] = r'$\pm${:g}'.format(minimum_log_level)
    bar.set_ticklabels(tick_levels)

    # Label the colourbar to show the units.
    bar.set_label('[{}, log scale]'.format(anomaly.units))

    # Add coastlines and a title.
    plt.gca().coastlines()
    plt.title(plot_title)

    # Display the result.
    iplt.show()


if __name__ == '__main__':
    main()
gpl-3.0
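
To see the SymLogNorm colouring used above without the Iris and Cartopy dependencies, here is a matplotlib-only sketch on synthetic data; the 0.1 / 3.0 thresholds mirror the example's choices, everything else is made up.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcols

# Synthetic signed "anomaly" field spanning a couple of orders of magnitude.
rng = np.random.RandomState(0)
anomaly = (rng.uniform(-1, 1, size=(50, 50))
           * 10 ** rng.uniform(-2, 0.5, size=(50, 50)))

# Values within +/-0.1 fall in the linear "zero band"; outside it the
# mapping is logarithmic up to +/-3.0.
norm = mcols.SymLogNorm(linthresh=0.1, linscale=0, vmin=-3.0, vmax=3.0)
mesh = plt.pcolormesh(anomaly, cmap='bwr', norm=norm)
bar = plt.colorbar(mesh, orientation='horizontal', extend='both')
bar.set_ticks([-3, -1, -0.3, 0, 0.3, 1, 3])
plt.title('SymLogNorm demo on synthetic data')
plt.show()
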
f3r/scikit-learn
sklearn/cluster/__init__.py
364
1228
""" The :mod:`sklearn.cluster` module gathers popular unsupervised clustering algorithms. """ from .spectral import spectral_clustering, SpectralClustering from .mean_shift_ import (mean_shift, MeanShift, estimate_bandwidth, get_bin_seeds) from .affinity_propagation_ import affinity_propagation, AffinityPropagation from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree, FeatureAgglomeration) from .k_means_ import k_means, KMeans, MiniBatchKMeans from .dbscan_ import dbscan, DBSCAN from .bicluster import SpectralBiclustering, SpectralCoclustering from .birch import Birch __all__ = ['AffinityPropagation', 'AgglomerativeClustering', 'Birch', 'DBSCAN', 'KMeans', 'FeatureAgglomeration', 'MeanShift', 'MiniBatchKMeans', 'SpectralClustering', 'affinity_propagation', 'dbscan', 'estimate_bandwidth', 'get_bin_seeds', 'k_means', 'linkage_tree', 'mean_shift', 'spectral_clustering', 'ward_tree', 'SpectralBiclustering', 'SpectralCoclustering']
bsd-3-clause
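
The __init__ module above only re-exports the clustering estimators; a minimal usage sketch for two of them, on toy blob data with arbitrary hyper-parameters:

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans, DBSCAN

X, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.6, random_state=0)

# Two of the estimators re-exported by sklearn.cluster, used the same way.
kmeans_labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
dbscan_labels = DBSCAN(eps=0.5, min_samples=5).fit_predict(X)

print(np.unique(kmeans_labels), np.unique(dbscan_labels))
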
nesterione/scikit-learn
sklearn/neighbors/classification.py
106
13987
"""Nearest Neighbor Classification""" # Authors: Jake Vanderplas <[email protected]> # Fabian Pedregosa <[email protected]> # Alexandre Gramfort <[email protected]> # Sparseness support by Lars Buitinck <[email protected]> # Multi-output support by Arnaud Joly <[email protected]> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import numpy as np from scipy import stats from ..utils.extmath import weighted_mode from .base import \ _check_weights, _get_weights, \ NeighborsBase, KNeighborsMixin,\ RadiusNeighborsMixin, SupervisedIntegerMixin from ..base import ClassifierMixin from ..utils import check_array class KNeighborsClassifier(NeighborsBase, KNeighborsMixin, SupervisedIntegerMixin, ClassifierMixin): """Classifier implementing the k-nearest neighbors vote. Read more in the :ref:`User Guide <classification>`. Parameters ---------- n_neighbors : int, optional (default = 5) Number of neighbors to use by default for :meth:`k_neighbors` queries. weights : str or callable weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Uniform weights are used by default. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. metric : string or DistanceMetric object (default = 'minkowski') the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of the DistanceMetric class for a list of available metrics. p : integer, optional (default = 2) Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional (default = None) additional keyword arguments for the metric function. Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import KNeighborsClassifier >>> neigh = KNeighborsClassifier(n_neighbors=3) >>> neigh.fit(X, y) # doctest: +ELLIPSIS KNeighborsClassifier(...) >>> print(neigh.predict([[1.1]])) [0] >>> print(neigh.predict_proba([[0.9]])) [[ 0.66666667 0.33333333]] See also -------- RadiusNeighborsClassifier KNeighborsRegressor RadiusNeighborsRegressor NearestNeighbors Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. .. 
warning:: Regarding the Nearest Neighbors algorithms, if it is found that two neighbors, neighbor `k+1` and `k`, have identical distances but but different labels, the results will depend on the ordering of the training data. http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, **kwargs): self._init_params(n_neighbors=n_neighbors, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, **kwargs) self.weights = _check_weights(weights) def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array of shape [n_samples, n_features] A 2-D array representing the test points. Returns ------- y : array of shape [n_samples] or [n_samples, n_outputs] Class labels for each data sample. """ X = check_array(X, accept_sparse='csr') neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_outputs = len(classes_) n_samples = X.shape[0] weights = _get_weights(neigh_dist, self.weights) y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype) for k, classes_k in enumerate(classes_): if weights is None: mode, _ = stats.mode(_y[neigh_ind, k], axis=1) else: mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1) mode = np.asarray(mode.ravel(), dtype=np.intp) y_pred[:, k] = classes_k.take(mode) if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred def predict_proba(self, X): """Return probability estimates for the test data X. Parameters ---------- X : array, shape = (n_samples, n_features) A 2-D array representing the test points. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs of such arrays if n_outputs > 1. The class probabilities of the input samples. Classes are ordered by lexicographic order. """ X = check_array(X, accept_sparse='csr') neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_samples = X.shape[0] weights = _get_weights(neigh_dist, self.weights) if weights is None: weights = np.ones_like(neigh_ind) all_rows = np.arange(X.shape[0]) probabilities = [] for k, classes_k in enumerate(classes_): pred_labels = _y[:, k][neigh_ind] proba_k = np.zeros((n_samples, classes_k.size)) # a simple ':' index doesn't work right for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors) proba_k[all_rows, idx] += weights[:, i] # normalize 'votes' into real [0,1] probabilities normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer probabilities.append(proba_k) if not self.outputs_2d_: probabilities = probabilities[0] return probabilities class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin, SupervisedIntegerMixin, ClassifierMixin): """Classifier implementing a vote among neighbors within a given radius Read more in the :ref:`User Guide <classification>`. Parameters ---------- radius : float, optional (default = 1.0) Range of parameter space to use by default for :meth`radius_neighbors` queries. weights : str or callable weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. 
in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Uniform weights are used by default. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDtree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. metric : string or DistanceMetric object (default='minkowski') the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of the DistanceMetric class for a list of available metrics. p : integer, optional (default = 2) Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. outlier_label : int, optional (default = None) Label, which is given for outlier samples (samples with no neighbors on given radius). If set to None, ValueError is raised, when outlier is detected. metric_params: dict, optional (default = None) additional keyword arguments for the metric function. Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import RadiusNeighborsClassifier >>> neigh = RadiusNeighborsClassifier(radius=1.0) >>> neigh.fit(X, y) # doctest: +ELLIPSIS RadiusNeighborsClassifier(...) >>> print(neigh.predict([[1.5]])) [0] See also -------- KNeighborsClassifier RadiusNeighborsRegressor KNeighborsRegressor NearestNeighbors Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, radius=1.0, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', outlier_label=None, metric_params=None, **kwargs): self._init_params(radius=radius, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, **kwargs) self.weights = _check_weights(weights) self.outlier_label = outlier_label def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array of shape [n_samples, n_features] A 2-D array representing the test points. Returns ------- y : array of shape [n_samples] or [n_samples, n_outputs] Class labels for each data sample. 
""" X = check_array(X, accept_sparse='csr') n_samples = X.shape[0] neigh_dist, neigh_ind = self.radius_neighbors(X) inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0] outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0] classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_outputs = len(classes_) if self.outlier_label is not None: neigh_dist[outliers] = 1e-6 elif outliers: raise ValueError('No neighbors found for test samples %r, ' 'you can try using larger radius, ' 'give a label for outliers, ' 'or consider removing them from your dataset.' % outliers) weights = _get_weights(neigh_dist, self.weights) y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype) for k, classes_k in enumerate(classes_): pred_labels = np.array([_y[ind, k] for ind in neigh_ind], dtype=object) if weights is None: mode = np.array([stats.mode(pl)[0] for pl in pred_labels[inliers]], dtype=np.int) else: mode = np.array([weighted_mode(pl, w)[0] for (pl, w) in zip(pred_labels[inliers], weights)], dtype=np.int) mode = mode.ravel() y_pred[inliers, k] = classes_k.take(mode) if outliers: y_pred[outliers, :] = self.outlier_label if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred
bsd-3-clause
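The `predict_proba` method in the neighbors classification code above accumulates each neighbor's weight into its class bin and then normalizes the bins row by row. A minimal NumPy sketch of that accumulate-and-normalize step, using made-up neighbor labels and distances (the array values are illustrative; the inverse-distance rule corresponds to the 'distance' weighting option described in the docstring above):

import numpy as np

# Hypothetical toy setup: 3 query points, 4 neighbors each, 3 classes (labels 0..2).
neigh_labels = np.array([[0, 0, 1, 2],
                         [1, 1, 1, 0],
                         [2, 0, 2, 2]])
neigh_dist = np.array([[0.1, 0.2, 0.5, 0.9],
                       [0.3, 0.3, 0.4, 1.0],
                       [0.2, 0.6, 0.7, 0.8]])

weights = 1.0 / neigh_dist                 # 'distance' weighting; 'uniform' would be all ones
n_classes = 3
proba = np.zeros((neigh_labels.shape[0], n_classes))

all_rows = np.arange(neigh_labels.shape[0])
for i in range(neigh_labels.shape[1]):     # loop over the k neighbor slots
    proba[all_rows, neigh_labels[:, i]] += weights[:, i]

proba /= proba.sum(axis=1, keepdims=True)  # normalize the votes into probabilities
print(proba)                               # rows sum to 1; the argmax is the weighted-vote winner

The class with the largest weighted vote is what `predict` reports; `predict_proba` stops at the normalized bins.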
matthieudumont/dipy
doc/examples/reconst_dti.py
4
9303
""" ============================================================ Reconstruction of the diffusion signal with the Tensor model ============================================================ The diffusion tensor model is a model that describes the diffusion within a voxel. First proposed by Basser and colleagues [Basser1994]_, it has been very influential in demonstrating the utility of diffusion MRI in characterizing the micro-structure of white matter tissue and of the biophysical properties of tissue, inferred from local diffusion properties and it is still very commonly used. The diffusion tensor models the diffusion signal as: .. math:: \frac{S(\mathbf{g}, b)}{S_0} = e^{-b\mathbf{g}^T \mathbf{D} \mathbf{g}} Where $\mathbf{g}$ is a unit vector in 3 space indicating the direction of measurement and b are the parameters of measurement, such as the strength and duration of diffusion-weighting gradient. $S(\mathbf{g}, b)$ is the diffusion-weighted signal measured and $S_0$ is the signal conducted in a measurement with no diffusion weighting. $\mathbf{D}$ is a positive-definite quadratic form, which contains six free parameters to be fit. These six parameters are: .. math:: \mathbf{D} = \begin{pmatrix} D_{xx} & D_{xy} & D_{xz} \\ D_{yx} & D_{yy} & D_{yz} \\ D_{zx} & D_{zy} & D_{zz} \\ \end{pmatrix} This matrix is a variance/covariance matrix of the diffusivity along the three spatial dimensions. Note that we can assume that diffusivity has antipodal symmetry, so elements across the diagonal are equal. For example: $D_{xy} = D_{yx}$. This is why there are only 6 free parameters to estimate here. In the following example we show how to reconstruct your diffusion datasets using a single tensor model. First import the necessary modules: ``numpy`` is for numerical computation """ import numpy as np """ ``nibabel`` is for loading imaging datasets """ import nibabel as nib """ ``dipy.reconst`` is for the reconstruction algorithms which we use to create voxel models from the raw data. """ import dipy.reconst.dti as dti """ ``dipy.data`` is used for small datasets that we use in tests and examples. """ from dipy.data import fetch_stanford_hardi """ Fetch will download the raw dMRI dataset of a single subject. The size of the dataset is 87 MBytes. You only need to fetch once. """ fetch_stanford_hardi() """ Next, we read the saved dataset """ from dipy.data import read_stanford_hardi img, gtab = read_stanford_hardi() """ img contains a nibabel Nifti1Image object (with the data) and gtab contains a GradientTable object (information about the gradients e.g. b-values and b-vectors). """ data = img.get_data() print('data.shape (%d, %d, %d, %d)' % data.shape) """ data.shape ``(81, 106, 76, 160)`` First of all, we mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image. This is done using dipy's mask module. """ from dipy.segment.mask import median_otsu maskdata, mask = median_otsu(data, 3, 1, True, vol_idx=range(10, 50), dilate=2) print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape) """ maskdata.shape ``(72, 87, 59, 160)`` Now that we have prepared the datasets we can go forward with the voxel reconstruction. First, we instantiate the Tensor model in the following way. """ tenmodel = dti.TensorModel(gtab) """ Fitting the data is very simple. 
We just need to call the fit method of the TensorModel in the following way: """ tenfit = tenmodel.fit(maskdata) """ The fit method creates a TensorFit object which contains the fitting parameters and other attributes of the model. For example we can generate fractional anisotropy (FA) from the eigen-values of the tensor. FA is used to characterize the degree to which the distribution of diffusion in a voxel is directional. That is, whether there is relatively unrestricted diffusion in one particular direction. Mathematically, FA is defined as the normalized variance of the eigen-values of the tensor: .. math:: FA = \sqrt{\frac{1}{2}\frac{(\lambda_1-\lambda_2)^2+(\lambda_1- \lambda_3)^2+(\lambda_2-\lambda_3)^2}{\lambda_1^2+ \lambda_2^2+\lambda_3^2}} Note that FA should be interpreted carefully. It may be an indication of the density of packing of fibers in a voxel, and the amount of myelin wrapping these axons, but it is not always a measure of "tissue integrity". For example, FA may decrease in locations in which there is fanning of white matter fibers, or where more than one population of white matter fibers crosses. """ print('Computing anisotropy measures (FA, MD, RGB)') from dipy.reconst.dti import fractional_anisotropy, color_fa, lower_triangular FA = fractional_anisotropy(tenfit.evals) """ In the background of the image the fitting will not be accurate there is no signal and possibly we will find FA values with nans (not a number). We can easily remove these in the following way. """ FA[np.isnan(FA)] = 0 """ Saving the FA images is very easy using nibabel. We need the FA volume and the affine matrix which transform the image's coordinates to the world coordinates. Here, we choose to save the FA in float32. """ fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine()) nib.save(fa_img, 'tensor_fa.nii.gz') """ You can now see the result with any nifti viewer or check it slice by slice using matplotlib_'s imshow. In the same way you can save the eigen values, the eigen vectors or any other properties of the Tensor. """ evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), img.get_affine()) nib.save(evecs_img, 'tensor_evecs.nii.gz') """ Other tensor statistics can be calculated from the `tenfit` object. For example, a commonly calculated statistic is the mean diffusivity (MD). This is simply the mean of the eigenvalues of the tensor. Since FA is a normalized measure of variance and MD is the mean, they are often used as complimentary measures. In `dipy`, there are two equivalent ways to calculate the mean diffusivity. One is by calling the `mean_diffusivity` module function on the eigen-values of the TensorFit class instance: """ MD1 = dti.mean_diffusivity(tenfit.evals) nib.save(nib.Nifti1Image(MD1.astype(np.float32), img.get_affine()), 'tensors_md.nii.gz') """ The other is to call the TensorFit class method: """ MD2 = tenfit.md """ Obviously, the quantities are identical. We can also compute the colored FA or RGB-map [Pajevic1999]_. First, we make sure that the FA is scaled between 0 and 1, we compute the RGB map and save it. """ FA = np.clip(FA, 0, 1) RGB = color_fa(FA, tenfit.evecs) nib.save(nib.Nifti1Image(np.array(255 * RGB, 'uint8'), img.get_affine()), 'tensor_rgb.nii.gz') """ Let's try to visualize the tensor ellipsoids of a small rectangular area in an axial slice of the splenium of the corpus callosum (CC). 
""" print('Computing tensor ellipsoids in a part of the splenium of the CC') from dipy.data import get_sphere sphere = get_sphere('symmetric724') from dipy.viz import fvtk ren = fvtk.ren() evals = tenfit.evals[13:43, 44:74, 28:29] evecs = tenfit.evecs[13:43, 44:74, 28:29] """ We can color the ellipsoids using the ``color_fa`` values that we calculated above. In this example we additionally normalize the values to increase the contrast. """ cfa = RGB[13:43, 44:74, 28:29] cfa /= cfa.max() fvtk.add(ren, fvtk.tensor(evals, evecs, cfa, sphere)) print('Saving illustration as tensor_ellipsoids.png') fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids.png', size=(600, 600)) """ .. figure:: tensor_ellipsoids.png :align: center **Tensor Ellipsoids**. """ fvtk.clear(ren) """ Finally, we can visualize the tensor orientation distribution functions for the same area as we did with the ellipsoids. """ tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere) fvtk.add(ren, fvtk.sphere_funcs(tensor_odfs, sphere, colormap=None)) #fvtk.show(r) print('Saving illustration as tensor_odfs.png') fvtk.record(ren, n_frames=1, out_path='tensor_odfs.png', size=(600, 600)) """ .. figure:: tensor_odfs.png :align: center **Tensor ODFs**. Note that while the tensor model is an accurate and reliable model of the diffusion signal in the white matter, it has the drawback that it only has one principal diffusion direction. Therefore, in locations in the brain that contain multiple fiber populations crossing each other, the tensor model may indicate that the principal diffusion direction is intermediate to these directions. Therefore, using the principal diffusion direction for tracking in these locations may be misleading and may lead to errors in defining the tracks. Fortunately, other reconstruction methods can be used to represent the diffusion and fiber orientations in those locations. These are presented in other examples. .. [Basser1994] Basser PJ, Mattielo J, LeBihan (1994). MR diffusion tensor spectroscopy and imaging. .. [Pajevic1999] Pajevic S, Pierpaoli (1999). Color schemes to represent the orientation of anisotropic tissues from diffusion tensor data: application to white matter fiber tract mapping in the human brain. .. include:: ../links_names.inc """
bsd-3-clause
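The dipy tutorial above defines FA and MD in terms of the tensor eigenvalues. A small self-contained check of those two formulas on one voxel, using illustrative eigenvalues rather than values from the Stanford dataset:

import numpy as np

# Hypothetical eigenvalues for a single voxel (mm^2/s), lambda1 >= lambda2 >= lambda3.
evals = np.array([1.7e-3, 0.3e-3, 0.2e-3])
l1, l2, l3 = evals

# Mean diffusivity: simply the mean of the eigenvalues.
md = evals.mean()

# Fractional anisotropy, exactly as written in the tutorial text.
fa = np.sqrt(0.5 * ((l1 - l2) ** 2 + (l1 - l3) ** 2 + (l2 - l3) ** 2)
             / (l1 ** 2 + l2 ** 2 + l3 ** 2))

print("MD = %.3e mm^2/s, FA = %.3f" % (md, fa))  # FA is dimensionless, in [0, 1]

For an isotropic voxel (all three eigenvalues equal) FA evaluates to 0, and it approaches 1 as diffusion becomes confined to a single direction, which is why the corpus callosum region visualized in the tutorial shows high FA.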
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/sklearn/feature_extraction/dict_vectorizer.py
1
12559
# Authors: Lars Buitinck # Dan Blanchard <[email protected]> # License: BSD 3 clause from array import array from collections import Mapping from operator import itemgetter import numpy as np import scipy.sparse as sp from ..externals.six.moves import xrange from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array, tosequence from ..utils.fixes import frombuffer_empty def _tosequence(X): """Turn X into a sequence or ndarray, avoiding a copy if possible.""" if isinstance(X, Mapping): # single sample return [X] else: return tosequence(X) class DictVectorizer(BaseEstimator, TransformerMixin): """Transforms lists of feature-value mappings to vectors. This transformer turns lists of mappings (dict-like objects) of feature names to feature values into Numpy arrays or scipy.sparse matrices for use with scikit-learn estimators. When feature values are strings, this transformer will do a binary one-hot (aka one-of-K) coding: one boolean-valued feature is constructed for each of the possible string values that the feature can take on. For instance, a feature "f" that can take on the values "ham" and "spam" will become two features in the output, one signifying "f=ham", the other "f=spam". However, note that this transformer will only do a binary one-hot encoding when feature values are of type string. If categorical features are represented as numeric values such as int, the DictVectorizer can be followed by OneHotEncoder to complete binary one-hot encoding. Features that do not occur in a sample (mapping) will have a zero value in the resulting array/matrix. Read more in the :ref:`User Guide <dict_feature_extraction>`. Parameters ---------- dtype : callable, optional The type of feature values. Passed to Numpy array/scipy.sparse matrix constructors as the dtype argument. separator: string, optional Separator string used when constructing new features for one-hot coding. sparse: boolean, optional. Whether transform should produce scipy.sparse matrices. True by default. sort: boolean, optional. Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting. True by default. Attributes ---------- vocabulary_ : dict A dictionary mapping feature names to feature indices. feature_names_ : list A list of length n_features containing the feature names (e.g., "f=ham" and "f=spam"). Examples -------- >>> from sklearn.feature_extraction import DictVectorizer >>> v = DictVectorizer(sparse=False) >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] >>> X = v.fit_transform(D) >>> X array([[ 2., 0., 1.], [ 0., 1., 3.]]) >>> v.inverse_transform(X) == \ [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}] True >>> v.transform({'foo': 4, 'unseen_feature': 3}) array([[ 0., 0., 4.]]) See also -------- FeatureHasher : performs vectorization using only a hash function. sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features encoded as columns of integers. """ def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True): self.dtype = dtype self.separator = separator self.sparse = sparse self.sort = sort def fit(self, X, y=None): """Learn a list of feature name -> indices mappings. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). 
y : (ignored) Returns ------- self """ feature_names = [] vocab = {} for x in X: for f, v in six.iteritems(x): if isinstance(v, six.string_types): f = "%s%s%s" % (f, self.separator, v) if f not in vocab: feature_names.append(f) vocab[f] = len(vocab) if self.sort: feature_names.sort() vocab = dict((f, i) for i, f in enumerate(feature_names)) self.feature_names_ = feature_names self.vocabulary_ = vocab return self def _transform(self, X, fitting): # Sanity check: Python's array has no way of explicitly requesting the # signed 32-bit integers that scipy.sparse needs, so we use the next # best thing: typecode "i" (int). However, if that gives larger or # smaller integers than 32-bit ones, np.frombuffer screws up. assert array("i").itemsize == 4, ( "sizeof(int) != 4 on your platform; please report this at" " https://github.com/scikit-learn/scikit-learn/issues and" " include the output from platform.platform() in your bug report") dtype = self.dtype if fitting: feature_names = [] vocab = {} else: feature_names = self.feature_names_ vocab = self.vocabulary_ # Process everything as sparse regardless of setting X = [X] if isinstance(X, Mapping) else X indices = array("i") indptr = array("i", [0]) # XXX we could change values to an array.array as well, but it # would require (heuristic) conversion of dtype to typecode... values = [] # collect all the possible feature names and build sparse matrix at # same time for x in X: for f, v in six.iteritems(x): if isinstance(v, six.string_types): f = "%s%s%s" % (f, self.separator, v) v = 1 if f in vocab: indices.append(vocab[f]) values.append(dtype(v)) else: if fitting: feature_names.append(f) vocab[f] = len(vocab) indices.append(vocab[f]) values.append(dtype(v)) indptr.append(len(indices)) if len(indptr) == 1: raise ValueError("Sample sequence X is empty.") indices = frombuffer_empty(indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) shape = (len(indptr) - 1, len(vocab)) result_matrix = sp.csr_matrix((values, indices, indptr), shape=shape, dtype=dtype) # Sort everything if asked if fitting and self.sort: feature_names.sort() map_index = np.empty(len(feature_names), dtype=np.int32) for new_val, f in enumerate(feature_names): map_index[new_val] = vocab[f] vocab[f] = new_val result_matrix = result_matrix[:, map_index] if self.sparse: result_matrix.sort_indices() else: result_matrix = result_matrix.toarray() if fitting: self.feature_names_ = feature_names self.vocabulary_ = vocab return result_matrix def fit_transform(self, X, y=None): """Learn a list of feature name -> indices mappings and transform X. Like fit(X) followed by transform(X), but does not require materializing X in memory. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). y : (ignored) Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d. """ return self._transform(X, fitting=True) def inverse_transform(self, X, dict_type=dict): """Transform array or sparse matrix X back to feature mappings. X must have been produced by this DictVectorizer's transform or fit_transform method; it may only have passed through transformers that preserve the number of features and their order. In the case of one-hot/one-of-K coding, the constructed feature names and values are returned rather than the original ones. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Sample matrix. 
dict_type : callable, optional Constructor for feature mappings. Must conform to the collections.Mapping API. Returns ------- D : list of dict_type objects, length = n_samples Feature mappings for the samples in X. """ # COO matrix is not subscriptable X = check_array(X, accept_sparse=['csr', 'csc']) n_samples = X.shape[0] names = self.feature_names_ dicts = [dict_type() for _ in xrange(n_samples)] if sp.issparse(X): for i, j in zip(*X.nonzero()): dicts[i][names[j]] = X[i, j] else: for i, d in enumerate(dicts): for j, v in enumerate(X[i, :]): if v != 0: d[names[j]] = X[i, j] return dicts def transform(self, X, y=None): """Transform feature->value dicts to array or sparse matrix. Named features not encountered during fit or fit_transform will be silently ignored. Parameters ---------- X : Mapping or iterable over Mappings, length = n_samples Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). y : (ignored) Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d. """ if self.sparse: return self._transform(X, fitting=False) else: dtype = self.dtype vocab = self.vocabulary_ X = _tosequence(X) Xa = np.zeros((len(X), len(vocab)), dtype=dtype) for i, x in enumerate(X): for f, v in six.iteritems(x): if isinstance(v, six.string_types): f = "%s%s%s" % (f, self.separator, v) v = 1 try: Xa[i, vocab[f]] = dtype(v) except KeyError: pass return Xa def get_feature_names(self): """Returns a list of feature names, ordered by their indices. If one-of-K coding is applied to categorical features, this will include the constructed feature names but not the original ones. """ return self.feature_names_ def restrict(self, support, indices=False): """Restrict the features to those in support using feature selection. This function modifies the estimator in-place. Parameters ---------- support : array-like Boolean mask or list of indices (as returned by the get_support member of feature selectors). indices : boolean, optional Whether support is a list of indices. Returns ------- self Examples -------- >>> from sklearn.feature_extraction import DictVectorizer >>> from sklearn.feature_selection import SelectKBest, chi2 >>> v = DictVectorizer() >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] >>> X = v.fit_transform(D) >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) >>> v.get_feature_names() ['bar', 'baz', 'foo'] >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS DictVectorizer(dtype=..., separator='=', sort=True, sparse=True) >>> v.get_feature_names() ['bar', 'foo'] """ if not indices: support = np.where(support)[0] names = self.feature_names_ new_vocab = {} for i in support: new_vocab[names[i]] = len(new_vocab) self.vocabulary_ = new_vocab self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab), key=itemgetter(1))] return self
mit
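The DictVectorizer docstring above describes the one-of-K treatment of string-valued features ("f=ham" vs "f=spam"), but its doctest only exercises numeric values. A short sketch of the string case, using only the API shown in the file above (feature and value names are made up):

from sklearn.feature_extraction import DictVectorizer

# Mixed string and numeric features: strings are one-hot encoded, numbers pass through.
D = [{'f': 'ham', 'length': 3.0},
     {'f': 'spam', 'length': 7.0},
     {'f': 'ham', 'length': 1.0}]

v = DictVectorizer(sparse=False)
X = v.fit_transform(D)

print(v.get_feature_names())  # ['f=ham', 'f=spam', 'length']
print(X)
# Each string value became its own boolean column ('f=ham', 'f=spam'),
# while the numeric 'length' feature kept its values in a single column.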
cjohnson318/geostatsmodels
notebooks/VariogramAnalysis.py
1
4048
# -*- coding: utf-8 -*- # <nbformat>3.0</nbformat> # <codecell> %matplotlib inline # <markdowncell> # This notebook is an effort to replicate the lessons found here: # http://people.ku.edu/~gbohling/cpe940/Variograms.pdf # # We'll do all of our imports here at the top. # # *Note: This is a work in progress!* # <codecell> import numpy as np from pandas import DataFrame, Series from geostatsmodels import utilities, kriging, variograms, model, geoplot import matplotlib.pyplot as plt from scipy.stats import norm import urllib2 import os.path import zipfile import StringIO # <markdowncell> # We're going to fetch the data file we need for this exercise from the following URL: # # http://people.ku.edu/~gbohling/geostats/WGTutorial.zip # # Subsequent runs of this Notebook should use a local copy, saved in the current directory. # <codecell> clusterfile = 'ZoneA.dat' if not os.path.isfile(clusterfile): fh = urllib2.urlopen('http://people.ku.edu/~gbohling/geostats/WGTutorial.zip') data = fh.read() fobj = StringIO.StringIO(data) myzip = zipfile.ZipFile(fobj,'r') myzip.extract(clusterfile) fobj.close() fh.close() z = open(clusterfile,'r' ).readlines() z = [ i.strip().split() for i in z[10:] ] z = np.array( z, dtype=np.float ) z = DataFrame( z, columns=['x','y','thk','por','perm','lperm','lpermp','lpermr'] ) P = np.array( z[['x','y','por']] ) # <markdowncell> # Let's make a plot of the data, so we know what we're dealing with. # <codecell> fig, ax = plt.subplots() fig.set_size_inches(8,8) cmap = geoplot.YPcmap ax.scatter( z.x/1000, z.y/1000, c=z.por, s=64,cmap=cmap) ax.set_aspect(1) plt.xlim(-2,22) plt.ylim(-2,17.5) plt.xlabel('Easting [m]') plt.ylabel('Northing [m]') th=plt.title('Porosity %') # <markdowncell> # Let's verify that our data is distributed normally. # <codecell> hrange = (12,17.2) mu, std = norm.fit(z.por) ahist=plt.hist(z.por, bins=7, normed=True, alpha=0.6, color='c',range=hrange) xmin, xmax = plt.xlim() x = np.linspace(xmin, xmax, 100) p = norm.pdf(x, mu, std) plt.plot(x, p, 'k', linewidth=2) title = "Fit results: mu = %.2f, std = %.2f" % (mu, std) th=plt.title(title) xh=plt.xlabel('Porosity (%)') yh=plt.ylabel('Density') xl=plt.xlim(11.5,17.5) yl=plt.ylim(-0.02,0.45) # <codecell> import scipy.stats as stats qqdata = stats.probplot(z.por, dist="norm",plot=plt,fit=False) xh=plt.xlabel('Standard Normal Quantiles') yh=plt.ylabel('Sorted Porosity Values') fig=plt.gcf() fig.set_size_inches(8,8) th=plt.title('') # <markdowncell> # What is the optimal "lag" distance between points? Use the utilities scattergram() function to help determine that distance. # <codecell> pw = utilities.pairwise(P) geoplot.hscattergram(P,pw,1000,500) geoplot.hscattergram(P,pw,2000,500) geoplot.hscattergram(P,pw,3000,500) # <markdowncell> # Here, we plot the semivariogram and overlay a horizontal line for the sill, $c$. # <codecell> tolerance = 250 lags = np.arange( tolerance, 10000, tolerance*2 ) sill = np.var(P[:,2]) geoplot.semivariogram( P, lags, tolerance ) # <markdowncell> # Looking at the figure above, we can say that the semivariogram levels off around 4000, so we can set the range, $a$, to that value and model the covariance function. # <codecell> svm = model.semivariance( model.spherical, [ 4000, sill ] ) geoplot.semivariogram( P, lags, tolerance, model=svm ) # <markdowncell> # We can visualize the distribution of the lagged distances with the `laghistogram()` function. 
# <codecell> geoplot.laghistogram( P, pw, lags, tolerance ) # <markdowncell> # If we want to perform anisotropic kriging, we can visualize the distribution of the anisotropic lags using the `anisotropiclags()` function. Note that we use the bearing, which is measured in degrees, clockwise from North. # <codecell> geoplot.anisotropiclags( P, pw, lag=2000, tol=250, angle=45, atol=15 ) # <codecell> geoplot.anisotropiclags( P, pw, lag=2000, tol=250, angle=135, atol=15 ) # <codecell> geoplot.polaranisotropy( P, pw, lags, tolerance, nsectors=18 )
mit
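The variogram notebook above fits a spherical model to the empirical semivariogram but leaves the estimate itself to the geostatsmodels helpers. A rough, library-free sketch of what an empirical semivariogram estimate does (pure NumPy, simple lag binning for illustration only, not the package's implementation):

import numpy as np

def empirical_semivariogram(P, lags, tol):
    """gamma(h) = (1 / (2 N(h))) * sum over pairs within h +/- tol of (z_i - z_j)^2.

    P is an (n, 3) array of (x, y, value) rows; lags is a 1-D array of lag centers.
    """
    xy, z = P[:, :2], P[:, 2]
    # pairwise distances and squared value differences
    d = np.sqrt(((xy[:, None, :] - xy[None, :, :]) ** 2).sum(axis=-1))
    dz2 = (z[:, None] - z[None, :]) ** 2
    iu = np.triu_indices(len(z), k=1)          # count each pair once
    d, dz2 = d[iu], dz2[iu]
    gamma = []
    for h in lags:
        mask = np.abs(d - h) <= tol
        gamma.append(dz2[mask].mean() / 2.0 if mask.any() else np.nan)
    return np.array(gamma)

# Toy usage with random points; with spatially correlated data the curve levels off near the sill.
rng = np.random.RandomState(0)
P_demo = np.column_stack([rng.uniform(0, 10, 200),
                          rng.uniform(0, 10, 200),
                          rng.normal(15, 1, 200)])
print(empirical_semivariogram(P_demo, lags=np.arange(1, 6), tol=0.5))

In the notebook the sill is taken as the sample variance of the porosity values and the range as the lag where the empirical curve flattens out (around 4000 there).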
icdishb/scikit-learn
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
225
5719
""" ============================================ Scalability of Approximate Nearest Neighbors ============================================ This example studies the scalability profile of approximate 10-neighbors queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200`` when varying the number of samples in the dataset. The first plot demonstrates the relationship between query time and index size of LSHForest. Query time is compared with the brute force method in exact nearest neighbor search for the same index sizes. The brute force queries have a very predictable linear scalability with the index (full scan). LSHForest index have sub-linear scalability profile but can be slower for small datasets. The second plot shows the speedup when using approximate queries vs brute force exact queries. The speedup tends to increase with the dataset size but should reach a plateau typically when doing queries on datasets with millions of samples and a few hundreds of dimensions. Higher dimensional datasets tends to benefit more from LSHForest indexing. The break even point (speedup = 1) depends on the dimensionality and structure of the indexed data and the parameters of the LSHForest index. The precision of approximate queries should decrease slowly with the dataset size. The speed of the decrease depends mostly on the LSHForest parameters and the dimensionality of the data. """ from __future__ import division print(__doc__) # Authors: Maheshakya Wijewardena <[email protected]> # Olivier Grisel <[email protected]> # # License: BSD 3 clause ############################################################################### import time import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt # Parameters of the study n_samples_min = int(1e3) n_samples_max = int(1e5) n_features = 100 n_centers = 100 n_queries = 100 n_steps = 6 n_iter = 5 # Initialize the range of `n_samples` n_samples_values = np.logspace(np.log10(n_samples_min), np.log10(n_samples_max), n_steps).astype(np.int) # Generate some structured data rng = np.random.RandomState(42) all_data, _ = make_blobs(n_samples=n_samples_max + n_queries, n_features=n_features, centers=n_centers, shuffle=True, random_state=0) queries = all_data[:n_queries] index_data = all_data[n_queries:] # Metrics to collect for the plots average_times_exact = [] average_times_approx = [] std_times_approx = [] accuracies = [] std_accuracies = [] average_speedups = [] std_speedups = [] # Calculate the average query time for n_samples in n_samples_values: X = index_data[:n_samples] # Initialize LSHForest for queries of a single neighbor lshf = LSHForest(n_estimators=20, n_candidates=200, n_neighbors=10).fit(X) nbrs = NearestNeighbors(algorithm='brute', metric='cosine', n_neighbors=10).fit(X) time_approx = [] time_exact = [] accuracy = [] for i in range(n_iter): # pick one query at random to study query time variability in LSHForest query = queries[rng.randint(0, n_queries)] t0 = time.time() exact_neighbors = nbrs.kneighbors(query, return_distance=False) time_exact.append(time.time() - t0) t0 = time.time() approx_neighbors = lshf.kneighbors(query, return_distance=False) time_approx.append(time.time() - t0) accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean()) average_time_exact = np.mean(time_exact) average_time_approx = np.mean(time_approx) speedup = np.array(time_exact) / np.array(time_approx) 
average_speedup = np.mean(speedup) mean_accuracy = np.mean(accuracy) std_accuracy = np.std(accuracy) print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, " "accuracy: %0.2f +/-%0.2f" % (n_samples, average_time_exact, average_time_approx, average_speedup, mean_accuracy, std_accuracy)) accuracies.append(mean_accuracy) std_accuracies.append(std_accuracy) average_times_exact.append(average_time_exact) average_times_approx.append(average_time_approx) std_times_approx.append(np.std(time_approx)) average_speedups.append(average_speedup) std_speedups.append(np.std(speedup)) # Plot average query time against n_samples plt.figure() plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx, fmt='o-', c='r', label='LSHForest') plt.plot(n_samples_values, average_times_exact, c='b', label="NearestNeighbors(algorithm='brute', metric='cosine')") plt.legend(loc='upper left', fontsize='small') plt.ylim(0, None) plt.ylabel("Average query time in seconds") plt.xlabel("n_samples") plt.grid(which='both') plt.title("Impact of index size on response time for first " "nearest neighbors queries") # Plot average query speedup versus index size plt.figure() plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups, fmt='o-', c='r') plt.ylim(0, None) plt.ylabel("Average speedup") plt.xlabel("n_samples") plt.grid(which='both') plt.title("Speedup of the approximate NN queries vs brute force") # Plot average precision versus index size plt.figure() plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c') plt.ylim(0, 1.1) plt.ylabel("precision@10") plt.xlabel("n_samples") plt.grid(which='both') plt.title("precision of 10-nearest-neighbors queries with index size") plt.show()
bsd-3-clause
jblackburne/scikit-learn
sklearn/metrics/cluster/bicluster.py
359
2797
from __future__ import division import numpy as np from sklearn.utils.linear_assignment_ import linear_assignment from sklearn.utils.validation import check_consistent_length, check_array __all__ = ["consensus_score"] def _check_rows_and_columns(a, b): """Unpacks the row and column arrays and checks their shape.""" check_consistent_length(*a) check_consistent_length(*b) checks = lambda x: check_array(x, ensure_2d=False) a_rows, a_cols = map(checks, a) b_rows, b_cols = map(checks, b) return a_rows, a_cols, b_rows, b_cols def _jaccard(a_rows, a_cols, b_rows, b_cols): """Jaccard coefficient on the elements of the two biclusters.""" intersection = ((a_rows * b_rows).sum() * (a_cols * b_cols).sum()) a_size = a_rows.sum() * a_cols.sum() b_size = b_rows.sum() * b_cols.sum() return intersection / (a_size + b_size - intersection) def _pairwise_similarity(a, b, similarity): """Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j. """ a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b) n_a = a_rows.shape[0] n_b = b_rows.shape[0] result = np.array(list(list(similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)) for i in range(n_a))) return result def consensus_score(a, b, similarity="jaccard"): """The similarity of two sets of biclusters. Similarity between individual biclusters is computed. Then the best matching between sets is found using the Hungarian algorithm. The final score is the sum of similarities divided by the size of the larger set. Read more in the :ref:`User Guide <biclustering>`. Parameters ---------- a : (rows, columns) Tuple of row and column indicators for a set of biclusters. b : (rows, columns) Another set of biclusters like ``a``. similarity : string or function, optional, default: "jaccard" May be the string "jaccard" to use the Jaccard coefficient, or any function that takes four arguments, each of which is a 1d indicator vector: (a_rows, a_columns, b_rows, b_columns). References ---------- * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis for bicluster acquisition <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__. """ if similarity == "jaccard": similarity = _jaccard matrix = _pairwise_similarity(a, b, similarity) indices = linear_assignment(1. - matrix) n_a = len(a[0]) n_b = len(b[0]) return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
bsd-3-clause
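The bicluster module above scores each pair of biclusters with a Jaccard coefficient computed from row and column indicator vectors before the Hungarian matching step. A small worked example of just that Jaccard computation, with hand-built indicators (independent of the helper functions above):

import numpy as np

# Indicator vectors over 5 rows and 4 columns for two biclusters.
a_rows = np.array([1, 1, 1, 0, 0])
a_cols = np.array([1, 1, 0, 0])
b_rows = np.array([0, 1, 1, 1, 0])
b_cols = np.array([1, 1, 1, 0])

# Number of shared cells = (shared rows) * (shared columns).
intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum()
a_size = a_rows.sum() * a_cols.sum()   # 3 rows * 2 cols = 6 cells
b_size = b_rows.sum() * b_cols.sum()   # 3 rows * 3 cols = 9 cells

jaccard = intersection / float(a_size + b_size - intersection)
print(jaccard)  # 2 * 2 = 4 shared cells -> 4 / (6 + 9 - 4) = 4/11

`consensus_score` then solves an assignment problem over the full pairwise matrix of such scores and divides the summed similarities of the matched pairs by the size of the larger bicluster set.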
rubennj/pvlib-python
pvlib/test/test_clearsky.py
1
4194
import logging pvl_logger = logging.getLogger('pvlib') import datetime import numpy as np import pandas as pd from nose.tools import raises from numpy.testing import assert_almost_equal from pvlib.location import Location from pvlib import clearsky from pvlib import solarposition # setup times and location to be tested. times = pd.date_range(start=datetime.datetime(2014,6,24), end=datetime.datetime(2014,6,26), freq='1Min') tus = Location(32.2, -111, 'US/Arizona', 700) times_localized = times.tz_localize(tus.tz) ephem_data = solarposition.get_solarposition(times, tus) # test the ineichen clear sky model implementation in a few ways def test_ineichen_required(): # the clearsky function should lookup the linke turbidity on its own # will fail without scipy clearsky.ineichen(times, tus) def test_ineichen_supply_linke(): clearsky.ineichen(times, tus, linke_turbidity=3) def test_ineichen_solpos(): clearsky.ineichen(times, tus, linke_turbidity=3, solarposition_method='pyephem') def test_ineichen_airmass(): clearsky.ineichen(times, tus, linke_turbidity=3, airmass_model='simple') def test_ineichen_keys(): clearsky_data = clearsky.ineichen(times, tus, linke_turbidity=3) assert 'GHI' in clearsky_data.columns assert 'DNI' in clearsky_data.columns assert 'DHI' in clearsky_data.columns # test the haurwitz clear sky implementation def test_haurwitz(): clearsky.haurwitz(ephem_data['zenith']) def test_haurwitz_keys(): clearsky_data = clearsky.haurwitz(ephem_data['zenith']) assert 'GHI' in clearsky_data.columns # test DISC def test_disc_keys(): clearsky_data = clearsky.ineichen(times, tus, linke_turbidity=3) disc_data = clearsky.disc(clearsky_data['GHI'], ephem_data['zenith'], ephem_data.index) assert 'DNI_gen_DISC' in disc_data.columns assert 'Kt_gen_DISC' in disc_data.columns assert 'AM' in disc_data.columns def test_disc_value(): times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700']) ghi = pd.Series([1038.62, 254.53], index=times) zenith = pd.Series([10.567, 72.469], index=times) pressure = 93193. disc_data = clearsky.disc(ghi, zenith, times, pressure=pressure) assert_almost_equal(disc_data['DNI_gen_DISC'].values, np.array([830.46, 676.09]), 1) def test_dirint(): clearsky_data = clearsky.ineichen(times, tus, linke_turbidity=3) pressure = 93193. dirint_data = clearsky.dirint(clearsky_data['GHI'], ephem_data['zenith'], ephem_data.index, pressure=pressure) def test_dirint_value(): times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700']) ghi = pd.Series([1038.62, 254.53], index=times) zenith = pd.Series([10.567, 72.469], index=times) pressure = 93193. dirint_data = clearsky.dirint(ghi, zenith, times, pressure=pressure) assert_almost_equal(dirint_data.values, np.array([928.85, 688.26]), 1) def test_dirint_tdew(): times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700']) ghi = pd.Series([1038.62, 254.53], index=times) zenith = pd.Series([10.567, 72.469], index=times) pressure = 93193. dirint_data = clearsky.dirint(ghi, zenith, times, pressure=pressure, temp_dew=10) assert_almost_equal(dirint_data.values, np.array([934.06, 640.67]), 1) def test_dirint_no_delta_kt(): times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700']) ghi = pd.Series([1038.62, 254.53], index=times) zenith = pd.Series([10.567, 72.469], index=times) pressure = 93193. 
dirint_data = clearsky.dirint(ghi, zenith, times, pressure=pressure, use_delta_kt_prime=False) assert_almost_equal(dirint_data.values, np.array([901.56, 674.87]), 1) def test_dirint_coeffs(): coeffs = clearsky._get_dirint_coeffs() assert coeffs[0,0,0,0] == 0.385230 assert coeffs[0,1,2,1] == 0.229970 assert coeffs[3,2,6,3] == 1.032260
bsd-3-clause
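The tests above exercise the Ineichen clear-sky model and the DISC decomposition separately. A minimal sketch of how the two calls chain together, reusing only the signatures, column names, and Tucson `Location` that appear in the test file (it targets the same older pvlib API as the tests, so treat it as a sketch rather than current usage):

import datetime
import pandas as pd
from pvlib.location import Location
from pvlib import clearsky, solarposition

times = pd.date_range(start=datetime.datetime(2014, 6, 24),
                      end=datetime.datetime(2014, 6, 25), freq='1Min')
tus = Location(32.2, -111, 'US/Arizona', 700)

# Clear-sky irradiance (GHI/DNI/DHI columns, as asserted in test_ineichen_keys).
cs = clearsky.ineichen(times, tus, linke_turbidity=3)

# Solar position, needed for the zenith angle used by DISC.
ephem = solarposition.get_solarposition(times, tus)

# Estimate DNI from GHI with the DISC model (columns as in test_disc_keys).
disc_out = clearsky.disc(cs['GHI'], ephem['zenith'], times)
print(disc_out[['DNI_gen_DISC', 'Kt_gen_DISC']].max())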
pv/scikit-learn
examples/svm/plot_svm_margin.py
318
2328
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` tells our model
that we do not have that much faith in our data's distribution,
and it will only consider points close to the line of
separation.

A small value of `C` includes more (or all) of the observations,
allowing the margins to be calculated using all the data in the
area.
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# figure number
fignum = 1

# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):

    clf = svm.SVC(kernel='linear', C=penalty)
    clf.fit(X, Y)

    # get the separating hyperplane
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(-5, 5)
    yy = a * xx - (clf.intercept_[0]) / w[1]

    # plot the parallels to the separating hyperplane that pass through the
    # support vectors
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy + a * margin
    yy_up = yy - a * margin

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')

    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1

plt.show()
bsd-3-clause
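The example above plots the separating line and its margins for two values of `C`; the geometric half-width it draws is `margin = 1 / ||w||`. A condensed variant that prints the margin width and support-vector count instead of plotting, reusing the example's own data generation (the printed trend, a wider margin and more support vectors for the smaller `C`, is the expected behavior on this toy set rather than a guaranteed output):

import numpy as np
from sklearn import svm

np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

for name, penalty in (('unreg', 1), ('reg', 0.05)):
    clf = svm.SVC(kernel='linear', C=penalty).fit(X, Y)
    margin = 1.0 / np.sqrt(np.sum(clf.coef_ ** 2))   # half-width of the band between the dashed lines
    print("C=%-5s margin=%.3f  n_support_vectors=%d"
          % (penalty, margin, clf.support_vectors_.shape[0]))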
shyamalschandra/scikit-learn
sklearn/ensemble/tests/test_weight_boosting.py
58
17158
"""Testing for the boost module (sklearn.ensemble.boost).""" import numpy as np from sklearn.utils.testing import assert_array_equal, assert_array_less from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal, assert_true from sklearn.utils.testing import assert_raises, assert_raises_regexp from sklearn.base import BaseEstimator from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import AdaBoostRegressor from sklearn.ensemble import weight_boosting from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.svm import SVC, SVR from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.utils import shuffle from sklearn import datasets # Common random state rng = np.random.RandomState(0) # Toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels y_regr = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] y_t_class = ["foo", 1, 1] y_t_regr = [-1, 1, 1] # Load the iris dataset and randomly permute it iris = datasets.load_iris() perm = rng.permutation(iris.target.size) iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng) # Load the boston dataset and randomly permute it boston = datasets.load_boston() boston.data, boston.target = shuffle(boston.data, boston.target, random_state=rng) def test_samme_proba(): # Test the `_samme_proba` helper function. # Define some example (bad) `predict_proba` output. probs = np.array([[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]) probs /= np.abs(probs.sum(axis=1))[:, np.newaxis] # _samme_proba calls estimator.predict_proba. # Make a mock object so I can control what gets returned. class MockEstimator(object): def predict_proba(self, X): assert_array_equal(X.shape, probs.shape) return probs mock = MockEstimator() samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs)) assert_array_equal(samme_proba.shape, probs.shape) assert_true(np.isfinite(samme_proba).all()) # Make sure that the correct elements come out as smallest -- # `_samme_proba` should preserve the ordering in each example. assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2]) assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1]) def test_classification_toy(): # Check classification on a toy dataset. for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg, random_state=0) clf.fit(X, y_class) assert_array_equal(clf.predict(T), y_t_class) assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_) assert_equal(clf.predict_proba(T).shape, (len(T), 2)) assert_equal(clf.decision_function(T).shape, (len(T),)) def test_regression_toy(): # Check classification on a toy dataset. clf = AdaBoostRegressor(random_state=0) clf.fit(X, y_regr) assert_array_equal(clf.predict(T), y_t_regr) def test_iris(): # Check consistency on dataset iris. 
classes = np.unique(iris.target) clf_samme = prob_samme = None for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg) clf.fit(iris.data, iris.target) assert_array_equal(classes, clf.classes_) proba = clf.predict_proba(iris.data) if alg == "SAMME": clf_samme = clf prob_samme = proba assert_equal(proba.shape[1], len(classes)) assert_equal(clf.decision_function(iris.data).shape[1], len(classes)) score = clf.score(iris.data, iris.target) assert score > 0.9, "Failed with algorithm %s and score = %f" % \ (alg, score) # Somewhat hacky regression test: prior to # ae7adc880d624615a34bafdb1d75ef67051b8200, # predict_proba returned SAMME.R values for SAMME. clf_samme.algorithm = "SAMME.R" assert_array_less(0, np.abs(clf_samme.predict_proba(iris.data) - prob_samme)) def test_boston(): # Check consistency on dataset boston house prices. clf = AdaBoostRegressor(random_state=0) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert score > 0.85 def test_staged_predict(): # Check staged predictions. rng = np.random.RandomState(0) iris_weights = rng.randint(10, size=iris.target.shape) boston_weights = rng.randint(10, size=boston.target.shape) # AdaBoost classification for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg, n_estimators=10) clf.fit(iris.data, iris.target, sample_weight=iris_weights) predictions = clf.predict(iris.data) staged_predictions = [p for p in clf.staged_predict(iris.data)] proba = clf.predict_proba(iris.data) staged_probas = [p for p in clf.staged_predict_proba(iris.data)] score = clf.score(iris.data, iris.target, sample_weight=iris_weights) staged_scores = [ s for s in clf.staged_score( iris.data, iris.target, sample_weight=iris_weights)] assert_equal(len(staged_predictions), 10) assert_array_almost_equal(predictions, staged_predictions[-1]) assert_equal(len(staged_probas), 10) assert_array_almost_equal(proba, staged_probas[-1]) assert_equal(len(staged_scores), 10) assert_array_almost_equal(score, staged_scores[-1]) # AdaBoost regression clf = AdaBoostRegressor(n_estimators=10, random_state=0) clf.fit(boston.data, boston.target, sample_weight=boston_weights) predictions = clf.predict(boston.data) staged_predictions = [p for p in clf.staged_predict(boston.data)] score = clf.score(boston.data, boston.target, sample_weight=boston_weights) staged_scores = [ s for s in clf.staged_score( boston.data, boston.target, sample_weight=boston_weights)] assert_equal(len(staged_predictions), 10) assert_array_almost_equal(predictions, staged_predictions[-1]) assert_equal(len(staged_scores), 10) assert_array_almost_equal(score, staged_scores[-1]) def test_gridsearch(): # Check that base trees can be grid-searched. # AdaBoost classification boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier()) parameters = {'n_estimators': (1, 2), 'base_estimator__max_depth': (1, 2), 'algorithm': ('SAMME', 'SAMME.R')} clf = GridSearchCV(boost, parameters) clf.fit(iris.data, iris.target) # AdaBoost regression boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(), random_state=0) parameters = {'n_estimators': (1, 2), 'base_estimator__max_depth': (1, 2)} clf = GridSearchCV(boost, parameters) clf.fit(boston.data, boston.target) def test_pickle(): # Check pickability. 
import pickle # Adaboost classifier for alg in ['SAMME', 'SAMME.R']: obj = AdaBoostClassifier(algorithm=alg) obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_equal(score, score2) # Adaboost regressor obj = AdaBoostRegressor(random_state=0) obj.fit(boston.data, boston.target) score = obj.score(boston.data, boston.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(boston.data, boston.target) assert_equal(score, score2) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=1) for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg) clf.fit(X, y) importances = clf.feature_importances_ assert_equal(importances.shape[0], 10) assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(), True) def test_error(): # Test that it gives proper exception on deficient input. assert_raises(ValueError, AdaBoostClassifier(learning_rate=-1).fit, X, y_class) assert_raises(ValueError, AdaBoostClassifier(algorithm="foo").fit, X, y_class) assert_raises(ValueError, AdaBoostClassifier().fit, X, y_class, sample_weight=np.asarray([-1])) def test_base_estimator(): # Test different base estimators. from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # XXX doesn't work with y_class because RF doesn't support classes_ # Shouldn't AdaBoost run a LabelBinarizer? clf = AdaBoostClassifier(RandomForestClassifier()) clf.fit(X, y_regr) clf = AdaBoostClassifier(SVC(), algorithm="SAMME") clf.fit(X, y_class) from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0) clf.fit(X, y_regr) clf = AdaBoostRegressor(SVR(), random_state=0) clf.fit(X, y_regr) # Check that an empty discrete ensemble fails in fit, not predict. X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]] y_fail = ["foo", "bar", 1, 2] clf = AdaBoostClassifier(SVC(), algorithm="SAMME") assert_raises_regexp(ValueError, "worse than random", clf.fit, X_fail, y_fail) def test_sample_weight_missing(): from sklearn.linear_model import LogisticRegression from sklearn.cluster import KMeans clf = AdaBoostClassifier(KMeans(), algorithm="SAMME") assert_raises(ValueError, clf.fit, X, y_regr) clf = AdaBoostRegressor(KMeans()) assert_raises(ValueError, clf.fit, X, y_regr) def test_sparse_classification(): # Check classification with sparse input. 
class CustomSVC(SVC): """SVC variant that records the nature of the training set.""" def fit(self, X, y, sample_weight=None): """Modification on fit caries data type for later verification.""" super(CustomSVC, self).fit(X, y, sample_weight=sample_weight) self.data_type_ = type(X) return self X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15, n_features=5, random_state=42) # Flatten y to a 1d array y = np.ravel(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix, dok_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) # Trained on sparse format sparse_classifier = AdaBoostClassifier( base_estimator=CustomSVC(probability=True), random_state=1, algorithm="SAMME" ).fit(X_train_sparse, y_train) # Trained on dense format dense_classifier = AdaBoostClassifier( base_estimator=CustomSVC(probability=True), random_state=1, algorithm="SAMME" ).fit(X_train, y_train) # predict sparse_results = sparse_classifier.predict(X_test_sparse) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) # decision_function sparse_results = sparse_classifier.decision_function(X_test_sparse) dense_results = dense_classifier.decision_function(X_test) assert_array_equal(sparse_results, dense_results) # predict_log_proba sparse_results = sparse_classifier.predict_log_proba(X_test_sparse) dense_results = dense_classifier.predict_log_proba(X_test) assert_array_equal(sparse_results, dense_results) # predict_proba sparse_results = sparse_classifier.predict_proba(X_test_sparse) dense_results = dense_classifier.predict_proba(X_test) assert_array_equal(sparse_results, dense_results) # score sparse_results = sparse_classifier.score(X_test_sparse, y_test) dense_results = dense_classifier.score(X_test, y_test) assert_array_equal(sparse_results, dense_results) # staged_decision_function sparse_results = sparse_classifier.staged_decision_function( X_test_sparse) dense_results = dense_classifier.staged_decision_function(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # staged_predict sparse_results = sparse_classifier.staged_predict(X_test_sparse) dense_results = dense_classifier.staged_predict(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # staged_predict_proba sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse) dense_results = dense_classifier.staged_predict_proba(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # staged_score sparse_results = sparse_classifier.staged_score(X_test_sparse, y_test) dense_results = dense_classifier.staged_score(X_test, y_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # Verify sparsity of data is maintained during training types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([(t == csc_matrix or t == csr_matrix) for t in types]) def test_sparse_regression(): # Check regression with sparse input. 
class CustomSVR(SVR): """SVR variant that records the nature of the training set.""" def fit(self, X, y, sample_weight=None): """Modification on fit caries data type for later verification.""" super(CustomSVR, self).fit(X, y, sample_weight=sample_weight) self.data_type_ = type(X) return self X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix, dok_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) # Trained on sparse format sparse_classifier = AdaBoostRegressor( base_estimator=CustomSVR(), random_state=1 ).fit(X_train_sparse, y_train) # Trained on dense format dense_classifier = dense_results = AdaBoostRegressor( base_estimator=CustomSVR(), random_state=1 ).fit(X_train, y_train) # predict sparse_results = sparse_classifier.predict(X_test_sparse) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) # staged_predict sparse_results = sparse_classifier.staged_predict(X_test_sparse) dense_results = dense_classifier.staged_predict(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([(t == csc_matrix or t == csr_matrix) for t in types]) def test_sample_weight_adaboost_regressor(): """ AdaBoostRegressor should work without sample_weights in the base estimator The random weighted sampling is done internally in the _boost method in AdaBoostRegressor. """ class DummyEstimator(BaseEstimator): def fit(self, X, y): pass def predict(self, X): return np.zeros(X.shape[0]) boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3) boost.fit(X, y_regr) assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
bsd-3-clause
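`test_samme_proba` above builds a mock estimator to check that the `_samme_proba` helper keeps the per-sample ordering of the class probabilities. The SAMME.R weak-learner contribution from Zhu et al. has the form (K - 1) * (log p_k - mean_j log p_j), a monotone transform of each row, so the ordering is preserved by construction. A small sketch of that transform, written from the paper rather than copied from the scikit-learn helper (the clipping constant is an illustrative choice):

import numpy as np

def samme_r_contribution(proba, eps=np.finfo(float).eps):
    """Per-class contribution of one weak learner under SAMME.R.

    proba: (n_samples, n_classes) predicted class probabilities.
    """
    K = proba.shape[1]
    log_p = np.log(np.clip(proba, eps, None))          # avoid log(0)
    return (K - 1) * (log_p - log_p.mean(axis=1, keepdims=True))

# The transform is a per-row monotone function of log-probability, so the
# ordering of classes within each sample is preserved (what the test asserts).
p = np.array([[0.7, 0.2, 0.1],
              [0.1, 0.3, 0.6]])
h = samme_r_contribution(p)
print(np.argmax(h, axis=1) == np.argmax(p, axis=1))    # [ True  True ]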
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/feature_selection/tests/test_rfe.py
1
3645
""" Testing Recursive feature elimination """ import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_equal from scipy import sparse from sklearn.feature_selection.rfe import RFE, RFECV from sklearn.datasets import load_iris from sklearn.metrics import zero_one_loss from sklearn.svm import SVC from sklearn.utils import check_random_state from sklearn.utils.testing import ignore_warnings from sklearn.metrics.scorer import SCORERS def test_rfe_set_params(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) y_pred = rfe.fit(X, y).predict(X) clf = SVC() rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1, estimator_params={'kernel': 'linear'}) y_pred2 = rfe.fit(X, y).predict(X) assert_array_equal(y_pred, y_pred2) def test_rfe(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] X_sparse = sparse.csr_matrix(X) y = iris.target # dense model clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) # sparse model clf_sparse = SVC(kernel="linear") rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) rfe_sparse.fit(X_sparse, y) X_r_sparse = rfe_sparse.transform(X_sparse) assert_equal(X_r.shape, iris.data.shape) assert_array_almost_equal(X_r[:10], iris.data[:10]) assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target)) assert_array_almost_equal(X_r, X_r_sparse.toarray()) def test_rfecv(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) assert_equal(len(rfecv.ranking_), X.shape[1]) X_r = rfecv.transform(X) # All the noisy variable were filtered out assert_array_equal(X_r, iris.data) # same in sparse rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Test using a customized loss function rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, loss_func=zero_one_loss) ignore_warnings(rfecv.fit)(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test using a scorer scorer = SCORERS['accuracy'] rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scorer) rfecv.fit(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test fix on grid_scores def test_scorer(estimator, X, y): return 1.0 rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=test_scorer) rfecv.fit(X, y) assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
apache-2.0
iagapov/ocelot
gui/sr_plot.py
2
4383
__author__ = 'Sergey Tomin'

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib import cm


def show_flux(screen, show='Total', xlim=(0, 0), ylim=(0, 0), file_name=None, unit="mm"):
    # pick the polarization component to display
    if show == 'Total':
        data = screen.Total
    elif show == 'Sigma':
        data = screen.Sigma
    else:
        data = screen.Pi

    if screen.nx == 1 or screen.ny == 1:
        if screen.nx == 1 and screen.ny == 1:
            X = screen.Eph
            xlabel = r'$E_{ph}$, $eV$'
            status = "spectrum"
        elif screen.nx == 1:
            X = screen.Yph
            xlabel = r'$Y$, $mm$'
            if unit == "mrad":
                xlabel = r'$Y$, $mrad$'
            status = "spatial"
        else:
            X = screen.Xph
            xlabel = r'$X$, $mm$'
            if unit == "mrad":
                xlabel = r'$X$, $mrad$'
            status = "spatial"
        D1(data, X, distance=screen.Distance, xlabel=xlabel, xlim=xlim, ylim=ylim,
           file_name=file_name, unit=unit, status=status)
    else:
        if screen.ne != 1:
            print(" ******** ERROR into show.screen ! *********** ")
            return
        D3(screen, data, distance=screen.Distance, file_name=file_name, unit=unit)


def D1(data, X, distance, xlabel, xlim, ylim, file_name, unit, status):
    # distance in [mm]
    if unit == "mrad":
        data = data*distance*distance*1e-6
    if unit == "mrad" and status == "spatial":
        X = X/distance*1e3
    maxS = max(data)
    index = np.where(data == max(data))[0][0]
    energy = X[index]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(X, data)
    if xlim != (0, 0):
        ax.set_xlim(xlim)
    if ylim != (0, 0):
        ax.set_ylim(ylim)
    #ax.set_title()
    ax.set_xlabel(xlabel)
    ax.set_ylabel(r"$I$, $\frac{ph}{sec \cdot mm^2 10^{-3}BW}$")
    if unit == "mrad":
        ax.set_ylabel(r"$I$, $\frac{ph}{sec mrad^2 10^{-3}BW}$")
    ax.grid(True)
    # annotate the peak position and the peak intensity
    ax.annotate('$\epsilon_1 = ' + str(int(energy*10)/10.) + '$', xy=(0.9, 0.85),
                xycoords='axes fraction', horizontalalignment='right',
                verticalalignment='top', fontsize=20)
    power = np.floor(np.log10(maxS))
    intensity = np.around(maxS*10**(-power), 2)*10**power
    ax.annotate('I = ' + str(intensity), xy=(0.9, 0.93),
                xycoords='axes fraction', horizontalalignment='right',
                verticalalignment='top', fontsize=15)
    if file_name is not None:
        figg = plt.gcf()
        k_size = 1.4
        figg.set_size_inches((4*k_size, 3.01*k_size))
        figg.savefig(file_name)
    else:
        plt.show()
    plt.show()


def D3(screen, Data, distance, file_name=None, unit="mm"):
    #print " showme.any = ", np.shape(Data)
    X, Y = np.meshgrid(screen.Xph, screen.Yph)
    if unit == "mrad":
        Data = Data*distance*distance*1e-6
        X = X/distance*1e6
        Y = Y/distance*1e6
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    #print " showme.any = ", np.shape(X)
    #print " showme.any = ", np.shape(Y)
    # reshape the flat intensity array onto the (ny, nx) screen grid
    data = np.zeros((screen.ny, screen.nx))
    for j in range(screen.ny):
        for i in range(screen.nx):
            data[j, i] = Data[screen.nx*j + i]
    ax.plot_surface(X, Y, data, rstride=1, cstride=1, cmap=cm.jet)
    #ax.set_zlim3d(0, 1)
    if unit == "mrad":
        ax.set_xlabel(r'$\theta_x$, $\mu rad$')
        ax.set_ylabel(r'$\theta_y$, $\mu rad$')
        ax.set_zlabel(r"$I$, $\frac{ph}{s \cdot mrad^2 10^{-3}BW}$")
    else:
        ax.set_xlabel(r'$X$, $mm$')
        ax.set_ylabel(r'$Y$, $mm$')
        ax.set_zlabel(r"$I$, $\frac{ph}{s\cdot mm^2 10^{-3}BW}$")
    #ax.set_xticks([])
    if file_name is not None:
        figg = plt.gcf()
        k_size = 1.7
        figg.set_size_inches((4*k_size, 3.01*k_size))
        figg.savefig(file_name)
    else:
        plt.show()
    #plt.show()


def plot3D_data(data, x=None, y=None):
    # "is not None" avoids elementwise comparison when x/y are numpy arrays
    if x is not None and y is not None:
        X, Y = np.meshgrid(x, y)
    else:
        print(np.shape(data))
        X, Y = np.meshgrid(np.arange(np.shape(data)[1]), np.arange(np.shape(data)[0]))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, data, rstride=1, cstride=1, cmap=cm.jet)
    plt.show()
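# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the generic helper above; the synthetic surface and
# axis ranges are made-up values, not anything produced by the ocelot GUI.
if __name__ == "__main__":
    yy, xx = np.mgrid[0:40, 0:60]
    zz = np.exp(-((xx - 30)**2 + (yy - 20)**2) / 200.)
    plot3D_data(zz)                                                          # pixel-index axes
    plot3D_data(zz, x=np.linspace(-3., 3., 60), y=np.linspace(-2., 2., 40))  # explicit axes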
gpl-3.0
icoderaven/mavlink
pymavlink/tools/mavgraph.py
5
9534
#!/usr/bin/env python ''' graph a MAVLink log file Andrew Tridgell August 2011 ''' import sys, struct, time, os, datetime import math, re import matplotlib from math import * from pymavlink.mavextra import * colourmap = { 'apm' : { 'MANUAL' : (1.0, 0, 0), 'AUTO' : ( 0, 1.0, 0), 'LOITER' : ( 0, 0, 1.0), 'FBWA' : (1.0, 0.5, 0), 'RTL' : ( 1, 0, 0.5), 'STABILIZE' : (0.5, 1.0, 0), 'LAND' : ( 0, 1.0, 0.5), 'STEERING' : (0.5, 0, 1.0), 'HOLD' : ( 0, 0.5, 1.0), 'ALT_HOLD' : (1.0, 0.5, 0.5), 'CIRCLE' : (0.5, 1.0, 0.5), 'POSITION' : (1.0, 0.0, 1.0), 'GUIDED' : (0.5, 0.5, 1.0), 'ACRO' : (1.0, 1.0, 0), 'CRUISE' : ( 0, 1.0, 1.0) }, 'px4' : { 'MANUAL' : (1.0, 0, 0), 'SEATBELT' : ( 0.5, 0.5, 0), 'EASY' : ( 0, 1.0, 0), 'AUTO' : ( 0, 0, 1.0), 'UNKNOWN' : ( 1.0, 1.0, 1.0) } } edge_colour = (0.1, 0.1, 0.1) lowest_x = None highest_x = None def plotit(x, y, fields, colors=[]): '''plot a set of graphs using date for x axis''' global lowest_x, highest_x pylab.ion() fig = pylab.figure(num=1, figsize=(12,6)) ax1 = fig.gca() ax2 = None xrange = 0.0 for i in range(0, len(fields)): if len(x[i]) == 0: continue if lowest_x is None or x[i][0] < lowest_x: lowest_x = x[i][0] if highest_x is None or x[i][-1] > highest_x: highest_x = x[i][-1] if highest_x is None or lowest_x is None: return xrange = highest_x - lowest_x xrange *= 24 * 60 * 60 formatter = matplotlib.dates.DateFormatter('%H:%M:%S') interval = 1 intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600, 900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ] for interval in intervals: if xrange / interval < 15: break locator = matplotlib.dates.SecondLocator(interval=interval) if not args.xaxis: ax1.xaxis.set_major_locator(locator) ax1.xaxis.set_major_formatter(formatter) empty = True ax1_labels = [] ax2_labels = [] for i in range(0, len(fields)): if len(x[i]) == 0: print("Failed to find any values for field %s" % fields[i]) continue if i < len(colors): color = colors[i] else: color = 'red' (tz, tzdst) = time.tzname if axes[i] == 2: if ax2 == None: ax2 = ax1.twinx() ax = ax2 if not args.xaxis: ax2.xaxis.set_major_locator(locator) ax2.xaxis.set_major_formatter(formatter) label = fields[i] if label.endswith(":2"): label = label[:-2] ax2_labels.append(label) else: ax1_labels.append(fields[i]) ax = ax1 if args.xaxis: if args.marker is not None: marker = args.marker else: marker = '+' if args.linestyle is not None: linestyle = args.linestyle else: linestyle = 'None' ax.plot(x[i], y[i], color=color, label=fields[i], linestyle=linestyle, marker=marker) else: if args.marker is not None: marker = args.marker else: marker = 'None' if args.linestyle is not None: linestyle = args.linestyle else: linestyle = '-' ax.plot_date(x[i], y[i], color=color, label=fields[i], linestyle=linestyle, marker=marker, tz=None) empty = False if args.flightmode is not None: for i in range(len(modes)-1): c = colourmap[args.flightmode].get(modes[i][1], edge_colour) ax1.axvspan(modes[i][0], modes[i+1][0], fc=c, ec=edge_colour, alpha=0.1) c = colourmap[args.flightmode].get(modes[-1][1], edge_colour) ax1.axvspan(modes[-1][0], ax1.get_xlim()[1], fc=c, ec=edge_colour, alpha=0.1) if ax1_labels != []: ax1.legend(ax1_labels,loc=args.legend) if ax2_labels != []: ax2.legend(ax2_labels,loc=args.legend2) if empty: print("No data to graph") return from argparse import ArgumentParser parser = ArgumentParser(description=__doc__) parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps") parser.add_argument("--planner", action='store_true', help="use planner 
file format") parser.add_argument("--condition", default=None, help="select packets by a condition") parser.add_argument("--labels", default=None, help="comma separated field labels") parser.add_argument("--legend", default='upper left', help="default legend position") parser.add_argument("--legend2", default='upper right', help="default legend2 position") parser.add_argument("--marker", default=None, help="point marker") parser.add_argument("--linestyle", default=None, help="line style") parser.add_argument("--xaxis", default=None, help="X axis expression") parser.add_argument("--multi", action='store_true', help="multiple files with same colours") parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs") parser.add_argument("--flightmode", default=None, help="Choose the plot background according to the active flight mode of the specified type, e.g. --flightmode=apm for ArduPilot or --flightmode=px4 for PX4 stack logs. Cannot be specified with --xaxis.") parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect") parser.add_argument("--output", default=None, help="provide an output format") parser.add_argument("logs_fields", metavar="<LOG or FIELD>", nargs="+") args = parser.parse_args() from pymavlink import mavutil if args.flightmode is not None and args.xaxis: print("Cannot request flightmode backgrounds with an x-axis expression") sys.exit(1) if args.flightmode is not None and args.flightmode not in colourmap: print("Unknown flight controller '%s' in specification of --flightmode" % args.flightmode) sys.exit(1) if args.output is not None: matplotlib.use('Agg') import pylab filenames = [] fields = [] for f in args.logs_fields: if os.path.exists(f): filenames.append(f) else: fields.append(f) msg_types = set() multiplier = [] field_types = [] colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow', 'brown', 'darkcyan', 'cornflowerblue', 'darkmagenta', 'deeppink', 'darkred'] # work out msg types we are interested in x = [] y = [] modes = [] axes = [] first_only = [] re_caps = re.compile('[A-Z_][A-Z0-9_]+') for f in fields: caps = set(re.findall(re_caps, f)) msg_types = msg_types.union(caps) field_types.append(caps) y.append([]) x.append([]) axes.append(1) first_only.append(False) def add_data(t, msg, vars, flightmode): '''add some data''' mtype = msg.get_type() if args.flightmode is not None and (len(modes) == 0 or modes[-1][1] != flightmode): modes.append((t, flightmode)) if mtype not in msg_types: return for i in range(0, len(fields)): if mtype not in field_types[i]: continue f = fields[i] if f.endswith(":2"): axes[i] = 2 f = f[:-2] if f.endswith(":1"): first_only[i] = True f = f[:-2] v = mavutil.evaluate_expression(f, vars) if v is None: continue if args.xaxis is None: xv = t else: xv = mavutil.evaluate_expression(args.xaxis, vars) if xv is None: continue y[i].append(v) x[i].append(xv) def process_file(filename): '''process one file''' print("Processing %s" % filename) mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps, zero_time_base=args.zero_time_base, dialect=args.dialect) vars = {} while True: msg = mlog.recv_match(args.condition) if msg is None: break tdays = matplotlib.dates.date2num(datetime.datetime.fromtimestamp(msg._timestamp)) add_data(tdays, msg, mlog.messages, mlog.flightmode) if len(filenames) == 0: print("No files to process") sys.exit(1) if args.labels is not None: labels = args.labels.split(',') if len(labels) != len(fields)*len(filenames): print("Number of 
labels (%u) must match number of fields (%u)" % ( len(labels), len(fields)*len(filenames))) sys.exit(1) else: labels = None for fi in range(0, len(filenames)): f = filenames[fi] process_file(f) for i in range(0, len(x)): if first_only[i] and fi != 0: x[i] = [] y[i] = [] if labels: lab = labels[fi*len(fields):(fi+1)*len(fields)] else: lab = fields[:] if args.multi: col = colors[:] else: col = colors[fi*len(fields):] plotit(x, y, lab, colors=col) for i in range(0, len(x)): x[i] = [] y[i] = [] if args.output is None: pylab.show() pylab.draw() raw_input('press enter to exit....') else: pylab.legend(loc=2,prop={'size':8}) pylab.savefig(args.output, bbox_inches='tight', dpi=200)
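# --- Hedged usage sketch (not part of the original tool) ---
# Typical invocations; the log file name below is a placeholder, and the
# message/field expressions must exist in your own log:
#   mavgraph.py flight.tlog ATTITUDE.roll ATTITUDE.pitch
#   mavgraph.py flight.tlog 'VFR_HUD.alt' 'VFR_HUD.groundspeed:2' --legend2='lower right'
#   mavgraph.py --output=alt.png --condition='VFR_HUD.alt>10' flight.tlog VFR_HUD.alt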
lgpl-3.0
psathyrella/partis
bin/chimera-plot.py
1
3199
#!/usr/bin/env python
import collections
import argparse
import sys
import os
import csv

partis_dir = os.path.dirname(os.path.realpath(__file__)).replace('/bin', '')
if not os.path.exists(partis_dir):
    print 'WARNING current script dir %s doesn\'t exist, so python path may not be correctly set' % partis_dir
sys.path.insert(1, partis_dir + '/python')

import utils
from hist import Hist
import plotting
import glutils

parser = argparse.ArgumentParser()
parser.add_argument('infile')
parser.add_argument('plotdir')
parser.add_argument('--glfo-dir', default='data/germlines/human', help='I\'m hacking this in afterwards because this was written before switching to yaml output files, so I think it was using this default germline dir anyway (except it used old glfo with different genes, so you probably actually have to pass in the real corresponding glfo anyway)')
parser.add_argument('--chunk-len', default=75, type=int)
parser.add_argument('--cutoff', default=0.3, help='point in max-abs-diff above which we assume most sequences are chimeric')
parser.add_argument('--title')
parser.add_argument('--locus', default='igh')
args = parser.parse_args()
if args.title == 'good':
    args.title = 'none'
elif args.title == 'chimeras':
    args.title = 'all chimeras'

def gk(uids):
    return ':'.join(uids)

glfo = None
if utils.getsuffix(args.infile) == '.csv':
    glfo = glutils.read_glfo(args.glfo_dir, args.locus)
glfo, annotation_list, _ = utils.read_output(args.infile, glfo=glfo)
annotations = collections.OrderedDict((line['unique_ids'][0], line) for line in annotation_list)

chfo = {uid : {k : v for k, v in zip(('imax', 'max_abs_diff'), utils.get_chimera_max_abs_diff(annotations[uid], iseq=0, chunk_len=args.chunk_len))} for uid in annotations}
biggest_adiffs = sorted(chfo, key=lambda q: chfo[q]['max_abs_diff'], reverse=True)
for uid in biggest_adiffs[:5]:
    print '%-3d %6.3f' % (chfo[uid]['imax'], chfo[uid]['max_abs_diff'])
    utils.print_reco_event(annotations[uid])

n_above_cutoff = len([_ for cfo in chfo.values() if cfo['max_abs_diff'] > args.cutoff])
chimeric_fraction = n_above_cutoff / float(len(chfo))
print ' %d / %d = %.3f above chimeric cutoff' % (n_above_cutoff, len(chfo), chimeric_fraction)

hmaxval = Hist(45, 0., 0.65)
for uid in annotations:
    hmaxval.fill(chfo[uid]['max_abs_diff'])
himax = Hist(75, 0., 400)
for uid in annotations:
    himax.fill(chfo[uid]['imax'])

utils.prep_dir(args.plotdir, wildlings=['*.svg', '*.csv'])

import matplotlib
from matplotlib import pyplot as plt
fig, ax = plotting.mpl_init()

xvals, yvals = zip(*[(v['imax'], v['max_abs_diff']) for v in chfo.values()])
plt.scatter(xvals, yvals, alpha=0.4)
print 'writing to %s' % args.plotdir
plotting.mpl_finish(ax, args.plotdir, 'hexbin', title=args.title, xlabel='break point', ylabel='abs mfreq diff')

plotting.draw_no_root(hmaxval, plotdir=args.plotdir, plotname='mfreq-diff', shift_overflows=True, xtitle='abs mfreq diff', ytitle='seqs')
hmaxval.write('%s/%s.csv' % (args.plotdir, 'mfreq-diff'))
plotting.draw_no_root(himax, plotdir=args.plotdir, plotname='imax', shift_overflows=True, xtitle='break point', ytitle='seqs')
himax.write('%s/%s.csv' % (args.plotdir, 'imax'))
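# --- Hedged usage sketch (not part of the original script) ---
# Example invocation; the annotation file and plot directory are placeholders,
# and --cutoff/--title take the defaults documented in the argparse setup above:
#   ./bin/chimera-plot.py partition-output.yaml _output/chimera-plots --title good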
gpl-3.0
ruymanengithub/vison
vison/eyegore/eyeWarnings.py
1
9301
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Module to handle HK-OOL Warnings Created on Thu Apr 19 16:09:02 2018 :author: Ruyman Azzollini """ # IMPORT STUFF from __future__ import print_function import os from pdb import set_trace as stop import string as st import tempfile import pandas as pd from collections import OrderedDict import re from vison.support import ET from vison.support import vjson from vison.support import utils # END IMPORT critical_HKkeys = ['CCD3_TEMP_T', 'CCD2_TEMP_T', 'CCD1_TEMP_T', 'VID_PCB_TEMP_T', 'FPGA_PCB_TEMP_T', 'CCD1_TEMP_B', 'CCD2_TEMP_B', 'CCD3_TEMP_B', 'VID_PCB_TEMP_B', 'FPGA_PCB_TEMP_B'] severitydict = dict(CCD1_TEMP_T=dict(Tm1=2, T1=2), CCD1_TEMP_B=dict(Tm1=2, T1=2), CCD2_TEMP_T=dict(Tm1=2, T1=2), CCD2_TEMP_B=dict(Tm1=2, T1=2), CCD3_TEMP_T=dict(Tm1=2, T1=2), CCD3_TEMP_B=dict(Tm1=2, T1=2), VID_PCB_TEMP_T=dict(Tm1=1, T1=2), VID_PCB_TEMP_B=dict(Tm1=1, T1=2), FPGA_PCB_TEMP_T=dict(Tm1=1, T1=2), FPGA_PCB_TEMP_B=dict(Tm1=1, T1=2), ) rootURL = "https://visonwarningcall.000webhostapp.com" subURLs = dict( NonSpecificWarning="visonwarningcall.xml", LoCCDTemp="low_CCD_TEMP.xml", HiCCDTemp="hi_CCD_TEMP.xml", LoVIDTemp="low_VID_PCB_TEMP.xml", HiVIDTemp="hi_VID_PCB_TEMP.xml", LoFPGATemp="low_FPGA_PCB_TEMP.xml", HiFPGATemp="hi_FPGA_PCB_TEMP.xml") URLs = dict() for key, value in subURLs.items(): URLs[key] = '%s/%s' % (rootURL, value) try: recipients_dict = vjson.load_jsonfile(os.path.join( utils.credentials_path, 'recipients_eyegore')) recipients = [recipients_dict['main'], recipients_dict['secondary']] except IOError: recipients = [None] def matches_expression(pair): return re.match(pair[0], pair[1]) is not None def _get_matching_HKurl(HKkey, patterns): for key, value in patterns.items(): if matches_expression((value, HKkey)): return key return None class EyeWarnings(object): critical_HKkeys = critical_HKkeys severitydict = severitydict def __init__(self, parent_flags_obj=None): """ """ self.parent = parent_flags_obj self.log = None if self.parent is not None: self.log = self.parent.log self.recipients = recipients try: self.et = ET.ET() except IOError: print('vison.support.ET: Phone Calling is limited to personel with access details.') self.et = None def process_event(self, HKkey, violation_type, value, HKlim, timestamp): if not self.iscritical(HKkey): return None self.assess_OOL_incident( HKkey, violation_type, value, HKlim, timestamp) def iscritical(self, HKkey): return bool(HKkey in self.critical_HKkeys) def assess_OOL_incident(self, HKkey, violation_type, value, HKlim, timestamp): """ """ if self.iscritical(HKkey): tvar = 'T%i' % violation_type violation_key = tvar.replace('-', 'm') Kseveritydict = self.severitydict[HKkey] try: severity = Kseveritydict[violation_key] except KeyError: severity = 0 self.issue_warning(HKkey, severity, violation_type, value, HKlim, timestamp) def send_email(self, subject, bodyList, recipient): with tempfile.NamedTemporaryFile(mode='w+a', delete=False) as f: for line in bodyList: print(line, file=f) f.close() try: os.system('mail -s "%s" %s < %s' % (subject, recipient, f.name)) except BaseException: print(('WARNING email not sent! [subject: %s]' % subject)) if self.log is not None: self.log.info( 'WARNING email not sent! 
[subject: %s]' % subject) os.unlink(f.name) def warn_via_email(self, HKkey, value, HKlim, timestamp, HKdata=None): """ """ if self.recipients[0] is None: if self.log is not None: self.log.info( "warn_via_email: recipient must be valid (%s)" % self.recipients.__str__()) subject = 'Eyegore HK WARNING: %s (DT=%s)' % (HKkey, timestamp) bodyList = ['HK OOL WARNING: %s' % HKkey, 'value = %s, limits = %s' % (value, HKlim), 'at %s\n\n' % timestamp] if HKdata is not None: _data = OrderedDict(time=HKdata['time']) _data[HKkey] = HKdata[HKkey] df = pd.DataFrame.from_dict(_data) bodyList.append('LATEST VALUES after the jump\n\n') bodyList.append(df.to_string(index=False)) for recipient in self.recipients: self.send_email(subject, bodyList, recipient) def do_phone_call(self, url): """Does phone call via self.et object""" self.et.dial_numbers(url) def get_phone_url(self, HKkey, violation_type): """ """ HKkeys_patterns = dict( CCDTemp='CCD\d_TEMP_[T,B]', VIDTemp='VID_PCB_TEMP_[T,B]', FPGATemp='FPGA_PCB_TEMP_[T,B]') HKurl = _get_matching_HKurl(HKkey, HKkeys_patterns) if violation_type == -1: prefix = 'Lo' elif violation_type == 1: prefix = 'Hi' elif violation_type == 2: prefix = 'Ne' else: prefix = 'Un' urlkey = '%s%s' % (prefix, HKurl) if urlkey in URLs: return URLs[urlkey] return None def warn_via_phone(self, HKkey, violation_type): """ """ if self.et is None: if self.log is not None: self.log.info('VOICE WARNING not sent! ET not available') return None url = self.get_phone_url(HKkey, violation_type) try: self.do_phone_call(url) except BaseException: if self.log is not None: self.log.info('VOICE WARNING not sent! [%s]' % HKkey) def warn_via_sms(self, HKkey, value, HKlim, timestamp): """ """ if self.et is None: if self.log is not None: self.log.info('SMS WARNING not sent! ET not available.') body = ''.join(['HK OOL WARNING: %s. ' % HKkey, 'value = %s, limits = %s. ' % (value, HKlim), 'at %s' % timestamp]) try: self.send_sms(body) except BaseException: if self.log is not None: self.log.info('SMS WARNING not sent! 
ET not available.') def send_sms(self, body): """Sends text message via self.et object""" self.et.send_sms(body) def get_parent_HK_data(self, HKkey): if self.parent is not None: HKdata = self.parent.HK # alias data = {'time': HKdata['time'][-100:], HKkey: HKdata[HKkey][-100:]} else: data = None return data def issue_warning(self, HKkey, severity, violation_type, value, HKlim, timestamp): """ """ HKdata = self.get_parent_HK_data(HKkey) if severity > 0: self.warn_via_email(HKkey, value, HKlim, timestamp, HKdata) if severity > 1: #print 'WARNINGS via phone-calls/sms DISABLED by now in eyeWarnings' # self.warn_via_phone(HKkey,violation_type) DISABLED BY NOW self.warn_via_sms(HKkey, value, HKlim, timestamp) def test_URLs(): """ """ import urllib.request, urllib.error, urllib.parse for key, value in URLs.items(): try: urllib2.urlopen(value) print('Found %s' % key, value) except urllib2.HTTPError as e: print(key, value, e.code) except urllib2.URLError as e: print(key, value, e.args) def test_get_URLs(): ew = EyeWarnings() assert ew.get_phone_url('CCD1_TEMP_T', -1) == URLs['LoCCDTemp'] assert ew.get_phone_url('CCD1_TEMP_T', 1) == URLs['HiCCDTemp'] assert ew.get_phone_url('VID_PCB_TEMP_T', -1) == URLs['LoVIDTemp'] assert ew.get_phone_url('VID_PCB_TEMP_T', 1) == URLs['HiVIDTemp'] assert ew.get_phone_url('FPGA_PCB_TEMP_T', -1) == URLs['LoFPGATemp'] assert ew.get_phone_url('FPGA_PCB_TEMP_T', 1) == URLs['HiFPGATemp'] print('test_get_URLs passed!') def test_do_phone_call(urlkey): ew = EyeWarnings() ew.do_phone_call(URLs[urlkey]) def test_do_send_sms(urlkey): ew = EyeWarnings() ew.send_sms(urlkey) def test_assess_OOL(): ew = EyeWarnings() ew.assess_OOL_incident('CCD1_TEMP_T', -1, -160, [-133, 35], '24042018_1828') ew.assess_OOL_incident('VID_PCB_TEMP_T', -1, -15., [-10, 40], '24042018_1828') if __name__ == '__main__': test_URLs() test_get_URLs() # test_assess_OOL() # does phone call # test_do_phone_call('LoCCDTemp') # does phone call # test_do_send_sms('LoCCDTemp')
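# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the OOL-warning entry point; the value, limits and
# timestamp are made-up numbers, and actual e-mail/phone/SMS delivery still
# depends on the recipients file and ET credentials loaded at import time.
#   ew = EyeWarnings()
#   ew.process_event('CCD1_TEMP_T', -1, -160., [-133., 35.], '24042018_1828')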
gpl-3.0
vigilv/scikit-learn
examples/linear_model/plot_iris_logistic.py
283
1678
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================

Shown below are the decision boundaries of a logistic-regression classifier
on the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset.
The datapoints are colored according to their labels.

"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

h = .02  # step size in the mesh

logreg = linear_model.LogisticRegression(C=1e5)

# we create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()
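# --- Hedged usage sketch (not part of the original example) ---
# After fitting, the classifier can score a new measurement; the sepal
# length/width values below (in cm) are an arbitrary illustrative point.
print(logreg.predict([[5.0, 3.5]]))        # predicted class label
print(logreg.predict_proba([[5.0, 3.5]]))  # per-class probabilities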
bsd-3-clause
wlamond/scikit-learn
sklearn/cluster/tests/test_dbscan.py
56
13916
""" Tests for DBSCAN clustering algorithm """ import pickle import numpy as np from scipy.spatial import distance from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_not_in from sklearn.neighbors import NearestNeighbors from sklearn.cluster.dbscan_ import DBSCAN from sklearn.cluster.dbscan_ import dbscan from sklearn.cluster.tests.common import generate_clustered_data from sklearn.metrics.pairwise import pairwise_distances n_clusters = 3 X = generate_clustered_data(n_clusters=n_clusters) def test_dbscan_similarity(): # Tests the DBSCAN algorithm with a similarity array. # Parameters chosen specifically for this task. eps = 0.15 min_samples = 10 # Compute similarities D = distance.squareform(distance.pdist(X)) D /= np.max(D) # Compute DBSCAN core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) labels = db.fit(D).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_feature(): # Tests the DBSCAN algorithm with a feature vector array. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 metric = 'euclidean' # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_sparse(): core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8, min_samples=10) core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10) assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_sparse_precomputed(): D = pairwise_distances(X) nn = NearestNeighbors(radius=.9).fit(X) D_sparse = nn.radius_neighbors_graph(mode='distance') # Ensure it is sparse not merely on diagonals: assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1) core_sparse, labels_sparse = dbscan(D_sparse, eps=.8, min_samples=10, metric='precomputed') core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10, metric='precomputed') assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_no_core_samples(): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 for X_ in [X, sparse.csr_matrix(X)]: db = DBSCAN(min_samples=6).fit(X_) assert_array_equal(db.components_, np.empty((0, X_.shape[1]))) assert_array_equal(db.labels_, -1) assert_equal(db.core_sample_indices_.shape, (0,)) def test_dbscan_callable(): # Tests the DBSCAN algorithm with a callable metric. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 # metric is the function reference, not the string key. 
metric = distance.euclidean # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_metric_params(): # Tests that DBSCAN works with the metrics_params argument. eps = 0.8 min_samples = 10 p = 1 # Compute DBSCAN with metric_params arg db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps, min_samples=min_samples, algorithm='ball_tree').fit(X) core_sample_1, labels_1 = db.core_sample_indices_, db.labels_ # Test that sample labels are the same as passing Minkowski 'p' directly db = DBSCAN(metric='minkowski', eps=eps, min_samples=min_samples, algorithm='ball_tree', p=p).fit(X) core_sample_2, labels_2 = db.core_sample_indices_, db.labels_ assert_array_equal(core_sample_1, core_sample_2) assert_array_equal(labels_1, labels_2) # Minkowski with p=1 should be equivalent to Manhattan distance db = DBSCAN(metric='manhattan', eps=eps, min_samples=min_samples, algorithm='ball_tree').fit(X) core_sample_3, labels_3 = db.core_sample_indices_, db.labels_ assert_array_equal(core_sample_1, core_sample_3) assert_array_equal(labels_1, labels_3) def test_dbscan_balltree(): # Tests the DBSCAN algorithm with balltree for neighbor calculation. eps = 0.8 min_samples = 10 D = pairwise_distances(X) core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree') labels = db.fit(X).labels_ n_clusters_3 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_3, n_clusters) db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_4 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_4, n_clusters) db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_5 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_5, n_clusters) def test_input_validation(): # DBSCAN.fit should accept a list of lists. 
X = [[1., 2.], [3., 4.]] DBSCAN().fit(X) # must not raise exception def test_dbscan_badargs(): # Test bad argument values: these should all raise ValueErrors assert_raises(ValueError, dbscan, X, eps=-1.0) assert_raises(ValueError, dbscan, X, algorithm='blah') assert_raises(ValueError, dbscan, X, metric='blah') assert_raises(ValueError, dbscan, X, leaf_size=-1) assert_raises(ValueError, dbscan, X, p=-1) def test_pickle(): obj = DBSCAN() s = pickle.dumps(obj) assert_equal(type(pickle.loads(s)), obj.__class__) def test_boundaries(): # ensure min_samples is inclusive of core point core, _ = dbscan([[0], [1]], eps=2, min_samples=2) assert_in(0, core) # ensure eps is inclusive of circumference core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2) assert_in(0, core) core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2) assert_not_in(0, core) def test_weighted_dbscan(): # ensure sample_weight is validated assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2]) assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4]) # ensure sample_weight has an effect assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0]) assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0]) # points within eps of each other: assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0]) # and effect of non-positive and non-integer sample_weight: assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0]) # for non-negative sample_weight, cores should be identical to repetition rng = np.random.RandomState(42) sample_weight = rng.randint(0, 5, X.shape[0]) core1, label1 = dbscan(X, sample_weight=sample_weight) assert_equal(len(label1), len(X)) X_repeated = np.repeat(X, sample_weight, axis=0) core_repeated, label_repeated = dbscan(X_repeated) core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool) core_repeated_mask[core_repeated] = True core_mask = np.zeros(X.shape[0], dtype=bool) core_mask[core1] = True assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask) # sample_weight should work with precomputed distance matrix D = pairwise_distances(X) core3, label3 = dbscan(D, sample_weight=sample_weight, metric='precomputed') assert_array_equal(core1, core3) assert_array_equal(label1, label3) # sample_weight should work with estimator est = DBSCAN().fit(X, sample_weight=sample_weight) core4 = est.core_sample_indices_ label4 = est.labels_ assert_array_equal(core1, core4) assert_array_equal(label1, label4) est = DBSCAN() label5 = est.fit_predict(X, sample_weight=sample_weight) core5 = est.core_sample_indices_ assert_array_equal(core1, core5) assert_array_equal(label1, label5) assert_array_equal(label1, est.labels_) def test_dbscan_core_samples_toy(): X = [[0], [2], [3], [4], [6], [8], [10]] n_samples = len(X) for algorithm in ['brute', 'kd_tree', 'ball_tree']: # Degenerate case: every sample is a core sample, either with its own # cluster or including other close core samples. 
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1) assert_array_equal(core_samples, np.arange(n_samples)) assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4]) # With eps=1 and min_samples=2 only the 3 samples from the denser area # are core samples. All other points are isolated and considered noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2) assert_array_equal(core_samples, [1, 2, 3]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # Only the sample in the middle of the dense area is core. Its two # neighbors are edge samples. Remaining samples are noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3) assert_array_equal(core_samples, [2]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # It's no longer possible to extract core samples with eps=1: # everything is noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4) assert_array_equal(core_samples, []) assert_array_equal(labels, -np.ones(n_samples)) def test_dbscan_precomputed_metric_with_degenerate_input_arrays(): # see https://github.com/scikit-learn/scikit-learn/issues/4641 for # more details X = np.eye(10) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1) X = np.zeros((10, 10)) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1) def test_dbscan_precomputed_metric_with_initial_rows_zero(): # sample matrix with initial two row all zero ar = np.array([ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1], [0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0] ]) matrix = sparse.csr_matrix(ar) labels = DBSCAN(eps=0.2, metric='precomputed', min_samples=2).fit(matrix).labels_ assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])
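# --- Hedged usage sketch (not part of the original test module) ---
# The estimator exercised above, run end-to-end on the module-level toy data X;
# eps/min_samples mirror the values used throughout these tests.
#   labels = DBSCAN(eps=0.8, min_samples=10).fit_predict(X)
#   n_found = len(set(labels)) - int(-1 in labels)   # clusters found, noise excluded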
bsd-3-clause
eramirem/astroML
book_figures/chapter4/fig_anderson_darling.py
3
3714
""" Gaussianity Tests ----------------- Figure 4.7. The results of the Anderson-Darling test, the Kolmogorov-Smirnov test, and the Shapiro-Wilk test when applied to a sample of 10,000 values drawn from a normal distribution (upper panel) and from a combination of two Gaussian distributions (lower panel). The functions are available in the ``scipy`` package: - The Anderson-Darling test (``scipy.stats.anderson``) - The Kolmogorov-Smirnov test (``scipy.stats.kstest``) - The Shapiro-Wilk test (``scipy.stats.shapiro``) """ # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general from __future__ import print_function, division import numpy as np from scipy import stats from matplotlib import pyplot as plt #---------------------------------------------------------------------- # This function adjusts matplotlib settings for a uniform feel in the textbook. # Note that with usetex=True, fonts are rendered with LaTeX. This may # result in an error if LaTeX is not installed on your system. In that case, # you can set usetex to False. from astroML.plotting import setup_text_plots setup_text_plots(fontsize=8, usetex=True) from astroML.stats import mean_sigma, median_sigmaG # create some distributions np.random.seed(1) normal_vals = stats.norm(loc=0, scale=1).rvs(10000) dual_vals = stats.norm(0, 1).rvs(10000) dual_vals[:4000] = stats.norm(loc=3, scale=2).rvs(4000) x = np.linspace(-4, 10, 1000) normal_pdf = stats.norm(0, 1).pdf(x) dual_pdf = 0.6 * stats.norm(0, 1).pdf(x) + 0.4 * stats.norm(3, 2).pdf(x) vals = [normal_vals, dual_vals] pdf = [normal_pdf, dual_pdf] xlims = [(-4, 4), (-4, 10)] #------------------------------------------------------------ # Compute the statistics and plot the results fig = plt.figure(figsize=(5, 7)) fig.subplots_adjust(left=0.13, right=0.95, bottom=0.06, top=0.95, hspace=0.1) for i in range(2): ax = fig.add_subplot(2, 1, 1 + i) # 2 x 1 subplot # compute some statistics A2, sig, crit = stats.anderson(vals[i]) D, pD = stats.kstest(vals[i], "norm") W, pW = stats.shapiro(vals[i]) mu, sigma = mean_sigma(vals[i], ddof=1) median, sigmaG = median_sigmaG(vals[i]) N = len(vals[i]) Z1 = 1.3 * abs(mu - median) / sigma * np.sqrt(N) Z2 = 1.1 * abs(sigma / sigmaG - 1) * np.sqrt(N) print(70 * '_') print(" Kolmogorov-Smirnov test: D = %.2g p = %.2g" % (D, pD)) print(" Anderson-Darling test: A^2 = %.2g" % A2) print(" significance | critical value ") print(" --------------|----------------") for j in range(len(sig)): print(" {0:.2f} | {1:.1f}%".format(sig[j], crit[j])) print(" Shapiro-Wilk test: W = %.2g p = %.2g" % (W, pW)) print(" Z_1 = %.1f" % Z1) print(" Z_2 = %.1f" % Z2) # plot a histogram ax.hist(vals[i], bins=50, normed=True, histtype='stepfilled', alpha=0.5) ax.plot(x, pdf[i], '-k') ax.set_xlim(xlims[i]) # print information on the plot info = "Anderson-Darling: $A^2 = %.2f$\n" % A2 info += "Kolmogorov-Smirnov: $D = %.2g$\n" % D info += "Shapiro-Wilk: $W = %.2g$\n" % W info += "$Z_1 = %.1f$\n$Z_2 = %.1f$" % (Z1, Z2) ax.text(0.97, 0.97, info, ha='right', va='top', transform=ax.transAxes) if i == 0: ax.set_ylim(0, 0.55) else: ax.set_ylim(0, 0.35) ax.set_xlabel('$x$') ax.set_ylabel('$p(x)$') plt.show()
bsd-2-clause
rmvanhees/pys5p
src/pys5p/s5p_plot.py
1
66550
""" This file is part of pyS5p https://github.com/rmvanhees/pys5p.git The class S5Pplot contains generic plot functions Copyright (c) 2017-2020 SRON - Netherlands Institute for Space Research All Rights Reserved License: BSD-3-Clause """ # pylint: disable=too-many-lines from datetime import datetime from pathlib import PurePath try: from cartopy import crs as ccrs except ModuleNotFoundError: FOUND_CARTOPY = False else: FOUND_CARTOPY = True import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np from . import error_propagation from . import swir_region from .biweight import biweight from .ckd_io import CKDio from .lib.plotlib import (blank_legend_key, check_data2d, FIGinfo, get_fig_coords, get_xdata, MidpointNormalize) from .tol_colors import tol_cmap, tol_cset # - main function ---------------------------------- class S5Pplot: """ Generate figure(s) for the SRON Tropomi SWIR monitor website or MPC reports Attributes ---------- figname : str Name of the PDF or PNG file Methods ------- close() Close PNG or (multipage) PDF document. set_cmap(cmap) Define alternative color-map to overrule the default. unset_cmap() Unset user supplied color-map, and use default color-map. get_cmap(method='data') Returns matplotlib colormap. set_zunit(units) Provide units of data to be displayed. unset_zunit() Unset user supplied unit definition of data. zunit Returns value of zunit (property). draw_signal(data_in, ref_data=None, method='data', add_medians=True, vperc=None, vrange=None, title=None, sub_title=None, extent=None, fig_info=None) Display 2D array data as image and averaged column/row signal plots. draw_quality(data_in, ref_data=None, add_medians=True, qlabels=None, thres_worst=0.1, thres_bad=0.8, title=None, sub_title=None, extent=None, fig_info=None) Display pixel-quality 2D array data as image and column/row statistics. draw_cmp_swir(data_in, ref_data, model_label='reference', vperc=None, vrange=None, add_residual=True, add_model=True, title=None, sub_title=None, extent=None, fig_info=None) Display signal vs model (or CKD) comparison in three panels. Top panel shows data, middle panel shows residuals (data - model) and lower panel shows model. draw_trend1d(msm1, hk_data=None, msm2=None, hk_keys=None, title=None, sub_title=None, fig_info=None) Display trends of measurement and house-keeping data. draw_lines(xdata, ydata, color=0, xlabel=None, ylabel=None, xlim=None, ylim=None, title=None, sub_title=None, fig_info=None, **kwargs) Display multiple 1D-data sets sharing the same x-axis draw_qhist(data_dict, title=None, density=True, fig_info=None) Display pixel-quality data as histograms. draw_tracks(lons, lats, icids, saa_region=None, title=None, fig_info=None) Display tracks of S5P on a world map using a Robinson projection. 
Notes ----- Generate Figures - Creating an S5Pplot object will open multi-page PDF file or single-page PNG - Each public function listed below can be used to create a (new) page * draw_signal * draw_quality * draw_cmp_swir * draw_trend1d * draw_lines * draw_qhist * draw_tracks - Closing the S5Pplot object will write the report to disk Suggestion for the name of a report/PDF-file: <identifier>_<yyyymmdd>_<orbit>.pdf where identifier : name of L1B/ICM/OCM product or monitoring database yyyymmdd : coverage start-date or start-date of monitoring entry orbit : reference orbit Examples -------- >>> from pys5p.s5p_plot import S5Pplot >>> plot = S5Pplot('test_plot_class.pdf') Create the same plot twice, using ndarray/set_zunit or S5Pmsm >>> plot.set_zunit('V') >>> plot.draw_signal(np.mean(signal, axis=0), title='Offset signal') >>> plot.unset_zunit() >>> msm = S5Pmsm(np.mean(signal, axis=0)) >>> msm.set_units('V') Create plot with matplotlib colormap 'RdPu' >>> plot.set_cmap(plt.get_cmap('RdPu')) >>> plot.draw_signal(msm, title='signal of my measurement') >>> plot.unset_cmap() >>> plot.draw_trend1d(np.mean(signal, axis=(1, 2)), hk_data, hk_keys) >>> plot.close() """ def __init__(self, figname, pdf_title=None): """ Initialize multi-page PDF document or a single-page PNG Parameters ---------- figname : string Name of PDF or PNG file (extension required) pdf_title : string Title of the PDF document (attribute of the PDF document) Default: 'Monitor report on Tropomi SWIR instrument' """ self.filename = figname if PurePath(figname).suffix.lower() == '.pdf': self.__pdf = PdfPages(figname) # add annotation doc = self.__pdf.infodict() if pdf_title is None: doc['Title'] = 'Monitor report on Tropomi SWIR instrument' else: doc['Title'] = pdf_title doc['Author'] = '(c) SRON Netherlands Institute for Space Research' else: self.__pdf = None self.__cmap = None self.__divider = None self.__zunit = None self.__mpl = None # only used by draw_lines def __repr__(self): pass def __close_this_page(self, fig): """ close current matplotlib figure or page in a PDF document """ # add save figure if self.__pdf is None: plt.savefig(self.filename) plt.close(fig) else: self.__pdf.savefig() self.__divider = None def close(self): """ Close PNG or (multipage) PDF document """ if self.__pdf is None: return self.__pdf.close() plt.close('all') # -------------------------------------------------- def set_cmap(self, cmap): """ Define alternative color-map to overrule the default Parameter --------- cmap : matplotlib color-map """ self.__cmap = cmap def unset_cmap(self): """ Unset user supplied color-map, and use default color-map """ self.__cmap = None def get_cmap(self, method='data'): """ Returns matplotlib colormap """ if self.__cmap is not None: return self.__cmap if method == 'diff': return tol_cmap('sunset') if method == 'ratio': return tol_cmap('sunset') return tol_cmap('rainbow_PuRd') def set_zunit(self, units): """ Provide units of data to be displayed """ self.__zunit = units def unset_zunit(self): """ Unset user supplied unit definition of data """ self.__zunit = None @property def zunit(self): """ Returns value of zunit """ return self.__zunit def __adjust_zunit(self, vmin: float, vmax: float): """ Adjust units: electron to 'e' and Volt to 'V' and scale data range to <-1000, 1000>. Parameters ---------- vmin, vmax : float image-data range Returns ------- float : dscale """ if self.zunit is None or self.zunit == '1': return 1. 
max_value = max(abs(vmin), abs(vmax)) if self.zunit.find('electron') >= 0: zunit = self.zunit if zunit.find('.s-1') >= 0: zunit = zunit.replace('.s-1', ' s$^{-1}$') if max_value > 1000000000: dscale = 1e9 zunit = zunit.replace('electron', 'Ge') elif max_value > 1000000: dscale = 1e6 zunit = zunit.replace('electron', 'Me') elif max_value > 1000: dscale = 1e3 zunit = zunit.replace('electron', 'ke') else: dscale = 1. zunit = zunit.replace('electron', 'e') self.set_zunit(zunit) return dscale if self.zunit[0] == 'V': zunit = self.zunit if max_value <= 2e-4: dscale = 1e-6 zunit = zunit.replace('V', u'\xb5V') elif max_value <= 0.1: dscale = 1e-3 zunit = zunit.replace('V', 'mV') else: dscale = 1. zunit = 'V' self.set_zunit(zunit) return dscale return 1. # ------------------------- def __get_zlabel(self, method): """ Return label of colorbar """ if method == 'ratio': zlabel = 'ratio' elif method == 'ratio_unc': zlabel = 'uncertainty' elif method == 'diff': if self.zunit is None or self.zunit == '1': zlabel = 'difference' else: zlabel = r'difference [{}]'.format(self.zunit) elif method == 'error': if self.zunit is None or self.zunit == '1': zlabel = 'uncertainty' else: zlabel = r'uncertainty [{}]'.format(self.zunit) else: if self.zunit is None or self.zunit == '1': zlabel = 'value' else: zlabel = r'value [{}]'.format(self.zunit) return zlabel @staticmethod def __adjust_tickmarks(ax_fig, coords): """ Define ticks locations for X & Y valid for most detectors Notes ----- - This method is used by: draw_signal, draw_quality, draw_cmp_swir """ sz_xcoord = len(coords['X']['data']) sz_ycoord = len(coords['Y']['data']) if (sz_xcoord % 10) == 0: minor_locator = MultipleLocator(sz_xcoord / 20) major_locator = MultipleLocator(sz_xcoord / 5) ax_fig.xaxis.set_major_locator(major_locator) ax_fig.xaxis.set_minor_locator(minor_locator) elif (sz_xcoord % 8) == 0: minor_locator = MultipleLocator(sz_xcoord / 16) major_locator = MultipleLocator(sz_xcoord / 4) ax_fig.xaxis.set_major_locator(major_locator) ax_fig.xaxis.set_minor_locator(minor_locator) if (sz_ycoord % 10) == 0: minor_locator = MultipleLocator(sz_ycoord / 20) major_locator = MultipleLocator(sz_ycoord / 5) ax_fig.yaxis.set_major_locator(major_locator) ax_fig.yaxis.set_minor_locator(minor_locator) elif (sz_ycoord % 8) == 0: minor_locator = MultipleLocator(sz_ycoord / 16) major_locator = MultipleLocator(sz_ycoord / 4) ax_fig.yaxis.set_major_locator(major_locator) ax_fig.yaxis.set_minor_locator(minor_locator) # ------------------------- @staticmethod def __add_copyright(axx): """ Display SRON copyright in current figure """ axx.text(1, 0, r' $\copyright$ SRON', horizontalalignment='right', verticalalignment='bottom', rotation='vertical', fontsize='xx-small', transform=axx.transAxes) def __add_colorbar(self, ax_img, labels, bounds=None): """ Draw colorbar right of image panel """ # define location of colorbar cax = self.__divider.append_axes("right", size=0.3, pad=0.05) # colorbar for image data if bounds is None: plt.colorbar(ax_img, cax=cax, label=labels) return # colorbar for pixel-quality data mbounds = [(bounds[ii+1] + bounds[ii]) / 2 for ii in range(len(bounds)-1)] plt.colorbar(ax_img, cax=cax, ticks=mbounds, boundaries=bounds) cax.tick_params(axis='y', which='both', length=0) cax.set_yticklabels(labels) def __add_fig_box(self, fig, fig_info) -> None: """ Add meta-information in the current figure Parameters ---------- fig : Matplotlib figure instance fig_info : FIGinfo instance of pys5p.lib.plotlib.FIGinfo to be displayed """ if fig_info is None or 
fig_info.location == 'none': return if fig_info.location == 'above': xpos = 1 - 0.4 / fig.get_figwidth() ypos = 1 - 0.25 / fig.get_figheight() fig.text(xpos, ypos, fig_info.as_str(), fontsize='x-small', style='normal', verticalalignment='top', horizontalalignment='right', multialignment='left', bbox={'facecolor': 'white', 'pad': 5}) return # box location on the right of the main image xpos = 1 - 3.825 / fig.get_figwidth() ypos = 1 - 1.2 / fig.get_figheight() # height=95: 9-45 8-50 7-58 6-67 # height=74: 9-31 8-36 7-40 6-47 # height=635: 9-25 8-29 7-33 6-38 # height=60: 9-23 8-26 7-30 6-37 font_sizes = [9, 8, 7, 6, 5.25] if fig.get_figheight() == 9.5: mx_lines = np.array([45, 50, 58, 67, 999]) elif fig.get_figheight() == 7.4: mx_lines = np.array([31, 36, 40, 47, 999]) elif fig.get_figheight() == 6.35: mx_lines = np.array([25, 29, 33, 38, 999]) elif fig.get_figheight() == 6.: mx_lines = np.array([23, 26, 30, 37, 999]) else: raise KeyError('unknown figure height') fontsize = font_sizes[(mx_lines > len(fig_info)).nonzero()[0].min()] ax_info = self.__divider.append_axes("right", size=2.5, pad=.75) ax_info.set_xticks([]) # remove all X-axis tick locations ax_info.set_yticks([]) # remove all Y-axis tick locations for key in ('left', 'right', 'top', 'bottom'): ax_info.spines[key].set_color('white') fig.text(xpos, ypos, fig_info.as_str(), fontsize=fontsize, style='normal', verticalalignment='top', horizontalalignment='left', multialignment='left', bbox=None, linespacing=1.5) # ------------------------- def __add_data1d(self, plot_mode, axarr, msm_1, msm_2): """ Implemented 3 options 1) only house-keeping data, no upper-panel with detector data 2) draw pixel-quality data, displayed in the upper-panel 3) draw measurement data, displayed in the upper-panel Notes ----- - This method is used by: draw_trend1d """ # define colors cset = tol_cset('bright') i_ax = 0 if plot_mode == 'quality': use_steps = msm_1.value.size <= 256 xdata, gap_list = get_xdata(msm_1.coords[0][:], use_steps) qc_dict = {'bad': cset.yellow, 'worst': cset.red} ql_dict = {'bad': 'bad (quality < 0.8)', 'worst': 'worst (quality < 0.1)'} for key in ['bad', 'worst']: ydata = msm_1.value[key].copy().astype(float) for indx in reversed(gap_list): ydata = np.insert(ydata, indx, np.nan) ydata = np.insert(ydata, indx, np.nan) ydata = np.insert(ydata, indx, ydata[indx-1]) if use_steps: ydata = np.append(ydata, ydata[-1]) axarr[i_ax].step(xdata, ydata, where='post', linewidth=1.5, color=qc_dict[key]) else: axarr[i_ax].plot(xdata, ydata, linewidth=1.5, color=qc_dict[key]) axarr[i_ax].set_xlim([xdata[0], xdata[-1]]) axarr[i_ax].grid(True) axarr[i_ax].set_ylabel('{}'.format('count')) legenda = axarr[i_ax].legend([blank_legend_key()], [ql_dict[key]], loc='upper left') legenda.draw_frame(False) i_ax += 1 return i_ax if plot_mode == 'data': for msm in (msm_1, msm_2): if msm is None: continue # convert units from electrons to ke, Me, ... 
if msm.error is None: vmin = msm.value.min() vmax = msm.value.max() else: vmin = msm.error[0].min() vmax = msm.error[1].max() self.set_zunit(msm.units) dscale = self.__adjust_zunit(vmin, vmax) use_steps = msm.value.size <= 256 xdata, gap_list = get_xdata(msm.coords[0][:], use_steps) ydata = msm.value.copy() / dscale for indx in reversed(gap_list): ydata = np.insert(ydata, indx, np.nan) ydata = np.insert(ydata, indx, np.nan) ydata = np.insert(ydata, indx, ydata[indx-1]) if use_steps: ydata = np.append(ydata, ydata[-1]) axarr[i_ax].step(xdata, ydata, where='post', linewidth=1.5, color=cset.blue) else: axarr[i_ax].plot(xdata, ydata, linewidth=1.5, color=cset.blue) if msm.error is not None: yerr1 = msm.error[0].copy() / dscale yerr2 = msm.error[1].copy() / dscale for indx in reversed(gap_list): yerr1 = np.insert(yerr1, indx, np.nan) yerr2 = np.insert(yerr2, indx, np.nan) yerr1 = np.insert(yerr1, indx, np.nan) yerr2 = np.insert(yerr2, indx, np.nan) yerr1 = np.insert(yerr1, indx, yerr1[indx-1]) yerr2 = np.insert(yerr2, indx, yerr2[indx-1]) if use_steps: yerr1 = np.append(yerr1, yerr1[-1]) yerr2 = np.append(yerr2, yerr2[-1]) axarr[i_ax].fill_between(xdata, yerr1, yerr2, step='post', facecolor='#BBCCEE') else: axarr[i_ax].fill_between(xdata, yerr1, yerr2, facecolor='#BBCCEE') axarr[i_ax].set_xlim([xdata[0], xdata[-1]]) axarr[i_ax].grid(True) if self.zunit is None or self.zunit == '1': axarr[i_ax].set_ylabel(msm.long_name) else: axarr[i_ax].set_ylabel(r'{} [{}]'.format( msm.long_name, self.zunit)) i_ax += 1 return i_ax return i_ax @staticmethod def __add_hkdata(i_ax, axarr, hk_data, hk_keys): """ Add house-keeping information for method draw_trend1d Notes ----- - This method is used by: draw_trend1d """ # define colors cset = tol_cset('bright') (xlabel,) = hk_data.coords._fields xdata = hk_data.coords[0][:].copy() use_steps = xdata.size <= 256 xdata, gap_list = get_xdata(xdata, use_steps) if xlabel == 'time': xdata = xdata.astype(float) / 3600 for key in hk_keys: if key not in hk_data.value.dtype.names: continue indx = hk_data.value.dtype.names.index(key) hk_unit = hk_data.units[indx] if isinstance(hk_unit, bytes): hk_unit = hk_unit.decode('ascii') full_string = hk_data.long_name[indx] if isinstance(full_string, bytes): full_string = full_string.decode('ascii') if hk_unit == 'K': hk_name = full_string.rsplit(' ', 1)[0] hk_label = 'temperature [{}]'.format(hk_unit) lcolor = cset.blue fcolor = '#BBCCEE' elif hk_unit in ('A', 'mA'): hk_name = full_string.rsplit(' ', 1)[0] hk_label = 'current [{}]'.format(hk_unit) lcolor = cset.green fcolor = '#CCDDAA' elif hk_unit == '%': hk_name = full_string.rsplit(' ', 2)[0] hk_label = 'duty cycle [{}]'.format(hk_unit) lcolor = cset.red fcolor = '#FFCCCC' else: hk_name = full_string hk_label = 'value [{}]'.format(hk_unit) lcolor = cset.purple fcolor = '#EEBBDD' ydata = hk_data.value[key].copy() for indx in reversed(gap_list): ydata = np.insert(ydata, indx, np.nan) ydata = np.insert(ydata, indx, np.nan) ydata = np.insert(ydata, indx, ydata[indx-1]) if np.all(np.isnan(ydata)): ydata[:] = 0 if use_steps: ydata = np.append(ydata, ydata[-1]) axarr[i_ax].step(xdata, ydata, where='post', linewidth=1.5, color=lcolor) else: axarr[i_ax].plot(xdata, ydata, linewidth=1.5, color=lcolor) # we are interested to see the last 2 days of the data, # and any trend over the whole data, without outliers ylim = None ybuff = ydata[np.isfinite(ydata)] if xlabel == 'orbit' and ybuff.size > 5 * 15: ni = 2 * 15 ylim = [min(ybuff[0:ni].min(), ybuff[-ni:].min()), max(ybuff[0:ni].max(), 
ybuff[-ni:].max())] # add errors on the y-parameter if hk_data.error is not None and not np.all(np.isnan(ydata)): yerr1 = hk_data.error[key][:, 0].copy() yerr2 = hk_data.error[key][:, 1].copy() for indx in reversed(gap_list): yerr1 = np.insert(yerr1, indx, np.nan) yerr2 = np.insert(yerr2, indx, np.nan) yerr1 = np.insert(yerr1, indx, np.nan) yerr2 = np.insert(yerr2, indx, np.nan) yerr1 = np.insert(yerr1, indx, yerr1[indx-1]) yerr2 = np.insert(yerr2, indx, yerr2[indx-1]) if use_steps: yerr1 = np.append(yerr1, yerr1[-1]) yerr2 = np.append(yerr2, yerr2[-1]) if not (np.array_equal(ydata, yerr1) and np.array_equal(ydata, yerr2)): axarr[i_ax].fill_between(xdata, yerr1, yerr2, step='post', facecolor=fcolor) ybuff1 = yerr1[np.isfinite(yerr1)] ybuff2 = yerr2[np.isfinite(yerr2)] if xlabel == 'orbit' \ and ybuff1.size > 5 * 15 and ybuff2.size > 5 * 15: ni = 2 * 15 ylim = [min(ybuff1[0:ni].min(), ybuff1[-ni:].min()), max(ybuff2[0:ni].max(), ybuff2[-ni:].max())] axarr[i_ax].locator_params(axis='y', nbins=4) axarr[i_ax].set_xlim([xdata[0], xdata[-1]]) if ylim is not None: delta = (ylim[1] - ylim[0]) / 5 if delta == 0: if ylim[0] == 0: delta = 0.01 else: delta = ylim[0] / 20 axarr[i_ax].set_ylim([ylim[0] - delta, ylim[1] + delta]) axarr[i_ax].grid(True) axarr[i_ax].set_ylabel(hk_label) legenda = axarr[i_ax].legend([blank_legend_key()], [hk_name], loc='upper left') legenda.draw_frame(False) i_ax += 1 def __add_side_panels(self, ax_fig, img_data, coords, quality=None): """ Draw row and column medians left and below image panel Notes ----- - This method is used by: draw_signal, draw_quality """ cset = tol_cset('bright') for xtl in ax_fig.get_xticklabels(): xtl.set_visible(False) for ytl in ax_fig.get_yticklabels(): ytl.set_visible(False) # ----- Panel bellow the image ----- ax_medx = self.__divider.append_axes("bottom", 1.15, pad=0.25, sharex=ax_fig) if quality is None: data_row = biweight(img_data, axis=0) if len(coords['X']['data']) > 250: ax_medx.plot(coords['X']['data'], data_row, linewidth=0.75, color=cset.blue) else: ax_medx.step(coords['X']['data'], data_row, linewidth=0.75, color=cset.blue) else: data_row = np.sum(((img_data == 1) | (img_data == 2)), axis=0) ax_medx.step(coords['X']['data'], data_row, linewidth=0.75, color=cset.yellow) data_row = np.sum((img_data == 1), axis=0) # worst ax_medx.step(coords['X']['data'], data_row, linewidth=0.75, color=cset.red) if quality['compare']: data_row = np.sum((img_data == 4), axis=0) # to_good ax_medx.step(coords['X']['data'], data_row, linewidth=0.75, color=cset.green) ax_medx.set_xlim([0, len(coords['X']['data'])]) ax_medx.grid(True) ax_medx.set_xlabel(coords['X']['label']) # ----- Panel left of the image ----- ax_medy = self.__divider.append_axes("left", 1.15, pad=0.25, sharey=ax_fig) if quality is None: data_col = biweight(img_data, axis=1) if len(coords['Y']['data']) > 250: ax_medy.plot(data_col, coords['Y']['data'], linewidth=0.75, color=cset.blue) else: ax_medy.step(data_col, coords['Y']['data'], linewidth=0.75, color=cset.blue) else: data_col = np.sum(((img_data == 1) | (img_data == 2)), axis=1) ax_medy.step(data_col, coords['Y']['data'], linewidth=0.75, color=cset.yellow) data_col = np.sum(img_data == 1, axis=1) # worst ax_medy.step(data_col, coords['Y']['data'], linewidth=0.75, color=cset.red) if quality['compare']: data_col = np.sum(img_data == 4, axis=1) # to_good ax_medy.step(data_col, coords['Y']['data'], linewidth=0.75, color=cset.green) ax_medy.set_ylim([0, len(coords['Y']['data'])]) ax_medy.grid(True) ax_medy.set_ylabel(coords['Y']['label']) 
# ------------------------- @staticmethod def __fig_sz_img(fig_info, data_dims): """ Define figure size depended on image aspect-ratio Notes ----- - This method is used by: draw_signal, draw_quality """ fig_ext = 3.5 if fig_info.location == 'right' else 0 # determine image aspect ratio, range [1, 4] aspect = min(4, max(1, int(round(data_dims[1] / data_dims[0])))) if aspect == 1: figsize = (11 + fig_ext, 9.5) elif aspect == 2: figsize = (12 + fig_ext, 7.4) elif aspect == 3: figsize = (13 + fig_ext, 6.35) elif aspect == 4: figsize = (14 + fig_ext, 6) else: print(__name__ + '.draw_signal', aspect) raise ValueError('*** FATAL: aspect ratio out of range') return figsize # ------------------------- def __get_fig_data2d(self, method: str, data_in, ref_data=None): """ Determine image data to be displayed """ # check image data try: check_data2d(method, data_in) except Exception as exc: raise RuntimeError('invalid input-data provided') from exc # check reference data if ref_data is not None: try: check_data2d(method, ref_data) except Exception as exc: raise RuntimeError('invalid reference-data provided') from exc img_data = None if isinstance(data_in, np.ndarray): img_data = data_in.copy() else: if method == 'error': img_data = data_in.error.copy() else: img_data = data_in.value.copy() if method == 'ratio_unc': self.unset_zunit() mask = ref_data.value != 0. img_data[~mask] = np.nan img_data[mask] = error_propagation.unc_div( data_in.value[mask], data_in.error[mask], ref_data.value[mask], ref_data.error[mask]) elif method == 'diff': self.set_zunit(data_in.units) mask = np.isfinite(data_in.value) & np.isfinite(ref_data) img_data[~mask] = np.nan img_data[mask] -= ref_data[mask] elif method == 'ratio': self.unset_zunit() mask = (np.isfinite(data_in.value) & np.isfinite(ref_data) & (ref_data != 0.)) img_data[~mask] = np.nan img_data[mask] /= ref_data[mask] else: self.set_zunit(data_in.units) return img_data def __scale_data2d(self, method: str, img_data, vperc, vrange): """ Determine range of image data and normalize colormap accordingly """ # define data-range if vrange is None: vmin, vmax = np.nanpercentile(img_data, vperc) else: vmin, vmax = vrange # convert units from electrons to ke, Me, ... dscale = self.__adjust_zunit(vmin, vmax) if not issubclass(img_data.dtype.type, np.integer): vmin /= dscale vmax /= dscale img_data[np.isfinite(img_data)] /= dscale mid_val = (vmin + vmax) / 2 if method == 'diff': if vmin < 0 < vmax: tmp1, tmp2 = (vmin, vmax) vmin = -max(-tmp1, tmp2) vmax = max(-tmp1, tmp2) mid_val = 0. if method == 'ratio': if vmin < 1 < vmax: tmp1, tmp2 = (vmin, vmax) vmin = min(tmp1, 1 / tmp2) vmax = max(1 / tmp1, tmp2) mid_val = 1. 
return MidpointNormalize(midpoint=mid_val, vmin=vmin, vmax=vmax) # ------------------------- @staticmethod def __set_fig_quality(qthres, data, ref_data=None): """ Check pixel-quality data and convert to quality classes Quality classes without reference data: [good]: value=4 [bad]: value=2 [worst]: value=1 [unused]: value=0 Quality classes with reference data: [unchanged]: value=8 [to_good]: value=4 [good_to_bad]: value=2 [to_worst]: value=1 [unused]: value=0 Note: ignored are [worst_to_bad] """ def float_to_quality(qthres, arr): """ Convert float value [0, 1] to quality classes """ res = np.empty(arr.shape, dtype='i1') res[arr >= qthres['bad']] = 4 res[(arr > qthres['worst']) & (arr < qthres['bad'])] = 2 res[arr <= qthres['worst']] = 1 res[~swir_region.mask()] = 0 return res # check image data try: check_data2d('quality', data) except Exception as exc: raise RuntimeError('invalid input-data provided') from exc if isinstance(data, np.ndarray): qval = float_to_quality(qthres, data) else: qval = float_to_quality(qthres, data.value) if ref_data is None: return qval # check reference data try: check_data2d('quality', ref_data) except Exception as exc: raise RuntimeError('invalid reference-data provided') from exc # return difference with reference qdiff = float_to_quality(qthres, ref_data) - qval qval = np.full_like(qdiff, 8) qval[(qdiff == -2) | (qdiff == -3)] = 4 qval[qdiff == 2] = 2 qval[(qdiff == 1) | (qdiff == 3)] = 1 qval[~swir_region.mask()] = 0 return qval # -------------------------------------------------- def draw_signal(self, data_in, ref_data=None, method='data', *, add_medians=True, vperc=None, vrange=None, title=None, sub_title=None, extent=None, fig_info=None): """ Display 2D array data as image and averaged column/row signal plots Parameters ---------- data : numpy.ndarray or pys5p.S5Pmsm Object holding measurement data and attributes ref_data : numpy.ndarray, optional Numpy array holding reference data. Required for method equals 'ratio' where measurement data is divided by the reference 'diff' where reference is subtracted from the measurement data S5Pmsm object holding the reference data as value/error required by the method 'ratio_unc' method : string Method of plot to be generated, default is 'data', optional are 'error', 'diff', 'ratio', 'ratio_unc' add_medians : boolean show in side plots row and column (biweight) medians. Default=True. vperc : list Range to normalize luminance data between percentiles min and max of array data. Default is [1., 99.]. keyword 'vperc' is ignored when vrange is given vrange : list [vmin,vmax] Range to normalize luminance data between vmin and vmax. title : string Title of the figure. Default is None Suggestion: use attribute "title" of data-product sub_title : string Sub-title of the figure. Default is None Suggestion: use attribute "comment" of data-product fig_info : FIGinfo, optional OrderedDict holding meta-data to be displayed in the figure The information provided in the parameter 'fig_info' will be displayed in a small box. In addition, we display the creation date and the data (biweight) median & spread. """ if method not in ('data', 'error', 'diff', 'ratio', 'ratio_unc'): raise RuntimeError('unknown method: {}'.format(method)) if fig_info is None: fig_info = FIGinfo() if vrange is None and vperc is None: vperc = (1., 99.) 
elif vrange is None: if len(vperc) != 2: raise TypeError('keyword vperc requires two values') else: if len(vrange) != 2: raise TypeError('keyword vrange requires two values') try: img_data = self.__get_fig_data2d(method, data_in, ref_data) except Exception as exc: raise RuntimeError('invalid input-data provided') from exc norm = self.__scale_data2d(method, img_data, vperc, vrange) # inititalize figure fig, ax_fig = plt.subplots(figsize=self.__fig_sz_img(fig_info, img_data.shape)) if title is not None: ypos = 1 - 0.3 / fig.get_figheight() fig.suptitle(title, fontsize='x-large', position=(0.5, ypos), horizontalalignment='center') if sub_title is not None: ax_fig.set_title(sub_title, fontsize='large') # draw image coords = get_fig_coords(data_in) if extent is None: extent = [0, len(coords['X']['data']), 0, len(coords['Y']['data'])] ax_img = ax_fig.imshow(img_data, cmap=self.get_cmap(method), interpolation='none', origin='lower', aspect='equal', extent=extent, norm=norm) self.__add_copyright(ax_fig) # define ticks locations for X & Y valid for most detectors self.__adjust_tickmarks(ax_fig, coords) self.__divider = make_axes_locatable(ax_fig) self.__add_colorbar(ax_img, self.__get_zlabel(method)) if add_medians: self.__add_side_panels(ax_fig, img_data, coords) else: ax_fig.set_xlabel(coords['X']['label']) ax_fig.set_ylabel(coords['Y']['label']) # add annotation and save figure median, spread = biweight(img_data, spread=True) if self.zunit is None or self.zunit == '1': fig_info.add('median', median, '{:.5g}') fig_info.add('spread', spread, '{:.5g}') else: fig_info.add('median', (median, self.zunit), r'{:.5g} {}') fig_info.add('spread', (spread, self.zunit), r'{:.5g} {}') self.__add_fig_box(fig, fig_info) self.__close_this_page(fig) # -------------------------------------------------- def draw_quality(self, data_in, ref_data=None, *, add_medians=True, thres_worst=0.1, thres_bad=0.8, qlabels=None, title=None, sub_title=None, extent=None, fig_info=None): """ Display pixel-quality 2D array data as image and column/row statistics Parameters ---------- data : numpy.ndarray or pys5p.S5Pmsm Object holding measurement data and attributes ref_data : numpy.ndarray, optional Numpy array holding reference data, for example pixel quality reference map taken from the CKD. Shown are the changes with respect to the reference data. Default is None add_medians : boolean show in side plots row and column (biweight) medians. Default=True. thres_worst : float Threshold to reject only the worst of the bad pixels, intended for CKD derivation. Default=0.1 thres_bad : float Threshold for bad pixels. Default=0.8 qlabel : list of strings Labels for the pixel-quality classes, see below title : string Title of the figure. Default is None Suggestion: use attribute "title" of data-product sub_title : string Sub-title of the figure. Default is None Suggestion: use attribute "comment" of data-product fig_info : FIGinfo, optional OrderedDict holding meta-data to be displayed in the figure The quality ranking labels are ['unusable', 'worst', 'bad', 'good'], in case nor reference dataset is provided. Where: - 'unusable' : pixels outside the illuminated region - 'worst' : 0 <= value < thres_worst - 'bad' : 0 <= value < thres_bad - 'good' : thres_bad <= value <= 1 Otherwise the labels for quality ranking indicate which pixels have changed w.r.t. reference. 
The labels are: - 'unusable' : pixels outside the illuminated region - 'worst' : from good or bad to worst - 'bad' : from good to bad - 'good' : from any rank to good - 'unchanged' : no change in rank The information provided in the parameter 'fig_info' will be displayed in a small box. Where creation date and statistics on the number of bad and worst pixels are displayed. """ if fig_info is None: fig_info = FIGinfo() qthres = {'worst': thres_worst, 'bad': thres_bad, 'compare': ref_data is not None} try: qdata = self.__set_fig_quality(qthres, data_in, ref_data) except Exception as exc: raise RuntimeError('invalid input-data provided') from exc # define colors, data-range cset = tol_cset('bright') if ref_data is None: if qlabels is None: qlabels = ("unusable", "worst", "bad", "good") else: if len(qlabels) != 4: raise TypeError('keyword qlabels requires four labels') # define colors for resp. unusable, worst, bad and good ctuple = (cset.grey, cset.red, cset.yellow, '#FFFFFF') bounds = [0, 1, 2, 4, 8] else: if qlabels is None: qlabels = ["unusable", "to worst", "good to bad ", "to good", "unchanged"] else: if len(qlabels) != 5: raise TypeError('keyword qlabels requires five labels') # define colors for resp. unusable, worst, bad, good and unchanged ctuple = (cset.grey, cset.red, cset.yellow, cset.green, '#FFFFFF') bounds = [0, 1, 2, 4, 8, 16] cmap = mpl.colors.ListedColormap(ctuple) norm = mpl.colors.BoundaryNorm(bounds, cmap.N) # inititalize figure fig, ax_fig = plt.subplots(figsize=self.__fig_sz_img(fig_info, qdata.shape)) if title is not None: ypos = 1 - 0.3 / fig.get_figheight() fig.suptitle(title, fontsize='x-large', position=(0.5, ypos), horizontalalignment='center') if sub_title is not None: ax_fig.set_title(sub_title, fontsize='large') # draw image coords = get_fig_coords(data_in) if extent is None: extent = [0, len(coords['X']['data']), 0, len(coords['Y']['data'])] ax_img = ax_fig.imshow(qdata, cmap=cmap, norm=norm, interpolation='none', origin='lower', aspect='equal', extent=extent) self.__add_copyright(ax_fig) # define ticks locations for X & Y valid for most detectors self.__adjust_tickmarks(ax_fig, coords) self.__divider = make_axes_locatable(ax_fig) self.__add_colorbar(ax_img, qlabels, bounds) if add_medians: self.__add_side_panels(ax_fig, qdata, coords, quality=qthres) else: ax_fig.set_xlabel(coords['X']['label']) ax_fig.set_ylabel(coords['Y']['label']) # add annotation and save figure if ref_data is None: fig_info.add('{} (quality < {})'.format(qlabels[2], thres_bad), np.sum((qdata == 1) | (qdata == 2))) fig_info.add('{} (quality < {})'.format(qlabels[1], thres_worst), np.sum(qdata == 1)) else: fig_info.add(qlabels[3], np.sum(qdata == 4)) fig_info.add(qlabels[2], np.sum(qdata == 2)) fig_info.add(qlabels[1], np.sum(qdata == 1)) self.__add_fig_box(fig, fig_info) self.__close_this_page(fig) # -------------------------------------------------- def draw_cmp_swir(self, data_in, ref_data, *, vperc=None, vrange=None, model_label='reference', add_residual=True, add_model=True, title=None, sub_title=None, extent=None, fig_info=None): """ Display signal vs model (or CKD) comparison in three panels. Top panel shows data, middle panel shows residuals (data - model) and lower panel shows model. Parameters ---------- data : numpy.ndarray or pys5p.S5Pmsm Object holding measurement data and attributes ref_data : numpy.ndarray Numpy array holding reference data. vperc : list Range to normalize luminance data between percentiles min and max of array data. Default is [1., 99.]. 
keyword 'vperc' is ignored when vrange is given vrange : list [vmin,vmax] Range to normalize luminance data between vmin and vmax. model_label : string Name of reference dataset. Default is 'reference' title : string Title of the figure. Default is None Suggestion: use attribute "title" of data-product sub_title : string Sub-title of the figure. Default is None Suggestion: use attribute "comment" of data-product fig_info : FIGinfo, optional OrderedDict holding meta-data to be displayed in the figure The information provided in the parameter 'fig_info' will be displayed in a small box. In addition, we display the creation date and the data median & spread. Note ---- Current implementation only works for images with aspect-ratio equals 4 (like Tropomi-SWIR). """ if not (add_residual or add_model): raise KeyError('add_resudual or add_model should be true') if fig_info is None: fig_info = FIGinfo() if vrange is None and vperc is None: vperc = (1., 99.) elif vrange is None: if len(vperc) != 2: raise TypeError('keyword vperc requires two values') else: if len(vrange) != 2: raise TypeError('keyword vrange requires two values') try: img_diff = self.__get_fig_data2d('diff', data_in, ref_data) except Exception as exc: raise RuntimeError('invalid input-data provided') from exc median, spread = biweight(img_diff, spread=True) rrange = (median - 5 * spread, median + 5 * spread) rnorm = self.__scale_data2d('diff', img_diff, None, rrange) try: img_data = self.__get_fig_data2d('data', data_in, None) except Exception as exc: raise RuntimeError('invalid input-data provided') from exc vnorm = self.__scale_data2d('data', img_data, vperc, vrange) # inititalize figure npanel = True + add_residual + add_model if npanel == 3: fig = plt.figure(figsize=(12, 10)) else: fig = plt.figure(figsize=(12, 7.5)) gspec = mpl.gridspec.GridSpec(npanel, 2) if title is not None: ypos = 1 - 0.3 / fig.get_figheight() fig.suptitle(title, fontsize='x-large', position=(0.5, ypos), horizontalalignment='center') coords = get_fig_coords(data_in) if extent is None: extent = [0, len(coords['X']['data']), 0, len(coords['Y']['data'])] # create image panel def draw_image(ipanel, data, sub_title, norm, extent): bullet = ['(a) ', '(b) ', '(c) ', None] ax_fig = plt.subplot(gspec[ipanel, :]) if sub_title is not None: ax_fig.set_title(bullet[ipanel] + sub_title, fontsize='large') method = 'diff' if sub_title == 'residual' else 'data' ax_img = ax_fig.imshow(data, cmap=self.get_cmap(method), interpolation='none', origin='lower', aspect='equal', extent=extent, norm=norm,) self.__add_copyright(ax_fig) # define ticks locations for X & Y valid for most detectors self.__adjust_tickmarks(ax_fig, coords) self.__divider = make_axes_locatable(ax_fig) if sub_title == 'residual': self.__add_colorbar(ax_img, self.__get_zlabel('diff')) else: self.__add_colorbar(ax_img, self.__get_zlabel('data')) if npanel == (ipanel + 1): ax_fig.set_xlabel(coords['X']['label']) else: for xtl in ax_fig.get_xticklabels(): xtl.set_visible(False) ax_fig.set_ylabel(coords['Y']['label']) ipanel = 0 draw_image(ipanel, img_data, sub_title, vnorm, extent) if add_residual: ipanel += 1 draw_image(ipanel, img_diff, 'residual', rnorm, extent) if add_model: ipanel += 1 draw_image(ipanel, ref_data, model_label, vnorm, extent) # add annotation and save figure median, spread = biweight(img_diff, spread=True) if self.zunit is None or self.zunit == '1': fig_info.add('median', median, '{:.5g}') fig_info.add('spread', spread, '{:.5g}') else: fig_info.add('median', (median, self.zunit), r'{:.5g} 
{}') fig_info.add('spread', (spread, self.zunit), r'{:.5g} {}') self.__add_fig_box(fig, fig_info) self.__close_this_page(fig) # -------------------------------------------------- def draw_trend1d(self, msm1, hk_data=None, msm2=None, *, hk_keys=None, title=None, sub_title=None, fig_info=None): """ Display trends of measurement and house-keeping data Parameters ---------- msm1 : pys5p.S5Pmsm, optional Object with measurement data and its HDF5 attributes (first figure) msm2 : pys5p.S5Pmsm, optional Object with measurement data and its HDF5 attributes (second figure) hk_data : pys5p.S5Pmsm, optional Object holding housekeeping data and its HDF5 attributes hk_keys : list or tuple List of housekeeping parameters to be displayed title : string Title of the figure. Default is None Suggestion: use attribute "title" of data-product sub_title : string Sub-title of the figure. Default is None Suggestion: use attribute "comment" of data-product fig_info : FIGinfo, optional OrderedDict holding meta-data to be displayed in the figure You have to provide a non-None value for parameter 'msm1' or 'hk_data'. Only house-keeping data will be shown when 'msm1' is None (parameter 'msm2' is ignored when 'msm1' equals None. The information provided in the parameter 'fig_info' will be displayed in a small box. """ # we require measurement data and/or house-keeping data if msm1 is None and hk_data is None: raise ValueError( 'measurement data and/or house-keeping data are required') if fig_info is None: fig_info = FIGinfo() # make sure that we use 'large' fonts in the small plots if self.__pdf is None: plt.rc('font', size=10) # define number of panels for measurement data if msm1 is None: plot_mode = 'house-keeping' (xlabel,) = hk_data.coords._fields if xlabel == 'time': xlabel += ' [hours]' npanels = 0 elif (msm1.value.dtype.names is not None and 'bad' in msm1.value.dtype.names): plot_mode = 'quality' (xlabel,) = msm1.coords._fields npanels = 2 else: plot_mode = 'data' (xlabel,) = msm1.coords._fields if msm2 is None: npanels = 1 else: npanels = 2 # add panels for housekeeping parameters if hk_data is not None: # default house-keeping parameters if hk_keys is None: if 'detector_temp' in hk_data.value.dtype.names: hk_keys = ('detector_temp', 'grating_temp', 'imager_temp', 'obm_temp') elif 'temp_det4' in hk_data.value.dtype.names: hk_keys = ('temp_det4', 'temp_obm_swir_grating') else: hk_keys = tuple(hk_data.value.dtype.names)[0:4] npanels += len(hk_keys) # initialize matplotlib using 'subplots' figsize = (10., 1 + (npanels + 1) * 1.5) fig, axarr = plt.subplots(npanels, sharex=True, figsize=figsize) if npanels == 1: axarr = [axarr] margin = min(1. 
            / (1.8 * (npanels + 1)), .25)
        fig.subplots_adjust(bottom=margin, top=1-margin, hspace=0.02)

        # draw titles (and put them at the same place)
        if title is not None:
            ypos = 1 - 0.3 / fig.get_figheight()
            fig.suptitle(title, fontsize='x-large',
                         position=(0.5, ypos), horizontalalignment='center')
        if sub_title is not None:
            axarr[0].set_title(sub_title, fontsize='large')

        # add figures with quality data or measurement data
        i_ax = self.__add_data1d(plot_mode, axarr, msm1, msm2)

        # add figures with house-keeping data
        if hk_data is not None:
            self.__add_hkdata(i_ax, axarr, hk_data, hk_keys)

        axarr[-1].set_xlabel(xlabel)
        if xlabel == 'time [hours]':
            minor_locator = MultipleLocator(1)
            major_locator = MultipleLocator(3)
            axarr[0].xaxis.set_major_locator(major_locator)
            axarr[0].xaxis.set_minor_locator(minor_locator)

        if npanels > 1:
            plt.setp([a.get_xticklabels() for a in fig.axes[:-1]],
                     visible=False)

        # add annotation and save figure
        self.__add_copyright(axarr[-1])
        self.__add_fig_box(fig, fig_info)
        self.__close_this_page(fig)

    # --------------------------------------------------
    def draw_lines(self, xdata, ydata, *, color=0,
                   xlabel=None, ylabel=None, xlim=None, ylim=None,
                   title=None, sub_title=None, fig_info=None, **kwargs):
        """
        Display multiple 1D-data sets sharing the same x-axis.

        Parameters
        ----------
        xdata : ndarray
            x-axis data.
            Special case: when xdata is None the figure is finalized and closed
        ydata : ndarray
            y-axis data
        color : integer, optional
            index to color in tol_colors.tol_cset('bright'). Default is zero
        title : string, optional
            Title of the figure. Default is None
            Suggestion: use attribute "title" of data-product
        sub_title : string, optional
            Sub-title of the figure. Default is None
            Suggestion: use attribute "comment" of data-product
        fig_info : FIGinfo, optional
            OrderedDict holding meta-data to be displayed in the figure
        **kwargs : other keywords
            Pass all other keyword arguments to matplotlib.pyplot.plot()

        Returns
        -------
        Nothing

        Examples
        --------
        General example:
        >>> plot = S5Pplot(fig_name)
        >>> for ii, (xx, yy) in enumerate(data_of_each_line):
        >>>    plot.draw_lines(xx, yy, color=ii, label=mylabel[ii],
        >>>                    marker='o', linestyle='None')
        >>> plot.draw_lines(None, None, xlim=[0, 0.5], ylim=[-10, 10],
        >>>                 xlabel=my_xlabel, ylabel=my_ylabel)
        >>> plot.close()

        Using a time-axis:
        >>> from datetime import datetime, timedelta
        >>> tt0 = (datetime(year=2020, month=10, day=1)
        >>>        + timedelta(seconds=sec_in_day))
        >>> tt = [tt0 + xx * t_step for xx in range(yy.size)]
        >>> plot = S5Pplot(fig_name)
        >>> plot.draw_lines(tt, yy, color=1, label=mylabel,
        >>>                 marker='o', linestyle='None')
        >>> plot.draw_lines(None, None, ylim=[-10, 10],
        >>>                 xlabel=my_xlabel, ylabel=my_ylabel)
        >>> plot.close()
        """
        # add annotation and close figure
        if xdata is None:
            if self.__mpl is None:
                raise ValueError('No plot defined and no data provided')
            if fig_info is None:
                fig_info = FIGinfo()

            # finalize figure
            if self.__mpl['time_axis']:
                plt.gcf().autofmt_xdate()
                my_fmt = mpl.dates.DateFormatter('%H:%M:%S')
                plt.gca().xaxis.set_major_formatter(my_fmt)

            self.__mpl['axarr'].grid(True)
            if xlabel is not None:
                self.__mpl['axarr'].set_xlabel(xlabel)
            if ylabel is not None:
                self.__mpl['axarr'].set_ylabel(ylabel)
            if xlim is not None:
                self.__mpl['axarr'].set_xlim(xlim)
            if ylim is not None:
                self.__mpl['axarr'].set_ylim(ylim)
            if 'xscale' in kwargs:
                self.__mpl['axarr'].set_xscale(kwargs['xscale'])
            if 'yscale' in kwargs:
                self.__mpl['axarr'].set_yscale(kwargs['yscale'])

            # draw titles (and put them at the same place)
            if title is not None:
                ypos = 1 - 0.3 / self.__mpl['fig'].get_figheight()
                self.__mpl['fig'].suptitle(title, fontsize='x-large',
                                           position=(0.5, ypos),
                                           horizontalalignment='center')
            if sub_title is not None:
                self.__mpl['axarr'].set_title(sub_title, fontsize='large')

            # draw legenda in figure
            if self.__mpl['axarr'].get_legend_handles_labels()[1]:
                self.__mpl['axarr'].legend(fontsize='small', loc='best')

            # draw copyright
            self.__add_copyright(self.__mpl['axarr'])

            # close page
            self.__add_fig_box(self.__mpl['fig'], fig_info)
            self.__close_this_page(self.__mpl['fig'])
            self.__mpl = None
            return

        # define colors
        cset = tol_cset('bright')
        c_max = len(cset)

        # initialize matplotlib using 'subplots'
        if self.__mpl is None:
            if len(xdata) <= 256:
                figsize = (8, 8)
            elif len(xdata) <= 512:
                figsize = (10, 8)
            elif len(xdata) <= 768:
                figsize = (12, 8)
            else:
                figsize = (14, 8)

            self.__mpl = dict(zip(('fig', 'axarr'),
                                  plt.subplots(1, figsize=figsize)))
            if isinstance(xdata[0], datetime):
                self.__mpl['time_axis'] = True
            else:
                self.__mpl['time_axis'] = False

        use_steps = False
        if use_steps:
            xx = np.append(xdata, xdata[-1])
            yy = np.append(ydata, ydata[-1])
            self.__mpl['axarr'].step(xx, yy, where='post',
                                     color=cset[color % c_max], **kwargs)
        else:
            self.__mpl['axarr'].plot(xdata, ydata,
                                     color=cset[color % c_max], **kwargs)

    # --------------------------------------------------
    def draw_qhist(self, data_dict, *, title=None, density=True,
                   fig_info=None):
        """
        Display pixel-quality data as histograms.

        Parameters
        ----------
        data_dict : dict
            Dictionary containing pixel quality and its submasks
        title : string
            Title of the figure. Default is None
            Suggestion: use attribute "title" of data-product
        density : bool
            If True, draw and return a probability density: each bin will
            display the bin's raw count divided by the total number of counts
            and the bin width (see matplotlib.pyplot.hist). Default is True
        fig_info : FIGinfo
            OrderedDict holding meta-data to be displayed in the figure

        The information provided in the parameter 'fig_info' will be displayed
        in a small box. In addition, we display the creation date, signal
        median & spread and error median & spread.
        """
        if fig_info is None:
            fig_info = FIGinfo()

        # define colors
        cset = tol_cset('bright')

        # initialize matplotlib using 'subplots'
        npanels = len(data_dict)
        figsize = (10., 1 + (npanels + 1) * 1.65)
        fig, axarr = plt.subplots(npanels, sharex=True, figsize=figsize)
        if npanels == 1:
            axarr = [axarr]
        margin = min(1. / (1.8 * (npanels + 1)), .25)
        fig.subplots_adjust(bottom=margin, top=1-margin, hspace=0.02)

        # draw titles (and put them at the same place)
        if title is not None:
            ypos = 1 - 0.3 / fig.get_figheight()
            fig.suptitle(title, fontsize='x-large',
                         position=(0.5, ypos), horizontalalignment='center')
        axarr[0].set_title('Histograms of pixel-quality',
                           fontsize='large')

        # draw histograms
        for ii, key in enumerate(data_dict):
            try:
                check_data2d('quality', data_dict[key])
            except Exception as exc:
                raise RuntimeError('invalid input-data provided') from exc

            if isinstance(data_dict[key], np.ndarray):
                data = data_dict[key][swir_region.mask()]
                long_name = key
            else:
                data = data_dict[key].value[swir_region.mask()]
                long_name = data_dict[key].long_name
            data[np.isnan(data)] = 0.
axarr[ii].hist(data, bins=11, range=[-.1, 1.], histtype='stepfilled', log=True, density=density, color=cset.blue) # axarr[ii].set_yscale('log', nonpositive='clip') axarr[ii].set_xlim([0, 1]) axarr[ii].set_ylabel('density') axarr[ii].set_ylim([1e-4, 10]) axarr[ii].set_yticks([1e-4, 1e-3, 1e-2, 1e-1, 1]) axarr[ii].grid(which='major', color='#BBBBBB', lw=0.75, ls=(0, (1, 5))) legenda = axarr[ii].legend([blank_legend_key()], [long_name], loc='upper left') legenda.draw_frame(False) axarr[-1].set_xlabel('pixel quality') # add annotation and save figure self.__add_copyright(axarr[-1]) self.__add_fig_box(fig, fig_info) self.__close_this_page(fig) # -------------------------------------------------- def draw_tracks(self, lons, lats, icids, *, saa_region=None, title=None, fig_info=None): """ Display tracks of S5P on a world map using a Robinson projection Parameters ---------- lats : ndarray [N, 2] Latitude coordinates at start and end of measurement lons : ndarray [N, 2] Longitude coordinates at start and end of measurement icids : ndarray [N] ICID of measurements per (lon, lat) saa_region : 'ckd' or ndarray Show SAA region. Its definition obtained from Tropomi Level-1B CKD, or as a matplotlib polygon patch. Default None title : string Title of the figure. Default is None Suggestion: use attribute "title" of data-product fig_info : FIGinfo, optional OrderedDict holding meta-data to be displayed in the figure The information provided in the parameter 'fig_info' will be displayed in a small box. """ if not FOUND_CARTOPY: raise RuntimeError('You need Cartopy to run this function') if fig_info is None: fig_info = FIGinfo() # define colors cset = tol_cset('bright') # define plot layout myproj = ccrs.Robinson(central_longitude=11.5) fig, axx = plt.subplots(figsize=(12.85, 6), subplot_kw={'projection': myproj}) axx.set_global() axx.coastlines(resolution='110m') axx.gridlines() axx.set_title('ground-tracks of Sentinel-5P') if title is not None: ypos = 1 - 0.3 / fig.get_figheight() fig.suptitle(title, fontsize='x-large', position=(0.5, ypos), horizontalalignment='center') # draw SAA region if saa_region is not None: if saa_region in ('ckd', 'CKD'): with CKDio() as ckd: res = ckd.saa() saa_region = list(zip(res['lon'], res['lat'])) saa_poly = mpl.patches.Polygon( xy=saa_region, closed=True, alpha=1.0, facecolor=cset.grey, transform=ccrs.PlateCarree()) axx.add_patch(saa_poly) # draw satellite position(s) icid_found = [] for lon, lat, icid in zip(lons, lats, icids): if icid not in icid_found: indx_color = len(icid_found) else: indx_color = icid_found.index(icid) line, = plt.plot(lon, lat, linestyle='-', linewidth=3, color=cset[indx_color % 6], transform=ccrs.PlateCarree()) if icid not in icid_found: line.set_label('ICID: {}'.format(icid)) icid_found.append(icid) # finalize figure axx.legend(loc='lower left') self.__add_copyright(axx) self.__add_fig_box(fig, fig_info) self.__close_this_page(fig)
bsd-3-clause
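A short, hedged usage sketch for the plotting class in the record above: the draw_signal and draw_quality docstrings describe the reporting API, but only draw_lines ships a usage example. The sketch assumes that S5Pplot and FIGinfo from that file are already in scope (no import path is claimed) and that a 256 x 1000 frame passes the package's check_data2d validation; the array is random test data, not real detector output.

# Hedged usage sketch for the S5Pplot reporting class shown above.
# Assumptions (not stated in the record): S5Pplot and FIGinfo are in scope,
# and a 256 x 1000 frame is accepted by check_data2d / get_fig_coords.
import numpy as np

rng = np.random.default_rng(12345)
frame = rng.normal(loc=1000., scale=25., size=(256, 1000))  # fake detector frame

plot = S5Pplot('demo_report.pdf')          # constructor call copied from the docstring examples
plot.draw_signal(frame, method='data',
                 vperc=[1., 99.],          # normalize between 1st and 99th percentile
                 title='demo product',
                 sub_title='random test frame',
                 fig_info=FIGinfo())
plot.close()                               # close() as used in the docstring examples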
idaholab/raven
tests/framework/Samplers/Categorical/StringVars/proj_second.py
2
1667
import numpy as np

def run(self, Input):
  """ Entry point called by RAVEN: computes projectile height over time for the
      sampled initial speed (v0), initial height (y0) and solver mode. """
  v0 = self.v0
  y0 = self.y0
  ang = 45.*np.pi/180.
  times = np.linspace(0, 2, 5)
  mode = self.mode
  if mode == 'stepper':
    y = stepper(v0, y0, ang, times)
  elif mode == 'analytic':
    y = analytic(v0, y0, ang, times)
  else:
    raise IOError('Unrecognized mode:', mode)
  self.y = np.atleast_1d(y)
  self.t = np.atleast_1d(times)
  self.restartID = np.array([2]*len(times))

def analytic(v0, y0, ang, times):
  """ Closed-form height; v0 and y0 are 1-element sequences. """
  ys = []
  # initial y velocity
  vy0 = v0[0] * np.sin(ang)
  for t in times:
    # calculate analytic height
    y = y0[0] + vy0*t - 0.5*9.8*t*t
    # calculate analytic velocity magnitude (not used further)
    v = np.sqrt(v0[0]*v0[0] + 2.0*(9.8)*(y0[0]-y))
    ys.append(y)
  return ys

def stepper(v0, y0, ang, times):
  """ Explicit time-stepping of the same trajectory; v0 and y0 are 1-element
      sequences, as in analytic(). """
  # initial x position
  x = 0.0
  y = y0[0]
  # initial x,y velocity
  vx = v0[0] * np.cos(ang)
  vy = v0[0] * np.sin(ang)
  dt = times[1] - times[0]
  # tracker
  ys = []
  for _ in times:
    # store current values
    #v = np.sqrt(vx*vx + vy*vy)
    ys.append(y)
    # update velocity
    vx = vx
    vy = vy - 9.8*dt
    # update position
    x = x + vx*dt
    y = y + vy*dt
  return ys

class data:
  """ Minimal stand-in for the object handed to run() when testing locally. """
  def __init__(self, v0, y0, mode):
    self.v0 = v0
    self.y0 = y0
    self.mode = mode
    self.y = None
    self.t = None

if __name__ == '__main__':
  import matplotlib.pyplot as plt
  # initialize (1-element sequences, matching what analytic/stepper expect)
  y0 = [1.0]
  v0 = [15.0]
  # test
  rdata = {}
  for mode in ['stepper', 'analytic']:
    # set up input class
    dat = data(v0, y0, mode)
    rdata[mode] = dat
    # run
    run(dat, None)
    # plot
    plt.plot(dat.t, dat.y, '-o', label=mode)
  plt.legend(loc=0)
  plt.xlabel('time')
  plt.ylabel('height')
  plt.show()
apache-2.0
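The projectile test model above compares an explicit time-stepper against the closed-form solution. The standalone sketch below (nothing is imported from that file; all names are local and illustrative) checks the expected first-order convergence of this kind of stepping scheme as the time step shrinks.

# Standalone convergence check, mirroring the stepper-vs-analytic comparison
# above. Nothing is imported from the record; all names here are illustrative.
import numpy as np

G = 9.8                  # gravitational acceleration used in the file above
ANG = np.radians(45.)    # launch angle used in the file above

def height_analytic(v0, y0, t):
    # closed-form height: y(t) = y0 + v0*sin(ang)*t - 0.5*g*t^2
    return y0 + v0 * np.sin(ANG) * t - 0.5 * G * t * t

def height_stepped(v0, y0, t_end, nsteps):
    # simple Euler march of the vertical motion
    # (velocity updated before position, as in stepper() above)
    dt = t_end / nsteps
    y, vy = y0, v0 * np.sin(ANG)
    for _ in range(nsteps):
        vy -= G * dt
        y += vy * dt
    return y

v0, y0, t_end = 15.0, 1.0, 2.0
exact = height_analytic(v0, y0, t_end)
for nsteps in (4, 8, 16, 32):
    err = abs(height_stepped(v0, y0, t_end, nsteps) - exact)
    print('nsteps = %3d   |error| = %.4f' % (nsteps, err))
# The error halves each time nsteps doubles: the scheme is first order in dt.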
platinhom/ManualHom
Coding/Python/scipy-html-0.16.1/tutorial/examples/normdiscr_plot1.py
84
1547
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

npoints = 20   # number of integer support points of the distribution minus 1
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4   # bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound   # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1)   # integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound   # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid

normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')

n_sample = 500
np.random.seed(87655678)   # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f, l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:, 1] / float(n_sample)
ft = sfreq[:, 2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))

ind = gridint   # the x locations for the groups
width = 0.35    # the width of the bars

plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
                    color='b')

plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
gpl-2.0
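The script in the record above only plots sample frequencies against the discretized-normal probabilities; a numerical goodness-of-fit check is a natural follow-up. The sketch below assumes the variables f (observed counts per grid point) and probs (bin probabilities) from that script are still in scope; the pooling threshold of 5 expected counts is a common rule of thumb, not something taken from the record.

# Hedged follow-up: chi-square goodness-of-fit of the sample against the
# discretized normal. Assumes `f` and `probs` from the script above are in scope.
import numpy as np
from scipy import stats

f_obs = np.asarray(f, dtype=float)
f_exp = probs / probs.sum() * f_obs.sum()   # expected counts, rescaled to the sample size

# pool sparsely populated tail bins so every expected count is reasonably large
keep = f_exp >= 5
if (~keep).any():
    f_obs_pooled = np.append(f_obs[keep], f_obs[~keep].sum())
    f_exp_pooled = np.append(f_exp[keep], f_exp[~keep].sum())
else:
    f_obs_pooled, f_exp_pooled = f_obs, f_exp

chi2, pval = stats.chisquare(f_obs_pooled, f_exp_pooled)
print('chisquare = %6.2f   p-value = %6.4f' % (chi2, pval))
# a large p-value means the sample is consistent with the discretized normal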