| column | type | lengths |
|---|---|---|
| prompt | string | 19–1.03M |
| completion | string | 4–2.12k |
| api | string | 8–90 |
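The rows that follow are raw prompt texts (Python sources). A minimal sketch of loading a split with this three-column schema, assuming it is stored as a parquet file named `train.parquet` (a hypothetical file name, not given here):

import pandas as pd

df = pd.read_parquet("train.parquet")            # columns: prompt, completion, api
row = df.iloc[0]
print(row["api"])                                # e.g. "pandas.Series"
print(row["prompt"][-120:], row["completion"])   # the prompt ends where the completion begins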
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
        # Control case: non-business is month/qtr end
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
            with tm.set_locale(time_locale, locale.LC_TIME):
class pydb():
def __init__(self,seed=None):
"""
        Initializes the class and creates a Faker() object for later data generation by other methods
seed: User can set a seed parameter to generate deterministic, non-random output
"""
from faker import Faker
import pandas as pd
from random import randint,choice
self.fake=Faker()
self.seed=seed
self.randnum=randint(1,9)
def simple_ph_num(self,seed=None):
"""
Generates 10 digit US phone number in xxx-xxx-xxxx format
seed: Currently not used. Uses seed from the pydb class if chosen by user
"""
import random
from random import randint,choice
random.seed(self.seed)
result = str(randint(1,9))
for _ in range(2):
result+=str(randint(0,9))
result+='-'
for _ in range(3):
result+=str(randint(0,9))
result+='-'
for _ in range(4):
result+=str(randint(0,9))
return result
def license_plate(self,seed=None,style=None):
"""
Generates vehicle license plate number in 3 possible styles
Style can be 1, 2, or 3.
- 9ABC123 format
- ABC-1234 format
- ABC-123 format
If style is not specified by user, a random style is chosen at runtime
seed: Currently not used. Uses seed from the pydb class if chosen by user
"""
import random
from random import randint,choice
random.seed(self.seed)
if style==None:
style = choice([1,2,3])
if style==1:
result = str(randint(1,9))
for _ in range(3):
result+=chr(randint(65,90))
for _ in range(3):
result+=str(randint(1,9))
return result
elif style==2:
result=''
for _ in range(3):
result+=chr(randint(65,90))
result+='-'
for _ in range(4):
result+=str(randint(0,9))
return result
else:
result=''
for _ in range(3):
result+=chr(randint(65,90))
result+='-'
for _ in range(3):
result+=str(randint(0,9))
return result
def realistic_email(self,name,seed=None):
'''
Generates realistic email from first and last name and a random domain address
seed: Currently not used. Uses seed from the pydb class if chosen by user
'''
import random
import os
from random import randint,choice
random.seed(self.seed)
name=str(name)
result=''
f_name = name.split()[0]
l_name = name.split()[-1]
choice_int = choice(range(10))
dir_path = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(dir_path, "Domains.txt")
domain_list = []
fh = open(path)
for line in fh.readlines():
domain_list.append(str(line).strip())
domain = choice(domain_list)
fh.close()
name_choice = choice(range(8))
if name_choice==0:
name_combo=f_name[0]+l_name
elif name_choice==1:
name_combo=f_name+l_name
elif name_choice==2:
name_combo=f_name+'.'+l_name[0]
elif name_choice==3:
name_combo=f_name+'_'+l_name[0]
elif name_choice==4:
name_combo=f_name+'.'+l_name
elif name_choice==5:
name_combo=f_name+'_'+l_name
elif name_choice==6:
name_combo=l_name+'_'+f_name
elif name_choice==7:
name_combo=l_name+'.'+f_name
if (choice_int<7):
result+=name_combo+'@'+str(domain)
else:
random_int = randint(11,99)
result+=name_combo+str(random_int)+'@'+str(domain)
return result
def city_real(self,seed=None):
'''
Picks and returns a random entry out of 385 US cities
seed: Currently not used. Uses seed from the pydb class if chosen by user
'''
import os
from six import moves
import ssl
import random
from random import randint,choice
random.seed(self.seed)
path = "US_Cities.txt"
if not os.path.isfile(path):
context = ssl._create_unverified_context()
moves.urllib.request.urlretrieve("https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/US_Cities.txt", path)
city_list = []
fh = open(path)
for line in fh.readlines():
city_list.append(str(line).strip())
fh.close()
return (choice(city_list))
def gen_data_series(self,num=10,data_type='name',seed=None):
"""
Returns a pandas series object with the desired number of entries and data type
Data types available:
- Name, country, city, real (US) cities, US state, zipcode, latitude, longitude
- Month, weekday, year, time, date
- Personal email, official email, SSN
- Company, Job title, phone number, license plate
Phone number can be two types:
'phone_number_simple' generates 10 digit US number in xxx-xxx-xxxx format
'phone_number_full' may generate an international number with different format
seed: Currently not used. Uses seed from the pydb class if chosen by user
"""
if type(data_type)!=str:
print("Data type not understood. No series generated")
return None
try:
num=int(num)
except:
print('Number of samples not understood, terminating...')
return None
if num<=0:
print("Please input a positive integer for the number of examples")
return None
else:
import pandas as pd
num=int(num)
fake=self.fake
fake.seed(self.seed)
lst = []
# Name, country, city, real (US) cities, US state, zipcode, latitude, longitude
if data_type=='name':
for _ in range(num):
lst.append(fake.name())
return pd.Series(lst)
if data_type=='country':
for _ in range(num):
lst.append(fake.country())
return pd.Series(lst)
if data_type=='street_address':
for _ in range(num):
lst.append(fake.street_address())
return pd.Series(lst)
if data_type=='city':
for _ in range(num):
lst.append(fake.city())
return pd.Series(lst)
if data_type=='real_city':
for _ in range(num):
lst.append(self.city_real())
return pd.Series(lst)
if data_type=='state':
for _ in range(num):
lst.append(fake.state())
return pd.Series(lst)
if data_type=='zipcode':
for _ in range(num):
lst.append(fake.zipcode())
return pd.Series(lst)
if data_type=='latitude':
for _ in range(num):
lst.append(fake.latitude())
return pd.Series(lst)
if data_type=='longitude':
for _ in range(num):
lst.append(fake.longitude())
return pd.Series(lst)
# Month, weekday, year, time, date
if data_type=='name_month':
for _ in range(num):
lst.append(fake.month_name())
return pd.Series(lst)
if data_type=='weekday':
for _ in range(num):
lst.append(fake.day_of_week())
return pd.Series(lst)
if data_type=='year':
for _ in range(num):
lst.append(fake.year())
return pd.Series(lst)
if data_type=='time':
for _ in range(num):
lst.append(fake.time())
return pd.Series(lst)
if data_type=='date':
for _ in range(num):
lst.append(fake.date())
return pd.Series(lst)
# SSN
if data_type=='ssn':
for _ in range(num):
lst.append(fake.ssn())
return pd.Series(lst)
# Personal, official email
if data_type=='email':
for _ in range(num):
lst.append(fake.email())
return pd.Series(lst)
if data_type=='office_email':
for _ in range(num):
lst.append(fake.company_email())
return pd.Series(lst)
# Company, Job title
if data_type=='company':
for _ in range(num):
lst.append(fake.company())
return pd.Series(lst)
if data_type=='job_title':
for _ in range(num):
lst.append(fake.job())
return pd.Series(lst)
# Phone number, license plate (3 styles)
if data_type=='phone_number_simple':
for _ in range(num):
lst.append(self.simple_ph_num())
                return pd.Series(lst)
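# --- Hedged usage sketch (added for illustration; not part of the original module).
# The seed value and requested data types below are arbitrary examples.
if __name__ == '__main__':
    db = pydb(seed=42)
    print(db.simple_ph_num())                           # e.g. '555-012-3456'
    print(db.license_plate(style=2))                    # e.g. 'QWE-1234'
    print(db.gen_data_series(num=5, data_type='name'))  # pandas Series of 5 fake names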
from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
        target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
"""Age prediction using MRI, fMRI and MEG data."""
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pandas as pd
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from joblib import Memory
from camcan.processing import map_tangent
##############################################################################
# Paths
DRAGO_PATH = '/storage/inria/agramfor/camcan_derivatives'
OLEH_PATH = '/storage/tompouce/okozynet/projects/camcan_analysis/data'
PANDAS_OUT_FILE = './data/age_prediction_exp_data_denis_{}-rep.h5'
STRUCTURAL_DATA = f'{OLEH_PATH}/structural/structural_data.h5'
CONNECT_DATA_CORR = f'{OLEH_PATH}/connectivity/connect_data_correlation.h5'
CONNECT_DATA_TAN = f'{OLEH_PATH}/connectivity/connect_data_tangent.h5'
MEG_EXTRA_DATA = './data/meg_extra_data.h5'
MEG_PEAKS = './data/evoked_peaks.csv'
MEG_PEAKS2 = './data/evoked_peaks_task_audvis.csv'
##############################################################################
# Control parameters
# common subjects 574
N_REPEATS = 10
N_JOBS = 10
N_THREADS = 6
REDUCE_TO_COMMON_SUBJECTS = False
memory = Memory(location=DRAGO_PATH)
##############################################################################
# MEG features
#
# 1. Marginal Power
# 2. Cross-Power
# 3. Envelope Power
# 4. Envelope Cross-Power
# 5. Envelope Connectivity
# 6. Envelope Orthogonalized Connectivity
# 7. 1/f
# 8. Alpha peak
# 9. ERF delay
FREQ_BANDS = ('alpha',
'beta_high',
'beta_low',
'delta',
'gamma_high',
'gamma_lo',
'gamma_mid',
'low',
'theta')
meg_source_types = (
'mne_power_diag',
'mne_power_cross',
'mne_envelope_diag',
'mne_envelope_cross',
'mne_envelope_corr',
'mne_envelope_corr_orth'
)
def vec_to_sym(data, n_rows, skip_diag=True):
"""Put vector back in matrix form"""
if skip_diag:
k = 1
        # This is usually the case: we write the diagonal info explicitly in a
        # second step and only store the upper triangle, hence all files have
        # equal size.
else:
k = 0
C = np.zeros((n_rows, n_rows), dtype=np.float64)
C[np.triu_indices(n=n_rows, k=k)] = data
C += C.T
if not skip_diag:
C.flat[::n_rows + 1] = np.diag(C) / 2.
return C
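# Hedged example (added for illustration, not in the original script): round trip
# of vec_to_sym for a 3 x 3 matrix whose upper triangle (k=1) was stored as a vector.
def _vec_to_sym_example():
    upper = np.array([0.1, 0.2, 0.3])   # entries (0, 1), (0, 2), (1, 2)
    C = vec_to_sym(upper, n_rows=3)     # symmetric, zero diagonal
    # the diagonal is written back in a second step, e.g. by make_covs below
    assert np.allclose(C, C.T)
    return C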
def make_covs(diag, data, n_labels):
if not np.isscalar(diag):
assert np.all(diag.index == data.index)
covs = np.empty(shape=(len(data), n_labels, n_labels))
for ii, this_cross in enumerate(data.values):
C = vec_to_sym(this_cross, n_labels)
if np.isscalar(diag):
this_diag = diag
else:
this_diag = diag.values[ii]
C.flat[::n_labels + 1] = this_diag
covs[ii] = C
return covs
@memory.cache
def read_meg_rest_data(kind, band, n_labels=448):
"""Read the resting state data (600 subjects)
    Read connectivity outputs and do some additional
preprocessing.
Parameters
----------
kind : str
The type of MEG feature.
band : str
The frequency band.
    n_labels : int
The number of ROIs in source space.
"""
if kind == 'mne_power_diag':
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_source_power_diag-{band}.h5'),
key=kind)
elif kind == 'mne_power_cross':
# We need the diagonal powers to do tangent mapping.
# but then we will discard it.
diag = read_meg_rest_data(kind='mne_power_diag', band=band)
        # undo the log10
diag = diag.transform(lambda x: 10 ** x)
index = diag.index.copy()
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_source_power_cross-{band}.h5'),
key=kind)
covs = make_covs(diag, data, n_labels)
data = map_tangent(covs, diag=True)
data = pd.DataFrame(data=data, index=index)
if kind == 'mne_envelope_diag':
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_envelopes_diag_{band}.h5'),
key=kind)
elif kind == 'mne_envelope_cross':
# We need the diagonal powers to do tangent mapping.
# but then we will discard it.
diag = read_meg_rest_data(kind='mne_envelope_diag', band=band)
        # undo the log10
diag = diag.transform(lambda x: 10 ** x)
index = diag.index.copy()
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_envelopes_cross_{band}.h5'),
key=kind)
covs = make_covs(diag, data, n_labels)
data = map_tangent(covs, diag=True)
        data = pd.DataFrame(data=data, index=index)
import sys
sys.path.append('~/combs/src/')
import combs
import pandas as pd
ifg_dict = {'ASN': 'CB CG OD1 ND2'}
csv_path = 'path_to_asn_comb_csv_file'
an = combs.analyze.Analyze(csv_path)
dist_vdms = an.get_distant_vdms(7)
dist_vdms_hbond = pd.merge(dist_vdms, an.ifg_hbond_vdm, on=['iFG_count', 'vdM_count'])
# -*- coding: utf-8 -*-
"""
Creates textual features from an input paragraph
"""
# Load Packages
import textstat
from sklearn.preprocessing import label_binarize
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import pkg_resources
import ast
import spacy
#from collections import Counter
from pyphen import Pyphen
import pickle
#import xgboost
# load the language model from spaCy; this must be downloaded separately
nlp = spacy.load('en_core_web_md')
pyphen_dic = Pyphen(lang='en')
# set word lists to be used
## This corpus comes from the Cambridge English Corpus of spoken English and includes
## all the NGSL and SUP words needed to get 90% coverage.
NGSL_wordlist = set([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/NGSL_wordlist.txt')
])
## The Business Service List 1.0, also known as the BSL (<NAME>. & Culligan, B., 2016) is a list of approximately 1700 words
## that occur with very high frequency within the domain of general business English. Based on a 64.5 million word corpus of business
## texts, newspapers, journals and websites, the BSL 1.0 version gives approximately 97% coverage of general business English materials
## when learned in combination with the 2800 words of core general English in the New General Service List or NGSL (<NAME>., Culligan, B., and Phillips, J. 2013)
BSL_wordlist = set([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/BSL_wordlist.txt')
])
## New Academic Word List (NAWL): The NAWL is based on a carefully selected academic corpus of 288 million words.
NAWL_wordlist = set([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/NAWL_wordlist.txt')
])
## Load tf_idf score list
idf_list = list([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/dict_idf.txt')
])
idf_dict = ast.literal_eval(idf_list[0])
## Load the BOFIR model
with pkg_resources.resource_stream('financial_readability', 'models/bofir_model_5c.pickle.dat') as f:
bofir_model_5c = pickle.load(f)
with pkg_resources.resource_stream('financial_readability', 'models/bofir_model_3c.pickle.dat') as f:
bofir_model_3c = pickle.load(f)
#bofir_model_5c = pickle.load(open("bofir_model_5c.pickle.dat", "rb"))
#bofir_model_3c = pickle.load(open("bofir_model_3c.pickle.dat", "rb"))
#%%
# Create Features
class readability:
"""
Creates various text features for a paragraph
Methods
-------
syl_count(word=None)
Counts the number of syllables for a given word
linguistic_features(as_dict = False)
Returns the tokens and their linguistic features based on the spacy doc container
pos_onehot():
Creates the POS tag per token in a one-hot encoding.
dep_onehot():
Creates the dependency tag per token in a one-hot encoding.
wordlist_features(as_dict=False):
Creates word features based on word lists and the calculated tf-idf scores.
other_features(as_dict=False):
Returns dummies for the remaining spacy word features
classic_features(as_dict=False):
Returns the classic word features
check_word_list(token, word_list='NGSL'):
Function to check if token exists in specific word list.
check_tfidf(token):
Function to check if token exists in tf_idf list and return idf score.
tree_features(as_dict=False):
Function to create the tree based features.
semantic_features(as_dict=False):
Function to calculate the cumulative explained variance for PCA on the word embeddings.
word_features(embeddings = False):
Combines the featuresets to a Dataframe with all the 88 word-features.
paragraph_features(embed = False, as_dict = False):
Create the feature set over the total paragraph based on the
features estimated per word.
bofir(cat5 = True):
Use the paragraph features to calculate the BOFIR score for a
given paragraph.
readability_measures(as_dict = False):
Return the BOFIR score as well as other classic readability formulas for the paragraph.
"""
def __init__(self, paragraph):
self.paragraph = paragraph
# create the standard readability measures
self.flesch = textstat.flesch_reading_ease(self.paragraph)
# create a spacy doc container
self.doc = nlp(paragraph)
# Spacy text variables
self.token = [token.text for token in self.doc]
self.sent = [sentence.text for sentence in self.doc.sents]
self.lenght = [len(token.text) for token in self.doc]
self.lemma = [token.lemma_ for token in self.doc]
self.pos = [token.pos_ for token in self.doc]
self.tag = [token.tag_ for token in self.doc]
self.dep = [token.dep_ for token in self.doc]
self.like_email = [token.like_email for token in self.doc]
self.like_url = [token.like_url for token in self.doc]
self.is_alpha = [token.is_alpha for token in self.doc]
self.is_stop = [token.is_stop for token in self.doc]
self.ent_type = [token.ent_type_ for token in self.doc]
self.ent_pos = [token.ent_iob_ for token in self.doc]
self.word_vectors = [token.vector for token in self.doc]
self.vector_norm = [token.vector_norm for token in self.doc]
self.is_oov = [token.is_oov for token in self.doc]
# lexical chain - dependencies of words:
self.subtree_lenght = [len(list(token.subtree)) for token in self.doc]
self.n_left = [len(list(token.lefts)) for token in self.doc]
self.n_right = [len(list(token.rights)) for token in self.doc]
self.ancestors = [len(list(token.ancestors)) for token in self.doc]
self.children = [len(list(token.children)) for token in self.doc]
# count syllables per token
self.syllables = [self.syl_count(token.text) for token in self.doc]
# number of sentences and tokens
self.n_sentences = len(self.sent)
self.n_tokens = len(self.token)
def syl_count(self, word):
"""
Counts the number of syllables for a given word
Parameters
----------
word : str
The token to be analyzed
Returns
-------
count: integer
The number of syllables
"""
count = 0
split_word = pyphen_dic.inserted(word.lower())
count += max(1, split_word.count("-") + 1)
return count
def linguistic_features(self, as_dict = False):
"""
Function that returns the tokens and their linguistic features based on the spacy doc container
doc: spacy doc input
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables:
-------
Text: The original word text.
Lemma: The base form of the word.
POS: The simple part-of-speech tag.
Tag: The detailed part-of-speech tag.
Dep: Syntactic dependency, i.e. the relation between tokens.
like_email: Does the token resemble an email address?
is_alpha: Does the token consist of alphabetic characters?
is stop: Is the token part of a stop list, i.e. the most common words of the language?
ent_type: Named entity type
ent_pos: IOB code of named entity tag.
vector_norm: The L2 norm of the token’s vector (the square root of
the sum of the values squared)
is_oov: Out-of-vocabulary
lexical chain variables determine the dependency tree:
subtree_lenght: total number of suptrees
n_left: number of connections left
n_left: number of connections right
ancestors: number of nodes above
children: number of nodes below
syllables: number of syllables (only for words found in the dictionary)
"""
d = {'token':self.token,'lenght':self.lenght,'lemma':self.lemma,
'pos':self.pos,'tag':self.tag,
'dep':self.dep,'like_email':self.like_email,'like_url':self.like_url,
'stop':self.is_stop, 'alpha':self.is_alpha,
'ent_type':self.ent_type,'ent_pos':self.ent_pos,
'vector_norm':self.vector_norm,'oov':self.is_oov,
'subtree_lenght':self.subtree_lenght, 'n_left':self.n_left,
'n_right':self.n_right,'ancestors':self.ancestors,
'children':self.children,'syllables': self.syllables}
if as_dict:
return d
else:
return pd.DataFrame(d)
def pos_onehot(self):
"""
Creates the POS tag per token in a one-hot encoding. (To be agregated
over the paragraph or used as input into a RNN.)
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables:
-------
ADJ adjective
ADP adposition
ADV adverb
AUX auxiliary
CONJ conjunction
CCONJ coordinating conjunction
DET determiner
INTJ interjection
NOUN noun
NUM numeral
PART particle
PRON pronoun
PROPN proper noun
PUNCT punctuation
SCONJ subordinating conjunction
SYM symbol
VERB verb
X other
SPACE space
"""
pos_tags_classes = ['ADJ', 'ADP', 'ADV','AUX', 'CONJ', 'CCONJ', 'DET',
'INTJ', 'JJS', 'NOUN', 'NUM', 'PART',
'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB',
'X', 'SPACE']
pos_tag_data = self.pos
# one hot encoding of the different POS tags
x = label_binarize(pos_tag_data, classes=pos_tags_classes)
output = pd.DataFrame(x, columns=pos_tags_classes)
return output
def dep_onehot(self):
"""
Creates the dependency tag per token in a one-hot encoding.
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables:
-------
acl clausal modifier of noun (adjectival clause)
acomp adjectival complement
advcl adverbial clause modifier
advmod adverbial modifier
agent agent
amod adjectival modifier
appos appositional modifier
attr attribute
aux auxiliary
auxpass auxiliary (passive)
case case marking
cc coordinating conjunction
ccomp clausal complement
compound compound
conj conjunct
cop copula
csubj clausal subject
csubjpass clausal subject (passive)
dative dative
dep unclassified dependent
det determiner
dobj direct object
expl expletive
intj interjection
mark marker
meta meta modifier
neg negation modifier
nn noun compound modifier
nounmod modifier of nominal
npmod noun phrase as adverbial modifier
nsubj nominal subject
nsubjpass nominal subject (passive)
nummod numeric modifier
oprd object predicate
obj object
obl oblique nominal
parataxis parataxis
pcomp complement of preposition
pobj object of preposition
poss possession modifier
preconj pre-correlative conjunction
prep prepositional modifier
prt particle
punct punctuation
quantmod modifier of quantifier
relcl relative clause modifier
root root
xcomp open clausal complement
"""
dep_tags_classes = ['acl', 'acomp', 'advcl','advmod', 'agent', 'amod',
'appos', 'attr', 'aux', 'auxpass', 'case', 'cc',
'ccomp', 'compound', 'conj', 'cop', 'csubj', 'csubjpass',
'dative', 'dep','det', 'dobj', 'expl',
'intj', 'mark', 'meta', 'neg', 'nn', 'nounmod', 'npmod',
'nsubj','nsubjpass', 'nummod', 'oprd',
'obj', 'obl', 'parataxis', 'pcomp', 'pobj', 'poss', 'preconj',
'prep','prt', 'punct', 'quantmod',
'relcl','root', 'xcomp']
# one hot encoding of the different DEP tags
x = label_binarize(self.dep, classes=dep_tags_classes)
output = pd.DataFrame(x, columns=dep_tags_classes)
return output
def wordlist_features(self, as_dict=False):
"""
Creates word features based on word lists and the calculated tf-idf scores.
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
"""
NGSL = [self.check_word_list(token.lower(), word_list='NGSL') for token in self.token]
BSL = [self.check_word_list(token.lower(), word_list='BSL') for token in self.token]
NAWL = [self.check_word_list(token.lower(), word_list='NAWL') for token in self.token]
idf = [self.check_tfidf(token.lower()) for token in self.token]
d = {'ngsl': NGSL,'bsl': BSL,'nawl': NAWL, 'idf': idf}
if as_dict:
return d
else:
return pd.DataFrame(d)
def other_features(self, as_dict=False):
"""
Returns dummies for the remaining spacy word features
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
"""
# the dummy variables
is_entity = [1 if token != 'O' else 0 for token in self.ent_pos]
like_email = [1 if token == True else 0 for token in self.like_email]
like_url = [1 if token == True else 0 for token in self.like_url]
is_stop = [1 if token == True else 0 for token in self.is_stop]
is_alpha = [1 if token == True else 0 for token in self.is_alpha]
is_oov = [1 if token == True else 0 for token in self.is_oov]
d = {'is_entity': is_entity,'like_email': like_email,'like_url': like_url,
'is_stop': is_stop, 'is_alpha': is_alpha, 'is_oov': is_oov,
'vector_norm':self.vector_norm}
if as_dict:
return d
else:
return pd.DataFrame(d)
def classic_features(self, as_dict=False):
"""
Returns the classic word features
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
"""
large_words = [1 if syl >= 3 else 0 for syl in self.syllables]
polsyll = [1 if syl > 1 else 0 for syl in self.syllables]
# the dummy variables
d = {'syllables': self.syllables, 'large_word': large_words,
'polsyll':polsyll, 'lenght':self.lenght}
if as_dict:
return d
else:
return pd.DataFrame(d)
def check_word_list(self, token, word_list='NGSL'):
"""
Function to check if token exists in specific word list.
Parameters
----------
token : str
The token to be analyzed
word_list : str
Defines the wordlist to be considered (NGSL, BSL or NAWL) if nothing
is specified, NAWL is considered
Returns
-------
x: integer
Dummy (0 or 1) if word is in the specified word list
"""
if word_list=='NGSL':
word_set = NGSL_wordlist
elif word_list=='BSL':
word_set = BSL_wordlist
else:
word_set = NAWL_wordlist
if token not in word_set:
x = 0
else:
x=1
return x
def check_tfidf(self, token):
"""
Function to check if token exists in tf_idf list and return idf score.
Parameters
----------
token : str
The token to be analyzed
Returns
-------
value: integer
IDF value
"""
value = idf_dict.get(token, 0)
return value
def tree_features(self, as_dict=False):
"""
Function to create the tree based features.
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables
-------
subtree_lenght
n_left
n_right
ancestors
children
"""
# lexical chain - dependencies of words:
self.subtree_lenght = [len(list(token.subtree)) for token in self.doc]
self.n_left = [len(list(token.lefts)) for token in self.doc]
self.n_right = [len(list(token.rights)) for token in self.doc]
self.ancestors = [len(list(token.ancestors)) for token in self.doc]
self.children = [len(list(token.children)) for token in self.doc]
d = {'subtree_lenght':self.subtree_lenght, 'n_left':self.n_left,'n_right':self.n_right,
'ancestors':self.ancestors,'children':self.children}
if as_dict:
return d
else:
return pd.DataFrame(d)
def semantic_features(self, as_dict=False):
"""
Function to calculate the cumulative explained variance for PCA on the word embeddings.
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables
-------
wordvec_1pc
wordvec_3pc
wordvec_10pc
"""
pca = PCA()
pca.fit(self.word_vectors)
explained_var = pd.DataFrame(pca.explained_variance_ratio_, columns=['expl_var'])
wordvec_1pc = np.sum(explained_var.iloc[0])
wordvec_3pc = np.sum(explained_var.iloc[0:3])
wordvec_10pc = np.sum(explained_var.iloc[0:10])
d = {'wordvec_1pc':wordvec_1pc,'wordvec_3pc':wordvec_3pc,'wordvec_10pc':wordvec_10pc}
if as_dict:
return d
else:
return pd.DataFrame(d)
def word_features(self, embeddings = False):
"""
Combines the featuresets to a Dataframe with
all the 88 word-features.
Parameters
----------
embeddings : boolean
Defines if the word embeddings (n=300) are included or not
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables. Each row represents a token
and the features are in the columns (n x 88) as there are 88 word-features
"""
classic_features = self.classic_features()
pos_features = self.pos_onehot()
dep_features = self.dep_onehot()
wordlist_features = self.wordlist_features()
other_features = self.other_features()
tree_features = self.tree_features()
if embeddings:
nameslist = ["V{:02d}".format(x+1) for x in range(300)]
word_embeddings = pd.DataFrame(self.word_vectors, columns = nameslist)
return pd.concat([classic_features,pos_features,dep_features, wordlist_features,
other_features,tree_features,word_embeddings], axis=1)
else:
return pd.concat([classic_features, pos_features,dep_features, wordlist_features,
other_features,tree_features], axis=1)
def paragraph_features(self, embed = False, as_dict = False):
"""
Create the feature set over the total paragraph based on the
features estimated per word.
Parameters
----------
embed : boolean
Defines if the word embeddings (n=300) are included or not
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables. Each row represents a feature.
columns:
cat: feature category
value: value of the feature
"""
# word embeddings
word_embeddings_raw = pd.DataFrame(self.word_vectors, columns = ["V{:02d}".format(x+1) for x in range(300)])
# create all datasets with the mean word values
classic_features = pd.DataFrame(self.classic_features().mean(), columns= ['value'])
classic_features['cat'] = 'classic'
dep_features = pd.DataFrame(self.dep_onehot().mean(), columns= ['value'])
dep_features['cat'] = 'dep'
wordlist_features = pd.DataFrame(self.wordlist_features().mean(), columns= ['value'])
wordlist_features['cat'] = 'classic'
pos_features = pd.DataFrame(self.pos_onehot().mean(), columns= ['value'])
pos_features['cat'] = 'pos'
tree_features = pd.DataFrame(self.tree_features().mean(), columns= ['value'])
tree_features['cat'] = 'tree'
other_features = pd.DataFrame(self.other_features().mean(), columns= ['value'])
other_features['cat'] = 'classic'
semantic_features = pd.DataFrame(self.semantic_features().mean(), columns= ['value'])
semantic_features['cat'] = 'semantic'
word_embeddings = pd.DataFrame(word_embeddings_raw.mean(), columns= ['value'])
word_embeddings['cat'] = 'embeddings'
if embed:
temp_df = pd.concat([classic_features, dep_features, wordlist_features, pos_features, other_features,
tree_features,semantic_features, word_embeddings], axis=0)
else:
temp_df = pd.concat([classic_features, dep_features, wordlist_features, pos_features, other_features,
tree_features,semantic_features], axis=0)
temp_df['var'] = temp_df.index
# add standard features that are not based on word features
paragraph_features = pd.DataFrame(columns=['var','value', 'cat'])
paragraph_features.loc[0] = ['n_sentences'] + [self.n_sentences] + ['classic']
paragraph_features.loc[1] = ['sent_lenght'] +[self.n_tokens/self.n_sentences] + ['classic']
paragraph_features.loc[3] = ['n_tokens'] +[self.n_tokens] + ['classic']
# add the entitiy based features (in addition to the percentage of entities)
paragraph_features.loc[4] = ['n_entities'] + [self.other_features()['is_entity'].sum()] + ['entity']
paragraph_features.loc[5] = ['ent_per_sent'] + [self.other_features()['is_entity'].sum() /self.n_sentences ] + ['entity']
# additional dependency tree features
paragraph_features.loc[6] = ['max_treelenght'] + [self.tree_features()['subtree_lenght'].max()] + ['tree']
paragraph_features.loc[7] = ['q80_treelenght'] + [self.tree_features()['subtree_lenght'].quantile(.8)] + ['tree']
paragraph_features.loc[8] = ['var_treelenght'] + [self.tree_features()['subtree_lenght'].var()] + ['tree']
        full_df = pd.concat([temp_df, paragraph_features], axis=0, sort=True)
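# --- Hedged usage sketch (added for illustration). The class body above is
# truncated in this excerpt, so this assumes the methods listed in the class
# docstring (word_features, paragraph_features, bofir) are available.
def _readability_example():
    text = ("The company reported a net loss for the quarter, driven by "
            "higher impairment charges and weaker trading revenue.")
    r = readability(text)
    word_feats = r.word_features()       # per-token feature matrix (n x 88)
    para_feats = r.paragraph_features()  # aggregated paragraph-level features
    bofir_score = r.bofir(cat5=True)     # BOFIR readability category
    return word_feats, para_feats, bofir_score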
#!/usr/bin/env python
import math
import numpy as np
import pandas as pd
import random
import string
from scipy.stats import zipf
from itertools import chain
import json
class contentCatalogue():
def __init__(self, size=1000):
'''
Assigns the size and constructs an empty list of contents. Constructs
an empty list for popularities of contents (probabilities). Constructs an
empty content matrix as a list.
Input: contents: list, popularity: list, contentMatrix: list, size: int
'''
self.size = size
self.contents = list()
self.popularity = list()
self.contentMatrix = list()
def characteristics(self):
'''
Output: returns specs of content catalogue
'''
return 'Content Catalogue Size: {}\nContent Catalogue Popularity:\n{}\nContent Catalogue:\n{}\nContent Catalogue Relations:\n{}'.format(self.size, self.popularity, self.contents, self.contentMatrix)
def randomSingleContentGenerator(self, stringLength=8):
"""Generate a random string of letters and digits """
lettersAndDigits = string.ascii_letters + string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def randomMultipleContentsGenerator(self):
"""Generate a list of random strings of letters and digits """
contents = list()
for i in range(0, self.size):
contents.append(self.randomSingleContentGenerator())
assert(len(contents) == self.size)
return contents
def getNrandomElements(self, list, N):
'''
Returns random elements from a list
Input: list and num of items to be returned
Output: list of N random items
'''
return random.sample(list, N)
def getContents(self):
'''
Output: returns contents as a list
'''
return self.contents
def getContentsLength(self):
'''
Output: returns contents size
'''
return len(self.contents)
def zipf_pmf(self, lib_size, expn):
'''
Returns a probability mass function (list of probabilities summing to 1.0) for the Zipf distribution with the given size "lib_size" and exponent "expn"
Input: size of content catalogue, exponent
Output: list of probabilities that sum up to 1
'''
K = lib_size
p0 = np.zeros([K])
for i in range(K):
p0[i] = ((i+1)**(-expn))/np.sum(np.power(range(1, K+1), -expn))
return p0
def setContentsPopularity(self, distribution='zipf', a=0.78):
'''
Sets the popularity of contents given a distribution (zipf by default) .
Input: distribution, exponent
Output: vector of probabilities that correspond to the content catalogue
'''
if distribution == 'zipf':
prob = self.zipf_pmf(self.getContentsLength(), a)
return prob
else:
raise Exception('Distribution \'' + distribution +
'\' not implemented yet')
def initializeContentCatalogue(self, contents):
'''
Set the content catalogue for the first time in a list format
Input: strings in a list
'''
self.contents = contents
self.popularity = self.setContentsPopularity()
def symmetrize(self, a):
'''
Forces symmetricity in a content matrix
Input: a matrix
Output: a symmetric matrix provided from the original
'''
return np.tril(a) + np.tril(a, -1).T
def createRandomContentMatrixBinary(self, symmetric=True, numOfRelations=10, outputForm='dataframe'):
'''
TODO: Fix commentary in this piece of code
'''
numOfContents = self.getContentsLength()
contentMatrix = np.zeros((numOfContents, numOfContents))
idx = np.random.rand(numOfContents, numOfContents).argsort(1)[
:, :numOfRelations]
contentMatrix[np.arange(numOfContents)[:, None], idx] = 1
print(contentMatrix)
# if symmetric:
# contentMatrix = self.symmetrize(contentMatrix)
# print(contentMatrix)
# for row in contentMatrix:
# if(len(row) != numOfRelations):
# print(row)
# # print(contentMatrix)
# for i in range(numOfContents):
# for j in range(numOfContents):
# if i == j and contentMatrix[i][j] == 1:
# indexesOfZeros = np.argwhere(
# contentMatrix[i] == 0).tolist()
# contentMatrix[i][j] = 0
# for i in range(numOfContents):
# for j in range(numOfContents):
# # print(i, j)
# indexesOfOnesCurrentNodeRow = np.argwhere(
# contentMatrix[i] == 1).tolist()
# # print(len(indexesOfOnesCurrentNodeRow))
# while len(indexesOfOnesCurrentNodeRow) < numOfRelations:
# randomChoiceOfIndex = random.choice(
# indexesOfOnesCurrentNodeRow)[0]
# indexesOfOnesRelatedNodesRow = np.argwhere(
# contentMatrix[randomChoiceOfIndex] == 1).tolist()
# if len(indexesOfOnesRelatedNodesRow) < numOfRelations:
# contentMatrix[i][randomChoiceOfIndex] = 1
# contentMatrix[randomChoiceOfIndex][i] = 1
# assert symmetricity
# assert(np.allclose(contentMatrix, contentMatrix.T))
self.contentMatrix = contentMatrix
# Return in a specific format (list or df)
if outputForm == 'dataframe':
names = [_ for _ in self.getContents()]
df = pd.DataFrame(contentMatrix, index=names, columns=names)
return df
return contentMatrix
def loadContentMatrix_JSON(self, url):
'''
Loads an item based NxN content matrix (IDs in rows/columns).
Input: url of content matrix
'''
out = None
with open(url, 'r') as f:
out = json.load(f)
self.contentMatrix = np.array(out)
def loadContentMatrix_CSV(self, url):
'''
Loads an item based NxN content matrix (IDs in rows/columns).
Also initializes the content catalogue with the given column names
Input: url of content matrix as a CSV file
'''
data = pd.read_csv(url, delimiter='\t')
self.initializeContentCatalogue(list(data.columns)[1:])
data = [list(x[1:]) for x in data.to_numpy()]
self.contentMatrix = np.array(data)
def relatedContents(self, id):
'''
Returns all non zero relations to a given ID
Relations are extracted from a content matrix (cm)
Input: id
Output: related contents list
'''
# extract all relations from content matrix
candidateRelated = self.contentMatrix[self.contents.index(id)]
# print(len(candidateRelated))
# extract all non zero relations from the above list - just indexes
indexesOfPositiveRelations = np.argwhere(candidateRelated == 1)
# print(len(indexesOfPositiveRelations))
# make the above indexes a single list, for easier reference
indexesOfPositiveRelations = list(
chain.from_iterable(indexesOfPositiveRelations))
# dereference the indexes => acquire a list of related contents
related = [self.contents[i] for i in indexesOfPositiveRelations]
toReturn = []
# Return also the relation weight for each related content
for rel in related:
toReturn.append(
(rel, candidateRelated[self.contents.index(rel)]))
# Return items sorted in descending relevance (most relevant item in first position)
return sorted(toReturn, key=lambda x: x[1], reverse=True)
def main():
N = 10 # number of relations
W = 5
MP = 10000
r = contentCatalogue(size=10000)
r.initializeContentCatalogue(r.randomMultipleContentsGenerator())
# Set numOfRelations equal to the number of relations you want each content to have with all others
r.createRandomContentMatrixBinary(symmetric=False, numOfRelations=N)
# Get content catalogue
names = [_ for _ in r.getContents()]
    df = pd.DataFrame(r.contentMatrix, index=names, columns=names)
import numpy as np
import os
import util
import argparse
import pandas as pd
import matplotlib
from matplotlib import colors
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
if __name__ == "__main__":
post = True
plot_title = True
plot_patrol_effort = True
plot_illegal_activity = True
plot_patrol_post = False
parser = argparse.ArgumentParser(description='Bagging Cross Validation Blackbox function')
parser.add_argument('-r', '--resolution', default=1000, help='Input the resolution scale')
parser.add_argument('-p', '--park', help='Input the park name', required=True)
parser.add_argument('-c', '--category', default='All', help='Input the category')
args = parser.parse_args()
resolution = int(args.resolution)
park = args.park
category = args.category
directory = './{0}_datasets/resolution/{1}m/input'.format(park, str(resolution))
output_directory = './{0}_datasets/resolution/{1}m/output'.format(park, str(resolution))
patrol_post_path = './{0}_datasets/PatrolPosts.csv'.format(park)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(output_directory + "/Maps/{0}".format(category)):
os.makedirs(output_directory + "/Maps/{0}".format(category))
# test_year, test_quarter = util.test_year_quarter_by_park(park)
data = pd.read_csv(directory + '/' + 'allStaticFeat.csv')
if plot_patrol_effort:
patrol_data = pd.read_csv(directory + '/' + '{0}_X.csv'.format(category))
if plot_illegal_activity:
illegal_data = pd.read_csv(directory + '/' + '{0}_Y.csv'.format(category))
#data = pd.read_csv(directory + '/' + 'boundary_cropped500.csv')
# --------------------- shifting --------------------------
x_min=int(np.min(data['x']))
y_min=int(np.min(data['y']))
data['x'] = (data['x'] - x_min) / resolution
data['y'] = (data['y'] - y_min) / resolution
data['x'] = data['x'].astype(int)
data['y'] = data['y'].astype(int)
if plot_patrol_effort:
patrol_data['x'] = (patrol_data['x'] - x_min) / resolution
patrol_data['y'] = (patrol_data['y'] - y_min) / resolution
patrol_data['x'] = patrol_data['x'].astype(int)
patrol_data['y'] = patrol_data['y'].astype(int)
if plot_illegal_activity:
illegal_data['x'] = (illegal_data['x'] - x_min) / resolution
illegal_data['y'] = (illegal_data['y'] - y_min) / resolution
illegal_data['x'] = illegal_data['x'].astype(int)
illegal_data['y'] = illegal_data['y'].astype(int)
# --------------------- feature map -----------------------
static_feature_options = list(data.columns[3:]) + ["Null"]
if plot_patrol_effort:
static_feature_options = static_feature_options + ["Past Patrol"]
if plot_illegal_activity:
static_feature_options = static_feature_options + ["Illegal Activity"]
for static_feature_option in static_feature_options:
print("Processing feature: {0} ...".format(static_feature_option))
x_max=int(np.max(data['x']))
y_max=int(np.max(data['y']))
color = ['black',(0,0,0,0)]
cmapm = colors.ListedColormap(color)
bounds=[-1,0]
norm = colors.BoundaryNorm(bounds, cmapm.N)
gridmap = [[0 for x in range(x_max+1)] for y in range(y_max+1)]
if static_feature_option == "Past Patrol":
feature_map = np.ones((y_max+1, x_max+1)) / float(10)
for index, row in data.iterrows():
gridmap[int(row['y'])][int(row['x'])] = 1
for index, row in patrol_data.iterrows():
feature_map[int(row['y'])][int(row['x'])] += row['currentPatrolEffort']
elif static_feature_option == "Illegal Activity":
feature_map = np.zeros((y_max+1, x_max+1))
            feature_df = pd.DataFrame(columns=['x', 'y', 'value'])
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split, cross_val_score
from imblearn.under_sampling import RandomUnderSampler
from process_loaded_data import check_if_many_relative_followers_to_friends
from datetime import datetime
from pymongo import MongoClient
from tweet_scrape_processor import process_tweet
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
import dill as pickle
"""
This module is used to create the random forest ensemble that will classify
a user as real or fake based on a single tweet and the user data embedded
in that json object
Previous research in the area of classifying twitter users as real or fake
has done so by using class A (lightweight) and class B (costlier) features.
Lightweight features include everything that you can get from a single tweet
(total tweets, follower, likes, account creation date) as these are embedded
in the json object that one can get when downloading a tweet via twitter's
API. Costlier features include a user's tweet history,
meaning the tweets themselves.
The contribution to the research community of this lightweight classifier is
a classification method that relies solely on class A features. The approach
is as follows: a) create features from user's account history (total likes,
total tweets, total followers, total friends, etc) b) create features that
express relative volume (total likes divided by total number of followers,
total tweets divided by total number of friends, etc) as it was observed that
some accounts have hundreds and thousands of tweets but very few people in
their network c) create features that express behavior rate (total likes per
day, total tweets per day, total likes per friends per day) as the account
creation date is available in the json object and it was observed that fake
accounts do "machine gun" tweeting where they tweet very frequently in a small
period of time. These set of features was added in order to also make the
model less naive to new users
No features took the content or the words of the tweet into account (i.e. NLP
based prediction) as the premise is that a human is always behind the message
being artificially propagated. The behavior captured by the tweet was taken
into account by looking at hashtag usage, mentions, whether the tweet was
favorited by another person, etc.
The classification model is a random forest ensemble made up of three random
forest models.
Random Forest 1 (RF1) takes in account history features and relative volume
features
Random Forest 2 (RF2) takes in behavior rate features that look at account
history features per day and relative volume features per day
Random Forest 3 (RF3) takes in the predicted probabilities of Random Forest
1 and Random Forest 2, along with all of these models features, and then
makes the final prediction.
The final Random Forest is able to balance out the work of the previous ones
by understanding the user patterns along the two major facets: account
history and account behavior rate.
The ten fold cross validated accuracy of RF1 is 97%, RF2 has 95%, and RF3
has 98%. Previous research using this dataset achieved these kinds of scores
as well. However, they did so with class A and class B features. The
contribution of this work is that this kind of performance was attained
using only class A features.
To run this, just run the function:
create_ensemble_model()
Todo:
* train the model with more samples from today's set of Twitter's
false negatives so that the model can understand the patterns of
the spammers of today
"""
def view_feature_importances(df, model):
"""
Args:
df (pandas dataframe): dataframe which has the original data
model (sklearn model): this is the sklearn classification model that
has already been fit (work with tree based models)
Returns:
nothing, this just prints the feature importances in descending order
"""
columns = df.columns
features = model.feature_importances_
featimps = []
for column, feature in zip(columns, features):
featimps.append([column, feature])
print(pd.DataFrame(featimps, columns=['Features',
'Importances']).sort_values(by='Importances',
ascending=False))
def evaluate_model(model, X_train, y_train):
"""
Args:
model (sklearn classification model): this model from sklearn that
will be used to fit the data and to see the 10 fold cross val score of
X_train (2d numpy array): this is the feature matrix
y_train (1d numpy array): this is the array of targets
Returns:
prints information about the model's accuracy using 10
fold cross validation
model (sklearn classification model): the model that has already been
fit to the data
"""
print(np.mean(cross_val_score(model, X_train, y_train,
cv=10, n_jobs=-1, verbose=10)))
model.fit(X_train, y_train)
return model
def write_model_to_pkl(model, model_name):
"""
Args:
model_name (str): this is the name of the model
model (sklearn fit model): the sklearn classification model
that will be saved to a pkl file
Returns:
nothing, saves the model to a pkl file
"""
with open('models/{}_model.pkl'.format(model_name), 'w+') as f:
pickle.dump(model, f)
def view_classification_report(model, X_test, y_test):
"""
Args
model (sklearn classification model): this model from sklearn that
will has already been fit
X_test (2d numpy array): this is the feature matrix
y_test (1d numpy array): this is the array of targets
Returns
nothing, this is just a wrapper for the classification report
"""
print(classification_report(y_test, model.predict(X_test)))
def gridsearch(paramgrid, model, X_train, y_train):
"""
Args:
paramgrid (dictionary): a dictionary of lists where the keys are the
model's tunable parameters and the values are a list of the
different parameter values to search over
X_train (2d numpy array): this is the feature matrix
y_train (1d numpy array): this is the array of targets
Returns:
best_model (sklearn classifier): a fit sklearn classifier with the
best parameters from the gridsearch
gridsearch (gridsearch object): the gridsearch object that has
already been fit
"""
gridsearch = GridSearchCV(model,
paramgrid,
n_jobs=-1,
verbose=10,
cv=10)
gridsearch.fit(X_train, y_train)
best_model = gridsearch.best_estimator_
print('these are the parameters of the best model')
print(best_model)
print('\nthese is the best score')
print(gridsearch.best_score_)
return best_model, gridsearch
def balance_classes(sm, X, y):
"""
Args:
sm (imblearn class): this is an imbalance learn oversampling or
undersampling class
X (2d numpy array): this is the feature matrix
y (1d numpy array): this is the array of the targets
Returns:
X (2d numpy array): this is the balanced feature matrix
y (1d numpy array): this is the corresponding balanced target array
Returns X and y after being fit with the resampling method
"""
X, y = sm.fit_sample(X, y)
return X, y
def load_all_training_data():
"""
Args:
- none
Returns:
df (pandas dataframe): the training dataframe with the ff
things done to it:
a) protected accounts dropped
b) irrelevant columns removed
"""
df = pd.read_csv('data/all_user_data.csv')
df = df.query('protected != 1')
df.drop(['profile_image_url_https',
'profile_sidebar_fill_color',
'profile_text_color',
'profile_background_color',
'profile_link_color',
'profile_image_url',
'profile_background_image_url_https',
'profile_banner_url',
'profile_background_image_url',
'profile_background_tile',
'profile_sidebar_border_color',
'default_profile',
'file',
'time_zone',
'screen_name',
'utc_offset',
'protected'], axis=1, inplace=True)
return df
def get_most_recent_tweets_per_user():
"""
Args:
none
Returns
df (pandas dataframe): Returns a dataframe with only one tweet per
row which is the MOST recent tweet recorded for that user_id
"""
tweetdf = pd.read_csv('data/training_tweets.csv')
tweetdf.timestamp = pd.to_datetime(tweetdf.timestamp)
index = tweetdf.groupby('user_id').apply(lambda x: np.argmax(x.timestamp))
tweetdf = tweetdf.loc[index.values]
tweetdf.reset_index().drop('Unnamed: 0', axis=1, inplace=True)
tweetdf.drop('Unnamed: 0', axis=1, inplace=True)
return tweetdf
def load_master_training_df():
"""
Args:
none
Returns
df (pandas dataframe): Returns dataframe combining most recent tweet
info with user info. notes on the columns:
updated - when the account was last updated
geo_enabled - if the account is geo enabled
description - text which has the user input self-description
verified - if the account is verified or not
followers_count - number of followers
location - string, location
default_profile_image - binary, yes or no
listed_count - how many times the account was listed
statuses count - number of tweets posted
friends_count - number of accounts the user is following
name - user specified user name
lang - user specified user language (CANNOT BE USED)
favourites_count - number of items favourited
url - user specified url
created_at - date the account was created
user_id - twitter assigned user id (unique in the twittersphere)
favorite_count - times the tweet was favorited
num_hashtags - number of hashtags used in the tweet
text - the tweet contents
source - the device used to upload the tweet
num_mentions - number of users mentioned in the tweet
timestamp - timestamp of the tweet
geo - if the tweet was geo localized or not
place - user specified place of the tweet
retweet_count - number of times the tweet was retweeted
"""
df = load_all_training_data()
tweetdf = get_most_recent_tweets_per_user()
users_who_tweeted = set(tweetdf.user_id.apply(int))
df = df[df.id.isin(users_who_tweeted)]
df['user_id'] = df['id']
df = pd.merge(df, tweetdf, on='user_id')
df.drop(['id',
'label_x',
'reply_count',
'file'], axis=1, inplace=True)
df.updated = pd.to_datetime(df.updated)
df.created_at = df.created_at.apply(convert_created_time_to_datetime)
account_age = df.timestamp - df.created_at
    account_age = list(map(get_account_age_in_days, account_age.values))
df['account_age'] = account_age
return df
def get_account_age_in_days(numpy_time_difference):
"""
Args
numpy_time_difference (numpy timedelta): a numpy timedelta object
that is the difference between the user's account creation date
and the date of their most recent tweet
Return
account_age (int)
"""
return int(numpy_time_difference/1000000000/60/60/24)+1
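# Minimal worked example for the conversion above (hedged, not from the original
# project). The helper expects nanosecond-resolution values, which is what
# (df.timestamp - df.created_at).values yields, and it adds 1 so that a
# same-day account still counts as one day old.
def _example_account_age():
    import numpy as np
    three_days = np.timedelta64(3, 'D').astype('timedelta64[ns]')
    return get_account_age_in_days(three_days)   # -> 4 (3 full days + 1)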
def convert_created_time_to_datetime(datestring):
"""
Args:
datestring (str): a string object either as a date or
a unix timestamp
Returns
datetime_object (pandas datetime object): the converted string as
a datetime object
"""
if len(datestring) == 30:
return pd.to_datetime(datestring)
else:
return pd.to_datetime(datetime.fromtimestamp(int(datestring[:10])))
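# Hedged illustration of the two branches above, assuming the module-level
# pandas and datetime imports this helper already relies on: Twitter's classic
# created_at string is exactly 30 characters long and is parsed directly, while
# anything else is treated as a unix timestamp (first 10 digits = seconds).
def _example_created_time_conversion():
    parsed_from_string = convert_created_time_to_datetime(
        'Mon Sep 24 03:35:21 +0000 2012')        # 30-character Twitter format
    parsed_from_epoch = convert_created_time_to_datetime(
        '1348457721000')                          # millisecond epoch string
    return parsed_from_string, parsed_from_epoch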
def feature_engineering(df):
"""
Args:
df (pandas dataframe): the initial pandas dataframe with the user
and tweet information
Returns
        df (pandas dataframe): the processed dataframe with the features
            needed for the model
"""
df = check_if_many_relative_followers_to_friends(df)
df['has_30_followers'] = \
df.followers_count.apply(lambda x: 1 if x >= 30 else 0)
df['favorited_by_another'] = \
df.favorite_count.apply(lambda favcnt: 1 if favcnt > 0 else 0)
df['has_hashtagged'] = \
df.num_hashtags.apply(lambda hashtag: 1 if hashtag > 0 else 0)
df['has_mentions'] = \
df.num_mentions.apply(lambda mentions: 1 if mentions > 0 else 0)
df = df.fillna(-999)
return df
def get_and_process_mongo_tweets(dbname, collection):
"""
Args:
dbname (str): the name of the mongo db to connect to
collection (str): the name of the table inside that mongodb
Returns:
        tweet_history (numpy array): the array of processed tweet features
            (one row per tweet)
"""
client = MongoClient()
tweet_list = []
db = client[dbname]
tab = db[collection].find()
for document in tab:
tweet_list.append(document)
processed_tweets = np.array([process_tweet(tweet) for tweet in tweet_list])
tweet_history = processed_tweets[:, :19].astype(float)
return tweet_history
def drop_unnecessary_features(df):
"""
Args:
df (pandas dataframe): the initial pandas dataframe
Returns
df (pandas dataframe): a dataframe where the unnecessary
features have been dropped
"""
df.drop(['updated', 'description', 'location', 'name', 'lang', 'url',
'user_id', 'text', 'source', 'timestamp', 'created_at', 'geo',
'place'], axis=1, inplace=True)
return df
def behavior_network_ratio_feature_creation(df):
"""
Args:
df (pandas dataframe): initial dataframe
Returns:
        df (pandas dataframe): the dataframe with additional ratio features;
            -999 is substituted wherever an inf or NaN would otherwise
            appear, to denote missing values
"""
df['tweets_followers'] = df.statuses_count / df.followers_count
df['tweets_friends'] = df.statuses_count / df.friends_count
df['likes_followers'] = df.favourites_count / df.followers_count
df['likes_friends'] = df.favourites_count / df.friends_count
df.tweets_followers = \
df.tweets_followers.apply(lambda tf: -999 if tf == np.inf else tf)
df.tweets_friends = \
df.tweets_friends.apply(lambda tf: -999 if tf == np.inf else tf)
df.likes_followers = \
df.likes_followers.apply(lambda lf: -999 if lf == np.inf else lf)
df.likes_friends = \
df.likes_friends.apply(lambda lf: -999 if lf == np.inf else lf)
df = df.fillna(-999)
return df
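# Hedged toy example of the ratio features above: an account with zero
# followers produces an inf ratio, which the function replaces with the same
# -999 missing-value sentinel used throughout this script.
def _example_behavior_ratios():
    import pandas as pd
    toy = pd.DataFrame({'statuses_count': [100, 50],
                        'followers_count': [0, 25],
                        'friends_count': [10, 5],
                        'favourites_count': [20, 0]})
    toy = behavior_network_ratio_feature_creation(toy)
    # toy.tweets_followers.iloc[0] is -999 because 100 / 0 would be inf
    return toy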
def create_ensemble_model():
"""
Args:
none
Returns
        nothing; this prints out the performance of the model and then
        saves it to a pkl file. It takes in the training user tweet data,
        the Miley Cyrus data, and the celebrity users data as additional
        information to train the model to recognize current spam patterns
"""
print('this is the portion that checks absolute user behavior values')
df = pd.read_csv('data/training_user_tweet_data.csv')
cyrusdf = pd.read_csv('data/mileycyrususers.csv')
celebdf = pd.read_csv('data/celebrityusers.csv')
df = behavior_network_ratio_feature_creation(df)
cyrusdf = behavior_network_ratio_feature_creation(cyrusdf)
celebdf = behavior_network_ratio_feature_creation(celebdf)
print('this is the portion that checks absolute user behavior values')
y = df.pop('label_y')
y = y.values
X = df.values
ycyrus = cyrusdf.pop('label_y')
ycyrus = ycyrus.values
yceleb = celebdf.pop('label_y')
yceleb = yceleb.values
cyrusX = cyrusdf.values
celebX = celebdf.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
cyX_train, cyX_test, cyy_train, cyy_test = \
train_test_split(cyrusX, ycyrus, test_size=.2)
ceX_train, ceX_test, cey_train, cey_test = \
train_test_split(celebX, yceleb, test_size=.2)
X_train_b, y_train_b = balance_classes(RandomUnderSampler(),
X_train, y_train)
X_test_b, y_test_b = balance_classes(RandomUnderSampler(),
X_test, y_test)
X_train_b = np.vstack((X_train_b, cyX_train, ceX_train))
y_train_b = np.hstack((y_train_b, cyy_train, cey_train))
X_test_b = np.vstack((X_test_b, cyX_test, ceX_test))
y_test_b = np.hstack((y_test_b, cyy_test, cey_test))
weights = 1
X_train_bw = X_train_b * weights
paramgrid = {'n_estimators': [200],
'max_features': ['auto'],
'criterion': ['entropy'],
'min_samples_split': [10],
'min_samples_leaf': [8],
'max_depth': [30],
'bootstrap': [True]}
model = RandomForestClassifier(n_jobs=-1)
model, gs = gridsearch(paramgrid, model, X_train_bw, y_train_b)
    print("\nthis is the model performance on the training data\n")
    view_classification_report(model, X_train_bw, y_train_b)
    print(confusion_matrix(y_train_b, model.predict(X_train_bw)))
    print("this is the model performance on the test data\n")
    view_classification_report(model, X_test_b*weights, y_test_b)
    print(confusion_matrix(y_test_b, model.predict(X_test_b*weights)))
print("this is the model performance on different split ratios\n")
print("\nthese are the model feature importances\n")
view_feature_importances(df, model)
y_pred = model.predict_proba(X_train_bw)[:, 1]
print('this is the portion that checks user behavior rate values')
X_train_bwr = X_train_bw/X_train_bw[:, 13].reshape(-1, 1)
weights = 1
X_train_bwr = X_train_bwr * weights
paramgrid = {'n_estimators': [200],
'max_features': ['auto'],
'criterion': ['entropy'],
'min_samples_split': [16],
'min_samples_leaf': [18],
'max_depth': [30],
'bootstrap': [True]}
modelb = RandomForestClassifier(n_jobs=-1)
modelb, gs = gridsearch(paramgrid, modelb, X_train_bwr, y_train_b)
print("\nthis is the model performance on the training data\n")
view_classification_report(modelb, X_train_bwr, y_train_b)
print(confusion_matrix(y_train_b, modelb.predict(X_train_bwr)))
print("this is the model performance on the test data\n")
X_test_br = X_test_b * weights/X_test_b[:, 13].reshape(-1, 1)
view_classification_report(modelb, X_test_br, y_test_b)
print(confusion_matrix(y_test_b, modelb.predict(X_test_br)))
print("\nthese are the model feature importances\n")
view_feature_importances(df, modelb)
y_pred_b = modelb.predict_proba(X_train_bwr)[:, 1]
print('this is the portion that ensembles these two facets')
ensemble_X = np.hstack((X_train_bw, X_train_bwr,
y_pred.reshape(-1, 1), y_pred_b.reshape(-1, 1)))
model_ens = RandomForestClassifier(n_jobs=-1)
paramgrid = {'n_estimators': [500],
'max_features': ['auto'],
'criterion': ['entropy'],
'min_samples_split': [16],
'min_samples_leaf': [11],
'max_depth': [20],
'bootstrap': [True]}
model_ens, gs = gridsearch(paramgrid, model_ens, ensemble_X, y_train_b)
print("\nthis is the model performance on the training data\n")
view_classification_report(model_ens, ensemble_X, y_train_b)
print(confusion_matrix(y_train_b, model_ens.predict(ensemble_X)))
print("this is the model performance on the test data\n")
y_pred_test = model.predict_proba(X_test_b)[:, 1]
X_test_br = X_test_b/X_test_b[:, 13].reshape(-1, 1)
y_pred_test_b = modelb.predict_proba(X_test_br)[:, 1]
ensemble_X_test = np.hstack((X_test_b, X_test_br,
y_pred_test.reshape(-1, 1),
y_pred_test_b.reshape(-1, 1)))
view_classification_report(model_ens, ensemble_X_test, y_test_b)
print(confusion_matrix(y_test_b, model_ens.predict(ensemble_X_test)))
columns = \
list(df.columns)+[column+'_rate' for
column in df.columns] + \
['pred_model_1', 'pred_model_2']
ensdf = pd.DataFrame(ensemble_X, columns=columns)
view_feature_importances(ensdf, model_ens)
print('evaluating the model on the new kind of spam')
newX = np.vstack((cyX_test, ceX_test))
newXbr = newX/newX[:, 13].reshape(-1, 1)
newy = np.hstack((cyy_test, cey_test))
newy_pred = model.predict(newX)
newy_pred_b = modelb.predict(newXbr)
newXens = np.hstack((newX, newXbr,
newy_pred.reshape(-1, 1),
newy_pred_b.reshape(-1, 1)))
print(confusion_matrix(newy, model_ens.predict(newXens)))
print('fitting to all and writing to pkl')
y_all = np.hstack((y_train_b, y_test_b))
behavior_X = np.vstack((X_train_bw, X_test_b))
behavior_rate_X = np.vstack((X_train_bwr, X_test_br))
ensemble_X = np.vstack((ensemble_X, ensemble_X_test))
model.fit(behavior_X, y_all)
modelb.fit(behavior_rate_X, y_all)
model_ens.fit(ensemble_X, y_all)
write_model_to_pkl(model, 'account_history_rf_v2')
write_model_to_pkl(modelb, 'behavior_rate_rf_v2')
write_model_to_pkl(model_ens, 'ensemble_rf_v2')
if __name__ == "__main__":
    df = pd.read_csv('data/training_user_tweet_data.csv')
############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#################################################################################################
import pandas as pd
import numpy as np
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
########################################
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
####################################################################################
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# from matplotlib import io
import io
# ipython inline magic shouldn't be needed because all plots are
# being displayed with plt.show() calls
try:
    get_ipython().magic('matplotlib inline')
except NameError:
    pass   # not running inside IPython/Jupyter, so skip the inline magic
import seaborn as sns
sns.set(style="whitegrid", color_codes=True)
import re
import pdb
import pprint
import matplotlib
matplotlib.style.use('seaborn')
from itertools import cycle, combinations
from collections import defaultdict
import copy
import time
import sys
import random
import xlrd
import statsmodels
from io import BytesIO
import base64
from functools import reduce
import traceback
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
#####################################################
class AutoViz_Class():
"""
##############################################################################
############# This is not an Officially Supported Google Product! ######
##############################################################################
#Copyright 2019 Google LLC ######
# ######
#Licensed under the Apache License, Version 2.0 (the "License"); ######
#you may not use this file except in compliance with the License. ######
#You may obtain a copy of the License at ######
# ######
# https://www.apache.org/licenses/LICENSE-2.0 ######
# ######
#Unless required by applicable law or agreed to in writing, software ######
#distributed under the License is distributed on an "AS IS" BASIS, ######
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#####
#See the License for the specific language governing permissions and ######
#limitations under the License. ######
##############################################################################
########### AutoViz Class ######
########### by <NAME> ######
########### AUTOMATICALLY VISUALIZE ANY DATA SET ######
########### Version V0.0.68 1/10/20 ######
##############################################################################
##### AUTOVIZ PERFORMS AUTOMATIC VISUALIZATION OF ANY DATA SET WITH ONE CLICK.
##### Give it any input file (CSV, txt or json) and AV will visualize it.##
##### INPUTS: #####
##### A FILE NAME OR A DATA FRAME AS INPUT. #####
##### AutoViz will visualize any sized file using a statistically valid sample.
##### - COMMA is assumed as default separator in file. But u can change it.##
##### - Assumes first row as header in file but you can change it. #####
##### - First instantiate an AutoViz class to hold output of charts, plots.#
##### - Then call the Autoviz program with inputs as defined below. ###
##############################################################################
"""
def __init__(self):
self.overall = {
'name': 'overall',
'plots': [],
'heading': [],
'subheading':[], #"\n".join(subheading)
'desc': [], #"\n".join(subheading)
'table1_title': "",
'table1': [],
'table2_title': "",
'table2': []
} ### This is for overall description and comments about the data set
self.scatter_plot = {
'name': 'scatter',
'heading': 'Scatter Plot of each Continuous Variable against Target Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for scatter plots ###
self.pair_scatter = {
'name': 'pair-scatter',
'heading': 'Pairwise Scatter Plot of each Continuous Variable against other Continuous Variables',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for pairs of scatter plots ###
self.dist_plot = {
'name': 'distribution',
'heading': 'Distribution Plot of Target Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for distribution plots ###
self.pivot_plot = {
'name': 'pivot',
'heading': 'Pivot Plots of all Continuous Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for pivot plots ###
self.violin_plot = {
'name': 'violin',
'heading': 'Violin Plots of all Continuous Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for violin plots ###
self.heat_map = {
'name': 'heatmap',
'heading': 'Heatmap of all Continuous Variables for target Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for heatmaps ###
self.bar_plot = {
'name': 'bar',
'heading': 'Bar Plots of Average of each Continuous Variable by Target Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ##### This is for description and images for bar plots ###
self.date_plot = {
'name': 'time-series',
'heading': 'Time Series Plots of Two Continuous Variables against a Date/Time Variable',
'plots': [],
'subheading':[],#"\n".join(subheading)
'desc': [] #"\n".join(desc)
} ######## This is for description and images for date time plots ###
def add_plots(self,plotname,X):
"""
This is a simple program to append the input chart to the right variable named plotname
which is an attribute of class AV. So make sure that the plotname var matches an exact
variable name defined in class AV. Otherwise, this will give an error.
"""
if X is None:
### If there is nothing to add, leave it as it is.
            print("Nothing to add. Plot not being added.")
pass
else:
eval('self.'+plotname+'["plots"].append(X)')
def add_subheading(self,plotname,X):
"""
This is a simple program to append the input chart to the right variable named plotname
which is an attribute of class AV. So make sure that the plotname var matches an exact
variable name defined in class AV. Otherwise, this will give an error.
"""
if X is None:
### If there is nothing to add, leave it as it is.
pass
else:
eval('self.'+plotname+'["subheading"].append(X)')
def AutoViz(self, filename, sep=',', depVar='', dfte=None, header=0, verbose=0,
lowess=False,chart_format='svg',max_rows_analyzed=150000,
max_cols_analyzed=30):
"""
##############################################################################
##### AUTOVIZ PERFORMS AUTOMATIC VISUALIZATION OF ANY DATA SET WITH ONE CLICK.
##### Give it any input file (CSV, txt or json) and AV will visualize it.##
##### INPUTS: #####
##### A FILE NAME OR A DATA FRAME AS INPUT. #####
##### AutoViz will visualize any sized file using a statistically valid sample.
##### - max_rows_analyzed = 150000 ### this limits the max number of rows ###
##### that is used to display charts ###
##### - max_cols_analyzed = 30 ### This limits the number of continuous ###
##### vars that can be analyzed ####
##### - COMMA is assumed as default separator in file. But u can change it.##
##### - Assumes first row as header in file but you can change it. #####
##### - First instantiate an AutoViz class to hold output of charts, plots.#
##### - Then call the Autoviz program with inputs as defined below. ###
##############################################################################
##### This is the main calling program in AV. It will call all the load, #####
        #### display and save programs that are currently outside AV. This program ###
#### will draw scatter and other plots for the input data set and then ####
#### call the correct variable name with add_plots function and send in ####
#### the chart created by that plotting program, for example, scatter #####
#### You have to make sure that add_plots function has the exact name of ####
#### the variable defined in the Class AV. If not, this will give an error.##
#### If verbose=0: it does not print any messages and goes into silent mode##
#### This is the default. #####
#### If verbose=1, it will print messages on the terminal and also display###
#### charts on terminal #####
#### If verbose=2, it will print messages but will not display charts, #####
#### it will simply save them. #####
##############################################################################
"""
max_num_cols_analyzed = min(25,int(max_cols_analyzed*0.6))
start_time = time.time()
try:
dft, depVar,IDcols,bool_vars,cats,continuous_vars,discrete_string_vars,date_vars,classes,problem_type = classify_print_vars(
filename,sep,max_rows_analyzed, max_cols_analyzed,
depVar,dfte,header,verbose)
except:
print('Not able to read or load file. Please check your inputs and try again...')
return None
if depVar == None or depVar == '':
##### This is when No dependent Variable is given #######
try:
svg_data = draw_pair_scatters(dft,continuous_vars,problem_type,verbose,chart_format,
depVar,classes,lowess)
self.add_plots('pair_scatter',svg_data)
except Exception as e:
print(e)
print('Could not draw Pair Scatter Plots')
try:
svg_data = draw_distplot(dft, bool_vars+cats+continuous_vars,verbose,chart_format,problem_type)
self.add_plots('dist_plot',svg_data)
except:
print('Could not draw Distribution Plot')
try:
svg_data = draw_violinplot(dft,depVar,continuous_vars,verbose,chart_format,problem_type)
self.add_plots('violin_plot',svg_data)
except:
print('Could not draw Violin Plot')
try:
svg_data = draw_heatmap(dft, continuous_vars, verbose,chart_format, date_vars, depVar)
self.add_plots('heat_map',svg_data)
except:
print('Could not draw Heat Map')
if date_vars != [] and len(continuous_vars)<=max_num_cols_analyzed:
try:
svg_data = draw_date_vars(dft,depVar,date_vars,
continuous_vars,verbose,chart_format,problem_type)
self.add_plots('date_plot',svg_data)
except:
print('Could not draw Date Vars')
if len(cats) <= 10 and len(continuous_vars)<=max_num_cols_analyzed:
try:
svg_data = draw_barplots(dft,cats+bool_vars,continuous_vars, problem_type,
verbose,chart_format,depVar)
self.add_plots('bar_plot',svg_data)
except:
print('Could not draw Bar Plots')
else :
print('Number of Categorical and Continuous Vars exceeds limit, hence no Bar Plots')
print('Time to run AutoViz (in seconds) = %0.3f' %(time.time()-start_time))
if verbose == 1:
print('\n ###################### VISUALIZATION Completed ########################')
else:
if problem_type=='Regression':
############## This is a Regression Problem #################
try:
svg_data = draw_scatters(dft,
continuous_vars,verbose,chart_format,problem_type,depVar,classes,lowess)
self.add_plots('scatter_plot',svg_data)
except Exception as e:
print("Exception Drawing Scatter Plots")
print(e)
traceback.print_exc()
print('Could not draw Scatter Plots')
try:
svg_data = draw_pair_scatters(dft,continuous_vars,problem_type,verbose,chart_format,
depVar,classes,lowess)
self.add_plots('pair_scatter',svg_data)
except:
print('Could not draw Pair Scatter Plots')
try:
if type(depVar) == str:
othernums = [x for x in continuous_vars if x not in [depVar]]
else:
othernums = [x for x in continuous_vars if x not in depVar]
if len(othernums) >= 1:
svg_data = draw_distplot(dft, bool_vars+cats+continuous_vars,verbose,chart_format,problem_type,depVar,classes)
self.add_plots('dist_plot',svg_data)
else:
print('No continuous var in data set: hence no distribution plots')
except:
print('Could not draw Distribution Plots')
try:
svg_data = draw_violinplot(dft,depVar,continuous_vars,verbose,chart_format,problem_type)
self.add_plots('violin_plot',svg_data)
except:
print('Could not draw Violin Plots')
try:
svg_data = draw_heatmap(dft,
continuous_vars, verbose,chart_format, date_vars, depVar,problem_type)
self.add_plots('heat_map',svg_data)
except:
print('Could not draw Heat Maps')
if date_vars != [] and len(continuous_vars)<=max_num_cols_analyzed:
try:
svg_data = draw_date_vars(
dft,depVar,date_vars,continuous_vars,verbose,chart_format,problem_type)
self.add_plots('date_plot',svg_data)
except:
print('Could not draw Time Series plots')
if len(cats) <= 10 and len(continuous_vars) <= max_num_cols_analyzed:
try:
svg_data = draw_pivot_tables(dft,cats+bool_vars,
continuous_vars,problem_type,verbose,chart_format,depVar)
self.add_plots('pivot_plot',svg_data)
except:
print('Could not draw Pivot Charts against Dependent Variable')
try:
svg_data = draw_barplots(dft,cats+bool_vars,continuous_vars,problem_type,verbose,
chart_format,depVar)
self.add_plots('bar_plot',svg_data)
#self.add_plots('bar_plot',None)
print('All Plots done')
except:
print('Could not draw Bar Charts')
else:
print ('Number of Cat and Continuous Vars exceeds %d, hence no Pivot Tables' %max_cols_analyzed)
print('Time to run AutoViz (in seconds) = %0.3f' %(time.time()-start_time))
if verbose == 1:
print('\n ###################### VISUALIZATION Completed ########################')
else :
############ This is a Classification Problem ##################
try:
svg_data = draw_scatters(dft,continuous_vars,
verbose,chart_format,problem_type,depVar, classes,lowess)
self.add_plots('scatter_plot',svg_data)
except Exception as e:
print(e)
traceback.print_exc()
print("Exception Drawing Scatter Plots")
print('Could not draw Scatter Plots')
try:
svg_data = draw_pair_scatters(dft,continuous_vars,
problem_type,verbose,chart_format,depVar,classes,lowess)
self.add_plots('pair_scatter',svg_data)
except:
print('Could not draw Pair Scatter Plots')
try:
if type(depVar) == str:
othernums = [x for x in continuous_vars if x not in [depVar]]
else:
othernums = [x for x in continuous_vars if x not in depVar]
if len(othernums) >= 1:
svg_data = draw_distplot(dft, bool_vars+cats+continuous_vars,verbose,chart_format,
problem_type,depVar,classes)
self.add_plots('dist_plot',svg_data)
else:
print('No continuous var in data set: hence no distribution plots')
except:
print('Could not draw Distribution Plots')
try:
svg_data = draw_violinplot(dft,depVar,continuous_vars,verbose,chart_format,problem_type)
self.add_plots('violin_plot',svg_data)
except:
print('Could not draw Violin Plots')
try:
svg_data = draw_heatmap(dft, continuous_vars,
verbose,chart_format, date_vars, depVar,problem_type,classes)
self.add_plots('heat_map',svg_data)
except:
print('Could not draw Heat Maps')
if date_vars != [] and len(continuous_vars)<=max_num_cols_analyzed:
try:
svg_data = draw_date_vars(dft,depVar,date_vars,
continuous_vars,verbose,chart_format,problem_type)
self.add_plots('date_plot',svg_data)
except:
print('Could not draw Time Series plots')
if len(cats) <= 10 and len(continuous_vars)<=max_num_cols_analyzed:
try:
svg_data = draw_pivot_tables(
dft,cats+bool_vars,continuous_vars,problem_type,verbose,chart_format,depVar,classes)
self.add_plots('pivot_plot',svg_data)
except:
print('Could not draw Pivot Charts against Dependent Variable')
try:
if len(classes) > 2:
svg_data = draw_barplots(dft,cats+bool_vars,continuous_vars,problem_type,
verbose,chart_format,depVar, classes)
self.add_plots('bar_plot',svg_data)
else:
self.add_plots('bar_plot',None)
print('All plots done')
pass
except:
if verbose == 1:
print('Could not draw Bar Charts')
pass
else:
print('Number of Cat and Continuous Vars exceeds %d, hence no Pivot or Bar Charts' %max_cols_analyzed)
print('Time to run AutoViz (in seconds) = %0.3f' %(time.time()-start_time))
if verbose == 1:
print ('\n ###################### VISUALIZATION Completed ########################')
return dft
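# Hedged usage sketch for AutoViz_Class (illustrative only; 'train.csv' and
# 'target' are placeholder names, not files or columns shipped with this code).
def _example_autoviz_usage():
    AV = AutoViz_Class()
    dft = AV.AutoViz('train.csv', sep=',', depVar='target',
                     verbose=0, chart_format='svg',
                     max_rows_analyzed=150000, max_cols_analyzed=30)
    return dft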
######## This is where we store the image data in a dictionary with a list of images #########
def save_image_data(fig, image_count, chart_format):
if chart_format == 'svg':
###### You have to add these lines to each function that creates charts currently ##
imgdata = io.StringIO()
fig.savefig(imgdata, format=chart_format)
imgdata.seek(0)
svg_data = imgdata.getvalue()
return svg_data
else:
### You have to do it slightly differently for PNG and JPEG formats
imgdata = BytesIO()
fig.savefig(imgdata, format=chart_format, bbox_inches='tight', pad_inches=0.0)
imgdata.seek(0)
figdata_png = base64.b64encode(imgdata.getvalue())
return figdata_png
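# Hedged example of how the helper above is used by the plotting functions in
# this module: build a figure, serialise it, and get back either an SVG string
# or base64-encoded PNG bytes depending on chart_format.
def _example_save_image_data():
    fig = plt.figure(figsize=(2, 2))
    plt.plot([0, 1], [0, 1])
    svg_string = save_image_data(fig, 0, 'svg')    # str containing SVG markup
    png_bytes = save_image_data(fig, 1, 'png')     # base64-encoded bytes
    plt.close(fig)
    return svg_string, png_bytes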
#### This module analyzes a dependent Variable and finds out whether it is a
#### Regression or Classification type problem
def analyze_problem_type(train, targ,verbose=0) :
if train[targ].dtype != 'int64' and train[targ].dtype != float :
if len(train[targ].unique()) == 2:
if verbose == 1:
print('''\n################### Binary-Class VISUALIZATION Started #####################''')
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 15:
model_class = 'Multi_Classification'
if verbose == 1:
print('''\n################### Multi-Class VISUALIZATION Started ######################''')
elif train[targ].dtype == 'int64' or train[targ].dtype == float :
if len(train[targ].unique()) == 2:
if verbose == 1:
print('''\n################### Binary-Class VISUALIZATION Started #####################''')
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 15:
model_class = 'Multi_Classification'
if verbose == 1:
print('''\n################### Multi-Class VISUALIZATION Started ######################''')
else:
model_class = 'Regression'
if verbose == 1:
print('''\n################### Regression VISUALIZATION Started ######################''')
elif train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
if verbose == 1:
print('''\n################### Binary-Class VISUALIZATION Started #####################''')
else:
model_class = 'Multi_Classification'
if verbose == 1:
print('''\n################### Multi-Class VISUALIZATION Started ######################''')
elif train[targ].dtype == bool:
model_class = 'Binary_Classification'
if verbose == 1:
print('''\n################### Binary-Class VISUALIZATION Started ######################''')
elif train[targ].dtype == 'int64':
if len(train[targ].unique()) == 2:
if verbose == 1:
print('''\n################### Binary-Class VISUALIZATION Started #####################''')
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 25:
model_class = 'Multi_Classification'
if verbose == 1:
print('''\n################### Multi-Class VISUALIZATION Started ######################''')
else:
model_class = 'Regression'
if verbose == 1:
print('''\n################### Regression VISUALIZATION Started ######################''')
else :
if verbose == 1:
print('''\n###################### REGRESSION VISUALIZATION Started #####################''')
model_class = 'Regression'
return model_class
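# Hedged sanity-check sketch for the heuristic above: a two-level integer
# column is treated as binary classification, while a float column with many
# distinct values falls through to regression.
def _example_analyze_problem_type():
    toy = pd.DataFrame({'y_bin': [0, 1] * 10,
                        'y_cont': np.arange(20) / 3.0})
    kind_bin = analyze_problem_type(toy, 'y_bin')     # 'Binary_Classification'
    kind_cont = analyze_problem_type(toy, 'y_cont')   # 'Regression'
    return kind_bin, kind_cont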
# Pivot Tables are generally meant for Categorical Variables on the axes
# and a Numeric Column (typically the Dep Var) as the "Value" aggregated by Sum.
# Let's do some pivot tables to capture some meaningful insights
def draw_pivot_tables(dft,cats,nums,problem_type,verbose,chart_format,depVar='', classes=None):
cats = list(set(cats))
dft = dft[:]
cols = 2
cmap = plt.get_cmap('jet')
#### For some reason, the cmap colors are not working #########################
colors = cmap(np.linspace(0, 1, len(cats)))
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgkbyr')
colormaps = ['summer', 'rainbow','viridis','inferno','magma','jet','plasma']
N = len(cats)
if N==0:
print('No categorical or boolean vars in data set. Hence no pivot plots...')
return None
noplots = copy.deepcopy(N)
    #### You can set the number of subplots per row and the number of categories to display here
displaylimit = 20
categorylimit = 10
imgdata_list = []
width_size = 15
height_size = 6
if problem_type == 'Regression' or depVar==None or depVar=='' or depVar==[]:
image_count = 0
###### This is meant for Regression problems where the DepVar is of Continuous Type
if noplots%cols == 0:
rows = noplots/cols
else:
rows = (noplots/cols)+1
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.suptitle('Bar Plots of each Continuous Var by %s' %depVar, fontsize=20,y=1.08)
k = 1
for i,color in zip(range(len(cats)), colors) :
if len(dft[cats[i]].unique()) >= categorylimit:
plt.subplot(rows,cols,k)
ax1 = plt.gca()
dft.groupby(cats[i])[depVar].mean().sort_values(ascending=False)[:displaylimit].plot(kind='bar',
                    title='Average %s by %s (Descending) ' %(depVar, cats[i]),ax=ax1,
colormap=random.choice(colormaps))
for p in ax1.patches:
ax1.annotate(str(round(p.get_height(),2)),(round(p.get_x()*1.01,2),round(p.get_height()*1.01,2)))
k += 1
else:
plt.subplot(rows,cols,k)
ax1 = plt.gca()
dft.groupby(cats[i])[depVar].mean().sort_values(ascending=False)[:displaylimit].plot(kind='bar',
title='Average %s by %s (Descending) ' % (depVar, cats [i]), ax=ax1,
colormap=random.choice(colormaps))
for p in ax1.patches:
ax1.annotate(str(round(p.get_height(),2)),(round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
k += 1
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
else:
###### This is meant for Classification problems where the DepVar is an Object type
image_count = 0
chunksize = 20
N = len(nums)
lst=[]
noplots=int((N**2-N)/2)
dicti = {}
if len(nums) == 1:
pass
ls_cats = []
for each_cat in cats:
if len(dft[dft[depVar]==classes[0]][each_cat].value_counts()) == 1:
pass
else:
ls_cats.append(each_cat)
if len(ls_cats) <= 2:
cols = 2
noplots = len(nums)
rows = int((noplots/cols)+0.99)
counter = 1
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.suptitle('Plots of each Continuous Var by %s' %depVar, fontsize=20,y=1.08)
plt.subplots_adjust(hspace=0.5)
for eachpred,color3 in zip(nums,colors):
### Be very careful with the next line. It should be singular "subplot" ##
##### Otherwise, if you use the plural version "subplots" it has a different meaning!
plt.subplot(rows,cols,counter)
ax1 = plt.gca()
                dft[[eachpred,depVar]].groupby(depVar).mean()[:chunksize].plot(kind='bar', ax=ax1, color=color3)
for p in ax1.patches:
ax1.annotate(str(round(p.get_height(),2)), (round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
ax1.set_title('Average of %s by %s' %(eachpred,depVar))
plt.legend(loc='best')
counter += 1
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
else:
N = len(ls_cats)
combos = combinations(ls_cats,2)
noplots = int((N**2-N)/2)
rows = int((noplots/cols)+0.99)
num_plots = len(classes)*noplots/2.0
if verbose == 1:
print('No. of Bar Plots = %s' %num_plots)
rows = int((num_plots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
target_vars = dft[depVar].unique()
if len(classes) == 2:
func = 'np.mean'
func_keyword = 'Average'
else:
func = 'len'
func_keyword = 'Number'
fig.suptitle('Plots of %s of each Continuous Var by %s' %(func_keyword,depVar),fontsize=20,y=1.08)
plotcounter = 1
if dft[depVar].dtype == object:
dft[depVar] = dft[depVar].factorize()[0]
for (var1, var2) in combos:
if len(classes) == 2:
plt.subplot(rows, cols, plotcounter)
ax1 = plt.gca()
try:
#pd.pivot_table(data=dft,columns=var1, index=var2, values=depVar, aggfunc=eval(func))
if func == 'np.mean':
dft[[var1,var2,depVar]].groupby([var1,var2])[depVar].mean().sort_values()[
:chunksize].plot(
kind='bar', colormap=random.choice(colormaps),ax=ax1)
else:
dft[[var1,var2,depVar]].groupby([var1,var2])[depVar].size().sort_values()[
:chunksize].plot(
kind='bar', colormap=random.choice(colormaps),ax=ax1)
ax1.set_title('Percentage of %s grouped by %s and %s' %(depVar,var1,var2))
except:
dft.pivot(columns=var1, values=var2).plot(kind='bar', colormap='plasma',ax=ax1)
plt.xlabel(var2)
plt.ylabel(depVar)
ax1.set_title('Percentage of %s grouped by %s and %s' %(depVar,var1,var2))
plt.legend()
plotcounter += 1
else:
#### Fix color in all Scatter plots using this trick:
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgkbyr')
for target_var, color_val,class_label in zip(target_vars, colors,classes):
plt.subplot(rows, cols, plotcounter)
ax1 = plt.gca()
dft_target = dft[dft[depVar]==target_var]
try:
pd.pivot_table(data=dft_target,columns=var1, index=var2, values=depVar,
aggfunc=eval(func)).plot(kind='bar', colormap='plasma',ax=ax1)
except:
dft.pivot(columns=var1, values=var2).plot(kind='bar', colormap='plasma',ax=ax1)
plt.xlabel(var2)
plt.ylabel(target_var)
ax1.set_title('%s of %s grouped by %s and %s' %(func_keyword,class_label,var2,var1))
plt.legend()
plotcounter += 1
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
if verbose == 1:
plt.show();
####### End of Pivot Plotting #############################
return imgdata_list
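# Hedged toy sketch of the aggregation pattern used above: grouping by two
# categoricals and averaging the target is equivalent to the commented-out
# pd.pivot_table call, just kept in long form so it plots directly as bars.
def _example_two_way_aggregation():
    toy = pd.DataFrame({'cat1': ['a', 'a', 'b', 'b'],
                        'cat2': ['x', 'y', 'x', 'y'],
                        'target': [1, 0, 1, 1]})
    long_form = toy.groupby(['cat1', 'cat2'])['target'].mean().sort_values()
    wide_form = pd.pivot_table(toy, index='cat2', columns='cat1',
                               values='target', aggfunc=np.mean)
    return long_form, wide_form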
# SCATTER PLOTS ARE USEFUL FOR COMPARING NUMERIC VARIABLES
def draw_scatters(dfin,nums,verbose,chart_format,problem_type,dep=None, classes=None, lowess=False):
dft = dfin[:]
##### we are going to modify dfin and classes, so we are making copies to make changes
classes = copy.deepcopy(classes)
colortext = 'brymcgkbyrcmgkbyrcmgkbyrcmgkbyr'
if len(classes) == 0:
leng = len(nums)
else:
leng = len(classes)
colors = cycle(colortext[:leng])
#imgdata_list = defaultdict(list)
imgdata_list = []
if dfin.shape[0] >= 10000 or lowess == False:
lowess = False
x_est = None
transparent = 0.6
bubble_size = 80
else:
if verbose == 1:
print('Using Lowess Smoothing. This might take a few minutes for large data sets...')
lowess = True
x_est = None
transparent = 0.6
bubble_size = 100
if verbose == 1:
x_est = np.mean
N = len(nums)
cols = 2
width_size = 15
height_size = 4
if dep == None or dep == '':
image_count = 0
##### This is when no Dependent Variable is given ###
### You have to do a Pair-wise Scatter Plot of all Continuous Variables ####
combos = combinations(nums, 2)
noplots = int((N**2-N)/2)
print('Number of Scatter Plots = %d' %(noplots+N))
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
for (var1,var2), plotcounter, color_val in zip(combos, range(1,noplots+1), colors):
### Be very careful with the next line. It should be singular "subplot" ##
##### Otherwise, if you use the plural version "subplots" it has a different meaning!
plt.subplot(rows,cols,plotcounter)
if lowess:
sns.regplot(x=dft[var1], y = dft[var2], lowess=lowess, color=color_val, ax=plt.gca())
else:
                sns.scatterplot(x=dft[var1], y=dft[var2], ax=plt.gca(), palette='dark',color=color_val)
plt.xlabel(var1)
plt.ylabel(var2)
fig.suptitle('Pair-wise Scatter Plot of all Continuous Variables',fontsize=20,y=1.08)
fig.tight_layout();
if verbose == 1:
plt.show();
#### Keep it at the figure level###
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
elif problem_type == 'Regression':
image_count = 0
####### This is a Regression Problem so it requires 2 steps ####
####### First, plot every Independent variable against the Dependent Variable ###
noplots = len(nums)
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
for num, plotcounter, color_val in zip(nums, range(1,noplots+1), colors):
### Be very careful with the next line. It should be singular "subplot" ##
##### Otherwise, if you use the plural version "subplots" it has a different meaning!
plt.subplot(rows,cols,plotcounter)
if lowess:
sns.regplot(x=dft[num], y = dft[dep], lowess=lowess, color=color_val, ax=plt.gca())
else:
sns.scatterplot(x=dft[num], y=dft[dep], ax=plt.gca(), palette='dark',color=color_val)
plt.xlabel(num)
plt.ylabel(dep)
fig.suptitle('Scatter Plot of each Continuous Variable against Target Variable', fontsize=20,y=1.08)
fig.tight_layout();
if verbose == 1:
plt.show();
#### Keep it at the figure level###
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
else:
####### This is a Classification Problem #### You need to plot a strip plot ####
####### First, Plot each Continuous variable against the Target Variable ###
if len(dft) < 1000:
jitter = 0.05
else:
jitter = 0.5
image_count = 0
noplots = len(nums)
rows = int((noplots/cols)+0.99)
### Be very careful with the next line. we have used the singular "subplot" ##
fig = plt.figure(figsize=(width_size,rows*height_size))
for num, plotc, color_val in zip(nums, range(1,noplots+1),colors):
####Strip plots are meant for categorical plots so x axis must always be depVar ##
plt.subplot(rows,cols,plotc)
sns.stripplot(x=dft[dep], y=dft[num], ax=plt.gca(), jitter=jitter)
plt.suptitle('Scatter Plot of Continuous Variable vs Target (jitter=%0.2f)' %jitter, fontsize=20,y=1.08)
fig.tight_layout();
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
####### End of Scatter Plots ######
return imgdata_list
# PAIR SCATTER PLOTS ARE NEEDED ONLY FOR CLASSIFICATION PROBLEMS IN NUMERIC VARIABLES
def draw_pair_scatters(dfin,nums,problem_type, verbose,chart_format, dep=None, classes=None, lowess=False):
"""
### This is where you plot a pair-wise scatter plot of Independent Variables against each other####
"""
dft = dfin[:]
if len(nums) <= 1:
return
classes = copy.deepcopy(classes)
cols = 2
colortext = 'brymcgkbyrcmgkbyrcmgkbyrcmgkbyr'
colors = cycle(colortext)
imgdata_list = list()
width_size = 15
height_size = 4
N = len(nums)
if dfin.shape[0] >= 10000 or lowess == False:
x_est = None
transparent =0.7
bubble_size = 80
elif lowess:
print('Using Lowess Smoothing. This might take a few minutes for large data sets...')
x_est = None
transparent =0.7
bubble_size = 100
else:
x_est = None
transparent =0.7
bubble_size = 100
if verbose == 1:
x_est = np.mean
if problem_type == 'Regression' or problem_type == 'Clustering':
image_count = 0
### Second, plot a pair-wise scatter plot of Independent Variables against each other####
combos = combinations(nums, 2)
noplots = int((N**2-N)/2)
print('Number of All Scatter Plots = %d' %(noplots+N))
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
for (var1,var2), plotcounter,color_val in zip(combos, range(1,noplots+1),colors):
### Be very careful with the next line. It should be singular "subplot" ##
##### Otherwise, if you use the plural version "subplots" it has a different meaning!
plt.subplot(rows,cols,plotcounter)
if lowess:
sns.regplot(x=dft[var1], y=dft[var2], lowess=lowess, color=color_val, ax=plt.gca())
else:
sns.scatterplot(x=dft[var1], y=dft[var2], ax=plt.gca(), palette='dark',color=color_val)
plt.xlabel(var1)
plt.ylabel(var2)
fig.suptitle('Pair-wise Scatter Plot of all Continuous Variables', fontsize=20,y=1.08)
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
if verbose == 1:
plt.show();
else:
########## This is for Classification problems ##########
if len(classes) <= 1:
leng = 1
else:
leng = len(classes)
colors = cycle(colortext[:leng])
image_count = 0
#cmap = plt.get_cmap('gnuplot')
#cmap = plt.get_cmap('Set1')
cmap = plt.get_cmap('Paired')
combos = combinations(nums, 2)
combos_cycle = cycle(combos)
noplots = int((N**2-N)/2)
print('Total Number of Scatter Plots = %d' %(noplots+N))
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
### Be very careful with the next line. we have used the plural "subplots" ##
## In this case, you have ax as an array and you have to use (row,col) to get each ax!
target_vars = dft[dep].unique()
number = len(target_vars)
#colors = [cmap(i) for i in np.linspace(0, 1, number)]
for (var1,var2), plotc in zip(combos, range(1,noplots+1)):
for target_var, color_val, class_label in zip(target_vars, colors, classes):
#Fix color in all scatter plots for each class the same using this trick
color_array = np.empty(0)
value = dft[dep]==target_var
dft['color'] = np.where(value==True, color_val, 'r')
color_array = np.hstack((color_array, dft[dft['color']==color_val]['color'].values))
plt.subplot(rows, cols, plotc)
plt.scatter(x=dft.loc[dft[dep]==target_var][var1], y=dft.loc[dft[dep]==target_var][var2],
label=class_label, color=color_val, alpha=transparent)
plt.xlabel(var1)
plt.ylabel(var2)
plt.legend()
fig.suptitle('Scatter Plot of each Continuous Variable against Target Variable', fontsize=20,y=1.08)
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
if verbose == 1:
plt.show();
####### End of Pair Scatter Plots ######
return imgdata_list
#Bar Plots are for 2 Categoricals and One Numeric (usually Dep Var)
def draw_barplots(dft,cats,conti,problem_type,verbose,chart_format,dep='', classes=None):
#### Category limit within a variable ###
cats = cats[:]
cat_limit = 10
width_size = 15
height_size = 4
conti = list_difference(conti,dep)
#### Remove Floating Point Categorical Vars from this list since they Error when Bar Plots are drawn
cats = [x for x in cats if dft[x].dtype != float]
dft = dft[:]
N = len(cats)
if N==0:
print('No categorical or boolean vars in data set. Hence no bar charts.')
return None
cmap = plt.get_cmap('jet')
### Not sure why the cmap doesn't work and gives an error in some cases #################
colors = cmap(np.linspace(0, 1, len(conti)))
colors = cycle('gkbyrcmgkbyrcmgkbyrcmgkbyr')
colormaps = ['plasma','viridis','inferno','magma']
imgdata_list = list()
nums = [x for x in list (dft) if dft[x].dtype=='float64' and x not in [dep]+cats]
for k in range(len(cats)):
image_count = 0
N = len(conti)
order= dft[cats[k]].unique().tolist()
nocats = len(order)
if nocats >= 100:
chunksize = 25
cols = 1
else:
if nocats >= 25:
chunksize = 15
cols = 2
else:
chunksize = cat_limit
cols = 2
if len(cats) == 0:
noplots = len(conti)*cols
else:
noplots=len(conti)*len(cats)*cols
if cols==2:
if noplots%cols == 0:
rows = noplots/cols
else:
rows = (noplots/cols)+1
else:
rows = copy.deepcopy(noplots)
if rows >= 50:
rows = 50
stringlimit = 25
if dep==None or dep == '':
########## This is when no Dependent Variable is Given ######
fig = plt.figure(figsize=(width_size,rows*height_size))
kadd = 1
for each_conti,color in zip(conti,colors):
plt.subplot(rows,cols,kadd)
ax1 = plt.gca()
dft.groupby(cats[k])[each_conti].mean().sort_values(ascending=False)[:chunksize].plot(
kind='bar',ax=ax1, color=color)
ax1.set_title('Average %s by %s (Descending)' %(each_conti, cats[k]))
if dft[cats[k]].dtype == object:
labels = dft.groupby(cats[k])[each_conti].mean().sort_values(
ascending=False)[:chunksize].index.str[:stringlimit].tolist()
ax1.set_xticklabels(labels, rotation = 45, ha="right")
kadd += 1
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
                ### This second (ascending) plot is only needed when the category has more levels than chunksize
if dft[cats[k]].nunique() > chunksize:
plt.subplot(rows,cols,kadd)
ax1 = plt.gca()
dft.groupby(cats[k])[each_conti].mean().sort_values(
ascending=True)[:chunksize].plot(kind='bar',ax=ax1, color=color)
if dft[cats[k]].dtype == object:
labels = dft.groupby(cats [k])[each_conti].mean().sort_values(
ascending=True)[:chunksize].index.str[:stringlimit].tolist()
ax1.set_xticklabels(labels, rotation = 45, ha="right")
ax1.set_title('Average %s by %s (Ascending)' %(each_conti,cats[k]))
kadd += 1
fig.tight_layout();
if verbose == 1:
plt.show();
###########
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
elif problem_type == 'Regression':
########## This is for Regression Problems only ######
fig = plt.figure(figsize=(width_size,rows*height_size))
N = len(conti)
noplots=int((N**2-N)/4)
kadd = 1
for each_conti,color in zip(conti,colors):
if len(dft[cats[k]].value_counts()) < 20:
plt.subplot(rows,cols,kadd)
ax1 = plt.gca()
dft.groupby(cats[k])[each_conti].mean().sort_values(
ascending=False)[:chunksize].plot(kind='bar',ax=ax1,
colormap=random.choice(colormaps))
for p in ax1.patches:
ax1.annotate(str(round(p.get_height(),2)),(round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
                    if dft[cats[k]].dtype == object:
                        labels = dft.groupby(cats[k])[each_conti].mean().sort_values(
                            ascending=False)[:chunksize].index.str[:stringlimit].tolist()
                        ax1.set_xticklabels(labels, rotation = 45, ha="right")
ax1.set_title('Average %s by %s (Descending)' %(each_conti,cats[k]))
kadd += 1
else:
                    ### This branch only runs when the category has 20 or more levels
plt.subplot(rows,cols,kadd)
ax1 = plt.gca()
dft.groupby(cats[k])[each_conti].mean().sort_values(
ascending=True)[:chunksize].plot(kind='bar',ax=ax1,
colormap=random.choice(colormaps))
if dft[cats[k]].dtype == object:
labels = dft.groupby(cats[k])[each_conti].mean().sort_values(
ascending= True)[:chunksize].index.str[:stringlimit].tolist()
ax1.set_xticklabels(labels, rotation = 45, ha="right")
ax1.set_title('Mean %s by %s (Ascending)' %(each_conti,cats[k]))
kadd += 1
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
elif verbose == 1:
plt.show();
else:
########## This is for Classification Problems ######
image_count = 0
target_vars = dft[dep].unique()
noplots = len(conti)*cols
kadd = 1
fig = plt.figure(figsize=(width_size,rows*height_size))
if len(nums) == 0:
for each_conti,color3 in zip(conti,colors):
plt.subplot(rows,cols,kadd)
ax1 = plt.gca()
dft.groupby(cats[k])[each_conti].mean().sort_values(
ascending=False)[:chunksize].plot(kind='bar',ax=ax1,
color=color3)
ax1.set_title('Average %s by %s (Descending)' %(each_conti,cats[k]))
kadd += 1
else:
conti = copy.deepcopy(nums)
for each_conti in conti:
plt.subplot(rows,cols,kadd)
ax1 = plt.gca()
dft.groupby([dep, cats[k]])[each_conti].mean().sort_values(
ascending=False)[:chunksize].unstack().plot(kind='bar',ax=ax1,
colormap=random.choice(colormaps))
ax1.set_title('Average %s by %s (Descending)' %(each_conti,cats[k]))
kadd += 1
fig.tight_layout();
fig.suptitle('Bar Plots of Continuous Variables by %s' %cats[k], fontsize=20, y=1.08)
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
elif verbose == 1:
plt.show();
return imgdata_list
############## End of Bar Plotting ##########################################
##### Draw a Heatmap using Pearson Correlation #########################################
def draw_heatmap(dft, conti, verbose,chart_format,datevars=[], dep=None,
modeltype='Regression',classes=None):
    ### Test if this is a time series data set, then difference the continuous vars to find
    ### if they have true correlation to the Dependent Var. Otherwise, leave them as is
width_size = 3
height_size = 2
if len(conti) <= 1:
return
if isinstance(dft.index, pd.DatetimeIndex) :
dft = dft[:]
timeseries_flag = True
pass
else:
dft = dft[:]
try:
dft.index = pd.to_datetime(dft.pop(datevars[0]),infer_datetime_format=True)
timeseries_flag = True
except:
if verbose == 1 and len(datevars) > 0:
print('No date vars could be found or %s could not be indexed.' %datevars)
elif verbose == 1 and len(datevars) == 0:
print('No date vars could be found in data set')
timeseries_flag = False
# Add a column: the color depends on target variable but you can use whatever function
imgdata_list = list()
if modeltype != 'Regression':
########## This is for Classification problems only ###########
if dft[dep].dtype == object or dft[dep].dtype == np.int64:
dft[dep] = dft[dep].factorize()[0]
image_count = 0
N = len(conti)
target_vars = dft[dep].unique()
fig = plt.figure(figsize=(min(N*width_size,20),min(N*height_size,20)))
plotc = 1
#rows = len(target_vars)
rows = 1
cols = 1
if timeseries_flag:
dft_target = dft[[dep]+conti].diff()
else:
dft_target = dft[:]
dft_target[dep] = dft[dep]
corr = dft_target.corr()
plt.subplot(rows, cols, plotc)
ax1 = plt.gca()
sns.heatmap(corr, annot=True,ax=ax1)
plotc += 1
if timeseries_flag:
plt.title('Time Series: Heatmap of all Differenced Continuous vars for target = %s' %dep)
else:
plt.title('Heatmap of all Continuous Variables for target = %s' %dep)
fig.tight_layout();
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
else:
        ### This is for Regression problems and those with no dependent variable ##
image_count = 0
if dep == None or dep == '':
pass
else:
conti += [dep]
dft_target = dft[conti]
if timeseries_flag:
dft_target = dft_target.diff().dropna()
else:
dft_target = dft_target[:]
N = len(conti)
fig = plt.figure(figsize=(min(20,N*width_size),min(20,N*height_size)))
corr = dft_target.corr()
sns.heatmap(corr, annot=True)
if timeseries_flag:
plt.title('Time Series Data: Heatmap of Differenced Continuous vars including target = %s' %dep)
else:
plt.title('Heatmap of all Continuous Variables including target = %s' %dep)
fig.tight_layout();
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
return imgdata_list
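# Hedged sketch of why the heatmap differences time-series columns above: two
# trending series look strongly correlated in levels, but differencing shows
# whether their period-to-period changes are actually related.
def _example_diff_correlation():
    idx = pd.date_range('2020-01-01', periods=100, freq='D')
    trend = np.arange(100, dtype=float)
    toy = pd.DataFrame({'a': trend + np.random.rand(100),
                        'b': 2 * trend + np.random.rand(100)}, index=idx)
    level_corr = toy.corr()                    # close to 1 because of the trend
    diff_corr = toy.diff().dropna().corr()     # much weaker once differenced
    return level_corr, diff_corr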
############# End of Heat Maps ##############
##### Draw the Distribution of each variable using Distplot
##### Must do this only for Continuous Variables
def draw_distplot(dft, conti,verbose,chart_format,problem_type,dep=None, classes=None):
#### Since we are making changes to dft and classes, we will be making copies of it here
conti = list(set(conti))
dft = dft[:]
classes = copy.deepcopy(classes)
colors = cycle('brycgkbyrcmgkbyrcmgkbyrcmgkbyr')
cols = 2
imgdata_list = list()
width_size = 15 #### this is to control the width of chart as well as number of categories to display
height_size = 5
gap = 0.4 #### This controls the space between rows ######
if dep==None or dep=='' or problem_type == 'Regression':
image_count = 0
transparent = 0.7
######### This is for cases where there is No Target or Dependent Variable ########
if problem_type == 'Regression':
if isinstance(dep,list):
conti += dep
else:
conti += [dep]
noplots = len(conti)
rows = int((noplots/cols)+0.99 )
### Be very careful with the next line. we have used the plural "subplots" ##
## In this case, you have ax as an array and you have to use (row,col) to get each ax!
fig = plt.figure(figsize=(width_size,rows*height_size))
        fig.subplots_adjust(hspace=gap) ### This controls the space between rows
for k, color2 in zip(range(noplots),colors):
#print('Iteration %s' %k)
conti_iter = conti[k]
if dft[conti_iter].dtype == float or dft[conti_iter].dtype==np.int32 or dft[conti_iter].dtype==np.int64:
if dft[conti_iter].nunique() <= 25:
chart_type = 'bar'
else:
chart_type = 'float'
elif dft[conti_iter].dtype == object and dft[conti_iter].nunique() <= 25:
chart_type = 'bar'
else:
chart_type = 'bar'
if chart_type == 'float':
if dft[conti_iter].min() == 0.0:
hist_bins = 25
elif dft[conti_iter].max()/dft[conti_iter].min() > 50 or dft[conti_iter].max()-dft[conti_iter].min() > 50:
hist_bins = 50
else:
hist_bins = 30
plt.subplot(rows, cols, k+1)
ax1 = plt.gca()
#ax2 = ax1.twiny()
if len(dft[dft[conti_iter]<0]) > 0:
### If there are simply neg numbers in the column, better to skip the Log...
#dft[conti_iter].hist(bins=hist_bins, ax=ax1, color=color2,label='%s' %conti_iter,
# )
sns.distplot(dft[conti_iter],
hist=False, kde=True,label='%s' %conti_iter,
bins=hist_bins, ax= ax1,hist_kws={'alpha':transparent},
color=color2)
ax1.legend(loc='upper right')
ax1.set_xscale('linear')
ax1.set_xlabel('Linear Scale')
#ax1.set_title('%s Distribution (No Log transform since negative numbers)' %conti_iter,
# loc='center',y=1.18)
elif len(dft[dft[conti_iter]==0]) > 0:
### If there are only zeros numbers in the column, you can do log transform by adding 1...
#dft[conti_iter].hist(bins=hist_bins, ax=ax1, color=color2,label='before log transform'
# )
sns.distplot(dft[conti_iter],
hist=False, kde=True,label='%s' %conti_iter,hist_kws={'alpha':transparent},
bins=hist_bins, ax= ax1,
color=color2)
#np.log(dft[conti_iter]+1).hist(bins=hist_bins, ax=ax2, color=next(colors),
# alpha=transparent, label='after log transform',bw_method=3)
#sns.distplot(np.log10(dft[conti_iter]+1),
# hist=False, kde=True,hist_kws={'alpha':transparent},
# bins=hist_bins, ax= ax2,label='after potential log transform',
# color=next(colors))
ax1.legend(loc='upper right')
#ax2.legend(loc='upper left')
ax1.set_xscale('linear')
#ax2.set_xscale('log')
ax1.set_xlabel('Linear Scale')
#ax2.set_xlabel('Log Scale')
#ax1.set_title('%s Distribution and potential Log Transform' %conti_iter, loc='center',y=1.18)
else:
### if there are no zeros and no negative numbers then it is a clean data ########
#dft[conti_iter].hist(bins=hist_bins, ax=ax1, color=color2,label='before log transform',
# bw_method=3)
sns.distplot(dft[conti_iter],
hist=False, kde=True,label='%s' %conti_iter,
bins=hist_bins, ax= ax1,hist_kws={'alpha':transparent},
color=color2)
#np.log(dft[conti_iter]).fillna(0).hist(bins=hist_bins, ax=ax2, color=next(colors),
# alpha=transparent, label='after log transform',bw_method=3)
#sns.distplot(np.log10(dft[conti_iter]),
# hist=False, kde=True,label='after potential log transform',
# bins=hist_bins, ax= ax2,hist_kws={'alpha':transparent},
# color=next(colors))
ax1.legend(loc='upper right')
#ax2.legend(loc='upper left')
ax1.set_xscale('linear')
#ax2.set_xscale('log')
ax1.set_xlabel('Linear Scale')
#ax2.set_xlabel('Log Scale')
#ax1.set_title('%s Distribution and potential Log Transform' %conti_iter, loc='center',y=1.18)
else:
plt.subplot(rows, cols, k+1)
ax1 = plt.gca()
kwds = {"rotation": 45, "ha":"right"}
labels = dft[conti_iter].value_counts()[:width_size].index.tolist()
dft[conti_iter].value_counts()[:width_size].plot(kind='bar',ax=ax1,label='%s' %conti_iter)
ax1.set_xticklabels(labels,**kwds);
ax1.set_title('Distribution of %s (top %d categories only)' %(conti_iter,width_size))
#fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
fig.suptitle('Histograms (KDE plots) of all Continuous Variables', fontsize=20, y=1.08)
if verbose == 1:
plt.show();
else:
######### This is for Classification problems only ########
image_count = 0
transparent = 0.7
noplots = len(conti)
binsize = 30
k = 0
rows = int((noplots/cols)+0.99 )
### Be very careful with the next line. we have used the plural "subplots" ##
## In this case, you have ax as an array and you have to use (row,col) to get each ax!
fig = plt.figure(figsize=(width_size,rows*height_size))
target_vars = dft[dep].unique()
if type(classes[0])==int:
classes = [str(x) for x in classes]
label_limit = len(target_vars)
legend_flag = 1
for each_conti,k in zip(conti,range(len(conti))):
if dft[each_conti].isnull().sum() > 0:
dft[each_conti].fillna(0, inplace=True)
plt.subplot(rows, cols, k+1)
ax1 = plt.gca()
if dft[each_conti].dtype==object:
kwds = {"rotation": 45, "ha":"right"}
labels = dft[each_conti].value_counts()[:width_size].index.tolist()
conti_df = dft[[dep,each_conti]].groupby([dep,each_conti]).size().nlargest(width_size).reset_index(name='Values')
pivot_df = conti_df.pivot(index=each_conti, columns=dep, values='Values')
row_ticks = dft[dep].unique().tolist()
color_list = []
for i in range(len(row_ticks)):
color_list.append(next(colors))
#print('color list = %s' %color_list)
pivot_df.loc[:,row_ticks].plot.bar(stacked=True, color=color_list, ax=ax1)
#dft[each_conti].value_counts()[:width_size].plot(kind='bar',ax=ax1,
# label=class_label)
#ax1.set_xticklabels(labels,**kwds);
ax1.set_title('Distribution of %s (top %d categories only)' %(each_conti,width_size))
else:
for target_var, color2, class_label in zip(target_vars,colors,classes):
try:
if legend_flag <= label_limit:
sns.distplot(dft.loc[dft[dep]==target_var][each_conti],
hist=False, kde=True,
#dft.ix[dft[dep]==target_var][each_conti].hist(
bins=binsize, ax= ax1,
label=target_var, color=color2)
ax1.set_title('Distribution of %s' %each_conti)
legend_flag += 1
else:
sns.distplot(dft.loc[dft[dep]==target_var][each_conti],bins=binsize, ax= ax1,
label=target_var, hist=False, kde=True,
color=color2)
legend_flag += 1
ax1.set_title('Normed Histogram of %s' %each_conti)
except:
pass
ax1.legend(loc='best')
fig.tight_layout();
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
fig.suptitle('Histograms (KDE plots) of all Continuous Variables', fontsize=20, y=1.08)
###### Now draw the distribution of the target variable in Classification only ####
if problem_type.endswith('Classification'):
col = 2
row = 1
fig, (ax1,ax2) = plt.subplots(row, col)
fig.set_figheight(5)
fig.set_figwidth(15)
fig.suptitle('%s : Distribution of Target Variable' %dep, fontsize=20,y=1.08)
#fig.subplots_adjust(hspace=0.3) ### This controls the space betwen rows
#fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
        ###### Percentage Distribution is first #################
dft[dep].value_counts(1).plot(ax=ax1,kind='bar')
if dft[dep].dtype == object:
dft[dep] = dft[dep].factorize()[0]
for p in ax1.patches:
ax1.annotate(str(round(p.get_height(),2)), (round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
ax1.set_title('Percentage Distribution of Target = %s' %dep, fontsize=10, y=1.05)
#### Freq Distribution is next ###########################
dft[dep].value_counts().plot(ax=ax2,kind='bar')
for p in ax2.patches:
ax2.annotate(str(round(p.get_height(),2)), (round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
ax2.set_xticks(dft[dep].unique().tolist())
ax2.set_xticklabels(classes, rotation = 45, ha="right")
ax2.set_title('Freq Distribution of Target Variable = %s' %dep, fontsize=10,y=1.05)
else:
############################################################################
width_size = 5
height_size = 5
fig = plt.figure(figsize=(width_size,height_size))
dft[dep].plot(kind='hist')
fig.suptitle('%s : Distribution of Target Variable' %dep, fontsize=20,y=1.05)
fig.tight_layout();
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
####### End of Distplots ###########
return imgdata_list
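#### The scale decision used above, factored out as a tiny illustrative helper (added for
#### readability only -- it is not called anywhere in this module):
#### negatives present -> stay linear; zeros present -> log1p is safe; all positive -> plain log.
def _suggest_xscale(series):
    if (series < 0).any():
        return 'linear'
    elif (series == 0).any():
        return 'log1p'
    else:
        return 'log'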
##### Standardize all the variables in One step. But be careful !
#### All the variables must be numeric for this to work !!
def draw_violinplot(df, dep, nums,verbose,chart_format, modeltype='Regression'):
df = df[:]
number_in_each_row = 8
imgdata_list = list()
width_size = 15
height_size = 4
if type(dep) == str:
othernums = [x for x in nums if x not in [dep]]
else:
othernums = [x for x in nums if x not in dep]
if modeltype == 'Regression' or dep == None or dep == '':
image_count = 0
if modeltype == 'Regression':
nums = nums + [dep]
numb = len(nums)
if numb > number_in_each_row:
rows = int(numb/number_in_each_row)+1
else:
rows = 1
for row in range(rows):
first_10 = number_in_each_row*row
next_10 = first_10 + number_in_each_row
num_10 = nums[first_10:next_10]
df10 = df[num_10]
df_norm = (df10 - df10.mean())/df10.std()
            fig = plt.figure(figsize=(min(width_size*len(num_10),width_size),min(height_size,height_size*len(num_10))))
ax = fig.gca()
#ax.set_xticklabels (df.columns, tolist(), size=10)
sns.violinplot(data=df_norm, orient='v', fliersize=5, scale='width',
                        linewidth=3, notch=False, saturation=0.5, ax=ax, inner='box')
fig.suptitle('Violin Plot of all Continuous Variables', fontsize=20,y=1.08)
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
else :
###### This is for Classification problems only ##########################
image_count = 0
classes = df[dep].factorize()[1].tolist()
######################### Add Box plots here ##################################
numb = len(nums)
target_vars = df[dep].unique()
if len(othernums) >= 1:
width_size = 15
height_size = 7
count = 0
data = pd.DataFrame(index=df.index)
cols = 2
noplots = len(nums)
rows = int((noplots/cols)+0.99 )
fig = plt.figure(figsize=(width_size,rows*height_size))
for col in nums:
ax = plt.subplot(rows,cols,count+1)
for targetvar in target_vars:
data[targetvar] = np.nan
mask = df[dep]==targetvar
data.loc[mask,targetvar] = df.loc[mask,col]
ax = sns.boxplot(data=data, orient='v', fliersize=5, ax=ax,
linewidth=3, notch=False, saturation=0.5, showfliers=False)
ax.set_title('%s for each %s' %(col,dep))
count += 1
fig.suptitle('Box Plots without Outliers shown', fontsize=20,y=1.08)
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
#########################################
return imgdata_list
########## End of Violin Plots #########
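#### The z-scoring used inside draw_violinplot, shown on throwaway values (illustrative only):
_demo10 = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [10.0, 20.0, 30.0]})
_demo10_norm = (_demo10 - _demo10.mean()) / _demo10.std()
#### after scaling, every column has mean 0 and (sample) std 1
assert np.allclose(_demo10_norm.mean(), 0) and np.allclose(_demo10_norm.std(), 1)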
#### Drawing Date Variables is very important in Time Series data
def draw_date_vars(df,dep,datevars, num_vars,verbose, chart_format, modeltype='Regression'):
#### Now you want to display 2 variables at a time to see how they change over time
### Don't change the number of cols since you will have to change rows formula as well
imgdata_list = list()
image_count = 0
N = len(num_vars)
df = df.set_index(pd.to_datetime(df.pop(datevars[0])))
if N < 2:
var1 = num_vars[0]
width_size = 5
height_size = 5
fig = plt.figure(figsize=(width_size,height_size))
df[var1].plot(title=var1, label=var1)
fig.suptitle('Time Series Plot of %s' %var1, fontsize=20,y=1.08)
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
return imgdata_list
if isinstance(df.index, pd.DatetimeIndex) :
df = df[:]
pass
else:
df = df[:]
try:
col = datevars[0]
if df[col].map(lambda x: 0 if len(str(x)) == 4 else 1).sum() == 0:
if df[col].min() > 1900 or df[col].max() < 2100:
df[col] = df[col].map(lambda x: '01-01-'+str(x) if len(str(x)) == 4 else x)
df.index = pd.to_datetime(df.pop(col), infer_datetime_format=True)
else:
print('%s could not be indexed. Could not draw date_vars.' %col)
return imgdata_list
else:
df.index = pd.to_datetime(df.pop(col), infer_datetime_format=True)
except:
print('%s could not be indexed. Could not draw date_vars.' %col)
return imgdata_list
####### Draw the time series for Regression and DepVar
if modeltype == 'Regression' or dep == None or dep == '':
width_size = 15
height_size = 4
image_count = 0
cols = 2
combos = combinations(num_vars, 2)
combs = copy.deepcopy(combos)
noplots = int((N**2-N)/2)
rows = int((noplots/cols)+0.99)
counter = 1
fig = plt.figure(figsize=(width_size,rows*height_size))
for (var1,var2) in combos:
plt.subplot(rows,cols,counter)
ax1 = plt.gca()
df[var1].plot(secondary_y=True, label=var1, ax=ax1)
df[var2].plot(title=var2 +' (left_axis) vs. ' + var1+' (right_axis)', ax=ax1)
plt.legend(loc='best')
counter += 1
fig.suptitle('Time Series Plot by %s: Pairwise Continuous Variables' %col, fontsize=20,y=1.08)
#fig.tight_layout();
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
else:
        ######## This is for Classification problems only
        image_count = 0
        width_size = 15
        height_size = 4
classes = df[dep].factorize()[1].tolist()
classes = copy.deepcopy(classes)
##### Now separate out the drawing of time series data by the number of classes ###
colors = cycle('gkbyrcmgkbyrcmgkbyrcmgkbyr')
target_vars = df[dep].unique()
if type(classes[0])==int or type(classes[0])==float:
classes = [str(x) for x in classes]
cols = 2
noplots = int((N**2-N)/2)
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
for target_var, class_label, color2 in zip(target_vars, classes, colors):
## Once the date var has been set as the index, you can draw num variables against it
df_target = df[df[dep]==target_var]
combos = combinations(num_vars, 2)
combs = copy.deepcopy(combos)
counter = 1
for (var1,var2) in combos :
plt.subplot(rows,cols,counter)
ax1 = plt.gca()
df_target[var1].plot(secondary_y=True, label=var1,ax=ax1)
df_target[var2].plot(title='Target = '+class_label+': '+var2 +' (left_axis) vs. '+var1,ax=ax1)
plt.legend(loc='best')
counter += 1
fig.suptitle('Time Series Plot by %s: Continuous Variables Pair' %col, fontsize=20,y=1.08)
if verbose == 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, image_count, chart_format))
image_count += 1
return imgdata_list
############# End of Date vars plotting #########################
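#### Sketch of the year-to-datetime coercion used above, on a throwaway frame (illustrative values only):
_ts_demo = pd.DataFrame({'Year': [2014, 2015, 2016], 'sales': [10, 12, 9]})
_ts_demo.index = pd.to_datetime(_ts_demo.pop('Year').map(lambda x: '01-01-' + str(x)))
#### the index is now a DatetimeIndex starting at 2014-01-01, which is what draw_date_vars expects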
# This little function classifies columns into 4 types: categorical, continuous, boolean and
# columns that have only one repeated value; these are useless and must be removed from the dataset
#Subtract RIGHT_LIST from LEFT_LIST to produce a new list
### This program is USED VERY HEAVILY so be careful about changing it
def list_difference(l1,l2):
lst = []
for i in l1:
if i not in l2:
lst.append(i)
return lst
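### Quick self-check for list_difference (illustrative values): the order of l1 is preserved.
assert list_difference(['a', 'b', 'c'], ['b']) == ['a', 'c']
assert list_difference([1, 2, 3], [3, 4]) == [1, 2]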
######## Find ANY word in columns to identify ANY TYPE OF columns
####### search_for_list = ["Date","DATE", "date"], any words you want to search for it
####### columns_list refers to the column names of the target dataset
####### Both columns_list and search_for_list must be lists - otherwise it won't work
def search_for_word_in_list(columns_list, search_for_list):
columns_list = columns_list[:]
search_for_list = search_for_list[:]
lst=[]
for src in search_for_list:
for word in columns_list:
result = re.findall (src, word)
if len(result)>0:
if word.endswith(src) and not word in lst:
lst.append(word)
elif (word == 'id' or word == 'ID') and not word in lst:
lst.append(word)
else:
continue
return lst
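### Quick self-check (illustrative column names): only names that end with a searched token
### (or that are literally 'id'/'ID') make it into the returned list.
assert search_for_word_in_list(['Order_Date', 'OrderID', 'amount'], ['Date']) == ['Order_Date']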
### This is a small program to look for keywords such as "id" in a dataset's columns to see if they are ID variables.
### It first does a keyword match on column names with "id" or some such keyword. It then compares the length of the
### dataframe to the number of unique values in each candidate column: if they match, the variable could be an ID variable.
def analyze_ID_columns(dfin,columns_list):
columns_list = columns_list[:]
dfin = dfin[:]
IDcols_final = []
IDcols = search_for_word_in_list(columns_list,
['ID','Identifier','NUMBER','No','Id','Num','num','_no','.no','Number','number','_id','.id'])
if IDcols == []:
for eachcol in columns_list:
if len(dfin) == len(dfin[eachcol].unique()) and dfin[eachcol].dtype != float:
IDcols_final.append(eachcol)
else:
for each_col in IDcols:
if len(dfin) == len(dfin[each_col].unique()) and dfin[each_col].dtype != float:
IDcols_final.append(each_col)
if IDcols_final == [] and IDcols != []:
IDcols_final = IDcols
return IDcols_final
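### Quick self-check (illustrative frame; assumes pandas is already imported as pd in this module):
### a column whose values are all unique, is not float, and whose name ends with an ID keyword is flagged.
_id_demo = pd.DataFrame({'Customer_ID': [101, 102, 103], 'amount': [5.0, 5.0, 7.0]})
assert analyze_ID_columns(_id_demo, list(_id_demo)) == ['Customer_ID']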
# THESE FUNCTIONS ASSUME A "DIRTY DATASET" IN A PANDAS DATAFRAME AS INPUT
# AND CONVERT IT INTO A DATASET FIT FOR ANALYSIS IN THE END
# this function starts with dividing columns into 4 types: categorical, continuous, boolean and to_delete
# The To_Delete columns have only one unique value and can be removed from the dataset
def start_classifying_vars(dfin, verbose):
dfin = dfin[:]
cols_to_delete = []
boolean_vars = []
categorical_vars = []
continuous_vars = []
discrete_vars = []
totrows = dfin.shape[0]
if totrows == 0:
print('Error: No rows in dataset. Check your input again...')
return cols_to_delete, boolean_vars, categorical_vars, continuous_vars, discrete_vars, dfin
for col in dfin.columns:
if col == 'source':
continue
elif len(dfin[col].value_counts()) <= 1:
cols_to_delete.append(dfin[col].name)
print(' Column %s has only one value hence it will be dropped' %dfin[col].name)
elif dfin[col].dtype==object:
            if (dfin[col].str.len() > 50).any():
cols_to_delete.append(dfin[col].name)
continue
elif search_for_word_in_list([col],['DESCRIPTION','DESC','desc','Text','text']):
cols_to_delete.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 1:
cols_to_delete.append(dfin[col].name)
continue
elif dfin[col].isnull().sum() > 0:
missing_rows=dfin[col].isnull().sum()
pct_missing = float(missing_rows)/float(totrows)
if pct_missing > 0.90:
if verbose == 1:
print('Pct of Missing Values in %s exceed 90 pct, hence will be dropped...' %col)
cols_to_delete.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
py_version = sys.version_info[0]
if py_version < 3:
# This is the Python 2 Version
try:
item_mode = dfin[col].mode().mode[0]
except:
print('''Scipy.stats package not installed in your Python2. Get it installed''')
else:
# This is the Python 3 Version
try:
item_mode = dfin[col].mode()[0]
except:
print('''Statistics package not installed in your Python3. Get it installed''')
dfin[col].fillna(item_mode,inplace=True)
continue
elif len(dfin.groupby(col)) < 20 and len(dfin.groupby(col)) > 1:
categorical_vars.append(dfin[col].name)
continue
else:
discrete_vars.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) < 20 and len(dfin.groupby(col)) > 1:
categorical_vars.append(dfin[col].name)
continue
else:
discrete_vars.append(dfin[col].name)
elif dfin[col].dtype=='int64' or dfin[col].dtype=='int32':
if len(dfin[col].value_counts()) <= 15:
categorical_vars.append(dfin[col].name)
else:
if dfin[col].isnull().sum() > 0:
missing_rows=dfin[col].isnull().sum()
pct_missing = float(missing_rows)/float(totrows)
if pct_missing > 0.90:
if verbose == 1:
print('Pct of Missing Values in %s exceed 90 pct, hence will be dropped...' %col)
cols_to_delete.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
py_version = sys.version_info[0]
if py_version < 3:
# This is the Python 2 Version
try:
item_mode = dfin[col].mode().mode[0]
except:
print('''Scipy.stats package not installed in your Python2. Get it installed''')
else:
# This is the Python 3 Version
try:
item_mode = dfin[col].mode()[0]
except:
print('''Statistics package not installed in your Python3. Get it installed''')
dfin[col].fillna(item_mode,inplace=True)
continue
else:
if len(dfin[col].value_counts()) <= 25 and len(dfin) >= 250:
categorical_vars.append(dfin[col].name)
else:
continuous_vars.append(dfin[col].name)
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
continue
else:
if len(dfin[col].value_counts()) <= 25 and len(dfin) >= 250:
categorical_vars.append(dfin[col].name)
else:
continuous_vars.append(dfin[col].name)
return cols_to_delete, boolean_vars, categorical_vars, continuous_vars, discrete_vars, dfin
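#### Usage sketch (illustrative): the returned tuple unpacks into five column buckets plus the cleaned frame.
# cols_to_drop, bool_cols, cat_cols, cont_cols, discrete_cols, df_clean = start_classifying_vars(df, verbose=1)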
#### this is the MAIN ANALYSIS function that calls the start_classifying_vars and then
#### takes that result and divides categorical vars into 2 additional types: discrete vars and bool vars
def analyze_columns_in_dataset(dfx,IDcolse,verbose):
dfx = dfx[:]
IDcolse = IDcolse[:]
cols_delete, bool_vars, cats, nums, discrete_string_vars, dft = start_classifying_vars(dfx,verbose)
continuous_vars = nums
if nums != []:
for k in nums:
if len(dft[k].unique())==2:
bool_vars.append(k)
elif len(dft[k].unique())<=20:
cats.append(k)
elif (np.array(dft[k]).dtype=='float64' or np.array(dft[k]).dtype=='int64') and (k not in continuous_vars):
if len(dft[k].value_counts()) <= 25:
cats.append(k)
else:
continuous_vars.append(k)
elif dft[k].dtype==object:
discrete_string_vars.append(k)
elif k in continuous_vars:
continue
else:
print('The %s variable could not be classified into any known type' % k)
#print(cols_delete, bool_vars, cats, continuous_vars, discrete_string_vars)
date_vars = search_for_word_in_list(dfx.columns.tolist(),['Date','DATE','date','TIME','time',
'Time','Year','Yr','year','yr','timestamp',
'TimeStamp','TIMESTAMP','Timestamp','Time Stamp'])
date_vars = [x for x in date_vars if x not in cats+bool_vars ]
if date_vars == []:
for col in continuous_vars:
if dfx[col].dtype==int:
if dfx[col].min() > 1900 or dfx[col].max() < 2100:
date_vars.append(col)
for col in discrete_string_vars:
try:
dfx.index = pd.to_datetime(dfx.pop(col), infer_datetime_format=True)
except:
continue
if isinstance(dfx.index, pd.DatetimeIndex):
date_vars = [dfx.index.name]
continuous_vars=list_difference(list_difference(continuous_vars,date_vars),IDcolse)
#cats = list_difference(continuous_vars, cats)
cats=list_difference(cats,date_vars)
discrete_string_vars=list_difference(list_difference(discrete_string_vars,date_vars),IDcolse)
return cols_delete, bool_vars, cats, continuous_vars, discrete_string_vars,date_vars, dft
# Removes duplicates from a list to return unique values - USED ONLY ONCE
def find_remove_duplicates(values):
output = []
seen = set()
for value in values:
if value not in seen:
output.append(value)
seen.add(value)
return output
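# Quick self-check (illustrative values): first occurrence wins and the original order is kept.
assert find_remove_duplicates([3, 1, 3, 2, 1]) == [3, 1, 2]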
##################
def classify_print_vars(filename,sep, max_rows_analyzed,max_cols_analyzed,
depVar='',dfte=None, header=0,verbose=0):
start_time=time.time()
if filename == '':
dft = dfte[:]
pass
elif filename != '' and not filename.endswith(('.xls', '.xlsx')):
        codex = ['utf-8', 'iso-8859-1', 'cp1252', 'latin1']
for code in codex:
try:
dfte = pd.read_csv(filename,sep=sep,index_col=None,encoding=code)
break
except:
print('File encoding decoder %s does not work for this file' %code)
continue
elif filename != '' and filename.endswith(('xlsx','xls')):
try:
dfte = pd.read_excel(filename, header=header)
except:
print('Could not load your Excel file')
return
else:
print('Could not read your data file')
return
try:
print('Shape of your Data Set: %s' %(dfte.shape,))
except:
print('None of the decoders work...')
return
orig_preds = [x for x in list(dfte) if x not in [depVar]]
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(dfte[orig_preds], verbose)
##### Classify Columns ################
IDcols = var_df['id_vars']
discrete_string_vars = var_df['nlp_vars']+var_df['discrete_string_vars']
cols_delete = var_df['cols_delete']
bool_vars = var_df['string_bool_vars'] + var_df['num_bool_vars']
categorical_vars = var_df['cat_vars'] + var_df['factor_vars'] + var_df['int_vars'] + bool_vars
continuous_vars = var_df['continuous_vars']
date_vars = var_df['date_vars']
if len(var_df['continuous_vars'])==0 and len(var_df['int_vars'])>0:
continuous_vars = var_df['int_vars']
int_vars = []
else:
int_vars = var_df['int_vars']
preds = [x for x in orig_preds if x not in IDcols+cols_delete+discrete_string_vars]
if len(IDcols+cols_delete+discrete_string_vars) == 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(IDcols+cols_delete+discrete_string_vars))
if verbose >= 1:
print(' List of variables removed: %s' %(IDcols+cols_delete+discrete_string_vars))
############# Sample data if too big and find problem type #############################
if dfte.shape[0]>= max_rows_analyzed:
print('Since Number of Rows in data %d exceeds maximum, randomly sampling %d rows for EDA...' %(len(dfte),max_rows_analyzed))
dft = dfte.sample(max_rows_analyzed, random_state=0)
else:
dft = copy.deepcopy(dfte)
if type(depVar) == str:
if depVar == '':
cols_list = list(dft)
problem_type = 'Clustering'
classes = []
else:
try:
problem_type = analyze_problem_type(dft, depVar,verbose)
except:
print('Could not find given target var in data set. Please check input')
### return the data frame as is ############
return dfte
cols_list = list_difference(list(dft),depVar)
if dft[depVar].dtype == object:
classes = dft[depVar].factorize()[1].tolist()
#### You dont have to convert it since most charts can take string vars as target ####
#dft[depVar] = dft[depVar].factorize()[0]
elif dft[depVar].dtype == np.int64:
classes = dft[depVar].factorize()[1].tolist()
elif dft[depVar].dtype == bool:
classes = dft[depVar].unique().astype(int).tolist()
elif dft[depVar].dtype == float and problem_type.endswith('Classification'):
classes = dft[depVar].factorize()[1].tolist()
else:
classes = []
elif depVar == None:
cols_list = list(dft)
problem_type = 'Clustering'
classes = []
else:
depVar1 = depVar[0]
problem_type = analyze_problem_type(dft, depVar1)
cols_list = list_difference(list(dft), depVar1)
if dft[depVar1].dtype == object:
classes = dft[depVar1].factorize()[1].tolist()
#### You dont have to convert it since most charts can take string vars as target ####
#dft[depVar] = dft[depVar].factorize()[0]
elif dft[depVar1].dtype == np.int64:
classes = dft[depVar1].factorize()[1].tolist()
        elif dft[depVar1].dtype == bool:
            classes = dft[depVar1].unique().astype(int).tolist()
elif dft[depVar1].dtype == float and problem_type.endswith('Classification'):
classes = dft[depVar1].factorize()[1].tolist()
else:
classes = []
############# Check if there are too many columns to visualize ################
if len(continuous_vars) >= max_cols_analyzed:
######### In that case, SELECT IMPORTANT FEATURES HERE ######################
if problem_type.endswith('Classification') or problem_type == 'Regression':
print('%d numeric variables in data exceeds limit, taking top %d variables' %(len(
continuous_vars), max_cols_analyzed))
important_features,num_vars = find_top_features_xgb(dft,preds,continuous_vars,
depVar,problem_type,verbose)
if len(important_features) >= max_cols_analyzed:
### Limit the number of features to max columns analyzed ########
important_features = important_features[:max_cols_analyzed]
dft = dft[important_features+[depVar]]
#### Time to classify the important columns again ###
var_df = classify_columns(dft[important_features], verbose)
IDcols = var_df['id_vars']
discrete_string_vars = var_df['nlp_vars']+var_df['discrete_string_vars']
cols_delete = var_df['cols_delete']
bool_vars = var_df['string_bool_vars'] + var_df['num_bool_vars']
categorical_vars = var_df['cat_vars'] + var_df['factor_vars'] + var_df['int_vars'] + bool_vars
continuous_vars = var_df['continuous_vars']
date_vars = var_df['date_vars']
int_vars = var_df['int_vars']
preds = [x for x in important_features if x not in IDcols+cols_delete+discrete_string_vars]
if len(IDcols+cols_delete+discrete_string_vars) == 0:
print(' No variables removed since no ID or low-information variables found in data')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(IDcols+cols_delete+discrete_string_vars))
if verbose >= 1:
print(' List of variables removed: %s' %(IDcols+cols_delete+discrete_string_vars))
dft = dft[preds+[depVar]]
else:
continuous_vars = continuous_vars[:max_cols_analyzed]
print('%d numeric variables in data exceeds limit, taking top %d variables' %(len(
                continuous_vars), max_cols_analyzed))
if verbose >= 1:
print(' List of variables selected: %s' %(continuous_vars[:max_cols_analyzed]))
elif len(continuous_vars) < 1:
print('No continuous variables in this data set. No visualization can be performed')
### Return data frame as is #####
return dfte
else:
######### If above 1 but below limit, leave features as it is ######################
if depVar != '':
dft = dft[preds+[depVar]]
else:
dft = dft[preds]
################### Time to reduce cat vars which have more than 30 categories #############
#discrete_string_vars += np.array(categorical_vars)[dft[categorical_vars].nunique()>30].tolist()
#categorical_vars = left_subtract(categorical_vars,np.array(
# categorical_vars)[dft[categorical_vars].nunique()>30].tolist())
############# Next you can print them if verbose is set to print #########
ppt = pprint.PrettyPrinter(indent=4)
if verbose==1 and len(cols_list) <= max_cols_analyzed:
marthas_columns(dft,verbose)
print(" Columns to delete:")
ppt.pprint(' %s' % cols_delete)
print(" Boolean variables %s ")
ppt.pprint(' %s' % bool_vars)
print(" Categorical variables %s ")
ppt.pprint(' %s' % categorical_vars)
print(" Continuous variables %s " )
ppt.pprint(' %s' % continuous_vars)
print(" Discrete string variables %s " )
ppt.pprint(' %s' % discrete_string_vars)
print(" Date and time variables %s " )
ppt.pprint(' %s' % date_vars)
print(" ID variables %s ")
ppt.pprint(' %s' % IDcols)
print(" Target variable %s ")
ppt.pprint(' %s' % depVar)
elif verbose==1 and len(cols_list) > max_cols_analyzed:
print(' Total columns > %d, too numerous to list.' %max_cols_analyzed)
return dft,depVar,IDcols,bool_vars,categorical_vars,continuous_vars,discrete_string_vars,date_vars,classes,problem_type
####################################################################
def marthas_columns(data,verbose=0):
"""
    This program is named in honor of one of my students who came up with the idea for it.
It's a neat way of looking at data compared to the boring describe() function in Pandas.
"""
data = data[:]
print('Data Set Shape: %d rows, %d cols\n' % data.shape)
if data.shape[1] > 25:
print('Too many columns to print')
else:
if verbose==1:
print('Data Set columns info:')
for col in data.columns:
print('* %s: %d nulls, %d unique vals, most common: %s' % (
col,
data[col].isnull().sum(),
data[col].nunique(),
data[col].value_counts().head(2).to_dict()
))
print('\n------\n')
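#### Example call (illustrative frame; with verbose=1 it prints nulls, unique counts and top values per column):
# marthas_columns(pd.DataFrame({'a': [1, 2, 2], 'b': ['x', None, 'x']}), verbose=1)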
################################################
######### NEW And FAST WAY to CLASSIFY COLUMNS IN A DATA SET #######
def classify_columns(df_preds, verbose=0):
"""
Takes a dataframe containing only predictors to be classified into various types.
    DO NOT SEND IN A TARGET COLUMN since it will try to classify that column along with the predictors.
Returns a data frame containing columns and the class it belongs to such as numeric,
categorical, date or id column, boolean, nlp, discrete_string and cols to delete...
####### Returns a dictionary with 10 kinds of vars like the following: # continuous_vars,int_vars
# cat_vars,factor_vars, bool_vars,discrete_string_vars,nlp_vars,date_vars,id_vars,cols_delete
"""
print('Classifying variables in data set...')
    #### Cat_Limit defines the max number of categories a column can have to be called a categorical column
cat_limit = 50
def add(a,b):
return a+b
train = df_preds[:]
sum_all_cols = dict()
orig_cols_total = train.shape[1]
#Types of columns
cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1
) | (train[col].isnull().sum()/len(train) >= 0.90)]
train = train[left_subtract(list(train),cols_delete)]
var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(
columns={0:'type_of_column'})
sum_all_cols['cols_delete'] = cols_delete
var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']
and len(train[x['index']].value_counts()) == 2 else 0, axis=1)
string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])
sum_all_cols['string_bool_vars'] = string_bool_vars
var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [
'int8','int16','int32','int64',
'float16','float32','float64'] and len(
train[x['index']].value_counts()) == 2 else 0, axis=1)
num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])
sum_all_cols['num_bool_vars'] = num_bool_vars
###### This is where we take all Object vars and split them into diff kinds ###
discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[
'index'] not in string_bool_vars+cols_delete else 0,axis=1)
######### This is where we figure out whether a string var is nlp or discrete_string var ###
var_df['nlp_strings'] = 0
var_df['discrete_strings'] = 0
var_df['cat'] = 0
var_df['id_col'] = 0
discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()
if len(var_df.loc[discrete_or_nlp==1]) != 0:
for col in discrete_or_nlp_vars:
#### first fill empty or missing vals since it will blowup ###
train[col] = train[col].fillna(' ')
if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= 50 and len(train[col].value_counts()
) < len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'nlp_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) < len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) == len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
var_df.loc[var_df['index']==col,'cat'] = 1
nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])
sum_all_cols['nlp_vars'] = nlp_vars
discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])
sum_all_cols['discrete_string_vars'] = discrete_string_vars
###### This happens only if a string column happens to be an ID column #######
#### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...
#### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###
var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,
axis=1)
factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])
sum_all_cols['factor_vars'] = factor_vars
########################################################################
date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in ['int8','int16',
'int32','int64'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
######### This is where we figure out whether a numeric col is date or id variable ###
var_df['int'] = 0
var_df['date_time'] = 0
### if a particular column is date-time type, now set it as a date time variable ##
var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
### this is where we save them as date time variables ###
if len(var_df.loc[date_or_id==1]) != 0:
for col in var_df.loc[date_or_id==1]['index'].values.tolist():
if len(train[col].value_counts()) == len(train):
if train[col].min() < 1900 or train[col].max() > 2050:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
if train[col].min() < 1900 or train[col].max() > 2050:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
try:
| pd.to_datetime(train[col],infer_datetime_format=True) | pandas.to_datetime |
import random
from concurrent.futures import ThreadPoolExecutor
import concurrent.futures
from io import StringIO
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
from urllib.error import HTTPError
X_SOURCE = 'API de Series de Tiempo: Test de Integración'
def read_source_csv(serie_id: str, metadata: pd.DataFrame):
serie_metadata = metadata.loc[serie_id, :]
if metadata is None:
return None
download_url = serie_metadata.distribucion_url_descarga
title = serie_metadata.serie_titulo
try:
csv = pd.read_csv(download_url, parse_dates=['indice_tiempo'], index_col='indice_tiempo')
return csv[[title]]
except (HTTPError, KeyError):
return None
def get_equality_array(api_df: pd.DataFrame, original_df: pd.DataFrame):
df = | pd.merge(api_df, original_df, left_index=True, right_index=True) | pandas.merge |
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import time
import pickle
import jieba
from collections import Counter
from gensim.models import KeyedVectors
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from matplotlib import pyplot as plt
class WVEmbedding():
def __init__(self, wv_path, data_path, vocab_size=29000,
emb_path=None):
self.wv_path =wv_path
self.data_path = data_path
self.vocab_size = vocab_size
self.word_list = self.get_word_list()
self.word_to_id, self.id_to_word = self.get_vocab()
# load data from saved data, save lots of time
if emb_path:
self.embedding = np.load(emb_path)
else:
self.embedding = self.get_embedding()
def get_embedding(self):
self.wv = KeyedVectors.load_word2vec_format(self.wv_path)
# get embedding dim
embedding_dim = self.wv.vector_size
emb = np.zeros((self.vocab_size, embedding_dim))
wv_dict = self.wv.vocab.keys()
num_found = 0
for idx in tqdm(range(self.vocab_size)):
word = self.id_to_word[idx]
if word == '<pad>' or word == '<unk>':
emb[idx] = np.zeros([embedding_dim])
elif word in wv_dict:
emb[idx] = self.wv.get_vector(word)
num_found += 1
print("{} of {} found, rate:{:.2f}".format(num_found, self.vocab_size, num_found/self.vocab_size))
return emb
# get all words from train data, dev data, test data
def get_word_list(self):
data = pd.read_csv(self.data_path, sep=',')
word_list = []
for i, line in enumerate(data['review'].values):
word_list += jieba.lcut(line)
return word_list
def get_vocab(self):
counts = Counter(self.word_list)
vocab = sorted(counts, key=counts.get, reverse=True)
# add <pad>
vocab = ['<pad>', '<unk>'] + vocab
print('total word size:{}'.format(len(vocab)))
        # truncate vocabulary
if len(vocab) < self.vocab_size:
raise Exception('Vocab less than requested!!!')
else:
vocab = vocab[:self.vocab_size]
word_to_id = {word: i for i, word in enumerate(vocab)}
id_to_word = {i: word for i, word in enumerate(vocab)}
return word_to_id, id_to_word
class WaiMaiDataSet(Dataset):
def __init__(self, data_path, word_to_id, max_len=40, use_unk=False):
self.datas, self.labels = self.load_data(data_path)
self.max_len = max_len
self.word_to_id = word_to_id
self.pad_int = word_to_id['<pad>']
self.use_unk = use_unk
# internal data
self.conversation_list, self.total_len = self.process_data(self.datas)
def load_data(self, data_path):
data = pd.read_csv(data_path)
return data['review'].tolist(), data['label'].tolist()
# turn sentence to id
def sent_to_ids(self, text):
tokens = jieba.lcut(text)
# if use_unk is True, it will use <unk> vectors
# else just remove this word
if self.use_unk:
token_ids = [self.word_to_id[x] if x in self.word_to_id else self.word_to_id['<unk>'] for x in tokens]
else:
token_ids = [self.word_to_id[x] for x in tokens if x in self.word_to_id]
        # Truncating or PADDING
if len(token_ids) > self.max_len:
token_ids = token_ids[: self.max_len]
text_len = self.max_len
else:
text_len = len(token_ids)
token_ids = token_ids + [self.pad_int] * (self.max_len - len(token_ids))
return token_ids, text_len
def process_data(self, data_list):
conversation_list= []
total_len = []
for line in data_list:
conversation, conver_len = self.sent_to_ids(line)
conversation_list.append(conversation)
total_len.append(conver_len)
return conversation_list, total_len
def __len__(self):
return len(self.conversation_list)
def __getitem__(self, idx):
return torch.LongTensor(self.conversation_list[idx]),\
self.total_len[idx], \
self.labels[idx]
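# Usage sketch -- the file paths and sizes below are placeholders, not part of this project:
# emb = WVEmbedding(wv_path='sgns.weibo.300d.txt', data_path='waimai_10k.csv', vocab_size=29000)
# train_set = WaiMaiDataSet('waimai_10k.csv', emb.word_to_id, max_len=40)
# loader = DataLoader(train_set, batch_size=64, shuffle=True)
# tokens, lengths, labels = next(iter(loader))   # tokens: LongTensor of shape (64, 40)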
# turn a sentence into a vector representation:
# average all of its word vectors to form the sentence vector
#
def to_avg_sv(path, save_path, wv_embedding):
data = | pd.read_csv(path) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 10 21:26:32 2019
@author: alexandradarmon
"""
import numpy as np
import pandas as pd
import gutenberg.acquire
import logging
from logs.logger import logging_function
from punctuation.utils.utils import splitter_function
logger = logging.getLogger(__name__)
from gutenberg.query import list_supported_metadatas
print(list_supported_metadatas())
@logging_function(logger)
def get_max_book_id():
max_book_id = 1000
return max_book_id
@logging_function(logger)
def get_min_book_id():
min_book_id = 0
return min_book_id
@logging_function(logger)
def get_list_book_id():
list_book_id = range(0,1000)
return list_book_id
@logging_function(logger)
def random_book_ids(n , list_n=None):
if list_n is None:
list_n = get_list_book_id()
return np.random.choice(list_n, n).tolist()
@logging_function(logger)
def random_book_id(list_n=None):
if list_n is None:
list_n = get_list_book_id()
return np.random.choice(list_n,1).tolist()[0]
def get_cache_info(list_epubs,
verbose=True,
cache_data_directory='data/cache/epub'):
titles = []
authors = []
author_birthdates = []
author_deathdates = []
languages = []
genres = []
subjects = []
book_ids = []
count = 0
for directory_nb in list_epubs:
if count%100==0 and verbose : print(count)
count+=1
book_ids.append(directory_nb)
file_name = cache_data_directory+'/'+str(directory_nb)+'/pg'+str(directory_nb)+'.rdf'
try:
data = open(file_name, 'r').read()
title = splitter_function(data, '<dcterms:title>','</dcterms:title>')
titles.append(title)
book_shelf = splitter_function(data, '<pgterms:bookshelf>', '</pgterms:bookshelf>')
genre = splitter_function(book_shelf, '<rdf:value>', '</rdf:value>')
genres.append(genre)
res_subjects = []
if '<dcterms:subject>' in data:
subject_sections = data.split('<dcterms:subject>')
for subject_section in subject_sections[1:]:
subject = splitter_function(subject_section, '<rdf:value>', '</rdf:value>')
res_subjects.append(subject)
subjects.append(res_subjects)
author_section = splitter_function(data, '<dcterms:creator>', '</dcterms:creator>')
author = splitter_function(author_section, '<pgterms:name>','</pgterms:name>')
authors.append(author)
bithdate_section = splitter_function(author_section,
'<pgterms:birthdate',
'</pgterms:birthdate>')
if bithdate_section is not None:
bithdate = bithdate_section.split('>')[-1]
else:
bithdate = None
author_birthdates.append(bithdate)
deathdate_section = splitter_function(author_section,
'<pgterms:deathdate',
'</pgterms:deathdate>')
if deathdate_section is not None:
deathdate = deathdate_section.split('>')[-1]
else:
deathdate = None
author_deathdates.append(deathdate)
language_section = splitter_function(data, '<dcterms:language>', '</dcterms:language>')
language = splitter_function(language_section,
'<rdf:value rdf:datatype="http://purl.org/dc/terms/RFC4646">',
'</rdf:value>')
languages.append(language)
except:
titles.append(None)
authors.append(None)
author_birthdates.append(None)
author_deathdates.append(None)
genres.append(None)
subjects.append(None)
languages.append(None)
df_res = | pd.DataFrame() | pandas.DataFrame |
from featureEngineering.feature_engineering import DataCleaning,VariableReduction
from modelBuilding.segmentation_algo import DistBasedAlgo
from evaluationMetrices.evaluation_metrices import EMSegmentation
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
dc = DataCleaning()
vr = VariableReduction()
dba = DistBasedAlgo()
ems = EMSegmentation()
df = dc.create_dataframe('cc_seg_data.csv')
df_num = dc.get_num_vars(df)
df_cat = dc.get_cat_vars(df)
num_summary = df_num.apply(lambda x: dc.dataSummary_num(x)).T
#num_summary.to_csv('num_summary.csv')
df_new = df_num.apply(lambda x: dc.fillna_median(x))
df_new = df_new.apply(lambda x: dc.outlier_capping(x))
num_summary2 = df_new.apply(lambda x: dc.dataSummary_num(x)).T
#num_summary2.to_csv('num_summary2.csv')
df_scaled = vr.data_standardization(df_new)
scaled = pd.DataFrame(df_scaled).describe()
pca = vr.get_PCA(df_scaled, 17)
cumsum_var = vr.get_cumsum_exp_var_ratio(pca.explained_variance_ratio_)
#plt.plot(cumsum_var)
pc_final = vr.get_PCA(df_scaled, 8)
'''
calculate loadings
'''
loadings = vr.get_PCA_loadings(df_num, pc_final.components_, pc_final.explained_variance_)
loadings.to_csv('loadings.csv')
selected_columns = ['PURCHASES','PURCHASES_TRX',
'PURCHASES_FREQUENCY','INSTALLMENTS_PURCHASES','ONEOFF_PURCHASES',
'CASH_ADVANCE','BALANCE','CASH_ADVANCE_TRX','CASH_ADVANCE_FREQUENCY','TENURE']
select_columns2 = ['PURCHASES','PURCHASES_TRX','PURCHASES_FREQUENCY','CASH_ADVANCE','BALANCE',
'PURCHASES_INSTALLMENTS_FREQUENCY','TENURE','CREDIT_LIMIT','PRC_FULL_PAYMENT']
df_scaled_1 = pd.DataFrame(df_scaled, columns = df_num.columns)
df_scaled_final_1 = df_scaled_1[selected_columns]
df_scaled_final_2 = df_scaled_1[select_columns2]
km_3_1 = dba.k_means(df_scaled_final_1, 3)
km_4_1 = dba.k_means(df_scaled_final_1, 4)
km_5_1 = dba.k_means(df_scaled_final_1, 5)
km_6_1 = dba.k_means(df_scaled_final_1, 6)
km_7_1 = dba.k_means(df_scaled_final_1, 7)
km_8_1 = dba.k_means(df_scaled_final_1, 8)
km_9_1 = dba.k_means(df_scaled_final_1, 9)
km_10_1 = dba.k_means(df_scaled_final_1, 10)
df_num['cluster_3'] = km_3_1.labels_
df_num['cluster_4'] = km_4_1.labels_
df_num['cluster_5'] = km_5_1.labels_
df_num['cluster_6'] = km_6_1.labels_
df_num['cluster_7'] = km_7_1.labels_
df_num['cluster_8'] = km_8_1.labels_
df_num['cluster_9'] = km_9_1.labels_
df_num['cluster_10'] = km_10_1.labels_
#
#k_range = range(2, 11)
#score = []
#
#for k in k_range:
# km = dba.k_means(df_scaled_final_2, k)
# score.append(ems.check_silhouette_score(df_scaled_final_2, km.labels_))
#
#plt.plot(k_range, score)
#plt.xlabel('no of clusters')
#plt.ylabel('silhouette co-eff')
#plt.grid(True)
'''
get total size using any cluster
get size for each cluster and each segment
'''
size = pd.concat([pd.Series(df_num.cluster_3.size), pd.Series.sort_index(df_num.cluster_3.value_counts()),
pd.Series.sort_index(df_num.cluster_4.value_counts()),pd.Series.sort_index(df_num.cluster_5.value_counts()),
pd.Series.sort_index(df_num.cluster_6.value_counts()),pd.Series.sort_index(df_num.cluster_7.value_counts()),
pd.Series.sort_index(df_num.cluster_8.value_counts()), pd.Series.sort_index(df_num.cluster_9.value_counts()),
pd.Series.sort_index(df_num.cluster_10.value_counts())])
Seg_size = pd.DataFrame(size, columns=['Seg_size'])
Seg_pct = pd.DataFrame(size/df_num.cluster_3.size, columns=['Seg_pct'])
'''
get total mean for each column
get mean for each cluster and each segment
'''
Profiling_output = pd.concat([df_num.apply(lambda x: x.mean()).T, df_num.groupby('cluster_3').apply(lambda x: x.mean()).T,
df_num.groupby('cluster_4').apply(lambda x: x.mean()).T,df_num.groupby('cluster_5').apply(lambda x: x.mean()).T,
df_num.groupby('cluster_6').apply(lambda x: x.mean()).T,df_num.groupby('cluster_7').apply(lambda x: x.mean()).T,
df_num.groupby('cluster_8').apply(lambda x: x.mean()).T, df_num.groupby('cluster_9').apply(lambda x: x.mean()).T,
df_num.groupby('cluster_10').apply(lambda x: x.mean()).T], axis=1)
Profiling_output_final = | pd.concat([Seg_size.T, Seg_pct.T, Profiling_output], axis=0) | pandas.concat |
import pandas as pd
from sktime.transformers.series_as_features.base import \
BaseSeriesAsFeaturesTransformer
from sktime.utils.data_container import tabularize
from sktime.utils.validation.series_as_features import check_X
__author__ = "<NAME>"
class PAA(BaseSeriesAsFeaturesTransformer):
""" (PAA) Piecewise Aggregate Approximation Transformer, as described in
<NAME>, <NAME>, <NAME>, and <NAME>.
Dimensionality reduction for fast similarity search in large time series
databases.
Knowledge and information Systems, 3(3), 263-286, 2001.
For each series reduce the dimensionality to num_intervals, where each
value is the mean of values in
the interval.
TO DO: pythonise it to make it more efficient. Maybe check vs this version
http://vigne.sh/posts/piecewise-aggregate-approx/
Could have: Tune the interval size in fit somehow?
Parameters
----------
num_intervals : int, dimension of the transformed data (default 8)
"""
def __init__(self,
num_intervals=8
):
self.num_intervals = num_intervals
super(PAA, self).__init__()
def set_num_intervals(self, n):
self.num_intervals = n
def transform(self, X, y=None):
"""
Parameters
----------
X : nested pandas DataFrame of shape [n_instances, 1]
Nested dataframe with univariate time-series in cells.
Returns
-------
dims: Pandas data frame with first dimension in column zero
"""
self.check_is_fitted()
X = check_X(X, enforce_univariate=True)
X = tabularize(X, return_array=True)
num_atts = X.shape[1]
num_insts = X.shape[0]
dims = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pickle
import csv
import glob
import errno
import re
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from keras.layers import Dense, Embedding, Dropout, Reshape, Merge, Input, LSTM, concatenate
from keras.layers import TimeDistributed
from keras.models import Sequential, Model
from keras.optimizers import Adam, Adamax
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import sequence
from keras.models import load_model
from keras import utils
class ModelDataSettings:
def __init__(self):
self.label_column = ''
self.label_type = ''
self.key_column = ''
self.category_columns = []
self.numeric_columns = []
self.sequence_columns = []
self.sequence_length = 10
self.sequence_pad = 'post'
self.value_index = {}
self.index_value = {}
self.imputers = {}
self.scalers = {}
class ModelData:
def __init__(self, input_data, settings_filename=''):
self.input_data = input_data
self.prep_data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
from scipy import interpolate
import pandas as pd
from .checkarrays import checkarrays, checkarrays_tvd, checkarrays_monotonic_tvd
def interpolate_deviation(md, inc, azi, md_step=1):
"""
Interpolate a well deviation to a given step.
Parameters
----------
md: float, measured depth (units not defined)
inc: float, well inclination in degrees from vertical
azi: float, well azimuth in degrees from North
md_step: int or float, md increment to interpolate to
Returns
-------
Deviation intepolated to new md_step:
md, inc, azi
Notes
-----
This function should not be used before md->tvd conversion.
Note that the input arrays must not contain NaN values.
"""
md, inc, azi = checkarrays(md, inc, azi)
for input_array in [md, inc, azi]:
if np.isnan(input_array).any():
raise ValueError('md, inc and azi cannot contain NaN values.')
try:
new_md = np.arange(md.min(), md.max() + md_step, md_step)
new_md[-1] = md.max()
except TypeError:
raise TypeError('md_step must be int or float')
f_inc = interpolate.interp1d(md, inc)
new_inc = f_inc(new_md)
f_azi = interpolate.interp1d(md, azi)
new_azi = f_azi(new_md)
new_deviation = pd.DataFrame({'new_md':new_md,'new_inc':new_inc,'new_azi':new_azi})
return new_deviation
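# Usage sketch (synthetic survey values, units as per the caller):
# md  = np.array([0.0, 10.0, 20.0])
# inc = np.array([0.0, 5.0, 10.0])
# azi = np.array([0.0, 45.0, 90.0])
# interpolate_deviation(md, inc, azi, md_step=5).new_md.values  # -> [0., 5., 10., 15., 20.]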
def interpolate_position(tvd, easting, northing, tvd_step=1):
"""
Interpolate a well positional log to a given step.
Parameters
----------
tvd: float, true verical depth (units not defined)
northing: float, north-offset from zero reference point
the units should be the same as the input deviation
or the results will be wrong
easting: float, east-offset from zero reference point
the units should be the same as the input deviation
or the results will be wrong
tvd_step: int or float, tvd increment to interpolate to
Returns
-------
Deviation intepolated to new step:
tvd, easting, northing
Notes
-----
This function should not be used before tvd->md conversion.
Note that the input arrays must not contain NaN values.
The tvd values must be strictly increasing, i.e. this
method will not work on horizontal wells, use
`interpolate_deviation` for those wells.
"""
tvd, easting, northing = checkarrays_monotonic_tvd(tvd, easting, northing)
for input_array in [tvd, northing, easting]:
if np.isnan(input_array).any():
raise ValueError('tvd, northing and easting cannot contain NaN values.')
try:
new_tvd = np.arange(tvd[0], tvd[-1] + tvd_step, tvd_step)
new_tvd[-1] = tvd[-1]
except TypeError:
raise TypeError('tvd_step must be int or float')
f_easting = interpolate.interp1d(tvd, easting)
new_easting = f_easting(new_tvd)
f_northing = interpolate.interp1d(tvd, northing)
new_northing = f_northing(new_tvd)
new_position = | pd.DataFrame({'new_tvd':new_tvd,'new_easting':new_easting,'new_northing':new_northing}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import nltk
import os
import cv2
import imutils
import matplotlib.pyplot as plt
import re
from nltk.corpus import stopwords
from IPython.display import clear_output, display
import time
# Mount Google Drive into the notebook
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
nltk.download("punkt")
nltk.download('cess_esp')
nltk.download('stopwords')
os.chdir("/content/drive/My Drive/Hackaton2021/codigo/Entregables/Reto2/")
import spaghetti as sgt
#@title Functions
def split_text(BigString):
"""Split texto completo por retornos de carro o signos de puntuacion."""
pruebat = BigString
splited = re.split("[\,\.]\n", pruebat)
return splited
def etiqueta_RIIA(word):
"""Etiquetar palabras completas con cadenas posibles"""
try:
expr = re.compile(".*{0}.*".format(word))
busca_coincidencia = lambda lista, expr: list(filter(lambda x: expr.match(x), lista))
newtag = []
for optiontag, lista in zip(["per", "per", "pla", "org"] , [listProsecuted, listcivilservs, listplaces, listorgs]):
if any(busca_coincidencia(lista, expr)) and optiontag not in newtag:
newtag.append(optiontag)
if len(newtag) == 0:
newtag = ["dato"]
except Exception as error:
print(error)
print("Causada por:", word)
newtag = ["Err"]
finally:
return "".join(newtag)
def etiqueta_simbolo(word):
"""Etiquetar palabras que no hayan sido etiquetadas pos corpus."""
numeric_expr = re.compile("\d+$")
alphanum_expr = re.compile("[\w\d]+")
char_expr = re.compile("\w+$")
symbol_expr = re.compile("\W*.*")
if numeric_expr.match(word) is not None:
newtag = "numero"
elif char_expr.match(word) is not None:
newtag = "plbr"
elif alphanum_expr.match(word) is not None:
newtag = "datoN"
elif symbol_expr.match(word) is not None:
newtag = "unknown"
else:
newtag = None
return newtag
def etiqueta_entidades_RIIA(word, currtag):
"""Seleccion de etiqueta de simbolo o palabra en RIIA."""
if (currtag is None) and (len(word) >= 4):
newtag = etiqueta_RIIA(word)
else:
newtag = etiqueta_simbolo(word)
return newtag
def tagging(phrase):
"""Generar tags para palabras de una frase."""
limpiar = lambda x: re.sub("[*+/\-_\\\?\'\\\n\|]", "", x)
phrase = limpiar(phrase)
tokens = nltk.word_tokenize(phrase)
    # clean out rare/odd words
norare = lambda x: re.search(r"[^a-zA-ZÀ-ÿ\d]", x) is None or len(x) > 3
    # remove stopwords
noincluir = stopwords.words("spanish")
seincluye = lambda x: ((x not in noincluir) or (x.isupper() or x.istitle())) and (norare(x))
tokens = list(filter(lambda x: seincluye(x), tokens))
tokens_low = list(map(lambda x: x.lower(), tokens))
tagged = sgt.pos_tag(tokens_low)
# filtrar los que resulten None
result = []
for (word, tag), word_unch in zip(tagged, tokens):
if (tag is None) or (tag == ""):
            # compare them against the proposed entity lists
newtag = etiqueta_entidades_RIIA(word, tag)
result.append((word_unch, word, newtag))
else:
result.append((word_unch, word, tag))
return result
def get_chunks(grammar, tagged0):
"""Buscar expresion en frase mediante formulas gramaticales."""
cp = nltk.RegexpParser(grammar)
#print(tagged0)
tagged = list(map(lambda x: (x[1], x[2]), tagged0))
chunked = cp.parse(tagged)
entities = []
get_position = lambda x: np.where(list(map(lambda y: x==y[0], tagged)))[0][0]
entitycase = lambda ind: not(tagged0[ind][0].islower())
entitytagRIIA = lambda x: re.match(r"(per|pla|org)\w+", x) is not None
entitycode = lambda x: x in ["Z", "numero", "Fz", "datoN"]
entityplbr = lambda x: x in ["plbr"]
for i, subtree in enumerate(chunked):
if isinstance(subtree, nltk.Tree) and subtree.label() == "NP":
inds = list(map(lambda x: get_position(x[0]), subtree.leaves()))
withUppercase = list(map(lambda ind: entitycase(ind), inds))
withNumbers = list(map(lambda x: entitycode(x[1]), subtree.leaves()))
withtagRIIA = list(map(lambda x: entitytagRIIA(x[1]), subtree.leaves()))
withplbr = list(map(lambda x: entityplbr(x[1]), subtree.leaves()))
tokens = list(map(lambda ind: tagged0[ind][0], inds))
tags = list(map(lambda ind: tagged0[ind][2], inds))
percnum = float(np.sum(withNumbers)) / len(tokens)
percplbr = float(np.sum(withplbr)) / len(tokens)
if (percnum > 0.3) or (percplbr >= 0.5):
entities.append(("numb", {"value":" ".join(tokens), "tags": " ".join(tags)}))
elif any(withUppercase) or np.sum(withtagRIIA) >= 2:
entities.append(("1st", {"value":" ".join(tokens), "tags": " ".join(tags)}))
else:
entities.append(("2nd", {"value":" ".join(tokens), "tags": " ".join(tags)}))
return entities
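# Usage sketch (run after the setup in __main__ below; illustrative sentence, exact chunks depend
# on the cess_esp tags and the RIIA entity lists):
# tagged = tagging('El juez ordenó el pago de 5000 pesos a la empresa')
# get_chunks(grammar, tagged)  # -> list of ('1st' | '2nd' | 'numb', {'value': ..., 'tags': ...}) tuples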
if __name__ == "__main__":
#@title String fields
filename = "./output/Evaluacion_Reto2A" #@param {type:"string"}
fileoutput = "./output/Entities_Reto2A" #@param {type:"string"}
tabla = pd.read_csv(f"{filename}.csv", header=None)
strip = False #@param {type:"boolean"}
tabla
grammar = r"""Q: {<per(\w*)|(np\w+)|nc(\w+)|pla(\w*)|org(\w*)|datoN|Z|numero|Fz|plbr>}
NP: {<Q> <(sp\w+)|cc>* <Q>+}
NP: {<Q>+}
"""
    # candidate entity lists
prosecuted = pd.read_csv("./insumos/prosecuted.csv", sep="\t")
listProsecuted = prosecuted[prosecuted.columns[0]].tolist()
civilservs = pd.read_csv("./insumos/civilservants.csv", sep="\t")
listcivilservs = civilservs[civilservs.columns[0]].tolist()
places = | pd.read_csv("./insumos/places.csv", sep="\t") | pandas.read_csv |
def performance_visualizer(trials_obj,n_models,choice=False,**choice_var):
    import pandas as pd
    import numpy as np
performance = [1-t['result']['loss'] for t in trials_obj.trials]
hyperparam= list(trials_obj.trials[0]['misc']['vals'].keys())
values_dict ={}
for i in hyperparam:
values_dict[i]=[]
for j in trials_obj.trials:
if(len(j['misc']['vals'][i])==0):
values_dict[i].append(np.NaN)
else:
values_dict[i].append(j['misc']['vals'][i][0])
out = | pd.DataFrame.from_dict(values_dict) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
"""
1. Subset the Census_Crime_All_Right_Sorted_Missing_Census_Filled df to get the fixed and YEAR columns
"""
nat_cen_all_sorted = | pd.read_csv('/Users/salma/Studies/Research/Criminal_Justice/research_projects/US_Crime_Analytics/data/merge_files/census_crime/Census_90-15_Final_Sorted.csv') | pandas.read_csv |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("<KEY>")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
    df_cat: DataFrame
        Copy of the first four rows of `df`, with 'A' and 'B' cast to
        categorical dtype, an integer column 'C' added and column 'D' dropped.
    """
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
# GH 13420
df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
cat = Categorical.from_codes(code, categories=list("abc"))
result = df.groupby(cat, axis=1).mean()
expected = df.T.groupby(cat, axis=0).mean().T
tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(DataFrame.sum, skipna=True)
.reset_index()
)
tm.assert_frame_equal(result, expected)
def test_get_nonexistent_category():
# Accessing a Category that is not in the dataframe
df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
with pytest.raises(KeyError, match="'vau'"):
df.groupby("var").apply(
lambda rows: DataFrame(
{"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
)
)
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABCD")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
"value": [0.1] * 4,
}
)
args = {"nth": [0]}.get(reduction_func, [])
expected_length = 4 if observed else 16
series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
assert len(result) == expected_length
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
reduction_func, request
):
# GH 17605
# Tests whether the unobserved categories in the result contain 0 or NaN
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
"value": [0.1] * 4,
}
)
unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
args = {"nth": [0]}.get(reduction_func, [])
series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
for idx in unobserved:
val = result.loc[idx]
assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)
# If we expect unobserved values to be zero, we also expect the dtype to be int.
# Except for .sum(). If the observed categories sum to dtype=float (i.e. their
# sums have decimals), then the zeros for the missing categories should also be
# floats.
if zero_or_nan == 0 and reduction_func != "sum":
assert np.issubdtype(result.dtype, np.integer)
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# does not return the categories that are not in df when observed=True
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
for cat in unobserved_cats:
assert cat not in res.index
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
reduction_func, observed, request
):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# returns the categories that are not in df when observed=False/None
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
expected = _results_for_groupbys_with_missing_categories[reduction_func]
if expected is np.nan:
assert res.loc[unobserved_cats].isnull().all().all()
else:
assert (res.loc[unobserved_cats] == expected).all().all()
def test_series_groupby_categorical_aggregation_getitem():
# GH 8870
d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=True, sort=True)
result = groups["foo"].agg("mean")
expected = groups.agg("mean")["foo"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, expected_values",
[(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
# 31256
df = DataFrame(
{
"id": [0, 1, 2, 3, 4],
"groups": [0, 1, 1, 2, 2],
"value": Categorical([0, 0, 0, 0, 1]),
}
).set_index("id")
result = df.groupby("groups").agg(func)
expected = DataFrame(
{"value": expected_values}, index=Index([0, 1, 2], name="groups")
)
tm.assert_frame_equal(result, expected)
def test_groupby_agg_non_numeric():
df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
expected = DataFrame({"A": [2, 1]}, index=[1, 2])
result = df.groupby([1, 2, 1]).agg(Series.nunique)
tm.assert_frame_equal(result, expected)
result = df.groupby([1, 2, 1]).nunique()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupy_first_returned_categorical_instead_of_dataframe(func):
# GH 28641: groupby drops index, when grouping over categorical column with
    # first/last. Returned a Categorical instead of a DataFrame previously.
df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()})
df_grouped = df.groupby("A")["B"]
result = getattr(df_grouped, func)()
expected = Series(["b"], index=Index([1997], name="A"), name="B")
tm.assert_series_equal(result, expected)
def test_read_only_category_no_sort():
# GH33410
cats = np.array([1, 2])
cats.flags.writeable = False
df = DataFrame(
{"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))}
)
expected = DataFrame(data={"a": [2, 6]}, index=CategoricalIndex([1, 2], name="b"))
result = df.groupby("b", sort=False).mean()
tm.assert_frame_equal(result, expected)
def test_sorted_missing_category_values():
# GH 28597
df = DataFrame(
{
"foo": [
"small",
"large",
"large",
"large",
"medium",
"large",
"large",
"medium",
],
"bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
}
)
df["foo"] = (
df["foo"]
.astype("category")
.cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
)
expected = DataFrame(
{
"tiny": {"A": 0, "C": 0},
"small": {"A": 0, "C": 1},
"medium": {"A": 1, "C": 1},
"large": {"A": 3, "C": 2},
}
)
expected = expected.rename_axis("bar", axis="index")
expected.columns = CategoricalIndex(
["tiny", "small", "medium", "large"],
categories=["tiny", "small", "medium", "large"],
ordered=True,
name="foo",
dtype="category",
)
result = df.groupby(["bar", "foo"]).size().unstack()
tm.assert_frame_equal(result, expected)
def test_agg_cython_category_not_implemented_fallback():
# https://github.com/pandas-dev/pandas/issues/31450
df = DataFrame({"col_num": [1, 1, 2, 3]})
df["col_cat"] = df["col_num"].astype("category")
result = df.groupby("col_num").col_cat.first()
expected = Series([1, 2, 3], index=Index([1, 2, 3], name="col_num"), name="col_cat")
tm.assert_series_equal(result, expected)
result = df.groupby("col_num").agg({"col_cat": "first"})
expected = expected.to_frame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH: 28641 groupby drops index, when grouping over categorical column with min/max
ds = Series(["b"], dtype="category").cat.as_ordered()
df = DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A"))
tm.assert_frame_equal(result, expected)
def test_aggregate_categorical_with_isnan():
# GH 29837
df = DataFrame(
{
"A": [1, 1, 1, 1],
"B": [1, 2, 1, 2],
"numerical_col": [0.1, 0.2, np.nan, 0.3],
"object_col": ["foo", "bar", "foo", "fee"],
"categorical_col": ["foo", "bar", "foo", "fee"],
}
)
df = df.astype({"categorical_col": "category"})
result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
expected = DataFrame(
data={
"numerical_col": [1.0, 0.0],
"object_col": [0, 0],
"categorical_col": [0, 0],
},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_categorical_transform():
# GH 29037
df = DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
}
)
delivery_status_type = pd.CategoricalDtype(
categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
)
df["status"] = df["status"].astype(delivery_status_type)
df["last_status"] = df.groupby("package_id")["status"].transform(max)
result = df.copy()
expected = DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
"last_status": [
"Delivered",
"Delivered",
"Delivered",
"OnTheWay",
"OnTheWay",
"Waiting",
],
}
)
expected["status"] = expected["status"].astype(delivery_status_type)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
func: str, observed: bool
):
# GH 34951
cat = | Categorical([0, 0, 1, 1]) | pandas.Categorical |
# =============================================================================
# Created By : <NAME>
# Created Date: 2021-09
# =============================================================================
"""Module containing plotting functions.
"""
# =============================================================================
# Import packages
# =============================================================================
import json
import pandas as pd
import numpy as np
from scipy import stats
from itertools import combinations
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.io as pio
# =============================================================================
# Global parameters
# =============================================================================
colors = px.colors.qualitative.Plotly
pio.templates.default = "simple_white"
# Load variant color dictionary
with open('../Resources/var_dict.json') as json_file:
var_dict = json.load(json_file)
# =============================================================================
# Plotting functions for participants
# =============================================================================
def profile(self, germline=True):
"""Plot all variant trajectories in a participant class object.
Parameters:
    - germline: Boolean. Include trajectories with
attribute germline = True.
Return: plotly graphical object.
"""
# Initialize figure
fig = go.Figure()
# If germline is True plot all all trajectories
if germline is True:
for traj in self.trajectories:
fig.add_trace(go.Scatter(x=traj.data.age, y=traj.data.AF,
mode='lines+markers',
name=traj.mutation))
# If germline is False plot trajectories with germline attribute==False
else:
for traj in self.trajectories:
if traj.germline is False:
fig.add_trace(go.Scatter(x=traj.data.age, y=traj.data.AF,
mode='lines+markers',
name=traj.mutation))
# Update figure layout
fig.update_layout(title=f'Trajectories of participant {self.id}',
xaxis_title='Age (in years)',
yaxis_title='VAF')
return fig
def plot_id(cohort, participant_id, germline=False):
"""Given a participant's id of a cohort, plot all its variant trajectories.
Parameters:
- cohort: list of participant class objects. Cohort where we search
for the participant.
- participant_id: string. Participant's id.
    - germline: Boolean. Include trajectories with
attribute germline = True.
Return: plotly graphical object.
"""
# Plot trajectories by participant_id
for part in cohort:
if part.id == participant_id:
fig = part.profile(germline=germline)
return fig
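# Illustrative usage (hypothetical cohort list and participant id; the cohort
# loading code lives elsewhere in this package):
#     fig = plot_id(lbc_cohort, 'LBC0001', germline=False)
#     fig.show()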
def synonymous_profile(part_lbc, syn):
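    """Plot a participant's non-synonymous trajectories (semi-transparent,
    default colour) overlaid with the synonymous trajectories (orange) of the
    matching participant in `syn`. Parameter roles are inferred from the
    plotting code below: `part_lbc` is a participant object, `syn` a list of
    participants carrying synonymous variants. Returns a plotly figure."""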
fig = go.Figure()
# Plot non-synonymous trajectory for legend
traj = part_lbc.trajectories[0]
fig.add_trace(
go.Scatter(x=traj.data.age,
y=traj.data.AF,
marker_color=colors[0],
opacity=0.3,
name='Non-Synonymous variant',
legendgroup='Non-synonymous variant'))
# Plot all non-synonymous trajectories
for traj in part_lbc.trajectories:
fig.add_trace(
go.Scatter(x=traj.data.age,
y=traj.data.AF,
marker_color=colors[0],
opacity=0.3,
showlegend=False,
legendgroup='Non-synonymous variant'))
# Find synonymous trajectories of participant
for part in syn:
if part.id == part_lbc.id:
# Plot synonymous trajectory for legend
traj = part.trajectories[0]
fig.add_trace(
go.Scatter(x=traj.data.age,
y=traj.data.AF,
marker_color='Orange',
name='Synonymous variant',
legendgroup='Synonymous variant'))
# Plot all synonymous trajectories
for traj in part.trajectories:
fig.add_trace(
go.Scatter(x=traj.data.age,
y=traj.data.AF,
marker_color='Orange',
showlegend=False,
legendgroup='Synonymous variant'))
fig.update_layout(title='Synonymous mutations',
template='plotly_white',
legend=dict(
y=0.95,
x=0.1,
))
return fig
# =============================================================================
# Plotting functions for longitudinal trajectories
# =============================================================================
def mutation(cohort, mutation):
    # Plot all trajectories that contain the given mutation
fig = go.Figure()
for part in cohort:
for i, word in enumerate(part.mutation_list):
if mutation in word.split():
traj = part.trajectories[i]
fig.add_trace(go.Scatter(x=traj.data.age,
y=traj.data.AF,
mode='lines+markers',
name=traj.mutation,
hovertemplate=f"{part.id}"
))
# Edit the layout
fig.update_layout(title=f'Trajectories containing mutation {mutation}',
                      xaxis_title='Age (in years)',
yaxis_title='VAF')
return fig
def gene_plot(df, gene):
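    """Plot VAF against sampling wave for every trajectory of `gene` found in
    the long-format dataframe `df` (expects columns PreferredSymbol,
    participant_id, key, wave, AF and Variant_Classification).
    Returns a plotly figure."""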
# filter by gene
data_gene = df[df['PreferredSymbol'] == gene]
# Create figure
fig = go.Figure()
# create list of all keys
participants = data_gene.participant_id.unique()
for part in participants:
data_part = data_gene[data_gene['participant_id'] == part]
keys = data_part.key.unique()
for key in keys:
data = data_part[data_part['key'] == key]
v_type = data['Variant_Classification'].unique()[0]
if gene == 'TET2' and max(data.AF) > 0.4:
continue
fig.add_trace(
go.Scatter(x=data['wave'], y=data['AF'],
marker_size=10,
marker_color=var_dict[v_type], showlegend=False))
fig.update_layout(template='simple_white')
fig.update_yaxes(title='VAF',
linewidth=2,
dtick=0.1)
fig.update_layout(title=gene,
xaxis=dict(linewidth=2,
tickmode='array',
tickvals=[1, 2, 3, 4, 5],
ticktext=['1 <br>~70 years<br>~79 years',
'2 <br>~73 years<br>~82 years',
'3 <br>~76 years<br>~85 years',
'4 <br>~79 years<br>~88 years',
'5 <br>~82 years<br>~91 years']))
return fig
def participant_model_plot(model_list, id):
""" Returns scatter plot of data and model predictions
of fit trajectories in a participant"""
part = []
for traj in model_list:
if traj.id == id:
part.append(traj)
if len(part) == 0:
return go.Figure()
# Extract min and max time
min_time = []
max_time = []
for traj in part:
min_time.append(min(list(traj.data_vaf.keys())))
max_time.append(max(list(traj.data_vaf.keys())))
min_time = min(min_time)
max_time = min(max_time)
fig = go.Figure()
for i, traj in enumerate(part):
x = list(traj.data_vaf.keys())
y = list(traj.data_vaf.values())
fig.add_trace(
go.Scatter(x=x, y=y,
mode='markers',
marker_color=var_dict[traj.variant_class],
name=traj.mutation))
# x_prediction = list(traj.vaf_plot.keys())
# y_prediction = list(traj.vaf_plot.values())
x_prediction = [time
for time in list(traj.vaf_plot.keys())
if min_time - 3 < time < max_time + 3]
y_prediction = [traj.vaf_plot[time]
for time in list(traj.vaf_plot.keys())
if min_time - 3 < time < max_time + 3]
fig.add_trace(
go.Scatter(x=x_prediction,
y=y_prediction, mode='lines',
marker_color=var_dict[traj.variant_class],
text=(f'id: {traj.id}<br>'
f'fitness: {round(traj.fitness,3)}<br>'
f'origin: {round(traj.origin,3)}<br>'
f'r2: {round(traj.r2,3)}'),
name=traj.mutation))
fig.update_layout(
title=(f'Trajectory fit of participant {part[0].id} <br>'
f'aic: {int(traj.fit.aic)}'),
xaxis_title='Age (in years)',
yaxis_title='VAF')
# fig.update_xaxes(range=[min_time - 3, max_time + 3])
return fig
# =============================================================================
# Plotting functions for cohort statistics
# =============================================================================
def top_bar(cohort, n_genes=10, all=False):
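    """Bar plot of the number of trajectories per gene across the cohort,
    restricted to the `n_genes` most frequent genes unless all=True.
    Returns a plotly figure."""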
gene_list = []
for part in cohort:
for traj in part.trajectories:
gene_list.append(traj.mutation.split()[0])
gene_dict = {element: 0 for element in set(gene_list)}
for part in cohort:
for traj in part.trajectories:
gene_dict[traj.mutation.split()[0]] = gene_dict[traj.mutation.split()[0]] + 1
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: item[1], reverse=True))
if all is False:
# Filter top mutations
top_genes = list(gene_dict.keys())[0:n_genes]
gene_dict = {gene: gene_dict[gene] for gene in top_genes}
# Bar plot
fig = go.Figure([go.Bar(x=list(gene_dict.keys()),
y=list(gene_dict.values()))])
return fig
def gradients(cohort, mutations):
    # Box plot (with all points shown) of trajectory gradients by mutation
data = pd.DataFrame(columns=['gradient', 'participant', 'mutation'])
for part in cohort:
for traj in part.trajectories:
if traj.mutation.split()[0] in mutations:
data = data.append({'gradient': traj.gradient,
'participant': part.id,
'mutation': traj.mutation.split()[0]},
ignore_index=True)
# violin plot of data
fig = px.box(data, y="gradient", x='mutation',
color='mutation', points='all',
hover_name="participant", hover_data=["mutation"])
fig.update_layout(title='Trajectory gradients by mutation')
return fig
def gene_box(cohort, order='median', percentage=False):
"""Box plot with counts of filtered mutations by gene.
percentage computes fitness as the increase with respect to
the self-renewing replication rate lambda=1.3.
Color allows you to use a dictionary of colors by gene.
Returns a figure."""
# Load gene color dictionary
with open('../Resources/gene_color_dict.json') as json_file:
color_dict = json.load(json_file)
# Create a dictionary with all filtered genes
gene_list = []
for traj in cohort:
gene_list.append(traj.gene)
gene_dict = {element: [] for element in set(gene_list)}
    # collect the fitness values for each gene
if percentage is False:
y_label = 'Fitness'
for traj in cohort:
fitness = traj.fitness
gene_dict[traj.gene].append(fitness)
if percentage is True:
y_label = 'fitness_percentage'
for traj in cohort:
fitness = traj.fitness_percentage
gene_dict[traj.gene].append(fitness)
# sort dictionary in descending order
if order == 'mean':
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: np.mean(item[1]),
reverse=True))
if order == 'median':
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: np.median(item[1]),
reverse=True))
if order == 'max':
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: np.max(item[1]),
reverse=True))
    # Box plot per gene
fig = go.Figure()
# color_dict = dict()
# if isinstance(color, dict):
# color_dict = color
for i, key in enumerate(gene_dict):
fig.add_trace(
go.Box(y=gene_dict[key],
marker_color=color_dict[key],
name=key, boxpoints='all', showlegend=False))
fig.update_layout(title='Gene distribution of filtered mutations',
yaxis_title=y_label,
template="simple_white")
fig.update_xaxes(linewidth=2)
fig.update_yaxes(linewidth=2)
if percentage is False:
fig.update_yaxes(type='log', tickvals=[0.05, 0.1, 0.2, 0.4])
fig.update_layout(xaxis_tickangle=-45)
return fig, gene_dict
def gene_statistic(gene_dict, statistic='kruskal-wallis', filter=True):
""" compute a statistical test to find significant differences in the
distribution of fitness by gene.
    statistic parameter accepts: 'kruskal-wallis' or 'anova'.
Returns:
* heatmap with significant statistical differences.
* dataframe."""
# Check if statistic is allowed
if statistic not in ['kruskal-wallis', 'anova']:
return 'Statistic not recognised.'
# extract all possible gene combinations
gene_list = []
for gene in gene_dict.keys():
if len(gene_dict[gene]) > 2:
gene_list.append(gene)
# Create dataframe to store statistics
test_df = pd.DataFrame(index=gene_list, columns=gene_list)
for gene1, gene2 in combinations(gene_list, 2):
# compute statistic for each possible comination of genes
if statistic == 'kruskal-wallis':
stat, pvalue = stats.kruskal(gene_dict[gene1], gene_dict[gene2])
if statistic == 'anova':
stat, pvalue = stats.f_oneway(gene_dict[gene1], gene_dict[gene2])
# if statistic is significant store value in dataframe
if pvalue < 0.05:
test_df.loc[gene1, gene2] = stat
# Clean dataset from nan
if filter is True:
test_df = test_df.dropna(how='all', axis=1)
test_df = test_df.dropna(how='all', axis=0)
test_df = test_df.reindex(index=test_df.index[::-1])
y = test_df.index
x = test_df.columns
fig = go.Figure(data=go.Heatmap(
z=np.array(test_df),
x=x,
y=y,
colorscale='Cividis',
colorbar=dict(title=f'{statistic} score')))
fig.update_xaxes(side="top", mirror=True)
fig.update_yaxes(side='top', mirror=True)
fig.update_layout(template='simple_white')
return fig, test_df
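# Illustrative call sequence (hypothetical `fitted_trajectories` list of fitted
# trajectory objects produced elsewhere in this codebase): gene_box returns the
# per-gene fitness dictionary that gene_statistic consumes.
#     fig_box, gene_dict = gene_box(fitted_trajectories, order='median')
#     fig_heat, test_df = gene_statistic(gene_dict, statistic='kruskal-wallis')
#     fig_heat.show()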
# =============================================================================
# Plotting functions for protein damage prediction
# =============================================================================
def damage_class(model):
""" Box plot of variant predicted protein damage class ~ Fitness.
Parameters:
model: List. List of all fitted trajectories
Returns:
* Box plot
* Modified model with damage_class attribute."""
# Set default color
default_color = 'Grey'
# Load dataset with prediction damage
xls = | pd.ExcelFile('../Datasets/variants_damage.xlsx') | pandas.ExcelFile |
""" test feather-format compat """
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.io.feather_format import read_feather, to_feather # isort:skip
pyarrow = pytest.importorskip("pyarrow", minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@filter_sparse
@pytest.mark.single_cpu
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestFeather:
def check_error_on_write(self, df, exc, err_msg):
# check that we are raising the exception
# on writing
with pytest.raises(exc, match=err_msg):
with tm.ensure_clean() as path:
to_feather(df, path)
def check_external_error_on_write(self, df):
# check that we are raising the exception
# on writing
with tm.external_error_raised(Exception):
with tm.ensure_clean() as path:
to_feather(df, path)
def check_round_trip(self, df, expected=None, write_kwargs={}, **read_kwargs):
if expected is None:
expected = df
with tm.ensure_clean() as path:
to_feather(df, path, **write_kwargs)
result = read_feather(path, **read_kwargs)
tm.assert_frame_equal(result, expected)
def test_error(self):
msg = "feather only support IO with DataFrames"
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, ValueError, msg)
def test_basic(self):
df = pd.DataFrame(
{
"string": list("abc"),
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_null": [1.0, np.nan, 3],
"bool": [True, False, True],
"bool_with_null": [True, np.nan, False],
"cat": pd.Categorical(list("abc")),
"dt": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3)), freq=None
),
"dttz": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, tz="US/Eastern")),
freq=None,
),
"dt_with_null": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
"dtns": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, freq="ns")), freq=None
),
}
)
df["periods"] = pd.period_range("2013", freq="M", periods=3)
df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
df["intervals"] = pd.interval_range(0, 3, 3)
assert df.dttz.dtype.tz.zone == "US/Eastern"
self.check_round_trip(df)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_external_error_on_write(df)
def test_stringify_columns(self):
df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
msg = "feather must have string column names"
self.check_error_on_write(df, ValueError, msg)
def test_read_columns(self):
# GH 24025
df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list(range(1, 4)),
"col3": list("xyz"),
"col4": list(range(4, 7)),
}
)
columns = ["col1", "col3"]
self.check_round_trip(df, expected=df[columns], columns=columns)
    def test_read_columns_different_order(self):
# GH 33878
df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
self.check_round_trip(df, columns=["B", "A"])
def test_unsupported_other(self):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
self.check_external_error_on_write(df)
def test_rw_use_threads(self):
df = pd.DataFrame({"A": np.arange(100000)})
self.check_round_trip(df, use_threads=True)
self.check_round_trip(df, use_threads=False)
def test_write_with_index(self):
df = pd.DataFrame({"A": [1, 2, 3]})
self.check_round_trip(df)
msg = (
r"feather does not support serializing .* for the index; "
r"you can \.reset_index\(\) to make the index into column\(s\)"
)
# non-default index
for index in [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]),
]:
df.index = index
self.check_error_on_write(df, ValueError, msg)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
msg = "feather does not serialize index meta-data on a default index"
self.check_error_on_write(df, ValueError, msg)
# column multi-index
df.index = [0, 1, 2]
df.columns = pd.MultiIndex.from_tuples([("a", 1)])
msg = "feather must have string column names"
self.check_error_on_write(df, ValueError, msg)
def test_path_pathlib(self):
df = tm.makeDataFrame().reset_index()
result = tm.round_trip_pathlib(df.to_feather, read_feather)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = | tm.makeDataFrame() | pandas._testing.makeDataFrame |
import gc
import numpy as np
import pandas as pd
import tables
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from icu_benchmarks.common import constants
def gather_cat_values(common_path, cat_values):
# not too many, so read all of them
df_cat = pd.read_parquet(common_path, columns=list(cat_values))
d = {}
for c in df_cat.columns:
d[c] = [x for x in df_cat[c].unique() if not np.isnan(x)]
return d
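# Sketch of a call and the shape of the result (column names and values are
# purely illustrative, not taken from the real dataset):
#     cat_dict = gather_cat_values("common.parquet", {"sex", "ward"})
#     # -> {"sex": [0.0, 1.0], "ward": [1.0, 2.0, 3.0]}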
def gather_stats_over_dataset(parts, to_standard_scale, to_min_max_scale, train_split_pids, fill_string):
minmax_scaler = MinMaxScaler()
for p in parts:
df_part = impute_df(pd.read_parquet(p, engine='pyarrow', columns=to_min_max_scale + [constants.PID],
filters=[(constants.PID, "in", train_split_pids)]), fill_string=fill_string)
df_part = df_part.replace(np.inf, np.nan).replace(-np.inf, np.nan)
minmax_scaler.partial_fit(df_part[to_min_max_scale])
gc.collect()
means = []
stds = []
    # cannot read all to_standard_scale columns into memory; reading them one-by-one
    # would be very slow, so read a certain number of columns at a time
batch_size = 20
batches = (to_standard_scale[pos:pos + batch_size] for pos in range(0, len(to_standard_scale), batch_size))
for s in batches:
dfs = impute_df(pd.read_parquet(parts[0].parent, engine='pyarrow', columns=[constants.PID] + s,
filters=[(constants.PID, "in", train_split_pids)]),
fill_string=fill_string)
dfs = dfs.replace(np.inf, np.nan).replace(-np.inf, np.nan)
        # don't rely on sklearn StandardScaler, as partial_fit does not seem to work correctly
        # if in one iteration all values of a column are nan (the running mean then becomes nan)
means.extend(dfs[s].mean())
        stds.extend(dfs[s].std(ddof=0)) # ddof=0 to be consistent with sklearn StandardScaler
gc.collect()
return (means, stds), minmax_scaler
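# Downstream, the gathered statistics would typically be applied like this
# (a sketch under the assumption that column order follows to_standard_scale):
#     means, stds = np.asarray(means), np.asarray(stds)
#     df[to_standard_scale] = (df[to_standard_scale] - means) / stds
#     df[to_min_max_scale] = minmax_scaler.transform(df[to_min_max_scale])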
def to_ml(save_path, parts, labels, features, endpoint_names, df_var_ref, fill_string, split_path=None, random_seed=42):
df_part = | pd.read_parquet(parts[0]) | pandas.read_parquet |
import os
import json
import numpy as np
try:
import requests
except ImportError:
requests = None
import pandas as pd
from pmagpy import find_pmag_dir
from pmag_env import set_env
DM = []
CRIT_MAP = []
class DataModel():
"""
Contains the MagIC data model and validation information.
self.dm is a dictionary of DataFrames for each table.
self.crit_map is a DataFrame with all of the columns validations.
"""
def __init__(self, offline=False):
global DM, CRIT_MAP
self.offline = offline
if not len(DM):
self.dm, self.crit_map = self.get_data_model()
DM = self.dm
CRIT_MAP = self.crit_map
else:
self.dm = DM
self.crit_map = CRIT_MAP
def get_data_model(self):
"""
Try to download the data model from Earthref.
If that fails, grab the cached data model.
"""
if len(DM):
self.dm = DM
self.crit_map = CRIT_MAP
return
if not set_env.OFFLINE:
dm = self.get_dm_online()
if dm:
print('-I- Using online data model')
#self.cache_data_model(dm)
return self.parse_response(dm)
# if online is not available, get cached dm
dm = self.get_dm_offline()
print('-I- Using cached data model')
return self.parse_cache(dm)
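    # Usage sketch: constructing the class triggers this online-with-fallback
    # logic, e.g. ('sites' is one of the table levels parsed below):
    #     data_model = DataModel()
    #     data_model.dm['sites']     # DataFrame describing the 'sites' table
    #     data_model.crit_map        # criteria-map DataFrame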
def get_dm_offline(self):
"""
Grab the 3.0 data model from the PmagPy/pmagpy directory
Returns
---------
full : DataFrame
cached data model json in DataFrame format
"""
model_file = self.find_cached_dm()
try:
f = open(model_file, 'r', encoding='utf-8-sig')
except TypeError:
f = open(model_file, 'r')
string = '\n'.join(f.readlines())
f.close()
raw = json.loads(string)
full = pd.DataFrame(raw)
return full
def get_dm_online(self):
"""
Use requests module to get data model from Earthref.
If this fails or times out, return false.
Returns
---------
result : requests.models.Response, False if unsuccessful
"""
if not requests:
return False
try:
req = requests.get("https://earthref.org/MagIC/data-models/3.0.json", timeout=3)
if not req.ok:
return False
return req
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
return False
def parse_cache(self, full_df):
"""
Format the cached data model into a dictionary of DataFrames
and a criteria map DataFrame.
Parameters
----------
full_df : DataFrame
result of self.get_dm_offline()
Returns
----------
data_model : dictionary of DataFrames
crit_map : DataFrame
"""
data_model = {}
levels = ['specimens', 'samples', 'sites', 'locations',
'ages', 'measurements', 'criteria', 'contribution',
'images']
criteria_map = pd.DataFrame(full_df['criteria_map'])
for level in levels:
df = | pd.DataFrame(full_df['tables'][level]['columns']) | pandas.DataFrame |
import folium
import geopandas as gpd
import pandas as pd
import streamlit as st
from matplotlib import pyplot as plt
from streamlit_folium import folium_static
# Caching allows to store return variables in memory
# Saving execution time for time costly operations
@st.cache
def load_files():
"""
    Loads the necessary files and reprojects them (in case they are not already) to
    CRS EPSG:4326.
"""
europe = pd.read_pickle("../data/europe_attacks.p").to_crs("EPSG:4326")
bl = pd.read_pickle("../data/bl_attacks.p").to_crs("EPSG:4326")
kreise = pd.read_pickle("../data/kreise_attacks.p").to_crs("EPSG:4326")
kreise_full = pd.read_pickle("../data/kreise_full.p").to_crs("EPSG:4326")
gdf_europe = | pd.read_pickle("../data/gdf_europe.p") | pandas.read_pickle |
import glob
import os
import pandas as pd
import pytz
from dateutil import parser, tz
from matplotlib import pyplot as plt
fp = "C:\\Users\\Robert\\Documents\\Uni\\SOLARNET\\HomogenizationCampaign\\catania\\"
df = [ | pd.read_csv(file, delimiter=" ", names=["file", "date", "time", "tz"]) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import boto3
import yaml
import utils
from scipy import signal
# import config
with open("02_munge/params_config_munge_noaa_nos.yaml", 'r') as stream:
config = yaml.safe_load(stream)
# check where to read data inputs from
read_location = config['read_location']
# set up write location data outputs
write_location = config['write_location']
s3_client = utils.prep_write_location(write_location, config['aws_profile'])
s3_bucket = config['s3_bucket']
def get_datafile_list(station_id, read_location, s3_client=None, s3_bucket=None):
raw_datafiles = {}
if read_location=='S3':
raw_datafiles = [obj['Key'] for obj in s3_client.list_objects_v2(Bucket=s3_bucket, Prefix=f'01_fetch/out/noaa_nos_{station_id}')['Contents']]
elif read_location=='local':
prefix = os.path.join('01_fetch', 'out')
file_prefix=f'noaa_nos_{station_id}'
raw_datafiles = [os.path.join(prefix, f) for f in os.listdir(prefix) if f.startswith(file_prefix)]
return raw_datafiles
def read_data(raw_datafile):
if read_location == 'local':
print(f'reading data from local: {raw_datafile}')
# read in raw data as pandas df
df = pd.read_csv(raw_datafile)
elif read_location == 'S3':
print(f'reading data from s3: {raw_datafile}')
obj = s3_client.get_object(Bucket=s3_bucket, Key=raw_datafile)
# read in raw data as pandas df
df = pd.read_csv(obj.get("Body"))
return df
def fill_gaps(x):
'''fills any data gaps in the middle of the input series
using linear interpolation; returns gap-filled time series
'''
#find nans
bd = np.isnan(x)
#early exit if there are no nans
if not bd.any():
return x
    #find indices of non-nan values
gd = np.flatnonzero(~bd)
#ignore leading and trailing nans
bd[:gd.min()]=False
bd[(gd.max()+1):]=False
#interpolate nans
x[bd] = np.interp(np.flatnonzero(bd),gd,x[gd])
return x
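# Example (sketch): only interior gaps are interpolated; leading/trailing NaNs stay.
#     fill_gaps(np.array([np.nan, 1.0, np.nan, 3.0, np.nan]))
#     # -> array([nan, 1., 2., 3., nan])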
def butterworth_filter(df, butterworth_filter_params):
#parameter for butterworth filter
# filter order
order_butter = butterworth_filter_params['order_butter']
# cutoff frequency
fc= butterworth_filter_params['fc']
# sample interval
fs = butterworth_filter_params['fs']
# filter accepts 1d array
prod = butterworth_filter_params['product']
# get only product of interest
x = df[prod]
# apply butterworth filter
b,a = signal.butter(order_butter, fc, 'low', fs=fs, output='ba')
x_signal = signal.filtfilt(b,a,x[x.notnull()])
df.loc[x.notnull(), prod+'_filtered'] = x_signal
return df
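# Minimal example of the expected parameter dict (keys inferred from the lookups
# above; the numeric values and product name are purely illustrative):
#     butterworth_filter_params = {
#         'order_butter': 4,          # filter order
#         'fc': 1 / (33 * 3600),      # cutoff frequency
#         'fs': 1 / 360,              # sampling frequency
#         'product': 'water_level',   # column to filter
#     }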
def process_data_to_csv(site, site_raw_datafiles, qa_to_drop, flags_to_drop_by_var, agg_level, prop_obs_required, butterworth_filter_params):
'''
process raw data text files into clean csvs, including:
dropping unwanted flags
converting datetime column to datetime format
converting all data columns to numeric type
removing metadata columns so that only datetime and data columns remain
'''
print(f'processing and saving locally')
combined_df = | pd.DataFrame(columns=['datetime']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
from typing import Dict, Optional
from cached_property import cached_property
import pandas as pd
from skbio import TabularMSA, DNA, Sequence
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.constants import AMBIGUOUS_COLS, STANDARD_COLS
class AlleleFreqs:
"""Class used to calculate allele frequencies from a multialignment.
Input can be either a fasta or csv file with multialigned sequences,
which may or may not contain the reference sequence in the first
position. In the latter case, an additional reference sequence file
is needed, either in fasta or csv format.
"""
def __init__(self,
multialg: MultiAlignment,
reference: Reference,
ambiguous: bool = False):
self.multialg = multialg
self.reference = reference
self.ambiguous = ambiguous
if len(self.multialg.tabmsa.sequence[0]) != len(self.reference):
raise ValueError("Reference and aligned sequences must have "
"the same length.")
@classmethod
def from_fasta(cls,
sequences: str,
reference: Optional[str] = None,
ambiguous: bool = False):
"""Read a multialignment from a fasta file.
If `reference` is not provided, it is assumed that the first
sequence of the multialignment is the reference sequence.
Otherwise, an additional fasta file with the reference sequence
is needed.
Args:
sequences: input fasta file with multialignment
reference: optional fasta file with reference sequence
ambiguous: show frequencies for ambiguous nucleotides too
[default: False]
"""
msa = TabularMSA.read(sequences, constructor=DNA)
if not reference:
refer = msa[0]
multialg = {seq.metadata.get("id"): str(seq) for seq in msa[1:]}
else:
refer = Sequence.read(reference)
multialg = {seq.metadata.get("id"): str(seq) for seq in msa}
ref = Reference(refer)
alg = MultiAlignment(multialg)
return cls(multialg=alg, reference=ref, ambiguous=ambiguous)
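    # Usage sketch (file names are hypothetical):
    #     af = AlleleFreqs.from_fasta("alignment.fasta")                     # reference = first record
    #     af = AlleleFreqs.from_fasta("alignment.fasta", "reference.fasta")  # explicit reference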
@classmethod
def from_csv(cls,
sequences: str,
reference: Optional[str] = None,
ambiguous: bool = False,
**kwargs):
"""Read a multialignment from a csv file.
If `reference` is not provided, it is assumed that the first
sequence of the multialignment is the reference sequence.
Otherwise, an additional csv file with the reference sequence is
needed. In both cases, the input csv file must be composed of
two columns only, one for sequences ids and the other for the
actual sequences; if not, you can provide additional options for
pandas to restrict the number of columns read.
Args:
sequences: input csv file with multialignment
reference: optional csv file with reference sequence
ambiguous: show frequencies for ambiguous nucleotides too
[default: False]
**kwargs: additional options for pandas.read_csv()
"""
msa = pd.read_csv(sequences, **kwargs)
if msa.shape[1] != 2:
raise ValueError("Please make sure the input only contains two "
"columns.")
if not reference:
refer = msa.iloc[0, 1]
msa = msa.iloc[1:, :]
multialg = dict(zip(msa.iloc[:, 0], msa.iloc[:, 1]))
else:
refer = | pd.read_csv(reference, **kwargs) | pandas.read_csv |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import sqlite3 as sqlite
import pandas as pd
from scipy import stats
import pylab
from sklearn.neighbors import KernelDensity
from scipy.stats import mode
import json
from json2html import *
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings('ignore')
def ImportData():
conn = sqlite.connect("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/data/yelpCleanDB.sqlite")
data = pd.read_sql_query("SELECT * FROM DataCompetitionCityAdded", conn)
# for index, row in data.iterrows():
# if row['city'] == 'San Francisco - Downtown' or row['city'] == 'San Francisco - Outer':
# # print(row)
# del row
print(len(data))
data = data[data['category'] != 'food']
data = data[data['category'] != 'nightlife']
data = data[data['city'] != 'San Francisco - Downtown']
data = data[data['city'] != 'San Francisco - Outer']
print(len(data))
return data
def Stats(data):
pd.options.display.float_format = '{:,.2f}'.format #display a pandas dataframe with a given format
data = data[['rating', 'review_count', 'category', 'query_price',
'closest2Distance','closest2Price', 'closest2Rating',
'closest5Distance', 'closest5Price','closest5Rating',
'closest10Distance', 'closest10Price','closest10Rating',
'closest15Distance', 'closest15Price','closest15Rating']]
# data.columns =
dataByCategory = data.groupby('category')
dataPivotTable = | pd.DataFrame(columns=['category', 'variable', 'Stats', 'value']) | pandas.DataFrame |
import unittest
import numpy
import pandas
from helpers.general import add_temporal_noise
from ..features import TimeRange, get_event_series, add_roll, roll
from .. import general
class TimeShiftTests(unittest.TestCase):
@staticmethod
def _get_data(n=100):
X = numpy.asarray(range(n))
return numpy.vstack([X, X ** 2, X ** 3]).transpose()
def test_errors(self):
X = self._get_data()
self.assertRaises(ValueError, general.prepare_time_matrix, X, 0)
self.assertRaises(ValueError, general.prepare_time_matrix, X, -4)
def test_identity(self):
X = self._get_data()
X_1 = general.prepare_time_matrix(X, 1)
self.assertEqual(X.shape[0], X_1.shape[0])
self.assertEqual(X.shape[1], X_1.shape[2])
self.assertEqual(1, X_1.shape[1])
self.assertTrue(numpy.array_equal(X, X_1.reshape(X.shape)))
def test_simple(self):
X = self._get_data(10)
# basic tests - each row is x, x**2, x**3
self.assertEqual(X[0, 1], 0)
self.assertEqual(X[5, 1], 25)
self.assertEqual(X[5, 2], 125)
X_time = general.prepare_time_matrix(X, 5)
self.assertSequenceEqual((X.shape[0], 5, X.shape[1]), X_time.shape)
# the last index is the current value
self.assertEqual(X_time[0, -1, 1], 0)
self.assertEqual(X_time[5, -1, 1], 25)
self.assertEqual(X_time[5, -1, 2], 125)
# test shifted into past 1 step
self.assertEqual(X_time[5, -2, 0], 4)
self.assertEqual(X_time[5, -2, 1], 16)
self.assertEqual(X_time[5, -2, 2], 64)
self.assertEqual(X_time[5, -5, 0], 1)
self.assertEqual(X_time[5, -5, 1], 1)
self.assertEqual(X_time[5, -5, 2], 1)
self.assertEqual(X_time[9, -5, 0], 5)
self.assertEqual(X_time[9, -5, 1], 25)
self.assertEqual(X_time[9, -5, 2], 125)
# by default it wraps around
self.assertEqual(X_time[0, -2, 0], 9)
self.assertEqual(X_time[0, -2, 1], 81)
self.assertEqual(X_time[0, -2, 2], 729)
def test_no_rotation(self):
X = self._get_data(10)
X_time = general.prepare_time_matrix(X, 5, fill_value=-1)
self.assertEqual(X_time[5, -5, 0], 1)
self.assertEqual(X_time[5, -5, 1], 1)
self.assertEqual(X_time[5, -5, 2], 1)
self.assertEqual(X_time[0, -2, 0], -1)
self.assertEqual(X_time[0, -2, 1], -1)
self.assertEqual(X_time[0, -2, 2], -1)
# just check the squares cause the fill val is negative
self.assertEqual(X_time[2, -2, 1], 1)
self.assertEqual(X_time[2, -3, 1], 0)
self.assertEqual(X_time[2, -4, 1], -1)
self.assertEqual(X_time[2, -5, 1], -1)
def test_temporal_noise(self):
ones = numpy.ones((10, 10))
self.assertAlmostEqual(0, (ones - add_temporal_noise(ones)).sum())
# random shouldn't be close
r = numpy.random.rand(10, 10)
self.assertNotAlmostEqual(0, (r - add_temporal_noise(r)).sum())
# try with identical random features
random_features = numpy.random.rand(1, 10)
ident_rows = numpy.repeat(random_features, 20, axis=0)
def_ident_rows = add_temporal_noise(ident_rows)
self.assertAlmostEqual(0, (ident_rows - def_ident_rows).sum())
# try with monotonic increasing
monotonic = numpy.repeat(numpy.asarray(range(1, 1001)).reshape((1000, 1)), 20, axis=1)
monotonic_deformed = add_temporal_noise(monotonic)
self.assertLessEqual((abs(monotonic - monotonic_deformed) / monotonic).mean(), .05)
class TimeRangeTests(unittest.TestCase):
def _make_time(self, start_time, duration_minutes=30):
duration = pandas.Timedelta(minutes=duration_minutes)
end_time = start_time + duration
return TimeRange(start_time, end_time)
def test_simple(self):
"""Test basic event-filling functionality"""
hourly_index = pandas.DatetimeIndex(freq="1H", start=pandas.datetime(year=2016, month=4, day=1), periods=1000)
dummy_events = [self._make_time(pandas.datetime(year=2016, month=4, day=1, hour=5, minute=50)), self._make_time(pandas.datetime(year=2016, month=4, day=1, hour=7, minute=20))]
indicatored = get_event_series(hourly_index, dummy_events)
self.assertEqual(1, indicatored.sum())
minute_index = pandas.DatetimeIndex(freq="1Min", start= | pandas.datetime(year=2016, month=4, day=1) | pandas.datetime |
from process_cuwb_data.uwb_extract_data import extract_by_data_type_and_format
from process_cuwb_data.uwb_motion_features import FeatureExtraction
import numpy as np
import pandas as pd
class TestUWBMotionFeatures:
@classmethod
def prep_test_cuwb_data(cls, cuwb_dataframe):
# Build dataframe with:
# 1 tray device that has both position and acceleration
# 1 person device that has both position and acceleration
# 1 person device that has only position
test_device_ids = []
has_tray_with_position_and_acceleration = None
has_person_with_position_and_acceleration = None
has_person_with_position_only = None
for device_id in pd.unique(cuwb_dataframe['device_id']):
device_position_filter = ((cuwb_dataframe['device_id'] == device_id) & (cuwb_dataframe['type'] == 'position'))
device_accelerometer_filter = ((cuwb_dataframe['device_id'] == device_id) & (cuwb_dataframe['type'] == 'accelerometer'))
if has_tray_with_position_and_acceleration is None:
if (len(cuwb_dataframe[device_position_filter]) > 0 and
len(cuwb_dataframe[device_accelerometer_filter]) > 0 and
cuwb_dataframe[device_position_filter]['entity_type'][0] == 'Tray'):
test_device_ids.append(device_id)
has_tray_with_position_and_acceleration = device_id
continue
if has_person_with_position_and_acceleration is None:
if (len(cuwb_dataframe[device_position_filter]) > 0 and
len(cuwb_dataframe[device_accelerometer_filter]) > 0 and
cuwb_dataframe[device_position_filter]['entity_type'][0] == 'Person'):
test_device_ids.append(device_id)
has_person_with_position_and_acceleration = device_id
continue
if has_person_with_position_only is None:
if (len(cuwb_dataframe[device_position_filter]) > 0 and
len(cuwb_dataframe[device_accelerometer_filter]) == 0 and
cuwb_dataframe[device_position_filter]['entity_type'][0] == 'Person'):
test_device_ids.append(device_id)
has_person_with_position_only = device_id
continue
assert has_tray_with_position_and_acceleration is not None, "Expected tray device with position and acceleration data"
assert has_person_with_position_and_acceleration is not None, "Expected person device with position and acceleration data"
assert has_person_with_position_only is not None, "Expected person device with position data only"
return cuwb_dataframe[cuwb_dataframe['device_id'].isin(test_device_ids)]
def test_extract_motion_features_handles_missing_accelerometer_data(self, cuwb_dataframe):
df_test_cuwb_data = TestUWBMotionFeatures.prep_test_cuwb_data(cuwb_dataframe)
f = FeatureExtraction()
df_motion_features = f.extract_motion_features_for_multiple_devices(
df_position=extract_by_data_type_and_format(df_test_cuwb_data, data_type='position'),
df_acceleration=extract_by_data_type_and_format(df_test_cuwb_data, data_type='accelerometer'),
entity_type='all'
)
count_unique_devices_original = len(pd.unique(df_test_cuwb_data['device_id']))
count_unique_devices_motion_data = len( | pd.unique(df_motion_features['device_id']) | pandas.unique |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 12:25:20 2018
@author: kazuki.onodera
cd Home-Credit-Default-Risk/py
python run.py 817_cv_LB804_Branden.py
"""
import gc, os
#from tqdm import tqdm
import pandas as pd
import numpy as np
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
#from glob import glob
#import count
import utils, utils_best
utils.start(__file__)
#==============================================================================
SEED = np.random.randint(99999)
NFOLD = 7
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.9,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
'seed': SEED
}
loader = utils_best.Loader('LB804')
category_branden = ['Bra_papp_max_SK_ID_CURR_WEEKDAY_APPR_PROCESS_START_int',
'Bra_papp_min_SK_ID_CURR_PRODUCT_COMBINATION_int',
'Bra_WEEKDAY_APPR_PROCESS_START_int',
'Bra_papp_max_SK_ID_CURR_NAME_TYPE_SUITE_int',
'Bra_papp_min_SK_ID_CURR_NAME_GOODS_CATEGORY_int',
'Bra_papp_min_SK_ID_CURR_WEEKDAY_APPR_PROCESS_START_int',
'Bra_papp_min_SK_ID_CURR_NAME_SELLER_INDUSTRY_int',
'Bra_CODE_GENDER_int',
'Bra_papp_min_SK_ID_CURR_CODE_REJECT_REASON_int',
'Bra_papp_max_SK_ID_CURR_PRODUCT_COMBINATION_int',
'Bra_papp_min_SK_ID_CURR_NAME_CONTRACT_STATUS_int',
'Bra_NAME_FAMILY_STATUS_int',
'Bra_papp_max_SK_ID_CURR_NAME_GOODS_CATEGORY_int',
'Bra_OCCUPATION_TYPE_int',
'Bra_papp_max_SK_ID_CURR_NAME_SELLER_INDUSTRY_int',
'Bra_papp_max_SK_ID_CURR_CODE_REJECT_REASON_int',
'Bra_papp_min_SK_ID_CURR_NAME_YIELD_GROUP_int',
'Bra_papp_min_SK_ID_CURR_NAME_PRODUCT_TYPE_int',
'Bra_papp_max_SK_ID_CURR_CHANNEL_TYPE_int',
'Bra_WALLSMATERIAL_MODE_int',
'Bra_ORGANIZATION_TYPE_int',
'Bra_papp_min_SK_ID_CURR_NAME_CLIENT_TYPE_int',
'Bra_papp_min_SK_ID_CURR_NAME_CASH_LOAN_PURPOSE_int',
'Bra_papp_max_SK_ID_CURR_NAME_CONTRACT_STATUS_int',
'Bra_papp_min_SK_ID_CURR_NAME_PORTFOLIO_int',
'Bra_papp_max_SK_ID_CURR_NAME_YIELD_GROUP_int',
'Bra_papp_min_SK_ID_CURR_CHANNEL_TYPE_int',
'Bra_NAME_EDUCATION_TYPE_int',
'Bra_FONDKAPREMONT_MODE_int',
'Bra_papp_max_SK_ID_CURR_NAME_PRODUCT_TYPE_int',
'Bra_NAME_INCOME_TYPE_int',
'Bra_papp_min_SK_ID_CURR_NAME_CONTRACT_TYPE_int',
'Bra_papp_min_SK_ID_CURR_NAME_TYPE_SUITE_int',
'Bra_NAME_TYPE_SUITE_int',
'Bra_NAME_HOUSING_TYPE_int',
'Bra_papp_max_SK_ID_CURR_NAME_PAYMENT_TYPE_int',
'Bra_papp_max_SK_ID_CURR_NAME_CLIENT_TYPE_int',
'Bra_papp_min_SK_ID_CURR_NAME_PAYMENT_TYPE_int',
'Bra_papp_max_SK_ID_CURR_NAME_CONTRACT_TYPE_int',
'Bra_papp_max_SK_ID_CURR_NAME_CASH_LOAN_PURPOSE_int']
print('seed:', SEED)
# =============================================================================
# load
# =============================================================================
X = pd.concat([
loader.train(),
pd.read_feather('../feature_someone/branden/X_train.f')
], axis=1)
y = utils.read_pickles('../data/label').TARGET
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
gc.collect()
CAT = list( set(X.columns) & set(loader.category()) ) + category_branden
print('category:', CAT)
# =============================================================================
# cv
# =============================================================================
dtrain = lgb.Dataset(X, y, categorical_feature=CAT )
gc.collect()
ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
early_stopping_rounds=100, verbose_eval=50,
seed=SEED)
result = f"CV auc-mean(seed:{SEED}): {ret['auc-mean'][-1]} + {ret['auc-stdv'][-1]}"
print(result)
utils.send_line(result)
imp = ex.getImp(models)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
# =============================================================================
# predict
# =============================================================================
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
X_test = pd.concat([
loader.test(),
pd.read_feather('../feature_someone/branden/X_test.f')
], axis=1)[X.columns]
sub_train = | pd.DataFrame(index=X.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Unit tests for (dunder) composition functionality attached to the base class."""
__author__ = ["fkiraly"]
__all__ = []
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sktime.transformations.compose import FeatureUnion, TransformerPipeline
from sktime.transformations.panel.padder import PaddingTransformer
from sktime.transformations.series.exponent import ExponentTransformer
from sktime.transformations.series.impute import Imputer
from sktime.utils._testing.deep_equals import deep_equals
from sktime.utils._testing.estimator_checks import _assert_array_almost_equal
def test_dunder_mul():
"""Test the mul dunder method."""
X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
t1 = ExponentTransformer(power=2)
t2 = ExponentTransformer(power=5)
t3 = ExponentTransformer(power=0.1)
t4 = ExponentTransformer(power=1)
t12 = t1 * t2
t123 = t12 * t3
t312 = t3 * t12
t1234 = t123 * t4
t1234_2 = t12 * (t3 * t4)
assert isinstance(t12, TransformerPipeline)
assert isinstance(t123, TransformerPipeline)
assert isinstance(t312, TransformerPipeline)
assert isinstance(t1234, TransformerPipeline)
assert isinstance(t1234_2, TransformerPipeline)
assert [x.power for x in t12.steps] == [2, 5]
assert [x.power for x in t123.steps] == [2, 5, 0.1]
assert [x.power for x in t312.steps] == [0.1, 2, 5]
assert [x.power for x in t1234.steps] == [2, 5, 0.1, 1]
assert [x.power for x in t1234_2.steps] == [2, 5, 0.1, 1]
_assert_array_almost_equal(X, t123.fit_transform(X))
_assert_array_almost_equal(X, t312.fit_transform(X))
_assert_array_almost_equal(X, t1234.fit_transform(X))
_assert_array_almost_equal(X, t1234_2.fit_transform(X))
_assert_array_almost_equal(t12.fit_transform(X), t3.fit(X).inverse_transform(X))
def test_dunder_add():
"""Test the add dunder method."""
X = | pd.DataFrame({"a": [1, 2], "b": [3, 4]}) | pandas.DataFrame |
#!/usr/bin/env python
#
# MIT License
# Copyright (c) 2020 Dr. <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This program is part of https://github.com/jgehrcke/covid-19-analysis
"""
import logging
import sys
import re
import time
from datetime import datetime
from textwrap import dedent
from difflib import SequenceMatcher
import numpy as np
import pandas as pd
import scipy.optimize
import requests
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import column, layout
from bokeh.models import ColumnDataSource, Div
log = logging.getLogger()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s.%(msecs)03d %(levelname)s: %(message)s",
datefmt="%y%m%d-%H:%M:%S",
)
def main():
data_file_path = sys.argv[1]
location_name = sys.argv[2]
df = jhu_csse_csv_to_dataframe(data_file_path, location_name)
# lower-case for lookup in DataFrame, and for filename generation.
loc = location_name.lower()
modified = False
if loc == "germany":
# correct manually for https://github.com/CSSEGISandData/COVID-19/issues/804
df["germany"]["2020-03-12"] = 2369
df, modified = germany_try_to_get_todays_value_from_zeit_de(df)
now = datetime.utcnow()
zeit_de_source = 'and <a href="https://zeit.de">zeit.de</a>' if modified else ""
preamble_text = f"""
Analysis based on confirmed COVID-19 cases for {location_name.upper()}.
Data source: <a href="https://github.com/CSSEGISandData/COVID-19">github.com/CSSEGISandData/COVID-19</a> {zeit_de_source}
Author: <a href="https://gehrcke.de">Dr. <NAME></a>
Code: <a href="https://github.com/jgehrcke/covid-19-analysis">github.com/jgehrcke/covid-19-analysis</a>
Data points from before February 28 are ignored.
Generated at {now .strftime('%Y-%m-%d %H:%M UTC')}
Current count: {df[loc][-1]} ({df.index[-1].strftime("%Y-%m-%d")})
"""
preamble_text = dedent(preamble_text.replace("\n\n", "<br />"))
create_bokeh_html(df, location_name, preamble_text)
def create_bokeh_html(df, loc, preamble_text):
output_file(f"plot-{loc}.html")
preamble = Div(text=preamble_text, height=120)
cases_total = df
cases_new = df[loc].diff()
cases_new.name = "diff"
cases_new = cases_new.to_frame()
# cleaning step: a change of 0 implies that data wasn't updated on
# that day. Set to NaN.
cases_new["diff"].replace(0, np.NaN)
cases_total_fit = expfit(cases_total, loc)
f1 = figure(
title="evolution of total case count (half-logarithmic)",
x_axis_type="datetime",
y_axis_type="log",
toolbar_location=None,
background_fill_color="#F2F2F7",
)
f1.scatter(
"date",
loc,
marker="x",
size=8,
line_width=3,
legend_label="raw data",
source=ColumnDataSource(data=cases_total),
)
f1.toolbar.active_drag = None
f1.toolbar.active_scroll = None
f1.toolbar.active_tap = None
f1.y_range.bounds = (1, cases_total[loc].max() * 10)
f1.y_range.start = 1
f1.y_range.end = cases_total[loc].max() * 10
f1.xaxis.axis_label = "Date"
f1.yaxis.axis_label = "total number of confirmed cases"
f1.xaxis.ticker.desired_num_ticks = 15
f1.outline_line_width = 4
f1.outline_line_alpha = 0.3
f1.outline_line_color = "#aec6cf"
f1.line(
"date",
"expfit",
legend_label="exponential fit",
source=ColumnDataSource(data=cases_total_fit),
)
# f1.legend.title = "Legend"
f1.legend.location = "top_left"
flin = figure(
title="evolution of total case count (linear)",
x_axis_type="datetime",
toolbar_location=None,
background_fill_color="#F2F2F7",
)
flin.scatter(
"date",
loc,
marker="x",
size=8,
line_width=3,
legend_label="raw data",
source=ColumnDataSource(data=cases_total),
)
flin.toolbar.active_drag = None
flin.toolbar.active_scroll = None
flin.toolbar.active_tap = None
flin.y_range.start = 1
flin.y_range.end = cases_total[loc].max() * 1.3
flin.xaxis.axis_label = "Date"
flin.yaxis.axis_label = "total number of confirmed cases"
flin.xaxis.ticker.desired_num_ticks = 15
flin.outline_line_width = 4
flin.outline_line_alpha = 0.3
flin.outline_line_color = "#aec6cf"
flin.line(
"date",
"expfit",
legend_label="exponential fit",
source=ColumnDataSource(data=cases_total_fit),
)
flin.legend.location = "top_left"
f2 = figure(
title="evolution of newly confirmed cases per day",
x_axis_type="datetime",
y_axis_type="log",
toolbar_location=None,
background_fill_color="#F2F2F7",
)
f2.scatter(
"date",
"diff",
marker="x",
size=8,
line_width=3,
source=ColumnDataSource(data=cases_new),
)
f2.toolbar.active_drag = None
f2.toolbar.active_scroll = None
f2.toolbar.active_tap = None
f2.y_range.start = 1
f2.y_range.end = cases_new["diff"].max() * 10
f2.xaxis.axis_label = "Date"
f2.yaxis.axis_label = "newly registered cases, per day"
f2.xaxis.ticker.desired_num_ticks = 15
f2.outline_line_width = 4
f2.outline_line_alpha = 0.3
f2.outline_line_color = "#aec6cf"
show(
column(preamble, f1, flin, f2, sizing_mode="stretch_both", max_width=900),
browser="firefox",
)
def expfit(df, loc):
# Parameterize a simple linear function.
def linfunc(x, a, b):
return a + x * b
# Get date-representing x values as numpy array containing float data type.
x = np.array(df.index.to_pydatetime(), dtype=np.datetime64).astype("float")
minx = x.min()
    # For the fit, don't deal with crazy large x values: transform to time
    # deltas (by subtracting the minimum), and also convert from nanoseconds
    # to seconds.
fitx = (x - minx) / 10 ** 9
# Get natural logarithm of data values
y = np.log(df[loc].to_numpy())
# log.info(fitx)
# log.info(y)
# log.info(", ".join("{%s, %s}" % (x, y) for x, y in zip(fitx, y)))
# Choose starting parameters for iterative fit.
p0 = [minx, 3]
popt, pcov = scipy.optimize.curve_fit(linfunc, fitx, y, p0=p0)
log.info("fit paramters: %s, %s", popt, pcov)
# Get data values from fit for the time values corresponding to the time
# values in the original time series used for fitting.
fit_ys_log = linfunc(fitx, *popt)
# Generate corresponding fit values by inverting logarithm.
fit_ys = np.exp(fit_ys_log)
# Create a data frame with the original time values as index, and the
# values from the fit as a series named `expfit`
df_fit = df.copy()
df_fit["expfit"] = fit_ys
return df_fit
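# Usage sketch: the returned frame carries the fitted values alongside the raw
# counts (the location name must match a column of the parsed DataFrame):
#     df_fit = expfit(df, "germany")
#     df_fit["expfit"]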
def jhu_csse_csv_to_dataframe(data_file_path, location_name):
"""
data_file_path: expect an instance of `time_series_19-covid-Confirmed.csv`
from https://github.com/CSSEGISandData/COVID-19/
location_name: the lower-cased version of this must be a column in the
processed data set.
"""
log.info("parse data file")
df = | pd.read_csv(data_file_path) | pandas.read_csv |
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.methods import plant_analysis
from examples.project_ENGIE import Project_Engie
class TestPandasPrufPlantAnalysis(unittest.TestCase):
def setUp(self):
np.random.seed(42)
# Set up data to use for testing (ENGIE example plant)
self.project = Project_Engie('./examples/data/la_haute_borne')
self.project.prepare()
# Test inputs to the regression model, at monthly time resolution
def test_plant_analysis(self):
# ____________________________________________________________________
# Test inputs to the regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'M',
reg_temperature = True,
reg_winddirection = True)
df = self.analysis._aggregate.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy_monthly(df)
self.check_process_loss_estimates_monthly(df)
self.check_process_reanalysis_data_monthly(df)
# ____________________________________________________________________
# Test inputs to the regression model, at daily time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_temperature = True,
reg_winddirection = True)
df = self.analysis._aggregate.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy_daily(df)
self.check_process_loss_estimates_daily(df)
self.check_process_reanalysis_data_daily(df)
# ____________________________________________________________________
# Test linear regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'M',
reg_model = 'lin',
reg_temperature = False,
reg_winddirection = False)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=30)
sim_results = self.analysis.results
self.check_simulation_results_lin_monthly(sim_results)
# ____________________________________________________________________
# Test linear regression model, at daily time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'lin',
reg_temperature = False,
reg_winddirection = False)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=30)
sim_results = self.analysis.results
self.check_simulation_results_lin_daily(sim_results)
# ____________________________________________________________________
# Test GAM regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'gam',
reg_temperature = False,
reg_winddirection = True)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_gam_daily(sim_results)
# ____________________________________________________________________
# Test GBM regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'gbm',
reg_temperature = True,
reg_winddirection = True)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_gbm_daily(sim_results)
# ____________________________________________________________________
# Test ETR regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'etr',
reg_temperature = False,
reg_winddirection = False)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_etr_daily(sim_results)
def check_process_revenue_meter_energy_monthly(self, df):
# Energy Nan flags are all zero
nptest.assert_array_equal(df['energy_nan_perc'].values, np.repeat(0.0, df.shape[0]))
# Expected number of days per month are equal to number of actual days
nptest.assert_array_equal(df['num_days_expected'], df['num_days_actual'])
# Check a few energy values
expected_gwh = pd.Series([0.692400, 1.471730, 0.580035])
actual_gwh = df.loc[ | pd.to_datetime(['2014-06-01', '2014-12-01', '2015-10-01']) | pandas.to_datetime |
import streamlit as st
import pandas as pd
import altair as alt
import pickle
import numpy as np
from map import create_map
from airdata import AirData
from utils import parse_time, parse_time_hms
from vega_datasets import data
#st.set_page_config(layout="wide")
# Getting data ready, Refresh every hour (same data when user refreshes within an hour)
@st.cache(ttl=60 * 60, suppress_st_warning=True)
def get_AD_data():
ad = AirData()
flight_df = ad.get_flights_df()
flight_df = ad.add_time_to_df(flight_df)
return ad, flight_df
# Cache to prevent computation on every rerun
@st.cache
def save_AD_data(df):
return df.to_csv().encode('utf-8')
ad, flight_df = get_AD_data()
# Definitions for flight delay
## Prepare data
# load in files
origin = pickle.load(open('flight-price/DestState.sav','rb'))
dest = pickle.load(open('flight-price/DestState.sav','rb'))
air = pickle.load(open('flight-price/AirlineCompany.sav','rb'))
miles_dic = pickle.load(open('flight-price/miles_dic.sav','rb'))
quarter_dic= {'Spring':'Q1','Summer':'Q2','Fall':'Q3','Winter':'Q4'}
df_viz = pd.read_csv('flight-price/df_viz.csv').iloc[:,:]
# fit the prediction model, get prediction and prediction interval
def get_pi(X):
all_models = pickle.load(open('flight-price/all_models.sav', 'rb'))
lb = all_models[0].predict(X)
pred = all_models[2].predict(X)
ub = all_models[1].predict(X)
return (round(np.exp(lb[0]),2), round(np.exp(pred[0]),2), round(np.exp(ub[0]),2))
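# Usage sketch: get_pi expects a single-row feature frame laid out like X_train
# (built further below from the user's selections) and returns the lower bound,
# point prediction and upper bound on the original price scale:
#     lb, pred, ub = get_pi(df_pred)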
# load data for non ML visual
def load_data_viz():
return pd.read_csv('flight-price/train_viz.csv').iloc[:,:]
# visual for price comparison
@st.cache
def get_slice_ogstate(df, ogstate=None):
labels = pd.Series([1] * len(df), index=df.index)
labels &= df['OriginState'] == ogstate
return labels
def get_slice_destate(df, destate=None):
labels = pd.Series([1] * len(df), index=df.index)
labels &= df['DestState'] == destate
return labels
def get_slice_membership(df, ogstate=None, destate=None, quarter=None,airline=None):
labels = pd.Series([1] * len(df), index=df.index)
if ogstate:
labels &= df['OriginState'] == ogstate
if destate is not None:
labels &= df['DestState'] == destate
if quarter:
labels &= df['Quarter'].isin(quarter)
if airline:
labels &= df['AirlineCompany'].isin(airline)
return labels
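# Example (sketch; the filter values are placeholders, not taken from the data):
#     mask = get_slice_membership(df_viz, ogstate='California', destate='Texas',
#                                 quarter=['Q1'], airline=None)
#     subset = df_viz[mask.astype(bool)]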
#-------------------- Price Heat Map-------------------------------------------
def load_data(url):
file = url
df = pd.read_csv(file)
return df
def get_season(df, quarter):
sub = df[df['Quarter']== quarter]
return sub
menu_selection = st.sidebar.radio("Menu", ["Introduction","Flight Map", "Flight Delay Analysis",
"Flight Price Analysis"])
if menu_selection == 'Introduction':
#col1, col2, col3,col4 = st.columns([0.5,1,2,1])
#col2.image("image/flight-logo.jpg", width=150)
#col3.markdown("<h1 style='text-align: left; color: #072F5F;'>Flight Traffic Brain</h1>",
# unsafe_allow_html=True)
col1, col2, col3 = st.columns([0.5,1,4])
col2.image("image/flight-logo.jpg", width=150)
col3.markdown("<h1 style='text-align: left; color: #072F5F;'>Flight Traffic Brain</h1>",
unsafe_allow_html=True)
text = "<p style='font-size:18px'>Nowadays, air traffic control has become a complicated task as there are\
more and more flights and airlines. There has also been rising cases of flight delays possibly due to poor\
management and massive volume of traffic. While air traffic is important to manage from the perspective of\
airports and airlines, flight prices are crucial for customers who usually make decisions of their travel\
plans based on them. In this project we hope to help airports better manage airlines and control airline\
traffic and passengers make wiser decisions about airline flights.</p>"
st.write(text, unsafe_allow_html=True)
text = "<p style='font-size:18px'>A <span style='color: #1167b1'> real-time map of flights </span> with interactive information such as speed and altitude can help the specialists\
to make better decisions. Meanwhile, an <span style='color: #1167b1'> interactive network graph </span> that shows the connections between airports and\
flights can also improve the handling of dependencies among the traffic. A <span style='color: #1167b1'> data visualization section of delay time </span>\
can also enable users to analyze different flights in real time and in more detail. By filtering the flight according to their\
departure airport, the users can not only view the delay time of different flights, but also have a high-level overview of\
the delay information of flights of different airlines. This information will help airport specialists to better communicate\
with the airports and passengers, and make better decisions in terms of resource distribution. In addition, a <span style='color: #1167b1'> \
machine learning model </span> using historical data to <span style='color: #1167b1'> predict flight price </span> can help passengers\
    estimate the potential fare of the flight of their interest. An <span style='color: #1167b1'> interactive platform with visualizations of airline comparisons </span> can also allow\
them to compare different flight prices by modifying parameters of interest, thus helping optimize their travel plan.</p>"
st.write(text, unsafe_allow_html=True)
text = "<br><br><br>This project was created by [<NAME>](<EMAIL>), [<NAME>](<EMAIL>), \
[<NAME>](<EMAIL>) and [<NAME>](<EMAIL>) for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at\
[Carnegie Mellon University](https://www.cmu.edu)"
st.write(text, unsafe_allow_html=True)
elif menu_selection == "Flight Map":
st.title("Real-time Flight Data Visualization")
# ------------ Map starts ---------------------
with st.sidebar.expander("Analysis for flights/airports"):
st.write("This is an analysis tool from the perspective of flights or airports")
to_show = st.selectbox("Data to look at", ["flight", "airport"])
if to_show == "flight":
field = st.selectbox("Variable of interest", ["heading", "altitude", "ground_speed"])
else:
field = st.selectbox("Variable of interest", ["origin_airport_iata", "destination_airport_iata"])
st.write("This is a map of real-time flights and airports. The blue circles are \
    the airports, while the red squares are the flights. You can use \
    the toolbar in the left sidebar to explore the data. You can also \
move your mouse over the map to see more information.")
map_air = create_map(flight_df, field, to_show)
st.altair_chart(map_air,use_container_width=True)
st.sidebar.title("Note")
st.sidebar.write("This visualization consists of three components.\
The first component is a map that shows real-time flights and airports\
in the U.S. The second component, linked to the first component, \
is an analysis tool for the real-time flight and airport data. \
The third component displays the time information of a flight.")
st.sidebar.download_button("Download real-time data", data=save_AD_data(flight_df),
file_name='airdata.csv', mime='text/csv')
# ------------ Map ends ---------------------
# ------------ Flight time starts ---------------------
st.write("Here we display the time information of a flight.")
option = st.selectbox("Which flight number are you looking into?",
flight_df['number'].sort_values())
# Get the corresponding flight row in the dataframe
option_row = flight_df[flight_df['number'] == option]
option_id = option_row.id.values[0]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
# Display scheduled and actual time for departual and arrival using metric
col1, col2 = st.columns(2)
col1.metric("Scheduled departure time",
parse_time(option_time['scheduled']['departure']))
if option_time['real']['departure'] and option_time['scheduled']['departure']:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
else:
depart_delta = None
col2.metric("Actual departure time",
parse_time(option_time['real']['departure']),
parse_time_hms(depart_delta),
delta_color='inverse')
col3, col4 = st.columns(2)
col3.metric("Scheduled arrival time", parse_time(option_time['scheduled']['arrival']))
arrival_time = option_time['real']['arrival']
if not arrival_time:
arrival_time = option_time['estimated']['arrival']
col4.metric("Estimated/Actual arrival time", parse_time(arrival_time))
# Note that some flights are not displayed due to... so the number of routes
# may appear larger than...
# ------------ Flight time ends ---------------------
elif menu_selection == "Flight Delay Analysis":
# ------------ Delay Analysis starts ---------------------
st.title("Flight Delay Analysis")
st.sidebar.title("Note")
st.sidebar.write("This flight delay analysis consists of four parts: \
The first part is a data slicing tool that allows the users to filter any flight data according to the different departure airport.\
The second part lists out all the flights flying from the selected departure airport, and displays the relevant delay time information of the flights. \
The third part displays a stripplot graph to allow the users to visually compare the different departure delay time of flights of different airlines.\
The last part compares the average delay time of different airlines. ")
ad = AirData()
flight_df = ad.get_flights_df()
st.header("Slice Data")
st.write("You can filter the airline data by choosing the different departure airport.")
with st.expander("Airports"):
origin_airport_list = flight_df['origin_airport_iata'].drop_duplicates()
option1 = st.selectbox("Departure Airport:",
(origin_airport_list))
flight_df_selected1 = flight_df[(flight_df['origin_airport_iata'] == option1)]
st.header("Data Visualization")
with st.expander("Flight delay from different departure airports"):
st.write("This data indicates all the current flights coming from the departure airport and their related delay times.")
index = 0
for row in flight_df_selected1.iterrows():
flight_number = flight_df_selected1['number'].values[index]
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
if option_time['real']['departure'] is None:
continue
elif option_time['real']['arrival'] is None:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
arrive_delta = None
col1, col2, col3 = st.columns(3)
col1.metric("Flight number",
flight_number)
col2.metric("Departure delay",
parse_time_hms(depart_delta))
col3.metric("Arrival delay",
arrive_delta)
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
arrive_delta = option_time['real']['arrival'] - option_time['scheduled']['arrival']
col1, col2, col3 = st.columns(3)
col1.metric("Flight number",
flight_number)
col2.metric("Departure delay",
parse_time_hms(depart_delta))
col3.metric("Arrival delay",
parse_time_hms(arrive_delta))
index = index + 1
with st.expander("Flight delay of different airlines"):
st.write("This data compares the punctuality and departure delay times between different airlines.")
depart_delay = []
index = 0
for row in flight_df_selected1.iterrows():
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
if option_time['real']['departure'] is None:
continue
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
depart_delta = parse_time_hms(depart_delta)
depart_delay.append(depart_delta)
index = index + 1
flight_df_selected1['depart_delay'] = depart_delay
stripplot = alt.Chart(flight_df_selected1, width=640).mark_circle(size=30).encode(
x=alt.X(
'depart_delay',
title='Departure delay',
scale=alt.Scale()),
y=alt.Y(
'airline_iata',
title='Airline iata'),
color=alt.Color('airline_iata', legend=alt.Legend(orient="right")),
tooltip=['number', 'airline_iata', 'depart_delay']
).transform_calculate(
jitter='sqrt(-2*log(random()))*cos(2*PI*random())'
).configure_facet(
spacing=0
).configure_view(
stroke=None
)
stripplot
with st.expander("Compare average departure delay of different airlines"):
depart_delay = []
index = 0
for row in flight_df_selected1.iterrows():
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
if option_time['real']['departure'] is None:
continue
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
# depart_delta = parse_time_hms(depart_delta)
depart_delay.append(depart_delta)
index = index + 1
flight_df_selected1['depart_delay'] = depart_delay
average_delay = []
airline_average_delay_parsed = []
index = 0
for row in flight_df_selected1.iterrows():
ite_airline = flight_df_selected1['airline_iata'].values[index]
airline_data = flight_df_selected1[flight_df_selected1['airline_iata'] == ite_airline]
airline_average_delay = airline_data['depart_delay'].mean()
average_delay_parsed = parse_time_hms(airline_average_delay)
average_delay_parsed = str(average_delay_parsed).rstrip(':0')
airline_average_delay = round(airline_average_delay, 2)
# airline_average_delay = parse_time_hms(airline_average_delay)
average_delay.append(airline_average_delay)
airline_average_delay_parsed.append(average_delay_parsed)
index = index + 1
flight_df_selected1['airline_average_delay'] = average_delay
flight_df_selected1['average_delay_parsed'] = airline_average_delay_parsed
flight_df_selected2 = flight_df_selected1.drop_duplicates(subset=['airline_iata'], keep='first')
flight_df_selected2 = flight_df_selected2.sort_values(by=['airline_average_delay'], ascending=False)
barchart = alt.Chart(flight_df_selected2, width=640).mark_bar().encode(
x=alt.X('airline_average_delay', axis=alt.Axis(labels=False)),
y=alt.Y('airline_iata', sort=alt.EncodingSortField(field="airline_average_delay", op="count", order='ascending')),
tooltip=['airline_iata', 'average_delay_parsed']
)
text = barchart.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='average_delay_parsed'
)
(barchart + text).properties(height=900)
barchart + text
index = 0
for row in flight_df_selected2.iterrows():
ite_airline = flight_df_selected2['airline_iata'].values[index]
ite_delay = flight_df_selected2['average_delay_parsed'].values[index]
# ite_delay = parse_time_hms(ite_delay)
ite_delay = str(ite_delay).rstrip(':0')
col1, col2 = st.columns(2)
col1.metric("Airline",
ite_airline)
col2.metric("Average departure delay",
ite_delay)
index = index + 1
# ------------ Delay Analysis ends ---------------------
else:
# ------------------------ Flight price prediction starts ------------------------------
## Price Prediction
st.title("Flight Price Analysis")
# 1. ML prediction
st.header("Flight Price Prediction")
st.write("Tell us your intended flight information and get predicted flight price value and range.")
X_train=pd.read_csv('flight-price/X_train.csv')
features = list(X_train.columns)
del X_train
df_pred = pd.DataFrame(0, index=np.arange(1), columns=features)
col1, col2 = st.columns([3, 2])
with col2:
og = st.selectbox('Origin', np.array(origin),index=30)
de = st.selectbox('Destination', np.array(dest),index=4)
season = st.selectbox('Season', ['Spring','Summer','Fall','Winter'])
airline = st.selectbox('Airline Company', np.array(air))
numT = st.slider('Number of tickets', 1, 15, 1)
if og != "Virgin Islands":
df_pred[f'o{og}'] = 1
else:
df_pred['oU.S. Virgin Islands']=1
if de != "Virgin Islands":
df_pred[f'd{de}'] = 1
else:
df_pred['dU.S. Virgin Islands']=1
if season!='Spring':
df_pred[quarter_dic[season]] = 1
if airline[-3:-1]!='AA':
df_pred[airline[-3:-1]] = 1
df_pred['NumTicketsOrdered'] = numT
if og!=de:
try:
miles = miles_dic[(og,de)]
except:
miles = miles_dic[(de,og)]
df_pred['log_miles']=np.log(miles)
else:
st.markdown(" ")
if og!=de:
low, mean, high = get_pi( | pd.DataFrame(df_pred) | pandas.DataFrame |
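# --- Editor's aside (hedged sketch, not part of the app above) -----------------
# The delay-analysis loop above computes each airline's mean departure delay by
# iterating row by row; a groupby does the same in one pass. Assumes only a
# DataFrame with the snippet's 'airline_iata' and 'depart_delay' columns.
import pandas as pd

def average_delay_per_airline(df):
    # mean departure delay per airline, largest first
    return (df.groupby('airline_iata', as_index=False)['depart_delay']
              .mean()
              .sort_values('depart_delay', ascending=False))
# --------------------------------------------------------------------------------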
import datetime
import logging
import pandas as pd
from django.core.exceptions import ValidationError
from django.db import transaction
from reversion import revisions as reversion
from xlrd import XLRDError
from app.productdb.models import Product, CURRENCY_CHOICES, ProductGroup, ProductMigrationSource, ProductMigrationOption
from app.productdb.models import Vendor
logger = logging.getLogger("productdb")
class InvalidExcelFileFormat(Exception):
"""Exception thrown if there is an issue with the low level file format"""
pass
class InvalidImportFormatException(Exception):
"""Exception thrown if the format of the Excel file for the import is invalid"""
pass
class BaseExcelImporter:
"""
Base class for the Excel Import
"""
sheetname = "products"
required_keys = {"product id", "description", "list price", "vendor"}
import_converter = None
drop_na_columns = None
workbook = None
path = None
valid_file = False
user_for_revision = None
__wb_data_frame__ = None
import_result_messages = None
def __init__(self, path_to_excel_file=None, user_for_revision=None):
self.path_to_excel_file = path_to_excel_file
if self.import_result_messages is None:
self.import_result_messages = []
if self.import_converter is None:
self.import_converter = {}
if self.drop_na_columns is None:
self.drop_na_columns = []
if user_for_revision:
self.user_for_revision = user_for_revision
def _load_workbook(self):
try:
self.workbook = pd.ExcelFile(self.path_to_excel_file)
except XLRDError as ex:
logger.error("invalid format of excel file '%s' (%s)" % (self.path_to_excel_file, ex), exc_info=True)
raise InvalidExcelFileFormat("invalid file format") from ex
except Exception:
logger.fatal("unable to read workbook at '%s'" % self.path_to_excel_file, exc_info=True)
raise
def _create_data_frame(self):
self.__wb_data_frame__ = self.workbook.parse(
self.sheetname, converters=self.import_converter
)
# normalize the column names (all lowercase, strip whitespace if any)
self.__wb_data_frame__.columns = [x.lower() for x in self.__wb_data_frame__.columns]
self.__wb_data_frame__.columns = [x.strip() for x in self.__wb_data_frame__.columns]
# drop NA columns if defined
if len(self.drop_na_columns) != 0:
self.__wb_data_frame__.dropna(axis=0, subset=self.drop_na_columns, inplace=True)
def verify_file(self):
if self.workbook is None:
self._load_workbook()
self.valid_file = False
sheets = self.workbook.sheet_names
# verify worksheet that is required
if self.sheetname not in sheets:
raise InvalidImportFormatException("sheet '%s' not found" % self.sheetname)
# verify keys in file
dframe = self.workbook.parse(self.sheetname)
keys = [x.lower() for x in set(dframe.keys())]
if len(self.required_keys.intersection(keys)) != len(self.required_keys):
req_key_str = ", ".join(sorted(self.required_keys))
raise InvalidImportFormatException("not all required keys are found in the Excel file, required keys "
"are: %s" % req_key_str)
self.valid_file = True
def is_valid_file(self):
return self.valid_file
@staticmethod
def _import_datetime_column_from_file(row_key, row, target_key, product):
"""
helper method to import an optional date column from the Excel file
"""
changed = False
faulty_entry = False
msg = ""
try:
if row_key in row:
if not pd.isnull(row[row_key]):
currval = getattr(product, target_key)
if (type(row[row_key]) is pd.tslib.Timestamp) or (type(row[row_key]) is datetime.datetime):
newval = row[row_key].date()
else:
newval = None
if currval != newval:
setattr(product, target_key, row[row_key].date())
changed = True
except Exception as ex: # catch any exception
faulty_entry = True
msg = "cannot set %s for <code>%s</code> (%s)" % (row_key, row["product id"], ex)
return changed, faulty_entry, msg
def import_to_database(self, status_callback=None, update_only=False):
"""
Base method that is triggered for the update
"""
pass
class ProductsExcelImporter(BaseExcelImporter):
"""
Excel Importer class for Products
"""
sheetname = "products"
required_keys = {"product id", "description", "list price", "vendor"}
import_converter = {
"product id": str,
"description": str,
"list price": str,
"currency": str,
"vendor": str
}
drop_na_columns = ["product id"]
valid_imported_products = 0
invalid_products = 0
@property
def amount_of_products(self):
return len(self.__wb_data_frame__) if self.__wb_data_frame__ is not None else -1
def import_to_database(self, status_callback=None, update_only=False):
"""
Import products from the associated excel sheet to the database
:param status_callback: optional status message callback function
:param update_only: don't create new entries
"""
if self.workbook is None:
self._load_workbook()
if self.__wb_data_frame__ is None:
self._create_data_frame()
self.valid_imported_products = 0
self.invalid_products = 0
self.import_result_messages.clear()
amount_of_entries = len(self.__wb_data_frame__.index)
# process entries in file
current_entry = 1
for index, row in self.__wb_data_frame__.iterrows():
# update status message if defined
if status_callback and (current_entry % 100 == 0):
status_callback("Process entry <strong>%s</strong> of "
"<strong>%s</strong>..." % (current_entry, amount_of_entries))
faulty_entry = False # indicates an invalid entry
created = False # indicates that the product was created
skip = False # skip the current entry (used in update_only mode)
msg = "import successful" # message to describe the result of the product import
if update_only:
try:
p = Product.objects.get(product_id=row["product id"])
except Product.DoesNotExist:
# element doesn't exist
skip = True
except Exception as ex: # catch any exception
logger.warn("unexpected exception occurred during the lookup "
"of product %s (%s)" % (row["product id"], ex))
else:
p, created = Product.objects.get_or_create(product_id=row["product id"])
changed = created
if not skip:
# apply changes (only if a value is set, otherwise ignore it)
row_key = "description"
try:
# set the description value
if not | pd.isnull(row[row_key]) | pandas.isnull |
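# --- Editor's aside (hedged sketch, not the project's API) ----------------------
# The importer above validates a workbook by checking the sheet name and a set of
# required column headers before parsing; a minimal standalone version of that check:
import pandas as pd

REQUIRED_KEYS = {"product id", "description", "list price", "vendor"}

def verify_workbook(path, sheetname="products"):
    workbook = pd.ExcelFile(path)
    if sheetname not in workbook.sheet_names:
        raise ValueError("sheet '%s' not found" % sheetname)
    header = {str(c).strip().lower() for c in workbook.parse(sheetname).columns}
    missing = REQUIRED_KEYS - header
    if missing:
        raise ValueError("missing required keys: %s" % ", ".join(sorted(missing)))
    return True
# --------------------------------------------------------------------------------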
#!/usr/bin/env python
# Author: <NAME> (@Cyb3rPandaH)
###### Importing Python Libraries
import yaml
yaml.Dumper.ignore_aliases = lambda *args : True
import glob
from os import path
# Libraries to manipulate data
import pandas as pd
from pandas import json_normalize
pd.set_option('display.max_columns', None)
# Libraries to interact with up-to-date ATT&CK content available in STIX format via public TAXII server
from attackcti import attack_client
# Libraries to create CSV file
import csv
###### Aggregating Data Source Object YAML files from Contribution Folder
print("[+] Accessing data source objects yaml files..")
yaml_files = glob.glob(path.join(path.dirname(__file__),"../..", "contribution", "*.yml"))
yaml_loaded = [yaml.safe_load(open(yaml_file).read()) for yaml_file in yaml_files]
print("[+] Creating data source objects aggregated yaml file..")
with open(f'../attack_data_sources_objects.yaml', 'w') as file:
yaml.dump(yaml_loaded, file, sort_keys = False)
###### Creating YAML file with (Sub)Techniques to Data Components Mapping
# Getting ATT&CK - Enterprise Matrix
print("[+] Getting ATT&CK - Enterprise form TAXII Server..")
# Instantiating attack_client class
lift = attack_client()
# Getting techniques for windows platform - enterprise matrix
attck = lift.get_enterprise_techniques(stix_format = False)
# Removing revoked techniques
attck = lift.remove_revoked(attck)
# Creating Dataframe
attck = | json_normalize(attck) | pandas.json_normalize |
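# --- Editor's aside (hedged sketch with made-up records, not real ATT&CK data) ---
# json_normalize, used just above, flattens a list of nested dicts into a flat
# DataFrame with dotted column names:
from pandas import json_normalize

records = [
    {"technique": "T1003", "tactic": {"name": "credential-access"}},
    {"technique": "T1059", "tactic": {"name": "execution"}},
]
flat = json_normalize(records)
print(flat.columns.tolist())   # ['technique', 'tactic.name']
# --------------------------------------------------------------------------------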
import sqlite3
import uuid
import numpy as np
import pandas as pd
import time
import sys
import ast
import os
import re
from random import shuffle as shuffle_list
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
#from tensorflow import set_random_seed #valid for tensorflow1
from tensorflow import random #valid for tensorflow2
from sklearn.metrics import mean_squared_error
from keras.preprocessing.sequence import TimeseriesGenerator
from keras.models import model_from_json
import preprocessing.config as cfg
sns.set() # nicer graphics
def measure_time(func):
"""time measuring decorator"""
def wrapped(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
end_time = time.time()
print('took {:.3} seconds'.format(end_time-start_time))
return ret
return wrapped
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
class LoadprofileGenerator(TimeseriesGenerator):
"""This is a customized version of keras TimeseriesGenerator. Its
intention is to neglect strides and sampling rates but to incorporate
iteration through several timeseries or loadprofiles of arbitrary length."""
def __init__(self, data, targets, length, start_index=0,
shuffle=False, reverse=False, batch_size=128):
super().__init__(data, targets, length, start_index=start_index,
shuffle=shuffle, reverse=reverse,
end_index=len(data[0]), batch_size=batch_size)
assert isinstance(data, list), 'data must be list of timeseries'
if any(isinstance(i, pd.DataFrame) for i in self.data):
self.data = [i.values for i in self.data]
if any(isinstance(i, pd.DataFrame) for i in self.targets):
self.targets = [i.values for i in self.targets]
if self.shuffle:
zippd = list(zip(self.data, self.targets))
shuffle_list(zippd) # inplace operation
self.data, self.targets = list(zip(*zippd))
# start index is the same for each profile
# for each profile there's a different end_index
self.end_index = [len(d)-1 for d in self.data]
batches_per_profile = [(e - self.start_index + self.batch_size)//
self.batch_size for e in self.end_index]
self.data_len = sum(batches_per_profile)
self.batch_cumsum = np.cumsum(batches_per_profile)
def __len__(self):
return self.data_len
def _empty_batch(self, num_rows):
# shape of first profile suffices
samples_shape = [num_rows, self.length]
samples_shape.extend(self.data[0].shape[1:])
targets_shape = [num_rows]
targets_shape.extend(self.targets[0].shape[1:])
return np.empty(samples_shape), np.empty(targets_shape)
def __getitem__(self, index):
# index is the enumerated batch index starting at 0
# find corresponding profile
p_idx = np.nonzero(index < self.batch_cumsum)[0][0]
prev_sum = 0 if p_idx == 0 else self.batch_cumsum[p_idx-1]
if self.shuffle:
rows = np.random.randint(
self.start_index, self.end_index[p_idx] + 1,
size=self.batch_size)
else:
i = self.start_index + self.batch_size * (index - prev_sum)
rows = np.arange(i, min(i + self.batch_size,
self.end_index[p_idx] + 2))
# +2 to get the last element, too
samples, targets = self._empty_batch(len(rows))
for j, row in enumerate(rows):
indices = range(row - self.length, row)
samples[j] = self.data[p_idx][indices]
targets[j] = self.targets[p_idx][row-1]
if self.reverse:
return samples[:, ::-1, ...], targets
return samples, targets
class Report:
"""Summary of an experiment/trial"""
TARGET_SCHEME = cfg.data_cfg['db_target_scheme']
TABLE_SCHEMES = \
{'predictions': ['id text', 'idx int'] +
['{} real' for _ in range(len(TARGET_SCHEME))] +
['{} real' for _ in range(len(TARGET_SCHEME))],
'meta_experiments': ['id text', 'target text', 'testset text',
'score real', 'loss_metric text', 'seed text',
'scriptname text', 'start_time text',
'end_time text', 'config text']
}
def __init__(self, uid, seed,
score=None, yhat=None, actual=None, history=None,
used_loss=None, model=None):
self.score = score
self.yhat_te = yhat
self.actual = actual
self.history = history
self.uid = uid
self.seed = seed
self.yhat_tr = None
self.start_time = datetime.now().strftime("%Y-%m-%d %H:%M")
self.used_loss = used_loss
self.model = model
self.cfg_blob = {}
def save_to_db(self):
if cfg.data_cfg['save_predictions'] and not cfg.debug_cfg['DEBUG']:
cols = self.yhat_te.columns.tolist()
assert all(t in self.TARGET_SCHEME for t in cols), \
'config has unknown target specified'
# fill up missing targets up to TARGET_SCHEME
df_to_db = self.yhat_te.copy()
df_to_db = df_to_db.assign(**{t: 0 for t in self.TARGET_SCHEME
if t not in cols})
df_to_db = df_to_db.loc[:, self.TARGET_SCHEME] # reorder cols
gtruth_to_db = self.actual.copy()
gtruth_to_db = gtruth_to_db.assign(**{t: 0 for t in
self.TARGET_SCHEME
if t not in cols})
gtruth_to_db = gtruth_to_db.loc[:, self.TARGET_SCHEME]\
.rename(columns={t:t+'_gtruth' for t in gtruth_to_db.columns})
df_to_db = pd.concat([df_to_db, gtruth_to_db], axis=1)
with sqlite3.connect(cfg.data_cfg['db_path']) as con:
# predictions
table_name = 'predictions'
table_scheme = self.TABLE_SCHEMES[table_name]
query = "CREATE TABLE IF NOT EXISTS " + \
"{}{}".format(table_name, tuple(table_scheme))\
.replace("'", "")
query = query.format(*df_to_db.columns)
con.execute(query)
df_to_db['id'] = self.uid
df_to_db['idx'] = self.yhat_te.index
entries = [tuple(x) for x in np.roll(df_to_db.values,
shift=2, axis=1)]
query = f'INSERT INTO {table_name} ' + \
'VALUES ({})'.format(
', '.join('?' * len(df_to_db.columns)))
con.executemany(query, entries)
# meta experiments
table_name = 'meta_experiments'
table_scheme = self.TABLE_SCHEMES[table_name]
query = "CREATE TABLE IF NOT EXISTS " + \
"{}{}".format(table_name, tuple(table_scheme))\
.replace("'", "")
con.execute(query)
config_blob = {**cfg.data_cfg, **cfg.keras_cfg, **cfg.lgbm_cfg}
if hasattr(self.model, 'sk_params'):
config_blob['sk_params'] = self.model.sk_params
entry = (self.uid,
str(cfg.data_cfg['Target_param_names']),
str(cfg.data_cfg['testset']),
str(self.score),
cfg.data_cfg['loss'],
str(self.seed),
os.path.basename(sys.argv[0]),
self.start_time,
datetime.now().strftime("%Y-%m-%d %H:%M"),
str(config_blob),
)
query = f'INSERT INTO {table_name} VALUES {entry}'
con.execute(query)
print(f'Predictions and meta of model with uuid {self.uid} '
f'saved to db.')
def save_model(self):
if not cfg.debug_cfg['DEBUG'] and self.model is not None:
self.model.save(self.uid)
print(f'Model arch and weights dumped for {self.uid}.')
def load_model(self):
path = os.path.join(cfg.data_cfg['model_dump_path'],
self.uid+'_arch.json')
with open(path, 'r') as f:
self.model = model_from_json(f.read())
self.model.compile(optimizer='adam', loss='mse')
self.model.load_weights(os.path.join(cfg.data_cfg['model_dump_path'],
self.uid+'_weights.h5'))
return self
@classmethod
def load(clf, uid, truncate_at=None):
"""Return a Report object from uid. Uid must exist in database."""
with sqlite3.connect(cfg.data_cfg['db_path']) as con:
query = """SELECT * FROM predictions WHERE id=?"""
pred_table = pd.read_sql_query(query, con, params=(uid,))
query = """SELECT * FROM meta_experiments WHERE id=?"""
meta_table = | pd.read_sql_query(query, con, params=(uid,)) | pandas.read_sql_query |
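# --- Editor's aside (hedged sketch against a throwaway in-memory DB) ------------
# Report.load above pulls one experiment's rows with parameterised queries; the
# same read_sql_query pattern in isolation (table and columns are invented here):
import sqlite3
import pandas as pd

with sqlite3.connect(":memory:") as con:
    con.execute("CREATE TABLE predictions (id TEXT, idx INT, yhat REAL)")
    con.executemany("INSERT INTO predictions VALUES (?, ?, ?)",
                    [("abc", 0, 1.5), ("abc", 1, 1.7), ("xyz", 0, 9.9)])
    pred = pd.read_sql_query("SELECT * FROM predictions WHERE id=?", con,
                             params=("abc",))
print(len(pred))   # 2
# --------------------------------------------------------------------------------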
from surprise import Dataset, Reader, SVD, dump
from definitions import ROOT_DIR
import logging.config
import pandas as pd
import numpy as np
import helpers
import time
class SurpriseSVD:
logging.config.fileConfig(ROOT_DIR + "/logging.conf", disable_existing_loggers=False)
log = logging.getLogger(__name__)
# Can save and load the svd array to file
def __init__(self, ds=None, normalisation=None, save=True, load=False, sl_filename="svd", base_folder="/svd_dumps/",
movies_filename="/datasets/ml-latest-small/movies.csv"):
# Create mapper from movie id to title
self.mid2title = helpers.generate_id2movietitle_mapper(filename=movies_filename)
# Read data from file (or ds)
if ds is None:
df = pd.read_csv(ROOT_DIR+"/datasets/ml-latest-small/ratings.csv", usecols=[0, 1, 2])
elif type(ds) is str:
df = pd.read_csv(ROOT_DIR + ds, usecols=[0, 1, 2])
else:
df = | pd.DataFrame(ds[:, 0:3], columns=["userId", "movieId", "rating"]) | pandas.DataFrame |
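# --- Editor's aside (hedged sketch on a toy ratings frame, not the real files) ---
# The class above wraps Surprise's SVD around a ratings DataFrame; the underlying
# fit/predict flow looks like this:
import pandas as pd
from surprise import Dataset, Reader, SVD

ratings = pd.DataFrame({"userId":  [1, 1, 2, 2, 3],
                        "movieId": [10, 20, 10, 30, 20],
                        "rating":  [4.0, 3.5, 5.0, 2.0, 4.5]})
reader = Reader(rating_scale=(0.5, 5.0))
data = Dataset.load_from_df(ratings[["userId", "movieId", "rating"]], reader)
algo = SVD()
algo.fit(data.build_full_trainset())
print(algo.predict(1, 30).est)   # predicted rating of movie 30 for user 1
# --------------------------------------------------------------------------------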
import pyarrow.parquet as pq
import pandas as pd
import json
from typing import List, Callable, Iterator, Union, Optional
from sportsdataverse.config import WBB_BASE_URL, WBB_TEAM_BOX_URL, WBB_PLAYER_BOX_URL, WBB_TEAM_SCHEDULE_URL
from sportsdataverse.errors import SeasonNotFoundError
from sportsdataverse.dl_utils import download
def load_wbb_pbp(seasons: List[int]) -> pd.DataFrame:
"""Load women's college basketball play by play data going back to 2002
Example:
`wbb_df = sportsdataverse.wbb.load_wbb_pbp(seasons=range(2002,2022))`
Args:
seasons (list): Used to define different seasons. 2002 is the earliest available season.
Returns:
pd.DataFrame: Pandas dataframe containing the
play-by-plays available for the requested seasons.
Raises:
SeasonNotFoundError: If `season` is less than 2002.
"""
data = pd.DataFrame()
if type(seasons) is int:
seasons = [seasons]
for i in seasons:
if int(i) < 2002:
raise SeasonNotFoundError("season cannot be less than 2002")
i_data = pd.read_parquet(WBB_BASE_URL.format(season=i), engine='auto', columns=None)
data = data.append(i_data)
#Give each row a unique index
data.reset_index(drop=True, inplace=True)
return data
def load_wbb_team_boxscore(seasons: List[int]) -> pd.DataFrame:
"""Load women's college basketball team boxscore data
Example:
`wbb_df = sportsdataverse.wbb.load_wbb_team_boxscore(seasons=range(2002,2022))`
Args:
seasons (list): Used to define different seasons. 2002 is the earliest available season.
Returns:
pd.DataFrame: Pandas dataframe containing the
team boxscores available for the requested seasons.
Raises:
ValueError: If `season` is less than 2002.
"""
data = | pd.DataFrame() | pandas.DataFrame |
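# --- Editor's aside (hedged sketch; URL template and seasons are placeholders) ---
# load_wbb_pbp above appends one parquet file per season inside a loop;
# DataFrame.append is deprecated in newer pandas, so the same idea with concat:
import pandas as pd

def load_seasons(url_template, seasons):
    frames = [pd.read_parquet(url_template.format(season=s)) for s in seasons]
    return pd.concat(frames, ignore_index=True)
# --------------------------------------------------------------------------------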
from backend.lib import sql_queries
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
def test_get_user_info_for_existing_user(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id == 1
def test_get_user_info_with_wrong_password_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id is None
def test_get_user_info_with_wildcard_email_address_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='*', password='<PASSWORD>')
assert user_id is None
def test_get_user_info_with_wildcard_password_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='*')
assert user_id is None
def test_get_user_info_for_non_existant_user(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id is None
def test_count_input_data_items_for_all_users_and_label_tasks(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['user_id'] = [1, 1, 1, 1, 2, 2, 2, 3, None, None]
df_test['label_task_id'] = [1, 2, 3, 5, 1, 2, 5, 1, 4, 6]
df_test['total_items'] = [5, 3, 1, 5, 5, 2, 5, 1, 1, 1]
df_test['num_unlabeled'] = [2, 2, 1, 5, 4, 2, 5, 0, 1, 1]
df_test['num_labeled'] = [3, 1, 0, 0, 1, 0, 0, 1, 0, 0]
engine = db_connection_sqlalchemy
df = sql_queries.count_input_data_items_per_user_per_label_task(engine, label_task_id=None, user_id=None)
assert_series_equal(df['user_id'], df_test['user_id'])
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
assert_series_equal(df['total_items'], df_test['total_items'])
assert_series_equal(df['num_labeled'], df_test['num_labeled'])
def test_get_all_input_data_items(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['input_data_id'] = [1, 2, 3, 4, 5]
df_test['dataset_group_id'] = [1, 1, 1, 1, 1]
df_test['dataset_id'] = [1, 1, 2, 2, 2]
engine = db_connection_sqlalchemy
df = sql_queries.get_all_input_data_items(engine, label_task_id=1)
| assert_frame_equal(df, df_test) | pandas.testing.assert_frame_equal |
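# --- Editor's aside (hedged sketch on toy frames) --------------------------------
# The tests above compare query results with pandas' testing helpers; in miniature:
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal

left = pd.DataFrame({"user_id": [1, 2], "total_items": [5, 3]})
right = pd.DataFrame({"user_id": [1, 2], "total_items": [5, 3]})
assert_frame_equal(left, right)                       # passes silently
assert_series_equal(left["user_id"], right["user_id"])
# --------------------------------------------------------------------------------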
import os
import cx_Oracle
import logging
import numpy as np
import pandas as pd
import re
import zipfile
import logging
from datetime import datetime
from glob import glob
from os.path import split, normpath, join, relpath, basename
from pathlib import Path
from piper.decorators import shape
from piper.text import _file_with_ext
from piper.text import _get_qual_file
from piper.verbs import clean_names
from piper.verbs import str_trim
from zipfile import ZipFile, ZIP_DEFLATED
from piper.xl import WorkBook
logger = logging.getLogger(__name__)
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# duplicate_files() {{{1
def duplicate_files(source=None,
glob_pattern='*.*',
recurse=False,
filesize=1,
keep=False,
xl_file=None):
''' Select files that have the same file size.
These files are assumed to be 'duplicates'.
Parameters
----------
source
source directory, default None
glob_pattern
filter extension suffix, default '*.*'
recurse
default False, if True, recurse source directory provided
filesize
file size filter, default 1 (kb)
keep
{‘first’, ‘last’, False}, default ‘first’
Determines which duplicates (if any) to mark.
first: Mark duplicates as True except for the first occurrence.
last: Mark duplicates as True except for the last occurrence.
False: Mark all duplicates as True.
xl_file
default None: output results to Excel workbook to xl_file
Returns
-------
pd.DataFrame
Examples
--------
.. code-block::
from piper.io import duplicate_files
source = '/home/mike/Documents'
duplicate_files(source,
glob_pattern='*.*',
recurse=True,
filesize=2000000,
keep=False).query("duplicate == True")
**References**
https://docs.python.org/3/library/pathlib.html
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.duplicated.html
'''
def func(f):
try:
size = os.stat(f.as_posix()).st_size
except OSError as e:
size = 0
data = {'parent': f.parents[0].as_posix(),
'name': f.name,
# 'stem': f.stem, 'suffix': f.suffix,
'size': size}
return data
file_data = list_files(source=source, glob_pattern = glob_pattern,
recurse=recurse, regex=None)
file_data = [func(x) for x in file_data]
df = (pd.DataFrame(file_data)
.assign(duplicate = lambda x: x['size'].duplicated(keep=keep)))
if filesize is not None:
df = df.query("size >= @filesize")
df = (df.sort_values(['size', 'name'], ascending=[False, True])
.reset_index(drop=True))
if xl_file is not None:
logger.info(f'{xl_file} written, {df.shape[0]} rows')
df.to_excel(xl_file, index=False)
return df
# list_files() {{{1
def list_files(source='inputs/', glob_pattern='*.xls*', recurse=False,
as_posix=False, regex=''):
''' Return a list of files.
The glob_pattern parameter allows one to focus on a single file or a group of files.
Examples
--------
List *ALL* files in /inputs directory, returning a list of paths:
list_files(glob_pattern = '*', regex='Test', as_posix=True)
Parameters
----------
source
source folder, default - 'inputs/'
glob_pattern
file extension filter (str), default - '*.xls*'
regex
if specified, allows regular expression to further filter the file
selection. Default is ''
*example*
.. code-block::
list_files(glob_pattern = '*.tsv', regex='Test', as_posix=True)
>['inputs/Test XL WorkBook.tsv']
recurse
recurse directory (boolean), default False
as_posix
If True, return list of files strings, default False
'''
if glob_pattern in ('', None):
raise ValueError(f'criteria {glob_pattern} value is invalid')
files = list(Path(source).glob(glob_pattern))
if recurse:
files = list(Path(source).rglob(glob_pattern))
if as_posix:
files = [x.as_posix() for x in files]
if regex not in (False, None):
regexp = re.compile(regex)
if as_posix:
files = list(filter(lambda x: regexp.search(x), files))
else:
files = list(filter(lambda x: regexp.search(x.as_posix()), files))
return files
# read_csv() {{{1
@shape(debug=False)
def read_csv(file_name: str,
sep: str = ',',
strip_blanks: bool = True,
clean_cols: bool = True,
encoding: str = None,
info: bool = True) -> pd.DataFrame:
''' pd.read_csv wrapper function
Parameters
----------
file_name
csv formatted file
sep
column separator
strip_blanks
Default True
If True, strip all column (row) values of leading and trailing blanks
clean_cols
default True
If True, lowercase column names, replace spaces with underscore ('_')
encoding
default None. Tip:: For EU data try 'latin-1'
info
if True, display additional logging information
Returns
-------
pandas dataframe
'''
logger.info(f'{file_name}')
df = pd.read_csv(file_name, sep=sep, encoding=encoding)
if strip_blanks:
if info:
logger.info(f'Warning: Dataframe strip_blanks = {strip_blanks}')
df = str_trim(df)
if clean_cols:
df = clean_names(df)
return df
# read_csvs() {{{1
@shape(debug=False)
def read_csvs(source = 'inputs/',
glob_pattern = '*.csv',
sep = ',',
strip_blanks = True,
clean_cols = True,
encoding = 'latin-1',
include_file = False,
info = False):
'''pd.read_csv wrapper function
Parameters
----------
source
source folder containing csv text files (that is, files ending with '\*.csv')
glob_pattern
global pattern for file selection
sep
Column separator
strip_blanks
Default True
If True, strip all column (row) values of leading and trailing blanks
clean_cols
default True. Lowercase column names, replace spaces with underscore ('\_')
encoding
default 'latin-1'
include_file
include filename in returned dataframe - default False
info
if True, display additional logging information
Returns
-------
pandas dataframe
'''
dataframes = []
for file_ in list_files(source, glob_pattern=glob_pattern, as_posix=False):
dx = pd.read_csv(file_.as_posix(), sep=sep)
if strip_blanks:
dx = str_trim(dx)
if info:
logger.info(f'Warning: Dataframe strip_blanks = {strip_blanks}')
if clean_cols:
dx = clean_names(dx)
if include_file:
dx['filename'] = file_.name
dataframes.append(dx)
df = pd.concat(dataframes)
return df
# read_excels() {{{1
def read_excels(source = 'inputs/',
glob_pattern = '*.xls*',
func = None,
include_file = False,
reset_index = True,
info = False):
'''
Read, concatenate, combine and return a pandas dataframe based on
workbook(s) data within a given source folder.
Parameters
----------
source
source folder containing workbook data (that is, files ending with '.xls*')
glob_pattern
file extension filter (str), default - '*.xls*'
func
pass a custom function to read/transform each workbook/sheet. default is
None (just performs a read_excel())
Note: custom function will recieve Path object. To obtain the 'string'
filename - use Path.as_posix()
include_file
include filename in returned dataframe - default False
reset_index
default True
info
Provide debugging information - default False
Returns
-------
pd.DataFrame - pandas DataFrame
Examples
--------
Using a custom function, use below as a guide.
.. code-block::
def clean_data(xl_file):
df = pd.read_excel(xl_file.as_posix(), header=None)
# Combine first two rows to correct column headings
df.columns = df.loc[0].values + ' ' + df.loc[1].values
# Keep the remaining data rows and assign back to the dataframe
df = df.iloc[2:]
# clean up column names to make it easier to use them in calculations
df = clean_names(df)
# Set date data types for order and ship dates
cols = ['order_date', 'ship_date']
date_data = [pd.to_datetime(df[col]) for col in cols]
df[cols] = pd.concat(date_data, axis=1)
# Separate shipping mode from container
df_split = df['ship_mode_container'].str.extract(r'(.*)-(.*)')
df_split.columns = ['ship', 'container']
priority_categories = ['Low', 'Medium', 'High', 'Critical', 'Not Specified']
priority_categories = pd.CategoricalDtype(priority_categories, ordered=True)
# split out and define calculated fields using lambdas
sales_amount = lambda x: (x.order_quantity * x.unit_sell_price
* (1 - x.discount_percent)).astype(float).round(2)
days_to_ship=lambda x: ((x.ship_date - x.order_date) / pd.Timedelta(1, 'day')).astype(int)
sales_person=lambda x: x.sales_person.str.extract('(Mr|Miss|Mrs) (\w+) (\w+)').loc[:, 1]
order_priority=lambda x: x.order_priority.astype(priority_categories)
# Define/assign new column (values)
df = (df.assign(ship_mode=df_split.ship,
container=df_split.container,
sales_amount=sales_amount,
days_to_ship=days_to_ship,
sales_person=sales_person,
order_priority=order_priority)
.drop(columns=['ship_mode_container'])
.pipe(move_column, 'days_to_ship', 'after', 'ship_date')
.pipe(move_column, 'sales_amount', 'after', 'unit_sell_price')
.pipe(clean_names, replace_char=('_', ' '), title=True)
)
return df
data = read_excels('inputs/Data', func=clean_data)
head(data, 2)
'''
dataframes = []
for xl_file in list_files(source,
glob_pattern=glob_pattern,
as_posix=False):
if func is None:
df = | pd.read_excel(xl_file) | pandas.read_excel |
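# --- Editor's aside (hedged sketch; folder and pattern are examples) -------------
# read_excels above walks a folder of workbooks and concatenates them; the same
# pattern with plain pathlib + pandas:
from pathlib import Path
import pandas as pd

def combine_workbooks(folder="inputs/", pattern="*.xlsx"):
    frames = []
    for f in Path(folder).glob(pattern):
        frame = pd.read_excel(f)
        frame["filename"] = f.name        # keep provenance, like include_file=True
        frames.append(frame)
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
# --------------------------------------------------------------------------------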
import gdalnumeric
import pandas as pd
import numpy as np
import gdal
from sklearn.linear_model import LogisticRegression
######################################################################################
#Write out a raster from a numpy array.
#Template: a raster file on disk to use for pixel size, height/width, and spatial reference.
#array: array to write out. Should be an exact match in height/width as the template.
#filename: file name of new raster
#inspired by: http://geoexamples.blogspot.com/2012/12/raster-calculations-with-gdal-and-numpy.html
def write_raster(array, template, filename):
driver = gdal.GetDriverByName("GTiff")
raster_out = driver.Create(filename, template.RasterXSize, template.RasterYSize, 1, template.GetRasterBand(1).DataType)
gdalnumeric.CopyDatasetInfo(template,raster_out)
bandOut=raster_out.GetRasterBand(1)
gdalnumeric.BandWriteArray(bandOut, array)
#Read in a list of rasters and stack them into a single array. (rows x column x numRasters)
def stackImages(fileList):
fullYear=gdalnumeric.LoadFile(fileList[0]).astype(np.int)
for thisImage in fileList[1:]:
image=gdalnumeric.LoadFile(thisImage)
fullYear=np.dstack((fullYear, image))
return(fullYear)
#Predictor values are the current state of a pixel x,y + the state of surrounding pixels n
#image: 2d x y array
#x,y: location of the focal pixel
#n : number of surrounding pixels to consider
def extract_predictor_values(image, row, col, n):
all_pixel_data={}
all_pixel_data['t0']=image[row,col]
if n==8:
surrounding=image[row-1:row+2 , col-1:col+2].reshape((9))
#Delete the focal pixel that is in this 3x3 array
surrounding=np.delete(surrounding, 4)
elif n==24:
surrounding=image[row-2:row+3 , col-2:col+3].reshape((25))
surrounding=np.delete(surrounding, 12)
#Convert surrounding pixel values to percent of each class
surrounding_size=len(surrounding)
cover_catagories=list(range(1,15))
for catagory in range(1, len(cover_catagories)+1):
all_pixel_data['surrounding_cat'+str(catagory)]= np.sum(surrounding==catagory) / surrounding_size
return(all_pixel_data)
#Extract and organize a timeseries of arrays for model fitting
def array_to_model_input_fitting(a):
array_data=[]
for row in range(1, a.shape[0]-1):
for col in range(1, a.shape[1]-1):
for time in range(0, a.shape[2]-1):
array_data.append(extract_predictor_values(a[:,:,time], row, col, 8))
array_data[-1]['t1'] = a[row,col,time+1]
array_data= | pd.DataFrame(array_data) | pandas.DataFrame |
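# --- Editor's aside (hedged sketch on a toy array, class labels 1..14) -----------
# extract_predictor_values above turns the surrounding pixels into per-class
# fractions; the same counting with numpy alone:
import numpy as np

def class_fractions(image, row, col, n_classes=14):
    window = image[row - 1:row + 2, col - 1:col + 2].ravel()
    window = np.delete(window, 4)                        # drop the focal pixel
    counts = np.bincount(window, minlength=n_classes + 1)[1:]
    return counts / window.size                          # fractions for classes 1..n

toy = np.random.randint(1, 15, size=(5, 5))
print(class_fractions(toy, 2, 2).sum())                  # 1.0
# --------------------------------------------------------------------------------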
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will group by the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntatic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to they actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to set up the selection,
# as these are not passed directly but via the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
# we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
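# Editor's aside (illustration only, not part of this module): the apply cases
# described in the docstring above, in miniature --
#
#   >>> df = DataFrame({'key': ['a', 'a', 'b'], 'val': [1., 2., 4.]})
#   >>> df.groupby('key').apply(lambda g: g['val'].sum())        # case 1: aggregate
#   key
#   a    3.0
#   b    4.0
#   dtype: float64
#   >>> df.groupby('key')['val'].apply(lambda s: s - s.mean())   # transform-like
#   0   -0.5
#   1    0.5
#   2    0.0
#   Name: val, dtype: float64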
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will not show nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result back to the obj's original type;
we may have round-tripped through object dtype in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
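# Hedged usage sketch (values and binner below are invented for illustration;
# both arrays must be sorted and 'values' must fall within 'binner'):
#
# >>> values = np.array([1, 2, 3, 6, 7])
# >>> binner = np.array([0, 3, 8])
# >>> generate_bins_generic(values, binner, closed='left')
# array([2, 5])
#
# i.e. the first bin is values[0:2] -> [1, 2] and the second is values[2:5] -> [3, 6, 7].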
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed-in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
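# Hedged sketch of how _convert_grouper normalizes the different grouper types
# (the index and groupers below are invented for illustration):
#
# >>> idx = Index(['a', 'b', 'c'])
# >>> _convert_grouper(idx, {'a': 1, 'b': 2, 'c': 1})        # dict -> its .get method
# <built-in method get of dict object at 0x...>
# >>> _convert_grouper(idx, Series([1, 2, 1], index=idx))    # aligned Series -> values
# array([1, 2, 1])
# >>> _convert_grouper(idx, ['x', 'y', 'z'])                 # sequence of matching length -> unchanged
# ['x', 'y', 'z']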
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = | OrderedDict() | pandas.compat.OrderedDict |
import pandas as pd
import numpy as np
import glob, math
# Merge the monthly revenue data
val = pd.read_excel(r"./Data/營收/上市電子業_營收.xlsx")
val['年份'] = val['年月'].apply(lambda x: x[:4])
val['月份'] = val['年月'].apply(lambda x: int(x[5:]))
val['季'] = val['年月'].apply(lambda x: math.ceil(int(x[5:]) / 3))
val_season = val.groupby(['代號', '年份', '季'])['單月營收(千元)'].count().to_frame('紀錄').reset_index()
val_season = val_season[val_season['紀錄']==3]
save_list = list(zip(val_season['代號'], val_season['年份'], val_season['季']))
new_val = pd.DataFrame()
for x, y, z in save_list:
a = val['代號']==x
b = val['年份']==y
c = val['季']==z
tmp = val[(a)&(b)&(c)]
new_val = pd.concat([new_val, tmp], 0)
season = new_val.groupby(['代號', '年份', '季'])['單月營收(千元)'].sum().to_frame('單季營收(千元)').reset_index()
new_val = pd.merge(new_val, season, on=['代號', '年份', '季'], how='left')
new_val['單季營收成長'] = new_val['單月營收(千元)'].shift(-2)
new_val['單季營收成長'] = (new_val['單季營收成長'] - new_val['單月營收(千元)']) / new_val['單月營收(千元)']
new_val['單季營收成長'] = new_val.apply(lambda r:r['單季營收成長'] if r['月份'] in [1, 4, 7, 10] else None, 1)
new_val = new_val[['代號', '年份', '季', '單季營收(千元)', '單季營收成長']][new_val['單季營收成長'].notnull()]
new_val.to_csv(r'./Data/完整季營收.csv', index=0, header=True, encoding='utf-8')
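# Illustrative shape of the exported file (a sketch; the numbers are invented):
# one row per stock code and quarter, kept only when all three monthly records
# of that quarter exist, with the quarter-over-quarter growth computed above.
#
# 代號, 年份, 季, 單季營收(千元), 單季營收成長
# 1101, 2019, 1, 123456, 0.08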
# Consolidate the TEJ financial-report data
for i, x in enumerate(glob.glob(r"./Data/上市電子族群財報DATA/*.xlsx")):
print(x, end='\r')
if i == 0:
df = pd.read_excel(x)
else:
df1 = | pd.read_excel(x) | pandas.read_excel |
# Library
import pandas as pd
import numpy as np
import datetime as dt
import time,datetime
import math
from math import sin, asin, cos, radians, fabs, sqrt
from geopy.distance import geodesic
from numpy import NaN
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
import sklearn
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import tree
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from IPython.display import Image
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
import random
from sklearn.ensemble import RandomForestClassifier
import eli5
from eli5.sklearn import PermutationImportance
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score,recall_score,f1_score,roc_auc_score,roc_curve
import sys
import pymysql  # used by get_market_data below
EARTH_RADIUS=6371
# Common Utilities
def num2date(num):
# Convert eventid in GTD to standard time format
num = str(num)
d = num[:4]+'/'+num[4:6]+'/'+num[6:8]
tmp = dt.datetime.strptime(d, '%Y/%m/%d').date()
return tmp
def num2date_(num):
# Convert time of market data to standard time format
num = str(num)
d = num[:4]+'/'+num[5:7]+'/'+num[8:10]
tmp = dt.datetime.strptime(d, '%Y/%m/%d').date()
return tmp
def get_week_day(date):
day = date.weekday()
return day
def hav(theta):
s = sin(theta / 2)
return s * s
def get_distance_hav(lat0, lng0, lat1, lng1):
# Great-circle distance between two points on a sphere via the haversine formula
# Convert longitude and latitude to radians
lat0 = radians(lat0)
lat1 = radians(lat1)
lng0 = radians(lng0)
lng1 = radians(lng1)
dlng = fabs(lng0 - lng1)
dlat = fabs(lat0 - lat1)
h = hav(dlat) + cos(lat0) * cos(lat1) * hav(dlng)
distance = 2 * EARTH_RADIUS * asin(sqrt(h))
return distance
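# Quick sanity check (approximate, with EARTH_RADIUS = 6371 km): one degree of
# longitude along the equator is roughly 111.2 km.
#
# >>> round(get_distance_hav(0.0, 0.0, 0.0, 1.0), 1)
# 111.2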
# Load the population density data - https://sedac.ciesin.columbia.edu/data/set/spatialecon-gecon-v4
def load_eco(filename,country):
basic_ec_file1 = filename
basic_ec = pd.read_excel(basic_ec_file1, country, header=0) # Load the sheet for the requested country
lonlat_list = []
for i in range(basic_ec.shape[0]):
temp = []
temp.append(basic_ec.iloc[i]['LONGITUDE'])
temp.append(basic_ec.iloc[i]['LAT'])
lonlat_list.append(temp)
return lonlat_list
# Make terrorist attack features
def gtd_one_hot(gtd):
# Group the features at daily level
gtd_grouped = gtd.groupby(gtd['Timestamp']).sum()
# Occurrence measure
gtd_grouped['occur_count'] = gtd.groupby(gtd['Timestamp']).size()
# Maintain the max nightlight value each day
gtd_grouped['nightlight'] = gtd.groupby(gtd['Timestamp'])['nightlight'].max()
# Obtain the weekday of certain timestamp
gtd_grouped['week'] = gtd.groupby(gtd['Timestamp'])['week'].mean()
return gtd_grouped
def lag(df,col_name,count):
# Shift the column
for i in range(1,count+1):
df[col_name + '_' + str(i)] = df[col_name].shift(i)
return df
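# Minimal sketch of lag() on a toy frame (invented data): each new column is the
# original shifted down by i rows, leaving NaN at the top.
#
# >>> toy = pd.DataFrame({'x': [1, 2, 3]})
# >>> lag(toy, 'x', 2)
#    x  x_1  x_2
# 0  1  NaN  NaN
# 1  2  1.0  NaN
# 2  3  2.0  1.0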
def compute_nl(lon,lat):
# Map certain geographic position to corresponding value of nightlight intensity
round_lon = round((lon+180)*37.5)
round_lat = 6750-round((lat+90)*37.5)
try:
return nl[int(round_lat)][int(round_lon)]
except:
return 0
def contain_or_not(string,list_):
if string in list_:
return 1
else:
return 0
def adjust_week(timestamp,week):
# Adjust the weekend to friday
if week == 5:
return (timestamp+datetime.timedelta(days=2)).strftime("%Y/%m/%d")
elif week == 6:
return (timestamp+datetime.timedelta(days=1)).strftime("%Y/%m/%d")
return timestamp.strftime("%Y/%m/%d")
# Make the market features
def get_market_data(start,end,ref,goal,host,user,password,db):
con = pymysql.connect(host,user,password,db, charset='utf8' )
# Reference Index
cmd1 = "select * from " + ref + " where Timestamp >= " + start + ' and Timestamp <= ' + end
ref_df = pd.read_sql(cmd1, con)
#Goal Index
cmd2 = "select * from " + goal + " where Timestamp >= " + start + ' and Timestamp <= ' + end
goal_df = pd.read_sql(cmd2, con)
return ref_df,goal_df
def get_diff(origin_md,name):
md = origin_md.copy()
str1 = 'logdiff_' + name
str2 = 'twologdiff_' + name
md['close_shift1'] = md['Trade Close'].shift(1)
md['onediff'] = md['Trade Close'].diff()
md['open_shift_minus1'] = md['Trade Open'].shift(-1)
md['twodiff'] = md['open_shift_minus1']-md['close_shift1']
md = md.dropna()
md[str1] = md['onediff']/md['close_shift1'] < 0
md[str2] = md['twodiff']/md['close_shift1'] < 0
md_onediff = pd.DataFrame(md,columns = ['Timestamp',str1]).dropna()
md_twodiff = pd.DataFrame(md,columns = ['Timestamp',str2]).dropna()
return md_onediff,md_twodiff
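# Hedged usage sketch (the frame and name below are placeholders): get_diff
# returns two boolean tables keyed by Timestamp, where 'logdiff_<name>' flags a
# negative close-to-close move and 'twologdiff_<name>' flags a negative move
# from the previous close to the next open.
#
# >>> onediff, twodiff = get_diff(goal_df, 'goal')
# >>> list(onediff.columns)
# ['Timestamp', 'logdiff_goal']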
# Merge terrorist attack features and market features
def diff_merge(gtd_grouped,diff_list):
for i in range(1,len(diff_list)):
diff_feature = pd.merge(diff_list[i-1],diff_list[i],on='Timestamp')
diff_feature = diff_feature.dropna()
diff_feature = | pd.merge(gtd_grouped,diff_feature,on='Timestamp',how='right') | pandas.merge |
import pandas as pd
import numpy
class DataSplitter:
@classmethod
def split_to_x_and_y(self, data, timesteps):
x, y = [], []
for i in range(len(data) - timesteps):
x.append(data.iloc[i:(i + timesteps)].drop('date', axis=1).as_matrix())
y.append([data.iloc[i + timesteps]['nikkei']])
return numpy.array(x), numpy.array(y)
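# Sketch of the sliding-window split (toy frame, invented values): with
# timesteps=2, each x sample is a 2-row window of the non-date columns and the
# matching y is the 'nikkei' value immediately after that window.
#
# >>> toy = pandas.DataFrame({'date': pandas.date_range('2020-01-01', periods=4),
# ...                         'nikkei': [1.0, 2.0, 3.0, 4.0]})
# >>> x, y = DataSplitter.split_to_x_and_y(toy, 2)
# >>> x.shape, y.shape
# ((2, 2, 1), (2, 1))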
@classmethod
def split_to_train_val_test(self, data):
train_start = | pd.to_datetime('2003-01-22') | pandas.to_datetime |
import distutils
import sys
import subprocess
import re
import os
import difflib
from functools import wraps
from pkg_resources import resource_filename
from io import StringIO
from collections import namedtuple
from contextlib import contextmanager
import numpy
import pandas
import pytest
def get_img_tolerance():
return int(os.environ.get("MPL_IMGCOMP_TOLERANCE", 15))
def seed(func):
""" Decorator to seed the RNG before any function. """
@wraps(func)
def wrapper(*args, **kwargs):
numpy.random.seed(0)
return func(*args, **kwargs)
return wrapper
def raises(error):
"""Wrapper around pytest.raises to support None."""
if error:
return pytest.raises(error)
else:
@contextmanager
def not_raises():
try:
yield
except Exception as e:
raise e
return not_raises()
def requires(module, modulename):
def outer_wrapper(function):
@wraps(function)
def inner_wrapper(*args, **kwargs):
if module is None:
raise RuntimeError(
"{} required for `{}`".format(modulename, function.__name__)
)
else:
return function(*args, **kwargs)
return inner_wrapper
return outer_wrapper
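# Hedged usage sketch: `requires` guards functions that depend on an optional
# import which may have been set to None at import time.
#
# >>> try:
# ...     import bokeh
# ... except ImportError:
# ...     bokeh = None
# >>> @requires(bokeh, 'bokeh')
# ... def make_figure():
# ...     return bokeh.plotting.figure()
#
# Calling make_figure() raises RuntimeError when bokeh is unavailable, instead of
# failing later with an AttributeError on None.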
@seed
def make_dc_data(ndval="ND", rescol="res", qualcol="qual"):
dl_map = {
"A": 0.1,
"B": 0.2,
"C": 0.3,
"D": 0.4,
"E": 0.1,
"F": 0.2,
"G": 0.3,
"H": 0.4,
}
index = pandas.MultiIndex.from_product(
[
list("ABCDEFGH"),
list("1234567"),
["GA", "AL", "OR", "CA"],
["Inflow", "Outflow", "Reference"],
],
names=["param", "bmp", "state", "loc"],
)
array = numpy.random.lognormal(mean=0.75, sigma=1.25, size=len(index))
data = pandas.DataFrame(data=array, index=index, columns=[rescol])
data["DL"] = data.apply(lambda r: dl_map.get(r.name[0]), axis=1)
data[rescol] = data.apply(
lambda r: dl_map.get(r.name[0]) if r[rescol] < r["DL"] else r[rescol], axis=1
)
data[qualcol] = data.apply(lambda r: ndval if r[rescol] <= r["DL"] else "=", axis=1)
return data
@seed
def make_dc_data_complex(dropsome=True):
dl_map = {
"A": 0.25,
"B": 0.50,
"C": 0.10,
"D": 1.00,
"E": 0.25,
"F": 0.50,
"G": 0.10,
"H": 1.00,
}
index = pandas.MultiIndex.from_product(
[
list("ABCDEFGH"),
list("1234567"),
["GA", "AL", "OR", "CA"],
["Inflow", "Outflow", "Reference"],
],
names=["param", "bmp", "state", "loc"],
)
xtab = (
| pandas.DataFrame(index=index, columns=["res"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
df = | pd.read_csv('datasets_672162_1182853_dataset.csv') | pandas.read_csv |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 10:31:23 2017
@author: robertmarsland
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import subprocess
import os
import pickle
import datetime
from sklearn.decomposition import PCA
StateData = ['ACI', 'ACII', 'CIATP', 'CIIATP', 'pU', 'pT', 'pD', 'pS']
def FormatPath(folder):
if folder==None:
folder=''
else:
if folder != '':
if folder[-1] != '/':
folder = folder+'/'
return folder
def LoadData(name, folder = None, suffix = '.dat'):
folder = FormatPath(folder)
col_ind = list(range(22))
del col_ind[5]
return pd.read_table(folder+name+suffix,index_col=0,usecols=col_ind)
def RunModel(paramdict = {}, name = 'data', default = 'default.par', folder = None, extra_mem = False):
if folder != None:
cwd = os.getcwd()
os.chdir(folder)
linelist = []
with open(default) as f:
for line in f:
for item in paramdict:
if line[:len(item)] == item:
line = item + ' ' + str(paramdict[item]) + '\n'
if line[:15] == 'output_filename':
line = 'output_filename ' + name + '\n'
linelist.append(line)
with open(name + '.par','w') as f:
for line in linelist:
f.write(line)
if extra_mem:
subprocess.check_call('ulimit -s 65532; ./KMCKaiC ' + name + '.par', shell = True)
else:
subprocess.check_call('./KMCKaiC ' + name + '.par', shell = True)
if folder != None:
os.chdir(cwd)
return LoadData(name, folder=folder)
else:
return LoadData(name)
def Current(data,species):
J = [0]
t = [data.index[0]]
center = [np.mean(data[species[0]]),np.mean(data[species[1]])]
values = [[],[]]
for k in range(len(data)-1):
if data[species[0]].iloc[k] < center[0] and data[species[0]].iloc[k+1] < center[0]:
if data[species[1]].iloc[k] <= center[1] and data[species[1]].iloc[k+1] > center[1]:
J.append(J[-1]-1)
t.append(data.index[k])
values[0].append(data[species[0]].iloc[k])
values[1].append(data[species[1]].iloc[k])
if data[species[1]].iloc[k] > center[1] and data[species[1]].iloc[k+1] <= center[1]:
J.append(J[-1]+1)
t.append(data.index[k])
values[0].append(data[species[0]].iloc[k])
values[1].append(data[species[1]].iloc[k])
J = np.asarray(J,dtype=int)
t = np.asarray(t,dtype=float)
T = np.nan
if len(J) > 1:
if J[-1]>J[1]:
T = (t[-1]-t[1])/(J[-1]-J[1])
return t, J, T, center
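# Hedged reading of Current(): J counts directed crossings of the mean of
# species[1] while species[0] sits below its own mean, so J acts as a winding
# number for the oscillation and T is the average period per completed cycle
# (NaN if no full cycle was observed). Typical use with a frame returned by
# RunModel/LoadData:
#
# >>> t, J, T, center = Current(data, ['pT', 'pS'])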
def Current_PCA(data,center=[1,0]):
J = [0]
t = [data.index[0]]
values = [[],[]]
data_PCA = PCA(n_components=2).fit_transform(data[StateData])
for k in range(len(data_PCA)-1):
if data_PCA[k,1] >= center[1] and data_PCA[k+1,1] >= center[1]:
if data_PCA[k,0] <= center[0] and data_PCA[k+1,0] > center[0]:
J.append(J[-1]-1)
t.append(data.index[k])
values[0].append(data_PCA[k,0])
values[1].append(data_PCA[k,1])
if data_PCA[k,0] > center[0] and data_PCA[k+1,0] <= center[0]:
J.append(J[-1]+1)
t.append(data.index[k])
values[0].append(data_PCA[k,0])
values[1].append(data_PCA[k,1])
J = np.asarray(J,dtype=int)
t = np.asarray(t,dtype=float)
T = np.nan
if len(J) > 1:
if J[-1] < 0:
J = -J
if J[-1]>J[1]:
T = (t[-1]-t[1])/(J[-1]-J[1])
return t, J, T
def EntropyRate(data,name='data',folder=None):
NA = 6.02e23
conv = 1e-21
ATPcons_hex = (data['CIATPcons'].iloc[-1] + data['CIIATPcons'].iloc[-1] -
data['CIATPcons'].iloc[0] - data['CIIATPcons'].iloc[0])
ATPcons = (6*conv*NA*FindParam('volume',name,folder=folder)*
FindParam('KaiC0',name,folder=folder)*ATPcons_hex)
return (FindParam('Delmu',name,folder=folder)*ATPcons/
(data.index[-1]-data.index[0]))
def FirstPassageSingleTraj(t,J):
tau_list = []
for k in range(2,max(J)+1):
tau_list.append(t[np.where(J>=k)[0][0]]-t[np.where(J>=k-1)[0][0]])
return tau_list
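# Worked toy example (invented numbers): for t = [0., 5., 11., 18.] and
# J = [0, 1, 2, 3], the first-passage times between successive cycles are
#
# >>> FirstPassageSingleTraj(np.array([0., 5., 11., 18.]), np.array([0, 1, 2, 3]))
# [6.0, 7.0]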
def FindParam(param,par_file,folder=None):
folder = FormatPath(folder)
if param == 'Delmu':
paramdict = {}
with open(folder+par_file+'.par') as f:
for line in f:
words = line.split()
if words != []:
if words[0] in ['Khyd','ATPfrac','Piconc']:
paramdict[words[0]] = float(words[1])
return np.log(paramdict['Khyd']/paramdict['Piconc']) + np.log(1/((1/paramdict['ATPfrac'])-1))
else:
with open(folder+par_file+'.par') as f:
for line in f:
words = line.split()
if words != []:
if words[0] == param:
return float(words[1])
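# Hedged note: parameter files are whitespace-separated "name value" lines, and
# 'Delmu' is derived from three of them rather than read directly:
#
#     Delmu = ln(Khyd / Piconc) + ln(1 / (1/ATPfrac - 1))
#
# e.g. FindParam('volume', 'default') would read the 'volume' line of default.par.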
def EntropyProduction(data,name='data'):
NA = 6.02e23
conv = 1e-21
ATPcons = 6*conv*NA*FindParam('volume',name)*FindParam('KaiC0',name)*(data['CIATPcons'] + data['CIIATPcons'])
return FindParam('Delmu',name)*ATPcons
def Ensemble(paramdict,ns,species=['pT','pS'],folder=None,run_number=1):
results = []
Tvec = []
Sdotvec = []
path = FormatPath(folder)
date = str(datetime.datetime.now()).split()[0]
name = '_'.join([str(run_number),date])
filename = path + 'RawData_' + name + '.dat'
for k in range(ns):
paramdict['rnd_seed'] = np.random.rand()*1000000
data = None
count = 0
while data is None and count < 10:
try:
datname = 'data_'+str(np.random.randint(1000000))
data = RunModel(paramdict=paramdict,name=datname,folder=folder)
except:
subprocess.check_call('rm -f '+path+datname+'.par', shell = True)
count += 1
assert data is not None, 'KMCKaiC failed to run.'
t, J, T_new, center = Current(data,species)
Sdot_new = EntropyRate(data,name=datname,folder=folder)
Tvec.append(T_new)
Sdotvec.append(Sdot_new)
results.append({'t': t, 'J': J})
subprocess.check_call('rm -f '+'\''+path+datname+'.dat'+'\'', shell = True)
subprocess.check_call('rm -f '+'\''+path+datname+'.par'+'\'', shell = True)
T = np.nanmean(Tvec)
Sdot = np.nanmean(Sdotvec)
with open(filename,'wb') as f:
pickle.dump([results,T,Sdot],f)
def Ensemble_PCA(paramdict,ns,folder=None,run_number=1):
results = []
Tvec = []
Sdotvec = []
path = FormatPath(folder)
date = str(datetime.datetime.now()).split()[0]
name = '_'.join([str(run_number),date])
filename = path + 'RawData_' + name + '.dat'
for k in range(ns):
paramdict['rnd_seed'] = np.random.rand()*1000000
data = None
count = 0
while data is None and count < 10:
try:
datname = 'data_'+str(np.random.randint(1000000))
data = RunModel(paramdict=paramdict,name=datname,folder=folder)
except:
subprocess.check_call('rm -f '+path+datname+'.par', shell = True)
count += 1
assert data is not None, 'KMCKaiC failed to run.'
t, J, T_new = Current_PCA(data)
Sdot_new = EntropyRate(data,name=datname,folder=folder)
Tvec.append(T_new)
Sdotvec.append(Sdot_new)
results.append({'t': t, 'J': J})
subprocess.check_call('rm -f '+'\''+path+datname+'.dat'+'\'', shell = True)
subprocess.check_call('rm -f '+'\''+path+datname+'.par'+'\'', shell = True)
T = np.nanmean(Tvec)
Sdot = np.nanmean(Sdotvec)
with open(filename,'wb') as f:
pickle.dump([results,T,Sdot],f)
def FirstPassage(results,Ncyc = 1,all=False):
tau = []
if all:
for item in results:
tau = tau + FirstPassageSingleTraj(item['t'],item['J'])
else:
for item in results:
inds1 = np.where(item['J'] >= 1)[0]
inds2 = np.where(item['J'] >= 1+Ncyc)[0]
if len(inds1) != 0 and len(inds2) != 0:
t1 = item['t'][inds1[0]]
t2 = item['t'][inds2[0]]
tau.append(t2-t1)
else:
tau.append(np.nan)
return tau
def LoadExperiment(param_name,run_numbers,date,folder='data'):
folder = FormatPath(folder)
name = '_'.join([param_name,str(run_numbers[0]),date])
filename1 = folder + 'FirstPassageData_' + name + '.csv'
filename2 = folder + 'Sdot_' + name + '.csv'
filename3 = folder + 'AllData_' + name + '.dat'
tau=pd.read_csv(filename1,index_col=0)
Sdot=pd.read_csv(filename2,index_col=0)
with open(filename3,'rb') as f:
results=pickle.load(f)
for run_number in run_numbers[1:]:
name = '_'.join([param_name,str(run_number),date])
filename1 = folder + 'FirstPassageData_' + name + '.csv'
filename2 = folder + 'Sdot_' + name + '.csv'
filename3 = folder + 'AllData_' + name + '.dat'
tau = tau.join(pd.read_csv(filename1,index_col=0))
Sdot = Sdot.join(pd.read_csv(filename2,index_col=0))
with open(filename3,'rb') as f:
results_new=pickle.load(f)
results.update(results_new)
return tau, Sdot, results
def RunExperiment(vol = 0.5, param_val = 25, param_name = 'Delmu', ens_size = 5, CIIhyd = True,
sample_cnt = 3e6, code_folder = None, run_number = 1, use_PCA = False):
paramdict = {}
paramdict['volume'] = vol
paramdict['sample_cnt'] = sample_cnt
paramdict['tequ'] = 50
if not CIIhyd:
paramdict['kCIIhyd0'] = 0.1
if param_name == 'Delmu':
paramdict['Khyd'] = (np.exp(param_val)*FindParam('Piconc','default',folder=code_folder)*
((1/FindParam('ATPfrac','default',folder=code_folder))-1))
else:
paramdict[param_name] = param_val
if use_PCA:
Ensemble_PCA(paramdict,ens_size,folder=code_folder,run_number=run_number)
else:
Ensemble(paramdict,ens_size,folder=code_folder,run_number=run_number)
def ProcessExperiment(run_number = 1, date = str(datetime.datetime.now()).split()[0], all = False,
param_name = 'Delmu', param_val = 20, folder = 'data', code_folder = None, Ncyc = 30):
if all:
Ncyc = 1
folder = FormatPath(folder)
code_folder = FormatPath(code_folder)
filename0 = code_folder + 'RawData_' + '_'.join([str(run_number),date]) + '.dat'
name = '_'.join([param_name,str(run_number),date])
filename1 = folder + 'FirstPassageData_' + name + '.csv'
filename2 = folder + 'Sdot_' + name + '.csv'
filename3 = folder + 'AllData_' + name + '.dat'
keyname = param_name + ' = ' + str(param_val)
results = {}
tau = {}
Sdot = {}
with open(filename0,'rb') as f:
results[keyname], T, Sdot[keyname] = pickle.load(f)
tau[keyname] = FirstPassage(results[keyname],Ncyc=Ncyc,all=all)
tau = pd.DataFrame.from_dict(tau)
tau.to_csv(filename1)
Sdot = | pd.DataFrame.from_dict(Sdot,orient='index') | pandas.DataFrame.from_dict |
"""
Python script for all analysis
"""
import pandas as pd
from _variable_definitions import *
from _optimization import optimization
from _parameter_calculations import *
from _file_import_optimization import *
import datetime
from _utils import *
# ---------------------------------------------------------------------------------------------------------------------
# generating sequence of node visits
# ---------------------------------------------------------------------------------------------------------------------
dist_max = segments_gdf.length.sum() / 3
# direction = 0
path_dictionaries = {}
seg_ids = segments_gdf.ID.to_list()
for id in seg_ids:
direction = 0
name = str(id) + "_" + str(direction) + "_0"
segm_tree = {name: Node((id, direction))}
unfiltered_path = get_children_from_seg(
id,
name,
direction,
segments_gdf,
links_gdf,
pois_df,
segm_tree,
0,
dist_max,
stop_then=False,
)
path = filter_path(unfiltered_path, segments_gdf, name)
path_dictionaries[str(id) + "_" + str(direction)] = path
print(id, direction, "done")
a_file = open("data/path_data.pkl", "wb")
pickle.dump(path_dictionaries, a_file)
a_file.close()
# a big dictionary with a dict for each seg_id and direction
path_dictionaries = {}
seg_ids = segments_gdf.ID.to_list()
for id in seg_ids:
direction = 1
name = str(id) + "_" + str(direction) + "_0"
segm_tree = {name: Node((id, direction))}
unfiltered_path = get_children_from_seg(
id,
name,
direction,
segments_gdf,
links_gdf,
pois_df,
segm_tree,
0,
dist_max,
stop_then=False,
)
path = filter_path(unfiltered_path, segments_gdf, name)
path_dictionaries[str(id) + "_" + str(direction)] = path
print(id, direction, "done")
a_file = open("data/path_data_2.pkl", "wb")
pickle.dump(path_dictionaries, a_file)
a_file.close()
# ---------------------------------------------------------------------------------------------------------------------
# SCENARIO calculation
# ---------------------------------------------------------------------------------------------------------------------
scenario_file = pd.read_csv("scenarios/scenario_parameters.csv")
l = len(scenario_file)
names = scenario_file["scenario name"].to_list()
etas = scenario_file["eta"].to_list()
mus = scenario_file["mu"].to_list()
gamma_hs = scenario_file["gamma_h"].to_list()
a_s = scenario_file["a"].to_list()
specific_demands = scenario_file["specific_demand"]
cxs = scenario_file["cx"]
cys = scenario_file["cy"]
dist_ranges = scenario_file["dist_range"]
p_max_bevs = scenario_file["p_max_bev"]
pole_peak_cap = 350
output_file = pd.DataFrame()
existing_infr["installed_infrastructure"] = existing_infr["350kW"]
existing_infr_0, existing_infr_1 = split_by_dir(existing_infr, "dir", reindex=True)
for ij in range(0, l):
scenario_name = names[ij]
if not etas[ij] >= 0:
eta = default_eta
else:
eta = etas[ij]
if not cxs[ij] >= 0:
cx = default_cx
else:
cx = cxs[ij]
if not cys[ij] >= 0:
cy = default_cy
else:
cy = cys[ij]
if not dist_ranges[ij] >= 0:
dist_range = default_dmax
else:
dist_range = dist_ranges[ij]
if not mus[ij] >= 0:
mu = default_mu
else:
mu = mus[ij]
if not gamma_hs[ij] >= 0:
gamma_h = default_gamma_h
else:
gamma_h = gamma_hs[ij]
if not a_s[ij] >= 0:
a = default_a
else:
a = a_s[ij]
if not p_max_bevs[ij] >= 0:
p_max_bev = default_charging_capacity
else:
p_max_bev = p_max_bevs[ij]
if not specific_demands[ij] >= 0:
specific_demand = default_specific_demand
else:
specific_demand = specific_demands[ij]
nb_cs, nb_poles, costs, non_covered_energy, perc_not_charged = optimization(
dir,
dir_0,
dir_1,
segments_gdf,
links_gdf,
cx,
cy,
calculate_dist_max(dist_range),
eta / 100,
default_acc,
mu,
gamma_h,
a,
p_max_bev,
pole_peak_cap,
specific_demand,
True,
False,
existing_infr_0,
existing_infr_1,
0,
scenario_name,
path_res="scenarios/results/"
)
output_file = output_file.append(
{
"nb_cs": nb_cs,
"nb_poles": nb_poles,
"costs": costs,
"non_covered_energy": non_covered_energy,
"perc_not_charged": perc_not_charged,
"datetime_of_calculation": datetime.datetime.now(),
},
ignore_index=True,
)
output_file.to_csv("scenarios/scenario_results.csv")
# ---------------------------------------------------------------------------------------------------------------------
# REDUCTION POTENTIALS
# ---------------------------------------------------------------------------------------------------------------------
existing_infr["installed_infrastructure"] = existing_infr["350kW"]
existing_infr_0, existing_infr_1 = split_by_dir(existing_infr, "dir", reindex=True)
scenario_file = | pd.read_csv('scenarios/scenario_parameters.csv') | pandas.read_csv |
"""
Views and helper functions for downloading analyses.
"""
import tempfile
import openpyxl
from openpyxl.worksheet.hyperlink import Hyperlink
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest
from django.core.files.storage import default_storage
from django.contrib.contenttypes.models import ContentType
import pandas as pd
import io
import pickle
import numpy as np
import zipfile
import os.path
import textwrap
from .models import Analysis
from .views import CARD_VIEW_FLAVORS
from .utils import mangle_sheet_name
from ..manager.models import Surface
#######################################################################
# Download views
#######################################################################
def download_analyses(request, ids, card_view_flavor, file_format):
"""Returns a file comprised from analyses results.
:param request:
:param ids: comma separated string with analyses ids
:param card_view_flavor: card view flavor, see CARD_VIEW_FLAVORS
:param file_format: requested file format
:return:
"""
#
# Check permissions and collect analyses
#
user = request.user
if not user.is_authenticated:
return HttpResponseForbidden()
analyses_ids = [int(i) for i in ids.split(',')]
analyses = []
surface_ct = ContentType.objects.get_for_model(Surface)
for aid in analyses_ids:
analysis = Analysis.objects.get(id=aid)
#
# Check whether user has view permission for requested analysis
#
if not analysis.is_visible_for_user(user):
return HttpResponseForbidden()
#
# Exclude analysis for surfaces having only one topography
# (this is useful for averages - the only surface analyses so far -
# and may be controlled by other means later, if needed)
#
if (analysis.subject_type == surface_ct) and (analysis.subject.num_topographies() <= 1):
continue
analyses.append(analysis)
#
# Check flavor and format argument
#
card_view_flavor = card_view_flavor.replace('_', ' ') # may be given with underscore in URL
if card_view_flavor not in CARD_VIEW_FLAVORS:
return HttpResponseBadRequest("Unknown card view flavor '{}'.".format(card_view_flavor))
download_response_functions = {
('plot', 'xlsx'): download_plot_analyses_to_xlsx,
('plot', 'txt'): download_plot_analyses_to_txt,
('roughness parameters', 'xlsx'): download_roughness_parameters_to_xlsx,
('roughness parameters', 'txt'): download_roughness_parameters_to_txt,
('contact mechanics', 'zip'): download_contact_mechanics_analyses_as_zip,
}
#
# Dispatch
#
key = (card_view_flavor, file_format)
if key not in download_response_functions:
return HttpResponseBadRequest(
    "Cannot provide a download for card view flavor '{}' in file format '{}'.".format(
        card_view_flavor, file_format))
return download_response_functions[key](request, analyses)
def _analyses_meta_data_dataframe(analyses, request):
"""Generates a pandas.DataFrame with meta data about analyses.
Parameters
----------
analyses:
sequence of Analysis instances
request:
current request
Returns
-------
pandas.DataFrame, can be inserted as extra sheet
"""
properties = []
values = []
for i, analysis in enumerate(analyses):
surface = analysis.related_surface
pub = surface.publication if surface.is_published else None
if i == 0:
# list function name and a blank line
properties = ["Function", ""]
values = [str(analysis.function), ""]
properties += ['Subject Type', 'Subject Name',
'Creator',
'Further arguments of analysis function', 'Start time of analysis task',
'End time of analysis task', 'Duration of analysis task']
values += [str(analysis.subject.get_content_type().model), str(analysis.subject.name),
str(analysis.subject.creator),
analysis.get_kwargs_display(), str(analysis.start_time),
str(analysis.end_time), str(analysis.duration())]
if analysis.configuration is None:
properties.append("Versions of dependencies")
values.append("Unknown. Please recalculate this analysis in order to have version information here.")
else:
versions_used = analysis.configuration.versions.order_by('dependency__import_name')
for version in versions_used:
properties.append(f"Version of '{version.dependency.import_name}'")
values.append(f"{version.number_as_string()}")
if pub:
# If the surface of the topography was published, the URL is inserted
properties.append("Publication URL (surface data)")
values.append(request.build_absolute_uri(pub.get_absolute_url()))
# We want an empty line on the properties sheet in order to distinguish the topographies
properties.append("")
values.append("")
df = pd.DataFrame({'Property': properties, 'Value': values})
#!/usr/bin/env python
from __future__ import print_function
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import numpy.ma as ma
import numpy.random as rd
from numpy import inf
from scipy.stats import norm
from scipy.special import erfinv
import progressbar
import gc
import cv2
import os
import glob
import json
import argparse
import math as m
import copy
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset")
ap.add_argument("-s", "--target-size", required=True,
help="JSON file encoding target size distributions")
ap.add_argument("-c", "--target-colour", required=True,
help="JSON file encoding target colour distributions")
ap.add_argument("-o", "--outdir", default="OUT",
help="path to the output directory [OUT]")
ap.add_argument("-l", "--labels", default=[], nargs='+',
help="list of allowed class labels []")
ap.add_argument("-i", "--imageHeight", type=int, default=1080,
help="height of output images, in pixels [1080].")
ap.add_argument("-r", "--resolveLim", type=float, default=2.0,
help="limiting over-resolution factor for zooms [2].")
ap.add_argument("-p", "--save-plots", action="store_true",
help="save plots of the shift for each class [no]")
ap.add_argument("-D", "--debug", action="store_true",
help="debug mode (annotates full field)")
ap.add_argument("-f", "--file", default="",
help="filename or partial to filter on [none]")
ap.add_argument("-v", "--verbose", action="store_true",
help="print messages rather than a progress bar")
args = vars(ap.parse_args())
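# Hedged sketch: parsing an explicit argument list (the paths below are made
# up) shows the keys produced by the parser above without touching sys.argv.
def _example_args():
    return vars(ap.parse_args([
        "-d", "data/train",
        "-s", "sizes.json",
        "-c", "colours.json",
    ]))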
def main():
annFile = os.path.join(args['dataset'], "BOXES.csv")
print("[INFO] reading {}".format(annFile))
annTab = pd.read_csv(annFile, header=None, skipinitialspace=True,
names=["imageName","x1","y1","x2","y2","label",
"nXpix", "nYpix", "date", "location", "qual"])
if os.path.exists(args["target_size"]):
with open(args["target_size"], 'r') as FH:
sizeDistDict = json.load(FH)
if os.path.exists(args["target_colour"]):
with open(args["target_colour"], 'r') as FH:
colDistDict = json.load(FH)
inFile = os.path.join(args["dataset"], "colours_by_box.hdf5")
colTab = pd.read_hdf(inFile, "colourTab")
import numpy as np
from pandas.tseries.holiday import USFederalHolidayCalendar
import datetime
import pandas as pd
def mape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def rmse(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.sqrt(np.mean((y_true - y_pred) ** 2))
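def _error_metric_sanity_check():
    # Hedged sanity check with made-up numbers: errors of -10, +10 and -30 on
    # true values 100, 200 and 300 give a MAPE of ~8.33% and an RMSE of ~19.15.
    y_true = [100.0, 200.0, 300.0]
    y_pred = [110.0, 190.0, 330.0]
    return mape(y_true, y_pred), rmse(y_true, y_pred)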
def DR_Temp_data_cleaning(dataframe):
'''
inplace change of the dataframe, for the structure purpose, return this dataframe
'''
dataframe['Date'] = pd.to_datetime(dataframe['Date'])
test = dataframe[
['Date', 'Hour', 'Weekday', 'Month', 'Load', 'Mean_Temp', 'Mean_Humi', 'RIV_Temp', 'RIV_Humi', 'LAX_Temp',
'LAX_Humi', 'USC_Temp', 'USC_Humi', 'WJF_Temp', 'WJF_Humi', 'TRM_Temp', 'TRM_Humi']]
test.loc[:, 'RIV_Temp_Log'] = np.log(dataframe['RIV_Temp'])
test.loc[:, 'LAX_Temp_Log'] = np.log(dataframe['LAX_Temp'])
test.loc[:, 'USC_Temp_Log'] = np.log(dataframe['USC_Temp'])
test.loc[:, 'WJF_Temp_Log'] = np.log(dataframe['WJF_Temp'])
test.loc[:, 'TRM_Temp_Log'] = np.log(dataframe['TRM_Temp'])
test.loc[:, 'Load_Log'] = np.log(dataframe['Load'])
test['Load_Lag_48'] = test['Load_Log'].shift(48, axis=0)
test['Humi_Lag_48'] = test['Mean_Humi'].shift(48, axis=0)
test['RIV_Temp_Log_Lag_48'] = test['RIV_Temp_Log'].shift(48, axis=0)
test['LAX_Temp_Log_Lag_48'] = test['LAX_Temp_Log'].shift(48, axis=0)
test['USC_Temp_Log_Lag_48'] = test['USC_Temp_Log'].shift(48, axis=0)
test['WJF_Temp_Log_Lag_48'] = test['WJF_Temp_Log'].shift(48, axis=0)
test['TRM_Temp_Log_Lag_48'] = test['TRM_Temp_Log'].shift(48, axis=0)
cal = USFederalHolidayCalendar()
holidays = cal.holidays(start='2014-01-01', end=str(datetime.datetime.now()), return_name=True)
holidays = pd.DataFrame(holidays)
holidays = holidays.reset_index()
holidays = holidays.rename(columns={'index': "Date", 0: 'Holiday'})
holidays['Date'] = pd.to_datetime(holidays['Date'])
test['Date'] = pd.to_datetime(test['Date'])
lm_data = test.loc[49:len(test), ].merge(holidays, how='left', on='Date')
lm_data['Holiday'] = lm_data['Holiday'].fillna("Not Holiday")
lm_data[["Hour", "Weekday", "Month", "Holiday"]] = lm_data[["Hour", "Weekday", "Month", "Holiday"]].astype(
'category')
DateTime = pd.DataFrame(
lm_data.apply(lambda line: pd.to_datetime(line['Date']) + datetime.timedelta(hours=line['Hour']), axis=1))
DateTime.columns = ['DateTime']
lm_data = pd.concat([DateTime, lm_data], axis=1)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
df=pd.read_csv('car_data.csv')
# In[3]:
df.head()
# In[5]:
df.shape
# In[6]:
print(df['Seller_Type'].unique())
# In[26]:
print(df['Transmission'].unique())
print(df['Owner'].unique())
print(df['Fuel_Type'].unique())
# In[8]:
# check missing or null values
df.isnull().sum()
# In[9]:
df.describe()
# In[11]:
df.columns
# In[12]:
final_dataset=df[['Year', 'Selling_Price', 'Present_Price', 'Kms_Driven','Fuel_Type', 'Seller_Type', 'Transmission', 'Owner']]
# In[13]:
final_dataset.head()
# In[14]:
final_dataset['Current_Year']=2020
# In[15]:
final_dataset.head()
# In[16]:
final_dataset['no_of_year']=final_dataset['Current_Year']-final_dataset['Year']
# In[17]:
final_dataset.head()
# In[19]:
final_dataset.drop(['Year'],axis=1,inplace=True)
# In[20]:
final_dataset.head()
# In[21]:
final_dataset.drop(['Current_Year'],axis=1,inplace=True)
# In[22]:
final_dataset.head()
# In[30]:
final_dataset = pd.get_dummies(final_dataset, drop_first=True)
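# In[31]:
# Hedged illustration of drop_first on a tiny throwaway frame: a three-level
# category becomes two indicator columns, with the first level as the baseline.
pd.get_dummies(pd.DataFrame({'Fuel_Type': ['Petrol', 'Diesel', 'CNG']}), drop_first=True)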
"""Defines the base classes to be extended by specific types of models."""
import sys
from os import makedirs
from os.path import join, exists, dirname, splitext, basename
import logging
from glob import glob
import multiprocessing as mp
from collections import defaultdict
from typing import Any, Dict, Tuple, List
from tqdm import tqdm
import numpy as np
import pandas as pd
from natsort import natsorted
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
import wandb
import matplotlib.pyplot as plt
from cac.config import Config, DATA_ROOT
from cac.analysis.utils import get_audio_type, get_unique_id
from cac.utils.io import read_yml, load_pkl
from cac.utils.logger import color
from cac.decomposition.methods import factory as decomposition_factory
from cac.analysis.base import ModelAnalyzer
class ClassificationAnalyzer(ModelAnalyzer):
"""Base analyzer class for neural network based models
:param config: config object on which the model was trained
:type config: Config
:param checkpoint: model checkpoint to analyze
:type checkpoint: int
:param load_best: flag to decide whether or not to load best model
:type load_best: bool
:param debug: flag to decide if it is a sample run, only loads val dataloader
:type debug: bool
:param load_attributes: flag to decide whether to load attributes
:type load_attributes: bool, defaults to True
# TODO: Loading attributes is specific to wiai-* datasets.
# TODO: FIX-`pandas.DataFrame.combine_first` causes `NumExpr defaulting to 4 threads.`
"""
def __init__(self, config: Config, checkpoint: int, load_best: bool, debug: bool = False, load_attributes: bool = True):
super(ClassificationAnalyzer, self).__init__(
config, checkpoint, load_best, debug, load_attributes)
def set_ckpt(self, checkpoint, load_best):
"""Modifies the model config to load required checkpoints
:param checkpoint: model checkpoint to analyze
:type checkpoint: int
:load_best: flag to decide whether to load best saved model
:type load_best: bool
"""
self.base_config.model['load'] = {
'epoch': checkpoint,
'load_best': load_best,
'resume_epoch': False,
'resume_optimizer': False,
'version': splitext(self.base_config.version)[0]
}
def load_epochwise_logs(self, mode: str, ext='pt', get_metrics: bool = True):
"""Load instance and batch level logs for all epochs.
:param mode: train/val/test
:type mode: str
"""
log_dir = join(self.base_config.log_dir, 'epochwise', mode)
all_logfiles = natsorted(glob(join(log_dir, f'*.{ext}')))
instance_losses = defaultdict(list)
predict_probs = defaultdict(list)
predict_labels = defaultdict(list)
batch_losses = defaultdict(list)
thresholds = defaultdict(list)
numeric_metrics = defaultdict(list)
display_metrics = defaultdict(list)
for file in tqdm(all_logfiles, dynamic_ncols=True, desc='Loading logs'):
epochID = splitext(basename(file))[0]
key = 'epoch_{}'.format(epochID)
if ext == 'pkl':
epoch_logs = load_pkl(file)
elif ext == 'pt':
epoch_logs = torch.load(file)
paths = list(epoch_logs['paths'])
audio_types = [get_audio_type(path) for path in paths]
unique_ids = [get_unique_id(path) for path in paths]
targets = epoch_logs['targets'].tolist()
for df in [instance_losses, predict_probs, predict_labels, batch_losses]:
df['audio_type'] = audio_types
df['unique_id'] = unique_ids
df['targets'] = targets
instance_losses[key] = epoch_logs['instance_loss'].tolist()
_predict_probs = F.softmax(epoch_logs['predictions'], -1)
predict_probs[key] = _predict_probs.tolist()
if get_metrics:
metrics = self.compute_metrics(
_predict_probs, np.array(targets)
)
numeric_metrics[key], display_metrics[key] = metrics
if 'batch_loss' in epoch_logs:
batch_losses[key] = epoch_logs['batch_loss'].tolist()
if 'threshold' in epoch_logs:
if self.model_config['type'] == 'binary':
predict_labels[key] = _predict_probs[:, 1].ge(
epoch_logs['threshold']).int().tolist()
thresholds[key] = epoch_logs['threshold']
attributes = self.match_attributes_by_paths(paths)
results = dict()
results['instance_loss'] = pd.DataFrame(instance_losses)
from music21 import *
import music21 as m21
import time
# import requests
# httpx appears to be faster than requests, will fit better with an async version
import httpx
from pathlib import Path
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
from itertools import combinations
# Unncessary at the moment
# MEINSURI = 'http://www.music-encoding.org/ns/mei'
# MEINS = '{%s}' % MEINSURI
# mei_doc = ET.fromstring(requests.get(path).text)
# # Find the title from the MEI file and update the Music21 Score metadata
# title = mei_doc.find(f'{MEINS}meiHead//{MEINS}titleStmt/{MEINS}title').text
# score.metadata.title = title
# mei_doc = ET.fromstring(requests.get(path).text)
# # Find the composer from the MEI file and update the Music21 Score metadata
# composer = mei_doc.find(f'{MEINS}meiHead//{MEINS}respStmt/{MEINS}persName').text
# score.metadata.composer = composer
# An extension of the music21 note class with more information easily accessible
pathDict = {}
class NoteListElement:
"""
An extension of the music21 note class
Attributes
----------
note : m21.note.Note
music21 note class
offset : int
cumulative offset of note
id : int
unique music21 id
metadata : music21.metadata
piece metadata- not normally attached to a music21 note
part : str
voice name
partNumber : int
voice number, not 0 indexed
duration : int
note duration
piece_url : str
piece url for note
prev_note : NoteListElement
prior non-rest note element
"""
def __init__(self, note: m21.note.Note, metadata, part, partNumber, duration, piece_url, prev_note=None):
self.note = note
self.prev_note = prev_note
self.offset = self.note.offset
self.id = self.note.id
self.metadata = metadata
self.part = part
self.partNumber = partNumber
self.duration = duration
self.piece_url = piece_url
def __str__(self):
return "<NoteListElement: {}>".format(self.note.name)
class ImportedPiece:
def __init__(self, score):
self.score = score
self.analyses = {'note_list': None}
self._intervalMethods = {
# (quality, directed, compound): function returning the specified type of interval
# diatonic with quality
('q', True, True): ImportedPiece._qualityUndirectedCompound,
('q', True, False): ImportedPiece._qualityDirectedSimple,
('q', False, True): lambda cell: cell.name if hasattr(cell, 'name') else cell,
('q', False, False): lambda cell: cell.semiSimpleName if hasattr(cell, 'semiSimpleName') else cell,
# diatonic interals without quality
('d', True, True): lambda cell: cell.directedName[1:] if hasattr(cell, 'directedName') else cell,
('d', True, False): ImportedPiece._noQualityDirectedSimple,
('d', False, True): lambda cell: cell.name[1:] if hasattr(cell, 'name') else cell,
('d', False, False): lambda cell: cell.semiSimpleName[1:] if hasattr(cell, 'semiSimpleName') else cell,
# chromatic intervals
('c', True, True): lambda cell: str(cell.semitones) if hasattr(cell, 'semitones') else cell,
('c', True, False): lambda cell: str(cell.semitones % 12) if hasattr(cell, 'semitones') else cell,
('c', False, True): lambda cell: str(abs(cell.semitones)) if hasattr(cell, 'semitones') else cell,
('c', False, False): lambda cell: str(abs(cell.semitones) % 12) if hasattr(cell, 'semitones') else cell
}
def _getPartSeries(self):
if 'PartSeries' not in self.analyses:
part_series = []
for i, flat_part in enumerate(self._getSemiFlatParts()):
notesAndRests = flat_part.getElementsByClass(['Note', 'Rest'])
part_name = flat_part.partName or 'Part_' + str(i + 1)
ser = pd.Series(notesAndRests, name=part_name)
import pandas as pd
import re
from sklearn.model_selection import train_test_split
import numpy as np
def df_to_letor(df, queries_df: pd.DataFrame) -> pd.DataFrame:
# ensure that df has qid, docid, pid
expected_cols = ("QID", "DocID", "PassageID")
feat_cols = [col for col in df.columns if col not in expected_cols]
assert set(expected_cols).issubset(
set(df.columns)
), """DataFrame does not have some columns from ("QID", "DocID", "PassageID")"""
def fn(row):
a = queries_df[
(queries_df["QID"] == row["QID"])
& (queries_df["DocumentID"] == row["DocID"])
& (queries_df["RelevantPassages"] == row["PassageID"])
]
if a.shape[0] == 0:
return 0
else:
return 1
df["REL"] = df.apply(fn, axis=1)
expected_cols = expected_cols + ("REL",)
# initialize df
letor_df = pd.DataFrame(columns=df.columns)
i = 1
for column in df.columns:
print(column)
if column == "QID":
df[column] = [f"qid:{qid}" for qid in df[column]]
elif column == "DocID":
df[column] = [f"#docid = {docid}" for docid in df[column]]
elif column == "PassageID":
df[column] = [f"pid = {pid}" for pid in df[column]]
elif column == "REL":
pass
else:
df[column] = [f"{i}:{feat}" for feat in df[column]]
i += 1
letor_df[column] = df[column]
letor_df = letor_df[["REL", "QID"] + feat_cols + ["DocID", "PassageID"]]
return letor_df
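def _letor_example():
    # Hedged sketch with tiny made-up inputs: the two feature columns become
    # "1:<value>" / "2:<value>" strings and the relevance label is looked up
    # from the (QID, DocumentID, RelevantPassages) triples in queries_df.
    df_feats = pd.DataFrame({
        "QID": [1], "DocID": [11], "PassageID": [3],
        "bm25": [0.53], "ql": [7.0],
    })
    queries = pd.DataFrame({
        "QID": [1], "DocumentID": [11], "RelevantPassages": [3],
    })
    return df_to_letor(df_feats, queries)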
if __name__ == "__main__":
# df = pd.DataFrame(
# {
# "QID": [1, 2, 3, 884],
# "DocID": [11, 21, 23, 61],
# "PassageID": [3, 4, 5, 22],
# "f1": [33, 0, 2, 2],
# "f2": [33, 0, 2, 2],
# "f3": [33, 0, 2, 2],
# }
# )
# df1 = pd.read_csv("data/processed/L2R_features.csv", index_col=0)
# df1 = df1.rename({"QId": "QID", "DocId": "DocID", "PassId": "PassageID"}, axis=1)
# df2 = pd.read_csv("data/processed/ql_scores.csv", index_col=0)
# df3 = pd.read_csv("data/processed/vsm_test_results_1000_dev.csv")
df1 = pd.read_csv("data/processed/L2R_features_WebAP.csv", index_col=0)
df1 = df1.rename({"QId": "QID", "DocId": "DocID", "PassId": "PassageID"}, axis=1)
df2 = pd.read_csv("data/processed/ql_scores_webap.csv", index_col=0)
df3 = pd.read_csv("data/processed/vsm_test_results_2000_WebAP.csv")
df = pd.merge(df1, df2, how="inner", on=["DocID", "PassageID", "QID"])
df = pd.merge(df, df3, how="inner", on=["DocID", "PassageID", "QID"])
queries_df = pd.read_csv("data/extracted/webap_queries.csv")
letor_df = df_to_letor(df, queries_df)
letor_df.to_csv("data/processed/letor.csv", index=False)
letor_df = pd.read_csv("data/processed/letor.csv")
# -*- coding: utf-8 -*-
__all__ = [
'get_dataframe'
]
import builtins
import io
import pandas as pd
import six
from .exceptions import OasisException
def get_dataframe(
src_fp=None,
src_type='csv',
src_buf=None,
src_data=None,
float_precision='high',
lowercase_cols=True,
index_col=True,
non_na_cols=(),
col_dtypes={},
sort_col=None,
sort_ascending=None
):
if not (src_fp or src_buf or src_data is not None):
raise OasisException(
'A CSV or JSON file path or a string buffer of such a file or an '
'appropriate data structure or dataframe must be provided'
)
df = None
if src_fp and src_type == 'csv':
df = pd.read_csv(src_fp, float_precision=float_precision)
elif src_buf and src_type == 'csv':
df = pd.read_csv(io.StringIO(src_buf), float_precision=float_precision)
elif src_fp and src_type == 'json':
df = pd.read_json(src_fp, precise_float=(True if float_precision == 'high' else False))
elif src_buf and src_type == 'json':
df = pd.read_json(io.StringIO(src_buf), precise_float=(True if float_precision == 'high' else False))
elif src_data is not None and (isinstance(src_data, list) or isinstance(src_data, pd.DataFrame)):
df = pd.DataFrame(data=src_data, dtype=object)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import fftpack
from scipy.integrate import cumtrapz
import numbers
class Quaternion:
def __init__(self, w, x=None, y=None, z=None):
q = []
if isinstance(w, Quaternion):
q = w.q
elif isinstance(w, np.ndarray):
if len(w) == 4:
q = w
elif len(w) == 3:
q = np.append(0, w)
elif x is not None and y is not None and z is not None:
q = [w, x, y, z]
elif isinstance(w,list):
q=w
self.q=q
self.normalize()
def normalize(self):
norm = np.linalg.norm(self.q)
self.q = self.q/norm
self.w = self.q[0]
self.x = self.q[1]
self.y = self.q[2]
self.z = self.q[3]
def to_array(self):
return self.q
def conjugate(self):
return Quaternion(self.q * np.array([1.0, -1.0, -1.0, -1.0]))
def product(self, p):
return Quaternion(np.array([
self.w * p.w - self.x * p.x - self.y * p.y - self.z * p.z,
self.w * p.x + self.x * p.w + self.y * p.z - self.z * p.y,
self.w * p.y - self.x * p.z + self.y * p.w + self.z * p.x,
self.w * p.z + self.x * p.y - self.y * p.x + self.z * p.w,
]))
def __str__(self):
return "({:-.4f} {:+.4f}i {:+.4f}j {:+.4f}k)".format(self.w, self.x, self.y, self.z)
def __add__(self, p):
return Quaternion(self.to_array() + p.to_array())
def __sub__(self, p):
return Quaternion(self.to_array() - p.to_array())
def __mul__(self, p):
if isinstance(p, Quaternion):
return self.product(p)
elif isinstance(p, numbers.Number):
q = self.q * p
return Quaternion(q)
def __rmul__(self, p):
if isinstance(p, Quaternion):
return self.product(p)
elif isinstance(p, numbers.Number):
q = self.q * p
return Quaternion(q)
def to_angles(self) -> np.ndarray:
"""
Return corresponding Euler angles of quaternion.
Given a unit quaternions :math:`\\mathbf{q} = (q_w, q_x, q_y, q_z)`,
its corresponding Euler angles [WikiConversions]_ are:
.. math::
\\begin{bmatrix}
\\phi \\\\ \\theta \\\\ \\psi
\\end{bmatrix} =
\\begin{bmatrix}
\\mathrm{atan2}\\big(2(q_wq_x + q_yq_z), 1-2(q_x^2+q_y^2)\\big) \\\\
\\arcsin\\big(2(q_wq_y - q_zq_x)\\big) \\\\
\\mathrm{atan2}\\big(2(q_wq_z + q_xq_y), 1-2(q_y^2+q_z^2)\\big)
\\end{bmatrix}
Returns
-------
angles : numpy.ndarray
Euler angles of quaternion.
"""
phi = np.arctan2(2.0 * (self.w * self.x + self.y * self.z),
1.0 - 2.0 * (self.x**2 + self.y**2))
theta = np.arcsin(2.0 * (self.w * self.y - self.z * self.x))
psi = np.arctan2(2.0 * (self.w * self.z + self.x * self.y),
1.0 - 2.0 * (self.y**2 + self.z**2))
return np.array([phi, theta, psi])
def as_rotation_matrix(self):
R = np.array([
[self.w**2 + self.x**2 - self.y**2 - self.z**2, 2 * (self.x * self.y - self.w * self.z),
2 * (self.w * self.y + self.x * self.z)],
[2 * (self.x * self.y + self.w * self.z),self.w**2 - self.x**2
+ self.y**2 - self.z**2, 2 * (self.y * self.z - self.w * self.x)],
[2 * (self.x * self.z - self.w * self.y), 2 *
(self.w * self.x + self.y * self.z), self.w**2 - self.x**2
- self.y**2 + self.z**2 ]
])
return R
def rotate_vector(self, v):
#V = [0,v[0],v[1],v[2]]
# V = Quaternion(V)
R = self.as_rotation_matrix()
return R @ v
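# Hedged sanity check: a 90-degree rotation about the z-axis should map the
# x unit vector onto the y unit vector (up to floating point error). The
# quaternion is passed as an ndarray, matching how the rest of this module
# constructs Quaternion objects.
def _quaternion_rotation_example():
    half = np.pi / 4.0  # half of 90 degrees, in radians
    q = Quaternion(np.array([np.cos(half), 0.0, 0.0, np.sin(half)]))
    return q.rotate_vector(np.array([1.0, 0.0, 0.0]))  # approximately [0, 1, 0]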
def madgwickUpdate(q, a, g, m, dt=0.01, gain=0.41):
if g is None or not np.linalg.norm(g) > 0:
return q
qEst = 0.5 * (q * Quaternion(g)).to_array()
if np.linalg.norm(a) == 0:
return q
a_norm = np.linalg.norm(a)
a = a / a_norm
if m is None or not np.linalg.norm(m) > 0:
return q
h = q * (Quaternion(m) * q.conjugate())
bx = np.linalg.norm([h.x, h.y])
bz = h.z
f = np.array([
2.0 * (q.x * q.z - q.w * q.y) - a[0],
2.0 * (q.w * q.x + q.y * q.z) - a[1],
2.0 * (0.5 - q.x**2 - q.y**2) - a[2],
2.0 * bx * (0.5 - q.y**2 - q.z**2) + 2.0 *
bz * (q.x * q.z - q.w * q.y) - m[0],
2.0 * bx * (q.x * q.y - q.w * q.z) + 2.0 *
bz * (q.w * q.x + q.y * q.z) - m[1],
2.0 * bx * (q.w * q.y + q.x * q.z) + 2.0 *
bz * (0.5 - q.x**2 - q.y**2) - m[2]
])
J = np.array([[-2.0 * q.y, 2.0 * q.z, -2.0 * q.w, 2.0 * q.x],
[2.0 * q.x, 2.0 * q.w, 2.0 * q.z, 2.0 * q.y],
[0.0, -4.0 * q.x, -4.0 * q.y, 0.0],
[-2.0 * bz * q.y, 2.0 * bz * q.z, -4.0 * bx * q.y -
2.0 * bz * q.w, -4.0 * bx * q.z + 2.0 * bz * q.x],
[-2.0 * bx * q.z + 2.0 * bz * q.x, 2.0 * bx * q.y + 2.0 * bz * q.w,
2.0 * bx * q.x + 2.0 * bz * q.z, -2.0 * bx * q.w + 2.0 * bz * q.y],
[2.0 * bx * q.y, 2.0 * bx * q.z - 4.0 * bz * q.x,
2.0 * bx * q.w - 4.0 * bz * q.y, 2.0 * bx * q.x]
])
gradient = J.T @ f
grad_norm = np.linalg.norm(gradient)
gradient = gradient / grad_norm
qEst = qEst - gain * gradient
q = q + Quaternion(qEst * dt)
return q
class Madgwick:
def __init__(self, acc=None, gyr=None, mag=None, **kwargs):
self.acc = acc
self.gyr = gyr
self.mag = mag
self.frequency = kwargs.get('frequency', 100.0)
self.dt = kwargs.get('dt', 1.0 / self.frequency)
self.q0 = kwargs.get('q0', Quaternion(np.array([1, 0, 0, 0])))
if self.acc is not None and self.gyr is not None:
self.Q, self.earthAcc = self.compute()
def compute(self):
N = len(self.acc)
Q = []
Q.append(self.q0)
for i in range(1, N):
Q.append(madgwickUpdate(
Q[i - 1], self.acc[i], self.gyr[i], self.mag[i], dt=self.dt))
earthAcc = np.zeros((N, 3))
for i in range(1, N):
earthAcc[i] = Q[i].conjugate().rotate_vector(self.acc[i])
print('computed earth frame acceleration vector')
return Q, earthAcc
def get_position(acc, dt):
vx = cumtrapz(acc[:, 0], dx=dt)
vy = cumtrapz(acc[:, 1], dx=dt)
vz = cumtrapz(acc[:, 2], dx=dt)
fig, ax = plt.subplots()
ax.plot(vx, label='vx')
ax.plot(vy, label='vy')
ax.plot(vz, label='vz')
ax.legend()
x = cumtrapz(vx, dx=dt)
y = cumtrapz(vy, dx=dt)
z = cumtrapz(vz, dx=dt)
print('got position')
return pd.DataFrame([x, y, z])
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from tqdm import tqdm
import yaml
import os
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from joblib import dump, load
from category_encoders import OrdinalEncoder
from src.data.spdat import get_spdat_data
def load_df(path):
'''
Load a Pandas dataframe from a CSV file
:param path: The file path of the CSV file
:return: A Pandas dataframe
'''
# Read HIFIS data into a Pandas dataframe
df = pd.read_csv(path, encoding="ISO-8859-1", low_memory=False)
return df
def classify_cat_features(df, cat_features):
'''
Classify categorical features as either single- or multi-valued.
:param df: Pandas dataframe
:param cat_features: List of categorical features
:return: list of single-valued categorical features, list of multi-valued categorical features
'''
def classify_features(client_df):
'''
Helper function for categorical feature classification, distributed across clients.
:param client_df: Dataframe with 1 client's records
:return List of single-valued categorical features, list of multi-valued categorical features
'''
for feature in cat_features:
# If this feature takes more than 1 value per client, move it to the list of multi-valued features
if client_df[feature].nunique() > 1:
sv_cat_features.remove(feature)
mv_cat_features.append(feature)
return
sv_cat_features = cat_features # First, assume all categorical features are single-valued
mv_cat_features = []
df.groupby('ClientID').progress_apply(classify_features)
return sv_cat_features, mv_cat_features
def get_mv_cat_feature_names(df, mv_cat_features):
'''
Build list of possible multi-valued categorical features
:param df: DataFrame containing HIFIS data
:param mv_cat_features: List of multi-valued categorical features
:return: List of all individual multi-valued categorical features
'''
mv_vec_cat_features = []
for f in mv_cat_features:
mv_vec_cat_features += [(f + '_' + v) for v in list(df[f].unique()) if type(v) == str]
return mv_vec_cat_features
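def _mv_cat_feature_name_example():
    # Hedged illustration with made-up data: a multi-valued categorical
    # feature 'ServiceType' taking the values 'Stay' and 'Meal' expands to
    # ['ServiceType_Stay', 'ServiceType_Meal'].
    demo = pd.DataFrame({'ClientID': [1, 1], 'ServiceType': ['Stay', 'Meal']})
    return get_mv_cat_feature_names(demo, ['ServiceType'])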
def vec_multi_value_cat_features(df, mv_cat_features, cfg, load_ct=False, categories=None):
'''
Converts multi-valued categorical features to vectorized format and appends to the dataframe
:param df: A Pandas dataframe
:param mv_categorical_features: The names of the categorical features to vectorize
:param cfg: project config
:param load_ct: Flag indicating whether to load a saved column transformer
:param categories: List of columns containing all possible values to encode
:return: dataframe containing vectorized features, list of vectorized feature names
'''
orig_col_names = df.columns
if categories is None:
categories = 'auto'
# One hot encode the multi-valued categorical features
mv_cat_feature_idxs = [df.columns.get_loc(c) for c in mv_cat_features if c in df] # List of categorical column indices
if load_ct:
col_trans_ohe = load(cfg['PATHS']['OHE_COL_TRANSFORMER_MV'])
df_ohe = pd.DataFrame(col_trans_ohe.transform(df), index=df.index.copy())
else:
col_trans_ohe = ColumnTransformer(
transformers=[('col_trans_mv_ohe', OneHotEncoder(categories=categories, sparse=False, handle_unknown='ignore', dtype=int), mv_cat_feature_idxs)],
remainder='passthrough'
)
df_ohe = pd.DataFrame(col_trans_ohe.fit_transform(df), index=df.index.copy())
dump(col_trans_ohe, cfg['PATHS']['OHE_COL_TRANSFORMER_MV'], compress=True) # Save the column transformer
# Build list of feature names for the new DataFrame
mv_vec_cat_features = []
for i in range(len(mv_cat_features)):
feat_names = list(col_trans_ohe.transformers_[0][1].categories_[i])
for j in range(len(feat_names)):
mv_vec_cat_features.append(mv_cat_features[i] + '_' + feat_names[j])
ohe_feat_names = mv_vec_cat_features.copy()
for feat in orig_col_names:
if feat not in mv_cat_features:
ohe_feat_names.append(feat)
df_ohe.columns = ohe_feat_names
return df_ohe, mv_vec_cat_features
def vec_single_value_cat_features(df, sv_cat_features, cfg, load_ct=False):
'''
Converts single-valued categorical features to one-hot encoded format (i.e. vectorization) and appends to the dataframe.
Keeps track of a mapping from feature indices to categorical values, for interpretability purposes.
:param df: A Pandas dataframe
:param sv_cat_features: The names of the categorical features to encode
:param cfg: project config dict
:param load_ct: Flag indicating whether to load saved column transformers
:return: dataframe containing one-hot encoded features, list of one-hot encoded feature names
'''
# Convert single-valued categorical features to numeric data
cat_feature_idxs = [df.columns.get_loc(c) for c in sv_cat_features if c in df] # List of categorical column indices
cat_value_names = {} # Dictionary of categorical feature indices and corresponding names of feature values
if load_ct:
col_trans_ordinal = load(cfg['PATHS']['ORDINAL_COL_TRANSFORMER'])
df[sv_cat_features] = col_trans_ordinal.transform(df)
else:
col_trans_ordinal = ColumnTransformer(transformers=[('col_trans_ordinal', OrdinalEncoder(handle_unknown='value'), sv_cat_features)])
df[sv_cat_features] = col_trans_ordinal.fit_transform(df) # Want integer representation of features to start at 0
dump(col_trans_ordinal, cfg['PATHS']['ORDINAL_COL_TRANSFORMER'], compress=True) # Save the column transformer
# Preserve named values of each categorical feature
for i in range(len(sv_cat_features)):
cat_value_names[cat_feature_idxs[i]] = []
for j in range(len(col_trans_ordinal.transformers_[0][1].category_mapping[i])):
# Last one is nan; we don't want that
cat_value_names[cat_feature_idxs[i]] = col_trans_ordinal.transformers_[0][1].category_mapping[i]['mapping'].index.tolist()[:-1]
# One hot encode the single-valued categorical features
if load_ct:
col_trans_ohe = load(cfg['PATHS']['OHE_COL_TRANSFORMER_SV'])
df_ohe = pd.DataFrame(col_trans_ohe.transform(df), index=df.index.copy())
else:
col_trans_ohe = ColumnTransformer(
transformers=[('col_trans_ohe', OneHotEncoder(sparse=False, handle_unknown='ignore'), cat_feature_idxs)],
remainder='passthrough'
)
df_ohe = pd.DataFrame(col_trans_ohe.fit_transform(df), index=df.index.copy())
dump(col_trans_ohe, cfg['PATHS']['OHE_COL_TRANSFORMER_SV'], compress=True) # Save the column transformer
# Build list of feature names for OHE dataset
ohe_feat_names = []
for i in range(len(sv_cat_features)):
for value in cat_value_names[cat_feature_idxs[i]]:
ohe_feat_names.append(sv_cat_features[i] + '_' + str(value))
vec_sv_cat_features = ohe_feat_names.copy()
for feat in df.columns:
if feat not in sv_cat_features:
ohe_feat_names.append(feat)
df_ohe.columns = ohe_feat_names
cat_feat_info = {} # To store info for later use in LIME
cat_feat_info['SV_CAT_FEATURES'] = sv_cat_features
cat_feat_info['VEC_SV_CAT_FEATURES'] = vec_sv_cat_features
cat_feat_info['SV_CAT_FEATURE_IDXS'] = cat_feature_idxs
# To use sparse matrices in LIME, ordinal encoded values must start at 1. Add dummy value to MV categorical features name lists.
for i in range(len(sv_cat_features)):
cat_value_names[cat_feature_idxs[i]].insert(0, 'DUMMY_VAL')
cat_feat_info['SV_CAT_VALUES'] = cat_value_names
return df, df_ohe, cat_feat_info
def process_timestamps(df):
'''
Convert timestamps in raw date to datetimes
:param df: A Pandas dataframe
:return: The dataframe with its datetime fields updated accordingly
'''
features_list = list(df) # Get a list of features
for feature in features_list:
if ('Date' in feature) or ('Start' in feature) or ('End' in feature) or (feature == 'DOB'):
df[feature] = pd.to_datetime(df[feature], errors='coerce')
return df
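def _process_timestamps_example():
    # Hedged illustration: only columns whose names contain 'Date', 'Start'
    # or 'End' (or equal 'DOB') are converted; other columns are untouched.
    demo = pd.DataFrame({'DateStart': ['2020-01-01'], 'ClientID': [1]})
    return process_timestamps(demo).dtypes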
def remove_n_weeks(df, train_end_date, dated_feats):
'''
Remove records from the dataframe that have timestamps in the n weeks leading up to the ground truth date
:param df: Pandas dataframe
:param train_end_date: the most recent date that should appear in the dataset
:param dated_feats: list of feature names with dated events
:return: updated dataframe with the relevant rows removed
'''
df = df[df['DateStart'] <= train_end_date] # Delete rows where service occurred after this date
df['DateEnd'] = df['DateEnd'].clip(upper=train_end_date) # Set end date for ongoing services to this date
# Update client age
if 'DOB' in df.columns:
df['CurrentAge'] = (train_end_date - df['DOB']).astype('<m8[Y]')
return df.copy()
def calculate_ground_truth(df, chronic_threshold, days, end_date):
'''
Iterate through dataset by client to calculate ground truth
:param df: a Pandas dataframe
:param chronic_threshold: Minimum # of days spent in shelter to be considered chronically homeless
:param days: Number of days over which to count # days spent in shelter
:param end_date: The last date of the time period to consider
:return: a DataSeries mapping ClientID to ground truth
'''
def client_gt(client_df):
'''
Helper function for ground truth calculation.
:param client_df: A dataframe containing all rows for a client
:return: the client dataframe ground truth calculated correctly
'''
client_df.sort_values(by=['DateStart'], inplace=True) # Sort records by service start date
gt_stays = 0 # Keep track of total stays, as well as # stays during ground truth time range
last_stay_end = pd.to_datetime(0)
last_stay_start = pd.to_datetime(0)
# Iterate over all of client's records. Note itertuples() is faster than iterrows().
for row in client_df.itertuples():
stay_start = getattr(row, 'DateStart')
stay_end = min(getattr(row, 'DateEnd'), end_date) # If stay is ongoing through end_date, set end of stay as end_date
service_type = getattr(row, 'ServiceType')
if (stay_start > last_stay_start) and (stay_end > last_stay_end) and (service_type == 'Stay'):
if (stay_start.date() >= start_date.date()) or (stay_end.date() >= start_date.date()):
# Account for cases where stay start earlier than start of range, or stays overlapping from previous stay
stay_start = max(start_date, stay_start, last_stay_end)
if (stay_end - stay_start).total_seconds() >= min_stay_seconds:
gt_stays += (stay_end.date() - stay_start.date()).days + (stay_start.date() != last_stay_end.date())
last_stay_end = stay_end
last_stay_start = stay_start
# Determine if client meets ground truth threshold
if gt_stays >= chronic_threshold:
client_df['GroundTruth'] = 1
return client_df
start_date = end_date - timedelta(days=days) # Get start of ground truth window
min_stay_seconds = 60 * 15 # Stays must be at least 15 minutes
df_temp = df[['ClientID', 'ServiceType', 'DateStart', 'DateEnd']]
df_temp['GroundTruth'] = 0
df_temp = df_temp.groupby('ClientID').progress_apply(client_gt)
if df_temp.shape[0] == 0:
return None
if 'ClientID' not in df_temp.index:
df_temp.set_index(['ClientID'], append=True, inplace=True)
df_gt = df_temp['GroundTruth']
df_gt = df_gt.groupby(['ClientID']).agg({'GroundTruth': 'max'})
return df_gt
def calculate_client_features(df, end_date, noncat_feats, counted_services, timed_services, start_date=None):
'''
Iterate through dataset by client to calculate numerical features from services received by a client
:param df: a Pandas dataframe
:param end_date: The latest date of the time period to consider
:param noncat_feats: List of noncategorical features
:param counted_services: Service features for which we wish to count occurrences and create a feature for
:param timed_services: Service features for which we wish to count total time received and create a feature for
:param start_date: The earliest date of the time period to consider
:return: the dataframe with the new service features included, updated list of noncategorical features
'''
def client_features(client_df):
'''
Helper function for total service time, service count, and total income calculation.
To be used on a subset of the dataframe.
:param client_df: A dataframe containing all rows for a client
:return: the client dataframe with service total and income columns appended
'''
if start_date is not None:
client_df = client_df[client_df['DateEnd'] >= start_date]
client_df['DateStart'].clip(lower=start_date, inplace=True)
client_df = client_df[client_df['DateStart'] <= end_date]
client_df['DateEnd'].clip(upper=end_date, inplace=True) # If ongoing through end_date, set end as end_date
client_df.sort_values(by=['DateStart'], inplace=True) # Sort records by service start date
total_services = dict.fromkeys(total_timed_service_feats, 0) # Keep track of total days of service prior to training data end date
last_service_end = dict.fromkeys(timed_services + counted_services, pd.to_datetime(0)) # Unix Epoch (1970-01-01 00:00:00)
last_service_start = dict.fromkeys(timed_services + counted_services, pd.to_datetime(0))
last_service = ''
# Iterate over all of client's records. Note: itertuples() is faster than iterrows().
for row in client_df.itertuples():
service_start = getattr(row, 'DateStart')
service_end = getattr(row, 'DateEnd')
service = getattr(row, 'ServiceType')
if (service in timed_services):
if (service_start > last_service_start[service]) and (service_end > last_service_end[service]):
service_start = max(service_start, last_service_end[service]) # Don't count any service overlapping from previous service
if (service == 'Stay') and ((service_end - service_start).total_seconds() < min_stay_seconds):
continue # Don't count a stay if it's less than 15 minutes
total_services['Total_' + service] += (service_end.date() - service_start.date()).days + \
(service_start.date() != last_service_end[service].date())
last_service_end[service] = service_end
last_service_start[service] = service_start
elif (service in counted_services) and \
((service_end != last_service_end[service]) or (getattr(row, 'ServiceType') != last_service)):
service = getattr(row, 'ServiceType')
client_df['Num_' + service] += 1 # Increment # of times this service was accessed by this client
last_service_end[service] = service_end
last_service = service
# Set total length of timed service features in client's records
for feat in total_services:
client_df[feat] = total_services[feat]
# Calculate total monthly income for client
client_income_df = client_df[['IncomeType', 'MonthlyAmount', 'DateStart', 'DateEnd']]\
.sort_values(by=['DateStart']).drop_duplicates(subset=['IncomeType'], keep='last')
client_df['IncomeTotal'] = client_income_df['MonthlyAmount'].sum()
return client_df
total_timed_service_feats = ['Total_' + s for s in timed_services]
for feat in total_timed_service_feats:
df[feat] = 0
df['IncomeTotal'] = 0
df['MonthlyAmount'] = pd.to_numeric(df['MonthlyAmount'])
import os
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian
import pandas as pd
from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io import pytables as pytables
from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term
pytestmark = pytest.mark.single
def test_mode(setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
msg = r"[\S]* does not exist"
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
with HDFStore(path, mode=mode) as store:
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
msg = (
"mode w is not allowed while performing a read. "
r"Allowed modes are r, r\+ and a."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "df", mode=mode)
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 11:37:09 2017
@author: <NAME>
"""
# =============================================================================
# Import the required libraries
# =============================================================================
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
import xgboost as xgb
import operator
import datetime as dt
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# =============================================================================
# Read the datasets
# =============================================================================
df_train = pd.read_csv("../input/train.csv", low_memory=False)
df_test = pd.read_csv("../input/test.csv", low_memory=False)
df_store = pd.read_csv("../input/store.csv", low_memory=False)
# =============================================================================
# Outlier removal
# =============================================================================
def remove_outliers(data):
df_0 = data.loc[data.Sales ==0]
q1 = np.percentile(data.Sales, 25, axis=0)
q3 = np.percentile(data.Sales, 75, axis=0)
# k = 3
# k = 2.5
# k = 2.8
# k = 2
k = 1.5
iqr = q3 - q1
# apply both IQR bounds; zero-sales rows are kept separately via df_0 above
df_temp = data.loc[(data.Sales > q1 - k*iqr) & (data.Sales < q3 + k*iqr) & (data.Sales != 0)]
frames = [df_0, df_temp]
result = pd.concat(frames)
return result
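def _remove_outliers_example():
    # Hedged sketch on synthetic data: the zero-sales row is always kept via
    # df_0, while a single extreme value far beyond q3 + 1.5*IQR is dropped.
    demo = pd.DataFrame({'Sales': [0, 100, 110, 120, 130, 10000]})
    return remove_outliers(demo)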
# =============================================================================
# Extract date/time features
# =============================================================================
time_format = '%Y-%m-%d'
def seperate_date(data):
# split date feature
data_time = pd.to_datetime(data.Date, format=time_format)
data['Year']= data_time.dt.year
data['Month'] = data_time.dt.month
data['DayOfYear'] = data_time.dt.dayofyear
data['DayOfMonth'] = data_time.dt.day
data['WeekOfYear'] = data_time.dt.week
return data
# =============================================================================
# Add mean sales features
# =============================================================================
mean_store_sales = []
mean_store_sales_promo = []
mean_store_sales_not_promo = []
mean_store_sales_2013 = []
mean_store_sales_2014 = []
mean_store_sales_2015 = []
mean_store_sales_m1 = []
mean_store_sales_m2 = []
mean_store_sales_m3 = []
mean_store_sales_m4 = []
mean_store_sales_m5 = []
mean_store_sales_m6 = []
mean_store_sales_m7 = []
mean_store_sales_m8 = []
mean_store_sales_m9 = []
mean_store_sales_m10 = []
mean_store_sales_m11 = []
mean_store_sales_m12 = []
mean_store_sales_d1 = []
mean_store_sales_d2 = []
mean_store_sales_d3 = []
mean_store_sales_d4 = []
mean_store_sales_d5 = []
mean_store_sales_d6 = []
mean_store_sales_d7 = []
mean_store_sales_1month = []
mean_store_sales_2months = []
mean_store_sales_3months = []
mean_store_sales_6months = []
def add_mean_sales(data, data_store = df_store):
# mean of sales
stores = data.Store.unique()
for store in stores:
serie = data[data.Store == store]
# mean of sales by Promo or not
mean_store_sales.append(np.mean(serie.Sales))
mean_store_sales_promo.append(serie[serie['Promo'] == 1]['Sales'].mean())
mean_store_sales_not_promo.append(serie[serie['Promo'] == 0]['Sales'].mean())
# mean of salse by year
mean_store_sales_2013.append(serie[serie['Year'] == 2013]['Sales'].mean())
mean_store_sales_2014.append(serie[serie['Year'] == 2014]['Sales'].mean())
mean_store_sales_2015.append(serie[serie['Year'] == 2015]['Sales'].mean())
# mean of sales by last months
mean_store_sales_1month.append(serie[(serie['Month'] == 7) & (serie['Year'] == 2015)]['Sales'].mean())
mean_store_sales_2months.append(serie[(serie['Month'] <= 7) ^(serie['Month'] >= 6) & (serie['Year'] == 2015)]['Sales'].mean())
mean_store_sales_3months.append(serie[(serie['Month'] <= 7) ^(serie['Month'] >= 5) & (serie['Year'] == 2015)]['Sales'].mean())
mean_store_sales_6months.append(serie[(serie['Month'] <= 7) ^(serie['Month'] >= 2) & (serie['Year'] == 2015)]['Sales'].mean())
data_store['mean_sotre_sales_promo'] = mean_store_sales_promo
data_store['mean_store_sales_not_promo'] = mean_store_sales_not_promo
data_store['mean_store_sales_2013'] = mean_store_sales_2013
data_store['mean_store_sales_2014'] = mean_store_sales_2014
data_store['mean_store_sales_2015'] = mean_store_sales_2015
data_store['mean_store_sales_1month'] = mean_store_sales_1month
data_store['mean_store_sales_2months'] = mean_store_sales_2months
data_store['mean_store_sales_3months'] = mean_store_sales_3months
data_store['mean_store_sales_6months'] = mean_store_sales_6months
return data_store
# =============================================================================
# Drop information for stores not present in the test set
# =============================================================================
def drop_stores(data_test, data):
stores = data_test.Store.unique()
for store in stores:
serie = data[data.Store == store]
data = serie
return data
# =============================================================================
# 补充特征工程
# =============================================================================
def feature_eng_compl(data):
# merge store dataset
data = data.join(df_store, on='Store', rsuffix='_')
data = data.drop('Store_',axis=1)
# handle the competition and promo2 feature, combination and drop
data['CompetitionLastMonths'] = 12 * (data['Year'] - data['CompetitionOpenSinceYear'].apply(lambda x: x if x > 0 else np.nan) - 1 + data['CompetitionOpenSinceMonth'].apply(lambda x: x if x > 0 else np.nan))
data['Promo2LastDays'] = 365 * (data['Year'] - data['Promo2SinceYear'].apply(lambda x: x if x > 0 else np.nan))/4.0 + (data['DayOfYear'] - 7*(data['Promo2SinceWeek'].apply(lambda x: x if x > 0 else np.nan)) - 1)
data = data.drop(['CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear'], axis=1)
# mapping
data['Year'] = data['Year'].map({2013:1, 2014:2, 2015:3})
data['StateHoliday'] = data['StateHoliday'].map({'0':0, 'a':1, 'b':2, 'c':3})
data['StoreType'] = data['StoreType'].map({'0':0, 'a':1, 'b':2, 'c':3, 'd':4})
data['Assortment'] = data['Assortment'].map({'0':0, 'a':1, 'b':2, 'c':3})
data['PromoInterval'] = data['PromoInterval'].map({'0':0,'Jan,Apr,Jul,Oct':1,'Feb,May,Aug,Nov':2,'Mar,Jun,Sept,Dec':3})
return data
# =============================================================================
# Run the feature engineering steps
# =============================================================================
# Extract date/time features
print ('seperate date ...............')
df_train = seperate_date(df_train)
df_test = seperate_date(df_test)
# Add mean sales features
df_store = add_mean_sales(df_train, df_store)
print ('add mean sales ...............')
# Additional feature engineering
print ('more feature engineering ...............')
df_train = feature_eng_compl(df_train).drop('Customers', axis=1)
df_test = feature_eng_compl(df_test)
# =============================================================================
# Feature combination: add 'DaysToHoliday' features
# =============================================================================
holidaysofyear = df_train[(df_train['StateHoliday'] == 1)].DayOfYear.reset_index(name='DayOfHoliday').DayOfHoliday.unique()
holidaysofyear = sorted(holidaysofyear)
for holiday in holidaysofyear:
df_train['DaysToHoliday' + str(holiday)] = holiday - df_train['DayOfYear']
for holiday in holidaysofyear:
df_test['DaysToHoliday' + str(holiday)] = holiday - df_test['DayOfYear']
# drop useless store information
print ('drop useless store information ...............')
df_store = drop_stores(df_test, df_store)
# Remove outliers
print ('remove outliers ...............')
df_train = remove_outliers(df_train)
# drop 'Date'
df_train = df_train.drop('Date', axis=1)
df_test = df_test.drop('Date', axis=1)
# =============================================================================
# Train with XGBoost; the implementation is partially based on the link below
# https://www.kaggle.com/mmueller/liberty-mutual-group-property-inspection-prediction/xgb-feature-importance-python/code
# =============================================================================
def create_feature_map(features):
# Create the feature map file used for the importance plot
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def rmspe(y, yhat):
# RMSPE computation
return np.sqrt(np.mean((yhat/y-1) ** 2))
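def _rmspe_example():
    # Hedged sanity check: predicting 10% high on every sample gives an
    # RMSPE of exactly 0.1.
    y = np.array([100.0, 200.0, 300.0])
    return rmspe(y, 1.1 * y)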
def rmspe_xg(yhat, y):
# RMSPE evaluation function passed to xgboost (feval)
y = np.expm1(y.get_label())
yhat = np.expm1(yhat)
return "rmspe", rmspe(y,yhat)
train = df_train
test = df_test
train = train[train["Open"] != 0]
train = train[train["Sales"] > 0]
print('training beging ................')
for iter in range(0,7):
params = {'objective': 'reg:linear',
'min_child_weight': 50,
'booster' : 'gbtree',
'eta': 0.1,
'alpha': 2,
'gamma': 2,
'max_depth': 12 - iter,
'subsample': 0.9,
'colsample_bytree': 0.9,
'silent': 1,
'seed': 1301,
'tree_method': 'gpu_hist',
'max_bin': 600
}
num_boost_round = 5000 # 5000
print("Train a XGBoost model .....................")
features = list(train.drop('Sales', axis=1))
X_train, X_valid = train_test_split(train, test_size=0.01, random_state=1)
y_train = np.log1p(X_train.Sales)
y_valid = np.log1p(X_valid.Sales)
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, \
early_stopping_rounds=200,
feval=rmspe_xg,
verbose_eval=True)
print("Validating")
yhat = gbm.predict(xgb.DMatrix(X_valid[features]))
error = rmspe(X_valid.Sales.values, np.expm1(yhat))
print('RMSPE: {:.6f}'.format(error))
print("Make predictions on the test set")
dtest = xgb.DMatrix(test[features])
test_probs = gbm.predict(dtest)
# Make Submission
file_name = iter
result = pd.DataFrame({"Id": test["Id"], 'Sales': np.expm1(test_probs)})
result.to_csv("XG_"+str(file_name)+'.csv', index=False)
gbm.save_model(str(file_name)+'.model')
# =============================================================================
# Plot feature importance; this part of the code is based on:
# https://www.kaggle.com/mmueller/liberty-mutual-group-property-inspection-prediction/xgb-feature-importance-python/code
# =============================================================================
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
# standard library imports
import os
import datetime
import re
import math
import copy
import collections
from functools import wraps
from itertools import combinations
import warnings
import pytz
import importlib
# anaconda distribution defaults
import dateutil
import numpy as np
import pandas as pd
# anaconda distribution defaults
# statistics and machine learning imports
import statsmodels.formula.api as smf
from scipy import stats
# from sklearn.covariance import EllipticEnvelope
import sklearn.covariance as sk_cv
# anaconda distribution defaults
# visualization library imports
import matplotlib.pyplot as plt
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.palettes import Category10, Category20c, Category20b
from bokeh.layouts import gridplot
from bokeh.models import Legend, HoverTool, tools, ColumnDataSource
# visualization library imports
hv_spec = importlib.util.find_spec('holoviews')
if hv_spec is not None:
import holoviews as hv
from holoviews.plotting.links import DataLink
else:
warnings.warn('Some plotting functions will not work without the '
'holoviews package.')
# pvlib imports
pvlib_spec = importlib.util.find_spec('pvlib')
if pvlib_spec is not None:
from pvlib.location import Location
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.pvsystem import retrieve_sam
from pvlib.modelchain import ModelChain
from pvlib.clearsky import detect_clearsky
else:
warnings.warn('Clear sky functions will not work without the '
'pvlib package.')
plot_colors_brewer = {'real_pwr': ['#2b8cbe', '#7bccc4', '#bae4bc', '#f0f9e8'],
'irr-poa': ['#e31a1c', '#fd8d3c', '#fecc5c', '#ffffb2'],
'irr-ghi': ['#91003f', '#e7298a', '#c994c7', '#e7e1ef'],
'temp-amb': ['#238443', '#78c679', '#c2e699', '#ffffcc'],
'temp-mod': ['#88419d', '#8c96c6', '#b3cde3', '#edf8fb'],
'wind': ['#238b45', '#66c2a4', '#b2e2e2', '#edf8fb']}
met_keys = ['poa', 't_amb', 'w_vel', 'power']
# The search strings for types cannot be duplicated across types.
type_defs = collections.OrderedDict([
('irr', [['irradiance', 'irr', 'plane of array', 'poa', 'ghi',
'global', 'glob', 'w/m^2', 'w/m2', 'w/m', 'w/'],
(-10, 1500)]),
('temp', [['temperature', 'temp', 'degrees', 'deg', 'ambient',
'amb', 'cell temperature', 'TArray'],
(-49, 127)]),
('wind', [['wind', 'speed'],
(0, 18)]),
('pf', [['power factor', 'factor', 'pf'],
(-1, 1)]),
('op_state', [['operating state', 'state', 'op', 'status'],
(0, 10)]),
('real_pwr', [['real power', 'ac power', 'e_grid'],
(-1000000, 1000000000000)]), # set to very lax bounds
('shade', [['fshdbm', 'shd', 'shade'], (0, 1)]),
('pvsyt_losses', [['IL Pmax', 'IL Pmin', 'IL Vmax', 'IL Vmin'],
(-1000000000, 100000000)]),
('index', [['index'], ('', 'z')])])
sub_type_defs = collections.OrderedDict([
('ghi', [['sun2', 'global horizontal', 'ghi', 'global',
'GlobHor']]),
('poa', [['sun', 'plane of array', 'poa', 'GlobInc']]),
('amb', [['TempF', 'ambient', 'amb']]),
('mod', [['Temp1', 'module', 'mod', 'TArray']]),
('mtr', [['revenue meter', 'rev meter', 'billing meter', 'meter']]),
('inv', [['inverter', 'inv']])])
irr_sensors_defs = {'ref_cell': [['reference cell', 'reference', 'ref',
'referance', 'pvel']],
'pyran': [['pyranometer', 'pyran']],
'clear_sky':[['csky']]}
columns = ['pts_after_filter', 'pts_removed', 'filter_arguments']
def update_summary(func):
"""
Todo
----
not in place
Check if summary is updated when function is called with inplace=False.
It should not be.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
pts_before = self.df_flt.shape[0]
if pts_before == 0:
pts_before = self.df.shape[0]
self.summary_ix.append((self.name, 'count'))
self.summary.append({columns[0]: pts_before,
columns[1]: 0,
columns[2]: 'no filters'})
ret_val = func(self, *args, **kwargs)
arg_str = args.__repr__()
lst = arg_str.split(',')
arg_lst = [item.strip("'() ") for item in lst]
# arg_lst_one = arg_lst[0]
# if arg_lst_one == 'das' or arg_lst_one == 'sim':
# arg_lst = arg_lst[1:]
# arg_str = ', '.join(arg_lst)
kwarg_str = kwargs.__repr__()
kwarg_str = kwarg_str.strip('{}')
if len(arg_str) == 0 and len(kwarg_str) == 0:
arg_str = 'no arguments'
elif len(arg_str) == 0:
arg_str = kwarg_str
else:
arg_str = arg_str + ', ' + kwarg_str
pts_after = self.df_flt.shape[0]
pts_removed = pts_before - pts_after
self.summary_ix.append((self.name, func.__name__))
self.summary.append({columns[0]: pts_after,
columns[1]: pts_removed,
columns[2]: arg_str})
if pts_after == 0:
warnings.warn('The last filter removed all data! '
'Calling additional filtering or visualization '
'methods that reference the df_flt attribute will '
'raise an error.')
return ret_val
return wrapper
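# Illustrative sketch (not part of the original module): update_summary is meant
# to decorate filtering methods on a class exposing df, df_flt, name, summary,
# and summary_ix attributes. The class and method names below are hypothetical.
#
#     class CapData:
#         @update_summary
#         def filter_irr(self, low, high):
#             self.df_flt = flt_irr(self.df_flt, 'poa', low, high)
#
# Each decorated call appends a row of point counts and the call arguments to
# the summary lists.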
def cntg_eoy(df, start, end):
"""
Shifts data before or after the new year to form a contiguous time period.
This function shifts data from the end of the year a year back or data from
the beginning of the year a year forward, to create a contiguous time period.
Intended to be used on historical typical year data.
If start date is in dataframe, then data at the beginning of the year will
be moved ahead one year. If end date is in dataframe, then data at the end
of the year will be moved back one year.
cntg (contiguous); eoy (end of year)
Parameters
----------
df: pandas DataFrame
Dataframe to be adjusted.
start: pandas Timestamp
Start date for time period.
end: pandas Timestamp
End date for time period.
Todo
----
Need to test and debug this for years not matching.
"""
if df.index[0].year == start.year:
df_beg = df.loc[start:, :]
df_end = df.copy()
df_end.index = df_end.index + pd.DateOffset(days=365)
df_end = df_end.loc[:end, :]
elif df.index[0].year == end.year:
df_end = df.loc[:end, :]
df_beg = df.copy()
df_beg.index = df_beg.index - pd.DateOffset(days=365)
df_beg = df_beg.loc[start:, :]
df_return = pd.concat([df_beg, df_end], axis=0)
ix_ser = df_return.index.to_series()
df_return['index'] = ix_ser.apply(lambda x: x.strftime('%m/%d/%Y %H %M'))
return df_return
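# Example usage (illustrative; df_8760, start, and end are hypothetical):
#     start = pd.Timestamp('1990-10-01')
#     end = pd.Timestamp('1991-03-31 23:00')
#     df_winter = cntg_eoy(df_8760, start, end)
# Because the dataframe starts in the same year as `start`, the January-March
# data is shifted forward one year, giving one contiguous October-March index.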
def spans_year(start_date, end_date):
"""
Returns True if the passed start and end dates fall in different years.
Parameters
----------
start_date: pandas Timestamp
end_date: pandas Timestamp
"""
if start_date.year != end_date.year:
return True
else:
return False
def wrap_seasons(df, freq):
"""
Rearrange an 8760 so a quarterly groupby will result in seasonal groups.
Parameters
----------
df : DataFrame
Dataframe to be rearranged.
freq : str
String pandas offset alias to specify aggregation frequency
for reporting condition calculation.
Returns
-------
DataFrame
Todo
----
Write unit test
BQ-NOV vs BQS vs QS
Need to review if BQ is the correct offset alias vs BQS or QS.
"""
check_freqs = ['BQ-JAN', 'BQ-FEB', 'BQ-APR', 'BQ-MAY', 'BQ-JUL',
'BQ-AUG', 'BQ-OCT', 'BQ-NOV']
mnth_int = {'JAN': 1, 'FEB': 2, 'APR': 4, 'MAY': 5, 'JUL': 7,
'AUG': 8, 'OCT': 10, 'NOV': 11}
if freq in check_freqs:
warnings.warn('DataFrame index adjusted to be continuous through the new '
'year, but not returned or set to an attribute for the user. '
'This is not an issue if using RCs with '
'predict_capacities.')
if isinstance(freq, str):
mnth = mnth_int[freq.split('-')[1]]
else:
mnth = freq.startingMonth
year = df.index[0].year
mnths_eoy = 12 - mnth
mnths_boy = 3 - mnths_eoy
if int(mnth) >= 10:
str_date = str(mnths_boy) + '/' + str(year)
else:
str_date = str(mnth) + '/' + str(year)
tdelta = df.index[1] - df.index[0]
date_to_offset = df.loc[str_date].index[-1].to_pydatetime()
start = date_to_offset + tdelta
end = date_to_offset + pd.DateOffset(years=1)
if mnth < 8 or mnth >= 10:
df = cntg_eoy(df, start, end)
else:
df = cntg_eoy(df, end, start)
return df
else:
return df
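# Example usage (illustrative sketch; df_8760 is a hypothetical hourly dataframe):
#     df_seasonal = wrap_seasons(df_8760, 'BQ-NOV')
#     seasonal_groups = df_seasonal.groupby(pd.Grouper(freq='BQ-NOV'))
# For frequencies not in check_freqs the dataframe is returned unchanged.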
def perc_wrap(p):
def numpy_percentile(x):
return np.percentile(x.T, p, interpolation='nearest')
return numpy_percentile
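# Example (illustrative): df['poa'].agg(perc_wrap(90)) returns the value of the
# 'poa' column nearest its 90th percentile; 'poa' is an assumed column name.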
def perc_bounds(perc):
"""
perc : float or tuple
Percentage or tuple of percentages used to filter around the reporting
irradiance in the irrRC_balanced function. A single value applies the same
percentage below and above; a tuple applies (low, high) separately.
"""
if isinstance(perc, tuple):
perc_low = perc[0] / 100
perc_high = perc[1] / 100
else:
perc_low = perc / 100
perc_high = perc / 100
low = 1 - (perc_low)
high = 1 + (perc_high)
return (low, high)
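# Example (illustrative): perc_bounds(20) returns (0.8, 1.2) and
# perc_bounds((10, 20)) returns (0.9, 1.2), i.e. multiplicative bounds to apply
# around a reporting irradiance.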
def perc_difference(x, y):
"""
Calculate percent difference of two values.
"""
if x == y == 0:
return 0
else:
return abs(x - y) / ((x + y) / 2)
def check_all_perc_diff_comb(series, perc_diff):
"""
Check series for pairs of values with percent difference above perc_diff.
Calculates the percent difference between all combinations of two values in
the passed series and checks if all of them are below the passed perc_diff.
Parameters
----------
series : pd.Series
Pandas series of values to check.
perc_diff : float
Percent difference threshold value as decimal i.e. 5% is 0.05.
Returns
-------
bool
"""
c = combinations(series.__iter__(), 2)
return all([perc_difference(x, y) < perc_diff for x, y in c])
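# Example (illustrative):
#     ser = pd.Series([400.0, 402.0, 405.0])
#     check_all_perc_diff_comb(ser, 0.05)    # True: every pair differs by < 5%
#     check_all_perc_diff_comb(ser, 0.001)   # False: 400 vs 405 exceeds 0.1%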
def sensor_filter(df, perc_diff):
"""
Check dataframe for rows with inconsistent values.
Applies check_all_perc_diff_comb function along rows of passed dataframe.
Parameters
----------
df : pandas DataFrame
perc_diff : float
Percent difference as decimal.
"""
if df.shape[1] >= 2:
bool_ser = df.apply(check_all_perc_diff_comb, perc_diff=perc_diff,
axis=1)
return df[bool_ser].index
elif df.shape[1] == 1:
return df.index
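# Example usage (illustrative sketch; df_sensors holds one column per redundant
# irradiance sensor):
#     good_ix = sensor_filter(df_sensors, 0.05)
#     df_agree = df_sensors.loc[good_ix, :]
# Rows where any pair of sensors disagrees by 5% or more are excluded.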
def flt_irr(df, irr_col, low, high, ref_val=None):
"""
Top level filter on irradiance values.
Parameters
----------
df : DataFrame
Dataframe to be filtered.
irr_col : str
String that is the name of the column with the irradiance data.
low : float or int
Minimum value as fraction (0.8) or absolute 200 (W/m^2)
high : float or int
Max value as fraction (1.2) or absolute 800 (W/m^2)
ref_val : float or int
Must provide arg when min/max are fractions
Returns
-------
DataFrame
"""
if ref_val is not None:
low *= ref_val
high *= ref_val
df_renamed = df.rename(columns={irr_col: 'poa'})
flt_str = '@low <= ' + 'poa' + ' <= @high'
indx = df_renamed.query(flt_str).index
return df.loc[indx, :]
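# Example usage (illustrative; the 'GlobInc' column name is an assumption):
#     df_abs = flt_irr(df, 'GlobInc', 400, 800)               # absolute W/m^2 bounds
#     df_rel = flt_irr(df, 'GlobInc', 0.8, 1.2, ref_val=600)  # keeps 480-720 W/m^2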
def filter_grps(grps, rcs, irr_col, low, high, **kwargs):
"""
Apply irradiance filter around passed reporting irradiances to groupby.
For each group in the grps argument the irradiance is filtered by a
percentage around the reporting irradiance provided in rcs.
Parameters
----------
grps : pandas groupby
Groupby object with time groups (months, seasons, etc.).
rcs : pandas DataFrame
Dataframe of reporting conditions. Use the rep_cond method to generate
a dataframe for this argument.
**kwargs
Passed to pandas Grouper to control label and closed side of intervals.
See pandas Grouper documentation for details. Default is left labeled
and left closed.
Returns
-------
pandas groupby
"""
flt_dfs = []
freq = list(grps.groups.keys())[0].freq
for grp_name, grp_df in grps:
ref_val = rcs.loc[grp_name, 'poa']
grp_df_flt = flt_irr(grp_df, irr_col, low, high, ref_val=ref_val)
flt_dfs.append(grp_df_flt)
df_flt = pd.concat(flt_dfs)
df_flt_grpby = df_flt.groupby(pd.Grouper(freq=freq, **kwargs))
return df_flt_grpby
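# Example usage (illustrative sketch; df, rcs, and the 'poa' column are assumptions):
#     grps = df.groupby(pd.Grouper(freq='BQ-NOV'))
#     grps_flt = filter_grps(grps, rcs, 'poa', 0.8, 1.2)
# Each seasonal group is filtered to +/- 20% of its own reporting irradiance.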
def irrRC_balanced(df, low, high, irr_col='GlobInc', plot=False):
"""
Iteratively calculates reporting irradiance that achieves 40/60 balance.
This function is intended to implement a strict interpretation of common
contract language that specifies the reporting irradiance be determined by
finding the irradiance that results in a balance of points within a
+/- percent range of the reporting irradiance. This function
iterates to a solution for the reporting irradiance by calculating the
irradiance that has 10 data points in the filtered dataset above it, then
filtering for a percentage of points around that irradiance, calculating
what percentile the reporting irradiance is in. This procedure continues
until 40% of the points in the filtered dataset are above the calculated
reporting irradiance.
Parameters
----------
df: pandas DataFrame
DataFrame containing irradiance data for calculating the irradiance
reporting condition.
low: float
Bottom value for irradiance filter, usually between 0.5 and 0.8.
high: float
Top value for irradiance filter, usually between 1.2 and 1.5.
irr_col: str
String that is the name of the column with the irradiance data.
plot: bool, default False
Plots graphical view of the algorithm searching for the reporting irradiance.
Useful for troubleshooting or understanding the method.
Returns
-------
Tuple
Float reporting irradiance and filtered dataframe.
"""
if plot:
irr = df[irr_col].values
x = np.ones(irr.shape[0])
plt.plot(x, irr, 'o', markerfacecolor=(0.5, 0.7, 0.5, 0.1))
plt.ylabel('irr')
x_inc = 1.01
vals_above = 10
perc = 100.
pt_qty = 0
loop_cnt = 0
pt_qty_array = []
# print('--------------- MONTH START --------------')
while perc > 0.6 or pt_qty < 50:
# print('####### LOOP START #######')
df_count = df.shape[0]
df_perc = 1 - (vals_above / df_count)
# print('in percent: {}'.format(df_perc))
irr_RC = (df[irr_col].agg(perc_wrap(df_perc * 100)))
# print('ref irr: {}'.format(irr_RC))
flt_df = flt_irr(df, irr_col, low, high, ref_val=irr_RC)
# print('number of vals: {}'.format(df.shape))
pt_qty = flt_df.shape[0]
# print('flt pt qty: {}'.format(pt_qty))
perc = stats.percentileofscore(flt_df[irr_col], irr_RC) / 100
# print('out percent: {}'.format(perc))
vals_above += 1
pt_qty_array.append(pt_qty)
if perc <= 0.6 and pt_qty <= pt_qty_array[loop_cnt - 1]:
break
loop_cnt += 1
if plot:
x_inc += 0.02
y1 = irr_RC * low
y2 = irr_RC * high
plt.plot(x_inc, irr_RC, 'ro')
plt.plot([x_inc, x_inc], [y1, y2])
if plot:
plt.show()
return(irr_RC, flt_df)
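# Example usage (illustrative; the 0.8/1.2 bounds follow the convention above):
#     rc_irr, df_bal = irrRC_balanced(df, 0.8, 1.2, irr_col='GlobInc')
# rc_irr is the reporting irradiance and df_bal the filtered dataframe with
# roughly 40% of its points above rc_irr.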
def fit_model(df, fml='power ~ poa + I(poa * poa) + I(poa * t_amb) + I(poa * w_vel) - 1'):
"""
Fits linear regression using statsmodels to dataframe passed.
Dataframe must be first argument for use with pandas groupby object
apply method.
Parameters
----------
df : pandas dataframe
fml : str
Formula to fit; refer to statsmodels and patsy documentation for format.
Default is the formula in ASTM E2848.
Returns
-------
Statsmodels linear model regression results wrapper object.
"""
mod = smf.ols(formula=fml, data=df)
reg = mod.fit()
return reg
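# Example usage (illustrative sketch; df_flt with 'power', 'poa', 't_amb', and
# 'w_vel' columns is an assumption):
#     reg = fit_model(df_flt)
#     reg.summary()
#     # or fit one model per season:
#     regs = df_flt.groupby(pd.Grouper(freq='BQ-NOV')).apply(fit_model)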
def predict(regs, rcs):
"""
Calculates predicted values for given linear models and predictor values.
Evaluates the first linear model in the iterable with the first row of the
predictor values in the dataframe. Passed arguments must be aligned.
Parameters
----------
regs : iterable of statsmodels regression results wrappers
rcs : pandas dataframe
Dataframe of predictor values used to evaluate each linear model.
The column names must match the strings used in the regression formula.
Returns
-------
Pandas series of predicted values.
"""
pred_cap = pd.Series()
for i, mod in enumerate(regs):
RC_df = pd.DataFrame(rcs.iloc[i, :])
import numpy as np
import pandas as pd
from scipy import stats
from scipy.optimize import curve_fit
import os
import re
from fuelcell import utils
from fuelcell.model import Datum
dlm_default = utils.dlm_default
col_default_labels = {'current':'i', 'potential':'v', 'time':'t', 'current_err':'i_sd', 'potential_err':'v_sd', 'overpotential':'eta', 'tafelcurrent':'log(ioi)', 'realcurr':'real', 'imagcurr':'imag'}
col_default_ids = {'current':2, 'potential':1, 'time':0, 'current_err':2, 'potential_err':3, 'overpotential':2, 'tafelcurrent':3}
ref_electrodes = {'she':0, 'sce':0.241}
thermo_potentials = {'none':0, 'oer':1.23}
expt_types_all = ['cv', 'cp', 'ca', 'lsv', 'eis']
### functions to load raw data ###
def load_data(filename=None, folder=None, pattern='', expt_type='', filetype='', delimiter=dlm_default):
"""
Loads data file(s) as a Datum Object
Function to load electrochemical data files as a Datum object. If called with no
arguments, loads all supported data files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
expt_type: str (default='')
Alternative to specifying pattern; ignored if pattern is specified. All files containing expt_type anywhere in the file name will be loaded. Ex: to load all chronopotentiometry files, specify expt_type='cp'.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = []
if filename:
if type(filename) != list:
filename = [filename]
# for f in filename:
# data.append(utils.read_file(f, delimiter))
if folder:
dirpath = os.path.realpath(folder)
else:
dirpath = os.getcwd()
if expt_type and not pattern:
pattern = r'.*' + expt_type + r'.*'
files = utils.get_files(dirpath, pattern, filetype, filename)
for f in files:
path = os.path.join(dirpath, f)
this_data = utils.read_file(path, delimiter)
if expt_type:
this_data.set_expt_type(expt_type.lower())
else:
for this_type in expt_types_all:
pattern = r'.*' + this_type + r'.*'
if re.match(pattern, f):
this_data.set_expt_type(this_type.lower())
break
if this_data is not None:
data.append(this_data)
return data
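# Example usage (illustrative; folder and file names are hypothetical):
#     data = load_data(folder='experiments', expt_type='cv')
#     for d in data:
#         print(d.get_name(), d.get_expt_type())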
def ca_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads chronoamperometry data
Efficient way to load multiple chronoamperometry data files at once; equivalent to calling load_data and specifying expt_type='ca'. If called with no arguments, loads all chronoamperometry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'ca', filetype, delimiter)
return data
def cp_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads chronopotentiometry data
Efficient way to load multiple chronopotentiometry files at once; equivalent to calling load_data and specifying expt_type='cp'. If called with no arguments, loads all chronopotentiometry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'cp', filetype, delimiter)
return data
def cv_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads cyclic voltammetry data
Efficient way to load multiple cyclic voltammetry files at once; equivalent to calling load_data and specifying expt_type='cv'. If called with no arguments, loads all cyclic voltammetry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'cv', filetype, delimiter)
return data
def lsv_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads linear sweep voltammetry data
Efficient way to load multiple linear sweep voltammetry files at once; equivalent to calling load_data and specifying expt_type='lsv'. If called with no arguments, loads all linear sweep voltammetry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'lsv', filetype, delimiter)
return data
def eis_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads electrochemical impedance spectroscopy data
Efficient way to load multiple electrochemical impedance spectroscopy files at once; equivalent to calling load_data and specifying expt_type='eis'. If called with no arguments, loads all electrochemical impedance spectroscopy files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'eis', filetype, delimiter)
return data
### high-level functions for processing data ###
def ca_process(data=None, current_column=2, potential_column=1, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', threshold=5, min_step_length=50, pts_to_average=300, pyramid=False, **kwargs):
"""
Processes chronoamperometry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'ca' files in the present folder. See process_steps for details on the operations performed.
Parameters
___________
data: list of Datum
List of Datum objects containing CA data. If unspecified, data will be loaded using ca_raw before processing.
current_column : int or str (default=2)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=1)
Index or label of the column containing potential data. Used only if automatic column identification fails
threshold: int (default=5)
Minimum consecutive absolute difference which constitutes a step
min_step_length: int (default=50)
Minimum length of the arrays which result from splitting the initial array. Arrays shorter than this value will be discarded
pts_to_average: int (default=300)
Steady-state average and sd are calculated using the last pts_to_average values of the array. Default is 300 points, which is the last 30 seconds of each hold at the instrument's default collection rate of 10 Hz.
pyramid: bool (default=False)
Specifies whether the current is ramped in both directions. Set pyramid=True if the current ramps both up and down.
area: int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density.
reference: {'she', 'sce'}, int, or float (default='she')
Either a string identifying the reference electrode (ie 'she' or 'sce'), or the potential of the reference electrode used. sce=0.241
**kwargs:
Remaining arguments are passed to ca_raw to load data
"""
if data is None:
data = ca_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'ca':
raw = d.get_raw_data()
processed = process_steps(raw, potential_column, current_column, threshold, min_step_length, pts_to_average, pyramid, 'ca', area, reference, thermo_potential)
d.set_processed_data(processed)
d.set_current_data(processed['i'])
d.set_potential_data(processed['v'])
d.set_overpotential_data(processed['eta'])
d.set_error_data(processed['i_sd'])
set_datum_params(d, area, reference, thermo_potential)
if export_data:
name = d.get_name()
utils.save_data(processed, name+'.csv', save_dir)
return data
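# Example usage (illustrative sketch; the folder name and area are assumptions):
#     ca_data = ca_process(folder='ca_data', area=5, reference='she',
#                          export_data=True, save_dir='processed')
# Each returned Datum carries the steady-state current, potential, and
# overpotential set above, and a CSV per input file is written to 'processed'.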
def cp_process(data=None, current_column=2, potential_column=1, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', threshold=5, min_step_length=25, pts_to_average=300, pyramid=True, **kwargs):
"""
Processes chronopotentiometry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'cp' files in the present folder. See process_steps for details on the operations performed.
Parameters
___________
data: list of Datum
List of Datum objects containing CP data. If unspecified, data will be loaded using cp_raw before processing.
current_column : int or str (default=2)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=1)
Index or label of the column containing potential data. Used only if automatic column identification fails
threshold: int (default=5)
Minimum consecutive absolute difference which constitutes a step
min_step_length: int (default=25)
Minimum length of the arrays which result from splitting the initial array. Arrays shorter than this value will be discarded
pts_to_average: int (default=300)
Steady-state average and sd are calculated using the last pts_to_average values of the array. Default is 300 points, which is the last 30 seconds of each hold at the instrument's default collection rate of 10 Hz.
pyramid: bool (default=True)
Specifies whether the current is ramped in both directions. Set pyramid=False if only ramping up or only ramping down.
area: int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density.
reference: {'she', 'sce'}, int, or float (default='she')
Either a string identifying the reference electrode (ie 'she' or 'sce'), or the potential of the reference electrode used. sce=0.241
**kwargs:
Remaining arguments are passed to cp_raw to load data
"""
if data is None:
data = cp_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'cp':
raw = d.get_raw_data()
processed = process_steps(raw, current_column, potential_column, threshold, min_step_length, pts_to_average, pyramid, 'cp', area, reference, thermo_potential)
d.set_processed_data(processed)
d.set_current_data(processed['i'])
d.set_potential_data(processed['v'])
d.set_overpotential_data(processed['eta'])
d.set_error_data(processed['v_sd'])
set_datum_params(d, area, reference, thermo_potential)
if export_data:
name = d.get_name()
utils.save_data(processed, name+'.csv', save_dir)
return data
def cv_process(data=None, current_column=1, potential_column=0, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', **kwargs):
"""
Processes cyclic voltammetry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'cv' files in the present folder. Peforms the following operations in order:
1. Parse column labels to find columns containing current and potential data. If parsing fails, specified labels/indices are used
2. Convert current to current density using the specified area
Parameters
__________
data: list of Datum
List of Datum objects containing CV data. If unspecified, data will be loaded using cv_raw before processing.
area : int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density
current_column : int or str (default=1)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=0)
Index or label of the column containing potential data. Used only if automatic column identification fails
**kwargs:
Remaining arguments are passed to cv_raw to load data
"""
if data is None:
data = cv_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'cv':
raw = d.get_raw_data()
current = find_col(raw, 'current', current_column)
current = current / area
potential = find_col(raw, 'potential', potential_column)
potential = electrode_correct(potential, reference)
overpotential = overpotential_correct(potential, thermo_potential)
processed = pd.DataFrame({'i':current, 'v':potential, 'eta':overpotential})
d.set_processed_data(processed)
d.set_current_data(current)
d.set_potential_data(potential)
d.set_overpotential_data(overpotential)
set_datum_params(d, area, reference, thermo_potential)
if export_data:
name = d.get_name()
utils.save_data(processed, name+'.csv', save_dir)
return data
def lsv_process(data=None, potential_column=0, current_column=1, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', **kwargs):
"""
Processes linear sweep voltammetry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'lsv' files in the present folder. Performs operations analogous to cv_process and additionally computes the base-10 log of the current for Tafel analysis.
Parameters
__________
data: list of Datum
List of Datum objects containing LSV data. If unspecified, data will be loaded using lsv_raw before processing.
area : int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density
current_column : int or str (default=1)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=0)
Index or label of the column containing potential data. Used only if automatic column identification fails
**kwargs:
Remaining arguments are passed to lsv_raw to load data
"""
area = area / 10000 #cm2 to m2
if data is None:
data = lsv_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'lsv':
raw = d.get_raw_data()
potential = find_col(raw, 'potential', potential_column)
potential = electrode_correct(potential, reference)
overpotential = overpotential_correct(potential, thermo_potential)
current = find_col(raw, 'current', current_column)
current = current / area
log_current = current - min(current) + 0.000001
log_current = np.log10(log_current)
processed = pd.DataFrame({'v':potential, 'i':current, 'eta':overpotential, 'log(ioi)':log_current})
d.set_processed_data(processed)
d.set_potential_data(potential)
d.set_overpotential_data(overpotential)
d.set_current_data(current)
d.set_logcurrent_data(log_current)
set_datum_params(d, area, reference, thermo_potential)
if export_data:
name = d.get_name()
utils.save_data(processed, name+'.csv', save_dir)
return data
def eis_process(data=None, freq_column=10, real_column=0, imag_column=1, area=5, threshold=5, min_step_length=5, export_data=False, save_dir='processed', **kwargs):
"""
Processes electrochemical impedance spectroscopy data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'eis' files in the present folder. Performs the following operations: splits the impedance data at zero crossings, drops negative values, and assembles a real/imaginary impedance dataframe for each split.
Parameters
__________
data: list of Datum
List of Datum objects containing EIS data. If unspecified, data will be loaded using eis_raw before processing.
area : int or float (default=5)
Geometric active area of the MEA. Retained for future current-density scaling; not used in the active code path.
real_column : int or str (default=0)
Index or label of the column containing the real component of the impedance.
imag_column : int or str (default=1)
Index or label of the column containing the imaginary component of the impedance.
freq_column : int or str (default=10)
Index or label of the column containing frequency data; reserved for future frequency analysis.
**kwargs:
Remaining arguments are passed to eis_raw to load data
"""
if data is None:
data = eis_raw(**kwargs)
new_data = []
for d in data:
if d.get_expt_type() == 'eis':
basename = d.get_name()
raw = d.get_raw_data()
### TODO: add support for GEIS and PEIS specifically, as well as frequency analysis ###
# freq_all = np.asarray(raw.iloc[:,freq_column])
real_all = np.asarray(raw.iloc[:,real_column])
imag_all = np.asarray(raw.iloc[:,imag_column])
# current_all = np.asarray(find_col(raw, 'current', 2))
# split_pts = find_steps(current_all, threshold=threshold)
# current_splits = split_and_filter(current_all, split_pts, min_length=min_step_length)
# real_splits = split_and_filter(real_all, split_pts, min_length=min_step_length)
# imag_splits = split_and_filter(imag_all, split_pts, min_length=min_step_length)
# freq_splits = split_and_filter(freq_all, split_pts, min_length=min_step_length)
real_splits, imag_splits = split_at_zeros(real_all, imag_all)
real_splits, imag_splits = drop_neg(real_splits, imag_splits)
i = 0
# for f, re, im, curr in zip(freq_splits, real_splits, imag_splits, current_splits):
for re, im in zip(real_splits, imag_splits):
re, im = np.asarray(re), np.asarray(im)
# f = np.asarray(f)
this_re = re[(im > 0) & (re > 0)]
this_im = im[(im > 0) & (re > 0)]
# this_f = f[(im > 0) & (re > 0)]
# curr = np.asarray(curr) / area
# mean_curr = int(np.abs(curr.mean()))
# df = pd.DataFrame({'freq':this_f, 'real':this_re, 'imag':this_im})
df = pd.DataFrame({'real':this_re, 'imag':this_im})
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import pickle
from sqlite3 import OperationalError
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from sqlalchemy import delete, func, insert, select, sql
from sqlalchemy.schema import CreateTable
from athenian.api.async_utils import read_sql_query
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.commit import _empty_dag, _fetch_commit_history_dag, \
fetch_repository_commits
from athenian.api.controllers.miners.github.dag_accelerated import extract_subdag, join_dags, \
mark_dag_access, mark_dag_parents, partition_dag
from athenian.api.controllers.miners.github.precomputed_prs import store_precomputed_done_facts
from athenian.api.controllers.miners.github.pull_request import PullRequestFactsMiner
from athenian.api.controllers.miners.github.release_load import group_repos_by_release_match
from athenian.api.controllers.miners.github.release_match import PullRequestToReleaseMapper, \
ReleaseToPullRequestMapper
from athenian.api.controllers.miners.github.release_mine import mine_releases, \
mine_releases_by_name
from athenian.api.controllers.miners.github.released_pr import matched_by_column
from athenian.api.controllers.miners.types import released_prs_columns
from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseMatch, \
ReleaseMatchSetting, ReleaseSettings
from athenian.api.db import Database
from athenian.api.defer import wait_deferred, with_defer
from athenian.api.models.metadata.github import Branch, NodeCommit, PullRequest, \
PullRequestLabel, Release
from athenian.api.models.persistentdata.models import ReleaseNotification
from athenian.api.models.precomputed.models import GitHubCommitHistory, \
GitHubRelease as PrecomputedRelease
from tests.conftest import _metadata_db
from tests.controllers.test_filter_controller import force_push_dropped_go_git_pr_numbers
def generate_repo_settings(prs: pd.DataFrame) -> ReleaseSettings:
return ReleaseSettings({
"github.com/" + r: ReleaseMatchSetting(
branches="", tags=".*", events=".*", match=ReleaseMatch.tag)
for r in prs.index.get_level_values(1).values
})
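# Illustrative note (not part of the original tests): for a prs frame indexed by
# (node_id, 'src-d/go-git'), generate_repo_settings(prs) yields
# ReleaseSettings({'github.com/src-d/go-git': ReleaseMatchSetting(
#     branches='', tags='.*', events='.*', match=ReleaseMatch.tag)}).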
@with_defer
async def test_map_prs_to_releases_cache(
branches, default_branches, dag, mdb, pdb, rdb, cache, release_loader, prefixer):
prs = await read_sql_query(
select([PullRequest]).where(PullRequest.number == 1126),
mdb, PullRequest, index=[PullRequest.node_id.name, PullRequest.repository_full_name.name])
prs["dead"] = False
time_to = datetime(year=2020, month=4, day=1, tzinfo=timezone.utc)
time_from = time_to - timedelta(days=5 * 365)
release_settings = generate_repo_settings(prs)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"], branches, default_branches, time_from, time_to,
release_settings, LogicalRepositorySettings.empty(),
prefixer, 1, (6366825,), mdb, pdb, rdb, None)
tag = "https://github.com/src-d/go-git/releases/tag/v4.12.0"
for i in range(2):
released_prs, facts, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag, release_settings,
prefixer, 1, (6366825,), mdb, pdb, cache)
await wait_deferred()
assert isinstance(facts, dict)
assert len(facts) == 0
assert len(cache.mem) > 0
assert len(released_prs) == 1, str(i)
assert released_prs.iloc[0][Release.url.name] == tag
assert released_prs.iloc[0][Release.published_at.name] == \
pd.Timestamp("2019-06-18 22:57:34+0000", tzinfo=timezone.utc)
assert released_prs.iloc[0][Release.author.name] == "mcuadros"
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag, release_settings,
prefixer, 1, (6366825,), mdb, pdb, None)
# the PR was merged and released in the past, we must detect that
assert len(released_prs) == 1
assert released_prs.iloc[0][Release.url.name] == tag
@with_defer
async def test_map_prs_to_releases_pdb(branches, default_branches, dag, mdb, pdb, rdb,
release_loader, prefixer):
prs = await read_sql_query(
select([PullRequest]).where(PullRequest.number.in_((1126, 1180))),
mdb, PullRequest, index=[PullRequest.node_id.name, PullRequest.repository_full_name.name])
prs["dead"] = False
time_to = datetime(year=2020, month=4, day=1, tzinfo=timezone.utc)
time_from = time_to - timedelta(days=5 * 365)
release_settings = generate_repo_settings(prs)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"], branches, default_branches, time_from, time_to, release_settings,
LogicalRepositorySettings.empty(), prefixer, 1, (6366825,), mdb, pdb, rdb, None)
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag,
release_settings, prefixer, 1, (6366825,), mdb, pdb, None)
await wait_deferred()
assert len(released_prs) == 1
dummy_mdb = await Database("sqlite://").connect()
try:
prlt = PullRequestLabel.__table__
if prlt.schema:
for table in (PullRequestLabel, NodeCommit):
table = table.__table__
table.name = "%s.%s" % (table.schema, table.name)
table.schema = None
for table in (PullRequestLabel, NodeCommit):
await dummy_mdb.execute(CreateTable(table.__table__))
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag, release_settings,
prefixer, 1, (6366825,), dummy_mdb, pdb, None)
assert len(released_prs) == 1
finally:
if "." in prlt.name:
for table in (PullRequestLabel, NodeCommit):
table = table.__table__
table.schema, table.name = table.name.split(".")
await dummy_mdb.disconnect()
@with_defer
async def test_map_prs_to_releases_empty(branches, default_branches, dag, mdb, pdb, rdb, cache,
release_loader, prefixer):
prs = await read_sql_query(
select([PullRequest]).where(PullRequest.number == 1231),
mdb, PullRequest, index=[PullRequest.node_id.name, PullRequest.repository_full_name.name])
prs["dead"] = False
time_to = datetime(year=2020, month=4, day=1, tzinfo=timezone.utc)
time_from = time_to - timedelta(days=5 * 365)
release_settings = generate_repo_settings(prs)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"], branches, default_branches, time_from, time_to,
release_settings, LogicalRepositorySettings.empty(),
prefixer, 1, (6366825,), mdb, pdb, rdb, None)
for i in range(2):
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag, release_settings,
prefixer, 1, (6366825,), mdb, pdb, cache)
assert len(cache.mem) == 1, i
assert released_prs.empty
prs = prs.iloc[:0]
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag, release_settings,
prefixer, 1, (6366825,), mdb, pdb, cache)
assert len(cache.mem) == 1
assert released_prs.empty
@with_defer
async def test_map_prs_to_releases_precomputed_released(
branches, default_branches, dag, mdb, pdb, rdb, release_match_setting_tag,
release_loader, pr_miner, prefixer, bots):
time_to = datetime(year=2019, month=8, day=2, tzinfo=timezone.utc)
time_from = time_to - timedelta(days=2)
miner, _, _, _ = await pr_miner.mine(
time_from.date(),
time_to.date(),
time_from,
time_to,
{"src-d/go-git"},
{},
LabelFilter.empty(),
JIRAFilter.empty(),
False,
branches, default_branches,
False,
release_match_setting_tag,
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
facts_miner = PullRequestFactsMiner(bots)
true_prs = [pr for pr in miner if pr.release[Release.published_at.name] is not None]
facts = [facts_miner(pr) for pr in true_prs]
prs = pd.DataFrame([pr.pr for pr in true_prs]).set_index(
[PullRequest.node_id.name, PullRequest.repository_full_name.name])
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"], branches, default_branches, time_from, time_to,
release_match_setting_tag, LogicalRepositorySettings.empty(), prefixer,
1, (6366825,), mdb, pdb, rdb, None)
await pdb.execute(delete(GitHubCommitHistory))
dummy_mdb = await Database("sqlite://").connect()
prlt = PullRequestLabel.__table__
try:
if prlt.schema:
for table in (PullRequestLabel, NodeCommit):
table = table.__table__
table.name = "%s.%s" % (table.schema, table.name)
table.schema = None
for table in (PullRequestLabel, NodeCommit):
await dummy_mdb.execute(CreateTable(table.__table__))
await store_precomputed_done_facts(
true_prs, facts, default_branches, release_match_setting_tag, 1, pdb)
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag,
release_match_setting_tag, prefixer, 1, (6366825,), dummy_mdb, pdb, None)
assert len(released_prs) == len(prs)
finally:
if "." in prlt.name:
for table in (PullRequestLabel, NodeCommit):
table = table.__table__
table.schema, table.name = table.name.split(".")
await dummy_mdb.disconnect()
@with_defer
async def test_map_releases_to_prs_early_merges(
branches, default_branches, mdb, pdb, rdb, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, dag, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2018, month=1, day=7, tzinfo=timezone.utc),
datetime(year=2018, month=1, day=9, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag, LogicalRepositorySettings.empty(), None, None, None,
prefixer, 1, (6366825,), mdb, pdb, rdb, None)
assert len(releases) == 1
assert len(prs) == 61
assert (prs[PullRequest.merged_at.name] >
datetime(year=2017, month=9, day=4, tzinfo=timezone.utc)).all()
assert isinstance(dag, dict)
dag = dag["src-d/go-git"]
assert len(dag) == 3
assert len(dag[0]) == 1012
assert dag[0].dtype == np.dtype("S40")
assert len(dag[1]) == 1013
assert dag[1].dtype == np.uint32
assert len(dag[2]) == dag[1][-1]
assert dag[2].dtype == np.uint32
@with_defer
async def test_map_releases_to_prs_smoke(
branches, default_branches, mdb, pdb, rdb, cache, release_match_setting_tag_or_branch,
releases_to_prs_mapper, prefixer):
for _ in range(2):
prs, releases, new_settings, matched_bys, dag, _ = \
await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2019, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag_or_branch, LogicalRepositorySettings.empty(),
None, None, None, prefixer, 1, (6366825,), mdb, pdb, rdb, cache)
await wait_deferred()
assert len(prs) == 7
assert len(dag["src-d/go-git"][0]) == 1508
assert (prs[PullRequest.merged_at.name] < pd.Timestamp(
"2019-07-31 00:00:00", tzinfo=timezone.utc)).all()
assert (prs[PullRequest.merged_at.name] > pd.Timestamp(
"2019-06-19 00:00:00", tzinfo=timezone.utc)).all()
assert len(releases) == 2
assert set(releases[Release.sha.name]) == {
"0d1a009cbb604db18be960db5f1525b99a55d727",
"6241d0e70427cb0db4ca00182717af88f638268c",
}
assert new_settings == ReleaseSettings({
"github.com/src-d/go-git": ReleaseMatchSetting(
branches="master", tags=".*", events=".*", match=ReleaseMatch.tag),
})
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
@with_defer
async def test_map_releases_to_prs_no_truncate(
branches, default_branches, mdb, pdb, rdb, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2018, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2018, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag, LogicalRepositorySettings.empty(), None, None, None,
prefixer, 1, (6366825,), mdb, pdb, rdb, None, truncate=False)
assert len(prs) == 8
assert len(releases) == 5 + 7
assert releases[Release.published_at.name].is_monotonic_decreasing
assert releases.index.is_monotonic
assert "v4.13.1" in releases[Release.tag.name].values
@with_defer
async def test_map_releases_to_prs_empty(
branches, default_branches, mdb, pdb, rdb, cache, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=7, day=1, tzinfo=timezone.utc),
datetime(year=2019, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(), release_match_setting_tag, LogicalRepositorySettings.empty(),
None, None, None, prefixer, 1, (6366825,), mdb, pdb, rdb, cache)
await wait_deferred()
assert prs.empty
assert len(cache.mem) == 5
assert len(releases) == 2
assert set(releases[Release.sha.name]) == {
"0d1a009cbb604db18be960db5f1525b99a55d727",
"6241d0e70427cb0db4ca00182717af88f638268c",
}
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=7, day=1, tzinfo=timezone.utc),
datetime(year=2019, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(), ReleaseSettings({
"github.com/src-d/go-git": ReleaseMatchSetting(
branches="master", tags=".*", events=".*", match=ReleaseMatch.branch),
}), LogicalRepositorySettings.empty(), None, None, None, prefixer,
1, (6366825,), mdb, pdb, rdb, cache)
assert prs.empty
assert len(cache.mem) == 11
assert len(releases) == 19
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
@with_defer
async def test_map_releases_to_prs_blacklist(
branches, default_branches, mdb, pdb, rdb, cache, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2019, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(), release_match_setting_tag, LogicalRepositorySettings.empty(),
None, None, None, prefixer, 1, (6366825,), mdb, pdb, rdb, cache,
pr_blacklist=PullRequest.node_id.notin_([
163378, 163380,
163395, 163375,
163377, 163397,
163396,
]))
assert prs.empty
assert len(releases) == 2
assert set(releases[Release.sha.name]) == {
"<KEY>",
"6241d0e70427cb0db4ca00182717af88f638268c",
}
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
@pytest.mark.parametrize("authors, mergers, n", [(["mcuadros"], [], 2),
([], ["mcuadros"], 7),
(["mcuadros"], ["mcuadros"], 7)])
@with_defer
async def test_map_releases_to_prs_authors_mergers(
branches, default_branches, mdb, pdb, rdb, cache, prefixer,
release_match_setting_tag, authors, mergers, n, releases_to_prs_mapper):
prs, releases, new_settings, matched_bys, _, _ = \
await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2019, month=12, day=2, tzinfo=timezone.utc),
authors, mergers, JIRAFilter.empty(), release_match_setting_tag,
LogicalRepositorySettings.empty(),
None, None, None, prefixer, 1, (6366825,), mdb, pdb, rdb, cache)
assert len(prs) == n
assert len(releases) == 2
assert set(releases[Release.sha.name]) == {
"0d1a009cbb604db18be960db5f1525b99a55d727",
"6241d0e70427cb0db4ca00182717af88f638268c",
}
assert new_settings == release_match_setting_tag
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
@with_defer
async def test_map_releases_to_prs_hard(
branches, default_branches, mdb, pdb, rdb, cache, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=6, day=18, tzinfo=timezone.utc),
datetime(year=2019, month=6, day=30, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag, LogicalRepositorySettings.empty(), None, None, None,
prefixer, 1, (6366825,), mdb, pdb, rdb, cache)
assert len(prs) == 24
assert len(releases) == 1
assert set(releases[Release.sha.name]) == {
"f9a30199e7083bdda8adad3a4fa2ec42d25c1fdb",
}
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
@with_defer
async def test_map_releases_to_prs_future(
branches, default_branches, mdb, pdb, rdb, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2018, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2030, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag, LogicalRepositorySettings.empty(), None, None, None,
prefixer, 1, (6366825,), mdb, pdb, rdb, None, truncate=False)
assert len(prs) == 8
assert releases is not None
assert len(releases) == 12
@with_defer
async def test_map_releases_to_prs_precomputed_observed(
branches, default_branches, mdb, pdb, rdb, release_match_setting_tag,
releases_to_prs_mapper, prefixer):
args = [
["src-d/go-git"],
branches, default_branches,
datetime(year=2018, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2030, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag, LogicalRepositorySettings.empty(), None, None, None,
prefixer, 1, (6366825,), mdb, pdb, rdb, None,
]
prs1, _, _, _, _, precomputed_observed = \
await releases_to_prs_mapper.map_releases_to_prs(*args, truncate=False)
prs2 = await releases_to_prs_mapper.map_releases_to_prs(
*args, truncate=False, precomputed_observed=precomputed_observed)
assert_frame_equal(prs1, prs2)
@pytest.mark.flaky(reruns=2)
@with_defer
async def test_map_prs_to_releases_smoke_metrics(
branches, default_branches, dag, mdb, pdb, rdb, release_loader, worker_id,
prefixer):
try:
await mdb.fetch_val(select([func.count(PullRequestLabel.node_id)]))
except OperationalError as e:
# this happens sometimes, we have to reset the DB and proceed to the second lap
await mdb.disconnect()
_metadata_db(worker_id, True)
raise e from None
time_from = datetime(year=2015, month=10, day=13, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=1, day=24, tzinfo=timezone.utc)
filters = [
sql.and_(PullRequest.merged_at > time_from, PullRequest.created_at < time_to),
PullRequest.repository_full_name.in_(["src-d/go-git"]),
PullRequest.user_login.in_(["mcuadros", "vmarkovtsev"]),
]
prs = await read_sql_query(
select([PullRequest]).where(sql.and_(*filters)),
mdb, PullRequest, index=[PullRequest.node_id.name, PullRequest.repository_full_name.name])
prs["dead"] = False
time_to = datetime(year=2020, month=4, day=1, tzinfo=timezone.utc)
time_from = time_to - timedelta(days=5 * 365)
release_settings = generate_repo_settings(prs)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"], branches, default_branches, time_from, time_to,
release_settings, LogicalRepositorySettings.empty(),
prefixer, 1, (6366825,), mdb, pdb, rdb, None)
released_prs, _, _ = await PullRequestToReleaseMapper.map_prs_to_releases(
prs, releases, matched_bys, branches, default_branches, time_to, dag, release_settings,
prefixer, 1, (6366825,), mdb, pdb, None)
assert set(released_prs[Release.url.name].unique()) == {
"https://github.com/src-d/go-git/releases/tag/v4.0.0-rc10",
"https://github.com/src-d/go-git/releases/tag/v4.0.0-rc11",
"https://github.com/src-d/go-git/releases/tag/v4.0.0-rc13",
"https://github.com/src-d/go-git/releases/tag/v4.0.0-rc12",
"https://github.com/src-d/go-git/releases/tag/v4.0.0-rc14",
"https://github.com/src-d/go-git/releases/tag/v4.0.0-rc15",
"https://github.com/src-d/go-git/releases/tag/v4.0.0",
"https://github.com/src-d/go-git/releases/tag/v4.2.0",
"https://github.com/src-d/go-git/releases/tag/v4.1.1",
"https://github.com/src-d/go-git/releases/tag/v4.2.1",
"https://github.com/src-d/go-git/releases/tag/v4.5.0",
"https://github.com/src-d/go-git/releases/tag/v4.11.0",
"https://github.com/src-d/go-git/releases/tag/v4.7.1",
"https://github.com/src-d/go-git/releases/tag/v4.8.0",
"https://github.com/src-d/go-git/releases/tag/v4.10.0",
"https://github.com/src-d/go-git/releases/tag/v4.12.0",
"https://github.com/src-d/go-git/releases/tag/v4.13.0",
}
def check_branch_releases(releases: pd.DataFrame, n: int, time_from: datetime, time_to: datetime):
assert len(releases) == n
assert "mcuadros" in set(releases[Release.author.name])
assert len(releases[Release.commit_id.name].unique()) == n
assert releases[Release.node_id.name].all()
assert all(len(n) == 40 for n in releases[Release.name.name])
assert releases[Release.published_at.name].between(time_from, time_to).all()
assert (releases[Release.repository_full_name.name] == "src-d/go-git").all()
assert all(len(n) == 40 for n in releases[Release.sha.name])
assert len(releases[Release.sha.name].unique()) == n
assert (~releases[Release.tag.name].values.astype(bool)).all()
assert releases[Release.url.name].str.startswith("http").all()
@pytest.mark.parametrize("branches_", ["{{default}}", "master", "m.*"])
@with_defer
async def test_load_releases_branches(branches, default_branches, mdb, pdb, rdb, cache, branches_,
release_loader, prefixer):
time_from = datetime(year=2017, month=10, day=13, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=1, day=24, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches=branches_, tags="", events="", match=ReleaseMatch.branch)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
cache,
)
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
check_branch_releases(releases, 240, time_from, time_to)
@with_defer
async def test_load_releases_branches_empty(branches, default_branches, mdb, pdb, rdb, cache,
release_loader, prefixer):
time_from = datetime(year=2017, month=10, day=13, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=1, day=24, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches="unknown", tags="", events="", match=ReleaseMatch.branch)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
cache,
)
assert len(releases) == 0
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
@pytest.mark.parametrize("time_from, n, pretag", [
(datetime(year=2017, month=10, day=4, tzinfo=timezone.utc), 45, False),
(datetime(year=2017, month=9, day=4, tzinfo=timezone.utc), 1, False),
(datetime(year=2017, month=12, day=8, tzinfo=timezone.utc), 0, False),
(datetime(year=2017, month=9, day=4, tzinfo=timezone.utc), 1, True),
])
@with_defer
async def test_load_releases_tag_or_branch_dates(
branches, default_branches, release_match_setting_tag, mdb, pdb, rdb, cache,
time_from, n, pretag, release_loader, with_preloading_enabled, prefixer):
time_to = datetime(year=2017, month=12, day=8, tzinfo=timezone.utc)
if pretag:
await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
release_match_setting_tag,
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
cache,
)
await wait_deferred()
if with_preloading_enabled:
await pdb.cache.refresh()
release_settings = ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches="master", tags=".*", events=".*", match=ReleaseMatch.tag_or_branch)})
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
release_settings,
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
cache,
)
await wait_deferred()
if with_preloading_enabled:
await pdb.cache.refresh()
match_groups, repos_count = group_repos_by_release_match(
["src-d/go-git"], default_branches, release_settings)
spans = (await release_loader.fetch_precomputed_release_match_spans(
match_groups, 1, pdb))["src-d/go-git"]
assert ReleaseMatch.tag in spans
if n > 1:
assert ReleaseMatch.branch in spans
check_branch_releases(releases, n, time_from, time_to)
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
else:
assert len(releases) == n
if n > 0:
if pretag:
assert ReleaseMatch.branch not in spans
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
else:
assert ReleaseMatch.branch in spans
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
@with_defer
async def test_load_releases_tag_or_branch_initial(branches, default_branches, mdb, pdb, rdb,
release_loader, prefixer):
time_from = datetime(year=2015, month=1, day=1, tzinfo=timezone.utc)
time_to = datetime(year=2015, month=10, day=22, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches="master", tags="", events=".*", match=ReleaseMatch.branch)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
check_branch_releases(releases, 17, time_from, time_to)
@with_defer
async def test_load_releases_tag_logical(
branches, default_branches, mdb, pdb, rdb, release_loader, prefixer,
logical_settings, release_match_setting_tag_logical):
time_from = datetime(year=2015, month=1, day=1, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=10, day=22, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git/alpha", "src-d/go-git/beta"],
branches, default_branches,
time_from,
time_to,
release_match_setting_tag_logical,
logical_settings,
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
await wait_deferred()
assert matched_bys == {
"src-d/go-git/alpha": ReleaseMatch.tag,
"src-d/go-git/beta": ReleaseMatch.tag,
}
assert (releases[Release.repository_full_name.name] == "src-d/go-git/alpha").sum() == 53
assert (releases[Release.repository_full_name.name] == "src-d/go-git/beta").sum() == 37
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git", "src-d/go-git/alpha", "src-d/go-git/beta"],
branches, default_branches,
time_from,
time_to,
release_match_setting_tag_logical,
logical_settings,
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
assert matched_bys == {
"src-d/go-git": ReleaseMatch.tag,
"src-d/go-git/alpha": ReleaseMatch.tag,
"src-d/go-git/beta": ReleaseMatch.tag,
}
assert (releases[Release.repository_full_name.name] == "src-d/go-git").sum() == 53
assert (releases[Release.repository_full_name.name] == "src-d/go-git/alpha").sum() == 53
assert (releases[Release.repository_full_name.name] == "src-d/go-git/beta").sum() == 37
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git", "src-d/go-git/beta"],
branches, default_branches,
time_from,
time_to,
release_match_setting_tag_logical,
logical_settings,
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
assert matched_bys == {
"src-d/go-git": ReleaseMatch.tag,
"src-d/go-git/beta": ReleaseMatch.tag,
}
assert (releases[Release.repository_full_name.name] == "src-d/go-git").sum() == 53
assert (releases[Release.repository_full_name.name] == "src-d/go-git/beta").sum() == 37
@with_defer
async def test_map_releases_to_prs_branches(
branches, default_branches, mdb, pdb, rdb, releases_to_prs_mapper, prefixer):
time_from = datetime(year=2015, month=4, day=1, tzinfo=timezone.utc)
time_to = datetime(year=2015, month=5, day=1, tzinfo=timezone.utc)
release_settings = ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches="master", tags="", events=".*", match=ReleaseMatch.branch)})
prs, releases, new_settings, matched_bys, _, _ = \
await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
time_from, time_to,
[], [], JIRAFilter.empty(),
release_settings,
LogicalRepositorySettings.empty(),
None, None, None,
prefixer, 1, (6366825,), mdb, pdb, rdb, None)
assert prs.empty
assert len(releases) == 1
assert releases[Release.sha.name][0] == "5d7303c49ac984a9fec60523f2d5297682e16646"
assert new_settings == release_settings
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
@with_defer
async def test_map_releases_to_prs_updated_min_max(
branches, default_branches, release_match_setting_tag, mdb, pdb, rdb,
releases_to_prs_mapper, prefixer):
prs, releases, _, matched_bys, _, _ = await releases_to_prs_mapper.map_releases_to_prs(
["src-d/go-git"],
branches, default_branches,
datetime(year=2018, month=7, day=31, tzinfo=timezone.utc),
datetime(year=2030, month=12, day=2, tzinfo=timezone.utc),
[], [], JIRAFilter.empty(),
release_match_setting_tag, LogicalRepositorySettings.empty(),
datetime(2018, 7, 20, tzinfo=timezone.utc), datetime(2019, 1, 1, tzinfo=timezone.utc),
None, prefixer, 1, (6366825,), mdb, pdb, rdb, None, truncate=False)
assert len(prs) == 5
assert releases is not None
assert len(releases) == 12
@pytest.mark.parametrize("repos", [["src-d/gitbase"], []])
@with_defer
async def test_load_releases_empty(
branches, default_branches, mdb, pdb, rdb, repos, release_loader, prefixer):
releases, matched_bys = await release_loader.load_releases(
repos,
branches, default_branches,
datetime(year=2020, month=6, day=30, tzinfo=timezone.utc),
datetime(year=2020, month=7, day=30, tzinfo=timezone.utc),
ReleaseSettings({"github.com/src-d/gitbase": ReleaseMatchSetting(
branches=".*", tags=".*", events=".*", match=ReleaseMatch.branch)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
index=Release.node_id.name)
assert releases.empty
if repos:
assert matched_bys == {"src-d/gitbase": ReleaseMatch.branch}
time_from = datetime(year=2017, month=3, day=4, tzinfo=timezone.utc)
time_to = datetime(year=2017, month=12, day=8, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches="master", tags="", events=".*", match=ReleaseMatch.tag)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
assert releases.empty
assert matched_bys == {"src-d/go-git": ReleaseMatch.tag}
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches="", tags=".*", events=".*", match=ReleaseMatch.branch)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
assert releases.empty
assert matched_bys == {"src-d/go-git": ReleaseMatch.branch}
@with_defer
async def test_load_releases_events_settings(
branches, default_branches, mdb, pdb, rdb, release_loader, prefixer):
await rdb.execute(insert(ReleaseNotification).values(ReleaseNotification(
account_id=1,
repository_node_id=40550,
commit_hash_prefix="8d20cc5",
name="Pushed!",
author_node_id=40020,
url="www",
published_at=datetime(2020, 1, 1, tzinfo=timezone.utc),
).create_defaults().explode(with_primary_keys=True)))
releases, _ = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=1, day=30, tzinfo=timezone.utc),
datetime(year=2020, month=7, day=30, tzinfo=timezone.utc),
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches=".*", tags=".*", events=".*", match=ReleaseMatch.tag)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
index=Release.node_id.name)
await wait_deferred()
assert len(releases) == 7
assert (releases[matched_by_column] == ReleaseMatch.tag).all()
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=1, day=30, tzinfo=timezone.utc),
datetime(year=2020, month=7, day=30, tzinfo=timezone.utc),
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches=".*", tags=".*", events=".*", match=ReleaseMatch.event)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
index=Release.node_id.name)
await wait_deferred()
assert matched_bys == {"src-d/go-git": ReleaseMatch.event}
assert (releases[matched_by_column] == ReleaseMatch.event).all()
assert len(releases) == 1
assert releases.index[0] == 2756775
assert releases.iloc[0].to_dict() == {
Release.repository_full_name.name: "src-d/go-git",
Release.repository_node_id.name: 40550,
Release.author.name: "vmarkovtsev",
Release.author_node_id.name: 40020,
Release.name.name: "Pushed!",
Release.published_at.name: pd.Timestamp("2020-01-01 00:00:00", tzinfo=timezone.utc),
Release.tag.name: None,
Release.url.name: "www",
Release.sha.name: "8d20cc5916edf7cfa6a9c5ed069f0640dc823c12",
Release.commit_id.name: 2756775,
matched_by_column: ReleaseMatch.event,
}
rows = await rdb.fetch_all(select([ReleaseNotification]))
assert len(rows) == 1
values = dict(rows[0])
assert \
values[ReleaseNotification.updated_at.name] > values[ReleaseNotification.created_at.name]
del values[ReleaseNotification.updated_at.name]
del values[ReleaseNotification.created_at.name]
if rdb.url.dialect == "sqlite":
tzinfo = None
else:
tzinfo = timezone.utc
assert values == {
"account_id": 1,
"repository_node_id": 40550,
"commit_hash_prefix": "8d20cc5",
"resolved_commit_hash": "8d20cc5916edf7cfa6a9c5ed069f0640dc823c12",
"resolved_commit_node_id": 2756775, # noqa
"name": "Pushed!",
"author_node_id": 40020,
"url": "www",
"published_at": datetime(2020, 1, 1, tzinfo=tzinfo),
"cloned": False,
}
@with_defer
async def test_load_releases_events_unresolved(
branches, default_branches, mdb, pdb, rdb, release_loader, prefixer):
await rdb.execute(insert(ReleaseNotification).values(ReleaseNotification(
account_id=1,
repository_node_id=40550,
commit_hash_prefix="whatever",
name="Pushed!",
author_node_id=40020,
url="www",
published_at=datetime(2020, 1, 1, tzinfo=timezone.utc),
).create_defaults().explode(with_primary_keys=True)))
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
datetime(year=2019, month=1, day=30, tzinfo=timezone.utc),
datetime(year=2020, month=7, day=30, tzinfo=timezone.utc),
ReleaseSettings({"github.com/src-d/go-git": ReleaseMatchSetting(
branches=".*", tags=".*", events=".*", match=ReleaseMatch.event)}),
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
index=Release.node_id.name)
assert releases.empty
assert matched_bys == {"src-d/go-git": ReleaseMatch.event}
@pytest.mark.parametrize("prune", [False, True])
@with_defer
async def test__fetch_repository_commits_smoke(mdb, pdb, prune):
dags = await fetch_repository_commits(
{"src-d/go-git": _empty_dag()},
pd.DataFrame([
("d2a38b4a5965d529566566640519d03d2bd10f6c",
2757677,
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667,
611,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
prune, 1, (6366825,), mdb, pdb, None)
assert isinstance(dags, dict)
assert len(dags) == 1
hashes, vertexes, edges = dags["src-d/go-git"]
ground_truth = {
"31eae7b619d166c366bf5df4991f04ba8cebea0a": ["b977a025ca21e3b5ca123d8093bd7917694f6da7",
"d2a38b4a5965d529566566640519d03d2bd10f6c"],
"b977a025ca21e3b5ca123d8093bd7917694f6da7": ["35b585759cbf29f8ec428ef89da20705d59f99ec"],
"d2a38b4a5965d529566566640519d03d2bd10f6c": ["35b585759cbf29f8ec428ef89da20705d59f99ec"],
"35b585759cbf29f8ec428ef89da20705d59f99ec": ["<KEY>"],
"<KEY>": ["<KEY>"],
"<KEY>": ["<KEY>"],
"<KEY>": ["5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf"],
"5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf": ["5d7303c49ac984a9fec60523f2d5297682e16646"],
"5d7303c49ac984a9fec60523f2d5297682e16646": [],
}
for k, v in ground_truth.items():
vertex = np.where(hashes == k.encode())[0][0]
assert hashes[edges[vertexes[vertex]:vertexes[vertex + 1]]].astype("U40").tolist() == v
assert len(hashes) == 9
await wait_deferred()
dags2 = await fetch_repository_commits(
dags,
pd.DataFrame([
("d2a38b4a5965d529566566640519d03d2bd10f6c",
2757677,
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667,
611,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
prune, 1, (6366825,), Database("sqlite://"), pdb, None)
assert pickle.dumps(dags2) == pickle.dumps(dags)
with pytest.raises(Exception):
await fetch_repository_commits(
dags,
pd.DataFrame([
("1353ccd6944ab41082099b79979ded3223db98ec",
2755667, # noqa
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667, # noqa
611,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
prune, 1, (6366825,), Database("sqlite://"), pdb, None)
@pytest.mark.parametrize("prune", [False, True])
@with_defer
async def test__fetch_repository_commits_initial_commit(mdb, pdb, prune):
dags = await fetch_repository_commits(
{"src-d/go-git": _empty_dag()},
pd.DataFrame([
("5d7303c49ac984a9fec60523f2d5297682e16646",
2756216,
525,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
prune, 1, (6366825,), mdb, pdb, None)
hashes, vertexes, edges = dags["src-d/go-git"]
assert hashes == np.array(["5d7303c49ac984a9fec60523f2d5297682e16646"], dtype="S40")
assert (vertexes == np.array([0, 0], dtype=np.uint32)).all()
assert (edges == np.array([], dtype=np.uint32)).all()
@with_defer
async def test__fetch_repository_commits_cache(mdb, pdb, cache):
dags1 = await fetch_repository_commits(
{"src-d/go-git": _empty_dag()},
pd.DataFrame([
("d2a38b4a5965d529566566640519d03d2bd10f6c",
2757677,
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667,
611,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
False, 1, (6366825,), mdb, pdb, cache)
await wait_deferred()
dags2 = await fetch_repository_commits(
{"src-d/go-git": _empty_dag()},
pd.DataFrame([
("d2a38b4a5965d529566566640519d03d2bd10f6c",
2757677,
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667,
611,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
False, 1, (6366825,), None, None, cache)
assert pickle.dumps(dags1) == pickle.dumps(dags2)
fake_pdb = Database("sqlite://")
class FakeMetrics:
def get(self):
return defaultdict(int)
fake_pdb.metrics = {"hits": FakeMetrics(), "misses": FakeMetrics()}
with pytest.raises(Exception):
await fetch_repository_commits(
{"src-d/go-git": _empty_dag()},
pd.DataFrame([
("d2a38b4a5965d529566566640519d03d2bd10f6c",
2757677, # noqa
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667, # noqa
611,
"src-d/go-git")],
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
True, 1, (6366825,), None, fake_pdb, cache)
@with_defer
async def test__fetch_repository_commits_many(mdb, pdb):
dags = await fetch_repository_commits(
{"src-d/go-git": _empty_dag()},
pd.DataFrame([
("d2a38b4a5965d529566566640519d03d2bd10f6c",
2757677,
525,
"src-d/go-git"),
("31eae7b619d166c366bf5df4991f04ba8cebea0a",
2755667,
611,
"src-d/go-git")] * 50,
columns=["1", "2", "3", "4"],
),
("1", "2", "3", "4"),
False, 1, (6366825,), mdb, pdb, None)
assert len(dags["src-d/go-git"][0]) == 9
@with_defer
async def test__fetch_repository_commits_full(mdb, pdb, dag, cache, branch_miner, prefixer):
branches, _ = await branch_miner.extract_branches(dag, prefixer, (6366825,), mdb, None)
commit_ids = branches[Branch.commit_id.name].values
commit_dates = await mdb.fetch_all(select([NodeCommit.id, NodeCommit.committed_date])
.where(NodeCommit.id.in_(commit_ids)))
commit_dates = {r[0]: r[1] for r in commit_dates}
if mdb.url.dialect == "sqlite":
commit_dates = {k: v.replace(tzinfo=timezone.utc) for k, v in commit_dates.items()}
now = datetime.now(timezone.utc)
branches[Branch.commit_date] = [commit_dates.get(commit_id, now) for commit_id in commit_ids]
cols = (Branch.commit_sha.name, Branch.commit_id.name, Branch.commit_date,
Branch.repository_full_name.name)
commits = await fetch_repository_commits(
dag, branches, cols, False, 1, (6366825,), mdb, pdb, cache)
await wait_deferred()
assert len(commits) == 1
assert len(commits["src-d/go-git"][0]) == 1919
branches = branches[branches[Branch.branch_name.name] == "master"]
commits = await fetch_repository_commits(
commits, branches, cols, False, 1, (6366825,), mdb, pdb, cache)
await wait_deferred()
assert len(commits) == 1
assert len(commits["src-d/go-git"][0]) == 1919 # with force-pushed commits
commits = await fetch_repository_commits(
commits, branches, cols, True, 1, (6366825,), mdb, pdb, cache)
await wait_deferred()
assert len(commits) == 1
assert len(commits["src-d/go-git"][0]) == 1538 # without force-pushed commits
@with_defer
async def test__find_dead_merged_prs_smoke(mdb):
prs = await read_sql_query(
select([PullRequest]).where(PullRequest.merged_at.isnot(None)),
mdb, PullRequest, index=[PullRequest.node_id.name, PullRequest.repository_full_name.name])
prs["dead"] = False
prs.loc[prs[PullRequest.number.name].isin(force_push_dropped_go_git_pr_numbers), "dead"] = True
dead_prs = await PullRequestToReleaseMapper._find_dead_merged_prs(prs)
assert len(dead_prs) == len(force_push_dropped_go_git_pr_numbers)
assert dead_prs[Release.published_at.name].isnull().all()
assert (dead_prs[matched_by_column] == ReleaseMatch.force_push_drop).all()
dead_prs = await mdb.fetch_all(
select([PullRequest.number])
.where(PullRequest.node_id.in_(dead_prs.index.get_level_values(0).values)))
assert {pr[0] for pr in dead_prs} == set(force_push_dropped_go_git_pr_numbers)
@with_defer
async def test__fetch_repository_first_commit_dates_pdb_cache(
mdb, pdb, cache, releases_to_prs_mapper):
fcd1 = await releases_to_prs_mapper._fetch_repository_first_commit_dates(
["src-d/go-git"], 1, (6366825,), mdb, pdb, cache)
await wait_deferred()
fcd2 = await releases_to_prs_mapper._fetch_repository_first_commit_dates(
["src-d/go-git"], 1, (6366825,), Database("sqlite://"), pdb, None)
fcd3 = await releases_to_prs_mapper._fetch_repository_first_commit_dates(
["src-d/go-git"], 1, (6366825,), Database("sqlite://"),
Database("sqlite://"), cache)
assert len(fcd1) == len(fcd2) == len(fcd3) == 1
assert fcd1["src-d/go-git"] == fcd2["src-d/go-git"] == fcd3["src-d/go-git"]
assert fcd1["src-d/go-git"].tzinfo == timezone.utc
def test_extract_subdag_smoke():
hashes = np.array(["308a9f90707fb9d12cbcd28da1bc33da436386fe",
"33cafc14532228edca160e46af10341a8a632e3e",
"61a719e0ff7522cc0d129acb3b922c94a8a5dbca",
"a444ccadf5fddad6ad432c13a239c74636c7f94f"],
dtype="S40")
vertexes = np.array([0, 1, 2, 3, 3], dtype=np.uint32)
edges = np.array([3, 0, 0], dtype=np.uint32)
heads = np.array(["61a719e0ff7522cc0d129acb3b922c94a8a5dbca"], dtype="S40")
new_hashes, new_vertexes, new_edges = extract_subdag(hashes, vertexes, edges, heads)
assert (new_hashes == np.array(["308a9f90707fb9d12cbcd28da1bc33da436386fe",
"61a719e0ff7522cc0d129acb3b922c94a8a5dbca",
"a444ccadf5fddad6ad432c13a239c74636c7f94f"],
dtype="S40")).all()
assert (new_vertexes == np.array([0, 1, 2, 2], dtype=np.uint32)).all()
assert (new_edges == np.array([2, 0], dtype=np.uint32)).all()
def test_extract_subdag_empty():
hashes = np.array([], dtype="S40")
vertexes = np.array([0], dtype=np.uint32)
edges = np.array([], dtype=np.uint32)
heads = np.array(["61a719e0ff7522cc0d129acb3b922c94a8a5dbca"], dtype="S40")
new_hashes, new_vertexes, new_edges = extract_subdag(hashes, vertexes, edges, heads)
assert len(new_hashes) == 0
assert (new_vertexes == vertexes).all()
assert len(new_edges) == 0
def test_join_dags_smoke():
hashes = np.array(["308a9f90707fb9d12cbcd28da1bc33da436386fe",
"33cafc14532228edca160e46af10341a8a632e3e",
"a444ccadf5fddad6ad432c13a239c74636c7f94f"],
dtype="S40")
vertexes = np.array([0, 1, 2, 2], dtype=np.uint32)
edges = np.array([2, 0], dtype=np.uint32)
new_hashes, new_vertexes, new_edges = join_dags(
hashes, vertexes, edges, [("61a719e0ff7522cc0d129acb3b922c94a8a5dbca",
"308a9f90707fb9d12cbcd28da1bc33da436386fe",
0),
("308a9f90707fb9d12cbcd28da1bc33da436386fe",
"a444ccadf5fddad6ad432c13a239c74636c7f94f",
0),
("8d27ef15cc9b334667d8adc9ce538222c5ac3607",
"33cafc14532228edca160e46af10341a8a632e3e",
1),
("8d27ef15cc9b334667d8adc9ce538222c5ac3607",
"308a9f90707fb9d12cbcd28da1bc33da436386fe",
0)])
assert (new_hashes == np.array(["308a9f90707fb9d12cbcd28da1bc33da436386fe",
"33cafc14532228edca160e46af10341a8a632e3e",
"61a719e0ff7522cc0d129acb3b922c94a8a5dbca",
"8d27ef15cc9b334667d8adc9ce538222c5ac3607",
"a444ccadf5fddad6ad432c13a239c74636c7f94f"],
dtype="S40")).all()
assert (new_vertexes == np.array([0, 1, 2, 3, 5, 5], dtype=np.uint32)).all()
assert (new_edges == np.array([4, 0, 0, 0, 1], dtype=np.uint32)).all()
def test_mark_dag_access_smoke():
hashes = np.array(["308a9f90707fb9d12cbcd28da1bc33da436386fe",
"33cafc14532228edca160e46af10341a8a632e3e",
"61a719e0ff7522cc0d129acb3b922c94a8a5dbca",
"a444ccadf5fddad6ad432c13a239c74636c7f94f"],
dtype="S40")
# 33cafc14532228edca160e46af10341a8a632e3e -> 308a9f90707fb9d12cbcd28da1bc33da436386fe
# 33cafc14532228edca160e46af10341a8a632e3e -> 61a719e0ff7522cc0d129acb3b922c94a8a5dbca
# <KEY> -> 308a9f90707fb9d12cbcd28da1bc33da436386fe
# 308a9f90707fb9d12cbcd28da1bc33da436386fe -> a444ccadf5fddad6ad432c13a239c74636c7f94f
vertexes = np.array([0, 1, 3, 4, 4], dtype=np.uint32)
edges = np.array([3, 0, 2, 0], dtype=np.uint32)
heads = np.array(["33cafc14532228edca160e46af10341a8a632e3e",
"61a719e0ff7522cc0d129acb3b922c94a8a5dbca"], dtype="S40")
marks = mark_dag_access(hashes, vertexes, edges, heads, True)
assert_array_equal(marks, np.array([1, 0, 1, 1], dtype=np.int32))
heads = np.array(["<KEY>",
"33cafc14532228edca160e46af10341a8a632e3e"], dtype="S40")
    # 33cafc14532228edca160e46af10341a8a632e3e is the oldest, but it is the entry => takes all
marks = mark_dag_access(hashes, vertexes, edges, heads, True)
assert_array_equal(marks, np.array([1, 1, 1, 1], dtype=np.int32))
marks = mark_dag_access(hashes, vertexes, edges, heads, False)
assert_array_equal(marks, np.array([0, 1, 0, 0], dtype=np.int32))
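# Illustrative helper, not part of the original test suite: the (hashes,
# vertexes, edges) triple used by these DAG tests is assumed to be a CSR-style
# adjacency list, where edges[vertexes[i]:vertexes[i + 1]] holds the indices of
# the commits that hashes[i] points to (cf. the ground_truth checks above).
def _show_dag_adjacency(hashes, vertexes, edges):
    for i in range(len(hashes)):
        targets = hashes[edges[vertexes[i]:vertexes[i + 1]]]
        print(hashes[i].decode(), "->", [t.decode() for t in targets])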
def test_mark_dag_access_empty():
hashes = np.array([], dtype="S40")
vertexes = np.array([0], dtype=np.uint32)
edges = np.array([], dtype=np.uint32)
heads = np.array(["33cafc14532228edca160e46af10341a8a632e3e",
"61a719e0ff7522cc0d129acb3b922c94a8a5dbca"], dtype="S40")
marks = mark_dag_access(hashes, vertexes, edges, heads, True)
assert len(marks) == 0
async def test_partition_dag(dag):
hashes, vertexes, edges = dag["src-d/go-git"]
p = partition_dag(hashes, vertexes, edges, [b"ad9456267524e08efcf4486cadfb6cef8d182677"])
assert p.tolist() == [b"ad9456267524e08efcf4486cadfb6cef8d182677"]
p = partition_dag(hashes, vertexes, edges, [b"7cd021554eb318165dd28988fe1675a5e5c32601"])
assert p.tolist() == [b"7cd021554eb318165dd28988fe1675a5e5c32601",
b"ced875aec7bef9113e1c37b1b811a59e17dbd138"]
def test_partition_dag_empty():
hashes = np.array([], dtype="S40")
vertexes = np.array([0], dtype=np.uint32)
edges = np.array([], dtype=np.uint32)
p = partition_dag(hashes, vertexes, edges, ["ad9456267524e08efcf4486cadfb6cef8d182677"])
assert len(p) == 0
async def test__fetch_commit_history_dag_stops(mdb, dag):
hashes, vertexes, edges = dag["src-d/go-git"]
subhashes, subvertexes, subedges = extract_subdag(
hashes, vertexes, edges,
np.array([b"364866fc77fac656e103c1048dd7da4764c6d9d9"], dtype="S40"))
assert len(subhashes) < len(hashes)
_, newhashes, newvertexes, newedges = await _fetch_commit_history_dag(
subhashes, subvertexes, subedges,
["f6305131a06bd94ef39e444b60f773db75b054f6"],
[2755363],
"src-d/go-git", (6366825,), mdb)
assert (newhashes == hashes).all()
assert (newvertexes == vertexes).all()
assert (newedges == edges).all()
@with_defer
async def test_mark_dag_parents_smoke(
branches, default_branches, mdb, pdb, rdb, release_match_setting_tag, dag,
release_loader, prefixer):
hashes, vertexes, edges = dag["src-d/go-git"]
time_from = datetime(year=2015, month=1, day=1, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=12, day=1, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
release_match_setting_tag,
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
release_hashes = releases[Release.sha.name].values.astype("S40")
release_dates = releases[Release.published_at.name].values
ownership = mark_dag_access(hashes, vertexes, edges, release_hashes, True)
parents = mark_dag_parents(hashes, vertexes, edges, release_hashes, release_dates, ownership)
array = np.array
uint32 = np.uint32
ground_truth = array([array([1], dtype=uint32), array([2], dtype=uint32),
array([3], dtype=uint32), array([4], dtype=uint32),
array([5, 8, 9], dtype=uint32), array([6], dtype=uint32),
array([7], dtype=uint32), array([8], dtype=uint32),
array([9], dtype=uint32), array([10, 11, 14, 19], dtype=uint32),
array([11, 12], dtype=uint32), array([12], dtype=uint32),
array([13], dtype=uint32), array([14], dtype=uint32),
array([15], dtype=uint32), array([16], dtype=uint32),
array([17], dtype=uint32), array([18], dtype=uint32),
array([19], dtype=uint32), array([20], dtype=uint32),
array([21], dtype=uint32), array([22, 23], dtype=uint32),
array([23], dtype=uint32), array([24], dtype=uint32),
array([25], dtype=uint32), array([26], dtype=uint32),
array([27], dtype=uint32), array([28], dtype=uint32),
array([29], dtype=uint32), array([30], dtype=uint32),
array([31], dtype=uint32), array([32], dtype=uint32),
array([34], dtype=uint32), array([], dtype=uint32),
array([35], dtype=uint32), array([36], dtype=uint32),
array([37], dtype=uint32), array([38], dtype=uint32),
array([39], dtype=uint32), array([40], dtype=uint32),
array([41], dtype=uint32), array([42], dtype=uint32),
array([43], dtype=uint32), array([44], dtype=uint32),
array([46, 47], dtype=uint32), array([], dtype=uint32),
array([47], dtype=uint32), array([48], dtype=uint32),
array([49], dtype=uint32), array([50], dtype=uint32),
array([51], dtype=uint32), array([52], dtype=uint32),
array([], dtype=uint32)], dtype=object)
for yours, mine in zip(parents, ground_truth):
assert (yours == mine).all()
@with_defer
async def test_mark_dag_parents_empty(
branches, default_branches, mdb, pdb, rdb, release_match_setting_tag, release_loader,
prefixer):
time_from = datetime(year=2015, month=1, day=1, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=12, day=1, tzinfo=timezone.utc)
releases, matched_bys = await release_loader.load_releases(
["src-d/go-git"],
branches, default_branches,
time_from,
time_to,
release_match_setting_tag,
LogicalRepositorySettings.empty(),
prefixer,
1,
(6366825,),
mdb,
pdb,
rdb,
None,
)
release_hashes = releases[Release.sha.name].values
release_dates = releases[Release.published_at.name].values
hashes = np.array([], dtype="S40")
vertexes = np.array([0], dtype=np.uint32)
edges = np.array([], dtype=np.uint32)
ownership = mark_dag_access(hashes, vertexes, edges, release_hashes, True)
parents = mark_dag_parents(hashes, vertexes, edges, release_hashes, release_dates, ownership)
assert len(parents) == len(release_hashes)
for p in parents:
assert p == []
@with_defer
async def test_mine_releases_full_span(mdb, pdb, rdb, release_match_setting_tag, prefixer):
time_from = datetime(year=2015, month=1, day=1, tzinfo=timezone.utc)
time_to = datetime(year=2020, month=12, day=1, tzinfo=timezone.utc)
releases, avatars, matched_bys, _ = await mine_releases(
["src-d/go-git"], {}, None, {}, time_from, time_to, LabelFilter.empty(),
JIRAFilter.empty(), release_match_setting_tag, LogicalRepositorySettings.empty(),
prefixer, 1, (6366825,), mdb, pdb, rdb, None, with_deployments=False)
assert len(releases) == 53
assert len(avatars) == 124
assert matched_bys == {"github.com/src-d/go-git": ReleaseMatch.tag}
for details, facts in releases:
assert details[Release.name.name]
assert details[Release.url.name]
assert details[Release.repository_full_name.name] == "github.com/src-d/go-git"
assert len(facts.commit_authors) > 0
assert all(a >= 0 for a in facts.commit_authors)
assert facts.age
assert facts.publisher >= 0
assert facts.additions > 0
assert facts.deletions > 0
assert facts.commits_count > 0
assert len(facts["prs_" + PullRequest.number.name]) or \
            facts.published <= pd.Timestamp("2017-02-01 09:51:10")
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import dask.dataframe as dd
from dask.dataframe.utils import (shard_df_on_index, meta_nonempty, make_meta,
raise_on_meta_error)
import pytest
def test_shard_df_on_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
result = list(shard_df_on_index(df, [20, 50]))
assert list(result[0].index) == [10]
assert list(result[1].index) == [20, 30, 40]
assert list(result[2].index) == [50, 60]
def test_make_meta():
df = pd.DataFrame({'a': [1, 2, 3], 'b': list('abc'), 'c': [1., 2., 3.]},
index=[10, 20, 30])
# Pandas dataframe
meta = make_meta(df)
assert len(meta) == 0
assert (meta.dtypes == df.dtypes).all()
assert isinstance(meta.index, type(df.index))
# Pandas series
meta = make_meta(df.a)
assert len(meta) == 0
assert meta.dtype == df.a.dtype
assert isinstance(meta.index, type(df.index))
# Pandas index
meta = make_meta(df.index)
assert isinstance(meta, type(df.index))
assert len(meta) == 0
# Dask object
ddf = dd.from_pandas(df, npartitions=2)
assert make_meta(ddf) is ddf._meta
# Dict
meta = make_meta({'a': 'i8', 'b': 'O', 'c': 'f8'})
assert isinstance(meta, pd.DataFrame)
assert len(meta) == 0
assert (meta.dtypes == df.dtypes).all()
assert isinstance(meta.index, pd.RangeIndex)
# Iterable
meta = make_meta([('a', 'i8'), ('c', 'f8'), ('b', 'O')])
assert (meta.columns == ['a', 'c', 'b']).all()
assert len(meta) == 0
assert (meta.dtypes == df.dtypes[meta.dtypes.index]).all()
assert isinstance(meta.index, pd.RangeIndex)
# Tuple
meta = make_meta(('a', 'i8'))
assert isinstance(meta, pd.Series)
assert len(meta) == 0
assert meta.dtype == 'i8'
assert meta.name == 'a'
# With index
meta = make_meta({'a': 'i8', 'b': 'i4'}, pd.Int64Index([1, 2], name='foo'))
assert isinstance(meta.index, pd.Int64Index)
assert len(meta.index) == 0
meta = make_meta(('a', 'i8'), pd.Int64Index([1, 2], name='foo'))
assert isinstance(meta.index, pd.Int64Index)
assert len(meta.index) == 0
# Numpy scalar
meta = make_meta(np.float64(1.0))
assert isinstance(meta, np.float64)
# Python scalar
meta = make_meta(1.0)
assert isinstance(meta, np.float64)
# Timestamp
x = pd.Timestamp(2000, 1, 1)
meta = make_meta(x)
assert meta is x
# Dtype expressions
meta = make_meta('i8')
assert isinstance(meta, np.int64)
meta = make_meta(float)
assert isinstance(meta, np.dtype(float).type)
meta = make_meta(np.dtype('bool'))
assert isinstance(meta, np.bool_)
assert pytest.raises(TypeError, lambda: make_meta(None))
def test_meta_nonempty():
df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
'B': list('abc'),
'C': 'bar',
'D': np.float32(1),
'E': np.int32(1),
'F': pd.Timestamp('2016-01-01'),
'G': pd.date_range('2016-01-01', periods=3,
tz='America/New_York'),
'H': pd.Timedelta('1 hours', 'ms'),
'I': np.void(b' ')},
columns=list('DCBAHGFEI'))
df2 = df1.iloc[0:0]
df3 = meta_nonempty(df2)
assert (df3.dtypes == df2.dtypes).all()
assert df3['A'][0] == 'Alice'
assert df3['B'][0] == 'foo'
assert df3['C'][0] == 'foo'
assert df3['D'][0] == np.float32(1)
assert df3['D'][0].dtype == 'f4'
assert df3['E'][0] == np.int32(1)
assert df3['E'][0].dtype == 'i4'
assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00')
assert df3['G'][0] == pd.Timestamp('1970-01-01 00:00:00',
tz='America/New_York')
assert df3['H'][0] == pd.Timedelta('1', 'ms')
assert df3['I'][0] == 'foo'
s = meta_nonempty(df2['A'])
assert s.dtype == df2['A'].dtype
assert (df3['A'] == s).all()
def test_meta_duplicated():
df = pd.DataFrame(columns=['A', 'A', 'B'])
res = meta_nonempty(df)
exp = pd.DataFrame([['foo', 'foo', 'foo'],
['foo', 'foo', 'foo']],
index=['a', 'b'],
columns=['A', 'A', 'B'])
tm.assert_frame_equal(res, exp)
def test_meta_nonempty_index():
idx = pd.RangeIndex(1, name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.RangeIndex
assert res.name == idx.name
idx = pd.Int64Index([1], name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.Int64Index
assert res.name == idx.name
idx = pd.Index(['a'], name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.Index
assert res.name == idx.name
idx = pd.DatetimeIndex(['1970-01-01'], freq='d',
tz='America/New_York', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.DatetimeIndex
assert res.tz == idx.tz
assert res.freq == idx.freq
assert res.name == idx.name
    idx = pd.PeriodIndex(['1970-01-01'], freq='d', name='foo')
import numpy as np
#np.set_printoptions(precision=2)
import pandas as pd
from typing import Any, Dict, List, Tuple, NoReturn
import argparse
import os
import pickle
import json
from sklearn.mixture import BayesianGaussianMixture
def parse_arguments() -> Any:
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
required=True,
type=str,
help="Directory where the features (npy files) are saved",
)
parser.add_argument(
"--model_dir",
required=True,
type=str,
help="Directory where the model is saved",
)
parser.add_argument(
"--result_dir",
required=True,
type=str,
help="Directory where the model is saved",
)
parser.add_argument("--mode",
required=True,
type=str,
help="train/val/test",
choices=['train', 'test', 'val'])
parser.add_argument("--obs_len",
default=2,
type=int,
help="Observed length of the trajectory in seconds",
choices=[1,2,3,4,5])
parser.add_argument("--filter",
default='ekf',
type=str,
help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol",
choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
return parser.parse_args()
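# Illustrative invocation only -- the script name below is a placeholder; the
# flags mirror the parser defined above:
#   python cluster_bgm.py --data_dir ./features --model_dir ./models \
#       --result_dir ./results --mode train --obs_len 2 --filter ekf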
def train(data:np.ndarray,
obs_len:int,
filter_name:str,
model_dir:str,
result_dir:str,
          save_model:bool=True)->None:
print('[Bayesian Gaussian Mixture Clustering][train] creating model...')
bgm = BayesianGaussianMixture(n_components=3,
covariance_type="full",
max_iter=1000,
tol=1e-5,
n_init=10,
random_state=7,
weight_concentration_prior_type='dirichlet_process',
init_params="kmeans")
print('[Bayesian Gaussian Mixture Clustering][train] training...')
_y = bgm.fit_predict(X=data)
_y = np.expand_dims(_y, axis=1)
print(f'[Bayesian Gaussian Mixture Clustering][train] converged?:{bgm.converged_}')
print('[Bayesian Gaussian Mixture Clustering][train] params (center and covariance):')
for i, m, c, w in zip(range(1, 4), bgm.means_, bgm.covariances_, bgm.weights_):
print(f'\tc_{i}-> mean: {m}')
print(f'\t\tcov: {c}')
print(f'\t\tweight: {w}')
print('[Bayesian Gaussian Mixture Clustering][train] results:')
_c, _l = np.unique(_y, return_counts=True)
for i, c in zip(_c,_l):
print (f'\tc_{i}: {c}')
if save_model:
model_file=f'bgm_{obs_len}s_{filter_name}.pkl'
print (f'[Bayesian Gaussian Mixture Clustering][train] saving model ({model_file})...')
with open(os.path.join(model_dir, model_file), 'wb') as f:
pickle.dump(bgm, f)
result_file = f'results_bgm_train_{obs_len}s_{filter_name}.csv'
print (f'[Bayesian Gaussian Mixture Clustering][train] saving results ({result_file})...')
labels = ['mean_velocity',
'mean_acceleration',
'mean_deceleration',
'std_lateral_jerk',
'driving_style']
result = np.concatenate((data, _y), axis=1)
    df = pd.DataFrame(data=result, columns=labels)
import re
from pathlib import Path
from urllib.parse import urlparse, parse_qsl
import lxml.html
import pandas as pd
import requests
from boatrace.util import Config
config = Config(path=Path(__file__).parent / "params.yaml")
racer_class = config.get_racer_class()
field_name2code = config.get_field_code()
class AdvanceInfo:
def __init__(self, url):
self.parse_url = urlparse(url)
self.url_pat = "beforeinfo"
if self.url_pat not in self.parse_url.path:
raise ValueError("url not matched {}".format(self.url_pat))
request = requests.get(url)
root = lxml.html.fromstring(request.text)
self.table = self.scrape(root)
def scrape(self, root):
players = 6
table = []
enf_info = []
xpath_race_prefix = "/html/body/main/div/div/div/div[2]/div[4]/div[1]/div[1]/table/"
xpath_weather_prefix = "/html/body/main/div/div/div/div[2]/div[4]/div[2]/div[2]/div[1]/"
temp_xpath = xpath_weather_prefix + "div[1]/div/span[2]/text()"
weather_xpath = xpath_weather_prefix + "div[2]/div/span/text()"
wind_speed_xpath = xpath_weather_prefix + "div[3]/div/span[2]/text()"
water_temp_xpath = xpath_weather_prefix + "div[5]/div/span[2]/text()"
wave_height_xpath = xpath_weather_prefix + "div[6]/div/span[2]/text()"
for xpath in [temp_xpath, weather_xpath, wind_speed_xpath,
water_temp_xpath, wave_height_xpath]:
for elem in root.xpath(xpath):
enf_info.append(elem.strip())
for idx in range(1, players + 1):
elements = []
player_elem = xpath_race_prefix + "tbody[{}]/".format(idx)
weight_xpath = player_elem + "tr[1]/td[4]/text()"
exhibition_xpath = player_elem + "tr[1]/td[5]/text()"
tilt_xpath = player_elem + "tr[1]/td[6]/text()"
assigned_weight_xpath = player_elem + "tr[3]/td[1]/text()"
for xpath in [weight_xpath, exhibition_xpath, tilt_xpath,
assigned_weight_xpath]:
for elem in root.xpath(xpath):
elements.append(elem.strip())
table.append(enf_info + elements)
return table
def preprocess(self):
pass
class StartTable:
def __init__(self, url=None, path=None):
self.header = ["date", "field_name", "race_idx", "registration_number",
"age", "weight", "class",
"global_win_perc", "global_win_in_second",
"local_win_perc", "local_win_in_second",
"mortar", "mortar_win_in_second", "board",
"board_win_in_second"]
self.racer_class = racer_class
self.field_name2code = field_name2code
self.is_scrape = False
if url or path:
if url:
self.parse_url = urlparse(url)
self.url_pat = "racelist"
if self.url_pat not in self.parse_url.path:
raise ValueError("url not matched {}".format(self.url_pat))
request = requests.get(url)
root = lxml.html.fromstring(request.text)
self.start_table = self.scrape(root)
self.is_scrape = True
elif path:
if not path.exists():
raise FileExistsError("{} is not exist".format(path))
self.__parse(path)
else:
raise ValueError("set url or path")
def __parse(self, path, encoding="cp932"):
date = path.stem[1:]
tables = []
raw_lines = []
begin_idx = []
end_idx = []
race_header_length = 12
result_header_length = 5
interval_per_race_length = 1
interval_per_day_length = 12
players = 6
with path.open("r", encoding=encoding) as lines:
for line_no, line in enumerate(lines):
raw_line = line.strip()
raw_lines.append(raw_line)
if "BBGN" in raw_line:
begin_idx.append(line_no)
if "BEND" in raw_line:
end_idx.append(line_no)
for b_idx, e_idx in zip(begin_idx, end_idx):
            # skip blocks too short to contain race data
if e_idx - b_idx < 10:
continue
# skip headers
race_info = raw_lines[b_idx + 1].strip().replace("\u3000",
"").split()
field_name = race_info[0].replace("ボートレース", "")
one_day_lines = raw_lines[b_idx + race_header_length:e_idx]
end_race_idx = 0
for race_idx in range(interval_per_day_length):
if race_idx == 0:
begin_race_idx = race_idx * players + result_header_length
else:
begin_race_idx = end_race_idx + result_header_length + interval_per_race_length
end_race_idx = begin_race_idx + players
for line in one_day_lines[begin_race_idx:end_race_idx]:
tables.append([date, field_name,
race_idx + 1] + self.__preprocess_line(line))
self.start_table = tables
def __preprocess_line(self, line):
        # predefined field widths used to split the fixed-width part of the line
win_perc_length = 5
second_win_perc_length = 6
id_length = 3
lengths = [win_perc_length, second_win_perc_length] * 2 + \
[id_length, second_win_perc_length] * 2
raw_line = line.strip().replace("\u3000", "")
c = None
for c in self.racer_class.keys():
if c in raw_line:
break
# split racer_class(A1/A2/B1/B2)
split_line = line.split(c)
# split_line[0] -> "1 3789..."
racer_idx = split_line[0][0].strip()
racer_info = split_line[0][1:].strip()
reg_number = re.match(r"\d+", racer_info).group()
racer_info = racer_info.replace(reg_number, "")
age = re.search(r"\d+", racer_info).group()
        # drop the age token so the next numeric match is the weight
racer_info = racer_info.replace(age, "", 1)
weight = re.search(r"\d+", racer_info).group()
race_times = split_line[1]
start = 0
time_info = []
for length in lengths:
time_info.append(race_times[start:start + length].strip())
start += length
return [racer_idx, reg_number, age, weight, c] + time_info
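    # Note on __preprocess_line above (inferred mapping, not from the original
    # source): after splitting on the racer class token, the rest of the line is
    # fixed-width; the widths in `lengths` appear to line up with the header as
    #   5 -> global_win_perc,  6 -> global_win_in_second,
    #   5 -> local_win_perc,   6 -> local_win_in_second,
    #   3 -> mortar,           6 -> mortar_win_in_second,
    #   3 -> board,            6 -> board_win_in_second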
def scrape(self, root):
players = 6
start_table = []
xpath_prefix = "/html/body/main/div/div/div/div[2]/div[4]/table/"
query_params = dict(parse_qsl(self.parse_url.query))
date = query_params["hd"]
race_idx = query_params["rno"]
field_name = query_params["jcd"]
for idx in range(1, players + 1):
player_elem_1 = xpath_prefix + "tbody[{}]/tr[1]/td[3]/div[1]/".format(
idx)
player_elem_2 = xpath_prefix + "tbody[{}]/tr[1]/td[3]/div[2]/".format(
idx)
player_elem_3 = xpath_prefix + "tbody[{}]/tr[1]/td[3]/div[3]/".format(
idx)
race_results = []
for td_idx in range(4, 9):
race_results.append(
xpath_prefix + "tbody[{}]/tr[1]/td[{}]/text()".format(idx,
td_idx))
reg_number_xpath = player_elem_1 + "text()"
class_xpath = player_elem_1 + "span/text()"
profile_url_xpath = player_elem_2 + "a/@href"
player_info_xpath = player_elem_3 + "text()"
elements = []
for elem in root.xpath(reg_number_xpath):
match = re.search(r"\d+", elem.strip())
if match:
elements.append(match.group())
for elem in root.xpath(class_xpath):
elements.append(elem.strip())
for elem in root.xpath(profile_url_xpath):
parse_profile_url = urlparse(elem.strip())
profile_url = self.parse_url._replace(
path=parse_profile_url.path)
profile_url = profile_url._replace(
query=parse_profile_url.query)
elements.append(profile_url.geturl())
for elem in root.xpath(player_info_xpath):
for e in elem.strip().split("/"):
elements.append(e.strip())
for xpath in race_results:
for elem in root.xpath(xpath):
elements.append(elem.strip())
start_table.append([date, field_name, race_idx] + elements)
return start_table
def preprocess(self):
int_cols = ["age"]
float_cols = ["weight", "global_win_perc", "global_win_in_second",
"local_win_perc", "local_win_in_second",
"mortar_win_in_second", "board_win_in_second"]
cat_cols = ["registration_number", "mortar", "board"]
if self.is_scrape:
df = pd.DataFrame(self.start_table).drop(
columns=[5, 6, 7, 10, 11, 12,
15, 18, 21, 24])
df = df.loc[:,
[0, 1, 2, 3, 8, 9, 4, 13, 14, 16, 17, 19, 20, 22, 23]]
df.columns = self.header
df["field_name"] = df["field_name"].astype(int)
df["date"] = pd.to_datetime(df["date"], format="%Y%m%d")
df["race_idx"] = df["race_idx"].astype(int)
df["age"] = df["age"].str.replace("歳", "")
df["weight"] = df["weight"].str.replace("kg", "")
df["class"] = df["class"].map(self.racer_class)
for col in int_cols:
df[col] = df[col].astype(int)
for col in float_cols:
df[col] = df[col].astype(float)
for col in cat_cols:
df[col] = df[col].astype("category")
return df
else:
# drop idx
df = pd.DataFrame(self.start_table).drop(columns=[3])
df.columns = self.header
df["field_name"] = df["field_name"].map(self.field_name2code)
df["date"] = | pd.to_datetime(df["date"], format="%y%m%d") | pandas.to_datetime |
#!/usr/bin/env python
# encoding: utf-8
'''
\ \ / /__| | ___ _ _ __ / ___| | | | / \ |_ _|
\ V / _ \ |/ / | | | '_ \ | | | |_| | / _ \ | |
| | __/ <| |_| | | | | | |___| _ |/ ___ \ | |
|_|\___|_|\_\\__,_|_| |_| \____|_| |_/_/ \_\___
==========================================================================
@author: CYK
@license: School of Informatics, Edinburgh
@contact: <EMAIL>
@file: plot_graph.py
@time: 08/03/2018 19:41
@desc:
'''
import matplotlib.pyplot as plt
import os
import pandas as pd
# pp = []
# bleu = []
# val_loss=[]
# mean_loss=[]
data = {'pp':[],
'bleu':[],
'mean_loss':[] ,
'val_loss':[]
}
save_dir='plot_data'
def save_data(data, csv_name, subdir=False, save_dir=save_dir):
assert csv_name[-4:] == '.csv', "Error: didnot give a valid csv_name!"
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if subdir is not False:
subdir = os.path.join(save_dir, subdir)
if not os.path.isdir(subdir):
os.mkdir(subdir)
csv_file = os.path.join(subdir, csv_name)
# =========================
# 1. save to txt
# with open(filename, 'w') as f:
# f.write(str(history))
# ==========================
hist = pd.DataFrame.from_dict(data, orient='columns')
hist.to_csv(csv_file)
print('History is written into {}'.format(csv_file))
print('-'*80)
def save_fig(plt, plot_filename, plot_dir='plot_data'):
print("plot_dir:", plot_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
filename = os.path.join(plot_dir, plot_filename)
plt.savefig('{}'.format(filename))
print('{} saved!'.format(filename))
def plot_all_history(subdir, plot_filename='default.pdf', figsize=(16, 9)):
subdir = os.path.join(save_dir, subdir)
assert os.path.isdir(subdir) == True, "Error: {} does not exists!".format(subdir)
# sum_plot = os.path.join(save_dir, 'plot_all')
sum_plot = subdir
if not os.path.isdir(sum_plot):
os.mkdir(sum_plot)
# set color list
# colors = [c for c in list(matplotlib.colors.cnames.keys()) if not c.startswith('light')]
colors = ['green','red','blue','goldenrod','black','lime','cyan','chartreuse','yellow','m','purple','olive','salmon','darkred','pink']
markers = ['d', '^', 's', '*']
fontsize = 10
plt.figure(figsize=figsize)
plt.subplot(221)
for i, filename in enumerate(os.listdir(subdir)):
if filename[-4:] != '.csv': continue
csv_file = os.path.join(subdir, filename)
data = pd.read_csv(csv_file)
line_label = filename[:-4]
pp = data['pp']
epochs = range(1, len(pp) + 1)
# plot pp
# plt.plot(epochs, acc, color=colors[i%len(colors)], linestyle='-', label='{} training acc'.format(line_label))
plt.plot(epochs, pp, color=colors[i % len(colors)], marker=markers[i % len(markers)], linestyle='-',
label='{}'.format(line_label))
# plt.title('Perplexity', fontsize=fontsize)
plt.xlabel('Epochs')
plt.ylabel('Perplexity')
plt.legend()
plt.grid()
plt.subplot(222)
for i, filename in enumerate(os.listdir(subdir)):
if filename[-4:] != '.csv': continue
csv_file = os.path.join(subdir, filename)
data = pd.read_csv(csv_file)
line_label = filename[:-4]
bleu = data['bleu']
epochs = range(1, len(bleu) + 1)
# plot bleu
# plt.plot(epochs, acc, color=colors[i%len(colors)], linestyle='-', label='{} training acc'.format(line_label))
plt.plot(epochs, bleu, color=colors[i % len(colors)], marker=markers[i % len(markers)], linestyle='-.',
label='{}'.format(line_label))
# plt.title('BLEU', fontsize=fontsize)
plt.xlabel('Epochs')
plt.ylabel('BLEU')
plt.legend()
plt.grid()
# # plot
plt.subplot(223)
for i, filename in enumerate(os.listdir(subdir)):
if filename[-4:] != '.csv': continue
csv_file = os.path.join(subdir, filename)
        data = pd.read_csv(csv_file)
#! /usr/bin/env python
import maple
import maple.data as data
import maple.audio as audio
import numpy as np
import joblib
import pandas as pd
import argparse
import datetime
import sounddevice as sd
from scipy import signal
from pathlib import Path
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
labels = {
0: 'none',
1: 'whine',
2: 'howl',
3: 'bark',
4: 'play',
5: 'scratch_cage',
6: 'scratch_door',
}
class LabelAudio(object):
"""Label audio from Audio"""
def __init__(self, args=argparse.Namespace()):
self.menu = {
'home': {
'msg': 'Press [s] to start, Press [q] to quit. Response: ',
'function': self.menu_handle,
},
'preview': {
'msg': 'Use clip? [y]es, [n]o, [r]epeat, [q]uit. Response: ',
'function': self.preview_handle,
},
'label': {
                'msg': '[0] none, [1] whine, [2] howl, [3] bark, [4] play, [5] cage, [6] door, [r]epeat, [R]epeat full, [s] to skip, [q]uit. Response: '.\
format(', '.join(['[' + str(key) + '] ' + val for key, val in labels.items()])),
'function': self.label_handle,
},
}
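        # (Descriptive note, inferred from the structure above: self.state picks
        # the active menu entry; its 'msg' is shown as the prompt and its
        # 'function' is expected to handle the user's response.)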
if args.label_data is None:
raise Exception("You must supply --label-data")
self.state = 'home'
self.subevent_time = 0.25 # in seconds
self.cols = [
'session_id',
'event_id',
'subevent_id',
't_start',
't_end',
'label',
'date_labeled',
]
self.label_data_path = Path(args.label_data)
if self.label_data_path.exists():
self.data = pd.read_csv(self.label_data_path, sep='\t')
else:
            self.data = pd.DataFrame({}, columns=self.cols)
"""
Generating data from the CarRacing gym environment.
!!! DOES NOT WORK ON TITANIC, DO IT AT HOME, THEN SCP !!!
"""
import argparse
from os import makedirs
from os.path import join, exists
import gym
import numpy as np
from utils.misc import sample_continuous_policy
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
import sys
sys.path.append("..")
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.model.models import DRLAgent
# from finrl.trade.backtest import backtest_stats, backtest_plot, get_daily_return, get_baseline
import itertools
def generate_data(rollouts, data_dir, noise_type): # pylint: disable=R0914
""" Generates data """
assert exists(data_dir), "The data directory does not exist..."
df = YahooDownloader(start_date = '2009-01-01',
end_date = '2021-01-01',
ticker_list = ['AAPL']).fetch_data()
    df = df.sort_values(['date','tic'],ignore_index=True)
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list = config.TECHNICAL_INDICATORS_LIST,
use_turbulence=True,
user_defined_feature = False)
processed = fe.preprocess_data(df)
list_ticker = processed["tic"].unique().tolist()
list_date = list(pd.date_range(processed['date'].min(),processed['date'].max()).astype(str))
combination = list(itertools.product(list_date,list_ticker))
    processed_full = pd.DataFrame(combination, columns=["date", "tic"])
"""
Module contains tools for processing files into DataFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import FilePathOrBuffer, StorageOptions, Union
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, generic
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import IOHandles, get_handle, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
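# Illustrative sketch only (not part of pandas): a BOM typically shows up glued
# to the first header field, e.g. "\ufeffcol1", and is stripped along the lines of
#   name = name[len(_BOM):] if name.startswith(_BOM) else name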
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will be dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision pandas converter, and
'round_trip' for the round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
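# Hedged illustrative sketch (not part of the pandas source): the docstring above
# documents many interacting parameters, so the small function below combines a
# few of them (usecols, dtype, parse_dates, na_values). The CSV text and column
# names are invented for illustration only.
def _example_read_csv_parameters():
    import io
    import pandas as pd
    csv_text = "date,foo,bar\n2021-01-01,1,x\n2021-01-02,n/a,y\n"
    return pd.read_csv(
        io.StringIO(csv_text),
        usecols=["date", "foo"],  # subset of columns; element order is ignored
        dtype={"foo": "Int64"},   # nullable integer keeps the missing value as <NA>
        parse_dates=["date"],     # ISO 8601 strings take the fast parsing path
        na_values=["n/a"],        # appended to the default NaN strings
    )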
def validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
if not (
is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1)
nrows = kwds.get("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"lineterminator": None,
"header": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_format": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_map": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
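# Hedged illustrative sketch (not part of the pandas source): per the docstring
# above, ``chunksize``/``iterator`` make ``read_csv`` return a ``TextFileReader``,
# which is a context manager as of pandas 1.2. The in-memory CSV is invented.
def _example_read_csv_chunked():
    import io
    buf = io.StringIO("a,b\n1,2\n3,4\n5,6\n")
    total = 0
    with read_csv(buf, chunksize=2) as reader:  # TextFileReader used as a context manager
        for chunk in reader:                    # each chunk is a DataFrame of up to 2 rows
            total += int(chunk["a"].sum())
    return total  # 9 for the toy data above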
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer', optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A fixed-width formatted file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
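# Hedged illustrative sketch (not part of the pandas source): the docstring above
# describes two equivalent ways to declare fixed-width fields; both are shown on
# an invented two-column layout.
def _example_read_fwf_colspecs():
    import io
    data = "20210101  12.5\n20210102   7.0\n"
    by_colspecs = read_fwf(io.StringIO(data), colspecs=[(0, 8), (8, 14)], header=None)
    by_widths = read_fwf(io.StringIO(data), widths=[8, 6], header=None)  # converted internally to the same colspecs
    return by_colspecs, by_widths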
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._get_options_with_defaults(engine)
options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
# pandas\io\parsers.py:907: error: Incompatible types in assignment
# (expression has type "object", variable has type "Union[int, str,
# None]") [assignment]
for argname, default in _fwf_defaults.items(): # type: ignore[assignment]
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.copy()
fallback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.getfilesystemencoding() or "utf-8"
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.get(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mapping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mapping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
)
# error: Too many arguments for "ParserBase"
return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
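# Hedged illustrative sketch (not part of the pandas source): ``get_chunk`` pulls
# explicitly sized chunks instead of iterating. ``TextFileReader`` is internal,
# so this is only an illustration on an invented in-memory CSV.
def _example_text_file_reader_get_chunk():
    import io
    reader = TextFileReader(io.StringIO("x\n1\n2\n3\n"), engine="python", delimiter=",", chunksize=1)
    first = reader.get_chunk()   # honours the configured chunksize (1 row)
    rest = reader.get_chunk(2)   # an explicit size overrides the configured chunksize
    reader.close()
    return first, rest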
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or isinstance(index_col, bool):
index_col = []
return (
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
)
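# Hedged illustration (not part of the module): tuple-valued column labels are
# what makes the check above succeed.
def _example_is_potential_multi_index():
    flat = ["a", "b"]                   # plain labels -> False
    nested = [("a", "x"), ("a", "y")]   # all tuples -> could become a MultiIndex
    return _is_potential_multi_index(flat), _is_potential_multi_index(nested)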
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
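# Hedged illustration (not part of the module): a callable ``usecols`` is
# resolved to the set of matching column positions.
def _example_evaluate_usecols():
    names = ["AAA", "bbb", "CCC"]
    return _evaluate_usecols(lambda col: col.isupper(), names)  # -> {0, 2}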
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase:
def __init__(self, kwds):
self.names = kwds.get("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.get("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.get("na_values")
self.na_fvalues = kwds.get("na_fvalues")
self.na_filter = kwds.get("na_filter", False)
self.keep_default_na = kwds.get("keep_default_na", True)
self.true_values = kwds.get("true_values")
self.false_values = kwds.get("false_values")
self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
self.infer_datetime_format = kwds.pop("infer_datetime_format", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format,
cache_dates=self.cache_dates,
)
# validate header options for mi
self.header = kwds.get("header")
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if any(i < 0 for i in self.header):
raise ValueError(
"cannot specify multi-index header with negative integers"
)
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
)
if kwds.get("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
elif self.header is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header is not None"
)
# GH 16338
elif not | is_integer(self.header) | pandas.core.dtypes.common.is_integer |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.core.base import DataError
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, 8, 2, 6],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
],
],
)
@pytest.mark.parametrize(
"ties_method,ascending,pct,exp",
[
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,exp",
[
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
np.nan,
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
np.nan,
np.nan,
],
],
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,pct,exp",
[
(
"average",
True,
"keep",
False,
[2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
),
(
"average",
True,
"keep",
True,
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
),
(
"average",
False,
"keep",
False,
[4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
),
(
"average",
False,
"keep",
True,
[0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
),
("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
(
"min",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
(
"max",
False,
"keep",
False,
[5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
(
"first",
True,
"keep",
False,
[1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
),
(
"first",
True,
"keep",
True,
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
),
(
"first",
False,
"keep",
False,
[3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
(
"first",
False,
"keep",
True,
[0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
),
(
"dense",
True,
"keep",
False,
[1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
),
(
"dense",
True,
"keep",
True,
[
1.0 / 3.0,
1.0 / 3.0,
np.nan,
3.0 / 3.0,
1.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
(
"dense",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
(
"dense",
False,
"keep",
True,
[
3.0 / 3.0,
3.0 / 3.0,
np.nan,
1.0 / 3.0,
3.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
(
"average",
True,
"bottom",
True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
),
("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
(
"average",
False,
"bottom",
True,
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
),
("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
(
"min",
True,
"bottom",
True,
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
),
("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
(
"min",
False,
"bottom",
True,
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
),
("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
(
"max",
False,
"bottom",
True,
[0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
),
("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
(
"first",
True,
"bottom",
True,
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
),
("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
(
"first",
False,
"bottom",
True,
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
),
("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
],
)
def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize(
"pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])]
)
def test_rank_resets_each_group(pct, exp):
df = DataFrame(
{"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10}
)
result = df.groupby("key").rank(pct=pct)
exp_df = DataFrame(exp * 2, columns=["val"])
tm.assert_frame_equal(result, exp_df)
def test_rank_avg_even_vals():
df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
result = df.groupby("key").rank()
exp_df = | DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"]) | pandas.DataFrame |
import datetime
import math
import sys
import warnings
import numpy as np
import pandas as pd
import pytest
from scipy.stats import randint as sp_randint
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import ADJUSTMENT_DELTA_COL
from greykite.common.constants import END_DATE_COL
from greykite.common.constants import FRACTION_OUTSIDE_TOLERANCE
from greykite.common.constants import METRIC_COL
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import START_DATE_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.constants import R2_null_model_score
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.evaluation import add_finite_filter_to_scorer
from greykite.common.evaluation import add_preaggregation_to_scorer
from greykite.common.python_utils import assert_equal
from greykite.common.python_utils import unique_elements_in_list
from greykite.common.testing_utils import generate_df_for_tests
from greykite.common.testing_utils import generate_df_with_reg_for_tests
from greykite.framework.constants import CV_REPORT_METRICS_ALL
from greykite.framework.constants import FRACTION_OUTSIDE_TOLERANCE_NAME
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.pipeline.pipeline import forecast_pipeline
from greykite.framework.templates.prophet_template import ProphetTemplate
from greykite.framework.utils.framework_testing_utils import check_forecast_pipeline_result
from greykite.framework.utils.framework_testing_utils import mock_pipeline
from greykite.sklearn.estimator.null_model import DummyEstimator
from greykite.sklearn.estimator.prophet_estimator import ProphetEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
from greykite.sklearn.estimator.simple_silverkite_estimator import SimpleSilverkiteEstimator
from greykite.sklearn.transform.column_selector import ColumnSelector
from greykite.sklearn.transform.drop_degenerate_transformer import DropDegenerateTransformer
from greykite.sklearn.transform.dtype_column_selector import DtypeColumnSelector
from greykite.sklearn.transform.normalize_transformer import NormalizeTransformer
from greykite.sklearn.transform.null_transformer import NullTransformer
from greykite.sklearn.transform.pandas_feature_union import PandasFeatureUnion
from greykite.sklearn.transform.zscore_outlier_transformer import ZscoreOutlierTransformer
try:
import fbprophet # noqa
except ModuleNotFoundError:
pass
@pytest.fixture
def df():
"""8 months of daily data"""
data = generate_df_for_tests(freq="D", periods=30*8)
df = data["df"][[TIME_COL, VALUE_COL]]
return df
@pytest.fixture
def df_reg():
"""100 days of hourly data with regressors"""
data = generate_df_with_reg_for_tests(
freq="H",
periods=24*100,
remove_extra_cols=True,
mask_test_actuals=True)
reg_cols = ["regressor1", "regressor2", "regressor_bool", "regressor_categ"]
keep_cols = [TIME_COL, VALUE_COL] + reg_cols
df = data["df"][keep_cols]
return df
def get_dummy_pipeline(include_preprocessing=False, regressor_cols=None, lagged_regressor_cols=None):
"""Returns a ``pipeline`` argument to ``forecast_pipeline``
that uses ``DummyEstimator`` to make it easy to unit test
``forecast_pipeline``.
Parameters
----------
include_preprocessing : `bool`, default False
If True, includes preprocessing steps.
regressor_cols : `list` [`str`] or None, default None
Names of regressors in ``df`` passed to ``forecast_pipeline``.
Only used if ``include_preprocessing=True``.
lagged_regressor_cols : `list` [`str`] or None, default None
Names of lagged regressor columns in ``df`` passed to ``forecast_pipeline``.
Only used if ``include_preprocessing=True``.
Returns
-------
pipeline : `sklearn.pipeline.Pipeline`
sklearn Pipeline for univariate forecasting.
"""
if regressor_cols is None:
regressor_cols = []
if lagged_regressor_cols is None:
lagged_regressor_cols = []
all_reg_cols = unique_elements_in_list(regressor_cols + lagged_regressor_cols)
steps = []
if include_preprocessing:
steps += [
("input", PandasFeatureUnion([
("date", Pipeline([
("select_date", ColumnSelector([TIME_COL])) # leaves time column unmodified
])),
("response", Pipeline([ # applies outlier and null transformation to value column
("select_val", ColumnSelector([VALUE_COL])),
("outlier", ZscoreOutlierTransformer()),
("null", NullTransformer())
])),
("regressors_numeric", Pipeline([
("select_reg", ColumnSelector(all_reg_cols)),
("select_reg_numeric", DtypeColumnSelector(include="number")),
("outlier", ZscoreOutlierTransformer()),
("normalize", NormalizeTransformer()), # no normalization by default
("null", NullTransformer())
])),
("regressors_other", Pipeline([
("select_reg", ColumnSelector(all_reg_cols)),
("select_reg_non_numeric", DtypeColumnSelector(exclude="number"))
]))
])),
("degenerate", DropDegenerateTransformer()), # default `drop_degenerate=False`
]
steps += [
("estimator", DummyEstimator()) # predicts a constant
]
return Pipeline(steps)
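# Hedged sketch (not part of the original tests): how the helper above is
# typically driven. It assumes greykite estimators follow the fit(X)/predict(X)
# convention with TIME_COL/VALUE_COL columns; ``df`` would come from the
# fixtures defined earlier in this file.
def _example_dummy_pipeline_usage(df):
    pipeline = get_dummy_pipeline()              # bare DummyEstimator, no preprocessing
    pipeline.fit(df[[TIME_COL, VALUE_COL]])      # DummyEstimator fits a constant prediction
    return pipeline.predict(df[[TIME_COL, VALUE_COL]])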
def test_validate_pipeline_input():
"""Tests for validate_pipeline_input function"""
df = pd.DataFrame({
TIME_COL: | pd.date_range("2018-01-01", periods=1000, freq="D") | pandas.date_range |
import json
import os
import time
import pandas as pd
import sys
sys.path.append("..")
from utils.resource import Resource
from utils.mail import Mail
from utils.handler import xlyHandler
class Monitor(xlyHandler):
"""正在运行任务监控"""
def __init__(self):
self.required_labels = [
'广州-CPU集群', '保定-CPU集群', '保定-GPU-v100', '北京-GPU-V100',
'Paddle-windows', 'Paddle-mac-py3', 'nTeslaV100-16', 'nTeslaP4'
] #, 'Paddle-mac', 'Paddle-mac-py3', 'Paddle-windows', 'Paddle-windows-cpu', 'Paddle-approval-cpu', 'Paddle-benchmark-P40', 'Paddle-Kunlun', 'Paddle-musl']
self.Paddle_sa_cardType = [
'Paddle-mac-py3', 'Paddle-windows', 'Paddle-windows-cpu',
'Paddle-approval-cpu', 'Paddle-benchmark-P40', 'Paddle-Kunlun',
'Paddle-musl', 'Paddle-Sugon-DCU', 'Paddle-Ascend910-x86_64'
]
self.labels_full_count = {
'广州-CPU集群': 8,
'保定-CPU集群': 12,
'保定-GPU-v100': 15,
'北京-GPU-V100': 10,
'nTeslaP4': 5,
'Paddle-windows': 14,
'Paddle-mac-py3': 5,
'nTeslaV100-16': 3
}
def getRunningJob(self):
"""
Get the list of all running jobs, including container jobs and SA jobs.
Returns:
"""
running_job_dict = {}
# Jobs on the CPU/GPU-separated cluster
xly_container_running_task_list = self.getJobList('cpu_gpu_running')
for task in xly_container_running_task_list:
task['pid'] = str(task['pid'])
task['commit'] = task['commit'][0:6]
if task['label'] not in running_job_dict:
running_job_dict[task['label']] = []
running_job_dict[task['label']].append(task)
# V100/P4 legacy clusters
xly_container_running_task_list = self.getJobList('running')
for task in xly_container_running_task_list:
task['pid'] = str(task['pid'])
task['commit'] = task['commit'][0:6]
if task['label'] not in running_job_dict:
running_job_dict[task['label']] = []
running_job_dict[task['label']].append(task)
# SA machines
xly_sa_running_task_list = self.getJobList('sarunning')
for task in xly_sa_running_task_list:
task['pid'] = str(task['pid'])
task['commit'] = task['commit'][0:6]
if task['label'] not in running_job_dict:
running_job_dict[task['label']] = []
running_job_dict[task['label']].append(task)
return running_job_dict
def monitor(self):
running_job_dict = self.getRunningJob()
filename = '../buildLog/runningJobMonitor.csv'
if os.path.exists(filename) == False:
self.create_runningJob_monitor_csv(filename)
for label in running_job_dict:
if label in self.required_labels:
running_job_size = len(running_job_dict[label])
for task in running_job_dict[label]:
target_url = 'https://xly.bce.baidu.com/paddlepaddle/paddle/newipipe/detail/%s/job/%s' % (
task['bid'], task['jobId'])
data = {
'TIME':
time.strftime("%Y%m%d %H:%M:%S", time.localtime()),
'cardType': label,
'running_job_size': running_job_size,
'runningJob': [task['name']],
'target_url': target_url,
'runningTime': task['running']
}
write_data = pd.DataFrame(data)
write_data.to_csv(filename, mode='a', header=False)
filename = '../buildLog/resourcemonitor.csv'
if os.path.exists(filename) == False:
self.create_resource_monitor_csv(filename, 'runningJob')
with open("../buildLog/wait_task.json", 'r') as load_f:
all_waiting_task = json.load(load_f)
load_f.close()
waitting_job_list = {}
for task in all_waiting_task:
if task['label'] in self.required_labels:
if task['label'] not in waitting_job_list:
waitting_job_list[task['label']] = []
waitting_job_list[task['label']].append(task)
for key in self.required_labels:
if key not in waitting_job_list:
waitting_job_list[key] = []
idle_machineSize = {}
for label in running_job_dict:
if label in self.required_labels:
idle_machineSize[label] = self.labels_full_count[label] - len(
running_job_dict[label])
for label in waitting_job_list:
data = {
'TIME': time.strftime("%Y%m%d %H:%M:%S", time.localtime()),
'cardType': label,
'waittingJobSize': [len(waitting_job_list[label])],
'IdleMachineSize': idle_machineSize[label]
}
write_data = | pd.DataFrame(data) | pandas.DataFrame |
# PriceVelocity was developed by <NAME> and <NAME>
import os
import datetime
import numpy
import sqlalchemy as sa
import pandas as pd
import traceback
# create a parent class for all types of fuel
class Fuel(object):
dburl = os.environ.get('SOME_ENV_VAR')
engine = sa.create_engine(dburl)
restricted = False
grade = 'regular'
age = {'regular': 'reg_price_age', 'midgrade': 'mid_price_age', 'premium': 'pre_price_age',
'diesel': 'des_price_age'}
def __init__(self, id, miles, numdays, retail=None, refresh_retail_data=True):
self.id = id
self.miles = miles
self.numdays = numdays
self.DIST = self.get_distance_matrix
if refresh_retail_data:
self.retail = self.get_retail_data()
if retail != 'Not Fuel':
self.retail = self.clean_retail(numdays=numdays)
self.prices = self.get_prices(id, miles)
# self.price_velocity = self.price_velocity()
# Queries a distance matrix data of stations. Should contain a origin location, destination location,
# and the distance between them.
@property
def get_distance_matrix(self):
query = 'select origin_id, destination_id, distance from distance_matrix ' \
'where origin_id = ' + str(self.id) + \
' and distance < ' + str(self.miles) + ';'
return pd.read_sql(query, self.engine)
# Queries from a table called retail that has a collection of observed prices for each station
# Always want args to be either empty or to have at least 'location_id' and 'last_update'
def get_retail_data(self, *args):
# the code below will obviously only work if you have a database full of gas station price data
# for the purposes of testing this out, just consume the csv data included in the repo
# you might use something like:
# return pd.read_csv('/path/to/sample_data.csv'
try:
variables = '*'
if args: variables = self.list_to_string(args)
stations = self.DIST.destination_id.tolist()
if len(stations) > 1:
stations = self.list_to_string(self.DIST.destination_id.tolist()) + ', ' + str(self.id)
query = 'select ' + variables + ' from retail where location_id in (' + stations + ');'
elif len(stations) == 0:
query = 'select ' + variables + ' from retail where location_id = {0}'.format(self.id)
return pd.read_sql(query, self.engine)
except Exception as e:
# if there are any issues with gcloud then return an error
return 'Error: Damn! Had trouble retrieving data from your query. Please check the query.\n' + traceback.format_exc()
pass
def clean_retail(self, numdays=5, *args):
r = self.retail.rename(index=str, columns={"location_id": "station_id", "last_update": "date"})
r = r[(r['station_id'].notnull()) & (r[args[1]] <= 24.1) & (r[args[0]] != 0)] if args else r[
r['station_id'].notnull()]
r['station_id'] = [int(id) for id in r['station_id']]
r["date"] = pd.to_datetime(r.date)
r.date = [d.date() for d in r.date]
return r.drop_duplicates()
def get_prices(self, id, miles, *args):
p = pd.merge(self.retail, self.DIST[(self.DIST['origin_id'] == id) & (self.DIST.distance < miles)],
left_on='station_id',
right_on='destination_id')
if args: p = p[list(args)]
p = pd.concat([Fuel.get(id, df=self.retail), p]).drop(self.age[self.grade], axis=1).fillna(0)
p['station_id'] = p['station_id'].astype('category')
# self.data = self.data.drop_duplicates(subset='price_date', keep='last')
return p.drop_duplicates(subset=('date', 'station_id'), keep='last')
# a little method to provide some statistical data for a specific station
def compare(self, to_sheet=False, print_output=False):
df = self.prices.describe()
d = datetime.datetime.today()
d_str = str(d.date().year) + str(d.date().month) + str(d.date().day) + '_' + str(d.hour) + str(d.minute)
analysis_id = d_str + '_' + str(self.id)
cluster = df[self.grade]
station = Fuel.get(self.id, df=self.prices)[self.grade]
headers = ['date', 'station', 'fuel_type', 'station_mean', 'cluster_mean', 'station_min', 'cluster_min',
'station_max', 'cluster_max', 'analysis_id']
data = [datetime.datetime.today().date(), self.id, self.grade, station.mean(), cluster['mean'],
station.min(), cluster['min'], station.max(), cluster['max'], analysis_id]
df = pd.DataFrame(data=[data], columns=headers)
if print_output == True:
print(
'Statistical summary for ' + str(len(numpy.unique(self.prices.station_id))) + ' stations that are ' + str(
self.miles) + ' mile radius of station '
+ str(self.id) + ' for the span of ' + str(self.numdays) + ' days.')
print('You are above the mean by ' + str(station.mean() - cluster['mean']) + '\n') if station.mean() > cluster['mean'] \
else print('You are below the mean by ' + str(cluster['mean'] - station.mean()) + '\n')
return df
def compare_by_date(self):
datelist = Fuel.get(self.id, df=self.prices).date.unique().tolist()
for date in datelist:
p = Fuel.get_by_date(str(date), df=self.prices)
self.compare(p)
@staticmethod
def list_to_string(arr):
return str(arr).strip('[').strip(']')
@staticmethod
def format_date(d):
index = d.find("/", 3)
return d[:index + 1] + d[index + 1:]
@staticmethod
def get_datetime(date, format="%Y-%m-%d %H:%M:%S", reformat=False):
if reformat: date = Fuel.format_date(date)
return datetime.datetime.strptime(date, format).date()
@staticmethod
def get(value, df, variable='station_id'):
return df[df[variable] == value]
@staticmethod
def get_by_date(date, df, format='%Y-%m-%d', variable='date'):
return df[df[variable] == Fuel.get_datetime(date, format)]
@staticmethod
def ucount(arr):
return len(numpy.unique(arr))
# child classes for each grade of fuel
class Regular(Fuel):
restricted = True
grade = 'regular'
def __init__(self, id, miles, numdays, retail='Not Fuel'):
super(Regular, self).__init__(id, miles, numdays, retail)
self.retail = self.clean_retail(numdays, self.grade, self.age[self.grade])
self.prices = self.get_prices(id, miles, 'station_id', self.grade, 'date', 'distance')
class Midgrade(Fuel):
restricted = True
grade = 'midgrade'
def __init__(self, id, miles, numdays, retail='Not Fuel'):
super(Midgrade, self).__init__(id, miles, numdays, retail)
self.retail = self.clean_retail(numdays, self.grade, self.age[self.grade])
self.prices = self.get_prices(id, miles, 'station_id', self.grade, 'date', 'distance')
class Premium(Fuel):
restricted = True
grade = 'premium'
def __init__(self, id, miles, numdays, retail='Not Fuel'):
super(Premium, self).__init__(id, miles, numdays, retail)
self.retail = self.clean_retail(numdays, self.grade, self.age[self.grade])
self.prices = self.get_prices(id, miles, 'station_id', self.grade, 'date', 'distance')
class Diesel(Fuel):
restricted = True
grade = 'diesel'
def __init__(self, id, miles, numdays, retail='Not Fuel'):
super(Diesel, self).__init__(id, miles, numdays, retail)
self.retail = self.clean_retail(numdays, self.grade, self.age[self.grade])
self.prices = self.get_prices(id, miles, 'station_id', self.grade, 'date', 'distance')
# *************************** END OF CLASS DEFINITION *******************************************
# SAMPLE USAGE
# f = Fuel(1022,5,15)
# r = Regular(1022,5,15)
# m = Midgrade(1022,5,15)
# p = Premium(1022,5,15)
# d = Diesel(1022,5,15)
# r.compare() this will compare target_id prices with the rest and upload to google sheet
# r.price_velocity() this will rank the market drivers
# ************************** Above is sample code to run to test out the classes *******************
# price_velocity accepts the following parameters:
# prices - a pandas dataframe of all of the prices for the stations in the cluster
# station - the target station (i.e. the one that is used to identify the cluster)
# period - the number of days used for price change comparison
# iter - the number of iterations to run for comparison purposes (i.e. how many times to run a comparison over
# a period shifted back by a day)
# grade - which fuel grade to use for comparison purposes
# last_day - function defaults to the maximum date contained within the dataframe.
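# A hypothetical usage sketch of price_velocity (the station id and keyword
# values below are illustrative, mirroring the sample objects above, not real data):
# r = Regular(1022, 5, 15)
# velocity_df = price_velocity(r, station=1022, grade='regular', period=30, iter=3)
# print(velocity_df.head())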
def price_velocity(fuel_obj, station, grade='regular', period=30, iter=1, last_day=None):
prices = fuel_obj.prices
grade = fuel_obj.grade
df_pv = pd.DataFrame(columns=['rank', 'station_id', 'day_lag', 'price_change', 'start_date', 'end_date'])
ranker = []
print ('stations: {0}'.format(numpy.unique(prices.station_id).tolist()))
for it in range(0, iter):
l = {}
new_dict = {}
price_differences = {}
data = []
for id in numpy.unique(prices.station_id).tolist():
# df = Fuel.get(id, df=self.prices.sort_values(['date'], axis=0))
df = prices[prices.station_id == id].drop_duplicates()
if last_day == None:
end_date = df.date.max() - datetime.timedelta(days=it)
else:
d = datetime.datetime.strptime(last_day, '%Y-%m-%d')
end_date = d.date() - datetime.timedelta(days=it)
start_date = end_date - datetime.timedelta(days=period)
df = df[(df.date >= start_date) & (df.date <= end_date)]
initial_price = df[grade].iloc[0]
for d in df.get('date'):
p = Fuel.get(d, df, 'date')[grade].get_values()[0]
if initial_price != p:
l[id] = d # dict
price_differences[id] = p - initial_price
break
best_date = min(l.values())
for k, v in l.items():
new_dict.setdefault(v, []).append(k)
for i, w in enumerate(sorted(new_dict)):
for element in new_dict[w]:
days = ((w - best_date).total_seconds()) / 86400
data.append([i + 1, element, days, float(price_differences[element])])
# print i + 1, w, new_dict[w], w - best_date
df = | pd.DataFrame(data, columns=['rank', 'station_id', 'day_lag', 'price_change']) | pandas.DataFrame |
import os
os.environ['PROJ_LIB'] = '/home/jlee/.conda/envs/mmc_sgp/share/proj'
import glob
import xarray as xr
import wrf
from netCDF4 import Dataset
import numpy as np
import pandas as pd
file_dir = '/projects/wfip2les/cdraxl/2020100300/'
# file_dir = '/home/jlee/wfip/test_case/'
out_dir = '/home/jlee/wfip/'
file_list = glob.glob(file_dir+'custom_wrfout_d03*')
file_list.sort()
base = Dataset((file_dir+'wrfout_d03_2020-10-03_12_00_00'), 'r')
fino_ij = wrf.ll_to_xy(base, 55.006928, 13.154189)
baltic_ij = wrf.ll_to_xy(base, 54.9733, 13.1778)
hgt = wrf.g_geoht.get_height(base, msl=False)
def get_target_ws(hgt_list, target_hgt, ws):
near_z = min(enumerate(hgt_list), key=lambda x: abs(x[1]-target_hgt))
alpha = np.log(ws[near_z[0]+1]/ws[near_z[0]])/np.log(hgt_list[near_z[0]+1]/near_z[1])
target_ws = ws[near_z[0]]*(target_hgt/near_z[1])**alpha
return np.round(target_ws, 4)
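# Illustrative check of the shear-exponent extrapolation above, using made-up
# heights/speeds rather than real model levels (note the function expects at
# least one level above the one nearest to target_hgt):
# get_target_ws([40.0, 60.0, 90.0], 62.0, [6.0, 7.0, 8.0])
# -> alpha = ln(8/7)/ln(90/60) ~ 0.33, so roughly 7.0*(62/60)**0.33 ~ 7.08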
fino_df = | pd.DataFrame(columns=['time', 'wind-speed_62m', 'wind-speed_72m', 'wind-speed_82m', 'wind-speed_92m']) | pandas.DataFrame |
from inspect import isclass
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Datetime
import featuretools as ft
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.primitives import (
Absolute,
AddNumeric,
AddNumericScalar,
Age,
Count,
Day,
Diff,
DivideByFeature,
DivideNumeric,
DivideNumericScalar,
Equal,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
Haversine,
Hour,
IsIn,
IsNull,
Latitude,
LessThanEqualToScalar,
LessThanScalar,
Longitude,
Minute,
Mode,
Month,
MultiplyNumeric,
MultiplyNumericScalar,
Not,
NotEqual,
NotEqualScalar,
NumCharacters,
NumWords,
Percentile,
ScalarSubtractNumericFeature,
Second,
SubtractNumeric,
SubtractNumericScalar,
Sum,
TimeSince,
TransformPrimitive,
Year,
get_transform_primitives
)
from featuretools.primitives.base import make_trans_primitive
from featuretools.primitives.utils import (
PrimitivesDeserializer,
serialize_primitive
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.tests.testing_utils import feature_with_name, to_pandas
from featuretools.utils.gen_utils import Library
from featuretools.utils.koalas_utils import pd_to_ks_clean
def test_init_and_name(es):
log = es['log']
rating = ft.Feature(ft.IdentityFeature(es["products"].ww["rating"]), "log")
log_features = [ft.Feature(es['log'].ww[col]) for col in log.columns] +\
[ft.Feature(rating, primitive=GreaterThanScalar(2.5)),
ft.Feature(rating, primitive=GreaterThanScalar(3.5))]
# Add Timedelta feature
# features.append(pd.Timestamp.now() - ft.Feature(log['datetime']))
customers_features = [ft.Feature(es["customers"].ww[col]) for col in es["customers"].columns]
# check all transform primitives have a name
for attribute_string in dir(ft.primitives):
attr = getattr(ft.primitives, attribute_string)
if isclass(attr):
if issubclass(attr, TransformPrimitive) and attr != TransformPrimitive:
assert getattr(attr, "name") is not None
trans_primitives = get_transform_primitives().values()
# If Dask EntitySet use only Dask compatible primitives
if es.dataframe_type == Library.DASK.value:
trans_primitives = [prim for prim in trans_primitives if Library.DASK in prim.compatibility]
if es.dataframe_type == Library.KOALAS.value:
trans_primitives = [prim for prim in trans_primitives if Library.KOALAS in prim.compatibility]
for transform_prim in trans_primitives:
# skip automated testing if a few special cases
features_to_use = log_features
if transform_prim in [NotEqual, Equal]:
continue
if transform_prim in [Age]:
features_to_use = customers_features
# use the input_types matching function from DFS
input_types = transform_prim.input_types
if type(input_types[0]) == list:
matching_inputs = match(input_types[0], features_to_use)
else:
matching_inputs = match(input_types, features_to_use)
if len(matching_inputs) == 0:
raise Exception(
"Transform Primitive %s not tested" % transform_prim.name)
for prim in matching_inputs:
instance = ft.Feature(prim, primitive=transform_prim)
# try to get name and calculate
instance.get_name()
ft.calculate_feature_matrix([instance], entityset=es)
def test_relationship_path(es):
f = ft.TransformFeature(ft.Feature(es['log'].ww['datetime']), Hour)
assert len(f.relationship_path) == 0
def test_serialization(es):
value = ft.IdentityFeature(es['log'].ww['value'])
primitive = ft.primitives.MultiplyNumericScalar(value=2)
value_x2 = ft.TransformFeature(value, primitive)
dictionary = {
'name': None,
'base_features': [value.unique_name()],
'primitive': serialize_primitive(primitive),
}
assert dictionary == value_x2.get_arguments()
assert value_x2 == \
ft.TransformFeature.from_dictionary(dictionary, es,
{value.unique_name(): value},
PrimitivesDeserializer())
def test_make_trans_feat(es):
f = ft.Feature(es['log'].ww['datetime'], primitive=Hour)
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es, feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert v == 10
@pytest.fixture
def pd_simple_es():
df = pd.DataFrame({
'id': range(4),
'value': pd.Categorical(['a', 'c', 'b', 'd']),
'value2': pd.Categorical(['a', 'b', 'a', 'd']),
'object': ['time1', 'time2', 'time3', 'time4'],
'datetime': pd.Series([pd.Timestamp('2001-01-01'),
pd.Timestamp('2001-01-02'),
pd.Timestamp('2001-01-03'),
| pd.Timestamp('2001-01-04') | pandas.Timestamp |
import cv2 as cv
import numpy as np
import pandas as pd
import os
def Microplate(am, title):
def click(event, x, y, flag, param):
ix = x
iy = y
if event == cv.EVENT_LBUTTONDOWN:
for i in circles[0, :]:
menorX = (i[0] - i[2])
maiorX = i[0] + i[2]
menorY = (i[1] - i[2])
maiorY = i[1] + i[2]
if i[0] < i[2]:
menorX = 0
if i[1] < i[2]:
menorY = 0
if ix >= menorX and ix <= maiorX and iy >= menorY and iy <= maiorY:
center = (i[0], i[1])
radius = i[2]
raios.append(radius)
if len(S[h]) < am:
if S[h].count(center) < 1:
cv.circle(image, center, radius, (0, 0, 255), 3)
S[h].append(center)
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(image, f'ID{id[h]}', (ix, iy), font, .5, (0, 0, 0), 2)
cv.imshow('image', image)
for g in range(1, am+1):
if g != h:
if center in S[g]:
cv.circle(image, center, radius, (255, 0, 0), 3)  # blue circle: count 2 (already marked in another sample group)
cv.imshow('image', image)
S = {}
testes = []
padrao = {}
pxamostra_b = {}
pxamostra_g = {}
pxamostra_r = {}
raios = []
pxpadrao_b = {}
pxpadrao_g = {}
pxpadrao_r = {}
id = {}
ID1 = []
ID2 = []
IDpx = []
cutoff = 5
df = | pd.DataFrame(columns=['ID1', 'ID2', 'pixel']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
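# Quick examples of the interval check above: between(1.0, 3.0, 2.5) and
# between(3.0, 1.0, 2.5) are both True, while between(1.0, 3.0, 4.0) is False.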
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = | read_csv(path, sep=',') | pandas.read_csv |
"""
RF Prediction
"""
# import
import pickle
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import train_test_split
# RF training
def RF_training(num_tree, X, y):
"""
Train a random forest on the given features and labels and return the fitted model.
- X: feature matrix
- y: class labels (1 = sepsis, 0 = non-sepsis)
"""
RF_model = RandomForestClassifier(
n_estimators=num_tree, oob_score=True
)
print("Traing...")
RF_model = RF_model.fit(X, y)
return RF_model
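# Hypothetical smoke test of RF_training on synthetic data (shapes and values
# are made up, not the real sepsis features):
# X_demo = np.random.rand(50, 4)
# y_demo = np.random.randint(0, 2, size=50)
# demo_model = RF_training(num_tree=10, X=X_demo, y=y_demo)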
# temporal test
def temporal_test(horizon, train=True, num_tree=1000):
"""
Temporal testing: optionally (re)train the RF model, then evaluate it on the test cohort over the given prediction horizon (in hours).
"""
model_name = "RF_{}".format(num_tree)
# =========================== Train ? =============================
if train:
# load patient ID
separate_id = pickle.load(open('data/ids/1h_ID.pickle', 'rb'))
sepsis_id, nonsep_id = separate_id['sepsis'], separate_id['nonsep']
# load data
train_data = pd.read_csv(
"data/feature_data/train_data.csv",
index_col=False
)
# drop lab variables
train_data = train_data.drop([
'paO2_FiO2', 'platelets_x_1000', 'total_bilirubin',
'urinary_creatinine', 'creatinine', 'HCO3', 'pH', 'paCO2',
'direct_bilirubin', 'excess', 'ast', 'bun', 'calcium',
'glucose', 'lactate', 'magnesium', 'phosphate', 'potassium',
'hct', 'hgb', 'ptt', 'wbc', 'fibrinogen', 'troponin',
'GCS_Score', 'ventilator'
], axis=1)
# split data, ensure balanced train data
sepsis_data = train_data.loc[
train_data['patientunitstayid'].isin(sepsis_id)
]
sepsis_data = sepsis_data.drop(['patientunitstayid', 'label'], axis=1)
nonsep_data = train_data.loc[
train_data['patientunitstayid'].isin(nonsep_id)
]
nonsep_data = nonsep_data.drop(['patientunitstayid', 'label'], axis=1)
# combined
X_tr = sepsis_data.append(nonsep_data)
y_tr = [1] * sepsis_data.shape[0] + [0] * nonsep_data.shape[0]
# ========================= Train Model ============================
# train model
model = RF_training(num_tree, X_tr, y_tr)
# save model
pickle.dump(model, open(
"models/RF_{}.pickle".format(num_tree), "wb"
))
else:
# load model
model = pickle.load(open(
"models/{}.pickle".format(model_name), "rb"
))
# =========================== Test =============================
print("Tesing...")
# load patient id
separate_id = pickle.load(open('data/ids/12h_ID.pickle', 'rb'))
sepsis_id, nonsep_id = separate_id['sepsis'], separate_id['nonsep']
all_id = sepsis_id + nonsep_id
# prediction results
col_names = ['patientunitstayid', 'label']\
+ list(range(-60 * horizon, -55, 5))
cohort_pr = | pd.DataFrame(columns=col_names) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import math
import numpy.fft as fourier
import scipy.interpolate as inter
# READ DATA FROM SIMULATION
iT = 0
nT = 3
nend = 30000  # truncate the results here; beyond this point they are no longer meaningful
nend = 180000
df1 = pd.read_csv('Bl1outin.txt', header=None)
bl1mom = df1.values[iT:nend:nT,:]
df2 = pd.read_csv('Bl2outin.txt', header=None)
bl2mom = df2.values[iT:nend:nT,:]
df3 = pd.read_csv('Bl3outin.txt', header=None)
bl3mom = df3.values[iT:nend:nT,:]
df4 = pd.read_csv('Azimuth.txt', header=None)
turbinfo = df4.values[iT:nend:nT,:]
df5 = pd.read_csv('/home/antonio/SOWFA/exampleCases/UniWind_3Turb_SC_OBS+YAWERROR_DEMOD/5MW_Baseline/Wind/WindSim.uniform', sep='\t', header=None)
windinfo = df5.values
df6 = pd.read_csv('ECROSS.txt', header=None)
data6 = df6.values[iT:nend:nT,:]
df7 = pd.read_csv('EMOM.txt', header=None)
data7 = df7.values[iT:nend:nT,:]
#GIVEN PARAMETERS
R = 63 #TURBINE RADIUS
print(windinfo)
V0 = windinfo[0,1] # it's a constant vector, so take only 1 value
yawerr = -windinfo[:,2]*numpy.pi/180
vert_shear = windinfo[:,5]
u0_p = V0*numpy.sin(yawerr) #CROSS_WIND
k1_p = vert_shear #VERTICAL WIND SHEAR POWER EXPONENT
dtFAST = 0.005
time = turbinfo[:,3]
timewind = windinfo[:,0]
u0_int = inter.interp1d(timewind, u0_p)
k1_int = inter.interp1d(timewind, k1_p)
wr = turbinfo[:,1]
azimuth1 = turbinfo[:,0]
azimuth2 = turbinfo[:,0] + 2*numpy.pi/3
azimuth3 = turbinfo[:,0] + 4*numpy.pi/3
u0bar = numpy.multiply(u0_int(time), 1/(wr*R))
V0bar = V0/(wr*R)
k1bar = numpy.multiply(k1_int(time), V0bar)
Tper = (2*numpy.pi) / wr
tau = Tper/1.5  # to be lowered later to better filter the 3P response --> 1.5P  # NOTE: check carefully whether it should be 3 or 1/3
print(V0)
m_out_notfil = numpy.zeros([len(bl1mom[:,0])*3])
m_in_notfil = numpy.zeros([len(bl1mom[:,0])*3])
for i in range(len(bl1mom[:,0])): # REARRANGING THE MOMENT BLADE VECTOR FOR CALCULATIONS
m_out_notfil[3*i:3*i+3] = numpy.array([bl1mom[i,0], bl2mom[i,0], bl3mom[i,0]])
m_in_notfil[3*i:3*i+3] = numpy.array([bl1mom[i,1], bl2mom[i,1], bl3mom[i,1]])
def ColTransf(ang1, ang2, ang3): #COLEMAN MBC TRANSFORMATION
out = numpy.array([[1, 1, 1], [2*math.cos(ang1), 2*math.cos(ang2), 2*math.cos(ang3)], [2*math.sin(ang1), 2*math.sin(ang2), 2*math.sin(ang3)]])/3
return out
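# Sanity check of the Coleman transform above with hypothetical equal blade
# loads: the cyclic (cosine/sine) components vanish and the collective equals
# the common value, e.g.
# numpy.dot(ColTransf(0.0, 2*numpy.pi/3, 4*numpy.pi/3), numpy.array([1.0, 1.0, 1.0]))
# -> approximately [1.0, 0.0, 0.0]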
m_out_tr = numpy.zeros([len(bl1mom[:,0])*3])
m_in_tr = numpy.zeros([len(bl1mom[:,0])*3])
for i in range(len(bl1mom[:,0])): #APPLYING MBC TRANSF. TO MOMENT VECTOR
ColT = ColTransf(azimuth1[i], azimuth2[i], azimuth3[i])
m_out_tr[3*i:3*i+3] = numpy.dot(ColT, m_out_notfil[3*i:3*i+3].transpose())
m_in_tr[3*i:3*i+3] = numpy.dot(ColT, m_in_notfil[3*i:3*i+3].transpose())
#NOW I GO IN FREQUENCY DOMAIN
m_out_tr_time1 = m_out_tr[0::3]
m_out_tr_time2 = m_out_tr[1::3]
m_out_tr_time3 = m_out_tr[2::3]
m_in_tr_time1 = m_in_tr[0::3]
m_in_tr_time2 = m_in_tr[1::3]
m_in_tr_time3 = m_in_tr[2::3]
print(m_out_tr_time1)
plt.plot(time, bl1mom[:,0])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl2mom[:,0])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl3mom[:,0])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl1mom[:,1])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl2mom[:,1])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl3mom[:,1])
plt.title("M_OUT_TR_1")
plt.show()
plt.plot(time, wr)
plt.title("WR")
plt.show()
plt.plot(time, m_out_tr_time1)
plt.title("M_OUT_1")
plt.show()
freq = fourier.fftfreq(len(m_out_tr_time1), d=dtFAST)
m_out_tr_freq1 = fourier.fft(m_out_tr_time1)
m_out_tr_freq2 = fourier.fft(m_out_tr_time2)
m_out_tr_freq3 = fourier.fft(m_out_tr_time3)
m_in_tr_freq1 = fourier.fft(m_in_tr_time1)
m_in_tr_freq2 = fourier.fft(m_in_tr_time2)
m_in_tr_freq3 = fourier.fft(m_in_tr_time3)
def FILTER_LP(input, freq, tau):
s = 2*numpy.pi*freq*1j
output = (1/(tau*s + 1))*input
return output
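# FILTER_LP is a first-order low-pass H(s) = 1/(tau*s + 1) applied bin-by-bin in
# the frequency domain; at the corner frequency f_c = 1/(2*pi*tau) its gain
# magnitude is 1/sqrt(2) (about -3 dB). Illustrative values (not from the run):
# abs(FILTER_LP(1.0, freq=1.0/(2*numpy.pi*2.0), tau=2.0))  # ~0.707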
m_out_freq1 = numpy.zeros([len(m_out_tr_freq1)], dtype=complex)
m_out_freq2 = numpy.zeros([len(m_out_tr_freq2)], dtype=complex)
m_out_freq3 = numpy.zeros([len(m_out_tr_freq3)], dtype=complex)
m_in_freq1 = numpy.zeros([len(m_in_tr_freq1)], dtype=complex)
m_in_freq2 = numpy.zeros([len(m_in_tr_freq2)], dtype=complex)
m_in_freq3 = numpy.zeros([len(m_in_tr_freq3)], dtype=complex)
for i in range(len(m_out_tr_freq1)):
m_out_freq1[i] = FILTER_LP(m_out_tr_freq1[i], freq[i], tau[i])
m_out_freq2[i] = FILTER_LP(m_out_tr_freq2[i], freq[i], tau[i])
m_out_freq3[i] = FILTER_LP(m_out_tr_freq3[i], freq[i], tau[i])
m_in_freq1[i] = FILTER_LP(m_in_tr_freq1[i], freq[i], tau[i])
m_in_freq2[i] = FILTER_LP(m_in_tr_freq2[i], freq[i], tau[i])
m_in_freq3[i] = FILTER_LP(m_in_tr_freq3[i], freq[i], tau[i])
m_out_time1 = fourier.ifft(m_out_freq1).real # I CAN DO IT---> NEGATIVE PART IS NEGLIGIBLE (about 0) + the signal is real
m_out_time2 = fourier.ifft(m_out_freq2).real
m_out_time3 = fourier.ifft(m_out_freq3).real
m_in_time1 = fourier.ifft(m_in_freq1).real
m_in_time2 = fourier.ifft(m_in_freq2).real
m_in_time3 = fourier.ifft(m_in_freq3).real
print(m_out_time1)
print(data7)
plt.plot(time, m_out_time1,'b',data7[:,6], data7[:,0],'r')
plt.title("M_OUT_1")
plt.show()
plt.plot(time, m_out_time2,'b',data7[:,6], data7[:,1],'r')
plt.title("M_OUT_2")
plt.show()
plt.plot(time, m_out_time3,'b',data7[:,6], data7[:,2],'r')
plt.title("M_OUT_3")
plt.show()
plt.plot(time, m_in_time1,'b',data7[:,6], data7[:,3],'r')
plt.title("M_IN_1")
plt.show()
plt.plot(time, m_in_time2,'b',data7[:,6], data7[:,4],'r')
plt.title("M_IN_2")
plt.show()
plt.plot(time, m_in_time3,'b',data7[:,6], data7[:,5],'r')
plt.title("M_IN_3")
plt.show()
ind = numpy.random.randint(low = 0, high=len(m_out_time1), size=10000)
m_u0 = numpy.zeros((4,10000))
m_k1V0 = numpy.zeros((5,10000))
m_u0 = numpy.array([[numpy.multiply(m_out_time2[ind], 1/m_out_time1[ind])], [numpy.multiply(m_out_time3[ind], 1/m_out_time1[ind])], [numpy.multiply(m_in_time2[ind], 1/m_in_time1[ind])], [numpy.multiply(m_in_time3[ind], 1/m_in_time1[ind])]])
m_k1V0 = numpy.array([[numpy.ones((10000,))], [m_out_time2[ind]], [m_out_time3[ind]], [m_in_time2[ind]], [m_in_time3[ind]]])
w_vec = numpy.array([u0bar[ind], k1bar[ind]])
print(m_u0)
print(m_k1V0)
print(w_vec)
m_u0 = numpy.reshape(m_u0, (4,10000))
m_k1V0 = numpy.reshape(m_k1V0, (5,10000))
print(numpy.shape(m_k1V0))
Tu0 = numpy.dot(u0bar[ind], numpy.linalg.pinv(m_u0))
print(Tu0)
Tk1V0 = numpy.dot(k1bar[ind], numpy.linalg.pinv(m_k1V0))
print(Tk1V0)
m_prova = m_u0
m_prova = numpy.vstack((m_prova, m_k1V0))
m_prova = numpy.reshape(m_prova, (9,10000))
print(m_prova)
T = numpy.zeros([2,9])
T[0,0:4] = Tu0
T[1,4:9] = Tk1V0
print(T)
w_prova = numpy.dot(T, m_prova)
print(numpy.shape(w_prova))
print(w_vec)
print(w_prova[0,:]-u0bar[ind])
CWIND = numpy.multiply(w_prova[0,:], wr[ind])*R
CREAL_ind = u0_int(time)
CREAL = CREAL_ind[ind]
print(numpy.mean(numpy.abs(CWIND-CREAL)))
timep = time[ind]
i1 = numpy.argsort(timep)
plt.plot(timep[i1], CWIND[i1],'b', timep[i1], CREAL[i1], 'r')
plt.title("RESULTS")
plt.show()
print(numpy.shape(CREAL[i1]))
T_tocsv = numpy.hstack((numpy.array([[V0], [V0]]), T))
dataset = pd.DataFrame(data=T_tocsv)
dataset.to_csv('Tmatrices.txt', sep='\t', header=None)
dfa = pd.read_csv('T_DEMOD.txt', sep='\t', header=None)
Tmat1 = dfa.values
Tmat = Tmat1[:,2:]
Vel = Tmat1[::2,1]
print(Tmat)
print(Vel)
T00_int = inter.interp1d(Vel, Tmat[::2,0])
T01_int = inter.interp1d(Vel, Tmat[::2,1])
T02_int = inter.interp1d(Vel, Tmat[::2,2])
T03_int = inter.interp1d(Vel, Tmat[::2,3])
T14_int = inter.interp1d(Vel, Tmat[1::2,4])
T15_int = inter.interp1d(Vel, Tmat[1::2,5])
T16_int = inter.interp1d(Vel, Tmat[1::2,6])
T17_int = inter.interp1d(Vel, Tmat[1::2,7])
T18_int = inter.interp1d(Vel, Tmat[1::2,8])
def Tmat_int(V):
T00 = T00_int(V)
T01 = T01_int(V)
T02 = T02_int(V)
T03 = T03_int(V)
T14 = T14_int(V)
T15 = T15_int(V)
T16 = T16_int(V)
T17 = T17_int(V)
T18 = T18_int(V)
Tret = numpy.array([[T00, T01, T02, T03, 0, 0, 0, 0, 0], [0, 0, 0, 0, T14, T15, T16, T17, T18]])
return Tret
V0a = windinfo[:,1] # it's a constant vector, so take only 1 value
yawerr = -windinfo[:,2]*numpy.pi/180
vert_shear = windinfo[:,5]
V0a_int = inter.interp1d(timewind, V0a)
yawerr_int = inter.interp1d(timewind, yawerr)
u0_tot = numpy.multiply(V0a_int(time), numpy.sin(yawerr_int(time)))
exp = numpy.zeros((2,len(m_out_time1)))
for i in range(len(m_out_time1)):
mmm = numpy.array([m_out_time2[i]/m_out_time1[i], m_out_time3[i]/m_out_time1[i], m_in_time2[i]/m_in_time1[i], m_in_time3[i]/m_in_time1[i], 1, m_out_time2[i], m_out_time3[i], m_in_time2[i], m_in_time3[i]])
exp[:,i] = numpy.dot(Tmat_int(V0a_int(time[i])), mmm.transpose())
exp1 = numpy.multiply(exp, wr*R)
plt.plot(time, exp1[0,:], 'b', time, u0_tot,'r')
plt.title("CROSS WIND ESTIMATE")
plt.show()
dfdata = | pd.read_csv('t3.T3.out', sep='\t', header=None, skiprows=10) | pandas.read_csv |
# -*- coding: utf-8 -*-
# %%
import pandas as pd
import numpy as np
import tkinter as tk
class package:
def __init__(self):
# elements defined
C = 12
H = 1.007825
N = 14.003074
O = 15.994915
P = 30.973763
S = 31.972072
Na = 22.98977
Cl = 34.968853
self.elements = [C,H,N,O,P,S,Na,Cl]
self.elementsymbol = ['C','H','N','O','P','S','Na','Cl']
ionname = ['M','M+H','M+2H','M+H-H2O','M+2H-H2O','M+Na','M+2Na','M+2Na-H','M+NH4',
'M-H','M-2H','M-3H','M-4H','M-5H','M-H-H2O','M-2H-H2O','M-CH3','M+Cl','M+HCOO','M+OAc']
ionfunc = []
ionfunc.append(lambda ms: ms)
ionfunc.append(lambda ms: ms+package().elements[1])
ionfunc.append(lambda ms: (ms+2*package().elements[1])/2)
ionfunc.append(lambda ms: ms-package().elements[1]-package().elements[3])
ionfunc.append(lambda ms: (ms-package().elements[3])/2)
ionfunc.append(lambda ms: ms+package().elements[6])
ionfunc.append(lambda ms: (ms+2*package().elements[6])/2)
ionfunc.append(lambda ms: ms-package().elements[1]+2*package().elements[6])
ionfunc.append(lambda ms: ms+4*package().elements[1]+package().elements[2])
ionfunc.append(lambda ms: ms-package().elements[1])
ionfunc.append(lambda ms: (ms-2*package().elements[1])/2)
ionfunc.append(lambda ms: (ms-3*package().elements[1])/3)
ionfunc.append(lambda ms: (ms-4*package().elements[1])/4)
ionfunc.append(lambda ms: (ms-5*package().elements[1])/5)
ionfunc.append(lambda ms: ms-3*package().elements[1]-package().elements[3])
ionfunc.append(lambda ms: (ms-4*package().elements[1]-package().elements[3])/2)
ionfunc.append(lambda ms: ms-package().elements[0]-3*package().elements[1])
ionfunc.append(lambda ms: ms+package().elements[7])
ionfunc.append(lambda ms: ms+package().elements[0]+package().elements[1]+2*package().elements[3])
ionfunc.append(lambda ms: ms+2*package().elements[0]+3*package().elements[1]+2*package().elements[3])
self.ion = {}
for i,j in enumerate(ionname):
self.ion[j] = ionfunc[i]
# %% [markdown]
# Package for Sphingolipids
# %%
class package_sl(package):
def __init__(self):
# base structure defined
self.base = {'Cer': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),
'Sphingosine': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),
'Sphinganine': np.array([0,3,1,0]+[0]*(len(package().elements)-4))}
# headgroups defined
headgroup = ['Pi','Choline','Ethanolamine','Inositol','Glc','Gal','GalNAc','NeuAc','Fuc','NeuGc']
formula = []
formula.append(np.array([0,3,0,4,1]+[0]*(len(package().elements)-5)))
formula.append(np.array([5,13,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([2,7,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([8,15,1,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([11,19,1,9]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,5]+[0]*(len(package().elements)-4)))
formula.append(np.array([11,19,1,10]+[0]*(len(package().elements)-4)))
self.components = self.base.copy()
for i,j in enumerate(headgroup):
self.components[j] = formula[i]
# sn type defined
sntype = ['none','d','t']
snformula = []
snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,2]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,3]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,4]+[0]*(len(package().elements)-4)))
self.sn = {}
for i,j in enumerate(sntype):
self.sn[j] = snformula[i]
# extended structure
nana = ['M','D','T','Q','P']
iso = ['1a','1b','1c']
namedf = pd.DataFrame({'0-series': ['LacCer'],'a-series': ['GM3'],'b-series': ['GD3'],'c-series': ['GT3']})
namedf = namedf.append(pd.Series(['G'+'A'+'2' for name in namedf.iloc[0,0:1]]+['G'+i+'2' for i in nana[0:3]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'A'+'1' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[0:3],iso)],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'M'+'1b' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[1:4],iso)],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'D'+'1c' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[2:],iso)],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'D'+'1α' for name in namedf.iloc[0,0:1]]+[i+'α' for i in namedf.iloc[4,1:]],index = namedf.columns), ignore_index=True)
sequencedf = pd.DataFrame({'0-series': ['Gal-Glc-Cer'],'a-series': ['(NeuAc)-Gal-Glc-Cer'],'b-series': ['(NeuAc-NeuAc)-Gal-Glc-Cer'],'c-series': ['(NeuAc-NeuAc-NeuAc)-Gal-Glc-Cer']})
sequencedf = sequencedf.append(pd.Series(['GalNAc-'+formula for formula in sequencedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Gal-'+formula for formula in sequencedf.iloc[1,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['NeuAc-'+formula for formula in sequencedf.iloc[2,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['NeuAc-'+formula for formula in sequencedf.iloc[3,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['NeuAc-Gal-(NeuAc)-GalNAc-'+formula for formula in sequencedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
self.base = {'Cer': 'Cer','Sphingosine': 'Sphingosine','Sphinganine': 'Sphinganine','Sphingosine-1-Phosphate': 'Pi-Sphingosine','Sphinganine-1-Phosphate': 'Pi-Sphinganine',
'CerP': 'Pi-Cer','SM': 'Choline-Pi-Cer','CerPEtn': 'Ethanolamine-Pi-Cer','CerPIns': 'Inositol-Pi-Cer',
'LysoSM(dH)': 'Choline-Pi-Sphinganine','LysoSM': 'Choline-Pi-Sphingosine',
'GlcCer': 'Glc-Cer','GalCer': 'Gal-Cer'}
for i in namedf:
for j,k in enumerate(namedf[i]):
self.base[k] = sequencedf[i][j]
def basesn(self,base,typ):
typ = base[typ].split('-')[-1]
if 'Cer' == base[typ]:
return [['d','t'],list(range(18,23)),':',[0,1],'/',['none','h'],list(range(12,33)),':',[0,1]]
elif 'Sphingosine' == base[typ]:
return [['d','t'],list(range(18,23)),':','1']
elif 'Sphinganine' == base[typ]:
return [['d','t'],list(range(18,23)),':','0']
else:
return 0
def iterate(self,base,typ,start,end):
typ = base[typ].split('-')[-1]
start = pd.Series(start)
end = | pd.Series(end) | pandas.Series |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
| Timestamp("2011-01-04 10:00") | pandas.Timestamp |
from enum import Enum
import sys
import os
import re
from typing import Any, Callable, Tuple
from pandas.core.frame import DataFrame
from tqdm import tqdm
import yaml
from icecream import ic
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from argparse import ArgumentParser
from configparser import ConfigParser
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from utils.colored_print import ColoredPrint
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import Binarizer
import warnings
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
log = ColoredPrint()
def log_call(func: Callable) -> Callable:
def wrapper(*args, **kwargs):
name: str = args[0].name if type(args[0]) == pd.Series else type(args[0])
log.info(f'{name}\t{func.__name__}')
return func(*args, **kwargs)
return wrapper
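# Usage sketch for the decorator above (the function name and body are hypothetical):
# @log_call
# def scale_series(series: pd.Series, factor: float = 2.0) -> pd.Series:
#     return series * factor
# Calling scale_series(some_series) then logs the Series name and "scale_series".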
class Result(Enum):
DataFrame = 0,
Series = 1
@log_call
def one_hot_encode(train_series: pd.Series, test_series: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
# TODO Should one hot encode train and test datasets
def __replace_non_letters_with_underscore(name: str) -> str:
return re.sub('\W', '_', name).lower()
ohe: OneHotEncoder = OneHotEncoder(handle_unknown='ignore', dtype=np.int0)
sprs: csr_matrix = ohe.fit_transform(pd.DataFrame(train_series))
fixed_series_name: str = __replace_non_letters_with_underscore(train_series.name)
columns: list[str] = [f'{fixed_series_name}_{__replace_non_letters_with_underscore(col)}' for col in ohe.categories_[0]]
train_tmp: pd.DataFrame = pd.DataFrame.sparse.from_spmatrix(sprs, columns=columns)
sprs = ohe.transform( | pd.DataFrame(test_series) | pandas.DataFrame |
from context import tables
import os
import pandas as pd
def test_tables_fetcher():
try:
tables.fetcher()
tables_dir=os.listdir(tables.TABLES_PATH)
print(f'\n----------------------------------\ntest_tables_fetcher worked,\ncontent of {tables.TABLES_PATH} is:\n{tables_dir}\n----------------------------------\n')
except:
print('test_tables_fetcher broke')
def test_tables_updated():
try:
os.chdir(tables.TABLES_PATH)
ret=tables.updated()
with open('log', 'r') as log:
date = log.read()
os.chdir(tables.CWD)
print(f'----------------------------------\ntest_tables_updated worked, returned {ret}\nlog content is:\n{date}\n----------------------------------\n')
except:
print('test_tables_updated broke')
def test_tables_importer():
#null case
try:
ret=tables.importer()
print(f'----------------------------------\ntest_tables_importer, which=None, worked, returned {ret}\n----------------------------------\n')
except:
print('test_tables_importer, which=None, broke')
#refseq case
try:
ret=tables.importer(which='refseq')
ret= | pd.DataFrame.head(ret) | pandas.DataFrame.head |
import os
import math
import torch
import torch.nn as nn
import traceback
import pandas as pd
import time
import numpy as np
import argparse
from utils.generic_utils import load_config, save_config_file
from utils.generic_utils import set_init_dict
from utils.generic_utils import NoamLR, binary_acc
from utils.generic_utils import save_best_checkpoint
from utils.tensorboard import TensorboardWriter
from utils.dataset import test_dataloader
from models.spiraconv import SpiraConvV1, SpiraConvV2
from utils.audio_processor import AudioProcessor
import random
# set random seed
random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
def test(criterion, ap, model, c, testloader, step, cuda, confusion_matrix=False):
padding_with_max_lenght = c.dataset['padding_with_max_lenght']
losses = []
accs = []
model.zero_grad()
model.eval()
loss = 0
acc = 0
preds = []
targets = []
with torch.no_grad():
for feature, target, slices, targets_org in testloader:
#try:
if cuda:
feature = feature.cuda()
target = target.cuda()
output = model(feature).float()
# output = torch.round(output * 10**4) / (10**4)
# Calculate loss
if not padding_with_max_lenght and not c.dataset['split_wav_using_overlapping']:
target = target[:, :output.shape[1],:target.shape[2]]
if c.dataset['split_wav_using_overlapping']:
# unpack overlapping for calculation loss and accuracy
if slices is not None and targets_org is not None:
idx = 0
new_output = []
new_target = []
for i in range(slices.size(0)):
num_samples = int(slices[i].cpu().numpy())
samples_output = output[idx:idx+num_samples]
output_mean = samples_output.mean()
samples_target = target[idx:idx+num_samples]
target_mean = samples_target.mean()
new_target.append(target_mean)
new_output.append(output_mean)
idx += num_samples
target = torch.stack(new_target, dim=0)
output = torch.stack(new_output, dim=0)
#print(target, targets_org)
if cuda:
output = output.cuda()
target = target.cuda()
targets_org = targets_org.cuda()
if not torch.equal(targets_org, target):
raise RuntimeError("Integrity problem during the unpack of the overlay for the calculation of accuracy and loss. Check the dataloader !!")
loss += criterion(output, target).item()
# calculate binnary accuracy
y_pred_tag = torch.round(output)
acc += (y_pred_tag == target).float().sum().item()
preds += y_pred_tag.reshape(-1).int().cpu().numpy().tolist()
targets += target.reshape(-1).int().cpu().numpy().tolist()
if confusion_matrix:
print("======== Confusion Matrix ==========")
y_target = pd.Series(targets, name='Target')
y_pred = | pd.Series(preds, name='Predicted') | pandas.Series |
from typing import List
import torch
import numpy as np
import pandas as pd
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import utils
class Predictor:
result_df = None
def predict_group(self, samples: List[str], group_name: str):
raise NotImplementedError
def save_results(self, save_path: str):
raise NotImplementedError
class TransformerPredictor(Predictor):
def __init__(self, checkpoint_path: str):
self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint_path)
self.tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
def inference_from_texts(self, input_texts: List[str]):
tokenized_input = self.tokenizer(input_texts, return_tensors="pt", padding=True, truncation=True,
max_length=512)
output = self.model(**tokenized_input)
return output.logits.detach()
def predict_group(self, samples: List[str], group_name: str):
raise NotImplementedError
def save_results(self, save_path: str):
raise NotImplementedError
class DiagnosisPredictor(TransformerPredictor):
def __init__(self, checkpoint_path: str, test_set_path: str, gpu: bool = False, code_label: str = "short_codes",
**args):
super().__init__(checkpoint_path)
self.gpu = gpu
self.code_filter = utils.codes_that_occur_n_times_in_dataset(n=100, dataset_path=test_set_path,
code_label=code_label)
self.label_list = list(self.model.config.label2id.keys())
self.label_list_filter = [self.label_list.index(label) for label in self.code_filter]
self.result_df = pd.DataFrame(columns=["group"] + self.code_filter)
def reset_results(self):
self.result_df = pd.DataFrame(columns=["group"] + self.code_filter)
def predict_group(self, samples: List[str], group_name: str):
logits_all_batches = self.inference_from_texts(samples)
result_batch = torch.sigmoid(logits_all_batches)
all_probs_per_label = []
for result in result_batch:
prob_per_label = [result[i] for i in self.label_list_filter]
all_probs_per_label.append(prob_per_label)
all_probs_per_label = np.array(all_probs_per_label)
mean_prob_per_label = np.mean(all_probs_per_label, axis=0)
df_row = dict(zip(self.code_filter, mean_prob_per_label))
df_row["group"] = group_name
self.result_df = self.result_df.append(df_row, ignore_index=True)
def save_results(self, save_path):
self.result_df.to_csv(save_path, index=False, float_format="%.4f")
self.reset_results()
class MortalityPredictor(TransformerPredictor):
def __init__(self, checkpoint_path: str, gpu: bool = False, **args):
super().__init__(checkpoint_path)
self.gpu = gpu
self.result_df = pd.DataFrame(columns=["group"])
def reset_results(self):
self.result_df = | pd.DataFrame(columns=["group"]) | pandas.DataFrame |
import torch
from torch import optim
import os
import os.path
import time
import numpy as np
import pandas as pd
from collections import defaultdict
import argparse
import utils
from utils import read_vocab, Tokenizer, vocab_pad_idx, timeSince, try_cuda
from env import R2RBatch, ImageFeatures
from model import EncoderLSTM, AttnDecoderLSTM
from follower import Seq2SeqAgent
import eval
from vocab import SUBTRAIN_VOCAB, TRAINVAL_VOCAB, TRAIN_VOCAB
RESULT_DIR = 'tasks/R2R/results/'
SNAPSHOT_DIR = 'tasks/R2R/snapshots/'
PLOT_DIR = 'tasks/R2R/plots/'
# TODO: how much is this truncating instructions?
MAX_INPUT_LENGTH = 80
BATCH_SIZE = 100
max_episode_len = 10
word_embedding_size = 300
glove_path = 'tasks/R2R/data/train_glove.npy'
action_embedding_size = 2048+128
hidden_size = 512
dropout_ratio = 0.5
# feedback_method = 'sample' # teacher or sample
learning_rate = 0.0001
weight_decay = 0.0005
FEATURE_SIZE = 2048+128
log_every = 100
save_every = 1000
def get_model_prefix(args, image_feature_list):
image_feature_name = "+".join(
[featurizer.get_name() for featurizer in image_feature_list])
model_prefix = 'follower_{}_{}'.format(
args.feedback_method, image_feature_name)
if args.use_train_subset:
model_prefix = 'trainsub_' + model_prefix
if args.bidirectional:
model_prefix = model_prefix + "_bidirectional"
if args.use_pretraining:
model_prefix = model_prefix.replace(
'follower', 'follower_with_pretraining', 1)
return model_prefix
def eval_model(agent, results_path, use_dropout, feedback, allow_cheat=False):
agent.results_path = results_path
agent.test(
use_dropout=use_dropout, feedback=feedback, allow_cheat=allow_cheat)
def filter_param(param_list):
return [p for p in param_list if p.requires_grad]
def train(args, train_env, agent, encoder_optimizer, decoder_optimizer,
n_iters, log_every=log_every, val_envs=None):
''' Train on training set, validating on both seen and unseen. '''
if val_envs is None:
val_envs = {}
print('Training with %s feedback' % args.feedback_method)
data_log = defaultdict(list)
start = time.time()
split_string = "-".join(train_env.splits)
def make_path(n_iter):
return os.path.join(
SNAPSHOT_DIR, '%s_%s_iter_%d' % (
get_model_prefix(args, train_env.image_features_list),
split_string, n_iter))
best_metrics = {}
last_model_saved = {}
for idx in range(0, n_iters, log_every):
agent.env = train_env
interval = min(log_every, n_iters-idx)
iter = idx + interval
data_log['iteration'].append(iter)
# Train for log_every interval
agent.train(encoder_optimizer, decoder_optimizer, interval,
feedback=args.feedback_method)
train_losses = np.array(agent.losses)
assert len(train_losses) == interval
train_loss_avg = np.average(train_losses)
data_log['train loss'].append(train_loss_avg)
loss_str = 'train loss: %.4f' % train_loss_avg
save_log = []
# Run validation
for env_name, (val_env, evaluator) in sorted(val_envs.items()):
agent.env = val_env
# Get validation loss under the same conditions as training
agent.test(use_dropout=True, feedback=args.feedback_method,
allow_cheat=True)
val_losses = np.array(agent.losses)
val_loss_avg = np.average(val_losses)
data_log['%s loss' % env_name].append(val_loss_avg)
agent.results_path = '%s%s_%s_iter_%d.json' % (
RESULT_DIR, get_model_prefix(
args, train_env.image_features_list),
env_name, iter)
# Get validation distance from goal under evaluation conditions
agent.test(use_dropout=False, feedback='argmax')
if not args.no_save:
agent.write_results()
print("evaluating on {}".format(env_name))
score_summary, _ = evaluator.score_results(agent.results)
loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)
for metric, val in sorted(score_summary.items()):
data_log['%s %s' % (env_name, metric)].append(val)
if metric in ['success_rate']:
loss_str += ', %s: %.3f' % (metric, val)
key = (env_name, metric)
if key not in best_metrics or best_metrics[key] < val:
best_metrics[key] = val
if not args.no_save:
model_path = make_path(iter) + "_%s-%s=%.3f" % (
env_name, metric, val)
save_log.append(
"new best, saved model to %s" % model_path)
agent.save(model_path)
if key in last_model_saved:
for old_model_path in \
agent._encoder_and_decoder_paths(
last_model_saved[key]):
os.remove(old_model_path)
last_model_saved[key] = model_path
print(('%s (%d %d%%) %s' % (
timeSince(start, float(iter)/n_iters),
iter, float(iter)/n_iters*100, loss_str)))
for s in save_log:
print(s)
if not args.no_save:
if save_every and iter % save_every == 0:
agent.save(make_path(iter))
df = | pd.DataFrame(data_log) | pandas.DataFrame |
import argparse
import json
import os
import pandas as pd
from PIL import Image
def load_json(path):
with open(path, 'r') as f:
labels = json.load(f)['annotations']
return labels
def load_filenames(path, validate=True):
files = os.listdir(path)
if validate:
files = validate_images(files, path)
files = [file.split('.')[0] for file in files
if file.endswith('.jpg')]
return files
def validate_images(files, path):
paths_files = [(os.path.join(path, file), file)
for file in files]
valid_files = []
for path, file in paths_files:
try:
Image.open(path)
valid_files.append(file)
except OSError:
continue
return valid_files
def create_dataframe(labels, dataset, prefix=None):
df = pd.DataFrame(labels)
if prefix:
df['imageId'] = df['imageId'].apply(lambda f: '{}_{}'.format(prefix, f))
# Ensure file is downloaded
files = load_filenames(dataset)
df = df[df['imageId'].isin(files)]
df['labelId'] = df['labelId'].apply(" ".join)
return df
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True)
parser.add_argument('--train_json', required=True)
parser.add_argument('--valid_json', required=True)
parser.add_argument('--outpath', required=True)
parser.add_argument('--validate', default=True, type=bool)
return parser.parse_args()
def main():
args = parse_args()
train_labels = load_json(args.train_json)
valid_labels = load_json(args.valid_json)
df_train = create_dataframe(train_labels, args.dataset, prefix='train')
df_valid = create_dataframe(valid_labels, args.dataset, prefix='valid')
df = | pd.concat([df_train, df_valid]) | pandas.concat |
import pandas as pd
import numpy as np
import datetime
import os
from scipy import array
from scipy.interpolate import interp1d
def subst(x, str_re, loc):
"""
Parameters:
-----------
x : str, the string to be updated
str_re : str, the replacement string
loc : int or tuple/array of int, the position (or start and end positions) in x to replace with str_re
Returns:
--------
x_new : updated new string
"""
if isinstance(loc, int):
if loc == -1:
x_new = x[:loc] + str_re
else:
x_new = x[:loc] + str_re + x[loc+1:]
elif loc[-1] == -1:
x_new = x[:loc[0]] + str_re
else:
x_new = x[:loc[0]] + str_re + x[loc[1]+1:]
return x_new
# End subst()
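# Quick examples of subst() on made-up strings:
# subst("abcdef", "Z", 2) -> "abZdef" (single index)
# subst("abcdef", "XY", (2, 3)) -> "abXYef" (replace the span from index 2 to 3)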
def prep_cq(concentration, cons_name, discharge):
"""
Parameters:
------------
concentration : pd.DataFrame, raw concentration time series for one constituent
cons_name : str, name of the constituent, used to label the concentration column
discharge : pd.DataFrame or pd.Series, discharge (flow) time series indexed by time
Returns:
------------
cq : pd.DataFrame, concentration and flow matched on time
"""
concentration.rename(columns={concentration.columns[0]: 'Time',
concentration.columns[1]: '126001A-' + cons_name + '(mg/l)'}, inplace=True)
concentration.drop(index=[0, 1], inplace=True)
concentration.dropna(how='all', inplace=True, axis=1)
# Match C-Q data according to time
# Convert the string in "Time" to datetime
concentration = conc_time_shift(concentration, time_format="%Y/%m/%d %H:%M")
concentration = duplicate_average(concentration).set_index('Time')
# concentration.drop_duplicates(keep='first', inplace=True)
cq = combine_cq(concentration, discharge)
return cq
# End prep_cq()
def combine_cq(concentration, discharge):
cq = pd.concat([concentration, discharge], axis=1, join='inner')
cq.reset_index(inplace=True)
cols = cq.columns
cq.loc[:, cols[1:]] = cq.loc[:, cols[1:]].astype(float)
cq.rename(columns = {cols[-1] : 'Flow(m3)'}, inplace=True)
return cq
# End combine_cq()
def conc_time_convert(concentration, loc, time_format="%Y/%m/%d %H:%M"):
""" Assumptions: 1) If there are more than one obs C in an hour, the average of the C is used
2) the mean of flow
"""
monitor_minute = concentration['Time'][2][-2:]
if monitor_minute == '00':
concentration['Time'] = concentration['Time'].apply(subst, args=('00', loc[1]))
else:
concentration['Time'] = concentration['Time'].apply(subst, args=('00', loc[0]))
concentration['Time'] = | pd.to_datetime(concentration['Time'], format=time_format) | pandas.to_datetime |
# _*_ coding: utf-8 _*_
"""
Bill Searcher.
Author: <NAME>
"""
import faiss
import nmslib
import joblib
import numpy as np
import pandas as pd
from typing import List, Tuple
# Own customized variables
from bill_helper.tokenizer import MyTokenizer
from bill_helper.global_variables import (BILL_DATA_FILEPATH,
DATABASE_VECTORS_FILEPATH,
T2_VECTORIZER_FILEPATH,
ORDINAL_2_ID_DICT_FILEPATH,
INDEX_TIME_PARAMS)
class FaissBillSearcher(object):
def __init__(self) -> None:
self._db_df = pd.read_csv(BILL_DATA_FILEPATH)
self._db_vects = joblib.load(DATABASE_VECTORS_FILEPATH).toarray().astype('float32')
self._texts_df = self._generate_text_dataframe()
self._tokenizer = MyTokenizer()
self._vectorizer = joblib.load(T2_VECTORIZER_FILEPATH)
self._ordinal_2_id = joblib.load(ORDINAL_2_ID_DICT_FILEPATH)
self._d = self._db_vects.shape[1]
self._index = faiss.IndexFlatL2(self._d)
self._index.add(self._db_vects)
def _generate_text_dataframe(self) -> pd.DataFrame:
feature_cols = ['bill_name', 'bill_desc', 'unit']
texts_df = self._db_df.copy()
texts_df['bill_text'] = texts_df[feature_cols[0]].str.cat(
texts_df[feature_cols[1:]], sep=' '
)
texts_df.drop(columns=feature_cols, inplace=True)
return texts_df
def _find_k_nearest_indexes(self, query_texts: List[str], k: int = 5) -> Tuple[np.ndarray, np.ndarray]:
text_segmented = [self._tokenizer.segment(text) for text in query_texts]
query_vects = self._vectorizer.transform(text_segmented).toarray().astype('float32')
D, I = self._index.search(query_vects, k)
return D, I
def find_k_nearest_texts(self, query_texts: List[str], k: int = 5) -> List[List[tuple]]:
_, I = self._find_k_nearest_indexes(query_texts, k)
ans = []
for text, ordinals in zip(query_texts, I):
ids = [self._ordinal_2_id[ordinal] for ordinal in ordinals]
k_nearest_texts = []
for _id in ids:
record = tuple(self._db_df.loc[self._db_df.bill_id == _id].values.ravel())
k_nearest_texts.append(record)
ans.append(k_nearest_texts)
return ans
def find_k_nearest_bills(self, query_texts: List[str], k: int = 5) -> List[pd.DataFrame]:
D, I = self._find_k_nearest_indexes(query_texts, k)
results = []
for i, text in enumerate(query_texts):
ordinals, distances = I[i], list(D[i])
ids = [self._ordinal_2_id[ordinal] for ordinal in ordinals]
k_nearest_bills = | pd.DataFrame() | pandas.DataFrame |
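# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes the data/index files referenced in bill_helper.global_variables exist
# on disk; the query string below is a made-up placeholder.
def _faiss_searcher_example():
    searcher = FaissBillSearcher()
    # 3 most similar database bills per query text, as lists of record tuples
    return searcher.find_k_nearest_texts(["example bill item"], k=3)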
#!/usr/bin/env python3
import os
import argparse
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('clint.mpl')
from pprint import pprint
import scipy.signal as signal
import itertools
from pygama import DataSet
import pygama.utils as pu
import pygama.analysis.histograms as ph
import pygama.analysis.peak_fitting as pf
def main():
"""
to get the best energy resolution, we want to explore the possible values
of our DSP processor list, especially trap filter and RC decay constants.
a flexible + easy way to vary a bunch of parameters at once is to create
a DataFrame with each row corresponding to a set of parameters.
We then use this DF as an input/output for the other functions.
it could also easily be extended to loop over individual detectors, or vary
    any other set of parameters in the processor list.
"""
par = argparse.ArgumentParser(description="pygama dsp optimizer")
arg, st, sf = par.add_argument, "store_true", "store_false"
arg("-ds", nargs='*', action="store", help="load runs for a DS")
arg("-r", "--run", nargs=1, help="load a single run")
arg("-g", "--grid", action=st, help="set DSP parameters to be varied")
arg("-w", "--window", action=st, help="generate a small waveform file")
arg("-p", "--process", action=st, help="run DSP processing")
arg("-f", "--fit", action=st, help="fit outputs to peakshape function")
arg("-t", "--plot", action=st, help="find optimal parameters & make plots")
arg("-v", "--verbose", action=st, help="set verbose mode")
args = vars(par.parse_args())
ds = pu.get_dataset_from_cmdline(args, "runDB.json", "calDB.json")
# pprint(ds.paths)
# set I/O locations
d_out = os.path.expanduser('~') + "/Data/cage"
f_grid = f"{d_out}/cage_optimizer_grid.h5"
f_tier1 = f"{d_out}/cage_optimizer_t1.h5"
f_tier2 = f"{d_out}/cage_optimizer_t2.h5"
f_opt = f"{d_out}/cage_optimizer_data.h5"
# -- run routines --
if args["grid"]:
# set the combination of processor params to vary to optimize resolution
set_grid(f_grid)
if args["window"]:
# generate a small single-peak file w/ uncalibrated energy to reanalyze
window_ds(ds, f_tier1)
if args["process"]:
# create a file with DataFrames for each set of parameters
process_ds(ds, f_grid, f_opt, f_tier1, f_tier2)
if args["fit"]:
# fit all outputs to the peakshape function and find the best resolution
get_fwhm(f_grid, f_opt, verbose=args["verbose"])
if args["plot"]:
# show results
plot_fwhm(f_grid)
def set_grid(f_grid):
"""
"""
# # this is pretty ambitious, but maybe doable -- 3500 entries
# e_rises = np.arange(1, 6, 0.2)
# e_flats = np.arange(0.5, 4, 0.5)
# rc_consts = np.arange(50, 150, 5) # ~same as MJD charge trapping correction
# this runs more quickly -- 100 entries, 3 minutes on my mac
e_rises = np.arange(2, 3, 0.2)
e_flats = np.arange(1, 3, 1)
rc_consts = np.arange(52, 152, 10)
# TODO: jason's suggestions, knowing the expected shape of the noise curve
# e_rises = np.linspace(-1, 0, sqrt(sqrt(3)) # jason says try this
# e_rises # make another list which is 10^pwr of this list
# np.linspace(log_tau_min, log_tau_max) # jason says try this too
lists = [e_rises, e_flats, rc_consts]
prod = list(itertools.product(*lists)) # clint <3 stackoverflow
df = pd.DataFrame(prod, columns=['rise','flat','rc'])
# print(df)
df.to_hdf(f_grid, key="pygama_optimization")
print("Wrote master grid file:", f_grid)
def window_ds(ds, f_tier1):
"""
Take a single DataSet and window it so that the output file only contains
events near an expected peak location.
"""
# a user has to figure out the uncalibrated energy range of the K40 peak
# xlo, xhi, xpb = 0, 2e6, 2000 # show phys. spectrum (top feature is 2615 pk)
xlo, xhi, xpb = 990000, 1030000, 250 # k40 peak, ds 3
t2df = ds.get_t2df()
hE, xE = ph.get_hist(t2df["energy"], range=(xlo, xhi), dx=xpb)
plt.semilogy(xE, hE, ls='steps', lw=1, c='r')
import matplotlib.ticker as ticker
plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4e'))
plt.locator_params(axis='x', nbins=5)
plt.xlabel("Energy (uncal.)", ha='right', x=1)
plt.ylabel("Counts", ha='right', y=1)
plt.savefig(f"./plots/cage_ds{ds.ds_lo}_winK40.pdf")
# exit()
# write a windowed tier 1 file containing only waveforms near the peak
t1df = pd.DataFrame()
for run in ds.paths:
ft1 = ds.paths[run]["t1_path"]
print(f"Scanning ds {ds.ds_lo}, run {run}\n file: {ft1}")
for chunk in pd.read_hdf(ft1, 'ORSIS3302DecoderForEnergy', chunksize=5e4):
t1df_win = chunk.loc[(chunk.energy > xlo) & (chunk.energy < xhi)]
print(t1df_win.shape)
t1df = pd.concat([t1df, t1df_win], ignore_index=True)
# -- save to HDF5 output file --
h5_opts = {
"mode":"w", # overwrite existing
"append":False,
"format":"table",
# "complib":"blosc:zlib", # no compression, increases I/O speed
# "complevel":1,
# "data_columns":["ievt"]
}
t1df.reset_index(inplace=True)
t1df.to_hdf(f_tier1, key="df_windowed", **h5_opts)
print("wrote file:", f_tier1)
def process_ds(ds, f_grid, f_opt, f_tier1, f_tier2):
"""
    Process the windowed waveforms once for each set of grid parameters
    and determine the trapezoid parameters that minimize
    the FWHM of the peak (fitting to the peakshape function).
NOTE: I don't think we need to multiprocess this, since that's already
being done in ProcessTier1
"""
from pygama.dsp.base import Intercom
from pygama.io.tier1 import ProcessTier1
import pygama.io.decoders.digitizers as pgd
df_grid = pd.read_hdf(f_grid)
if os.path.exists(f_opt):
os.remove(f_opt)
# check the windowed file
# tmp = pd.read_hdf(f_tier1)
# nevt = len(tmp)
t_start = time.time()
for i, row in df_grid.iterrows():
# estimate remaining time in scan
if i == 4:
diff = time.time() - t_start
tot = diff/5 * len(df_grid) / 60
tot -= diff / 60
print(f"Estimated remaining time: {tot:.2f} mins")
rise, flat, rc = row
print(f"Row {i}/{len(df_grid)}, rise {rise} flat {flat} rc {rc}")
# custom tier 1 processor list -- very minimal
proc_list = {
"clk" : 100e6,
"fit_bl" : {"ihi":500, "order":1},
"blsub" : {},
"trap" : [
{"wfout":"wf_etrap", "wfin":"wf_blsub",
"rise":rise, "flat":flat, "decay":rc},
{"wfout":"wf_atrap", "wfin":"wf_blsub",
"rise":0.04, "flat":0.1, "fall":2} # could vary these too
],
"get_max" : [{"wfin":"wf_etrap"}, {"wfin":"wf_atrap"}],
# "ftp" : {"test":1}
"ftp" : {}
}
proc = Intercom(proc_list)
dig = pgd.SIS3302Decoder
dig.decoder_name = "df_windowed"
dig.class_name = None
out_dir = "/".join(f_tier2.split("/")[:-1])
# process silently
ProcessTier1(f_tier1, proc, output_dir=out_dir, overwrite=True,
verbose=False, multiprocess=True, nevt=np.inf, ioff=0,
chunk=ds.config["chunksize"], run=ds.runs[0],
t2_file=f_tier2, digitizers=[dig])
# load the temporary file and append to the main output file
df_key = f"opt_{i}"
t2df = pd.read_hdf(f_tier2)
t2df.to_hdf(f_opt, df_key)
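# --- Illustrative sketch (not part of the original script) ---
# Each grid point i gets its own key ("opt_i") in f_opt, so a single Tier 2
# result can be inspected without re-running the whole scan.
def _load_grid_point(f_opt, i):
    return pd.read_hdf(f_opt, key=f"opt_{i}")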
def get_fwhm(f_grid, f_opt, verbose=False):
"""
duplicate the plot from Figure 2.7 of Kris Vorren's thesis (and much more!)
this code fits the e_ftp peak to the HPGe peakshape function (same as in
calibration.py) and writes a new column to df_grid, "fwhm".
"""
df_grid = pd.read_hdf(f_grid)
# declare some new columns for df_grid
cols = ["fwhm", "rchi2"]
for col in cols:
df_grid[col] = np.nan
# loop over the keys and fit each e_ftp spectrum to the peakshape function
print("i rise flat rc fwhm rchi2")
for i, row in df_grid.iterrows():
key = f"opt_{i}"
t2df = pd.read_hdf(f_opt, key=f"opt_{i}")
# auto-histogram spectrum near the uncalibrated peak
hE, xE, vE = ph.get_hist(t2df["e_ftp"], bins=1000, trim=False)
# shift the histogram to be roughly centered at 0 and symmetric
mu = xE[np.argmax(hE)]
xE -= mu
imax = np.argmax(hE)
hmax = hE[imax]
idx = np.where(hE > hmax/2) # fwhm
ilo, ihi = idx[0][0], idx[0][-1]
sig = (xE[ihi] - xE[ilo]) / 2.355
idx = np.where((xE > -8 * sig) & (xE < 8 * sig))
ilo, ihi = idx[0][0], idx[0][-1]-1
xE = xE[ilo-1:ihi]
hE, vE = hE[ilo:ihi], vE[ilo:ihi]
# plt.plot(xE[1:], hE, ls='steps', c='r', lw=3)
# plt.show()
# exit()
# set initial guesses for the peakshape function. could all be improved
mu = 0
sigma = 5 # radford uses an input linear function
hstep = 0.001
htail = 0.5
tau = 10
bg0 = np.mean(hE[:20])
amp = np.sum(hE)
x0 = [mu, sigma, hstep, htail, tau, bg0, amp]
xF, xF_cov = pf.fit_hist(pf.radford_peak, hE, xE, var=vE, guess=x0)
# goodness of fit
chisq = []
for j, h in enumerate(hE):
model = pf.radford_peak(xE[j], *xF)
diff = (model - h)**2 / model
chisq.append(abs(diff))
# update the master dataframe
fwhm = xF[1] * 2.355
rchi2 = sum(np.array(chisq) / len(hE))
df_grid.at[i, "fwhm"] = fwhm
df_grid.at[i, "rchi2"] = rchi2
rise, flat, rc = row[:3]
label = f"{i} {rise:.2f} {flat:.2f} {rc:.0f} {fwhm:.2f} {rchi2:.2f}"
print(label)
if verbose:
# plot every dang fit
plt.cla()
# peakshape function
plt.plot(xE, pf.radford_peak(xE, *x0), c='orange', label='guess')
plt.plot(xE, pf.radford_peak(xE, *xF), c='r', label='peakshape')
plt.axvline(mu, c='g')
# plot individual components
# tail_hi, gaus, bg, step, tail_lo = pf.radford_peak(xE, *xF, components=True)
# gaus = np.array(gaus)
# step = np.array(step)
# tail_lo = np.array(tail_lo)
# plt.plot(xE, gaus * tail_hi, ls="--", lw=2, c='g', label="gaus+hi_tail")
# plt.plot(xE, step + bg, ls='--', lw=2, c='m', label='step + bg')
# plt.plot(xE, tail_lo, ls='--', lw=2, c='k', label='tail_lo')
plt.plot(xE[1:], hE, ls='steps', lw=1, c='b', label="data")
plt.plot(np.nan, np.nan, c='w', label=f"fwhm = {fwhm:.2f} uncal.")
plt.plot(np.nan, np.nan, c='w', label=label)
plt.xlabel("Energy (uncal.)", ha='right', x=1)
plt.ylabel("Counts", ha='right', y=1)
plt.legend(loc=2, fontsize=12)
plt.show()
# write the updated df_grid to the output file.
if not verbose:
df_grid.to_hdf(f_grid, key="pygama_optimization")
print("wrote output file")
def plot_fwhm(f_grid):
"""
"""
df_grid = | pd.read_hdf(f_grid) | pandas.read_hdf |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `perfume` package.
perfume is fairly visualization-heavy and deals with stochastic
events, so end-to-end testing isn't really aimed for here. But we can
test the transformations in analyze somewhat.
"""
import unittest
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as pdt
from perfume import analyze
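# --- Illustrative sketch (not part of the original tests) ---
# The analyze functions operate on a "samples" frame whose columns form a
# two-level MultiIndex of (function name, begin/end) timestamps; this mirrors
# the frame built in TestAnalyze.setUp below and is only for reference.
def _toy_samples():
    columns = pd.MultiIndex.from_product([["fn1", "fn2"], ["begin", "end"]])
    data = [[0.0, 1.1, 1.3, 2.8],
            [2.9, 4.0, 4.2, 5.7]]
    return pd.DataFrame(data, columns=columns)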
class TestAnalyze(unittest.TestCase):
"""Tests for `perfume.analyze` module."""
def setUp(self):
samples = []
t = 1.0
for i in range(20):
sample = []
sample.append(t)
t += 1.1
sample.append(t)
t += 0.2
sample.append(t)
t += 1.5
sample.append(t)
t += 0.1
samples.append(sample)
self.samples = pd.DataFrame(
data=samples,
columns=pd.MultiIndex(
levels=[["fn1", "fn2"], ["begin", "end"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
),
)
def tearDown(self):
del self.samples
def test_timings(self):
"""Test that timings gives us the right results."""
timings = analyze.timings(self.samples)
pdt.assert_index_equal(timings.index, self.samples.index)
self.assertSetEqual(
set(timings.columns), set(self.samples.columns.get_level_values(0))
)
self.assertEqual(len(timings.columns), len(self.samples.columns) / 2)
npt.assert_array_almost_equal(timings["fn1"], 1.1)
npt.assert_array_almost_equal(timings["fn2"], 1.5)
def test_isolate(self):
"""Test that isolate gives us the right results."""
isolated = analyze.isolate(self.samples)
pdt.assert_index_equal(isolated.index, self.samples.index)
pdt.assert_index_equal(isolated.columns, self.samples.columns)
pdt.assert_frame_equal(
analyze.timings(isolated), analyze.timings(self.samples)
)
npt.assert_array_almost_equal(
isolated["fn1"]["begin"], np.arange(20) * 1.1
)
npt.assert_array_almost_equal(
isolated["fn1"]["end"], 1.1 + (np.arange(20) * 1.1)
)
npt.assert_array_almost_equal(
isolated["fn2"]["begin"], np.arange(20) * 1.5
)
npt.assert_array_almost_equal(
isolated["fn2"]["end"], 1.5 + (np.arange(20) * 1.5)
)
def test_timings_in_context(self):
"""Test that timings_in_context gives us the right results."""
in_context = analyze.timings_in_context(self.samples)
# Since each "function" has a fixed frequency, we can create
# two series with TimedeltaIndexes and align them into the
# same DataFrame, which should be what timings_in_context
# gives us.
fn1_expected = pd.Series(
1.1,
index=pd.timedelta_range(
freq= | pd.Timedelta("1.1s") | pandas.Timedelta |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
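# --- Illustrative sketch (not part of the original test module) ---
# The fixtures below build hierarchical indexes from explicit levels/labels
# (the legacy `labels=` keyword used throughout this module); a minimal
# example of that construction style:
def _example_multiindex():
    return MultiIndex(levels=[['foo', 'bar'], ['one', 'two']],
                      labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
                      names=['first', 'second'])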
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
| assert_frame_equal(unstacked, expected) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import sklearn as sk
import matplotlib.pyplot as plt
from sklearn import metrics
from json import *
import requests
pd.set_option('display.max_rows', 21000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 150)
def read_csv():
dataset = | pd.read_csv('earthquakes.csv') | pandas.read_csv |
from os.path import abspath, dirname, join
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from utils import make_dir, numpy_ewma_vectorized_v2, plot_postprocess, print_init, label_converter, series_indexer, \
color4label
class LogData(object):
def __init__(self):
self.mean = []
self.std = []
self.min = []
self.max = []
def log(self, sample):
"""
:param sample: data for logging specified as a numpy.array
:return:
"""
self.mean.append(sample.mean())
self.std.append(sample.std())
self.min.append(sample.min())
self.max.append(sample.max())
def save(self, group):
"""
:param group: the reference to the group level hierarchy of a .hdf5 file to save the data
:return:
"""
for key, val in self.__dict__.items():
group.create_dataset(key, data=val)
def load(self, group, decimate_step=100):
"""
        :param decimate_step: keep only every decimate_step-th logged value when loading
:param group: the reference to the group level hierarchy of a .hdf5 file to load
:return:
"""
# read in parameters
# [()] is needed to read in the whole array if you don't do that,
# it doesn't read the whole data but instead gives you lazy access to sub-parts
# (very useful when the array is huge but you only need a small part of it).
# https://stackoverflow.com/questions/10274476/how-to-export-hdf5-file-to-numpy-using-h5py
self.mean = group["mean"][()][::decimate_step]
self.std = group["std"][()][::decimate_step]
self.min = group["min"][()][::decimate_step]
self.max = group["max"][()][::decimate_step]
def plot_mean_min_max(self, label):
plt.fill_between(range(len(self.mean)), self.max, self.min, alpha=.5)
plt.plot(self.mean, label=label)
def plot_mean_std(self, label):
mean = np.array(self.mean)
plt.fill_between(range(len(self.mean)), mean + self.std, mean - self.std, alpha=.5)
plt.plot(self.mean, label=label)
class TemporalLogger(object):
def __init__(self, env_name, timestamp, log_dir, *args):
"""
Creates a TemporalLogger object. If the folder structure is nonexistent, it will also be created
:param *args:
:param env_name: name of the environment
:param timestamp: timestamp as a string
:param log_dir: logging directory, if it is None, then logging will be at the same hierarchy level as src/
"""
super().__init__()
self.timestamp = timestamp
# file structure
self.base_dir = join(dirname(dirname(abspath(__file__))), "log") if log_dir is None else log_dir
self.data_dir = join(self.base_dir, env_name)
make_dir(self.base_dir)
make_dir(self.data_dir)
# data
for data in args:
self.__dict__[data] = LogData()
def log(self, **kwargs):
"""
Function for storing the new values of the given attribute
:param **kwargs:
:return:
"""
for key, value in kwargs.items():
self.__dict__[key].log(value)
def save(self, *args):
"""
Saves the temporal statistics into a .hdf5 file
        :param *args: names of the logged attributes (LogData members) to save
:return:
"""
with h5py.File(join(self.data_dir, 'time_log_' + self.timestamp + '.hdf5'), 'w') as f:
for arg in args:
self.__dict__[arg].save(f.create_group(arg))
def load(self, filename, decimate_step=100):
"""
Loads the temporal statistics and fills the attributes of the class
        :param decimate_step: keep only every decimate_step-th logged value when loading
:param filename: name of the .hdf5 file to load
:return:
"""
if not filename.endswith('.hdf5'):
filename = filename + '.hdf5'
with h5py.File(join(self.data_dir, filename), 'r') as f:
for key, value in self.__dict__.items():
if isinstance(value, LogData):
value.load(f[key], decimate_step)
def plot_mean_min_max(self, *args):
fig, ax, _ = print_init(False)
for arg in args:
# breakpoint()
if arg in self.__dict__.keys(): # and isinstance(self.__dict__[arg], LogData):
self.__dict__[arg].plot_mean_min_max(arg)
plt.title("Mean and min-max statistics")
plot_postprocess(ax, f"Mean and min-max statistics of {args}",
ylabel=r"$\mu$")
def plot_mean_std(self, *args):
fig, ax, _ = print_init(False)
for arg in args:
if arg in self.__dict__.keys():
self.__dict__[arg].plot_mean_std(arg)
plt.title("Mean and standard deviation statistics")
plot_postprocess(ax, f"Mean and standard deviation statistics of {args}",
ylabel=r"$\mu$")
class EnvLogger(object):
def __init__(self, env_name, log_dir, decimate_step=250) -> None:
super().__init__()
self.env_name = env_name
self.log_dir = log_dir
self.decimate_step = decimate_step
self.data_dir = join(self.log_dir, self.env_name)
self.fig_dir = self.base_dir = join(dirname(dirname(abspath(__file__))), join("figures", self.env_name))
make_dir(self.fig_dir)
self.params_df = pd.read_csv(join(self.data_dir, "params.tsv"), "\t")
self.logs = {}
mean_reward = []
mean_feat_std = []
mean_proxy = []
# load trainings
for timestamp in self.params_df.timestamp:
self.logs[timestamp] = TemporalLogger(self.env_name, timestamp, self.log_dir, *["rewards", "features"])
self.logs[timestamp].load(join(self.data_dir, f"time_log_{timestamp}"), self.decimate_step)
# calculate statistics
mean_reward.append(self.logs[timestamp].__dict__["rewards"].mean.mean())
mean_feat_std.append(self.logs[timestamp].__dict__["features"].std.mean())
mean_proxy.append(mean_reward[-1] * mean_feat_std[-1])
# append statistics to df
self.params_df["mean_reward"] = pd.Series(mean_reward, index=self.params_df.index)
self.params_df["mean_feat_std"] = pd.Series(mean_feat_std, index=self.params_df.index)
self.params_df["mean_proxy"] = | pd.Series(mean_proxy, index=self.params_df.index) | pandas.Series |